repo
stringclasses 1
value | pull_number
int64 878
3.02k
| instance_id
stringclasses 9
values | issue_numbers
listlengths 1
2
| base_commit
stringclasses 9
values | patch
stringclasses 9
values | test_patch
stringclasses 9
values | problem_statement
stringclasses 9
values | hints_text
stringclasses 4
values | created_at
stringclasses 9
values | version
stringclasses 3
values |
|---|---|---|---|---|---|---|---|---|---|---|
AleoNet/snarkOS
| 3,015
|
AleoNet__snarkOS-3015
|
[
"2983"
] |
bd165ecfcb81e72a167b2a984ba28531d543ab44
|
diff --git a/node/bft/events/src/challenge_response.rs b/node/bft/events/src/challenge_response.rs
index 0fc678a86d..aad59f760f 100644
--- a/node/bft/events/src/challenge_response.rs
+++ b/node/bft/events/src/challenge_response.rs
@@ -17,6 +17,7 @@ use super::*;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ChallengeResponse<N: Network> {
pub signature: Data<Signature<N>>,
+ pub nonce: u64,
}
impl<N: Network> EventTrait for ChallengeResponse<N> {
@@ -30,6 +31,7 @@ impl<N: Network> EventTrait for ChallengeResponse<N> {
impl<N: Network> ToBytes for ChallengeResponse<N> {
fn write_le<W: Write>(&self, mut writer: W) -> IoResult<()> {
self.signature.write_le(&mut writer)?;
+ self.nonce.write_le(&mut writer)?;
Ok(())
}
}
@@ -37,8 +39,9 @@ impl<N: Network> ToBytes for ChallengeResponse<N> {
impl<N: Network> FromBytes for ChallengeResponse<N> {
fn read_le<R: Read>(mut reader: R) -> IoResult<Self> {
let signature = Data::read_le(&mut reader)?;
+ let nonce = u64::read_le(&mut reader)?;
- Ok(Self { signature })
+ Ok(Self { signature, nonce })
}
}
@@ -53,7 +56,7 @@ pub mod prop_tests {
};
use bytes::{Buf, BufMut, BytesMut};
- use proptest::prelude::{BoxedStrategy, Strategy};
+ use proptest::prelude::{any, BoxedStrategy, Strategy};
use test_strategy::proptest;
type CurrentNetwork = snarkvm::prelude::Testnet3;
@@ -70,7 +73,9 @@ pub mod prop_tests {
}
pub fn any_challenge_response() -> BoxedStrategy<ChallengeResponse<CurrentNetwork>> {
- any_signature().prop_map(|sig| ChallengeResponse { signature: Data::Object(sig) }).boxed()
+ (any_signature(), any::<u64>())
+ .prop_map(|(sig, nonce)| ChallengeResponse { signature: Data::Object(sig), nonce })
+ .boxed()
}
#[proptest]
diff --git a/node/bft/events/src/lib.rs b/node/bft/events/src/lib.rs
index d49073b1c5..7a3c6d30ef 100644
--- a/node/bft/events/src/lib.rs
+++ b/node/bft/events/src/lib.rs
@@ -118,7 +118,7 @@ impl<N: Network> From<DisconnectReason> for Event<N> {
impl<N: Network> Event<N> {
/// The version of the event protocol; it can be incremented in order to force users to update.
- pub const VERSION: u32 = 5;
+ pub const VERSION: u32 = 6;
/// Returns the event name.
#[inline]
diff --git a/node/bft/src/gateway.rs b/node/bft/src/gateway.rs
index d6b8c000d4..ecbfdb110a 100644
--- a/node/bft/src/gateway.rs
+++ b/node/bft/src/gateway.rs
@@ -1176,11 +1176,13 @@ impl<N: Network> Gateway<N> {
/* Step 3: Send the challenge response. */
// Sign the counterparty nonce.
- let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else {
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
+ let Ok(our_signature) = self.account.sign_bytes(&data, rng) else {
return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")));
};
// Send the challenge response.
- let our_response = ChallengeResponse { signature: Data::Object(our_signature) };
+ let our_response = ChallengeResponse { signature: Data::Object(our_signature), nonce: response_nonce };
send_event(&mut framed, peer_addr, Event::ChallengeResponse(our_response)).await?;
// Add the peer to the gateway.
@@ -1229,11 +1231,13 @@ impl<N: Network> Gateway<N> {
let rng = &mut rand::rngs::OsRng;
// Sign the counterparty nonce.
- let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else {
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
+ let Ok(our_signature) = self.account.sign_bytes(&data, rng) else {
return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")));
};
// Send the challenge response.
- let our_response = ChallengeResponse { signature: Data::Object(our_signature) };
+ let our_response = ChallengeResponse { signature: Data::Object(our_signature), nonce: response_nonce };
send_event(&mut framed, peer_addr, Event::ChallengeResponse(our_response)).await?;
// Sample a random nonce.
@@ -1290,14 +1294,14 @@ impl<N: Network> Gateway<N> {
expected_nonce: u64,
) -> Option<DisconnectReason> {
// Retrieve the components of the challenge response.
- let ChallengeResponse { signature } = response;
+ let ChallengeResponse { signature, nonce } = response;
// Perform the deferred non-blocking deserialization of the signature.
let Ok(signature) = spawn_blocking!(signature.deserialize_blocking()) else {
warn!("{CONTEXT} Gateway handshake with '{peer_addr}' failed (cannot deserialize the signature)");
return Some(DisconnectReason::InvalidChallengeResponse);
};
// Verify the signature.
- if !signature.verify_bytes(&peer_address, &expected_nonce.to_le_bytes()) {
+ if !signature.verify_bytes(&peer_address, &[expected_nonce.to_le_bytes(), nonce.to_le_bytes()].concat()) {
warn!("{CONTEXT} Gateway handshake with '{peer_addr}' failed (invalid signature)");
return Some(DisconnectReason::InvalidChallengeResponse);
}
diff --git a/node/router/messages/src/challenge_response.rs b/node/router/messages/src/challenge_response.rs
index 3c75d4db19..d51c5bb717 100644
--- a/node/router/messages/src/challenge_response.rs
+++ b/node/router/messages/src/challenge_response.rs
@@ -25,6 +25,7 @@ use std::borrow::Cow;
pub struct ChallengeResponse<N: Network> {
pub genesis_header: Header<N>,
pub signature: Data<Signature<N>>,
+ pub nonce: u64,
}
impl<N: Network> MessageTrait for ChallengeResponse<N> {
@@ -38,13 +39,18 @@ impl<N: Network> MessageTrait for ChallengeResponse<N> {
impl<N: Network> ToBytes for ChallengeResponse<N> {
fn write_le<W: io::Write>(&self, mut writer: W) -> io::Result<()> {
self.genesis_header.write_le(&mut writer)?;
- self.signature.write_le(&mut writer)
+ self.signature.write_le(&mut writer)?;
+ self.nonce.write_le(&mut writer)
}
}
impl<N: Network> FromBytes for ChallengeResponse<N> {
fn read_le<R: io::Read>(mut reader: R) -> io::Result<Self> {
- Ok(Self { genesis_header: Header::read_le(&mut reader)?, signature: Data::read_le(reader)? })
+ Ok(Self {
+ genesis_header: Header::read_le(&mut reader)?,
+ signature: Data::read_le(&mut reader)?,
+ nonce: u64::read_le(reader)?,
+ })
}
}
@@ -80,8 +86,12 @@ pub mod prop_tests {
}
pub fn any_challenge_response() -> BoxedStrategy<ChallengeResponse<CurrentNetwork>> {
- (any_signature(), any_genesis_header())
- .prop_map(|(sig, genesis_header)| ChallengeResponse { signature: Data::Object(sig), genesis_header })
+ (any_signature(), any_genesis_header(), any::<u64>())
+ .prop_map(|(sig, genesis_header, nonce)| ChallengeResponse {
+ signature: Data::Object(sig),
+ genesis_header,
+ nonce,
+ })
.boxed()
}
diff --git a/node/router/messages/src/lib.rs b/node/router/messages/src/lib.rs
index baa512b5b4..09b065a49d 100644
--- a/node/router/messages/src/lib.rs
+++ b/node/router/messages/src/lib.rs
@@ -111,7 +111,7 @@ impl<N: Network> From<DisconnectReason> for Message<N> {
impl<N: Network> Message<N> {
/// The version of the network protocol; it can be incremented in order to force users to update.
- pub const VERSION: u32 = 13;
+ pub const VERSION: u32 = 14;
/// Returns the message name.
#[inline]
diff --git a/node/router/src/handshake.rs b/node/router/src/handshake.rs
index 7ccb67fa89..195298eaea 100644
--- a/node/router/src/handshake.rs
+++ b/node/router/src/handshake.rs
@@ -164,12 +164,15 @@ impl<N: Network> Router<N> {
}
/* Step 3: Send the challenge response. */
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
// Sign the counterparty nonce.
- let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else {
+ let Ok(our_signature) = self.account.sign_bytes(&data, rng) else {
return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")));
};
// Send the challenge response.
- let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) };
+ let our_response =
+ ChallengeResponse { genesis_header, signature: Data::Object(our_signature), nonce: response_nonce };
send(&mut framed, peer_addr, Message::ChallengeResponse(our_response)).await?;
// Add the peer to the router.
@@ -213,11 +216,14 @@ impl<N: Network> Router<N> {
let rng = &mut OsRng;
// Sign the counterparty nonce.
- let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng) else {
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
+ let Ok(our_signature) = self.account.sign_bytes(&data, rng) else {
return Err(error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")));
};
// Send the challenge response.
- let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) };
+ let our_response =
+ ChallengeResponse { genesis_header, signature: Data::Object(our_signature), nonce: response_nonce };
send(&mut framed, peer_addr, Message::ChallengeResponse(our_response)).await?;
// Sample a random nonce.
@@ -303,7 +309,7 @@ impl<N: Network> Router<N> {
expected_nonce: u64,
) -> Option<DisconnectReason> {
// Retrieve the components of the challenge response.
- let ChallengeResponse { genesis_header, signature } = response;
+ let ChallengeResponse { genesis_header, signature, nonce } = response;
// Verify the challenge response, by checking that the block header matches.
if genesis_header != expected_genesis_header {
@@ -316,7 +322,7 @@ impl<N: Network> Router<N> {
return Some(DisconnectReason::InvalidChallengeResponse);
};
// Verify the signature.
- if !signature.verify_bytes(&peer_address, &expected_nonce.to_le_bytes()) {
+ if !signature.verify_bytes(&peer_address, &[expected_nonce.to_le_bytes(), nonce.to_le_bytes()].concat()) {
warn!("Handshake with '{peer_addr}' failed (invalid signature)");
return Some(DisconnectReason::InvalidChallengeResponse);
}
|
diff --git a/node/tests/common/test_peer.rs b/node/tests/common/test_peer.rs
index d480c6a3e1..8d9d5e39bb 100644
--- a/node/tests/common/test_peer.rs
+++ b/node/tests/common/test_peer.rs
@@ -140,10 +140,13 @@ impl Handshake for TestPeer {
let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
// Sign the nonce.
- let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap();
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
+ let signature = self.account().sign_bytes(&data, rng).unwrap();
// Send the challenge response.
- let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) };
+ let our_response =
+ ChallengeResponse { genesis_header, signature: Data::Object(signature), nonce: response_nonce };
framed.send(Message::ChallengeResponse(our_response)).await?;
}
ConnectionSide::Responder => {
@@ -151,10 +154,13 @@ impl Handshake for TestPeer {
let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
// Sign the nonce.
- let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap();
+ let response_nonce: u64 = rng.gen();
+ let data = [peer_request.nonce.to_le_bytes(), response_nonce.to_le_bytes()].concat();
+ let signature = self.account().sign_bytes(&data, rng).unwrap();
// Send our challenge bundle.
- let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) };
+ let our_response =
+ ChallengeResponse { genesis_header, signature: Data::Object(signature), nonce: response_nonce };
framed.send(Message::ChallengeResponse(our_response)).await?;
let our_request = ChallengeRequest::new(local_ip.port(), self.node_type(), self.address(), rng.gen());
framed.send(Message::ChallengeRequest(our_request)).await?;
|
[Bug] Validator sign arbitrary nonce can lead to downgraded length of secure bit.
# https://hackerone.com/reports/2279770
## Summary:
During handshake, validator directly sign nonce (u64) sent from counterparty:
```
let Ok(our_signature) = self.account.sign_bytes(&peer_request.nonce.to_le_bytes(), rng)
```
The attacker can exploit this to let validator to sign the message they want.
For example, the attacker can brute force and try to find some transaction `hash` that satify `hash < u64.max`. Then let the validator sign this hash as `peer_request.nonce`. In this case the length of secure bit is downgraded from `252/2 = 126` bits to `(252-64)/2 = 94` bits.
## Proof-of-Concept (PoC)
As described above.
## Fix Suggestions:
Instead of directly sign the nonce, the validator can sign something like `hash('validator_handshake_nonce', nonce)`. Also, consider adding prefix string when generating `batch_id` and `block_hash` to avoid future schema conflict.
|
2024-01-17T23:29:29Z
|
2.2
|
|
AleoNet/snarkOS
| 2,902
|
AleoNet__snarkOS-2902
|
[
"2894"
] |
4896a1200a4605d1de6fe6cb53e1efa9ccdb6152
|
diff --git a/Cargo.lock b/Cargo.lock
index e98d384892..b9f871cccb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3096,6 +3096,7 @@ dependencies = [
"num_cpus",
"once_cell",
"parking_lot",
+ "paste",
"pea2pea",
"rand",
"rand_chacha",
diff --git a/node/Cargo.toml b/node/Cargo.toml
index c76dac9006..664bf96a06 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -113,6 +113,9 @@ version = "0.1"
[dev-dependencies.deadline]
version = "0.2"
+[dev-dependencies.paste]
+version = "1"
+
[dev-dependencies.pea2pea]
version = "0.46"
diff --git a/node/router/src/helpers/cache.rs b/node/router/src/helpers/cache.rs
index befd5e9a90..87a0d8831e 100644
--- a/node/router/src/helpers/cache.rs
+++ b/node/router/src/helpers/cache.rs
@@ -53,6 +53,8 @@ pub struct Cache<N: Network> {
seen_outbound_solutions: RwLock<LinkedHashMap<SolutionKey<N>, OffsetDateTime>>,
/// The map of transaction IDs to their last seen timestamp.
seen_outbound_transactions: RwLock<LinkedHashMap<TransactionKey<N>, OffsetDateTime>>,
+ /// The map of peer IPs to the number of sent peer requests.
+ seen_outbound_peer_requests: RwLock<IndexMap<SocketAddr, u32>>,
}
impl<N: Network> Default for Cache<N> {
@@ -75,6 +77,7 @@ impl<N: Network> Cache<N> {
seen_outbound_puzzle_requests: Default::default(),
seen_outbound_solutions: RwLock::new(LinkedHashMap::with_capacity(MAX_CACHE_SIZE)),
seen_outbound_transactions: RwLock::new(LinkedHashMap::with_capacity(MAX_CACHE_SIZE)),
+ seen_outbound_peer_requests: Default::default(),
}
}
}
@@ -166,6 +169,21 @@ impl<N: Network> Cache<N> {
) -> Option<OffsetDateTime> {
Self::refresh_and_insert(&self.seen_outbound_transactions, (peer_ip, transaction))
}
+
+ /// Returns `true` if the cache contains a peer request from the given peer.
+ pub fn contains_outbound_peer_request(&self, peer_ip: SocketAddr) -> bool {
+ self.seen_outbound_peer_requests.read().get(&peer_ip).map(|r| *r > 0).unwrap_or(false)
+ }
+
+ /// Increment the peer IP's number of peer requests, returning the updated number of peer requests.
+ pub fn increment_outbound_peer_requests(&self, peer_ip: SocketAddr) -> u32 {
+ Self::increment_counter(&self.seen_outbound_peer_requests, peer_ip)
+ }
+
+ /// Decrement the peer IP's number of peer requests, returning the updated number of peer requests.
+ pub fn decrement_outbound_peer_requests(&self, peer_ip: SocketAddr) -> u32 {
+ Self::decrement_counter(&self.seen_outbound_peer_requests, peer_ip)
+ }
}
impl<N: Network> Cache<N> {
@@ -336,4 +354,35 @@ mod tests {
// Check that the cache still contains the transaction.
assert_eq!(cache.seen_outbound_transactions.read().len(), 1);
}
+
+ #[test]
+ fn test_outbound_peer_request() {
+ let cache = Cache::<CurrentNetwork>::default();
+ let peer_ip = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 1234);
+
+ // Check the cache is empty.
+ assert!(cache.seen_outbound_peer_requests.read().is_empty());
+ assert!(!cache.contains_outbound_peer_request(peer_ip));
+
+ // Increment the peer requests.
+ assert_eq!(cache.increment_outbound_peer_requests(peer_ip), 1);
+
+ // Check the cache contains the peer request.
+ assert!(cache.contains_outbound_peer_request(peer_ip));
+
+ // Increment the peer requests again for the same peer IP.
+ assert_eq!(cache.increment_outbound_peer_requests(peer_ip), 2);
+
+ // Check the cache still contains the peer request.
+ assert!(cache.contains_outbound_peer_request(peer_ip));
+
+ // Decrement the peer requests.
+ assert_eq!(cache.decrement_outbound_peer_requests(peer_ip), 1);
+
+ // Decrement the peer requests again.
+ assert_eq!(cache.decrement_outbound_peer_requests(peer_ip), 0);
+
+ // Check the cache is empty.
+ assert!(!cache.contains_outbound_peer_request(peer_ip));
+ }
}
diff --git a/node/router/src/inbound.rs b/node/router/src/inbound.rs
index ac1790f211..0b68800f9a 100644
--- a/node/router/src/inbound.rs
+++ b/node/router/src/inbound.rs
@@ -117,10 +117,16 @@ pub trait Inbound<N: Network>: Reading + Outbound<N> {
true => Ok(()),
false => bail!("Peer '{peer_ip}' sent an invalid peer request"),
},
- Message::PeerResponse(message) => match self.peer_response(peer_ip, &message.peers) {
- true => Ok(()),
- false => bail!("Peer '{peer_ip}' sent an invalid peer response"),
- },
+ Message::PeerResponse(message) => {
+ if !self.router().cache.contains_outbound_peer_request(peer_ip) {
+ bail!("Peer '{peer_ip}' is not following the protocol (unexpected peer response)")
+ }
+
+ match self.peer_response(peer_ip, &message.peers) {
+ true => Ok(()),
+ false => bail!("Peer '{peer_ip}' sent an invalid peer response"),
+ }
+ }
Message::Ping(message) => {
// Ensure the message protocol version is not outdated.
if message.version < Message::<N>::VERSION {
diff --git a/node/router/src/lib.rs b/node/router/src/lib.rs
index b51284190b..332cf07075 100644
--- a/node/router/src/lib.rs
+++ b/node/router/src/lib.rs
@@ -25,7 +25,6 @@ mod helpers;
pub use helpers::*;
mod handshake;
-pub use handshake::*;
mod heartbeat;
pub use heartbeat::*;
diff --git a/node/router/src/outbound.rs b/node/router/src/outbound.rs
index ab1a4d037a..20713962c9 100644
--- a/node/router/src/outbound.rs
+++ b/node/router/src/outbound.rs
@@ -59,6 +59,10 @@ pub trait Outbound<N: Network>: Writing<Message = Message<N>> {
if matches!(message, Message::PuzzleRequest(_)) {
self.router().cache.increment_outbound_puzzle_requests(peer_ip);
}
+ // If the message type is a peer request, increment the cache.
+ if matches!(message, Message::PeerRequest(_)) {
+ self.router().cache.increment_outbound_peer_requests(peer_ip);
+ }
// Retrieve the message name.
let name = message.name();
// Send the message to the peer.
|
diff --git a/node/tests/disconnect.rs b/node/tests/disconnect.rs
new file mode 100644
index 0000000000..5b6ec3b925
--- /dev/null
+++ b/node/tests/disconnect.rs
@@ -0,0 +1,191 @@
+// Copyright (C) 2019-2023 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+// http://www.apache.org/licenses/LICENSE-2.0
+
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#![recursion_limit = "256"]
+
+#[allow(dead_code)]
+mod common;
+use common::{node::*, test_peer::TestPeer};
+
+use snarkos_node_router::Outbound;
+use snarkos_node_tcp::P2P;
+
+use deadline::deadline;
+use std::time::Duration;
+
+// Macro to simply construct disconnect cases.
+// Syntax:
+// - (full_node |> test_peer): full node disconnects from the synthetic test peer.
+// - (full_node <| test_peer): synthetic test peer disconnects from the full node.
+//
+// Test naming: full_node::handshake_<node or peer>_side::test_peer.
+macro_rules! test_disconnect {
+ ($node_type:ident, $peer_type:ident, $node_disconnects:expr, $($attr:meta)?) => {
+ #[tokio::test]
+ $(#[$attr])?
+ async fn $peer_type() {
+ use deadline::deadline;
+ use pea2pea::Pea2Pea;
+ use snarkos_node_router::Outbound;
+ use snarkos_node_tcp::P2P;
+ use std::time::Duration;
+
+ // $crate::common::initialise_logger(2);
+
+ // Spin up a full node.
+ let node = $crate::$node_type().await;
+
+ // Spin up a test peer (synthetic node).
+ let peer = $crate::TestPeer::$peer_type().await;
+ let peer_addr = peer.node().listening_addr().unwrap();
+
+ // Connect the node to the test peer.
+ node.router().connect(peer_addr).unwrap().await.unwrap();
+
+ // Check the peer counts.
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 1);
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.tcp().num_connected() == 1);
+ let peer_clone = peer.clone();
+ deadline!(Duration::from_secs(5), move || peer_clone.node().num_connected() == 1);
+
+ // Disconnect.
+ if $node_disconnects {
+ node.router().disconnect(node.tcp().connected_addrs()[0]).await.unwrap();
+ } else {
+ peer.node().disconnect(peer.node().connected_addrs()[0]).await;
+ }
+
+ // Check the peer counts have been updated.
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 0);
+ deadline!(Duration::from_secs(5), move || node.tcp().num_connected() == 0);
+ deadline!(Duration::from_secs(5), move || peer.node().num_connected() == 0);
+
+ }
+ };
+
+ // Node side disconnect.
+ ($($node_type:ident |> $peer_type:ident $(= $attr:meta)?),*) => {
+ mod disconnect_node_side {
+ $(
+ test_disconnect!($node_type, $peer_type, true, $($attr)?);
+ )*
+ }
+ };
+
+ // Peer side disconnect.
+ ($($node_type:ident <| $peer_type:ident $(= $attr:meta)?),*) => {
+ mod disconnect_peer_side {
+ $(
+ test_disconnect!($node_type, $peer_type, false, $($attr)?);
+ )*
+ }
+ };
+}
+
+mod client {
+ // Full node disconnects from synthetic peer.
+ test_disconnect! {
+ client |> client,
+ client |> validator,
+ client |> prover
+ }
+
+ // Synthetic peer disconnects from the full node.
+ test_disconnect! {
+ client <| client,
+ client <| validator,
+ client <| prover
+ }
+}
+
+mod prover {
+ // Full node disconnects from synthetic peer.
+ test_disconnect! {
+ prover |> client,
+ prover |> validator,
+ prover |> prover
+ }
+
+ // Synthetic peer disconnects from the full node.
+ test_disconnect! {
+ prover <| client,
+ prover <| validator,
+ prover <| prover
+ }
+}
+
+mod validator {
+ // Full node disconnects from synthetic peer.
+ test_disconnect! {
+ validator |> client,
+ validator |> validator,
+ validator |> prover
+ }
+
+ // Synthetic peer disconnects from the full node.
+ test_disconnect! {
+ validator <| client,
+ validator <| validator,
+ validator <| prover
+ }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn duplicate_disconnect_attempts() {
+ // common::initialise_logger(3);
+
+ // Spin up 2 full nodes.
+ let node1 = validator().await;
+ let node2 = validator().await;
+ let addr2 = node2.tcp().listening_addr().unwrap();
+
+ // Connect node1 to node2.
+ assert!(node1.router().connect(addr2).unwrap().await.unwrap());
+
+ // Prepare disconnect attempts.
+ let node1_clone = node1.clone();
+ let disconn1 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
+ let node1_clone = node1.clone();
+ let disconn2 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
+ let node1_clone = node1.clone();
+ let disconn3 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
+
+ // Attempt to disconnect the 1st node from the other one several times at once.
+ let (result1, result2, result3) = tokio::join!(disconn1, disconn2, disconn3);
+ // A small anti-flakiness buffer.
+
+ // Count the successes.
+ let mut successes = 0;
+ if result1.unwrap() {
+ successes += 1;
+ }
+ if result2.unwrap() {
+ successes += 1;
+ }
+ if result3.unwrap() {
+ successes += 1;
+ }
+
+ // There may only be a single success.
+ assert_eq!(successes, 1);
+
+ // Connection checks.
+ let node1_clone = node1.clone();
+ deadline!(Duration::from_secs(5), move || node1_clone.router().number_of_connected_peers() == 0);
+ let node2_clone = node2.clone();
+ deadline!(Duration::from_secs(5), move || node2_clone.router().number_of_connected_peers() == 0);
+}
diff --git a/node/tests/peering.rs b/node/tests/peering.rs
index 5b6ec3b925..1bae7f6ddc 100644
--- a/node/tests/peering.rs
+++ b/node/tests/peering.rs
@@ -12,180 +12,71 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#![recursion_limit = "256"]
-
#[allow(dead_code)]
mod common;
-use common::{node::*, test_peer::TestPeer};
+use common::test_peer::TestPeer;
-use snarkos_node_router::Outbound;
+use snarkos_node_router::{
+ messages::{Message, PeerResponse},
+ Outbound,
+};
use snarkos_node_tcp::P2P;
use deadline::deadline;
+use paste::paste;
+use pea2pea::{protocols::Writing, Pea2Pea};
use std::time::Duration;
-// Macro to simply construct disconnect cases.
-// Syntax:
-// - (full_node |> test_peer): full node disconnects from the synthetic test peer.
-// - (full_node <| test_peer): synthetic test peer disconnects from the full node.
-//
-// Test naming: full_node::handshake_<node or peer>_side::test_peer.
-macro_rules! test_disconnect {
- ($node_type:ident, $peer_type:ident, $node_disconnects:expr, $($attr:meta)?) => {
- #[tokio::test]
- $(#[$attr])?
- async fn $peer_type() {
- use deadline::deadline;
- use pea2pea::Pea2Pea;
- use snarkos_node_router::Outbound;
- use snarkos_node_tcp::P2P;
- use std::time::Duration;
-
- // $crate::common::initialise_logger(2);
-
- // Spin up a full node.
- let node = $crate::$node_type().await;
-
- // Spin up a test peer (synthetic node).
- let peer = $crate::TestPeer::$peer_type().await;
- let peer_addr = peer.node().listening_addr().unwrap();
-
- // Connect the node to the test peer.
- node.router().connect(peer_addr).unwrap().await.unwrap();
-
- // Check the peer counts.
- let node_clone = node.clone();
- deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 1);
- let node_clone = node.clone();
- deadline!(Duration::from_secs(5), move || node_clone.tcp().num_connected() == 1);
- let peer_clone = peer.clone();
- deadline!(Duration::from_secs(5), move || peer_clone.node().num_connected() == 1);
-
- // Disconnect.
- if $node_disconnects {
- node.router().disconnect(node.tcp().connected_addrs()[0]).await.unwrap();
- } else {
- peer.node().disconnect(peer.node().connected_addrs()[0]).await;
+macro_rules! test_reject_unsolicited_peer_response {
+ ($($node_type:ident),*) => {
+ $(
+ paste! {
+ #[tokio::test]
+ async fn [<$node_type _rejects_unsolicited_peer_response>]() {
+ // Spin up a full node.
+ let node = $crate::common::node::$node_type().await;
+
+ // Spin up a test peer (synthetic node), it doesn't really matter what type it is.
+ let peer = TestPeer::validator().await;
+ let peer_addr = peer.node().listening_addr().unwrap();
+
+ // Connect the node to the test peer.
+ node.router().connect(peer_addr).unwrap().await.unwrap();
+
+ // Check the peer counts.
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 1);
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.tcp().num_connected() == 1);
+ let peer_clone = peer.clone();
+ deadline!(Duration::from_secs(5), move || peer_clone.node().num_connected() == 1);
+
+ // Check the candidate peers.
+ assert_eq!(node.router().number_of_candidate_peers(), 0);
+
+ let peers = vec!["1.1.1.1:1111".parse().unwrap(), "2.2.2.2:2222".parse().unwrap()];
+
+ // Send a `PeerResponse` to the node.
+ assert!(
+ peer.unicast(
+ *peer.node().connected_addrs().first().unwrap(),
+ Message::PeerResponse(PeerResponse { peers: peers.clone() })
+ )
+ .is_ok()
+ );
+
+ // Wait for the peer to be disconnected for a protocol violation.
+ let node_clone = node.clone();
+ deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 0);
+
+ // Make sure the sent addresses weren't inserted in the candidate peers.
+ for peer in peers {
+ assert!(!node.router().candidate_peers().contains(&peer));
+ }
+ }
}
-
- // Check the peer counts have been updated.
- let node_clone = node.clone();
- deadline!(Duration::from_secs(5), move || node_clone.router().number_of_connected_peers() == 0);
- deadline!(Duration::from_secs(5), move || node.tcp().num_connected() == 0);
- deadline!(Duration::from_secs(5), move || peer.node().num_connected() == 0);
-
- }
- };
-
- // Node side disconnect.
- ($($node_type:ident |> $peer_type:ident $(= $attr:meta)?),*) => {
- mod disconnect_node_side {
- $(
- test_disconnect!($node_type, $peer_type, true, $($attr)?);
- )*
- }
+ )*
};
-
- // Peer side disconnect.
- ($($node_type:ident <| $peer_type:ident $(= $attr:meta)?),*) => {
- mod disconnect_peer_side {
- $(
- test_disconnect!($node_type, $peer_type, false, $($attr)?);
- )*
- }
- };
-}
-
-mod client {
- // Full node disconnects from synthetic peer.
- test_disconnect! {
- client |> client,
- client |> validator,
- client |> prover
- }
-
- // Synthetic peer disconnects from the full node.
- test_disconnect! {
- client <| client,
- client <| validator,
- client <| prover
- }
-}
-
-mod prover {
- // Full node disconnects from synthetic peer.
- test_disconnect! {
- prover |> client,
- prover |> validator,
- prover |> prover
- }
-
- // Synthetic peer disconnects from the full node.
- test_disconnect! {
- prover <| client,
- prover <| validator,
- prover <| prover
- }
}
-mod validator {
- // Full node disconnects from synthetic peer.
- test_disconnect! {
- validator |> client,
- validator |> validator,
- validator |> prover
- }
-
- // Synthetic peer disconnects from the full node.
- test_disconnect! {
- validator <| client,
- validator <| validator,
- validator <| prover
- }
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn duplicate_disconnect_attempts() {
- // common::initialise_logger(3);
-
- // Spin up 2 full nodes.
- let node1 = validator().await;
- let node2 = validator().await;
- let addr2 = node2.tcp().listening_addr().unwrap();
-
- // Connect node1 to node2.
- assert!(node1.router().connect(addr2).unwrap().await.unwrap());
-
- // Prepare disconnect attempts.
- let node1_clone = node1.clone();
- let disconn1 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
- let node1_clone = node1.clone();
- let disconn2 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
- let node1_clone = node1.clone();
- let disconn3 = tokio::spawn(async move { node1_clone.router().disconnect(addr2).await.unwrap() });
-
- // Attempt to disconnect the 1st node from the other one several times at once.
- let (result1, result2, result3) = tokio::join!(disconn1, disconn2, disconn3);
- // A small anti-flakiness buffer.
-
- // Count the successes.
- let mut successes = 0;
- if result1.unwrap() {
- successes += 1;
- }
- if result2.unwrap() {
- successes += 1;
- }
- if result3.unwrap() {
- successes += 1;
- }
-
- // There may only be a single success.
- assert_eq!(successes, 1);
-
- // Connection checks.
- let node1_clone = node1.clone();
- deadline!(Duration::from_secs(5), move || node1_clone.router().number_of_connected_peers() == 0);
- let node2_clone = node2.clone();
- deadline!(Duration::from_secs(5), move || node2_clone.router().number_of_connected_peers() == 0);
-}
+test_reject_unsolicited_peer_response!(client, prover, validator);
|
[Bug] A malicious peer can directly send PeerResponse to other peers with high frequency and flood network with fake peer info
# https://hackerone.com/reports/2272999
## Summary:
The router does not check if the PeerResponse is a direct response of a previous PeerRequest https://github.com/AleoHQ/snarkOS/blob/testnet3/node/router/src/inbound.rs#L120
A malicious peer can directly send `PeerResponse` to other peers with high frequency. In this way, the other peers' `candidate_peers` will be full of malicious peer. Gradually, the good peer's connected peers are all malicious. Finally the good peers are disjoint with each oher and whole network is controlled by the malicious peers.
## Steps To Reproduce:
1. Clone https://github.com/AleoHQ/snarkOS and checkout `testnet3`
2. Add the following code at: node/router/src/heartbeat.rs `heartbeat` function
```
/// Handles the heartbeat request.
fn heartbeat(&self) {
self.safety_check_minimum_number_of_peers();
self.log_connected_peers();
//////////////////// ADD CODE HERE ////////////////////
// We can directly send PeerResponse to other peers (without previous PeerRequest). We can send PeerResponse with high frequency. Attacker can leverage this to attck the whole network.
for peer_ip in self.router().connected_peers().into_iter() {
let ip = "1.1.1.1:111";
let malicious_addr: SocketAddr = ip
.parse()
.expect("Unable to parse socket address");
self.send(peer_ip, Message::PeerResponse( PeerResponse { peers: vec![malicious_addr; 100] }));
}
/////////////////// ADD CODE HERE ////////////////////
// Remove any stale connected peers.
self.remove_stale_connected_peers();
// Remove the oldest connected peer.
....
```
3. add log at node/router/src/inbound.rs `peer_response` function
```
/// Handles a `PeerResponse` message.
fn peer_response(&self, _peer_ip: SocketAddr, peers: &[SocketAddr]) -> bool {
// Filter out invalid addresses.
//////////////////// ADD LOG HERE ////////////////////
warn!("received peer_response {:?}", peers);
//////////////////// ADD LOG HERE ////////////////////
let peers = peers.iter().copied().filter(|ip| self.router().is_valid_peer_ip(ip)).collect::<Vec<_>>();
// Adds the given peer IPs to the list of candidate peers.
self.router().insert_candidate_peers(&peers);
true
}
```
4. run ./devnet.sh
5. check log, we will find something like
```
WARN received peer_response [1.1.1.1:111, 1.1.1.1:111, 1.1.1.1:111, 1.1.1.1:111
```
6. Attack success: Though the good peer haven't made `PeerRequest`, it received malicious `PeerResponse`.
## Proof-of-Concept (PoC)
How this bug can be exploited:
1. The malicious peer starts sending a high frequency of PeerResponse messages directly to other peers. The PeerResponse messages contain information about the other peers which are all controlled by the hacker.
2. The other peers receive these PeerResponse messages and update their candidate_peers list with the information provided by the malicious peer.
3. Due to the high frequency of messages and the continuous updates, the candidate_peers list of the other peers becomes populated mainly or entirely with malicious peer entries.
4. As the good peers' candidate_peers list becomes filled with malicious peers, the chances of connecting to other good peers decrease significantly.
5. Over time, the good peers' connections are predominated by malicious peers, reducing the opportunities for good peers to communicate with each other.
6. Eventually, the good peers become disjointed from each other, as their connections are primarily with malicious peers.
7. With the network effectively controlled by malicious peers, they can manipulate communication, block transactions, tamper with data, or perform other malicious activities without detection or intervention from the good peers.
## Supporting Material/References:

## Impact
This bug have signigicant impact on all kinds of node: Prover, Validator and Client. The bug allows a malicious peer to flood the network with fake peer information, causing good peers to connect primarily with malicious peers. This gives the malicious peer control over the network and disrupts communication among the good peers. The malicious peers can tamper with transactions, manipulate data, and potentially launch further attacks. This undermines trust in the network and compromises its security and reliability.
## Fix Suggestions:
In the short term, verify every `PeerResponse ` is a real response of `PeerRequest`. In the middle or long term, consider formulating specification about the network layer. Also, consider using battle tested framework like `devp2p` and `libp2p`.
|
2023-12-07T20:12:09Z
|
2.2
|
|
AleoNet/snarkOS
| 2,221
|
AleoNet__snarkOS-2221
|
[
"2149"
] |
05e50dd0de11d06b93a31e979c78fb1d9942e181
|
diff --git a/node/messages/src/challenge_request.rs b/node/messages/src/challenge_request.rs
index eaf86a7b3f..35fa4497aa 100644
--- a/node/messages/src/challenge_request.rs
+++ b/node/messages/src/challenge_request.rs
@@ -48,3 +48,9 @@ impl<N: Network> MessageTrait for ChallengeRequest<N> {
Ok(Self { version, listener_port, node_type, address, nonce })
}
}
+
+impl<N: Network> ChallengeRequest<N> {
+ pub fn new(listener_port: u16, node_type: NodeType, address: Address<N>, nonce: u64) -> Self {
+ Self { version: Message::<N>::VERSION, listener_port, node_type, address, nonce }
+ }
+}
diff --git a/node/messages/src/helpers/codec.rs b/node/messages/src/helpers/codec.rs
index 83a6a9bd22..eedf3e26e5 100644
--- a/node/messages/src/helpers/codec.rs
+++ b/node/messages/src/helpers/codec.rs
@@ -21,6 +21,9 @@ use ::bytes::{BufMut, BytesMut};
use core::marker::PhantomData;
use tokio_util::codec::{Decoder, Encoder, LengthDelimitedCodec};
+/// The maximum size of a message that can be transmitted during the handshake.
+const MAXIMUM_HANDSHAKE_MESSAGE_SIZE: usize = 1024 * 1024; // 1 MiB
+
/// The maximum size of a message that can be transmitted in the network.
const MAXIMUM_MESSAGE_SIZE: usize = 128 * 1024 * 1024; // 128 MiB
@@ -30,10 +33,20 @@ pub struct MessageCodec<N: Network> {
_phantom: PhantomData<N>,
}
+impl<N: Network> MessageCodec<N> {
+ /// Increases the maximum permitted message size post-handshake.
+ pub fn update_max_message_len(&mut self) {
+ self.codec = LengthDelimitedCodec::builder().max_frame_length(MAXIMUM_MESSAGE_SIZE).little_endian().new_codec();
+ }
+}
+
impl<N: Network> Default for MessageCodec<N> {
fn default() -> Self {
Self {
- codec: LengthDelimitedCodec::builder().max_frame_length(MAXIMUM_MESSAGE_SIZE).little_endian().new_codec(),
+ codec: LengthDelimitedCodec::builder()
+ .max_frame_length(MAXIMUM_HANDSHAKE_MESSAGE_SIZE)
+ .little_endian()
+ .new_codec(),
_phantom: Default::default(),
}
}
diff --git a/node/router/src/handshake.rs b/node/router/src/handshake.rs
index fc473495b4..773fbb783a 100644
--- a/node/router/src/handshake.rs
+++ b/node/router/src/handshake.rs
@@ -43,8 +43,40 @@ impl<N: Network> P2P for Router<N> {
}
}
+/// A macro unwrapping the expected handshake message or returning an error for unexpected messages.
+#[macro_export]
+macro_rules! expect_message {
+ ($msg_ty:path, $framed:expr, $peer_addr:expr) => {
+ match $framed.try_next().await? {
+ // Received the expected message, proceed.
+ Some($msg_ty(data)) => {
+ trace!("Received '{}' from '{}'", data.name(), $peer_addr);
+ data
+ }
+ // Received a disconnect message, abort.
+ Some(Message::Disconnect(reason)) => {
+ return Err(error(format!("'{}' disconnected: {reason:?}", $peer_addr)))
+ }
+ // Received an unexpected message, abort.
+ _ => return Err(error(format!("'{}' did not follow the handshake protocol", $peer_addr))),
+ }
+ };
+}
+
+/// A macro for cutting a handshake short if message verification fails.
+#[macro_export]
+macro_rules! handle_verification {
+ ($result:expr, $framed:expr, $peer_addr:expr) => {
+ if let Some(reason) = $result {
+ trace!("Sending 'Disconnect' to '{}'", $peer_addr);
+ $framed.send(Message::Disconnect(Disconnect { reason: reason.clone() })).await?;
+ return Err(error(format!("Dropped '{}' for reason: {reason:?}", $peer_addr)));
+ }
+ };
+}
+
impl<N: Network> Router<N> {
- /// Performs the handshake protocol.
+ /// Executes the handshake protocol.
pub async fn handshake<'a>(
&'a self,
peer_addr: SocketAddr,
@@ -58,133 +90,162 @@ impl<N: Network> Router<N> {
debug!("Received a connection request from '{peer_addr}'");
None
} else {
+ debug!("Connecting to {peer_addr}...");
Some(peer_addr)
};
// Perform the handshake; we pass on a mutable reference to peer_ip in case the process is broken at any point in time.
- let handshake_result = self.handshake_inner(peer_addr, &mut peer_ip, stream, peer_side, genesis_header).await;
+ let mut handshake_result = if peer_side == ConnectionSide::Responder {
+ self.handshake_inner_initiator(peer_addr, &mut peer_ip, stream, genesis_header).await
+ } else {
+ self.handshake_inner_responder(peer_addr, &mut peer_ip, stream, genesis_header).await
+ };
// Remove the address from the collection of connecting peers (if the handshake got to the point where it's known).
if let Some(ip) = peer_ip {
self.connecting_peers.lock().remove(&ip);
}
+ // If the handshake succeeded, announce it and increase the message size limit.
+ if let Ok((ref peer_ip, ref mut framed)) = handshake_result {
+ info!("Connected to '{peer_ip}'");
+ framed.codec_mut().update_max_message_len();
+ }
+
handshake_result
}
- /// A helper that facilitates some extra error handling in `Router::handshake`.
- async fn handshake_inner<'a>(
+ /// The connection initiator side of the handshake.
+ async fn handshake_inner_initiator<'a>(
&'a self,
peer_addr: SocketAddr,
peer_ip: &mut Option<SocketAddr>,
stream: &'a mut TcpStream,
- peer_side: ConnectionSide,
genesis_header: Header<N>,
) -> io::Result<(SocketAddr, Framed<&mut TcpStream, MessageCodec<N>>)> {
// Construct the stream.
let mut framed = Framed::new(stream, MessageCodec::<N>::default());
+ // This value is immediately guaranteed to be present, so it can be unwrapped.
+ let peer_ip = peer_ip.unwrap();
+
/* Step 1: Send the challenge request. */
// Initialize an RNG.
let rng = &mut OsRng;
// Sample a random nonce.
- let nonce_a = rng.gen();
+ let our_nonce = rng.gen();
// Send a challenge request to the peer.
- let message_a = Message::<N>::ChallengeRequest(ChallengeRequest {
- version: Message::<N>::VERSION,
- listener_port: self.local_ip().port(),
- node_type: self.node_type,
- address: self.address(),
- nonce: nonce_a,
- });
- trace!("Sending '{}-A' to '{peer_addr}'", message_a.name());
- framed.send(message_a).await?;
-
- /* Step 2: Receive the challenge request. */
+ let our_request = ChallengeRequest::new(self.local_ip().port(), self.node_type, self.address(), our_nonce);
+ trace!("Sending '{}' to '{peer_addr}'", our_request.name());
+ framed.send(Message::ChallengeRequest(our_request)).await?;
+
+ /* Step 2: Receive the peer's challenge response followed by the challenge request. */
+
+ // Listen for the challenge response message.
+ let peer_response = expect_message!(Message::ChallengeResponse, framed, peer_addr);
// Listen for the challenge request message.
- let request_b = match framed.try_next().await? {
- // Received the challenge request message, proceed.
- Some(Message::ChallengeRequest(data)) => data,
- // Received a disconnect message, abort.
- Some(Message::Disconnect(reason)) => return Err(error(format!("'{peer_addr}' disconnected: {reason:?}"))),
- // Received an unexpected message, abort.
- _ => return Err(error(format!("'{peer_addr}' did not send a challenge request"))),
- };
- trace!("Received '{}-B' from '{peer_addr}'", request_b.name());
+ let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
- // Obtain the peer's listening address if it's an inbound connection.
- if peer_ip.is_none() {
- *peer_ip = Some(SocketAddr::new(peer_addr.ip(), request_b.listener_port));
- }
+ // Verify the challenge response. If a disconnect reason was returned, send the disconnect message and abort.
+ handle_verification!(
+ self.verify_challenge_response(peer_addr, peer_request.address, peer_response, genesis_header, our_nonce)
+ .await,
+ framed,
+ peer_addr
+ );
+
+ // Verify the challenge request. If a disconnect reason was returned, send the disconnect message and abort.
+ handle_verification!(self.verify_challenge_request(peer_addr, &peer_request), framed, peer_addr);
+
+ /* Step 3: Send the challenge response. */
+
+ // Sign the counterparty nonce.
+ let our_signature = self
+ .account
+ .sign_bytes(&peer_request.nonce.to_le_bytes(), rng)
+ .map_err(|_| error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")))?;
+
+ // Send the challenge response.
+ let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) };
+ trace!("Sending '{}' to '{peer_addr}'", our_response.name());
+ framed.send(Message::ChallengeResponse(our_response)).await?;
+
+ // Add the peer to the router.
+ self.insert_connected_peer(Peer::new(peer_ip, &peer_request), peer_addr);
+
+ Ok((peer_ip, framed))
+ }
+
+ /// The connection responder side of the handshake.
+ async fn handshake_inner_responder<'a>(
+ &'a self,
+ peer_addr: SocketAddr,
+ peer_ip: &mut Option<SocketAddr>,
+ stream: &'a mut TcpStream,
+ genesis_header: Header<N>,
+ ) -> io::Result<(SocketAddr, Framed<&mut TcpStream, MessageCodec<N>>)> {
+ // Construct the stream.
+ let mut framed = Framed::new(stream, MessageCodec::<N>::default());
+
+ /* Step 1: Receive the challenge request. */
+
+ // Listen for the challenge request message.
+ let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
- // This value is now guaranteed to be present, so it can be unwrapped.
+ // Obtain the peer's listening address.
+ *peer_ip = Some(SocketAddr::new(peer_addr.ip(), peer_request.listener_port));
let peer_ip = peer_ip.unwrap();
// Knowing the peer's listening address, ensure it is allowed to connect.
- if peer_side == ConnectionSide::Initiator {
- if let Err(forbidden_message) = self.ensure_peer_is_allowed(peer_ip) {
- return Err(error(format!("{forbidden_message}")));
- }
+ if let Err(forbidden_message) = self.ensure_peer_is_allowed(peer_ip) {
+ return Err(error(format!("{forbidden_message}")));
}
// Verify the challenge request. If a disconnect reason was returned, send the disconnect message and abort.
- if let Some(reason) = self.verify_challenge_request(peer_addr, &request_b) {
- trace!("Sending 'Disconnect' to '{peer_addr}'");
- framed.send(Message::Disconnect(Disconnect { reason: reason.clone() })).await?;
- return Err(error(format!("Dropped '{peer_addr}' for reason: {reason:?}")));
- }
+ handle_verification!(self.verify_challenge_request(peer_addr, &peer_request), framed, peer_addr);
- /* Step 3: Send the challenge response. */
+ /* Step 2: Send the challenge response followed by own challenge request. */
+
+ // Initialize an RNG.
+ let rng = &mut OsRng;
// Sign the counterparty nonce.
- let signature_b = self
+ let our_signature = self
.account
- .sign_bytes(&request_b.nonce.to_le_bytes(), rng)
+ .sign_bytes(&peer_request.nonce.to_le_bytes(), rng)
.map_err(|_| error(format!("Failed to sign the challenge request nonce from '{peer_addr}'")))?;
+ // Sample a random nonce.
+ let our_nonce = rng.gen();
+
// Send the challenge response.
- let message_b =
- Message::ChallengeResponse(ChallengeResponse { genesis_header, signature: Data::Object(signature_b) });
- trace!("Sending '{}-B' to '{peer_addr}'", message_b.name());
- framed.send(message_b).await?;
+ let our_response = ChallengeResponse { genesis_header, signature: Data::Object(our_signature) };
+ trace!("Sending '{}' to '{peer_addr}'", our_response.name());
+ framed.send(Message::ChallengeResponse(our_response)).await?;
+
+ // Send the challenge request.
+ let our_request = ChallengeRequest::new(self.local_ip().port(), self.node_type, self.address(), our_nonce);
+ trace!("Sending '{}' to '{peer_addr}'", our_request.name());
+ framed.send(Message::ChallengeRequest(our_request)).await?;
- /* Step 4: Receive the challenge response. */
+ /* Step 3: Receive the challenge response. */
// Listen for the challenge response message.
- let response_a = match framed.try_next().await? {
- // Received the challenge response message, proceed.
- Some(Message::ChallengeResponse(data)) => data,
- // Received a disconnect message, abort.
- Some(Message::Disconnect(reason)) => return Err(error(format!("'{peer_addr}' disconnected: {reason:?}"))),
- // Received an unexpected message, abort.
- _ => return Err(error(format!("'{peer_addr}' did not send a challenge response"))),
- };
- trace!("Received '{}-A' from '{peer_addr}'", response_a.name());
+ let peer_response = expect_message!(Message::ChallengeResponse, framed, peer_addr);
// Verify the challenge response. If a disconnect reason was returned, send the disconnect message and abort.
- if let Some(reason) =
- self.verify_challenge_response(peer_addr, request_b.address, response_a, genesis_header, nonce_a).await
- {
- trace!("Sending 'Disconnect' to '{peer_addr}'");
- framed.send(Message::Disconnect(Disconnect { reason: reason.clone() })).await?;
- return Err(error(format!("Dropped '{peer_addr}' for reason: {reason:?}")));
- }
-
- /* Step 5: Add the peer to the router. */
-
- // Prepare the peer.
- let peer_address = request_b.address;
- let peer_type = request_b.node_type;
- let peer_version = request_b.version;
-
- // Construct the peer.
- let peer = Peer::new(peer_ip, peer_address, peer_type, peer_version);
- // Insert the connected peer in the router.
- self.insert_connected_peer(peer, peer_addr);
- info!("Connected to '{peer_ip}'");
+ handle_verification!(
+ self.verify_challenge_response(peer_addr, peer_request.address, peer_response, genesis_header, our_nonce)
+ .await,
+ framed,
+ peer_addr
+ );
+
+ // Add the peer to the router.
+ self.insert_connected_peer(Peer::new(peer_ip, &peer_request), peer_addr);
Ok((peer_ip, framed))
}
diff --git a/node/router/src/helpers/peer.rs b/node/router/src/helpers/peer.rs
index f0d1d8e496..29016539cc 100644
--- a/node/router/src/helpers/peer.rs
+++ b/node/router/src/helpers/peer.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
-use snarkos_node_messages::NodeType;
+use snarkos_node_messages::{ChallengeRequest, NodeType};
use snarkvm::prelude::{Address, Network};
use parking_lot::RwLock;
@@ -39,12 +39,12 @@ pub struct Peer<N: Network> {
impl<N: Network> Peer<N> {
/// Initializes a new instance of `Peer`.
- pub fn new(listening_ip: SocketAddr, address: Address<N>, node_type: NodeType, version: u32) -> Self {
+ pub fn new(listening_ip: SocketAddr, challenge_request: &ChallengeRequest<N>) -> Self {
Self {
peer_ip: listening_ip,
- address,
- node_type,
- version,
+ address: challenge_request.address,
+ node_type: challenge_request.node_type,
+ version: challenge_request.version,
first_seen: Instant::now(),
last_seen: Arc::new(RwLock::new(Instant::now())),
}
diff --git a/node/router/src/lib.rs b/node/router/src/lib.rs
index fe657336db..9473e0646c 100644
--- a/node/router/src/lib.rs
+++ b/node/router/src/lib.rs
@@ -145,7 +145,6 @@ impl<N: Network> Router<N> {
let router = self.clone();
tokio::spawn(async move {
// Attempt to connect to the candidate peer.
- debug!("Connecting to {peer_ip}...");
match router.tcp.connect(peer_ip).await {
// Remove the peer from the candidate peers.
Ok(()) => router.remove_candidate_peer(peer_ip),
|
diff --git a/node/router/tests/connect.rs b/node/router/tests/connect.rs
index f5db28463a..f3793dca0b 100644
--- a/node/router/tests/connect.rs
+++ b/node/router/tests/connect.rs
@@ -23,8 +23,6 @@ use core::time::Duration;
#[tokio::test]
async fn test_connect_without_handshake() {
- initialize_logger(3);
-
// Create 2 routers.
let node0 = validator(0, 2).await;
let node1 = client(0, 2).await;
@@ -81,8 +79,6 @@ async fn test_connect_without_handshake() {
#[tokio::test]
async fn test_connect_with_handshake() {
- initialize_logger(3);
-
// Create 2 routers.
let node0 = validator(0, 2).await;
let node1 = client(0, 2).await;
@@ -159,8 +155,6 @@ async fn test_connect_with_handshake() {
#[ignore]
#[tokio::test]
async fn test_connect_simultaneously_with_handshake() {
- initialize_logger(3);
-
// Create 2 routers.
let node0 = validator(0, 2).await;
let node1 = client(0, 2).await;
diff --git a/node/router/tests/disconnect.rs b/node/router/tests/disconnect.rs
index efc612c0e4..b7d07a6293 100644
--- a/node/router/tests/disconnect.rs
+++ b/node/router/tests/disconnect.rs
@@ -23,8 +23,6 @@ use core::time::Duration;
#[tokio::test]
async fn test_disconnect_without_handshake() {
- initialize_logger(3);
-
// Create 2 routers.
let node0 = validator(0, 1).await;
let node1 = client(0, 1).await;
@@ -67,8 +65,6 @@ async fn test_disconnect_without_handshake() {
#[tokio::test]
async fn test_disconnect_with_handshake() {
- initialize_logger(3);
-
// Create 2 routers.
let node0 = validator(0, 1).await;
let node1 = client(0, 1).await;
diff --git a/node/tests/common/test_peer.rs b/node/tests/common/test_peer.rs
index a7a24e56c5..183786b12b 100644
--- a/node/tests/common/test_peer.rs
+++ b/node/tests/common/test_peer.rs
@@ -15,7 +15,8 @@
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
use snarkos_account::Account;
-use snarkos_node_messages::{ChallengeRequest, ChallengeResponse, Data, Message, MessageCodec, NodeType};
+use snarkos_node_messages::{ChallengeRequest, ChallengeResponse, Data, Message, MessageCodec, MessageTrait, NodeType};
+use snarkos_node_router::expect_message;
use snarkvm::prelude::{error, Address, Block, FromBytes, Network, TestRng, Testnet3 as CurrentNetwork};
use std::{
@@ -35,6 +36,7 @@ use pea2pea::{
};
use rand::Rng;
use tokio_util::codec::Framed;
+use tracing::*;
const ALEO_MAXIMUM_FORK_DEPTH: u32 = 4096;
@@ -119,47 +121,49 @@ impl Handshake for TestPeer {
let local_ip = self.node().listening_addr().expect("listening address should be present");
+ let peer_addr = conn.addr();
+ let node_side = !conn.side();
let stream = self.borrow_stream(&mut conn);
let mut framed = Framed::new(stream, MessageCodec::<CurrentNetwork>::default());
- // Send a challenge request to the peer.
- let message = Message::<CurrentNetwork>::ChallengeRequest(ChallengeRequest {
- version: Message::<CurrentNetwork>::VERSION,
- listener_port: local_ip.port(),
- node_type: self.node_type(),
- address: self.address(),
- nonce: rng.gen(),
- });
- framed.send(message).await?;
-
- // Listen for the challenge request.
- let request_b = match framed.try_next().await? {
- // Received the challenge request message, proceed.
- Some(Message::ChallengeRequest(data)) => data,
- // Received a disconnect message, abort.
- Some(Message::Disconnect(reason)) => return Err(error(format!("disconnected: {reason:?}"))),
- // Received an unexpected message, abort.
- _ => return Err(error("didn't send a challenge request")),
- };
-
- // TODO(nkls): add assertions on the contents.
-
- // Sign the nonce.
- let signature = self.account().sign_bytes(&request_b.nonce.to_le_bytes(), rng).unwrap();
-
// Retrieve the genesis block header.
let genesis_header = *sample_genesis_block().header();
- // Send the challenge response.
- let message =
- Message::ChallengeResponse(ChallengeResponse { genesis_header, signature: Data::Object(signature) });
- framed.send(message).await?;
-
- // Receive the challenge response.
- let Message::ChallengeResponse(challenge_response) = framed.try_next().await.unwrap().unwrap() else {
- panic!("didn't get challenge response")
- };
- assert_eq!(challenge_response.genesis_header, genesis_header);
+ // TODO(nkls): add assertions on the contents of messages.
+ match node_side {
+ ConnectionSide::Initiator => {
+ // Send a challenge request to the peer.
+ let our_request = ChallengeRequest::new(local_ip.port(), self.node_type(), self.address(), rng.gen());
+ framed.send(Message::ChallengeRequest(our_request)).await?;
+
+ // Receive the peer's challenge bundle.
+ let _peer_response = expect_message!(Message::ChallengeResponse, framed, peer_addr);
+ let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
+
+ // Sign the nonce.
+ let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap();
+
+ // Send the challenge response.
+ let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) };
+ framed.send(Message::ChallengeResponse(our_response)).await?;
+ }
+ ConnectionSide::Responder => {
+ // Listen for the challenge request.
+ let peer_request = expect_message!(Message::ChallengeRequest, framed, peer_addr);
+
+ // Sign the nonce.
+ let signature = self.account().sign_bytes(&peer_request.nonce.to_le_bytes(), rng).unwrap();
+
+ // Send our challenge bundle.
+ let our_response = ChallengeResponse { genesis_header, signature: Data::Object(signature) };
+ framed.send(Message::ChallengeResponse(our_response)).await?;
+ let our_request = ChallengeRequest::new(local_ip.port(), self.node_type(), self.address(), rng.gen());
+ framed.send(Message::ChallengeRequest(our_request)).await?;
+
+ // Listen for the challenge response.
+ let _peer_response = expect_message!(Message::ChallengeResponse, framed, peer_addr);
+ }
+ }
Ok(conn)
}
|
Unify the `Connecting to '{peer_ip}'...` message in `Router::handshake`
Unify the `Connecting to '{peer_ip}'...` message in `Router::handshake`.
_Originally posted by @howardwu in https://github.com/AleoHQ/snarkOS/pull/2108#pullrequestreview-1203846755_
|
2023-01-24T15:07:38Z
|
2.0
|
|
AleoNet/snarkOS
| 1,527
|
AleoNet__snarkOS-1527
|
[
"1522"
] |
9f0b304cb8407eec27077f212a86a79308528919
|
diff --git a/Cargo.lock b/Cargo.lock
index beb2ebf665..e6269726b3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -220,9 +220,9 @@ dependencies = [
[[package]]
name = "bumpalo"
-version = "3.8.0"
+version = "3.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c"
+checksum = "fe438c9d2f2b0fb88a112154ed81e30b0a491c29322afe1db3b6eec5811f5ba0"
[[package]]
name = "byteorder"
@@ -783,9 +783,9 @@ dependencies = [
[[package]]
name = "generic-array"
-version = "0.14.4"
+version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817"
+checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803"
dependencies = [
"typenum",
"version_check",
@@ -1983,7 +1983,7 @@ dependencies = [
[[package]]
name = "snarkvm"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"snarkvm-dpc",
"snarkvm-utilities",
@@ -1992,7 +1992,7 @@ dependencies = [
[[package]]
name = "snarkvm-algorithms"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"blake2",
@@ -2021,7 +2021,7 @@ dependencies = [
[[package]]
name = "snarkvm-curves"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"derivative",
"rand",
@@ -2035,7 +2035,7 @@ dependencies = [
[[package]]
name = "snarkvm-derives"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"proc-macro-crate",
"proc-macro-error",
@@ -2047,7 +2047,7 @@ dependencies = [
[[package]]
name = "snarkvm-dpc"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"base58",
@@ -2079,7 +2079,7 @@ dependencies = [
[[package]]
name = "snarkvm-fields"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"derivative",
@@ -2092,7 +2092,7 @@ dependencies = [
[[package]]
name = "snarkvm-gadgets"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"derivative",
@@ -2112,7 +2112,7 @@ dependencies = [
[[package]]
name = "snarkvm-marlin"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"bincode",
"blake2",
@@ -2138,7 +2138,7 @@ dependencies = [
[[package]]
name = "snarkvm-parameters"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"aleo-std",
"anyhow",
@@ -2155,7 +2155,7 @@ dependencies = [
[[package]]
name = "snarkvm-polycommit"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"derivative",
"digest 0.9.0",
@@ -2173,12 +2173,12 @@ dependencies = [
[[package]]
name = "snarkvm-profiler"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
[[package]]
name = "snarkvm-r1cs"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"cfg-if",
@@ -2194,7 +2194,7 @@ dependencies = [
[[package]]
name = "snarkvm-utilities"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=ff10c20#ff10c20512bb7f7853bf8c3f308d951e78a02290"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=459ea96#459ea966476e73b3c5296bf34ef508e0e7bcf547"
dependencies = [
"anyhow",
"bincode",
diff --git a/Cargo.toml b/Cargo.toml
index e78e12b054..02387aacab 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,7 +24,7 @@ default = []
test = []
[dependencies]
-snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "ff10c20" }
+snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "459ea96" }
#snarkvm = { path = "../snarkVM" }
bytes = "1.0.0"
diff --git a/src/environment/mod.rs b/src/environment/mod.rs
index 621b350a7f..093ea7841e 100644
--- a/src/environment/mod.rs
+++ b/src/environment/mod.rs
@@ -19,8 +19,10 @@ use snarkvm::dpc::Network;
use once_cell::sync::OnceCell;
use std::{
+ collections::HashSet,
fmt::Debug,
marker::PhantomData,
+ net::SocketAddr,
sync::{atomic::AtomicBool, Arc},
};
@@ -40,9 +42,9 @@ pub trait Environment: 'static + Clone + Debug + Default + Send + Sync {
const DEFAULT_RPC_PORT: u16 = 3030 + Self::Network::NETWORK_ID;
/// The list of beacon nodes to bootstrap the node server with.
- const BEACON_NODES: [&'static str; 0] = [];
+ const BEACON_NODES: &'static [&'static str] = &[];
/// The list of sync nodes to bootstrap the node server with.
- const SYNC_NODES: [&'static str; 13] = ["127.0.0.1:4131", "127.0.0.1:4133", "127.0.0.1:4134", "127.0.0.1:4135", "127.0.0.1:4136", "127.0.0.1:4137", "127.0.0.1:4138", "127.0.0.1:4139", "127.0.0.1:4140", "127.0.0.1:4141", "127.0.0.1:4142", "127.0.0.1:4143", "127.0.0.1:4144"];
+ const SYNC_NODES: &'static [&'static str] = &["127.0.0.1:4135"];
/// The duration in seconds to sleep in between heartbeat executions.
const HEARTBEAT_IN_SECS: u64 = 9;
@@ -73,6 +75,18 @@ pub trait Environment: 'static + Clone + Debug + Default + Send + Sync {
/// The maximum number of failures tolerated before disconnecting from a peer.
const MAXIMUM_NUMBER_OF_FAILURES: usize = 1024;
+ /// Returns the list of beacon nodes to bootstrap the node server with.
+ fn beacon_nodes() -> &'static HashSet<SocketAddr> {
+ static NODES: OnceCell<HashSet<SocketAddr>> = OnceCell::new();
+ NODES.get_or_init(|| Self::BEACON_NODES.iter().map(|ip| ip.parse().unwrap()).collect())
+ }
+
+ /// Returns the list of sync nodes to bootstrap the node server with.
+ fn sync_nodes() -> &'static HashSet<SocketAddr> {
+ static NODES: OnceCell<HashSet<SocketAddr>> = OnceCell::new();
+ NODES.get_or_init(|| Self::SYNC_NODES.iter().map(|ip| ip.parse().unwrap()).collect())
+ }
+
/// Returns the tasks handler for the node.
fn tasks() -> &'static Tasks<tokio::task::JoinHandle<()>> {
static TASKS: OnceCell<Tasks<tokio::task::JoinHandle<()>>> = OnceCell::new();
@@ -135,7 +149,7 @@ impl<N: Network> Environment for Prover<N> {
type Network = N;
const NODE_TYPE: NodeType = NodeType::Prover;
const COINBASE_IS_PUBLIC: bool = true;
- const MINIMUM_NUMBER_OF_PEERS: usize = 1;
+ const MINIMUM_NUMBER_OF_PEERS: usize = 2;
const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
}
@@ -158,7 +172,7 @@ pub struct ClientTrial<N: Network>(PhantomData<N>);
impl<N: Network> Environment for ClientTrial<N> {
type Network = N;
const NODE_TYPE: NodeType = NodeType::Client;
- const SYNC_NODES: [&'static str; 13] = [
+ const SYNC_NODES: &'static [&'static str] = &[
"144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
"159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
"161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
@@ -174,7 +188,7 @@ pub struct MinerTrial<N: Network>(PhantomData<N>);
impl<N: Network> Environment for MinerTrial<N> {
type Network = N;
const NODE_TYPE: NodeType = NodeType::Miner;
- const SYNC_NODES: [&'static str; 13] = [
+ const SYNC_NODES: &'static [&'static str] = &[
"144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
"159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
"161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
@@ -191,7 +205,7 @@ pub struct OperatorTrial<N: Network>(PhantomData<N>);
impl<N: Network> Environment for OperatorTrial<N> {
type Network = N;
const NODE_TYPE: NodeType = NodeType::Operator;
- const SYNC_NODES: [&'static str; 13] = [
+ const SYNC_NODES: &'static [&'static str] = &[
"144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
"159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
"161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
@@ -208,7 +222,7 @@ pub struct ProverTrial<N: Network>(PhantomData<N>);
impl<N: Network> Environment for ProverTrial<N> {
type Network = N;
const NODE_TYPE: NodeType = NodeType::Prover;
- const SYNC_NODES: [&'static str; 13] = [
+ const SYNC_NODES: &'static [&'static str] = &[
"144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
"159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
"161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
diff --git a/src/helpers/block_requests.rs b/src/helpers/block_requests.rs
index b9d3297b4a..4e1a954eb7 100644
--- a/src/helpers/block_requests.rs
+++ b/src/helpers/block_requests.rs
@@ -18,7 +18,7 @@ use crate::{network::ledger::PeersState, Environment};
use snarkos_storage::{BlockLocators, LedgerState};
use snarkvm::dpc::prelude::*;
-use std::{collections::HashSet, net::SocketAddr};
+use std::net::SocketAddr;
/// Checks if any of the peers are ahead and have a larger block height, if they are on a fork, and their block locators.
/// The maximum known block height and cumulative weight are tracked for the purposes of further operations.
@@ -27,8 +27,6 @@ pub fn find_maximal_peer<N: Network, E: Environment>(
maximum_block_height: &mut u32,
maximum_cumulative_weight: &mut u128,
) -> Option<(SocketAddr, bool, BlockLocators<N>)> {
- let sync_nodes: HashSet<SocketAddr> = E::SYNC_NODES.iter().map(|ip| ip.parse().unwrap()).collect();
-
// Determine if the peers state has any sync nodes.
// TODO: have nodes sync up to tip - 4096 with only sync nodes, then switch to syncing with the longest chain.
let peers_contains_sync_node = false;
@@ -40,7 +38,7 @@ pub fn find_maximal_peer<N: Network, E: Environment>(
for (peer_ip, peer_state) in peers_state.iter() {
// Only update the maximal peer if there are no sync nodes or the peer is a sync node.
- if !peers_contains_sync_node || sync_nodes.contains(peer_ip) {
+ if !peers_contains_sync_node || E::sync_nodes().contains(peer_ip) {
// Update the maximal peer state if the peer is ahead and the peer knows if you are a fork or not.
// This accounts for (Case 1 and Case 2(a))
if let Some((_, _, is_on_fork, block_height, block_locators)) = peer_state {
diff --git a/src/network/message.rs b/src/network/message.rs
index be4c8c801a..a1837cb96e 100644
--- a/src/network/message.rs
+++ b/src/network/message.rs
@@ -19,7 +19,7 @@ use crate::{
Environment,
};
use snarkos_storage::BlockLocators;
-use snarkvm::prelude::*;
+use snarkvm::{dpc::posw::PoSWProof, prelude::*};
use ::bytes::{Buf, BytesMut};
use anyhow::{anyhow, Result};
@@ -106,8 +106,8 @@ pub enum Message<N: Network, E: Environment> {
PoolRegister(Address<N>),
/// PoolRequest := (share_difficulty, block_template)
PoolRequest(u64, Data<BlockTemplate<N>>),
- /// PoolResponse := (address, block_header)
- PoolResponse(Address<N>, Data<BlockHeader<N>>),
+ /// PoolResponse := (address, nonce, proof)
+ PoolResponse(Address<N>, N::PoSWNonce, Data<PoSWProof<N>>),
/// Unused
#[allow(unused)]
Unused(PhantomData<E>),
@@ -197,7 +197,12 @@ impl<N: Network, E: Environment> Message<N, E> {
Self::PoolRequest(share_difficulty, block_template) => {
Ok([bincode::serialize(share_difficulty)?, block_template.serialize_blocking()?].concat())
}
- Self::PoolResponse(address, block) => Ok([bincode::serialize(address)?, block.serialize_blocking()?].concat()),
+ Self::PoolResponse(address, nonce, proof) => Ok([
+ bincode::serialize(address)?,
+ bincode::serialize(nonce)?,
+ proof.serialize_blocking()?,
+ ]
+ .concat()),
Self::Unused(_) => Ok(vec![]),
}
}
@@ -261,7 +266,11 @@ impl<N: Network, E: Environment> Message<N, E> {
10 => Self::UnconfirmedTransaction(bincode::deserialize(data)?),
11 => Self::PoolRegister(bincode::deserialize(data)?),
12 => Self::PoolRequest(bincode::deserialize(&data[0..8])?, Data::Buffer(data[8..].to_vec())),
- 13 => Self::PoolResponse(bincode::deserialize(&data[0..32])?, Data::Buffer(data[32..].to_vec())),
+ 13 => Self::PoolResponse(
+ bincode::deserialize(&data[0..32])?,
+ bincode::deserialize(&data[32..64])?,
+ Data::Buffer(data[64..].to_vec()),
+ ),
_ => return Err(anyhow!("Invalid message ID {}", id)),
};
diff --git a/src/network/operator.rs b/src/network/operator.rs
index 0a4f7d4444..568081c362 100644
--- a/src/network/operator.rs
+++ b/src/network/operator.rs
@@ -27,7 +27,7 @@ use crate::{
ProverRouter,
};
use snarkos_storage::{storage::Storage, OperatorState};
-use snarkvm::dpc::prelude::*;
+use snarkvm::dpc::{prelude::*, PoSWProof};
use anyhow::Result;
use rand::thread_rng;
@@ -55,10 +55,10 @@ type OperatorHandler<N> = mpsc::Receiver<OperatorRequest<N>>;
///
#[derive(Debug)]
pub enum OperatorRequest<N: Network> {
- /// PoolRegister := (peer_ip, worker_address)
+ /// PoolRegister := (peer_ip, prover_address)
PoolRegister(SocketAddr, Address<N>),
- /// PoolResponse := (peer_ip, proposed_block_header, worker_address)
- PoolResponse(SocketAddr, BlockHeader<N>, Address<N>),
+ /// PoolResponse := (peer_ip, prover_address, nonce, proof)
+ PoolResponse(SocketAddr, Address<N>, N::PoSWNonce, PoSWProof<N>),
}
/// The predefined base share difficulty.
@@ -263,42 +263,17 @@ impl<N: Network, E: Environment> Operator<N, E> {
warn!("[PoolRegister] No current block template exists");
}
}
- OperatorRequest::PoolResponse(peer_ip, block_header, prover) => {
+ OperatorRequest::PoolResponse(peer_ip, prover, nonce, proof) => {
if let Some(block_template) = self.block_template.read().await.clone() {
- // Ensure the given block header corresponds to the correct block height.
- if block_template.block_height() != block_header.height() {
- warn!("[PoolResponse] Peer {} sent a stale block.", peer_ip);
- return;
- }
- // Ensure the timestamp in the block template matches in the block header.
- if block_template.block_timestamp() != block_header.timestamp() {
- warn!("[PoolResponse] Peer {} sent a block with an incorrect timestamp.", peer_ip);
- return;
- }
- // Ensure the difficulty target in the block template matches in the block header.
- if block_template.difficulty_target() != block_header.difficulty_target() {
- warn!("[PoolResponse] Peer {} sent a block with an incorrect difficulty target.", peer_ip);
- return;
- }
- // Ensure the previous ledger root in the block template matches in the block header.
- if block_template.previous_ledger_root() != block_header.previous_ledger_root() {
- warn!("[PoolResponse] Peer {} sent a block with an incorrect ledger root.", peer_ip);
- return;
- }
- // Ensure the transactions root in the block header matches the one from the block template.
- if block_template.transactions().transactions_root() != block_header.transactions_root() {
- warn!("[PoolResponse] Peer {} has changed the list of block transactions.", peer_ip);
- return;
- }
// Ensure the given nonce from the prover is new.
- if self.known_nonces.read().await.contains(&block_header.nonce()) {
+ if self.known_nonces.read().await.contains(&nonce) {
warn!("[PoolResponse] Peer {} sent a duplicate share", peer_ip);
// TODO (julesdesmit): punish?
return;
}
// Update known nonces.
- self.known_nonces.write().await.insert(block_header.nonce());
+ self.known_nonces.write().await.insert(nonce);
// Retrieve the share difficulty for the given prover.
let share_difficulty = {
@@ -313,12 +288,12 @@ impl<N: Network, E: Environment> Operator<N, E> {
};
// Ensure the share difficulty target is met, and the PoSW proof is valid.
- let block_height = block_header.height();
+ let block_height = block_template.block_height();
if !N::posw().verify(
block_height,
share_difficulty,
- &[*block_header.to_header_root().unwrap(), *block_header.nonce()],
- block_header.proof(),
+ &[*block_template.to_header_root().unwrap(), *nonce],
+ &proof,
) {
warn!("[PoolResponse] PoSW proof verification failed");
return;
@@ -336,8 +311,8 @@ impl<N: Network, E: Environment> Operator<N, E> {
let coinbase_record = block_template.coinbase_record().clone();
match self.state.increment_share(block_height, coinbase_record, &prover) {
Ok(..) => info!(
- "Operator received a valid share from {} ({}) for block {}",
- peer_ip, prover, block_height,
+ "Operator has received a valid share from {} ({}) for block {}",
+ prover, peer_ip, block_height,
),
Err(error) => error!("{}", error),
}
@@ -345,11 +320,19 @@ impl<N: Network, E: Environment> Operator<N, E> {
// If the block has satisfactory difficulty and is valid, proceed to broadcast it.
let previous_block_hash = block_template.previous_block_hash();
let transactions = block_template.transactions().clone();
- if let Ok(block) = Block::from(previous_block_hash, block_header, transactions) {
- info!("Operator has found unconfirmed block {} ({})", block.height(), block.hash());
- let request = LedgerRequest::UnconfirmedBlock(self.local_ip, block, self.prover_router.clone());
- if let Err(error) = self.ledger_router.send(request).await {
- warn!("Failed to broadcast mined block - {}", error);
+ if let Ok(block_header) = BlockHeader::<N>::from(
+ block_template.previous_ledger_root(),
+ block_template.transactions().transactions_root(),
+ BlockHeaderMetadata::new(&block_template),
+ nonce,
+ proof,
+ ) {
+ if let Ok(block) = Block::from(previous_block_hash, block_header, transactions) {
+ info!("Operator has found unconfirmed block {} ({})", block.height(), block.hash());
+ let request = LedgerRequest::UnconfirmedBlock(self.local_ip, block, self.prover_router.clone());
+ if let Err(error) = self.ledger_router.send(request).await {
+ warn!("Failed to broadcast mined block - {}", error);
+ }
}
}
} else {
diff --git a/src/network/peer.rs b/src/network/peer.rs
index 140496c278..86f0c645ad 100644
--- a/src/network/peer.rs
+++ b/src/network/peer.rs
@@ -706,15 +706,15 @@ impl<N: Network, E: Environment> Peer<N, E> {
warn!("[PoolRequest] could not deserialize block template");
}
}
- Message::PoolResponse(address, block_header) => {
+ Message::PoolResponse(address, nonce, proof) => {
if E::NODE_TYPE != NodeType::Operator {
trace!("Skipping 'PoolResponse' from {}", peer_ip);
- } else if let Ok(block_header) = block_header.deserialize().await {
- if let Err(error) = operator_router.send(OperatorRequest::PoolResponse(peer_ip, block_header, address)).await {
+ } else if let Ok(proof) = proof.deserialize().await {
+ if let Err(error) = operator_router.send(OperatorRequest::PoolResponse(peer_ip, address, nonce, proof)).await {
warn!("[PoolResponse] {}", error);
}
} else {
- warn!("[PoolResponse] could not deserialize block");
+ warn!("[PoolResponse] could not deserialize proof");
}
}
Message::Unused(_) => break, // Peer is not following the protocol.
diff --git a/src/network/peers.rs b/src/network/peers.rs
index c4cc57c05a..f4beb24f46 100644
--- a/src/network/peers.rs
+++ b/src/network/peers.rs
@@ -196,8 +196,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
///
pub async fn connected_sync_nodes(&self) -> HashSet<SocketAddr> {
let connected_peers: HashSet<SocketAddr> = self.connected_peers.read().await.keys().into_iter().copied().collect();
- let sync_nodes: HashSet<SocketAddr> = E::SYNC_NODES.iter().map(|ip| ip.parse().unwrap()).collect();
- connected_peers.intersection(&sync_nodes).copied().collect()
+ connected_peers.intersection(E::sync_nodes()).copied().collect()
}
///
@@ -206,8 +205,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
///
pub async fn number_of_connected_sync_nodes(&self) -> usize {
let connected_peers: HashSet<SocketAddr> = self.connected_peers.read().await.keys().into_iter().copied().collect();
- let sync_nodes: HashSet<SocketAddr> = E::SYNC_NODES.iter().map(|ip| ip.parse().unwrap()).collect();
- connected_peers.intersection(&sync_nodes).count()
+ connected_peers.intersection(E::sync_nodes()).count()
}
///
@@ -324,10 +322,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
.read()
.await
.iter()
- .filter(|(&peer_ip, _)| {
- let peer_str = peer_ip.to_string();
- !E::SYNC_NODES.contains(&peer_str.as_str()) && !E::BEACON_NODES.contains(&peer_str.as_str())
- })
+ .filter(|(peer_ip, _)| !E::sync_nodes().contains(peer_ip) && !E::beacon_nodes().contains(peer_ip))
.take(num_excess_peers)
.map(|(&peer_ip, _)| peer_ip)
.collect::<Vec<SocketAddr>>();
@@ -347,6 +342,8 @@ impl<N: Network, E: Environment> Peers<N, E> {
let number_of_connected_sync_nodes = connected_sync_nodes.len();
let num_excess_sync_nodes = number_of_connected_sync_nodes.saturating_sub(1);
if num_excess_sync_nodes > 0 {
+ debug!("Exceeded maximum number of sync nodes");
+
// Proceed to send disconnect requests to these peers.
for peer_ip in connected_sync_nodes
.iter()
@@ -373,14 +370,12 @@ impl<N: Network, E: Environment> Peers<N, E> {
};
// Add the sync nodes to the list of candidate peers.
- let sync_nodes: Vec<SocketAddr> = E::SYNC_NODES.iter().map(|ip| ip.parse().unwrap()).collect();
if number_of_connected_sync_nodes == 0 {
- self.add_candidate_peers(&sync_nodes).await;
+ self.add_candidate_peers(E::sync_nodes().iter()).await;
}
// Add the beacon nodes to the list of candidate peers.
- let beacon_nodes: Vec<SocketAddr> = E::BEACON_NODES.iter().map(|ip| ip.parse().unwrap()).collect();
- self.add_candidate_peers(&beacon_nodes).await;
+ self.add_candidate_peers(E::beacon_nodes().iter()).await;
// Attempt to connect to more peers if the number of connected peers is below the minimum threshold.
// Select the peers randomly from the list of candidate peers.
@@ -393,7 +388,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
.choose_multiple(&mut OsRng::default(), midpoint_number_of_peers)
{
// Ensure this node is not connected to more than the permitted number of sync nodes.
- if sync_nodes.contains(&peer_ip) && number_of_connected_sync_nodes >= 1 {
+ if E::sync_nodes().contains(&peer_ip) && number_of_connected_sync_nodes >= 1 {
continue;
}
@@ -528,7 +523,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
self.send(recipient, Message::PeerResponse(connected_peers)).await;
}
PeersRequest::ReceivePeerResponse(peer_ips) => {
- self.add_candidate_peers(&peer_ips).await;
+ self.add_candidate_peers(peer_ips.iter()).await;
}
}
}
@@ -539,19 +534,17 @@ impl<N: Network, E: Environment> Peers<N, E> {
/// This method skips adding any given peers if the combined size exceeds the threshold,
/// as the peer providing this list could be subverting the protocol.
///
- async fn add_candidate_peers(&self, peers: &[SocketAddr]) {
+ async fn add_candidate_peers<'a, T: ExactSizeIterator<Item = &'a SocketAddr> + IntoIterator>(&self, peers: T) {
// Acquire the candidate peers write lock.
let mut candidate_peers = self.candidate_peers.write().await;
// Ensure the combined number of peers does not surpass the threshold.
- if candidate_peers.len() + peers.len() < E::MAXIMUM_CANDIDATE_PEERS {
- // Proceed to insert each new candidate peer IP.
- for peer_ip in peers.iter().take(E::MAXIMUM_CANDIDATE_PEERS) {
- // Ensure the peer is not self and is a new candidate peer.
- let is_self = *peer_ip == self.local_ip
- || (peer_ip.ip().is_unspecified() || peer_ip.ip().is_loopback()) && peer_ip.port() == self.local_ip.port();
- if !is_self && !self.is_connected_to(*peer_ip).await {
- candidate_peers.insert(*peer_ip);
- }
+ for peer_ip in peers.take(E::MAXIMUM_CANDIDATE_PEERS.saturating_sub(candidate_peers.len())) {
+ // Ensure the peer is not self and is a new candidate peer.
+ let is_self = *peer_ip == self.local_ip
+ || (peer_ip.ip().is_unspecified() || peer_ip.ip().is_loopback()) && peer_ip.port() == self.local_ip.port();
+ if !is_self && !self.is_connected_to(*peer_ip).await {
+ // Proceed to insert each new candidate peer IP.
+ candidate_peers.insert(*peer_ip);
}
}
}
@@ -587,10 +580,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
.connected_peers()
.await
.iter()
- .filter(|peer_ip| {
- let peer_str = peer_ip.to_string();
- *peer_ip != &sender && !E::SYNC_NODES.contains(&peer_str.as_str()) && !E::BEACON_NODES.contains(&peer_str.as_str())
- })
+ .filter(|peer_ip| *peer_ip != &sender && !E::sync_nodes().contains(peer_ip) && !E::beacon_nodes().contains(peer_ip))
.copied()
.collect::<Vec<_>>()
{
diff --git a/src/network/prover.rs b/src/network/prover.rs
index 24e746f957..b0dc51994b 100644
--- a/src/network/prover.rs
+++ b/src/network/prover.rs
@@ -26,7 +26,7 @@ use crate::{
PeersRouter,
};
use snarkos_storage::{storage::Storage, ProverState};
-use snarkvm::dpc::prelude::*;
+use snarkvm::dpc::{posw::PoSWProof, prelude::*};
use anyhow::{anyhow, Result};
use rand::thread_rng;
@@ -245,6 +245,7 @@ impl<N: Network, E: Environment> Prover<N, E> {
E::status().update(State::Mining);
let thread_pool = self.thread_pool.clone();
+ let block_height = block_template.block_height();
let block_template = block_template.clone();
let result = task::spawn_blocking(move || {
@@ -260,8 +261,11 @@ impl<N: Network, E: Environment> Prover<N, E> {
&[*block_header.to_header_root().unwrap(), *block_header.nonce()],
block_header.proof(),
) {
- let proof_difficulty = block_header.proof().to_proof_difficulty()?;
- return Ok::<(BlockHeader<N>, u64), anyhow::Error>((block_header, proof_difficulty));
+ return Ok::<(N::PoSWNonce, PoSWProof<N>, u64), anyhow::Error>((
+ block_header.nonce(),
+ block_header.proof().clone(),
+ block_header.proof().to_proof_difficulty()?,
+ ));
}
}
})
@@ -271,21 +275,20 @@ impl<N: Network, E: Environment> Prover<N, E> {
E::status().update(State::Ready);
match result {
- Ok(Ok((block_header, proof_difficulty))) => {
+ Ok(Ok((nonce, proof, proof_difficulty))) => {
info!(
"Prover successfully mined a share for unconfirmed block {} with proof difficulty of {}",
- block_header.height(),
- proof_difficulty
+ block_height, proof_difficulty
);
// Send a `PoolResponse` to the operator.
- let message = Message::PoolResponse(recipient, Data::Object(block_header));
+ let message = Message::PoolResponse(recipient, nonce, Data::Object(proof));
if let Err(error) = self.peers_router.send(PeersRequest::MessageSend(operator_ip, message)).await {
warn!("[PoolResponse] {}", error);
}
}
Ok(Err(error)) => trace!("{}", error),
- Err(error) => trace!("{}", anyhow!("Could not mine next block {}", error)),
+ Err(error) => trace!("{}", anyhow!("Failed to mine the next block {}", error)),
}
}
}
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index 1859c378e2..06fc6f2ad9 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -17,7 +17,7 @@ license = "GPL-3.0"
edition = "2018"
[dependencies]
-snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "ff10c20" }
+snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "459ea96" }
#snarkvm = { path = "../../snarkVM" }
[dependencies.anyhow]
|
diff --git a/testing/Cargo.toml b/testing/Cargo.toml
index 300bb826b9..313469f9d5 100644
--- a/testing/Cargo.toml
+++ b/testing/Cargo.toml
@@ -25,7 +25,7 @@ path = "../storage"
[dependencies.snarkvm]
git = "https://github.com/AleoHQ/snarkVM.git"
-rev = "ff10c20"
+rev = "459ea96"
#path = "../../snarkVM"
[dependencies.anyhow]
|
[Bug] Pool operator reports "could not deserialize block"
There are still issues:
The operator reports `could not deserialize block` on `PoolResponse`.
It looks like when deserializing `BlockHeader`, there is another `is_valid` check (`snarkVM/dpc/src/block/header.rs`, line 375 -> 143 -> 230) so the operator will be unable to deserialize the header. We might need a `from_unchecked` for `BlockHeader` as well.
_Originally posted by @HarukaMa in https://github.com/AleoHQ/snarkOS/issues/1492#issuecomment-1000500810_
I guess this comment was ignored and I was using a modified snarkVM so I didn't think about this again.
The line numbers might be out of date but it should still be the deserialization from bytes process.
|
network/peers.rs
I find this code much improved, so one has actually an idea what's going on if de-serialization fails.
Message::PoolRequest(share_difficulty, block_template) => {
if E::NODE_TYPE != NodeType::Prover {
trace!("Skipping 'PoolRequest' from {}", peer_ip);
} else {
match block_template.deserialize().await {
Ok(block_template) => {
if let Err(error) = prover_router.send(ProverRequest::PoolRequest(peer_ip, share_difficulty, block_template)).await {
warn!("[PoolRequest] {}", error);
}
}
Err(error) => {
warn!("[PoolRequest] could not deserialize block template from {}: {}", peer_ip, error);
}
}
}
}
Message::PoolResponse(address, block_header) => {
if E::NODE_TYPE != NodeType::Operator {
trace!("Skipping 'PoolResponse' from {}", peer_ip);
} else {
match block_header.deserialize().await {
Ok(block_header) => {
if let Err(error) = operator_router.send(OperatorRequest::PoolResponse(peer_ip, block_header, address)).await {
warn!("[PoolResponse] {}", error);
}
}
Err(error) => {
warn!("[PoolResponse] could not deserialize block from {}: {}", peer_ip, error);
}
}
}
}
That said, the code now reports as the issue
`2022-01-04T16:06:38.772476Z WARN [PoolResponse] could not deserialize block from 192.168.1.114:4006: Invalid block header`
I find it odd, to report an issue on de-serialization, when in fact the de-serialization worked, but a downstream check is_valid(), returned false.
Can confirm that we have same warning.
|
2022-01-06T02:55:34Z
|
2.0
|
AleoNet/snarkOS
| 1,427
|
AleoNet__snarkOS-1427
|
[
"1492"
] |
11679d4e08efa53660eca852a8e7de3275f5f73f
|
diff --git a/Cargo.lock b/Cargo.lock
index 3148b2de22..0e6d9d84ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1983,7 +1983,7 @@ dependencies = [
[[package]]
name = "snarkvm"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"snarkvm-dpc",
"snarkvm-utilities",
@@ -1992,7 +1992,7 @@ dependencies = [
[[package]]
name = "snarkvm-algorithms"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"blake2",
@@ -2021,7 +2021,7 @@ dependencies = [
[[package]]
name = "snarkvm-curves"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"derivative",
"rand",
@@ -2035,7 +2035,7 @@ dependencies = [
[[package]]
name = "snarkvm-derives"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"proc-macro-crate",
"proc-macro-error",
@@ -2047,7 +2047,7 @@ dependencies = [
[[package]]
name = "snarkvm-dpc"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"base58",
@@ -2079,7 +2079,7 @@ dependencies = [
[[package]]
name = "snarkvm-fields"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"derivative",
@@ -2092,7 +2092,7 @@ dependencies = [
[[package]]
name = "snarkvm-gadgets"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"derivative",
@@ -2112,7 +2112,7 @@ dependencies = [
[[package]]
name = "snarkvm-marlin"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"bincode",
"blake2",
@@ -2138,7 +2138,7 @@ dependencies = [
[[package]]
name = "snarkvm-parameters"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"aleo-std",
"anyhow",
@@ -2155,7 +2155,7 @@ dependencies = [
[[package]]
name = "snarkvm-polycommit"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"derivative",
"digest 0.9.0",
@@ -2173,12 +2173,12 @@ dependencies = [
[[package]]
name = "snarkvm-profiler"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
[[package]]
name = "snarkvm-r1cs"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"cfg-if",
@@ -2194,7 +2194,7 @@ dependencies = [
[[package]]
name = "snarkvm-utilities"
version = "0.7.5"
-source = "git+https://github.com/AleoHQ/snarkVM.git?rev=02c814c#02c814c34c8f9f8b46401e447b6799e01b19ede1"
+source = "git+https://github.com/AleoHQ/snarkVM.git?rev=6bafa31#6bafa3134279ee2239a1755c4d328abe69e640dd"
dependencies = [
"anyhow",
"bincode",
diff --git a/Cargo.toml b/Cargo.toml
index b8d2dad65d..feff73a552 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,11 +24,11 @@ default = []
test = []
[dependencies]
-snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "02c814c" }
+snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "b70bf72" }
#snarkvm = { path = "../snarkVM" }
bytes = "1.0.0"
-futures = { version = "0.3.19", features = ["thread-pool"]}
+futures = { version = "0.3.19", features = ["thread-pool"] }
crossterm = { version = "0.22" }
tui = { version = "0.16.0", default-features = false, features = ["crossterm"] }
diff --git a/README.md b/README.md
index 72cb206c25..275f103bf2 100644
--- a/README.md
+++ b/README.md
@@ -160,6 +160,9 @@ OPTIONS:
--miner <miner> Specify this as a mining node, with the given miner address
--network <network> Specify the network of this node [default: 2]
--node <node> Specify the IP address and port for the node server [default: 0.0.0.0:4132]
+ --operator <operator> Specify this as an operating node, with the given operator address
+ --pool <pool> Specify the pool that a prover node is contributing to
+ --prover <prover> Specify this as a prover node, with the given prover address
--rpc <rpc> Specify the IP address and port for the RPC server [default: 0.0.0.0:3032]
--password <rpc-password> Specify the password for the RPC server [default: pass]
--username <rpc-username> Specify the username for the RPC server [default: root]
diff --git a/src/environment/mod.rs b/src/environment/mod.rs
index 925dbba9a2..8c092b786e 100644
--- a/src/environment/mod.rs
+++ b/src/environment/mod.rs
@@ -33,6 +33,23 @@ pub enum NodeType {
Beacon,
/// A sync node is a discovery node, capable of syncing nodes for the network.
Sync,
+ /// An operating node is a full node, capable of coordinating provers in a pool.
+ Operator,
+ /// A proving node is a full node, capable of producing proofs for a pool.
+ Prover,
+}
+
+impl NodeType {
+ pub fn description(&self) -> &str {
+ match self {
+ Self::Client => "a client node",
+ Self::Miner => "a mining node",
+ Self::Beacon => "a beacon node",
+ Self::Sync => "a sync node",
+ Self::Operator => "an operating node",
+ Self::Prover => "a proving node",
+ }
+ }
}
impl fmt::Display for NodeType {
@@ -114,6 +131,30 @@ impl<N: Network> Environment for Miner<N> {
const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
}
+#[derive(Clone, Debug, Default)]
+pub struct Operator<N: Network>(PhantomData<N>);
+
+#[rustfmt::skip]
+impl<N: Network> Environment for Operator<N> {
+ type Network = N;
+ const NODE_TYPE: NodeType = NodeType::Operator;
+ const COINBASE_IS_PUBLIC: bool = true;
+ const MINIMUM_NUMBER_OF_PEERS: usize = 1;
+ const MAXIMUM_NUMBER_OF_PEERS: usize = 1000;
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct Prover<N: Network>(PhantomData<N>);
+
+#[rustfmt::skip]
+impl<N: Network> Environment for Prover<N> {
+ type Network = N;
+ const NODE_TYPE: NodeType = NodeType::Prover;
+ const COINBASE_IS_PUBLIC: bool = true;
+ const MINIMUM_NUMBER_OF_PEERS: usize = 1;
+ const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
+}
+
#[derive(Clone, Debug, Default)]
pub struct SyncNode<N: Network>(PhantomData<N>);
@@ -158,3 +199,37 @@ impl<N: Network> Environment for MinerTrial<N> {
const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
const COINBASE_IS_PUBLIC: bool = true;
}
+
+#[derive(Clone, Debug, Default)]
+pub struct OperatorTrial<N: Network>(PhantomData<N>);
+
+#[rustfmt::skip]
+impl<N: Network> Environment for OperatorTrial<N> {
+ type Network = N;
+ const NODE_TYPE: NodeType = NodeType::Operator;
+ const SYNC_NODES: [&'static str; 13] = [
+ "144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
+ "159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
+ "161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
+ ];
+ const MINIMUM_NUMBER_OF_PEERS: usize = 11;
+ const MAXIMUM_NUMBER_OF_PEERS: usize = 1000;
+ const COINBASE_IS_PUBLIC: bool = true;
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct ProverTrial<N: Network>(PhantomData<N>);
+
+#[rustfmt::skip]
+impl<N: Network> Environment for ProverTrial<N> {
+ type Network = N;
+ const NODE_TYPE: NodeType = NodeType::Prover;
+ const SYNC_NODES: [&'static str; 13] = [
+ "144.126.219.193:4132", "165.232.145.194:4132", "143.198.164.241:4132", "188.166.7.13:4132", "167.99.40.226:4132",
+ "159.223.124.150:4132", "137.184.192.155:4132", "147.182.213.228:4132", "137.184.202.162:4132", "159.223.118.35:4132",
+ "161.35.106.91:4132", "157.245.133.62:4132", "143.198.166.150:4132",
+ ];
+ const MINIMUM_NUMBER_OF_PEERS: usize = 11;
+ const MAXIMUM_NUMBER_OF_PEERS: usize = 21;
+ const COINBASE_IS_PUBLIC: bool = true;
+}
diff --git a/src/network/ledger.rs b/src/network/ledger.rs
index 2b1926bd17..35c4daf557 100644
--- a/src/network/ledger.rs
+++ b/src/network/ledger.rs
@@ -896,40 +896,6 @@ impl<N: Network, E: Environment> Ledger<N, E> {
}
}
}
-
- // TODO (howardwu): TEMPORARY - Evaluate the merits of this experiment after seeing the results.
- // If the node is a sync node and the node is currently syncing,
- // reduce the number of connections down to the minimum threshold,
- // to improve the speed with which the node syncs back to tip.
- // FIXME: causes sync nodes to hang when at maximum peers
- /*
- if E::NODE_TYPE == NodeType::Sync && self.status.is_syncing() {
- debug!("Temporarily reducing the number of connected peers to sync");
-
- // Lock peers_state for further processing.
- let peers_state = self.peers_state.read().await;
-
- // Determine the peers to disconnect from.
- // Attention - We are reducing this to the `MINIMUM_NUMBER_OF_PEERS`, *not* `MAXIMUM_NUMBER_OF_PEERS`.
- let num_excess_peers = peers_state.len().saturating_sub(E::MINIMUM_NUMBER_OF_PEERS);
- let peer_ips_to_disconnect = peers_state
- .iter()
- .filter(|(&ip, _)| ip != peer_ip)
- .take(num_excess_peers)
- .map(|(&ip, _)| ip)
- .collect::<Vec<SocketAddr>>();
-
- // Release the lock over peers_state.
- drop(peers_state);
-
- trace!("Found {} peers to temporarily disconnect", peer_ips_to_disconnect.len());
-
- // Proceed to disconnect and restrict these peers.
- for peer_ip in peer_ips_to_disconnect {
- self.disconnect_and_restrict(peer_ip, "disconnecting to sync").await;
- }
- }
- */
}
}
diff --git a/src/network/message.rs b/src/network/message.rs
index ccf86ca49c..680cbb7b70 100644
--- a/src/network/message.rs
+++ b/src/network/message.rs
@@ -99,6 +99,12 @@ pub enum Message<N: Network, E: Environment> {
UnconfirmedBlock(u32, N::BlockHash, Data<Block<N>>),
/// UnconfirmedTransaction := (transaction)
UnconfirmedTransaction(Transaction<N>),
+ /// PoolRegister := (address)
+ PoolRegister(Address<N>),
+ /// PoolRequest := (share_difficulty, block_template)
+ PoolRequest(u64, Data<BlockTemplate<N>>),
+ /// PoolResponse := (address, block_header)
+ PoolResponse(Address<N>, Data<BlockHeader<N>>),
/// Unused
#[allow(unused)]
Unused(PhantomData<E>),
@@ -120,6 +126,9 @@ impl<N: Network, E: Environment> Message<N, E> {
Self::Pong(..) => "Pong",
Self::UnconfirmedBlock(..) => "UnconfirmedBlock",
Self::UnconfirmedTransaction(..) => "UnconfirmedTransaction",
+ Self::PoolRegister(..) => "PoolRegister",
+ Self::PoolRequest(..) => "PoolRequest",
+ Self::PoolResponse(..) => "PoolResponse",
Self::Unused(..) => "Unused",
}
}
@@ -139,7 +148,10 @@ impl<N: Network, E: Environment> Message<N, E> {
Self::Pong(..) => 8,
Self::UnconfirmedBlock(..) => 9,
Self::UnconfirmedTransaction(..) => 10,
- Self::Unused(..) => 11,
+ Self::PoolRegister(..) => 11,
+ Self::PoolRequest(..) => 12,
+ Self::PoolResponse(..) => 13,
+ Self::Unused(..) => 14,
}
}
@@ -178,6 +190,11 @@ impl<N: Network, E: Environment> Message<N, E> {
]
.concat()),
Self::UnconfirmedTransaction(transaction) => Ok(bincode::serialize(transaction)?),
+ Self::PoolRegister(address) => Ok(bincode::serialize(address)?),
+ Self::PoolRequest(share_difficulty, block_template) => {
+ Ok([bincode::serialize(share_difficulty)?, block_template.serialize_blocking()?].concat())
+ }
+ Self::PoolResponse(address, block) => Ok([bincode::serialize(address)?, block.serialize_blocking()?].concat()),
Self::Unused(_) => Ok(vec![]),
}
}
@@ -239,6 +256,9 @@ impl<N: Network, E: Environment> Message<N, E> {
Data::Buffer(data[36..].to_vec()),
),
10 => Self::UnconfirmedTransaction(bincode::deserialize(data)?),
+ 11 => Self::PoolRegister(bincode::deserialize(data)?),
+ 12 => Self::PoolRequest(bincode::deserialize(&data[0..8])?, Data::Buffer(data[8..].to_vec())),
+ 13 => Self::PoolResponse(bincode::deserialize(&data[0..32])?, Data::Buffer(data[32..].to_vec())),
_ => return Err(anyhow!("Invalid message ID {}", id)),
};
diff --git a/src/network/mod.rs b/src/network/mod.rs
index da993204b3..0a2549d9f0 100644
--- a/src/network/mod.rs
+++ b/src/network/mod.rs
@@ -20,6 +20,9 @@ pub(crate) use ledger::{LedgerRequest, LedgerRouter};
pub mod message;
pub use message::*;
+pub mod operator;
+pub use operator::*;
+
pub(crate) mod peers;
pub(crate) use peers::*;
diff --git a/src/network/operator.rs b/src/network/operator.rs
new file mode 100644
index 0000000000..13b63bc7dc
--- /dev/null
+++ b/src/network/operator.rs
@@ -0,0 +1,363 @@
+// Copyright (C) 2019-2021 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// The snarkOS library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// The snarkOS library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{
+ helpers::Tasks,
+ Data,
+ Environment,
+ LedgerReader,
+ LedgerRequest,
+ LedgerRouter,
+ Message,
+ NodeType,
+ PeersRequest,
+ PeersRouter,
+ ProverRouter,
+};
+use snarkos_storage::{storage::Storage, OperatorState};
+use snarkvm::dpc::prelude::*;
+
+use anyhow::Result;
+use rand::thread_rng;
+use rayon::{ThreadPool, ThreadPoolBuilder};
+use std::{
+ collections::{HashMap, HashSet},
+ net::SocketAddr,
+ path::Path,
+ sync::Arc,
+ time::{Duration, Instant},
+};
+use tokio::{
+ sync::{mpsc, oneshot, RwLock},
+ task,
+ task::JoinHandle,
+};
+
+/// Shorthand for the parent half of the `Operator` message channel.
+pub(crate) type OperatorRouter<N> = mpsc::Sender<OperatorRequest<N>>;
+#[allow(unused)]
+/// Shorthand for the child half of the `Operator` message channel.
+type OperatorHandler<N> = mpsc::Receiver<OperatorRequest<N>>;
+
+///
+/// An enum of requests that the `Operator` struct processes.
+///
+#[derive(Debug)]
+pub enum OperatorRequest<N: Network> {
+ /// PoolRegister := (peer_ip, worker_address)
+ PoolRegister(SocketAddr, Address<N>),
+ /// PoolResponse := (peer_ip, proposed_block_header, worker_address)
+ PoolResponse(SocketAddr, BlockHeader<N>, Address<N>),
+}
+
+/// The predefined base share difficulty.
+const BASE_SHARE_DIFFICULTY: u64 = u64::MAX;
+/// The operator heartbeat in seconds.
+const HEARTBEAT_IN_SECONDS: Duration = Duration::from_secs(1);
+
+///
+/// An operator for a program on a specific network in the node server.
+///
+#[derive(Debug)]
+pub struct Operator<N: Network, E: Environment> {
+ /// The address of the operator.
+ address: Option<Address<N>>,
+ /// The local address of this node.
+ local_ip: SocketAddr,
+ /// The thread pool of the operator.
+ thread_pool: Arc<ThreadPool>,
+ /// The state storage of the operator.
+ state: Arc<OperatorState<N>>,
+ /// The current block template that is being mined on by the operator.
+ block_template: RwLock<Option<BlockTemplate<N>>>,
+ /// A list of provers and their associated state := (last_submitted, share_difficulty)
+ provers: RwLock<HashMap<Address<N>, (Instant, u64)>>,
+ /// A list of the known nonces for the current round.
+ known_nonces: RwLock<HashSet<N::PoSWNonce>>,
+ /// The operator router of the node.
+ operator_router: OperatorRouter<N>,
+ /// The pool of unconfirmed transactions.
+ memory_pool: Arc<RwLock<MemoryPool<N>>>,
+ /// The peers router of the node.
+ peers_router: PeersRouter<N, E>,
+ /// The ledger state of the node.
+ ledger_reader: LedgerReader<N>,
+ /// The ledger router of the node.
+ ledger_router: LedgerRouter<N>,
+ /// The prover router of the node.
+ prover_router: ProverRouter<N>,
+}
+
+impl<N: Network, E: Environment> Operator<N, E> {
+ /// Initializes a new instance of the operator.
+ pub async fn open<S: Storage, P: AsRef<Path> + Copy>(
+ tasks: &Tasks<JoinHandle<()>>,
+ path: P,
+ address: Option<Address<N>>,
+ local_ip: SocketAddr,
+ memory_pool: Arc<RwLock<MemoryPool<N>>>,
+ peers_router: PeersRouter<N, E>,
+ ledger_reader: LedgerReader<N>,
+ ledger_router: LedgerRouter<N>,
+ prover_router: ProverRouter<N>,
+ ) -> Result<Arc<Self>> {
+ // Initialize an mpsc channel for sending requests to the `Operator` struct.
+ let (operator_router, mut operator_handler) = mpsc::channel(1024);
+ // Initialize the operator thread pool.
+ let thread_pool = ThreadPoolBuilder::new()
+ .stack_size(8 * 1024 * 1024)
+ .num_threads((num_cpus::get() / 8 * 7).max(1))
+ .build()?;
+
+ // Initialize the operator.
+ let operator = Arc::new(Self {
+ address,
+ local_ip,
+ state: Arc::new(OperatorState::open_writer::<S, P>(path)?),
+ thread_pool: Arc::new(thread_pool),
+ block_template: RwLock::new(None),
+ provers: Default::default(),
+ known_nonces: Default::default(),
+ operator_router,
+ memory_pool,
+ peers_router,
+ ledger_reader,
+ ledger_router,
+ prover_router,
+ });
+
+ if E::NODE_TYPE == NodeType::Operator {
+ // Initialize the handler for the operator.
+ let operator_clone = operator.clone();
+ let (router, handler) = oneshot::channel();
+ tasks.append(task::spawn(async move {
+ // Notify the outer function that the task is ready.
+ let _ = router.send(());
+                // Asynchronously wait for an operator request.
+ while let Some(request) = operator_handler.recv().await {
+ operator_clone.update(request).await;
+ }
+ }));
+ // Wait until the operator handler is ready.
+ let _ = handler.await;
+ }
+
+ if E::NODE_TYPE == NodeType::Operator {
+ if let Some(recipient) = operator.address {
+ // Initialize an update loop for the block template.
+ let operator = operator.clone();
+ let (router, handler) = oneshot::channel();
+ tasks.append(task::spawn(async move {
+ // Notify the outer function that the task is ready.
+ let _ = router.send(());
+ // TODO (julesdesmit): Add logic to the loop to retarget share difficulty.
+ loop {
+ // Determine if the current block template is stale.
+ let is_block_template_stale = match &*operator.block_template.read().await {
+ Some(template) => operator.ledger_reader.latest_block_height().saturating_add(1) != template.block_height(),
+ None => true,
+ };
+
+ // Update the block template if it is stale.
+ if is_block_template_stale {
+ // Construct a new block template.
+ let transactions = operator.memory_pool.read().await.transactions();
+ let ledger_reader = operator.ledger_reader.clone();
+ let thread_pool = operator.thread_pool.clone();
+ let result = task::spawn_blocking(move || {
+ thread_pool.install(move || {
+ match ledger_reader.get_block_template(
+ recipient,
+ E::COINBASE_IS_PUBLIC,
+ &transactions,
+ &mut thread_rng(),
+ ) {
+ Ok(block_template) => Ok(block_template),
+ Err(error) => Err(format!("Failed to produce a new block template: {}", error)),
+ }
+ })
+ })
+ .await;
+
+ // Update the block template.
+ match result {
+ Ok(Ok(block_template)) => {
+ // Acquire the write lock to update the block template.
+ *operator.block_template.write().await = Some(block_template);
+ // Clear the set of known nonces.
+ operator.known_nonces.write().await.clear();
+ }
+ Ok(Err(error_message)) => error!("{}", error_message),
+ Err(error) => error!("{}", error),
+ };
+ }
+
+ // Proceed to sleep for a preset amount of time.
+ tokio::time::sleep(HEARTBEAT_IN_SECONDS).await;
+ }
+ }));
+ // Wait until the operator handler is ready.
+ let _ = handler.await;
+ } else {
+ error!("Missing operator address. Please specify an Aleo address in order to operate a pool");
+ }
+ }
+
+ Ok(operator)
+ }
+
+ /// Returns an instance of the operator router.
+ pub fn router(&self) -> OperatorRouter<N> {
+ self.operator_router.clone()
+ }
+
+ /// Returns all the shares in storage.
+ pub fn to_shares(&self) -> Vec<((u32, Record<N>), HashMap<Address<N>, u64>)> {
+ self.state.to_shares()
+ }
+
+ /// Returns the shares for a specific block, given the block height and coinbase record commitment.
+ pub fn get_shares_for_block(&self, block_height: u32, coinbase_record: Record<N>) -> Result<HashMap<Address<N>, u64>> {
+ self.state.get_shares_for_block(block_height, coinbase_record)
+ }
+
+ /// Returns the shares for a specific prover, given a ledger and the prover address.
+ pub fn get_shares_for_prover(&self, prover: &Address<N>) -> u64 {
+ self.state.get_shares_for_prover(&self.ledger_reader, prover)
+ }
+
+ ///
+ /// Performs the given `request` to the operator.
+ /// All requests must go through this `update`, so that a unified view is preserved.
+ ///
+ pub(super) async fn update(&self, request: OperatorRequest<N>) {
+ match request {
+ OperatorRequest::PoolRegister(peer_ip, address) => {
+ if let Some(block_template) = self.block_template.read().await.clone() {
+ // Ensure this prover exists in the list first, and retrieve their share difficulty.
+ let share_difficulty = self
+ .provers
+ .write()
+ .await
+ .entry(address)
+ .or_insert((Instant::now(), BASE_SHARE_DIFFICULTY))
+ .1;
+
+ // Route a `PoolRequest` to the peer.
+ let message = Message::PoolRequest(share_difficulty, Data::Object(block_template));
+ if let Err(error) = self.peers_router.send(PeersRequest::MessageSend(peer_ip, message)).await {
+ warn!("[PoolRequest] {}", error);
+ }
+ } else {
+ warn!("[PoolRegister] No current block template exists");
+ }
+ }
+ OperatorRequest::PoolResponse(peer_ip, block_header, prover) => {
+ if let Some(block_template) = self.block_template.read().await.clone() {
+ // Ensure the given block header corresponds to the correct block height.
+ if block_template.block_height() != block_header.height() {
+ warn!("[PoolResponse] Peer {} sent a stale block.", peer_ip);
+ return;
+ }
+ // Ensure the timestamp in the block template matches in the block header.
+ if block_template.block_timestamp() != block_header.timestamp() {
+ warn!("[PoolResponse] Peer {} sent a block with an incorrect timestamp.", peer_ip);
+ return;
+ }
+ // Ensure the difficulty target in the block template matches in the block header.
+ if block_template.difficulty_target() != block_header.difficulty_target() {
+ warn!("[PoolResponse] Peer {} sent a block with an incorrect difficulty target.", peer_ip);
+ return;
+ }
+ // Ensure the previous ledger root in the block template matches in the block header.
+ if block_template.previous_ledger_root() != block_header.previous_ledger_root() {
+ warn!("[PoolResponse] Peer {} sent a block with an incorrect ledger root.", peer_ip);
+ return;
+ }
+ // Ensure the transactions root in the block header matches the one from the block template.
+ if block_template.transactions().transactions_root() != block_header.transactions_root() {
+ warn!("[PoolResponse] Peer {} has changed the list of block transactions.", peer_ip);
+ return;
+ }
+ // Ensure the given nonce from the prover is new.
+ if self.known_nonces.read().await.contains(&block_header.nonce()) {
+ warn!("[PoolResponse] Peer {} sent a duplicate share", peer_ip);
+ // TODO (julesdesmit): punish?
+ return;
+ }
+
+ // Update known nonces.
+ self.known_nonces.write().await.insert(block_header.nonce());
+
+ // Retrieve the share difficulty for the given prover.
+ let share_difficulty = {
+ let provers = self.provers.read().await.clone();
+ match provers.get(&prover) {
+ Some((_, share_difficulty)) => *share_difficulty,
+ None => {
+ self.provers.write().await.insert(prover, (Instant::now(), BASE_SHARE_DIFFICULTY));
+ BASE_SHARE_DIFFICULTY
+ }
+ }
+ };
+
+ // Ensure the share difficulty target is met, and the PoSW proof is valid.
+ let block_height = block_header.height();
+ if !N::posw().verify(
+ block_height,
+ share_difficulty,
+ &vec![*block_header.to_header_root().unwrap(), *block_header.nonce()],
+ block_header.proof(),
+ ) {
+ warn!("[PoolResponse] PoSW proof verification failed");
+ return;
+ }
+
+ // Update the internal state for this prover.
+ if let Some(ref mut prover) = self.provers.write().await.get_mut(&prover) {
+ prover.0 = Instant::now();
+ } else {
+ error!("Prover should have existing info");
+ return;
+ }
+
+ // Increment the share count for the prover.
+ let coinbase_record = block_template.coinbase_record().clone();
+ match self.state.increment_share(block_height, coinbase_record, &prover) {
+ Ok(..) => info!(
+ "Operator received a valid share from {} ({}) for block {}",
+ peer_ip, prover, block_height,
+ ),
+ Err(error) => error!("{}", error),
+ }
+
+ // If the block has satisfactory difficulty and is valid, proceed to broadcast it.
+ let previous_block_hash = block_template.previous_block_hash();
+ let transactions = block_template.transactions().clone();
+ if let Ok(block) = Block::from(previous_block_hash, block_header, transactions) {
+ info!("Operator has found unconfirmed block {} ({})", block.height(), block.hash());
+ let request = LedgerRequest::UnconfirmedBlock(self.local_ip, block, self.prover_router.clone());
+ if let Err(error) = self.ledger_router.send(request).await {
+ warn!("Failed to broadcast mined block - {}", error);
+ }
+ }
+ } else {
+ warn!("[PoolResponse] No current block template exists");
+ }
+ }
+ }
+ }
+}
diff --git a/src/network/peers.rs b/src/network/peers.rs
index 10c1b375a7..e1e494f276 100644
--- a/src/network/peers.rs
+++ b/src/network/peers.rs
@@ -23,6 +23,8 @@ use crate::{
LedgerRouter,
Message,
NodeType,
+ OperatorRequest,
+ OperatorRouter,
ProverRequest,
ProverRouter,
};
@@ -66,16 +68,30 @@ type ConnectionResult = oneshot::Sender<Result<()>>;
///
#[derive(Debug)]
pub enum PeersRequest<N: Network, E: Environment> {
- /// Connect := (peer_ip, ledger_reader, ledger_router, prover_router, connection_result)
- Connect(SocketAddr, LedgerReader<N>, LedgerRouter<N>, ProverRouter<N>, ConnectionResult),
- /// Heartbeat := (ledger_reader, ledger_router, prover_router)
- Heartbeat(LedgerReader<N>, LedgerRouter<N>, ProverRouter<N>),
+ /// Connect := (peer_ip, ledger_reader, ledger_router, operator_router, prover_router, connection_result)
+ Connect(
+ SocketAddr,
+ LedgerReader<N>,
+ LedgerRouter<N>,
+ OperatorRouter<N>,
+ ProverRouter<N>,
+ ConnectionResult,
+ ),
+ /// Heartbeat := (ledger_reader, ledger_router, operator_router, prover_router)
+ Heartbeat(LedgerReader<N>, LedgerRouter<N>, OperatorRouter<N>, ProverRouter<N>),
/// MessagePropagate := (peer_ip, message)
MessagePropagate(SocketAddr, Message<N, E>),
/// MessageSend := (peer_ip, message)
MessageSend(SocketAddr, Message<N, E>),
- /// PeerConnecting := (stream, peer_ip, ledger_reader, ledger_router, prover_router)
- PeerConnecting(TcpStream, SocketAddr, LedgerReader<N>, LedgerRouter<N>, ProverRouter<N>),
+ /// PeerConnecting := (stream, peer_ip, ledger_reader, ledger_router, operator_router, prover_router)
+ PeerConnecting(
+ TcpStream,
+ SocketAddr,
+ LedgerReader<N>,
+ LedgerRouter<N>,
+ OperatorRouter<N>,
+ ProverRouter<N>,
+ ),
/// PeerConnected := (peer_ip, peer_nonce, outbound_router)
PeerConnected(SocketAddr, u64, OutboundRouter<N, E>),
/// PeerDisconnected := (peer_ip)
@@ -258,7 +274,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
///
pub(super) async fn update(&self, request: PeersRequest<N, E>, tasks: &Tasks<JoinHandle<()>>) {
match request {
- PeersRequest::Connect(peer_ip, ledger_reader, ledger_router, prover_router, connection_result) => {
+ PeersRequest::Connect(peer_ip, ledger_reader, ledger_router, operator_router, prover_router, connection_result) => {
// Ensure the peer IP is not this node.
if peer_ip == self.local_ip
|| (peer_ip.ip().is_unspecified() || peer_ip.ip().is_loopback()) && peer_ip.port() == self.local_ip.port()
@@ -308,6 +324,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
ledger_reader,
ledger_router,
prover_router,
+ operator_router,
self.connected_nonces().await,
Some(connection_result),
tasks.clone(),
@@ -327,10 +344,9 @@ impl<N: Network, E: Environment> Peers<N, E> {
}
}
}
- PeersRequest::Heartbeat(ledger_reader, ledger_router, prover_router) => {
+ PeersRequest::Heartbeat(ledger_reader, ledger_router, operator_router, prover_router) => {
// Obtain the number of connected peers.
let number_of_connected_peers = self.number_of_connected_peers().await;
-
// Ensure the number of connected peers is below the maximum threshold.
if number_of_connected_peers > E::MAXIMUM_NUMBER_OF_PEERS {
debug!("Exceeded maximum number of connected peers");
@@ -420,8 +436,14 @@ impl<N: Network, E: Environment> Peers<N, E> {
// Initialize the connection process.
let (router, handler) = oneshot::channel();
- let request =
- PeersRequest::Connect(peer_ip, ledger_reader.clone(), ledger_router.clone(), prover_router.clone(), router);
+ let request = PeersRequest::Connect(
+ peer_ip,
+ ledger_reader.clone(),
+ ledger_router.clone(),
+ operator_router.clone(),
+ prover_router.clone(),
+ router,
+ );
if let Err(error) = self.peers_router.send(request).await {
warn!("Failed to transmit the request: '{}'", error);
}
@@ -438,7 +460,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
PeersRequest::MessageSend(sender, message) => {
self.send(sender, message).await;
}
- PeersRequest::PeerConnecting(stream, peer_ip, ledger_reader, ledger_router, prover_router) => {
+ PeersRequest::PeerConnecting(stream, peer_ip, ledger_reader, ledger_router, operator_router, prover_router) => {
// Ensure the peer IP is not this node.
if peer_ip == self.local_ip
|| (peer_ip.ip().is_unspecified() || peer_ip.ip().is_loopback()) && peer_ip.port() == self.local_ip.port()
@@ -509,6 +531,7 @@ impl<N: Network, E: Environment> Peers<N, E> {
ledger_reader,
ledger_router,
prover_router,
+ operator_router,
self.connected_nonces().await,
None,
tasks.clone(),
@@ -887,6 +910,7 @@ impl<N: Network, E: Environment> Peer<N, E> {
ledger_reader: LedgerReader<N>,
ledger_router: LedgerRouter<N>,
prover_router: ProverRouter<N>,
+ operator_router: OperatorRouter<N>,
connected_nonces: Vec<u64>,
connection_result: Option<ConnectionResult>,
tasks: Tasks<task::JoinHandle<()>>,
@@ -1056,11 +1080,12 @@ impl<N: Network, E: Environment> Peer<N, E> {
Ok(block) => {
// TODO (howardwu): TEMPORARY - Remove this after testnet2.
// Sanity check for a V12 ledger.
- if N::NETWORK_ID == 2 && block.height() > snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT {
- if block.header().proof().as_ref().unwrap_or(N::genesis_block().header().proof().as_ref().unwrap()).is_hiding() {
- warn!("Peer {} is not V12-compliant, proceeding to disconnect", peer_ip);
- break;
- }
+ if N::NETWORK_ID == 2
+ && block.height() > snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT
+ && block.header().proof().is_hiding()
+ {
+ warn!("Peer {} is not V12-compliant, proceeding to disconnect", peer_ip);
+ break;
}
// Route the `BlockResponse` to the ledger.
@@ -1118,11 +1143,12 @@ impl<N: Network, E: Environment> Peer<N, E> {
// TODO (howardwu): TEMPORARY - Remove this after testnet2.
// Sanity check for a V12 ledger.
- if N::NETWORK_ID == 2 && block_header.height() > snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT {
- if block_header.proof().as_ref().unwrap_or(N::genesis_block().header().proof().as_ref().unwrap()).is_hiding() {
- warn!("Peer {} is not V12-compliant, proceeding to disconnect", peer_ip);
- break;
- }
+ if N::NETWORK_ID == 2
+ && block_header.height() > snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT
+ && block_header.proof().is_hiding()
+ {
+ warn!("Peer {} is not V12-compliant, proceeding to disconnect", peer_ip);
+ break;
}
// Update the block header of the peer.
@@ -1266,6 +1292,35 @@ impl<N: Network, E: Environment> Peer<N, E> {
}
}
}
+ Message::PoolRegister(address) => {
+ if E::NODE_TYPE != NodeType::Operator {
+ trace!("Skipping 'PoolRegister' from {}", peer_ip);
+ } else if let Err(error) = operator_router.send(OperatorRequest::PoolRegister(peer_ip, address)).await {
+ warn!("[PoolRegister] {}", error);
+ }
+ }
+ Message::PoolRequest(share_difficulty, block_template) => {
+ if E::NODE_TYPE != NodeType::Prover {
+ trace!("Skipping 'PoolRequest' from {}", peer_ip);
+ } else if let Ok(block_template) = block_template.deserialize().await {
+ if let Err(error) = prover_router.send(ProverRequest::PoolRequest(peer_ip, share_difficulty, block_template)).await {
+ warn!("[PoolRequest] {}", error);
+ }
+ } else {
+ warn!("[PoolRequest] could not deserialize block template");
+ }
+ }
+ Message::PoolResponse(address, block_header) => {
+ if E::NODE_TYPE != NodeType::Operator {
+ trace!("Skipping 'PoolResponse' from {}", peer_ip);
+ } else if let Ok(block_header) = block_header.deserialize().await {
+ if let Err(error) = operator_router.send(OperatorRequest::PoolResponse(peer_ip, block_header, address)).await {
+ warn!("[PoolResponse] {}", error);
+ }
+ } else {
+ warn!("[PoolResponse] could not deserialize block");
+ }
+ }
Message::Unused(_) => break, // Peer is not following the protocol.
}
}
diff --git a/src/network/prover.rs b/src/network/prover.rs
index a1b2896768..526af07f86 100644
--- a/src/network/prover.rs
+++ b/src/network/prover.rs
@@ -16,6 +16,7 @@
use crate::{
helpers::{State, Status, Tasks},
+ Data,
Environment,
LedgerReader,
LedgerRequest,
@@ -28,7 +29,7 @@ use crate::{
use snarkos_storage::{storage::Storage, ProverState};
use snarkvm::dpc::prelude::*;
-use anyhow::Result;
+use anyhow::{anyhow, Result};
use rand::thread_rng;
use rayon::{ThreadPool, ThreadPoolBuilder};
use std::{
@@ -38,6 +39,7 @@ use std::{
atomic::{AtomicBool, Ordering},
Arc,
},
+ time::Duration,
};
use tokio::{
sync::{mpsc, oneshot, RwLock},
@@ -51,11 +53,16 @@ pub(crate) type ProverRouter<N> = mpsc::Sender<ProverRequest<N>>;
/// Shorthand for the child half of the `Prover` message channel.
type ProverHandler<N> = mpsc::Receiver<ProverRequest<N>>;
+/// The miner heartbeat in seconds.
+const MINER_HEARTBEAT_IN_SECONDS: Duration = Duration::from_secs(2);
+
///
/// An enum of requests that the `Prover` struct processes.
///
#[derive(Debug)]
pub enum ProverRequest<N: Network> {
+ /// PoolRequest := (peer_ip, share_difficulty, block_template)
+ PoolRequest(SocketAddr, u64, BlockTemplate<N>),
/// MemoryPoolClear := (block)
MemoryPoolClear(Option<Block<N>>),
/// UnconfirmedTransaction := (peer_ip, transaction)
@@ -69,8 +76,12 @@ pub enum ProverRequest<N: Network> {
pub struct Prover<N: Network, E: Environment> {
/// The state storage of the prover.
state: Arc<ProverState<N>>,
- /// The thread pool for the miner.
- miner: Arc<ThreadPool>,
+ /// The Aleo address of the prover.
+ address: Option<Address<N>>,
+ /// The IP address of the connected pool.
+ pool: Option<SocketAddr>,
+ /// The thread pool for the prover.
+ thread_pool: Arc<ThreadPool>,
/// The prover router of the node.
prover_router: ProverRouter<N>,
/// The pool of unconfirmed transactions.
@@ -92,8 +103,9 @@ impl<N: Network, E: Environment> Prover<N, E> {
pub async fn open<S: Storage, P: AsRef<Path> + Copy>(
tasks: &mut Tasks<JoinHandle<()>>,
path: P,
- miner: Option<Address<N>>,
+ address: Option<Address<N>>,
local_ip: SocketAddr,
+ pool_ip: Option<SocketAddr>,
status: &Status,
terminator: &Arc<AtomicBool>,
peers_router: PeersRouter<N, E>,
@@ -102,8 +114,8 @@ impl<N: Network, E: Environment> Prover<N, E> {
) -> Result<Arc<Self>> {
// Initialize an mpsc channel for sending requests to the `Prover` struct.
let (prover_router, mut prover_handler) = mpsc::channel(1024);
- // Initialize the prover pool.
- let pool = ThreadPoolBuilder::new()
+ // Initialize the prover thread pool.
+ let thread_pool = ThreadPoolBuilder::new()
.stack_size(8 * 1024 * 1024)
.num_threads((num_cpus::get() / 8 * 7).max(1))
.build()?;
@@ -111,7 +123,9 @@ impl<N: Network, E: Environment> Prover<N, E> {
// Initialize the prover.
let prover = Arc::new(Self {
state: Arc::new(ProverState::open_writer::<S, P>(path)?),
- miner: Arc::new(pool),
+ address,
+ pool: pool_ip,
+ thread_pool: Arc::new(thread_pool),
prover_router,
memory_pool: Arc::new(RwLock::new(MemoryPool::new())),
status: status.clone(),
@@ -138,78 +152,32 @@ impl<N: Network, E: Environment> Prover<N, E> {
let _ = handler.await;
}
- // Initialize a new instance of the miner.
- if E::NODE_TYPE == NodeType::Miner {
- if let Some(recipient) = miner {
- // Initialize the prover process.
- let prover = prover.clone();
- let tasks_clone = tasks.clone();
- let (router, handler) = oneshot::channel();
- tasks.append(task::spawn(async move {
- // Notify the outer function that the task is ready.
- let _ = router.send(());
- loop {
- // If `terminator` is `false` and the status is not `Peering` or `Mining` already, mine the next block.
- if !prover.terminator.load(Ordering::SeqCst) && !prover.status.is_peering() && !prover.status.is_mining() {
- // Set the status to `Mining`.
- prover.status.update(State::Mining);
-
- // Prepare the unconfirmed transactions, terminator, and status.
- let state = prover.state.clone();
- let miner = prover.miner.clone();
- let canon = prover.ledger_reader.clone(); // This is *safe* as the ledger only reads.
- let unconfirmed_transactions = prover.memory_pool.read().await.transactions();
- let terminator = prover.terminator.clone();
- let status = prover.status.clone();
- let ledger_router = prover.ledger_router.clone();
- let prover_router = prover.prover_router.clone();
-
- tasks_clone.append(task::spawn(async move {
- // Mine the next block.
- let result = task::spawn_blocking(move || {
- miner.install(move || {
- canon.mine_next_block(
- recipient,
- E::COINBASE_IS_PUBLIC,
- &unconfirmed_transactions,
- &terminator,
- &mut thread_rng(),
- )
- })
- })
- .await
- .map_err(|e| e.into());
-
- // Set the status to `Ready`.
- status.update(State::Ready);
+ // Initialize the miner, if the node type is a miner.
+ if E::NODE_TYPE == NodeType::Miner && prover.pool.is_none() {
+ Self::start_miner(tasks, prover.clone(), local_ip).await;
+ }
- match result {
- Ok(Ok((block, coinbase_record))) => {
- debug!("Miner has found unconfirmed block {} ({})", block.height(), block.hash());
- // Store the coinbase record.
- if let Err(error) = state.add_coinbase_record(block.height(), coinbase_record) {
- warn!("[Miner] Failed to store coinbase record - {}", error);
- }
+ // Initialize the prover, if the node type is a prover.
+ if E::NODE_TYPE == NodeType::Prover && prover.pool.is_some() {
+ let prover = prover.clone();
+ let (router, handler) = oneshot::channel();
+ task::spawn(async move {
+ // Notify the outer function that the task is ready.
+ let _ = router.send(());
+ loop {
+ // Sleep for `1` second.
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
- // Broadcast the next block.
- let request = LedgerRequest::UnconfirmedBlock(local_ip, block, prover_router.clone());
- if let Err(error) = ledger_router.send(request).await {
- warn!("Failed to broadcast mined block - {}", error);
- }
- }
- Ok(Err(error)) | Err(error) => trace!("{}", error),
- }
- }));
- }
- // Sleep for 2 seconds.
- tokio::time::sleep(std::time::Duration::from_secs(2)).await;
+ // TODO (howardwu): Check that the prover is connected to the pool before proceeding.
+ // Currently we use a sleep function to probabilistically ensure the peer is connected.
+ if !prover.terminator.load(Ordering::SeqCst) && !prover.status.is_peering() && !prover.status.is_mining() {
+ prover.send_pool_register().await;
}
- }));
- // Wait until the miner task is ready.
- let _ = handler.await;
- } else {
- error!("Missing miner address. Please specify an Aleo address in order to mine");
- }
+ }
+ });
+
+        // Wait until the prover task is ready.
+ let _ = handler.await;
}
Ok(prover)
@@ -236,6 +204,10 @@ impl<N: Network, E: Environment> Prover<N, E> {
///
pub(super) async fn update(&self, request: ProverRequest<N>) {
match request {
+ ProverRequest::PoolRequest(operator_ip, share_difficulty, block_template) => {
+ // Process the pool request message.
+ self.process_pool_request(operator_ip, share_difficulty, block_template).await;
+ }
ProverRequest::MemoryPoolClear(block) => match block {
Some(block) => self.memory_pool.write().await.remove_transactions(block.transactions()),
None => *self.memory_pool.write().await = MemoryPool::new(),
@@ -250,6 +222,97 @@ impl<N: Network, E: Environment> Prover<N, E> {
}
}
+ ///
+ /// Sends a `PoolRegister` message to the pool IP address.
+ ///
+ async fn send_pool_register(&self) {
+ if E::NODE_TYPE == NodeType::Prover {
+ if let Some(recipient) = self.address {
+ if let Some(pool_ip) = self.pool {
+ // Proceed to register the prover to receive a block template.
+ let request = PeersRequest::MessageSend(pool_ip, Message::PoolRegister(recipient));
+ if let Err(error) = self.peers_router.send(request).await {
+ warn!("[PoolRegister] {}", error);
+ }
+ } else {
+ error!("Missing pool IP address. Please specify a pool IP address in order to run the prover");
+ }
+ } else {
+ error!("Missing prover address. Please specify an Aleo address in order to prove");
+ }
+ }
+ }
+
+ ///
+ /// Processes a `PoolRequest` message from a pool operator.
+ ///
+ async fn process_pool_request(&self, operator_ip: SocketAddr, share_difficulty: u64, block_template: BlockTemplate<N>) {
+ if E::NODE_TYPE == NodeType::Prover {
+ if let Some(recipient) = self.address {
+ if let Some(pool_ip) = self.pool {
+ // Refuse work from any pool other than the registered one.
+ if pool_ip == operator_ip {
+ // If `terminator` is `false` and the status is not `Peering` or `Mining`
+ // already, mine the next block.
+ if !self.terminator.load(Ordering::SeqCst) && !self.status.is_peering() && !self.status.is_mining() {
+ // Set the status to `Mining`.
+ self.status.update(State::Mining);
+
+ let thread_pool = self.thread_pool.clone();
+ let block_template = block_template.clone();
+ let terminator = self.terminator.clone();
+
+ let result = task::spawn_blocking(move || {
+ thread_pool.install(move || {
+ loop {
+ let block_header =
+ BlockHeader::mine_once_unchecked(&block_template, &terminator, &mut thread_rng())?;
+
+ // Ensure the share difficulty target is met.
+ if N::posw().verify(
+ block_header.height(),
+ share_difficulty,
+ &vec![*block_header.to_header_root().unwrap(), *block_header.nonce()],
+ block_header.proof(),
+ ) {
+ let proof_difficulty = block_header.proof().to_proof_difficulty()?;
+ return Ok::<(BlockHeader<N>, u64), anyhow::Error>((block_header, proof_difficulty));
+ }
+ }
+ })
+ })
+ .await;
+
+ self.status.update(State::Ready);
+
+ match result {
+ Ok(Ok((block_header, proof_difficulty))) => {
+ info!(
+ "Prover successfully mined a share for unconfirmed block {} with proof difficulty of {}",
+ block_header.height(),
+ proof_difficulty
+ );
+
+ // Send a `PoolResponse` to the operator.
+ let message = Message::PoolResponse(recipient, Data::Object(block_header));
+ if let Err(error) = self.peers_router.send(PeersRequest::MessageSend(operator_ip, message)).await {
+ warn!("[PoolResponse] {}", error);
+ }
+ }
+ Ok(Err(error)) => trace!("{}", error),
+ Err(error) => trace!("{}", anyhow!("Could not mine next block {}", error)),
+ }
+ }
+ }
+ } else {
+ error!("Missing pool IP address. Please specify a pool IP address in order to run the prover");
+ }
+ } else {
+ error!("Missing prover address. Please specify an Aleo address in order to prove");
+ }
+ }
+ }
+
///
/// Adds the given unconfirmed transaction to the memory pool.
///
@@ -272,4 +335,85 @@ impl<N: Network, E: Environment> Prover<N, E> {
}
}
}
+
+ ///
+ /// Initialize the miner, if the node type is a miner.
+ ///
+ async fn start_miner(tasks: &mut Tasks<JoinHandle<()>>, prover: Arc<Self>, local_ip: SocketAddr) {
+ // Initialize a new instance of the miner.
+ if E::NODE_TYPE == NodeType::Miner && prover.pool.is_none() {
+ if let Some(recipient) = prover.address {
+ // Initialize the prover process.
+ let prover = prover.clone();
+ let tasks_clone = tasks.clone();
+ let (router, handler) = oneshot::channel();
+ tasks.append(task::spawn(async move {
+ // Notify the outer function that the task is ready.
+ let _ = router.send(());
+ loop {
+ // Prepare the status and terminator.
+ let status = prover.status.clone();
+ let terminator = prover.terminator.clone();
+
+ // If `terminator` is `false` and the status is not `Peering` or `Mining` already, mine the next block.
+ if !terminator.load(Ordering::SeqCst) && !status.is_peering() && !status.is_mining() {
+ // Set the status to `Mining`.
+ status.update(State::Mining);
+
+ // Prepare the unconfirmed transactions and dependent objects.
+ let state = prover.state.clone();
+ let thread_pool = prover.thread_pool.clone();
+ let canon = prover.ledger_reader.clone(); // This is *safe* as the ledger only reads.
+ let unconfirmed_transactions = prover.memory_pool.read().await.transactions();
+ let ledger_router = prover.ledger_router.clone();
+ let prover_router = prover.prover_router.clone();
+
+ tasks_clone.append(task::spawn(async move {
+ // Mine the next block.
+ let result = task::spawn_blocking(move || {
+ thread_pool.install(move || {
+ canon.mine_next_block(
+ recipient,
+ E::COINBASE_IS_PUBLIC,
+ &unconfirmed_transactions,
+ &terminator,
+ &mut thread_rng(),
+ )
+ })
+ })
+ .await
+ .map_err(|e| e.into());
+
+ // Set the status to `Ready`.
+ status.update(State::Ready);
+
+ match result {
+ Ok(Ok((block, coinbase_record))) => {
+ debug!("Miner has found unconfirmed block {} ({})", block.height(), block.hash());
+ // Store the coinbase record.
+ if let Err(error) = state.add_coinbase_record(block.height(), coinbase_record) {
+ warn!("[Miner] Failed to store coinbase record - {}", error);
+ }
+
+ // Broadcast the next block.
+ let request = LedgerRequest::UnconfirmedBlock(local_ip, block, prover_router.clone());
+ if let Err(error) = ledger_router.send(request).await {
+ warn!("Failed to broadcast mined block - {}", error);
+ }
+ }
+ Ok(Err(error)) | Err(error) => trace!("{}", error),
+ }
+ }));
+ }
+ // Proceed to sleep for a preset amount of time.
+ tokio::time::sleep(MINER_HEARTBEAT_IN_SECONDS).await;
+ }
+ }));
+ // Wait until the miner task is ready.
+ let _ = handler.await;
+ } else {
+ error!("Missing miner address. Please specify an Aleo address in order to mine");
+ }
+ }
+ }
}
diff --git a/src/network/server.rs b/src/network/server.rs
index 10c369ec8c..400dc18fc0 100644
--- a/src/network/server.rs
+++ b/src/network/server.rs
@@ -16,14 +16,14 @@
use crate::{
display::notification_message,
+ environment::{Environment, NodeType},
helpers::{State, Status, Tasks},
ledger::{Ledger, LedgerRequest, LedgerRouter},
+ operator::{Operator, OperatorRouter},
peers::{Peers, PeersRequest, PeersRouter},
prover::{Prover, ProverRouter},
rpc::initialize_rpc_server,
- Environment,
Node,
- NodeType,
};
use snarkos_storage::{storage::rocksdb::RocksDB, LedgerState};
use snarkvm::prelude::*;
@@ -55,6 +55,8 @@ pub struct Server<N: Network, E: Environment> {
peers: Arc<Peers<N, E>>,
/// The ledger of the node.
ledger: Arc<Ledger<N, E>>,
+ /// The operator of the node.
+ operator: Arc<Operator<N, E>>,
/// The prover of the node.
prover: Arc<Prover<N, E>>,
/// The list of tasks spawned by the node.
@@ -66,7 +68,12 @@ impl<N: Network, E: Environment> Server<N, E> {
/// Starts the connection listener for peers.
///
#[inline]
- pub async fn initialize(node: &Node, miner: Option<Address<N>>, mut tasks: Tasks<task::JoinHandle<()>>) -> Result<Self> {
+ pub async fn initialize(
+ node: &Node,
+ address: Option<Address<N>>,
+ pool_ip: Option<SocketAddr>,
+ mut tasks: Tasks<task::JoinHandle<()>>,
+ ) -> Result<Self> {
// Initialize a new TCP listener at the given IP.
let (local_ip, listener) = match TcpListener::bind(node.node).await {
Ok(listener) => (listener.local_addr().expect("Failed to fetch the local IP"), listener),
@@ -75,8 +82,11 @@ impl<N: Network, E: Environment> Server<N, E> {
// Initialize the ledger storage path.
let ledger_storage_path = node.ledger_storage_path(local_ip);
+ // Initialize the operator storage path.
+ let operator_storage_path = node.operator_storage_path(local_ip);
// Initialize the prover storage path.
let prover_storage_path = node.prover_storage_path(local_ip);
+
// Initialize the status indicator.
let status = Status::new();
// Initialize the terminator bit.
@@ -90,8 +100,9 @@ impl<N: Network, E: Environment> Server<N, E> {
let prover = Prover::open::<RocksDB, _>(
&mut tasks,
&prover_storage_path,
- miner,
+ address,
local_ip,
+ pool_ip,
&status,
&terminator,
peers.router(),
@@ -99,6 +110,40 @@ impl<N: Network, E: Environment> Server<N, E> {
ledger.router(),
)
.await?;
+ // Initialize a new instance for managing the operator.
+ let operator = Operator::open::<RocksDB, _>(
+ &mut tasks,
+ &operator_storage_path,
+ address,
+ local_ip,
+ prover.memory_pool(),
+ peers.router(),
+ ledger.reader(),
+ ledger.router(),
+ prover.router(),
+ )
+ .await?;
+
+ // TODO (howardwu): This is a hack for the prover.
+ // Check that the prover is connected to the pool before sending a PoolRegister message.
+ if let Some(pool_ip) = pool_ip {
+ // Initialize the connection process.
+ let (router, handler) = oneshot::channel();
+ // Route a `Connect` request to the pool.
+ peers
+ .router()
+ .send(PeersRequest::Connect(
+ pool_ip,
+ ledger.reader(),
+ ledger.router(),
+ operator.router(),
+ prover.router(),
+ router,
+ ))
+ .await?;
+ // Wait until the connection task is initialized.
+ let _ = handler.await;
+ }
// Initialize the connection listener for new peers.
Self::initialize_listener(
@@ -109,11 +154,20 @@ impl<N: Network, E: Environment> Server<N, E> {
peers.clone(),
ledger.reader(),
ledger.router(),
+ operator.router(),
prover.router(),
)
.await;
// Initialize a new instance of the heartbeat.
- Self::initialize_heartbeat(&mut tasks, peers.router(), ledger.reader(), ledger.router(), prover.router()).await;
+ Self::initialize_heartbeat(
+ &mut tasks,
+ peers.router(),
+ ledger.reader(),
+ ledger.router(),
+ operator.router(),
+ prover.router(),
+ )
+ .await;
// Initialize a new instance of the RPC server.
Self::initialize_rpc(
&mut tasks,
@@ -126,13 +180,14 @@ impl<N: Network, E: Environment> Server<N, E> {
)
.await;
// Initialize a new instance of the notification.
- Self::initialize_notification(&mut tasks, ledger.reader(), prover.clone(), miner).await;
+ Self::initialize_notification(&mut tasks, ledger.reader(), prover.clone(), address).await;
Ok(Self {
local_ip,
status,
peers,
ledger,
+ operator,
prover,
tasks,
})
@@ -168,6 +223,7 @@ impl<N: Network, E: Environment> Server<N, E> {
peer_ip,
self.ledger.reader(),
self.ledger.router(),
+ self.operator.router(),
self.prover.router(),
router,
))
@@ -214,6 +270,7 @@ impl<N: Network, E: Environment> Server<N, E> {
peers: Arc<Peers<N, E>>,
ledger_reader: LedgerReader<N>,
ledger_router: LedgerRouter<N>,
+ operator_router: OperatorRouter<N>,
prover_router: ProverRouter<N>,
) {
// Initialize the listener process.
@@ -234,6 +291,7 @@ impl<N: Network, E: Environment> Server<N, E> {
peer_ip,
ledger_reader.clone(),
ledger_router.clone(),
+ operator_router.clone(),
prover_router.clone(),
);
if let Err(error) = peers_router.send(request).await {
@@ -263,6 +321,7 @@ impl<N: Network, E: Environment> Server<N, E> {
peers_router: PeersRouter<N, E>,
ledger_reader: LedgerReader<N>,
ledger_router: LedgerRouter<N>,
+ operator_router: OperatorRouter<N>,
prover_router: ProverRouter<N>,
) {
// Initialize the heartbeat process.
@@ -276,7 +335,12 @@ impl<N: Network, E: Environment> Server<N, E> {
error!("Failed to send heartbeat to ledger: {}", error)
}
// Transmit a heartbeat request to the peers.
- let request = PeersRequest::Heartbeat(ledger_reader.clone(), ledger_router.clone(), prover_router.clone());
+ let request = PeersRequest::Heartbeat(
+ ledger_reader.clone(),
+ ledger_router.clone(),
+ operator_router.clone(),
+ prover_router.clone(),
+ );
if let Err(error) = peers_router.send(request).await {
error!("Failed to send heartbeat to peers: {}", error)
}
@@ -327,7 +391,7 @@ impl<N: Network, E: Environment> Server<N, E> {
tasks: &mut Tasks<task::JoinHandle<()>>,
ledger: LedgerReader<N>,
prover: Arc<Prover<N, E>>,
- miner: Option<Address<N>>,
+ address: Option<Address<N>>,
) {
// Initialize the heartbeat process.
let (router, handler) = oneshot::channel();
@@ -335,10 +399,10 @@ impl<N: Network, E: Environment> Server<N, E> {
// Notify the outer function that the task is ready.
let _ = router.send(());
loop {
- info!("{}", notification_message(miner));
+ info!("{}", notification_message(address));
if E::NODE_TYPE == NodeType::Miner {
- if let Some(miner) = miner {
+ if let Some(miner_address) = address {
// Retrieve the latest block height.
let latest_block_height = ledger.latest_block_height();
@@ -351,7 +415,7 @@ impl<N: Network, E: Environment> Server<N, E> {
// Filter the coinbase records by determining if they exist on the canonical chain.
if let Ok(true) = ledger.contains_commitment(&record.commitment()) {
// Ensure the record owner matches.
- if record.owner() == miner {
+ if record.owner() == miner_address {
// Add the block to the appropriate list.
match block_height + 2048 < latest_block_height {
true => confirmed.push((block_height, record)),
@@ -365,7 +429,7 @@ impl<N: Network, E: Environment> Server<N, E> {
"Mining Report (confirmed_blocks = {}, pending_blocks = {}, miner_address = {})",
confirmed.len(),
pending.len(),
- miner
+ miner_address
);
}
}
diff --git a/src/node.rs b/src/node.rs
index 43c2c3f2a4..e360d014a2 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -15,16 +15,10 @@
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
use crate::{
+ environment::{Client, ClientTrial, Environment, Miner, MinerTrial, NodeType, Operator, OperatorTrial, Prover, ProverTrial, SyncNode},
helpers::{Tasks, Updater},
network::Server,
- Client,
- ClientTrial,
Display,
- Environment,
- Miner,
- MinerTrial,
- NodeType,
- SyncNode,
};
use snarkos_storage::storage::rocksdb::RocksDB;
use snarkvm::dpc::{prelude::*, testnet2::Testnet2};
@@ -46,6 +40,15 @@ pub struct Node {
/// Specify this as a mining node, with the given miner address.
#[structopt(long = "miner")]
pub miner: Option<String>,
+ /// Specify this as an operating node, with the given operator address.
+ #[structopt(long = "operator")]
+ pub operator: Option<String>,
+ /// Specify this as a prover node, with the given prover address.
+ #[structopt(long = "prover")]
+ pub prover: Option<String>,
+ /// Specify the pool that a prover node is contributing to.
+ #[structopt(long = "pool")]
+ pub pool: Option<SocketAddr>,
/// Specify the network of this node.
#[structopt(default_value = "2", long = "network")]
pub network: u16,
@@ -91,12 +94,16 @@ impl Node {
println!("{}", command.parse()?);
Ok(())
}
- None => match (self.network, self.miner.is_some(), self.trial, self.sync) {
- (2, _, _, true) => self.start_server::<Testnet2, SyncNode<Testnet2>>().await,
- (2, true, false, false) => self.start_server::<Testnet2, Miner<Testnet2>>().await,
- (2, false, false, false) => self.start_server::<Testnet2, Client<Testnet2>>().await,
- (2, true, true, false) => self.start_server::<Testnet2, MinerTrial<Testnet2>>().await,
- (2, false, true, false) => self.start_server::<Testnet2, ClientTrial<Testnet2>>().await,
+ None => match (self.network, &self.miner, &self.operator, &self.prover, self.trial, self.sync) {
+ (2, None, None, None, false, false) => self.start_server::<Testnet2, Client<Testnet2>>(&None).await,
+ (2, Some(_), None, None, false, false) => self.start_server::<Testnet2, Miner<Testnet2>>(&self.miner).await,
+ (2, None, Some(_), None, false, false) => self.start_server::<Testnet2, Operator<Testnet2>>(&self.operator).await,
+ (2, None, None, Some(_), false, false) => self.start_server::<Testnet2, Prover<Testnet2>>(&self.prover).await,
+ (2, None, None, None, true, false) => self.start_server::<Testnet2, ClientTrial<Testnet2>>(&None).await,
+ (2, Some(_), None, None, true, false) => self.start_server::<Testnet2, MinerTrial<Testnet2>>(&self.miner).await,
+ (2, None, Some(_), None, true, false) => self.start_server::<Testnet2, OperatorTrial<Testnet2>>(&self.operator).await,
+ (2, None, None, Some(_), true, false) => self.start_server::<Testnet2, ProverTrial<Testnet2>>(&self.prover).await,
+ (2, None, None, None, _, true) => self.start_server::<Testnet2, SyncNode<Testnet2>>(&None).await,
_ => panic!("Unsupported node configuration"),
},
}
@@ -115,6 +122,19 @@ impl Node {
}
}
+ /// Returns the storage path of the operator.
+ pub(crate) fn operator_storage_path(&self, _local_ip: SocketAddr) -> PathBuf {
+ cfg_if::cfg_if! {
+ if #[cfg(feature = "test")] {
+ // Tests may use any available ports, and removes the storage artifacts afterwards,
+ // so that there is no need to adhere to a specific number assignment logic.
+ PathBuf::from(format!("/tmp/snarkos-test-operator-{}", _local_ip.port()))
+ } else {
+ aleo_std::aleo_operator_dir(self.network, self.dev)
+ }
+ }
+ }
+
/// Returns the storage path of the prover.
pub(crate) fn prover_storage_path(&self, _local_ip: SocketAddr) -> PathBuf {
cfg_if::cfg_if! {
@@ -128,29 +148,26 @@ impl Node {
}
}
- async fn start_server<N: Network, E: Environment>(&self) -> Result<()> {
- let miner = match (E::NODE_TYPE, &self.miner) {
- (NodeType::Miner, Some(address)) => {
- let miner_address = Address::<N>::from_str(address)?;
- println!("{}", crate::display::welcome_message());
- println!("Your Aleo address is {}.\n", miner_address);
- println!("Starting a mining node on {}.", N::NETWORK_NAME);
- println!("{}", crate::display::notification_message::<N>(Some(miner_address)));
- Some(miner_address)
- }
- _ => {
- println!("{}", crate::display::welcome_message());
- println!("Starting a {} node on {}.", E::NODE_TYPE, N::NETWORK_NAME);
- println!("{}", crate::display::notification_message::<N>(None));
- None
+ async fn start_server<N: Network, E: Environment>(&self, address: &Option<String>) -> Result<()> {
+ println!("{}", crate::display::welcome_message());
+
+ let address = match (E::NODE_TYPE, address) {
+ (NodeType::Miner, Some(address)) | (NodeType::Operator, Some(address)) | (NodeType::Prover, Some(address)) => {
+ let address = Address::<N>::from_str(address)?;
+ println!("Your Aleo address is {}.\n", address);
+ Some(address)
}
+ _ => None,
};
+ println!("Starting {} on {}.", E::NODE_TYPE.description(), N::NETWORK_NAME);
+ println!("{}", crate::display::notification_message::<N>(address));
+
// Initialize the tasks handler.
let tasks = Tasks::new();
// Initialize the node's server.
- let server = Server::<N, E>::initialize(self, miner, tasks.clone()).await?;
+ let server = Server::<N, E>::initialize(self, address, self.pool, tasks.clone()).await?;
// Initialize signal handling; it also maintains ownership of the Server
// in order for it to not go out of scope.
@@ -353,7 +370,7 @@ impl Experimental {
#[derive(StructOpt, Debug)]
pub enum ExperimentalCommands {
- #[structopt(name = "new_account", about = "Generate a new Aleo Account.")]
+ #[structopt(name = "new_account", about = "Generate a new Aleo account.")]
NewAccount(NewAccount),
}
diff --git a/src/rpc/rpc.rs b/src/rpc/rpc.rs
index 436500fd3f..df87e967a0 100644
--- a/src/rpc/rpc.rs
+++ b/src/rpc/rpc.rs
@@ -424,7 +424,7 @@ fn result_to_response<T: Serialize>(
#[cfg(test)]
mod tests {
use super::*;
- use crate::{helpers::State, ledger::Ledger, Client, Prover};
+ use crate::{environment::Client, helpers::State, ledger::Ledger, network::Prover};
use crate::helpers::Tasks;
use snarkos_storage::{
@@ -501,6 +501,7 @@ mod tests {
&prover_path,
None,
local_ip,
+ Some(local_ip),
&status,
&terminator,
peers.router(),
@@ -547,6 +548,7 @@ mod tests {
&prover_path,
None,
local_ip,
+ Some(local_ip),
&status,
&terminator,
peers.router(),
diff --git a/storage/Cargo.toml b/storage/Cargo.toml
index b50105575f..71dbb0c14e 100644
--- a/storage/Cargo.toml
+++ b/storage/Cargo.toml
@@ -17,7 +17,7 @@ license = "GPL-3.0"
edition = "2018"
[dependencies]
-snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "02c814c" }
+snarkvm = { git = "https://github.com/AleoHQ/snarkVM.git", rev = "b70bf72" }
#snarkvm = { path = "../../snarkVM" }
[dependencies.anyhow]
diff --git a/storage/src/helpers/block_locators.rs b/storage/src/helpers/block_locators.rs
index 80530843c9..fe8206b261 100644
--- a/storage/src/helpers/block_locators.rs
+++ b/storage/src/helpers/block_locators.rs
@@ -193,7 +193,7 @@ mod tests {
// Serialize
let expected_string = expected_block_locators.to_string();
let candidate_string = serde_json::to_string(&expected_block_locators).unwrap();
- assert_eq!(1692, candidate_string.len(), "Update me if serialization has changed");
+ assert_eq!(1703, candidate_string.len(), "Update me if serialization has changed");
assert_eq!(expected_string, candidate_string);
// Deserialize
diff --git a/storage/src/lib.rs b/storage/src/lib.rs
index 41e5e800ba..ee6cecbdba 100644
--- a/storage/src/lib.rs
+++ b/storage/src/lib.rs
@@ -24,6 +24,7 @@ pub(crate) mod state;
pub use state::{
LedgerState,
Metadata,
+ OperatorState,
ProverState,
MAXIMUM_BLOCK_LOCATORS,
MAXIMUM_LINEAR_BLOCK_LOCATORS,
diff --git a/storage/src/state/ledger.rs b/storage/src/state/ledger.rs
index 50f5c61654..eb12ba837f 100644
--- a/storage/src/state/ledger.rs
+++ b/storage/src/state/ledger.rs
@@ -155,6 +155,7 @@ impl<N: Network> LedgerState<N> {
{
let revert_block_height = snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT.saturating_sub(1);
warn!("Ledger is not V12-compliant, reverting to block {}", revert_block_height);
+ warn!("{:?}", ledger.get_block(latest_block_height));
latest_block_height = ledger.clear_incompatible_blocks(latest_block_height, revert_block_height)?;
info!("Ledger successfully transitioned and is now V12-compliant");
}
@@ -600,17 +601,20 @@ impl<N: Network> LedgerState<N> {
is_public: bool,
transactions: &[Transaction<N>],
rng: &mut R,
- ) -> Result<(BlockTemplate<N>, Record<N>)> {
- // Prepare the new block.
- let previous_block_hash = self.latest_block_hash();
- let block_height = self.latest_block_height().saturating_add(1);
+ ) -> Result<BlockTemplate<N>> {
+ // Fetch the latest state of the ledger.
+ let latest_block = self.latest_block();
+ let previous_ledger_root = self.latest_ledger_root();
+ // Prepare the new block.
+ let previous_block_hash = latest_block.hash();
+ let block_height = latest_block.height().saturating_add(1);
// Ensure that the new timestamp is ahead of the previous timestamp.
- let block_timestamp = std::cmp::max(chrono::Utc::now().timestamp(), self.latest_block_timestamp().saturating_add(1));
+ let block_timestamp = std::cmp::max(chrono::Utc::now().timestamp(), latest_block.timestamp().saturating_add(1));
// Compute the block difficulty target.
let difficulty_target = if N::NETWORK_ID == 2 && block_height <= snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT {
- Blocks::<N>::compute_difficulty_target(self.latest_block().header(), block_timestamp, block_height)
+ Blocks::<N>::compute_difficulty_target(latest_block.header(), block_timestamp, block_height)
} else if N::NETWORK_ID == 2 {
let anchor_block_header = self.get_block_header(snarkvm::dpc::testnet2::V12_UPGRADE_BLOCK_HEIGHT)?;
Blocks::<N>::compute_difficulty_target(&anchor_block_header, block_timestamp, block_height)
@@ -619,24 +623,22 @@ impl<N: Network> LedgerState<N> {
};
// Compute the cumulative weight.
- let cumulative_weight = self
- .latest_cumulative_weight()
+ let cumulative_weight = latest_block
+ .cumulative_weight()
.saturating_add((u64::MAX / difficulty_target) as u128);
- // Construct the ledger root.
- let ledger_root = self.latest_ledger_root();
- // Craft a coinbase transaction.
- let amount = Block::<N>::block_reward(block_height);
- let (coinbase_transaction, coinbase_record) = Transaction::<N>::new_coinbase(recipient, amount, is_public, rng)?;
+ // Compute the coinbase reward (not including the transaction fees).
+ let mut coinbase_reward = Block::<N>::block_reward(block_height);
+ let mut transaction_fees = AleoAmount::ZERO;
+
// Filter the transactions to ensure they are new, and append the coinbase transaction.
- // TODO (howardwu): Improve the performance and design of this.
let mut transactions: Vec<Transaction<N>> = transactions
.iter()
.filter(|transaction| {
for serial_number in transaction.serial_numbers() {
if let Ok(true) = self.contains_serial_number(serial_number) {
trace!(
- "Miner is filtering out transaction {} (serial_number {})",
+ "Ledger is filtering out transaction {} (serial_number {})",
transaction.transaction_id(),
serial_number
);
@@ -646,34 +648,41 @@ impl<N: Network> LedgerState<N> {
for commitment in transaction.commitments() {
if let Ok(true) = self.contains_commitment(commitment) {
trace!(
- "Miner is filtering out transaction {} (commitment {})",
+ "Ledger is filtering out transaction {} (commitment {})",
transaction.transaction_id(),
commitment
);
return false;
}
}
- trace!("Miner is adding transaction {}", transaction.transaction_id());
+ trace!("Adding transaction {} to block template", transaction.transaction_id());
+ transaction_fees = transaction_fees.add(transaction.value_balance());
true
})
.cloned()
.collect();
+
+ // Calculate the final coinbase reward (including the transaction fees).
+ coinbase_reward = coinbase_reward.add(transaction_fees);
+
+ // Craft a coinbase transaction, and append it to the list of transactions.
+ let (coinbase_transaction, coinbase_record) = Transaction::<N>::new_coinbase(recipient, coinbase_reward, is_public, rng)?;
transactions.push(coinbase_transaction);
+
// Construct the new block transactions.
let transactions = Transactions::from(&transactions)?;
// Construct the block template.
- let template = BlockTemplate::new(
+ Ok(BlockTemplate::new(
previous_block_hash,
block_height,
block_timestamp,
difficulty_target,
cumulative_weight,
- ledger_root,
+ previous_ledger_root,
transactions,
- );
-
- Ok((template, coinbase_record))
+ coinbase_record,
+ ))
}
/// Mines a new block using the latest state of the given ledger.
@@ -685,10 +694,11 @@ impl<N: Network> LedgerState<N> {
terminator: &AtomicBool,
rng: &mut R,
) -> Result<(Block<N>, Record<N>)> {
- let (template, coinbase_record) = self.get_block_template(recipient, is_public, transactions, rng)?;
+ let template = self.get_block_template(recipient, is_public, transactions, rng)?;
+ let coinbase_record = template.coinbase_record().clone();
// Mine the next block.
- match Block::mine(template, terminator, rng) {
+ match Block::mine(&template, terminator, rng) {
Ok(block) => Ok((block, coinbase_record)),
Err(error) => Err(anyhow!("Unable to mine the next block: {}", error)),
}
@@ -1189,6 +1199,8 @@ impl<N: Network> LedgerState<N> {
// Decrement the current block height, and update the current block.
current_block_height = current_block_height.saturating_sub(1);
+
+ trace!("Ledger successfully reverted to block {}", current_block_height);
}
Ok(current_block_height)
}
diff --git a/storage/src/state/mod.rs b/storage/src/state/mod.rs
index 177bfa10ea..5bbc304b9d 100644
--- a/storage/src/state/mod.rs
+++ b/storage/src/state/mod.rs
@@ -17,6 +17,9 @@
pub(crate) mod ledger;
pub use ledger::{LedgerState, Metadata, MAXIMUM_BLOCK_LOCATORS, MAXIMUM_LINEAR_BLOCK_LOCATORS, MAXIMUM_QUADRATIC_BLOCK_LOCATORS};
+pub(crate) mod operator;
+pub use operator::OperatorState;
+
pub(crate) mod prover;
pub use prover::ProverState;
diff --git a/storage/src/state/operator.rs b/storage/src/state/operator.rs
new file mode 100644
index 0000000000..c1ba0478c3
--- /dev/null
+++ b/storage/src/state/operator.rs
@@ -0,0 +1,151 @@
+// Copyright (C) 2019-2021 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// The snarkOS library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// The snarkOS library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
+
+use crate::{
+ state::LedgerState,
+ storage::{DataMap, Map, MapId, Storage},
+};
+use snarkvm::dpc::prelude::*;
+
+use anyhow::{anyhow, Result};
+use std::{collections::HashMap, path::Path, sync::Arc};
+
+#[derive(Debug)]
+pub struct OperatorState<N: Network> {
+ shares: SharesState<N>,
+}
+
+impl<N: Network> OperatorState<N> {
+ ///
+ /// Opens a new writable instance of `OperatorState` from the given storage path.
+ ///
+ pub fn open_writer<S: Storage, P: AsRef<Path>>(path: P) -> Result<Self> {
+ // Open storage.
+ let context = N::NETWORK_ID;
+ let is_read_only = false;
+ let storage = S::open(path, context, is_read_only)?;
+
+ // Initialize the operator.
+ let operator = Self {
+ shares: SharesState::open(storage)?,
+ };
+
+ info!("Operator successfully initialized");
+ Ok(operator)
+ }
+
+ /// Returns all the shares in storage.
+ pub fn to_shares(&self) -> Vec<((u32, Record<N>), HashMap<Address<N>, u64>)> {
+ self.shares.to_shares()
+ }
+
+ /// Returns all coinbase records in storage.
+ pub fn to_coinbase_records(&self) -> Vec<(u32, Record<N>)> {
+ self.shares.to_records()
+ }
+
+ /// Returns the shares for a specific block, given the block height and coinbase record.
+ pub fn get_shares_for_block(&self, block_height: u32, coinbase_record: Record<N>) -> Result<HashMap<Address<N>, u64>> {
+ self.shares.get_shares_for_block(block_height, coinbase_record)
+ }
+
+ /// Returns the shares for a specific prover, given a ledger and the prover address.
+ pub fn get_shares_for_prover(&self, ledger: &Arc<LedgerState<N>>, prover: &Address<N>) -> u64 {
+ self.shares.get_shares_for_prover(ledger, prover)
+ }
+
+ /// Increments the share count by one for a given block height, coinbase record and prover address.
+ pub fn increment_share(&self, block_height: u32, coinbase_record: Record<N>, prover: &Address<N>) -> Result<()> {
+ self.shares.increment_share(block_height, coinbase_record, prover)
+ }
+
+ /// Removes the shares for a given block height and coinbase record in storage.
+ pub fn remove_shares(&self, block_height: u32, coinbase_record: Record<N>) -> Result<()> {
+ self.shares.remove_shares(block_height, coinbase_record)
+ }
+}
+
+#[derive(Clone, Debug)]
+#[allow(clippy::type_complexity)]
+struct SharesState<N: Network> {
+ /// The miner shares for each block.
+ shares: DataMap<(u32, Record<N>), HashMap<Address<N>, u64>>,
+}
+
+impl<N: Network> SharesState<N> {
+ /// Initializes a new instance of `SharesState`.
+ fn open<S: Storage>(storage: S) -> Result<Self> {
+ Ok(Self {
+ shares: storage.open_map(MapId::Shares)?,
+ })
+ }
+
+ /// Returns all shares in storage.
+ fn to_shares(&self) -> Vec<((u32, Record<N>), HashMap<Address<N>, u64>)> {
+ self.shares.iter().collect()
+ }
+
+ /// Returns all records in storage.
+ fn to_records(&self) -> Vec<(u32, Record<N>)> {
+ self.shares.keys().collect()
+ }
+
+ /// Returns the shares for a specific block, given the block height and coinbase record.
+ fn get_shares_for_block(&self, block_height: u32, coinbase_record: Record<N>) -> Result<HashMap<Address<N>, u64>> {
+ match self.shares.get(&(block_height, coinbase_record))? {
+ Some(shares) => Ok(shares),
+ None => return Err(anyhow!("Block {} does not exist in shares storage", block_height)),
+ }
+ }
+
+ /// Returns the shares for a specific prover, given a ledger and the prover address.
+ fn get_shares_for_prover(&self, ledger: &Arc<LedgerState<N>>, prover: &Address<N>) -> u64 {
+ self.shares
+ .iter()
+ .filter_map(|((_, coinbase_record), shares)| {
+ if !shares.contains_key(prover) {
+ None
+ } else {
+ match ledger.contains_commitment(&coinbase_record.commitment()) {
+ Ok(true) => shares.get(prover).copied(),
+ Ok(false) | Err(_) => None,
+ }
+ }
+ })
+ .sum()
+ }
+
+ /// Increments the share count by one for a given block height, coinbase record, and prover address.
+ fn increment_share(&self, block_height: u32, coinbase_record: Record<N>, prover: &Address<N>) -> Result<()> {
+ // Retrieve the current shares for a given block height.
+ let mut shares = match self.shares.get(&(block_height, coinbase_record.clone()))? {
+ Some(shares) => shares,
+ None => HashMap::new(),
+ };
+
+ // Increment the share count for the given address.
+ let entry = shares.entry(*prover).or_insert(0);
+ *entry = entry.saturating_add(1);
+
+ // Insert the updated shares for the given block height.
+ self.shares.insert(&(block_height, coinbase_record), &shares)
+ }
+
+ /// Removes all of the shares for a given block height and coinbase record.
+ fn remove_shares(&self, block_height: u32, coinbase_record: Record<N>) -> Result<()> {
+ self.shares.remove(&(block_height, coinbase_record))
+ }
+}
diff --git a/storage/src/storage/rocksdb/map.rs b/storage/src/storage/rocksdb/map.rs
index e05ea63240..1480bf1679 100644
--- a/storage/src/storage/rocksdb/map.rs
+++ b/storage/src/storage/rocksdb/map.rs
@@ -27,6 +27,7 @@ pub enum MapId {
SerialNumbers,
Transactions,
Transitions,
+ Shares,
#[cfg(test)]
Test,
}
@@ -43,6 +44,7 @@ impl MapId {
Self::SerialNumbers => b"serial_numbers",
Self::Transactions => b"transactions",
Self::Transitions => b"transitions",
+ Self::Shares => b"shares",
#[cfg(test)]
Self::Test => b"hello world",
}
|
diff --git a/testing/Cargo.toml b/testing/Cargo.toml
index 67b09a297a..3440cd3652 100644
--- a/testing/Cargo.toml
+++ b/testing/Cargo.toml
@@ -25,7 +25,7 @@ path = "../storage"
[dependencies.snarkvm]
git = "https://github.com/AleoHQ/snarkVM.git"
-rev = "02c814c"
+rev = "b70bf72"
#path = "../../snarkVM"
[dependencies.anyhow]
diff --git a/testing/src/client_node.rs b/testing/src/client_node.rs
index 2af3608f29..b467932537 100644
--- a/testing/src/client_node.rs
+++ b/testing/src/client_node.rs
@@ -58,7 +58,7 @@ impl ClientNode {
let permanent_args = &["snarkos", "--norpc"];
let combined_args = permanent_args.iter().chain(extra_args.iter());
let config = snarkos::Node::from_iter(combined_args);
- let server = Server::<Testnet2, Client<Testnet2>>::initialize(&config, None, Tasks::new())
+ let server = Server::<Testnet2, Client<Testnet2>>::initialize(&config, None, None, Tasks::new())
.await
.unwrap();
|
[Bug][Pool S/W] Failed to initialize a block from given inputs due to Invalid block header
## 🐛 Bug Report
<!--
What's the bug in snarkOS that you found?
How serious is this bug and what is affected?
To report a security issue in snarkOS, please email security@aleo.org.
-->
I was testing the pool s/w in feat/mining-pool branch. [mining-pool latest codebase](https://github.com/AleoHQ/snarkOS/commit/eb4fd9133b63323ba90efd1be78f32452f8714cc)
It seems that Prover cannot found the block due to Invalid block header.
I'm not sure it was caused by lack of hash power on prover, but I cannot found to send a PoolResponse to the operator at all.
(FYI, I added some log to tracking block height, difficulty and nonce.)
```
2021-12-22T07:10:18.269644Z TRACE Received 'PoolRequest' from 95.214.55.117:4132
2021-12-22T07:10:18.341725Z DEBUG prover.process_pool_request()
2021-12-22 07:10:18.341817883 UTC --> header.mine_once_unchecked() height 71200, target 660876088681118, nonce hn1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5dj2mk
2021-12-22 07:10:20.887372256 UTC posw.mine_once_unchecked(), height 71200, block_header.difficulty_target 660876088681118, nonce hn1xsnrwyqmc5l976maevuhlv4ae0tjtyd39q8d5af8kyg4rdptn5zqg7p4mf
2021-12-22 07:10:20.887610660 UTC <-- header.mine_once_unchecked() height 71200, target 660876088681118, nonce hn1xsnrwyqmc5l976maevuhlv4ae0tjtyd39q8d5af8kyg4rdptn5zqg7p4mf
2021-12-22T07:10:20.925679Z DEBUG start new block, share_difficulty 9223372036854775807
2021-12-22T07:10:20.925693Z DEBUG block_header.difficulty_target : 660876088681118
2021-12-22T07:10:20.925695Z DEBUG block_header.nonce : hn1xsnrwyqmc5l976maevuhlv4ae0tjtyd39q8d5af8kyg4rdptn5zqg7p4mf
2021-12-22 07:10:20.925699932 UTC block.new(), Initialize a new block
2021-12-22 07:10:20.926271 UTC block.from() Construct the block. header.difficulty_target 660876088681118, height 71200
2021-12-22 07:10:20.938687287 UTC PoSW difficulty target is not met. Expected 660876088681118, found 730151207245470603
2021-12-22 07:10:20.938721456 UTC Invalid block header
2021-12-22T07:10:20.938821Z TRACE cannot found block Failed to initialize a block from given inputs
```
## Steps to Reproduce
<!--
How do I reproduce this issue in snarkOS?
Is there a code snippet I can use to reproduce the issue?
Are there error messages or stack traces that would help debug this issue?
-->
1. Step 1
pull snarkOS testnet2 branch
checkout to feat/mining-pool
build snarkOS
2. Step 2
run pool operator and 2 prover nodes.
3. Step 3
check the log
## Expected Behavior
<!--
What was supposed to happen in snarkOS?
What happened instead?
-->
Prover find unconfirmed block and send a PoolResponse to the operator.
## Your Environment
- snarkOS : [mining-pool latest codebase](https://github.com/AleoHQ/snarkOS/commit/eb4fd9133b63323ba90efd1be78f32452f8714cc) <!-- snarkOS Version -->
- Rust version : rustc 1.57.0 (f1edd0429 2021-11-29) <!-- Rust Version -->
- OS : Ubuntu 20.04 <!-- Computer OS -->
|
2021-12-15T07:31:01Z
|
2.0
|
|
AleoNet/snarkOS
| 1,284
|
AleoNet__snarkOS-1284
|
[
"1266"
] |
90d33030e0f6913bada2ee1577ada43194d2986c
|
diff --git a/ledger/src/helpers/block_locators.rs b/ledger/src/helpers/block_locators.rs
index 6e6c8fd5ff..c551aa8ca2 100644
--- a/ledger/src/helpers/block_locators.rs
+++ b/ledger/src/helpers/block_locators.rs
@@ -29,7 +29,7 @@ use snarkvm::{
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use serde::{de, ser, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer};
-use std::ops::Deref;
+use std::{collections::BTreeMap, ops::Deref};
///
/// A helper struct to represent block locators from the ledger.
@@ -38,12 +38,12 @@ use std::ops::Deref;
///
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BlockLocators<N: Network> {
- block_locators: Vec<(u32, N::BlockHash, Option<BlockHeader<N>>)>,
+ block_locators: BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)>,
}
impl<N: Network> BlockLocators<N> {
#[inline]
- pub fn from(block_locators: Vec<(u32, N::BlockHash, Option<BlockHeader<N>>)>) -> Self {
+ pub fn from(block_locators: BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)>) -> Self {
Self { block_locators }
}
@@ -56,6 +56,11 @@ impl<N: Network> BlockLocators<N> {
pub fn len(&self) -> usize {
self.block_locators.len()
}
+
+ #[inline]
+ pub fn get_block_hash(&self, block_height: u32) -> Option<N::BlockHash> {
+ self.block_locators.get(&block_height).map(|(block_hash, _)| *block_hash)
+ }
}
impl<N: Network> FromBytes for BlockLocators<N> {
@@ -63,10 +68,10 @@ impl<N: Network> FromBytes for BlockLocators<N> {
fn read_le<R: Read>(mut reader: R) -> IoResult<Self> {
let num_locators: u32 = FromBytes::read_le(&mut reader)?;
- let mut block_locators = Vec::with_capacity(num_locators as usize);
+ let mut block_locators = BTreeMap::new();
let mut block_headers_bytes = Vec::with_capacity(num_locators as usize);
- for index in 0..(num_locators as usize) {
+ for index in 0..num_locators {
let height: u32 = FromBytes::read_le(&mut reader)?;
let hash: N::BlockHash = FromBytes::read_le(&mut reader)?;
let header_exists: bool = FromBytes::read_le(&mut reader)?;
@@ -77,7 +82,7 @@ impl<N: Network> FromBytes for BlockLocators<N> {
block_headers_bytes.push((index, buffer));
};
- block_locators.push((height, hash, None));
+ block_locators.insert(height, (hash, None));
}
let block_headers = block_headers_bytes
@@ -89,7 +94,7 @@ impl<N: Network> FromBytes for BlockLocators<N> {
.collect::<Vec<_>>();
for (index, block_header) in block_headers.into_iter() {
- if let Some((_, _, header)) = block_locators.get_mut(*index) {
+ if let Some((_, header)) = block_locators.get_mut(index) {
*header = Some(block_header);
}
}
@@ -103,7 +108,7 @@ impl<N: Network> ToBytes for BlockLocators<N> {
fn write_le<W: Write>(&self, mut writer: W) -> IoResult<()> {
(self.block_locators.len() as u32).write_le(&mut writer)?;
- for (height, hash, header) in &self.block_locators {
+ for (height, (hash, header)) in &self.block_locators {
height.write_le(&mut writer)?;
hash.write_le(&mut writer)?;
match header {
@@ -150,7 +155,8 @@ impl<'de, N: Network> Deserialize<'de> for BlockLocators<N> {
match deserializer.is_human_readable() {
true => {
let block_locators = serde_json::Value::deserialize(deserializer)?;
- let block_locators: Vec<_> = serde_json::from_value(block_locators["block_locators"].clone()).map_err(de::Error::custom)?;
+ let block_locators: BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)> =
+ serde_json::from_value(block_locators["block_locators"].clone()).map_err(de::Error::custom)?;
Ok(Self::from(block_locators))
}
false => FromBytesDeserializer::<Self>::deserialize_with_size_encoding(deserializer, "block locators"),
@@ -161,12 +167,12 @@ impl<'de, N: Network> Deserialize<'de> for BlockLocators<N> {
impl<N: Network> Default for BlockLocators<N> {
#[inline]
fn default() -> Self {
- Self::from(vec![])
+ Self::from(Default::default())
}
}
impl<N: Network> Deref for BlockLocators<N> {
- type Target = Vec<(u32, N::BlockHash, Option<BlockHeader<N>>)>;
+ type Target = BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)>;
fn deref(&self) -> &Self::Target {
&self.block_locators
@@ -184,12 +190,12 @@ mod tests {
let expected_block_hash = Testnet2::genesis_block().hash();
let expected_block_header = Testnet2::genesis_block().header().clone();
let expected_block_locators =
- BlockLocators::<Testnet2>::from(vec![(expected_block_height, expected_block_hash, Some(expected_block_header))]);
+ BlockLocators::<Testnet2>::from([(expected_block_height, (expected_block_hash, Some(expected_block_header)))].into());
// Serialize
let expected_string = expected_block_locators.to_string();
let candidate_string = serde_json::to_string(&expected_block_locators).unwrap();
- assert_eq!(1682, candidate_string.len(), "Update me if serialization has changed");
+ assert_eq!(1684, candidate_string.len(), "Update me if serialization has changed");
assert_eq!(expected_string, candidate_string);
// Deserialize
@@ -203,7 +209,7 @@ mod tests {
let expected_block_hash = Testnet2::genesis_block().hash();
let expected_block_header = Testnet2::genesis_block().header().clone();
let expected_block_locators =
- BlockLocators::<Testnet2>::from(vec![(expected_block_height, expected_block_hash, Some(expected_block_header))]);
+ BlockLocators::<Testnet2>::from([(expected_block_height, (expected_block_hash, Some(expected_block_header)))].into());
// Serialize
let expected_bytes = expected_block_locators.to_bytes_le().unwrap();
diff --git a/ledger/src/state/ledger.rs b/ledger/src/state/ledger.rs
index 53b4702720..2a1d494276 100644
--- a/ledger/src/state/ledger.rs
+++ b/ledger/src/state/ledger.rs
@@ -27,6 +27,7 @@ use parking_lot::RwLock;
use rand::{CryptoRng, Rng};
use serde::{Deserialize, Serialize};
use std::{
+ collections::BTreeMap,
path::Path,
sync::{
atomic::{AtomicBool, AtomicU32, Ordering},
@@ -390,7 +391,7 @@ impl<N: Network> LedgerState<N> {
let block_locator_headers = block_hashes
.zip_eq(block_headers)
.take(num_block_headers as usize)
- .map(|(hash, header)| (header.height(), hash, Some(header)));
+ .map(|(hash, header)| (header.height(), (hash, Some(header))));
// Decrement the block locator height by the number of block headers.
block_locator_height -= num_block_headers;
@@ -398,11 +399,9 @@ impl<N: Network> LedgerState<N> {
// Return the block locators if the locator has run out of blocks.
if block_locator_height == 0 {
// Initialize the list of block locators.
- let mut block_locators = Vec::with_capacity((num_block_headers + 1) as usize);
- // Add the list of block locator headers.
- block_locators.extend(block_locator_headers);
+ let mut block_locators: BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)> = block_locator_headers.collect();
// Add the genesis locator.
- block_locators.push((0, self.get_block_hash(0)?, None));
+ block_locators.insert(0, (self.get_block_hash(0)?, None));
return Ok(BlockLocators::<N>::from(block_locators));
}
@@ -414,23 +413,17 @@ impl<N: Network> LedgerState<N> {
let mut block_locator_hashes = Vec::with_capacity(num_block_hashes as usize);
// Add the block locator hashes.
while block_locator_height > 0 && block_locator_hashes.len() < num_block_hashes as usize {
- block_locator_hashes.push((block_locator_height, self.get_block_hash(block_locator_height)?, None));
+ block_locator_hashes.push((block_locator_height, (self.get_block_hash(block_locator_height)?, None)));
// Decrement the block locator height by a power of two.
block_locator_height /= 2;
}
- // Determine the number of latest block headers and block hashes to include as block locators.
- let num_block_locators = num_block_headers + num_block_hashes + 1;
-
// Initialize the list of block locators.
- let mut block_locators = Vec::with_capacity(num_block_locators as usize);
- // Add the list of block locator headers.
- block_locators.extend(block_locator_headers);
- // Add the list of block locator hashes.
- block_locators.extend(block_locator_hashes);
+ let mut block_locators: BTreeMap<u32, (N::BlockHash, Option<BlockHeader<N>>)> =
+ block_locator_headers.chain(block_locator_hashes).collect();
// Add the genesis locator.
- block_locators.push((0, self.get_block_hash(0)?, None));
+ block_locators.insert(0, (self.get_block_hash(0)?, None));
Ok(BlockLocators::<N>::from(block_locators))
}
@@ -444,71 +437,74 @@ impl<N: Network> LedgerState<N> {
let block_locators = &**block_locators;
- // Check that the last block locator is the genesis locator.
- let (expected_height, expected_genesis_block_hash, expected_genesis_header) = &block_locators[block_locators.len() - 1];
- if *expected_height != 0 || expected_genesis_block_hash != &self.get_block_hash(0)? || expected_genesis_header.is_some() {
+ // Ensure the genesis block locator exists and is well-formed.
+ let (expected_genesis_block_hash, expected_genesis_header) = match block_locators.get(&0) {
+ Some((expected_genesis_block_hash, expected_genesis_header)) => (expected_genesis_block_hash, expected_genesis_header),
+ None => return Ok(false),
+ };
+ if expected_genesis_block_hash != &N::genesis_block().hash() || expected_genesis_header.is_some() {
return Ok(false);
}
- // Get the remaining block locators (excluding the genesis block).
- let remaining_block_locators = &block_locators[..block_locators.len() - 1];
- let num_block_headers = std::cmp::min(MAXIMUM_LINEAR_BLOCK_LOCATORS as usize, remaining_block_locators.len());
-
- // Check that the block headers are formed correctly (linear).
- // let mut last_block_height = remaining_block_locators[0].0 + 1;
- for (_block_height, _block_hash, block_header) in &remaining_block_locators[..num_block_headers] {
- // // Check that the block height is decrementing.
- // match last_block_height == *block_height + 1 {
- // true => last_block_height = *block_height,
- // false => return Ok(false)
- // }
-
- // Check that the block header is present.
- let _block_header = match block_header {
- Some(header) => header,
- None => return Ok(false),
- };
-
- // // Check that the expected block hash is correct.
- // if let Ok(expected_block_hash) = self.get_block_hash(*block_height) {
- // if &expected_block_hash != block_hash {
- // return Ok(false);
- // }
- // }
- //
- // // Check that the expected block headers is correct.
- // if let Ok(expected_block_header) = self.get_block_header(*block_height) {
- // if &expected_block_header != block_header {
- // return Ok(false);
- // }
- // }
- }
-
- // Check that the block hashes are formed correctly (power of two).
- if block_locators.len() > MAXIMUM_LINEAR_BLOCK_LOCATORS as usize {
- let mut previous_block_height = u32::MAX;
-
- for (block_height, _block_hash, block_header) in &block_locators[num_block_headers..] {
- // Check that the block heights increment by a power of two.
- if previous_block_height != u32::MAX && previous_block_height / 2 != *block_height {
- return Ok(false);
- }
-
- // Check that there is no block header.
- if block_header.is_some() {
- return Ok(false);
- }
-
- // // Check that the expected block hash is correct.
- // if let Ok(expected_block_hash) = self.get_block_hash(*block_height) {
- // if &expected_block_hash != block_hash {
- // return Ok(false);
- // }
- // }
-
- previous_block_height = *block_height;
- }
- }
+ // // Get the remaining block locators (excluding the genesis block).
+ // let remaining_block_locators = &block_locators[..block_locators.len() - 1];
+ // let num_block_headers = std::cmp::min(MAXIMUM_LINEAR_BLOCK_LOCATORS as usize, remaining_block_locators.len());
+ //
+ // // Check that the block headers are formed correctly (linear).
+ // // let mut last_block_height = remaining_block_locators[0].0 + 1;
+ // for (_block_height, _block_hash, block_header) in &remaining_block_locators[..num_block_headers] {
+ // // // Check that the block height is decrementing.
+ // // match last_block_height == *block_height + 1 {
+ // // true => last_block_height = *block_height,
+ // // false => return Ok(false)
+ // // }
+ //
+ // // Check that the block header is present.
+ // let _block_header = match block_header {
+ // Some(header) => header,
+ // None => return Ok(false),
+ // };
+ //
+ // // // Check that the expected block hash is correct.
+ // // if let Ok(expected_block_hash) = self.get_block_hash(*block_height) {
+ // // if &expected_block_hash != block_hash {
+ // // return Ok(false);
+ // // }
+ // // }
+ // //
+ // // // Check that the expected block headers is correct.
+ // // if let Ok(expected_block_header) = self.get_block_header(*block_height) {
+ // // if &expected_block_header != block_header {
+ // // return Ok(false);
+ // // }
+ // // }
+ // }
+
+ // // Check that the block hashes are formed correctly (power of two).
+ // if block_locators.len() > MAXIMUM_LINEAR_BLOCK_LOCATORS as usize {
+ // let mut previous_block_height = u32::MAX;
+ //
+ // for (block_height, _block_hash, block_header) in &block_locators[num_block_headers..] {
+ // // Check that the block heights increment by a power of two.
+ // if previous_block_height != u32::MAX && previous_block_height / 2 != *block_height {
+ // return Ok(false);
+ // }
+ //
+ // // Check that there is no block header.
+ // if block_header.is_some() {
+ // return Ok(false);
+ // }
+ //
+ // // // Check that the expected block hash is correct.
+ // // if let Ok(expected_block_hash) = self.get_block_hash(*block_height) {
+ // // if &expected_block_hash != block_hash {
+ // // return Ok(false);
+ // // }
+ // // }
+ //
+ // previous_block_height = *block_height;
+ // }
+ // }
Ok(true)
}
diff --git a/src/network/ledger.rs b/src/network/ledger.rs
index 40d4af09be..96e786fb4a 100644
--- a/src/network/ledger.rs
+++ b/src/network/ledger.rs
@@ -318,11 +318,31 @@ impl<N: Network, E: Environment> Ledger<N, E> {
while let Some(unconfirmed_block) = unconfirmed_blocks.get(&block.hash()) {
// Update the block iterator.
block = unconfirmed_block;
+
+ // Ensure the block height is not part of a block request in a fork.
+ let mut is_forked_block = false;
+ for requests in self.block_requests.values() {
+ for (block_height, block_hash) in requests.keys() {
+ // If the block is part of a fork, then don't attempt to add it again.
+ if block_height == &block.height() && block_hash.is_some() {
+ is_forked_block = true;
+ break;
+ }
+ }
+ }
+
+ // If the block is on a fork, remove the unconfirmed block, and break the loop.
+ if is_forked_block {
+ self.unconfirmed_blocks.remove(&block.hash());
+ break;
+ }
// Attempt to add the unconfirmed block.
- match self.add_block(block.clone()) {
- // Upon success, remove the unconfirmed block, as it is now confirmed.
- true => self.unconfirmed_blocks.remove(&block.hash()),
- false => break,
+ else {
+ match self.add_block(block.clone()) {
+ // Upon success, remove the unconfirmed block, as it is now confirmed.
+ true => self.unconfirmed_blocks.remove(&block.hash()),
+ false => break,
+ }
}
}
@@ -485,8 +505,6 @@ impl<N: Network, E: Environment> Ledger<N, E> {
true => {
trace!("Adding unconfirmed block {} to memory pool", block.height());
- // Set the terminator bit to `true` to ensure the miner updates state.
- self.terminator.store(true, Ordering::SeqCst);
// Add the block to the unconfirmed blocks.
self.unconfirmed_blocks.insert(block.previous_block_hash(), block);
}
@@ -606,7 +624,7 @@ impl<N: Network, E: Environment> Ledger<N, E> {
let mut latest_block_height_of_peer = 0;
// Verify the integrity of the block hashes sent by the peer.
- for (block_height, block_hash, _) in block_locators.iter() {
+ for (block_height, (block_hash, _)) in block_locators.iter() {
// Ensure the block hash corresponds with the block height, if the block hash exists in this ledger.
if let Ok(expected_block_height) = self.canon.get_block_height(block_hash) {
if expected_block_height != *block_height {
@@ -715,7 +733,7 @@ impl<N: Network, E: Environment> Ledger<N, E> {
let mut first_deviating_locator = None;
// Verify the integrity of the block hashes sent by the peer.
- for (block_height, block_hash, _) in &*maximum_block_locators {
+ for (block_height, (block_hash, _)) in maximum_block_locators.iter() {
// Ensure the block hash corresponds with the block height, if the block hash exists in this ledger.
if let Ok(expected_block_height) = self.canon.get_block_height(block_hash) {
if expected_block_height != *block_height {
@@ -746,20 +764,20 @@ impl<N: Network, E: Environment> Ledger<N, E> {
let latest_block_height = self.latest_block_height();
if latest_block_height < maximum_common_ancestor {
warn!(
- "The maximum common ancestor {} can't be greater than the latest block {}",
+ "The common ancestor {} cannot be greater than the latest block {}",
maximum_common_ancestor, latest_block_height
);
return;
}
// Determine the latest common ancestor.
- let latest_common_ancestor =
+ let (latest_common_ancestor, ledger_reverted) =
// Case 2(b) - This ledger is not a fork of the peer, it is on the same canon chain.
if !is_fork {
// Continue to sync from the latest block height of this ledger, if the peer is honest.
match first_deviating_locator.is_none() {
- true => maximum_common_ancestor,
- false => latest_block_height,
+ true => (maximum_common_ancestor, false),
+ false => (latest_block_height, false),
}
}
// Case 2(c) - This ledger is on a fork of the peer.
@@ -773,7 +791,7 @@ impl<N: Network, E: Environment> Ledger<N, E> {
if latest_block_height != maximum_common_ancestor && !self.revert_to_block_height(maximum_common_ancestor) {
return;
}
- maximum_common_ancestor
+ (maximum_common_ancestor, true)
}
// Case 2(c)(b) - If the common ancestor is NOT within `ALEO_MAXIMUM_FORK_DEPTH`.
else
@@ -800,7 +818,7 @@ impl<N: Network, E: Environment> Ledger<N, E> {
else {
info!("Found a potentially longer chain from {} starting at block {}", peer_ip, maximum_common_ancestor);
match self.revert_to_block_height(maximum_common_ancestor) {
- true => maximum_common_ancestor,
+ true => (maximum_common_ancestor, true),
false => return
}
}
@@ -823,8 +841,11 @@ impl<N: Network, E: Environment> Ledger<N, E> {
// Log each block request to ensure the peer responds with all requested blocks.
for block_height in start_block_height..=end_block_height {
- // Add the block request to the ledger.
- self.add_block_request(peer_ip, block_height, None);
+ // If the ledger was reverted, include the expected new block hash for the fork.
+ match ledger_reverted {
+ true => self.add_block_request(peer_ip, block_height, maximum_block_locators.get_block_hash(block_height)),
+ false => self.add_block_request(peer_ip, block_height, None),
+ };
}
}
}
|
diff --git a/ledger/src/state/tests.rs b/ledger/src/state/tests.rs
index db7c7275eb..5176d1983a 100644
--- a/ledger/src/state/tests.rs
+++ b/ledger/src/state/tests.rs
@@ -51,7 +51,7 @@ fn test_genesis() {
assert_eq!(genesis.timestamp(), ledger.latest_block_timestamp());
assert_eq!(genesis.difficulty_target(), ledger.latest_block_difficulty_target());
assert_eq!(genesis, &ledger.latest_block());
- assert_eq!(vec![(genesis.height(), genesis.hash(), None)], *ledger.latest_block_locators());
+ assert_eq!(Some(&(genesis.hash(), None)), ledger.latest_block_locators().get(&genesis.height()));
assert_eq!(ledger_tree.root(), ledger.latest_ledger_root());
}
@@ -94,8 +94,11 @@ fn test_add_next_block() {
// Ensure the block locators are correct.
let block_locators = ledger.latest_block_locators();
assert_eq!(2, block_locators.len());
- assert_eq!((block.height(), block.hash(), Some(block.header().clone())), block_locators[0]);
- assert_eq!((genesis.height(), genesis.hash(), None), block_locators[1]);
+ assert_eq!(
+ Some(&(block.hash(), Some(block.header().clone()))),
+ block_locators.get(&block.height())
+ );
+ assert_eq!(Some(&(genesis.hash(), None)), block_locators.get(&genesis.height()));
}
#[test]
@@ -138,7 +141,7 @@ fn test_remove_last_block() {
assert_eq!(genesis.timestamp(), ledger.latest_block_timestamp());
assert_eq!(genesis.difficulty_target(), ledger.latest_block_difficulty_target());
assert_eq!(genesis, &ledger.latest_block());
- assert_eq!(vec![(genesis.height(), genesis.hash(), None)], *ledger.latest_block_locators());
+ assert_eq!(Some(&(genesis.hash(), None)), ledger.latest_block_locators().get(&genesis.height()));
assert_eq!(ledger_tree.root(), ledger.latest_ledger_root());
}
@@ -187,7 +190,7 @@ fn test_remove_last_2_blocks() {
assert_eq!(genesis.timestamp(), ledger.latest_block_timestamp());
assert_eq!(genesis.difficulty_target(), ledger.latest_block_difficulty_target());
assert_eq!(genesis, &ledger.latest_block());
- assert_eq!(vec![(genesis.height(), genesis.hash(), None)], *ledger.latest_block_locators());
+ assert_eq!(Some(&(genesis.hash(), None)), ledger.latest_block_locators().get(&genesis.height()));
assert_eq!(ledger_tree.root(), ledger.latest_ledger_root());
}
|
Track rolled back blocks
<!-- Thank you for filing a PR! Help us understand by explaining your changes. Happy contributing! -->
## Motivation
This PR adds the block hash of rolled back blocks to the `block_requests` to prevent the `update_ledger` from re-canonizing blocks that were already rolled back.
|
2021-11-15T09:40:38Z
|
2.0
|
|
AleoNet/snarkOS
| 1,026
|
AleoNet__snarkOS-1026
|
[
"979"
] |
64586b2d15ec6b75dd048d4df87534287ebb87d3
|
diff --git a/consensus/src/consensus/inner/agent.rs b/consensus/src/consensus/inner/agent.rs
index c95dfcee8f..d2eaac69a8 100644
--- a/consensus/src/consensus/inner/agent.rs
+++ b/consensus/src/consensus/inner/agent.rs
@@ -34,10 +34,25 @@ impl ConsensusInner {
self.commit_block(&hash, &block).await?;
}
- // info!("rebuilding canon");
- // self.diff_canon().await?;
- // self.recommit_canon().await?; // TODO: DEFINITELY REMOVE
- // info!("rebuilt canon");
+
+ // scan for forks
+ let forks = self.scan_forks().await?;
+ for (canon, fork_child) in forks {
+ let canon_height = match self.storage.get_block_state(&canon).await? {
+ BlockStatus::Committed(n) => n,
+ _ => continue,
+ };
+ let fork_blocks = self.storage.longest_child_path(&fork_child).await?;
+ debug!(
+ "fork detected @ {}/{} -- starts at {}, goes for {} blocks, ending at {}",
+ canon_height,
+ canon,
+ fork_child,
+ fork_blocks.len(),
+ fork_blocks.last().unwrap()
+ );
+ }
+
if let Err(e) = self.try_to_fast_forward().await {
match e {
ConsensusError::InvalidBlock(e) => debug!("invalid block in initial fast-forward: {}", e),
diff --git a/consensus/src/consensus/inner/mod.rs b/consensus/src/consensus/inner/mod.rs
index 578d935991..877b59324f 100644
--- a/consensus/src/consensus/inner/mod.rs
+++ b/consensus/src/consensus/inner/mod.rs
@@ -26,6 +26,8 @@ use crate::{
};
use anyhow::*;
use snarkos_storage::{
+ BlockFilter,
+ BlockOrder,
BlockStatus,
Digest,
DynStorage,
@@ -67,29 +69,39 @@ struct LedgerData {
}
impl ConsensusInner {
- /// scans uncommitted blocks for forks
- async fn scan_forks(&mut self) -> Result<()> {
- let blocks = self
+ /// scans uncommitted blocks with a known path to the canon chain for forks
+ async fn scan_forks(&mut self) -> Result<Vec<(Digest, Digest)>> {
+ let canon_hashes = self
.storage
- .get_block_hashes(None, snarkos_storage::BlockFilter::NonCanonOnly)
+ .get_block_hashes(
+ Some(crate::OLDEST_FORK_THRESHOLD as u32 + 1),
+ BlockFilter::CanonOnly(BlockOrder::Descending),
+ )
.await?;
- info!("scanning {} blocks for forks", blocks.len());
- for hash in blocks {
- let header = self.storage.get_block_header(&hash).await?;
- let parent_state = self.storage.get_block_state(&header.previous_block_hash).await?;
- match parent_state {
- BlockStatus::Unknown => continue, // orphan
- BlockStatus::Committed(_) => (), // uncomitted child of canon, could be a fork or a hanging block
- BlockStatus::Uncommitted => {
- // mid-fork or orphan chain, wait for head of fork
- continue;
- }
- };
- let block = self.storage.get_block(&hash).await?;
- self.try_commit_block(&hash, &block).await?;
+ if canon_hashes.len() < 2 {
+ // windows will panic if len < 2
+ return Ok(vec![]);
}
- Ok(())
+
+ let mut known_forks = vec![];
+
+ for canon_hashes in canon_hashes.windows(2) {
+ // windows will ignore last block (furthest down), so we pull one extra above
+ let target_hash = &canon_hashes[1];
+ let ignore_child_hash = &canon_hashes[0];
+ let children = self.storage.get_block_children(target_hash).await?;
+ if children.len() == 1 && &children[0] == ignore_child_hash {
+ continue;
+ }
+ for child in children {
+ if &child != ignore_child_hash {
+ known_forks.push((target_hash.clone(), child));
+ }
+ }
+ }
+
+ Ok(known_forks)
}
fn fresh_ledger(&self, blocks: Vec<SerialBlock>) -> Result<LedgerData> {
diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs
index 83e5995f1b..7636f35183 100644
--- a/consensus/src/consensus/mod.rs
+++ b/consensus/src/consensus/mod.rs
@@ -151,7 +151,7 @@ impl Consensus {
}
/// Diagnostic function to scan for valid forks
- pub async fn scan_forks(&self) -> Result<()> {
+ pub async fn scan_forks(&self) -> Result<Vec<(Digest, Digest)>> {
self.send(ConsensusMessage::ScanForks()).await
}
diff --git a/network/src/lib.rs b/network/src/lib.rs
index 1a21623dc2..8c8a3161ef 100644
--- a/network/src/lib.rs
+++ b/network/src/lib.rs
@@ -49,7 +49,7 @@ pub mod topology;
pub use topology::*;
/// The maximum number of block hashes that can be requested or provided in a single batch.
-pub const MAX_BLOCK_SYNC_COUNT: u32 = snarkos_storage::NUM_LOCATOR_HASHES;
+pub const MAX_BLOCK_SYNC_COUNT: u32 = snarkos_storage::NUM_LOCATOR_HASHES * 2;
/// The maximum amount of time allowed to process a single batch of sync blocks. It should be aligned
/// with `MAX_BLOCK_SYNC_COUNT`.
pub const BLOCK_SYNC_EXPIRATION_SECS: u8 = 30;
diff --git a/network/src/sync/master.rs b/network/src/sync/master.rs
index 2949fb8a6a..f24fdf547c 100644
--- a/network/src/sync/master.rs
+++ b/network/src/sync/master.rs
@@ -80,7 +80,27 @@ impl SyncMaster {
}
async fn block_locator_hashes(&mut self) -> Result<Vec<Digest>> {
- match self.node.storage.get_block_locator_hashes().await {
+ let forks_of_interest = self.node.expect_sync().consensus.scan_forks().await?;
+ let blocks_of_interest: Vec<Digest> = forks_of_interest.into_iter().map(|(_canon, fork)| fork).collect();
+ let mut tips_of_blocks_of_interest: Vec<Digest> = Vec::with_capacity(blocks_of_interest.len());
+ for block in blocks_of_interest {
+ if tips_of_blocks_of_interest.len() > crate::MAX_BLOCK_SYNC_COUNT as usize {
+ debug!("reached limit of blocks of interest in sync block locator hashes");
+ break;
+ }
+ let mut fork_path = self.node.storage.longest_child_path(&block).await?;
+ if fork_path.len() < 2 {
+ // a minor fork, we probably don't care
+ continue;
+ }
+ tips_of_blocks_of_interest.push(fork_path.pop().unwrap());
+ }
+ match self
+ .node
+ .storage
+ .get_block_locator_hashes(tips_of_blocks_of_interest, snarkos_consensus::OLDEST_FORK_THRESHOLD)
+ .await
+ {
Ok(block_locator_hashes) => Ok(block_locator_hashes),
Err(e) => {
error!("Unable to get block locator hashes from storage: {:?}", e);
diff --git a/storage/src/key_value/agent/block.rs b/storage/src/key_value/agent/block.rs
index d3a0ee9e81..9ca6e00ce9 100644
--- a/storage/src/key_value/agent/block.rs
+++ b/storage/src/key_value/agent/block.rs
@@ -14,7 +14,11 @@
// You should have received a copy of the GNU General Public License
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
-use std::collections::HashSet;
+use std::{collections::HashSet, convert::TryInto};
+
+use tracing::trace;
+
+use crate::BlockOrder;
use super::*;
@@ -224,34 +228,49 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
})
}
- pub(super) fn get_block_locator_hashes(&mut self) -> Result<Vec<Digest>> {
+ pub(super) fn get_block_locator_hashes(
+ &mut self,
+ points_of_interest: Vec<Digest>,
+ oldest_fork_threshold: usize,
+ ) -> Result<Vec<Digest>> {
let canon = self.canon()?;
+ let target_height = canon.block_height as u32;
// The number of locator hashes left to obtain; accounts for the genesis block.
- let mut num_locator_hashes = std::cmp::min(crate::NUM_LOCATOR_HASHES - 1, canon.block_height as u32);
+ let mut num_locator_hashes = std::cmp::min(crate::NUM_LOCATOR_HASHES - 1, target_height);
// The output list of block locator hashes.
- let mut block_locator_hashes = Vec::with_capacity(num_locator_hashes as usize);
+ let mut block_locator_hashes = Vec::with_capacity(num_locator_hashes as usize + points_of_interest.len());
+
+ for hash in points_of_interest {
+ trace!("block locator hash -- interesting: block# none: {}", hash);
+ block_locator_hashes.push(hash);
+ }
// The index of the current block for which a locator hash is obtained.
- let mut hash_index = canon.block_height as u32;
+ let mut hash_index = target_height;
// The number of top blocks to provide locator hashes for.
let num_top_blocks = std::cmp::min(10, num_locator_hashes);
for _ in 0..num_top_blocks {
- block_locator_hashes.push(self.get_block_hash_guarded(hash_index)?);
+ let hash = self.get_block_hash_guarded(hash_index)?;
+ trace!("block locator hash -- top: block# {}: {}", hash_index, hash);
+ block_locator_hashes.push(hash);
hash_index -= 1; // safe; num_top_blocks is never higher than the height
}
num_locator_hashes -= num_top_blocks;
if num_locator_hashes == 0 {
- block_locator_hashes.push(self.get_block_hash_guarded(0)?);
+ let hash = self.get_block_hash_guarded(0)?;
+ trace!("block locator hash -- genesis: block# {}: {}", 0, hash);
+ block_locator_hashes.push(hash);
return Ok(block_locator_hashes);
}
// Calculate the average distance between block hashes based on the desired number of locator hashes.
- let mut proportional_step = hash_index / num_locator_hashes;
+ let mut proportional_step =
+ (hash_index.min(oldest_fork_threshold as u32) / num_locator_hashes).min(crate::NUM_LOCATOR_HASHES - 1);
// Provide hashes of blocks with indices descending quadratically while the quadratic step distance is
// lower or close to the proportional step distance.
@@ -263,14 +282,17 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
// Obtain a few hashes increasing the distance quadratically.
let mut quadratic_step = 2; // the size of the first quadratic step
for _ in 0..num_quadratic_steps {
- block_locator_hashes.push(self.get_block_hash_guarded(hash_index)?);
+ let hash = self.get_block_hash_guarded(hash_index)?;
+ trace!("block locator hash -- quadratic: block# {}: {}", hash_index, hash);
+ block_locator_hashes.push(hash);
hash_index = hash_index.saturating_sub(quadratic_step);
quadratic_step *= 2;
}
// Update the size of the proportional step so that the hashes of the remaining blocks have the same distance
// between one another.
- proportional_step = hash_index / num_proportional_steps;
+ proportional_step =
+ (hash_index.min(oldest_fork_threshold as u32) / num_locator_hashes).min(crate::NUM_LOCATOR_HASHES - 1);
// Tweak: in order to avoid "jumping" by too many indices with the last step,
// increase the value of each step by 1 if the last step is too large. This
@@ -282,14 +304,18 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
// Obtain the rest of hashes with a proportional distance between them.
for _ in 0..num_proportional_steps {
- block_locator_hashes.push(self.get_block_hash_guarded(hash_index)?);
+ let hash = self.get_block_hash_guarded(hash_index)?;
+ trace!("block locator hash -- proportional: block# {}: {}", hash_index, hash);
+ block_locator_hashes.push(hash);
if hash_index == 0 {
return Ok(block_locator_hashes);
}
hash_index = hash_index.saturating_sub(proportional_step);
}
- block_locator_hashes.push(self.get_block_hash_guarded(0)?);
+ let hash = self.get_block_hash_guarded(0)?;
+ trace!("block locator hash -- genesis: block# {}: {}", 0, hash);
+ block_locator_hashes.push(hash);
Ok(block_locator_hashes)
}
@@ -370,17 +396,30 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
pub(super) fn get_block_hashes(&mut self, limit: Option<u32>, filter: BlockFilter) -> Result<Vec<Digest>> {
let mut hashes = match filter {
- BlockFilter::CanonOnly => {
+ BlockFilter::CanonOnly(BlockOrder::Unordered) => {
self.inner().get_column_keys(KeyValueColumn::BlockIndex)?
.into_iter()
.filter(|key| key.len() != 4) // only interested in block hash keys
.map(|key| key[..].into())
.collect::<Vec<Digest>>()
}
+ BlockFilter::CanonOnly(order) => {
+ let mut values = self.inner().get_column(KeyValueColumn::BlockIndex)?
+ .into_iter()
+ .filter(|(key, value)| key.len() != 4 && value.len() == 4) // only interested in block hash keys
+ .map(|(key, value)| (key[..].into(), u32::from_le_bytes((&value[..]).try_into().unwrap())))
+ .collect::<Vec<(Digest, u32)>>();
+ values.sort_by(|a, b| a.1.cmp(&b.1));
+ match order {
+ BlockOrder::Ascending => values.into_iter().map(|x| x.0).collect(),
+ BlockOrder::Descending => values.into_iter().rev().map(|x| x.0).collect(),
+ BlockOrder::Unordered => unreachable!(),
+ }
+ }
BlockFilter::NonCanonOnly => {
let all = self.get_block_hashes(None, BlockFilter::All)?;
let canon = self
- .get_block_hashes(None, BlockFilter::CanonOnly)?
+ .get_block_hashes(None, BlockFilter::CanonOnly(BlockOrder::Unordered))?
.into_iter()
.collect::<HashSet<Digest>>();
all.into_iter().filter(|hash| !canon.contains(hash)).collect()
diff --git a/storage/src/key_value/agent/block_commit.rs b/storage/src/key_value/agent/block_commit.rs
index c985d5aff4..9b724c32c6 100644
--- a/storage/src/key_value/agent/block_commit.rs
+++ b/storage/src/key_value/agent/block_commit.rs
@@ -24,19 +24,19 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
let header = self.get_block_header(hash)?;
let canon_height = self.canon_height()?;
let mut parent_hash = header.previous_block_hash;
- for i in 0..=oldest_fork_threshold {
+ for _ in 0..=oldest_fork_threshold {
// check if the part is part of the canon chain
match self.get_block_state(&parent_hash)? {
// This is a canon parent
BlockStatus::Committed(block_num) => {
// Add the children from the latest block
- if block_num + oldest_fork_threshold - i < canon_height as usize {
+ if canon_height as usize - block_num > oldest_fork_threshold {
debug!("exceeded maximum fork length in extended path");
return Ok(ForkDescription::TooLong);
}
let longest_path = self.longest_child_path(hash)?;
+ debug!("longest child path terminating in {:?}", longest_path.last());
side_chain_path.extend(longest_path);
-
return Ok(ForkDescription::Path(ForkPath {
base_index: block_num as u32,
path: side_chain_path.into(),
diff --git a/storage/src/key_value/agent/mod.rs b/storage/src/key_value/agent/mod.rs
index 95ab3a6a49..d8308b9d87 100644
--- a/storage/src/key_value/agent/mod.rs
+++ b/storage/src/key_value/agent/mod.rs
@@ -197,8 +197,11 @@ impl<S: KeyValueStorage + Validator + 'static> Agent<S> {
}
Message::DecommitBlocks(hash) => Box::new(self.wrap(move |f| f.decommit_blocks(&hash))),
Message::Canon() => Box::new(self.canon()),
+ Message::GetBlockChildren(hash) => Box::new(self.get_child_block_hashes(&hash)),
Message::LongestChildPath(hash) => Box::new(self.longest_child_path(&hash)),
- Message::GetBlockLocatorHashes() => Box::new(self.get_block_locator_hashes()),
+ Message::GetBlockLocatorHashes(points_of_interest, oldest_fork_threshold) => {
+ Box::new(self.get_block_locator_hashes(points_of_interest, oldest_fork_threshold))
+ }
Message::FindSyncBlocks(hashes, block_count) => Box::new(self.find_sync_blocks(hashes, block_count)),
Message::GetTransactionLocation(transaction_id) => Box::new(self.get_transaction_location(&transaction_id)),
Message::GetRecordCommitments(limit) => Box::new(self.get_record_commitments(limit)),
diff --git a/storage/src/key_value/mod.rs b/storage/src/key_value/mod.rs
index abd3963536..cfeb4172f8 100644
--- a/storage/src/key_value/mod.rs
+++ b/storage/src/key_value/mod.rs
@@ -79,7 +79,8 @@ enum Message {
DecommitBlocks(Digest),
Canon(),
LongestChildPath(Digest),
- GetBlockLocatorHashes(),
+ GetBlockChildren(Digest),
+ GetBlockLocatorHashes(Vec<Digest>, usize), // points of interest, oldest_fork_threshold
FindSyncBlocks(Vec<Digest>, usize),
GetTransactionLocation(Digest),
GetRecordCommitments(Option<usize>),
@@ -117,7 +118,12 @@ impl fmt::Display for Message {
Message::DecommitBlocks(hash) => write!(f, "DecommitBlocks({})", hash),
Message::Canon() => write!(f, "Canon()"),
Message::LongestChildPath(hash) => write!(f, "LongestChildPath({})", hash),
- Message::GetBlockLocatorHashes() => write!(f, "GetBlockLocatorHashes()"),
+ Message::GetBlockChildren(hash) => write!(f, "GetBlockChildren({})", hash),
+ Message::GetBlockLocatorHashes(canon_depth_limit, oldest_fork_threshold) => write!(
+ f,
+ "GetBlockLocatorHashes({:?}, {})",
+ canon_depth_limit, oldest_fork_threshold
+ ),
Message::FindSyncBlocks(hashes, max_block_count) => {
write!(f, "FindSyncBlocks(")?;
for hash in hashes {
diff --git a/storage/src/key_value/storage.rs b/storage/src/key_value/storage.rs
index cffa7c0fe4..9205a21590 100644
--- a/storage/src/key_value/storage.rs
+++ b/storage/src/key_value/storage.rs
@@ -82,8 +82,20 @@ impl Storage for KeyValueStore {
self.send(Message::LongestChildPath(block_hash.clone())).await
}
- async fn get_block_locator_hashes(&self) -> Result<Vec<Digest>> {
- self.send(Message::GetBlockLocatorHashes()).await
+ async fn get_block_children(&self, block_hash: &Digest) -> Result<Vec<Digest>> {
+ self.send(Message::GetBlockChildren(block_hash.clone())).await
+ }
+
+ async fn get_block_locator_hashes(
+ &self,
+ points_of_interest: Vec<Digest>,
+ oldest_fork_threshold: usize,
+ ) -> Result<Vec<Digest>> {
+ self.send(Message::GetBlockLocatorHashes(
+ points_of_interest,
+ oldest_fork_threshold,
+ ))
+ .await
}
async fn find_sync_blocks(&self, block_locator_hashes: &[Digest], block_count: usize) -> Result<Vec<Digest>> {
diff --git a/storage/src/storage.rs b/storage/src/storage.rs
index caa544eec3..a77ed052d5 100644
--- a/storage/src/storage.rs
+++ b/storage/src/storage.rs
@@ -53,13 +53,20 @@ pub struct CanonData {
pub hash: Digest,
}
-#[derive(Debug)]
+#[derive(Debug, Clone, Copy)]
pub enum BlockFilter {
- CanonOnly,
+ CanonOnly(BlockOrder),
NonCanonOnly,
All,
}
+#[derive(Debug, Clone, Copy)]
+pub enum BlockOrder {
+ Ascending,
+ Descending,
+ Unordered,
+}
+
/// An application level storage interface
/// Requires atomicity within each method implementation, but doesn't require any kind of consistency between invocations other than call-order enforcement.
#[async_trait::async_trait]
@@ -100,8 +107,15 @@ pub trait Storage: Send + Sync {
/// Gets the longest, committed or uncommitted, chain of blocks originating from `block_hash`, including `block_hash`.
async fn longest_child_path(&self, block_hash: &Digest) -> Result<Vec<Digest>>;
+ /// Gets the immediate children of `block_hash`.
+ async fn get_block_children(&self, block_hash: &Digest) -> Result<Vec<Digest>>;
+
/// Gets a series of hashes used for relaying current block sync state.
- async fn get_block_locator_hashes(&self) -> Result<Vec<Digest>>;
+ async fn get_block_locator_hashes(
+ &self,
+ points_of_interest: Vec<Digest>,
+ oldest_fork_threshold: usize,
+ ) -> Result<Vec<Digest>>;
/// Find hashes to provide for a syncing node given `block_locator_hashes`.
async fn find_sync_blocks(&self, block_locator_hashes: &[Digest], block_count: usize) -> Result<Vec<Digest>>;
@@ -161,7 +175,7 @@ pub trait Storage: Send + Sync {
/// Gets a dump of all stored canon blocks, in block-number ascending order. A maintenance function, not intended for general use.
async fn get_canon_blocks(&self, limit: Option<u32>) -> Result<Vec<SerialBlock>>;
- /// Similar to `Storage::get_canon_blocks`, gets hashes of all blocks subject to `filter` and `limit` in block-number ascending order. A maintenance function, not intended for general use.
+ /// Similar to `Storage::get_canon_blocks`, gets hashes of all blocks subject to `filter` and `limit` in filter-defined order. A maintenance function, not intended for general use.
async fn get_block_hashes(&self, limit: Option<u32>, filter: BlockFilter) -> Result<Vec<Digest>>;
/// Performs low-level storage validation; it's mostly intended for test purposes, as there is a lower level `KeyValueStorage` interface available outside of them.
|
diff --git a/consensus/tests/consensus_sidechain.rs b/consensus/tests/consensus_sidechain.rs
index bb7eca7900..ce75b2fd2b 100644
--- a/consensus/tests/consensus_sidechain.rs
+++ b/consensus/tests/consensus_sidechain.rs
@@ -15,6 +15,7 @@
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
mod consensus_sidechain {
+ use snarkos_consensus::OLDEST_FORK_THRESHOLD;
use snarkos_storage::validator::FixMode;
use snarkos_testing::sync::*;
@@ -183,7 +184,13 @@ mod consensus_sidechain {
assert_eq!(best_block_number, block_height);
// Check if the locator hashes can be found.
- assert!(consensus.storage.get_block_locator_hashes().await.is_ok());
+ assert!(
+ consensus
+ .storage
+ .get_block_locator_hashes(vec![], snarkos_consensus::OLDEST_FORK_THRESHOLD)
+ .await
+ .is_ok()
+ );
// Decommit a block.
let canon_hash = consensus.storage.canon().await.unwrap().hash;
@@ -195,7 +202,13 @@ mod consensus_sidechain {
assert_eq!(best_block_number, block_height);
// Check if the locator hashes can still be found.
- assert!(consensus.storage.get_block_locator_hashes().await.is_ok());
+ assert!(
+ consensus
+ .storage
+ .get_block_locator_hashes(vec![], snarkos_consensus::OLDEST_FORK_THRESHOLD)
+ .await
+ .is_ok()
+ );
}
#[tokio::test]
@@ -219,7 +232,11 @@ mod consensus_sidechain {
}
// There is no overlap between the 2 instances.
- let consensus1_locator_hashes = consensus1.storage.get_block_locator_hashes().await.unwrap();
+ let consensus1_locator_hashes = consensus1
+ .storage
+ .get_block_locator_hashes(vec![], OLDEST_FORK_THRESHOLD)
+ .await
+ .unwrap();
let consensus2_sync_blocks = consensus2
.storage
.find_sync_blocks(&consensus1_locator_hashes, 64)
@@ -242,7 +259,11 @@ mod consensus_sidechain {
}
// The blocks should fully overlap between the 2 instances now.
- let consensus1_locator_hashes = consensus1.storage.get_block_locator_hashes().await.unwrap();
+ let consensus1_locator_hashes = consensus1
+ .storage
+ .get_block_locator_hashes(vec![], OLDEST_FORK_THRESHOLD)
+ .await
+ .unwrap();
let consensus2_sync_blocks = consensus2
.storage
.find_sync_blocks(&consensus1_locator_hashes, 64)
@@ -277,7 +298,11 @@ mod consensus_sidechain {
let overlap_height = consensus1.storage.canon().await.unwrap().block_height;
// There is some initial overlap between the 2 instances.
- let consensus1_locator_hashes = consensus1.storage.get_block_locator_hashes().await.unwrap();
+ let consensus1_locator_hashes = consensus1
+ .storage
+ .get_block_locator_hashes(vec![], OLDEST_FORK_THRESHOLD)
+ .await
+ .unwrap();
let consensus2_sync_blocks = consensus2
.storage
.find_sync_blocks(&consensus1_locator_hashes, 64)
@@ -310,7 +335,11 @@ mod consensus_sidechain {
}
// The blocks should fully overlap between the 2 instances now.
- let consensus1_locator_hashes = consensus1.storage.get_block_locator_hashes().await.unwrap();
+ let consensus1_locator_hashes = consensus1
+ .storage
+ .get_block_locator_hashes(vec![], OLDEST_FORK_THRESHOLD)
+ .await
+ .unwrap();
let consensus2_sync_blocks = consensus2
.storage
.find_sync_blocks(&consensus1_locator_hashes, 64)
@@ -354,7 +383,11 @@ mod consensus_sidechain {
}
// The blocks should fully overlap between the 2 instances now.
- let consensus1_locator_hashes = consensus1.storage.get_block_locator_hashes().await.unwrap();
+ let consensus1_locator_hashes = consensus1
+ .storage
+ .get_block_locator_hashes(vec![], OLDEST_FORK_THRESHOLD)
+ .await
+ .unwrap();
let sync_blocks = consensus2
.storage
.find_sync_blocks(&consensus1_locator_hashes, 64)
diff --git a/testing/examples/test_blocks.rs b/testing/examples/test_blocks.rs
index 630be7cf58..badc142246 100644
--- a/testing/examples/test_blocks.rs
+++ b/testing/examples/test_blocks.rs
@@ -17,91 +17,15 @@
#[macro_use]
extern crate tracing;
-use snarkos_consensus::{error::ConsensusError, Consensus, CreateTransactionRequest, MineContext, TransactionResponse};
-use snarkos_storage::{PrivateKey, SerialBlock, SerialBlockHeader, SerialRecord, SerialTransaction};
-use snarkos_testing::sync::*;
-use snarkvm_dpc::{
- testnet1::{instantiated::*, record::payload::Payload as RecordPayload},
- Account,
- Address,
- AleoAmount,
- DPCComponents,
+use snarkos_consensus::{error::ConsensusError, MineContext, TransactionResponse};
+use snarkos_testing::{
+ mining::{mine_block, send},
+ sync::*,
};
use tracing_subscriber::EnvFilter;
use std::{fs::File, path::PathBuf};
-async fn mine_block(
- miner: &MineContext,
- transactions: Vec<SerialTransaction>,
- parent_block_header: &SerialBlockHeader,
-) -> Result<(SerialBlock, Vec<SerialRecord>), ConsensusError> {
- info!("Mining block!");
-
- let (transactions, coinbase_records) = miner.establish_block(transactions).await?;
-
- let header = miner.find_block(&transactions, parent_block_header)?;
-
- let block = SerialBlock { header, transactions };
-
- let old_block_height = miner.consensus.storage.canon().await?.block_height;
-
- // Duplicate blocks dont do anything
- miner.consensus.receive_block(block.clone()).await; // throws a duplicate error -- seemingly intentional
-
- let new_block_height = miner.consensus.storage.canon().await?.block_height;
- assert_eq!(old_block_height + 1, new_block_height);
-
- Ok((block, coinbase_records))
-}
-
-/// Spends some value from inputs owned by the sender, to the receiver,
-/// and pays back whatever we are left with.
-#[allow(clippy::too_many_arguments)]
-async fn send(
- consensus: &Consensus,
- from: &Account<Components>,
- inputs: Vec<SerialRecord>,
- receiver: &Address<Components>,
- amount: i64,
- memo: [u8; 32],
-) -> Result<TransactionResponse, ConsensusError> {
- let mut sum = 0;
- for inp in &inputs {
- sum += inp.value.0;
- }
- assert!(sum >= amount, "not enough balance in inputs");
- let change = sum - amount;
-
- let to = vec![receiver.clone(), from.address.clone()];
- let values = vec![amount, change];
-
- let from: Vec<PrivateKey> = vec![from.private_key.clone(); Components::NUM_INPUT_RECORDS]
- .into_iter()
- .map(Into::into)
- .collect();
-
- let joint_serial_numbers = consensus.calculate_joint_serial_numbers(&inputs[..], &from[..])?;
- let mut new_records = vec![];
- for j in 0..Components::NUM_OUTPUT_RECORDS as u8 {
- new_records.push(consensus.make_dummy_record(
- &joint_serial_numbers[..],
- j,
- to[j as usize].clone().into(),
- AleoAmount(values[j as usize]),
- RecordPayload::default(),
- )?);
- }
- consensus
- .create_transaction(CreateTransactionRequest {
- old_records: inputs,
- old_account_private_keys: from,
- new_records,
- memo,
- })
- .await
-}
-
async fn mine_blocks(n: u32) -> Result<TestBlocks, ConsensusError> {
info!("Creating test account");
let [miner_acc, acc_1, _] = FIXTURE.test_accounts.clone();
diff --git a/testing/examples/test_data.rs b/testing/examples/test_data.rs
index 4e801487d8..ba9d2e8c78 100644
--- a/testing/examples/test_data.rs
+++ b/testing/examples/test_data.rs
@@ -14,20 +14,14 @@
// You should have received a copy of the GNU General Public License
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
-use snarkos_consensus::{error::ConsensusError, Consensus, CreateTransactionRequest, MineContext, TransactionResponse};
-use snarkos_storage::{PrivateKey, SerialBlock, SerialBlockHeader, SerialRecord, SerialTransaction};
-use snarkos_testing::sync::*;
-use snarkvm_dpc::{
- testnet1::{instantiated::*, payload::Payload as RecordPayload},
- Account,
- Address,
- AleoAmount,
- DPCComponents,
+use snarkos_consensus::{MineContext, TransactionResponse};
+use snarkos_testing::{
+ mining::{mine_block, send},
+ sync::*,
};
use snarkvm_utilities::ToBytes;
use std::{fs::File, path::PathBuf};
-use tracing::info;
async fn setup_test_data() -> TestData {
let [miner_acc, acc_1, _] = FIXTURE.test_accounts.clone();
@@ -89,77 +83,6 @@ async fn setup_test_data() -> TestData {
}
}
-async fn mine_block(
- miner: &MineContext,
- transactions: Vec<SerialTransaction>,
- parent_block_header: &SerialBlockHeader,
-) -> Result<(SerialBlock, Vec<SerialRecord>), ConsensusError> {
- info!("Mining block!");
-
- let (transactions, coinbase_records) = miner.establish_block(transactions).await?;
-
- let header = miner.find_block(&transactions, parent_block_header)?;
-
- let block = SerialBlock { header, transactions };
-
- let old_block_height = miner.consensus.storage.canon().await?.block_height;
-
- // Duplicate blocks dont do anything
- miner.consensus.receive_block(block.clone()).await; // throws a duplicate error -- seemingly intentional
-
- let new_block_height = miner.consensus.storage.canon().await?.block_height;
- assert_eq!(old_block_height + 1, new_block_height);
-
- Ok((block, coinbase_records))
-}
-
-/// Spends some value from inputs owned by the sender, to the receiver,
-/// and pays back whatever we are left with.
-#[allow(clippy::too_many_arguments)]
-async fn send(
- consensus: &Consensus,
- from: &Account<Components>,
- inputs: Vec<SerialRecord>,
- receiver: &Address<Components>,
- amount: i64,
- memo: [u8; 32],
-) -> Result<TransactionResponse, ConsensusError> {
- let mut sum = 0;
- for inp in &inputs {
- sum += inp.value.0;
- }
- assert!(sum >= amount, "not enough balance in inputs");
- let change = sum - amount;
-
- let to = vec![receiver.clone(), from.address.clone()];
- let values = vec![amount, change];
-
- let from: Vec<PrivateKey> = vec![from.private_key.clone(); Components::NUM_INPUT_RECORDS]
- .into_iter()
- .map(Into::into)
- .collect();
-
- let joint_serial_numbers = consensus.calculate_joint_serial_numbers(&inputs[..], &from[..])?;
- let mut new_records = vec![];
- for j in 0..Components::NUM_OUTPUT_RECORDS as u8 {
- new_records.push(consensus.make_dummy_record(
- &joint_serial_numbers[..],
- j,
- to[j as usize].clone().into(),
- AleoAmount(values[j as usize]),
- RecordPayload::default(),
- )?);
- }
- consensus
- .create_transaction(CreateTransactionRequest {
- old_records: inputs,
- old_account_private_keys: from,
- new_records,
- memo,
- })
- .await
-}
-
#[tokio::main]
pub async fn main() {
let test_data = setup_test_data().await;
diff --git a/testing/src/lib.rs b/testing/src/lib.rs
index aed057472c..66874650ed 100644
--- a/testing/src/lib.rs
+++ b/testing/src/lib.rs
@@ -18,6 +18,7 @@
#![forbid(unsafe_code)]
pub mod dpc;
+pub mod mining;
#[cfg(feature = "network")]
pub mod network;
pub mod storage;
diff --git a/testing/src/mining/mod.rs b/testing/src/mining/mod.rs
new file mode 100644
index 0000000000..10a501c6af
--- /dev/null
+++ b/testing/src/mining/mod.rs
@@ -0,0 +1,94 @@
+// Copyright (C) 2019-2021 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// The snarkOS library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// The snarkOS library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
+
+use snarkos_consensus::{error::ConsensusError, Consensus, CreateTransactionRequest, MineContext, TransactionResponse};
+use snarkos_storage::{PrivateKey, SerialBlock, SerialBlockHeader, SerialRecord, SerialTransaction};
+use snarkvm_dpc::{
+ testnet1::{instantiated::*, record::payload::Payload as RecordPayload},
+ Account,
+ Address,
+ AleoAmount,
+ DPCComponents,
+};
+
+pub async fn mine_block(
+ miner: &MineContext,
+ transactions: Vec<SerialTransaction>,
+ parent_block_header: &SerialBlockHeader,
+) -> Result<(SerialBlock, Vec<SerialRecord>), ConsensusError> {
+ let (transactions, coinbase_records) = miner.establish_block(transactions).await?;
+
+ let header = miner.find_block(&transactions, parent_block_header)?;
+
+ let block = SerialBlock { header, transactions };
+
+ let old_block_height = miner.consensus.storage.canon().await?.block_height;
+
+ // Duplicate blocks dont do anything
+ miner.consensus.receive_block(block.clone()).await; // throws a duplicate error -- seemingly intentional
+
+ let new_block_height = miner.consensus.storage.canon().await?.block_height;
+ assert_eq!(old_block_height + 1, new_block_height);
+
+ Ok((block, coinbase_records))
+}
+
+/// Spends some value from inputs owned by the sender, to the receiver,
+/// and pays back whatever we are left with.
+#[allow(clippy::too_many_arguments)]
+pub async fn send(
+ consensus: &Consensus,
+ from: &Account<Components>,
+ inputs: Vec<SerialRecord>,
+ receiver: &Address<Components>,
+ amount: i64,
+ memo: [u8; 32],
+) -> Result<TransactionResponse, ConsensusError> {
+ let mut sum = 0;
+ for inp in &inputs {
+ sum += inp.value.0;
+ }
+ assert!(sum >= amount, "not enough balance in inputs");
+ let change = sum - amount;
+
+ let to = vec![receiver.clone(), from.address.clone()];
+ let values = vec![amount, change];
+
+ let from: Vec<PrivateKey> = vec![from.private_key.clone(); Components::NUM_INPUT_RECORDS]
+ .into_iter()
+ .map(Into::into)
+ .collect();
+
+ let joint_serial_numbers = consensus.calculate_joint_serial_numbers(&inputs[..], &from[..])?;
+ let mut new_records = vec![];
+ for j in 0..Components::NUM_OUTPUT_RECORDS as u8 {
+ new_records.push(consensus.make_dummy_record(
+ &joint_serial_numbers[..],
+ j,
+ to[j as usize].clone().into(),
+ AleoAmount(values[j as usize]),
+ RecordPayload::default(),
+ )?);
+ }
+ consensus
+ .create_transaction(CreateTransactionRequest {
+ old_records: inputs,
+ old_account_private_keys: from,
+ new_records,
+ memo,
+ })
+ .await
+}
|
[Feature] Send fork points in block locator hashes
To alleviate issues with syncing with forks around, we could see places we have forked > 64 blocks, and include those critical points as hashes in block locator hashes.
|
While this could be a good ad-hoc measure, we could improve the general situation if we just decided to send more locator hashes and improve the distances between the provided block hashes; I've updated https://github.com/AleoHQ/snarkOS/pull/809 which does that, and improves the index-picking algorithm.
I ran into an issue and independently came to this conclusion again.
Consider the following cases:
1.
We have a fork that is 90 blocks long locally, 120 blocks back from local canon. In reality, the 90-block chain is canon, so we are marching down a fork. Even if we include every block we have in canon as block locator hashes, we will not include base of real canon + 90-64 block, the minimum block to continue advancing down canon, in any received sync blocks from peers due to peers sending back only 64 sync block hashes.
This has been observed.
2.
We have a fork that is 30 blocks long (notably less than 64 blocks), but its based 600 blocks behind local canon. In reality, the 30-block-long fork is actually canon.
The chances of a block locator hash entering a sufficient range, such that a peer sends back some hash we don't already have is relatively low. We halt syncing since our local canon is at tip of some fork, and we don't try to get any more blocks on the real canon chain,
This has been observed.
A proposed (and tested to work) fix is proposed:
1. Limit scope of block locator hashes to only include the range of the last 1024 blocks (equal to our max fork depth) -- we won't fork any further back then that regardless.
2. Limit proportional step of block locator hashes to 63 or less. This prevents us missing a range of block locator hashes, and is actually more strict than the 1024 limit generally.
3. Add the tip of all forks greater than 1 block deep (there are a lot of trivial 1 block forks we can ignore) to block locator hashes, exceeding the default capacity.
The combination of these items provides us 100% coverage of our 1024 block range and hedges against long forks with #3. There is a slight performance cost to calculating forks and such, but it's minimal in testing and is worth it.
|
2021-08-10T00:29:33Z
|
1.3
|
AleoNet/snarkOS
| 927
|
AleoNet__snarkOS-927
|
[
"817"
] |
16235c8d8968357be41fb03c729dfa4e87536362
|
diff --git a/network/src/errors/network.rs b/network/src/errors/network.rs
index 4b1a86dab7..c6f7934a6f 100644
--- a/network/src/errors/network.rs
+++ b/network/src/errors/network.rs
@@ -50,6 +50,7 @@ pub enum NetworkError {
}
impl NetworkError {
+ // FIXME (nkls): is unused and overlaps with `is_trivial`?
pub fn is_fatal(&self) -> bool {
match self {
Self::Io(err) => [
@@ -66,7 +67,7 @@ impl NetworkError {
pub fn is_trivial(&self) -> bool {
match self {
- NetworkError::Io(e) => {
+ Self::Io(e) => {
matches!(
e.kind(),
ErrorKind::BrokenPipe
diff --git a/network/src/peers/peer/connector.rs b/network/src/peers/peer/connector.rs
index 25e15f9beb..f0c105705b 100644
--- a/network/src/peers/peer/connector.rs
+++ b/network/src/peers/peer/connector.rs
@@ -50,9 +50,23 @@ impl Peer {
self.address, e
);
}
+
+ // Marks the peer as unroutable if the connection fails. Currently matches
+ // against all io errors which exclude a potential max peers limit breach.
+ //
+ // FIXME (nkls): refine this to be set for specific errors?
+ //
+ // TCP/IP error codes are different on Unix and on Windows and can't be
+ // reliably matched with the current error kinds. Nightly recently saw the
+ // addition of new error kinds that could be useful once stabilised:
+ // https://github.com/rust-lang/rust/issues/86442.
+ if let NetworkError::Io(_e) = e {
+ self.set_routable(false);
+ }
}
Ok(network) => {
self.set_connected();
+ self.set_routable(true);
metrics::increment_gauge!(CONNECTED, 1.0);
event_target
.send(PeerEvent {
diff --git a/network/src/peers/peer/peer.rs b/network/src/peers/peer/peer.rs
index 6f79382205..76a445d949 100644
--- a/network/src/peers/peer/peer.rs
+++ b/network/src/peers/peer/peer.rs
@@ -57,6 +57,10 @@ pub struct Peer {
pub is_bootnode: bool,
#[serde(skip)]
pub queued_outbound_message_count: Arc<AtomicUsize>,
+ /// Whether this peer is routable or not.
+ ///
+ /// `None` indicates the node has never attempted a connection with this peer.
+ pub is_routable: Option<bool>,
}
const FAILURE_EXPIRY_TIME: Duration = Duration::from_secs(15 * 60);
@@ -70,6 +74,10 @@ impl Peer {
quality: Default::default(),
is_bootnode,
queued_outbound_message_count: Default::default(),
+
+ // Set to `None` since peer creation only ever happens before a connection to the peer,
+ // therefore we don't know if its listener is routable or not.
+ is_routable: None,
}
}
@@ -182,4 +190,8 @@ impl Peer {
self.quality.disconnected();
self.status = PeerStatus::Disconnected;
}
+
+ pub(super) fn set_routable(&mut self, is_routable: bool) {
+ self.is_routable = Some(is_routable)
+ }
}
diff --git a/network/src/peers/peer_book.rs b/network/src/peers/peer_book.rs
index 4bac74cb8e..f1cc5a6bfa 100644
--- a/network/src/peers/peer_book.rs
+++ b/network/src/peers/peer_book.rs
@@ -233,7 +233,7 @@ impl PeerBook {
self.map_each_peer(|peer| async move { peer.load().await }).await
}
- pub async fn disconnected_peers_snapshot(&self) -> Vec<Peer> {
+ pub fn disconnected_peers_snapshot(&self) -> Vec<Peer> {
self.disconnected_peers
.inner()
.iter()
diff --git a/network/src/peers/peers.rs b/network/src/peers/peers.rs
index 3050d98ab8..56792685e6 100644
--- a/network/src/peers/peers.rs
+++ b/network/src/peers/peers.rs
@@ -202,7 +202,7 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
let bootnodes = self.config.bootnodes();
// Iterate through a selection of random peers and attempt to connect.
- let mut candidates = self.peer_book.disconnected_peers_snapshot().await;
+ let mut candidates = self.peer_book.disconnected_peers_snapshot();
candidates.retain(|peer| peer.address != own_address && !bootnodes.contains(&peer.address));
@@ -290,12 +290,52 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
pub(crate) async fn send_peers(&self, remote_address: SocketAddr) {
// Broadcast the sanitized list of connected peers back to the requesting peer.
- let peers = self
- .peer_book
- .connected_peers()
- .into_iter()
- .filter(|&addr| addr != remote_address)
- .choose_multiple(&mut rand::thread_rng(), crate::SHARED_PEER_COUNT);
+
+ use crate::Peer;
+ use rand::prelude::SliceRandom;
+
+ let connected_peers = self.peer_book.connected_peers_snapshot().await;
+
+ let basic_filter =
+ |peer: &Peer| peer.address != remote_address && !self.config.bootnodes().contains(&peer.address);
+ let strict_filter = |peer: &Peer| basic_filter(peer) && peer.is_routable.unwrap_or(false);
+
+ // Strictly filter the connected peers by only including the routable addresses.
+ let strictly_filtered_peers: Vec<SocketAddr> = connected_peers
+ .iter()
+ .filter(|peer| strict_filter(peer))
+ .map(|peer| peer.address)
+ .collect();
+
+ // Bootnodes apply less strict filtering rules if the set is empty by falling back on
+ // connected peers that may or may not be routable...
+ let peers = if self.config.is_bootnode() && strictly_filtered_peers.is_empty() {
+ let filtered_peers: Vec<SocketAddr> = connected_peers
+ .iter()
+ .filter(|peer| basic_filter(peer))
+ .map(|peer| peer.address)
+ .collect();
+
+ // ...and if need be on disconnected peers.
+ if filtered_peers.is_empty() {
+ self.peer_book
+ .disconnected_peers_snapshot()
+ .iter()
+ .filter(|peer| basic_filter(peer))
+ .map(|peer| peer.address)
+ .collect()
+ } else {
+ filtered_peers
+ }
+ } else {
+ strictly_filtered_peers
+ };
+
+ // Limit set size.
+ let peers = peers
+ .choose_multiple(&mut rand::thread_rng(), crate::SHARED_PEER_COUNT)
+ .copied()
+ .collect();
self.peer_book.send_to(remote_address, Payload::Peers(peers)).await;
}
diff --git a/rpc/src/rpc_impl.rs b/rpc/src/rpc_impl.rs
index 22104ca0d2..8517715e54 100644
--- a/rpc/src/rpc_impl.rs
+++ b/rpc/src/rpc_impl.rs
@@ -408,7 +408,7 @@ impl<S: Storage + Send + core::marker::Sync + 'static> RpcFunctions for RpcImpl<
.iter()
.map(|(addr, node_centrality)| Vertice {
addr: *addr,
- is_bootnode: self.node.config.bootnodes().contains(&addr),
+ is_bootnode: self.node.config.bootnodes().contains(addr),
degree_centrality: node_centrality.degree_centrality,
eigenvector_centrality: node_centrality.eigenvector_centrality,
fiedler_value: node_centrality.fiedler_value,
|
diff --git a/network/tests/peers.rs b/network/tests/peers.rs
index 8792a988e7..86fa8fb474 100644
--- a/network/tests/peers.rs
+++ b/network/tests/peers.rs
@@ -70,29 +70,28 @@ async fn peer_responder_side() {
}
#[tokio::test(flavor = "multi_thread")]
-async fn triangle() {
- let setup = |bootnodes| TestSetup {
+async fn bootnode_peer_propagation() {
+ let setup = |is_bootnode, bootnodes| TestSetup {
consensus_setup: None,
min_peers: 2,
peer_sync_interval: 1,
+ is_bootnode,
bootnodes,
..Default::default()
};
// Spin up and connect nodes A and B.
- let node_alice = test_node(setup(vec![])).await;
+ let node_alice = test_node(setup(true, vec![])).await;
let addr_alice = node_alice.local_address().unwrap();
- // wait a few ms so that nodes don't try to connect into one another simultaneously
- sleep(Duration::from_millis(10)).await;
+ // Connect B to A.
+ let node_bob = test_node(setup(false, vec![addr_alice.to_string()])).await;
- let node_bob = test_node(setup(vec![addr_alice.to_string()])).await;
- let addr_bob = node_bob.local_address().unwrap();
+ // Sleep to avoid C and B trying to simultaneously connect to each other.
+ sleep(Duration::from_millis(100)).await;
- sleep(Duration::from_millis(10)).await;
-
- // Spin up node C and connect to B.
- let node_charlie = test_node(setup(vec![addr_bob.to_string()])).await;
+ // Connect C to A.
+ let node_charlie = test_node(setup(false, vec![addr_alice.to_string()])).await;
let triangle_is_formed = || {
node_charlie.peer_book.is_connected(addr_alice)
@@ -101,6 +100,7 @@ async fn triangle() {
&& node_charlie.peer_book.get_active_peer_count() == 2
};
- // Make sure C connects to A => peer propagation works.
+ // Make sure B and C connect => bootnode propagates peers (without `is_routable` check in this
+ // case).
wait_until!(5, triangle_is_formed());
}
diff --git a/network/tests/topology.rs b/network/tests/topology.rs
index ee6cfdf358..13f4f16a5d 100644
--- a/network/tests/topology.rs
+++ b/network/tests/topology.rs
@@ -135,6 +135,8 @@ async fn spawn_nodes_in_a_mesh() {
);
}
+// FIXME: adjust to new peering mechanics.
+#[ignore]
#[tokio::test(flavor = "multi_thread")]
async fn line_converges_to_mesh() {
let setup = TestSetup {
@@ -156,6 +158,7 @@ async fn line_converges_to_mesh() {
);
}
+#[ignore]
#[tokio::test(flavor = "multi_thread")]
async fn ring_converges_to_mesh() {
let setup = TestSetup {
@@ -177,6 +180,7 @@ async fn ring_converges_to_mesh() {
);
}
+#[ignore]
#[tokio::test(flavor = "multi_thread")]
async fn star_converges_to_mesh() {
let setup = TestSetup {
|
[Feature] Don't provide unroutable addresses in peer lists
The current peer-sharing mechanism only checks that we don't provide a peer with its own address among the addresses we're connected to; we can do better than this, though, including:
- excluding bootnodes (peers are generally going to know the same ones)
- excluding addresses that connected to us (as opposed to us being the initiator; potentially unroutable)
- filtering for addresses that we would verify to be routable
This could be related to https://github.com/AleoHQ/snarkOS/issues/802, as the network crawler would know a lot of peers and be able to produce great peer lists.
|
2021-07-07T13:45:46Z
|
1.3
|
|
AleoNet/snarkOS
| 878
|
AleoNet__snarkOS-878
|
[
"802",
"818"
] |
fee2c04ff43bea30047da19b21ab69fe3fec42e2
|
diff --git a/Cargo.lock b/Cargo.lock
index ca14eb4226..f312fd086c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -61,9 +61,9 @@ checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b"
[[package]]
name = "approx"
-version = "0.5.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e"
+checksum = "3f2a05fd1bd10b2527e20a2cd32d8873d115b8b39fe219ee25f42a8aca6ba278"
dependencies = [
"num-traits",
]
@@ -1655,13 +1655,12 @@ dependencies = [
[[package]]
name = "nalgebra"
-version = "0.27.1"
+version = "0.26.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120"
+checksum = "476d1d59fe02fe54c86356e91650cd892f392782a1cb9fc524ec84f7aa9e1d06"
dependencies = [
"approx",
"matrixmultiply",
- "nalgebra-macros",
"num-complex",
"num-rational",
"num-traits",
@@ -1669,17 +1668,6 @@ dependencies = [
"typenum",
]
-[[package]]
-name = "nalgebra-macros"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
-]
-
[[package]]
name = "native-tls"
version = "0.2.7"
@@ -1745,9 +1733,9 @@ dependencies = [
[[package]]
name = "num-complex"
-version = "0.4.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085"
+checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5"
dependencies = [
"num-traits",
]
@@ -1764,9 +1752,9 @@ dependencies = [
[[package]]
name = "num-rational"
-version = "0.4.0"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a"
+checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07"
dependencies = [
"autocfg",
"num-integer",
@@ -2577,9 +2565,9 @@ checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
[[package]]
name = "simba"
-version = "0.5.1"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c"
+checksum = "5132a955559188f3d13c9ba831e77c802ddc8782783f050ed0c52f5988b95f4c"
dependencies = [
"approx",
"num-complex",
@@ -2729,6 +2717,7 @@ dependencies = [
"mpmc-map",
"nalgebra",
"once_cell",
+ "parking_lot",
"peak_alloc",
"rand 0.8.3",
"serde",
diff --git a/network/Cargo.toml b/network/Cargo.toml
index 21e18c5781..c200c8fd03 100644
--- a/network/Cargo.toml
+++ b/network/Cargo.toml
@@ -84,9 +84,15 @@ version = "0.1"
[dependencies.async-trait]
version = "0.1"
+[dependencies.nalgebra]
+version = "0.26"
+
[dependencies.once_cell]
version = "1.5.2"
+[dependencies.parking_lot]
+version = "0.11.1"
+
[dependencies.rand]
version = "0.8"
@@ -144,8 +150,6 @@ version = "0.1"
[dev-dependencies.snarkos-testing]
path = "../testing"
-[dev-dependencies.nalgebra]
-version = "0.27"
[dev-dependencies.peak_alloc]
version = "0.1.0"
diff --git a/network/src/inbound/inbound.rs b/network/src/inbound/inbound.rs
index 5893c5653e..b6ac7d1694 100644
--- a/network/src/inbound/inbound.rs
+++ b/network/src/inbound/inbound.rs
@@ -207,7 +207,7 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
Payload::Peers(peers) => {
metrics::increment_counter!(inbound::PEERS);
- self.process_inbound_peers(peers).await;
+ self.process_inbound_peers(source, peers).await;
}
Payload::Ping(_) | Payload::Pong => {
// Skip as this case is already handled with priority in inbound_handler
diff --git a/network/src/lib.rs b/network/src/lib.rs
index d183f17f6e..55f5fc74e0 100644
--- a/network/src/lib.rs
+++ b/network/src/lib.rs
@@ -46,6 +46,9 @@ pub mod node;
pub mod peers;
pub mod sync;
+pub mod topology;
+pub use topology::*;
+
/// The maximum number of block hashes that can be requested or provided in a single batch.
pub const MAX_BLOCK_SYNC_COUNT: u32 = 64;
/// The maximum amount of time allowed to process a single batch of sync blocks. It should be aligned
diff --git a/network/src/node.rs b/network/src/node.rs
index c422c61013..fb60f770d7 100644
--- a/network/src/node.rs
+++ b/network/src/node.rs
@@ -63,6 +63,8 @@ pub struct InnerNode<S: Storage + core::marker::Sync + Send + 'static> {
pub peer_book: PeerBook,
/// The sync handler of this node.
pub sync: OnceCell<Arc<Sync<S>>>,
+ /// Tracks the known network crawled by this node.
+ pub known_network: OnceCell<KnownNetwork>,
/// The node's start-up timestamp.
pub launched: DateTime<Utc>,
/// The tasks spawned by the node.
@@ -111,7 +113,7 @@ impl<S: Storage + core::marker::Sync + Send + 'static> Node<S> {
impl<S: Storage + Send + core::marker::Sync + 'static> Node<S> {
/// Creates a new instance of `Node`.
pub async fn new(config: Config) -> Result<Self, NetworkError> {
- Ok(Self(Arc::new(InnerNode {
+ let node = Self(Arc::new(InnerNode {
id: thread_rng().gen(),
state: Default::default(),
local_address: Default::default(),
@@ -119,12 +121,20 @@ impl<S: Storage + Send + core::marker::Sync + 'static> Node<S> {
inbound: Default::default(),
peer_book: PeerBook::spawn(),
sync: Default::default(),
+ known_network: Default::default(),
launched: Utc::now(),
tasks: Default::default(),
threads: Default::default(),
shutting_down: Default::default(),
master_dispatch: RwLock::new(None),
- })))
+ }));
+
+ if node.config.is_bootnode() {
+ // Safe since this can only ever be set here.
+ node.known_network.set(KnownNetwork::default()).unwrap();
+ }
+
+ Ok(node)
}
pub fn set_sync(&mut self, sync: Sync<S>) {
@@ -151,6 +161,10 @@ impl<S: Storage + Send + core::marker::Sync + 'static> Node<S> {
self.sync().is_some()
}
+ pub fn known_network(&self) -> Option<&KnownNetwork> {
+ self.known_network.get()
+ }
+
pub async fn start_services(&self) {
let node_clone = self.clone();
let mut receiver = self.inbound.take_receiver().await;
@@ -181,6 +195,20 @@ impl<S: Storage + Send + core::marker::Sync + 'static> Node<S> {
});
self.register_task(peering_task);
+ if self.known_network().is_some() {
+ let node_clone = self.clone();
+
+ let known_network_task = task::spawn(async move {
+ loop {
+ // Should always be present since we check for it before this block.
+ if let Some(known_network) = node_clone.known_network() {
+ known_network.update().await
+ }
+ }
+ });
+ self.register_task(known_network_task);
+ }
+
let node_clone = self.clone();
let state_tracking_task = task::spawn(async move {
loop {
diff --git a/network/src/peers/peer_book.rs b/network/src/peers/peer_book.rs
index b849b6bc58..2a9eb0fb5c 100644
--- a/network/src/peers/peer_book.rs
+++ b/network/src/peers/peer_book.rs
@@ -113,8 +113,16 @@ impl PeerBook {
self.connected_peers.inner().keys().copied().collect()
}
+ pub fn disconnected_peers(&self) -> Vec<SocketAddr> {
+ self.disconnected_peers.inner().keys().copied().collect()
+ }
+
+ pub fn get_connected_peer_count(&self) -> u32 {
+ self.connected_peers.len() as u32
+ }
+
pub fn get_active_peer_count(&self) -> u32 {
- self.connected_peers.len() as u32 + self.pending_connections()
+ self.get_connected_peer_count() + self.pending_connections()
}
pub fn get_disconnected_peer_count(&self) -> u32 {
@@ -133,10 +141,6 @@ impl PeerBook {
self.disconnected_peers.get(&address)
}
- pub fn disconnected_peers(&self) -> Vec<SocketAddr> {
- self.disconnected_peers.inner().keys().copied().collect()
- }
-
async fn take_disconnected_peer(&self, address: SocketAddr) -> Option<Peer> {
metrics::decrement_gauge!(DISCONNECTED, 1.0);
self.disconnected_peers.remove(address).await
@@ -229,6 +233,14 @@ impl PeerBook {
self.map_each_peer(|peer| async move { peer.load().await }).await
}
+ pub async fn disconnected_peers_snapshot(&self) -> Vec<Peer> {
+ self.disconnected_peers
+ .inner()
+ .iter()
+ .map(|(_, peer)| peer.clone())
+ .collect()
+ }
+
///
/// Adds the given address to the disconnected peers in this `PeerBook`.
///
diff --git a/network/src/peers/peers.rs b/network/src/peers/peers.rs
index 452a0460a7..a73e49d417 100644
--- a/network/src/peers/peers.rs
+++ b/network/src/peers/peers.rs
@@ -37,8 +37,7 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
///
pub(crate) async fn update_peers(&self) {
// Fetch the number of connected and connecting peers.
- let active_peer_count = self.peer_book.get_active_peer_count() as usize;
-
+ let active_peer_count = self.peer_book.get_active_peer_count();
info!(
"Connected to {} peer{}",
active_peer_count,
@@ -50,27 +49,35 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
// give us 100ms to close some negatively judge_badd connections (probably less needed, but we have time)
tokio::time::sleep(Duration::from_millis(100)).await;
- // Attempt to connect to the default bootnodes of the network.
- self.connect_to_bootnodes().await;
-
- // Attempt to connect to each disconnected peer saved in the peer book.
- if !self.config.is_bootnode() {
- self.connect_to_disconnected_peers().await;
- }
-
- // Broadcast a `GetPeers` message to request for more peers.
- self.broadcast_getpeers_requests().await;
-
- let new_active_peer_count = self.peer_book.get_active_peer_count() as usize;
- // Check if this node server is above the permitted number of connected peers.
- let max_peers = self.config.maximum_number_of_connected_peers() as usize;
- if new_active_peer_count > max_peers {
- let number_to_disconnect = new_active_peer_count - max_peers;
- trace!(
- "Disconnecting from {} peers to maintain their permitted number",
- number_to_disconnect
- );
+ // Fetch active peer count after high RTTs disconnects.
+ let active_peer_count = self.peer_book.get_active_peer_count();
+ let min_peers = self.config.minimum_number_of_connected_peers() as u32;
+ let max_peers = self.config.maximum_number_of_connected_peers() as u32;
+
+ // Calculate the peer counts to disconnect and connect based on the node type and current
+ // peer counts.
+ let (number_to_disconnect, number_to_connect) = match self.config.is_bootnode() {
+ true => {
+ // Bootnodes disconnect down to the min peer count, this to free up room for
+ // the next crawled peers...
+ let number_to_disconnect = active_peer_count.saturating_sub(min_peers);
+ // ...then they connect to disconnected peers leaving 20% of their capacity open
+ // incoming connections.
+ const CRAWLING_CAPACITY_PERCENTAGE: f64 = 0.8;
+ let crawling_capacity = (CRAWLING_CAPACITY_PERCENTAGE * max_peers as f64).floor() as u32;
+ let number_to_connect = crawling_capacity.saturating_sub(active_peer_count - number_to_disconnect);
+
+ (number_to_disconnect, number_to_connect)
+ }
+ false => (
+ // Non-bootnodes disconnect if above the max peer count...
+ active_peer_count.saturating_sub(max_peers),
+ // ...and connect if below the min peer count.
+ min_peers.saturating_sub(active_peer_count),
+ ),
+ };
+ if number_to_disconnect != 0 {
let mut current_peers = self.peer_book.connected_peers_snapshot().await;
// Bootnodes will disconnect from random peers...
@@ -86,7 +93,21 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
}
}
- if new_active_peer_count != 0 {
+ // Attempt to connect to the default bootnodes of the network if the node has no active
+ // connections.
+ if self.peer_book.get_active_peer_count() == 0 {
+ self.connect_to_bootnodes().await;
+ }
+
+ if number_to_connect != 0 {
+ self.connect_to_disconnected_peers(number_to_connect as usize).await;
+ }
+
+ // Only broadcast requests if any peers are connected.
+ if self.peer_book.get_connected_peer_count() != 0 {
+ // Broadcast a `GetPeers` message to request for more peers.
+ self.broadcast_getpeers_requests().await;
+
// Send a `Ping` to every connected peer.
self.broadcast_pings().await;
}
@@ -167,44 +188,37 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
///
/// Broadcasts a connection request to all disconnected peers.
///
- async fn connect_to_disconnected_peers(&self) {
+ async fn connect_to_disconnected_peers(&self, count: usize) {
// Local address must be known by now.
let own_address = self.local_address().unwrap();
- // If this node is a bootnode, attempt to connect to all disconnected peers.
// If this node is not a bootnode, attempt to satisfy the minimum number of peer connections.
let random_peers = {
- // Fetch the number of connected and connecting peers.
- let number_of_peers = self.peer_book.get_active_peer_count() as usize;
-
- // Check if this node server is below the permitted number of connected peers.
- let min_peers = self.config.minimum_number_of_connected_peers() as usize;
- if number_of_peers >= min_peers {
- return;
- }
-
- // Set the number of peers to attempt a connection to.
- let count = min_peers - number_of_peers;
-
- if count == 0 {
- return;
- }
-
- let disconnected_peers = self.peer_book.disconnected_peers();
-
trace!(
"Connecting to {} disconnected peers",
- cmp::min(count, disconnected_peers.len())
+ cmp::min(count, self.peer_book.disconnected_peers().len())
);
let bootnodes = self.config.bootnodes();
// Iterate through a selection of random peers and attempt to connect.
- disconnected_peers
- .iter()
- .filter(|peer| **peer != own_address && !bootnodes.contains(peer))
- .copied()
- .choose_multiple(&mut rand::thread_rng(), count)
+ let mut candidates = self.peer_book.disconnected_peers_snapshot().await;
+
+ candidates.retain(|peer| peer.address != own_address && !bootnodes.contains(&peer.address));
+
+ if self.config.is_bootnode() {
+ // Bootnodes choose peers they haven't dialed in a while.
+ candidates.sort_unstable_by_key(|peer| peer.quality.last_connected);
+ }
+
+ // Only keep the addresses.
+ let addr_iter = candidates.iter().map(|peer| peer.address);
+
+ if self.config.is_bootnode() {
+ addr_iter.take(count).collect()
+ } else {
+ addr_iter.choose_multiple(&mut rand::thread_rng(), count)
+ }
};
for remote_address in random_peers {
@@ -289,17 +303,24 @@ impl<S: Storage + Send + Sync + 'static> Node<S> {
/// A node has sent their list of peer addresses.
/// Add all new/updated addresses to our disconnected.
/// The connection handler will be responsible for sending out handshake requests to them.
- pub(crate) async fn process_inbound_peers(&self, peers: Vec<SocketAddr>) {
+ pub(crate) async fn process_inbound_peers(&self, source: SocketAddr, peers: Vec<SocketAddr>) {
let local_address = self.local_address().unwrap(); // the address must be known by now
- for peer_address in peers.into_iter().filter(|&peer_addr| peer_addr != local_address) {
+ for peer_address in peers.iter().filter(|&peer_addr| *peer_addr != local_address) {
// Inform the peer book that we found a peer.
// The peer book will determine if we have seen the peer before,
// and include the peer if it is new.
self.peer_book
- .add_peer(peer_address, self.config.bootnodes().contains(&peer_address))
+ .add_peer(*peer_address, self.config.bootnodes().contains(&peer_address))
.await;
}
+
+ if let Some(known_network) = self.known_network() {
+ // If this node is tracking the network, record the connections. This can
+ // then be used to construct the graph and query peer info from the peerbook.
+
+ let _ = known_network.sender.try_send((source, peers));
+ }
}
pub fn can_connect(&self) -> bool {
diff --git a/network/src/topology.rs b/network/src/topology.rs
new file mode 100644
index 0000000000..078765bfe8
--- /dev/null
+++ b/network/src/topology.rs
@@ -0,0 +1,542 @@
+// Copyright (C) 2019-2021 Aleo Systems Inc.
+// This file is part of the snarkOS library.
+
+// The snarkOS library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// The snarkOS library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
+
+// Network crawler:
+// Start a crawler task (similar to the peers task) which updates state. Only one peer would be
+// connected at a time to start and would be queried for peers. It would then select on peer at
+// random to continue the crawl.
+//
+// Q: extend the network protocol to include statistics or node metadata?
+// Q: when to perform centrality computation?
+
+use std::{
+ cmp::Ordering,
+ collections::{BTreeMap, HashSet},
+ hash::{Hash, Hasher},
+ net::SocketAddr,
+ ops::Sub,
+};
+
+use chrono::{DateTime, Utc};
+use nalgebra::{DMatrix, DVector, SymmetricEigen};
+use parking_lot::RwLock;
+use tokio::sync::{
+ mpsc,
+ mpsc::{Receiver, Sender},
+ Mutex,
+};
+
+// Purges connections that haven't been seen within this time (in hours).
+const STALE_CONNECTION_CUTOFF_TIME_HRS: i64 = 4;
+
+/// A connection between two peers.
+///
+/// Implements `partialEq` and `Hash` manually so that the `source`-`target` order has no impact on equality
+/// (since connections are directionless). The timestamp is also not included in the comparison.
+#[derive(Debug, Eq, Copy, Clone)]
+pub struct Connection {
+ /// One side of the connection.
+ pub source: SocketAddr,
+ /// The other side of the connection.
+ pub target: SocketAddr,
+ /// The last time this peer was seen by the crawler (used determine which connections are
+ /// likely stale).
+ last_seen: DateTime<Utc>,
+}
+
+impl PartialEq for Connection {
+ fn eq(&self, other: &Self) -> bool {
+ let (a, b) = (self.source, self.target);
+ let (c, d) = (other.source, other.target);
+
+ a == d && b == c || a == c && b == d
+ }
+}
+
+impl Hash for Connection {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ let (a, b) = (self.source, self.target);
+
+ // This ensures the hash is the same for (a, b) as it is for (b, a).
+ match a.cmp(&b) {
+ Ordering::Greater => {
+ b.hash(state);
+ a.hash(state);
+ }
+ _ => {
+ a.hash(state);
+ b.hash(state);
+ }
+ }
+ }
+}
+
+impl Connection {
+ fn new(source: SocketAddr, target: SocketAddr) -> Self {
+ Connection {
+ source,
+ target,
+ last_seen: Utc::now(),
+ }
+ }
+}
+
+/// Keeps track of crawled peers and their connections.
+#[derive(Debug)]
+pub struct KnownNetwork {
+ pub sender: Sender<(SocketAddr, Vec<SocketAddr>)>,
+ receiver: Mutex<Receiver<(SocketAddr, Vec<SocketAddr>)>>,
+ connections: RwLock<HashSet<Connection>>,
+}
+
+impl Default for KnownNetwork {
+ fn default() -> Self {
+ // Buffer size of 1000 messages seems reasonable to begin with.
+ let (tx, rx) = mpsc::channel(1000);
+
+ Self {
+ sender: tx,
+ receiver: Mutex::new(rx),
+ connections: Default::default(),
+ }
+ }
+}
+
+impl KnownNetwork {
+ /// Updates the crawled connection set.
+ pub async fn update(&self) {
+ if let Some((source, peers)) = self.receiver.lock().await.recv().await {
+ self.update_inner(source, peers);
+ }
+ }
+
+ // More convenient for testing.
+ fn update_inner(&self, source: SocketAddr, peers: Vec<SocketAddr>) {
+ // Rules:
+ // - if a connecton exists already, do nothing.
+ // - if a connection is new, add it.
+ // - if an exisitng connection involving the source isn't in the peerlist, remove it if
+ // it's stale.
+
+ let new_connections: HashSet<Connection> =
+ peers.into_iter().map(|peer| Connection::new(source, peer)).collect();
+
+ // Find which connections need to be removed.
+ //
+ // With sets: a - b = removed connections (if and only if one of the two addrs is the
+ // source), otherwise it's a connection which doesn't include the source and shouldn't be
+ // removed. We also keep connections seen within the last few hours as peerlists are capped
+ // in size and omitted connections don't necessarily mean they don't exist anymore.
+ let connections_to_remove: HashSet<Connection> = self
+ .connections
+ .read()
+ .difference(&new_connections)
+ .filter(|conn| {
+ (conn.source == source || conn.target == source)
+ && (Utc::now() - conn.last_seen).num_hours() > STALE_CONNECTION_CUTOFF_TIME_HRS
+ })
+ .copied()
+ .collect();
+
+ // Only retain connections that aren't removed.
+ self.connections
+ .write()
+ .retain(|connection| !connections_to_remove.contains(&connection));
+
+ // Scope the write lock.
+ {
+ let mut connections_g = self.connections.write();
+
+ // Insert new connections, we use replace so the last seen timestamp is overwritten.
+ for new_connection in new_connections.into_iter() {
+ connections_g.replace(new_connection);
+ }
+ }
+ }
+
+ /// Returns a connection.
+ pub fn get_connection(&self, source: SocketAddr, target: SocketAddr) -> Option<Connection> {
+ self.connections.read().get(&Connection::new(source, target)).copied()
+ }
+
+ /// Returns a snapshot of all the connections.
+ pub fn connections(&self) -> HashSet<Connection> {
+ self.connections.read().clone()
+ }
+
+ /// Returns `true` if the known network contains any connections, `false` otherwise.
+ pub fn has_connections(&self) -> bool {
+ !self.connections.read().is_empty()
+ }
+}
+
+/// Network topology measurements.
+#[derive(Debug)]
+pub struct NetworkMetrics {
+ /// The total node count of the network.
+ node_count: usize,
+ /// The total connection count for the network.
+ connection_count: usize,
+ /// The network density.
+ ///
+ /// This is defined as actual connections divided by the total number of possible connections.
+ density: f64,
+ /// The algebraic connectivity of the network.
+ ///
+ /// This is the value of the Fiedler eigenvalue, the second-smallest eigenvalue of the network's
+ /// Laplacian matrix.
+ algebraic_connectivity: f64,
+ /// The difference between the node with the largest connection count and the node with the
+ /// lowest.
+ degree_centrality_delta: f64,
+ /// Node centrality measurements mapped to each node's address.
+ ///
+ /// Includes degree centrality, eigenvector centrality (the relative importance of a node in
+ /// the network) and Fiedler vector (describes a possible partitioning of the network).
+ centrality: BTreeMap<SocketAddr, NodeCentrality>,
+}
+
+impl NetworkMetrics {
+ /// Returns the network metrics for the state described by the connections list.
+ pub fn new(known_network: &KnownNetwork) -> Self {
+ // Copy the connections as the data must not change throughout the metrics computation.
+ let connections: HashSet<Connection> = known_network.connections();
+
+ // Construct the list of nodes from the connections.
+ let mut nodes: HashSet<SocketAddr> = HashSet::new();
+ for connection in connections.iter() {
+ // Using a hashset guarantees uniqueness.
+ nodes.insert(connection.source);
+ nodes.insert(connection.target);
+ }
+
+ let node_count = nodes.len();
+ let connection_count = connections.len();
+ let density = calculate_density(node_count as f64, connection_count as f64);
+
+ // Create an index of nodes to introduce some notion of order the rows and columns all matrices will follow.
+ let index: BTreeMap<SocketAddr, usize> = nodes.iter().enumerate().map(|(i, &addr)| (addr, i)).collect();
+
+ // Not stored on the struct but can be pretty inspected with `println!`.
+ // The adjacency matrix can be built from the node index and the connections list.
+ let adjacency_matrix = adjacency_matrix(&index, connections);
+ // The degree matrix can be built from the adjacency matrix (row sum is connection count).
+ let degree_matrix = degree_matrix(&index, &adjacency_matrix);
+ // The laplacian matrix is degree matrix minus the adjacence matrix.
+ let laplacian_matrix = degree_matrix.clone().sub(&adjacency_matrix);
+
+ let degree_centrality = degree_centrality(&index, °ree_matrix);
+ let degree_centrality_delta = degree_centrality_delta(°ree_matrix);
+ let eigenvector_centrality = eigenvector_centrality(&index, adjacency_matrix);
+ let (algebraic_connectivity, fiedler_vector_indexed) = fiedler(&index, laplacian_matrix);
+
+ // Create the `NodeCentrality` instances for each node.
+ let centrality: BTreeMap<SocketAddr, NodeCentrality> = nodes
+ .iter()
+ .map(|&addr| {
+ // Must contain values for this node since it was constructed using same set of
+ // nodes.
+ let dc = degree_centrality.get(&addr).unwrap();
+ let ec = eigenvector_centrality.get(&addr).unwrap();
+ let fv = fiedler_vector_indexed.get(&addr).unwrap();
+ let nc = NodeCentrality::new(*dc, *ec, *fv);
+
+ (addr, nc)
+ })
+ .collect();
+
+ Self {
+ node_count,
+ connection_count,
+ density,
+ algebraic_connectivity,
+ degree_centrality_delta,
+ centrality,
+ }
+ }
+}
+
+/// Centrality measurements of a node.
+#[derive(Debug)]
+struct NodeCentrality {
+ /// Connection count of the node.
+ degree_centrality: u16,
+ /// A measure of the relative importance of the node in the network.
+ ///
+ /// Summing the values of each node adds up to the number of nodes in the network. This was
+ /// done to allow comparison between different network topologies irrespective of node count.
+ eigenvector_centrality: f64,
+ /// This value is extracted from the Fiedler eigenvector corresponding to the second smallest
+ /// eigenvalue of the Laplacian matrix of the network.
+ ///
+ /// The network can be partitioned on the basis of these values (positive, negative and when
+ /// relevant close to zero).
+ fiedler_value: f64,
+}
+
+impl NodeCentrality {
+ fn new(degree_centrality: u16, eigenvector_centrality: f64, fiedler_value: f64) -> Self {
+ Self {
+ degree_centrality,
+ eigenvector_centrality,
+ fiedler_value,
+ }
+ }
+}
+
+pub fn calculate_density(n: f64, ac: f64) -> f64 {
+ // Calculate the total number of possible connections given a node count.
+ let pc = n * (n - 1.0) / 2.0;
+ // Actual connections divided by the possbile connections gives the density.
+ ac / pc
+}
+
+/// Returns the degree matrix for the network with values ordered by the index.
+fn degree_matrix(index: &BTreeMap<SocketAddr, usize>, adjacency_matrix: &DMatrix<f64>) -> DMatrix<f64> {
+ let n = index.len();
+ let mut matrix = DMatrix::<f64>::zeros(n, n);
+
+ for (i, row) in adjacency_matrix.row_iter().enumerate() {
+ // Set the diagonal to be the sum of connections in that row. The index isn't necessary
+ // here since the rows are visited in order and the adjacency matrix is ordered after the
+ // index.
+ matrix[(i, i)] = row.sum()
+ }
+
+ matrix
+}
+
+/// Returns the adjacency matrix for the network with values ordered by the index.
+fn adjacency_matrix(index: &BTreeMap<SocketAddr, usize>, connections: HashSet<Connection>) -> DMatrix<f64> {
+ let n = index.len();
+ let mut matrix = DMatrix::<f64>::zeros(n, n);
+
+ // Compute the adjacency matrix. As our network is an undirected graph, the adjacency matrix is
+ // symmetric.
+ for connection in connections {
+ // Addresses must be present.
+ // Get the indices for each address in the connection.
+ let i = index.get(&connection.source).unwrap();
+ let j = index.get(&connection.target).unwrap();
+
+        // Since connections are unique both the upper and lower triangles must be written (as the
+        // graph is undirected) for each connection.
+ matrix[(*i, *j)] = 1.0;
+ matrix[(*j, *i)] = 1.0;
+ }
+
+ matrix
+}
+
+/// Returns the difference between the highest and lowest degree centrality in the network.
+///
+/// Returns an `f64`, though the value should be a natural number.
+fn degree_centrality_delta(degree_matrix: &DMatrix<f64>) -> f64 {
+ let max = degree_matrix.max();
+ let min = degree_matrix.min();
+
+ max - min
+}
+
+/// Returns the degree centrality of a node.
+///
+/// This is defined as the connection count of the node.
+fn degree_centrality(index: &BTreeMap<SocketAddr, usize>, degree_matrix: &DMatrix<f64>) -> BTreeMap<SocketAddr, u16> {
+ let diag = degree_matrix.diagonal();
+ index
+ .keys()
+ .zip(diag.iter())
+ .map(|(addr, dc)| (*addr, *dc as u16))
+ .collect()
+}
+
+/// Returns the eigenvector centrality of each node in the network.
+fn eigenvector_centrality(
+ index: &BTreeMap<SocketAddr, usize>,
+ adjacency_matrix: DMatrix<f64>,
+) -> BTreeMap<SocketAddr, f64> {
+ // Compute the eigenvectors and corresponding eigenvalues and sort in descending order.
+ let ascending = false;
+ let eigenvalue_vector_pairs = sorted_eigenvalue_vector_pairs(adjacency_matrix, ascending);
+ let (_highest_eigenvalue, highest_eigenvector) = &eigenvalue_vector_pairs[0];
+
+ // The eigenvector is a relative score of node importance (normalised by the norm), to obtain an absolute score for each
+ // node, we normalise so that the sum of the components are equal to 1.
+ let sum = highest_eigenvector.sum() / index.len() as f64;
+ let normalised = highest_eigenvector.unscale(sum);
+
+    // Map addresses to their eigenvector centrality.
+ index
+ .keys()
+ .zip(normalised.column(0).iter())
+ .map(|(addr, ec)| (*addr, *ec))
+ .collect()
+}
+
+/// Returns the Fiedler values for each node in the network.
+fn fiedler(index: &BTreeMap<SocketAddr, usize>, laplacian_matrix: DMatrix<f64>) -> (f64, BTreeMap<SocketAddr, f64>) {
+ // Compute the eigenvectors and corresponding eigenvalues and sort in ascending order.
+ let ascending = true;
+ let pairs = sorted_eigenvalue_vector_pairs(laplacian_matrix, ascending);
+
+ // Second-smallest eigenvalue is the Fiedler value (algebraic connectivity), the associated
+ // eigenvector is the Fiedler vector.
+ let (algebraic_connectivity, fiedler_vector) = &pairs[1];
+
+ // Map addresses to their Fiedler values.
+ let fiedler_values_indexed = index
+ .keys()
+ .zip(fiedler_vector.column(0).iter())
+ .map(|(addr, fiedler_value)| (*addr, *fiedler_value))
+ .collect();
+
+ (*algebraic_connectivity, fiedler_values_indexed)
+}
+
+/// Computes the eigenvalues and corresponding eigenvectors from the supplied symmetric matrix.
+fn sorted_eigenvalue_vector_pairs(matrix: DMatrix<f64>, ascending: bool) -> Vec<(f64, DVector<f64>)> {
+ // Compute eigenvalues and eigenvectors.
+ let eigen = SymmetricEigen::new(matrix);
+
+ // Map eigenvalues to their eigenvectors.
+ let mut pairs: Vec<(f64, DVector<f64>)> = eigen
+ .eigenvalues
+ .iter()
+ .zip(eigen.eigenvectors.column_iter())
+ .map(|(value, vector)| (*value, vector.clone_owned()))
+ .collect();
+
+    // Sort eigenvalue-vector pairs in the requested order (ascending or descending).
+ pairs.sort_unstable_by(|(a, _), (b, _)| {
+ if ascending {
+ a.partial_cmp(b).unwrap()
+ } else {
+ b.partial_cmp(a).unwrap()
+ }
+ });
+
+ pairs
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use chrono::Duration;
+
+ #[test]
+ fn connections_partial_eq() {
+ let a = "12.34.56.78:9000".parse().unwrap();
+ let b = "98.76.54.32:1000".parse().unwrap();
+
+ assert_eq!(Connection::new(a, b), Connection::new(b, a));
+ assert_eq!(Connection::new(a, b), Connection::new(a, b));
+ }
+
+ #[test]
+ fn connections_update() {
+ let addr_a = "11.11.11.11:1000".parse().unwrap();
+ let addr_b = "22.22.22.22:2000".parse().unwrap();
+ let addr_c = "33.33.33.33:3000".parse().unwrap();
+ let addr_d = "44.44.44.44:4000".parse().unwrap();
+ let addr_e = "55.55.55.55:5000".parse().unwrap();
+
+ let old_but_valid_timestamp = Utc::now() - Duration::hours(STALE_CONNECTION_CUTOFF_TIME_HRS - 1);
+ let stale_timestamp = Utc::now() - Duration::hours(STALE_CONNECTION_CUTOFF_TIME_HRS + 1);
+
+ // Seed the known network with the older connections.
+ let old_but_valid_connection = Connection {
+ source: addr_a,
+ target: addr_d,
+ last_seen: old_but_valid_timestamp,
+ };
+
+ let stale_connection = Connection {
+ source: addr_a,
+ target: addr_e,
+ last_seen: stale_timestamp,
+ };
+
+ let mut seeded_connections = HashSet::new();
+ seeded_connections.insert(old_but_valid_connection);
+ seeded_connections.insert(stale_connection);
+
+ let (tx, rx) = mpsc::channel(100);
+ let known_network = KnownNetwork {
+ sender: tx,
+ receiver: Mutex::new(rx),
+ connections: RwLock::new(seeded_connections),
+ };
+
+ // Insert two connections.
+ known_network.update_inner(addr_a, vec![addr_b, addr_c]);
+ assert!(
+ known_network
+ .connections
+ .read()
+ .contains(&Connection::new(addr_a, addr_b))
+ );
+ assert!(
+ known_network
+ .connections
+ .read()
+ .contains(&Connection::new(addr_a, addr_c))
+ );
+ assert!(
+ known_network
+ .connections
+ .read()
+ .contains(&Connection::new(addr_a, addr_d))
+ );
+ // Assert the stale connection was purged.
+ assert!(
+ !known_network
+ .connections
+ .read()
+ .contains(&Connection::new(addr_a, addr_e))
+ );
+
+ // Insert (a, b) connection reversed, make sure it doesn't change the list.
+ known_network.update_inner(addr_b, vec![addr_a]);
+ assert_eq!(known_network.connections.read().len(), 3);
+
+ // Insert (a, d) again and make sure the timestamp was updated.
+ known_network.update_inner(addr_a, vec![addr_d]);
+ assert_ne!(
+ old_but_valid_timestamp,
+ known_network.get_connection(addr_a, addr_d).unwrap().last_seen
+ );
+ }
+
+ #[test]
+ fn connections_hash() {
+ use std::collections::hash_map::DefaultHasher;
+
+ let a = "11.11.11.11:1000".parse().unwrap();
+ let b = "22.22.22.22:2000".parse().unwrap();
+
+ let mut h1 = DefaultHasher::new();
+ let mut h2 = DefaultHasher::new();
+
+ let k1 = Connection::new(a, b);
+ let k2 = Connection::new(b, a);
+
+ k1.hash(&mut h1);
+ k2.hash(&mut h2);
+
+ // verify k1 == k2 => hash(k1) == hash(k2)
+ assert_eq!(h1.finish(), h2.finish());
+ }
+}
diff --git a/rpc/documentation/public_endpoints/getnetworkgraph.md b/rpc/documentation/public_endpoints/getnetworkgraph.md
new file mode 100644
index 0000000000..1b3545c5fe
--- /dev/null
+++ b/rpc/documentation/public_endpoints/getnetworkgraph.md
@@ -0,0 +1,22 @@
+Returns the network graph crawled by this node (if it is a bootnode).
+
+### Arguments
+
+None
+
+### Response
+
+| Parameter | Type | Description |
+| :-----------------------: | :--------: | :---------------------------------------: |
+| `edges` | array | The list of connections known by the node |
+| `vertices` | array | The list of nodes known by the node |
+| `edges[i].source` | SocketAddr | One side of the crawled connection |
+| `edges[i].target` | SocketAddr | The other side of the crawled connection |
+| `vertices[i].addr` | SocketAddr | The recorded address of the crawled node |
+| `vertices[i].is_bootnode` | bool | Indicates whether the node is a bootnode |
+
+### Example
+```ignore
+curl --data-binary '{"jsonrpc": "2.0", "id":"documentation", "method": "getnetworkgraph", "params": [] }' -H 'content-type: application/json' http://127.0.0.1:3030/
+```
+
diff --git a/rpc/src/custom_rpc_server.rs b/rpc/src/custom_rpc_server.rs
index afbd12d7bb..58c6c3c77b 100644
--- a/rpc/src/custom_rpc_server.rs
+++ b/rpc/src/custom_rpc_server.rs
@@ -219,6 +219,10 @@ async fn handle_rpc<S: Storage + Send + Sync + 'static>(
let result = rpc.get_block_template().map_err(convert_crate_err);
result_to_response(&req, result)
}
+ "getnetworkgraph" => {
+ let result = rpc.get_network_graph().map_err(convert_crate_err);
+ result_to_response(&req, result)
+ }
// private
"createaccount" => {
let result = rpc
diff --git a/rpc/src/error.rs b/rpc/src/error.rs
index 7818866d37..56a1dd6fd9 100644
--- a/rpc/src/error.rs
+++ b/rpc/src/error.rs
@@ -52,6 +52,9 @@ pub enum RpcError {
#[error("The node doesn't have the sync layer running")]
NoConsensus,
+ #[error("The node isn't tracking the network")]
+ NoKnownNetwork,
+
#[error("{}", _0)]
StorageError(StorageError),
diff --git a/rpc/src/rpc_impl.rs b/rpc/src/rpc_impl.rs
index 2fc93d905e..3c78d9e75e 100644
--- a/rpc/src/rpc_impl.rs
+++ b/rpc/src/rpc_impl.rs
@@ -21,7 +21,7 @@
use crate::{error::RpcError, rpc_trait::RpcFunctions, rpc_types::*};
use snarkos_consensus::{get_block_reward, memory_pool::Entry, ConsensusParameters, MemoryPool, MerkleTreeLedger};
use snarkos_metrics::{snapshots::NodeStats, stats::NODE_STATS};
-use snarkos_network::{Node, Sync};
+use snarkos_network::{KnownNetwork, Node, Sync};
use snarkvm_dpc::{
testnet1::{
instantiated::{Components, Tx},
@@ -40,6 +40,7 @@ use snarkvm_utilities::{
use chrono::Utc;
use std::{
+ collections::HashSet,
ops::Deref,
sync::{atomic::Ordering, Arc},
};
@@ -94,6 +95,10 @@ impl<S: Storage + Send + core::marker::Sync + 'static> RpcImpl<S> {
pub fn memory_pool(&self) -> Result<&MemoryPool<Tx>, RpcError> {
Ok(self.sync_handler()?.memory_pool())
}
+
+ pub fn known_network(&self) -> Result<&KnownNetwork, RpcError> {
+ self.node.known_network().ok_or(RpcError::NoKnownNetwork)
+ }
}
impl<S: Storage + Send + core::marker::Sync + 'static> RpcFunctions for RpcImpl<S> {
@@ -377,4 +382,29 @@ impl<S: Storage + Send + core::marker::Sync + 'static> RpcFunctions for RpcImpl<
coinbase_value: coinbase_value.0 as u64,
})
}
+
+ fn get_network_graph(&self) -> Result<NetworkGraph, RpcError> {
+ let mut vertices = HashSet::new();
+ let edges: HashSet<Edge> = self
+ .known_network()?
+ .connections()
+ .iter()
+ .map(|connection| {
+ let (source, target) = (connection.source, connection.target);
+
+ vertices.insert(Vertice {
+ addr: source,
+ is_bootnode: self.node.config.bootnodes().contains(&source),
+ });
+ vertices.insert(Vertice {
+ addr: target,
+ is_bootnode: self.node.config.bootnodes().contains(&target),
+ });
+
+ Edge { source, target }
+ })
+ .collect();
+
+ Ok(NetworkGraph { vertices, edges })
+ }
}
diff --git a/rpc/src/rpc_trait.rs b/rpc/src/rpc_trait.rs
index f203ee32db..9497a7344b 100644
--- a/rpc/src/rpc_trait.rs
+++ b/rpc/src/rpc_trait.rs
@@ -98,6 +98,11 @@ pub trait RpcFunctions {
// #[cfg_attr(nightly, doc(include = "../documentation/public_endpoints/getblocktemplate.md"))]
#[rpc(name = "getblocktemplate")]
fn get_block_template(&self) -> Result<BlockTemplate, RpcError>;
+
+    // todo: re-add in Rust 1.54
+ // #[cfg_attr(nightly, doc(include ="../documentation/public_endpoints/getnetworkgraph.md"))]
+ #[rpc(name = "getnetworkgraph")]
+ fn get_network_graph(&self) -> Result<NetworkGraph, RpcError>;
}
/// Definition of private RPC endpoints that require authentication.
diff --git a/rpc/src/rpc_types.rs b/rpc/src/rpc_types.rs
index 2014e118a6..710227eccf 100644
--- a/rpc/src/rpc_types.rs
+++ b/rpc/src/rpc_types.rs
@@ -19,7 +19,7 @@
use chrono::{DateTime, Utc};
use jsonrpc_core::Metadata;
use serde::{Deserialize, Serialize};
-use std::net::SocketAddr;
+use std::{collections::HashSet, net::SocketAddr};
/// Defines the authentication format for accessing private endpoints on the RPC server
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
@@ -282,3 +282,21 @@ pub struct TransactionRecipient {
/// The amount being sent
pub amount: u64,
}
+
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
+pub struct NetworkGraph {
+ pub vertices: HashSet<Vertice>,
+ pub edges: HashSet<Edge>,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
+pub struct Vertice {
+ pub addr: SocketAddr,
+ pub is_bootnode: bool,
+}
+
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
+pub struct Edge {
+ pub source: SocketAddr,
+ pub target: SocketAddr,
+}
|
diff --git a/network/tests/topology.rs b/network/tests/topology.rs
index 78311941f4..ee6cfdf358 100644
--- a/network/tests/topology.rs
+++ b/network/tests/topology.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with the snarkOS library. If not, see <https://www.gnu.org/licenses/>.
-use snarkos_network::Node;
+use snarkos_network::{topology::calculate_density, Node};
use snarkos_storage::LedgerStorage;
use snarkos_testing::{
network::{
@@ -26,10 +26,6 @@ use snarkos_testing::{
wait_until,
};
-use std::{collections::BTreeMap, net::SocketAddr, ops::Sub};
-
-use nalgebra::{DMatrix, DVector, SymmetricEigen};
-
const N: usize = 25;
const MIN_PEERS: u16 = 5;
const MAX_PEERS: u16 = 30;
@@ -132,7 +128,11 @@ async fn spawn_nodes_in_a_mesh() {
// Make sure the node with the largest degree centrality and smallest degree centrality don't
// have a delta greater than the max-min peer interval allows for. This check also provides
// some insight into whether the network is meshed in a homogeneous manner.
- wait_until!(15, degree_centrality_delta(&nodes) <= MAX_PEERS - MIN_PEERS, 200);
+ wait_until!(
+ 15,
+ degree_centrality_delta(&nodes) <= (MAX_PEERS - MIN_PEERS).into(),
+ 200
+ );
}
#[tokio::test(flavor = "multi_thread")]
@@ -149,7 +149,11 @@ async fn line_converges_to_mesh() {
start_nodes(&nodes).await;
wait_until!(10, network_density(&nodes) >= 0.1, 200);
- wait_until!(10, degree_centrality_delta(&nodes) <= MAX_PEERS - MIN_PEERS, 200);
+ wait_until!(
+ 10,
+ degree_centrality_delta(&nodes) <= (MAX_PEERS - MIN_PEERS).into(),
+ 200
+ );
}
#[tokio::test(flavor = "multi_thread")]
@@ -166,7 +170,11 @@ async fn ring_converges_to_mesh() {
start_nodes(&nodes).await;
wait_until!(10, network_density(&nodes) >= 0.1, 200);
- wait_until!(10, degree_centrality_delta(&nodes) <= MAX_PEERS - MIN_PEERS, 200);
+ wait_until!(
+ 10,
+ degree_centrality_delta(&nodes) <= (MAX_PEERS - MIN_PEERS).into(),
+ 200
+ );
}
#[tokio::test(flavor = "multi_thread")]
@@ -183,7 +191,11 @@ async fn star_converges_to_mesh() {
start_nodes(&nodes).await;
wait_until!(15, network_density(&nodes) >= 0.1, 200);
- wait_until!(15, degree_centrality_delta(&nodes) <= MAX_PEERS - MIN_PEERS, 200);
+ wait_until!(
+ 15,
+ degree_centrality_delta(&nodes) <= (MAX_PEERS - MIN_PEERS).into(),
+ 200
+ );
}
#[tokio::test(flavor = "multi_thread")]
@@ -253,268 +265,32 @@ async fn binary_star_contact() {
nodes.push(solo);
wait_until!(10, network_density(&nodes) >= 0.05);
-
- // Computing the metrics for this ignored case, interesting to inspect, especially Fiedler
- // partitioning as we have a graph with two clusters both centered around the bootnodes.
- let metrics = NetworkMetrics::new(&nodes);
- assert_eq!(metrics.node_count, 51);
-}
-
-/// Network topology measurements.
-#[derive(Debug)]
-struct NetworkMetrics {
- /// The total node count of the network.
- node_count: usize,
- /// The total connection count for the network.
- connection_count: usize,
- /// The network density.
- ///
- /// This is defined as actual connections divided by the total number of possible connections.
- density: f64,
- /// The algebraic connectivity of the network.
- ///
- /// This is the value of the Fiedler eigenvalue, the second-smallest eigenvalue of the network's
- /// Laplacian matrix.
- algebraic_connectivity: f64,
- /// The difference between the node with the largest connection count and the node with the
- /// lowest.
- degree_centrality_delta: u16,
- /// Node centrality measurements mapped to each node's address.
- ///
- /// Includes degree centrality, eigenvector centrality (the relative importance of a node in
- /// the network) and Fiedler vector (describes a possible partitioning of the network).
- centrality: BTreeMap<SocketAddr, NodeCentrality>,
-}
-
-impl NetworkMetrics {
- /// Returns the network metrics for the state described by the node list.
- fn new(nodes: &[Node<LedgerStorage>]) -> Self {
- let node_count = nodes.len();
- let connection_count = total_connection_count(nodes);
- let density = network_density(&nodes);
-
- // Create an index of nodes to introduce some notion of order the rows and columns all matrices will follow.
- let index: BTreeMap<SocketAddr, usize> = nodes
- .iter()
- .map(|node| node.local_address().unwrap())
- .enumerate()
- .map(|(i, addr)| (addr, i))
- .collect();
-
- // Not stored on the struct but can be pretty inspected with `println!`.
- let degree_matrix = degree_matrix(&index, &nodes);
- let adjacency_matrix = adjacency_matrix(&index, &nodes);
- let laplacian_matrix = degree_matrix.clone().sub(adjacency_matrix.clone());
-
- let degree_centrality = degree_centrality(&index, degree_matrix);
- let degree_centrality_delta = degree_centrality_delta(&nodes);
- let eigenvector_centrality = eigenvector_centrality(&index, adjacency_matrix);
- let (algebraic_connectivity, fiedler_vector_indexed) = fiedler(&index, laplacian_matrix);
-
- // Create the `NodeCentrality` instances for each node.
- let centrality: BTreeMap<SocketAddr, NodeCentrality> = nodes
- .iter()
- .map(|node| {
- let addr = node.local_address().unwrap();
- // Must contain values for this node since it was constructed using same set of
- // nodes.
- let dc = degree_centrality.get(&addr).unwrap();
- let ec = eigenvector_centrality.get(&addr).unwrap();
- let fv = fiedler_vector_indexed.get(&addr).unwrap();
- let nc = NodeCentrality::new(*dc, *ec, *fv);
-
- (addr, nc)
- })
- .collect();
-
- Self {
- node_count,
- connection_count,
- density,
- algebraic_connectivity,
- degree_centrality_delta,
- centrality,
- }
- }
-}
-
-/// Centrality measurements of a node.
-#[derive(Debug)]
-struct NodeCentrality {
- /// Connection count of the node.
- degree_centrality: u16,
- /// A measure of the relative importance of the node in the network.
- ///
- /// Summing the values of each node adds up to the number of nodes in the network. This was
- /// done to allow comparison between different network topologies irrespective of node count.
- eigenvector_centrality: f64,
- /// This value is extracted from the Fiedler eigenvector corresponding to the second smallest
- /// eigenvalue of the Laplacian matrix of the network.
- ///
- /// The network can be partitioned on the basis of these values (positive, negative and when
- /// relevant close to zero).
- fiedler_value: f64,
-}
-
-impl NodeCentrality {
- fn new(degree_centrality: u16, eigenvector_centrality: f64, fiedler_value: f64) -> Self {
- Self {
- degree_centrality,
- eigenvector_centrality,
- fiedler_value,
- }
- }
}
/// Returns the total connection count of the network.
-fn total_connection_count(nodes: &[Node<LedgerStorage>]) -> usize {
+fn total_connection_count(nodes: &[Node<LedgerStorage>]) -> u32 {
let mut count = 0;
for node in nodes {
- count += node.peer_book.get_active_peer_count()
- }
-
- (count / 2) as usize
-}
-
-/// Returns the network density.
-fn network_density(nodes: &[Node<LedgerStorage>]) -> f64 {
- let connections = total_connection_count(nodes);
- calculate_density(nodes.len() as f64, connections as f64)
-}
-
-fn calculate_density(n: f64, ac: f64) -> f64 {
- // Calculate the total number of possible connections given a node count.
- let pc = n * (n - 1.0) / 2.0;
- // Actual connections divided by the possbile connections gives the density.
- ac / pc
-}
-
-/// Returns the degree matrix for the network with values ordered by the index.
-fn degree_matrix(index: &BTreeMap<SocketAddr, usize>, nodes: &[Node<LedgerStorage>]) -> DMatrix<f64> {
- let n = nodes.len();
- let mut matrix = DMatrix::<f64>::zeros(n, n);
-
- for node in nodes {
- let n = node.peer_book.get_active_peer_count();
- // Address must be present.
- // Get the index for the and set the number of connected peers. The degree matrix is
- // diagonal.
- let node_n = index.get(&node.local_address().unwrap()).unwrap();
- matrix[(*node_n, *node_n)] = n as f64;
- }
-
- matrix
-}
-
-/// Returns the adjacency matrix for the network with values ordered by the index.
-fn adjacency_matrix(index: &BTreeMap<SocketAddr, usize>, nodes: &[Node<LedgerStorage>]) -> DMatrix<f64> {
- let n = nodes.len();
- let mut matrix = DMatrix::<f64>::zeros(n, n);
-
- // Compute the adjacency matrix. As our network is an undirected graph, the adjacency matrix is
- // symmetric.
- for node in nodes {
- node.peer_book.connected_peers().into_iter().for_each(|addr| {
- // Addresses must be present.
- // Get the indices for each node, progressing row by row to construct the matrix.
- let node_m = index.get(&node.local_address().unwrap()).unwrap();
- let peer_n = index.get(&addr).unwrap();
- matrix[(*node_m, *peer_n)] = 1.0;
- });
+ count += node.peer_book.get_connected_peer_count()
}
- matrix
+ count / 2
}
-/// Returns the difference between the highest and lowest degree centrality in the network.
// This could use the degree matrix, though as this is used extensively in tests and checked
// repeatedly until it reaches a certain value, we want to keep its calculation decoupled from the
// `NetworkMetrics`.
-fn degree_centrality_delta(nodes: &[Node<LedgerStorage>]) -> u16 {
- let dc = nodes.iter().map(|node| node.peer_book.get_active_peer_count());
+fn degree_centrality_delta(nodes: &[Node<LedgerStorage>]) -> u32 {
+ let dc = nodes.iter().map(|node| node.peer_book.get_connected_peer_count());
let min = dc.clone().min().unwrap();
let max = dc.max().unwrap();
- (max - min) as u16
-}
-
-/// Returns the degree centrality of a node.
-///
-/// This is defined as the connection count of the node.
-fn degree_centrality(index: &BTreeMap<SocketAddr, usize>, degree_matrix: DMatrix<f64>) -> BTreeMap<SocketAddr, u16> {
- let diag = degree_matrix.diagonal();
- index
- .keys()
- .zip(diag.iter())
- .map(|(addr, dc)| (*addr, *dc as u16))
- .collect()
+ max - min
}
-/// Returns the eigenvalue centrality of each node in the network.
-fn eigenvector_centrality(
- index: &BTreeMap<SocketAddr, usize>,
- adjacency_matrix: DMatrix<f64>,
-) -> BTreeMap<SocketAddr, f64> {
- // Compute the eigenvectors and corresponding eigenvalues and sort in descending order.
- let ascending = false;
- let eigenvalue_vector_pairs = sorted_eigenvalue_vector_pairs(adjacency_matrix, ascending);
- let (_highest_eigenvalue, highest_eigenvector) = &eigenvalue_vector_pairs[0];
-
- // The eigenvector is a relative score of node importance (normalised by the norm), to obtain an absolute score for each
- // node, we normalise so that the sum of the components are equal to 1.
- let sum = highest_eigenvector.sum() / index.len() as f64;
- let normalised = highest_eigenvector.unscale(sum);
-
- // Map addresses to their eigenvalue centrality.
- index
- .keys()
- .zip(normalised.column(0).iter())
- .map(|(addr, ec)| (*addr, *ec))
- .collect()
-}
-
-/// Returns the Fiedler values for each node in the network.
-fn fiedler(index: &BTreeMap<SocketAddr, usize>, laplacian_matrix: DMatrix<f64>) -> (f64, BTreeMap<SocketAddr, f64>) {
- // Compute the eigenvectors and corresponding eigenvalues and sort in ascending order.
- let ascending = true;
- let pairs = sorted_eigenvalue_vector_pairs(laplacian_matrix, ascending);
-
- // Second-smallest eigenvalue is the Fiedler value (algebraic connectivity), the associated
- // eigenvector is the Fiedler vector.
- let (algebraic_connectivity, fiedler_vector) = &pairs[1];
-
- // Map addresses to their Fiedler values.
- let fiedler_values_indexed = index
- .keys()
- .zip(fiedler_vector.column(0).iter())
- .map(|(addr, fiedler_value)| (*addr, *fiedler_value))
- .collect();
-
- (*algebraic_connectivity, fiedler_values_indexed)
-}
-
-/// Computes the eigenvalues and corresponding eigenvalues from the supplied symmetric matrix.
-fn sorted_eigenvalue_vector_pairs(matrix: DMatrix<f64>, ascending: bool) -> Vec<(f64, DVector<f64>)> {
- // Compute eigenvalues and eigenvectors.
- let eigen = SymmetricEigen::new(matrix);
-
- // Map eigenvalues to their eigenvectors.
- let mut pairs: Vec<(f64, DVector<f64>)> = eigen
- .eigenvalues
- .iter()
- .zip(eigen.eigenvectors.column_iter())
- .map(|(value, vector)| (*value, vector.clone_owned()))
- .collect();
-
- // Sort eigenvalue-vector pairs in descending order.
- pairs.sort_unstable_by(|(a, _), (b, _)| {
- if ascending {
- a.partial_cmp(b).unwrap()
- } else {
- b.partial_cmp(a).unwrap()
- }
- });
-
- pairs
+/// Returns the network density.
+fn network_density(nodes: &[Node<LedgerStorage>]) -> f64 {
+ let connections = total_connection_count(nodes);
+ calculate_density(nodes.len() as f64, connections as f64)
}
|
[Feature] Protocol (as opposed to rpc) based network crawler
## 🚀 Feature
A network based crawler would be more reliable than RPC (which relies on nodes running on the default port). It could also be used for in-depth network analysis (graph exposed through RPC and behind a feature flag) and network shaping when coupled with the already implemented centrality measurements.
[Feature] Improve the availability of bootnodes
Due to all the nodes maintaining persistent connections, the default list of bootnodes that all the nodes attempt to connect to at first can quickly exhaust their connection limits, regardless of how generous they are. There are several ways in which we can improve this situation:
- making the bootnodes "thin" and their connections short-lived, i.e. removing their sync capabilities and instead making them laser-focused on just providing lists of peers; that would make them capable of handling huge numbers of connections, making us able to have fewer bootnodes in total; such bootnodes could also serve as network crawlers (see https://github.com/AleoHQ/snarkOS/issues/802) and actively improve the "shape" of the network by providing "smart" peer lists
- making the connections of bootnodes short-lived (though not as short as in the first point), e.g. disconnecting from peers after e.g. 5 minutes; that time should allow the new peers to learn about other potential peers and possibly form long-lived connections with them instead of the bootnodes
- reduce the number of bootnodes that the nodes attempt to connect to in a single attempt; this could make the initial wait time a bit longer, but would be likely to reduce the strain on all the bootnodes by giving nodes the chance to learn about other peers and connect to them instead of all the bootnodes; this would work best if paired with https://github.com/AleoHQ/snarkOS/issues/817
|
2021-06-17T14:59:02Z
|
1.3
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.