|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
use std::{ |
|
|
cmp::{max, Ordering}, |
|
|
collections::HashSet, |
|
|
collections::VecDeque, |
|
|
collections::{BTreeSet, HashMap}, |
|
|
fmt, |
|
|
net::IpAddr, |
|
|
task::{Context, Poll}, |
|
|
time::Duration, |
|
|
}; |
|
|
|
|
|
use futures::StreamExt; |
|
|
use futures_ticker::Ticker; |
|
|
use prometheus_client::registry::Registry; |
|
|
use rand::{seq::SliceRandom, thread_rng}; |
|
|
|
|
|
use libp2p_core::{ |
|
|
multiaddr::Protocol::Ip4, multiaddr::Protocol::Ip6, transport::PortUse, Endpoint, Multiaddr, |
|
|
}; |
|
|
use libp2p_identity::Keypair; |
|
|
use libp2p_identity::PeerId; |
|
|
use libp2p_swarm::{ |
|
|
behaviour::{AddressChange, ConnectionClosed, ConnectionEstablished, FromSwarm}, |
|
|
dial_opts::DialOpts, |
|
|
ConnectionDenied, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, |
|
|
THandlerOutEvent, ToSwarm, |
|
|
}; |
|
|
use web_time::{Instant, SystemTime}; |
|
|
|
|
|
use crate::backoff::BackoffStorage; |
|
|
use crate::config::{Config, ValidationMode}; |
|
|
use crate::gossip_promises::GossipPromises; |
|
|
use crate::handler::{Handler, HandlerEvent, HandlerIn}; |
|
|
use crate::mcache::MessageCache; |
|
|
use crate::metrics::{Churn, Config as MetricsConfig, Inclusion, Metrics, Penalty}; |
|
|
use crate::peer_score::{PeerScore, PeerScoreParams, PeerScoreThresholds, RejectReason}; |
|
|
use crate::protocol::SIGNING_PREFIX; |
|
|
use crate::subscription_filter::{AllowAllSubscriptionFilter, TopicSubscriptionFilter}; |
|
|
use crate::time_cache::DuplicateCache; |
|
|
use crate::topic::{Hasher, Topic, TopicHash}; |
|
|
use crate::transform::{DataTransform, IdentityTransform}; |
|
|
use crate::types::{ |
|
|
ControlAction, Message, MessageAcceptance, MessageId, PeerInfo, RawMessage, Subscription, |
|
|
SubscriptionAction, |
|
|
}; |
|
|
use crate::types::{PeerConnections, PeerKind, RpcOut}; |
|
|
use crate::{rpc_proto::proto, TopicScoreParams}; |
|
|
use crate::{PublishError, SubscriptionError, ValidationError}; |
|
|
use quick_protobuf::{MessageWrite, Writer}; |
|
|
use std::{cmp::Ordering::Equal, fmt::Debug}; |
|
|
|
|
|
#[cfg(test)] |
|
|
mod tests; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/// Determines how published messages are authenticated (signed/attributed).
///
/// The chosen variant is converted into the internal [`PublishConfig`] (see the
/// `From<MessageAuthenticity>` impl below) and therefore controls which fields of an
/// outgoing message are populated.
#[derive(Clone)]
pub enum MessageAuthenticity {
    /// Messages are signed with the given [`Keypair`]; the author is the keypair's
    /// [`PeerId`] and the sequence number is linearly increasing (see [`SequenceNumber`]).
    Signed(Keypair),
    /// Messages are not signed; the given [`PeerId`] is used as the message author.
    // NOTE(review): presumably only valid with a permissive `ValidationMode` — the
    // constructor calls `validate_config(&privacy, ...)`; confirm against that check.
    Author(PeerId),
    /// Messages are not signed; a random author is used per message.
    // NOTE(review): randomization itself happens outside this view (in message building).
    RandomAuthor,
    /// Messages are not signed and carry no author/sequence-number information.
    Anonymous,
}
|
|
|
|
|
impl MessageAuthenticity { |
|
|
|
|
|
pub fn is_signing(&self) -> bool { |
|
|
matches!(self, MessageAuthenticity::Signed(_)) |
|
|
} |
|
|
|
|
|
pub fn is_anonymous(&self) -> bool { |
|
|
matches!(self, MessageAuthenticity::Anonymous) |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
/// Event emitted by the gossipsub [`Behaviour`] to the application/swarm.
#[derive(Debug)]
pub enum Event {
    /// A gossipsub message has been received and accepted for delivery.
    Message {
        /// The peer that forwarded us this message.
        propagation_source: PeerId,
        /// The [`MessageId`] of the message, computed by the configured message-id
        /// function. Use this when reporting validation results back via
        /// `report_message_validation_result`.
        message_id: MessageId,
        /// The application-level message (after the inbound data transform —
        /// presumably decompressed; confirm against `DataTransform` impl).
        message: Message,
    },
    /// A remote peer subscribed to a topic.
    Subscribed {
        /// The peer that subscribed.
        peer_id: PeerId,
        /// The topic it subscribed to.
        topic: TopicHash,
    },
    /// A remote peer unsubscribed from a topic.
    Unsubscribed {
        /// The peer that unsubscribed.
        peer_id: PeerId,
        /// The topic it unsubscribed from.
        topic: TopicHash,
    },
    /// A connected peer does not support the gossipsub protocol.
    GossipsubNotSupported { peer_id: PeerId },
}
|
|
|
|
|
|
|
|
|
|
|
/// Internal publishing configuration, derived from [`MessageAuthenticity`]
/// (see the `From` impl below). Mirrors that enum's variants, with the `Signed`
/// case expanded into the state needed to actually sign messages.
// `Signing` carries a full `Keypair`; boxing it isn't worth it for a value that
// exists once per behaviour, hence the allow.
#[allow(clippy::large_enum_variant)]
enum PublishConfig {
    Signing {
        /// Keypair used to sign outgoing messages.
        keypair: Keypair,
        /// Our author id (the keypair's public key as a `PeerId`).
        author: PeerId,
        /// Protobuf-encoded public key, included explicitly only when it cannot be
        /// inlined in the `PeerId` (see `From<MessageAuthenticity>`).
        inline_key: Option<Vec<u8>>,
        /// Monotonically increasing sequence number for published messages.
        last_seq_no: SequenceNumber,
    },
    /// Unsigned messages attributed to a fixed author.
    Author(PeerId),
    /// Unsigned messages with a randomized author.
    RandomAuthor,
    /// Unsigned messages with no author/sequence number.
    Anonymous,
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/// A strictly linearly increasing sequence number, seeded from the current unix
/// timestamp in nanoseconds so that restarts of the node keep producing numbers
/// larger than previously published ones.
#[derive(Debug)]
struct SequenceNumber(u64);

impl SequenceNumber {
    /// Creates a sequence number initialized to "now" in nanoseconds since the epoch.
    fn new() -> Self {
        let nanos_since_epoch = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .expect("time to be linear")
            .as_nanos();
        SequenceNumber(nanos_since_epoch as u64)
    }

    /// Advances the counter by one and returns the new value, panicking on overflow
    /// (which cannot realistically happen for a u64 seeded from a timestamp).
    fn next(&mut self) -> u64 {
        let incremented = self
            .0
            .checked_add(1)
            .expect("to not exhaust u64 space for sequence numbers");
        self.0 = incremented;
        incremented
    }
}
|
|
|
|
|
impl PublishConfig { |
|
|
pub(crate) fn get_own_id(&self) -> Option<&PeerId> { |
|
|
match self { |
|
|
Self::Signing { author, .. } => Some(author), |
|
|
Self::Author(author) => Some(author), |
|
|
_ => None, |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
impl From<MessageAuthenticity> for PublishConfig {
    /// Expands the user-facing authenticity policy into the internal publishing state.
    /// For `Signed`, precomputes the author `PeerId` and decides whether the public
    /// key must be shipped explicitly alongside messages.
    fn from(authenticity: MessageAuthenticity) -> Self {
        match authenticity {
            MessageAuthenticity::Signed(keypair) => {
                let public_key = keypair.public();
                let key_enc = public_key.encode_protobuf();
                let key = if key_enc.len() <= 42 {
                    // Small keys (<= 42 bytes encoded) don't need an explicit key field —
                    // presumably because they can be inlined in the source `PeerId`
                    // (identity multihash); confirm against the gossipsub signing spec.
                    None
                } else {
                    // Larger keys must travel with the message so receivers can verify.
                    Some(key_enc)
                };

                PublishConfig::Signing {
                    keypair,
                    author: public_key.to_peer_id(),
                    inline_key: key,
                    // Seeded from the current time; see `SequenceNumber::new`.
                    last_seq_no: SequenceNumber::new(),
                }
            }
            MessageAuthenticity::Author(peer_id) => PublishConfig::Author(peer_id),
            MessageAuthenticity::RandomAuthor => PublishConfig::RandomAuthor,
            MessageAuthenticity::Anonymous => PublishConfig::Anonymous,
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/// Network behaviour that implements the gossipsub protocol.
///
/// Generic over a [`DataTransform`] `D` applied to message payloads on the wire
/// boundary and a [`TopicSubscriptionFilter`] `F` that gates which topics may be
/// subscribed to.
pub struct Behaviour<D = IdentityTransform, F = AllowAllSubscriptionFilter> {
    /// Configuration providing gossipsub protocol parameters.
    config: Config,

    /// Events queued for delivery to the swarm when polled.
    events: VecDeque<ToSwarm<Event, HandlerIn>>,

    /// Control messages (GRAFT/PRUNE/IHAVE/IWANT) pooled per peer and flushed in
    /// batches (presumably on heartbeat — flushing happens outside this view).
    control_pool: HashMap<PeerId, Vec<ControlAction>>,

    /// How outgoing messages are authored/signed; see [`PublishConfig`].
    publish_config: PublishConfig,

    /// Time-based cache of message ids we have already seen, used to drop duplicates.
    duplicate_cache: DuplicateCache<MessageId>,

    /// Connection/protocol state per connected peer.
    connected_peers: HashMap<PeerId, PeerConnections>,

    /// For each topic, the set of peers known to be subscribed to it.
    topic_peers: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// For each connected peer, the set of topics it is subscribed to
    /// (reverse index of `topic_peers`).
    peer_topics: HashMap<PeerId, BTreeSet<TopicHash>>,

    /// Explicit (direct) peers: always forwarded to, never grafted into the mesh,
    /// and dialed on demand (see `check_explicit_peer_connection`).
    explicit_peers: HashSet<PeerId>,

    /// Peers whose messages/requests are ignored.
    blacklisted_peers: HashSet<PeerId>,

    /// The mesh: for each topic we are subscribed to, the peers we exchange
    /// full messages with.
    mesh: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// Fanout peers: per topic we publish to but are not subscribed to.
    fanout: HashMap<TopicHash, BTreeSet<PeerId>>,

    /// Last time we published to each fanout topic (used to expire fanout state).
    fanout_last_pub: HashMap<TopicHash, Instant>,

    /// Per (topic, peer) backoff timers set by PRUNE, preventing premature re-GRAFT.
    backoffs: BackoffStorage,

    /// Cache of recently published/forwarded messages, served in response to IWANT.
    mcache: MessageCache,

    /// Heartbeat stream firing at `config.heartbeat_interval()`.
    heartbeat: Ticker,

    /// Number of heartbeats that have elapsed.
    heartbeat_ticks: u64,

    /// Peers learned through peer exchange (PX).
    px_peers: HashSet<PeerId>,

    /// Peers for which we initiated the (outbound) connection.
    outbound_peers: HashSet<PeerId>,

    /// Peer scoring state, present only after `with_peer_score` was called:
    /// (scores, thresholds, decay ticker, outstanding IWANT promises).
    peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>,

    /// IHAVE messages received per peer within the current heartbeat (flood protection).
    count_received_ihave: HashMap<PeerId, usize>,

    /// IWANT requests sent per peer within the current heartbeat (flood protection).
    count_sent_iwant: HashMap<PeerId, usize>,

    /// Message ids we have asked for via IWANT but not yet received.
    pending_iwant_msgs: HashSet<MessageId>,

    /// Ids of messages we ourselves published recently; used to recognize our own
    /// messages when publishing without a stable author (see `publish`).
    published_message_ids: DuplicateCache<MessageId>,

    /// Filter deciding which topics may be subscribed to.
    subscription_filter: F,

    /// Transform applied to message data on the inbound/outbound boundary.
    data_transform: D,

    /// Metrics recorder, if metrics were enabled at construction.
    metrics: Option<Metrics>,
}
|
|
|
|
|
impl<D, F> Behaviour<D, F>
where
    D: DataTransform + Default,
    F: TopicSubscriptionFilter + Default,
{
    /// Creates a gossipsub [`Behaviour`] with the default transform and
    /// subscription filter. Fails if `privacy` and the config's validation mode
    /// are incompatible (see `validate_config`).
    pub fn new(privacy: MessageAuthenticity, config: Config) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            None,
            F::default(),
            D::default(),
        )
    }

    /// Same as [`Behaviour::new`], but also registers Prometheus metrics in the
    /// given registry.
    pub fn new_with_metrics(
        privacy: MessageAuthenticity,
        config: Config,
        metrics_registry: &mut Registry,
        metrics_config: MetricsConfig,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            Some((metrics_registry, metrics_config)),
            F::default(),
            D::default(),
        )
    }
}
|
|
|
|
|
impl<D, F> Behaviour<D, F>
where
    D: DataTransform + Default,
    F: TopicSubscriptionFilter,
{
    /// Creates a gossipsub [`Behaviour`] with a custom subscription filter and the
    /// default data transform; delegates to
    /// [`Behaviour::new_with_subscription_filter_and_transform`].
    pub fn new_with_subscription_filter(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        subscription_filter: F,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            metrics,
            subscription_filter,
            D::default(),
        )
    }
}
|
|
|
|
|
impl<D, F> Behaviour<D, F>
where
    D: DataTransform,
    F: TopicSubscriptionFilter + Default,
{
    /// Creates a gossipsub [`Behaviour`] with a custom data transform and the
    /// default subscription filter; delegates to
    /// [`Behaviour::new_with_subscription_filter_and_transform`].
    pub fn new_with_transform(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        data_transform: D,
    ) -> Result<Self, &'static str> {
        Self::new_with_subscription_filter_and_transform(
            privacy,
            config,
            metrics,
            F::default(),
            data_transform,
        )
    }
}
|
|
|
|
|
impl<D, F> Behaviour<D, F>
where
    D: DataTransform,
    F: TopicSubscriptionFilter,
{
    /// Creates a gossipsub [`Behaviour`] with a custom subscription filter and data
    /// transform. This is the primary constructor all other `new_*` variants
    /// delegate to.
    ///
    /// Returns `Err` if the combination of `privacy` and the config's validation
    /// mode is invalid (checked by `validate_config`).
    pub fn new_with_subscription_filter_and_transform(
        privacy: MessageAuthenticity,
        config: Config,
        metrics: Option<(&mut Registry, MetricsConfig)>,
        subscription_filter: F,
        data_transform: D,
    ) -> Result<Self, &'static str> {
        // Reject incompatible privacy/validation-mode combinations up front.
        validate_config(&privacy, config.validation_mode())?;

        Ok(Behaviour {
            metrics: metrics.map(|(registry, cfg)| Metrics::new(registry, cfg)),
            events: VecDeque::new(),
            control_pool: HashMap::new(),
            publish_config: privacy.into(),
            duplicate_cache: DuplicateCache::new(config.duplicate_cache_time()),
            topic_peers: HashMap::new(),
            peer_topics: HashMap::new(),
            explicit_peers: HashSet::new(),
            blacklisted_peers: HashSet::new(),
            mesh: HashMap::new(),
            fanout: HashMap::new(),
            fanout_last_pub: HashMap::new(),
            backoffs: BackoffStorage::new(
                &config.prune_backoff(),
                config.heartbeat_interval(),
                config.backoff_slack(),
            ),
            mcache: MessageCache::new(config.history_gossip(), config.history_length()),
            // First heartbeat fires after the initial delay, then at the regular interval.
            heartbeat: Ticker::new_with_next(
                config.heartbeat_interval(),
                config.heartbeat_initial_delay(),
            ),
            heartbeat_ticks: 0,
            px_peers: HashSet::new(),
            outbound_peers: HashSet::new(),
            peer_score: None,
            count_received_ihave: HashMap::new(),
            count_sent_iwant: HashMap::new(),
            pending_iwant_msgs: HashSet::new(),
            connected_peers: HashMap::new(),
            published_message_ids: DuplicateCache::new(config.published_message_ids_cache_time()),
            config,
            subscription_filter,
            data_transform,
        })
    }
}
|
|
|
|
|
impl<D, F> Behaviour<D, F> |
|
|
where |
|
|
D: DataTransform + Send + 'static, |
|
|
F: TopicSubscriptionFilter + Send + 'static, |
|
|
{ |
|
|
|
|
|
pub fn topics(&self) -> impl Iterator<Item = &TopicHash> { |
|
|
self.mesh.keys() |
|
|
} |
|
|
|
|
|
|
|
|
pub fn mesh_peers(&self, topic_hash: &TopicHash) -> impl Iterator<Item = &PeerId> { |
|
|
self.mesh.get(topic_hash).into_iter().flat_map(|x| x.iter()) |
|
|
} |
|
|
|
|
|
pub fn all_mesh_peers(&self) -> impl Iterator<Item = &PeerId> { |
|
|
let mut res = BTreeSet::new(); |
|
|
for peers in self.mesh.values() { |
|
|
res.extend(peers); |
|
|
} |
|
|
res.into_iter() |
|
|
} |
|
|
|
|
|
|
|
|
pub fn all_peers(&self) -> impl Iterator<Item = (&PeerId, Vec<&TopicHash>)> { |
|
|
self.peer_topics |
|
|
.iter() |
|
|
.map(|(peer_id, topic_set)| (peer_id, topic_set.iter().collect())) |
|
|
} |
|
|
|
|
|
|
|
|
pub fn peer_protocol(&self) -> impl Iterator<Item = (&PeerId, &PeerKind)> { |
|
|
self.connected_peers.iter().map(|(k, v)| (k, &v.kind)) |
|
|
} |
|
|
|
|
|
|
|
|
pub fn peer_score(&self, peer_id: &PeerId) -> Option<f64> { |
|
|
self.peer_score |
|
|
.as_ref() |
|
|
.map(|(score, ..)| score.score(peer_id)) |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Subscribes to a topic.
    ///
    /// Announces the subscription to every known peer, then runs JOIN to build a
    /// mesh for the topic. Returns `Ok(true)` on a new subscription, `Ok(false)`
    /// if already subscribed, and `Err(SubscriptionError::NotAllowed)` if the
    /// subscription filter rejects the topic.
    pub fn subscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, SubscriptionError> {
        tracing::debug!(%topic, "Subscribing to topic");
        let topic_hash = topic.hash();
        if !self.subscription_filter.can_subscribe(&topic_hash) {
            return Err(SubscriptionError::NotAllowed);
        }

        // Presence in the mesh is what "subscribed" means here.
        if self.mesh.contains_key(&topic_hash) {
            tracing::debug!(%topic, "Topic is already in the mesh");
            return Ok(false);
        }

        // Announce the subscription to all known peers.
        // (keys are collected first because `send_message` needs `&mut self`)
        for peer in self.peer_topics.keys().copied().collect::<Vec<_>>() {
            tracing::debug!(%peer, "Sending SUBSCRIBE to peer");
            let event = RpcOut::Subscribe(topic_hash.clone());
            self.send_message(peer, event);
        }

        // Build the mesh for the topic (moves fanout peers in, grafts random peers).
        self.join(&topic_hash);
        tracing::debug!(%topic, "Subscribed to topic");
        Ok(true)
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Unsubscribes from a topic.
    ///
    /// Announces the unsubscription to every known peer, then runs LEAVE to tear
    /// down the topic's mesh (pruning mesh peers). Returns `Ok(true)` if we were
    /// subscribed, `Ok(false)` otherwise; the `Result` wrapper is kept for API
    /// symmetry with `subscribe`, hence the clippy allow.
    #[allow(clippy::unnecessary_wraps)]
    pub fn unsubscribe<H: Hasher>(&mut self, topic: &Topic<H>) -> Result<bool, PublishError> {
        tracing::debug!(%topic, "Unsubscribing from topic");
        let topic_hash = topic.hash();

        if !self.mesh.contains_key(&topic_hash) {
            tracing::debug!(topic=%topic_hash, "Already unsubscribed from topic");
            return Ok(false);
        }

        // Announce the unsubscription to all known peers.
        // (keys are collected first because `send_message` needs `&mut self`)
        for peer in self.peer_topics.keys().copied().collect::<Vec<_>>() {
            tracing::debug!(%peer, "Sending UNSUBSCRIBE to peer");
            let event = RpcOut::Unsubscribe(topic_hash.clone());
            self.send_message(peer, event);
        }

        // Tear down the mesh for the topic (sends PRUNE to mesh peers).
        self.leave(&topic_hash);

        tracing::debug!(topic=%topic_hash, "Unsubscribed from topic");
        Ok(true)
    }
|
|
|
|
|
|
|
|
    /// Publishes a message with multiple topics to the network.
    ///
    /// Selects recipients (mesh peers, or fanout peers when not subscribed; plus
    /// explicit and floodsub peers), caches the message for gossip, and sends it.
    ///
    /// Errors with `MessageTooLarge`, `Duplicate`, or `InsufficientPeers` as the
    /// respective checks below fail; transform/build errors are propagated.
    pub fn publish(
        &mut self,
        topic: impl Into<TopicHash>,
        data: impl Into<Vec<u8>>,
    ) -> Result<MessageId, PublishError> {
        let data = data.into();
        let topic = topic.into();

        // Transform the payload (e.g. compression) before building the raw message.
        let transformed_data = self
            .data_transform
            .outbound_transform(&topic, data.clone())?;

        let raw_message = self.build_raw_message(topic, transformed_data)?;

        // The message id is computed over the UN-transformed data, so that ids are
        // stable regardless of the transform.
        let msg_id = self.config.message_id(&Message {
            source: raw_message.source,
            data, // the un-transformed payload
            sequence_number: raw_message.sequence_number,
            topic: raw_message.topic.clone(),
        });

        // The wire size is checked on the transformed (raw) message.
        if raw_message.raw_protobuf_len() > self.config.max_transmit_size() {
            return Err(PublishError::MessageTooLarge);
        }

        // Refuse to publish a message we have already seen/published.
        if self.duplicate_cache.contains(&msg_id) {
            tracing::warn!(
                message=%msg_id,
                "Not publishing a message that has already been published"
            );
            return Err(PublishError::Duplicate);
        }

        tracing::trace!(message=%msg_id, "Publishing message");

        let topic_hash = raw_message.topic.clone();

        // --- recipient selection ---
        let mut recipient_peers = HashSet::new();
        if let Some(set) = self.topic_peers.get(&topic_hash) {
            if self.config.flood_publish() {
                // Flood publish: send to every topic peer that is explicit or not
                // below the publish score threshold.
                recipient_peers.extend(set.iter().filter(|p| {
                    self.explicit_peers.contains(*p)
                        || !self.score_below_threshold(p, |ts| ts.publish_threshold).0
                }));
            } else {
                match self.mesh.get(&raw_message.topic) {
                    // Subscribed: send to our mesh peers.
                    Some(mesh_peers) => {
                        recipient_peers.extend(mesh_peers);
                    }
                    // Not subscribed: use (or build) fanout peers for the topic.
                    None => {
                        tracing::debug!(topic=%topic_hash, "Topic not in the mesh");
                        if self.fanout.contains_key(&topic_hash) {
                            for peer in self.fanout.get(&topic_hash).expect("Topic must exist") {
                                recipient_peers.insert(*peer);
                            }
                        } else {
                            // No fanout yet: pick up to mesh_n random, eligible peers.
                            let mesh_n = self.config.mesh_n();
                            let new_peers = get_random_peers(
                                &self.topic_peers,
                                &self.connected_peers,
                                &topic_hash,
                                mesh_n,
                                {
                                    |p| {
                                        !self.explicit_peers.contains(p)
                                            && !self
                                                .score_below_threshold(p, |pst| {
                                                    pst.publish_threshold
                                                })
                                                .0
                                    }
                                },
                            );

                            self.fanout.insert(topic_hash.clone(), new_peers.clone());
                            for peer in new_peers {
                                tracing::debug!(%peer, "Peer added to fanout");
                                recipient_peers.insert(peer);
                            }
                        }
                        // Record the publish time so fanout state can expire later.
                        self.fanout_last_pub
                            .insert(topic_hash.clone(), Instant::now());
                    }
                }

                // Explicit peers subscribed to the topic always receive the message.
                for peer in &self.explicit_peers {
                    if set.contains(peer) {
                        recipient_peers.insert(*peer);
                    }
                }

                // Floodsub peers (no mesh concept) also receive it, subject to score.
                for (peer, connections) in &self.connected_peers {
                    if connections.kind == PeerKind::Floodsub
                        && !self
                            .score_below_threshold(peer, |ts| ts.publish_threshold)
                            .0
                    {
                        recipient_peers.insert(*peer);
                    }
                }
            }
        }

        if recipient_peers.is_empty() {
            return Err(PublishError::InsufficientPeers);
        }

        // Remember the message: duplicate suppression + gossip cache (for IWANT).
        self.duplicate_cache.insert(msg_id.clone());
        self.mcache.put(&msg_id, raw_message.clone());

        // Without a stable author we cannot recognize our own message when it comes
        // back; remember its id explicitly (unless self-origin is allowed).
        if let PublishConfig::RandomAuthor | PublishConfig::Anonymous = self.publish_config {
            if !self.config.allow_self_origin() {
                self.published_message_ids.insert(msg_id.clone());
            }
        }

        // Send the message to every selected recipient.
        for peer_id in recipient_peers.iter() {
            tracing::trace!(peer=%peer_id, "Sending message to peer");
            self.send_message(*peer_id, RpcOut::Publish(raw_message.clone()));
        }

        tracing::debug!(message=%msg_id, "Published message");

        if let Some(metrics) = self.metrics.as_mut() {
            metrics.register_published_message(&topic_hash);
        }

        Ok(msg_id)
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Reports the application's validation verdict for a message that was delivered
    /// for validation.
    ///
    /// - `Accept`: the cached message is forwarded to the network.
    /// - `Reject` / `Ignore`: the message is removed from the cache and, if peer
    ///   scoring is enabled, the propagating and originating peers are penalized
    ///   with the corresponding [`RejectReason`].
    ///
    /// Returns `Ok(true)` if the message was found in the cache and acted upon,
    /// `Ok(false)` otherwise (e.g. it already expired from the cache).
    pub fn report_message_validation_result(
        &mut self,
        msg_id: &MessageId,
        propagation_source: &PeerId,
        acceptance: MessageAcceptance,
    ) -> Result<bool, PublishError> {
        let reject_reason = match acceptance {
            MessageAcceptance::Accept => {
                // Mark the message validated and pull it (with the set of peers that
                // also sent it to us) out of the cache for forwarding.
                let (raw_message, originating_peers) = match self.mcache.validate(msg_id) {
                    Some((raw_message, originating_peers)) => {
                        (raw_message.clone(), originating_peers)
                    }
                    None => {
                        tracing::warn!(
                            message=%msg_id,
                            "Message not in cache. Ignoring forwarding"
                        );
                        if let Some(metrics) = self.metrics.as_mut() {
                            metrics.memcache_miss();
                        }
                        return Ok(false);
                    }
                };

                if let Some(metrics) = self.metrics.as_mut() {
                    metrics.register_msg_validation(&raw_message.topic, &acceptance);
                }

                // Forward to the network, excluding the peer that gave it to us and
                // any other peers that already sent it.
                self.forward_msg(
                    msg_id,
                    raw_message,
                    Some(propagation_source),
                    originating_peers,
                )?;
                return Ok(true);
            }
            MessageAcceptance::Reject => RejectReason::ValidationFailed,
            MessageAcceptance::Ignore => RejectReason::ValidationIgnored,
        };

        // Reject/Ignore: drop the message from the cache so it is never forwarded.
        if let Some((raw_message, originating_peers)) = self.mcache.remove(msg_id) {
            if let Some(metrics) = self.metrics.as_mut() {
                metrics.register_msg_validation(&raw_message.topic, &acceptance);
            }

            // Penalize the propagating peer and every peer that also sent us
            // this message.
            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.reject_message(
                    propagation_source,
                    msg_id,
                    &raw_message.topic,
                    reject_reason,
                );
                for peer in originating_peers.iter() {
                    peer_score.reject_message(peer, msg_id, &raw_message.topic, reject_reason);
                }
            }
            Ok(true)
        } else {
            tracing::warn!(message=%msg_id, "Rejected message not in cache");
            Ok(false)
        }
    }
|
|
|
|
|
|
|
|
pub fn add_explicit_peer(&mut self, peer_id: &PeerId) { |
|
|
tracing::debug!(peer=%peer_id, "Adding explicit peer"); |
|
|
|
|
|
self.explicit_peers.insert(*peer_id); |
|
|
|
|
|
self.check_explicit_peer_connection(peer_id); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
pub fn remove_explicit_peer(&mut self, peer_id: &PeerId) { |
|
|
tracing::debug!(peer=%peer_id, "Removing explicit peer"); |
|
|
self.explicit_peers.remove(peer_id); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
pub fn blacklist_peer(&mut self, peer_id: &PeerId) { |
|
|
if self.blacklisted_peers.insert(*peer_id) { |
|
|
tracing::debug!(peer=%peer_id, "Peer has been blacklisted"); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
pub fn remove_blacklisted_peer(&mut self, peer_id: &PeerId) { |
|
|
if self.blacklisted_peers.remove(peer_id) { |
|
|
tracing::debug!(peer=%peer_id, "Peer has been removed from the blacklist"); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Activates peer scoring with the given parameters and thresholds.
    ///
    /// Convenience wrapper around
    /// [`Behaviour::with_peer_score_and_message_delivery_time_callback`] with no
    /// delivery-time callback. Errors if parameters are invalid or scoring was
    /// already enabled.
    pub fn with_peer_score(
        &mut self,
        params: PeerScoreParams,
        threshold: PeerScoreThresholds,
    ) -> Result<(), String> {
        self.with_peer_score_and_message_delivery_time_callback(params, threshold, None)
    }
|
|
|
|
|
|
|
|
|
|
|
    /// Activates peer scoring, optionally with a callback invoked with per-message
    /// delivery times (signature: peer, topic, delivery time as `f64`).
    ///
    /// Errors if `params`/`threshold` fail validation or scoring is already active.
    pub fn with_peer_score_and_message_delivery_time_callback(
        &mut self,
        params: PeerScoreParams,
        threshold: PeerScoreThresholds,
        callback: Option<fn(&PeerId, &TopicHash, f64)>,
    ) -> Result<(), String> {
        // Validate params first, then thresholds (error messages differ per source).
        params.validate()?;
        threshold.validate()?;

        // Scoring can only be enabled once per behaviour instance.
        if self.peer_score.is_some() {
            return Err("Peer score set twice".into());
        }

        // Scores decay on their own ticker, independent of the heartbeat.
        let interval = Ticker::new(params.decay_interval);
        let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback);
        self.peer_score = Some((peer_score, threshold, interval, GossipPromises::default()));
        Ok(())
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pub fn set_topic_params<H: Hasher>( |
|
|
&mut self, |
|
|
topic: Topic<H>, |
|
|
params: TopicScoreParams, |
|
|
) -> Result<(), &'static str> { |
|
|
if let Some((peer_score, ..)) = &mut self.peer_score { |
|
|
peer_score.set_topic_params(topic.hash(), params); |
|
|
Ok(()) |
|
|
} else { |
|
|
Err("Peer score must be initialised with `with_peer_score()`") |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
pub fn get_topic_params<H: Hasher>(&self, topic: &Topic<H>) -> Option<&TopicScoreParams> { |
|
|
self.peer_score.as_ref()?.0.get_topic_params(&topic.hash()) |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
pub fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool { |
|
|
if let Some((peer_score, ..)) = &mut self.peer_score { |
|
|
peer_score.set_application_score(peer_id, new_score) |
|
|
} else { |
|
|
false |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
fn join(&mut self, topic_hash: &TopicHash) { |
|
|
tracing::debug!(topic=%topic_hash, "Running JOIN for topic"); |
|
|
|
|
|
|
|
|
if self.mesh.contains_key(topic_hash) { |
|
|
tracing::debug!(topic=%topic_hash, "JOIN: The topic is already in the mesh, ignoring JOIN"); |
|
|
return; |
|
|
} |
|
|
|
|
|
let mut added_peers = HashSet::new(); |
|
|
|
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.joined(topic_hash) |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
if let Some((_, mut peers)) = self.fanout.remove_entry(topic_hash) { |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"JOIN: Removing peers from the fanout for topic" |
|
|
); |
|
|
|
|
|
|
|
|
peers.retain(|p| { |
|
|
!self.explicit_peers.contains(p) |
|
|
&& !self.score_below_threshold(p, |_| 0.0).0 |
|
|
&& !self.backoffs.is_backoff_with_slack(topic_hash, p) |
|
|
}); |
|
|
|
|
|
|
|
|
|
|
|
let add_peers = std::cmp::min(peers.len(), self.config.mesh_n()); |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"JOIN: Adding {:?} peers from the fanout for topic", |
|
|
add_peers |
|
|
); |
|
|
added_peers.extend(peers.iter().take(add_peers)); |
|
|
|
|
|
self.mesh.insert( |
|
|
topic_hash.clone(), |
|
|
peers.into_iter().take(add_peers).collect(), |
|
|
); |
|
|
|
|
|
|
|
|
self.fanout_last_pub.remove(topic_hash); |
|
|
} |
|
|
|
|
|
let fanaout_added = added_peers.len(); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_included(topic_hash, Inclusion::Fanout, fanaout_added) |
|
|
} |
|
|
|
|
|
|
|
|
if added_peers.len() < self.config.mesh_n() { |
|
|
|
|
|
let new_peers = get_random_peers( |
|
|
&self.topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
self.config.mesh_n() - added_peers.len(), |
|
|
|peer| { |
|
|
!added_peers.contains(peer) |
|
|
&& !self.explicit_peers.contains(peer) |
|
|
&& !self.score_below_threshold(peer, |_| 0.0).0 |
|
|
&& !self.backoffs.is_backoff_with_slack(topic_hash, peer) |
|
|
}, |
|
|
); |
|
|
added_peers.extend(new_peers.clone()); |
|
|
|
|
|
tracing::debug!( |
|
|
"JOIN: Inserting {:?} random peers into the mesh", |
|
|
new_peers.len() |
|
|
); |
|
|
let mesh_peers = self.mesh.entry(topic_hash.clone()).or_default(); |
|
|
mesh_peers.extend(new_peers); |
|
|
} |
|
|
|
|
|
let random_added = added_peers.len() - fanaout_added; |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_included(topic_hash, Inclusion::Random, random_added) |
|
|
} |
|
|
|
|
|
for peer_id in added_peers { |
|
|
|
|
|
tracing::debug!(peer=%peer_id, "JOIN: Sending Graft message to peer"); |
|
|
if let Some((peer_score, ..)) = &mut self.peer_score { |
|
|
peer_score.graft(&peer_id, topic_hash.clone()); |
|
|
} |
|
|
Self::control_pool_add( |
|
|
&mut self.control_pool, |
|
|
peer_id, |
|
|
ControlAction::Graft { |
|
|
topic_hash: topic_hash.clone(), |
|
|
}, |
|
|
); |
|
|
|
|
|
|
|
|
peer_added_to_mesh( |
|
|
peer_id, |
|
|
vec![topic_hash], |
|
|
&self.mesh, |
|
|
self.peer_topics.get(&peer_id), |
|
|
&mut self.events, |
|
|
&self.connected_peers, |
|
|
); |
|
|
} |
|
|
|
|
|
let mesh_peers = self.mesh_peers(topic_hash).count(); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.set_mesh_peers(topic_hash, mesh_peers) |
|
|
} |
|
|
|
|
|
tracing::debug!(topic=%topic_hash, "Completed JOIN for topic"); |
|
|
} |
|
|
|
|
|
|
|
|
    /// Builds a PRUNE control message for `peer` on `topic_hash`, updating
    /// score/backoff state as a side effect.
    ///
    /// `do_px` enables peer exchange (attaching a list of alternative peers);
    /// `on_unsubscribe` selects the longer unsubscribe backoff instead of the
    /// regular prune backoff.
    fn make_prune(
        &mut self,
        topic_hash: &TopicHash,
        peer: &PeerId,
        do_px: bool,
        on_unsubscribe: bool,
    ) -> ControlAction {
        if let Some((peer_score, ..)) = &mut self.peer_score {
            peer_score.prune(peer, topic_hash.clone());
        }

        match self.connected_peers.get(peer).map(|v| &v.kind) {
            Some(PeerKind::Floodsub) => {
                // Floodsub peers have no mesh and should never be pruned.
                tracing::error!("Attempted to prune a Floodsub peer");
            }
            Some(PeerKind::Gossipsub) => {
                // `PeerKind::Gossipsub` peers get a bare PRUNE with no PX peers and
                // no backoff — presumably gossipsub v1.0, which does not understand
                // those fields; confirm against `PeerKind` docs.
                return ControlAction::Prune {
                    topic_hash: topic_hash.clone(),
                    peers: Vec::new(),
                    backoff: None,
                };
            }
            None => {
                tracing::error!("Attempted to Prune an unknown peer");
            }
            _ => {}
        }

        // Select up to `prune_peers()` well-scored peers (excluding the pruned peer)
        // to advertise via peer exchange.
        let peers = if do_px {
            get_random_peers(
                &self.topic_peers,
                &self.connected_peers,
                topic_hash,
                self.config.prune_peers(),
                |p| p != peer && !self.score_below_threshold(p, |_| 0.0).0,
            )
            .into_iter()
            .map(|p| PeerInfo { peer_id: Some(p) })
            .collect()
        } else {
            Vec::new()
        };

        let backoff = if on_unsubscribe {
            self.config.unsubscribe_backoff()
        } else {
            self.config.prune_backoff()
        };

        // Track the backoff locally too, so we don't re-GRAFT this peer too soon.
        self.backoffs.update_backoff(topic_hash, peer, backoff);

        ControlAction::Prune {
            topic_hash: topic_hash.clone(),
            peers,
            backoff: Some(backoff.as_secs()),
        }
    }
|
|
|
|
|
|
|
|
    /// Gossipsub LEAVE(topic): tears down the mesh for a topic we are
    /// unsubscribing from, sending PRUNE (with the unsubscribe backoff) to every
    /// mesh peer and notifying their handlers.
    fn leave(&mut self, topic_hash: &TopicHash) {
        tracing::debug!(topic=%topic_hash, "Running LEAVE for topic");

        // If we have no mesh for the topic, there is nothing to tear down.
        if let Some((_, peers)) = self.mesh.remove_entry(topic_hash) {
            if let Some(m) = self.metrics.as_mut() {
                m.left(topic_hash)
            }
            for peer in peers {
                tracing::debug!(%peer, "LEAVE: Sending PRUNE to peer");
                // Uses the (longer) unsubscribe backoff; see `make_prune`.
                let on_unsubscribe = true;
                let control =
                    self.make_prune(topic_hash, &peer, self.config.do_px(), on_unsubscribe);
                Self::control_pool_add(&mut self.control_pool, peer, control);

                // Notify the connection handler that this peer left the mesh.
                peer_removed_from_mesh(
                    peer,
                    topic_hash,
                    &self.mesh,
                    self.peer_topics.get(&peer),
                    &mut self.events,
                    &self.connected_peers,
                );
            }
        }
        tracing::debug!(topic=%topic_hash, "Completed LEAVE for topic");
    }
|
|
|
|
|
|
|
|
fn check_explicit_peer_connection(&mut self, peer_id: &PeerId) { |
|
|
if !self.peer_topics.contains_key(peer_id) { |
|
|
|
|
|
tracing::debug!(peer=%peer_id, "Connecting to explicit peer"); |
|
|
self.events.push_back(ToSwarm::Dial { |
|
|
opts: DialOpts::peer_id(*peer_id).build(), |
|
|
}); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /// Returns whether `peer_id`'s score is below the threshold selected by the
    /// `threshold` accessor, together with the score itself. Always
    /// `(false, 0.0)` when peer scoring is disabled.
    fn score_below_threshold(
        &self,
        peer_id: &PeerId,
        threshold: impl Fn(&PeerScoreThresholds) -> f64,
    ) -> (bool, f64) {
        Self::score_below_threshold_from_scores(&self.peer_score, peer_id, threshold)
    }
|
|
|
|
|
fn score_below_threshold_from_scores( |
|
|
peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker, GossipPromises)>, |
|
|
peer_id: &PeerId, |
|
|
threshold: impl Fn(&PeerScoreThresholds) -> f64, |
|
|
) -> (bool, f64) { |
|
|
if let Some((peer_score, thresholds, ..)) = peer_score { |
|
|
let score = peer_score.score(peer_id); |
|
|
if score < threshold(thresholds) { |
|
|
return (true, score); |
|
|
} |
|
|
(false, score) |
|
|
} else { |
|
|
(false, 0.0) |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /// Handles an IHAVE control message: decides which advertised message ids we
    /// are missing and responds with IWANT for (a capped, shuffled subset of) them.
    ///
    /// Applies flood protection: per-heartbeat caps on how many IHAVEs a peer may
    /// send (`max_ihave_messages`) and how many message ids we will request from a
    /// single peer (`max_ihave_length`); both counters are tracked per peer.
    fn handle_ihave(&mut self, peer_id: &PeerId, ihave_msgs: Vec<(TopicHash, Vec<MessageId>)>) {
        // Ignore gossip from peers scoring below the gossip threshold.
        if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) {
            tracing::debug!(
                peer=%peer_id,
                %score,
                "IHAVE: ignoring peer with score below threshold"
            );
            return;
        }

        // IHAVE flood protection: cap IHAVE messages per peer per heartbeat.
        let peer_have = self.count_received_ihave.entry(*peer_id).or_insert(0);
        *peer_have += 1;
        if *peer_have > self.config.max_ihave_messages() {
            tracing::debug!(
                peer=%peer_id,
                "IHAVE: peer has advertised too many times ({}) within this heartbeat \
            interval; ignoring",
                *peer_have
            );
            return;
        }

        // Also cap the total number of ids we will request from this peer.
        if let Some(iasked) = self.count_sent_iwant.get(peer_id) {
            if *iasked >= self.config.max_ihave_length() {
                tracing::debug!(
                    peer=%peer_id,
                    "IHAVE: peer has already advertised too many messages ({}); ignoring",
                    *iasked
                );
                return;
            }
        }

        tracing::trace!(peer=%peer_id, "Handling IHAVE for peer");

        let mut iwant_ids = HashSet::new();

        // We want a message iff we haven't seen it, haven't already asked for it,
        // and no outstanding gossip promise exists for it.
        let want_message = |id: &MessageId| {
            if self.duplicate_cache.contains(id) {
                return false;
            }

            if self.pending_iwant_msgs.contains(id) {
                return false;
            }

            self.peer_score
                .as_ref()
                .map(|(_, _, _, promises)| !promises.contains(id))
                .unwrap_or(true)
        };

        for (topic, ids) in ihave_msgs {
            // Only consider gossip for topics we are subscribed to (in the mesh).
            if !self.mesh.contains_key(&topic) {
                tracing::debug!(
                    %topic,
                    "IHAVE: Ignoring IHAVE - Not subscribed to topic"
                );
                continue;
            }

            for id in ids.into_iter().filter(want_message) {
                if iwant_ids.insert(id) {
                    // Registering via the metrics only for the first insertion.
                    if let Some(metrics) = self.metrics.as_mut() {
                        metrics.register_iwant(&topic);
                    }
                }
            }
        }

        if !iwant_ids.is_empty() {
            let iasked = self.count_sent_iwant.entry(*peer_id).or_insert(0);
            let mut iask = iwant_ids.len();
            // Never exceed the per-peer request cap across this heartbeat.
            if *iasked + iask > self.config.max_ihave_length() {
                iask = self.config.max_ihave_length().saturating_sub(*iasked);
            }

            tracing::debug!(
                peer=%peer_id,
                "IHAVE: Asking for {} out of {} messages from peer",
                iask,
                iwant_ids.len()
            );

            // Shuffle so the subset we keep after truncation is random.
            let mut iwant_ids_vec: Vec<_> = iwant_ids.into_iter().collect();
            let mut rng = thread_rng();
            iwant_ids_vec.partial_shuffle(&mut rng, iask);

            iwant_ids_vec.truncate(iask);
            *iasked += iask;

            // Track requested ids so a repeated IHAVE doesn't re-request them.
            for message_id in &iwant_ids_vec {
                self.pending_iwant_msgs.insert(message_id.clone());
            }

            // Record a gossip promise: the peer must deliver within the follow-up
            // window or be penalized by the scoring system.
            if let Some((_, _, _, gossip_promises)) = &mut self.peer_score {
                gossip_promises.add_promise(
                    *peer_id,
                    &iwant_ids_vec,
                    Instant::now() + self.config.iwant_followup_time(),
                );
            }
            tracing::trace!(
                peer=%peer_id,
                "IHAVE: Asking for the following messages from peer: {:?}",
                iwant_ids_vec
            );

            Self::control_pool_add(
                &mut self.control_pool,
                *peer_id,
                ControlAction::IWant {
                    message_ids: iwant_ids_vec,
                },
            );
        }
        tracing::trace!(peer=%peer_id, "Completed IHAVE handling for peer");
    }
|
|
|
|
|
|
|
|
|
|
|
fn handle_iwant(&mut self, peer_id: &PeerId, iwant_msgs: Vec<MessageId>) { |
|
|
|
|
|
if let (true, score) = self.score_below_threshold(peer_id, |pst| pst.gossip_threshold) { |
|
|
tracing::debug!( |
|
|
peer=%peer_id, |
|
|
"IWANT: ignoring peer with score below threshold [score = {}]", |
|
|
score |
|
|
); |
|
|
return; |
|
|
} |
|
|
|
|
|
tracing::debug!(peer=%peer_id, "Handling IWANT for peer"); |
|
|
|
|
|
for id in iwant_msgs { |
|
|
|
|
|
|
|
|
if let Some((msg, count)) = self |
|
|
.mcache |
|
|
.get_with_iwant_counts(&id, peer_id) |
|
|
.map(|(msg, count)| (msg.clone(), count)) |
|
|
{ |
|
|
if count > self.config.gossip_retransimission() { |
|
|
tracing::debug!( |
|
|
peer=%peer_id, |
|
|
message=%id, |
|
|
"IWANT: Peer has asked for message too many times; ignoring request" |
|
|
); |
|
|
} else { |
|
|
tracing::debug!(peer=%peer_id, "IWANT: Sending cached messages to peer"); |
|
|
self.send_message(*peer_id, RpcOut::Forward(msg)); |
|
|
} |
|
|
} |
|
|
} |
|
|
tracing::debug!(peer=%peer_id, "Completed IWANT handling for peer"); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /// Handles a GRAFT control message: the sender asks to be added to our
    /// mesh for each listed topic. Requests may be refused (answered with a
    /// PRUNE) for direct peers, peers grafting within their backoff window,
    /// negatively scored peers, unknown topics, or an already-full mesh.
    fn handle_graft(&mut self, peer_id: &PeerId, topics: Vec<TopicHash>) {
        tracing::debug!(peer=%peer_id, "Handling GRAFT message for peer");

        // Topics we refuse; each gets a PRUNE response at the end.
        let mut to_prune_topics = HashSet::new();

        // Whether PRUNE responses may carry peer-exchange info; cleared when
        // the sender misbehaves or is not eligible for PX.
        let mut do_px = self.config.do_px();

        // A GRAFT implies the sender is subscribed to these topics: record
        // the peer <-> topic associations in both directions.
        for topic in &topics {
            self.peer_topics
                .entry(*peer_id)
                .or_default()
                .insert(topic.clone());
            self.topic_peers
                .entry(topic.clone())
                .or_default()
                .insert(*peer_id);
        }

        // Direct (explicit) peers are never meshed with: refuse every topic
        // and send PX-free PRUNEs.
        if self.explicit_peers.contains(peer_id) {
            tracing::warn!(peer=%peer_id, "GRAFT: ignoring request from direct peer");
            // This is a direct peer: refuse the whole request.
            to_prune_topics = topics.into_iter().collect();
            // No PX for direct peers.
            do_px = false
        } else {
            let (below_zero, score) = self.score_below_threshold(peer_id, |_| 0.0);
            let now = Instant::now();
            for topic_hash in topics {
                if let Some(peers) = self.mesh.get_mut(&topic_hash) {
                    // Nothing to do if the peer is already meshed for this topic.
                    if peers.contains(peer_id) {
                        tracing::debug!(
                            peer=%peer_id,
                            topic=%&topic_hash,
                            "GRAFT: Received graft for peer that is already in topic"
                        );
                        continue;
                    }

                    // A peer we pruned must respect the backoff period before
                    // grafting again; violating it is penalized.
                    if let Some(backoff_time) = self.backoffs.get_backoff_time(&topic_hash, peer_id)
                    {
                        if backoff_time > now {
                            tracing::warn!(
                                peer=%peer_id,
                                "[Penalty] Peer attempted graft within backoff time, penalizing"
                            );
                            // Apply a score penalty when scoring is enabled.
                            if let Some((peer_score, ..)) = &mut self.peer_score {
                                if let Some(metrics) = self.metrics.as_mut() {
                                    metrics.register_score_penalty(Penalty::GraftBackoff);
                                }
                                peer_score.add_penalty(peer_id, 1);

                                // An extra penalty applies for "flooding":
                                // grafting very shortly after being pruned.
                                // The cutoff is the PRUNE time (backoff start)
                                // plus the configured flood threshold.
                                #[allow(unknown_lints, clippy::unchecked_duration_subtraction)]
                                let flood_cutoff = (backoff_time
                                    + self.config.graft_flood_threshold())
                                    - self.config.prune_backoff();
                                if flood_cutoff > now {
                                    // Extra penalty for the flood.
                                    peer_score.add_penalty(peer_id, 1);
                                }
                            }
                            // No PX for backoff violators; refuse this topic.
                            do_px = false;
                            to_prune_topics.insert(topic_hash.clone());
                            continue;
                        }
                    }

                    // Refuse grafts from negatively scored peers (no PX either).
                    if below_zero {
                        tracing::debug!(
                            peer=%peer_id,
                            %score,
                            topic=%topic_hash,
                            "GRAFT: ignoring peer with negative score"
                        );
                        to_prune_topics.insert(topic_hash.clone());
                        do_px = false;
                        continue;
                    }

                    // Refuse if the mesh is already full — unless the peer is
                    // one of our outbound connections, which are always kept
                    // to maintain the outbound quota.
                    if peers.len() >= self.config.mesh_n_high()
                        && !self.outbound_peers.contains(peer_id)
                    {
                        to_prune_topics.insert(topic_hash.clone());
                        continue;
                    }

                    // All checks passed: accept the graft.
                    tracing::debug!(
                        peer=%peer_id,
                        topic=%topic_hash,
                        "GRAFT: Mesh link added for peer in topic"
                    );

                    if peers.insert(*peer_id) {
                        if let Some(m) = self.metrics.as_mut() {
                            m.peers_included(&topic_hash, Inclusion::Subscribed, 1)
                        }
                    }

                    // Notify the handler layer that this peer is now meshed.
                    peer_added_to_mesh(
                        *peer_id,
                        vec![&topic_hash],
                        &self.mesh,
                        self.peer_topics.get(peer_id),
                        &mut self.events,
                        &self.connected_peers,
                    );

                    // Inform the scoring machinery of the new mesh link.
                    if let Some((peer_score, ..)) = &mut self.peer_score {
                        peer_score.graft(peer_id, topic_hash);
                    }
                } else {
                    // We are not subscribed to this topic: refuse, with no PX.
                    do_px = false;
                    tracing::debug!(
                        peer=%peer_id,
                        topic=%topic_hash,
                        "GRAFT: Received graft for unknown topic from peer"
                    );
                    // The spec does not send a PRUNE for unknown topics.
                    continue;
                }
            }
        }

        // Answer every refused topic with a PRUNE (possibly carrying PX).
        if !to_prune_topics.is_empty() {
            // This PRUNE is a refusal, not caused by our own unsubscription.
            let on_unsubscribe = false;
            for action in to_prune_topics
                .iter()
                .map(|t| self.make_prune(t, peer_id, do_px, on_unsubscribe))
                .collect::<Vec<_>>()
            {
                self.send_message(*peer_id, RpcOut::Control(action));
            }

            tracing::debug!(
                peer=%peer_id,
                "GRAFT: Not subscribed to topics - Sending PRUNE to peer"
            );
        }
        tracing::debug!(peer=%peer_id, "Completed GRAFT handling for peer");
    }
|
|
|
|
|
fn remove_peer_from_mesh( |
|
|
&mut self, |
|
|
peer_id: &PeerId, |
|
|
topic_hash: &TopicHash, |
|
|
backoff: Option<u64>, |
|
|
always_update_backoff: bool, |
|
|
reason: Churn, |
|
|
) { |
|
|
let mut update_backoff = always_update_backoff; |
|
|
if let Some(peers) = self.mesh.get_mut(topic_hash) { |
|
|
|
|
|
if peers.remove(peer_id) { |
|
|
tracing::debug!( |
|
|
peer=%peer_id, |
|
|
topic=%topic_hash, |
|
|
"PRUNE: Removing peer from the mesh for topic" |
|
|
); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_removed(topic_hash, reason, 1) |
|
|
} |
|
|
|
|
|
if let Some((peer_score, ..)) = &mut self.peer_score { |
|
|
peer_score.prune(peer_id, topic_hash.clone()); |
|
|
} |
|
|
|
|
|
update_backoff = true; |
|
|
|
|
|
|
|
|
peer_removed_from_mesh( |
|
|
*peer_id, |
|
|
topic_hash, |
|
|
&self.mesh, |
|
|
self.peer_topics.get(peer_id), |
|
|
&mut self.events, |
|
|
&self.connected_peers, |
|
|
); |
|
|
} |
|
|
} |
|
|
if update_backoff { |
|
|
let time = if let Some(backoff) = backoff { |
|
|
Duration::from_secs(backoff) |
|
|
} else { |
|
|
self.config.prune_backoff() |
|
|
}; |
|
|
|
|
|
self.backoffs.update_backoff(topic_hash, peer_id, time); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
fn handle_prune( |
|
|
&mut self, |
|
|
peer_id: &PeerId, |
|
|
prune_data: Vec<(TopicHash, Vec<PeerInfo>, Option<u64>)>, |
|
|
) { |
|
|
tracing::debug!(peer=%peer_id, "Handling PRUNE message for peer"); |
|
|
let (below_threshold, score) = |
|
|
self.score_below_threshold(peer_id, |pst| pst.accept_px_threshold); |
|
|
for (topic_hash, px, backoff) in prune_data { |
|
|
self.remove_peer_from_mesh(peer_id, &topic_hash, backoff, true, Churn::Prune); |
|
|
|
|
|
if self.mesh.contains_key(&topic_hash) { |
|
|
|
|
|
if !px.is_empty() { |
|
|
|
|
|
if below_threshold { |
|
|
tracing::debug!( |
|
|
peer=%peer_id, |
|
|
%score, |
|
|
topic=%topic_hash, |
|
|
"PRUNE: ignoring PX from peer with insufficient score" |
|
|
); |
|
|
continue; |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if self.config.prune_peers() > 0 { |
|
|
self.px_connect(px); |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
tracing::debug!(peer=%peer_id, "Completed PRUNE handling for peer"); |
|
|
} |
|
|
|
|
|
fn px_connect(&mut self, mut px: Vec<PeerInfo>) { |
|
|
let n = self.config.prune_peers(); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
px.retain(|p| p.peer_id.is_some()); |
|
|
if px.len() > n { |
|
|
|
|
|
let mut rng = thread_rng(); |
|
|
px.partial_shuffle(&mut rng, n); |
|
|
px = px.into_iter().take(n).collect(); |
|
|
} |
|
|
|
|
|
for p in px { |
|
|
|
|
|
|
|
|
if let Some(peer_id) = p.peer_id { |
|
|
|
|
|
self.px_peers.insert(peer_id); |
|
|
|
|
|
|
|
|
self.events.push_back(ToSwarm::Dial { |
|
|
opts: DialOpts::peer_id(peer_id).build(), |
|
|
}); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /// Performs the fast, local validity checks on a freshly received message
    /// and returns whether processing should continue. Rejections are
    /// reported to the scoring/promise machinery. May mark the message as
    /// validated (when application-level validation is disabled).
    fn message_is_valid(
        &mut self,
        msg_id: &MessageId,
        raw_message: &mut RawMessage,
        propagation_source: &PeerId,
    ) -> bool {
        tracing::debug!(
            peer=%propagation_source,
            message=%msg_id,
            "Handling message from peer"
        );

        // Reject messages relayed by a blacklisted peer. Note: this path
        // reports the rejection directly (score + promises) rather than via
        // handle_invalid_message, since the raw message itself may be fine.
        if self.blacklisted_peers.contains(propagation_source) {
            tracing::debug!(
                peer=%propagation_source,
                "Rejecting message from blacklisted peer"
            );
            if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
                peer_score.reject_message(
                    propagation_source,
                    msg_id,
                    &raw_message.topic,
                    RejectReason::BlackListedPeer,
                );
                gossip_promises.reject_message(msg_id, &RejectReason::BlackListedPeer);
            }
            return false;
        }

        // Also reject messages whose claimed *origin* is blacklisted, even if
        // the relaying peer is not.
        if let Some(source) = raw_message.source.as_ref() {
            if self.blacklisted_peers.contains(source) {
                tracing::debug!(
                    peer=%propagation_source,
                    %source,
                    "Rejecting message from peer because of blacklisted source"
                );
                self.handle_invalid_message(
                    propagation_source,
                    raw_message,
                    RejectReason::BlackListedSource,
                );
                return false;
            }
        }

        // If the application does not do its own validation, every message
        // that reaches this point counts as validated.
        if !self.config.validate_messages() {
            raw_message.validated = true;
        }

        // Detect messages that claim to originate from ourselves but arrived
        // from another peer (disallowed unless allow_self_origin is set).
        // With message signing enabled we compare against our own peer id;
        // otherwise we fall back to the set of message ids we published.
        let self_published = !self.config.allow_self_origin()
            && if let Some(own_id) = self.publish_config.get_own_id() {
                own_id != propagation_source
                    && raw_message.source.as_ref().map_or(false, |s| s == own_id)
            } else {
                self.published_message_ids.contains(msg_id)
            };

        if self_published {
            tracing::debug!(
                message=%msg_id,
                source=%propagation_source,
                "Dropping message claiming to be from self but forwarded from source"
            );
            self.handle_invalid_message(propagation_source, raw_message, RejectReason::SelfOrigin);
            return false;
        }

        // All local checks passed.
        true
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Processes a message received from `propagation_source`: applies the
    /// inbound data transform, runs validity checks, de-duplicates, records
    /// score/promise deliveries, caches the message, emits it to the
    /// application (if we are subscribed), and — when application validation
    /// is disabled — forwards it immediately.
    fn handle_received_message(
        &mut self,
        mut raw_message: RawMessage,
        propagation_source: &PeerId,
    ) {
        // Record the raw (pre-filter) message, including its wire size.
        if let Some(metrics) = self.metrics.as_mut() {
            metrics.msg_recvd_unfiltered(&raw_message.topic, raw_message.raw_protobuf_len());
        }

        // Apply the application's inbound transform (e.g. decompression);
        // failure rejects the message.
        let message = match self.data_transform.inbound_transform(raw_message.clone()) {
            Ok(message) => message,
            Err(e) => {
                tracing::debug!("Invalid message. Transform error: {:?}", e);
                // Report the rejection to scoring/promises.
                self.handle_invalid_message(
                    propagation_source,
                    &raw_message,
                    RejectReason::ValidationError(ValidationError::TransformFailed),
                );
                return;
            }
        };

        // The message id is computed on the *transformed* message.
        let msg_id = self.config.message_id(&message);

        // Fast local checks (blacklists, self-origin); may set `validated`.
        if !self.message_is_valid(&msg_id, &mut raw_message, propagation_source) {
            return;
        }

        // De-duplicate: a repeat delivery only updates score bookkeeping and
        // the cache's duplicate observation, then stops.
        if !self.duplicate_cache.insert(msg_id.clone()) {
            tracing::debug!(message=%msg_id, "Message already received, ignoring");
            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.duplicated_message(propagation_source, &msg_id, &message.topic);
            }
            self.mcache.observe_duplicate(&msg_id, propagation_source);
            return;
        }
        tracing::debug!(
            message=%msg_id,
            "Put message in duplicate_cache and resolve promises"
        );

        // Record the message as accepted past the duplicate filter.
        if let Some(metrics) = self.metrics.as_mut() {
            metrics.msg_recvd(&message.topic);
        }

        // First delivery: credit the sender and resolve any outstanding IWANT
        // promises for this id.
        if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score {
            peer_score.validate_message(propagation_source, &msg_id, &message.topic);
            gossip_promises.message_delivered(&msg_id);
        }

        // Cache the raw message so it can be served to IWANT requests and
        // gossiped via IHAVE.
        self.mcache.put(&msg_id, raw_message.clone());

        // Deliver to the application only if we are subscribed to the topic;
        // otherwise stop here (no forwarding of messages for topics we left).
        if self.mesh.contains_key(&message.topic) {
            tracing::debug!("Sending received message to user");
            self.events
                .push_back(ToSwarm::GenerateEvent(Event::Message {
                    propagation_source: *propagation_source,
                    message_id: msg_id.clone(),
                    message,
                }));
        } else {
            tracing::debug!(
                topic=%message.topic,
                "Received message on a topic we are not subscribed to"
            );
            return;
        }

        // Without application-level validation the message is forwarded
        // immediately; otherwise forwarding waits for report_message_validation_result.
        if !self.config.validate_messages() {
            if self
                .forward_msg(
                    &msg_id,
                    raw_message,
                    Some(propagation_source),
                    HashSet::new(),
                )
                .is_err()
            {
                tracing::error!("Failed to forward message. Too large");
            }
            tracing::debug!(message=%msg_id, "Completed message handling for message");
        }
    }
|
|
|
|
|
|
|
|
fn handle_invalid_message( |
|
|
&mut self, |
|
|
propagation_source: &PeerId, |
|
|
raw_message: &RawMessage, |
|
|
reject_reason: RejectReason, |
|
|
) { |
|
|
if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { |
|
|
if let Some(metrics) = self.metrics.as_mut() { |
|
|
metrics.register_invalid_message(&raw_message.topic); |
|
|
} |
|
|
|
|
|
if let Ok(message) = self.data_transform.inbound_transform(raw_message.clone()) { |
|
|
let message_id = self.config.message_id(&message); |
|
|
|
|
|
peer_score.reject_message( |
|
|
propagation_source, |
|
|
&message_id, |
|
|
&message.topic, |
|
|
reject_reason, |
|
|
); |
|
|
|
|
|
gossip_promises.reject_message(&message_id, &reject_reason); |
|
|
} else { |
|
|
|
|
|
|
|
|
|
|
|
peer_score.reject_invalid_message(propagation_source, &raw_message.topic); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
    /// Handles a batch of subscription updates from `propagation_source`:
    /// runs them through the subscription filter, updates the peer/topic
    /// bookkeeping, opportunistically grafts eligible subscribers into
    /// under-populated meshes, and emits Subscribed/Unsubscribed events.
    fn handle_received_subscriptions(
        &mut self,
        subscriptions: &[Subscription],
        propagation_source: &PeerId,
    ) {
        tracing::debug!(
            source=%propagation_source,
            "Handling subscriptions: {:?}",
            subscriptions,
        );

        // (peer, topic) pairs to remove from the mesh after the main loop.
        let mut unsubscribed_peers = Vec::new();

        // The set of topics we currently believe this peer subscribes to;
        // a peer unknown here has not completed connection bookkeeping.
        let Some(subscribed_topics) = self.peer_topics.get_mut(propagation_source) else {
            tracing::error!(
                peer=%propagation_source,
                "Subscription by unknown peer"
            );
            return;
        };

        // Topics for which we will send this peer a GRAFT after the loop.
        let mut topics_to_graft = Vec::new();

        // Application events are buffered and emitted at the end, after all
        // internal state (mesh, backoffs, scores) is consistent.
        let mut application_event = Vec::new();

        // Let the configured filter veto or trim the incoming subscriptions.
        let filtered_topics = match self
            .subscription_filter
            .filter_incoming_subscriptions(subscriptions, subscribed_topics)
        {
            Ok(topics) => topics,
            Err(s) => {
                tracing::error!(
                    peer=%propagation_source,
                    "Subscription filter error: {}; ignoring RPC from peer",
                    s
                );
                return;
            }
        };

        for subscription in filtered_topics {
            // All peers we know to be subscribed to this topic.
            let topic_hash = &subscription.topic_hash;
            let peer_list = self.topic_peers.entry(topic_hash.clone()).or_default();

            match subscription.action {
                SubscriptionAction::Subscribe => {
                    if peer_list.insert(*propagation_source) {
                        tracing::debug!(
                            peer=%propagation_source,
                            topic=%topic_hash,
                            "SUBSCRIPTION: Adding gossip peer to topic"
                        );
                    }

                    // Record the topic on the peer's own subscription set.
                    subscribed_topics.insert(topic_hash.clone());

                    // Consider grafting the new subscriber into our mesh:
                    // only non-explicit, gossipsub-capable peers that are not
                    // negatively scored and not in backoff for this topic.
                    if !self.explicit_peers.contains(propagation_source)
                        && matches!(
                            self.connected_peers
                                .get(propagation_source)
                                .map(|v| &v.kind),
                            Some(PeerKind::Gossipsubv1_1) | Some(PeerKind::Gossipsub)
                        )
                        && !Self::score_below_threshold_from_scores(
                            &self.peer_score,
                            propagation_source,
                            |_| 0.0,
                        )
                        .0
                        && !self
                            .backoffs
                            .is_backoff_with_slack(topic_hash, propagation_source)
                    {
                        // Only graft while the mesh is under mesh_n_low.
                        if let Some(peers) = self.mesh.get_mut(topic_hash) {
                            if peers.len() < self.config.mesh_n_low()
                                && peers.insert(*propagation_source)
                            {
                                tracing::debug!(
                                    peer=%propagation_source,
                                    topic=%topic_hash,
                                    "SUBSCRIPTION: Adding peer to the mesh for topic"
                                );
                                if let Some(m) = self.metrics.as_mut() {
                                    m.peers_included(topic_hash, Inclusion::Subscribed, 1)
                                }

                                tracing::debug!(
                                    peer=%propagation_source,
                                    topic=%topic_hash,
                                    "Sending GRAFT to peer for topic"
                                );
                                if let Some((peer_score, ..)) = &mut self.peer_score {
                                    peer_score.graft(propagation_source, topic_hash.clone());
                                }
                                // The actual GRAFT message is sent below.
                                topics_to_graft.push(topic_hash.clone());
                            }
                        }
                    }

                    // Surface the subscription to the application.
                    application_event.push(ToSwarm::GenerateEvent(Event::Subscribed {
                        peer_id: *propagation_source,
                        topic: topic_hash.clone(),
                    }));
                }
                SubscriptionAction::Unsubscribe => {
                    if peer_list.remove(propagation_source) {
                        tracing::debug!(
                            peer=%propagation_source,
                            topic=%topic_hash,
                            "SUBSCRIPTION: Removing gossip peer from topic"
                        );
                    }

                    // Drop the topic from the peer's subscription set and
                    // queue the mesh removal (done after this loop).
                    subscribed_topics.remove(topic_hash);
                    unsubscribed_peers.push((*propagation_source, topic_hash.clone()));

                    // Surface the unsubscription to the application.
                    application_event.push(ToSwarm::GenerateEvent(Event::Unsubscribed {
                        peer_id: *propagation_source,
                        topic: topic_hash.clone(),
                    }));
                }
            }

            if let Some(m) = self.metrics.as_mut() {
                m.set_topic_peers(topic_hash, peer_list.len());
            }
        }

        // Remove unsubscribed peers from the mesh. No backoff refresh is
        // forced (always_update_backoff = false): this is their choice, not a
        // prune of ours.
        for (peer_id, topic_hash) in unsubscribed_peers {
            self.remove_peer_from_mesh(&peer_id, &topic_hash, None, false, Churn::Unsub);
        }

        // Notify the handler layer about all mesh additions in one call.
        let topics_joined = topics_to_graft.iter().collect::<Vec<_>>();
        if !topics_joined.is_empty() {
            peer_added_to_mesh(
                *propagation_source,
                topics_joined,
                &self.mesh,
                self.peer_topics.get(propagation_source),
                &mut self.events,
                &self.connected_peers,
            );
        }

        // Send the queued GRAFT control messages to the peer.
        for action in topics_to_graft
            .into_iter()
            .map(|topic_hash| ControlAction::Graft { topic_hash })
            .collect::<Vec<_>>()
        {
            self.send_message(*propagation_source, RpcOut::Control(action))
        }

        // Finally emit the buffered application events.
        for event in application_event {
            self.events.push_back(event);
        }

        tracing::trace!(
            source=%propagation_source,
            "Completed handling subscriptions from source"
        );
    }
|
|
|
|
|
|
|
|
fn apply_iwant_penalties(&mut self) { |
|
|
if let Some((peer_score, .., gossip_promises)) = &mut self.peer_score { |
|
|
for (peer, count) in gossip_promises.get_broken_promises() { |
|
|
peer_score.add_penalty(&peer, count); |
|
|
if let Some(metrics) = self.metrics.as_mut() { |
|
|
metrics.register_score_penalty(Penalty::BrokenPromise); |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
fn heartbeat(&mut self) { |
|
|
tracing::debug!("Starting heartbeat"); |
|
|
let start = Instant::now(); |
|
|
|
|
|
self.heartbeat_ticks += 1; |
|
|
|
|
|
let mut to_graft = HashMap::new(); |
|
|
let mut to_prune = HashMap::new(); |
|
|
let mut no_px = HashSet::new(); |
|
|
|
|
|
|
|
|
self.backoffs.heartbeat(); |
|
|
|
|
|
|
|
|
self.count_sent_iwant.clear(); |
|
|
self.count_received_ihave.clear(); |
|
|
|
|
|
|
|
|
self.apply_iwant_penalties(); |
|
|
|
|
|
|
|
|
if self.heartbeat_ticks % self.config.check_explicit_peers_ticks() == 0 { |
|
|
for p in self.explicit_peers.clone() { |
|
|
self.check_explicit_peer_connection(&p); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
let mut scores = HashMap::with_capacity(self.connected_peers.len()); |
|
|
if let Some((peer_score, ..)) = &self.peer_score { |
|
|
for peer_id in self.connected_peers.keys() { |
|
|
scores |
|
|
.entry(peer_id) |
|
|
.or_insert_with(|| peer_score.metric_score(peer_id, self.metrics.as_mut())); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
for (topic_hash, peers) in self.mesh.iter_mut() { |
|
|
let explicit_peers = &self.explicit_peers; |
|
|
let backoffs = &self.backoffs; |
|
|
let topic_peers = &self.topic_peers; |
|
|
let outbound_peers = &self.outbound_peers; |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
let mut to_remove_peers = Vec::new(); |
|
|
for peer_id in peers.iter() { |
|
|
let peer_score = *scores.get(peer_id).unwrap_or(&0.0); |
|
|
|
|
|
|
|
|
if let Some(metrics) = self.metrics.as_mut() { |
|
|
metrics.observe_mesh_peers_score(topic_hash, peer_score); |
|
|
} |
|
|
|
|
|
if peer_score < 0.0 { |
|
|
tracing::debug!( |
|
|
peer=%peer_id, |
|
|
score=%peer_score, |
|
|
topic=%topic_hash, |
|
|
"HEARTBEAT: Prune peer with negative score" |
|
|
); |
|
|
|
|
|
let current_topic = to_prune.entry(*peer_id).or_insert_with(Vec::new); |
|
|
current_topic.push(topic_hash.clone()); |
|
|
no_px.insert(*peer_id); |
|
|
to_remove_peers.push(*peer_id); |
|
|
} |
|
|
} |
|
|
|
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_removed(topic_hash, Churn::BadScore, to_remove_peers.len()) |
|
|
} |
|
|
|
|
|
for peer_id in to_remove_peers { |
|
|
peers.remove(&peer_id); |
|
|
} |
|
|
|
|
|
|
|
|
if peers.len() < self.config.mesh_n_low() { |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"HEARTBEAT: Mesh low. Topic contains: {} needs: {}", |
|
|
peers.len(), |
|
|
self.config.mesh_n_low() |
|
|
); |
|
|
|
|
|
let desired_peers = self.config.mesh_n() - peers.len(); |
|
|
let peer_list = get_random_peers( |
|
|
topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
desired_peers, |
|
|
|peer| { |
|
|
!peers.contains(peer) |
|
|
&& !explicit_peers.contains(peer) |
|
|
&& !backoffs.is_backoff_with_slack(topic_hash, peer) |
|
|
&& *scores.get(peer).unwrap_or(&0.0) >= 0.0 |
|
|
}, |
|
|
); |
|
|
for peer in &peer_list { |
|
|
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); |
|
|
current_topic.push(topic_hash.clone()); |
|
|
} |
|
|
|
|
|
tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) |
|
|
} |
|
|
peers.extend(peer_list); |
|
|
} |
|
|
|
|
|
|
|
|
if peers.len() > self.config.mesh_n_high() { |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"HEARTBEAT: Mesh high. Topic contains: {} needs: {}", |
|
|
peers.len(), |
|
|
self.config.mesh_n_high() |
|
|
); |
|
|
let excess_peer_no = peers.len() - self.config.mesh_n(); |
|
|
|
|
|
|
|
|
let mut rng = thread_rng(); |
|
|
let mut shuffled = peers.iter().copied().collect::<Vec<_>>(); |
|
|
shuffled.shuffle(&mut rng); |
|
|
shuffled.sort_by(|p1, p2| { |
|
|
let score_p1 = *scores.get(p1).unwrap_or(&0.0); |
|
|
let score_p2 = *scores.get(p2).unwrap_or(&0.0); |
|
|
|
|
|
score_p1.partial_cmp(&score_p2).unwrap_or(Ordering::Equal) |
|
|
}); |
|
|
|
|
|
shuffled[..peers.len() - self.config.retain_scores()].shuffle(&mut rng); |
|
|
|
|
|
|
|
|
let mut outbound = { |
|
|
let outbound_peers = &self.outbound_peers; |
|
|
shuffled |
|
|
.iter() |
|
|
.filter(|p| outbound_peers.contains(*p)) |
|
|
.count() |
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
let mut removed = 0; |
|
|
for peer in shuffled { |
|
|
if removed == excess_peer_no { |
|
|
break; |
|
|
} |
|
|
if self.outbound_peers.contains(&peer) { |
|
|
if outbound <= self.config.mesh_outbound_min() { |
|
|
|
|
|
continue; |
|
|
} else { |
|
|
|
|
|
outbound -= 1; |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
peers.remove(&peer); |
|
|
let current_topic = to_prune.entry(peer).or_insert_with(Vec::new); |
|
|
current_topic.push(topic_hash.clone()); |
|
|
removed += 1; |
|
|
} |
|
|
|
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_removed(topic_hash, Churn::Excess, removed) |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
if peers.len() >= self.config.mesh_n_low() { |
|
|
|
|
|
let outbound = { peers.iter().filter(|p| outbound_peers.contains(*p)).count() }; |
|
|
|
|
|
|
|
|
if outbound < self.config.mesh_outbound_min() { |
|
|
let needed = self.config.mesh_outbound_min() - outbound; |
|
|
let peer_list = get_random_peers( |
|
|
topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
needed, |
|
|
|peer| { |
|
|
!peers.contains(peer) |
|
|
&& !explicit_peers.contains(peer) |
|
|
&& !backoffs.is_backoff_with_slack(topic_hash, peer) |
|
|
&& *scores.get(peer).unwrap_or(&0.0) >= 0.0 |
|
|
&& outbound_peers.contains(peer) |
|
|
}, |
|
|
); |
|
|
for peer in &peer_list { |
|
|
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); |
|
|
current_topic.push(topic_hash.clone()); |
|
|
} |
|
|
|
|
|
tracing::debug!("Updating mesh, new mesh: {:?}", peer_list); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_included(topic_hash, Inclusion::Outbound, peer_list.len()) |
|
|
} |
|
|
peers.extend(peer_list); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
if self.heartbeat_ticks % self.config.opportunistic_graft_ticks() == 0 |
|
|
&& peers.len() > 1 |
|
|
&& self.peer_score.is_some() |
|
|
{ |
|
|
if let Some((_, thresholds, _, _)) = &self.peer_score { |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
let mut peers_by_score: Vec<_> = peers.iter().collect(); |
|
|
peers_by_score.sort_by(|p1, p2| { |
|
|
let p1_score = *scores.get(p1).unwrap_or(&0.0); |
|
|
let p2_score = *scores.get(p2).unwrap_or(&0.0); |
|
|
p1_score.partial_cmp(&p2_score).unwrap_or(Equal) |
|
|
}); |
|
|
|
|
|
let middle = peers_by_score.len() / 2; |
|
|
let median = if peers_by_score.len() % 2 == 0 { |
|
|
let sub_middle_peer = *peers_by_score |
|
|
.get(middle - 1) |
|
|
.expect("middle < vector length and middle > 0 since peers.len() > 0"); |
|
|
let sub_middle_score = *scores.get(sub_middle_peer).unwrap_or(&0.0); |
|
|
let middle_peer = |
|
|
*peers_by_score.get(middle).expect("middle < vector length"); |
|
|
let middle_score = *scores.get(middle_peer).unwrap_or(&0.0); |
|
|
|
|
|
(sub_middle_score + middle_score) * 0.5 |
|
|
} else { |
|
|
*scores |
|
|
.get(*peers_by_score.get(middle).expect("middle < vector length")) |
|
|
.unwrap_or(&0.0) |
|
|
}; |
|
|
|
|
|
|
|
|
|
|
|
if median < thresholds.opportunistic_graft_threshold { |
|
|
let peer_list = get_random_peers( |
|
|
topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
self.config.opportunistic_graft_peers(), |
|
|
|peer_id| { |
|
|
!peers.contains(peer_id) |
|
|
&& !explicit_peers.contains(peer_id) |
|
|
&& !backoffs.is_backoff_with_slack(topic_hash, peer_id) |
|
|
&& *scores.get(peer_id).unwrap_or(&0.0) > median |
|
|
}, |
|
|
); |
|
|
for peer in &peer_list { |
|
|
let current_topic = to_graft.entry(*peer).or_insert_with(Vec::new); |
|
|
current_topic.push(topic_hash.clone()); |
|
|
} |
|
|
|
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"Opportunistically graft in topic with peers {:?}", |
|
|
peer_list |
|
|
); |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.peers_included(topic_hash, Inclusion::Random, peer_list.len()) |
|
|
} |
|
|
peers.extend(peer_list); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
m.set_mesh_peers(topic_hash, peers.len()) |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
{ |
|
|
let fanout = &mut self.fanout; |
|
|
let fanout_ttl = self.config.fanout_ttl(); |
|
|
self.fanout_last_pub.retain(|topic_hash, last_pub_time| { |
|
|
if *last_pub_time + fanout_ttl < Instant::now() { |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"HEARTBEAT: Fanout topic removed due to timeout" |
|
|
); |
|
|
fanout.remove(topic_hash); |
|
|
return false; |
|
|
} |
|
|
true |
|
|
}); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
for (topic_hash, peers) in self.fanout.iter_mut() { |
|
|
let mut to_remove_peers = Vec::new(); |
|
|
let publish_threshold = match &self.peer_score { |
|
|
Some((_, thresholds, _, _)) => thresholds.publish_threshold, |
|
|
_ => 0.0, |
|
|
}; |
|
|
for peer in peers.iter() { |
|
|
|
|
|
let peer_score = *scores.get(peer).unwrap_or(&0.0); |
|
|
match self.peer_topics.get(peer) { |
|
|
Some(topics) => { |
|
|
if !topics.contains(topic_hash) || peer_score < publish_threshold { |
|
|
tracing::debug!( |
|
|
topic=%topic_hash, |
|
|
"HEARTBEAT: Peer removed from fanout for topic" |
|
|
); |
|
|
to_remove_peers.push(*peer); |
|
|
} |
|
|
} |
|
|
None => { |
|
|
|
|
|
to_remove_peers.push(*peer); |
|
|
} |
|
|
} |
|
|
} |
|
|
for to_remove in to_remove_peers { |
|
|
peers.remove(&to_remove); |
|
|
} |
|
|
|
|
|
|
|
|
if peers.len() < self.config.mesh_n() { |
|
|
tracing::debug!( |
|
|
"HEARTBEAT: Fanout low. Contains: {:?} needs: {:?}", |
|
|
peers.len(), |
|
|
self.config.mesh_n() |
|
|
); |
|
|
let needed_peers = self.config.mesh_n() - peers.len(); |
|
|
let explicit_peers = &self.explicit_peers; |
|
|
let new_peers = get_random_peers( |
|
|
&self.topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
needed_peers, |
|
|
|peer_id| { |
|
|
!peers.contains(peer_id) |
|
|
&& !explicit_peers.contains(peer_id) |
|
|
&& *scores.get(peer_id).unwrap_or(&0.0) < publish_threshold |
|
|
}, |
|
|
); |
|
|
peers.extend(new_peers); |
|
|
} |
|
|
} |
|
|
|
|
|
if self.peer_score.is_some() { |
|
|
tracing::trace!("Mesh message deliveries: {:?}", { |
|
|
self.mesh |
|
|
.iter() |
|
|
.map(|(t, peers)| { |
|
|
( |
|
|
t.clone(), |
|
|
peers |
|
|
.iter() |
|
|
.map(|p| { |
|
|
( |
|
|
*p, |
|
|
self.peer_score |
|
|
.as_ref() |
|
|
.expect("peer_score.is_some()") |
|
|
.0 |
|
|
.mesh_message_deliveries(p, t) |
|
|
.unwrap_or(0.0), |
|
|
) |
|
|
}) |
|
|
.collect::<HashMap<PeerId, f64>>(), |
|
|
) |
|
|
}) |
|
|
.collect::<HashMap<TopicHash, HashMap<PeerId, f64>>>() |
|
|
}) |
|
|
} |
|
|
|
|
|
self.emit_gossip(); |
|
|
|
|
|
|
|
|
if !to_graft.is_empty() | !to_prune.is_empty() { |
|
|
self.send_graft_prune(to_graft, to_prune, no_px); |
|
|
} |
|
|
|
|
|
|
|
|
self.flush_control_pool(); |
|
|
|
|
|
|
|
|
self.mcache.shift(); |
|
|
|
|
|
tracing::debug!("Completed Heartbeat"); |
|
|
if let Some(metrics) = self.metrics.as_mut() { |
|
|
let duration = u64::try_from(start.elapsed().as_millis()).unwrap_or(u64::MAX); |
|
|
metrics.observe_heartbeat_duration(duration); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
fn emit_gossip(&mut self) { |
|
|
let mut rng = thread_rng(); |
|
|
for (topic_hash, peers) in self.mesh.iter().chain(self.fanout.iter()) { |
|
|
let mut message_ids = self.mcache.get_gossip_message_ids(topic_hash); |
|
|
if message_ids.is_empty() { |
|
|
continue; |
|
|
} |
|
|
|
|
|
|
|
|
if message_ids.len() > self.config.max_ihave_length() { |
|
|
|
|
|
tracing::debug!( |
|
|
"too many messages for gossip; will truncate IHAVE list ({} messages)", |
|
|
message_ids.len() |
|
|
); |
|
|
} else { |
|
|
|
|
|
message_ids.shuffle(&mut rng); |
|
|
} |
|
|
|
|
|
|
|
|
let n_map = |m| { |
|
|
max( |
|
|
self.config.gossip_lazy(), |
|
|
(self.config.gossip_factor() * m as f64) as usize, |
|
|
) |
|
|
}; |
|
|
|
|
|
let to_msg_peers = get_random_peers_dynamic( |
|
|
&self.topic_peers, |
|
|
&self.connected_peers, |
|
|
topic_hash, |
|
|
n_map, |
|
|
|peer| { |
|
|
!peers.contains(peer) |
|
|
&& !self.explicit_peers.contains(peer) |
|
|
&& !self.score_below_threshold(peer, |ts| ts.gossip_threshold).0 |
|
|
}, |
|
|
); |
|
|
|
|
|
tracing::debug!("Gossiping IHAVE to {} peers", to_msg_peers.len()); |
|
|
|
|
|
for peer in to_msg_peers { |
|
|
let mut peer_message_ids = message_ids.clone(); |
|
|
|
|
|
if peer_message_ids.len() > self.config.max_ihave_length() { |
|
|
|
|
|
|
|
|
|
|
|
peer_message_ids.partial_shuffle(&mut rng, self.config.max_ihave_length()); |
|
|
peer_message_ids.truncate(self.config.max_ihave_length()); |
|
|
} |
|
|
|
|
|
|
|
|
Self::control_pool_add( |
|
|
&mut self.control_pool, |
|
|
peer, |
|
|
ControlAction::IHave { |
|
|
topic_hash: topic_hash.clone(), |
|
|
message_ids: peer_message_ids, |
|
|
}, |
|
|
); |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
    /// Sends the GRAFT and PRUNE control messages accumulated during the
    /// heartbeat. GRAFTs and any PRUNEs destined for the *same* peer are sent
    /// together in one batch; remaining PRUNE-only peers are handled after.
    /// Peers in `no_px` never receive peer-exchange info in their PRUNEs.
    fn send_graft_prune(
        &mut self,
        to_graft: HashMap<PeerId, Vec<TopicHash>>,
        mut to_prune: HashMap<PeerId, Vec<TopicHash>>,
        no_px: HashSet<PeerId>,
    ) {
        // Handle the grafts (and any accompanying prunes for the same peer).
        for (peer, topics) in to_graft.into_iter() {
            for topic in &topics {
                // Inform the scoring machinery of the new mesh link.
                if let Some((peer_score, ..)) = &mut self.peer_score {
                    peer_score.graft(&peer, topic.clone());
                }

                // Notify the handler layer that the peer joined the mesh.
                peer_added_to_mesh(
                    peer,
                    vec![topic],
                    &self.mesh,
                    self.peer_topics.get(&peer),
                    &mut self.events,
                    &self.connected_peers,
                );
            }
            let control_msgs = topics.iter().map(|topic_hash| ControlAction::Graft {
                topic_hash: topic_hash.clone(),
            });

            // If this peer also has pending prunes, take them out of
            // `to_prune` so they are sent in this batch and not again below.
            let prunes = to_prune
                .remove(&peer)
                .into_iter()
                .flatten()
                .map(|topic_hash| {
                    self.make_prune(
                        &topic_hash,
                        &peer,
                        self.config.do_px() && !no_px.contains(&peer),
                        false,
                    )
                });

            // Send the batched GRAFTs followed by the PRUNEs.
            for msg in control_msgs.chain(prunes).collect::<Vec<_>>() {
                self.send_message(peer, RpcOut::Control(msg));
            }
        }

        // Handle the peers with only pending prunes left.
        for (peer, topics) in to_prune.iter() {
            for topic_hash in topics {
                let prune = self.make_prune(
                    topic_hash,
                    peer,
                    self.config.do_px() && !no_px.contains(peer),
                    false,
                );
                self.send_message(*peer, RpcOut::Control(prune));

                // Notify the handler layer that the peer left the mesh.
                peer_removed_from_mesh(
                    *peer,
                    topic_hash,
                    &self.mesh,
                    self.peer_topics.get(peer),
                    &mut self.events,
                    &self.connected_peers,
                );
            }
        }
    }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
    /// Forwards a message to the mesh peers of its topic and to subscribed
    /// explicit peers, excluding the propagation source, the message's
    /// original author, and any peers in `originating_peers` (peers we
    /// already received this message from).
    ///
    /// Returns `Ok(true)` if the message was forwarded to at least one peer,
    /// `Ok(false)` otherwise. The current body never returns `Err` — hence
    /// the clippy allow; the `Result` is kept for the callers' sake.
    #[allow(clippy::unnecessary_wraps)]
    fn forward_msg(
        &mut self,
        msg_id: &MessageId,
        message: RawMessage,
        propagation_source: Option<&PeerId>,
        originating_peers: HashSet<PeerId>,
    ) -> Result<bool, PublishError> {
        // Credit the delivery to the propagation source for peer scoring.
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(peer) = propagation_source {
                peer_score.deliver_message(peer, msg_id, &message.topic);
            }
        }

        tracing::debug!(message=%msg_id, "Forwarding message");
        let mut recipient_peers = HashSet::new();

        {
            // Add explicit peers that are subscribed to the message's topic.
            for peer_id in &self.explicit_peers {
                if let Some(topics) = self.peer_topics.get(peer_id) {
                    if Some(peer_id) != propagation_source
                        && !originating_peers.contains(peer_id)
                        && Some(peer_id) != message.source.as_ref()
                        && topics.contains(&message.topic)
                    {
                        recipient_peers.insert(*peer_id);
                    }
                }
            }

            // Add mesh peers for the message's topic, with the same
            // exclusions (the HashSet deduplicates any overlap).
            let topic = &message.topic;

            if let Some(mesh_peers) = self.mesh.get(topic) {
                for peer_id in mesh_peers {
                    if Some(peer_id) != propagation_source
                        && !originating_peers.contains(peer_id)
                        && Some(peer_id) != message.source.as_ref()
                    {
                        recipient_peers.insert(*peer_id);
                    }
                }
            }
        }

        // Send the message to every selected recipient.
        if !recipient_peers.is_empty() {
            let event = RpcOut::Forward(message.clone());

            for peer in recipient_peers.iter() {
                tracing::debug!(%peer, message=%msg_id, "Sending message to peer");
                self.send_message(*peer, event.clone());
            }
            tracing::debug!("Completed forwarding message");
            Ok(true)
        } else {
            Ok(false)
        }
    }
|
|
|
|
|
|
|
|
    /// Constructs a [`RawMessage`] carrying `data` on `topic`, according to
    /// the configured [`PublishConfig`]: signed with a sequential seqno,
    /// authored with a random seqno, randomly authored, or fully anonymous.
    /// Locally built messages are marked `validated: true`.
    pub(crate) fn build_raw_message(
        &mut self,
        topic: TopicHash,
        data: Vec<u8>,
    ) -> Result<RawMessage, PublishError> {
        match &mut self.publish_config {
            PublishConfig::Signing {
                ref keypair,
                author,
                inline_key,
                last_seq_no,
            } => {
                // Next value of the local sequence-number counter.
                let sequence_number = last_seq_no.next();

                let signature = {
                    // Encode the message exactly as it will appear on the
                    // wire, minus signature and key, since those bytes are
                    // what gets signed.
                    let message = proto::Message {
                        from: Some(author.to_bytes()),
                        data: Some(data.clone()),
                        seqno: Some(sequence_number.to_be_bytes().to_vec()),
                        topic: topic.clone().into_string(),
                        signature: None,
                        key: None,
                    };

                    let mut buf = Vec::with_capacity(message.get_size());
                    let mut writer = Writer::new(&mut buf);

                    message
                        .write_message(&mut writer)
                        .expect("Encoding to succeed");

                    // The signing-domain prefix (SIGNING_PREFIX) is
                    // prepended to the encoded message before signing.
                    let mut signature_bytes = SIGNING_PREFIX.to_vec();
                    signature_bytes.extend_from_slice(&buf);
                    Some(keypair.sign(&signature_bytes)?)
                };

                Ok(RawMessage {
                    source: Some(*author),
                    data,
                    // Per-message sequential seqno (big-endian on the wire).
                    sequence_number: Some(sequence_number),
                    topic,
                    signature,
                    key: inline_key.clone(),
                    validated: true,
                })
            }
            PublishConfig::Author(peer_id) => {
                Ok(RawMessage {
                    source: Some(*peer_id),
                    data,
                    // Unsigned variants use a random sequence number.
                    sequence_number: Some(rand::random()),
                    topic,
                    signature: None,
                    key: None,
                    validated: true,
                })
            }
            PublishConfig::RandomAuthor => {
                Ok(RawMessage {
                    // A fresh random PeerId is used as the apparent author.
                    source: Some(PeerId::random()),
                    data,
                    sequence_number: Some(rand::random()),
                    topic,
                    signature: None,
                    key: None,
                    validated: true,
                })
            }
            PublishConfig::Anonymous => {
                Ok(RawMessage {
                    // No author, seqno, signature, or key at all.
                    source: None,
                    data,
                    sequence_number: None,
                    topic,
                    signature: None,
                    key: None,
                    validated: true,
                })
            }
        }
    }
|
|
|
|
|
|
|
|
fn control_pool_add( |
|
|
control_pool: &mut HashMap<PeerId, Vec<ControlAction>>, |
|
|
peer: PeerId, |
|
|
control: ControlAction, |
|
|
) { |
|
|
control_pool.entry(peer).or_default().push(control); |
|
|
} |
|
|
|
|
|
|
|
|
fn flush_control_pool(&mut self) { |
|
|
for (peer, controls) in self.control_pool.drain().collect::<Vec<_>>() { |
|
|
for msg in controls { |
|
|
self.send_message(peer, RpcOut::Control(msg)); |
|
|
} |
|
|
} |
|
|
|
|
|
|
|
|
self.pending_iwant_msgs.clear(); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
fn send_message(&mut self, peer_id: PeerId, rpc: RpcOut) { |
|
|
if let Some(m) = self.metrics.as_mut() { |
|
|
if let RpcOut::Publish(ref message) | RpcOut::Forward(ref message) = rpc { |
|
|
|
|
|
m.msg_sent(&message.topic, message.raw_protobuf_len()); |
|
|
} |
|
|
} |
|
|
|
|
|
self.events.push_back(ToSwarm::NotifyHandler { |
|
|
peer_id, |
|
|
event: HandlerIn::Message(rpc), |
|
|
handler: NotifyHandler::Any, |
|
|
}); |
|
|
} |
|
|
|
|
|
    /// Handles a newly established connection: registers it, tracks outbound
    /// status and the remote IP for scoring, and — on the first connection to
    /// a peer — initialises per-peer state and announces our mesh topics.
    fn on_connection_established(
        &mut self,
        ConnectionEstablished {
            peer_id,
            connection_id,
            endpoint,
            other_established,
            ..
        }: ConnectionEstablished,
    ) {
        // A first, dialed connection to a peer we did not learn about via
        // peer-exchange counts as an outbound peer.
        if endpoint.is_dialer() && other_established == 0 && !self.px_peers.contains(&peer_id) {
            self.outbound_peers.insert(peer_id);
        }

        // Record the remote IP for peer scoring, when one can be extracted
        // from the endpoint's multiaddress.
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) {
                peer_score.add_ip(&peer_id, ip);
            } else {
                tracing::trace!(
                    peer=%peer_id,
                    "Couldn't extract ip from endpoint of peer with endpoint {:?}",
                    endpoint
                )
            }
        }

        // Register the connection. A peer starts as `Floodsub` until the
        // handler reports its actual protocol kind (see
        // `on_connection_handler_event`).
        self.connected_peers
            .entry(peer_id)
            .or_insert(PeerConnections {
                kind: PeerKind::Floodsub,
                connections: vec![],
            })
            .connections
            .push(connection_id);

        // Everything below applies only to the first connection to this peer.
        if other_established > 0 {
            return;
        }

        // Begin tracking the peer's topic subscriptions.
        self.peer_topics.insert(peer_id, Default::default());

        if let Some((peer_score, ..)) = &mut self.peer_score {
            peer_score.add_peer(peer_id);
        }

        // Blacklisted peers still get their connection tracked (above), but
        // we do not announce our subscriptions to them.
        if self.blacklisted_peers.contains(&peer_id) {
            tracing::debug!(peer=%peer_id, "Ignoring connection from blacklisted peer");
            return;
        }

        tracing::debug!(peer=%peer_id, "New peer connected");
        // Announce our mesh topics to the new peer. The mesh is cloned so the
        // borrow of `self.mesh` ends before `send_message` takes `&mut self`.
        for topic_hash in self.mesh.clone().into_keys() {
            self.send_message(peer_id, RpcOut::Subscribe(topic_hash));
        }
    }
|
|
|
|
|
    /// Handles a closed connection: updates scoring IP records, and either
    /// drops just the closed connection (when others remain) or tears down
    /// all per-peer state (when it was the last connection).
    fn on_connection_closed(
        &mut self,
        ConnectionClosed {
            peer_id,
            connection_id,
            endpoint,
            remaining_established,
            ..
        }: ConnectionClosed,
    ) {
        // Remove the closed endpoint's IP from the peer-score records.
        if let Some((peer_score, ..)) = &mut self.peer_score {
            if let Some(ip) = get_ip_addr(endpoint.get_remote_address()) {
                peer_score.remove_ip(&peer_id, &ip);
            } else {
                tracing::trace!(
                    peer=%peer_id,
                    "Couldn't extract ip from endpoint of peer with endpoint {:?}",
                    endpoint
                )
            }
        }

        if remaining_established != 0 {
            // Other connections remain: drop only the closed connection.
            if let Some(connections) = self.connected_peers.get_mut(&peer_id) {
                let index = connections
                    .connections
                    .iter()
                    .position(|v| v == &connection_id)
                    .expect("Previously established connection to peer must be present");
                connections.connections.remove(index);

                // If the peer is in any mesh, re-notify a surviving handler
                // of its mesh membership — presumably because the closed
                // connection's handler may have been the one tracking it
                // (NOTE(review): confirm against the handler's keep-alive
                // logic).
                if !connections.connections.is_empty() {
                    if let Some(topics) = self.peer_topics.get(&peer_id) {
                        for topic in topics {
                            if let Some(mesh_peers) = self.mesh.get(topic) {
                                if mesh_peers.contains(&peer_id) {
                                    self.events.push_back(ToSwarm::NotifyHandler {
                                        peer_id,
                                        event: HandlerIn::JoinedMesh,
                                        handler: NotifyHandler::One(connections.connections[0]),
                                    });
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        } else {
            // Last connection to the peer closed: remove all per-peer state.
            tracing::debug!(peer=%peer_id, "Peer disconnected");
            {
                // A missing `peer_topics` entry is only expected for
                // blacklisted peers (whose subscriptions we never tracked).
                let Some(topics) = self.peer_topics.get(&peer_id) else {
                    debug_assert!(
                        self.blacklisted_peers.contains(&peer_id),
                        "Disconnected node not in connected list"
                    );
                    return;
                };

                // Remove the peer from every topic-related structure.
                for topic in topics {
                    // Remove from the mesh (if present) and update metrics.
                    if let Some(mesh_peers) = self.mesh.get_mut(topic) {
                        if mesh_peers.remove(&peer_id) {
                            if let Some(m) = self.metrics.as_mut() {
                                m.peers_removed(topic, Churn::Dc, 1);
                                m.set_mesh_peers(topic, mesh_peers.len());
                            }
                        };
                    }

                    // Remove from `topic_peers`; a missing entry indicates an
                    // internal inconsistency, logged but not fatal.
                    if let Some(peer_list) = self.topic_peers.get_mut(topic) {
                        if !peer_list.remove(&peer_id) {
                            tracing::warn!(
                                peer=%peer_id,
                                "Disconnected node: peer not in topic_peers"
                            );
                        }
                        if let Some(m) = self.metrics.as_mut() {
                            m.set_topic_peers(topic, peer_list.len())
                        }
                    } else {
                        tracing::warn!(
                            peer=%peer_id,
                            topic=%topic,
                            "Disconnected node: peer with topic not in topic_peers"
                        );
                    }

                    // Remove from the fanout for this topic, if tracked.
                    self.fanout
                        .get_mut(topic)
                        .map(|peers| peers.remove(&peer_id));
                }
            }

            // Forget peer-exchange and outbound status.
            self.px_peers.remove(&peer_id);
            self.outbound_peers.remove(&peer_id);

            // Drop the peer -> subscriptions mapping.
            self.peer_topics.remove(&peer_id);

            // Record the protocol-kind disconnect metric before the
            // `connected_peers` entry is removed below.
            if let Some(metrics) = self.metrics.as_mut() {
                let peer_kind = &self
                    .connected_peers
                    .get(&peer_id)
                    .expect("Connected peer must be registered")
                    .kind;
                metrics.peer_protocol_disconnected(peer_kind.clone());
            }

            self.connected_peers.remove(&peer_id);

            if let Some((peer_score, ..)) = &mut self.peer_score {
                peer_score.remove_peer(&peer_id);
            }
        }
    }
|
|
|
|
|
fn on_address_change( |
|
|
&mut self, |
|
|
AddressChange { |
|
|
peer_id, |
|
|
old: endpoint_old, |
|
|
new: endpoint_new, |
|
|
.. |
|
|
}: AddressChange, |
|
|
) { |
|
|
|
|
|
if let Some((peer_score, ..)) = &mut self.peer_score { |
|
|
if let Some(ip) = get_ip_addr(endpoint_old.get_remote_address()) { |
|
|
peer_score.remove_ip(&peer_id, &ip); |
|
|
} else { |
|
|
tracing::trace!( |
|
|
peer=%&peer_id, |
|
|
"Couldn't extract ip from endpoint of peer with endpoint {:?}", |
|
|
endpoint_old |
|
|
) |
|
|
} |
|
|
if let Some(ip) = get_ip_addr(endpoint_new.get_remote_address()) { |
|
|
peer_score.add_ip(&peer_id, ip); |
|
|
} else { |
|
|
tracing::trace!( |
|
|
peer=%peer_id, |
|
|
"Couldn't extract ip from endpoint of peer with endpoint {:?}", |
|
|
endpoint_new |
|
|
) |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
fn get_ip_addr(addr: &Multiaddr) -> Option<IpAddr> { |
|
|
addr.iter().find_map(|p| match p { |
|
|
Ip4(addr) => Some(IpAddr::V4(addr)), |
|
|
Ip6(addr) => Some(IpAddr::V6(addr)), |
|
|
_ => None, |
|
|
}) |
|
|
} |
|
|
|
|
|
impl<C, F> NetworkBehaviour for Behaviour<C, F>
where
    C: Send + 'static + DataTransform,
    F: Send + 'static + TopicSubscriptionFilter,
{
    type ConnectionHandler = Handler;
    type ToSwarm = Event;

    fn handle_established_inbound_connection(
        &mut self,
        _: ConnectionId,
        _: PeerId,
        _: &Multiaddr,
        _: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        // Inbound and outbound connections use identical handlers.
        Ok(Handler::new(self.config.protocol_config()))
    }

    fn handle_established_outbound_connection(
        &mut self,
        _: ConnectionId,
        _: PeerId,
        _: &Multiaddr,
        _: Endpoint,
        _: PortUse,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        Ok(Handler::new(self.config.protocol_config()))
    }

    /// Dispatches events reported by a peer's connection handler: protocol
    /// kind detection and incoming RPCs (subscriptions, messages, control).
    fn on_connection_handler_event(
        &mut self,
        propagation_source: PeerId,
        _connection_id: ConnectionId,
        handler_event: THandlerOutEvent<Self>,
    ) {
        match handler_event {
            HandlerEvent::PeerKind(kind) => {
                if let Some(metrics) = self.metrics.as_mut() {
                    metrics.peer_protocol_connected(kind.clone());
                }

                if let PeerKind::NotSupported = kind {
                    // Surface unsupported peers to the application.
                    tracing::debug!(
                        peer=%propagation_source,
                        "Peer does not support gossipsub protocols"
                    );
                    self.events
                        .push_back(ToSwarm::GenerateEvent(Event::GossipsubNotSupported {
                            peer_id: propagation_source,
                        }));
                } else if let Some(conn) = self.connected_peers.get_mut(&propagation_source) {
                    tracing::debug!(
                        peer=%propagation_source,
                        peer_type=%kind,
                        "New peer type found for peer"
                    );
                    // Only upgrade from the initial `Floodsub` default; once
                    // a non-Floodsub kind is recorded it is never replaced.
                    if let PeerKind::Floodsub = conn.kind {
                        conn.kind = kind;
                    }
                }
            }
            HandlerEvent::Message {
                rpc,
                invalid_messages,
            } => {
                // Subscriptions are handled even for graylisted peers: they
                // are processed before the score check below.
                if !rpc.subscriptions.is_empty() {
                    self.handle_received_subscriptions(&rpc.subscriptions, &propagation_source);
                }

                // Drop the remainder of the RPC from peers scoring below the
                // graylist threshold.
                if let (true, _) =
                    self.score_below_threshold(&propagation_source, |pst| pst.graylist_threshold)
                {
                    tracing::debug!(peer=%propagation_source, "RPC Dropped from greylisted peer");
                    return;
                }

                // With scoring enabled, invalid messages are penalised;
                // otherwise they are only logged.
                if self.peer_score.is_some() {
                    for (raw_message, validation_error) in invalid_messages {
                        self.handle_invalid_message(
                            &propagation_source,
                            &raw_message,
                            RejectReason::ValidationError(validation_error),
                        )
                    }
                } else {
                    for (message, validation_error) in invalid_messages {
                        tracing::warn!(
                            peer=%propagation_source,
                            source=?message.source,
                            "Invalid message from peer. Reason: {:?}",
                            validation_error,
                        );
                    }
                }

                // Process valid messages, honouring the per-RPC cap.
                // `enumerate` starts at 0, so up to `max_messages_per_rpc`
                // messages are processed before the cutoff triggers.
                for (count, raw_message) in rpc.messages.into_iter().enumerate() {
                    if self.config.max_messages_per_rpc().is_some()
                        && Some(count) >= self.config.max_messages_per_rpc()
                    {
                        tracing::warn!("Received more messages than permitted. Ignoring further messages. Processed: {}", count);
                        break;
                    }
                    self.handle_received_message(raw_message, &propagation_source);
                }

                // Triage control messages: IHAVE/GRAFT/PRUNE are batched per
                // RPC, while IWANT is handled immediately.
                let mut ihave_msgs = vec![];
                let mut graft_msgs = vec![];
                let mut prune_msgs = vec![];
                for control_msg in rpc.control_msgs {
                    match control_msg {
                        ControlAction::IHave {
                            topic_hash,
                            message_ids,
                        } => {
                            ihave_msgs.push((topic_hash, message_ids));
                        }
                        ControlAction::IWant { message_ids } => {
                            self.handle_iwant(&propagation_source, message_ids)
                        }
                        ControlAction::Graft { topic_hash } => graft_msgs.push(topic_hash),
                        ControlAction::Prune {
                            topic_hash,
                            peers,
                            backoff,
                        } => prune_msgs.push((topic_hash, peers, backoff)),
                    }
                }
                if !ihave_msgs.is_empty() {
                    self.handle_ihave(&propagation_source, ihave_msgs);
                }
                if !graft_msgs.is_empty() {
                    self.handle_graft(&propagation_source, graft_msgs);
                }
                if !prune_msgs.is_empty() {
                    self.handle_prune(&propagation_source, prune_msgs);
                }
            }
        }
    }

    #[tracing::instrument(level = "trace", name = "NetworkBehaviour::poll", skip(self, cx))]
    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        // Flush queued swarm events first, one per poll.
        if let Some(event) = self.events.pop_front() {
            return Poll::Ready(event);
        }

        // Refresh peer scores on every tick of the score interval.
        if let Some((peer_score, _, interval, _)) = &mut self.peer_score {
            while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) {
                peer_score.refresh_scores();
            }
        }

        // Run the gossipsub heartbeat on every tick of its ticker.
        while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
            self.heartbeat();
        }

        Poll::Pending
    }

    fn on_swarm_event(&mut self, event: FromSwarm) {
        match event {
            FromSwarm::ConnectionEstablished(connection_established) => {
                self.on_connection_established(connection_established)
            }
            FromSwarm::ConnectionClosed(connection_closed) => {
                self.on_connection_closed(connection_closed)
            }
            FromSwarm::AddressChange(address_change) => self.on_address_change(address_change),
            _ => {}
        }
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
fn peer_added_to_mesh( |
|
|
peer_id: PeerId, |
|
|
new_topics: Vec<&TopicHash>, |
|
|
mesh: &HashMap<TopicHash, BTreeSet<PeerId>>, |
|
|
known_topics: Option<&BTreeSet<TopicHash>>, |
|
|
events: &mut VecDeque<ToSwarm<Event, HandlerIn>>, |
|
|
connections: &HashMap<PeerId, PeerConnections>, |
|
|
) { |
|
|
|
|
|
let connection_id = { |
|
|
let conn = connections.get(&peer_id).expect("To be connected to peer."); |
|
|
assert!( |
|
|
!conn.connections.is_empty(), |
|
|
"Must have at least one connection" |
|
|
); |
|
|
conn.connections[0] |
|
|
}; |
|
|
|
|
|
if let Some(topics) = known_topics { |
|
|
for topic in topics { |
|
|
if !new_topics.contains(&topic) { |
|
|
if let Some(mesh_peers) = mesh.get(topic) { |
|
|
if mesh_peers.contains(&peer_id) { |
|
|
|
|
|
return; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
events.push_back(ToSwarm::NotifyHandler { |
|
|
peer_id, |
|
|
event: HandlerIn::JoinedMesh, |
|
|
handler: NotifyHandler::One(connection_id), |
|
|
}); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
fn peer_removed_from_mesh( |
|
|
peer_id: PeerId, |
|
|
old_topic: &TopicHash, |
|
|
mesh: &HashMap<TopicHash, BTreeSet<PeerId>>, |
|
|
known_topics: Option<&BTreeSet<TopicHash>>, |
|
|
events: &mut VecDeque<ToSwarm<Event, HandlerIn>>, |
|
|
connections: &HashMap<PeerId, PeerConnections>, |
|
|
) { |
|
|
|
|
|
let connection_id = connections |
|
|
.get(&peer_id) |
|
|
.expect("To be connected to peer.") |
|
|
.connections |
|
|
.first() |
|
|
.expect("There should be at least one connection to a peer."); |
|
|
|
|
|
if let Some(topics) = known_topics { |
|
|
for topic in topics { |
|
|
if topic != old_topic { |
|
|
if let Some(mesh_peers) = mesh.get(topic) { |
|
|
if mesh_peers.contains(&peer_id) { |
|
|
|
|
|
return; |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
events.push_back(ToSwarm::NotifyHandler { |
|
|
peer_id, |
|
|
event: HandlerIn::LeftMesh, |
|
|
handler: NotifyHandler::One(*connection_id), |
|
|
}); |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
fn get_random_peers_dynamic( |
|
|
topic_peers: &HashMap<TopicHash, BTreeSet<PeerId>>, |
|
|
connected_peers: &HashMap<PeerId, PeerConnections>, |
|
|
topic_hash: &TopicHash, |
|
|
|
|
|
n_map: impl Fn(usize) -> usize, |
|
|
mut f: impl FnMut(&PeerId) -> bool, |
|
|
) -> BTreeSet<PeerId> { |
|
|
let mut gossip_peers = match topic_peers.get(topic_hash) { |
|
|
|
|
|
Some(peer_list) => peer_list |
|
|
.iter() |
|
|
.copied() |
|
|
.filter(|p| { |
|
|
f(p) && match connected_peers.get(p) { |
|
|
Some(connections) if connections.kind == PeerKind::Gossipsub => true, |
|
|
Some(connections) if connections.kind == PeerKind::Gossipsubv1_1 => true, |
|
|
_ => false, |
|
|
} |
|
|
}) |
|
|
.collect(), |
|
|
None => Vec::new(), |
|
|
}; |
|
|
|
|
|
|
|
|
let n = n_map(gossip_peers.len()); |
|
|
if gossip_peers.len() <= n { |
|
|
tracing::debug!("RANDOM PEERS: Got {:?} peers", gossip_peers.len()); |
|
|
return gossip_peers.into_iter().collect(); |
|
|
} |
|
|
|
|
|
|
|
|
let mut rng = thread_rng(); |
|
|
gossip_peers.partial_shuffle(&mut rng, n); |
|
|
|
|
|
tracing::debug!("RANDOM PEERS: Got {:?} peers", n); |
|
|
|
|
|
gossip_peers.into_iter().take(n).collect() |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
fn get_random_peers( |
|
|
topic_peers: &HashMap<TopicHash, BTreeSet<PeerId>>, |
|
|
connected_peers: &HashMap<PeerId, PeerConnections>, |
|
|
topic_hash: &TopicHash, |
|
|
n: usize, |
|
|
f: impl FnMut(&PeerId) -> bool, |
|
|
) -> BTreeSet<PeerId> { |
|
|
get_random_peers_dynamic(topic_peers, connected_peers, topic_hash, |_| n, f) |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
/// Checks that the configured `MessageAuthenticity` is coherent with the
/// configured `ValidationMode`: anonymous validation requires anonymous
/// publishing, and strict validation requires signed publishing. Returns a
/// human-readable error message on a contradictory combination.
fn validate_config(
    authenticity: &MessageAuthenticity,
    validation_mode: &ValidationMode,
) -> Result<(), &'static str> {
    match validation_mode {
        ValidationMode::Anonymous => {
            // Signing while requiring anonymity is contradictory.
            if authenticity.is_signing() {
                return Err("Cannot enable message signing with an Anonymous validation mode. Consider changing either the ValidationMode or MessageAuthenticity");
            }

            // Publishing authored messages while rejecting authored incoming
            // messages would make our own messages undeliverable elsewhere.
            if !authenticity.is_anonymous() {
                return Err("Published messages contain an author but incoming messages with an author will be rejected. Consider adjusting the validation or privacy settings in the config");
            }
        }
        ValidationMode::Strict => {
            // Strict validation rejects unsigned messages, so we must sign.
            if !authenticity.is_signing() {
                return Err(
                    "Messages will be
                published unsigned and incoming unsigned messages will be rejected. Consider adjusting
                the validation or privacy settings in the config"
                );
            }
        }
        // The remaining validation modes accept any authenticity setting.
        _ => {}
    }
    Ok(())
}
|
|
|
|
|
impl<C: DataTransform, F: TopicSubscriptionFilter> fmt::Debug for Behaviour<C, F> { |
|
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
|
|
f.debug_struct("Behaviour") |
|
|
.field("config", &self.config) |
|
|
.field("events", &self.events.len()) |
|
|
.field("control_pool", &self.control_pool) |
|
|
.field("publish_config", &self.publish_config) |
|
|
.field("topic_peers", &self.topic_peers) |
|
|
.field("peer_topics", &self.peer_topics) |
|
|
.field("mesh", &self.mesh) |
|
|
.field("fanout", &self.fanout) |
|
|
.field("fanout_last_pub", &self.fanout_last_pub) |
|
|
.field("mcache", &self.mcache) |
|
|
.field("heartbeat", &self.heartbeat) |
|
|
.finish() |
|
|
} |
|
|
} |
|
|
|
|
|
impl fmt::Debug for PublishConfig { |
|
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
|
|
match self { |
|
|
PublishConfig::Signing { author, .. } => { |
|
|
f.write_fmt(format_args!("PublishConfig::Signing({author})")) |
|
|
} |
|
|
PublishConfig::Author(author) => { |
|
|
f.write_fmt(format_args!("PublishConfig::Author({author})")) |
|
|
} |
|
|
PublishConfig::RandomAuthor => f.write_fmt(format_args!("PublishConfig::RandomAuthor")), |
|
|
PublishConfig::Anonymous => f.write_fmt(format_args!("PublishConfig::Anonymous")), |
|
|
} |
|
|
} |
|
|
} |
|
|
|
|
|
#[cfg(test)]
mod local_test {
    use super::*;
    use crate::IdentTopic;
    use quickcheck::*;

    /// Builds a fixed dummy message used as the payload of arbitrary RPCs.
    fn test_message() -> RawMessage {
        RawMessage {
            source: Some(PeerId::random()),
            data: vec![0; 100],
            sequence_number: None,
            topic: TopicHash::from_raw("test_topic"),
            signature: None,
            key: None,
            validated: false,
        }
    }

    /// Builds a fixed IHAVE control message with five identical ids.
    fn test_control() -> ControlAction {
        ControlAction::IHave {
            topic_hash: IdentTopic::new("TestTopic").hash(),
            message_ids: vec![MessageId(vec![12u8]); 5],
        }
    }

    impl Arbitrary for RpcOut {
        fn arbitrary(g: &mut Gen) -> Self {
            // Pick one of the five RpcOut variants uniformly.
            let variant = u8::arbitrary(g) % 5;
            match variant {
                0 => RpcOut::Subscribe(IdentTopic::new("TestTopic").hash()),
                1 => RpcOut::Unsubscribe(IdentTopic::new("TestTopic").hash()),
                2 => RpcOut::Publish(test_message()),
                3 => RpcOut::Forward(test_message()),
                4 => RpcOut::Control(test_control()),
                _ => panic!("outside range"),
            }
        }
    }
}
|
|
|