repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-metrics/src/memory_profiler.rs | linera-metrics/src/memory_profiler.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Safe jemalloc memory profiling with jemalloc_pprof integration
//!
//! This module provides HTTP endpoints for pprof format profiles using pull model.
//! Profiles are generated on-demand when endpoints are requested by Grafana Alloy.
use axum::{
http::{header, StatusCode},
response::IntoResponse,
};
use jemalloc_pprof::PROF_CTL;
use thiserror::Error;
use tracing::{error, info, trace};
/// Errors that can occur when checking or using the jemalloc memory profiler.
#[derive(Debug, Error)]
pub enum MemoryProfilerError {
    /// jemalloc is present but profiling was not activated (the error message
    /// points at the `malloc_conf` configuration as the thing to check).
    #[error("jemalloc profiling not activated - check malloc_conf configuration")]
    JemallocProfilingNotActivated,
    /// The global `PROF_CTL` handle is absent, i.e. jemalloc was built without
    /// profiling support.
    #[error("PROF_CTL not available - ensure jemalloc is built with profiling support")]
    ProfCtlNotAvailable,
    /// The profiler control lock is currently held by someone else.
    #[error("another profiler is already running")]
    AnotherProfilerAlreadyRunning,
}
/// Memory profiler using safe jemalloc_pprof wrapper (pull model only).
///
/// Stateless marker type: all methods operate on the global `PROF_CTL` handle
/// on demand.
pub struct MemoryProfiler;
impl MemoryProfiler {
pub fn check_prof_ctl() -> Result<(), MemoryProfilerError> {
// Check if jemalloc profiling is available
if let Some(prof_ctl) = PROF_CTL.as_ref() {
let prof_ctl = prof_ctl
.try_lock()
.map_err(|_| MemoryProfilerError::AnotherProfilerAlreadyRunning)?;
if !prof_ctl.activated() {
error!("jemalloc profiling not activated");
return Err(MemoryProfilerError::JemallocProfilingNotActivated);
}
trace!("✓ jemalloc memory profiling is ready");
} else {
error!("PROF_CTL not available");
return Err(MemoryProfilerError::ProfCtlNotAvailable);
}
Ok(())
}
/// HTTP endpoint for heap profile - returns fresh pprof data
pub async fn heap_profile() -> Result<impl IntoResponse, StatusCode> {
trace!("Serving heap profile via /debug/pprof");
match Self::collect_heap_profile().await {
Ok(profile_data) => {
trace!("✓ Serving heap profile ({} bytes)", profile_data.len());
Ok((
StatusCode::OK,
[(header::CONTENT_TYPE, "application/octet-stream")],
profile_data,
))
}
Err(e) => {
error!("Failed to collect heap profile: {}", e);
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
/// HTTP endpoint for flamegraph - returns SVG flamegraph
pub async fn heap_flamegraph() -> Result<impl IntoResponse, StatusCode> {
info!("Serving heap flamegraph via /debug/flamegraph");
match Self::collect_heap_flamegraph().await {
Ok(flamegraph_svg) => {
trace!("✓ Serving heap flamegraph ({} bytes)", flamegraph_svg.len());
Ok((
StatusCode::OK,
[(header::CONTENT_TYPE, "image/svg+xml")],
flamegraph_svg,
))
}
Err(e) => {
error!("Failed to collect heap flamegraph: {}", e);
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
/// Collect heap profile on-demand using safe jemalloc_pprof wrapper
async fn collect_heap_profile() -> anyhow::Result<Vec<u8>> {
if let Some(prof_ctl) = PROF_CTL.as_ref() {
let mut prof_ctl = prof_ctl.lock().await;
if !prof_ctl.activated() {
return Err(anyhow::anyhow!("jemalloc profiling not activated"));
}
match prof_ctl.dump_pprof() {
Ok(profile) => {
trace!("✓ Collected heap profile ({} bytes)", profile.len());
Ok(profile)
}
Err(e) => {
error!("Failed to dump pprof profile: {}", e);
Err(anyhow::anyhow!("Failed to dump pprof profile: {}", e))
}
}
} else {
Err(anyhow::anyhow!("PROF_CTL not available"))
}
}
/// Collect heap flamegraph using prof_ctl.dump_flamegraph()
async fn collect_heap_flamegraph() -> anyhow::Result<Vec<u8>> {
if let Some(prof_ctl) = PROF_CTL.as_ref() {
let mut prof_ctl = prof_ctl.lock().await;
if !prof_ctl.activated() {
return Err(anyhow::anyhow!("jemalloc profiling not activated"));
}
match prof_ctl.dump_flamegraph() {
Ok(flamegraph_bytes) => {
trace!("✓ Generated flamegraph ({} bytes)", flamegraph_bytes.len());
Ok(flamegraph_bytes)
}
Err(e) => {
error!("Failed to dump flamegraph: {}", e);
Err(anyhow::anyhow!("Failed to dump flamegraph: {}", e))
}
}
} else {
Err(anyhow::anyhow!("PROF_CTL not available"))
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-metrics/src/monitoring_server.rs | linera-metrics/src/monitoring_server.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::fmt::Debug;
use axum::{http::StatusCode, response::IntoResponse, routing::get, Router};
use tokio::net::ToSocketAddrs;
use tokio_util::sync::CancellationToken;
use tracing::info;
#[cfg(feature = "memory-profiling")]
use crate::memory_profiler::MemoryProfiler;
/// Spawns a background HTTP server exposing Prometheus metrics on `/metrics`.
///
/// When the `memory-profiling` feature is enabled and jemalloc profiling is
/// active, also exposes `/debug/pprof` and `/debug/flamegraph`. The server runs
/// until `shutdown_signal` is cancelled.
pub fn start_metrics(
    address: impl ToSocketAddrs + Debug + Send + 'static,
    shutdown_signal: CancellationToken,
) {
    #[cfg(feature = "memory-profiling")]
    let app = {
        // Try to add memory profiling endpoint
        match MemoryProfiler::check_prof_ctl() {
            Ok(()) => {
                info!("Memory profiling available, enabling /debug/pprof and /debug/flamegraph endpoints");
                Router::new()
                    .route("/metrics", get(serve_metrics))
                    .route("/debug/pprof", get(MemoryProfiler::heap_profile))
                    .route("/debug/flamegraph", get(MemoryProfiler::heap_flamegraph))
            }
            Err(e) => {
                // Profiling is optional: degrade gracefully to metrics-only.
                tracing::warn!(
                    "Memory profiling not available: {}, serving metrics-only",
                    e
                );
                Router::new().route("/metrics", get(serve_metrics))
            }
        }
    };
    #[cfg(not(feature = "memory-profiling"))]
    let app = Router::new().route("/metrics", get(serve_metrics));
    tokio::spawn(async move {
        // Bind first so the actual local address can be logged below.
        let listener = tokio::net::TcpListener::bind(address)
            .await
            .expect("Failed to bind to address");
        let address = listener.local_addr().expect("Failed to get local address");
        info!("Starting to serve metrics on {:?}", address);
        if let Err(e) = axum::serve(listener, app)
            .with_graceful_shutdown(shutdown_signal.cancelled_owned())
            .await
        {
            // NOTE(review): this panic happens inside the spawned task and so
            // aborts only the task, not the process — confirm that is intended.
            panic!("Error serving metrics: {}", e);
        }
    });
}
async fn serve_metrics() -> Result<String, AxumError> {
let metric_families = prometheus::gather();
Ok(prometheus::TextEncoder::new()
.encode_to_string(&metric_families)
.map_err(anyhow::Error::from)?)
}
/// Newtype wrapper around `anyhow::Error` so errors can be returned from axum handlers.
struct AxumError(anyhow::Error);
impl IntoResponse for AxumError {
    /// Reports the wrapped error as a 500 response with a plain-text message.
    fn into_response(self) -> axum::response::Response {
        let message = format!("Something went wrong: {}", self.0);
        (StatusCode::INTERNAL_SERVER_ERROR, message).into_response()
    }
}
impl<E> From<E> for AxumError
where
E: Into<anyhow::Error>,
{
fn from(err: E) -> Self {
Self(err.into())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/build.rs | linera-client/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Build script: declares the `cfg` aliases used throughout this crate.
fn main() {
    cfg_aliases::cfg_aliases! {
        // Browser (Wasm) build with the `web` feature enabled.
        web: { all(target_arch = "wasm32", feature = "web") },
        // Compiling tests, or with the `test` feature enabled.
        with_testing: { any(test, feature = "test") },
        // Metrics are only available off-Wasm and behind the `metrics` feature.
        with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
    };
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/config.rs | linera-client/src/config.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::iter::IntoIterator;
use linera_base::{
crypto::{AccountPublicKey, BcsSignable, CryptoHash, ValidatorPublicKey, ValidatorSecretKey},
data_types::{
Amount, Blob, ChainDescription, ChainOrigin, Epoch, InitialChainConfig, NetworkDescription,
Timestamp,
},
identifiers::ChainId,
ownership::ChainOwnership,
};
use linera_execution::{
committee::{Committee, ValidatorState},
ResourceControlPolicy,
};
use linera_rpc::config::{ValidatorInternalNetworkConfig, ValidatorPublicNetworkConfig};
use linera_storage::Storage;
use serde::{Deserialize, Serialize};
/// Errors arising while handling configuration or initializing storage.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Underlying I/O failure.
    #[error("I/O error: {0}")]
    IoError(#[from] std::io::Error),
    /// Error propagated from the chain layer.
    #[error("chain error: {0}")]
    Chain(#[from] linera_chain::ChainError),
    /// The storage already holds a different network description.
    #[error("storage is already initialized: {0:?}")]
    StorageIsAlreadyInitialized(Box<NetworkDescription>),
    /// No admin chain was configured.
    #[error("no admin chain configured")]
    NoAdminChain,
}
/// The public configuration of a validator.
///
/// This is the information shared with others; the corresponding secret key is
/// kept in [`ValidatorServerConfig`].
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ValidatorConfig {
    /// The public key of the validator.
    pub public_key: ValidatorPublicKey,
    /// The account key of the validator.
    pub account_key: AccountPublicKey,
    /// The network configuration for the validator.
    pub network: ValidatorPublicNetworkConfig,
}
/// The private configuration of a validator service.
#[derive(Serialize, Deserialize)]
pub struct ValidatorServerConfig {
    /// The validator's public configuration.
    pub validator: ValidatorConfig,
    /// The validator's secret signing key; must be kept private.
    pub validator_secret: ValidatorSecretKey,
    /// The configuration of the validator's internal network.
    pub internal_network: ValidatorInternalNetworkConfig,
}
/// The (public) configuration for all validators.
#[derive(Debug, Default, Clone, Deserialize, Serialize)]
pub struct CommitteeConfig {
    /// The public configurations of all validators in the committee.
    pub validators: Vec<ValidatorConfig>,
}
impl CommitteeConfig {
    /// Consumes the configuration and builds a [`Committee`] under the given
    /// resource control policy. Every validator receives an equal weight of
    /// 100 votes.
    pub fn into_committee(self, policy: ResourceControlPolicy) -> Committee {
        let validators = self
            .validators
            .into_iter()
            .map(|config| {
                let state = ValidatorState {
                    network_address: config.network.to_string(),
                    votes: 100,
                    account_public_key: config.account_key,
                };
                (config.public_key, state)
            })
            .collect();
        Committee::new(validators, policy)
    }
}
/// The genesis configuration of a network.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GenesisConfig {
    /// The initial committee of validators.
    pub committee: Committee,
    /// The timestamp given to all genesis chains.
    pub timestamp: Timestamp,
    /// The descriptions of the root chains; the first entry is the admin chain.
    pub chains: Vec<ChainDescription>,
    /// The name of the network.
    pub network_name: String,
}
// Makes `GenesisConfig` hashable/signable through its BCS serialization.
impl BcsSignable<'_> for GenesisConfig {}
/// Builds the description of the root chain with the given index: owned by a
/// single key, funded with `balance`, and pinned to epoch zero.
fn make_chain(
    index: u32,
    public_key: AccountPublicKey,
    balance: Amount,
    timestamp: Timestamp,
) -> ChainDescription {
    ChainDescription::new(
        ChainOrigin::Root(index),
        InitialChainConfig {
            ownership: ChainOwnership::single(public_key.into()),
            balance,
            epoch: Epoch::ZERO,
            min_active_epoch: Epoch::ZERO,
            max_active_epoch: Epoch::ZERO,
            application_permissions: Default::default(),
        },
        timestamp,
    )
}
impl GenesisConfig {
    /// Creates a `GenesisConfig` with the first chain being the admin chain.
    pub fn new(
        committee: CommitteeConfig,
        timestamp: Timestamp,
        policy: ResourceControlPolicy,
        network_name: String,
        admin_public_key: AccountPublicKey,
        admin_balance: Amount,
    ) -> Self {
        let committee = committee.into_committee(policy);
        // Root chain 0 is, by convention, the admin chain.
        let admin_chain = make_chain(0, admin_public_key, admin_balance, timestamp);
        Self {
            committee,
            timestamp,
            chains: vec![admin_chain],
            network_name,
        }
    }

    /// Appends a new root chain owned by `public_key` with the given initial
    /// `balance`, and returns its description.
    pub fn add_root_chain(
        &mut self,
        public_key: AccountPublicKey,
        balance: Amount,
    ) -> ChainDescription {
        // The new root index is the current number of chains (admin chain is 0).
        let description = make_chain(
            self.chains.len() as u32,
            public_key,
            balance,
            self.timestamp,
        );
        self.chains.push(description.clone());
        description
    }

    /// Returns the description of the admin chain.
    ///
    /// # Panics
    /// Panics if `chains` is empty; `GenesisConfig::new` always creates the
    /// admin chain first, so this can only happen for hand-built values.
    pub fn admin_chain_description(&self) -> &ChainDescription {
        &self.chains[0]
    }

    /// Returns the ID of the admin chain.
    pub fn admin_id(&self) -> ChainId {
        self.admin_chain_description().id()
    }

    /// Writes the genesis state (committee blob, network description, and all
    /// root chains) to `storage`, unless it is already initialized.
    ///
    /// # Errors
    /// Returns [`Error::StorageIsAlreadyInitialized`] if the storage holds a
    /// network description different from this configuration's.
    pub async fn initialize_storage<S>(&self, storage: &mut S) -> Result<(), Error>
    where
        S: Storage + Clone + 'static,
    {
        if let Some(description) = storage
            .read_network_description()
            .await
            .map_err(linera_chain::ChainError::from)?
        {
            if description != self.network_description() {
                // We can't initialize storage with a different network description.
                tracing::error!(
                    current_network=?description,
                    new_network=?self.network_description(),
                    "storage already initialized"
                );
                return Err(Error::StorageIsAlreadyInitialized(Box::new(description)));
            }
            // Idempotent: a matching description means there is nothing to do.
            tracing::debug!(?description, "storage already initialized");
            return Ok(());
        }
        let network_description = self.network_description();
        storage
            .write_blob(&self.committee_blob())
            .await
            .map_err(linera_chain::ChainError::from)?;
        storage
            .write_network_description(&network_description)
            .await
            .map_err(linera_chain::ChainError::from)?;
        for description in &self.chains {
            storage.create_chain(description.clone()).await?;
        }
        Ok(())
    }

    /// Returns the hash of this genesis configuration.
    pub fn hash(&self) -> CryptoHash {
        CryptoHash::new(self)
    }

    /// Returns the initial committee serialized as a blob.
    pub fn committee_blob(&self) -> Blob {
        Blob::new_committee(
            bcs::to_bytes(&self.committee).expect("serializing a committee should succeed"),
        )
    }

    /// Returns the network description derived from this configuration.
    pub fn network_description(&self) -> NetworkDescription {
        NetworkDescription {
            name: self.network_name.clone(),
            // Reuse `hash()` so the genesis hash is computed in exactly one place.
            genesis_config_hash: self.hash(),
            genesis_timestamp: self.timestamp,
            genesis_committee_blob_hash: self.committee_blob().id().hash,
            admin_chain_id: self.admin_id(),
        }
    }
}
#[cfg(with_testing)]
mod test {
    use linera_base::data_types::Timestamp;
    use linera_core::test_utils::{MemoryStorageBuilder, TestBuilder};
    use linera_rpc::{
        config::{NetworkProtocol, ValidatorPublicNetworkPreConfig},
        simple::TransportProtocol,
    };
    use super::*;
    use crate::config::{CommitteeConfig, GenesisConfig, ValidatorConfig};

    impl GenesisConfig {
        /// Create a new local `GenesisConfig` for testing.
        pub fn new_testing(builder: &TestBuilder<MemoryStorageBuilder>) -> Self {
            // Dummy network endpoint; tests do not actually connect to it.
            let network = ValidatorPublicNetworkPreConfig {
                protocol: NetworkProtocol::Simple(TransportProtocol::Tcp),
                host: "localhost".to_string(),
                port: 8080,
            };
            // Mirror the builder's committee into validator configs.
            let validators = builder
                .initial_committee
                .validators()
                .iter()
                .map(|(public_key, state)| ValidatorConfig {
                    public_key: *public_key,
                    network: network.clone(),
                    account_key: state.account_public_key,
                })
                .collect();
            // The first genesis chain becomes the admin chain.
            let mut genesis_chains = builder.genesis_chains().into_iter();
            let (admin_public_key, admin_balance) = genesis_chains
                .next()
                .expect("should have at least one chain");
            let mut genesis_config = Self::new(
                CommitteeConfig { validators },
                Timestamp::from(0),
                builder.initial_committee.policy().clone(),
                "test network".to_string(),
                admin_public_key,
                admin_balance,
            );
            // Any remaining chains become ordinary root chains.
            for (public_key, amount) in genesis_chains {
                genesis_config.add_root_chain(public_key, amount);
            }
            genesis_config
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/client_context.rs | linera-client/src/client_context.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use futures::{Future, StreamExt as _, TryStreamExt as _};
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{Epoch, Timestamp},
identifiers::{Account, AccountOwner, ChainId},
ownership::ChainOwnership,
time::{Duration, Instant},
util::future::FutureSyncExt as _,
};
use linera_chain::{manager::LockingBlock, types::ConfirmedBlockCertificate};
use linera_core::{
client::{ChainClient, Client, ListeningMode},
data_types::{ChainInfo, ChainInfoQuery, ClientOutcome},
join_set_ext::JoinSet,
node::ValidatorNode,
wallet, Environment, JoinSetExt as _, Wallet as _,
};
use linera_rpc::node_provider::{NodeOptions, NodeProvider};
use linera_version::VersionInfo;
use thiserror_context::Context;
use tracing::{debug, info, warn};
#[cfg(not(web))]
use {
crate::{
benchmark::{Benchmark, BenchmarkError},
client_metrics::ClientMetrics,
},
futures::stream,
linera_base::{
crypto::AccountPublicKey,
data_types::{Amount, BlockHeight},
identifiers::{ApplicationId, BlobType},
},
linera_core::client::chain_client,
linera_execution::{
system::{OpenChainConfig, SystemOperation},
Operation,
},
std::{collections::HashSet, iter, path::Path},
tokio::{sync::mpsc, task},
};
#[cfg(feature = "fs")]
use {
linera_base::{
data_types::{BlobContent, Bytecode},
identifiers::ModuleId,
vm::VmRuntime,
},
linera_core::client::create_bytecode_blobs,
std::{fs, path::PathBuf},
};
use crate::{
chain_listener::{self, ClientContext as _},
client_options::{ChainOwnershipConfig, Options},
config::GenesisConfig,
error, util, Error,
};
/// Results from querying a validator about version, network description, and chain info.
///
/// Each field holds the outcome of one independent query; any subset may fail.
pub struct ValidatorQueryResults {
    /// The validator's version information.
    pub version_info: Result<VersionInfo, Error>,
    /// The validator's genesis config hash.
    pub genesis_config_hash: Result<CryptoHash, Error>,
    /// The validator's chain info (if valid and signature check passed).
    pub chain_info: Result<ChainInfo, Error>,
}
impl ValidatorQueryResults {
    /// Returns a vector of references to all errors in the query results.
    pub fn errors(&self) -> Vec<&Error> {
        // Gather the error, if any, of each of the three queries, in order.
        [
            self.version_info.as_ref().err(),
            self.genesis_config_hash.as_ref().err(),
            self.chain_info.as_ref().err(),
        ]
        .into_iter()
        .flatten()
        .collect()
    }

    /// Prints validator information to stdout.
    ///
    /// Prints public key, address, and optionally weight, version info, and chain info.
    /// If `reference` is provided, only prints fields that differ from the reference.
    pub fn print(
        &self,
        public_key: Option<&ValidatorPublicKey>,
        address: Option<&str>,
        weight: Option<u64>,
        reference: Option<&ValidatorQueryResults>,
    ) {
        if let Some(key) = public_key {
            println!("Public key: {}", key);
        }
        if let Some(address) = address {
            println!("Address: {}", address);
        }
        if let Some(w) = weight {
            println!("Weight: {}", w);
        }
        // Version info: print each field only when it differs from the reference.
        let ref_version = reference.and_then(|ref_results| ref_results.version_info.as_ref().ok());
        match &self.version_info {
            Ok(version_info) => {
                if ref_version.is_none_or(|ref_v| ref_v.crate_version != version_info.crate_version)
                {
                    println!("Linera protocol: v{}", version_info.crate_version);
                }
                if ref_version.is_none_or(|ref_v| ref_v.rpc_hash != version_info.rpc_hash) {
                    println!("RPC API hash: {}", version_info.rpc_hash);
                }
                if ref_version.is_none_or(|ref_v| ref_v.graphql_hash != version_info.graphql_hash) {
                    println!("GraphQL API hash: {}", version_info.graphql_hash);
                }
                if ref_version.is_none_or(|ref_v| ref_v.wit_hash != version_info.wit_hash) {
                    println!("WIT API hash: {}", version_info.wit_hash);
                }
                // Commit and dirty flag are compared together: a difference in
                // either means a different source tree.
                if ref_version.is_none_or(|ref_v| {
                    (&ref_v.git_commit, ref_v.git_dirty)
                        != (&version_info.git_commit, version_info.git_dirty)
                }) {
                    println!(
                        "Source code: {}/tree/{}{}",
                        env!("CARGO_PKG_REPOSITORY"),
                        version_info.git_commit,
                        if version_info.git_dirty {
                            " (dirty)"
                        } else {
                            ""
                        }
                    );
                }
            }
            Err(err) => println!("Error getting version info: {err}"),
        }
        // Genesis config hash: skipped entirely when it matches the reference.
        let ref_genesis_hash =
            reference.and_then(|ref_results| ref_results.genesis_config_hash.as_ref().ok());
        match &self.genesis_config_hash {
            Ok(hash) if ref_genesis_hash.is_some_and(|ref_hash| ref_hash == hash) => {}
            Ok(hash) => println!("Genesis config hash: {hash}"),
            Err(err) => println!("Error getting genesis config: {err}"),
        }
        // Chain info: same differs-from-reference treatment, field by field.
        let ref_info = reference.and_then(|ref_results| ref_results.chain_info.as_ref().ok());
        match &self.chain_info {
            Ok(info) => {
                if ref_info.is_none_or(|ref_info| info.block_hash != ref_info.block_hash) {
                    if let Some(hash) = info.block_hash {
                        println!("Block hash: {}", hash);
                    } else {
                        println!("Block hash: None");
                    }
                }
                if ref_info
                    .is_none_or(|ref_info| info.next_block_height != ref_info.next_block_height)
                {
                    println!("Next height: {}", info.next_block_height);
                }
                if ref_info.is_none_or(|ref_info| info.timestamp != ref_info.timestamp) {
                    println!("Timestamp: {}", info.timestamp);
                }
                if ref_info.is_none_or(|ref_info| info.epoch != ref_info.epoch) {
                    println!("Epoch: {}", info.epoch);
                }
                if ref_info.is_none_or(|ref_info| {
                    info.manager.current_round != ref_info.manager.current_round
                }) {
                    println!("Round: {}", info.manager.current_round);
                }
                // A locking block, if present, is always printed.
                if let Some(locking) = &info.manager.requested_locking {
                    match &**locking {
                        LockingBlock::Fast(proposal) => {
                            println!(
                                "Locking fast block from {}",
                                proposal.content.block.timestamp
                            );
                        }
                        LockingBlock::Regular(validated) => {
                            println!(
                                "Locking block {} in {} from {}",
                                validated.hash(),
                                validated.round,
                                validated.block().header.timestamp
                            );
                        }
                    }
                }
            }
            Err(err) => println!("Error getting chain info: {err}"),
        }
    }
}
/// Shared context for client commands: the underlying client plus CLI-level
/// settings such as timeouts and the default chain.
pub struct ClientContext<Env: Environment> {
    /// The underlying client.
    pub client: Arc<Client<Env>>,
    // TODO(#5083): this doesn't really need to be stored
    pub genesis_config: crate::config::GenesisConfig,
    /// Timeout used when sending requests to validators (see `make_node_options`).
    pub send_timeout: Duration,
    /// Timeout used when receiving responses from validators.
    pub recv_timeout: Duration,
    /// Delay between retries of failed validator requests.
    pub retry_delay: Duration,
    /// Maximum number of retries for validator requests.
    pub max_retries: u32,
    /// Background tasks listening for chain notifications.
    pub chain_listeners: JoinSet,
    // TODO(#5082): move this into the upstream UI layers (maybe just the CLI)
    pub default_chain: Option<ChainId>,
    /// Client timing metrics, present only when collection is enabled.
    #[cfg(not(web))]
    pub client_metrics: Option<ClientMetrics>,
}
// Adapts `ClientContext` to the interface expected by the chain listener.
impl<Env: Environment> chain_listener::ClientContext for ClientContext<Env> {
    type Environment = Env;

    fn wallet(&self) -> &Env::Wallet {
        self.client.wallet()
    }

    fn storage(&self) -> &Env::Storage {
        self.client.storage_client()
    }

    fn client(&self) -> &Arc<Client<Env>> {
        &self.client
    }

    #[cfg(not(web))]
    fn timing_sender(
        &self,
    ) -> Option<mpsc::UnboundedSender<(u64, linera_core::client::TimingType)>> {
        // Only available when timing metrics were enabled at construction time.
        self.client_metrics
            .as_ref()
            .map(|metrics| metrics.timing_sender.clone())
    }

    async fn update_wallet_for_new_chain(
        &mut self,
        chain_id: ChainId,
        owner: Option<AccountOwner>,
        timestamp: Timestamp,
        epoch: Epoch,
    ) -> Result<(), Error> {
        // Delegates to the inherent method, adapted via `make_sync` (FutureSyncExt).
        self.update_wallet_for_new_chain(chain_id, owner, timestamp, epoch)
            .make_sync()
            .await
    }

    async fn update_wallet(&mut self, client: &ChainClient<Env>) -> Result<(), Error> {
        self.update_wallet_from_client(client).make_sync().await
    }
}
impl<S, Si, W> ClientContext<linera_core::environment::Impl<S, NodeProvider, Si, W>>
where
    S: linera_core::environment::Storage,
    Si: linera_core::environment::Signer,
    W: linera_core::environment::Wallet,
{
    // not worth refactoring this because
    // https://github.com/linera-io/linera-protocol/issues/5082
    // https://github.com/linera-io/linera-protocol/issues/5083
    /// Creates a new `ClientContext` from its parts.
    ///
    /// Builds a node provider from the timeout/retry options, loads the chain
    /// IDs tracked by `wallet`, and constructs the underlying [`Client`].
    #[allow(clippy::too_many_arguments)]
    pub async fn new(
        storage: S,
        wallet: W,
        signer: Si,
        options: &Options,
        default_chain: Option<ChainId>,
        genesis_config: GenesisConfig,
        block_cache_size: usize,
        execution_state_cache_size: usize,
    ) -> Result<Self, Error> {
        #[cfg(not(web))]
        let timing_config = options.to_timing_config();
        let node_provider = NodeProvider::new(NodeOptions {
            send_timeout: options.send_timeout,
            recv_timeout: options.recv_timeout,
            retry_delay: options.retry_delay,
            max_retries: options.max_retries,
        });
        let chain_ids: Vec<_> = wallet
            .chain_ids()
            .try_collect()
            .await
            .map_err(error::Inner::wallet)?;
        // Human-readable client name summarizing the tracked chains.
        let name = match chain_ids.len() {
            0 => "Client node".to_string(),
            1 => format!("Client node for {:.8}", chain_ids[0]),
            n => format!("Client node for {:.8} and {} others", chain_ids[0], n - 1),
        };
        let client = Client::new(
            linera_core::environment::Impl {
                network: node_provider,
                storage,
                signer,
                wallet,
            },
            genesis_config.admin_id(),
            options.long_lived_services,
            chain_ids,
            name,
            options.chain_worker_ttl,
            options.sender_chain_worker_ttl,
            options.to_chain_client_options(),
            block_cache_size,
            execution_state_cache_size,
            options.to_requests_scheduler_config(),
        );
        // Timing metrics are collected only when explicitly enabled.
        #[cfg(not(web))]
        let client_metrics = if timing_config.enabled {
            Some(ClientMetrics::new(timing_config))
        } else {
            None
        };
        Ok(ClientContext {
            client: Arc::new(client),
            default_chain,
            genesis_config,
            send_timeout: options.send_timeout,
            recv_timeout: options.recv_timeout,
            retry_delay: options.retry_delay,
            max_retries: options.max_retries,
            chain_listeners: JoinSet::default(),
            #[cfg(not(web))]
            client_metrics,
        })
    }
}
impl<Env: Environment> ClientContext<Env> {
// TODO(#5084) this (and other injected dependencies) should not be re-exposed by the
// client interface
/// Returns a reference to the wallet.
pub fn wallet(&self) -> &Env::Wallet {
    self.client.wallet()
}

/// Returns the ID of the admin chain.
pub fn admin_chain(&self) -> ChainId {
    self.client.admin_chain()
}

/// Retrieve the default account. Currently this is the common account of the default
/// chain.
///
/// Panics if no default chain is set (see `default_chain`).
pub fn default_account(&self) -> Account {
    Account::chain(self.default_chain())
}

/// Retrieve the default chain.
///
/// Panics if no default chain was configured.
pub fn default_chain(&self) -> ChainId {
    self.default_chain
        .expect("default chain requested but none set")
}

/// Returns the first chain in the wallet that is not the admin chain.
///
/// Panics if the wallet contains no non-admin chain.
pub async fn first_non_admin_chain(&self) -> Result<ChainId, Error> {
    let admin_id = self.admin_chain();
    std::pin::pin!(self
        .wallet()
        .chain_ids()
        .try_filter(|chain_id| futures::future::ready(*chain_id != admin_id)))
    .next()
    .await
    .expect("No non-admin chain specified in wallet with no non-admin chain")
    .map_err(Error::wallet)
}

// TODO(#5084) this should match the `NodeProvider` from the `Environment`
/// Builds a node provider configured with this context's timeouts and retries.
pub fn make_node_provider(&self) -> NodeProvider {
    NodeProvider::new(self.make_node_options())
}

/// Collects this context's timeout/retry settings into `NodeOptions`.
fn make_node_options(&self) -> NodeOptions {
    NodeOptions {
        send_timeout: self.send_timeout,
        recv_timeout: self.recv_timeout,
        retry_delay: self.retry_delay,
        max_retries: self.max_retries,
    }
}

/// Returns the client metrics, if timing collection is enabled.
#[cfg(not(web))]
pub fn client_metrics(&self) -> Option<&ClientMetrics> {
    self.client_metrics.as_ref()
}
/// Updates the wallet entry for the client's chain from the client's latest
/// state, preserving any previously stored `follow_only` flag.
pub async fn update_wallet_from_client<Env_: Environment>(
    &self,
    client: &ChainClient<Env_>,
) -> Result<(), Error> {
    let info = client.chain_info().await?;
    let chain_id = info.chain_id;
    let mut new_chain = wallet::Chain {
        pending_proposal: client.pending_proposal().clone(),
        owner: client.preferred_owner(),
        ..info.as_ref().into()
    };
    // Keep the existing `follow_only` flag if the chain was already known.
    if let Some(chain) = self
        .wallet()
        .get(chain_id)
        .await
        .map_err(error::Inner::wallet)?
    {
        new_chain.follow_only = chain.follow_only;
    }
    self.wallet()
        .insert(chain_id, new_chain)
        .await
        .map_err(error::Inner::wallet)?;
    Ok(())
}

/// Remembers the new chain and its owner (if any) in the wallet.
pub async fn update_wallet_for_new_chain(
    &mut self,
    chain_id: ChainId,
    owner: Option<AccountOwner>,
    timestamp: Timestamp,
    epoch: Epoch,
) -> Result<(), Error> {
    // The result of `try_insert` is ignored: an already-known chain is not an
    // error here (NOTE(review): assumes `try_insert` is a no-op for existing
    // entries — confirm against the wallet implementation).
    let _ = self
        .wallet()
        .try_insert(
            chain_id,
            linera_core::wallet::Chain::new(owner, epoch, timestamp),
        )
        .await
        .map_err(error::Inner::wallet)?;
    Ok(())
}

/// Sets the `follow_only` flag for a chain in both the wallet and the in-memory client state.
pub async fn set_follow_only(&self, chain_id: ChainId, follow_only: bool) -> Result<(), Error> {
    self.wallet()
        .modify(chain_id, |chain| chain.follow_only = follow_only)
        .await
        .map_err(error::Inner::wallet)?
        // `modify` yields `None` when the chain is unknown to the wallet.
        .ok_or_else(|| error::Inner::UnknownChainId(chain_id))?;
    self.client.set_chain_follow_only(chain_id, follow_only);
    Ok(())
}
/// Processes the chain's inbox, returning all certificates created in the process.
///
/// First tries optimistically after synchronizing from validators; if a round
/// timeout is returned, starts listening for notifications and retries every
/// round until the inbox is processed.
pub async fn process_inbox(
    &mut self,
    chain_client: &ChainClient<Env>,
) -> Result<Vec<ConfirmedBlockCertificate>, Error> {
    let mut certificates = Vec::new();
    // Try processing the inbox optimistically without waiting for validator notifications.
    let (new_certificates, maybe_timeout) = {
        chain_client.synchronize_from_validators().await?;
        let result = chain_client.process_inbox_without_prepare().await;
        // The wallet is updated before propagating any error, so partial
        // progress is recorded.
        self.update_wallet_from_client(chain_client).await?;
        result?
    };
    certificates.extend(new_certificates);
    if maybe_timeout.is_none() {
        return Ok(certificates);
    }
    // Start listening for notifications, so we learn about new rounds and blocks.
    let (listener, _listen_handle, mut notification_stream) =
        chain_client.listen(ListeningMode::FullChain).await?;
    self.chain_listeners.spawn_task(listener);
    loop {
        let (new_certificates, maybe_timeout) = {
            let result = chain_client.process_inbox().await;
            self.update_wallet_from_client(chain_client).await?;
            result?
        };
        certificates.extend(new_certificates);
        if let Some(timestamp) = maybe_timeout {
            // Wait until the next round starts before retrying.
            util::wait_for_next_round(&mut notification_stream, timestamp).await
        } else {
            return Ok(certificates);
        }
    }
}
/// Assigns a newly created chain (e.g. returned by a faucet) to the given owner.
///
/// Verifies that `owner` actually owns the chain before recording it in the
/// wallet; returns a `ChainOwnership` error otherwise.
pub async fn assign_new_chain_to_key(
    &mut self,
    chain_id: ChainId,
    owner: AccountOwner,
) -> Result<(), Error> {
    self.client.track_chain(chain_id);
    let client = self.make_chain_client(chain_id).await?;
    let chain_description = client.get_chain_description().await?;
    let config = chain_description.config();
    if !config.ownership.is_owner(&owner) {
        tracing::error!(
            "The chain with the ID returned by the faucet is not owned by you. \
            Please make sure you are connecting to a genuine faucet."
        );
        return Err(error::Inner::ChainOwnership.into());
    }
    // Try to modify existing chain entry, setting the owner and disabling follow-only mode.
    let timestamp = chain_description.timestamp();
    let epoch = chain_description.config().epoch;
    let modified = self
        .wallet()
        .modify(chain_id, |chain| {
            chain.owner = Some(owner);
            chain.follow_only = false;
        })
        .await
        .map_err(error::Inner::wallet)?;
    // If the chain didn't exist, insert a new entry.
    if modified.is_none() {
        self.wallet()
            .insert(
                chain_id,
                wallet::Chain {
                    owner: Some(owner),
                    timestamp,
                    epoch: Some(epoch),
                    ..Default::default()
                },
            )
            .await
            .map_err(error::Inner::wallet)
            .context("assigning new chain")?;
    }
    Ok(())
}
/// Applies the given function to the chain client.
///
/// Updates the wallet regardless of the outcome. As long as the function returns a round
/// timeout, it will wait and retry.
pub async fn apply_client_command<E, F, Fut, T>(
    &mut self,
    client: &ChainClient<Env>,
    mut f: F,
) -> Result<T, Error>
where
    F: FnMut(&ChainClient<Env>) -> Fut,
    Fut: Future<Output = Result<ClientOutcome<T>, E>>,
    Error: From<E>,
{
    client.prepare_chain().await?;
    // Try applying f optimistically without validator notifications. Return if committed.
    let result = f(client).await;
    // The wallet is updated before any error from `f` is propagated.
    self.update_wallet_from_client(client).await?;
    if let ClientOutcome::Committed(t) = result? {
        return Ok(t);
    }
    // Start listening for notifications, so we learn about new rounds and blocks.
    let (listener, _listen_handle, mut notification_stream) =
        client.listen(ListeningMode::FullChain).await?;
    self.chain_listeners.spawn_task(listener);
    loop {
        // Try applying f. Return if committed.
        let result = f(client).await;
        self.update_wallet_from_client(client).await?;
        let timeout = match result? {
            ClientOutcome::Committed(t) => return Ok(t),
            ClientOutcome::WaitForTimeout(timeout) => timeout,
        };
        // Otherwise wait and try again in the next round.
        util::wait_for_next_round(&mut notification_stream, timeout).await;
    }
}
/// Returns the ownership of the given chain (the default chain if `None`).
pub async fn ownership(&mut self, chain_id: Option<ChainId>) -> Result<ChainOwnership, Error> {
    let chain_id = chain_id.unwrap_or_else(|| self.default_chain());
    let client = self.make_chain_client(chain_id).await?;
    let info = client.chain_info().await?;
    Ok(info.manager.ownership)
}

/// Changes the ownership of the given chain (the default chain if `None`),
/// retrying across rounds via `apply_client_command`.
pub async fn change_ownership(
    &mut self,
    chain_id: Option<ChainId>,
    ownership_config: ChainOwnershipConfig,
) -> Result<(), Error> {
    let chain_id = chain_id.unwrap_or_else(|| self.default_chain());
    let chain_client = self.make_chain_client(chain_id).await?;
    info!(
        ?ownership_config, %chain_id, preferred_owner=?chain_client.preferred_owner(),
        "Changing ownership of a chain"
    );
    let time_start = Instant::now();
    let ownership = ChainOwnership::try_from(ownership_config)?;
    let certificate = self
        .apply_client_command(&chain_client, |chain_client| {
            // Clone captured values: the closure may be called again on retry.
            let ownership = ownership.clone();
            let chain_client = chain_client.clone();
            async move {
                chain_client
                    .change_ownership(ownership)
                    .await
                    .map_err(Error::from)
                    .context("Failed to change ownership")
            }
        })
        .await?;
    let time_total = time_start.elapsed();
    info!("Operation confirmed after {} ms", time_total.as_millis());
    debug!("{:?}", certificate);
    Ok(())
}

/// Sets the preferred owner of the given chain (the default chain if `None`)
/// and records the change in the wallet.
pub async fn set_preferred_owner(
    &mut self,
    chain_id: Option<ChainId>,
    preferred_owner: AccountOwner,
) -> Result<(), Error> {
    let chain_id = chain_id.unwrap_or_else(|| self.default_chain());
    let mut chain_client = self.make_chain_client(chain_id).await?;
    let old_owner = chain_client.preferred_owner();
    info!(%chain_id, ?old_owner, %preferred_owner, "Changing preferred owner for chain");
    chain_client.set_preferred_owner(preferred_owner);
    self.update_wallet_from_client(&chain_client).await?;
    info!("New preferred owner set");
    Ok(())
}
/// Queries the validator's version info and checks compatibility with ours.
///
/// # Errors
/// `UnexpectedVersionInfo` on incompatibility; `UnavailableVersionInfo` when
/// the query itself fails.
pub async fn check_compatible_version_info(
    &self,
    address: &str,
    node: &impl ValidatorNode,
) -> Result<VersionInfo, Error> {
    match node.get_version_info().await {
        Ok(version_info) if version_info.is_compatible_with(&linera_version::VERSION_INFO) => {
            debug!(
                "Version information for validator {address}: {}",
                version_info
            );
            Ok(version_info)
        }
        Ok(version_info) => Err(error::Inner::UnexpectedVersionInfo {
            remote: Box::new(version_info),
            local: Box::new(linera_version::VERSION_INFO.clone()),
        }
        .into()),
        Err(error) => Err(error::Inner::UnavailableVersionInfo {
            address: address.to_string(),
            error: Box::new(error),
        }
        .into()),
    }
}

/// Queries the validator's network description, checks it against our genesis
/// configuration, and returns the genesis config hash on success.
pub async fn check_matching_network_description(
    &self,
    address: &str,
    node: &impl ValidatorNode,
) -> Result<CryptoHash, Error> {
    let network_description = self.genesis_config.network_description();
    match node.get_network_description().await {
        Ok(description) => {
            if description == network_description {
                Ok(description.genesis_config_hash)
            } else {
                Err(error::Inner::UnexpectedNetworkDescription {
                    remote: Box::new(description),
                    local: Box::new(network_description),
                }
                .into())
            }
        }
        Err(error) => Err(error::Inner::UnavailableNetworkDescription {
            address: address.to_string(),
            error: Box::new(error),
        }
        .into()),
    }
}

/// Queries the validator for the chain's info and, when `public_key` is given,
/// verifies the response signature against it.
pub async fn check_validator_chain_info_response(
    &self,
    public_key: Option<&ValidatorPublicKey>,
    address: &str,
    node: &impl ValidatorNode,
    chain_id: ChainId,
) -> Result<ChainInfo, Error> {
    let query = ChainInfoQuery::new(chain_id).with_manager_values();
    match node.handle_chain_info_query(query).await {
        Ok(response) => {
            debug!(
                "Validator {address} sees chain {chain_id} at block height {} and epoch {:?}",
                response.info.next_block_height, response.info.epoch,
            );
            if let Some(public_key) = public_key {
                if response.check(*public_key).is_ok() {
                    debug!("Signature for public key {public_key} is OK.");
                } else {
                    return Err(error::Inner::InvalidSignature {
                        public_key: *public_key,
                    }
                    .into());
                }
            } else {
                // Without a key the response cannot be authenticated; warn only.
                warn!("Not checking signature as public key was not given");
            }
            Ok(*response.info)
        }
        Err(error) => Err(error::Inner::UnavailableChainInfo {
            address: address.to_string(),
            chain_id,
            error: Box::new(error),
        }
        .into()),
    }
}
/// Query a validator for version info, network description, and chain info.
///
/// Returns a `ValidatorQueryResults` struct with the results of all three queries.
pub async fn query_validator(
&self,
address: &str,
node: &impl ValidatorNode,
chain_id: ChainId,
public_key: Option<&ValidatorPublicKey>,
) -> ValidatorQueryResults {
let version_info = self.check_compatible_version_info(address, node).await;
let genesis_config_hash = self.check_matching_network_description(address, node).await;
let chain_info = self
.check_validator_chain_info_response(public_key, address, node, chain_id)
.await;
ValidatorQueryResults {
version_info,
genesis_config_hash,
chain_info,
}
}
/// Query the local node for version info, network description, and chain info.
///
/// Returns a `ValidatorQueryResults` struct with the local node's information.
pub async fn query_local_node(
&self,
chain_id: ChainId,
) -> Result<ValidatorQueryResults, Error> {
let version_info = Ok(linera_version::VERSION_INFO.clone());
let genesis_config_hash = Ok(self
.genesis_config
.network_description()
.genesis_config_hash);
let chain_info = self
.make_chain_client(chain_id)
.await?
.chain_info_with_manager_values()
.await
.map(|info| *info)
.map_err(|e| e.into());
Ok(ValidatorQueryResults {
version_info,
genesis_config_hash,
chain_info,
})
}
}
#[cfg(feature = "fs")]
impl<Env: Environment> ClientContext<Env> {
pub async fn publish_module(
&mut self,
chain_client: &ChainClient<Env>,
contract: PathBuf,
service: PathBuf,
vm_runtime: VmRuntime,
) -> Result<ModuleId, Error> {
info!("Loading bytecode files");
let contract_bytecode = Bytecode::load_from_file(&contract)
.with_context(|| format!("failed to load contract bytecode from {:?}", &contract))?;
let service_bytecode = Bytecode::load_from_file(&service)
.with_context(|| format!("failed to load service bytecode from {:?}", &service))?;
info!("Publishing module");
let (blobs, module_id) =
create_bytecode_blobs(contract_bytecode, service_bytecode, vm_runtime).await;
let (module_id, _) = self
.apply_client_command(chain_client, |chain_client| {
let blobs = blobs.clone();
let chain_client = chain_client.clone();
async move {
chain_client
.publish_module_blobs(blobs, module_id)
.await
.context("Failed to publish module")
}
})
.await?;
info!("{}", "Module published successfully!");
info!("Synchronizing client and processing inbox");
self.process_inbox(chain_client).await?;
Ok(module_id)
}
pub async fn publish_data_blob(
&mut self,
chain_client: &ChainClient<Env>,
blob_path: PathBuf,
) -> Result<CryptoHash, Error> {
info!("Loading data blob file");
let blob_bytes = fs::read(&blob_path).context(format!(
"failed to load data blob bytes from {:?}",
&blob_path
))?;
info!("Publishing data blob");
self.apply_client_command(chain_client, |chain_client| {
let blob_bytes = blob_bytes.clone();
let chain_client = chain_client.clone();
async move {
chain_client
.publish_data_blob(blob_bytes)
.await
.context("Failed to publish data blob")
}
})
.await?;
info!("{}", "Data blob published successfully!");
Ok(CryptoHash::new(&BlobContent::new_data(blob_bytes)))
}
// TODO(#2490): Consider removing or renaming this.
pub async fn read_data_blob(
&mut self,
chain_client: &ChainClient<Env>,
hash: CryptoHash,
) -> Result<(), Error> {
info!("Verifying data blob");
self.apply_client_command(chain_client, |chain_client| {
let chain_client = chain_client.clone();
async move {
chain_client
.read_data_blob(hash)
.await
.context("Failed to verify data blob")
}
})
.await?;
info!("{}", "Data blob verified successfully!");
Ok(())
}
}
#[cfg(not(web))]
impl<Env: Environment> ClientContext<Env> {
pub async fn prepare_for_benchmark(
&mut self,
num_chains: usize,
tokens_per_chain: Amount,
fungible_application_id: Option<ApplicationId>,
pub_keys: Vec<AccountPublicKey>,
chains_config_path: Option<&Path>,
) -> Result<(Vec<ChainClient<Env>>, Vec<ChainId>), Error> {
let start = Instant::now();
// Below all block proposals are supposed to succeed without retries, we
// must make sure that all incoming payments have been accepted on-chain
// and that no validator is missing user certificates.
self.process_inboxes_and_force_validator_updates().await;
info!(
"Processed inboxes and forced validator updates in {} ms",
start.elapsed().as_millis()
);
let start = Instant::now();
let (benchmark_chains, chain_clients) = self
.make_benchmark_chains(
num_chains,
tokens_per_chain,
pub_keys,
chains_config_path.is_some(),
)
.await?;
info!(
"Got {} chains in {} ms",
num_chains,
start.elapsed().as_millis()
);
if let Some(id) = fungible_application_id {
let start = Instant::now();
self.supply_fungible_tokens(&benchmark_chains, id).await?;
info!(
"Supplied fungible tokens in {} ms",
start.elapsed().as_millis()
);
// Need to process inboxes to make sure the chains receive the supplied tokens.
let start = Instant::now();
for chain_client in &chain_clients {
chain_client.process_inbox().await?;
}
info!(
"Processed inboxes after supplying fungible tokens in {} ms",
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/lib.rs | linera-client/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides a convenient library for writing a Linera client application.
#![recursion_limit = "256"]
#![allow(async_fn_in_trait)]
pub mod chain_listener;
pub mod client_context;
pub use client_context::ClientContext;
#[cfg(not(web))]
pub mod client_metrics;
pub mod client_options;
pub use client_options::Options;
pub mod config;
mod error;
pub mod util;
#[cfg(not(web))]
pub mod benchmark;
#[cfg(test)]
mod unit_tests;
pub use error::Error;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/benchmark.rs | linera-client/src/benchmark.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::HashMap,
path::Path,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use linera_base::{
data_types::Amount,
identifiers::{Account, AccountOwner, ApplicationId, ChainId},
time::Instant,
};
use linera_core::{
client::chain_client::{self, ChainClient},
Environment,
};
use linera_execution::{system::SystemOperation, Operation};
use linera_sdk::abis::fungible::FungibleOperation;
use num_format::{Locale, ToFormattedString};
use prometheus_parse::{HistogramCount, Scrape, Value};
use rand::{rngs::SmallRng, seq::SliceRandom, thread_rng, SeedableRng};
use serde::{Deserialize, Serialize};
use tokio::{
sync::{mpsc, Barrier, Notify},
task, time,
};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn, Instrument as _};
use crate::chain_listener::{ChainListener, ClientContext};
const PROXY_LATENCY_P99_THRESHOLD: f64 = 400.0;
const LATENCY_METRIC_PREFIX: &str = "linera_proxy_request_latency";
#[derive(Debug, thiserror::Error)]
pub enum BenchmarkError {
#[error("Failed to join task: {0}")]
JoinError(#[from] task::JoinError),
#[error("Chain client error: {0}")]
ChainClient(#[from] chain_client::Error),
#[error("Current histogram count is less than previous histogram count")]
HistogramCountMismatch,
#[error("Expected histogram value, got {0:?}")]
ExpectedHistogramValue(Value),
#[error("Expected untyped value, got {0:?}")]
ExpectedUntypedValue(Value),
#[error("Incomplete histogram data")]
IncompleteHistogramData,
#[error("Could not compute quantile")]
CouldNotComputeQuantile,
#[error("Bucket boundaries do not match: {0} vs {1}")]
BucketBoundariesDoNotMatch(f64, f64),
#[error("Reqwest error: {0}")]
Reqwest(#[from] reqwest::Error),
#[error("Io error: {0}")]
IoError(#[from] std::io::Error),
#[error("Previous histogram snapshot does not exist: {0}")]
PreviousHistogramSnapshotDoesNotExist(String),
#[error("No data available yet to calculate p99")]
NoDataYetForP99Calculation,
#[error("Unexpected empty bucket")]
UnexpectedEmptyBucket,
#[error("Failed to send unit message: {0}")]
TokioSendUnitError(#[from] mpsc::error::SendError<()>),
#[error("Config file not found: {0}")]
ConfigFileNotFound(std::path::PathBuf),
#[error("Failed to load config file: {0}")]
ConfigLoadError(#[from] anyhow::Error),
#[error("Could not find enough chains in wallet alone: needed {0}, but only found {1}")]
NotEnoughChainsInWallet(usize, usize),
#[error("Random number generator error: {0}")]
RandError(#[from] rand::Error),
#[error("Chain listener startup error")]
ChainListenerStartupError,
}
#[derive(Debug)]
struct HistogramSnapshot {
buckets: Vec<HistogramCount>,
count: f64,
sum: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct BenchmarkConfig {
pub chain_ids: Vec<ChainId>,
}
impl BenchmarkConfig {
pub fn load_from_file<P: AsRef<Path>>(path: P) -> anyhow::Result<Self> {
let content = std::fs::read_to_string(path)?;
let config = serde_yaml::from_str(&content)?;
Ok(config)
}
pub fn save_to_file<P: AsRef<Path>>(&self, path: P) -> anyhow::Result<()> {
let content = serde_yaml::to_string(self)?;
std::fs::write(path, content)?;
Ok(())
}
}
pub struct Benchmark<Env: Environment> {
_phantom: std::marker::PhantomData<Env>,
}
impl<Env: Environment> Benchmark<Env> {
#[expect(clippy::too_many_arguments)]
pub async fn run_benchmark<C: ClientContext<Environment = Env> + 'static>(
bps: usize,
chain_clients: Vec<ChainClient<Env>>,
all_chains: Vec<ChainId>,
transactions_per_block: usize,
fungible_application_id: Option<ApplicationId>,
health_check_endpoints: Option<String>,
runtime_in_seconds: Option<u64>,
delay_between_chains_ms: Option<u64>,
chain_listener: ChainListener<C>,
shutdown_notifier: &CancellationToken,
single_destination_per_block: bool,
) -> Result<(), BenchmarkError> {
let num_chains = chain_clients.len();
let bps_counts = (0..num_chains)
.map(|_| Arc::new(AtomicUsize::new(0)))
.collect::<Vec<_>>();
let notifier = Arc::new(Notify::new());
let barrier = Arc::new(Barrier::new(num_chains + 1));
let chain_listener_future = chain_listener
.run(true) // Enabling background sync for benchmarks
.await
.map_err(|_| BenchmarkError::ChainListenerStartupError)?;
let chain_listener_handle = tokio::spawn(chain_listener_future.in_current_span());
let bps_control_task = Self::bps_control_task(
&barrier,
shutdown_notifier,
&bps_counts,
¬ifier,
transactions_per_block,
bps,
);
let (runtime_control_task, runtime_control_sender) =
Self::runtime_control_task(shutdown_notifier, runtime_in_seconds, num_chains);
let bps_initial_share = bps / num_chains;
let mut bps_remainder = bps % num_chains;
let mut join_set = task::JoinSet::<Result<(), BenchmarkError>>::new();
for (chain_idx, chain_client) in chain_clients.into_iter().enumerate() {
let chain_id = chain_client.chain_id();
let shutdown_notifier_clone = shutdown_notifier.clone();
let barrier_clone = barrier.clone();
let bps_count_clone = bps_counts[chain_idx].clone();
let notifier_clone = notifier.clone();
let runtime_control_sender_clone = runtime_control_sender.clone();
let all_chains_clone = all_chains.clone();
let bps_share = if bps_remainder > 0 {
bps_remainder -= 1;
bps_initial_share + 1
} else {
bps_initial_share
};
join_set.spawn(
async move {
Box::pin(Self::run_benchmark_internal(
chain_idx,
chain_id,
bps_share,
chain_client,
all_chains_clone,
transactions_per_block,
fungible_application_id,
shutdown_notifier_clone,
bps_count_clone,
barrier_clone,
notifier_clone,
runtime_control_sender_clone,
delay_between_chains_ms,
single_destination_per_block,
))
.await?;
Ok(())
}
.instrument(tracing::info_span!("chain_id", chain_id = ?chain_id)),
);
}
let metrics_watcher =
Self::metrics_watcher(health_check_endpoints, shutdown_notifier).await?;
// Wait for tasks and fail immediately if any task returns an error or panics
while let Some(result) = join_set.join_next().await {
let inner_result = result?;
if let Err(e) = inner_result {
error!("Benchmark task failed: {}", e);
shutdown_notifier.cancel();
join_set.abort_all();
return Err(e);
}
}
info!("All benchmark tasks completed successfully");
bps_control_task.await?;
if let Some(metrics_watcher) = metrics_watcher {
metrics_watcher.await??;
}
if let Some(runtime_control_task) = runtime_control_task {
runtime_control_task.await?;
}
if let Err(e) = chain_listener_handle.await? {
tracing::error!("chain listener error: {e}");
}
Ok(())
}
// The bps control task will control the BPS from the threads.
fn bps_control_task(
barrier: &Arc<Barrier>,
shutdown_notifier: &CancellationToken,
bps_counts: &[Arc<AtomicUsize>],
notifier: &Arc<Notify>,
transactions_per_block: usize,
bps: usize,
) -> task::JoinHandle<()> {
let shutdown_notifier = shutdown_notifier.clone();
let bps_counts = bps_counts.to_vec();
let notifier = notifier.clone();
let barrier = barrier.clone();
task::spawn(
async move {
barrier.wait().await;
let mut one_second_interval = time::interval(time::Duration::from_secs(1));
loop {
if shutdown_notifier.is_cancelled() {
info!("Shutdown signal received in bps control task");
break;
}
one_second_interval.tick().await;
let current_bps_count: usize = bps_counts
.iter()
.map(|count| count.swap(0, Ordering::Relaxed))
.sum();
notifier.notify_waiters();
let formatted_current_bps = current_bps_count.to_formatted_string(&Locale::en);
let formatted_current_tps = (current_bps_count * transactions_per_block)
.to_formatted_string(&Locale::en);
let formatted_tps_goal =
(bps * transactions_per_block).to_formatted_string(&Locale::en);
let formatted_bps_goal = bps.to_formatted_string(&Locale::en);
if current_bps_count >= bps {
info!(
"Achieved {} BPS/{} TPS",
formatted_current_bps, formatted_current_tps
);
} else {
warn!(
"Failed to achieve {} BPS/{} TPS, only achieved {} BPS/{} TPS",
formatted_bps_goal,
formatted_tps_goal,
formatted_current_bps,
formatted_current_tps,
);
}
}
info!("Exiting bps control task");
}
.instrument(tracing::info_span!("bps_control")),
)
}
async fn metrics_watcher(
health_check_endpoints: Option<String>,
shutdown_notifier: &CancellationToken,
) -> Result<Option<task::JoinHandle<Result<(), BenchmarkError>>>, BenchmarkError> {
if let Some(health_check_endpoints) = health_check_endpoints {
let metrics_addresses = health_check_endpoints
.split(',')
.map(|address| format!("http://{}/metrics", address.trim()))
.collect::<Vec<_>>();
let mut previous_histogram_snapshots: HashMap<String, HistogramSnapshot> =
HashMap::new();
let scrapes = Self::get_scrapes(&metrics_addresses).await?;
for (metrics_address, scrape) in scrapes {
previous_histogram_snapshots.insert(
metrics_address,
Self::parse_histogram(&scrape, LATENCY_METRIC_PREFIX)?,
);
}
let shutdown_notifier = shutdown_notifier.clone();
let metrics_watcher: task::JoinHandle<Result<(), BenchmarkError>> = tokio::spawn(
async move {
let mut health_interval = time::interval(time::Duration::from_secs(5));
let mut shutdown_interval = time::interval(time::Duration::from_secs(1));
loop {
tokio::select! {
biased;
_ = health_interval.tick() => {
let result = Self::validators_healthy(&metrics_addresses, &mut previous_histogram_snapshots).await;
if let Err(ref err) = result {
info!("Shutting down benchmark due to error: {}", err);
shutdown_notifier.cancel();
break;
} else if !result? {
info!("Shutting down benchmark due to unhealthy validators");
shutdown_notifier.cancel();
break;
}
}
_ = shutdown_interval.tick() => {
if shutdown_notifier.is_cancelled() {
info!("Shutdown signal received, stopping metrics watcher");
break;
}
}
}
}
Ok(())
}
.instrument(tracing::info_span!("metrics_watcher")),
);
Ok(Some(metrics_watcher))
} else {
Ok(None)
}
}
fn runtime_control_task(
shutdown_notifier: &CancellationToken,
runtime_in_seconds: Option<u64>,
num_chain_groups: usize,
) -> (Option<task::JoinHandle<()>>, Option<mpsc::Sender<()>>) {
if let Some(runtime_in_seconds) = runtime_in_seconds {
let (runtime_control_sender, mut runtime_control_receiver) =
mpsc::channel(num_chain_groups);
let shutdown_notifier = shutdown_notifier.clone();
let runtime_control_task = task::spawn(
async move {
let mut chains_started = 0;
while runtime_control_receiver.recv().await.is_some() {
chains_started += 1;
if chains_started == num_chain_groups {
break;
}
}
time::sleep(time::Duration::from_secs(runtime_in_seconds)).await;
shutdown_notifier.cancel();
}
.instrument(tracing::info_span!("runtime_control")),
);
(Some(runtime_control_task), Some(runtime_control_sender))
} else {
(None, None)
}
}
async fn validators_healthy(
metrics_addresses: &[String],
previous_histogram_snapshots: &mut HashMap<String, HistogramSnapshot>,
) -> Result<bool, BenchmarkError> {
let scrapes = Self::get_scrapes(metrics_addresses).await?;
for (metrics_address, scrape) in scrapes {
let histogram = Self::parse_histogram(&scrape, LATENCY_METRIC_PREFIX)?;
let diff = Self::diff_histograms(
previous_histogram_snapshots.get(&metrics_address).ok_or(
BenchmarkError::PreviousHistogramSnapshotDoesNotExist(metrics_address.clone()),
)?,
&histogram,
)?;
let p99 = match Self::compute_quantile(&diff.buckets, diff.count, 0.99) {
Ok(p99) => p99,
Err(BenchmarkError::NoDataYetForP99Calculation) => {
info!(
"No data available yet to calculate p99 for {}",
metrics_address
);
continue;
}
Err(e) => {
error!("Error computing p99 for {}: {}", metrics_address, e);
return Err(e);
}
};
let last_bucket_boundary = diff.buckets[diff.buckets.len() - 2].less_than;
if p99 == f64::INFINITY {
info!(
"{} -> Estimated p99 for {} is higher than the last bucket boundary of {:?} ms",
metrics_address, LATENCY_METRIC_PREFIX, last_bucket_boundary
);
} else {
info!(
"{} -> Estimated p99 for {}: {:.2} ms",
metrics_address, LATENCY_METRIC_PREFIX, p99
);
}
if p99 > PROXY_LATENCY_P99_THRESHOLD {
if p99 == f64::INFINITY {
error!(
"Proxy of validator {} unhealthy! Latency p99 is too high, it is higher than \
the last bucket boundary of {:.2} ms",
metrics_address, last_bucket_boundary
);
} else {
error!(
"Proxy of validator {} unhealthy! Latency p99 is too high: {:.2} ms",
metrics_address, p99
);
}
return Ok(false);
}
previous_histogram_snapshots.insert(metrics_address.clone(), histogram);
}
Ok(true)
}
fn diff_histograms(
previous: &HistogramSnapshot,
current: &HistogramSnapshot,
) -> Result<HistogramSnapshot, BenchmarkError> {
if current.count < previous.count {
return Err(BenchmarkError::HistogramCountMismatch);
}
let total_diff = current.count - previous.count;
let mut buckets_diff: Vec<HistogramCount> = Vec::new();
for (before, after) in previous.buckets.iter().zip(current.buckets.iter()) {
let bound_before = before.less_than;
let bound_after = after.less_than;
let cumulative_before = before.count;
let cumulative_after = after.count;
if (bound_before - bound_after).abs() > f64::EPSILON {
return Err(BenchmarkError::BucketBoundariesDoNotMatch(
bound_before,
bound_after,
));
}
let diff = (cumulative_after - cumulative_before).max(0.0);
buckets_diff.push(HistogramCount {
less_than: bound_after,
count: diff,
});
}
Ok(HistogramSnapshot {
buckets: buckets_diff,
count: total_diff,
sum: current.sum - previous.sum,
})
}
async fn get_scrapes(
metrics_addresses: &[String],
) -> Result<Vec<(String, Scrape)>, BenchmarkError> {
let mut scrapes = Vec::new();
for metrics_address in metrics_addresses {
let response = reqwest::get(metrics_address)
.await
.map_err(BenchmarkError::Reqwest)?;
let metrics = response.text().await.map_err(BenchmarkError::Reqwest)?;
let scrape = Scrape::parse(metrics.lines().map(|line| Ok(line.to_owned())))
.map_err(BenchmarkError::IoError)?;
scrapes.push((metrics_address.clone(), scrape));
}
Ok(scrapes)
}
fn parse_histogram(
scrape: &Scrape,
metric_prefix: &str,
) -> Result<HistogramSnapshot, BenchmarkError> {
let mut buckets: Vec<HistogramCount> = Vec::new();
let mut total_count: Option<f64> = None;
let mut total_sum: Option<f64> = None;
// Iterate over each metric in the scrape.
for sample in &scrape.samples {
if sample.metric == metric_prefix {
if let Value::Histogram(histogram) = &sample.value {
buckets.extend(histogram.iter().cloned());
} else {
return Err(BenchmarkError::ExpectedHistogramValue(sample.value.clone()));
}
} else if sample.metric == format!("{}_count", metric_prefix) {
if let Value::Untyped(count) = sample.value {
total_count = Some(count);
} else {
return Err(BenchmarkError::ExpectedUntypedValue(sample.value.clone()));
}
} else if sample.metric == format!("{}_sum", metric_prefix) {
if let Value::Untyped(sum) = sample.value {
total_sum = Some(sum);
} else {
return Err(BenchmarkError::ExpectedUntypedValue(sample.value.clone()));
}
}
}
match (total_count, total_sum) {
(Some(count), Some(sum)) if !buckets.is_empty() => {
buckets.sort_by(|a, b| {
a.less_than
.partial_cmp(&b.less_than)
.expect("Comparison should not fail")
});
Ok(HistogramSnapshot {
buckets,
count,
sum,
})
}
_ => Err(BenchmarkError::IncompleteHistogramData),
}
}
fn compute_quantile(
buckets: &[HistogramCount],
total_count: f64,
quantile: f64,
) -> Result<f64, BenchmarkError> {
if total_count == 0.0 {
// Had no samples in the last 5s.
return Err(BenchmarkError::NoDataYetForP99Calculation);
}
// Compute the target cumulative count.
let target = (quantile * total_count).ceil();
let mut prev_cumulative = 0.0;
let mut prev_bound = 0.0;
for bucket in buckets {
if bucket.count >= target {
let bucket_count = bucket.count - prev_cumulative;
if bucket_count == 0.0 {
// Bucket that is supposed to contain the target quantile is empty, unexpectedly.
return Err(BenchmarkError::UnexpectedEmptyBucket);
}
let fraction = (target - prev_cumulative) / bucket_count;
return Ok(prev_bound + (bucket.less_than - prev_bound) * fraction);
}
prev_cumulative = bucket.count;
prev_bound = bucket.less_than;
}
Err(BenchmarkError::CouldNotComputeQuantile)
}
#[expect(clippy::too_many_arguments)]
async fn run_benchmark_internal(
chain_idx: usize,
chain_id: ChainId,
bps: usize,
chain_client: ChainClient<Env>,
all_chains: Vec<ChainId>,
transactions_per_block: usize,
fungible_application_id: Option<ApplicationId>,
shutdown_notifier: CancellationToken,
bps_count: Arc<AtomicUsize>,
barrier: Arc<Barrier>,
notifier: Arc<Notify>,
runtime_control_sender: Option<mpsc::Sender<()>>,
delay_between_chains_ms: Option<u64>,
single_destination_per_block: bool,
) -> Result<(), BenchmarkError> {
barrier.wait().await;
if let Some(delay_between_chains_ms) = delay_between_chains_ms {
time::sleep(time::Duration::from_millis(
(chain_idx as u64) * delay_between_chains_ms,
))
.await;
}
info!("Starting benchmark for chain {:?}", chain_id);
if let Some(runtime_control_sender) = runtime_control_sender {
runtime_control_sender.send(()).await?;
}
let owner = chain_client
.identity()
.await
.map_err(BenchmarkError::ChainClient)?;
let mut destination_manager = ChainDestinationManager::new(chain_id, all_chains)?;
loop {
tokio::select! {
biased;
_ = shutdown_notifier.cancelled() => {
info!("Shutdown signal received, stopping benchmark");
break;
}
result = chain_client.execute_operations(
Self::generate_operations(
owner,
transactions_per_block,
fungible_application_id,
&mut destination_manager,
single_destination_per_block,
),
vec![]
) => {
result
.map_err(BenchmarkError::ChainClient)?
.expect("should execute block with operations");
let current_bps_count = bps_count.fetch_add(1, Ordering::Relaxed) + 1;
if current_bps_count >= bps {
notifier.notified().await;
}
}
}
}
info!("Exiting task...");
Ok(())
}
fn create_operation(
fungible_application_id: Option<ApplicationId>,
recipient_chain_id: ChainId,
owner: AccountOwner,
amount: Amount,
) -> Operation {
match fungible_application_id {
Some(application_id) => {
Self::fungible_transfer(application_id, recipient_chain_id, owner, owner, amount)
}
None => Operation::system(SystemOperation::Transfer {
owner: AccountOwner::CHAIN,
recipient: Account::chain(recipient_chain_id),
amount,
}),
}
}
/// Generate operations for a single block, randomizing destinations after each full cycle
fn generate_operations(
owner: AccountOwner,
transactions_per_block: usize,
fungible_application_id: Option<ApplicationId>,
destination_manager: &mut ChainDestinationManager,
single_destination_per_block: bool,
) -> Vec<Operation> {
let amount = Amount::from_attos(1);
if single_destination_per_block {
let recipient_chain_id = destination_manager.get_next_destination();
(0..transactions_per_block)
.map(|_| {
Self::create_operation(
fungible_application_id,
recipient_chain_id,
owner,
amount,
)
})
.collect()
} else {
let mut operations = Vec::with_capacity(transactions_per_block);
for _ in 0..transactions_per_block {
let recipient_chain_id = destination_manager.get_next_destination();
operations.push(Self::create_operation(
fungible_application_id,
recipient_chain_id,
owner,
amount,
));
}
operations
}
}
/// Closes the chain that was created for the benchmark.
pub async fn close_benchmark_chain(
chain_client: &ChainClient<Env>,
) -> Result<(), BenchmarkError> {
let start = Instant::now();
chain_client
.execute_operation(Operation::system(SystemOperation::CloseChain))
.await?
.expect("Close chain operation should not fail!");
debug!(
"Closed chain {:?} in {} ms",
chain_client.chain_id(),
start.elapsed().as_millis()
);
Ok(())
}
pub fn get_all_chains(
chains_config_path: Option<&Path>,
benchmark_chains: &[(ChainId, AccountOwner)],
) -> Result<Vec<ChainId>, BenchmarkError> {
let all_chains = if let Some(config_path) = chains_config_path {
if !config_path.exists() {
return Err(BenchmarkError::ConfigFileNotFound(
config_path.to_path_buf(),
));
}
let config = BenchmarkConfig::load_from_file(config_path)
.map_err(BenchmarkError::ConfigLoadError)?;
config.chain_ids
} else {
benchmark_chains.iter().map(|(id, _)| *id).collect()
};
Ok(all_chains)
}
/// Creates a fungible token transfer operation.
pub fn fungible_transfer(
application_id: ApplicationId,
chain_id: ChainId,
sender: AccountOwner,
receiver: AccountOwner,
amount: Amount,
) -> Operation {
let target_account = Account {
chain_id,
owner: receiver,
};
let bytes = bcs::to_bytes(&FungibleOperation::Transfer {
owner: sender,
amount,
target_account,
})
.expect("should serialize fungible token operation");
Operation::User {
application_id,
bytes,
}
}
}
struct ChainDestinationManager {
source_chain_id: ChainId,
destination_index: usize,
destination_chains: Vec<ChainId>,
rng: SmallRng,
}
impl ChainDestinationManager {
fn new(
source_chain_id: ChainId,
mut destination_chains: Vec<ChainId>,
) -> Result<Self, BenchmarkError> {
let mut rng = SmallRng::from_rng(thread_rng())?;
destination_chains.shuffle(&mut rng);
Ok(Self {
source_chain_id,
destination_index: 0,
destination_chains,
rng,
})
}
fn get_next_destination(&mut self) -> ChainId {
// Check if we've gone through all destinations
if self.destination_index >= self.destination_chains.len() {
// Reshuffle the destinations for the next cycle
self.destination_chains.shuffle(&mut self.rng);
self.destination_index = 0;
}
let destination_chain_id = self.destination_chains[self.destination_index];
self.destination_index += 1;
if destination_chain_id == self.source_chain_id {
self.get_next_destination()
} else {
destination_chain_id
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/error.rs | linera-client/src/error.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::ValidatorPublicKey, data_types::NetworkDescription, identifiers::ChainId,
};
use linera_core::node::NodeError;
use linera_version::VersionInfo;
use thiserror_context::Context;
#[cfg(not(web))]
use crate::benchmark::BenchmarkError;
use crate::util;
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub(crate) enum Inner {
#[error("I/O error: {0}")]
Io(#[from] std::io::Error),
#[error("BCS error: {0}")]
Bcs(#[from] bcs::Error),
#[error("chain error: {0}")]
Chain(#[from] linera_chain::ChainError),
#[error("chain client error: {0}")]
ChainClient(#[from] linera_core::client::chain_client::Error),
#[error("options error: {0}")]
Options(#[from] crate::client_options::Error),
#[error("wallet error: {0}")]
Wallet(#[source] Box<dyn std::error::Error + Send + Sync>),
#[error("view error: {0}")]
View(#[from] linera_views::ViewError),
#[error("error on the local node: {0}")]
LocalNode(#[from] linera_core::LocalNodeError),
#[error("remote node operation failed: {0}")]
RemoteNode(#[from] linera_core::node::NodeError),
#[error("arithmetic error: {0}")]
Arithmetic(#[from] linera_base::data_types::ArithmeticError),
#[error("incorrect chain ownership")]
ChainOwnership,
#[cfg(not(web))]
#[error("Benchmark error: {0}")]
Benchmark(#[from] BenchmarkError),
#[error("Validator version {remote} is not compatible with local version {local}.")]
UnexpectedVersionInfo {
remote: Box<VersionInfo>,
local: Box<VersionInfo>,
},
#[error("Failed to get version information for validator {address}: {error}")]
UnavailableVersionInfo {
address: String,
error: Box<NodeError>,
},
#[error("Validator's network description {remote:?} does not match our own: {local:?}.")]
UnexpectedNetworkDescription {
remote: Box<NetworkDescription>,
local: Box<NetworkDescription>,
},
#[error("Failed to get network description for validator {address}: {error}")]
UnavailableNetworkDescription {
address: String,
error: Box<NodeError>,
},
#[error("Signature for public key {public_key} is invalid.")]
InvalidSignature { public_key: ValidatorPublicKey },
#[error("Failed to get chain info for validator {address} and chain {chain_id}: {error}")]
UnavailableChainInfo {
address: String,
chain_id: ChainId,
error: Box<NodeError>,
},
#[error("Chain {0} not found in wallet")]
UnknownChainId(ChainId),
}
impl Inner {
pub fn wallet(error: impl std::error::Error + Send + Sync + 'static) -> Self {
Self::Wallet(Box::new(error) as _)
}
}
thiserror_context::impl_context!(Error(Inner));
impl Error {
pub(crate) fn wallet(error: impl std::error::Error + Send + Sync + 'static) -> Self {
Inner::wallet(error).into()
}
}
util::impl_from_infallible!(Error);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/util.rs | linera-client/src/util.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashSet, num::ParseIntError, str::FromStr};
use futures::future;
use linera_base::{
crypto::CryptoError,
data_types::{TimeDelta, Timestamp},
identifiers::{ApplicationId, ChainId, GenericApplicationId},
time::Duration,
};
use linera_core::{data_types::RoundTimeout, node::NotificationStream, worker::Reason};
use tokio_stream::StreamExt as _;
/// Parses a decimal string as a number of milliseconds into a [`Duration`].
pub fn parse_millis(s: &str) -> Result<Duration, ParseIntError> {
    s.parse().map(Duration::from_millis)
}
/// Parses a decimal string as a number of seconds into a [`Duration`].
pub fn parse_secs(s: &str) -> Result<Duration, ParseIntError> {
    s.parse().map(Duration::from_secs)
}
/// Parses a decimal string as a number of milliseconds into a [`TimeDelta`].
pub fn parse_millis_delta(s: &str) -> Result<TimeDelta, ParseIntError> {
    s.parse().map(TimeDelta::from_millis)
}
/// Parses a comma-separated list of chain IDs. An empty (or all-whitespace)
/// string yields the empty set.
pub fn parse_chain_set(s: &str) -> Result<HashSet<ChainId>, CryptoError> {
    let trimmed = s.trim();
    if trimmed.is_empty() {
        return Ok(HashSet::new());
    }
    trimmed.split(',').map(ChainId::from_str).collect()
}
/// Parses a comma-separated list of application IDs. Each entry may be either a
/// generic application ID, or a user application ID that is converted into a
/// generic one.
pub fn parse_app_set(s: &str) -> anyhow::Result<HashSet<GenericApplicationId>> {
    let mut ids = HashSet::new();
    for token in s.trim().split(',') {
        let id = match GenericApplicationId::from_str(token) {
            Ok(id) => id,
            // Fall back to parsing the entry as a user application ID.
            Err(_) => ApplicationId::from_str(token)?.into(),
        };
        ids.insert(id);
    }
    Ok(ids)
}
/// Returns after the specified time or if we receive a notification that a new round has started.
pub async fn wait_for_next_round(stream: &mut NotificationStream, timeout: RoundTimeout) {
    // Only react to notifications that could change the proposal situation: a block or
    // events at (or past) the next height, or a round later than the current one.
    let mut stream = stream.filter(|notification| match &notification.reason {
        Reason::NewBlock { height, .. } | Reason::NewEvents { height, .. } => {
            *height >= timeout.next_block_height
        }
        Reason::NewRound { round, .. } => *round > timeout.current_round,
        Reason::NewIncomingBundle { .. } | Reason::BlockExecuted { .. } => false,
    });
    // Race the first relevant notification against the round-timeout deadline;
    // whichever finishes first simply returns control to the caller.
    future::select(
        Box::pin(stream.next()),
        Box::pin(linera_base::time::timer::sleep(
            timeout.timestamp.duration_since(Timestamp::now()),
        )),
    )
    .await;
}
/// Implements `From<Infallible>` for the given type, so `?` can be used on
/// conversions that cannot fail; the impossible value is matched away.
macro_rules! impl_from_infallible {
    ($target:path) => {
        impl From<::std::convert::Infallible> for $target {
            fn from(infallible: ::std::convert::Infallible) -> Self {
                match infallible {}
            }
        }
    };
}
pub(crate) use impl_from_infallible;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/client_options.rs | linera-client/src/client_options.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashSet, fmt, iter};
use linera_base::{
data_types::{ApplicationPermissions, TimeDelta},
identifiers::{AccountOwner, ApplicationId, ChainId, GenericApplicationId},
ownership::{ChainOwnership, TimeoutConfig},
time::Duration,
};
use linera_core::{
client::{
chain_client, BlanketMessagePolicy, MessagePolicy, DEFAULT_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
DEFAULT_SENDER_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
},
node::CrossChainMessageDelivery,
DEFAULT_QUORUM_GRACE_PERIOD,
};
use linera_execution::ResourceControlPolicy;
#[cfg(not(web))]
use crate::client_metrics::TimingConfig;
use crate::util;
/// Errors that can arise while assembling client options and configuration.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("I/O error: {0}")]
    IoError(#[from] std::io::Error),
    #[error("there are {public_keys} public keys but {weights} weights")]
    MisalignedWeights { public_keys: usize, weights: usize },
    #[error("config error: {0}")]
    Config(#[from] crate::config::Error),
}
util::impl_from_infallible!(Error);
// Client/network tuning options shared by the Linera client commands. Every field
// has a default, so `Options::default()` works by parsing an empty command line.
// NOTE: a struct-level `///` doc is deliberately avoided, since clap would turn it
// into the command's help text.
#[derive(Clone, clap::Parser, serde::Deserialize, tsify::Tsify)]
#[tsify(from_wasm_abi)]
#[group(skip)]
#[serde(default, rename_all = "camelCase")]
pub struct Options {
    /// Timeout for sending queries (milliseconds)
    #[arg(long = "send-timeout-ms", default_value = "4000", value_parser = util::parse_millis)]
    pub send_timeout: Duration,
    /// Timeout for receiving responses (milliseconds)
    #[arg(long = "recv-timeout-ms", default_value = "4000", value_parser = util::parse_millis)]
    pub recv_timeout: Duration,
    /// The maximum number of incoming message bundles to include in a block proposal.
    #[arg(long, default_value = "10")]
    pub max_pending_message_bundles: usize,
    /// The duration in milliseconds after which an idle chain worker will free its memory.
    #[arg(
        long = "chain-worker-ttl-ms",
        default_value = "30000",
        env = "LINERA_CHAIN_WORKER_TTL_MS",
        value_parser = util::parse_millis,
    )]
    pub chain_worker_ttl: Duration,
    /// The duration, in milliseconds, after which an idle sender chain worker will
    /// free its memory.
    #[arg(
        long = "sender-chain-worker-ttl-ms",
        default_value = "1000",
        env = "LINERA_SENDER_CHAIN_WORKER_TTL_MS",
        value_parser = util::parse_millis
    )]
    pub sender_chain_worker_ttl: Duration,
    /// Delay increment for retrying to connect to a validator.
    #[arg(
        long = "retry-delay-ms",
        default_value = "1000",
        value_parser = util::parse_millis
    )]
    pub retry_delay: Duration,
    /// Number of times to retry connecting to a validator.
    #[arg(long, default_value = "10")]
    pub max_retries: u32,
    /// Whether to wait until a quorum of validators has confirmed that all sent cross-chain
    /// messages have been delivered.
    #[arg(long)]
    pub wait_for_outgoing_messages: bool,
    /// (EXPERIMENTAL) Whether application services can persist in some cases between queries.
    #[arg(long)]
    pub long_lived_services: bool,
    /// The policy for handling incoming messages.
    #[arg(long, default_value_t, value_enum)]
    pub blanket_message_policy: BlanketMessagePolicy,
    /// A set of chains to restrict incoming messages from. By default, messages
    /// from all chains are accepted. To reject messages from all chains, specify
    /// an empty string.
    #[arg(long, value_parser = util::parse_chain_set)]
    pub restrict_chain_ids_to: Option<HashSet<ChainId>>,
    /// A set of application IDs. If specified, only bundles with at least one message from one of
    /// these applications will be accepted.
    #[arg(long, value_parser = util::parse_app_set)]
    pub reject_message_bundles_without_application_ids: Option<HashSet<GenericApplicationId>>,
    /// A set of application IDs. If specified, only bundles where all messages are from one of
    /// these applications will be accepted.
    #[arg(long, value_parser = util::parse_app_set)]
    pub reject_message_bundles_with_other_application_ids: Option<HashSet<GenericApplicationId>>,
    /// Enable timing reports during operations
    #[cfg(not(web))]
    #[arg(long)]
    pub timings: bool,
    /// Interval in seconds between timing reports (defaults to 5)
    #[cfg(not(web))]
    #[arg(long, default_value = "5")]
    pub timing_interval: u64,
    /// An additional delay, after reaching a quorum, to wait for additional validator signatures,
    /// as a fraction of time taken to reach quorum.
    #[arg(long, default_value_t = DEFAULT_QUORUM_GRACE_PERIOD)]
    pub quorum_grace_period: f64,
    /// The delay when downloading a blob, after which we try a second validator, in milliseconds.
    #[arg(
        long = "blob-download-timeout-ms",
        default_value = "1000",
        value_parser = util::parse_millis,
    )]
    pub blob_download_timeout: Duration,
    /// The delay when downloading a batch of certificates, after which we try a second validator,
    /// in milliseconds.
    #[arg(
        long = "cert-batch-download-timeout-ms",
        default_value = "1000",
        value_parser = util::parse_millis
    )]
    pub certificate_batch_download_timeout: Duration,
    /// Maximum number of certificates that we download at a time from one validator when
    /// synchronizing one of our chains.
    #[arg(
        long,
        default_value_t = DEFAULT_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
    )]
    pub certificate_download_batch_size: u64,
    /// Maximum number of sender certificates we try to download and receive in one go
    /// when syncing sender chains.
    #[arg(
        long,
        default_value_t = DEFAULT_SENDER_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
    )]
    pub sender_certificate_download_batch_size: usize,
    /// Maximum number of tasks that can be joined concurrently in the client.
    #[arg(long, default_value = "100")]
    pub max_joined_tasks: usize,
    /// Maximum expected latency in milliseconds for score normalization.
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::MAX_ACCEPTED_LATENCY_MS,
        env = "LINERA_REQUESTS_SCHEDULER_MAX_ACCEPTED_LATENCY_MS"
    )]
    pub max_accepted_latency_ms: f64,
    /// Time-to-live for cached responses in milliseconds.
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::CACHE_TTL_MS,
        env = "LINERA_REQUESTS_SCHEDULER_CACHE_TTL_MS"
    )]
    pub cache_ttl_ms: u64,
    /// Maximum number of entries in the cache.
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::CACHE_MAX_SIZE,
        env = "LINERA_REQUESTS_SCHEDULER_CACHE_MAX_SIZE"
    )]
    pub cache_max_size: usize,
    /// Maximum latency for an in-flight request before we stop deduplicating it (in milliseconds).
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::MAX_REQUEST_TTL_MS,
        env = "LINERA_REQUESTS_SCHEDULER_MAX_REQUEST_TTL_MS"
    )]
    pub max_request_ttl_ms: u64,
    /// Smoothing factor for Exponential Moving Averages (0 < alpha < 1).
    /// Higher values give more weight to recent observations.
    /// Typical values are between 0.01 and 0.5.
    /// A value of 0.1 means that 10% of the new observation is considered
    /// and 90% of the previous average is retained.
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::ALPHA_SMOOTHING_FACTOR,
        env = "LINERA_REQUESTS_SCHEDULER_ALPHA"
    )]
    pub alpha: f64,
    /// Delay in milliseconds between starting requests to different peers.
    /// This helps to stagger requests and avoid overwhelming the network.
    #[arg(
        long,
        default_value_t = linera_core::client::requests_scheduler::STAGGERED_DELAY_MS,
        env = "LINERA_REQUESTS_SCHEDULER_ALTERNATIVE_PEERS_RETRY_DELAY_MS"
    )]
    pub alternative_peers_retry_delay_ms: u64,
    #[serde(flatten)]
    #[clap(flatten)]
    pub chain_listener_config: crate::chain_listener::ChainListenerConfig,
}
impl Default for Options {
fn default() -> Self {
use clap::Parser;
#[derive(Parser)]
struct OptionsParser {
#[clap(flatten)]
options: Options,
}
OptionsParser::try_parse_from(std::iter::empty::<std::ffi::OsString>())
.expect("Options has no required arguments")
.options
}
}
impl Options {
    /// Creates [`chain_client::Options`] with the corresponding values.
    pub(crate) fn to_chain_client_options(&self) -> chain_client::Options {
        // Bundle the four message-filtering settings into a single policy value.
        let message_policy = MessagePolicy::new(
            self.blanket_message_policy,
            self.restrict_chain_ids_to.clone(),
            self.reject_message_bundles_without_application_ids.clone(),
            self.reject_message_bundles_with_other_application_ids
                .clone(),
        );
        let cross_chain_message_delivery =
            CrossChainMessageDelivery::new(self.wait_for_outgoing_messages);
        chain_client::Options {
            max_pending_message_bundles: self.max_pending_message_bundles,
            message_policy,
            cross_chain_message_delivery,
            quorum_grace_period: self.quorum_grace_period,
            blob_download_timeout: self.blob_download_timeout,
            certificate_batch_download_timeout: self.certificate_batch_download_timeout,
            certificate_download_batch_size: self.certificate_download_batch_size,
            sender_certificate_download_batch_size: self.sender_certificate_download_batch_size,
            max_joined_tasks: self.max_joined_tasks,
        }
    }
    /// Creates [`TimingConfig`] with the corresponding values.
    #[cfg(not(web))]
    pub(crate) fn to_timing_config(&self) -> TimingConfig {
        TimingConfig {
            enabled: self.timings,
            report_interval_secs: self.timing_interval,
        }
    }
    /// Creates [`RequestsSchedulerConfig`] with the corresponding values.
    pub(crate) fn to_requests_scheduler_config(
        &self,
    ) -> linera_core::client::RequestsSchedulerConfig {
        linera_core::client::RequestsSchedulerConfig {
            max_accepted_latency_ms: self.max_accepted_latency_ms,
            cache_ttl_ms: self.cache_ttl_ms,
            cache_max_size: self.cache_max_size,
            max_request_ttl_ms: self.max_request_ttl_ms,
            alpha: self.alpha,
            retry_delay_ms: self.alternative_peers_retry_delay_ms,
        }
    }
}
// Command-line arguments describing the desired ownership configuration of a
// chain; converted into a `ChainOwnership` via `TryFrom`. (Plain `//` comment so
// clap help output is unaffected.)
#[derive(Debug, Clone, clap::Args)]
pub struct ChainOwnershipConfig {
    /// The new super owners.
    #[arg(long, num_args(0..))]
    pub super_owners: Vec<AccountOwner>,
    /// The new regular owners.
    #[arg(long, num_args(0..))]
    pub owners: Vec<AccountOwner>,
    /// The leader of the first single-leader round. If not set, this is random like other rounds.
    #[arg(long)]
    pub first_leader: Option<AccountOwner>,
    /// Weights for the new owners.
    ///
    /// If they are specified there must be exactly one weight for each owner.
    /// If no weights are given, every owner will have weight 100.
    #[arg(long, num_args(0..))]
    pub owner_weights: Vec<u64>,
    /// The number of rounds in which every owner can propose blocks, i.e. the first round
    /// number in which only a single designated leader is allowed to propose blocks.
    #[arg(long)]
    pub multi_leader_rounds: Option<u32>,
    /// Whether the multi-leader rounds are unrestricted, i.e. not limited to chain owners.
    /// This should only be `true` on chains with restrictive application permissions and an
    /// application-based mechanism to select block proposers.
    #[arg(long)]
    pub open_multi_leader_rounds: bool,
    /// The duration of the fast round, in milliseconds.
    #[arg(long = "fast-round-ms", value_parser = util::parse_millis_delta)]
    pub fast_round_duration: Option<TimeDelta>,
    /// The duration of the first single-leader and all multi-leader rounds.
    #[arg(
        long = "base-timeout-ms",
        default_value = "10000",
        value_parser = util::parse_millis_delta
    )]
    pub base_timeout: TimeDelta,
    /// The number of milliseconds by which the timeout increases after each
    /// single-leader round.
    #[arg(
        long = "timeout-increment-ms",
        default_value = "1000",
        value_parser = util::parse_millis_delta
    )]
    pub timeout_increment: TimeDelta,
    /// The age of an incoming tracked or protected message after which the validators start
    /// transitioning the chain to fallback mode, in milliseconds.
    #[arg(
        long = "fallback-duration-ms",
        default_value = "86400000", // 1 day
        value_parser = util::parse_millis_delta
    )]
    pub fallback_duration: TimeDelta,
}
impl TryFrom<ChainOwnershipConfig> for ChainOwnership {
    type Error = Error;

    /// Validates and converts the CLI arguments into a [`ChainOwnership`].
    ///
    /// Fails with [`Error::MisalignedWeights`] if weights were given but their
    /// count does not match the number of owners.
    fn try_from(config: ChainOwnershipConfig) -> Result<ChainOwnership, Error> {
        // Weights, when present, must match the owners one-to-one.
        if !config.owner_weights.is_empty() && config.owner_weights.len() != config.owners.len() {
            return Err(Error::MisalignedWeights {
                public_keys: config.owners.len(),
                weights: config.owner_weights.len(),
            });
        }
        let ChainOwnershipConfig {
            super_owners,
            owners,
            first_leader,
            owner_weights,
            multi_leader_rounds,
            fast_round_duration,
            open_multi_leader_rounds,
            base_timeout,
            timeout_increment,
            fallback_duration,
        } = config;
        // Owners without an explicit weight default to 100.
        let weights = owner_weights.into_iter().chain(iter::repeat(100));
        Ok(ChainOwnership {
            super_owners: super_owners.into_iter().collect(),
            owners: owners.into_iter().zip(weights).collect(),
            first_leader,
            // Unlimited multi-leader rounds unless a bound was given.
            multi_leader_rounds: multi_leader_rounds.unwrap_or(u32::MAX),
            open_multi_leader_rounds,
            timeout_config: TimeoutConfig {
                fast_round_duration,
                base_timeout,
                timeout_increment,
                fallback_duration,
            },
        })
    }
}
// Command-line arguments describing application permissions for a chain;
// converted into an `ApplicationPermissions` via `From`. (Plain `//` comment so
// clap help output is unaffected.)
#[derive(Debug, Clone, clap::Args)]
pub struct ApplicationPermissionsConfig {
    /// If present, only operations from the specified applications are allowed, and
    /// no system operations. Otherwise all operations are allowed.
    #[arg(long)]
    pub execute_operations: Option<Vec<ApplicationId>>,
    /// At least one operation or incoming message from each of these applications must occur in
    /// every block.
    #[arg(long)]
    pub mandatory_applications: Option<Vec<ApplicationId>>,
    /// These applications are allowed to close the current chain using the system API.
    #[arg(long)]
    pub close_chain: Option<Vec<ApplicationId>>,
    /// These applications are allowed to change the application permissions on the current chain
    /// using the system API.
    #[arg(long)]
    pub change_application_permissions: Option<Vec<ApplicationId>>,
    /// These applications are allowed to call services as oracles on the current chain using the
    /// system API.
    #[arg(long)]
    pub call_service_as_oracle: Option<Vec<ApplicationId>>,
    /// These applications are allowed to make HTTP requests on the current chain using the system
    /// API.
    #[arg(long)]
    pub make_http_requests: Option<Vec<ApplicationId>>,
}
impl From<ApplicationPermissionsConfig> for ApplicationPermissions {
fn from(config: ApplicationPermissionsConfig) -> ApplicationPermissions {
ApplicationPermissions {
execute_operations: config.execute_operations,
mandatory_applications: config.mandatory_applications.unwrap_or_default(),
close_chain: config.close_chain.unwrap_or_default(),
change_application_permissions: config
.change_application_permissions
.unwrap_or_default(),
call_service_as_oracle: config.call_service_as_oracle,
make_http_requests: config.make_http_requests,
}
}
}
// Named presets for the resource control (fee) policy; expanded into a concrete
// `ResourceControlPolicy` by `into_policy`. (Plain `//` comments so clap's
// value-enum help text is unaffected.)
#[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq, Eq)]
pub enum ResourceControlPolicyConfig {
    NoFees,
    Testnet,
    // Test-only presets below.
    #[cfg(with_testing)]
    OnlyFuel,
    #[cfg(with_testing)]
    AllCategories,
}
impl ResourceControlPolicyConfig {
    /// Materializes the selected preset into a concrete [`ResourceControlPolicy`].
    pub fn into_policy(self) -> ResourceControlPolicy {
        match self {
            Self::NoFees => ResourceControlPolicy::no_fees(),
            Self::Testnet => ResourceControlPolicy::testnet(),
            #[cfg(with_testing)]
            Self::OnlyFuel => ResourceControlPolicy::only_fuel(),
            #[cfg(with_testing)]
            Self::AllCategories => ResourceControlPolicy::all_categories(),
        }
    }
}
impl std::str::FromStr for ResourceControlPolicyConfig {
    type Err = String;

    /// Parses a preset name, delegating to clap's `ValueEnum` parsing with
    /// case-insensitive matching.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        <Self as clap::ValueEnum>::from_str(input, true)
    }
}
impl fmt::Display for ResourceControlPolicyConfig {
    /// The `Debug` rendering (the variant name) doubles as the display form.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/client_metrics.rs | linera-client/src/client_metrics.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use hdrhistogram::Histogram;
use linera_core::client::TimingType;
use tokio::{sync::mpsc, task, time};
use tracing::{debug, info, warn};
/// Configuration for periodic client timing reports.
#[derive(Debug, Clone)]
pub struct TimingConfig {
    /// Whether timing collection and reporting is enabled at all.
    pub enabled: bool,
    /// How often, in seconds, to print a report when new data has arrived.
    pub report_interval_secs: u64,
}
#[cfg(not(web))]
impl Default for TimingConfig {
fn default() -> Self {
Self {
enabled: false,
report_interval_secs: 5,
}
}
}
/// Errors that can occur while creating or recording timing histograms.
#[derive(Debug, thiserror::Error)]
pub enum ClientMetricsError {
    #[error("Failed to create histogram: {0}")]
    HistogramCreationError(#[from] hdrhistogram::CreationError),
    #[error("Failed to record histogram: {0}")]
    HistogramRecordError(#[from] hdrhistogram::RecordError),
}
/// Latency histograms for the two sub-phases of executing a block.
pub struct ExecuteBlockTimingsHistograms {
    pub submit_block_proposal_histogram: Histogram<u64>,
    pub update_validators_histogram: Histogram<u64>,
}
impl ExecuteBlockTimingsHistograms {
    /// Creates empty histograms with 2 significant value digits of precision.
    pub fn new() -> Result<Self, ClientMetricsError> {
        Ok(Self {
            submit_block_proposal_histogram: Histogram::<u64>::new(2)?,
            update_validators_histogram: Histogram::<u64>::new(2)?,
        })
    }
}
/// Latency histograms for block execution, plus its nested sub-phases.
pub struct ExecuteOperationsTimingsHistograms {
    pub execute_block_histogram: Histogram<u64>,
    pub execute_block_timings_histograms: ExecuteBlockTimingsHistograms,
}
impl ExecuteOperationsTimingsHistograms {
    /// Creates empty histograms with 2 significant value digits of precision.
    pub fn new() -> Result<Self, ClientMetricsError> {
        Ok(Self {
            execute_block_histogram: Histogram::<u64>::new(2)?,
            execute_block_timings_histograms: ExecuteBlockTimingsHistograms::new()?,
        })
    }
}
/// Top of the timing-histogram hierarchy: operation execution, and nested below
/// it, block execution and its sub-phases.
pub struct BlockTimingsHistograms {
    pub execute_operations_histogram: Histogram<u64>,
    pub execute_operations_timings_histograms: ExecuteOperationsTimingsHistograms,
}
impl BlockTimingsHistograms {
    /// Creates the full (empty) histogram hierarchy.
    pub fn new() -> Result<Self, ClientMetricsError> {
        Ok(Self {
            execute_operations_histogram: Histogram::<u64>::new(2)?,
            execute_operations_timings_histograms: ExecuteOperationsTimingsHistograms::new()?,
        })
    }
    /// Records one duration (in milliseconds) into the histogram matching the
    /// given timing type; the nesting mirrors the phase hierarchy above.
    pub fn record_timing(
        &mut self,
        duration_ms: u64,
        timing_type: TimingType,
    ) -> Result<(), ClientMetricsError> {
        match timing_type {
            TimingType::ExecuteOperations => {
                self.execute_operations_histogram.record(duration_ms)?;
            }
            TimingType::ExecuteBlock => {
                self.execute_operations_timings_histograms
                    .execute_block_histogram
                    .record(duration_ms)?;
            }
            TimingType::SubmitBlockProposal => {
                self.execute_operations_timings_histograms
                    .execute_block_timings_histograms
                    .submit_block_proposal_histogram
                    .record(duration_ms)?;
            }
            TimingType::UpdateValidators => {
                self.execute_operations_timings_histograms
                    .execute_block_timings_histograms
                    .update_validators_histogram
                    .record(duration_ms)?;
            }
        }
        Ok(())
    }
}
/// Collects timing samples from the client over a channel and periodically logs
/// quantile reports. Dropping the sender side ends the background task.
#[cfg(not(web))]
pub struct ClientMetrics {
    pub timing_config: TimingConfig,
    pub timing_sender: mpsc::UnboundedSender<(u64, TimingType)>,
    pub timing_task: task::JoinHandle<()>,
}
#[cfg(not(web))]
impl ClientMetrics {
    /// Spawns the background collection task and returns the metrics handle.
    pub fn new(timing_config: TimingConfig) -> Self {
        let (tx, rx) = mpsc::unbounded_channel();
        let timing_task = tokio::spawn(Self::timing_collection(
            rx,
            timing_config.report_interval_secs,
        ));
        Self {
            timing_config,
            timing_sender: tx,
            timing_task,
        }
    }
    /// Drains `(duration_ms, timing_type)` samples into histograms and prints a
    /// report at most once per interval, and only when new data has arrived.
    async fn timing_collection(
        mut receiver: mpsc::UnboundedReceiver<(u64, TimingType)>,
        report_interval_secs: u64,
    ) {
        let mut histograms =
            BlockTimingsHistograms::new().expect("Failed to create timing histograms");
        // Avoid printing identical reports when no new samples arrived.
        let mut report_needed = false;
        let mut report_timer = time::interval(time::Duration::from_secs(report_interval_secs));
        // Skip (rather than burst) ticks that were missed while busy.
        report_timer.set_missed_tick_behavior(time::MissedTickBehavior::Skip);
        loop {
            tokio::select! {
                timing_data = receiver.recv() => {
                    match timing_data {
                        Some((duration_ms, timing_type)) => {
                            if let Err(e) = histograms.record_timing(duration_ms, timing_type) {
                                warn!("Failed to record timing data: {}", e);
                            } else {
                                report_needed = true;
                            }
                        }
                        None => {
                            // All senders dropped: shut the task down.
                            debug!("Timing collection task shutting down - sender closed");
                            break;
                        }
                    }
                }
                _ = report_timer.tick() => {
                    if report_needed {
                        Self::print_timing_report(&histograms);
                        report_needed = false;
                    }
                }
            }
        }
    }
    /// Logs p99/p95/p90/p50 latencies for each phase, indented to mirror the
    /// phase hierarchy.
    fn print_timing_report(histograms: &BlockTimingsHistograms) {
        for quantile in [0.99, 0.95, 0.90, 0.50] {
            let formatted_quantile = (quantile * 100.0) as usize;
            info!(
                "Execute operations p{}: {} ms",
                formatted_quantile,
                histograms
                    .execute_operations_histogram
                    .value_at_quantile(quantile)
            );
            info!(
                "  └─ Execute block p{}: {} ms",
                formatted_quantile,
                histograms
                    .execute_operations_timings_histograms
                    .execute_block_histogram
                    .value_at_quantile(quantile)
            );
            info!(
                "      ├─ Submit block proposal p{}: {} ms",
                formatted_quantile,
                histograms
                    .execute_operations_timings_histograms
                    .execute_block_timings_histograms
                    .submit_block_proposal_histogram
                    .value_at_quantile(quantile)
            );
            info!(
                "      └─ Update validators p{}: {} ms",
                formatted_quantile,
                histograms
                    .execute_operations_timings_histograms
                    .execute_block_timings_histograms
                    .update_validators_histogram
                    .value_at_quantile(quantile)
            );
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/chain_listener.rs | linera-client/src/chain_listener.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{btree_map::Entry, BTreeMap, BTreeSet},
sync::Arc,
time::Duration,
};
use futures::{
future::{join_all, select_all},
lock::Mutex,
Future, FutureExt as _, StreamExt,
};
use linera_base::{
crypto::{CryptoHash, Signer},
data_types::{ChainDescription, Epoch, Timestamp},
identifiers::{AccountOwner, BlobType, ChainId},
task::NonBlockingFuture,
util::future::FutureSyncExt as _,
};
use linera_core::{
client::{
chain_client::{self, ChainClient},
AbortOnDrop, ListeningMode,
},
node::NotificationStream,
worker::{Notification, Reason},
Environment, Wallet,
};
use linera_storage::{Clock as _, Storage as _};
use tokio::sync::mpsc::UnboundedReceiver;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, warn, Instrument as _};
use crate::error::{self, Error};
// Behavioral knobs for the chain listener: inbox processing and artificial
// delays around notification handling. (Plain `//` comment so clap help output
// is unaffected.)
#[derive(Default, Debug, Clone, clap::Args, serde::Serialize, serde::Deserialize, tsify::Tsify)]
#[serde(rename_all = "camelCase")]
pub struct ChainListenerConfig {
    /// Do not create blocks automatically to receive incoming messages. Instead, wait for
    /// an explicit mutation `processInbox`.
    #[serde(default)]
    #[arg(
        long = "listener-skip-process-inbox",
        env = "LINERA_LISTENER_SKIP_PROCESS_INBOX"
    )]
    pub skip_process_inbox: bool,
    /// Wait before processing any notification (useful for testing).
    #[serde(default)]
    #[arg(
        long = "listener-delay-before-ms",
        default_value = "0",
        env = "LINERA_LISTENER_DELAY_BEFORE"
    )]
    pub delay_before_ms: u64,
    /// Wait after processing any notification (useful for rate limiting).
    #[serde(default)]
    #[arg(
        long = "listener-delay-after-ms",
        default_value = "0",
        env = "LINERA_LISTENER_DELAY_AFTER"
    )]
    pub delay_after_ms: u64,
}
/// Shorthand for the chain client type produced by a given [`ClientContext`].
type ContextChainClient<C> = ChainClient<<C as ClientContext>::Environment>;
// On native targets the trait additionally requires `Send + Sync` futures.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
#[allow(async_fn_in_trait)]
pub trait ClientContext {
    type Environment: linera_core::Environment;
    /// Gives access to the wallet backing this context.
    fn wallet(&self) -> &<Self::Environment as linera_core::Environment>::Wallet;
    /// Gives access to the storage backing this context.
    fn storage(&self) -> &<Self::Environment as linera_core::Environment>::Storage;
    /// Gives access to the shared node client.
    fn client(&self) -> &Arc<linera_core::client::Client<Self::Environment>>;
    /// Returns the ID of the admin chain, as known to the client.
    fn admin_chain(&self) -> ChainId {
        self.client().admin_chain()
    }
    /// Gets the timing sender for benchmarking, if available.
    #[cfg(not(web))]
    fn timing_sender(
        &self,
    ) -> Option<tokio::sync::mpsc::UnboundedSender<(u64, linera_core::client::TimingType)>>;
    #[cfg(web)]
    fn timing_sender(
        &self,
    ) -> Option<tokio::sync::mpsc::UnboundedSender<(u64, linera_core::client::TimingType)>> {
        None
    }
    /// Creates a chain client for `chain_id`, seeded with the chain state stored
    /// in the wallet (or defaults when the chain is not in the wallet).
    fn make_chain_client(
        &self,
        chain_id: ChainId,
    ) -> impl Future<Output = Result<ChainClient<Self::Environment>, Error>> {
        async move {
            let chain = self
                .wallet()
                .get(chain_id)
                .make_sync()
                .await
                .map_err(error::Inner::wallet)?
                .unwrap_or_default();
            Ok(self.client().create_chain_client(
                chain_id,
                chain.block_hash,
                chain.next_block_height,
                chain.pending_proposal,
                chain.owner,
                self.timing_sender(),
                chain.follow_only,
            ))
        }
    }
    /// Records a newly created chain in the wallet.
    async fn update_wallet_for_new_chain(
        &mut self,
        chain_id: ChainId,
        owner: Option<AccountOwner>,
        timestamp: Timestamp,
        epoch: Epoch,
    ) -> Result<(), Error>;
    /// Persists the given client's chain state back into the wallet.
    async fn update_wallet(&mut self, client: &ContextChainClient<Self>) -> Result<(), Error>;
}
#[allow(async_fn_in_trait)]
pub trait ClientContextExt: ClientContext {
    /// Creates a chain client for every chain currently in the wallet.
    async fn clients(&self) -> Result<Vec<ContextChainClient<Self>>, Error> {
        use futures::stream::TryStreamExt as _;
        self.wallet()
            .chain_ids()
            .map_err(|e| error::Inner::wallet(e).into())
            .and_then(|chain_id| self.make_chain_client(chain_id))
            .try_collect()
            .await
    }
}
impl<T: ClientContext> ClientContextExt for T {}
/// A chain client together with the stream of notifications from the local node.
///
/// A background task listens to the validators and updates the local node, so any updates to
/// this chain will trigger a notification. The background task is terminated when this gets
/// dropped.
struct ListeningClient<C: ClientContext> {
    /// The chain client.
    client: ContextChainClient<C>,
    /// The abort handle for the task that listens to the validators.
    abort_handle: AbortOnDrop,
    /// The listening task's join handle.
    join_handle: NonBlockingFuture<()>,
    /// The stream of notifications from the local node.
    notification_stream: Arc<Mutex<NotificationStream>>,
    /// This is only `< u64::MAX` when the client is waiting for a timeout to process the inbox.
    timeout: Timestamp,
    /// The mode of listening to this chain.
    listening_mode: ListeningMode,
}
impl<C: ClientContext> ListeningClient<C> {
    /// Bundles a chain client with the handles of its listening task; the
    /// timeout starts out "unset" (`u64::MAX`).
    fn new(
        client: ContextChainClient<C>,
        abort_handle: AbortOnDrop,
        join_handle: NonBlockingFuture<()>,
        notification_stream: NotificationStream,
        listening_mode: ListeningMode,
    ) -> Self {
        Self {
            client,
            abort_handle,
            join_handle,
            #[allow(clippy::arc_with_non_send_sync)] // Only `Send` with `futures-util/alloc`.
            notification_stream: Arc::new(Mutex::new(notification_stream)),
            timeout: Timestamp::from(u64::MAX),
            listening_mode,
        }
    }
    /// Aborts the listening task and waits for it to finish.
    async fn stop(self) {
        // TODO(#4965): this is unnecessary: the join handle now also acts as an abort handle
        drop(self.abort_handle);
        self.join_handle.await;
    }
}
/// Commands to the chain listener.
pub enum ListenerCommand {
    /// Command: start listening to the given chains, using specified listening modes.
    Listen(BTreeMap<ChainId, ListeningMode>),
    /// Command: stop listening to the given chains.
    StopListening(BTreeSet<ChainId>),
}
/// A `ChainListener` is a process that listens to notifications from validators and reacts
/// appropriately.
pub struct ChainListener<C: ClientContext> {
    /// Shared client context (wallet, storage, node client).
    context: Arc<Mutex<C>>,
    /// Storage handle, used e.g. to read confirmed blocks.
    storage: <C::Environment as Environment>::Storage,
    /// Listener behavior configuration (inbox processing, delays).
    config: Arc<ChainListenerConfig>,
    /// The per-chain clients currently being listened to.
    listening: BTreeMap<ChainId, ListeningClient<C>>,
    /// Map from publishing chain to subscriber chains.
    /// Events emitted on the _publishing chain_ are of interest to the _subscriber chains_.
    event_subscribers: BTreeMap<ChainId, BTreeSet<ChainId>>,
    /// Signals the listener to shut down.
    cancellation_token: CancellationToken,
    /// The channel through which the listener can receive commands.
    command_receiver: UnboundedReceiver<ListenerCommand>,
}
impl<C: ClientContext + 'static> ChainListener<C> {
/// Creates a new chain listener given client chains.
pub fn new(
config: ChainListenerConfig,
context: Arc<Mutex<C>>,
storage: <C::Environment as Environment>::Storage,
cancellation_token: CancellationToken,
command_receiver: UnboundedReceiver<ListenerCommand>,
) -> Self {
Self {
storage,
context,
config: Arc::new(config),
listening: Default::default(),
event_subscribers: Default::default(),
cancellation_token,
command_receiver,
}
}
    /// Runs the chain listener.
    ///
    /// Synchronizes the admin chain, decides a listening mode for every wallet
    /// chain, optionally spawns per-chain background sync tasks, and returns the
    /// main event-loop future.
    #[instrument(skip(self))]
    pub async fn run(
        mut self,
        enable_background_sync: bool,
    ) -> Result<impl Future<Output = Result<(), Error>>, Error> {
        let chain_ids = {
            let guard = self.context.lock().await;
            let admin_chain_id = guard.admin_chain();
            // Make sure our view of the admin chain is up to date before listening.
            guard
                .make_chain_client(admin_chain_id)
                .await?
                .synchronize_chain_state(admin_chain_id)
                .await?;
            // Follow-only wallet chains are listened to passively; owned chains fully.
            let mut chain_ids: BTreeMap<_, _> = guard
                .wallet()
                .items()
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(|result| {
                    let (chain_id, chain) = result?;
                    let mode = if chain.follow_only {
                        ListeningMode::FollowChain
                    } else {
                        ListeningMode::FullChain
                    };
                    Ok((chain_id, mode))
                })
                .collect::<Result<BTreeMap<_, _>, _>>()
                .map_err(
                    |e: <<C::Environment as Environment>::Wallet as Wallet>::Error| {
                        crate::error::Inner::Wallet(Box::new(e) as _)
                    },
                )?;
            // If the admin chain is not in the wallet, add it as follow-only since we
            // typically don't own it.
            chain_ids
                .entry(admin_chain_id)
                .or_insert(ListeningMode::FollowChain);
            chain_ids
        };
        // Start background tasks to sync received certificates for each chain,
        // if enabled.
        if enable_background_sync {
            let context = Arc::clone(&self.context);
            let cancellation_token = self.cancellation_token.clone();
            for (chain_id, mode) in chain_ids.iter() {
                // Only fully-listened (owned) chains get a background sync task.
                if mode != &ListeningMode::FullChain {
                    continue;
                }
                let context = Arc::clone(&context);
                let cancellation_token = cancellation_token.clone();
                let chain_id = *chain_id;
                linera_base::task::spawn(async move {
                    if let Err(e) = Self::background_sync_received_certificates(
                        context,
                        chain_id,
                        cancellation_token,
                    )
                    .await
                    {
                        warn!("Background sync failed for chain {chain_id}: {e}");
                    }
                })
                // Detach: the task is bounded by the cancellation token, not this handle.
                .forget();
            }
        }
        // The returned future is the listener's main loop.
        Ok(async {
            self.listen_recursively(chain_ids).await?;
            loop {
                match self.next_action().await? {
                    Action::ProcessInbox(chain_id) => self.maybe_process_inbox(chain_id).await?,
                    Action::Notification(notification) => {
                        self.process_notification(notification).await?
                    }
                    Action::Stop => break,
                }
            }
            // Gracefully stop all per-chain listening tasks before returning.
            join_all(self.listening.into_values().map(|client| client.stop())).await;
            Ok(())
        })
    }
    /// Processes a notification, updating local chains and validators as needed.
    async fn process_notification(&mut self, notification: Notification) -> Result<(), Error> {
        // Optional artificial delay before handling (configured for tests).
        Self::sleep(self.config.delay_before_ms).await;
        let Some(listening_mode) = self
            .listening
            .get(&notification.chain_id)
            .map(|listening_client| &listening_client.listening_mode)
        else {
            warn!(
                ?notification,
                "ChainListener::process_notification: got a notification without listening to the chain"
            );
            return Ok(());
        };
        // Follow-only chains ignore notifications that only matter to block proposers.
        if !listening_mode.is_relevant(&notification.reason) {
            debug!(
                reason = ?notification.reason,
                "ChainListener: ignoring notification due to listening mode"
            );
            return Ok(());
        }
        match &notification.reason {
            Reason::NewIncomingBundle { .. } => {
                self.maybe_process_inbox(notification.chain_id).await?;
            }
            Reason::NewRound { .. } => {
                self.update_validators(&notification).await?;
            }
            Reason::NewBlock { hash, .. } => {
                self.update_wallet(notification.chain_id).await?;
                // Only fully-listened chains track new child chains, event
                // subscriptions, and event processing.
                if matches!(listening_mode, ListeningMode::FullChain) {
                    self.add_new_chains(*hash).await?;
                    let publishers = self
                        .update_event_subscriptions(notification.chain_id)
                        .await?;
                    if !publishers.is_empty() {
                        self.listen_recursively(publishers).await?;
                        self.maybe_process_inbox(notification.chain_id).await?;
                    }
                    self.process_new_events(notification.chain_id).await?;
                }
            }
            Reason::NewEvents { .. } => {
                self.process_new_events(notification.chain_id).await?;
            }
            Reason::BlockExecuted { .. } => {}
        }
        // Optional artificial delay after handling (rate limiting).
        Self::sleep(self.config.delay_after_ms).await;
        Ok(())
    }
/// If any new chains were created by the given block, and we have a key pair for them,
/// add them to the wallet and start listening for notifications. (This is not done for
/// fallback owners, as those would have to monitor all chains anyway.)
async fn add_new_chains(&mut self, hash: CryptoHash) -> Result<(), Error> {
    // Load the confirmed block this notification referred to.
    let block = self
        .storage
        .read_confirmed_block(hash)
        .await?
        .ok_or(chain_client::Error::MissingConfirmedBlock(hash))?
        .into_block();
    // A newly created chain shows up as a `ChainDescription` blob among the
    // block's created blobs; its chain ID is the blob's hash.
    let blobs = block.created_blobs().into_iter();
    let new_chains = blobs
        .filter_map(|(blob_id, blob)| {
            if blob_id.blob_type == BlobType::ChainDescription {
                let chain_desc: ChainDescription = bcs::from_bytes(blob.content().bytes())
                    .expect("ChainDescription should deserialize correctly");
                Some((ChainId(blob_id.hash), chain_desc))
            } else {
                None
            }
        })
        .collect::<Vec<_>>();
    if new_chains.is_empty() {
        return Ok(());
    }
    let mut new_ids = BTreeMap::new();
    let mut context_guard = self.context.lock().await;
    for (new_chain_id, chain_desc) in new_chains {
        // Only track chains for which our signer holds one of the owners' keys.
        for chain_owner in chain_desc.config().ownership.all_owners() {
            if context_guard
                .client()
                .signer()
                .contains_key(chain_owner)
                .await
                .map_err(chain_client::Error::signer_failure)?
            {
                context_guard
                    .update_wallet_for_new_chain(
                        new_chain_id,
                        Some(*chain_owner),
                        block.header.timestamp,
                        block.header.epoch,
                    )
                    .await?;
                new_ids.insert(new_chain_id, ListeningMode::FullChain);
            }
        }
    }
    // Release the context lock before `listen_recursively`, which locks it again.
    drop(context_guard);
    self.listen_recursively(new_ids).await?;
    Ok(())
}
/// Processes the inboxes of all chains that are subscribed to `chain_id`.
async fn process_new_events(&mut self, chain_id: ChainId) -> Result<(), Error> {
    // Clone the subscriber set up front so no borrow of `self` is held
    // across the awaits below.
    if let Some(subscriber_ids) = self.event_subscribers.get(&chain_id).cloned() {
        for subscriber_id in subscriber_ids {
            self.maybe_process_inbox(subscriber_id).await?;
        }
    }
    Ok(())
}
/// Starts listening for notifications about the given chains, and any chains that publish
/// event streams those chains are subscribed to.
async fn listen_recursively(
&mut self,
mut chain_ids: BTreeMap<ChainId, ListeningMode>,
) -> Result<(), Error> {
while let Some((chain_id, listening_mode)) = chain_ids.pop_first() {
for (new_chain_id, new_listening_mode) in self.listen(chain_id, listening_mode).await? {
match chain_ids.entry(new_chain_id) {
Entry::Vacant(vacant) => {
vacant.insert(new_listening_mode);
}
Entry::Occupied(mut occupied) => {
occupied.get_mut().extend(Some(new_listening_mode));
}
}
}
}
Ok(())
}
/// Background task that syncs received certificates in small batches.
/// This discovers unacknowledged sender blocks gradually without overwhelming the system.
#[instrument(skip(context, cancellation_token))]
async fn background_sync_received_certificates(
    context: Arc<Mutex<C>>,
    chain_id: ChainId,
    cancellation_token: CancellationToken,
) -> Result<(), Error> {
    info!("Starting background certificate sync for chain {chain_id}");
    // Hold the context lock only long enough to build the chain client.
    let client = {
        let context_guard = context.lock().await;
        context_guard.make_chain_client(chain_id).await?
    };
    client
        .find_received_certificates(Some(cancellation_token))
        .await?;
    Ok(())
}
/// Starts listening for notifications about the given chain.
///
/// Returns all publishing chains, that we also need to listen to.
async fn listen(
    &mut self,
    chain_id: ChainId,
    mut listening_mode: ListeningMode,
) -> Result<BTreeMap<ChainId, ListeningMode>, Error> {
    // Already listening with an equal-or-stronger mode: nothing to do.
    if self
        .listening
        .get(&chain_id)
        .is_some_and(|existing_client| existing_client.listening_mode >= listening_mode)
    {
        return Ok(BTreeMap::new());
    }
    // Merge the requested mode with any existing one so coverage never narrows.
    listening_mode.extend(
        self.listening
            .get(&chain_id)
            .map(|existing_client| existing_client.listening_mode.clone()),
    );
    let client = self
        .context
        .lock()
        .await
        .make_chain_client(chain_id)
        .await?;
    let (listener, abort_handle, notification_stream) =
        client.listen(listening_mode.clone()).await?;
    // Run the listener task in the background, preserving the current tracing span.
    let join_handle = linera_base::task::spawn(listener.in_current_span());
    let listening_client = ListeningClient::new(
        client,
        abort_handle,
        join_handle,
        notification_stream,
        listening_mode,
    );
    // NOTE(review): this replaces any previous `ListeningClient` for the chain;
    // presumably dropping the old one aborts its listener task — confirm.
    self.listening.insert(chain_id, listening_client);
    let publishing_chains = self.update_event_subscriptions(chain_id).await?;
    self.maybe_process_inbox(chain_id).await?;
    Ok(publishing_chains)
}
/// Updates the event subscribers map, and returns all publishing chains we need to listen to.
async fn update_event_subscriptions(
    &mut self,
    chain_id: ChainId,
) -> Result<BTreeMap<ChainId, ListeningMode>, Error> {
    let listening_client = self.listening.get_mut(&chain_id).expect("missing client");
    // Untracked chains don't need their event subscriptions followed.
    if !listening_client.client.is_tracked() {
        return Ok(BTreeMap::new());
    }
    // Each publisher is listened to in events-only mode for its streams.
    let mut publishing_chains = BTreeMap::new();
    for (publisher_id, streams) in listening_client.client.event_stream_publishers().await? {
        publishing_chains.insert(publisher_id, ListeningMode::EventsOnly(streams));
    }
    // Record that `chain_id` subscribes to each publisher's event streams.
    for publisher_id in publishing_chains.keys() {
        self.event_subscribers
            .entry(*publisher_id)
            .or_default()
            .insert(chain_id);
    }
    Ok(publishing_chains)
}
/// Returns the next notification or timeout to process.
async fn next_action(&mut self) -> Result<Action, Error> {
    loop {
        // Earliest pending inbox-retry timeout across all chains.
        let (timeout_chain_id, timeout) = self.next_timeout()?;
        // One future per chain, each yielding the next notification from its stream.
        let notification_futures = self
            .listening
            .values_mut()
            .map(|client| {
                let stream = client.notification_stream.clone();
                Box::pin(async move { stream.lock().await.next().await })
            })
            .collect::<Vec<_>>();
        futures::select! {
            // Graceful shutdown requested.
            () = self.cancellation_token.cancelled().fuse() => {
                return Ok(Action::Stop);
            }
            // A round timeout fired: retry processing that chain's inbox.
            () = self.storage.clock().sleep_until(timeout).fuse() => {
                return Ok(Action::ProcessInbox(timeout_chain_id));
            }
            // External command; if the channel is closed, park forever
            // instead of busy-looping on `None`.
            command = self.command_receiver.recv().then(async |maybe_command| {
                if let Some(command) = maybe_command {
                    command
                } else {
                    std::future::pending().await
                }
            }).fuse() => {
                match command {
                    ListenerCommand::Listen(new_chains) => {
                        debug!(?new_chains, "received command to listen to new chains");
                        self.listen_recursively(new_chains).await?;
                    }
                    ListenerCommand::StopListening(chains) => {
                        debug!(?chains, "received command to stop listening to chains");
                        for chain_id in chains {
                            debug!(%chain_id, "stopping the listener for chain");
                            let Some(listening_client) = self.listening.remove(&chain_id) else {
                                error!(%chain_id, "attempted to drop a non-existent listener");
                                continue;
                            };
                            listening_client.stop().await;
                        }
                    }
                }
            }
            // A notification arrived on one of the streams.
            (maybe_notification, index, _) = select_all(notification_futures).fuse() => {
                let Some(notification) = maybe_notification else {
                    // Stream closed: drop that listener and keep looping.
                    // `index` matches the iteration order of `self.listening` above.
                    let chain_id = *self.listening.keys().nth(index).unwrap();
                    warn!("Notification stream for {chain_id} closed");
                    let Some(listening_client) = self.listening.remove(&chain_id) else {
                        error!(%chain_id, "attempted to drop a non-existent listener");
                        continue;
                    };
                    listening_client.stop().await;
                    continue;
                };
                return Ok(Action::Notification(notification));
            }
        }
    }
}
/// Returns the next timeout to process, and the chain to which it applies.
fn next_timeout(&self) -> Result<(ChainId, Timestamp), Error> {
    // Pick the listening client with the earliest pending timeout.
    let earliest = self
        .listening
        .iter()
        .min_by_key(|(_, client)| client.timeout);
    let (chain_id, client) = earliest.expect("No chains left to listen to");
    Ok((*chain_id, client.timeout))
}
/// Updates the validators about the chain.
///
/// Fix: restores `&notification.reason` where the `&not` prefix had been corrupted
/// into the `¬` character (HTML-entity mojibake), which is not valid Rust.
async fn update_validators(&self, notification: &Notification) -> Result<(), Error> {
    let chain_id = notification.chain_id;
    let listening_client = self.listening.get(&chain_id).expect("missing client");
    // For a new block, also fetch its certificate so validators receive it.
    let latest_block = if let Reason::NewBlock { hash, .. } = &notification.reason {
        listening_client.client.read_certificate(*hash).await.ok()
    } else {
        None
    };
    if let Err(error) = listening_client
        .client
        .update_validators(None, latest_block)
        .await
    {
        // Failing to notify validators is non-fatal; log and continue.
        warn!(
            "Failed to update validators about the local chain after \
            receiving {notification:?} with error: {error:?}"
        );
    }
    Ok(())
}
/// Updates the wallet based on the client for this chain.
async fn update_wallet(&self, chain_id: ChainId) -> Result<(), Error> {
    let listening_client = self.listening.get(&chain_id).expect("missing client");
    let mut context_guard = self.context.lock().await;
    context_guard.update_wallet(&listening_client.client).await?;
    Ok(())
}
/// Processes the inbox, unless `skip_process_inbox` is set.
///
/// If no block can be produced because we are not the round leader, a timeout is returned
/// for when to retry; otherwise `u64::MAX` is returned.
///
/// The wallet is persisted with any blocks that processing the inbox added. An error
/// is returned if persisting the wallet fails.
async fn maybe_process_inbox(&mut self, chain_id: ChainId) -> Result<(), Error> {
    if self.config.skip_process_inbox {
        debug!("Not processing inbox for {chain_id:.8} due to listener configuration");
        return Ok(());
    }
    let listening_client = self.listening.get_mut(&chain_id).expect("missing client");
    if !listening_client.client.is_tracked() {
        debug!("Not processing inbox for non-tracked chain {chain_id:.8}");
        return Ok(());
    }
    if listening_client.client.preferred_owner().is_none() {
        debug!("Not processing inbox for non-owned chain {chain_id:.8}");
        return Ok(());
    }
    debug!("Processing inbox for {chain_id:.8}");
    // Reset the retry timeout; it is lowered again below if we must wait for a round.
    listening_client.timeout = Timestamp::from(u64::MAX);
    match listening_client
        .client
        .process_inbox_without_prepare()
        .await
    {
        // Note: the pattern shadows the outer `chain_id` with the one from the error.
        Err(chain_client::Error::CannotFindKeyForChain(chain_id)) => {
            debug!(%chain_id, "Cannot find key for chain");
        }
        // Other failures are logged but do not stop the listener.
        Err(error) => warn!(%error, "Failed to process inbox."),
        Ok((certs, None)) => info!(
            %chain_id,
            created_block_count = %certs.len(),
            "done processing inbox",
        ),
        Ok((certs, Some(new_timeout))) => {
            info!(
                %chain_id,
                created_block_count = %certs.len(),
                timeout = %new_timeout,
                "waiting for round timeout before continuing to process the inbox",
            );
            // Retry once the round timeout expires.
            listening_client.timeout = new_timeout.timestamp;
        }
    }
    // Persist any blocks produced above into the wallet.
    let mut context_guard = self.context.lock().await;
    context_guard
        .update_wallet(&listening_client.client)
        .await?;
    Ok(())
}
/// Sleeps for the given number of milliseconds, if greater than 0.
async fn sleep(delay_ms: u64) {
    if delay_ms == 0 {
        return;
    }
    linera_base::time::timer::sleep(Duration::from_millis(delay_ms)).await;
}
}
/// The next unit of work for the listener loop, as decided by `next_action`.
enum Action {
    /// A round timeout fired: retry processing this chain's inbox.
    ProcessInbox(ChainId),
    /// A notification arrived from one of the listened-to chains.
    Notification(Notification),
    /// The cancellation token fired: shut the loop down.
    Stop,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/unit_tests/mod.rs | linera-client/src/unit_tests/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod chain_listener;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-client/src/unit_tests/chain_listener.rs | linera-client/src/unit_tests/chain_listener.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{sync::Arc, time::Duration};
use futures::{lock::Mutex, FutureExt as _};
use linera_base::{
crypto::{AccountPublicKey, InMemorySigner},
data_types::{Amount, BlockHeight, Epoch, TimeDelta, Timestamp},
identifiers::{Account, AccountOwner, ChainId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_core::{
client::{chain_client, ChainClient, Client},
environment,
test_utils::{MemoryStorageBuilder, StorageBuilder as _, TestBuilder},
wallet,
};
use linera_storage::Storage;
use tokio_util::sync::CancellationToken;
use crate::{
chain_listener::{self, ChainListener, ChainListenerConfig, ClientContext as _},
config::GenesisConfig,
Error,
};
/// Minimal test context wrapping a `Client` for the chain-listener unit tests.
struct ClientContext {
    client: Arc<Client<environment::Test>>,
}
impl chain_listener::ClientContext for ClientContext {
    type Environment = environment::Test;
    fn wallet(&self) -> &environment::TestWallet {
        self.client.wallet()
    }
    fn storage(&self) -> &environment::TestStorage {
        self.client.storage_client()
    }
    fn client(&self) -> &Arc<linera_core::client::Client<Self::Environment>> {
        &self.client
    }
    // Tests do not collect timing metrics.
    fn timing_sender(
        &self,
    ) -> Option<tokio::sync::mpsc::UnboundedSender<(u64, linera_core::client::TimingType)>> {
        None
    }
    /// Adds the chain to the wallet; an already-present entry is left untouched.
    async fn update_wallet_for_new_chain(
        &mut self,
        chain_id: ChainId,
        owner: Option<AccountOwner>,
        timestamp: Timestamp,
        epoch: Epoch,
    ) -> Result<(), Error> {
        // `try_insert`'s result is deliberately ignored: duplicates are fine here.
        let _ = self
            .wallet()
            .try_insert(chain_id, wallet::Chain::new(owner, epoch, timestamp));
        Ok(())
    }
    /// Overwrites the wallet entry with the client's current chain state.
    async fn update_wallet(
        &mut self,
        client: &ChainClient<environment::Test>,
    ) -> Result<(), Error> {
        let info = client.chain_info().await?;
        let client_owner = client.preferred_owner();
        let pending_proposal = client.pending_proposal().clone();
        let follow_only = client.is_follow_only();
        self.wallet().insert(
            info.chain_id,
            wallet::Chain {
                pending_proposal,
                owner: client_owner,
                follow_only,
                ..info.as_ref().into()
            },
        );
        Ok(())
    }
}
/// Tests that the chain listener, if there is a message in the inbox, will continue requesting
/// timeout certificates until it becomes the leader and can process the inbox.
#[test_log::test(tokio::test)]
async fn test_chain_listener() -> anyhow::Result<()> {
    // Create two chains.
    let mut signer = InMemorySigner::new(Some(42));
    let key_pair = signer.generate_new();
    let owner: AccountOwner = key_pair.into();
    let config = ChainListenerConfig::default();
    let storage_builder = MemoryStorageBuilder::default();
    let clock = storage_builder.clock().clone();
    let mut builder = TestBuilder::new(storage_builder, 4, 1, signer.clone()).await?;
    let client0 = builder.add_root_chain(0, Amount::ONE).await?;
    let chain_id0 = client0.chain_id();
    let client1 = builder.add_root_chain(1, Amount::ONE).await?;
    // Start a chain listener for chain 0 with a new key.
    let genesis_config = GenesisConfig::new_testing(&builder);
    let admin_id = genesis_config.admin_id();
    let storage = builder.make_storage().await?;
    let epoch0 = client0.chain_info().await?.epoch;
    let epoch1 = client1.chain_info().await?.epoch;
    let mut context = ClientContext {
        client: Arc::new(Client::new(
            environment::Impl {
                storage: storage.clone(),
                network: builder.make_node_provider(),
                signer,
                wallet: environment::TestWallet::default(),
            },
            admin_id,
            false,
            [chain_id0],
            format!("Client node for {:.8}", chain_id0),
            Duration::from_secs(30),
            Duration::from_secs(1),
            chain_client::Options::test_default(),
            5_000,
            10_000,
            linera_core::client::RequestsSchedulerConfig::default(),
        )),
    };
    // Register both chains in the wallet so the listener picks them up.
    context
        .update_wallet_for_new_chain(chain_id0, Some(owner), clock.current_time(), epoch0)
        .await?;
    context
        .update_wallet_for_new_chain(
            client1.chain_id(),
            client1.preferred_owner(),
            clock.current_time(),
            epoch1,
        )
        .await?;
    // Transfer ownership of chain 0 to the chain listener and some other key. The listener will
    // be leader in ~10% of the rounds.
    let owners = [(owner, 1), (AccountPublicKey::test_key(1).into(), 9)];
    let timeout_config = TimeoutConfig {
        base_timeout: TimeDelta::from_secs(1),
        timeout_increment: TimeDelta::ZERO,
        ..TimeoutConfig::default()
    };
    client0
        .change_ownership(ChainOwnership::multiple(owners, 0, timeout_config))
        .await?;
    let context = Arc::new(Mutex::new(context));
    let cancellation_token = CancellationToken::new();
    let child_token = cancellation_token.child_token();
    let chain_listener = ChainListener::new(
        config,
        context,
        storage,
        child_token,
        tokio::sync::mpsc::unbounded_channel().1,
    )
    .run(false) // Unit test doesn't need background sync
    .await
    .unwrap();
    let handle = linera_base::task::spawn(async move { chain_listener.await.unwrap() });
    // Transfer one token to chain 0. The listener should eventually become leader and receive
    // the message.
    let recipient0 = Account::chain(chain_id0);
    client1
        .transfer(AccountOwner::CHAIN, Amount::ONE, recipient0)
        .await?;
    // Poll until the listener has processed the transfer, advancing the test clock
    // one second per attempt so round timeouts fire; give up after 30 tries.
    for i in 0.. {
        client0.synchronize_from_validators().boxed().await?;
        let balance = client0.local_balance().await?;
        if balance == Amount::from_tokens(2) {
            break;
        }
        clock.add(TimeDelta::from_secs(1));
        if i == 30 {
            panic!("Unexpected local balance: {}", balance);
        }
    }
    cancellation_token.cancel();
    handle.await;
    Ok(())
}
/// Tests that a follow-only chain listener does NOT process its inbox when receiving messages.
/// We set up a listener with two chains: chain A (follow-only but owned) and chain B (FullChain).
/// The sender sends a message to A first, then to B. Once the listener processes B's inbox
/// (which we can observe), we know it must have also seen A's notification - but A's inbox
/// should remain unprocessed because it's follow-only (not because of missing ownership).
#[test_log::test(tokio::test)]
async fn test_chain_listener_follow_only() -> anyhow::Result<()> {
    let signer = InMemorySigner::new(Some(42));
    let config = ChainListenerConfig::default();
    let storage_builder = MemoryStorageBuilder::default();
    let clock = storage_builder.clock().clone();
    let mut builder = TestBuilder::new(storage_builder, 4, 1, signer.clone()).await?;
    // Create three chains: sender, chain_a (will be follow-only), chain_b (will be FullChain).
    let sender = builder.add_root_chain(0, Amount::from_tokens(10)).await?;
    let chain_a = builder.add_root_chain(1, Amount::ZERO).await?;
    let chain_b = builder.add_root_chain(2, Amount::ZERO).await?;
    let chain_a_id = chain_a.chain_id();
    let chain_b_id = chain_b.chain_id();
    let genesis_config = GenesisConfig::new_testing(&builder);
    let admin_id = genesis_config.admin_id();
    let storage = builder.make_storage().await?;
    let chain_a_info = chain_a.chain_info().await?;
    let chain_b_info = chain_b.chain_info().await?;
    let context = ClientContext {
        client: Arc::new(Client::new(
            environment::Impl {
                storage: storage.clone(),
                network: builder.make_node_provider(),
                signer,
                wallet: environment::TestWallet::default(),
            },
            admin_id,
            false,
            [chain_a_id, chain_b_id],
            "Client node with follow-only and owned chains".to_string(),
            Duration::from_secs(30),
            Duration::from_secs(1),
            chain_client::Options::test_default(),
            5_000,
            10_000,
            linera_core::client::RequestsSchedulerConfig::default(),
        )),
    };
    // Add chain A as follow-only. We *do* own it, but follow_only should prevent inbox processing.
    context.wallet().insert(
        chain_a_id,
        wallet::Chain {
            owner: chain_a.preferred_owner(),
            block_hash: chain_a_info.block_hash,
            next_block_height: chain_a_info.next_block_height,
            timestamp: clock.current_time(),
            pending_proposal: None,
            epoch: Some(chain_a_info.epoch),
            follow_only: true,
        },
    );
    // Add chain B as FullChain mode.
    context.wallet().insert(
        chain_b_id,
        wallet::Chain {
            owner: chain_b.preferred_owner(),
            block_hash: chain_b_info.block_hash,
            next_block_height: chain_b_info.next_block_height,
            timestamp: clock.current_time(),
            pending_proposal: None,
            epoch: Some(chain_b_info.epoch),
            follow_only: false,
        },
    );
    let context = Arc::new(Mutex::new(context));
    let cancellation_token = CancellationToken::new();
    let child_token = cancellation_token.child_token();
    let chain_listener = ChainListener::new(
        config,
        context.clone(),
        storage.clone(),
        child_token,
        tokio::sync::mpsc::unbounded_channel().1,
    )
    .run(false) // Unit test doesn't need background sync
    .await
    .unwrap();
    let handle = linera_base::task::spawn(async move { chain_listener.await.unwrap() });
    // Send a message to chain A first (follow-only). This notification should be ignored.
    sender
        .transfer(AccountOwner::CHAIN, Amount::ONE, Account::chain(chain_a_id))
        .await?;
    // Then send a message to chain B (owned). The listener should process this inbox.
    sender
        .transfer(AccountOwner::CHAIN, Amount::ONE, Account::chain(chain_b_id))
        .await?;
    // Wait until chain B processes its inbox. Once this happens, we know the listener
    // has seen both notifications (A's came first), but should have only acted on B's.
    // `yield_now` lets the listener task make progress between checks.
    for i in 0.. {
        tokio::task::yield_now().await;
        chain_b.synchronize_from_validators().await?;
        let chain_b_info = chain_b.chain_info().await?;
        // Chain B should have height 1 after processing its inbox.
        if chain_b_info.next_block_height >= BlockHeight::from(1) {
            break;
        }
        if i >= 50 {
            panic!(
                "Chain B's inbox was not processed by the listener. Expected height >= 1, got {}",
                chain_b_info.next_block_height
            );
        }
    }
    // Now verify that chain A's inbox was NOT processed (follow-only ignores NewIncomingBundle).
    chain_a.synchronize_from_validators().await?;
    let chain_a_info = chain_a.chain_info().await?;
    assert_eq!(
        chain_a_info.next_block_height,
        BlockHeight::ZERO,
        "Follow-only chain A should not have had its inbox processed"
    );
    // Verify that the listener's wallet still shows chain A at height 0.
    let wallet_chain_a = context.lock().await.wallet().get(chain_a_id).unwrap();
    assert_eq!(
        wallet_chain_a.next_block_height,
        BlockHeight::ZERO,
        "Wallet should show chain A at height 0"
    );
    // Now have the original chain_a client process its inbox, creating a block.
    chain_a.process_inbox().await?;
    // Wait for the chain listener to see the NewBlock notification and update its wallet.
    // This verifies that follow-only mode DOES process NewBlock notifications.
    for i in 0.. {
        tokio::task::yield_now().await;
        let wallet_chain_a = context.lock().await.wallet().get(chain_a_id).unwrap();
        if wallet_chain_a.next_block_height >= BlockHeight::from(1) {
            break;
        }
        if i >= 50 {
            panic!(
                "Wallet not updated after chain A created a block. Expected height >= 1, got {}",
                wallet_chain_a.next_block_height
            );
        }
    }
    // Verify the wallet was updated and follow_only is preserved.
    let wallet_chain_a = context.lock().await.wallet().get(chain_a_id).unwrap();
    assert!(
        wallet_chain_a.follow_only,
        "follow_only flag should be preserved in wallet"
    );
    cancellation_token.cancel();
    handle.await;
    Ok(())
}
/// Tests that the chain listener always listens to the admin chain.
#[test_log::test(tokio::test)]
async fn test_chain_listener_admin_chain() -> anyhow::Result<()> {
    let signer = InMemorySigner::new(Some(42));
    let config = ChainListenerConfig::default();
    let storage_builder = MemoryStorageBuilder::default();
    let mut builder = TestBuilder::new(storage_builder, 4, 1, signer.clone()).await?;
    let client0 = builder.add_root_chain(0, Amount::ONE).await?;
    let genesis_config = GenesisConfig::new_testing(&builder);
    let admin_id = genesis_config.admin_id();
    let storage = builder.make_storage().await?;
    // Note the empty chain list: the listener has no wallet chains of its own.
    let context = ClientContext {
        client: Arc::new(Client::new(
            environment::Impl {
                storage: storage.clone(),
                network: builder.make_node_provider(),
                signer,
                wallet: environment::TestWallet::default(),
            },
            admin_id,
            false,
            [],
            "Client node with no chains".to_string(),
            Duration::from_secs(30),
            Duration::from_secs(1),
            chain_client::Options::test_default(),
            5_000,
            10_000,
            linera_core::client::RequestsSchedulerConfig::default(),
        )),
    };
    let context = Arc::new(Mutex::new(context));
    let cancellation_token = CancellationToken::new();
    let child_token = cancellation_token.child_token();
    let chain_listener = ChainListener::new(
        config,
        context,
        storage.clone(),
        child_token,
        tokio::sync::mpsc::unbounded_channel().1,
    )
    .run(false) // Unit test doesn't need background sync
    .await
    .unwrap();
    let handle = linera_base::task::spawn(async move { chain_listener.await.unwrap() });
    let committee = builder.initial_committee.clone();
    // Stage a committee (this will emit events that the listener should be listening to).
    let certificate = client0.stage_new_committee(committee).await?.unwrap();
    // Poll (with growing sleeps) until the certificate appears in local storage,
    // which shows the listener followed the admin chain; give up after 5 tries.
    for i in 0.. {
        linera_base::time::timer::sleep(Duration::from_secs(i)).await;
        let result = storage.read_certificate(certificate.hash()).await?;
        if result.as_ref() == Some(&certificate) {
            break;
        }
        if i == 5 {
            panic!("Failed to learn about new block.");
        }
    }
    cancellation_token.cancel();
    handle.await;
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-explorer/src/entrypoint.rs | linera-explorer/src/entrypoint.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use serde::Serialize;
use serde_json::Value;
use serde_wasm_bindgen::from_value;
use wasm_bindgen::prelude::*;
use super::js_utils::{getf, js_to_json, setf, SER};
use crate::reqwest_client;
/// Auxiliary recursive function for `forge_arg`.
///
/// Renders the `_input` value attached to an introspected GraphQL type into
/// GraphQL argument syntax. Returns `None` for deprecated arguments or when
/// there is no `_input` value to render.
fn forge_arg_type(arg: &Value, non_null: bool) -> Option<String> {
    let deprecated = matches!(arg.get("isDeprecated"), Some(Value::Bool(true)));
    if deprecated {
        return None;
    };
    match arg["kind"].as_str() {
        Some("SCALAR") => {
            if non_null {
                // Inside a NON_NULL wrapper the value is rendered unconditionally
                // (a missing `_input` indexes to JSON null and prints as "null").
                Some(arg["_input"].to_string())
            } else {
                arg.get("_input").map(Value::to_string)
            }
        }
        // Wrapper type: recurse into the wrapped type, marking it non-null.
        Some("NON_NULL") => forge_arg_type(&arg["ofType"], true),
        Some("LIST") => {
            // Render each element; elements without a value are silently skipped.
            let args = arg["_input"]
                .as_array()
                .unwrap()
                .iter()
                .filter_map(|x| forge_arg_type(x, false))
                .collect::<Vec<_>>();
            Some(format!("[{}]", args.join(", ")))
        }
        Some("ENUM") => arg["_input"].as_str().map(|x| x.to_string()),
        Some("INPUT_OBJECT") => {
            // Render `{name: value, ...}` from the object's input fields.
            let args = arg["inputFields"]
                .as_array()
                .unwrap()
                .iter()
                .filter_map(|x| {
                    let name = x["name"].as_str().unwrap();
                    forge_arg_type(&x["type"], false).map(|arg| format!("{}: {}", name, arg))
                })
                .collect::<Vec<_>>();
            Some(format!("{{{}}}", args.join(", ")))
        }
        _ => None,
    }
}
/// Forges a query argument as `name: value`, or `None` if the argument
/// has no renderable value.
fn forge_arg(arg: &Value) -> Option<String> {
    let rendered = forge_arg_type(&arg["type"], false)?;
    let name = arg["name"].as_str().expect("name is not a string");
    Some(format!("{}: {}", name, rendered))
}
/// Forges query arguments into `(a: 1,b: 2)` form, or the empty string
/// when no argument renders.
fn forge_args(args: Vec<Value>) -> String {
    let rendered: Vec<String> = args.iter().filter_map(forge_arg).collect();
    if rendered.is_empty() {
        String::new()
    } else {
        format!("({})", rendered.join(","))
    }
}
/// Auxiliary recursive function for `forge_response`.
///
/// Builds the GraphQL selection-set text for an introspected output type.
/// `name` is the field name to emit (falling back to the type's own name);
/// `root` marks the top-level call, which emits only the braces block.
fn forge_response_type(output: &Value, name: Option<&str>, root: bool) -> Option<String> {
    let is_non_null_or_list = matches!(output["kind"].as_str(), Some("NON_NULL") | Some("LIST"));
    let incl = matches!(output.get("_include"), Some(Value::Bool(true)));
    let deprecated = matches!(output.get("isDeprecated"), Some(Value::Bool(true)));
    // Skip fields not selected (`_include`), except wrappers and the root,
    // and always skip deprecated fields.
    if !(incl || root || is_non_null_or_list) || deprecated {
        return None;
    }
    match output["kind"].as_str().unwrap() {
        "SCALAR" | "ENUM" => Some(
            name.unwrap_or_else(|| output["name"].as_str().unwrap())
                .to_string(),
        ),
        // Wrapper types delegate to the wrapped type.
        "NON_NULL" | "LIST" => forge_response_type(&output["ofType"], name, root),
        "OBJECT" => {
            let fields = output["fields"]
                .as_array()
                .unwrap()
                .iter()
                .filter_map(|elt: &Value| {
                    forge_response_type(&elt["type"], elt["name"].as_str(), false)
                })
                .collect::<Vec<_>>();
            if root {
                // Root object: just the selection block, no field name.
                Some(format!("{{ {} }}", fields.join(" ")))
            } else {
                Some(format!(
                    "{} {{ {} }}",
                    name.unwrap_or_else(|| output["name"].as_str().unwrap()),
                    fields.join(" ")
                ))
            }
        }
        _ => None,
    }
}
/// Forges the query's response selection set, or the empty string when no
/// selection is needed (scalar-like results).
fn forge_response(output: &Value) -> String {
    if empty_response_aux(output) {
        return String::new();
    }
    forge_response_type(output, None, true).unwrap_or_default()
}
/// Queries mutations or queries for applications.
///
/// Builds a GraphQL request from the introspection data in `query`, posts it to
/// the application's `link` URL, and writes the outcome back onto the JS `app`
/// object via its `result` / `errors` fields.
#[wasm_bindgen]
pub async fn query(app: JsValue, query: JsValue, kind: String) {
    let link =
        from_value::<String>(getf(&app, "link")).expect("cannot parse application vue argument");
    let fetch_json = js_to_json(&query);
    let name = fetch_json["name"].as_str().unwrap();
    let args = fetch_json["args"].as_array().unwrap().to_vec();
    let args = forge_args(args);
    let input = format!("{}{}", name, args);
    let response = forge_response(&fetch_json["type"]);
    // Final shape: `<kind> { name(args) selection }`, e.g. `query { ... }`.
    let body =
        serde_json::json!({ "query": format!("{} {{{} {}}}", kind, input, response) }).to_string();
    let client = reqwest_client();
    match client.post(&link).body(body).send().await {
        Err(e) => setf(&app, "errors", &JsValue::from_str(&e.to_string())),
        Ok(response) => match response.text().await {
            Err(e) => setf(&app, "errors", &JsValue::from_str(&e.to_string())),
            Ok(response_txt) => match serde_json::from_str::<Value>(&response_txt) {
                Ok(res_json) => {
                    setf(&app, "result", &res_json["data"].serialize(&SER).unwrap());
                    // Forward GraphQL errors (or null when absent) to the UI.
                    setf(
                        &app,
                        "errors",
                        &res_json
                            .get("errors")
                            .unwrap_or(&Value::Null)
                            .serialize(&SER)
                            .unwrap(),
                    );
                }
                // Non-JSON response: surface the raw text as the error.
                Err(_) => setf(&app, "errors", &JsValue::from_str(&response_txt)),
            },
        },
    }
}
/// Checks if response fields are not needed.
#[wasm_bindgen]
pub fn empty_response(output: JsValue) -> bool {
    empty_response_aux(&js_to_json(&output))
}
/// Auxiliary recursive function for `empty_response`: true when the type
/// (after unwrapping LIST/NON_NULL) is a scalar or has no kind at all.
fn empty_response_aux(output: &Value) -> bool {
    let Some(kind) = output.get("kind") else {
        return true;
    };
    match kind.as_str() {
        Some("SCALAR") => true,
        Some("LIST") | Some("NON_NULL") => empty_response_aux(&output["ofType"]),
        _ => false,
    }
}
#[cfg(test)]
mod tests {
    use serde_json::json;
    /// `forge_response` should emit only `_include: true` fields, recursing into
    /// nested objects; the excluded `field2` must be dropped.
    #[test]
    fn test_forge_response() {
        let json = json!(
        { "_include":true, "args":null, "kind":"OBJECT", "name":"OutputObject", "ofType":null, "type":null,
          "fields":[
            { "args":[], "name":"field1", "ofType":null,
              "type": { "_include":true, "args":null, "isDeprecated":false, "kind":"SCALAR", "name":"String", "ofType":null, "type":null } },
            { "args":[], "name": "field2", "ofType":null,
              "type": { "_include":false, "args":null, "isDeprecated":false, "kind":"SCALAR", "name":"Int", "ofType":null, "type":null } },
            { "args":[], "name":"field3", "ofType":null,
              "type": { "_include":true, "args":null, "isDeprecated":false, "kind":"OBJECT", "name":"field3Output", "ofType":null, "type":null,
                "fields":[
                  { "args":[], "isDeprecated":false, "name":"field31", "ofType":null,
                    "type": { "_include":true, "args":null, "kind":"SCALAR", "name":"Boolean", "ofType":null, "type":null } },
                ]
              }
            }
          ]
        }
        );
        let result = super::forge_response(&json);
        assert_eq!(&result, "{ field1 field3 { field31 } }")
    }
    /// `forge_args` should render lists, enums, and nested input objects from
    /// the `_input` values attached to the introspected types.
    #[test]
    fn test_forge_args() {
        let json = json!(
        { "args":null, "name":"arg", "ofType":null,
          "type":{ "args":null, "kind":"NON_NULL", "name":null, "type":null,
            "ofType":{ "args":null, "kind":"INPUT_OBJECT", "name":"InputObject", "ofType":null, "type":null, "inputFields":[
              { "args":null, "name":"field1", "ofType":null,
                "type":{ "_input":[{ "args":null, "kind":"NON_NULL", "name":null,
                  "ofType":{ "_include":true, "args":null, "kind":"SCALAR", "name":"String", "ofType":null, "type":null, "_input":"foo"} ,"type":null }],
                  "args":null, "kind":"LIST", "name":null,
                  "ofType":{ "args":null, "kind":"NON_NULL", "name":null,
                    "ofType":{ "_include":true, "args":null, "kind":"SCALAR", "name":"String", "ofType":null, "type":null }, "type":null}, "type":null } },
              { "args":null, "name":"field2", "ofType":null,
                "type":{ "_include":true, "args":null, "kind":"ENUM", "name":"Enum", "ofType":null, "type":null, "_input":"E2",
                  "enumValues":[
                    { "args":null, "isDeprecated":false, "name":"E1", "ofType":null, "type":null },
                    { "args":null, "isDeprecated":false, "name":"E2", "ofType":null,"type":null} ] } },
              { "args":null, "name":"field3", "ofType":null,
                "type":{ "args":null, "kind":"INPUT_OBJECT","name":"InputObject0","ofType":null,"type":null,
                  "inputFields":[
                    { "args":null, "name":"field31",
                      "ofType":null,"type":{ "_include":true, "args":null, "kind":"SCALAR", "name":"Boolean", "ofType":null,"type":null,"_input":true} },
                    { "args":null, "name":"field32", "ofType":null, "type":{ "_include":true, "args":null, "kind":"SCALAR", "name":"Int", "ofType":null, "type":null, "_input":42 } }]
                } }
            ] }
          }
        }
        );
        let result = super::forge_args(vec![json]);
        assert_eq!(
            &result,
            "(arg: {field1: [\"foo\"], field2: E2, field3: {field31: true, field32: 42}})"
        )
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-explorer/src/lib.rs | linera-explorer/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides web files to run a block explorer from Linera service node and Linera indexer.
#![recursion_limit = "256"]
mod entrypoint;
mod graphql;
mod input_type;
mod js_utils;
use std::str::FromStr;
use anyhow::{anyhow, Context as _, Result};
use futures::prelude::*;
use gql_service::{
applications::{self, ApplicationsApplications as Application},
block::{self, BlockBlock as Block},
blocks::{self, BlocksBlocks as Blocks},
chain::{self, ChainChain as Chain},
chains, notifications, request, Chains, Reason,
};
use graphql_client::Response;
use js_utils::{getf, log_str, parse, setf, stringify, SER};
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId};
use linera_indexer_graphql_client::{
indexer::{plugins, Plugins},
operations as gql_operations,
operations::{
get_operation,
get_operation::{GetOperationOperation as Operation, OperationKeyKind},
operations,
operations::{OperationKeyKind as OperationsKeyKind, OperationsOperations as Operations},
OperationKey,
},
};
use linera_service_graphql_client as gql_service;
use once_cell::sync::OnceCell;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use serde_wasm_bindgen::from_value;
use url::Url;
use uuid::Uuid;
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::spawn_local;
use ws_stream_wasm::*;
static WEBSOCKET: OnceCell<WsMeta> = OnceCell::new();
pub(crate) fn reqwest_client() -> reqwest::Client {
// timeouts cannot be enforced when compiling to wasm-js.
reqwest::ClientBuilder::new().build().unwrap()
}
/// Page enum containing info for each page.
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
enum Page {
Unloaded,
Home {
chain: Box<Chain>,
blocks: Vec<Blocks>,
apps: Vec<Application>,
},
Blocks(Vec<Blocks>),
Block(Box<Block>),
Applications(Vec<Application>),
Application {
app: Box<Application>,
queries: Value,
mutations: Value,
subscriptions: Value,
},
Operations(Vec<Operations>),
Operation(Operation),
Plugin {
name: String,
link: String,
queries: Value,
},
Error(String),
}
/// Config type dealt with localstorage.
#[wasm_bindgen]
#[derive(Serialize, Deserialize, Clone)]
pub struct Config {
indexer: String,
node: String,
tls: bool,
}
impl Config {
/// Loads config from local storage.
fn load() -> Self {
let default = Config {
indexer: "localhost:8081".to_string(),
node: "localhost:8080".to_string(),
tls: false,
};
// Return default if window doesn't exist (e.g., in test environment).
let Some(window) = web_sys::window() else {
return default;
};
match window.local_storage() {
Ok(Some(st)) => match st.get_item("config") {
Ok(Some(s)) => serde_json::from_str::<Config>(&s).unwrap_or(default),
_ => default,
},
_ => default,
}
}
}
/// type for Vue data.
#[derive(Serialize, Deserialize, Clone)]
pub struct Data {
config: Config,
page: Page,
chains: Vec<ChainId>,
chain: ChainId,
plugins: Vec<String>,
}
/// Initializes Vue data.
#[wasm_bindgen]
pub fn data() -> JsValue {
let data = Data {
config: Config::load(),
page: Page::Unloaded,
chains: Vec::new(),
chain: ChainId::from_str(
"0000000000000000000000000000000000000000000000000000000000000000",
)
.unwrap(),
plugins: Vec::new(),
};
data.serialize(&SER).unwrap()
}
/// GraphQL query type (for subscriptions).
#[derive(Serialize, Deserialize)]
pub struct GQuery<T> {
id: Option<String>,
#[serde(rename = "type")]
typ: String,
payload: Option<T>,
}
pub enum Protocol {
Http,
Websocket,
}
pub enum AddressKind {
Node,
Indexer,
}
fn url(config: &Config, protocol: Protocol, kind: AddressKind) -> String {
let protocol = match protocol {
Protocol::Http => "http",
Protocol::Websocket => "ws",
};
let tls = if config.tls { "s" } else { "" };
let address = match kind {
AddressKind::Node => &config.node,
AddressKind::Indexer => &config.indexer,
};
format!("{}{}://{}", protocol, tls, address)
}
async fn get_chain(node: &str, chain_id: ChainId) -> Result<Box<Chain>> {
let client = reqwest::Client::new();
let variables = chain::Variables {
chain_id,
inboxes_input: None,
outboxes_input: None,
};
let chain = request::<gql_service::Chain, _>(&client, node, variables)
.await?
.chain;
log_str(&serde_json::to_string_pretty(&chain).unwrap());
Ok(Box::new(chain))
}
async fn get_blocks(
node: &str,
chain_id: ChainId,
from: Option<CryptoHash>,
limit: Option<u32>,
) -> Result<Vec<Blocks>> {
let client = reqwest_client();
let variables = blocks::Variables {
from,
chain_id,
limit: limit.map(|x| x.into()),
};
Ok(request::<gql_service::Blocks, _>(&client, node, variables)
.await?
.blocks)
}
async fn get_applications(node: &str, chain_id: ChainId) -> Result<Vec<Application>> {
let client = reqwest_client();
let variables = applications::Variables { chain_id };
Ok(
request::<gql_service::Applications, _>(&client, node, variables)
.await?
.applications,
)
}
async fn get_operations(indexer: &str, chain_id: ChainId) -> Result<Vec<Operations>> {
let client = reqwest_client();
let operations_indexer = format!("{}/operations", indexer);
let variables = operations::Variables {
from: OperationsKeyKind::Last(chain_id),
limit: None,
};
Ok(
request::<gql_operations::Operations, _>(&client, &operations_indexer, variables)
.await?
.operations,
)
}
/// Returns the error page.
fn error(error: &anyhow::Error) -> (Page, String) {
(Page::Error(error.to_string()), "/error".to_string())
}
/// Returns the home page.
async fn home(node: &str, chain_id: ChainId) -> Result<(Page, String)> {
let chain = get_chain(node, chain_id).await?;
let blocks = get_blocks(node, chain_id, None, None).await?;
let apps = get_applications(node, chain_id).await?;
Ok((
Page::Home {
chain,
blocks,
apps,
},
format!("/?chain={}", chain_id),
))
}
/// Returns the blocks page.
async fn blocks(
node: &str,
chain_id: ChainId,
from: Option<CryptoHash>,
limit: Option<u32>,
) -> Result<(Page, String)> {
// TODO: limit is not used in the UI, it should be implemented with some path arguments and select input
let blocks = get_blocks(node, chain_id, from, limit).await?;
Ok((Page::Blocks(blocks), format!("/blocks?chain={}", chain_id)))
}
/// Returns the block page.
async fn block(node: &str, chain_id: ChainId, hash: Option<CryptoHash>) -> Result<(Page, String)> {
let client = reqwest_client();
let variables = block::Variables { hash, chain_id };
let block = request::<gql_service::Block, _>(&client, node, variables)
.await?
.block
.context("no block found")?;
let hash = block.hash;
Ok((
Page::Block(Box::new(block)),
format!("/block/{}?chain={}", hash, chain_id),
))
}
/// Queries wallet chains.
async fn chains(app: &JsValue, node: &str) -> Result<ChainId> {
let client = reqwest_client();
let variables = chains::Variables;
let chains = request::<Chains, _>(&client, node, variables).await?.chains;
let chains_js = chains
.list
.serialize(&SER)
.expect("failed to serialize ChainIds");
setf(app, "chains", &chains_js);
chains
.default
.or_else(|| chains.list.first().copied())
.ok_or_else(|| anyhow::Error::msg("no chains available"))
}
/// Queries indexer plugins.
async fn plugins(app: &JsValue, indexer: &str) {
let client = reqwest_client();
let Ok(data) = request::<Plugins, _>(&client, indexer, plugins::Variables).await else {
return;
};
let plugins_js = data
.plugins
.serialize(&SER)
.expect("failed to serialize plugins");
setf(app, "plugins", &plugins_js)
}
/// Returns the applications page.
async fn applications(node: &str, chain_id: ChainId) -> Result<(Page, String)> {
let applications = get_applications(node, chain_id).await?;
Ok((
Page::Applications(applications),
format!("/applications?chain={}", chain_id),
))
}
/// Returns the operations page.
async fn operations(indexer: &str, chain_id: ChainId) -> Result<(Page, String)> {
let operations = get_operations(indexer, chain_id).await?;
Ok((
Page::Operations(operations),
format!("/operations?chain={}", chain_id),
))
}
/// Returns the operation page.
async fn operation(
indexer: &str,
key: Option<OperationKey>,
chain_id: ChainId,
) -> Result<(Page, String)> {
let client = reqwest_client();
let operations_indexer = format!("{}/operations", indexer);
let key = match key {
Some(key) => OperationKeyKind::Key(key),
None => OperationKeyKind::Last(chain_id),
};
let variables = get_operation::Variables { key };
let operation =
request::<gql_operations::GetOperation, _>(&client, &operations_indexer, variables)
.await?
.operation
.context("no operation found")?;
Ok((
Page::Operation(operation.clone()),
format!(
"/operation?chain={}&height={}&index={}",
chain_id, operation.key.height, operation.key.index
),
))
}
/// Lists entrypoints for GraphQL queries, mutations or subscriptions.
fn list_entrypoints(types: &[Value], name: &Value) -> Option<Value> {
types
.iter()
.find(|x: &&Value| &x["name"] == name)
.map(|x| x["fields"].clone())
}
/// Fills recursively GraphQL objects with their type definitions.
fn fill_type(element: &Value, types: &Vec<Value>) -> Value {
match element {
Value::Array(array) => Value::Array(
array
.iter()
.map(|elt: &Value| fill_type(elt, types))
.collect(),
),
Value::Object(object) => {
let mut object = object.clone();
let name = element["name"].as_str();
let kind = element["kind"].as_str();
let of_type = &element["ofType"];
match (kind, name, of_type) {
(Some("OBJECT"), Some(name), _) => {
match types.iter().find(|elt: &&Value| elt["name"] == name) {
None => (),
Some(element_definition) => {
let fields = element_definition["fields"]
.as_array()
.unwrap()
.iter()
.map(|elt| fill_type(elt, types))
.collect::<Vec<_>>();
object.insert("fields".to_string(), Value::Array(fields));
}
}
}
(Some("INPUT_OBJECT"), Some(name), _) => {
match types.iter().find(|elt: &&Value| elt["name"] == name) {
None => (),
Some(element_definition) => {
let fields = element_definition["inputFields"]
.as_array()
.unwrap()
.iter()
.map(|elt| fill_type(elt, types))
.collect::<Vec<_>>();
object.insert("inputFields".to_string(), Value::Array(fields));
}
}
}
(Some("ENUM"), Some(name), _) => {
match types.iter().find(|elt: &&Value| elt["name"] == name) {
None => (),
Some(element_definition) => {
let values = element_definition["enumValues"]
.as_array()
.unwrap()
.iter()
.map(|elt| fill_type(elt, types))
.collect::<Vec<_>>();
object.insert("enumValues".to_string(), Value::Array(values));
}
}
}
(Some("LIST" | "NON_NULL"), Some(name), Value::Null) => {
match types.iter().find(|elt: &&Value| elt["name"] == name) {
None => (),
Some(element_definition) => {
object
.insert("ofType".to_string(), fill_type(element_definition, types));
}
}
}
_ => (),
};
object.insert("ofType".to_string(), fill_type(&element["ofType"], types));
object.insert("type".to_string(), fill_type(&element["type"], types));
object.insert("args".to_string(), fill_type(&element["args"], types));
if let Some("LIST") = kind {
object.insert("_input".to_string(), Value::Array(Vec::new()));
}
if let Some("SCALAR" | "ENUM" | "OBJECT") = kind {
object.insert("_include".to_string(), Value::Bool(true));
}
Value::Object(object)
}
elt => elt.clone(),
}
}
/// Returns the application page.
async fn application(app: Application) -> Result<(Page, String)> {
let schema = graphql::introspection(&app.link).await?;
let sch = &schema["data"]["__schema"];
let types = sch["types"]
.as_array()
.expect("introspection types is not an array")
.clone();
let queries =
list_entrypoints(&types, &sch["queryType"]["name"]).unwrap_or(Value::Array(Vec::new()));
let queries = fill_type(&queries, &types);
let mutations =
list_entrypoints(&types, &sch["mutationType"]["name"]).unwrap_or(Value::Array(Vec::new()));
let mutations = fill_type(&mutations, &types);
let subscriptions = list_entrypoints(&types, &sch["subscriptionType"]["name"])
.unwrap_or(Value::Array(Vec::new()));
let subscriptions = fill_type(&subscriptions, &types);
let pathname = format!("/application/{}", app.id.as_str());
Ok((
Page::Application {
app: Box::new(app),
queries,
mutations,
subscriptions,
},
pathname,
))
}
/// Returns the plugin page.
async fn plugin(plugin: &str, indexer: &str) -> Result<(Page, String)> {
let link = format!("{}/{}", indexer, plugin);
let schema = graphql::introspection(&link).await?;
let sch = &schema["data"]["__schema"];
let types = sch["types"]
.as_array()
.expect("introspection types is not an array")
.clone();
let queries =
list_entrypoints(&types, &sch["queryType"]["name"]).unwrap_or(Value::Array(Vec::new()));
let queries = fill_type(&queries, &types);
let pathname = format!("/plugin?plugin={}", plugin);
Ok((
Page::Plugin {
name: plugin.to_string(),
link,
queries,
},
pathname,
))
}
fn format_bytes(value: &JsValue) -> JsValue {
let modified_value = value.clone();
if let Some(object) = js_sys::Object::try_from(value) {
js_sys::Object::keys(object)
.iter()
.for_each(|k: JsValue| match k.as_string() {
None => (),
Some(key_str) => {
if &key_str == "bytes" {
let array: Vec<u8> =
js_sys::Uint8Array::from(getf(&modified_value, "bytes")).to_vec();
let array_hex = hex::encode(array);
let hex_len = array_hex.len();
let hex_elided = if hex_len > 128 {
// don't show all hex digits if the bytes array is too long
format!("{}..{}", &array_hex[0..4], &array_hex[hex_len - 4..])
} else {
array_hex
};
setf(&modified_value, "bytes", &JsValue::from_str(&hex_elided))
} else {
setf(
&modified_value,
&key_str,
&format_bytes(&getf(&modified_value, &key_str)),
)
}
}
});
};
modified_value
}
fn page_name_and_args(page: &Page) -> (&str, Vec<(String, String)>) {
match page {
Page::Unloaded | Page::Home { .. } => ("", Vec::new()),
Page::Block(b) => ("block", vec![("block".to_string(), b.hash.to_string())]),
Page::Blocks { .. } => ("blocks", Vec::new()),
Page::Applications(_) => ("applications", Vec::new()),
Page::Application { app, .. } => (
"application",
vec![("app".to_string(), stringify(&app.serialize(&SER).unwrap()))],
),
Page::Operations(_) => ("operations", Vec::new()),
Page::Operation(op) => (
"operation",
vec![
("height".to_string(), op.key.height.to_string()),
("index".to_string(), op.key.index.to_string()),
],
),
Page::Plugin { name, .. } => ("plugin", vec![("plugin".to_string(), name.to_string())]),
Page::Error(_) => ("error", Vec::new()),
}
}
fn find_arg(args: &[(String, String)], key: &str) -> Option<String> {
args.iter()
.find_map(|(k, v)| if k == key { Some(v.clone()) } else { None })
}
fn find_arg_map<T, F, E>(args: &[(String, String)], key: &str, f: F) -> Result<Option<T>>
where
F: FnOnce(&str) -> Result<T, E>,
E: std::error::Error + Send + Sync + 'static,
{
match args
.iter()
.find_map(|(k, v)| if k == key { Some(v) } else { None })
{
None => Ok(None),
Some(v) => Ok(Some(f(v)?)),
}
}
fn chain_id_from_args(
app: &JsValue,
data: &Data,
args: &[(String, String)],
init: bool,
) -> Result<(ChainId, bool)> {
match find_arg(args, "chain") {
None => Ok((data.chain, init)),
Some(chain_id) => {
let chain_js: JsValue = chain_id
.serialize(&SER)
.expect("failed to serialize ChainId");
setf(app, "chain", &chain_js);
Ok(ChainId::from_str(&chain_id).map(|id| (id, id != data.chain || init))?)
}
}
}
async fn page(
page_name: &str,
node: &str,
indexer: &str,
chain_id: ChainId,
args: &[(String, String)],
) -> Result<(Page, String)> {
match page_name {
"" => home(node, chain_id).await,
"block" => {
let hash = find_arg_map(args, "block", CryptoHash::from_str)?;
block(node, chain_id, hash).await
}
"blocks" => blocks(node, chain_id, None, Some(20)).await,
"applications" => applications(node, chain_id).await,
"application" => {
let app_arg = find_arg(args, "app").context("unknown application")?;
let app =
from_value::<Application>(parse(&app_arg)).expect("cannot parse applications");
application(app).await
}
"operation" => {
let height = find_arg_map(args, "height", BlockHeight::from_str)?;
let index = find_arg_map(args, "index", usize::from_str)?;
match (height, index) {
(None, _) | (_, None) => operation(indexer, None, chain_id).await,
(Some(height), Some(index)) => {
let key = OperationKey {
chain_id,
height,
index,
};
operation(indexer, Some(key), chain_id).await
}
}
}
"operations" => operations(indexer, chain_id).await,
"plugin" => {
let name = find_arg(args, "plugin").context("unknown plugin")?;
plugin(&name, indexer).await
}
"error" => {
let msg = find_arg(args, "msg").unwrap_or("unknown error".to_string());
Err(anyhow::Error::msg(msg))
}
_ => Err(anyhow!("unknown page")),
}
}
/// Main function to switch between Vue pages.
async fn route_aux(
app: &JsValue,
data: &Data,
path: &Option<String>,
args: &[(String, String)],
init: bool,
) {
let chain_info = chain_id_from_args(app, data, args, init);
let (page_name, args): (&str, Vec<(String, String)>) = match (path, &data.page) {
(Some(p), _) => (p, args.to_vec()),
(_, p) => page_name_and_args(p),
};
let node = url(&data.config, Protocol::Http, AddressKind::Node);
let indexer = url(&data.config, Protocol::Http, AddressKind::Indexer);
let result = match chain_info {
Err(e) => Err(e),
Ok((chain_id, chain_changed)) => {
let page_result = page(page_name, &node, &indexer, chain_id, &args).await;
if chain_changed {
if let Some(ws) = WEBSOCKET.get() {
let _ = ws.close().await;
}
let address = url(&data.config, Protocol::Websocket, AddressKind::Node);
subscribe_chain(app, &address, chain_id).await;
};
page_result
}
};
let (page, new_path) = result.unwrap_or_else(|e| error(&e));
let page_js = format_bytes(&page.serialize(&SER).unwrap());
setf(app, "page", &page_js);
web_sys::window()
.expect("window object not found")
.history()
.expect("history object not found")
.push_state_with_url(&page_js, &new_path, Some(&new_path))
.expect("push_state failed");
}
#[wasm_bindgen]
pub async fn route(app: JsValue, path: JsValue, args: JsValue) {
let path = path.as_string();
let args = from_value::<Vec<(String, String)>>(args).unwrap_or_default();
let msg = format!("route: {} {:?}", path.as_deref().unwrap_or("none"), args);
log_str(&msg);
let data = from_value::<Data>(app.clone()).expect("cannot parse Vue data");
route_aux(&app, &data, &path, &args, false).await
}
#[wasm_bindgen]
pub fn short_crypto_hash(s: String) -> String {
let hash = CryptoHash::from_str(&s).expect("not a crypto hash");
format!("{:?}", hash)
}
#[wasm_bindgen]
pub fn short_app_id(s: String) -> String {
format!("{}..{}..{}..", &s[..4], &s[64..68], &s[152..156])
}
fn set_onpopstate(app: JsValue) {
let callback = Closure::<dyn FnMut(JsValue)>::new(move |v: JsValue| {
setf(&app, "page", &getf(&v, "state"));
});
web_sys::window()
.expect("window object not found")
.set_onpopstate(Some(callback.as_ref().unchecked_ref()));
callback.forget()
}
/// Subscribes to notifications for one chain
async fn subscribe_chain(app: &JsValue, address: &str, chain: ChainId) {
let (ws, mut wsio) = WsMeta::connect(
&format!("{}/ws", address),
Some(vec!["graphql-transport-ws"]),
)
.await
.expect("cannot connect to websocket");
wsio.send(WsMessage::Text(
"{\"type\": \"connection_init\", \"payload\": {}}".to_string(),
))
.await
.expect("cannot send to websocket");
wsio.next().await;
let uuid = Uuid::new_v3(&Uuid::NAMESPACE_DNS, b"linera.dev");
let payload_query = format!(
r#"subscription {{ notifications(chainId: \"{}\") }}"#,
chain
);
let query = format!(
r#"{{ "id": "{}", "type": "subscribe", "payload": {{"query": "{}"}} }}"#,
uuid, payload_query
);
wsio.send(WsMessage::Text(query))
.await
.expect("cannot send to websocket");
let app = app.clone();
spawn_local(async move {
while let Some(evt) = wsio.next().await {
match evt {
WsMessage::Text(message) => {
let graphql_message = serde_json::from_str::<
GQuery<Response<notifications::ResponseData>>,
>(&message)
.expect("unexpected websocket response");
if let Some(payload) = graphql_message.payload {
if let Some(message_data) = payload.data {
let data =
from_value::<Data>(app.clone()).expect("cannot parse vue data");
if let Reason::NewBlock { .. } = message_data.notifications.reason {
if message_data.notifications.chain_id == chain {
route_aux(&app, &data, &None, &Vec::new(), false).await
}
}
}
if let Some(errors) = payload.errors {
errors.iter().for_each(|e| log_str(&e.to_string()));
break;
}
};
}
WsMessage::Binary(_) => (),
}
}
});
let _ = WEBSOCKET.set(ws);
}
/// Initializes pages and subscribes to notifications.
#[wasm_bindgen]
pub async fn start(app: JsValue) {
console_error_panic_hook::set_once();
set_onpopstate(app.clone());
let data = from_value::<Data>(app.clone()).expect("cannot parse vue data");
let address = url(&data.config, Protocol::Http, AddressKind::Node);
let default_chain = chains(&app, &address).await;
match default_chain {
Err(e) => {
route_aux(
&app,
&data,
&Some("error".to_string()),
&[("msg".to_string(), e.to_string())],
true,
)
.await
}
Ok(default_chain) => {
let indexer = url(&data.config, Protocol::Http, AddressKind::Indexer);
let _ = plugins(&app, &indexer).await;
let uri = web_sys::window()
.expect("window object not found")
.location()
.href()
.unwrap();
let uri = Url::parse(&uri).expect("failed to parse url");
let pathname = uri.path();
let mut args = uri.query_pairs().into_owned().collect::<Vec<_>>();
args.push(("chain".to_string(), default_chain.to_string()));
let path = match pathname {
"/blocks" => Some("blocks".to_string()),
"/applications" => Some("applications".to_string()),
"/operations" => Some("operations".to_string()),
"/operation" => Some("operation".to_string()),
"/plugin" => Some("plugin".to_string()),
pathname => match (
pathname.strip_prefix("/block/"),
pathname.strip_prefix("/application/"),
) {
(Some(hash), _) => {
args.push(("block".to_string(), hash.to_string()));
Some("block".to_string())
}
(_, Some(app_id)) => {
let link = format!("{}/applications/{}", address, app_id);
let app =
serde_json::json!({"id": app_id, "link": link, "description": ""})
.to_string();
args.push(("app".to_string(), app));
Some("application".to_string())
}
_ => None,
},
};
route_aux(&app, &data, &path, &args, true).await;
}
}
}
/// Saves config to local storage.
#[wasm_bindgen]
pub fn save_config(app: JsValue) {
let data = from_value::<Data>(app).expect("cannot parse vue data");
if let Ok(Some(storage)) = web_sys::window()
.expect("window object not found")
.local_storage()
{
storage
.set_item(
"config",
&serde_json::to_string::<Config>(&data.config)
.expect("cannot parse localstorage config"),
)
.expect("cannot set config");
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-explorer/src/input_type.rs | linera-explorer/src/input_type.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use wasm_bindgen::prelude::*;
use super::js_utils::{getf, unproxy};
/// Adds an input line for lists.
#[wasm_bindgen]
pub fn append_input(component: JsValue) {
let element = getf(&component, "elt");
let input: js_sys::Array = getf(&element, "_input").into();
let child = unproxy(&getf(&element, "ofType"));
input.splice(input.length(), 0, &child);
}
/// Removes an input line.
#[wasm_bindgen]
pub fn remove_input(component: JsValue, index: u32) {
let element = getf(&component, "elt");
let input: js_sys::Array = getf(&element, "_input").into();
input.splice(index, 1, &JsValue::undefined());
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-explorer/src/js_utils.rs | linera-explorer/src/js_utils.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// use serde::Serialize;
use serde_wasm_bindgen::Serializer;
use wasm_bindgen::prelude::*;
/// JS special serializer
pub(crate) const SER: Serializer =
serde_wasm_bindgen::Serializer::new().serialize_large_number_types_as_bigints(true);
pub fn setf(target: &JsValue, field: &str, value: &JsValue) {
js_sys::Reflect::set(target, &JsValue::from_str(field), value)
.unwrap_or_else(|_| panic!("failed to set JS field '{}'", field));
}
pub fn getf(target: &JsValue, field: &str) -> JsValue {
js_sys::Reflect::get(target, &JsValue::from_str(field))
.unwrap_or_else(|_| panic!("failed to get JS field '{}'", field))
}
pub fn log(x: &JsValue) {
web_sys::console::log_1(x)
}
pub fn log_str(s: &str) {
log(&JsValue::from_str(s))
}
pub fn parse(x: &str) -> JsValue {
js_sys::JSON::parse(x).expect("failed to parse JSON")
}
pub fn stringify(x: &JsValue) -> String {
js_sys::JSON::stringify(x)
.expect("failed to stringify JSON")
.into()
}
pub fn js_to_json(x: &JsValue) -> serde_json::Value {
serde_json::from_str::<serde_json::Value>(&stringify(x)).expect("failed to convert JS to JSON")
}
pub fn unproxy(x: &JsValue) -> JsValue {
parse(&stringify(x))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-explorer/src/graphql.rs | linera-explorer/src/graphql.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use serde_json::Value;
use crate::reqwest_client;
pub async fn introspection(url: &str) -> Result<Value> {
let client = reqwest_client();
let graphql_query =
"query { \
__schema { \
queryType { name } \
mutationType { name } \
subscriptionType { name } \
types { ...FullType } \
directives { name description locations args { ...InputValue } } } } \
fragment FullType on __Type { \
kind name description \
fields(includeDeprecated:true) { \
name description \
args { ...InputValue } \
type{ ...TypeRef } \
isDeprecated deprecationReason } \
inputFields { ...InputValue } \
interfaces { ...TypeRef } \
enumValues(includeDeprecated:true) { name description isDeprecated deprecationReason } \
possibleTypes { ...TypeRef } } \
fragment InputValue on __InputValue { \
name description \
type { ...TypeRef } \
defaultValue } \
fragment TypeRef on __Type { \
kind name \
ofType { kind name ofType { kind name ofType { kind name ofType { kind name ofType { kind name ofType { kind name ofType {kind name} } } } } } } }";
let res = client
.post(url)
.body(format!("{{\"query\":\"{}\"}}", graphql_query))
.send()
.await?
.text()
.await?;
Ok(serde_json::from_str::<Value>(&res)?)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/build.rs | linera-core/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() {
cfg_aliases::cfg_aliases! {
web: { all(target_arch = "wasm32", feature = "web") },
with_testing: { any(test, feature = "test") },
with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
// the old version of `getrandom` we pin here is available on all targets, but
// using it will panic if no suitable source of entropy is found
with_getrandom: { any(web, not(target_arch = "wasm32")) },
};
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/node.rs | linera-core/src/node.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(not(web))]
use futures::stream::BoxStream;
#[cfg(web)]
use futures::stream::LocalBoxStream as BoxStream;
use futures::stream::Stream;
use linera_base::{
crypto::{CryptoError, CryptoHash, ValidatorPublicKey},
data_types::{
ArithmeticError, Blob, BlobContent, BlockHeight, NetworkDescription, Round, Timestamp,
},
identifiers::{BlobId, ChainId, EventId},
};
use linera_chain::{
data_types::BlockProposal,
types::{
ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate, LiteCertificate, Timeout,
ValidatedBlock,
},
ChainError,
};
use linera_execution::{committee::Committee, ExecutionError};
use linera_version::VersionInfo;
use linera_views::ViewError;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::{
data_types::{ChainInfoQuery, ChainInfoResponse},
worker::{Notification, WorkerError},
};
/// A pinned [`Stream`] of Notifications.
pub type NotificationStream = BoxStream<'static, Notification>;
/// Whether to wait for the delivery of outgoing cross-chain messages.
#[derive(Debug, Default, Clone, Copy)]
pub enum CrossChainMessageDelivery {
#[default]
NonBlocking,
Blocking,
}
/// How to communicate with a validator node.
#[allow(async_fn_in_trait)]
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait ValidatorNode {
#[cfg(not(web))]
type NotificationStream: Stream<Item = Notification> + Unpin + Send;
#[cfg(web)]
type NotificationStream: Stream<Item = Notification> + Unpin;
fn address(&self) -> String;
/// Proposes a new block.
async fn handle_block_proposal(
&self,
proposal: BlockProposal,
) -> Result<ChainInfoResponse, NodeError>;
/// Processes a certificate without a value.
async fn handle_lite_certificate(
&self,
certificate: LiteCertificate<'_>,
delivery: CrossChainMessageDelivery,
) -> Result<ChainInfoResponse, NodeError>;
/// Processes a confirmed certificate.
async fn handle_confirmed_certificate(
&self,
certificate: GenericCertificate<ConfirmedBlock>,
delivery: CrossChainMessageDelivery,
) -> Result<ChainInfoResponse, NodeError>;
/// Processes a validated certificate.
async fn handle_validated_certificate(
&self,
certificate: GenericCertificate<ValidatedBlock>,
) -> Result<ChainInfoResponse, NodeError>;
/// Processes a timeout certificate.
async fn handle_timeout_certificate(
&self,
certificate: GenericCertificate<Timeout>,
) -> Result<ChainInfoResponse, NodeError>;
/// Handles information queries for this chain.
async fn handle_chain_info_query(
&self,
query: ChainInfoQuery,
) -> Result<ChainInfoResponse, NodeError>;
/// Gets the version info for this validator node.
async fn get_version_info(&self) -> Result<VersionInfo, NodeError>;
/// Gets the network's description.
async fn get_network_description(&self) -> Result<NetworkDescription, NodeError>;
/// Subscribes to receiving notifications for a collection of chains.
async fn subscribe(&self, chains: Vec<ChainId>) -> Result<Self::NotificationStream, NodeError>;
// Uploads a blob. Returns an error if the validator has not seen a
// certificate using this blob.
async fn upload_blob(&self, content: BlobContent) -> Result<BlobId, NodeError>;
/// Uploads the blobs to the validator.
// Unfortunately, this doesn't compile as an async function: async functions in traits
// don't play well with default implementations, apparently.
// See also https://github.com/rust-lang/impl-trait-utils/issues/17
fn upload_blobs(
&self,
blobs: Vec<Blob>,
) -> impl futures::Future<Output = Result<Vec<BlobId>, NodeError>> {
let tasks: Vec<_> = blobs
.into_iter()
.map(|blob| self.upload_blob(blob.into()))
.collect();
futures::future::try_join_all(tasks)
}
/// Downloads a blob. Returns an error if the validator does not have the blob.
async fn download_blob(&self, blob_id: BlobId) -> Result<BlobContent, NodeError>;
/// Downloads a blob that belongs to a pending proposal or the locking block on a chain.
async fn download_pending_blob(
&self,
chain_id: ChainId,
blob_id: BlobId,
) -> Result<BlobContent, NodeError>;
/// Handles a blob that belongs to a pending proposal or validated block certificate.
async fn handle_pending_blob(
&self,
chain_id: ChainId,
blob: BlobContent,
) -> Result<ChainInfoResponse, NodeError>;
async fn download_certificate(
&self,
hash: CryptoHash,
) -> Result<ConfirmedBlockCertificate, NodeError>;
/// Requests a batch of certificates from the validator.
async fn download_certificates(
&self,
hashes: Vec<CryptoHash>,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError>;
/// Requests a batch of certificates from a specific chain by heights.
async fn download_certificates_by_heights(
&self,
chain_id: ChainId,
heights: Vec<BlockHeight>,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError>;
/// Returns the hash of the `Certificate` that last used a blob.
async fn blob_last_used_by(&self, blob_id: BlobId) -> Result<CryptoHash, NodeError>;
/// Returns the certificate that last used the blob.
async fn blob_last_used_by_certificate(
&self,
blob_id: BlobId,
) -> Result<ConfirmedBlockCertificate, NodeError>;
/// Returns the missing `Blob`s by their IDs.
async fn missing_blob_ids(&self, blob_ids: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError>;
/// Gets shard information for a specific chain.
async fn get_shard_info(
&self,
chain_id: ChainId,
) -> Result<crate::data_types::ShardInfo, NodeError>;
}
/// Turn an address into a validator node.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait ValidatorNodeProvider: 'static {
#[cfg(not(web))]
type Node: ValidatorNode + Send + Sync + Clone + 'static;
#[cfg(web)]
type Node: ValidatorNode + Clone + 'static;
fn make_node(&self, address: &str) -> Result<Self::Node, NodeError>;
fn make_nodes(
&self,
committee: &Committee,
) -> Result<impl Iterator<Item = (ValidatorPublicKey, Self::Node)> + '_, NodeError> {
let validator_addresses: Vec<_> = committee
.validator_addresses()
.map(|(node, name)| (node, name.to_owned()))
.collect();
self.make_nodes_from_list(validator_addresses)
}
fn make_nodes_from_list<A>(
&self,
validators: impl IntoIterator<Item = (ValidatorPublicKey, A)>,
) -> Result<impl Iterator<Item = (ValidatorPublicKey, Self::Node)>, NodeError>
where
A: AsRef<str>,
{
Ok(validators
.into_iter()
.map(|(name, address)| Ok((name, self.make_node(address.as_ref())?)))
.collect::<Result<Vec<_>, NodeError>>()?
.into_iter())
}
}
/// Error type for node queries.
///
/// This error is meant to be serialized over the network and aggregated by clients (i.e.
/// clients will track validator votes on each error value).
#[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Error, Hash)]
pub enum NodeError {
#[error("Cryptographic error: {error}")]
CryptoError { error: String },
#[error("Arithmetic error: {error}")]
ArithmeticError { error: String },
#[error("Error while accessing storage: {error}")]
ViewError { error: String },
#[error("Chain error: {error}")]
ChainError { error: String },
#[error("Worker error: {error}")]
WorkerError { error: String },
// This error must be normalized during conversions.
#[error("The chain {0} is not active in validator")]
InactiveChain(ChainId),
#[error("Round number should be {0:?}")]
WrongRound(Round),
#[error(
"Chain is expecting a next block at height {expected_block_height} but the given block \
is at height {found_block_height} instead"
)]
UnexpectedBlockHeight {
expected_block_height: BlockHeight,
found_block_height: BlockHeight,
},
// This error must be normalized during conversions.
#[error(
"Cannot vote for block proposal of chain {chain_id} because a message \
from chain {origin} at height {height} has not been received yet"
)]
MissingCrossChainUpdate {
chain_id: ChainId,
origin: ChainId,
height: BlockHeight,
},
#[error("Blobs not found: {0:?}")]
BlobsNotFound(Vec<BlobId>),
#[error("Events not found: {0:?}")]
EventsNotFound(Vec<EventId>),
// This error must be normalized during conversions.
#[error("We don't have the value for the certificate.")]
MissingCertificateValue,
#[error("Response doesn't contain requested certificates: {0:?}")]
MissingCertificates(Vec<CryptoHash>),
#[error("Validator's response failed to include a vote when trying to {0}")]
MissingVoteInValidatorResponse(String),
#[error("The received chain info response is invalid")]
InvalidChainInfoResponse,
#[error("Unexpected certificate value")]
UnexpectedCertificateValue,
// Networking errors.
// TODO(#258): These errors should be defined in linera-rpc.
#[error("Cannot deserialize")]
InvalidDecoding,
#[error("Unexpected message")]
UnexpectedMessage,
#[error("Grpc error: {error}")]
GrpcError { error: String },
#[error("Network error while querying service: {error}")]
ClientIoError { error: String },
#[error("Failed to resolve validator address: {address}")]
CannotResolveValidatorAddress { address: String },
#[error("Subscription error due to incorrect transport. Was expecting gRPC, instead found: {transport}")]
SubscriptionError { transport: String },
#[error("Failed to subscribe; tonic status: {status:?}")]
SubscriptionFailed { status: String },
#[error("Node failed to provide a 'last used by' certificate for the blob")]
InvalidCertificateForBlob(BlobId),
#[error("Node returned a BlobsNotFound error with duplicates")]
DuplicatesInBlobsNotFound,
#[error("Node returned a BlobsNotFound error with unexpected blob IDs")]
UnexpectedEntriesInBlobsNotFound,
#[error("Node returned certificates {returned:?}, but we requested {requested:?}")]
UnexpectedCertificates {
returned: Vec<CryptoHash>,
requested: Vec<CryptoHash>,
},
#[error("Node returned a BlobsNotFound error with an empty list of missing blob IDs")]
EmptyBlobsNotFound,
#[error("Local error handling validator response: {error}")]
ResponseHandlingError { error: String },
#[error("Missing certificates for chain {chain_id} in heights {heights:?}")]
MissingCertificatesByHeights {
chain_id: ChainId,
heights: Vec<BlockHeight>,
},
#[error("Too many certificates returned for chain {chain_id} from {remote_node}")]
TooManyCertificatesReturned {
chain_id: ChainId,
remote_node: Box<ValidatorPublicKey>,
},
#[error(
"Block timestamp ({block_timestamp}) is further in the future from local time \
({local_time}) than block time grace period ({block_time_grace_period_ms} ms)"
)]
InvalidTimestamp {
block_timestamp: Timestamp,
local_time: Timestamp,
block_time_grace_period_ms: u64,
},
}
impl From<tonic::Status> for NodeError {
fn from(status: tonic::Status) -> Self {
Self::GrpcError {
error: status.to_string(),
}
}
}
impl CrossChainMessageDelivery {
pub fn new(wait_for_outgoing_messages: bool) -> Self {
if wait_for_outgoing_messages {
CrossChainMessageDelivery::Blocking
} else {
CrossChainMessageDelivery::NonBlocking
}
}
pub fn wait_for_outgoing_messages(self) -> bool {
match self {
CrossChainMessageDelivery::NonBlocking => false,
CrossChainMessageDelivery::Blocking => true,
}
}
}
impl From<ViewError> for NodeError {
fn from(error: ViewError) -> Self {
Self::ViewError {
error: error.to_string(),
}
}
}
impl From<ArithmeticError> for NodeError {
fn from(error: ArithmeticError) -> Self {
Self::ArithmeticError {
error: error.to_string(),
}
}
}
impl From<CryptoError> for NodeError {
fn from(error: CryptoError) -> Self {
Self::CryptoError {
error: error.to_string(),
}
}
}
impl From<ChainError> for NodeError {
fn from(error: ChainError) -> Self {
match error {
ChainError::MissingCrossChainUpdate {
chain_id,
origin,
height,
} => Self::MissingCrossChainUpdate {
chain_id,
origin,
height,
},
ChainError::InactiveChain(chain_id) => Self::InactiveChain(chain_id),
ChainError::ExecutionError(execution_error, context) => match *execution_error {
ExecutionError::BlobsNotFound(blob_ids) => Self::BlobsNotFound(blob_ids),
ExecutionError::EventsNotFound(event_ids) => Self::EventsNotFound(event_ids),
_ => Self::ChainError {
error: ChainError::ExecutionError(execution_error, context).to_string(),
},
},
ChainError::UnexpectedBlockHeight {
expected_block_height,
found_block_height,
} => Self::UnexpectedBlockHeight {
expected_block_height,
found_block_height,
},
ChainError::WrongRound(round) => Self::WrongRound(round),
error => Self::ChainError {
error: error.to_string(),
},
}
}
}
impl From<WorkerError> for NodeError {
fn from(error: WorkerError) -> Self {
match error {
WorkerError::ChainError(error) => (*error).into(),
WorkerError::MissingCertificateValue => Self::MissingCertificateValue,
WorkerError::BlobsNotFound(blob_ids) => Self::BlobsNotFound(blob_ids),
WorkerError::EventsNotFound(event_ids) => Self::EventsNotFound(event_ids),
WorkerError::UnexpectedBlockHeight {
expected_block_height,
found_block_height,
} => NodeError::UnexpectedBlockHeight {
expected_block_height,
found_block_height,
},
WorkerError::InvalidTimestamp {
block_timestamp,
local_time,
block_time_grace_period,
} => NodeError::InvalidTimestamp {
block_timestamp,
local_time,
block_time_grace_period_ms: block_time_grace_period.as_millis() as u64,
},
error => Self::WorkerError {
error: error.to_string(),
},
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/lib.rs | linera-core/src/lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the core Linera protocol.
#![recursion_limit = "256"]
// We conditionally add autotraits to the traits here.
#![allow(async_fn_in_trait)]
mod chain_worker;
pub mod client;
pub use client::Client;
pub mod data_types;
pub mod join_set_ext;
mod local_node;
pub mod node;
pub mod notifier;
mod remote_node;
#[cfg(with_testing)]
#[path = "unit_tests/test_utils.rs"]
pub mod test_utils;
pub mod worker;
pub(crate) mod updater;
mod value_cache;
pub use local_node::LocalNodeError;
pub use updater::DEFAULT_QUORUM_GRACE_PERIOD;
pub use crate::join_set_ext::{JoinSetExt, TaskHandle};
pub mod environment;
pub use environment::{
wallet::{self, Wallet},
Environment,
};
/// The maximum number of entries in a `received_log` included in a `ChainInfo` response.
// TODO(#4638): Revisit the number.
pub const CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES: usize = 20_000;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/join_set_ext.rs | linera-core/src/join_set_ext.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An extension trait to allow determining at compile time how tasks are spawned on the Tokio
//! runtime.
//!
//! In most cases the [`Future`] task to be spawned should implement [`Send`], but that's
//! not possible when compiling for the Web. In that case, the task is spawned on the
//! browser event loop.
use futures::channel::oneshot;
#[cfg(web)]
mod implementation {
pub use futures::future::AbortHandle;
use futures::{future, stream, StreamExt as _};
use super::*;
#[derive(Default)]
pub struct JoinSet(Vec<oneshot::Receiver<()>>);
/// An extension trait for the [`JoinSet`] type.
pub trait JoinSetExt: Sized {
/// Spawns a `future` task on this [`JoinSet`] using [`JoinSet::spawn_local`].
///
/// Returns a [`oneshot::Receiver`] to receive the `future`'s output, and an
/// [`AbortHandle`] to cancel execution of the task.
fn spawn_task<F: Future + 'static>(&mut self, future: F) -> TaskHandle<F::Output>;
/// Awaits all tasks spawned in this [`JoinSet`].
fn await_all_tasks(&mut self) -> impl Future<Output = ()>;
/// Reaps tasks that have finished.
fn reap_finished_tasks(&mut self);
}
impl JoinSetExt for JoinSet {
fn spawn_task<F: Future + 'static>(&mut self, future: F) -> TaskHandle<F::Output> {
let (abort_handle, abort_registration) = AbortHandle::new_pair();
let (send_done, recv_done) = oneshot::channel();
let (send_output, recv_output) = oneshot::channel();
let future = async move {
let _ = send_output.send(future.await);
let _ = send_done.send(());
};
self.0.push(recv_done);
wasm_bindgen_futures::spawn_local(
future::Abortable::new(future, abort_registration).map(drop),
);
TaskHandle {
output_receiver: recv_output,
abort_handle,
}
}
async fn await_all_tasks(&mut self) {
stream::iter(&mut self.0)
.then(|x| x)
.map(drop)
.collect()
.await
}
fn reap_finished_tasks(&mut self) {
self.0.retain_mut(|task| task.try_recv() == Ok(None));
}
}
}
#[cfg(not(web))]
mod implementation {
pub use tokio::task::AbortHandle;
use super::*;
pub type JoinSet = tokio::task::JoinSet<()>;
/// An extension trait for the [`JoinSet`] type.
#[trait_variant::make(Send)]
pub trait JoinSetExt: Sized {
/// Spawns a `future` task on this [`JoinSet`] using [`JoinSet::spawn`].
///
/// Returns a [`oneshot::Receiver`] to receive the `future`'s output, and an
/// [`AbortHandle`] to cancel execution of the task.
fn spawn_task<F: Future<Output: Send> + Send + 'static>(
&mut self,
future: F,
) -> TaskHandle<F::Output>;
/// Awaits all tasks spawned in this [`JoinSet`].
async fn await_all_tasks(&mut self);
/// Reaps tasks that have finished.
fn reap_finished_tasks(&mut self);
}
impl JoinSetExt for JoinSet {
fn spawn_task<F>(&mut self, future: F) -> TaskHandle<F::Output>
where
F: Future + Send + 'static,
F::Output: Send,
{
let (output_sender, output_receiver) = oneshot::channel();
let abort_handle = self.spawn(async move {
let _ = output_sender.send(future.await);
});
TaskHandle {
output_receiver,
abort_handle,
}
}
async fn await_all_tasks(&mut self) {
while self.join_next().await.is_some() {}
}
fn reap_finished_tasks(&mut self) {
while self.try_join_next().is_some() {}
}
}
}
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use futures::FutureExt as _;
pub use implementation::*;
/// A handle to a task spawned with [`JoinSetExt`].
///
/// Dropping a handle detaches its respective task.
pub struct TaskHandle<Output> {
output_receiver: oneshot::Receiver<Output>,
abort_handle: AbortHandle,
}
impl<Output> Future for TaskHandle<Output> {
type Output = Result<Output, oneshot::Canceled>;
fn poll(mut self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Self::Output> {
self.as_mut().output_receiver.poll_unpin(context)
}
}
impl<Output> TaskHandle<Output> {
/// Aborts the task.
pub fn abort(&self) {
self.abort_handle.abort();
}
/// Returns [`true`] if the task is still running.
pub fn is_running(&mut self) -> bool {
self.output_receiver.try_recv().is_err()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/notifier.rs | linera-core/src/notifier.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use linera_base::identifiers::ChainId;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tracing::trace;
use crate::worker;
// TODO(#2171): replace this with a Tokio broadcast channel
/// A `Notifier` holds references to clients waiting to receive notifications
/// from the validator.
/// Clients will be evicted if their connections are terminated.
pub struct ChannelNotifier<N> {
inner: papaya::HashMap<ChainId, Vec<UnboundedSender<N>>>,
}
impl<N> Default for ChannelNotifier<N> {
fn default() -> Self {
Self {
inner: papaya::HashMap::default(),
}
}
}
impl<N> ChannelNotifier<N> {
fn add_sender(&self, chain_ids: Vec<ChainId>, sender: &UnboundedSender<N>) {
let pinned = self.inner.pin();
for id in chain_ids {
pinned.update_or_insert_with(
id,
|senders| senders.iter().cloned().chain([sender.clone()]).collect(),
|| vec![sender.clone()],
);
}
}
/// Creates a subscription given a collection of chain IDs and a sender to the client.
pub fn subscribe(&self, chain_ids: Vec<ChainId>) -> UnboundedReceiver<N> {
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
self.add_sender(chain_ids, &tx);
rx
}
/// Creates a subscription given a collection of chain IDs and a sender to the client.
/// Immediately posts a first notification as an ACK.
pub fn subscribe_with_ack(&self, chain_ids: Vec<ChainId>, ack: N) -> UnboundedReceiver<N> {
let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
self.add_sender(chain_ids, &tx);
tx.send(ack)
.expect("pushing to a new channel should succeed");
rx
}
}
impl<N> ChannelNotifier<N>
where
N: Clone,
{
/// Notifies all the clients waiting for a notification from a given chain.
pub fn notify_chain(&self, chain_id: &ChainId, notification: &N) {
self.inner.pin().compute(*chain_id, |senders| {
let Some((_key, senders)) = senders else {
trace!("Chain {chain_id} has no subscribers.");
return papaya::Operation::Abort(());
};
let live_senders = senders
.iter()
.filter(|sender| sender.send(notification.clone()).is_ok())
.cloned()
.collect::<Vec<_>>();
if live_senders.is_empty() {
trace!("No more subscribers for chain {chain_id}. Removing entry.");
return papaya::Operation::Remove;
}
papaya::Operation::Insert(live_senders)
});
}
}
pub trait Notifier: Clone + Send + 'static {
fn notify(&self, notifications: &[worker::Notification]);
}
impl Notifier for Arc<ChannelNotifier<worker::Notification>> {
fn notify(&self, notifications: &[worker::Notification]) {
for notification in notifications {
self.notify_chain(¬ification.chain_id, notification);
}
}
}
impl Notifier for () {
fn notify(&self, _notifications: &[worker::Notification]) {}
}
#[cfg(with_testing)]
impl Notifier for Arc<std::sync::Mutex<Vec<worker::Notification>>> {
fn notify(&self, notifications: &[worker::Notification]) {
let mut guard = self.lock().unwrap();
guard.extend(notifications.iter().cloned())
}
}
#[cfg(test)]
pub mod tests {
use std::{
sync::{atomic::Ordering, Arc},
time::Duration,
};
use linera_execution::test_utils::dummy_chain_description;
use super::*;
#[test]
fn test_concurrent() {
let notifier = ChannelNotifier::default();
let chain_a = dummy_chain_description(0).id();
let chain_b = dummy_chain_description(1).id();
let a_rec = Arc::new(std::sync::atomic::AtomicUsize::new(0));
let b_rec = Arc::new(std::sync::atomic::AtomicUsize::new(0));
let a_b_rec = Arc::new(std::sync::atomic::AtomicUsize::new(0));
let mut rx_a = notifier.subscribe(vec![chain_a]);
let mut rx_b = notifier.subscribe(vec![chain_b]);
let mut rx_a_b = notifier.subscribe(vec![chain_a, chain_b]);
let a_rec_clone = a_rec.clone();
let b_rec_clone = b_rec.clone();
let a_b_rec_clone = a_b_rec.clone();
let notifier = Arc::new(notifier);
std::thread::spawn(move || {
while rx_a.blocking_recv().is_some() {
a_rec_clone.fetch_add(1, Ordering::Relaxed);
}
});
std::thread::spawn(move || {
while rx_b.blocking_recv().is_some() {
b_rec_clone.fetch_add(1, Ordering::Relaxed);
}
});
std::thread::spawn(move || {
while rx_a_b.blocking_recv().is_some() {
a_b_rec_clone.fetch_add(1, Ordering::Relaxed);
}
});
const NOTIFICATIONS_A: usize = 500;
const NOTIFICATIONS_B: usize = 700;
let a_notifier = notifier.clone();
let handle_a = std::thread::spawn(move || {
for _ in 0..NOTIFICATIONS_A {
a_notifier.notify_chain(&chain_a, &());
}
});
let handle_b = std::thread::spawn(move || {
for _ in 0..NOTIFICATIONS_B {
notifier.notify_chain(&chain_b, &());
}
});
// finish sending all the messages
handle_a.join().unwrap();
handle_b.join().unwrap();
// give some time for the messages to be received.
std::thread::sleep(Duration::from_millis(100));
assert_eq!(a_rec.load(Ordering::Relaxed), NOTIFICATIONS_A);
assert_eq!(b_rec.load(Ordering::Relaxed), NOTIFICATIONS_B);
assert_eq!(
a_b_rec.load(Ordering::Relaxed),
NOTIFICATIONS_A + NOTIFICATIONS_B
);
}
#[test]
fn test_eviction() {
let notifier = ChannelNotifier::default();
let chain_a = dummy_chain_description(0).id();
let chain_b = dummy_chain_description(1).id();
let chain_c = dummy_chain_description(2).id();
let chain_d = dummy_chain_description(3).id();
// Chain A -> Notify A, Notify B
// Chain B -> Notify A, Notify B
// Chain C -> Notify C
// Chain D -> Notify A, Notify B, Notify C, Notify D
let mut rx_a = notifier.subscribe(vec![chain_a, chain_b, chain_d]);
let mut rx_b = notifier.subscribe(vec![chain_a, chain_b, chain_d]);
let mut rx_c = notifier.subscribe(vec![chain_c, chain_d]);
let mut rx_d = notifier.subscribe(vec![chain_d]);
assert_eq!(notifier.inner.len(), 4);
rx_c.close();
notifier.notify_chain(&chain_c, &());
assert_eq!(notifier.inner.len(), 3);
rx_a.close();
notifier.notify_chain(&chain_a, &());
assert_eq!(notifier.inner.len(), 3);
rx_b.close();
notifier.notify_chain(&chain_b, &());
assert_eq!(notifier.inner.len(), 2);
notifier.notify_chain(&chain_a, &());
assert_eq!(notifier.inner.len(), 1);
rx_d.close();
notifier.notify_chain(&chain_d, &());
assert_eq!(notifier.inner.len(), 0);
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/value_cache.rs | linera-core/src/value_cache.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A least-recently used cache of values.
#[cfg(test)]
#[path = "unit_tests/value_cache_tests.rs"]
mod unit_tests;
#[cfg(with_metrics)]
use std::any::type_name;
use std::{borrow::Cow, hash::Hash, num::NonZeroUsize, sync::Mutex};
use linera_base::{crypto::CryptoHash, hashed::Hashed};
use lru::LruCache;
/// A counter metric for the number of cache hits in the [`ValueCache`].
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::register_int_counter_vec;
use prometheus::IntCounterVec;
pub static CACHE_HIT_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
register_int_counter_vec(
"value_cache_hit",
"Cache hits in `ValueCache`",
&["key_type", "value_type"],
)
});
/// A counter metric for the number of cache misses in the [`ValueCache`].
pub static CACHE_MISS_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
register_int_counter_vec(
"value_cache_miss",
"Cache misses in `ValueCache`",
&["key_type", "value_type"],
)
});
}
/// A least-recently used cache of a value.
pub struct ValueCache<K, V>
where
K: Hash + Eq + PartialEq + Copy,
{
cache: Mutex<LruCache<K, V>>,
}
impl<K, V> ValueCache<K, V>
where
K: Hash + Eq + PartialEq + Copy,
{
/// Creates a new `ValueCache` with the given size.
pub fn new(size: usize) -> Self {
let size = NonZeroUsize::try_from(size).expect("Cache size is larger than zero");
ValueCache {
cache: Mutex::new(LruCache::new(size)),
}
}
/// Inserts a `V` into the cache, if it's not already present.
pub fn insert_owned(&self, key: &K, value: V) -> bool {
let mut cache = self.cache.lock().unwrap();
if cache.contains(key) {
// Promote the re-inserted value in the cache, as if it was accessed again.
cache.promote(key);
false
} else {
// Cache the value so that clients don't have to send it again.
cache.push(*key, value);
true
}
}
/// Removes a `V` from the cache and returns it, if present.
pub fn remove(&self, hash: &K) -> Option<V> {
Self::track_cache_usage(self.cache.lock().unwrap().pop(hash))
}
/// Returns a `V` from the cache, if present.
pub fn get(&self, hash: &K) -> Option<V>
where
V: Clone,
{
Self::track_cache_usage(self.cache.lock().unwrap().get(hash).cloned())
}
fn track_cache_usage(maybe_value: Option<V>) -> Option<V> {
#[cfg(with_metrics)]
{
let metric = if maybe_value.is_some() {
&metrics::CACHE_HIT_COUNT
} else {
&metrics::CACHE_MISS_COUNT
};
metric
.with_label_values(&[type_name::<K>(), type_name::<V>()])
.inc();
}
maybe_value
}
}
impl<T: Clone> ValueCache<CryptoHash, Hashed<T>> {
/// Inserts a [`HashedCertificateValue`] into the cache, if it's not already present.
///
/// The `value` is wrapped in a [`Cow`] so that it is only cloned if it needs to be
/// inserted in the cache.
///
/// Returns [`true`] if the value was not already present in the cache.
pub fn insert(&self, value: Cow<Hashed<T>>) -> bool {
let hash = (*value).hash();
let mut cache = self.cache.lock().unwrap();
if cache.contains(&hash) {
// Promote the re-inserted value in the cache, as if it was accessed again.
cache.promote(&hash);
false
} else {
// Cache the certificate so that clients don't have to send the value again.
cache.push(hash, value.into_owned());
true
}
}
/// Inserts multiple [`HashedCertificateValue`]s into the cache. If they're not
/// already present.
///
/// The `values` are wrapped in [`Cow`]s so that each `value` is only cloned if it
/// needs to be inserted in the cache.
#[cfg(test)]
pub fn insert_all<'a>(&self, values: impl IntoIterator<Item = Cow<'a, Hashed<T>>>)
where
T: 'a,
{
let mut cache = self.cache.lock().unwrap();
for value in values {
let hash = (*value).hash();
if !cache.contains(&hash) {
cache.push(hash, value.into_owned());
}
}
}
}
#[cfg(test)]
impl<K, V> ValueCache<K, V>
where
K: Hash + Eq + PartialEq + Copy,
{
/// Returns a `Collection` of the hashes in the cache.
pub fn keys<Collection>(&self) -> Collection
where
Collection: FromIterator<K>,
{
self.cache
.lock()
.unwrap()
.iter()
.map(|(key, _)| *key)
.collect()
}
/// Returns [`true`] if the cache contains the `V` with the
/// requested `K`.
pub fn contains(&self, key: &K) -> bool {
self.cache.lock().unwrap().contains(key)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/worker.rs | linera-core/src/worker.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque},
sync::{Arc, Mutex, RwLock},
time::Duration,
};
use futures::future::Either;
use linera_base::{
crypto::{CryptoError, CryptoHash, ValidatorPublicKey, ValidatorSecretKey},
data_types::{
ApplicationDescription, ArithmeticError, Blob, BlockHeight, Epoch, Round, Timestamp,
},
doc_scalar,
hashed::Hashed,
identifiers::{AccountOwner, ApplicationId, BlobId, ChainId, EventId, StreamId},
time::Instant,
util::traits::DynError,
};
#[cfg(with_testing)]
use linera_chain::ChainExecutionContext;
use linera_chain::{
data_types::{BlockExecutionOutcome, BlockProposal, MessageBundle, ProposedBlock},
types::{
Block, CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate,
LiteCertificate, Timeout, TimeoutCertificate, ValidatedBlock, ValidatedBlockCertificate,
},
ChainError, ChainStateView,
};
use linera_execution::{ExecutionError, ExecutionStateView, Query, QueryOutcome};
use linera_storage::Storage;
use linera_views::{context::InactiveContext, ViewError};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tokio::sync::{mpsc, oneshot, OwnedRwLockReadGuard};
use tracing::{error, instrument, trace, warn};
/// Re-export of [`EventSubscriptionsResult`] for use by other crate modules.
pub(crate) use crate::chain_worker::EventSubscriptionsResult;
use crate::{
chain_worker::{
BlockOutcome, ChainWorkerActor, ChainWorkerConfig, ChainWorkerRequest, DeliveryNotifier,
},
data_types::{ChainInfoQuery, ChainInfoResponse, CrossChainRequest},
join_set_ext::{JoinSet, JoinSetExt},
notifier::Notifier,
value_cache::ValueCache,
CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES,
};
#[cfg(test)]
#[path = "unit_tests/worker_tests.rs"]
mod worker_tests;
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{
exponential_bucket_interval, register_histogram_vec, register_int_counter,
register_int_counter_vec,
};
use prometheus::{HistogramVec, IntCounter, IntCounterVec};
pub static NUM_ROUNDS_IN_CERTIFICATE: LazyLock<HistogramVec> = LazyLock::new(|| {
register_histogram_vec(
"num_rounds_in_certificate",
"Number of rounds in certificate",
&["certificate_value", "round_type"],
exponential_bucket_interval(0.1, 50.0),
)
});
pub static NUM_ROUNDS_IN_BLOCK_PROPOSAL: LazyLock<HistogramVec> = LazyLock::new(|| {
register_histogram_vec(
"num_rounds_in_block_proposal",
"Number of rounds in block proposal",
&["round_type"],
exponential_bucket_interval(0.1, 50.0),
)
});
pub static TRANSACTION_COUNT: LazyLock<IntCounterVec> =
LazyLock::new(|| register_int_counter_vec("transaction_count", "Transaction count", &[]));
pub static INCOMING_BUNDLE_COUNT: LazyLock<IntCounter> =
LazyLock::new(|| register_int_counter("incoming_bundle_count", "Incoming bundle count"));
pub static OPERATION_COUNT: LazyLock<IntCounter> =
LazyLock::new(|| register_int_counter("operation_count", "Operation count"));
pub static NUM_BLOCKS: LazyLock<IntCounterVec> = LazyLock::new(|| {
register_int_counter_vec("num_blocks", "Number of blocks added to chains", &[])
});
pub static CERTIFICATES_SIGNED: LazyLock<IntCounterVec> = LazyLock::new(|| {
register_int_counter_vec(
"certificates_signed",
"Number of confirmed block certificates signed by each validator",
&["validator_name"],
)
});
pub static CHAIN_INFO_QUERIES: LazyLock<IntCounter> = LazyLock::new(|| {
register_int_counter(
"chain_info_queries",
"Number of chain info queries processed",
)
});
}
/// Instruct the networking layer to send cross-chain requests and/or push notifications.
#[derive(Default, Debug)]
pub struct NetworkActions {
/// The cross-chain requests
pub cross_chain_requests: Vec<CrossChainRequest>,
/// The push notifications.
pub notifications: Vec<Notification>,
}
impl NetworkActions {
pub fn extend(&mut self, other: NetworkActions) {
self.cross_chain_requests.extend(other.cross_chain_requests);
self.notifications.extend(other.notifications);
}
}
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
/// Notification that a chain has a new certified block or a new message.
pub struct Notification {
pub chain_id: ChainId,
pub reason: Reason,
}
doc_scalar!(
Notification,
"Notify that a chain has a new certified block or a new message"
);
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
/// Reason for the notification.
pub enum Reason {
NewBlock {
height: BlockHeight,
hash: CryptoHash,
},
NewEvents {
height: BlockHeight,
hash: CryptoHash,
event_streams: BTreeSet<StreamId>,
},
NewIncomingBundle {
origin: ChainId,
height: BlockHeight,
},
NewRound {
height: BlockHeight,
round: Round,
},
BlockExecuted {
height: BlockHeight,
hash: CryptoHash,
},
}
/// Error type for worker operations.
#[derive(Debug, Error)]
pub enum WorkerError {
#[error(transparent)]
CryptoError(#[from] CryptoError),
#[error(transparent)]
ArithmeticError(#[from] ArithmeticError),
#[error(transparent)]
ViewError(#[from] ViewError),
#[error("Certificates are in confirmed_log but not in storage: {0:?}")]
ReadCertificatesError(Vec<CryptoHash>),
#[error(transparent)]
ChainError(#[from] Box<ChainError>),
#[error(transparent)]
BcsError(#[from] bcs::Error),
// Chain access control
#[error("Block was not signed by an authorized owner")]
InvalidOwner,
#[error("Operations in the block are not authenticated by the proper owner: {0}")]
InvalidSigner(AccountOwner),
// Chaining
#[error(
"Chain is expecting a next block at height {expected_block_height} but the given block \
is at height {found_block_height} instead"
)]
UnexpectedBlockHeight {
expected_block_height: BlockHeight,
found_block_height: BlockHeight,
},
#[error("Unexpected epoch {epoch}: chain {chain_id} is at {chain_epoch}")]
InvalidEpoch {
chain_id: ChainId,
chain_epoch: Epoch,
epoch: Epoch,
},
#[error("Events not found: {0:?}")]
EventsNotFound(Vec<EventId>),
// Other server-side errors
#[error("Invalid cross-chain request")]
InvalidCrossChainRequest,
#[error("The block does not contain the hash that we expected for the previous block")]
InvalidBlockChaining,
#[error(
"The given outcome is not what we computed after executing the block.\n\
Computed: {computed:#?}\n\
Submitted: {submitted:#?}"
)]
IncorrectOutcome {
computed: Box<BlockExecutionOutcome>,
submitted: Box<BlockExecutionOutcome>,
},
#[error(
"Block timestamp ({block_timestamp}) is further in the future from local time \
({local_time}) than block time grace period ({block_time_grace_period:?})"
)]
InvalidTimestamp {
block_timestamp: Timestamp,
local_time: Timestamp,
block_time_grace_period: Duration,
},
#[error("We don't have the value for the certificate.")]
MissingCertificateValue,
#[error("The hash certificate doesn't match its value.")]
InvalidLiteCertificate,
#[error("Fast blocks cannot query oracles")]
FastBlockUsingOracles,
#[error("Blobs not found: {0:?}")]
BlobsNotFound(Vec<BlobId>),
#[error("confirmed_log entry at height {height} for chain {chain_id:8} not found")]
ConfirmedLogEntryNotFound {
height: BlockHeight,
chain_id: ChainId,
},
#[error("preprocessed_blocks entry at height {height} for chain {chain_id:8} not found")]
PreprocessedBlocksEntryNotFound {
height: BlockHeight,
chain_id: ChainId,
},
#[error("The block proposal is invalid: {0}")]
InvalidBlockProposal(String),
#[error("Blob was not required by any pending block")]
UnexpectedBlob,
#[error("Number of published blobs per block must not exceed {0}")]
TooManyPublishedBlobs(u64),
#[error("Missing network description")]
MissingNetworkDescription,
#[error("ChainWorkerActor for chain {chain_id} stopped executing unexpectedly: {error}")]
ChainActorSendError {
chain_id: ChainId,
error: Box<dyn DynError>,
},
#[error("ChainWorkerActor for chain {chain_id} stopped executing without responding: {error}")]
ChainActorRecvError {
chain_id: ChainId,
error: Box<dyn DynError>,
},
#[error("thread error: {0}")]
Thread(#[from] web_thread_pool::Error),
}
impl WorkerError {
/// Returns whether this error is caused by an issue in the local node.
///
/// Returns `false` whenever the error could be caused by a bad message from a peer.
pub fn is_local(&self) -> bool {
match self {
WorkerError::CryptoError(_)
| WorkerError::ArithmeticError(_)
| WorkerError::InvalidOwner
| WorkerError::InvalidSigner(_)
| WorkerError::UnexpectedBlockHeight { .. }
| WorkerError::InvalidEpoch { .. }
| WorkerError::EventsNotFound(_)
| WorkerError::InvalidBlockChaining
| WorkerError::IncorrectOutcome { .. }
| WorkerError::InvalidTimestamp { .. }
| WorkerError::MissingCertificateValue
| WorkerError::InvalidLiteCertificate
| WorkerError::FastBlockUsingOracles
| WorkerError::BlobsNotFound(_)
| WorkerError::InvalidBlockProposal(_)
| WorkerError::UnexpectedBlob
| WorkerError::TooManyPublishedBlobs(_)
| WorkerError::ViewError(ViewError::NotFound(_)) => false,
WorkerError::BcsError(_)
| WorkerError::InvalidCrossChainRequest
| WorkerError::ViewError(_)
| WorkerError::ConfirmedLogEntryNotFound { .. }
| WorkerError::PreprocessedBlocksEntryNotFound { .. }
| WorkerError::MissingNetworkDescription
| WorkerError::ChainActorSendError { .. }
| WorkerError::ChainActorRecvError { .. }
| WorkerError::Thread(_)
| WorkerError::ReadCertificatesError(_) => true,
WorkerError::ChainError(chain_error) => chain_error.is_local(),
}
}
}
impl From<ChainError> for WorkerError {
#[instrument(level = "trace", skip(chain_error))]
fn from(chain_error: ChainError) -> Self {
match chain_error {
ChainError::ExecutionError(execution_error, context) => match *execution_error {
ExecutionError::BlobsNotFound(blob_ids) => Self::BlobsNotFound(blob_ids),
ExecutionError::EventsNotFound(event_ids) => Self::EventsNotFound(event_ids),
_ => Self::ChainError(Box::new(ChainError::ExecutionError(
execution_error,
context,
))),
},
error => Self::ChainError(Box::new(error)),
}
}
}
#[cfg(with_testing)]
impl WorkerError {
/// Returns the inner [`ExecutionError`] in this error.
///
/// # Panics
///
/// If this is not caused by an [`ExecutionError`].
pub fn expect_execution_error(self, expected_context: ChainExecutionContext) -> ExecutionError {
let WorkerError::ChainError(chain_error) = self else {
panic!("Expected an `ExecutionError`. Got: {self:#?}");
};
let ChainError::ExecutionError(execution_error, context) = *chain_error else {
panic!("Expected an `ExecutionError`. Got: {chain_error:#?}");
};
assert_eq!(context, expected_context);
*execution_error
}
}
/// State of a worker in a validator or a local node.
pub struct WorkerState<StorageClient>
where
StorageClient: Storage,
{
/// A name used for logging
nickname: String,
/// Access to local persistent storage.
storage: StorageClient,
/// Configuration options for the [`ChainWorker`]s.
chain_worker_config: ChainWorkerConfig,
block_cache: Arc<ValueCache<CryptoHash, Hashed<Block>>>,
execution_state_cache: Arc<ValueCache<CryptoHash, ExecutionStateView<InactiveContext>>>,
/// Chain IDs that should be tracked by a worker.
tracked_chains: Option<Arc<RwLock<HashSet<ChainId>>>>,
/// One-shot channels to notify callers when messages of a particular chain have been
/// delivered.
delivery_notifiers: Arc<Mutex<DeliveryNotifiers>>,
/// The set of spawned [`ChainWorkerActor`] tasks.
chain_worker_tasks: Arc<Mutex<JoinSet>>,
/// The cache of running [`ChainWorkerActor`]s.
chain_workers: Arc<Mutex<BTreeMap<ChainId, ChainActorEndpoint<StorageClient>>>>,
}
impl<StorageClient> Clone for WorkerState<StorageClient>
where
StorageClient: Storage + Clone,
{
fn clone(&self) -> Self {
WorkerState {
nickname: self.nickname.clone(),
storage: self.storage.clone(),
chain_worker_config: self.chain_worker_config.clone(),
block_cache: self.block_cache.clone(),
execution_state_cache: self.execution_state_cache.clone(),
tracked_chains: self.tracked_chains.clone(),
delivery_notifiers: self.delivery_notifiers.clone(),
chain_worker_tasks: self.chain_worker_tasks.clone(),
chain_workers: self.chain_workers.clone(),
}
}
}
/// The sender endpoint for [`ChainWorkerRequest`]s.
type ChainActorEndpoint<StorageClient> = mpsc::UnboundedSender<(
ChainWorkerRequest<<StorageClient as Storage>::Context>,
tracing::Span,
Instant,
)>;
pub(crate) type DeliveryNotifiers = HashMap<ChainId, DeliveryNotifier>;
impl<StorageClient> WorkerState<StorageClient>
where
StorageClient: Storage,
{
#[instrument(level = "trace", skip(nickname, key_pair, storage))]
pub fn new(
nickname: String,
key_pair: Option<ValidatorSecretKey>,
storage: StorageClient,
block_cache_size: usize,
execution_state_cache_size: usize,
) -> Self {
WorkerState {
nickname,
storage,
chain_worker_config: ChainWorkerConfig::default().with_key_pair(key_pair),
block_cache: Arc::new(ValueCache::new(block_cache_size)),
execution_state_cache: Arc::new(ValueCache::new(execution_state_cache_size)),
tracked_chains: None,
delivery_notifiers: Arc::default(),
chain_worker_tasks: Arc::default(),
chain_workers: Arc::new(Mutex::new(BTreeMap::new())),
}
}
#[instrument(level = "trace", skip(nickname, storage))]
pub fn new_for_client(
nickname: String,
storage: StorageClient,
tracked_chains: Arc<RwLock<HashSet<ChainId>>>,
block_cache_size: usize,
execution_state_cache_size: usize,
) -> Self {
WorkerState {
nickname,
storage,
chain_worker_config: ChainWorkerConfig::default(),
block_cache: Arc::new(ValueCache::new(block_cache_size)),
execution_state_cache: Arc::new(ValueCache::new(execution_state_cache_size)),
tracked_chains: Some(tracked_chains),
delivery_notifiers: Arc::default(),
chain_worker_tasks: Arc::default(),
chain_workers: Arc::new(Mutex::new(BTreeMap::new())),
}
}
#[instrument(level = "trace", skip(self, value))]
pub fn with_allow_inactive_chains(mut self, value: bool) -> Self {
self.chain_worker_config.allow_inactive_chains = value;
self
}
#[instrument(level = "trace", skip(self, value))]
pub fn with_allow_messages_from_deprecated_epochs(mut self, value: bool) -> Self {
self.chain_worker_config
.allow_messages_from_deprecated_epochs = value;
self
}
#[instrument(level = "trace", skip(self, value))]
pub fn with_long_lived_services(mut self, value: bool) -> Self {
self.chain_worker_config.long_lived_services = value;
self
}
/// Returns an instance with the specified block time grace period.
///
/// Blocks with a timestamp this far in the future will still be accepted, but the validator
/// will wait until that timestamp before voting.
#[instrument(level = "trace", skip(self))]
pub fn with_block_time_grace_period(mut self, block_time_grace_period: Duration) -> Self {
self.chain_worker_config.block_time_grace_period = block_time_grace_period;
self
}
/// Returns an instance with the specified chain worker TTL.
///
/// Idle chain workers free their memory after that duration without requests.
#[instrument(level = "trace", skip(self))]
pub fn with_chain_worker_ttl(mut self, chain_worker_ttl: Duration) -> Self {
self.chain_worker_config.ttl = chain_worker_ttl;
self
}
/// Returns an instance with the specified sender chain worker TTL.
///
/// Idle sender chain workers free their memory after that duration without requests.
#[instrument(level = "trace", skip(self))]
pub fn with_sender_chain_worker_ttl(mut self, sender_chain_worker_ttl: Duration) -> Self {
self.chain_worker_config.sender_chain_ttl = sender_chain_worker_ttl;
self
}
/// Returns an instance with the specified maximum size for received_log entries.
///
/// Sizes below `CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES` should be avoided.
#[instrument(level = "trace", skip(self))]
pub fn with_chain_info_max_received_log_entries(
mut self,
chain_info_max_received_log_entries: usize,
) -> Self {
if chain_info_max_received_log_entries < CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES {
warn!(
"The value set for the maximum size of received_log entries \
may not be compatible with the latest clients: {} instead of {}",
chain_info_max_received_log_entries, CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES
);
}
self.chain_worker_config.chain_info_max_received_log_entries =
chain_info_max_received_log_entries;
self
}
#[instrument(level = "trace", skip(self))]
pub fn nickname(&self) -> &str {
&self.nickname
}
/// Returns the storage client so that it can be manipulated or queried.
#[instrument(level = "trace", skip(self))]
#[cfg(not(feature = "test"))]
pub(crate) fn storage_client(&self) -> &StorageClient {
&self.storage
}
/// Returns the storage client so that it can be manipulated or queried by tests in other
/// crates.
#[instrument(level = "trace", skip(self))]
#[cfg(feature = "test")]
pub fn storage_client(&self) -> &StorageClient {
&self.storage
}
#[instrument(level = "trace", skip(self, certificate))]
pub(crate) async fn full_certificate(
&self,
certificate: LiteCertificate<'_>,
) -> Result<Either<ConfirmedBlockCertificate, ValidatedBlockCertificate>, WorkerError> {
let block = self
.block_cache
.get(&certificate.value.value_hash)
.ok_or(WorkerError::MissingCertificateValue)?;
match certificate.value.kind {
linera_chain::types::CertificateKind::Confirmed => {
let value = ConfirmedBlock::from_hashed(block);
Ok(Either::Left(
certificate
.with_value(value)
.ok_or(WorkerError::InvalidLiteCertificate)?,
))
}
linera_chain::types::CertificateKind::Validated => {
let value = ValidatedBlock::from_hashed(block);
Ok(Either::Right(
certificate
.with_value(value)
.ok_or(WorkerError::InvalidLiteCertificate)?,
))
}
_ => Err(WorkerError::InvalidLiteCertificate),
}
}
}
#[allow(async_fn_in_trait)]
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait ProcessableCertificate: CertificateValue + Sized + 'static {
async fn process_certificate<S: Storage + Clone + 'static>(
worker: &WorkerState<S>,
certificate: GenericCertificate<Self>,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError>;
}
impl ProcessableCertificate for ConfirmedBlock {
async fn process_certificate<S: Storage + Clone + 'static>(
worker: &WorkerState<S>,
certificate: ConfirmedBlockCertificate,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
Box::pin(worker.handle_confirmed_certificate(certificate, None)).await
}
}
impl ProcessableCertificate for ValidatedBlock {
async fn process_certificate<S: Storage + Clone + 'static>(
worker: &WorkerState<S>,
certificate: ValidatedBlockCertificate,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
Box::pin(worker.handle_validated_certificate(certificate)).await
}
}
impl ProcessableCertificate for Timeout {
async fn process_certificate<S: Storage + Clone + 'static>(
worker: &WorkerState<S>,
certificate: TimeoutCertificate,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
worker.handle_timeout_certificate(certificate).await
}
}
impl<StorageClient> WorkerState<StorageClient>
where
StorageClient: Storage + Clone + 'static,
{
#[instrument(level = "trace", skip(self, certificate, notifier))]
#[inline]
pub async fn fully_handle_certificate_with_notifications<T>(
&self,
certificate: GenericCertificate<T>,
notifier: &impl Notifier,
) -> Result<ChainInfoResponse, WorkerError>
where
T: ProcessableCertificate,
{
let notifications = (*notifier).clone();
let this = self.clone();
linera_base::task::spawn(async move {
let (response, actions) =
ProcessableCertificate::process_certificate(&this, certificate).await?;
notifications.notify(&actions.notifications);
let mut requests = VecDeque::from(actions.cross_chain_requests);
while let Some(request) = requests.pop_front() {
let actions = this.handle_cross_chain_request(request).await?;
requests.extend(actions.cross_chain_requests);
notifications.notify(&actions.notifications);
}
Ok(response)
})
.await
}
/// Tries to execute a block proposal without any verification other than block execution.
#[instrument(level = "trace", skip(self, block))]
pub async fn stage_block_execution(
&self,
block: ProposedBlock,
round: Option<u32>,
published_blobs: Vec<Blob>,
) -> Result<(Block, ChainInfoResponse), WorkerError> {
self.query_chain_worker(block.chain_id, move |callback| {
ChainWorkerRequest::StageBlockExecution {
block,
round,
published_blobs,
callback,
}
})
.await
}
/// Executes a [`Query`] for an application's state on a specific chain.
///
/// If `block_hash` is specified, system will query the application's state
/// at that block. If it doesn't exist, it uses latest state.
#[instrument(level = "trace", skip(self, chain_id, query))]
pub async fn query_application(
&self,
chain_id: ChainId,
query: Query,
block_hash: Option<CryptoHash>,
) -> Result<QueryOutcome, WorkerError> {
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::QueryApplication {
query,
block_hash,
callback,
}
})
.await
}
#[instrument(level = "trace", skip(self, chain_id, application_id), fields(
nickname = %self.nickname,
chain_id = %chain_id,
application_id = %application_id
))]
pub async fn describe_application(
&self,
chain_id: ChainId,
application_id: ApplicationId,
) -> Result<ApplicationDescription, WorkerError> {
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::DescribeApplication {
application_id,
callback,
}
})
.await
}
/// Processes a confirmed block (aka a commit).
#[instrument(
level = "trace",
skip(self, certificate, notify_when_messages_are_delivered),
fields(
nickname = %self.nickname,
chain_id = %certificate.block().header.chain_id,
block_height = %certificate.block().header.height
)
)]
async fn process_confirmed_block(
&self,
certificate: ConfirmedBlockCertificate,
notify_when_messages_are_delivered: Option<oneshot::Sender<()>>,
) -> Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError> {
let chain_id = certificate.block().header.chain_id;
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::ProcessConfirmedBlock {
certificate,
notify_when_messages_are_delivered,
callback,
}
})
.await
}
/// Processes a validated block issued from a multi-owner chain.
#[instrument(level = "trace", skip(self, certificate), fields(
nickname = %self.nickname,
chain_id = %certificate.block().header.chain_id,
block_height = %certificate.block().header.height
))]
async fn process_validated_block(
&self,
certificate: ValidatedBlockCertificate,
) -> Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError> {
let chain_id = certificate.block().header.chain_id;
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::ProcessValidatedBlock {
certificate,
callback,
}
})
.await
}
/// Processes a leader timeout issued from a multi-owner chain.
#[instrument(level = "trace", skip(self, certificate), fields(
nickname = %self.nickname,
chain_id = %certificate.value().chain_id(),
height = %certificate.value().height()
))]
async fn process_timeout(
&self,
certificate: TimeoutCertificate,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
let chain_id = certificate.value().chain_id();
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::ProcessTimeout {
certificate,
callback,
}
})
.await
}
#[instrument(level = "trace", skip(self, origin, recipient, bundles), fields(
nickname = %self.nickname,
origin = %origin,
recipient = %recipient,
num_bundles = %bundles.len()
))]
async fn process_cross_chain_update(
&self,
origin: ChainId,
recipient: ChainId,
bundles: Vec<(Epoch, MessageBundle)>,
) -> Result<Option<BlockHeight>, WorkerError> {
self.query_chain_worker(recipient, move |callback| {
ChainWorkerRequest::ProcessCrossChainUpdate {
origin,
bundles,
callback,
}
})
.await
}
/// Returns a stored [`ConfirmedBlockCertificate`] for a chain's block.
#[instrument(level = "trace", skip(self, chain_id, height), fields(
nickname = %self.nickname,
chain_id = %chain_id,
height = %height
))]
#[cfg(with_testing)]
pub async fn read_certificate(
&self,
chain_id: ChainId,
height: BlockHeight,
) -> Result<Option<ConfirmedBlockCertificate>, WorkerError> {
self.query_chain_worker(chain_id, move |callback| {
ChainWorkerRequest::ReadCertificate { height, callback }
})
.await
}
/// Returns a read-only view of the [`ChainStateView`] of a chain referenced by its
/// [`ChainId`].
///
/// The returned view holds a lock on the chain state, which prevents the worker from changing
/// the state of that chain.
#[instrument(level = "trace", skip(self), fields(
nickname = %self.nickname,
chain_id = %chain_id
))]
pub async fn chain_state_view(
&self,
chain_id: ChainId,
) -> Result<OwnedRwLockReadGuard<ChainStateView<StorageClient::Context>>, WorkerError> {
self.query_chain_worker(chain_id, |callback| ChainWorkerRequest::GetChainStateView {
callback,
})
.await
}
/// Sends a request to the [`ChainWorker`] for a [`ChainId`] and waits for the `Response`.
#[instrument(level = "trace", skip(self, request_builder), fields(
nickname = %self.nickname,
chain_id = %chain_id
))]
async fn query_chain_worker<Response>(
&self,
chain_id: ChainId,
request_builder: impl FnOnce(
oneshot::Sender<Result<Response, WorkerError>>,
) -> ChainWorkerRequest<StorageClient::Context>,
) -> Result<Response, WorkerError> {
// Build the request.
let (callback, response) = oneshot::channel();
let request = request_builder(callback);
// Call the endpoint, possibly a new one.
let new_receiver = self.call_and_maybe_create_chain_worker_endpoint(chain_id, request)?;
// We just created an endpoint: spawn the actor.
if let Some(receiver) = new_receiver {
let delivery_notifier = self
.delivery_notifiers
.lock()
.unwrap()
.entry(chain_id)
.or_default()
.clone();
let is_tracked = self
.tracked_chains
.as_ref()
.is_some_and(|tracked_chains| tracked_chains.read().unwrap().contains(&chain_id));
let actor_task = ChainWorkerActor::run(
self.chain_worker_config.clone(),
self.storage.clone(),
self.block_cache.clone(),
self.execution_state_cache.clone(),
self.tracked_chains.clone(),
delivery_notifier,
chain_id,
receiver,
is_tracked,
);
self.chain_worker_tasks
.lock()
.unwrap()
.spawn_task(actor_task);
}
// Finally, wait a response.
match response.await {
Err(e) => {
// The actor endpoint was dropped. Better luck next time!
Err(WorkerError::ChainActorRecvError {
chain_id,
error: Box::new(e),
})
}
Ok(response) => response,
}
}
/// Find an endpoint and call it. Create the endpoint if necessary.
#[instrument(level = "trace", skip(self), fields(
nickname = %self.nickname,
chain_id = %chain_id
))]
#[expect(clippy::type_complexity)]
fn call_and_maybe_create_chain_worker_endpoint(
&self,
chain_id: ChainId,
request: ChainWorkerRequest<StorageClient::Context>,
) -> Result<
Option<
mpsc::UnboundedReceiver<(
ChainWorkerRequest<StorageClient::Context>,
tracing::Span,
Instant,
)>,
>,
WorkerError,
> {
let mut chain_workers = self.chain_workers.lock().unwrap();
let (sender, new_receiver) = if let Some(endpoint) = chain_workers.remove(&chain_id) {
(endpoint, None)
} else {
let (sender, receiver) = mpsc::unbounded_channel();
(sender, Some(receiver))
};
if let Err(e) = sender.send((request, tracing::Span::current(), Instant::now())) {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/local_node.rs | linera-core/src/local_node.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, HashMap, VecDeque},
sync::Arc,
};
use futures::{stream::FuturesUnordered, TryStreamExt as _};
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{ArithmeticError, Blob, BlockHeight, Epoch},
identifiers::{BlobId, ChainId, StreamId},
};
use linera_chain::{
data_types::{BlockProposal, ProposedBlock},
types::{Block, GenericCertificate},
ChainStateView,
};
use linera_execution::{committee::Committee, BlobState, Query, QueryOutcome};
use linera_storage::Storage;
use linera_views::ViewError;
use thiserror::Error;
use tokio::sync::OwnedRwLockReadGuard;
use tracing::{instrument, warn};
use crate::{
data_types::{ChainInfo, ChainInfoQuery, ChainInfoResponse},
notifier::Notifier,
worker::{ProcessableCertificate, WorkerError, WorkerState},
};
/// A local node with a single worker, typically used by clients.
pub struct LocalNode<S>
where
S: Storage,
{
state: WorkerState<S>,
}
/// A client to a local node.
#[derive(Clone)]
pub struct LocalNodeClient<S>
where
S: Storage,
{
node: Arc<LocalNode<S>>,
}
/// Error type for the operations on a local node.
#[derive(Debug, Error)]
pub enum LocalNodeError {
#[error(transparent)]
ArithmeticError(#[from] ArithmeticError),
#[error(transparent)]
ViewError(#[from] ViewError),
#[error("Worker operation failed: {0}")]
WorkerError(WorkerError),
#[error("The local node doesn't have an active chain {0}")]
InactiveChain(ChainId),
#[error("The chain info response received from the local node is invalid")]
InvalidChainInfoResponse,
#[error("Blobs not found: {0:?}")]
BlobsNotFound(Vec<BlobId>),
}
impl From<WorkerError> for LocalNodeError {
fn from(error: WorkerError) -> Self {
match error {
WorkerError::BlobsNotFound(blob_ids) => LocalNodeError::BlobsNotFound(blob_ids),
error => LocalNodeError::WorkerError(error),
}
}
}
impl<S> LocalNodeClient<S>
where
S: Storage + Clone + 'static,
{
#[instrument(level = "trace", skip_all)]
pub async fn handle_block_proposal(
&self,
proposal: BlockProposal,
) -> Result<ChainInfoResponse, LocalNodeError> {
// In local nodes, we can trust fully_handle_certificate to carry all actions eventually.
let (response, _actions) =
Box::pin(self.node.state.handle_block_proposal(proposal)).await?;
Ok(response)
}
#[instrument(level = "trace", skip_all)]
pub async fn handle_certificate<T>(
&self,
certificate: GenericCertificate<T>,
notifier: &impl Notifier,
) -> Result<ChainInfoResponse, LocalNodeError>
where
T: ProcessableCertificate,
{
Ok(Box::pin(
self.node
.state
.fully_handle_certificate_with_notifications(certificate, notifier),
)
.await?)
}
#[instrument(level = "trace", skip_all)]
pub async fn handle_chain_info_query(
&self,
query: ChainInfoQuery,
) -> Result<ChainInfoResponse, LocalNodeError> {
// In local nodes, we can trust fully_handle_certificate to carry all actions eventually.
let (response, _actions) = self.node.state.handle_chain_info_query(query).await?;
Ok(response)
}
#[instrument(level = "trace", skip_all)]
pub fn new(state: WorkerState<S>) -> Self {
Self {
node: Arc::new(LocalNode { state }),
}
}
#[instrument(level = "trace", skip_all)]
pub(crate) fn storage_client(&self) -> S {
self.node.state.storage_client().clone()
}
#[instrument(level = "trace", skip_all)]
pub async fn stage_block_execution(
&self,
block: ProposedBlock,
round: Option<u32>,
published_blobs: Vec<Blob>,
) -> Result<(Block, ChainInfoResponse), LocalNodeError> {
Ok(self
.node
.state
.stage_block_execution(block, round, published_blobs)
.await?)
}
/// Reads blobs from storage.
pub async fn read_blobs_from_storage(
&self,
blob_ids: &[BlobId],
) -> Result<Option<Vec<Blob>>, LocalNodeError> {
let storage = self.storage_client();
Ok(storage.read_blobs(blob_ids).await?.into_iter().collect())
}
/// Reads blob states from storage.
pub async fn read_blob_states_from_storage(
&self,
blob_ids: &[BlobId],
) -> Result<Vec<BlobState>, LocalNodeError> {
let storage = self.storage_client();
let mut blobs_not_found = Vec::new();
let mut blob_states = Vec::new();
for (blob_state, blob_id) in storage
.read_blob_states(blob_ids)
.await?
.into_iter()
.zip(blob_ids)
{
match blob_state {
None => blobs_not_found.push(*blob_id),
Some(blob_state) => blob_states.push(blob_state),
}
}
if !blobs_not_found.is_empty() {
return Err(LocalNodeError::BlobsNotFound(blobs_not_found));
}
Ok(blob_states)
}
/// Looks for the specified blobs in the local chain manager's locking blobs.
/// Returns `Ok(None)` if any of the blobs is not found.
pub async fn get_locking_blobs(
&self,
blob_ids: impl IntoIterator<Item = &BlobId>,
chain_id: ChainId,
) -> Result<Option<Vec<Blob>>, LocalNodeError> {
let blob_ids_vec: Vec<_> = blob_ids.into_iter().copied().collect();
Ok(self
.node
.state
.get_locking_blobs(chain_id, blob_ids_vec)
.await?)
}
/// Writes the given blobs to storage if there is an appropriate blob state.
pub async fn store_blobs(&self, blobs: &[Blob]) -> Result<(), LocalNodeError> {
let storage = self.storage_client();
storage.maybe_write_blobs(blobs).await?;
Ok(())
}
pub async fn handle_pending_blobs(
&self,
chain_id: ChainId,
blobs: Vec<Blob>,
) -> Result<(), LocalNodeError> {
for blob in blobs {
self.node.state.handle_pending_blob(chain_id, blob).await?;
}
Ok(())
}
/// Returns a read-only view of the [`ChainStateView`] of a chain referenced by its
/// [`ChainId`].
///
/// The returned view holds a lock on the chain state, which prevents the local node from
/// changing the state of that chain.
#[instrument(level = "trace", skip(self))]
pub async fn chain_state_view(
&self,
chain_id: ChainId,
) -> Result<OwnedRwLockReadGuard<ChainStateView<S::Context>>, LocalNodeError> {
Ok(self.node.state.chain_state_view(chain_id).await?)
}
#[instrument(level = "trace", skip(self))]
pub(crate) async fn chain_info(
&self,
chain_id: ChainId,
) -> Result<Box<ChainInfo>, LocalNodeError> {
let query = ChainInfoQuery::new(chain_id);
Ok(self.handle_chain_info_query(query).await?.info)
}
#[instrument(level = "trace", skip(self, query))]
pub async fn query_application(
&self,
chain_id: ChainId,
query: Query,
block_hash: Option<CryptoHash>,
) -> Result<QueryOutcome, LocalNodeError> {
let outcome = self
.node
.state
.query_application(chain_id, query, block_hash)
.await?;
Ok(outcome)
}
/// Handles any pending local cross-chain requests.
#[instrument(level = "trace", skip(self))]
pub async fn retry_pending_cross_chain_requests(
&self,
sender_chain: ChainId,
) -> Result<(), LocalNodeError> {
let (_response, actions) = self
.node
.state
.handle_chain_info_query(ChainInfoQuery::new(sender_chain).with_network_actions())
.await?;
let mut requests = VecDeque::from_iter(actions.cross_chain_requests);
while let Some(request) = requests.pop_front() {
let new_actions = self.node.state.handle_cross_chain_request(request).await?;
requests.extend(new_actions.cross_chain_requests);
}
Ok(())
}
/// Given a list of chain IDs, returns a map that assigns to each of them the next block
/// height to schedule, i.e. the lowest block height for which we haven't added the messages
/// to `receiver_id` to the outbox yet.
pub async fn next_outbox_heights(
&self,
chain_ids: impl IntoIterator<Item = &ChainId>,
receiver_id: ChainId,
) -> Result<BTreeMap<ChainId, BlockHeight>, LocalNodeError> {
let futures =
FuturesUnordered::from_iter(chain_ids.into_iter().map(|chain_id| async move {
let (next_block_height, next_height_to_schedule) = match self
.get_tip_state_and_outbox_info(*chain_id, receiver_id)
.await
{
Ok(info) => info,
Err(LocalNodeError::BlobsNotFound(_) | LocalNodeError::InactiveChain(_)) => {
return Ok((*chain_id, BlockHeight::ZERO))
}
Err(err) => Err(err)?,
};
let next_height = if let Some(scheduled_height) = next_height_to_schedule {
next_block_height.max(scheduled_height)
} else {
next_block_height
};
Ok::<_, LocalNodeError>((*chain_id, next_height))
}));
futures.try_collect().await
}
pub async fn update_received_certificate_trackers(
&self,
chain_id: ChainId,
new_trackers: BTreeMap<ValidatorPublicKey, u64>,
) -> Result<(), LocalNodeError> {
self.node
.state
.update_received_certificate_trackers(chain_id, new_trackers)
.await?;
Ok(())
}
pub async fn get_preprocessed_block_hashes(
&self,
chain_id: ChainId,
start: BlockHeight,
end: BlockHeight,
) -> Result<Vec<linera_base::crypto::CryptoHash>, LocalNodeError> {
Ok(self
.node
.state
.get_preprocessed_block_hashes(chain_id, start, end)
.await?)
}
pub async fn get_inbox_next_height(
&self,
chain_id: ChainId,
origin: ChainId,
) -> Result<BlockHeight, LocalNodeError> {
Ok(self
.node
.state
.get_inbox_next_height(chain_id, origin)
.await?)
}
/// Gets block hashes for the given heights.
pub async fn get_block_hashes(
&self,
chain_id: ChainId,
heights: Vec<BlockHeight>,
) -> Result<Vec<CryptoHash>, LocalNodeError> {
Ok(self.node.state.get_block_hashes(chain_id, heights).await?)
}
/// Gets proposed blobs from the manager for specified blob IDs.
pub async fn get_proposed_blobs(
&self,
chain_id: ChainId,
blob_ids: Vec<BlobId>,
) -> Result<Vec<Blob>, LocalNodeError> {
Ok(self
.node
.state
.get_proposed_blobs(chain_id, blob_ids)
.await?)
}
/// Gets event subscriptions from the chain.
pub async fn get_event_subscriptions(
&self,
chain_id: ChainId,
) -> Result<crate::worker::EventSubscriptionsResult, LocalNodeError> {
Ok(self.node.state.get_event_subscriptions(chain_id).await?)
}
/// Gets the next expected event index for a stream.
pub async fn get_next_expected_event(
&self,
chain_id: ChainId,
stream_id: StreamId,
) -> Result<Option<u32>, LocalNodeError> {
Ok(self
.node
.state
.get_next_expected_event(chain_id, stream_id)
.await?)
}
/// Gets received certificate trackers.
pub async fn get_received_certificate_trackers(
&self,
chain_id: ChainId,
) -> Result<HashMap<ValidatorPublicKey, u64>, LocalNodeError> {
Ok(self
.node
.state
.get_received_certificate_trackers(chain_id)
.await?)
}
/// Gets tip state and outbox info for next_outbox_heights calculation.
pub async fn get_tip_state_and_outbox_info(
&self,
chain_id: ChainId,
receiver_id: ChainId,
) -> Result<(BlockHeight, Option<BlockHeight>), LocalNodeError> {
Ok(self
.node
.state
.get_tip_state_and_outbox_info(chain_id, receiver_id)
.await?)
}
/// Gets the next height to preprocess.
pub async fn get_next_height_to_preprocess(
&self,
chain_id: ChainId,
) -> Result<BlockHeight, LocalNodeError> {
Ok(self
.node
.state
.get_next_height_to_preprocess(chain_id)
.await?)
}
}
/// Extension trait for [`ChainInfo`]s from our local node. These should always be valid and
/// contain the requested information.
pub trait LocalChainInfoExt {
/// Returns the requested map of committees.
fn into_committees(self) -> Result<BTreeMap<Epoch, Committee>, LocalNodeError>;
/// Returns the current committee.
fn into_current_committee(self) -> Result<Committee, LocalNodeError>;
/// Returns a reference to the current committee.
fn current_committee(&self) -> Result<&Committee, LocalNodeError>;
}
impl LocalChainInfoExt for ChainInfo {
fn into_committees(self) -> Result<BTreeMap<Epoch, Committee>, LocalNodeError> {
self.requested_committees
.ok_or(LocalNodeError::InvalidChainInfoResponse)
}
fn into_current_committee(self) -> Result<Committee, LocalNodeError> {
self.requested_committees
.ok_or(LocalNodeError::InvalidChainInfoResponse)?
.remove(&self.epoch)
.ok_or(LocalNodeError::InactiveChain(self.chain_id))
}
fn current_committee(&self) -> Result<&Committee, LocalNodeError> {
self.requested_committees
.as_ref()
.ok_or(LocalNodeError::InvalidChainInfoResponse)?
.get(&self.epoch)
.ok_or(LocalNodeError::InactiveChain(self.chain_id))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/remote_node.rs | linera-core/src/remote_node.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{HashSet, VecDeque};
use custom_debug_derive::Debug;
use futures::future::try_join_all;
use linera_base::{
crypto::ValidatorPublicKey,
data_types::{Blob, BlockHeight},
ensure,
identifiers::{BlobId, ChainId},
};
use linera_chain::{
data_types::BlockProposal,
types::{
CertificateValue, ConfirmedBlockCertificate, GenericCertificate, LiteCertificate,
TimeoutCertificate, ValidatedBlockCertificate,
},
};
use tracing::{debug, info, instrument};
use crate::{
data_types::{ChainInfo, ChainInfoQuery, ChainInfoResponse},
node::{CrossChainMessageDelivery, NodeError, ValidatorNode},
};
/// A validator node together with the validator's name.
#[derive(Clone, Debug)]
pub struct RemoteNode<N> {
pub public_key: ValidatorPublicKey,
#[debug(skip)]
pub node: N,
}
impl<N: ValidatorNode> RemoteNode<N> {
    /// Forwards a chain info query to the validator and validates the response
    /// (chain ID and signature) before returning it.
    pub(crate) async fn handle_chain_info_query(
        &self,
        query: ChainInfoQuery,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = query.chain_id;
        let response = self.node.handle_chain_info_query(query).await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Submits a block proposal to the validator and validates the response.
    #[instrument(level = "trace")]
    pub(crate) async fn handle_block_proposal(
        &self,
        proposal: Box<BlockProposal>,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = proposal.content.block.chain_id;
        let response = self.node.handle_block_proposal(*proposal).await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Sends a timeout certificate to the validator and validates the response.
    pub(crate) async fn handle_timeout_certificate(
        &self,
        certificate: TimeoutCertificate,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = certificate.inner().chain_id();
        let response = self.node.handle_timeout_certificate(certificate).await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Sends a confirmed block certificate to the validator and validates the response.
    pub(crate) async fn handle_confirmed_certificate(
        &self,
        certificate: ConfirmedBlockCertificate,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = certificate.inner().chain_id();
        let response = self
            .node
            .handle_confirmed_certificate(certificate, delivery)
            .await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Sends a validated block certificate to the validator and validates the response.
    pub(crate) async fn handle_validated_certificate(
        &self,
        certificate: ValidatedBlockCertificate,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = certificate.inner().chain_id();
        let response = self.node.handle_validated_certificate(certificate).await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Sends a lite certificate (signatures without the full value) and validates
    /// the response.
    #[instrument(level = "trace")]
    pub(crate) async fn handle_lite_certificate(
        &self,
        certificate: LiteCertificate<'_>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let chain_id = certificate.value.chain_id;
        let response = self
            .node
            .handle_lite_certificate(certificate, delivery)
            .await?;
        self.check_and_return_info(response, chain_id)
    }
    /// Sends a validated certificate, trying the cheaper lite form first when this
    /// validator already signed it, and falling back to the full certificate if the
    /// validator no longer has the value.
    pub(crate) async fn handle_optimized_validated_certificate(
        &mut self,
        certificate: &ValidatedBlockCertificate,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, NodeError> {
        if certificate.is_signed_by(&self.public_key) {
            let result = self
                .handle_lite_certificate(certificate.lite_certificate(), delivery)
                .await;
            match result {
                Err(NodeError::MissingCertificateValue) => {
                    debug!(
                        address = self.address(),
                        certificate_hash = %certificate.hash(),
                        "validator forgot a validated block value that they signed before",
                    );
                }
                _ => return result,
            }
        }
        // Fall back to sending the full certificate.
        self.handle_validated_certificate(certificate.clone()).await
    }
    /// Same lite-first optimization as `handle_optimized_validated_certificate`,
    /// for confirmed block certificates.
    pub(crate) async fn handle_optimized_confirmed_certificate(
        &mut self,
        certificate: &ConfirmedBlockCertificate,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, NodeError> {
        if certificate.is_signed_by(&self.public_key) {
            let result = self
                .handle_lite_certificate(certificate.lite_certificate(), delivery)
                .await;
            match result {
                Err(NodeError::MissingCertificateValue) => {
                    debug!(
                        address = self.address(),
                        certificate_hash = %certificate.hash(),
                        "validator forgot a confirmed block value that they signed before",
                    );
                }
                _ => return result,
            }
        }
        self.handle_confirmed_certificate(certificate.clone(), delivery)
            .await
    }
    /// Verifies that a response refers to the expected chain and carries a valid
    /// signature from this validator; returns `InvalidChainInfoResponse` otherwise.
    fn check_and_return_info(
        &self,
        response: ChainInfoResponse,
        chain_id: ChainId,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let manager = &response.info.manager;
        let proposed = manager.requested_proposed.as_ref();
        let locking = manager.requested_locking.as_ref();
        ensure!(
            proposed.is_none_or(|proposal| proposal.content.block.chain_id == chain_id)
                && locking.is_none_or(|cert| cert.chain_id() == chain_id)
                && response.check(self.public_key).is_ok(),
            NodeError::InvalidChainInfoResponse
        );
        Ok(response.info)
    }
    /// Asks the validator for the certificate that last used the given blob,
    /// rejecting certificates whose block does not actually involve the blob.
    #[instrument(level = "trace")]
    pub(crate) async fn download_certificate_for_blob(
        &self,
        blob_id: BlobId,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        let certificate = self.node.blob_last_used_by_certificate(blob_id).await?;
        if !certificate.block().requires_or_creates_blob(&blob_id) {
            info!(
                address = self.address(),
                %blob_id,
                "got invalid last used by certificate for blob from validator",
            );
            return Err(NodeError::InvalidCertificateForBlob(blob_id));
        }
        Ok(certificate)
    }
    /// Sends a pending validated block's blobs to the validator.
    #[instrument(level = "trace")]
    pub(crate) async fn send_pending_blobs(
        &self,
        chain_id: ChainId,
        blobs: Vec<Blob>,
    ) -> Result<(), NodeError> {
        // Upload all blobs concurrently; fail fast on the first error.
        let tasks = blobs
            .into_iter()
            .map(|blob| self.node.handle_pending_blob(chain_id, blob.into_content()));
        try_join_all(tasks).await?;
        Ok(())
    }
    /// Downloads a blob from the validator. Returns `Ok(None)` if the validator
    /// does not have the blob or returned content whose ID does not match the
    /// requested one.
    #[instrument(level = "trace")]
    pub async fn download_blob(&self, blob_id: BlobId) -> Result<Option<Blob>, NodeError> {
        match self.node.download_blob(blob_id).await {
            Ok(blob) => {
                let blob = Blob::new(blob);
                // Reject content that does not hash to the requested blob ID.
                if blob.id() != blob_id {
                    tracing::info!(
                        address = self.address(),
                        %blob_id,
                        "validator sent an invalid blob.",
                    );
                    Ok(None)
                } else {
                    Ok(Some(blob))
                }
            }
            Err(NodeError::BlobsNotFound(_error)) => {
                tracing::debug!(
                    ?blob_id,
                    address = self.address(),
                    "validator is missing the blob",
                );
                Ok(None)
            }
            Err(error) => Err(error),
        }
    }
    /// Downloads a list of certificates from the given chain.
    #[instrument(level = "trace")]
    pub async fn download_certificates_by_heights(
        &self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        let mut expected_heights = VecDeque::from(heights.clone());
        let certificates = self
            .node
            .download_certificates_by_heights(chain_id, heights)
            .await?;
        if certificates.len() > expected_heights.len() {
            return Err(NodeError::TooManyCertificatesReturned {
                chain_id,
                remote_node: Box::new(self.public_key),
            });
        }
        // The returned certificates must belong to the requested chain and match
        // a prefix of the requested heights, in order.
        for certificate in &certificates {
            ensure!(
                certificate.inner().chain_id() == chain_id,
                NodeError::UnexpectedCertificateValue
            );
            if let Some(expected_height) = expected_heights.pop_front() {
                ensure!(
                    expected_height == certificate.inner().height(),
                    NodeError::UnexpectedCertificateValue
                );
            } else {
                return Err(NodeError::UnexpectedCertificateValue);
            }
        }
        // Any heights left over were not returned by the validator.
        ensure!(
            expected_heights.is_empty(),
            NodeError::MissingCertificatesByHeights {
                chain_id,
                heights: expected_heights.into_iter().collect(),
            }
        );
        Ok(certificates)
    }
    /// Checks that requesting these blobs when trying to handle this certificate is legitimate,
    /// i.e. that there are no duplicates and the blobs are actually required.
    pub fn check_blobs_not_found<T: CertificateValue>(
        &self,
        certificate: &GenericCertificate<T>,
        blob_ids: &[BlobId],
    ) -> Result<(), NodeError> {
        ensure!(!blob_ids.is_empty(), NodeError::EmptyBlobsNotFound);
        let required = certificate.inner().required_blob_ids();
        for blob_id in blob_ids {
            if !required.contains(blob_id) {
                info!(
                    address = self.address(),
                    %blob_id,
                    "validator requested blob but it is not required",
                );
                return Err(NodeError::UnexpectedEntriesInBlobsNotFound);
            }
        }
        let unique_missing_blob_ids = blob_ids.iter().copied().collect::<HashSet<_>>();
        if blob_ids.len() > unique_missing_blob_ids.len() {
            info!(
                address = self.address(),
                "blobs requested by validator contain duplicates",
            );
            return Err(NodeError::DuplicatesInBlobsNotFound);
        }
        Ok(())
    }
    /// Returns the validator's URL.
    pub fn address(&self) -> String {
        self.node.address()
    }
}
impl<N: ValidatorNode> PartialEq for RemoteNode<N> {
    /// Two remote nodes are equal when they represent the same validator, i.e.
    /// share the same public key; the node client handle is ignored.
    fn eq(&self, other: &Self) -> bool {
        self.public_key.eq(&other.public_key)
    }
}

impl<N: ValidatorNode> Eq for RemoteNode<N> {}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/data_types.rs | linera-core/src/data_types.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::BTreeMap, fmt, ops::Not};
use custom_debug_derive::Debug;
use linera_base::{
crypto::{
BcsSignable, CryptoError, CryptoHash, ValidatorPublicKey, ValidatorSecretKey,
ValidatorSignature,
},
data_types::{Amount, BlockHeight, ChainDescription, Epoch, Round, Timestamp},
identifiers::{AccountOwner, ChainId},
};
use linera_chain::{
data_types::{ChainAndHeight, IncomingBundle, MessageBundle},
manager::ChainManagerInfo,
ChainStateView,
};
use linera_execution::{committee::Committee, ExecutionRuntimeContext};
use linera_storage::ChainRuntimeContext;
use linera_views::context::Context;
use serde::{Deserialize, Serialize};
use crate::client::chain_client;
/// A range of block heights as used in `ChainInfoQuery`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary, Eq, PartialEq))]
pub struct BlockHeightRange {
    /// Starting point
    pub start: BlockHeight,
    /// Optional limit on the number of elements. `None` means unbounded.
    #[debug(skip_if = Option::is_none)]
    pub limit: Option<u64>,
}
impl BlockHeightRange {
    /// Creates a range containing only the single specified block height.
    pub fn single(start: BlockHeight) -> BlockHeightRange {
        let limit = Some(1);
        BlockHeightRange { start, limit }
    }

    /// Creates a range starting at the specified block height and containing up to `limit` elements.
    pub fn multi(start: BlockHeight, limit: u64) -> BlockHeightRange {
        BlockHeightRange {
            start,
            limit: Some(limit),
        }
    }

    /// Returns the highest block height in the range.
    ///
    /// For an unlimited range this is `start` itself. Saturating arithmetic is
    /// used so that a degenerate `limit` of 0 (an empty range) or an overflowing
    /// `start + limit` cannot panic in debug builds or wrap in release builds.
    pub fn highest(&self) -> BlockHeight {
        self.limit.map_or(self.start, |limit| {
            BlockHeight(self.start.0.saturating_add(limit).saturating_sub(1))
        })
    }
}
/// Request information about a chain.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary, Eq, PartialEq))]
pub struct ChainInfoQuery {
    /// The chain ID.
    pub chain_id: ChainId,
    /// Optionally test that the block height is the one expected.
    #[debug(skip_if = Option::is_none)]
    pub test_next_block_height: Option<BlockHeight>,
    /// Request the balance of a given [`AccountOwner`].
    pub request_owner_balance: AccountOwner,
    /// Query the current committees.
    #[debug(skip_if = Not::not)]
    pub request_committees: bool,
    /// Query the received messages that are waiting to be picked in the next block.
    #[debug(skip_if = Not::not)]
    pub request_pending_message_bundles: bool,
    /// Query a range of certificate hashes sent from the chain.
    // dev: this field is left and unused to maintain backwards compatibility
    // after hotfixing Testnet Conway.
    #[debug(skip_if = Option::is_none)]
    pub request_sent_certificate_hashes_in_range: Option<BlockHeightRange>,
    /// Query new certificate sender chain IDs and block heights received from the chain.
    #[debug(skip_if = Option::is_none)]
    pub request_received_log_excluding_first_n: Option<u64>,
    /// Query values from the chain manager, not just votes.
    #[debug(skip_if = Not::not)]
    pub request_manager_values: bool,
    /// Include a timeout vote for the specified round, if appropriate.
    #[debug(skip_if = Option::is_none)]
    pub request_leader_timeout: Option<(BlockHeight, Round)>,
    /// Include a vote to switch to fallback mode, if appropriate.
    #[debug(skip_if = Not::not)]
    pub request_fallback: bool,
    /// Query for certificate hashes at block heights.
    #[debug(skip_if = Vec::is_empty)]
    pub request_sent_certificate_hashes_by_heights: Vec<BlockHeight>,
    /// Whether to create network actions when handling this query. Deserialization
    /// defaults to `true` when the field is absent (see `default_true`), for
    /// backwards compatibility with peers that do not send it.
    #[serde(default = "default_true")]
    pub create_network_actions: bool,
}
// Serde default for `ChainInfoQuery::create_network_actions`.
// `bool::default()` is `false`, so an explicit helper is needed to make the
// field default to `true` when it is missing from serialized data.
fn default_true() -> bool {
    true
}
impl ChainInfoQuery {
    /// Creates a minimal query: no optional data is requested and network actions
    /// are not created. (Note: `create_network_actions` is `false` here, unlike the
    /// serde default — presumably intentional for locally built queries; confirm
    /// against callers.)
    pub fn new(chain_id: ChainId) -> Self {
        Self {
            chain_id,
            test_next_block_height: None,
            request_committees: false,
            request_owner_balance: AccountOwner::CHAIN,
            request_pending_message_bundles: false,
            request_sent_certificate_hashes_in_range: None,
            request_received_log_excluding_first_n: None,
            request_manager_values: false,
            request_leader_timeout: None,
            request_fallback: false,
            request_sent_certificate_hashes_by_heights: Vec::new(),
            create_network_actions: false,
        }
    }
    /// Also tests that the next block height is the one expected.
    pub fn test_next_block_height(mut self, height: BlockHeight) -> Self {
        self.test_next_block_height = Some(height);
        self
    }
    /// Also requests the current committees.
    pub fn with_committees(mut self) -> Self {
        self.request_committees = true;
        self
    }
    /// Requests the balance of the given owner instead of the chain balance only.
    pub fn with_owner_balance(mut self, owner: AccountOwner) -> Self {
        self.request_owner_balance = owner;
        self
    }
    /// Also requests the pending incoming message bundles.
    pub fn with_pending_message_bundles(mut self) -> Self {
        self.request_pending_message_bundles = true;
        self
    }
    /// Also requests certificate hashes for the given block heights.
    pub fn with_sent_certificate_hashes_by_heights(mut self, heights: Vec<BlockHeight>) -> Self {
        self.request_sent_certificate_hashes_by_heights = heights;
        self
    }
    /// Also requests the received log, excluding the first `n` entries.
    pub fn with_received_log_excluding_first_n(mut self, n: u64) -> Self {
        self.request_received_log_excluding_first_n = Some(n);
        self
    }
    /// Also requests full values from the chain manager, not just votes.
    pub fn with_manager_values(mut self) -> Self {
        self.request_manager_values = true;
        self
    }
    /// Also requests a timeout vote for the given height and round, if appropriate.
    pub fn with_timeout(mut self, height: BlockHeight, round: Round) -> Self {
        self.request_leader_timeout = Some((height, round));
        self
    }
    /// Also requests a vote to switch to fallback mode, if appropriate.
    pub fn with_fallback(mut self) -> Self {
        self.request_fallback = true;
        self
    }
    /// Allows network actions to be created when handling this query.
    pub fn with_network_actions(mut self) -> Self {
        self.create_network_actions = true;
        self
    }
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct ChainInfo {
    /// The chain ID.
    pub chain_id: ChainId,
    /// The number identifying the current configuration.
    pub epoch: Epoch,
    /// The chain description.
    #[debug(skip_if = Option::is_none)]
    pub description: Option<ChainDescription>,
    /// The state of the chain authentication.
    pub manager: Box<ChainManagerInfo>,
    /// The current balance.
    pub chain_balance: Amount,
    /// The last block hash, if any.
    #[debug(skip_if = Option::is_none)]
    pub block_hash: Option<CryptoHash>,
    /// The earliest possible timestamp for the next block.
    pub timestamp: Timestamp,
    /// The height after the latest block in the chain.
    pub next_block_height: BlockHeight,
    /// The hash of the current execution state.
    #[debug(skip_if = Option::is_none)]
    pub state_hash: Option<CryptoHash>,
    /// The requested owner balance, if any.
    #[debug(skip_if = Option::is_none)]
    pub requested_owner_balance: Option<Amount>,
    /// The current committees.
    #[debug(skip_if = Option::is_none)]
    pub requested_committees: Option<BTreeMap<Epoch, Committee>>,
    /// The received messages that are waiting to be picked in the next block (if requested).
    #[debug(skip_if = Vec::is_empty)]
    pub requested_pending_message_bundles: Vec<IncomingBundle>,
    /// The response to `request_sent_certificate_hashes_in_range`
    #[debug(skip_if = Vec::is_empty)]
    pub requested_sent_certificate_hashes: Vec<CryptoHash>,
    /// The current number of received certificates (useful for `request_received_log_excluding_first_n`)
    pub count_received_log: usize,
    /// The response to `request_received_log_excluding_first_n`
    #[debug(skip_if = Vec::is_empty)]
    pub requested_received_log: Vec<ChainAndHeight>,
}
impl ChainInfo {
    /// Returns the `RoundTimeout` value for the current round, or `None` if the current round
    /// does not time out.
    pub fn round_timeout(&self) -> Option<RoundTimeout> {
        // TODO(#1424): The local timeout might not match the validators' exactly.
        let timestamp = self.manager.round_timeout?;
        Some(RoundTimeout {
            timestamp,
            current_round: self.manager.current_round,
            next_block_height: self.next_block_height,
        })
    }
}
/// The response to a `ChainInfoQuery`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct ChainInfoResponse {
    /// The requested chain information.
    pub info: Box<ChainInfo>,
    /// The validator's signature over `info`, if the response was signed.
    pub signature: Option<ValidatorSignature>,
}
/// Information about shard allocation for a chain.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct ShardInfo {
    /// The shard ID that will process this chain.
    pub shard_id: usize,
    /// The total number of shards in the validator.
    pub total_shards: usize,
}
/// An internal request between chains within a validator.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum CrossChainRequest {
    /// Communicate a number of confirmed blocks from the sender to the recipient.
    /// Blocks must be given by increasing heights.
    UpdateRecipient {
        sender: ChainId,
        recipient: ChainId,
        bundles: Vec<(Epoch, MessageBundle)>,
    },
    /// Acknowledge the height of the highest confirmed blocks communicated with `UpdateRecipient`.
    ConfirmUpdatedRecipient {
        sender: ChainId,
        recipient: ChainId,
        latest_height: BlockHeight,
    },
}
impl CrossChainRequest {
/// Where to send the cross-chain request.
pub fn target_chain_id(&self) -> ChainId {
use CrossChainRequest::*;
match self {
UpdateRecipient { recipient, .. } => *recipient,
ConfirmUpdatedRecipient { sender, .. } => *sender,
}
}
/// Returns true if the cross-chain request has messages lower or equal than `height`.
pub fn has_messages_lower_or_equal_than(&self, height: BlockHeight) -> bool {
match self {
CrossChainRequest::UpdateRecipient { bundles, .. } => {
debug_assert!(bundles.windows(2).all(|w| w[0].1.height <= w[1].1.height));
matches!(bundles.first(), Some((_, h)) if h.height <= height)
}
_ => false,
}
}
}
impl<C, S> From<&ChainStateView<C>> for ChainInfo
where
    C: Context<Extra = ChainRuntimeContext<S>> + Clone + 'static,
    ChainRuntimeContext<S>: ExecutionRuntimeContext,
{
    /// Builds a base `ChainInfo` from the chain's state view. All `requested_*`
    /// fields are initialized empty/`None` here.
    fn from(view: &ChainStateView<C>) -> Self {
        let system_state = &view.execution_state.system;
        let tip_state = view.tip_state.get();
        ChainInfo {
            chain_id: view.chain_id(),
            epoch: *system_state.epoch.get(),
            description: system_state.description.get().clone(),
            manager: Box::new(ChainManagerInfo::from(&view.manager)),
            chain_balance: *system_state.balance.get(),
            block_hash: tip_state.block_hash,
            next_block_height: tip_state.next_block_height,
            timestamp: *view.execution_state.system.timestamp.get(),
            state_hash: *view.execution_state_hash.get(),
            requested_committees: None,
            requested_owner_balance: None,
            requested_pending_message_bundles: Vec::new(),
            requested_sent_certificate_hashes: Vec::new(),
            count_received_log: view.received_log.count(),
            requested_received_log: Vec::new(),
        }
    }
}
impl ChainInfoResponse {
    /// Creates a response from `info`, signing it if a validator key pair is provided.
    pub fn new(info: impl Into<ChainInfo>, key_pair: Option<&ValidatorSecretKey>) -> Self {
        let info = Box::new(info.into());
        let signature = key_pair.map(|kp| ValidatorSignature::new(&*info, kp));
        Self { info, signature }
    }
    /// Signs the [`ChainInfo`] stored inside this [`ChainInfoResponse`] with the provided
    /// [`ValidatorSecretKey`].
    pub fn sign(&mut self, key_pair: &ValidatorSecretKey) {
        self.signature = Some(ValidatorSignature::new(&*self.info, key_pair));
    }
    /// Verifies the signature over the contained [`ChainInfo`] against `public_key`.
    /// Fails with `MissingValidatorSignature` if the response is unsigned.
    pub fn check(&self, public_key: ValidatorPublicKey) -> Result<(), CryptoError> {
        match self.signature.as_ref() {
            Some(sig) => sig.check(&*self.info, public_key),
            None => Err(CryptoError::MissingValidatorSignature),
        }
    }
}
// Makes `ChainInfo` signable over its BCS serialization, so responses can carry
// a validator signature over it (see `ChainInfoResponse::new`/`sign`/`check`).
impl BcsSignable<'_> for ChainInfo {}
/// Request for downloading certificates by heights.
#[derive(Debug, Clone)]
pub struct CertificatesByHeightRequest {
    /// The chain the certificates belong to.
    pub chain_id: ChainId,
    /// The block heights to fetch certificates for.
    pub heights: Vec<BlockHeight>,
}
/// The outcome of trying to commit a list of operations to the chain.
#[derive(Debug)]
pub enum ClientOutcome<T> {
    /// The operations were committed successfully.
    Committed(T),
    /// We are not the round leader and cannot do anything. Try again at the specified time
    /// or whenever the round or block height changes.
    WaitForTimeout(RoundTimeout),
}
/// Describes when the current round times out.
#[derive(Debug)]
pub struct RoundTimeout {
    /// The time at which the round times out.
    pub timestamp: Timestamp,
    /// The round that times out.
    pub current_round: Round,
    /// The height after the latest block in the chain.
    pub next_block_height: BlockHeight,
}
impl fmt::Display for RoundTimeout {
    /// Renders e.g. "<round> at height <height> times out at <timestamp>".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{round} at height {height} times out at {at}",
            round = self.current_round,
            height = self.next_block_height,
            at = self.timestamp,
        )
    }
}
impl<T> ClientOutcome<T> {
    /// Returns the committed value; panics if the outcome was a timeout.
    /// Test-only helper.
    #[cfg(with_testing)]
    pub fn unwrap(self) -> T {
        match self {
            ClientOutcome::WaitForTimeout(timeout) => panic!("unexpected timeout: {timeout}"),
            ClientOutcome::Committed(value) => value,
        }
    }

    /// Returns the committed value; panics with `msg` if the outcome was a timeout.
    pub fn expect(self, msg: &'static str) -> T {
        match self {
            ClientOutcome::WaitForTimeout(_) => panic!("{}", msg),
            ClientOutcome::Committed(value) => value,
        }
    }

    /// Applies `f` to the committed value; timeouts are passed through unchanged.
    pub fn map<F, S>(self, f: F) -> ClientOutcome<S>
    where
        F: FnOnce(T) -> S,
    {
        match self {
            ClientOutcome::WaitForTimeout(timeout) => ClientOutcome::WaitForTimeout(timeout),
            ClientOutcome::Committed(value) => ClientOutcome::Committed(f(value)),
        }
    }

    /// Applies the fallible `f` to the committed value; timeouts are passed
    /// through unchanged.
    pub fn try_map<F, S>(self, f: F) -> Result<ClientOutcome<S>, chain_client::Error>
    where
        F: FnOnce(T) -> Result<S, chain_client::Error>,
    {
        match self {
            ClientOutcome::WaitForTimeout(timeout) => Ok(ClientOutcome::WaitForTimeout(timeout)),
            ClientOutcome::Committed(value) => f(value).map(ClientOutcome::Committed),
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/updater.rs | linera-core/src/updater.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
fmt,
hash::Hash,
mem,
sync::Arc,
};
use futures::{
stream::{FuturesUnordered, TryStreamExt},
Future, StreamExt,
};
use linera_base::{
crypto::ValidatorPublicKey,
data_types::{BlockHeight, Round, TimeDelta},
ensure,
identifiers::{BlobId, BlobType, ChainId, StreamId},
time::{timer::timeout, Duration, Instant},
};
use linera_chain::{
data_types::{BlockProposal, LiteVote},
manager::LockingBlock,
types::{ConfirmedBlock, GenericCertificate, ValidatedBlock, ValidatedBlockCertificate},
};
use linera_execution::{committee::Committee, system::EPOCH_STREAM_NAME};
use linera_storage::{Clock, ResultReadCertificates, Storage};
use thiserror::Error;
use tokio::sync::mpsc;
use tracing::{instrument, Level};
use crate::{
client::{chain_client, Client},
data_types::{ChainInfo, ChainInfoQuery},
environment::Environment,
node::{CrossChainMessageDelivery, NodeError, ValidatorNode},
remote_node::RemoteNode,
LocalNodeError,
};
/// The default amount of time we wait for additional validators to contribute
/// to the result, as a fraction of how long it took to reach a quorum.
pub const DEFAULT_QUORUM_GRACE_PERIOD: f64 = 0.2;
/// A report of clock skew from a validator, sent before retrying due to `InvalidTimestamp`.
pub type ClockSkewReport = (ValidatorPublicKey, TimeDelta);
/// The maximum timeout for requests to a stake-weighted quorum if no quorum is reached.
const MAX_TIMEOUT: Duration = Duration::from_secs(60 * 60 * 24); // 1 day.
/// Used for `communicate_chain_action`
#[derive(Clone)]
pub enum CommunicateAction {
    /// Submit a block proposal to the validators.
    SubmitBlock {
        proposal: Box<BlockProposal>,
        // Blob IDs accompanying the proposal — presumably those the validators
        // may need to be sent; TODO confirm against `send_block_proposal`.
        blob_ids: Vec<BlobId>,
        /// Channel to report clock skew before sleeping, so the caller can aggregate reports.
        clock_skew_sender: mpsc::UnboundedSender<ClockSkewReport>,
    },
    /// Finalize a validated block using the given certificate.
    FinalizeBlock {
        certificate: Box<ValidatedBlockCertificate>,
        delivery: CrossChainMessageDelivery,
    },
    /// Request a timeout vote for the given chain, height and round.
    RequestTimeout {
        chain_id: ChainId,
        height: BlockHeight,
        round: Round,
    },
}
impl CommunicateAction {
/// The round to which this action pertains.
pub fn round(&self) -> Round {
match self {
CommunicateAction::SubmitBlock { proposal, .. } => proposal.content.round,
CommunicateAction::FinalizeBlock { certificate, .. } => certificate.round,
CommunicateAction::RequestTimeout { round, .. } => *round,
}
}
}
/// Drives communication with a single validator on behalf of the client:
/// sends proposals, certificates and chain information.
pub struct ValidatorUpdater<Env>
where
    Env: Environment,
{
    /// The validator to communicate with.
    pub remote_node: RemoteNode<Env::ValidatorNode>,
    /// Shared client state, including the local node.
    pub client: Arc<Client<Env>>,
    /// The ID of the admin chain.
    pub admin_id: ChainId,
}
impl<Env: Environment> Clone for ValidatorUpdater<Env> {
fn clone(&self) -> Self {
ValidatorUpdater {
remote_node: self.remote_node.clone(),
client: self.client.clone(),
admin_id: self.admin_id,
}
}
}
/// An error result for requests to a stake-weighted quorum.
#[derive(Error, Debug)]
pub enum CommunicationError<E: fmt::Debug> {
    /// No consensus is possible since validators returned different possibilities
    /// for the next block. Carries the quorum threshold and, per distinct response,
    /// its accumulated weight and number of values.
    #[error(
        "No error but failed to find a consensus block. Consensus threshold: {0}, Proposals: {1:?}"
    )]
    NoConsensus(u64, Vec<(u64, usize)>),
    /// A single error that was returned by a sufficient number of nodes to be trusted as
    /// valid.
    #[error("Failed to communicate with a quorum of validators: {0}")]
    Trusted(E),
    /// No single error reached the validity threshold so we're returning a sample of
    /// errors for debugging purposes, together with their weight.
    #[error("Failed to communicate with a quorum of validators:\n{:#?}", .0)]
    Sample(Vec<(E, u64)>),
}
/// Executes a sequence of actions in parallel for all validators.
///
/// Tries to stop early when a quorum is reached. If `quorum_grace_period` is specified, other
/// validators are given additional time to contribute to the result. The grace period is
/// calculated as a fraction (defaulting to `DEFAULT_QUORUM_GRACE_PERIOD`) of the time taken to
/// reach quorum.
pub async fn communicate_with_quorum<'a, A, V, K, F, R, G>(
    validator_clients: &'a [RemoteNode<A>],
    committee: &Committee,
    group_by: G,
    execute: F,
    // Grace period as a fraction of time taken to reach quorum.
    quorum_grace_period: f64,
) -> Result<(K, Vec<(ValidatorPublicKey, V)>), CommunicationError<NodeError>>
where
    A: ValidatorNode + Clone + 'static,
    F: Clone + Fn(RemoteNode<A>) -> R,
    R: Future<Output = Result<V, chain_client::Error>> + 'a,
    G: Fn(&V) -> K,
    K: Hash + PartialEq + Eq + Clone + 'static,
    V: 'static,
{
    // Launch one request per validator with non-zero voting weight; results are
    // consumed in completion order.
    let mut responses: futures::stream::FuturesUnordered<_> = validator_clients
        .iter()
        .filter_map(|remote_node| {
            if committee.weight(&remote_node.public_key) == 0 {
                // This should not happen but better prevent it because certificates
                // are not allowed to include votes with weight 0.
                return None;
            }
            let execute = execute.clone();
            let remote_node = remote_node.clone();
            Some(async move { (remote_node.public_key, execute(remote_node).await) })
        })
        .collect();
    let start_time = Instant::now();
    // Deadline set once a quorum is reached: the end of the grace period.
    let mut end_time: Option<Instant> = None;
    let mut remaining_votes = committee.total_votes();
    let mut highest_key_score = 0;
    // Per grouping key: accumulated vote weight and the (validator, value) pairs seen.
    let mut value_scores: HashMap<K, (u64, Vec<(ValidatorPublicKey, V)>)> = HashMap::new();
    // Per distinct error: accumulated vote weight.
    let mut error_scores = HashMap::new();
    'vote_wait: while let Ok(Some((name, result))) = timeout(
        end_time.map_or(MAX_TIMEOUT, |t| t.saturating_duration_since(Instant::now())),
        responses.next(),
    )
    .await
    {
        remaining_votes -= committee.weight(&name);
        match result {
            Ok(value) => {
                let key = group_by(&value);
                let entry = value_scores.entry(key.clone()).or_insert((0, Vec::new()));
                entry.0 += committee.weight(&name);
                entry.1.push((name, value));
                highest_key_score = highest_key_score.max(entry.0);
            }
            Err(err) => {
                // TODO(#2857): Handle non-remote errors properly.
                let err = match err {
                    chain_client::Error::RemoteNodeError(err) => err,
                    err => NodeError::ResponseHandlingError {
                        error: err.to_string(),
                    },
                };
                let entry = error_scores.entry(err.clone()).or_insert(0);
                *entry += committee.weight(&name);
                if *entry >= committee.validity_threshold() {
                    // At least one honest node returned this error.
                    // No quorum can be reached, so return early.
                    return Err(CommunicationError::Trusted(err));
                }
            }
        }
        // If it becomes clear that no key can reach a quorum, break early.
        if highest_key_score + remaining_votes < committee.quorum_threshold() {
            break 'vote_wait;
        }
        // If a key reaches a quorum, wait for the grace period to collect more values
        // or error information and then stop.
        if end_time.is_none() && highest_key_score >= committee.quorum_threshold() {
            end_time = Some(Instant::now() + start_time.elapsed().mul_f64(quorum_grace_period));
        }
    }
    // Summary of (weight, number of values) per distinct key, for the NoConsensus error.
    let scores = value_scores
        .values()
        .map(|(weight, values)| (*weight, values.len()))
        .collect();
    // If a key has a quorum, return it with its values.
    if let Some((key, (_, values))) = value_scores
        .into_iter()
        .find(|(_, (score, _))| *score >= committee.quorum_threshold())
    {
        return Ok((key, values));
    }
    if error_scores.is_empty() {
        return Err(CommunicationError::NoConsensus(
            committee.quorum_threshold(),
            scores,
        ));
    }
    // No specific error is available to report reliably.
    // Return the most heavily weighted errors as a sample for debugging.
    let mut sample = error_scores.into_iter().collect::<Vec<_>>();
    sample.sort_by_key(|(_, score)| std::cmp::Reverse(*score));
    sample.truncate(4);
    Err(CommunicationError::Sample(sample))
}
impl<Env> ValidatorUpdater<Env>
where
Env: Environment + 'static,
{
    /// Sends a confirmed-block certificate to the remote validator.
    ///
    /// On `EventsNotFound` for epoch events of the admin chain, updates the admin
    /// chain and retries; on `BlobsNotFound`, uploads the missing blobs from local
    /// storage and retries. Each recovery path is attempted at most once.
    #[instrument(
        level = "trace", skip_all, err(level = Level::WARN),
        fields(chain_id = %certificate.block().header.chain_id)
    )]
    async fn send_confirmed_certificate(
        &mut self,
        certificate: GenericCertificate<ConfirmedBlock>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let mut result = self
            .remote_node
            .handle_optimized_confirmed_certificate(&certificate, delivery)
            .await;
        // Guards ensuring each recovery path below is taken at most once.
        let mut sent_admin_chain = false;
        let mut sent_blobs = false;
        loop {
            match result {
                Err(NodeError::EventsNotFound(event_ids))
                    if !sent_admin_chain
                        && certificate.inner().chain_id() != self.admin_id
                        && event_ids.iter().all(|event_id| {
                            event_id.stream_id == StreamId::system(EPOCH_STREAM_NAME)
                                && event_id.chain_id == self.admin_id
                        }) =>
                {
                    // The validator doesn't have the committee that signed the certificate.
                    self.update_admin_chain().await?;
                    sent_admin_chain = true;
                }
                Err(NodeError::BlobsNotFound(blob_ids)) if !sent_blobs => {
                    // The validator is missing the blobs required by the certificate.
                    self.remote_node
                        .check_blobs_not_found(&certificate, &blob_ids)?;
                    // The certificate is confirmed, so the blobs must be in storage.
                    let maybe_blobs = self
                        .client
                        .local_node
                        .read_blobs_from_storage(&blob_ids)
                        .await?;
                    let blobs = maybe_blobs.ok_or(NodeError::BlobsNotFound(blob_ids))?;
                    self.remote_node.node.upload_blobs(blobs).await?;
                    sent_blobs = true;
                }
                result => return Ok(result?),
            }
            // Retry after supplying the missing data.
            result = self
                .remote_node
                .handle_confirmed_certificate(certificate.clone(), delivery)
                .await;
        }
    }
    /// Sends a validated-block certificate to the remote validator.
    ///
    /// On `BlobsNotFound`, sends the missing blobs taken from the local chain
    /// manager's locking block; on other errors, tries to synchronize whichever
    /// side is behind (see `sync_if_needed`). In either case, retries once.
    async fn send_validated_certificate(
        &mut self,
        certificate: GenericCertificate<ValidatedBlock>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let result = self
            .remote_node
            .handle_optimized_validated_certificate(&certificate, delivery)
            .await;
        let chain_id = certificate.inner().chain_id();
        match &result {
            Err(original_err @ NodeError::BlobsNotFound(blob_ids)) => {
                self.remote_node
                    .check_blobs_not_found(&certificate, blob_ids)?;
                // The certificate is for a validated block, i.e. for our locking block.
                // Take the missing blobs from our local chain manager.
                let blobs = self
                    .client
                    .local_node
                    .get_locking_blobs(blob_ids, chain_id)
                    .await?
                    .ok_or_else(|| original_err.clone())?;
                self.remote_node.send_pending_blobs(chain_id, blobs).await?;
            }
            Err(error) => {
                self.sync_if_needed(
                    chain_id,
                    certificate.round,
                    certificate.block().header.height,
                    error,
                )
                .await?;
            }
            _ => return Ok(result?),
        }
        // Retry once after the recovery attempt above.
        Ok(self
            .remote_node
            .handle_validated_certificate(certificate)
            .await?)
    }
    /// Requests a vote for a timeout certificate for the given round from the remote node.
    ///
    /// If the remote node is not in that round or at that height yet, sends the chain information
    /// to update it.
    async fn request_timeout(
        &mut self,
        chain_id: ChainId,
        round: Round,
        height: BlockHeight,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let query = ChainInfoQuery::new(chain_id).with_timeout(height, round);
        let result = self
            .remote_node
            .handle_chain_info_query(query.clone())
            .await;
        // On failure, try to bring the lagging side up to date; the original
        // error is still propagated to the caller below.
        if let Err(err) = &result {
            self.sync_if_needed(chain_id, round, height, err).await?;
        }
        Ok(result?)
    }
    /// Synchronizes either the local node or the remote node, if one of them is lagging behind.
    ///
    /// If the validator reports a higher round/height than ours, we pull its chain
    /// state; if it reports a lower one (or an inactive chain), we push our chain
    /// information to it. All other errors are ignored here.
    async fn sync_if_needed(
        &mut self,
        chain_id: ChainId,
        round: Round,
        height: BlockHeight,
        error: &NodeError,
    ) -> Result<(), chain_client::Error> {
        let address = &self.remote_node.address();
        match error {
            // Validator is ahead in rounds: pull its chain state locally.
            NodeError::WrongRound(validator_round) if *validator_round > round => {
                tracing::debug!(
                    address, %chain_id, %validator_round, %round,
                    "validator is at a higher round; synchronizing",
                );
                self.client
                    .synchronize_chain_state_from(&self.remote_node, chain_id)
                    .await?;
            }
            // Validator is ahead in height: pull its chain state locally.
            NodeError::UnexpectedBlockHeight {
                expected_block_height,
                found_block_height,
            } if expected_block_height > found_block_height => {
                tracing::debug!(
                    address,
                    %chain_id,
                    %expected_block_height,
                    %found_block_height,
                    "validator is at a higher height; synchronizing",
                );
                self.client
                    .synchronize_chain_state_from(&self.remote_node, chain_id)
                    .await?;
            }
            // Validator is behind in rounds: push our chain information.
            NodeError::WrongRound(validator_round) if *validator_round < round => {
                tracing::debug!(
                    address, %chain_id, %validator_round, %round,
                    "validator is at a lower round; sending chain info",
                );
                self.send_chain_information(
                    chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            // Validator is behind in height: push our chain information.
            NodeError::UnexpectedBlockHeight {
                expected_block_height,
                found_block_height,
            } if expected_block_height < found_block_height => {
                tracing::debug!(
                    address,
                    %chain_id,
                    %expected_block_height,
                    %found_block_height,
                    "Validator is at a lower height; sending chain info.",
                );
                self.send_chain_information(
                    chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            // Validator does not know the chain at all: push our chain information.
            NodeError::InactiveChain(chain_id) => {
                tracing::debug!(
                    address,
                    %chain_id,
                    "Validator has inactive chain; sending chain info.",
                );
                self.send_chain_information(
                    *chain_id,
                    height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            _ => {}
        }
        Ok(())
    }
/// Sends a block proposal to the remote node, retrying in a loop while the node
/// returns recoverable `NodeError`s.
///
/// Depending on the error, this pushes chain information, cross-chain updates,
/// event-publisher chains or missing blobs to the validator, or sleeps out a
/// clock skew, and then retries the proposal. Each recovery path is guarded so
/// it runs at most once per target (via `sent_cross_chain_updates` /
/// `publisher_chain_ids_sent` / `blob_ids`); any other error aborts the loop.
async fn send_block_proposal(
    &mut self,
    proposal: Box<BlockProposal>,
    mut blob_ids: Vec<BlobId>,
    clock_skew_sender: mpsc::UnboundedSender<ClockSkewReport>,
) -> Result<Box<ChainInfo>, chain_client::Error> {
    let chain_id = proposal.content.block.chain_id;
    // Per-origin highest height already synchronized, to avoid repeating work.
    let mut sent_cross_chain_updates = BTreeMap::new();
    // Event-publisher chains we already pushed to the validator.
    let mut publisher_chain_ids_sent = BTreeSet::new();
    let storage = self.client.local_node.storage_client();
    loop {
        let local_time = storage.clock().current_time();
        match self
            .remote_node
            .handle_block_proposal(proposal.clone())
            .await
        {
            Ok(info) => return Ok(info),
            Err(NodeError::WrongRound(_round)) => {
                // The proposal is for a different round, so we need to update the validator.
                // TODO: this should probably be more specific as to which rounds are retried.
                tracing::debug!(
                    remote_node = self.remote_node.address(),
                    %chain_id,
                    "wrong round; sending chain to validator",
                );
                self.send_chain_information(
                    chain_id,
                    proposal.content.block.height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            Err(NodeError::UnexpectedBlockHeight {
                expected_block_height,
                found_block_height,
            }) if expected_block_height < found_block_height
                && found_block_height == proposal.content.block.height =>
            {
                tracing::debug!(
                    remote_node = self.remote_node.address(),
                    %chain_id,
                    "wrong height; sending chain to validator",
                );
                // The proposal is for a later block height, so we need to update the validator.
                self.send_chain_information(
                    chain_id,
                    found_block_height,
                    CrossChainMessageDelivery::NonBlocking,
                    None,
                )
                .await?;
            }
            Err(NodeError::MissingCrossChainUpdate {
                chain_id,
                origin,
                height,
            }) if chain_id == proposal.content.block.chain_id
                // Only retry if we haven't already synchronized this origin up to
                // (at least) the reported height.
                && sent_cross_chain_updates
                    .get(&origin)
                    .is_none_or(|h| *h < height) =>
            {
                tracing::debug!(
                    remote_node = %self.remote_node.address(),
                    chain_id = %origin,
                    "Missing cross-chain update; sending chain to validator.",
                );
                sent_cross_chain_updates.insert(origin, height);
                // Some received certificates may be missing for this validator
                // (e.g. to create the chain or make the balance sufficient) so we are going to
                // synchronize them now and retry.
                self.send_chain_information(
                    origin,
                    height.try_add_one()?,
                    CrossChainMessageDelivery::Blocking,
                    None,
                )
                .await?;
            }
            Err(NodeError::EventsNotFound(event_ids)) => {
                let mut publisher_heights = BTreeMap::new();
                // Only consider publisher chains we haven't pushed yet.
                let chain_ids = event_ids
                    .iter()
                    .map(|event_id| event_id.chain_id)
                    .filter(|chain_id| !publisher_chain_ids_sent.contains(chain_id))
                    .collect::<BTreeSet<_>>();
                tracing::debug!(
                    remote_node = self.remote_node.address(),
                    ?chain_ids,
                    "missing events; sending chains to validator",
                );
                // If we already sent every publisher and events are still missing,
                // give up with the original error.
                ensure!(!chain_ids.is_empty(), NodeError::EventsNotFound(event_ids));
                for chain_id in chain_ids {
                    let height = self
                        .client
                        .local_node
                        .get_next_height_to_preprocess(chain_id)
                        .await?;
                    publisher_heights.insert(chain_id, height);
                    publisher_chain_ids_sent.insert(chain_id);
                }
                self.send_chain_info_up_to_heights(
                    publisher_heights,
                    CrossChainMessageDelivery::NonBlocking,
                )
                .await?;
            }
            Err(NodeError::BlobsNotFound(_) | NodeError::InactiveChain(_))
                if !blob_ids.is_empty() =>
            {
                tracing::debug!("Missing blobs");
                // For `BlobsNotFound`, we assume that the local node should already be
                // updated with the needed blobs, so sending the chain information about the
                // certificates that last used the blobs to the validator node should be enough.
                let published_blob_ids =
                    BTreeSet::from_iter(proposal.content.block.published_blob_ids());
                // Blobs published by this very proposal are sent directly…
                blob_ids.retain(|blob_id| !published_blob_ids.contains(blob_id));
                let published_blobs = self
                    .client
                    .local_node
                    .get_proposed_blobs(chain_id, published_blob_ids.into_iter().collect())
                    .await?;
                self.remote_node
                    .send_pending_blobs(chain_id, published_blobs)
                    .await?;
                // …while for the rest we ask the validator which ones it still lacks.
                let missing_blob_ids = self
                    .remote_node
                    .node
                    .missing_blob_ids(mem::take(&mut blob_ids))
                    .await?;
                let blob_states = self
                    .client
                    .local_node
                    .read_blob_states_from_storage(&missing_blob_ids)
                    .await?;
                // For each chain, send it up to just past the highest block that
                // last used one of the missing blobs.
                let mut chain_heights = BTreeMap::new();
                for blob_state in blob_states {
                    let block_chain_id = blob_state.chain_id;
                    let block_height = blob_state.block_height.try_add_one()?;
                    chain_heights
                        .entry(block_chain_id)
                        .and_modify(|h| *h = block_height.max(*h))
                        .or_insert(block_height);
                }
                tracing::debug!("Sending chains {chain_heights:?}");
                self.send_chain_info_up_to_heights(
                    chain_heights,
                    CrossChainMessageDelivery::NonBlocking,
                )
                .await?;
            }
            Err(NodeError::InvalidTimestamp {
                block_timestamp,
                local_time: validator_local_time,
                ..
            }) => {
                // The validator's clock is behind the block's timestamp. We need to
                // wait for two things:
                // 1. Our clock to reach block_timestamp (in case the block timestamp
                //    is in the future from our perspective too).
                // 2. The validator's clock to catch up (in case of clock skew between
                //    us and the validator).
                let clock_skew = local_time.delta_since(validator_local_time);
                tracing::debug!(
                    remote_node = self.remote_node.address(),
                    %chain_id,
                    %block_timestamp,
                    ?clock_skew,
                    "validator's clock is behind; waiting and retrying",
                );
                // Report the clock skew before sleeping so the caller can aggregate.
                let _ = clock_skew_sender.send((self.remote_node.public_key, clock_skew));
                storage
                    .clock()
                    .sleep_until(block_timestamp.saturating_add(clock_skew))
                    .await;
            }
            // Fail immediately on other errors.
            Err(err) => return Err(err.into()),
        }
    }
}
/// Brings the remote node up to date with the admin chain, using our local node's
/// view of that chain's next block height as the target.
async fn update_admin_chain(&mut self) -> Result<(), chain_client::Error> {
    let target_height = self
        .client
        .local_node
        .chain_info(self.admin_id)
        .await?
        .next_block_height;
    // NOTE(review): `Box::pin` presumably keeps this (possibly recursive) future's
    // size bounded — confirm before removing.
    Box::pin(self.send_chain_information(
        self.admin_id,
        target_height,
        CrossChainMessageDelivery::NonBlocking,
        None,
    ))
    .await
}
/// Brings the remote node's view of `chain_id` up to `target_block_height` by sending
/// it the confirmed-block certificates it is missing. Afterwards, if the remote node
/// is at our height but in an older round, also sends whichever artifact proves our
/// current round (a proposal, a validated-block certificate, or a timeout certificate).
pub async fn send_chain_information(
    &mut self,
    chain_id: ChainId,
    target_block_height: BlockHeight,
    delivery: CrossChainMessageDelivery,
    latest_certificate: Option<GenericCertificate<ConfirmedBlock>>,
) -> Result<(), chain_client::Error> {
    let info = if let Ok(height) = target_block_height.try_sub_one() {
        // Figure out which certificates this validator is missing. In many cases, it's just the
        // last one, so we optimistically send that one right away.
        let certificate = if let Some(cert) = latest_certificate {
            cert
        } else {
            // Look up the certificate for the block just below the target height.
            let hash = self
                .client
                .local_node
                .get_block_hashes(chain_id, vec![height])
                .await?
                .into_iter()
                .next()
                .ok_or_else(|| {
                    chain_client::Error::InternalError(
                        "send_chain_information called with invalid target_block_height",
                    )
                })?;
            self.client
                .local_node
                .storage_client()
                .read_certificate(hash)
                .await?
                .ok_or_else(|| chain_client::Error::MissingConfirmedBlock(hash))?
        };
        let info = match self.send_confirmed_certificate(certificate, delivery).await {
            Ok(info) => info,
            Err(error) => {
                // Optimistic send failed; query the validator's actual state so we
                // know from which height it is missing blocks.
                tracing::debug!(
                    address = self.remote_node.address(), %error,
                    "validator failed to handle confirmed certificate; sending whole chain",
                );
                let query = ChainInfoQuery::new(chain_id);
                self.remote_node.handle_chain_info_query(query).await?
            }
        };
        // Obtain the missing blocks and the manager state from the local node.
        let heights: Vec<_> = (info.next_block_height.0..target_block_height.0)
            .map(BlockHeight)
            .collect();
        let validator_missing_hashes = self
            .client
            .local_node
            .get_block_hashes(chain_id, heights)
            .await?;
        if !validator_missing_hashes.is_empty() {
            // Send the requested certificates in order.
            let certificates = self
                .client
                .local_node
                .storage_client()
                .read_certificates(validator_missing_hashes.clone())
                .await?;
            let certificates =
                match ResultReadCertificates::new(certificates, validator_missing_hashes) {
                    ResultReadCertificates::Certificates(certificates) => certificates,
                    ResultReadCertificates::InvalidHashes(hashes) => {
                        return Err(chain_client::Error::ReadCertificatesError(hashes))
                    }
                };
            for certificate in certificates {
                self.send_confirmed_certificate(certificate, delivery)
                    .await?;
            }
        }
        info
    } else {
        // Target height is zero: the remote node might not know about the chain yet.
        // Send it the chain(s) whose blocks published this chain's description blob.
        let blob_states = self
            .client
            .local_node
            .read_blob_states_from_storage(&[BlobId::new(
                chain_id.0,
                BlobType::ChainDescription,
            )])
            .await?;
        let mut chain_heights = BTreeMap::new();
        for blob_state in blob_states {
            let block_chain_id = blob_state.chain_id;
            let block_height = blob_state.block_height.try_add_one()?;
            chain_heights
                .entry(block_chain_id)
                .and_modify(|h| *h = block_height.max(*h))
                .or_insert(block_height);
        }
        self.send_chain_info_up_to_heights(
            chain_heights,
            CrossChainMessageDelivery::NonBlocking,
        )
        .await?;
        let query = ChainInfoQuery::new(chain_id);
        self.remote_node.handle_chain_info_query(query).await?
    };
    let (remote_height, remote_round) = (info.next_block_height, info.manager.current_round);
    let query = ChainInfoQuery::new(chain_id).with_manager_values();
    let local_info = match self.client.local_node.handle_chain_info_query(query).await {
        Ok(response) => response.info,
        // We don't have the full chain description.
        Err(LocalNodeError::BlobsNotFound(_)) => return Ok(()),
        Err(error) => return Err(error.into()),
    };
    let manager = local_info.manager;
    // Done unless the remote node matches our height but lags behind in rounds.
    if local_info.next_block_height != remote_height || manager.current_round <= remote_round {
        return Ok(());
    }
    // The remote node is at our height but not at the current round. Send it the proposal,
    // validated block certificate or timeout certificate that proves the current round.
    for proposal in manager
        .requested_proposed
        .into_iter()
        .chain(manager.requested_signed_proposal)
    {
        if proposal.content.round == manager.current_round {
            if let Err(error) = self.remote_node.handle_block_proposal(proposal).await {
                // Best effort: log and fall through to the next artifact.
                tracing::info!(%error, "failed to send block proposal");
            } else {
                return Ok(());
            }
        }
    }
    if let Some(LockingBlock::Regular(validated)) = manager.requested_locking.map(|b| *b) {
        if validated.round == manager.current_round {
            if let Err(error) = self
                .remote_node
                .handle_optimized_validated_certificate(
                    &validated,
                    CrossChainMessageDelivery::NonBlocking,
                )
                .await
            {
                // Best effort: log and fall through to the timeout certificate.
                tracing::info!(%error, "failed to send locking block");
            } else {
                return Ok(());
            }
        }
    }
    if let Some(cert) = manager.timeout {
        if cert.round >= remote_round {
            tracing::debug!(round = %cert.round, "sending timeout");
            self.remote_node.handle_timeout_certificate(*cert).await?;
        }
    }
    Ok(())
}
async fn send_chain_info_up_to_heights(
&mut self,
chain_heights: impl IntoIterator<Item = (ChainId, BlockHeight)>,
delivery: CrossChainMessageDelivery,
) -> Result<(), chain_client::Error> {
FuturesUnordered::from_iter(chain_heights.into_iter().map(|(chain_id, height)| {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/worker_tests.rs | linera-core/src/unit_tests/worker_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::large_futures)]
#[path = "./wasm_worker_tests.rs"]
mod wasm;
use std::{
collections::BTreeMap,
iter,
sync::{Arc, Mutex},
time::Duration,
};
use assert_matches::assert_matches;
use linera_base::{
crypto::{
AccountPublicKey, AccountSecretKey, AccountSignature, CryptoHash, InMemorySigner,
ValidatorKeypair,
},
data_types::*,
identifiers::{Account, AccountOwner, ApplicationId, ChainId, EventId, StreamId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_chain::{
data_types::{
BlockExecutionOutcome, BlockProposal, ChainAndHeight, IncomingBundle, LiteValue, LiteVote,
MessageAction, MessageBundle, OperationResult, PostedMessage, ProposedBlock,
SignatureAggregator, Transaction,
},
manager::LockingBlock,
test::{make_child_block, make_first_block, BlockTestExt, MessageTestExt, VoteTestExt},
types::{
CertificateKind, CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate,
GenericCertificate, Timeout, ValidatedBlock,
},
ChainError, ChainExecutionContext, ChainStateView,
};
use linera_execution::{
committee::Committee,
system::{
AdminOperation, EpochEventData, OpenChainConfig, SystemMessage, SystemOperation,
EPOCH_STREAM_NAME as NEW_EPOCH_STREAM_NAME, REMOVED_EPOCH_STREAM_NAME,
},
test_utils::{
dummy_chain_description, ExpectedCall, MockApplication, RegisterMockApplication,
SystemExecutionState,
},
ExecutionError, ExecutionRuntimeContext, Message, MessageKind, OutgoingMessage, Query,
QueryContext, QueryOutcome, QueryResponse, SystemQuery, SystemResponse,
};
use linera_storage::Storage;
use linera_views::{context::Context, views::RootView};
use test_case::test_case;
use test_log::test;
#[cfg(feature = "dynamodb")]
use crate::test_utils::DynamoDbStorageBuilder;
#[cfg(feature = "rocksdb")]
use crate::test_utils::RocksDbStorageBuilder;
#[cfg(feature = "scylladb")]
use crate::test_utils::ScyllaDbStorageBuilder;
use crate::{
chain_worker::CrossChainUpdateHelper,
data_types::*,
test_utils::{MemoryStorageBuilder, StorageBuilder},
worker::{
Notification,
Reason::{self, NewBlock, NewIncomingBundle},
WorkerError, WorkerState,
},
};
/// The test worker accepts blocks with a timestamp this far in the future.
const TEST_GRACE_PERIOD_MICROS: u64 = 500_000; // 0.5 seconds
/// Shared fixture for worker tests: a single-validator committee plus two workers
/// backed by separate storage instances that share the same genesis data.
struct TestEnvironment<S: Storage> {
    // The (single-validator) committee of epoch 0.
    committee: Committee,
    // The main worker used for assertions.
    worker: WorkerState<S>,
    // This second worker is mostly used to create certificates that can then be handled by the
    // main worker, but some tests depend on the worker handling proposals to be a validator, so
    // they need to use this worker for everything.
    // The certificates have to be created by a worker due to the difficulty of computing
    // historical hashes for manually prepared certificates.
    executing_worker: WorkerState<S>,
    // Secret key of the admin chain's owner (also the committee's account key).
    admin_keypair: AccountSecretKey,
    // Description of the admin (root 0) chain created at genesis.
    admin_description: ChainDescription,
    // Descriptions of all additional chains created via the `add_*_chain` helpers.
    other_chains: BTreeMap<ChainId, ChainDescription>,
}
impl<S> TestEnvironment<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates a test environment with a default admin-chain balance of 1,000,000 tokens.
    async fn new<B: StorageBuilder<Storage = S>>(
        builder: &mut B,
        is_client: bool,
        has_long_lived_services: bool,
    ) -> Result<Self, anyhow::Error> {
        Self::new_with_amount(
            builder,
            is_client,
            has_long_lived_services,
            Amount::from_tokens(1_000_000),
        )
        .await
    }

    /// Creates a test environment whose admin chain starts with `amount` tokens.
    ///
    /// Builds a single-validator committee and two workers on separate storages
    /// sharing the same chain/network descriptions: `worker` (fresh validator key,
    /// for assertions) and `executing_worker` (holding the committee's validator
    /// key, for producing certificates).
    async fn new_with_amount<B: StorageBuilder<Storage = S>>(
        builder: &mut B,
        is_client: bool,
        has_long_lived_services: bool,
        amount: Amount,
    ) -> Result<Self, anyhow::Error> {
        let validator_keypair = ValidatorKeypair::generate();
        let account_secret = AccountSecretKey::generate();
        let committee = Committee::make_simple(vec![(
            validator_keypair.public_key,
            account_secret.public(),
        )]);
        let origin = ChainOrigin::Root(0);
        let config = InitialChainConfig {
            balance: amount,
            ownership: ChainOwnership::single(account_secret.public().into()),
            epoch: Epoch::ZERO,
            min_active_epoch: Epoch::ZERO,
            max_active_epoch: Epoch::ZERO,
            application_permissions: Default::default(),
        };
        let admin_description = ChainDescription::new(origin, config, Timestamp::from(0));
        let committee_blob = Blob::new_committee(bcs::to_bytes(&committee).unwrap());
        // Each worker gets its own storage, seeded with the same genesis blobs and
        // network description so both agree on the chain history.
        let mut make_worker = async |keypair: ValidatorKeypair| {
            let storage = builder
                .build()
                .await
                .expect("building storage should not fail");
            storage
                .write_blob(&Blob::new_chain_description(&admin_description))
                .await
                .expect("writing a blob should not fail");
            storage
                .write_blob(&committee_blob)
                .await
                .expect("writing a blob should succeed");
            storage
                .write_network_description(&NetworkDescription {
                    admin_chain_id: admin_description.id(),
                    genesis_config_hash: CryptoHash::test_hash("genesis config"),
                    genesis_timestamp: Timestamp::from(0),
                    genesis_committee_blob_hash: committee_blob.id().hash,
                    name: "test network".to_string(),
                })
                .await
                .expect("writing a network description should not fail");
            WorkerState::new(
                "Single validator node".to_string(),
                Some(keypair.secret_key),
                storage,
                5_000,
                10_000,
            )
            .with_allow_inactive_chains(is_client)
            .with_allow_messages_from_deprecated_epochs(is_client)
            .with_long_lived_services(has_long_lived_services)
            .with_block_time_grace_period(Duration::from_micros(TEST_GRACE_PERIOD_MICROS))
        };
        let worker = make_worker(ValidatorKeypair::generate()).await;
        let executing_worker = make_worker(validator_keypair).await;
        Ok(Self {
            committee,
            worker,
            executing_worker,
            admin_description,
            admin_keypair: account_secret,
            other_chains: BTreeMap::new(),
        })
    }

    /// Returns the ID of the admin (root 0) chain.
    fn admin_id(&self) -> ChainId {
        self.admin_description.id()
    }

    /// Returns the single-validator committee.
    fn committee(&self) -> &Committee {
        &self.committee
    }

    /// Returns the main worker, used for assertions.
    fn worker(&self) -> &WorkerState<S> {
        &self.worker
    }

    /// Returns the worker that holds the committee's validator key.
    fn executing_worker(&self) -> &WorkerState<S> {
        &self.executing_worker
    }

    /// Returns the public key of the admin chain's owner.
    fn admin_public_key(&self) -> AccountPublicKey {
        self.admin_keypair.public()
    }

    /// Writes the given blobs into the storage of both workers.
    pub async fn write_blobs(&mut self, blobs: &[Blob]) -> Result<(), linera_views::ViewError> {
        self.worker.storage.write_blobs(blobs).await?;
        self.executing_worker.storage.write_blobs(blobs).await?;
        Ok(())
    }

    /// Registers a mock application at `index` in the execution state of `chain_id`
    /// on both workers, returning the ID and handle from the executing worker.
    pub async fn register_mock_application(
        &mut self,
        chain_id: ChainId,
        index: u32,
    ) -> Result<(ApplicationId, MockApplication), anyhow::Error> {
        let mut chain = self.worker.storage.load_chain(chain_id).await?;
        let _ = chain
            .execution_state
            .register_mock_application(index)
            .await?;
        chain.save().await?;
        let mut chain = self.executing_worker.storage.load_chain(chain_id).await?;
        let (application_id, application, _) = chain
            .execution_state
            .register_mock_application(index)
            .await?;
        chain.save().await?;
        Ok((application_id, application))
    }

    /// Creates a root chain at `index` owned by a single owner with the given balance.
    async fn add_root_chain(
        &mut self,
        index: u32,
        owner: AccountOwner,
        balance: Amount,
    ) -> ChainDescription {
        self.add_root_chain_with_ownership(index, balance, ChainOwnership::single(owner))
            .await
    }

    /// Creates a root chain at `index` with the given balance and ownership,
    /// recording it in `other_chains` and in both workers' storages.
    async fn add_root_chain_with_ownership(
        &mut self,
        index: u32,
        balance: Amount,
        ownership: ChainOwnership,
    ) -> ChainDescription {
        let origin = ChainOrigin::Root(index);
        // Inherit the epoch bounds from the admin chain's genesis configuration.
        let config = InitialChainConfig {
            epoch: self.admin_description.config().epoch,
            ownership,
            min_active_epoch: self.admin_description.config().min_active_epoch,
            max_active_epoch: self.admin_description.config().max_active_epoch,
            balance,
            application_permissions: Default::default(),
        };
        let description = ChainDescription::new(origin, config, Timestamp::from(0));
        self.other_chains
            .insert(description.id(), description.clone());
        self.worker
            .storage
            .create_chain(description.clone())
            .await
            .unwrap();
        self.executing_worker
            .storage
            .create_chain(description.clone())
            .await
            .unwrap();
        description
    }

    /// Creates a child chain of `parent_id` (opened at the parent's height 0, index 0),
    /// recording it in `other_chains` and in both workers' storages.
    async fn add_child_chain(
        &mut self,
        parent_id: ChainId,
        owner: AccountOwner,
        balance: Amount,
    ) -> ChainDescription {
        let origin = ChainOrigin::Child {
            parent: parent_id,
            block_height: BlockHeight(0),
            chain_index: 0,
        };
        let config = InitialChainConfig {
            epoch: self.admin_description.config().epoch,
            ownership: ChainOwnership::single(owner),
            min_active_epoch: self.admin_description.config().min_active_epoch,
            max_active_epoch: self.admin_description.config().max_active_epoch,
            balance,
            application_permissions: Default::default(),
        };
        let description = ChainDescription::new(origin, config, Timestamp::from(0));
        self.other_chains
            .insert(description.id(), description.clone());
        self.worker
            .storage
            .create_chain(description.clone())
            .await
            .unwrap();
        self.executing_worker
            .storage
            .create_chain(description.clone())
            .await
            .unwrap();
        description
    }

    /// Certifies `value` in round `MultiLeader(0)`.
    fn make_certificate<T>(&self, value: T) -> GenericCertificate<T>
    where
        T: CertificateValue,
    {
        self.make_certificate_with_round(value, Round::MultiLeader(0))
    }

    /// Certifies `value` in the given round by signing with the executing worker's
    /// validator key and aggregating that single signature over the committee.
    fn make_certificate_with_round<T>(&self, value: T, round: Round) -> GenericCertificate<T>
    where
        T: CertificateValue,
    {
        let vote = LiteVote::new(
            LiteValue::new(&value),
            round,
            self.executing_worker
                .chain_worker_config
                .key_pair()
                .unwrap(),
        );
        let mut builder = SignatureAggregator::new(value, round, &self.committee);
        // With a single validator, this one signature reaches quorum immediately.
        builder
            .append(self.executing_worker.public_key(), vote.signature)
            .unwrap()
            .unwrap()
    }

    /// Creates a certificate for a chain-account-to-chain-account transfer in epoch 0.
    async fn make_simple_transfer_certificate(
        &mut self,
        chain_id: ChainId,
        chain_owner_pubkey: AccountPublicKey,
        target_id: ChainId,
        amount: Amount,
        incoming_bundles: Vec<IncomingBundle>,
        previous_confirmed_block: Option<&ConfirmedBlockCertificate>,
    ) -> ConfirmedBlockCertificate {
        self.make_transfer_certificate_for_epoch(
            chain_id,
            chain_owner_pubkey.into(),
            AccountOwner::CHAIN,
            Account::chain(target_id),
            amount,
            incoming_bundles,
            Epoch::ZERO,
            previous_confirmed_block,
        )
        .await
    }

    /// Creates a certificate with a transfer in epoch 0.
    #[expect(clippy::too_many_arguments)]
    async fn make_transfer_certificate(
        &mut self,
        chain_id: ChainId,
        authenticated_owner: AccountOwner,
        source: AccountOwner,
        recipient: Account,
        amount: Amount,
        incoming_bundles: Vec<IncomingBundle>,
        previous_confirmed_block: Option<&ConfirmedBlockCertificate>,
    ) -> ConfirmedBlockCertificate {
        self.make_transfer_certificate_for_epoch(
            chain_id,
            authenticated_owner,
            source,
            recipient,
            amount,
            incoming_bundles,
            Epoch::ZERO,
            previous_confirmed_block,
        )
        .await
    }

    /// Creates a certificate with a transfer.
    ///
    /// The block is executed via `execute_proposal`, so the certificate carries a
    /// correct execution-state hash and is fully processable.
    #[expect(clippy::too_many_arguments)]
    async fn make_transfer_certificate_for_epoch(
        &mut self,
        chain_id: ChainId,
        authenticated_owner: AccountOwner,
        source: AccountOwner,
        recipient: Account,
        amount: Amount,
        incoming_bundles: Vec<IncomingBundle>,
        epoch: Epoch,
        previous_confirmed_block: Option<&ConfirmedBlockCertificate>,
    ) -> ConfirmedBlockCertificate {
        let block = match previous_confirmed_block {
            None => make_first_block(chain_id),
            Some(cert) => make_child_block(cert.value()),
        }
        .with_incoming_bundles(incoming_bundles)
        .with_transfer(source, recipient, amount)
        .with_epoch(epoch)
        .with_authenticated_owner(Some(authenticated_owner));
        self.execute_proposal(block, vec![]).await.unwrap()
    }

    /// Creates a certificate with a transfer.
    ///
    /// This does not work for blocks with ancestors that sent a message to the same recipient, unless
    /// the `previous_confirmed_block` also did.
    /// It also does not work as a certificate that can be processed - it doesn't do
    /// proper hashing of the execution state, so such a certificate will be rejected by
    /// the worker.
    #[expect(clippy::too_many_arguments)]
    async fn make_transfer_certificate_for_epoch_unprocessable(
        &self,
        chain_description: ChainDescription,
        chain_owner_pubkey: AccountPublicKey,
        authenticated_owner: AccountOwner,
        source: AccountOwner,
        recipient: Account,
        amount: Amount,
        incoming_bundles: Vec<IncomingBundle>,
        epoch: Epoch,
        balance: Amount,
        balances: BTreeMap<AccountOwner, Amount>,
        previous_confirmed_blocks: Vec<&ConfirmedBlockCertificate>,
    ) -> ConfirmedBlockCertificate {
        let chain_id = chain_description.id();
        // Hand-built post-execution state; hashed below instead of actually
        // executing the block.
        let system_state = SystemExecutionState {
            committees: [(epoch, self.committee.clone())].into_iter().collect(),
            ownership: ChainOwnership::single(chain_owner_pubkey.into()),
            balance,
            balances,
            admin_id: Some(self.admin_id()),
            ..SystemExecutionState::new(chain_description)
        };
        let mut block = match previous_confirmed_blocks.first() {
            None => make_first_block(chain_id),
            Some(cert) => make_child_block(cert.value()),
        }
        .with_transfer(source, recipient, amount);
        block.authenticated_owner = Some(authenticated_owner);
        block.epoch = epoch;
        // Rejected tracked messages bounce back to their origin; every other
        // incoming message produces no outgoing message here.
        let mut messages = incoming_bundles
            .iter()
            .flat_map(|incoming_bundle| {
                incoming_bundle
                    .bundle
                    .messages
                    .iter()
                    .map(|posted_message| {
                        if matches!(incoming_bundle.action, MessageAction::Reject)
                            && matches!(posted_message.kind, MessageKind::Tracked)
                        {
                            vec![OutgoingMessage {
                                authenticated_owner: posted_message.authenticated_owner,
                                destination: incoming_bundle.origin,
                                grant: Amount::ZERO,
                                refund_grant_to: None,
                                kind: MessageKind::Bouncing,
                                message: posted_message.message.clone(),
                            }]
                        } else {
                            Vec::new()
                        }
                    })
            })
            .collect::<Vec<_>>();
        // Incoming bundles become receive transactions, prepended to the block.
        block.transactions = incoming_bundles
            .into_iter()
            .map(Transaction::ReceiveMessages)
            .chain(block.transactions)
            .collect();
        if chain_id != recipient.chain_id {
            // Cross-chain transfer: the operation emits a tracked credit message.
            messages.push(vec![direct_outgoing_message(
                recipient.chain_id,
                MessageKind::Tracked,
                SystemMessage::Credit {
                    source,
                    target: recipient.owner,
                    amount,
                },
            )]);
        } else {
            messages.push(Vec::new());
        }
        let tx_count = block.transactions.len();
        // One (empty) entry per transaction for each per-transaction outcome list.
        let oracle_responses = iter::repeat_with(Vec::new).take(tx_count).collect();
        let events = iter::repeat_with(Vec::new).take(tx_count).collect();
        let blobs = iter::repeat_with(Vec::new).take(tx_count).collect();
        let operation_results = vec![OperationResult(Vec::new()); block.operations().count()];
        let state_hash = system_state.into_hash().await;
        // For every message recipient, record the first entry in
        // `previous_confirmed_blocks` that also sent to that recipient.
        let previous_message_blocks = messages
            .iter()
            .flatten()
            .map(|message| message.destination)
            .filter_map(|recipient| {
                previous_confirmed_blocks
                    .iter()
                    .find(|block| {
                        block
                            .inner()
                            .block()
                            .body
                            .messages
                            .iter()
                            .flatten()
                            .any(|message| message.destination == recipient)
                    })
                    .map(|block| {
                        (
                            recipient,
                            (block.hash(), block.inner().block().header.height),
                        )
                    })
            })
            .collect();
        let value = ConfirmedBlock::new(
            BlockExecutionOutcome {
                messages,
                previous_message_blocks,
                previous_event_blocks: BTreeMap::new(),
                events,
                blobs,
                state_hash,
                oracle_responses,
                operation_results,
            }
            .with(block),
        );
        self.make_certificate(value)
    }

    /// Returns the initial system execution state of the given chain, as set up by
    /// this environment (epoch 0, this committee, this admin chain).
    pub fn system_execution_state(&self, chain_id: &ChainId) -> SystemExecutionState {
        let description = if *chain_id == self.admin_id() {
            self.admin_description.clone()
        } else {
            self.other_chains
                .get(chain_id)
                .expect("Unknown chain")
                .clone()
        };
        SystemExecutionState {
            admin_id: Some(self.admin_id()),
            timestamp: description.timestamp(),
            committees: [(Epoch::ZERO, self.committee.clone())]
                .into_iter()
                .collect(),
            ..SystemExecutionState::new(description.clone())
        }
    }

    /// A method creating a `ConfirmedBlockCertificate` for a proposal by executing it on the
    /// `executing_worker`.
    async fn execute_proposal(
        &mut self,
        proposal: ProposedBlock,
        blobs: Vec<Blob>,
    ) -> Result<ConfirmedBlockCertificate, anyhow::Error> {
        let (block, _) = self
            .executing_worker
            .stage_block_execution(proposal, None, blobs)
            .await?;
        let certificate = self.make_certificate(ConfirmedBlock::new(block));
        // Also process the certificate so the executing worker's chain advances and
        // can serve as the parent for subsequent blocks.
        self.executing_worker
            .fully_handle_certificate_with_notifications(certificate.clone(), &())
            .await?;
        Ok(certificate)
    }
}
/// Asserts that there are no "removed" bundles in the inbox, that have been included as
/// incoming in a block but not received from the sender chain yet.
async fn assert_no_removed_bundles<C>(chain: &ChainStateView<C>)
where
    C: Context + Clone + Send + Sync + 'static,
    C::Extra: ExecutionRuntimeContext,
{
    // Every inbox's `removed_bundles` queue must be empty.
    for (_, inbox) in chain.inboxes.try_load_all_entries().await.unwrap() {
        assert_eq!(inbox.removed_bundles.front().await.unwrap(), None);
    }
}
/// Builds an `OutgoingMessage` addressed directly to `recipient`, carrying the given
/// system message with no authenticated owner, a zero grant and no refund target.
fn direct_outgoing_message(
    recipient: ChainId,
    kind: MessageKind,
    message: SystemMessage,
) -> OutgoingMessage {
    let message = Message::System(message);
    OutgoingMessage {
        destination: recipient,
        message,
        kind,
        authenticated_owner: None,
        grant: Amount::ZERO,
        refund_grant_to: None,
    }
}
/// A system `Credit` message moving `amount` between the chains' own (CHAIN) accounts.
fn system_credit_message(amount: Amount) -> Message {
    let credit = SystemMessage::Credit {
        source: AccountOwner::CHAIN,
        target: AccountOwner::CHAIN,
        amount,
    };
    Message::System(credit)
}
/// A tracked, direct message crediting `amount` to the chain account of `recipient`.
fn direct_credit_message(recipient: ChainId, amount: Amount) -> OutgoingMessage {
    direct_outgoing_message(
        recipient,
        MessageKind::Tracked,
        SystemMessage::Credit {
            source: AccountOwner::CHAIN,
            target: AccountOwner::CHAIN,
            amount,
        },
    )
}
/// Creates `count` key pairs and returns them, sorted by the `AccountOwner` created from their public key.
fn generate_key_pairs(signer: &mut InMemorySigner, count: usize) -> Vec<AccountPublicKey> {
    let mut public_keys: Vec<AccountPublicKey> =
        (0..count).map(|_| signer.generate_new()).collect();
    public_keys.sort_by_key(|pk| AccountOwner::from(*pk));
    public_keys
}
/// Creates a `CrossChainRequest` with the messages sent by the certificate to the recipient.
fn update_recipient_direct(
    recipient: ChainId,
    certificate: &ConfirmedBlockCertificate,
) -> CrossChainRequest {
    CrossChainRequest::UpdateRecipient {
        sender: certificate.inner().block().header.chain_id,
        recipient,
        bundles: certificate.message_bundles_for(recipient).collect(),
    }
}
// Verifies that a proposal whose signature bytes were produced by a different key
// than the one it claims is rejected with `CryptoError::InvalidSignature`, and
// that the chain records no confirmed or validated vote.
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_handle_block_proposal_bad_signature<B>(mut storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let mut signer = InMemorySigner::new(None);
    let sender_public_key = signer.generate_new();
    let sender_owner = sender_public_key.into();
    let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
    let chain_1_desc = env
        .add_root_chain(1, sender_owner, Amount::from_tokens(5))
        .await;
    let chain_2_desc = env
        .add_root_chain(2, AccountPublicKey::test_key(2).into(), Amount::ZERO)
        .await;
    let chain_1 = chain_1_desc.id();
    let chain_2 = chain_2_desc.id();
    let block_proposal = make_first_block(chain_1)
        .with_simple_transfer(chain_2, Amount::from_tokens(5))
        .into_first_proposal(sender_owner, &signer)
        .await
        .unwrap();
    let unknown_key_pair = AccountSecretKey::generate();
    let original_public_key = match block_proposal.signature {
        AccountSignature::Ed25519 { public_key, .. } => public_key,
        _ => {
            panic!(
                "Expected an Ed25519 signature, found: {:?}",
                block_proposal.signature
            );
        }
    };
    // Sign the content with an unrelated key, but keep the original public key so
    // the signature cannot possibly verify.
    let mut bad_signature_block_proposal = block_proposal.clone();
    let bad_signature = match unknown_key_pair.sign(&block_proposal.content) {
        AccountSignature::Ed25519 { signature, .. } => AccountSignature::Ed25519 {
            public_key: original_public_key,
            signature,
        },
        _ => panic!("Expected an Ed25519 signature"),
    };
    bad_signature_block_proposal.signature = bad_signature;
    assert_matches!(
        env.executing_worker()
            .handle_block_proposal(bad_signature_block_proposal)
            .await,
        Err(WorkerError::CryptoError(error))
        if matches!(error, linera_base::crypto::CryptoError::InvalidSignature {..})
    );
    // The rejected proposal must not have left any vote behind.
    let chain = env.executing_worker().chain_state_view(chain_1).await?;
    assert!(chain.is_active());
    assert!(chain.manager.confirmed_vote().is_none());
    assert!(chain.manager.validated_vote().is_none());
    Ok(())
}
// Verifies that a proposal transferring a zero amount is rejected with
// `ExecutionError::IncorrectTransferAmount` and that no vote is recorded.
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_handle_block_proposal_zero_amount<B>(mut storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let mut signer = InMemorySigner::new(None);
    let sender_owner = signer.generate_new().into();
    let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
    let chain_1_desc = env
        .add_root_chain(1, sender_owner, Amount::from_tokens(5))
        .await;
    let chain_2_desc = env
        .add_root_chain(2, AccountPublicKey::test_key(2).into(), Amount::ZERO)
        .await;
    let chain_1 = chain_1_desc.id();
    let chain_2 = chain_2_desc.id();
    // test block non-positive amount
    let zero_amount_block_proposal = make_first_block(chain_1)
        .with_simple_transfer(chain_2, Amount::ZERO)
        .with_authenticated_owner(Some(sender_owner))
        .into_first_proposal(sender_owner, &signer)
        .await
        .unwrap();
    assert_matches!(
        env.executing_worker()
            .handle_block_proposal(zero_amount_block_proposal)
            .await,
        Err(
            WorkerError::ChainError(error)
        ) if matches!(&*error, ChainError::ExecutionError(
            execution_error, ChainExecutionContext::Operation(_)
        ) if matches!(**execution_error, ExecutionError::IncorrectTransferAmount))
    );
    // The rejected proposal must not have left any vote behind.
    let chain = env.executing_worker().chain_state_view(chain_1).await?;
    assert!(chain.is_active());
    assert!(chain.manager.confirmed_vote().is_none());
    assert!(chain.manager.validated_vote().is_none());
    Ok(())
}
// Verifies timestamp validation on proposed blocks: a timestamp too far in the
// future is rejected, a timestamp within the grace period is accepted once the
// test clock catches up, and a child block may not be older than its parent.
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_handle_block_proposal_valid_timestamps<B>(
    mut storage_builder: B,
) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let mut signer = InMemorySigner::new(None);
    let public_key = signer.generate_new();
    let owner = public_key.into();
    let balance = Amount::from_tokens(5);
    let small_transfer = Amount::from_micros(1);
    let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
    // The test clock starts at 0; timestamps are validated against it.
    let clock = storage_builder.clock();
    let chain_1_desc = env.add_root_chain(1, owner, balance).await;
    let chain_2_desc = env.add_root_chain(2, owner, balance).await;
    let chain_1 = chain_1_desc.id();
    let chain_2 = chain_2_desc.id();
    {
        // A timestamp beyond the grace period relative to the clock must be rejected.
        let block_proposal = make_first_block(chain_1)
            .with_simple_transfer(chain_2, small_transfer)
            .with_authenticated_owner(Some(owner))
            .with_timestamp(Timestamp::from(TEST_GRACE_PERIOD_MICROS + 1_000_000))
            .into_first_proposal(owner, &signer)
            .await
            .unwrap();
        // Timestamp too far in the future
        assert_matches!(
            env.executing_worker()
                .handle_block_proposal(block_proposal)
                .await,
            Err(WorkerError::InvalidTimestamp { .. })
        );
    }
    // A timestamp exactly at the grace-period boundary is acceptable.
    let block_0_time = Timestamp::from(TEST_GRACE_PERIOD_MICROS);
    let block = make_first_block(chain_1)
        .with_timestamp(block_0_time)
        .with_simple_transfer(chain_2, small_transfer)
        .with_authenticated_owner(Some(owner));
    let certificate = {
        // Note the ordering: the future is created first, then the clock is
        // advanced to the block's timestamp before the proposal is driven to
        // completion, so execution sees a valid (non-future) timestamp.
        let future = env.execute_proposal(block.clone(), vec![]);
        clock.set(block_0_time);
        future.await?
    };
    assert!(certificate.value().matches_proposed_block(&block));
    assert!(certificate.block().outcome_matches(
        vec![vec![direct_credit_message(chain_2, small_transfer)]],
        BTreeMap::new(),
        BTreeMap::new(),
        vec![vec![]],
        vec![vec![]],
        vec![vec![]],
        vec![OperationResult::default()],
    ));
    env.worker()
        .fully_handle_certificate_with_notifications(certificate.clone(), &())
        .await?;
    {
        // A child block timestamped 1 microsecond before its parent must be rejected.
        let block_proposal = make_child_block(&certificate.into_value())
            .with_timestamp(block_0_time.saturating_sub_micros(1))
            .into_first_proposal(owner, &signer)
            .await
            .unwrap();
        // Timestamp older than previous one
        assert_matches!(
            env.executing_worker().handle_block_proposal(block_proposal).await,
            Err(WorkerError::ChainError(error))
                if matches!(*error, ChainError::InvalidBlockTimestamp { .. })
        );
    }
    Ok(())
}
// Verifies that a proposal signed by a key that does not own the sender chain
// is rejected with `WorkerError::InvalidOwner` and produces no votes.
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_handle_block_proposal_unknown_sender<B>(mut storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let mut signer = InMemorySigner::new(None);
    let sender_public_key = signer.generate_new();
    let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
    let sender_chain = env
        .add_root_chain(1, sender_public_key.into(), Amount::from_tokens(5))
        .await
        .id();
    let recipient_chain = env
        .add_root_chain(2, AccountPublicKey::test_key(2).into(), Amount::ZERO)
        .await
        .id();
    // Sign the proposal with a freshly generated key that is not an owner of the
    // sender chain.
    let rogue_key = AccountSecretKey::generate();
    let rogue_owner = rogue_key.public().into();
    let rogue_signer = InMemorySigner::from_iter(vec![(rogue_owner, rogue_key)]);
    let proposal = make_first_block(sender_chain)
        .with_simple_transfer(recipient_chain, Amount::from_tokens(5))
        .into_first_proposal(rogue_owner, &rogue_signer)
        .await
        .unwrap();
    assert_matches!(
        env.executing_worker().handle_block_proposal(proposal).await,
        Err(WorkerError::InvalidOwner)
    );
    // The rejected proposal must leave the chain without any pending votes.
    let chain = env.executing_worker().chain_state_view(sender_chain).await?;
    assert!(chain.is_active());
    assert!(chain.manager.confirmed_vote().is_none());
    assert!(chain.manager.validated_vote().is_none());
    Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_handle_block_proposal_with_chaining<B>(mut storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let mut signer = InMemorySigner::new(None);
let sender_public_key = signer.generate_new();
let sender_owner = sender_public_key.into();
let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
let chain_desc = env
.add_root_chain(1, sender_owner, Amount::from_tokens(5))
.await;
let chain_1 = chain_desc.id();
let chain_2 = env.add_root_chain(2, sender_owner, Amount::ZERO).await.id();
let block_proposal0 = make_first_block(chain_1)
.with_simple_transfer(chain_2, Amount::ONE)
.with_authenticated_owner(Some(sender_owner))
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/test_helpers.rs | linera-core/src/unit_tests/test_helpers.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use assert_matches::assert_matches;
use linera_chain::{ChainError, ChainExecutionContext};
use linera_execution::ExecutionError;
use crate::{client::chain_client, local_node::LocalNodeError, worker::WorkerError};
/// Asserts that an error is due to insufficient balance during an operation.
pub fn assert_insufficient_balance_during_operation<T>(
obtained_error: Result<T, chain_client::Error>,
operation_index: u32,
) {
let chain_client::Error::LocalNodeError(LocalNodeError::WorkerError(WorkerError::ChainError(
error,
))) = obtained_error.err().unwrap()
else {
panic!("Expected a chain_client::Error::LocalNodeError with a WorkerError::ChainError");
};
let ChainError::ExecutionError(execution_error, context) = *error else {
panic!("Expected a ChainError::ExecutionError, found: {error:#?}");
};
let ChainExecutionContext::Operation(index) = context else {
panic!("Expected ChainExecutionContext::Operation, found: {context:#?}");
};
assert_eq!(index, operation_index, "Operation index mismatch");
assert_matches!(
*execution_error,
ExecutionError::InsufficientBalance { .. },
"Expected ExecutionError::InsufficientBalance, found: {execution_error:#?}"
);
}
/// Asserts that an error is due to insufficient funding for fees.
pub fn assert_fees_exceed_funding<T>(obtained_error: Result<T, chain_client::Error>) {
let chain_client::Error::LocalNodeError(LocalNodeError::WorkerError(WorkerError::ChainError(
error,
))) = obtained_error.err().unwrap()
else {
panic!("Expected a chain_client::Error::LocalNodeError with a WorkerError::ChainError");
};
let ChainError::ExecutionError(execution_error, _context) = *error else {
panic!("Expected a ChainError::ExecutionError, found: {error:#?}");
};
assert_matches!(
*execution_error,
ExecutionError::FeesExceedFunding { .. },
"Expected ExecutionError::FeesExceedFunding, found: {execution_error:#?}"
);
}
/// Asserts that an error is due to insufficient funding with a generic execution context.
pub fn assert_insufficient_funding<T>(
obtained_error: Result<T, chain_client::Error>,
expected_context: ChainExecutionContext,
) {
let chain_client::Error::LocalNodeError(LocalNodeError::WorkerError(WorkerError::ChainError(
error,
))) = obtained_error.err().unwrap()
else {
panic!("Expected a chain_client::Error::LocalNodeError with a WorkerError::ChainError");
};
let ChainError::ExecutionError(execution_error, context) = *error else {
panic!("Expected a ChainError::ExecutionError, found: {error:#?}");
};
if std::mem::discriminant(&context) != std::mem::discriminant(&expected_context) {
panic!("Expected execution context {expected_context:?}, but found {context:?}");
}
assert_matches!(
*execution_error,
ExecutionError::InsufficientBalance { .. },
"Expected ExecutionError::InsufficientBalance, found: {execution_error:#?}"
);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/test_utils.rs | linera-core/src/unit_tests/test_utils.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, HashMap, HashSet},
sync::Arc,
time::Duration,
vec,
};
use async_trait::async_trait;
use futures::{
future::Either,
lock::{Mutex, MutexGuard},
Future,
};
use linera_base::{
crypto::{AccountPublicKey, CryptoHash, ValidatorKeypair, ValidatorPublicKey},
data_types::*,
identifiers::{AccountOwner, BlobId, ChainId},
ownership::ChainOwnership,
};
use linera_chain::{
data_types::BlockProposal,
types::{
CertificateKind, ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate,
LiteCertificate, Timeout, ValidatedBlock,
},
};
use linera_execution::{committee::Committee, ResourceControlPolicy, WasmRuntime};
use linera_storage::{DbStorage, ResultReadCertificates, Storage, TestClock};
#[cfg(all(not(target_arch = "wasm32"), feature = "storage-service"))]
use linera_storage_service::client::StorageServiceDatabase;
use linera_version::VersionInfo;
#[cfg(feature = "dynamodb")]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(feature = "scylladb")]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{
memory::MemoryDatabase, random::generate_test_namespace, store::TestKeyValueDatabase as _,
};
use tokio::sync::oneshot;
use tokio_stream::wrappers::UnboundedReceiverStream;
#[cfg(feature = "rocksdb")]
use {
linera_views::rocks_db::RocksDbDatabase,
tokio::sync::{Semaphore, SemaphorePermit},
};
use crate::{
client::{chain_client, Client},
data_types::*,
environment::{TestSigner, TestWallet},
node::{
CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode,
ValidatorNodeProvider,
},
notifier::ChannelNotifier,
worker::{Notification, ProcessableCertificate, WorkerState},
};
/// The failure mode a [`LocalValidatorClient`] simulates when handling requests.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum FaultType {
    /// Responds normally to every request.
    Honest,
    /// Rejects proposals, certificates and chain info queries with an "offline" I/O error.
    Offline,
    /// Like `Offline`, but still answers chain info queries.
    OfflineWithInfo,
    /// Answers proposals, certificates and queries with `InactiveChain` for the targeted chain.
    NoChains,
    /// Processes validated certificates but reports an I/O error instead of a confirm vote.
    DontSendConfirmVote,
    /// Refuses to process validated certificates at all.
    DontProcessValidated,
    /// Handles block proposals but reports an I/O error instead of a validate vote.
    DontSendValidateVote,
}
/// A validator used for testing. "Faulty" validators ignore block proposals (but not
/// certificates or info queries) and have the wrong initial balance for all chains.
///
/// All methods are executed in spawned Tokio tasks, so that canceling a client task doesn't cause
/// the validator's tasks to be canceled: In a real network, a validator also wouldn't cancel
/// tasks if the client stopped waiting for the response.
struct LocalValidator<S>
where
    S: Storage,
{
    /// The worker that actually executes requests against this validator's storage.
    state: WorkerState<S>,
    /// Notifier used to fan out chain notifications to subscribers.
    notifier: Arc<ChannelNotifier<Notification>>,
}
/// A cloneable client handle to a [`LocalValidator`], used in tests in place of a
/// networked validator node.
#[derive(Clone)]
pub struct LocalValidatorClient<S>
where
    S: Storage,
{
    /// The validator's public key, also used to derive its synthetic address.
    public_key: ValidatorPublicKey,
    /// Shared validator state; the async mutex serializes request handling.
    client: Arc<Mutex<LocalValidator<S>>>,
    /// The failure mode this validator simulates (see [`FaultType`]).
    fault_type: FaultType,
}
// In-process implementation of the validator RPC interface. Most requests are run in a
// spawned task via `spawn_and_receive`, so dropping the caller does not cancel the
// validator's work; the `do_*` methods apply the configured `FaultType`.
impl<S> ValidatorNode for LocalValidatorClient<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    type NotificationStream = NotificationStream;

    /// Returns a synthetic address derived from the validator's public key.
    fn address(&self) -> String {
        format!("local:{}", self.public_key)
    }

    /// Forwards a block proposal to the local worker on a spawned task.
    async fn handle_block_proposal(
        &self,
        proposal: BlockProposal,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_block_proposal(proposal, sender)
        })
        .await
    }

    /// Completes and processes a lite certificate. Delivery mode is irrelevant locally.
    async fn handle_lite_certificate(
        &self,
        certificate: LiteCertificate<'_>,
        _delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        // Clone into an owned certificate so it can be moved into the spawned task.
        let certificate = certificate.cloned();
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_lite_certificate(certificate, sender)
        })
        .await
    }

    /// Processes a timeout certificate on a spawned task.
    async fn handle_timeout_certificate(
        &self,
        certificate: GenericCertificate<Timeout>,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_certificate(certificate, sender)
        })
        .await
    }

    /// Processes a validated-block certificate on a spawned task.
    async fn handle_validated_certificate(
        &self,
        certificate: GenericCertificate<ValidatedBlock>,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_certificate(certificate, sender)
        })
        .await
    }

    /// Processes a confirmed-block certificate. Delivery mode is irrelevant locally.
    async fn handle_confirmed_certificate(
        &self,
        certificate: GenericCertificate<ConfirmedBlock>,
        _delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_certificate(certificate, sender)
        })
        .await
    }

    /// Answers a chain info query on a spawned task.
    async fn handle_chain_info_query(
        &self,
        query: ChainInfoQuery,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_chain_info_query(query, sender)
        })
        .await
    }

    /// Subscribes to notifications for the given chains.
    async fn subscribe(&self, chains: Vec<ChainId>) -> Result<NotificationStream, NodeError> {
        self.spawn_and_receive(move |validator, sender| validator.do_subscribe(chains, sender))
            .await
    }

    /// Local validators report a default version.
    async fn get_version_info(&self) -> Result<VersionInfo, NodeError> {
        Ok(Default::default())
    }

    /// Reads the network description from storage; errors if none has been written yet.
    async fn get_network_description(&self) -> Result<NetworkDescription, NodeError> {
        Ok(self
            .client
            .lock()
            .await
            .state
            .storage_client()
            .read_network_description()
            .await
            .transpose()
            .ok_or(NodeError::ViewError {
                error: "missing NetworkDescription".to_owned(),
            })??)
    }

    /// Stores a blob in the validator's storage, if its dependencies are satisfied.
    async fn upload_blob(&self, content: BlobContent) -> Result<BlobId, NodeError> {
        self.spawn_and_receive(move |validator, sender| validator.do_upload_blob(content, sender))
            .await
    }

    /// Reads a blob from the validator's storage.
    async fn download_blob(&self, blob_id: BlobId) -> Result<BlobContent, NodeError> {
        self.spawn_and_receive(move |validator, sender| validator.do_download_blob(blob_id, sender))
            .await
    }

    /// Downloads a blob that is pending on the given chain.
    async fn download_pending_blob(
        &self,
        chain_id: ChainId,
        blob_id: BlobId,
    ) -> Result<BlobContent, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_download_pending_blob(chain_id, blob_id, sender)
        })
        .await
    }

    /// Provides a blob that a pending block on the given chain is missing.
    async fn handle_pending_blob(
        &self,
        chain_id: ChainId,
        blob: BlobContent,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_handle_pending_blob(chain_id, blob, sender)
        })
        .await
    }

    /// Reads a confirmed-block certificate by hash from storage.
    async fn download_certificate(
        &self,
        hash: CryptoHash,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_download_certificate(hash, sender)
        })
        .await
    }

    /// Reads several confirmed-block certificates by hash from storage.
    async fn download_certificates(
        &self,
        hashes: Vec<CryptoHash>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_download_certificates(hashes, sender)
        })
        .await
    }

    /// Reads certificates of the given chain at the given heights.
    async fn download_certificates_by_heights(
        &self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_download_certificates_by_heights(chain_id, heights, sender)
        })
        .await
    }

    /// Returns the hash of the latest certificate that used the given blob.
    async fn blob_last_used_by(&self, blob_id: BlobId) -> Result<CryptoHash, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_blob_last_used_by(blob_id, sender)
        })
        .await
    }

    /// Returns the latest certificate that used the given blob.
    async fn blob_last_used_by_certificate(
        &self,
        blob_id: BlobId,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_blob_last_used_by_certificate(blob_id, sender)
        })
        .await
    }

    /// Returns the subset of `blob_ids` not present in the validator's storage.
    async fn missing_blob_ids(&self, blob_ids: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError> {
        self.spawn_and_receive(move |validator, sender| {
            validator.do_missing_blob_ids(blob_ids, sender)
        })
        .await
    }

    async fn get_shard_info(
        &self,
        _chain_id: ChainId,
    ) -> Result<crate::data_types::ShardInfo, NodeError> {
        // For test purposes, return a dummy shard info
        Ok(crate::data_types::ShardInfo {
            shard_id: 0,
            total_shards: 1,
        })
    }
}
impl<S> LocalValidatorClient<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates an honest validator client wrapping the given worker state.
    fn new(public_key: ValidatorPublicKey, state: WorkerState<S>) -> Self {
        let client = LocalValidator {
            state,
            notifier: Arc::new(ChannelNotifier::default()),
        };
        Self {
            public_key,
            client: Arc::new(Mutex::new(client)),
            fault_type: FaultType::Honest,
        }
    }

    /// Returns the validator's public key.
    pub fn name(&self) -> ValidatorPublicKey {
        self.public_key
    }

    /// Changes the failure mode this validator simulates.
    fn set_fault_type(&mut self, fault_type: FaultType) {
        self.fault_type = fault_type;
    }

    /// Obtains the basic `ChainInfo` data for the local validator chain, with chain manager values.
    pub async fn chain_info_with_manager_values(
        &mut self,
        chain_id: ChainId,
    ) -> Result<Box<ChainInfo>, NodeError> {
        let query = ChainInfoQuery::new(chain_id).with_manager_values();
        let response = self.handle_chain_info_query(query).await?;
        Ok(response.info)
    }

    /// Executes the future produced by `f` in a spawned Tokio task, so that canceling the
    /// caller does not cancel the validator's work.
    /// Returns the value that the future puts into the sender.
    async fn spawn_and_receive<F, R, T>(&self, f: F) -> T
    where
        T: Send + 'static,
        R: Future<Output = Result<(), T>> + Send,
        F: FnOnce(Self, oneshot::Sender<T>) -> R + Send + 'static,
    {
        let validator = self.clone();
        let (sender, receiver) = oneshot::channel();
        tokio::spawn(async move {
            // The receiver may have been dropped; that's not an error for the validator.
            if f(validator, sender).await.is_err() {
                tracing::debug!("result could not be sent");
            }
        });
        receiver.await.unwrap()
    }

    /// Handles a block proposal according to the configured [`FaultType`] and sends the
    /// outcome through `sender`.
    async fn do_handle_block_proposal(
        self,
        proposal: BlockProposal,
        sender: oneshot::Sender<Result<ChainInfoResponse, NodeError>>,
    ) -> Result<(), Result<ChainInfoResponse, NodeError>> {
        let result = match self.fault_type {
            FaultType::Offline | FaultType::OfflineWithInfo => Err(NodeError::ClientIoError {
                error: "offline".to_string(),
            }),
            FaultType::NoChains => Err(NodeError::InactiveChain(proposal.content.block.chain_id)),
            FaultType::DontSendValidateVote
            | FaultType::Honest
            | FaultType::DontSendConfirmVote
            | FaultType::DontProcessValidated => {
                // The proposal is processed in all these modes; `DontSendValidateVote`
                // only withholds the resulting vote below.
                let result = self
                    .client
                    .lock()
                    .await
                    .state
                    .handle_block_proposal(proposal)
                    .await
                    .map_err(Into::into);
                if self.fault_type == FaultType::DontSendValidateVote {
                    Err(NodeError::ClientIoError {
                        error: "refusing to validate".to_string(),
                    })
                } else {
                    result
                }
            }
        };
        // In a local node cross-chain messages can't get lost, so we can ignore the actions here.
        sender.send(result.map(|(info, _actions)| info))
    }

    /// Completes a lite certificate into a full one and processes it while holding the
    /// validator lock, sending the outcome through `sender`.
    async fn do_handle_lite_certificate(
        self,
        certificate: LiteCertificate<'_>,
        sender: oneshot::Sender<Result<ChainInfoResponse, NodeError>>,
    ) -> Result<(), Result<ChainInfoResponse, NodeError>> {
        let client = self.client.clone();
        let mut validator = client.lock().await;
        let result = async move {
            // The lite certificate may resolve to either a confirmed or a validated block.
            match validator.state.full_certificate(certificate).await? {
                Either::Left(confirmed) => {
                    self.do_handle_certificate_internal(confirmed, &mut validator)
                        .await
                }
                Either::Right(validated) => {
                    self.do_handle_certificate_internal(validated, &mut validator)
                        .await
                }
            }
        }
        .await;
        sender.send(result)
    }

    /// Core certificate processing shared by all certificate entry points, applying the
    /// configured [`FaultType`]. The caller must already hold the validator lock.
    async fn do_handle_certificate_internal<T: ProcessableCertificate>(
        &self,
        certificate: GenericCertificate<T>,
        validator: &mut MutexGuard<'_, LocalValidator<S>>,
    ) -> Result<ChainInfoResponse, NodeError> {
        match self.fault_type {
            FaultType::DontProcessValidated if T::KIND == CertificateKind::Validated => {
                Err(NodeError::ClientIoError {
                    error: "refusing to process validated block".to_string(),
                })
            }
            FaultType::NoChains => Err(NodeError::InactiveChain(certificate.value().chain_id())),
            FaultType::Honest
            | FaultType::DontSendConfirmVote
            | FaultType::DontProcessValidated
            | FaultType::DontSendValidateVote => {
                let result = validator
                    .state
                    .fully_handle_certificate_with_notifications(certificate, &validator.notifier)
                    .await
                    .map_err(Into::into);
                // `DontSendConfirmVote` processes the certificate but withholds the
                // confirm vote for validated blocks.
                if T::KIND == CertificateKind::Validated
                    && self.fault_type == FaultType::DontSendConfirmVote
                {
                    Err(NodeError::ClientIoError {
                        error: "refusing to confirm".to_string(),
                    })
                } else {
                    result
                }
            }
            FaultType::Offline | FaultType::OfflineWithInfo => Err(NodeError::ClientIoError {
                error: "offline".to_string(),
            }),
        }
    }

    /// Locks the validator, processes a certificate and sends the outcome through `sender`.
    async fn do_handle_certificate<T: ProcessableCertificate>(
        self,
        certificate: GenericCertificate<T>,
        sender: oneshot::Sender<Result<ChainInfoResponse, NodeError>>,
    ) -> Result<(), Result<ChainInfoResponse, NodeError>> {
        let mut validator = self.client.lock().await;
        let result = self
            .do_handle_certificate_internal(certificate, &mut validator)
            .await;
        sender.send(result)
    }

    /// Answers a chain info query according to the configured [`FaultType`]; note that
    /// `OfflineWithInfo` still answers here.
    async fn do_handle_chain_info_query(
        self,
        query: ChainInfoQuery,
        sender: oneshot::Sender<Result<ChainInfoResponse, NodeError>>,
    ) -> Result<(), Result<ChainInfoResponse, NodeError>> {
        let validator = self.client.lock().await;
        let result = match self.fault_type {
            FaultType::Offline => Err(NodeError::ClientIoError {
                error: "offline".to_string(),
            }),
            FaultType::NoChains => Err(NodeError::InactiveChain(query.chain_id)),
            FaultType::Honest
            | FaultType::DontSendConfirmVote
            | FaultType::DontProcessValidated
            | FaultType::DontSendValidateVote
            | FaultType::OfflineWithInfo => validator
                .state
                .handle_chain_info_query(query)
                .await
                .map_err(Into::into),
        };
        // In a local node cross-chain messages can't get lost, so we can ignore the actions here.
        sender.send(result.map(|(info, _actions)| info))
    }

    /// Subscribes to the validator's notifier and sends the stream through `sender`.
    async fn do_subscribe(
        self,
        chains: Vec<ChainId>,
        sender: oneshot::Sender<Result<NotificationStream, NodeError>>,
    ) -> Result<(), Result<NotificationStream, NodeError>> {
        let validator = self.client.lock().await;
        let rx = validator.notifier.subscribe(chains);
        let stream: NotificationStream = Box::pin(UnboundedReceiverStream::new(rx));
        sender.send(Ok(stream))
    }

    /// Writes a blob to storage if its dependencies are met; reports `BlobsNotFound`
    /// otherwise. Sends the outcome through `sender`.
    async fn do_upload_blob(
        self,
        content: BlobContent,
        sender: oneshot::Sender<Result<BlobId, NodeError>>,
    ) -> Result<(), Result<BlobId, NodeError>> {
        let validator = self.client.lock().await;
        let blob = Blob::new(content);
        let id = blob.id();
        let storage = validator.state.storage_client();
        let result = match storage.maybe_write_blobs(&[blob]).await {
            Ok(has_state) if has_state.first() == Some(&true) => Ok(id),
            Ok(_) => Err(NodeError::BlobsNotFound(vec![id])),
            Err(error) => Err(error.into()),
        };
        sender.send(result)
    }

    /// Reads a blob from storage and sends its content (or `BlobsNotFound`) through `sender`.
    async fn do_download_blob(
        self,
        blob_id: BlobId,
        sender: oneshot::Sender<Result<BlobContent, NodeError>>,
    ) -> Result<(), Result<BlobContent, NodeError>> {
        let validator = self.client.lock().await;
        let blob = validator
            .state
            .storage_client()
            .read_blob(blob_id)
            .await
            .map_err(Into::into);
        // A missing blob is reported as `BlobsNotFound` rather than `None`.
        let blob = match blob {
            Ok(blob) => blob.ok_or(NodeError::BlobsNotFound(vec![blob_id])),
            Err(error) => Err(error),
        };
        sender.send(blob.map(|blob| blob.into_content()))
    }

    /// Downloads a blob pending on the given chain and sends it through `sender`.
    async fn do_download_pending_blob(
        self,
        chain_id: ChainId,
        blob_id: BlobId,
        sender: oneshot::Sender<Result<BlobContent, NodeError>>,
    ) -> Result<(), Result<BlobContent, NodeError>> {
        let validator = self.client.lock().await;
        let result = validator
            .state
            .download_pending_blob(chain_id, blob_id)
            .await
            .map_err(Into::into);
        sender.send(result.map(|blob| blob.into_content()))
    }

    /// Provides a pending blob to the worker and sends the outcome through `sender`.
    async fn do_handle_pending_blob(
        self,
        chain_id: ChainId,
        blob: BlobContent,
        sender: oneshot::Sender<Result<ChainInfoResponse, NodeError>>,
    ) -> Result<(), Result<ChainInfoResponse, NodeError>> {
        let validator = self.client.lock().await;
        let result = validator
            .state
            .handle_pending_blob(chain_id, Blob::new(blob))
            .await
            .map_err(Into::into);
        sender.send(result)
    }

    /// Reads a certificate by hash and sends it through `sender`; panics if the
    /// certificate is missing (a test-setup bug rather than a network condition).
    async fn do_download_certificate(
        self,
        hash: CryptoHash,
        sender: oneshot::Sender<Result<ConfirmedBlockCertificate, NodeError>>,
    ) -> Result<(), Result<ConfirmedBlockCertificate, NodeError>> {
        let validator = self.client.lock().await;
        let certificate = validator
            .state
            .storage_client()
            .read_certificate(hash)
            .await
            .map_err(Into::into);
        let certificate = match certificate {
            Err(error) => Err(error),
            Ok(entry) => match entry {
                Some(certificate) => Ok(certificate),
                None => {
                    panic!("Missing certificate: {hash}");
                }
            },
        };
        sender.send(certificate)
    }

    /// Reads several certificates by hash and sends them through `sender`; panics if
    /// any is missing (a test-setup bug rather than a network condition).
    async fn do_download_certificates(
        self,
        hashes: Vec<CryptoHash>,
        sender: oneshot::Sender<Result<Vec<ConfirmedBlockCertificate>, NodeError>>,
    ) -> Result<(), Result<Vec<ConfirmedBlockCertificate>, NodeError>> {
        let validator = self.client.lock().await;
        let certificates = validator
            .state
            .storage_client()
            .read_certificates(hashes.clone())
            .await
            .map_err(Into::into);
        let certificates = match certificates {
            Err(error) => Err(error),
            Ok(certificates) => match ResultReadCertificates::new(certificates, hashes) {
                ResultReadCertificates::Certificates(certificates) => Ok(certificates),
                ResultReadCertificates::InvalidHashes(hashes) => {
                    panic!("Missing certificates: {:?}", hashes)
                }
            },
        };
        sender.send(certificates)
    }

    /// Resolves the certificate hashes for `heights` via a chain info query, then
    /// downloads those certificates and sends them through `sender`.
    async fn do_download_certificates_by_heights(
        self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
        sender: oneshot::Sender<Result<Vec<ConfirmedBlockCertificate>, NodeError>>,
    ) -> Result<(), Result<Vec<ConfirmedBlockCertificate>, NodeError>> {
        // First, use do_handle_chain_info_query to get the certificate hashes
        let (query_sender, query_receiver) = oneshot::channel();
        let query = ChainInfoQuery::new(chain_id).with_sent_certificate_hashes_by_heights(heights);
        // `do_handle_chain_info_query` consumes `self`, so keep a clone for the second step.
        let self_clone = self.clone();
        self.do_handle_chain_info_query(query, query_sender)
            .await
            .expect("Failed to handle chain info query");
        // Get the response from the chain info query
        // NOTE(review): on a dropped in-process channel this returns without replying on
        // `sender`, which the outer `spawn_and_receive` only logs — confirm intended.
        let chain_info_response = query_receiver.await.map_err(|_| {
            Err(NodeError::ClientIoError {
                error: "Failed to receive chain info response".to_string(),
            })
        })?;
        let hashes = match chain_info_response {
            Ok(response) => response.info.requested_sent_certificate_hashes,
            Err(e) => {
                return sender.send(Err(e));
            }
        };
        // Now use do_download_certificates to get the actual certificates
        let (cert_sender, cert_receiver) = oneshot::channel();
        self_clone
            .do_download_certificates(hashes, cert_sender)
            .await?;
        // Forward the result to the original sender
        let result = cert_receiver.await.map_err(|_| {
            Err(NodeError::ClientIoError {
                error: "Failed to receive certificates".to_string(),
            })
        })?;
        sender.send(result)
    }

    /// Looks up the hash of the latest certificate that used the blob and sends it
    /// through `sender`; an unknown or never-used blob yields `BlobsNotFound`.
    async fn do_blob_last_used_by(
        self,
        blob_id: BlobId,
        sender: oneshot::Sender<Result<CryptoHash, NodeError>>,
    ) -> Result<(), Result<CryptoHash, NodeError>> {
        let validator = self.client.lock().await;
        let blob_state = validator
            .state
            .storage_client()
            .read_blob_state(blob_id)
            .await
            .map_err(Into::into);
        let certificate_hash = match blob_state {
            Err(err) => Err(err),
            Ok(blob_state) => match blob_state {
                None => Err(NodeError::BlobsNotFound(vec![blob_id])),
                Some(blob_state) => blob_state
                    .last_used_by
                    .ok_or_else(|| NodeError::BlobsNotFound(vec![blob_id])),
            },
        };
        sender.send(certificate_hash)
    }

    /// Resolves the blob's last-used certificate hash, downloads that certificate and
    /// sends it through `sender`.
    async fn do_blob_last_used_by_certificate(
        self,
        blob_id: BlobId,
        sender: oneshot::Sender<Result<ConfirmedBlockCertificate, NodeError>>,
    ) -> Result<(), Result<ConfirmedBlockCertificate, NodeError>> {
        match self.blob_last_used_by(blob_id).await {
            Ok(cert_hash) => {
                let cert = self.download_certificate(cert_hash).await;
                sender.send(cert)
            }
            Err(err) => sender.send(Err(err)),
        }
    }

    /// Computes which of `blob_ids` are absent from storage and sends the list
    /// through `sender`.
    async fn do_missing_blob_ids(
        self,
        blob_ids: Vec<BlobId>,
        sender: oneshot::Sender<Result<Vec<BlobId>, NodeError>>,
    ) -> Result<(), Result<Vec<BlobId>, NodeError>> {
        let validator = self.client.lock().await;
        let missing_blob_ids = validator
            .state
            .storage_client()
            .missing_blobs(&blob_ids)
            .await
            .map_err(Into::into);
        sender.send(missing_blob_ids)
    }
}
/// A [`ValidatorNodeProvider`] backed by a shared, in-memory list of local validator clients.
#[derive(Clone)]
pub struct NodeProvider<S>(Arc<std::sync::Mutex<Vec<LocalValidatorClient<S>>>>)
where
    S: Storage;
impl<S> NodeProvider<S>
where
    S: Storage + Clone,
{
    /// Returns a snapshot (clone) of all registered validator clients.
    fn all_nodes(&self) -> Vec<LocalValidatorClient<S>> {
        self.0.lock().unwrap().clone()
    }
}
impl<S> ValidatorNodeProvider for NodeProvider<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    type Node = LocalValidatorClient<S>;

    /// Unsupported: local validators can only be resolved from a committee list.
    fn make_node(&self, _name: &str) -> Result<Self::Node, NodeError> {
        unimplemented!()
    }

    /// Resolves each `(public_key, address)` pair to the registered local validator
    /// with that public key; fails if any key is unknown.
    fn make_nodes_from_list<A>(
        &self,
        validators: impl IntoIterator<Item = (ValidatorPublicKey, A)>,
    ) -> Result<impl Iterator<Item = (ValidatorPublicKey, Self::Node)>, NodeError>
    where
        A: AsRef<str>,
    {
        let list = self.0.lock().unwrap();
        Ok(validators
            .into_iter()
            .map(|(public_key, address)| {
                list.iter()
                    .find(|client| client.public_key == public_key)
                    .ok_or_else(|| NodeError::CannotResolveValidatorAddress {
                        address: address.as_ref().to_string(),
                    })
                    .map(|client| (public_key, client.clone()))
            })
            // Collect eagerly so any unknown validator fails the whole call.
            .collect::<Result<Vec<_>, _>>()?
            .into_iter())
    }
}
/// Builds a provider from any collection of local validator clients.
impl<S> FromIterator<LocalValidatorClient<S>> for NodeProvider<S>
where
    S: Storage,
{
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = LocalValidatorClient<S>>,
    {
        let validators: Vec<_> = iter.into_iter().collect();
        Self(Arc::new(std::sync::Mutex::new(validators)))
    }
}
// NOTE:
// * To communicate with a quorum of validators, chain clients iterate over a copy of
// `validator_clients` to spawn I/O tasks.
// * When using `LocalValidatorClient`, clients communicate with an exact quorum then stop.
// * Most tests have 1 faulty validator out of 4 so that there is exactly only 1 quorum to
// communicate with.
/// Shared scaffolding for client/worker integration tests: a committee of local
/// validators, their storages, and bookkeeping for the chains created so far.
pub struct TestBuilder<B: StorageBuilder> {
    /// Builder used to create one storage per validator (and for chain clients).
    storage_builder: B,
    /// The committee all validators belong to.
    pub initial_committee: Committee,
    /// Description of the admin chain, if one has been set.
    admin_description: Option<ChainDescription>,
    /// The network description, if one has been set.
    network_description: Option<NetworkDescription>,
    /// Accumulates genesis chains to create in each newly built storage.
    genesis_storage_builder: GenesisStorageBuilder,
    /// Resolves validator public keys to their local validator clients.
    node_provider: NodeProvider<B::Storage>,
    /// The storage backing each validator, keyed by its public key.
    validator_storages: HashMap<ValidatorPublicKey, B::Storage>,
    /// Storages created for chain clients.
    chain_client_storages: Vec<B::Storage>,
    /// Maps chain IDs to their owner accounts.
    pub chain_owners: BTreeMap<ChainId, AccountOwner>,
    /// Signer holding the account keys generated for validators and chain owners.
    pub signer: TestSigner,
}
/// Builds fresh storage instances for tests, all driven by the same test clock.
#[async_trait]
pub trait StorageBuilder {
    /// The concrete storage type produced by this builder.
    type Storage: Storage + Clone + Send + Sync + 'static;

    /// Creates a new storage instance.
    async fn build(&mut self) -> Result<Self::Storage, anyhow::Error>;

    /// Returns the clock used by storages produced by this builder.
    fn clock(&self) -> &TestClock;
}
/// Accumulates the genesis chains that each newly built test storage must be
/// initialized with.
#[derive(Default)]
struct GenesisStorageBuilder {
    /// The genesis chains recorded so far.
    accounts: Vec<GenesisAccount>,
}
/// A chain to create at genesis, together with its owner's public key.
struct GenesisAccount {
    /// The chain's description.
    description: ChainDescription,
    /// The owner's public key. (Not read by `build`; kept for bookkeeping.)
    public_key: AccountPublicKey,
}
impl GenesisStorageBuilder {
    /// Records a genesis chain to be created in every storage built later.
    fn add(&mut self, description: ChainDescription, public_key: AccountPublicKey) {
        let account = GenesisAccount {
            description,
            public_key,
        };
        self.accounts.push(account);
    }

    /// Creates every recorded genesis chain in `storage` and returns the storage.
    async fn build<S>(&self, storage: S) -> S
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        for GenesisAccount { description, .. } in &self.accounts {
            storage.create_chain(description.clone()).await.unwrap();
        }
        storage
    }
}
/// A chain client whose environment pairs the given storage with a local [`NodeProvider`].
pub type ChainClient<S> = crate::client::ChainClient<crate::environment::Impl<S, NodeProvider<S>>>;
impl<S: Storage + Clone + Send + Sync + 'static> ChainClient<S> {
    /// Reads the hashed certificate values in descending order from the given hash.
    ///
    /// Follows `previous_block_hash` links starting at `from`, returning at most
    /// `limit` confirmed blocks (fewer if the chain of ancestors ends first).
    pub async fn read_confirmed_blocks_downward(
        &self,
        from: CryptoHash,
        limit: u32,
    ) -> anyhow::Result<Vec<ConfirmedBlock>> {
        let mut values = Vec::new();
        let mut next = Some(from);
        while values.len() < limit as usize {
            let Some(hash) = next else { break };
            let value = self.read_confirmed_block(hash).await?;
            next = value.block().header.previous_block_hash;
            values.push(value);
        }
        Ok(values)
    }
}
impl<B> TestBuilder<B>
where
B: StorageBuilder,
{
pub async fn new(
mut storage_builder: B,
count: usize,
with_faulty_validators: usize,
mut signer: TestSigner,
) -> Result<Self, anyhow::Error> {
let mut validators = Vec::new();
for _ in 0..count {
let validator_keypair = ValidatorKeypair::generate();
let account_public_key = signer.generate_new();
validators.push((validator_keypair, account_public_key));
}
let for_committee = validators
.iter()
.map(|(validating, account)| (validating.public_key, *account))
.collect::<Vec<_>>();
let initial_committee = Committee::make_simple(for_committee);
let mut validator_clients = Vec::new();
let mut validator_storages = HashMap::new();
let mut faulty_validators = HashSet::new();
for (i, (validator_keypair, _account_public_key)) in validators.into_iter().enumerate() {
let validator_public_key = validator_keypair.public_key;
let storage = storage_builder.build().await?;
let state = WorkerState::new(
format!("Node {}", i),
Some(validator_keypair.secret_key),
storage.clone(),
5_000,
10_000,
)
.with_allow_inactive_chains(false)
.with_allow_messages_from_deprecated_epochs(false);
let mut validator = LocalValidatorClient::new(validator_public_key, state);
if i < with_faulty_validators {
faulty_validators.insert(validator_public_key);
validator.set_fault_type(FaultType::NoChains);
}
validator_clients.push(validator);
validator_storages.insert(validator_public_key, storage);
}
tracing::info!(
"Test will use the following faulty validators: {:?}",
faulty_validators
);
Ok(Self {
storage_builder,
initial_committee,
admin_description: None,
network_description: None,
genesis_storage_builder: GenesisStorageBuilder::default(),
node_provider: NodeProvider::from_iter(validator_clients),
validator_storages,
chain_client_storages: Vec::new(),
chain_owners: BTreeMap::new(),
signer,
})
}
pub fn with_policy(mut self, policy: ResourceControlPolicy) -> Self {
let validators = self.initial_committee.validators().clone();
self.initial_committee = Committee::new(validators, policy);
self
}
pub fn set_fault_type(&mut self, indexes: impl AsRef<[usize]>, fault_type: FaultType) {
let mut faulty_validators = vec![];
let mut validator_clients = self.node_provider.0.lock().unwrap();
for index in indexes.as_ref() {
let validator = &mut validator_clients[*index];
validator.set_fault_type(fault_type);
faulty_validators.push(validator.public_key);
}
tracing::info!(
"Making the following validators {:?}: {:?}",
fault_type,
faulty_validators
);
}
/// Creates the root chain with the given `index`, and returns a client for it.
///
/// Root chain 0 is the admin chain and needs to be initialized first, otherwise its balance
/// is automatically set to zero.
pub async fn add_root_chain(
&mut self,
index: u32,
balance: Amount,
) -> anyhow::Result<ChainClient<B::Storage>> {
// Make sure the admin chain is initialized.
if self.admin_description.is_none() && index != 0 {
Box::pin(self.add_root_chain(0, Amount::ZERO)).await?;
}
let origin = ChainOrigin::Root(index);
let public_key = self.signer.generate_new();
let open_chain_config = InitialChainConfig {
ownership: ChainOwnership::single(public_key.into()),
epoch: Epoch(0),
min_active_epoch: Epoch(0),
max_active_epoch: Epoch(0),
balance,
application_permissions: ApplicationPermissions::default(),
};
let description = ChainDescription::new(origin, open_chain_config, Timestamp::from(0));
let committee_blob = Blob::new_committee(bcs::to_bytes(&self.initial_committee).unwrap());
if index == 0 {
self.admin_description = Some(description.clone());
self.network_description = Some(NetworkDescription {
admin_chain_id: description.id(),
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/client_tests.rs | linera-core/src/unit_tests/client_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod test_helpers;
#[path = "./wasm_client_tests.rs"]
mod wasm;
use std::collections::{BTreeMap, BTreeSet};
use assert_matches::assert_matches;
use futures::StreamExt;
use linera_base::{
crypto::{AccountSecretKey, CryptoHash, InMemorySigner},
data_types::*,
identifiers::{Account, AccountOwner, ApplicationId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_chain::{
data_types::{IncomingBundle, MessageBundle, PostedMessage, Transaction},
manager::LockingBlock,
types::Timeout,
ChainError, ChainExecutionContext,
};
use linera_execution::{
committee::Committee, system::SystemOperation, ExecutionError, Message, MessageKind, Operation,
QueryOutcome, ResourceControlPolicy, SystemMessage, SystemQuery, SystemResponse,
};
use linera_storage::Storage;
use rand::Rng;
use test_case::test_case;
use test_helpers::{
assert_fees_exceed_funding, assert_insufficient_balance_during_operation,
assert_insufficient_funding,
};
#[cfg(feature = "dynamodb")]
use crate::test_utils::DynamoDbStorageBuilder;
#[cfg(feature = "rocksdb")]
use crate::test_utils::RocksDbStorageBuilder;
#[cfg(feature = "scylladb")]
use crate::test_utils::ScyllaDbStorageBuilder;
#[cfg(feature = "storage-service")]
use crate::test_utils::ServiceStorageBuilder;
use crate::{
client::{
chain_client::{self, ChainClient},
BlanketMessagePolicy, ClientOutcome, ListeningMode, MessageAction, MessagePolicy,
},
local_node::LocalNodeError,
node::{
NodeError::{self, ClientIoError},
ValidatorNode,
},
test_utils::{
ClientOutcomeResultExt as _, FaultType, MemoryStorageBuilder, StorageBuilder, TestBuilder,
},
updater::CommunicationError,
worker::{Notification, Reason, WorkerError},
Environment,
};
/// A test to ensure that our chain client listener remains `Send`. This is a bit of a
/// hack, but requires that we not hold a `std::sync::Mutex` over `await` points, a
/// situation that is likely to lead to deadlock. To further support this mode of
/// testing, `dashmap` references in [`crate::client`] have also been wrapped in a newtype
/// to make them non-`Send`.
#[test_log::test]
#[allow(dead_code)]
fn test_listener_is_send() {
fn ensure_send(_: &impl Send) {}
async fn check_listener(
chain_client: ChainClient<impl Environment>,
) -> Result<(), chain_client::Error> {
let (listener, _abort_notifications, _notifications) =
chain_client.listen(ListeningMode::FullChain).await?;
ensure_send(&listener);
Ok(())
}
// If it compiles, we're okay — no need to do anything at runtime.
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_initiating_valid_transfer_with_notifications<B>(
storage_builder: B,
) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer)
.await?
.with_policy(ResourceControlPolicy::only_fuel());
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let chain_2 = builder.add_root_chain(2, Amount::ZERO).await?;
// Listen to the notifications on the sender chain.
let mut notifications = sender.subscribe()?;
let (listener, _listen_handle, _) = sender.listen(ListeningMode::FullChain).await?;
tokio::spawn(listener);
{
let certificate = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(3),
Account::chain(chain_2.chain_id()),
)
.await
.unwrap_ok_committed();
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_millis(1000)
);
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
}
let executed_block_hash = match notifications.next().await {
Some(Notification {
reason: Reason::BlockExecuted { hash, height },
chain_id,
}) => {
assert_eq!(chain_id, sender.chain_id());
assert_eq!(height, BlockHeight::ZERO);
hash
}
_ => panic!("Expected BlockExecuted notification"),
};
// We execute twice in the local node:
// - first time when setting the proposal as a pending block
// - second time when processing pending block
// This results in two BlockExecuted notifications.
let _notification = notifications.next().await;
match notifications.next().await {
Some(Notification {
reason: Reason::NewBlock { hash, height, .. },
chain_id,
}) => {
assert_eq!(chain_id, sender.chain_id());
assert_eq!(height, BlockHeight::ZERO);
assert_eq!(executed_block_hash, hash);
}
other => panic!("Expected NewBlock notification, got {:?}", other),
}
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_claim_amount<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer)
.await?
.with_policy(ResourceControlPolicy::only_fuel());
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let owner = sender.identity().await?;
let receiver = builder.add_root_chain(2, Amount::ZERO).await?;
let receiver_id = receiver.chain_id();
let friend = receiver.identity().await?;
let cert = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(3),
Account::new(receiver_id, owner),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(cert)
);
let cert = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_millis(100),
Account::new(receiver_id, friend),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(1), 3)
.await,
Some(cert)
);
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_millis(900)
);
receiver.synchronize_from_validators().await?;
assert_eq!(receiver.process_inbox().await?.0.len(), 1);
// The friend paid to receive the message.
assert_eq!(
receiver.local_owner_balance(friend).await.unwrap(),
Amount::from_millis(100)
);
// The received amount is not in the unprotected balance.
assert_eq!(receiver.local_balance().await.unwrap(), Amount::ZERO);
assert_eq!(
receiver.local_owner_balance(owner).await.unwrap(),
Amount::from_tokens(3)
);
assert_eq!(
receiver.local_balances_with_owner(owner).await.unwrap(),
(Amount::ZERO, Some(Amount::from_tokens(3)))
);
assert_eq!(receiver.query_balance().await.unwrap(), Amount::ZERO);
assert_eq!(
receiver.query_owner_balance(owner).await.unwrap(),
Amount::from_millis(3000)
);
assert_eq!(
receiver.query_balances_with_owner(owner).await.unwrap(),
(Amount::ZERO, Some(Amount::from_millis(3000)))
);
// First attempt that should be rejected.
let cert1 = sender
.claim(
owner,
receiver_id,
Account::chain(sender.chain_id()),
Amount::from_tokens(5),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(2), 3)
.await,
Some(cert1)
);
// Second attempt with a correct amount.
let cert2 = sender
.claim(
owner,
receiver_id,
Account::chain(sender.chain_id()),
Amount::from_tokens(2),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(3), 3)
.await,
Some(cert2)
);
receiver.synchronize_from_validators().await?;
let cert = receiver.process_inbox().await?.0.pop().unwrap();
{
let messages = cert.block().body.incoming_bundles().collect::<Vec<_>>();
// Both `Claim` messages were included in the block.
assert_eq!(messages.len(), 2);
// The first one was rejected.
assert_eq!(messages[0].bundle.height, BlockHeight::from(2));
assert_eq!(messages[0].action, MessageAction::Reject);
// The second was accepted.
assert_eq!(messages[1].bundle.height, BlockHeight::from(3));
assert_eq!(messages[1].action, MessageAction::Accept);
}
sender.synchronize_from_validators().await?;
sender.process_inbox().await?;
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_millis(2900)
);
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_rotate_key_pair<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let mut signer = InMemorySigner::new(None);
let new_public_key = signer.generate_new();
let new_owner = AccountOwner::from(new_public_key);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer)
.await?
.with_policy(ResourceControlPolicy::only_fuel());
let mut sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let certificate = sender
.rotate_key_pair(new_public_key)
.await
.unwrap_ok_committed();
sender.set_preferred_owner(new_owner);
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(sender.identity().await?, new_owner);
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_millis(4000)
);
sender.synchronize_from_validators().await.unwrap();
// Can still use the chain.
sender
.burn(AccountOwner::CHAIN, Amount::from_tokens(3))
.await
.unwrap();
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_transfer_ownership<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer)
.await?
.with_policy(ResourceControlPolicy::only_fuel());
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let new_owner: AccountOwner = builder.signer.generate_new().into();
let certificate = sender.transfer_ownership(new_owner).await.unwrap().unwrap();
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_matches!(
sender.identity().await,
Err(chain_client::Error::NotAnOwner(_))
);
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_millis(4000)
);
sender.synchronize_from_validators().await.unwrap();
// Cannot use the chain any more.
assert_matches!(
sender
.burn(AccountOwner::CHAIN, Amount::from_tokens(3))
.await,
Err(chain_client::Error::NotAnOwner(_))
);
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_share_ownership<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let mut signer = InMemorySigner::new(None);
let new_owner = signer.generate_new().into();
let mut builder = TestBuilder::new(storage_builder, 4, 0, signer).await?;
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let certificate = sender
.share_ownership(new_owner, 100)
.await
.unwrap_ok_committed();
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(sender.identity().await?, sender.preferred_owner().unwrap());
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_tokens(4)
);
sender.synchronize_from_validators().await.unwrap();
// Can still use the chain with the old client.
sender
.burn(AccountOwner::CHAIN, Amount::from_tokens(2))
.await
.unwrap();
let sender_info = sender.chain_info().await?;
assert_eq!(sender_info.next_block_height, BlockHeight::from(2));
// Make a client to try the new key.
let mut client = builder
.make_client(
sender.chain_id(),
sender_info.block_hash,
BlockHeight::from(2),
)
.await?;
client.set_preferred_owner(new_owner);
// Local balance fails because the client has block height 2 but we haven't downloaded
// the blocks yet.
assert_matches!(
client.local_balance().await,
Err(chain_client::Error::WalletSynchronizationError)
);
client.synchronize_from_validators().await.unwrap();
assert_eq!(
client.local_balance().await.unwrap(),
Amount::from_tokens(2)
);
// We need at least three validators for making an operation.
builder.set_fault_type([0, 1], FaultType::Offline);
let result = client.burn(AccountOwner::CHAIN, Amount::ONE).await;
assert_matches!(
result,
Err(chain_client::Error::CommunicationError(
CommunicationError::Trusted(ClientIoError { .. }),
))
);
builder.set_fault_type([0, 1], FaultType::Honest);
builder.set_fault_type([2, 3], FaultType::Offline);
assert_matches!(
sender.burn(AccountOwner::CHAIN, Amount::ONE).await,
Err(chain_client::Error::CommunicationError(
CommunicationError::Trusted(ClientIoError { .. })
))
);
// Half the validators voted for one block, half for the other. We need to make a proposal in
// the next round to succeed.
builder.set_fault_type([0, 1, 2, 3], FaultType::Honest);
client.synchronize_from_validators().await.unwrap();
assert_eq!(
client.local_balance().await.unwrap(),
Amount::from_tokens(2)
);
client.clear_pending_proposal();
client
.burn(AccountOwner::CHAIN, Amount::ONE)
.await
.unwrap_ok_committed();
assert_eq!(client.local_balance().await.unwrap(), Amount::ONE);
// The other client doesn't know the new round number yet:
sender.synchronize_from_validators().await.unwrap();
sender.process_inbox().await.unwrap();
assert_eq!(client.chain_info().await?, sender.chain_info().await?);
assert_eq!(sender.local_balance().await.unwrap(), Amount::ONE);
sender.clear_pending_proposal();
sender
.burn(AccountOwner::CHAIN, Amount::ONE)
.await
.unwrap_ok_committed();
// That's it, we spent all our money on this test!
assert_eq!(sender.local_balance().await.unwrap(), Amount::ZERO);
client.synchronize_from_validators().await.unwrap();
client.process_inbox().await.unwrap();
assert_eq!(client.local_balance().await.unwrap(), Amount::ZERO);
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
/// Regression test: A super owner should be able to propose even without multi-leader rounds.
async fn test_super_owner_in_single_leader_round<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let mut signer = InMemorySigner::new(None);
let regular_owner = signer.generate_new().into();
let mut builder = TestBuilder::new(storage_builder, 4, 0, signer).await?;
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let super_owner = sender.identity().await?;
// Configure chain with one super owner and one regular owner, no multi-leader rounds.
let owner_change_op = Operation::system(SystemOperation::ChangeOwnership {
super_owners: vec![super_owner],
owners: vec![(regular_owner, 100)],
first_leader: None,
multi_leader_rounds: 0,
open_multi_leader_rounds: false,
timeout_config: TimeoutConfig::default(),
});
sender.execute_operation(owner_change_op).await.unwrap();
// The super owner can still burn tokens since that doesn't use the validation round oracle.
sender
.burn(AccountOwner::CHAIN, Amount::from_tokens(2))
.await
.unwrap();
assert_eq!(
sender.local_balance().await.unwrap(),
Amount::from_tokens(2)
);
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_open_chain_then_close_it<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let mut signer = InMemorySigner::new(None);
let new_public_key = signer.generate_new();
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer).await?;
// New chains use the admin chain to verify their creation certificate.
let _admin = builder.add_root_chain(0, Amount::ZERO).await?;
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
// Open the new chain.
let (new_description, _certificate) = sender
.open_chain(
ChainOwnership::single(new_public_key.into()),
ApplicationPermissions::default(),
Amount::ZERO,
)
.await
.unwrap_ok_committed();
let new_id = new_description.id();
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(sender.identity().await?, sender.preferred_owner().unwrap());
// Make a client to try the new chain.
let mut client = builder.make_client(new_id, None, BlockHeight::ZERO).await?;
client.set_preferred_owner(new_public_key.into());
client.synchronize_from_validators().await.unwrap();
assert_eq!(client.query_balance().await.unwrap(), Amount::ZERO);
client.close_chain().await.unwrap();
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_transfer_then_open_chain<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let clock = storage_builder.clock().clone();
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer).await?;
// New chains use the admin chain to verify their creation certificate.
let _admin = builder.add_root_chain(0, Amount::ZERO).await?;
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let parent = builder.add_root_chain(2, Amount::ZERO).await?;
let new_public_key = builder.signer.generate_new();
let admin_config = builder.admin_description().unwrap().config();
let new_chain_config = InitialChainConfig {
ownership: ChainOwnership::single(new_public_key.into()),
epoch: Epoch::ZERO,
min_active_epoch: admin_config.min_active_epoch,
max_active_epoch: admin_config.max_active_epoch,
balance: Amount::ZERO,
application_permissions: Default::default(),
};
let new_chain_origin = ChainOrigin::Child {
parent: parent.chain_id(),
block_height: BlockHeight::ZERO,
chain_index: 0,
};
let new_id =
ChainDescription::new(new_chain_origin, new_chain_config, clock.current_time()).id();
// Transfer before creating the chain. The validators will ignore the cross-chain messages.
let cert = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(2),
Account::chain(new_id),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(0), 3)
.await,
Some(cert)
);
// Open the new chain.
let (new_description2, certificate) = parent
.open_chain(
ChainOwnership::single(new_public_key.into()),
ApplicationPermissions::default(),
Amount::ZERO,
)
.await
.unwrap_ok_committed();
let new_id2 = new_description2.id();
assert_eq!(new_id, new_id2);
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert_eq!(
parent.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(sender.identity().await?, sender.preferred_owner().unwrap());
assert_matches!(
&certificate.block().body.transactions[0],
Transaction::ExecuteOperation(Operation::System(system_op)) if matches!(**system_op, SystemOperation::OpenChain(_)),
"Unexpected certificate value",
);
assert_eq!(
builder
.check_that_validators_have_certificate(parent.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
// Make a client to try the new chain.
let mut client = builder.make_client(new_id, None, BlockHeight::ZERO).await?;
client.set_preferred_owner(new_public_key.into());
client.synchronize_from_validators().await.unwrap();
// Make another block on top of the one that sent the two tokens, so that the validators
// process the cross-chain messages.
let cert = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(1),
Account::chain(new_id),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(1), 3)
.await,
Some(cert)
);
client.synchronize_from_validators().await.unwrap();
assert_eq!(
client.query_balance().await.unwrap(),
Amount::from_tokens(3)
);
client
.burn(AccountOwner::CHAIN, Amount::from_tokens(3))
.await
.unwrap();
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_open_chain_then_transfer<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer).await?;
// New chains use the admin chain to verify their creation certificate.
let _admin = builder.add_root_chain(0, Amount::ZERO).await?;
let sender = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let new_public_key = builder.signer.generate_new();
// Open the new chain. We are both regular and super owner.
let ownership = ChainOwnership::single(new_public_key.into())
.with_regular_owner(new_public_key.into(), 100);
let (new_description, _creation_certificate) = sender
.open_chain(ownership, ApplicationPermissions::default(), Amount::ZERO)
.await
.unwrap_ok_committed();
let new_id = new_description.id();
// Transfer after creating the chain.
let cert = sender
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(3),
Account::chain(new_id),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(1), 3)
.await,
Some(cert)
);
assert_eq!(
sender.chain_info().await?.next_block_height,
BlockHeight::from(2)
);
assert!(sender.pending_proposal().is_none());
assert_eq!(sender.identity().await?, sender.preferred_owner().unwrap());
// Make a client to try the new chain.
let mut client = builder.make_client(new_id, None, BlockHeight::ZERO).await?;
client.set_preferred_owner(new_public_key.into());
// Must process the creation certificate before using the new chain.
client.synchronize_from_validators().await.unwrap();
assert_eq!(client.local_balance().await.unwrap(), Amount::ZERO);
client.synchronize_from_validators().await.unwrap();
assert_eq!(
client.query_balance().await.unwrap(),
Amount::from_tokens(3)
);
client
.burn(AccountOwner::CHAIN, Amount::from_tokens(3))
.await
.unwrap();
assert_eq!(client.local_balance().await.unwrap(), Amount::ZERO);
Ok(())
}
#[test_case(MemoryStorageBuilder::default(); "memory")]
#[cfg_attr(feature = "storage-service", test_case(ServiceStorageBuilder::new(); "storage_service"))]
#[cfg_attr(feature = "rocksdb", test_case(RocksDbStorageBuilder::new().await; "rocks_db"))]
#[cfg_attr(feature = "dynamodb", test_case(DynamoDbStorageBuilder::default(); "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(ScyllaDbStorageBuilder::default(); "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_close_chain<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let signer = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 1, signer)
.await?
.with_policy(ResourceControlPolicy::all_categories());
let client1 = builder.add_root_chain(1, Amount::from_tokens(4)).await?;
let client2 = builder.add_root_chain(2, Amount::from_tokens(4)).await?;
let certificate = client1.close_chain().await.unwrap().unwrap().unwrap();
assert_eq!(
certificate.block().body.transactions.len(),
1,
"Unexpected transactions in certificate"
);
assert_matches!(
&certificate.block().body.transactions[0],
Transaction::ExecuteOperation(Operation::System(system_op)) if matches!(**system_op, SystemOperation::CloseChain),
"Unexpected certificate value",
);
assert_eq!(
client1.chain_info().await?.next_block_height,
BlockHeight::from(1)
);
assert!(client1.pending_proposal().is_none());
assert!(client1.identity().await.is_ok());
assert_eq!(
builder
.check_that_validators_have_certificate(client1.chain_id(), BlockHeight::ZERO, 3)
.await,
Some(certificate)
);
// Cannot use the chain for operations any more.
let result = client1
.burn(AccountOwner::CHAIN, Amount::from_tokens(3))
.await;
assert!(
matches!(
&result,
Err(chain_client::Error::LocalNodeError(
LocalNodeError::WorkerError(WorkerError::ChainError(err))
)) if matches!(**err, ChainError::ClosedChain)
),
"Unexpected result: {:?}",
result,
);
// Incoming messages now get rejected.
let cert = client2
.transfer_to_account(
AccountOwner::CHAIN,
Amount::from_tokens(3),
Account::chain(client1.chain_id()),
)
.await
.unwrap_ok_committed();
assert_eq!(
builder
.check_that_validators_have_certificate(client2.chain_id(), BlockHeight::from(0), 3)
.await,
Some(cert)
);
client1.synchronize_from_validators().await.unwrap();
let (certificates, _) = client1.process_inbox().await.unwrap();
let block = certificates[0].block();
assert_eq!(block.body.transactions.len(), 1);
assert_matches!(
&block.body.transactions[..],
[Transaction::ReceiveMessages(IncomingBundle {
origin: sender,
action: MessageAction::Reject,
bundle: MessageBundle {
messages,
..
},
})] if *sender == client2.chain_id() && matches!(messages[..],
[PostedMessage {
message: Message::System(SystemMessage::Credit { .. }),
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/wasm_client_tests.rs | linera-core/src/unit_tests/wasm_client_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Wasm specific client tests.
//!
//! These tests only run if a Wasm runtime has been configured by enabling either the `wasmer` or
//! the `wasmtime` feature flags.
// Tests for `RocksDb`, `DynamoDb`, `ScyllaDb` and `Service` are currently disabled
// because they are slow and their behavior appears to be correctly checked by the
// in-memory test.
#![allow(clippy::large_futures)]
#![cfg(any(feature = "wasmer", feature = "wasmtime"))]
use std::collections::BTreeMap;
use assert_matches::assert_matches;
use async_graphql::Request;
use counter::CounterAbi;
use crowd_funding::{CrowdFundingAbi, InstantiationArgument, Operation as CrowdFundingOperation};
use fungible::{FungibleOperation, InitialState, Parameters};
use hex_game::{HexAbi, Operation as HexOperation, Timeouts};
use linera_base::{
crypto::{CryptoHash, InMemorySigner},
data_types::{
Amount, BlobContent, BlockHeight, Bytecode, ChainDescription, Event, OracleResponse, Round,
TimeDelta, Timestamp,
},
identifiers::{
Account, ApplicationId, BlobId, BlobType, DataBlobHash, ModuleId, StreamId, StreamName,
},
ownership::{ChainOwnership, TimeoutConfig},
vm::VmRuntime,
};
use linera_chain::{data_types::MessageAction, ChainError, ChainExecutionContext};
use linera_execution::{
wasm_test, ExecutionError, Message, MessageKind, Operation, QueryOutcome,
ResourceControlPolicy, SystemMessage, SystemOperation, WasmRuntime,
};
use linera_storage::Storage as _;
use serde_json::json;
use test_case::test_case;
#[cfg(feature = "dynamodb")]
use crate::client::client_tests::DynamoDbStorageBuilder;
#[cfg(feature = "rocksdb")]
use crate::client::client_tests::RocksDbStorageBuilder;
#[cfg(feature = "scylladb")]
use crate::client::client_tests::ScyllaDbStorageBuilder;
#[cfg(feature = "storage-service")]
use crate::client::client_tests::ServiceStorageBuilder;
use crate::{
client::{
chain_client::{self, ChainClient},
client_tests::{MemoryStorageBuilder, StorageBuilder, TestBuilder},
BlanketMessagePolicy, ClientOutcome, MessagePolicy,
},
local_node::LocalNodeError,
test_utils::{ClientOutcomeResultExt as _, FaultType},
worker::WorkerError,
Environment,
};
/// Test-only convenience extension for [`ChainClient`].
trait ChainClientExt {
    /// Reads the bytecode of a Wasm example and publishes it. Returns the new module ID.
    async fn publish_wasm_example(&self, name: &str) -> anyhow::Result<ModuleId>;
}
impl<Env: Environment> ChainClientExt for ChainClient<Env> {
    async fn publish_wasm_example(&self, name: &str) -> anyhow::Result<ModuleId> {
        // Locate the example's compiled contract and service bytecode on disk.
        let (contract_file, service_file) = wasm_test::get_example_bytecode_paths(name)?;
        // Load both files and publish them together as one Wasm module.
        let contract_code = Bytecode::load_from_file(contract_file)?;
        let service_code = Bytecode::load_from_file(service_file)?;
        let (published_module, _certificate) = self
            .publish_module(contract_code, service_code, VmRuntime::Wasm)
            .await
            .unwrap_ok_committed();
        Ok(published_module)
    }
}
/// Runs the application-creation scenario against in-memory storage.
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_memory_create_application(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = MemoryStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_create_application(storage).await
}
/// Runs the application-creation scenario against the storage-service backend.
#[ignore]
#[cfg(feature = "storage-service")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_service_create_application(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ServiceStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_create_application(storage).await
}
/// Runs the application-creation scenario against RocksDB storage.
#[ignore]
#[cfg(feature = "rocksdb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_rocks_db_create_application(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = RocksDbStorageBuilder::with_wasm_runtime(wasm_runtime).await;
    run_test_create_application(storage).await
}
/// Runs the application-creation scenario against DynamoDB storage.
#[ignore]
#[cfg(feature = "dynamodb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_dynamo_db_create_application(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = DynamoDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_create_application(storage).await
}
/// Runs the application-creation scenario against ScyllaDB storage.
#[ignore]
#[cfg(feature = "scylladb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_scylla_db_create_application(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ScyllaDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_create_application(storage).await
}
/// Publishes the `counter` example module, instantiates it, and checks fuel accounting
/// as well as enforcement of the `maximum_bytecode_size` and `maximum_blob_size` limits.
async fn run_test_create_application<B>(storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let keys = InMemorySigner::new(None);
    let vm_runtime = VmRuntime::Wasm;
    // Load the `counter` example's contract and service bytecode from disk.
    let (contract_path, service_path) =
        linera_execution::wasm_test::get_example_bytecode_paths("counter")?;
    let contract_bytecode = Bytecode::load_from_file(contract_path)?;
    let service_bytecode = Bytecode::load_from_file(service_path)?;
    let contract_compressed_len = contract_bytecode.compress().compressed_bytes.len();
    let service_compressed_len = service_bytecode.compress().compressed_bytes.len();
    // Set the size limits so the example bytecode fits exactly; anything larger must fail.
    let mut policy = ResourceControlPolicy::all_categories();
    policy.maximum_bytecode_size = contract_bytecode
        .bytes
        .len()
        .max(service_bytecode.bytes.len()) as u64;
    policy.maximum_blob_size = contract_compressed_len.max(service_compressed_len) as u64;
    let mut builder = TestBuilder::new(storage_builder, 4, 1, keys)
        .await?
        .with_policy(policy.clone());
    // `publisher` publishes the module; `creator` instantiates the application.
    let publisher = builder.add_root_chain(0, Amount::from_tokens(3)).await?;
    let creator = builder.add_root_chain(1, Amount::ONE).await?;
    let (module_id, _cert) = publisher
        .publish_module(contract_bytecode, service_bytecode, vm_runtime)
        .await
        .unwrap_ok_committed();
    let module_id = module_id.with_abi::<counter::CounterAbi, (), u64>();
    creator.synchronize_from_validators().await.unwrap();
    creator.process_inbox().await.unwrap();
    // No fuel was used so far.
    let balance_after_messaging = creator.local_balance().await?;
    assert_eq!(balance_after_messaging, Amount::ONE);
    let initial_value = 10_u64;
    let (application_id, _) = creator
        .create_application(module_id, &(), &initial_value, vec![])
        .await
        .unwrap_ok_committed();
    let increment = 5_u64;
    let counter_operation = counter::CounterOperation::Increment { value: increment };
    creator
        .execute_operation(Operation::user(application_id, &counter_operation)?)
        .await
        .unwrap();
    // The service should now report the initial value plus the increment (10 + 5).
    let query = Request::new("{ value }");
    let outcome = creator
        .query_user_application(application_id, &query)
        .await
        .unwrap();
    let expected = QueryOutcome {
        response: async_graphql::Response::new(
            async_graphql::Value::from_json(json!({"value": 15})).unwrap(),
        ),
        operations: vec![],
    };
    assert_eq!(outcome, expected);
    // Creating the application used fuel because of the `instantiate` call.
    let balance_after_init = creator.local_balance().await?;
    assert!(balance_after_init < balance_after_messaging);
    // One byte over the configured bytecode size limit.
    let large_bytecode = Bytecode::new(vec![0; policy.maximum_bytecode_size as usize + 1]);
    let small_bytecode = Bytecode::new(vec![]);
    // Publishing bytecode that exceeds the limit fails.
    let result = publisher
        .publish_module(large_bytecode.clone(), small_bytecode.clone(), vm_runtime)
        .await;
    assert_matches!(
        result,
        Err(chain_client::Error::LocalNodeError(
            LocalNodeError::WorkerError(WorkerError::ChainError(chain_error))
        )) if matches!(&*chain_error, ChainError::ExecutionError(
            error, ChainExecutionContext::Block
        ) if matches!(**error, ExecutionError::BytecodeTooLarge))
    );
    // The limit applies to the service bytecode as well.
    let result = publisher
        .publish_module(small_bytecode, large_bytecode, vm_runtime)
        .await;
    assert_matches!(
        result,
        Err(chain_client::Error::LocalNodeError(
            LocalNodeError::WorkerError(WorkerError::ChainError(chain_error))
        )) if matches!(&*chain_error, ChainError::ExecutionError(
            error, ChainExecutionContext::Block
        ) if matches!(**error, ExecutionError::BytecodeTooLarge))
    );
    Ok(())
}
/// Runs the dependent-application scenario against in-memory storage.
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_memory_run_application_with_dependency(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    let storage = MemoryStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_run_application_with_dependency(storage).await
}
/// Runs the dependent-application scenario against the storage-service backend.
#[ignore]
#[cfg(feature = "storage-service")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_service_run_application_with_dependency(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    let storage = ServiceStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_run_application_with_dependency(storage).await
}
/// Runs the dependent-application scenario against RocksDB storage.
#[ignore]
#[cfg(feature = "rocksdb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_rocks_db_run_application_with_dependency(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    let storage = RocksDbStorageBuilder::with_wasm_runtime(wasm_runtime).await;
    run_test_run_application_with_dependency(storage).await
}
/// Runs the dependent-application scenario against DynamoDB storage.
#[ignore]
#[cfg(feature = "dynamodb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_dynamo_db_run_application_with_dependency(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    let storage = DynamoDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_run_application_with_dependency(storage).await
}
/// Runs the dependent-application scenario against ScyllaDB storage.
#[ignore]
#[cfg(feature = "scylladb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_scylla_db_run_application_with_dependency(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    let storage = ScyllaDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_run_application_with_dependency(storage).await
}
/// Publishes the `counter` and `meta-counter` examples, creates a meta-counter
/// application that depends on the counter application, and exercises cross-chain
/// messages: a successful increment, a rejected untracked message, and a rejected
/// tracked message that bounces back to the sender.
async fn run_test_run_application_with_dependency<B>(storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let keys = InMemorySigner::new(None);
    let mut builder = TestBuilder::new(storage_builder, 4, 1, keys)
        .await?
        .with_policy(ResourceControlPolicy::all_categories());
    // Will publish the module.
    let publisher = builder.add_root_chain(0, Amount::from_tokens(3)).await?;
    // Will create the apps and use them to send a message.
    let creator = builder.add_root_chain(1, Amount::ONE).await?;
    // Will receive the message.
    let receiver = builder.add_root_chain(2, Amount::ONE).await?;
    let receiver_id = receiver.chain_id();
    // Handling the message causes an oracle request to the counter service, so no fast blocks
    // are allowed.
    let receiver_key = receiver.identity().await.unwrap();
    receiver
        .change_ownership(ChainOwnership::multiple(
            [(receiver_key, 100)],
            100,
            TimeoutConfig::default(),
        ))
        .await
        .unwrap();
    let creator_key = creator.identity().await.unwrap();
    creator
        .change_ownership(ChainOwnership::multiple(
            [(creator_key, 100)],
            100,
            TimeoutConfig::default(),
        ))
        .await
        .unwrap();
    // Publish both modules; `meta-counter` depends on `counter`.
    let module_id1 = publisher.publish_wasm_example("counter").await?;
    let module_id1 = module_id1.with_abi::<counter::CounterAbi, (), u64>();
    let module_id2 = publisher.publish_wasm_example("meta-counter").await?;
    let module_id2 =
        module_id2.with_abi::<meta_counter::MetaCounterAbi, ApplicationId<CounterAbi>, ()>();
    // Creator receives the bytecode files then creates the app.
    creator.synchronize_from_validators().await.unwrap();
    let initial_value = 10_u64;
    let (application_id1, _) = creator
        .create_application(module_id1, &(), &initial_value, vec![])
        .await
        .unwrap_ok_committed();
    let (application_id2, certificate) = creator
        .create_application(
            module_id2,
            &application_id1,
            &(),
            vec![application_id1.forget_abi()],
        )
        .await
        .unwrap_ok_committed();
    // Instantiation emitted an "instantiated" event on the app's announcements stream.
    assert_eq!(
        certificate.block().body.events,
        vec![vec![Event {
            stream_id: StreamId {
                application_id: application_id2.forget_abi().into(),
                stream_name: StreamName(b"announcements".to_vec()),
            },
            index: 0,
            value: bcs::to_bytes(&"instantiated".to_string()).unwrap(),
        }]]
    );
    let mut operation = meta_counter::Operation::increment(receiver_id, 5, true);
    operation.fuel_grant = 1000000;
    let cert = creator
        .execute_operation(Operation::user(application_id2, &operation)?)
        .await
        .unwrap_ok_committed();
    // The operation queried the counter service, so the block records a service
    // oracle response containing the counter's current value.
    let block = cert.block();
    let responses = &block.body.oracle_responses;
    let [_, responses] = &responses[..] else {
        panic!("Unexpected oracle responses: {:?}", responses);
    };
    let [OracleResponse::Service(json)] = &responses[..] else {
        assert_eq!(&responses[..], &[]);
        panic!("Unexpected oracle responses: {:?}", responses);
    };
    let response_json = serde_json::from_slice::<serde_json::Value>(json).unwrap();
    assert_eq!(response_json["data"], json!({"value": 10}));
    receiver.synchronize_from_validators().await.unwrap();
    receiver.process_inbox().await.unwrap();
    // After processing the increment message, the receiver-side meta-counter is 5.
    let query = Request::new("{ value }");
    let outcome = receiver
        .query_user_application(application_id2, &query)
        .await
        .unwrap();
    let expected = QueryOutcome {
        response: async_graphql::Response::new(
            async_graphql::Value::from_json(json!({"value": 5})).unwrap(),
        ),
        operations: vec![],
    };
    assert_eq!(outcome, expected);
    // Try again with a value that will make the (untracked) message fail.
    let operation = meta_counter::Operation::fail(receiver_id);
    let cert = creator
        .execute_operation(Operation::user(application_id2, &operation)?)
        .await
        .unwrap_ok_committed();
    assert_eq!(
        builder
            .check_that_validators_have_certificate(creator.chain_id(), BlockHeight::from(4), 3)
            .await,
        Some(cert)
    );
    receiver.synchronize_from_validators().await.unwrap();
    let mut certs = receiver.process_inbox().await.unwrap().0;
    assert_eq!(certs.len(), 1);
    let cert = certs.pop().unwrap();
    // The failing untracked message is rejected and produces no outgoing messages.
    let incoming_bundles = cert.block().body.incoming_bundles().collect::<Vec<_>>();
    assert_eq!(incoming_bundles.len(), 1);
    assert_eq!(incoming_bundles[0].action, MessageAction::Reject);
    assert_eq!(
        incoming_bundles[0].bundle.messages[0].kind,
        MessageKind::Simple
    );
    let messages = cert.block().messages();
    assert_eq!(messages.len(), 1);
    assert_eq!(messages[0].len(), 0);
    // Try again with a value that will make the (tracked) message fail.
    let mut operation = meta_counter::Operation::fail(receiver_id);
    operation.is_tracked = true;
    let cert = creator
        .execute_operation(Operation::user(application_id2, &operation)?)
        .await
        .unwrap_ok_committed();
    assert_eq!(
        builder
            .check_that_validators_have_certificate(creator.chain_id(), BlockHeight::from(5), 3)
            .await,
        Some(cert)
    );
    receiver.synchronize_from_validators().await.unwrap();
    let mut certs = receiver.process_inbox().await.unwrap().0;
    assert_eq!(certs.len(), 1);
    let cert = certs.pop().unwrap();
    // The tracked message is also rejected, but this time it is sent back.
    let incoming_bundles = cert.block().body.incoming_bundles().collect::<Vec<_>>();
    assert_eq!(incoming_bundles.len(), 1);
    assert_eq!(incoming_bundles[0].action, MessageAction::Reject);
    assert_eq!(
        incoming_bundles[0].bundle.messages[0].kind,
        MessageKind::Tracked
    );
    let messages = cert.block().messages();
    assert_eq!(messages.len(), 1);
    // The bounced message is marked as "bouncing" in the Wasm context and succeeds.
    creator.synchronize_from_validators().await.unwrap();
    let mut certs = creator.process_inbox().await.unwrap().0;
    assert_eq!(certs.len(), 1);
    let cert = certs.pop().unwrap();
    let incoming_bundles = cert.block().body.incoming_bundles().collect::<Vec<_>>();
    assert_eq!(incoming_bundles.len(), 2);
    // First message is the grant refund for the successful message sent before.
    assert_eq!(incoming_bundles[0].action, MessageAction::Accept);
    assert_eq!(
        incoming_bundles[0].bundle.messages[0].kind,
        MessageKind::Tracked
    );
    assert_matches!(
        incoming_bundles[0].bundle.messages[0].message,
        Message::System(SystemMessage::Credit { .. })
    );
    // Second message is the bounced message.
    assert_eq!(incoming_bundles[1].action, MessageAction::Accept);
    assert_eq!(
        incoming_bundles[1].bundle.messages[0].kind,
        MessageKind::Bouncing
    );
    assert_matches!(
        incoming_bundles[1].bundle.messages[0].message,
        Message::User { .. }
    );
    Ok(())
}
/// Runs the cross-chain fungible-transfer scenario against in-memory storage.
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_memory_cross_chain_message(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = MemoryStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_cross_chain_message(storage).await
}
/// Runs the cross-chain fungible-transfer scenario against the storage-service backend.
#[ignore]
#[cfg(feature = "storage-service")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_service_cross_chain_message(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ServiceStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_cross_chain_message(storage).await
}
/// Runs the cross-chain fungible-transfer scenario against RocksDB storage.
#[ignore]
#[cfg(feature = "rocksdb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_rocks_db_cross_chain_message(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = RocksDbStorageBuilder::with_wasm_runtime(wasm_runtime).await;
    run_test_cross_chain_message(storage).await
}
/// Runs the cross-chain fungible-transfer scenario against DynamoDB storage.
#[ignore]
#[cfg(feature = "dynamodb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_dynamo_db_cross_chain_message(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = DynamoDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_cross_chain_message(storage).await
}
/// Runs the cross-chain fungible-transfer scenario against ScyllaDB storage.
#[ignore]
#[cfg(feature = "scylladb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_scylla_db_cross_chain_message(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ScyllaDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_cross_chain_message(storage).await
}
/// Creates a `fungible` token application and transfers tokens across chains, checking
/// that receivers preprocess (but do not execute) sender blocks, that transfers arrive
/// as user messages, and that over-spending fails.
async fn run_test_cross_chain_message<B>(storage_builder: B) -> anyhow::Result<()>
where
    B: StorageBuilder,
{
    let keys = InMemorySigner::new(None);
    let mut builder = TestBuilder::new(storage_builder, 4, 1, keys)
        .await?
        .with_policy(ResourceControlPolicy::all_categories());
    let _admin = builder.add_root_chain(0, Amount::ONE).await?;
    let sender = builder.add_root_chain(1, Amount::from_tokens(3)).await?;
    let receiver = builder.add_root_chain(2, Amount::ONE).await?;
    let receiver2 = builder.add_root_chain(3, Amount::ONE).await?;
    let module_id = sender.publish_wasm_example("fungible").await?;
    let module_id = module_id.with_abi::<fungible::FungibleTokenAbi, Parameters, InitialState>();
    let sender_owner = sender.preferred_owner().unwrap();
    let receiver_owner = receiver.preferred_owner().unwrap();
    let receiver2_owner = receiver2.preferred_owner().unwrap();
    // The sender starts with the entire token supply.
    let accounts = BTreeMap::from_iter([(sender_owner, Amount::from_tokens(1_000_000))]);
    let state = InitialState { accounts };
    let params = Parameters::new("FUN");
    let (application_id, _cert) = sender
        .create_application(module_id, &params, &state, vec![])
        .await
        .unwrap_ok_committed();
    // Make a transfer using the fungible app.
    let transfer = FungibleOperation::Transfer {
        owner: sender_owner,
        amount: 100.into(),
        target_account: Account {
            chain_id: receiver.chain_id(),
            owner: receiver_owner,
        },
    };
    let cert = sender
        .execute_operation(Operation::user(application_id, &transfer)?)
        .await
        .unwrap_ok_committed();
    receiver.synchronize_from_validators().await.unwrap();
    {
        // The receiver did not execute the sender chain.
        let chain = receiver
            .storage_client()
            .load_chain(sender.chain_id())
            .await?;
        assert_eq!(chain.tip_state.get().next_block_height.0, 0);
        // The sender's block was only preprocessed, i.e. recorded by hash.
        assert_eq!(
            chain
                .preprocessed_blocks
                .get(&cert.inner().height())
                .await?,
            Some(cert.hash())
        );
    }
    // Processing the inbox delivers the transfer as a user message.
    let certs = receiver.process_inbox().await.unwrap().0;
    assert_eq!(certs.len(), 1);
    let bundles = certs[0].block().body.incoming_bundles();
    assert!(bundles
        .flat_map(|msg| &msg.bundle.messages)
        .any(|msg| matches!(msg.message, Message::User { .. })));
    // Make another transfer.
    let transfer = FungibleOperation::Transfer {
        owner: sender_owner,
        amount: 200.into(),
        target_account: Account {
            chain_id: receiver.chain_id(),
            owner: receiver_owner,
        },
    };
    let cert = sender
        .execute_operation(Operation::user(application_id, &transfer)?)
        .await
        .unwrap_ok_committed();
    assert_eq!(
        builder
            .check_that_validators_have_certificate(sender.chain_id(), BlockHeight::from(3), 3)
            .await,
        Some(cert)
    );
    receiver.synchronize_from_validators().await.unwrap();
    let certs = receiver.process_inbox().await.unwrap().0;
    assert_eq!(certs.len(), 1);
    let bundles = certs[0].block().body.incoming_bundles();
    assert!(bundles
        .flat_map(|msg| &msg.bundle.messages)
        .any(|msg| matches!(msg.message, Message::User { .. })));
    // Try another transfer except that the amount is too large.
    let transfer = FungibleOperation::Transfer {
        owner: receiver_owner,
        amount: 301.into(),
        target_account: Account {
            chain_id: receiver2.chain_id(),
            owner: receiver2_owner,
        },
    };
    assert!(receiver
        .execute_operation(Operation::user(application_id, &transfer)?)
        .await
        .is_err());
    // Discard the failed proposal so the next block can be proposed.
    receiver.clear_pending_proposal();
    // Try another transfer with the correct amount.
    let transfer = FungibleOperation::Transfer {
        owner: receiver_owner,
        amount: 300.into(),
        target_account: Account {
            chain_id: receiver2.chain_id(),
            owner: receiver2_owner,
        },
    };
    let certificate = receiver
        .execute_operation(Operation::user(application_id, &transfer)?)
        .await
        .unwrap_ok_committed();
    assert_eq!(
        builder
            .check_that_validators_have_certificate(receiver.chain_id(), BlockHeight::from(2), 3)
            .await,
        Some(certificate)
    );
    receiver2.synchronize_from_validators().await.unwrap();
    Ok(())
}
/// Runs the event-streams scenario against in-memory storage.
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_memory_event_streams(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = MemoryStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_event_streams(storage).await
}
/// Runs the event-streams scenario against the storage-service backend.
#[ignore]
#[cfg(feature = "storage-service")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_service_event_streams(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ServiceStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_event_streams(storage).await
}
/// Runs the event-streams scenario against RocksDB storage.
#[ignore]
#[cfg(feature = "rocksdb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_rocks_db_event_streams(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = RocksDbStorageBuilder::with_wasm_runtime(wasm_runtime).await;
    run_test_event_streams(storage).await
}
/// Runs the event-streams scenario against DynamoDB storage.
#[ignore]
#[cfg(feature = "dynamodb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_dynamo_db_event_streams(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = DynamoDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_event_streams(storage).await
}
/// Runs the event-streams scenario against ScyllaDB storage.
#[ignore]
#[cfg(feature = "scylladb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime; "wasmtime"))]
#[test_log::test(tokio::test)]
async fn test_scylla_db_event_streams(wasm_runtime: WasmRuntime) -> anyhow::Result<()> {
    let storage = ScyllaDbStorageBuilder::with_wasm_runtime(wasm_runtime);
    run_test_event_streams(storage).await
}
async fn run_test_event_streams<B>(storage_builder: B) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let keys = InMemorySigner::new(None);
let mut builder = TestBuilder::new(storage_builder, 4, 0, keys)
.await?
.with_policy(ResourceControlPolicy::all_categories());
builder.set_fault_type([3], FaultType::Offline);
let sender = builder.add_root_chain(0, Amount::ONE).await?;
let sender2 = builder.add_root_chain(1, Amount::ONE).await?;
// Make sure that sender's chain ID is less than sender2's - important for the final
// query check
let (sender, sender2) = if sender.chain_id() < sender2.chain_id() {
(sender, sender2)
} else {
(sender2, sender)
};
let mut receiver = builder.add_root_chain(2, Amount::ONE).await?;
let module_id = receiver.publish_wasm_example("social").await?;
let module_id = module_id.with_abi::<social::SocialAbi, (), ()>();
let (application_id, _cert) = receiver
.create_application(module_id, &(), &(), vec![])
.await
.unwrap_ok_committed();
// Request to subscribe to the senders.
let request_subscribe = social::Operation::Subscribe {
chain_id: sender.chain_id(),
};
let request_subscribe2 = social::Operation::Subscribe {
chain_id: sender2.chain_id(),
};
receiver
.execute_operations(
vec![
Operation::user(application_id, &request_subscribe)?,
Operation::user(application_id, &request_subscribe2)?,
],
vec![],
)
.await
.unwrap_ok_committed();
// Make a post.
let text = "Please like and comment!".to_string();
let post = social::Operation::Post {
text: text.clone(),
image_url: None,
};
sender
.execute_operation(Operation::user(application_id, &post)?)
.await
.unwrap_ok_committed();
receiver.synchronize_from_validators().await.unwrap();
builder.set_fault_type([3], FaultType::Honest);
builder.set_fault_type([2], FaultType::Offline);
let certs = receiver.process_inbox().await.unwrap().0;
assert_eq!(certs.len(), 1);
// There should be an UpdateStreams operation due to the new post.
let operations = certs[0].block().body.operations().collect::<Vec<_>>();
let [Operation::System(operation)] = &*operations else {
panic!("Expected one operation, got {:?}", operations);
};
let stream_id = StreamId {
application_id: application_id.forget_abi().into(),
stream_name: b"posts".into(),
};
assert_eq!(
**operation,
SystemOperation::UpdateStreams(vec![(sender.chain_id(), stream_id, 1)])
);
let query = async_graphql::Request::new("{ receivedPosts { keys { author, index } } }");
let outcome = receiver
.query_user_application(application_id, &query)
.await?;
let expected = QueryOutcome {
response: async_graphql::Response::new(
async_graphql::Value::from_json(json!({
"receivedPosts": {
"keys": [
{ "author": sender.chain_id(), "index": 0 }
]
}
}))
.unwrap(),
),
operations: vec![],
};
assert_eq!(outcome, expected);
// Make two more posts.
let text = "Follow sender2!".to_string();
let post = social::Operation::Post {
text: text.clone(),
image_url: None,
};
sender
.execute_operation(Operation::user(application_id, &post)?)
.await
.unwrap_ok_committed();
let text = "Thanks for the shoutout!".to_string();
let post = social::Operation::Post {
text: text.clone(),
image_url: None,
};
sender2
.execute_operation(Operation::user(application_id, &post)?)
.await
.unwrap_ok_committed();
receiver.synchronize_from_validators().await.unwrap();
receiver.options_mut().message_policy = MessagePolicy::new(
BlanketMessagePolicy::Accept,
Some([sender.chain_id()].into_iter().collect()),
None,
None,
);
// Receiver should only process the event from sender now.
let certs = receiver.process_inbox().await.unwrap().0;
assert_eq!(certs.len(), 1);
// There should be an UpdateStreams operation due to the new post.
let operations = certs[0].block().body.operations().collect::<Vec<_>>();
let [Operation::System(operation)] = &*operations else {
panic!("Expected one operation, got {:?}", operations);
};
let stream_id = StreamId {
application_id: application_id.forget_abi().into(),
stream_name: b"posts".into(),
};
assert_eq!(
**operation,
SystemOperation::UpdateStreams(vec![(sender.chain_id(), stream_id, 2)])
);
// Let's receive from everyone again.
receiver.options_mut().message_policy =
MessagePolicy::new(BlanketMessagePolicy::Accept, None, None, None);
// Receiver should now process the event from sender2 as well.
let certs = receiver.process_inbox().await.unwrap().0;
assert_eq!(certs.len(), 1);
// There should be an UpdateStreams operation due to the new post.
let operations = certs[0].block().body.operations().collect::<Vec<_>>();
let [Operation::System(operation)] = &*operations else {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/wasm_worker_tests.rs | linera-core/src/unit_tests/wasm_worker_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Wasm specific worker tests.
//!
//! These tests only run if a Wasm runtime has been configured by enabling either the `wasmer` or
//! the `wasmtime` feature flags.
#![allow(clippy::large_futures)]
#![cfg(any(feature = "wasmer", feature = "wasmtime"))]
use std::collections::BTreeMap;
use assert_matches::assert_matches;
use linera_base::{
crypto::AccountSecretKey,
data_types::{
Amount, ApplicationDescription, Blob, BlockHeight, Bytecode, OracleResponse, Timestamp,
},
identifiers::ModuleId,
vm::VmRuntime,
};
use linera_chain::{
data_types::OperationResult,
test::{make_child_block, make_first_block, BlockTestExt},
};
use linera_execution::{system::SystemOperation, Operation, WasmRuntime};
use linera_storage::Storage;
use test_case::test_case;
use super::TestEnvironment;
#[cfg(feature = "dynamodb")]
use crate::test_utils::DynamoDbStorageBuilder;
#[cfg(feature = "rocksdb")]
use crate::test_utils::RocksDbStorageBuilder;
#[cfg(feature = "scylladb")]
use crate::test_utils::ScyllaDbStorageBuilder;
use crate::{
test_utils::{MemoryStorageBuilder, StorageBuilder},
worker::WorkerError,
};
/// Runs the create-application certificate-handling scenario on in-memory storage.
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_memory_handle_certificates_to_create_application(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    run_test_handle_certificates_to_create_application(MemoryStorageBuilder::with_wasm_runtime(
        Some(wasm_runtime),
    ))
    .await
}
/// Runs the create-application certificate-handling scenario on RocksDB storage.
#[cfg(feature = "rocksdb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_rocks_db_handle_certificates_to_create_application(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    run_test_handle_certificates_to_create_application(
        RocksDbStorageBuilder::with_wasm_runtime(Some(wasm_runtime)).await,
    )
    .await
}
/// Runs the create-application certificate-handling scenario on DynamoDB storage.
#[cfg(feature = "dynamodb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_dynamo_db_handle_certificates_to_create_application(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    run_test_handle_certificates_to_create_application(DynamoDbStorageBuilder::with_wasm_runtime(
        Some(wasm_runtime),
    ))
    .await
}
/// Runs the create-application certificate-handling scenario on ScyllaDB storage.
#[cfg(feature = "scylladb")]
#[cfg_attr(feature = "wasmer", test_case(WasmRuntime::Wasmer ; "wasmer"))]
#[cfg_attr(feature = "wasmtime", test_case(WasmRuntime::Wasmtime ; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_scylla_db_handle_certificates_to_create_application(
    wasm_runtime: WasmRuntime,
) -> anyhow::Result<()> {
    run_test_handle_certificates_to_create_application(ScyllaDbStorageBuilder::with_wasm_runtime(
        Some(wasm_runtime),
    ))
    .await
}
async fn run_test_handle_certificates_to_create_application<B>(
mut storage_builder: B,
) -> anyhow::Result<()>
where
B: StorageBuilder,
{
let vm_runtime = VmRuntime::Wasm;
let publisher_owner = AccountSecretKey::generate().public().into();
let creator_owner = AccountSecretKey::generate().public().into();
let mut env = TestEnvironment::new(&mut storage_builder, false, false).await?;
let publisher_chain = env.add_root_chain(1, publisher_owner, Amount::ZERO).await;
let creator_chain = env.add_root_chain(2, creator_owner, Amount::ZERO).await;
// Load the bytecode files for a module.
let (contract_path, service_path) =
linera_execution::wasm_test::get_example_bytecode_paths("counter")?;
let contract_bytecode = Bytecode::load_from_file(contract_path)?;
let service_bytecode = Bytecode::load_from_file(service_path)?;
let contract_blob = Blob::new_contract_bytecode(contract_bytecode.clone().compress());
let service_blob = Blob::new_service_bytecode(service_bytecode.compress());
let contract_blob_id = contract_blob.id();
let service_blob_id = service_blob.id();
let contract_blob_hash = contract_blob_id.hash;
let service_blob_hash = service_blob_id.hash;
let module_id = ModuleId::new(contract_blob_hash, service_blob_hash, vm_runtime);
// Publish the module.
let publish_operation = SystemOperation::PublishModule { module_id };
let publish_block = make_first_block(publisher_chain.id())
.with_timestamp(1)
.with_operation(publish_operation);
env.executing_worker()
.storage
.write_blobs(&[contract_blob.clone(), service_blob.clone()])
.await?;
let publish_certificate = env
.execute_proposal(
publish_block.clone(),
vec![contract_blob.clone(), service_blob.clone()],
)
.await?;
assert!(publish_certificate
.value()
.matches_proposed_block(&publish_block));
assert!(publish_certificate.block().outcome_matches(
vec![vec![]],
BTreeMap::new(),
BTreeMap::new(),
vec![vec![]],
vec![vec![]],
vec![vec![]],
vec![OperationResult::default()]
));
assert_matches!(
env.worker()
.fully_handle_certificate_with_notifications(publish_certificate.clone(), &())
.await,
Err(WorkerError::BlobsNotFound(_))
);
env.write_blobs(&[contract_blob.clone(), service_blob.clone()])
.await?;
let info = env
.worker()
.fully_handle_certificate_with_notifications(publish_certificate.clone(), &())
.await
.unwrap()
.info;
assert_eq!(publisher_chain.id(), info.chain_id);
assert_eq!(Amount::ZERO, info.chain_balance);
assert_eq!(BlockHeight::from(1), info.next_block_height);
assert_eq!(Timestamp::from(1), info.timestamp);
assert_eq!(Some(publish_certificate.hash()), info.block_hash);
assert!(info.manager.pending.is_none());
// Create an application.
let initial_value = 10_u64;
let initial_value_bytes = serde_json::to_vec(&initial_value)?;
let parameters_bytes = serde_json::to_vec(&())?;
let create_operation = SystemOperation::CreateApplication {
module_id,
parameters: parameters_bytes.clone(),
instantiation_argument: initial_value_bytes.clone(),
required_application_ids: vec![],
};
let application_description = ApplicationDescription {
module_id,
creator_chain_id: creator_chain.id(),
block_height: BlockHeight::from(0),
application_index: 0,
required_application_ids: vec![],
parameters: parameters_bytes,
};
let application_description_blob = Blob::new_application_description(&application_description);
let application_id = From::from(&application_description);
let create_block = make_first_block(creator_chain.id())
.with_timestamp(2)
.with_operation(create_operation);
let create_certificate = env.execute_proposal(create_block.clone(), vec![]).await?;
assert!(create_certificate
.value()
.matches_proposed_block(&create_block));
assert!(create_certificate.block().outcome_matches(
vec![vec![]],
BTreeMap::new(),
BTreeMap::new(),
vec![vec![
OracleResponse::Blob(contract_blob_id),
OracleResponse::Blob(service_blob_id),
]],
vec![vec![]],
vec![vec![application_description_blob.clone()]],
vec![OperationResult::default()],
));
env.write_blobs(&[application_description_blob.clone()])
.await?;
let info = env
.worker()
.fully_handle_certificate_with_notifications(create_certificate.clone(), &())
.await
.unwrap()
.info;
assert_eq!(creator_chain.id(), info.chain_id);
assert_eq!(Amount::ZERO, info.chain_balance);
assert_eq!(BlockHeight::from(1), info.next_block_height);
assert_eq!(Timestamp::from(2), info.timestamp);
assert_eq!(Some(create_certificate.hash()), info.block_hash);
assert!(info.manager.pending.is_none());
// Execute an application operation
let increment = 5_u64;
let counter_operation = counter::CounterOperation::Increment { value: increment };
let user_operation = bcs::to_bytes(&counter_operation)?;
let run_block = make_child_block(&create_certificate.into_value())
.with_timestamp(3)
.with_operation(Operation::User {
application_id,
bytes: user_operation.clone(),
});
let run_certificate = env.execute_proposal(run_block.clone(), vec![]).await?;
assert!(run_certificate.value().matches_proposed_block(&run_block));
assert!(run_certificate.block().outcome_matches(
vec![vec![]],
BTreeMap::new(),
BTreeMap::new(),
vec![vec![]],
vec![vec![]],
vec![vec![]],
vec![OperationResult(bcs::to_bytes(&15u64)?)],
));
let info = env
.worker()
.fully_handle_certificate_with_notifications(run_certificate.clone(), &())
.await
.unwrap()
.info;
assert_eq!(creator_chain.id(), info.chain_id);
assert_eq!(Amount::ZERO, info.chain_balance);
assert_eq!(BlockHeight::from(2), info.next_block_height);
assert_eq!(Some(run_certificate.hash()), info.block_hash);
assert_eq!(Timestamp::from(3), info.timestamp);
assert!(info.manager.pending.is_none());
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/unit_tests/value_cache_tests.rs | linera-core/src/unit_tests/value_cache_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Cow, collections::BTreeSet};
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Epoch},
hashed::Hashed,
identifiers::ChainId,
};
use linera_chain::types::Timeout;
use super::ValueCache;
/// Test cache size for unit tests.
const TEST_CACHE_SIZE: usize = 10;
/// Tests attempt to retrieve non-existent value.
#[test]
fn test_retrieve_missing_value() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let hash = CryptoHash::test_hash("Missing value");
assert!(cache.get(&hash).is_none());
assert!(cache.keys::<Vec<_>>().is_empty());
}
/// Tests inserting a certificate value in the cache.
#[test]
fn test_insert_single_certificate_value() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let value = create_dummy_certificate_value(0);
let hash = value.hash();
assert!(cache.insert(Cow::Borrowed(&value)));
assert!(cache.contains(&hash));
assert_eq!(cache.get(&hash), Some(value));
assert_eq!(cache.keys::<BTreeSet<_>>(), BTreeSet::from([hash]));
}
/// Tests inserting many certificate values in the cache, one-by-one.
#[test]
fn test_insert_many_certificate_values_individually() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
for value in &values {
assert!(cache.insert(Cow::Borrowed(value)));
}
for value in &values {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(values.iter().map(Hashed::hash))
);
}
/// Tests inserting many values in the cache, all-at-once.
#[test]
fn test_insert_many_values_together() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
cache.insert_all(values.iter().map(Cow::Borrowed));
for value in &values {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(values.iter().map(|el| el.hash()))
);
}
/// Tests re-inserting many values in the cache, all-at-once.
#[test]
fn test_reinsertion_of_values() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
cache.insert_all(values.iter().map(Cow::Borrowed));
for value in &values {
assert!(!cache.insert(Cow::Borrowed(value)));
}
for value in &values {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(values.iter().map(Hashed::hash))
);
}
/// Tests eviction of one entry.
#[test]
fn test_one_eviction() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..=(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
cache.insert_all(values.iter().map(Cow::Borrowed));
assert!(!cache.contains(&values[0].hash()));
assert!(cache.get(&values[0].hash()).is_none());
for value in values.iter().skip(1) {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(values.iter().skip(1).map(Hashed::hash))
);
}
/// Tests eviction of the second entry.
#[test]
fn test_eviction_of_second_entry() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..=(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
cache.insert_all(values.iter().take(TEST_CACHE_SIZE).map(Cow::Borrowed));
cache.get(&values[0].hash());
assert!(cache.insert(Cow::Borrowed(&values[TEST_CACHE_SIZE])));
assert!(cache.contains(&values[0].hash()));
assert_eq!(cache.get(&values[0].hash()).as_ref(), Some(&values[0]));
assert!(!cache.contains(&values[1].hash()));
assert!(cache.get(&values[1].hash()).is_none());
for value in values.iter().skip(2) {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(
values
.iter()
.skip(2)
.map(Hashed::hash)
.chain(Some(values[0].hash()))
)
);
}
/// Tests if reinsertion of the first entry promotes it so that it's not evicted so soon.
#[test]
fn test_promotion_of_reinsertion() {
let cache = ValueCache::<CryptoHash, Hashed<Timeout>>::new(TEST_CACHE_SIZE);
let values = create_dummy_certificate_values(0..=(TEST_CACHE_SIZE as u64)).collect::<Vec<_>>();
cache.insert_all(values.iter().take(TEST_CACHE_SIZE).map(Cow::Borrowed));
assert!(!cache.insert(Cow::Borrowed(&values[0])));
assert!(cache.insert(Cow::Borrowed(&values[TEST_CACHE_SIZE])));
assert!(cache.contains(&values[0].hash()));
assert_eq!(cache.get(&values[0].hash()).as_ref(), Some(&values[0]));
assert!(!cache.contains(&values[1].hash()));
assert!(cache.get(&values[1].hash()).is_none());
for value in values.iter().skip(2) {
assert!(cache.contains(&value.hash()));
assert_eq!(cache.get(&value.hash()).as_ref(), Some(value));
}
assert_eq!(
cache.keys::<BTreeSet<_>>(),
BTreeSet::from_iter(
values
.iter()
.skip(2)
.map(Hashed::hash)
.chain(Some(values[0].hash()))
)
);
}
/// Creates multiple dummy [`Hashed<Timeout>`]s to use in the tests.
fn create_dummy_certificate_values<Heights>(
heights: Heights,
) -> impl Iterator<Item = Hashed<Timeout>>
where
Heights: IntoIterator,
Heights::Item: Into<BlockHeight>,
{
heights.into_iter().map(create_dummy_certificate_value)
}
/// Creates a new dummy [`Hashed<Timeout>`] to use in the tests.
fn create_dummy_certificate_value(height: impl Into<BlockHeight>) -> Hashed<Timeout> {
Hashed::new(Timeout::new(
ChainId(CryptoHash::test_hash("Fake chain ID")),
height.into(),
Epoch(0),
))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/chain_worker/config.rs | linera-core/src/chain_worker/config.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Configuration parameters for the chain worker.
use std::sync::Arc;
use linera_base::{crypto::ValidatorSecretKey, time::Duration};
use crate::CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES;
/// Configuration parameters for the [`ChainWorkerState`][`super::state::ChainWorkerState`].
#[derive(Clone)]
pub struct ChainWorkerConfig {
/// The signature key pair of the validator. The key may be missing for replicas
/// without voting rights (possibly with a partial view of chains).
pub key_pair: Option<Arc<ValidatorSecretKey>>,
/// Whether inactive chains are allowed in storage.
pub allow_inactive_chains: bool,
/// Whether new messages from deprecated epochs are allowed.
pub allow_messages_from_deprecated_epochs: bool,
/// Whether the user application services should be long-lived.
pub long_lived_services: bool,
/// Blocks with a timestamp this far in the future will still be accepted, but the validator
/// will wait until that timestamp before voting.
pub block_time_grace_period: Duration,
/// Idle chain workers free their memory after that duration without requests.
pub ttl: Duration,
/// TTL for sender chains.
// We don't want them to keep in memory forever since usually they're short-lived.
pub sender_chain_ttl: Duration,
/// The size to truncate receive log entries in chain info responses.
pub chain_info_max_received_log_entries: usize,
}
impl ChainWorkerConfig {
/// Configures the `key_pair` in this [`ChainWorkerConfig`].
pub fn with_key_pair(mut self, key_pair: Option<ValidatorSecretKey>) -> Self {
match key_pair {
Some(validator_secret) => {
self.key_pair = Some(Arc::new(validator_secret));
}
None => {
self.key_pair = None;
}
}
self
}
/// Gets a reference to the [`ValidatorSecretKey`], if available.
pub fn key_pair(&self) -> Option<&ValidatorSecretKey> {
self.key_pair.as_ref().map(Arc::as_ref)
}
}
impl Default for ChainWorkerConfig {
fn default() -> Self {
Self {
key_pair: None,
allow_inactive_chains: false,
allow_messages_from_deprecated_epochs: false,
long_lived_services: false,
block_time_grace_period: Default::default(),
ttl: Default::default(),
sender_chain_ttl: Duration::from_secs(1),
chain_info_max_received_log_entries: CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES,
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/chain_worker/delivery_notifier.rs | linera-core/src/chain_worker/delivery_notifier.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module contains a helper type to notify when cross-chain messages have been
//! delivered.
//!
//! Keeping it in a separate module ensures that only the chain worker is able to call its
//! methods.
use std::{
collections::BTreeMap,
mem,
sync::{Arc, Mutex},
};
use linera_base::data_types::BlockHeight;
use tokio::sync::oneshot;
use tracing::warn;
/// A set of pending listeners waiting to be notified about the delivery of messages sent
/// from specific [`BlockHeight`]s.
///
/// The notifier instance can be cheaply `clone`d and works as a shared reference.
/// However, its methods still require `&mut self` to hint that it should only be changed by
/// [`ChainWorkerGuard`](super::ChainWorkerGuard).
#[derive(Clone, Default)]
pub struct DeliveryNotifier {
notifiers: Arc<Mutex<BTreeMap<BlockHeight, Vec<oneshot::Sender<()>>>>>,
}
impl DeliveryNotifier {
/// Registers a delivery `notifier` for a desired [`BlockHeight`].
pub(super) fn register(&mut self, height: BlockHeight, notifier: oneshot::Sender<()>) {
let mut notifiers = self
.notifiers
.lock()
.expect("Panics should never happen while holding a lock to the `notifiers`");
notifiers.entry(height).or_default().push(notifier);
}
/// Notifies that all messages up to `height` have been delivered.
pub(super) fn notify(&mut self, height: BlockHeight) {
let relevant_notifiers = {
let mut notifiers = self
.notifiers
.lock()
.expect("Panics should never happen while holding a lock to the `notifiers`");
let pending_notifiers = height
.try_add_one()
.map(|first_still_undelivered_height| {
notifiers.split_off(&first_still_undelivered_height)
})
.unwrap_or_default();
mem::replace(&mut *notifiers, pending_notifiers)
};
for notifier in relevant_notifiers.into_values().flatten() {
if let Err(()) = notifier.send(()) {
warn!("Failed to notify message delivery to caller");
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/chain_worker/actor.rs | linera-core/src/chain_worker/actor.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An actor that runs a chain worker.
use std::{
collections::{BTreeMap, HashMap, HashSet},
fmt,
sync::{self, Arc, RwLock},
};
use custom_debug_derive::Debug;
use futures::FutureExt;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{ApplicationDescription, Blob, BlockHeight, Epoch, TimeDelta, Timestamp},
hashed::Hashed,
identifiers::{ApplicationId, BlobId, ChainId, StreamId},
time::Instant,
};
use linera_chain::{
data_types::{BlockProposal, MessageBundle, ProposedBlock},
types::{Block, ConfirmedBlockCertificate, TimeoutCertificate, ValidatedBlockCertificate},
ChainStateView,
};
use linera_execution::{
system::EventSubscriptions, ExecutionStateView, Query, QueryContext, QueryOutcome,
ServiceRuntimeEndpoint, ServiceSyncRuntime,
};
use linera_storage::{Clock as _, Storage};
use linera_views::context::InactiveContext;
use tokio::sync::{mpsc, oneshot, OwnedRwLockReadGuard};
use tracing::{debug, instrument, trace, Instrument as _};
use super::{config::ChainWorkerConfig, state::ChainWorkerState, DeliveryNotifier};
use crate::{
chain_worker::BlockOutcome,
data_types::{ChainInfoQuery, ChainInfoResponse},
value_cache::ValueCache,
worker::{NetworkActions, WorkerError},
};
/// Type alias for event subscriptions result.
pub(crate) type EventSubscriptionsResult = Vec<((ChainId, StreamId), EventSubscriptions)>;
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{exponential_bucket_interval, register_histogram};
use prometheus::Histogram;
pub static CHAIN_WORKER_REQUEST_QUEUE_WAIT_TIME: LazyLock<Histogram> = LazyLock::new(|| {
register_histogram(
"chain_worker_request_queue_wait_time",
"Time (ms) a chain worker request waits in queue before being processed",
exponential_bucket_interval(0.1_f64, 10_000.0),
)
});
}
/// A request for the [`ChainWorkerActor`].
#[derive(Debug)]
pub(crate) enum ChainWorkerRequest<Context>
where
Context: linera_views::context::Context + Clone + 'static,
{
/// Reads the certificate for a requested [`BlockHeight`].
#[cfg(with_testing)]
ReadCertificate {
height: BlockHeight,
#[debug(skip)]
callback: oneshot::Sender<Result<Option<ConfirmedBlockCertificate>, WorkerError>>,
},
/// Request a read-only view of the [`ChainStateView`].
GetChainStateView {
#[debug(skip)]
callback:
oneshot::Sender<Result<OwnedRwLockReadGuard<ChainStateView<Context>>, WorkerError>>,
},
/// Query an application's state.
QueryApplication {
query: Query,
block_hash: Option<CryptoHash>,
#[debug(skip)]
callback: oneshot::Sender<Result<QueryOutcome, WorkerError>>,
},
/// Describe an application.
DescribeApplication {
application_id: ApplicationId,
#[debug(skip)]
callback: oneshot::Sender<Result<ApplicationDescription, WorkerError>>,
},
/// Execute a block but discard any changes to the chain state.
StageBlockExecution {
block: ProposedBlock,
round: Option<u32>,
published_blobs: Vec<Blob>,
#[debug(skip)]
callback: oneshot::Sender<Result<(Block, ChainInfoResponse), WorkerError>>,
},
/// Process a leader timeout issued for this multi-owner chain.
ProcessTimeout {
certificate: TimeoutCertificate,
#[debug(skip)]
callback: oneshot::Sender<Result<(ChainInfoResponse, NetworkActions), WorkerError>>,
},
/// Handle a proposal for the next block on this chain.
HandleBlockProposal {
proposal: BlockProposal,
#[debug(skip)]
callback: oneshot::Sender<Result<(ChainInfoResponse, NetworkActions), WorkerError>>,
},
/// Process a validated block issued for this multi-owner chain.
ProcessValidatedBlock {
certificate: ValidatedBlockCertificate,
#[debug(skip)]
callback:
oneshot::Sender<Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError>>,
},
/// Process a confirmed block (a commit).
ProcessConfirmedBlock {
certificate: ConfirmedBlockCertificate,
#[debug(with = "elide_option")]
notify_when_messages_are_delivered: Option<oneshot::Sender<()>>,
#[debug(skip)]
callback:
oneshot::Sender<Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError>>,
},
/// Process a cross-chain update.
ProcessCrossChainUpdate {
origin: ChainId,
bundles: Vec<(Epoch, MessageBundle)>,
#[debug(skip)]
callback: oneshot::Sender<Result<Option<BlockHeight>, WorkerError>>,
},
/// Handle cross-chain request to confirm that the recipient was updated.
ConfirmUpdatedRecipient {
recipient: ChainId,
latest_height: BlockHeight,
#[debug(skip)]
callback: oneshot::Sender<Result<(), WorkerError>>,
},
/// Handle a [`ChainInfoQuery`].
HandleChainInfoQuery {
query: ChainInfoQuery,
#[debug(skip)]
callback: oneshot::Sender<Result<(ChainInfoResponse, NetworkActions), WorkerError>>,
},
/// Get a blob if it belongs to the current locking block or pending proposal.
DownloadPendingBlob {
blob_id: BlobId,
#[debug(skip)]
callback: oneshot::Sender<Result<Blob, WorkerError>>,
},
/// Handle a blob that belongs to a pending proposal or validated block certificate.
HandlePendingBlob {
blob: Blob,
#[debug(skip)]
callback: oneshot::Sender<Result<ChainInfoResponse, WorkerError>>,
},
/// Update the received certificate trackers to at least the given values.
UpdateReceivedCertificateTrackers {
new_trackers: BTreeMap<ValidatorPublicKey, u64>,
callback: oneshot::Sender<Result<(), WorkerError>>,
},
/// Get preprocessed block hashes in a given height range.
GetPreprocessedBlockHashes {
start: BlockHeight,
end: BlockHeight,
#[debug(skip)]
callback: oneshot::Sender<Result<Vec<CryptoHash>, WorkerError>>,
},
/// Get the next block height to receive from an inbox.
GetInboxNextHeight {
origin: ChainId,
#[debug(skip)]
callback: oneshot::Sender<Result<BlockHeight, WorkerError>>,
},
/// Get locking blobs for specific blob IDs.
GetLockingBlobs {
blob_ids: Vec<BlobId>,
#[debug(skip)]
callback: oneshot::Sender<Result<Option<Vec<Blob>>, WorkerError>>,
},
/// Get block hashes for specified heights.
GetBlockHashes {
heights: Vec<BlockHeight>,
#[debug(skip)]
callback: oneshot::Sender<Result<Vec<CryptoHash>, WorkerError>>,
},
/// Get proposed blobs from the manager for specified blob IDs.
GetProposedBlobs {
blob_ids: Vec<BlobId>,
#[debug(skip)]
callback: oneshot::Sender<Result<Vec<Blob>, WorkerError>>,
},
/// Get event subscriptions as a list of ((ChainId, StreamId), EventSubscriptions).
GetEventSubscriptions {
#[debug(skip)]
callback: oneshot::Sender<Result<EventSubscriptionsResult, WorkerError>>,
},
/// Get the next expected event index for a stream.
GetNextExpectedEvent {
stream_id: StreamId,
#[debug(skip)]
callback: oneshot::Sender<Result<Option<u32>, WorkerError>>,
},
/// Get received certificate trackers.
GetReceivedCertificateTrackers {
#[debug(skip)]
callback: oneshot::Sender<Result<HashMap<ValidatorPublicKey, u64>, WorkerError>>,
},
/// Get tip state info for next_outbox_heights calculation.
GetTipStateAndOutboxInfo {
receiver_id: ChainId,
#[debug(skip)]
callback: oneshot::Sender<Result<(BlockHeight, Option<BlockHeight>), WorkerError>>,
},
/// Get the next height to preprocess.
GetNextHeightToPreprocess {
#[debug(skip)]
callback: oneshot::Sender<Result<BlockHeight, WorkerError>>,
},
}
/// The actor worker type.
pub(crate) struct ChainWorkerActor<StorageClient>
where
StorageClient: Storage + Clone + 'static,
{
chain_id: ChainId,
config: ChainWorkerConfig,
storage: StorageClient,
block_values: Arc<ValueCache<CryptoHash, Hashed<Block>>>,
execution_state_cache: Arc<ValueCache<CryptoHash, ExecutionStateView<InactiveContext>>>,
tracked_chains: Option<Arc<sync::RwLock<HashSet<ChainId>>>>,
delivery_notifier: DeliveryNotifier,
is_tracked: bool,
}
struct ServiceRuntimeActor {
task: web_thread_pool::Task<()>,
endpoint: ServiceRuntimeEndpoint,
}
impl ServiceRuntimeActor {
/// Spawns a blocking task to execute the service runtime actor.
///
/// Returns the task handle and the endpoints to interact with the actor.
async fn spawn(chain_id: ChainId, thread_pool: &linera_execution::ThreadPool) -> Self {
let (execution_state_sender, incoming_execution_requests) =
futures::channel::mpsc::unbounded();
let (runtime_request_sender, runtime_request_receiver) = std::sync::mpsc::channel();
Self {
endpoint: ServiceRuntimeEndpoint {
incoming_execution_requests,
runtime_request_sender,
},
task: thread_pool
.run((), move |()| async move {
ServiceSyncRuntime::new(
execution_state_sender,
QueryContext {
chain_id,
next_block_height: BlockHeight(0),
local_time: Timestamp::from(0),
},
)
.run(runtime_request_receiver)
})
.await,
}
}
}
impl<StorageClient> ChainWorkerActor<StorageClient>
where
StorageClient: Storage + Clone + 'static,
{
/// Runs the [`ChainWorkerActor`]. The chain state is loaded when the first request
/// arrives.
#[expect(clippy::too_many_arguments)]
pub(crate) async fn run(
config: ChainWorkerConfig,
storage: StorageClient,
block_values: Arc<ValueCache<CryptoHash, Hashed<Block>>>,
execution_state_cache: Arc<ValueCache<CryptoHash, ExecutionStateView<InactiveContext>>>,
tracked_chains: Option<Arc<RwLock<HashSet<ChainId>>>>,
delivery_notifier: DeliveryNotifier,
chain_id: ChainId,
incoming_requests: mpsc::UnboundedReceiver<(
ChainWorkerRequest<StorageClient::Context>,
tracing::Span,
Instant,
)>,
is_tracked: bool,
) {
let actor = ChainWorkerActor {
config,
storage,
block_values,
execution_state_cache,
tracked_chains,
delivery_notifier,
chain_id,
is_tracked,
};
if let Err(err) = actor.handle_requests(incoming_requests).await {
tracing::error!("Chain actor error: {err}");
}
}
/// Sleeps for the configured TTL.
pub(super) async fn sleep_until_timeout(&self) {
let now = self.storage.clock().current_time();
let timeout = if self.is_tracked {
self.config.sender_chain_ttl
} else {
self.config.ttl
};
let ttl = TimeDelta::from_micros(u64::try_from(timeout.as_micros()).unwrap_or(u64::MAX));
let timeout = now.saturating_add(ttl);
self.storage.clock().sleep_until(timeout).await
}
/// Runs the worker until there are no more incoming requests.
#[instrument(
skip_all,
fields(chain_id = format!("{:.8}", self.chain_id), long_lived_services = %self.config.long_lived_services),
)]
async fn handle_requests(
self,
mut incoming_requests: mpsc::UnboundedReceiver<(
ChainWorkerRequest<StorageClient::Context>,
tracing::Span,
Instant,
)>,
) -> Result<(), WorkerError> {
trace!("Starting `ChainWorkerActor`");
while let Some((request, span, _queued_at)) = incoming_requests.recv().await {
// Record how long the request waited in queue (in milliseconds)
#[cfg(with_metrics)]
{
let queue_wait_time_ms = _queued_at.elapsed().as_secs_f64() * 1000.0;
metrics::CHAIN_WORKER_REQUEST_QUEUE_WAIT_TIME.observe(queue_wait_time_ms);
}
let (service_runtime_task, service_runtime_endpoint) =
if self.config.long_lived_services {
let actor =
ServiceRuntimeActor::spawn(self.chain_id, self.storage.thread_pool()).await;
(Some(actor.task), Some(actor.endpoint))
} else {
(None, None)
};
trace!("Loading chain state of {}", self.chain_id);
let mut worker = ChainWorkerState::load(
self.config.clone(),
self.storage.clone(),
self.block_values.clone(),
self.execution_state_cache.clone(),
self.tracked_chains.clone(),
self.delivery_notifier.clone(),
self.chain_id,
service_runtime_endpoint,
)
.instrument(span.clone())
.await?;
Box::pin(worker.handle_request(request))
.instrument(span)
.await;
loop {
futures::select! {
() = self.sleep_until_timeout().fuse() => break,
maybe_request = incoming_requests.recv().fuse() => {
let Some((request, span, _queued_at)) = maybe_request else {
break; // Request sender was dropped.
};
// Record how long the request waited in queue (in milliseconds)
#[cfg(with_metrics)]
{
let queue_wait_time_ms = _queued_at.elapsed().as_secs_f64() * 1000.0;
metrics::CHAIN_WORKER_REQUEST_QUEUE_WAIT_TIME.observe(queue_wait_time_ms);
}
Box::pin(worker.handle_request(request)).instrument(span).await;
}
}
}
trace!("Unloading chain state of {} ...", self.chain_id);
worker.clear_shared_chain_view().await;
drop(worker);
if let Some(task) = service_runtime_task {
task.await?;
}
trace!("Done unloading chain state of {}", self.chain_id);
}
trace!("`ChainWorkerActor` finished");
Ok(())
}
}
/// Writes an option as `Some(..)` or `None`.
fn elide_option<T>(option: &Option<T>, f: &mut fmt::Formatter) -> fmt::Result {
match option {
Some(_) => write!(f, "Some(..)"),
None => write!(f, "None"),
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/chain_worker/state.rs | linera-core/src/chain_worker/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! The state and functionality of a chain worker.
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
sync::{self, Arc},
};
use futures::future::Either;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{
ApplicationDescription, ArithmeticError, Blob, BlockHeight, Epoch, Round, Timestamp,
},
ensure,
hashed::Hashed,
identifiers::{AccountOwner, ApplicationId, BlobId, BlobType, ChainId, EventId, StreamId},
};
use linera_chain::{
data_types::{
BlockExecutionOutcome, BlockProposal, IncomingBundle, MessageAction, MessageBundle,
OriginalProposal, ProposalContent, ProposedBlock,
},
manager,
types::{Block, ConfirmedBlockCertificate, TimeoutCertificate, ValidatedBlockCertificate},
ChainError, ChainExecutionContext, ChainStateView, ExecutionResultExt as _,
};
use linera_execution::{
system::{EpochEventData, EPOCH_STREAM_NAME},
Committee, ExecutionRuntimeContext as _, ExecutionStateView, Query, QueryContext, QueryOutcome,
ServiceRuntimeEndpoint,
};
use linera_storage::{Clock as _, ResultReadCertificates, Storage};
use linera_views::{
context::{Context, InactiveContext},
views::{ClonableView, ReplaceContext as _, RootView as _, View as _},
};
use tokio::sync::{oneshot, OwnedRwLockReadGuard, RwLock, RwLockWriteGuard};
use tracing::{debug, instrument, trace, warn};
use super::{ChainWorkerConfig, ChainWorkerRequest, DeliveryNotifier, EventSubscriptionsResult};
use crate::{
data_types::{ChainInfo, ChainInfoQuery, ChainInfoResponse, CrossChainRequest},
value_cache::ValueCache,
worker::{NetworkActions, Notification, Reason, WorkerError},
};
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram};
use prometheus::Histogram;
pub static CREATE_NETWORK_ACTIONS_LATENCY: LazyLock<Histogram> = LazyLock::new(|| {
register_histogram(
"create_network_actions_latency",
"Time (ms) to create network actions",
exponential_bucket_latencies(10_000.0),
)
});
}
/// The state of the chain worker.
pub(crate) struct ChainWorkerState<StorageClient>
where
    StorageClient: Storage + Clone + 'static,
{
    // Worker configuration; also supplies the key pair used when voting.
    config: ChainWorkerConfig,
    // Storage client used to load/persist chain data, blobs and certificates.
    storage: StorageClient,
    // The state of the single chain this worker is responsible for.
    chain: ChainStateView<StorageClient::Context>,
    // Lazily created read-only clone of `chain`, handed out to readers; see
    // `chain_state_view` / `clear_shared_chain_view` for the locking protocol.
    shared_chain_view: Option<Arc<RwLock<ChainStateView<StorageClient::Context>>>>,
    // Endpoint to the service runtime, when application queries are supported.
    service_runtime_endpoint: Option<ServiceRuntimeEndpoint>,
    // Shared cache of hashed block values, keyed by block hash.
    block_values: Arc<ValueCache<CryptoHash, Hashed<Block>>>,
    // Shared cache of execution states, keyed by state hash.
    execution_state_cache: Arc<ValueCache<CryptoHash, ExecutionStateView<InactiveContext>>>,
    // The set of chains this node tracks, if restricted; `None` means no restriction.
    tracked_chains: Option<Arc<sync::RwLock<HashSet<ChainId>>>>,
    // Notifies interested parties when outgoing messages have been delivered.
    delivery_notifier: DeliveryNotifier,
    // Set once the chain has been checked to be active — presumably to skip
    // re-checking on later requests (set in `initialize_and_save_if_needed`,
    // which is outside this view; TODO confirm).
    knows_chain_is_active: bool,
}
/// Whether the block was processed or skipped. Used for metrics.
pub enum BlockOutcome {
    /// The block was handled and committed/voted on.
    Processed,
    /// The block was only recorded as preprocessed — presumably returned by
    /// `process_confirmed_block` for out-of-order heights; confirm with that method.
    Preprocessed,
    /// The block had already been handled; nothing was done.
    Skipped,
}
impl<StorageClient> ChainWorkerState<StorageClient>
where
StorageClient: Storage + Clone + 'static,
{
    /// Creates a new [`ChainWorkerState`] using the provided `storage` client.
    ///
    /// Loads the chain state for `chain_id` from storage; the remaining arguments
    /// are stored as-is for later use by the worker.
    #[instrument(skip_all, fields(
        chain_id = %chain_id
    ))]
    #[expect(clippy::too_many_arguments)]
    pub(super) async fn load(
        config: ChainWorkerConfig,
        storage: StorageClient,
        block_values: Arc<ValueCache<CryptoHash, Hashed<Block>>>,
        execution_state_cache: Arc<ValueCache<CryptoHash, ExecutionStateView<InactiveContext>>>,
        tracked_chains: Option<Arc<sync::RwLock<HashSet<ChainId>>>>,
        delivery_notifier: DeliveryNotifier,
        chain_id: ChainId,
        service_runtime_endpoint: Option<ServiceRuntimeEndpoint>,
    ) -> Result<Self, WorkerError> {
        let chain = storage.load_chain(chain_id).await?;
        Ok(ChainWorkerState {
            config,
            storage,
            chain,
            // Created lazily, on the first call to `chain_state_view`.
            shared_chain_view: None,
            service_runtime_endpoint,
            block_values,
            execution_state_cache,
            tracked_chains,
            delivery_notifier,
            // The active-chain check has not been performed yet.
            knows_chain_is_active: false,
        })
    }
/// Returns the [`ChainId`] of the chain handled by this worker.
fn chain_id(&self) -> ChainId {
self.chain.chain_id()
}
    /// Handles a request and applies it to the chain state.
    ///
    /// The result is sent back through the request's `callback` channel. After every
    /// request, unsaved changes to the chain state are rolled back, so a failed
    /// request cannot leave partial state behind for the next one.
    #[instrument(skip_all, fields(chain_id = %self.chain_id()))]
    pub(super) async fn handle_request(
        &mut self,
        request: ChainWorkerRequest<StorageClient::Context>,
    ) {
        tracing::trace!("Handling chain worker request: {request:?}");
        // TODO(#2237): Spawn concurrent tasks for read-only operations
        // `responded` is `false` iff the requester dropped its callback receiver
        // before the response could be delivered.
        let responded = match request {
            #[cfg(with_testing)]
            ChainWorkerRequest::ReadCertificate { height, callback } => {
                callback.send(self.read_certificate(height).await).is_ok()
            }
            ChainWorkerRequest::GetChainStateView { callback } => {
                callback.send(self.chain_state_view().await).is_ok()
            }
            ChainWorkerRequest::QueryApplication {
                query,
                block_hash,
                callback,
            } => callback
                .send(self.query_application(query, block_hash).await)
                .is_ok(),
            ChainWorkerRequest::DescribeApplication {
                application_id,
                callback,
            } => callback
                .send(self.describe_application(application_id).await)
                .is_ok(),
            ChainWorkerRequest::StageBlockExecution {
                block,
                round,
                published_blobs,
                callback,
            } => callback
                .send(
                    self.stage_block_execution(block, round, &published_blobs)
                        .await,
                )
                .is_ok(),
            ChainWorkerRequest::ProcessTimeout {
                certificate,
                callback,
            } => callback
                .send(self.process_timeout(certificate).await)
                .is_ok(),
            ChainWorkerRequest::HandleBlockProposal { proposal, callback } => callback
                .send(self.handle_block_proposal(proposal).await)
                .is_ok(),
            ChainWorkerRequest::ProcessValidatedBlock {
                certificate,
                callback,
            } => callback
                .send(self.process_validated_block(certificate).await)
                .is_ok(),
            ChainWorkerRequest::ProcessConfirmedBlock {
                certificate,
                notify_when_messages_are_delivered,
                callback,
            } => callback
                .send(
                    self.process_confirmed_block(certificate, notify_when_messages_are_delivered)
                        .await,
                )
                .is_ok(),
            ChainWorkerRequest::ProcessCrossChainUpdate {
                origin,
                bundles,
                callback,
            } => callback
                .send(self.process_cross_chain_update(origin, bundles).await)
                .is_ok(),
            ChainWorkerRequest::ConfirmUpdatedRecipient {
                recipient,
                latest_height,
                callback,
            } => callback
                .send(
                    self.confirm_updated_recipient(recipient, latest_height)
                        .await,
                )
                .is_ok(),
            ChainWorkerRequest::HandleChainInfoQuery { query, callback } => callback
                .send(self.handle_chain_info_query(query).await)
                .is_ok(),
            ChainWorkerRequest::DownloadPendingBlob { blob_id, callback } => callback
                .send(self.download_pending_blob(blob_id).await)
                .is_ok(),
            ChainWorkerRequest::HandlePendingBlob { blob, callback } => {
                callback.send(self.handle_pending_blob(blob).await).is_ok()
            }
            ChainWorkerRequest::UpdateReceivedCertificateTrackers {
                new_trackers,
                callback,
            } => callback
                .send(
                    self.update_received_certificate_trackers(new_trackers)
                        .await,
                )
                .is_ok(),
            ChainWorkerRequest::GetPreprocessedBlockHashes {
                start,
                end,
                callback,
            } => callback
                .send(self.get_preprocessed_block_hashes(start, end).await)
                .is_ok(),
            ChainWorkerRequest::GetInboxNextHeight { origin, callback } => callback
                .send(self.get_inbox_next_height(origin).await)
                .is_ok(),
            ChainWorkerRequest::GetLockingBlobs { blob_ids, callback } => callback
                .send(self.get_locking_blobs(blob_ids).await)
                .is_ok(),
            ChainWorkerRequest::GetBlockHashes { heights, callback } => {
                callback.send(self.get_block_hashes(heights).await).is_ok()
            }
            ChainWorkerRequest::GetProposedBlobs { blob_ids, callback } => callback
                .send(self.get_proposed_blobs(blob_ids).await)
                .is_ok(),
            ChainWorkerRequest::GetEventSubscriptions { callback } => {
                callback.send(self.get_event_subscriptions().await).is_ok()
            }
            ChainWorkerRequest::GetNextExpectedEvent {
                stream_id,
                callback,
            } => callback
                .send(self.get_next_expected_event(stream_id).await)
                .is_ok(),
            ChainWorkerRequest::GetReceivedCertificateTrackers { callback } => callback
                .send(self.get_received_certificate_trackers().await)
                .is_ok(),
            ChainWorkerRequest::GetTipStateAndOutboxInfo {
                receiver_id,
                callback,
            } => callback
                .send(self.get_tip_state_and_outbox_info(receiver_id).await)
                .is_ok(),
            ChainWorkerRequest::GetNextHeightToPreprocess { callback } => callback
                .send(self.get_next_height_to_preprocess().await)
                .is_ok(),
        };
        if !responded {
            // Not necessarily an error: the client may have given up waiting.
            debug!("Callback for `ChainWorkerActor` was dropped before a response was sent");
        }
        // Roll back any unsaved changes to the chain state: If there was an error while trying
        // to handle the request, the chain state might contain unsaved and potentially invalid
        // changes. The next request needs to be applied to the chain state as it is in storage.
        self.chain.rollback();
    }
/// Returns a read-only view of the [`ChainStateView`].
///
/// The returned view holds a lock on the chain state, which prevents the worker from changing
/// it.
async fn chain_state_view(
&mut self,
) -> Result<OwnedRwLockReadGuard<ChainStateView<StorageClient::Context>>, WorkerError> {
if self.shared_chain_view.is_none() {
self.shared_chain_view = Some(Arc::new(RwLock::new(self.chain.clone_unchecked()?)));
}
Ok(self
.shared_chain_view
.as_ref()
.expect("`shared_chain_view` should be initialized above")
.clone()
.read_owned()
.await)
}
/// Clears the shared chain view, and acquires and drops its write lock.
///
/// This is the only place a write lock is acquired, and read locks are acquired in
/// the `chain_state_view` method, which has a `&mut self` receiver like this one.
/// That means that when this function returns, no readers will be waiting to acquire
/// the lock and it is safe to write the chain state to storage without any readers
/// having a stale view of it.
#[instrument(skip_all, fields(
chain_id = %self.chain_id()
))]
pub(super) async fn clear_shared_chain_view(&mut self) {
if let Some(shared_chain_view) = self.shared_chain_view.take() {
let _: RwLockWriteGuard<_> = shared_chain_view.write().await;
}
}
/// Handles a [`ChainInfoQuery`], potentially voting on the next block.
#[tracing::instrument(level = "debug", skip(self))]
async fn handle_chain_info_query(
&mut self,
query: ChainInfoQuery,
) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
let create_network_actions = query.create_network_actions;
if let Some((height, round)) = query.request_leader_timeout {
self.vote_for_leader_timeout(height, round).await?;
}
if query.request_fallback {
self.vote_for_fallback().await?;
}
let response = self.prepare_chain_info_response(query).await?;
// Trigger any outgoing cross-chain messages that haven't been confirmed yet.
let actions = if create_network_actions {
self.create_network_actions(None).await?
} else {
NetworkActions::default()
};
Ok((response, actions))
}
/// Returns the requested blob, if it belongs to the current locking block or pending proposal.
#[instrument(skip_all, fields(
chain_id = %self.chain_id(),
blob_id = %blob_id
))]
async fn download_pending_blob(&self, blob_id: BlobId) -> Result<Blob, WorkerError> {
if let Some(blob) = self.chain.manager.pending_blob(&blob_id).await? {
return Ok(blob);
}
let blob = self.storage.read_blob(blob_id).await?;
blob.ok_or(WorkerError::BlobsNotFound(vec![blob_id]))
}
/// Reads the blobs from the chain manager or from storage. Returns an error if any are
/// missing.
#[instrument(skip_all, fields(
chain_id = %self.chain_id()
))]
async fn get_required_blobs(
&self,
required_blob_ids: impl IntoIterator<Item = BlobId>,
created_blobs: &BTreeMap<BlobId, Blob>,
) -> Result<BTreeMap<BlobId, Blob>, WorkerError> {
let maybe_blobs = self
.maybe_get_required_blobs(required_blob_ids, Some(created_blobs))
.await?;
let not_found_blob_ids = missing_blob_ids(&maybe_blobs);
ensure!(
not_found_blob_ids.is_empty(),
WorkerError::BlobsNotFound(not_found_blob_ids)
);
Ok(maybe_blobs
.into_iter()
.filter_map(|(blob_id, maybe_blob)| Some((blob_id, maybe_blob?)))
.collect())
}
    /// Tries to read the blobs from the chain manager or storage. Returns `None` if not found.
    ///
    /// Lookup proceeds through successive sources, each stage only consulted for the
    /// blobs still missing after the previous one:
    /// 1. the `created_blobs` supplied by the caller (if any),
    /// 2. the chain manager's pending blobs,
    /// 3. `pending_validated_blobs`,
    /// 4. every owner entry of `pending_proposed_blobs`,
    /// 5. persistent storage.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id()
    ))]
    async fn maybe_get_required_blobs(
        &self,
        blob_ids: impl IntoIterator<Item = BlobId>,
        created_blobs: Option<&BTreeMap<BlobId, Blob>>,
    ) -> Result<BTreeMap<BlobId, Option<Blob>>, WorkerError> {
        // Deduplicate and sort the ids, then pair each with a slot to fill in.
        let maybe_blobs = blob_ids.into_iter().collect::<BTreeSet<_>>();
        let mut maybe_blobs = maybe_blobs
            .into_iter()
            .map(|x| (x, None))
            .collect::<Vec<(BlobId, Option<Blob>)>>();
        // Stage 1: blobs created by the block currently being handled.
        if let Some(blob_map) = created_blobs {
            for (blob_id, value) in &mut maybe_blobs {
                if let Some(blob) = blob_map.get(blob_id) {
                    *value = Some(blob.clone());
                }
            }
        }
        // Stage 2: blobs pending in the chain manager. The zip relies on the
        // lookup returning one entry per queried id, in order.
        let (missing_indices, missing_blob_ids) = missing_indices_blob_ids(&maybe_blobs);
        let second_block_blobs = self.chain.manager.pending_blobs(&missing_blob_ids).await?;
        for (index, blob) in missing_indices.into_iter().zip(second_block_blobs) {
            maybe_blobs[index].1 = blob;
        }
        // Stage 3: blobs attached to a pending validated block.
        let (missing_indices, missing_blob_ids) = missing_indices_blob_ids(&maybe_blobs);
        let third_block_blobs = self
            .chain
            .pending_validated_blobs
            .multi_get(&missing_blob_ids)
            .await?;
        for (index, blob) in missing_indices.into_iter().zip(third_block_blobs) {
            maybe_blobs[index].1 = blob;
        }
        // Stage 4: blobs attached to any pending proposal, scanning all owners.
        let (missing_indices, missing_blob_ids) = missing_indices_blob_ids(&maybe_blobs);
        if !missing_indices.is_empty() {
            let all_entries_pending_blobs = self
                .chain
                .pending_proposed_blobs
                .try_load_all_entries()
                .await?;
            for (index, blob_id) in missing_indices.into_iter().zip(missing_blob_ids) {
                for (_, pending_blobs) in &all_entries_pending_blobs {
                    if let Some(blob) = pending_blobs.get(&blob_id).await? {
                        maybe_blobs[index].1 = Some(blob);
                        break;
                    }
                }
            }
        }
        // Stage 5: storage. Anything still `None` afterwards is genuinely missing.
        let (missing_indices, missing_blob_ids) = missing_indices_blob_ids(&maybe_blobs);
        let fourth_block_blobs = self.storage.read_blobs(&missing_blob_ids).await?;
        for (index, blob) in missing_indices.into_iter().zip(fourth_block_blobs) {
            maybe_blobs[index].1 = blob;
        }
        Ok(maybe_blobs.into_iter().collect())
    }
/// Adds any newly created chains to the set of `tracked_chains`, if the parent chain is
/// also tracked.
///
/// Chains that are not tracked are usually processed only because they sent some message
/// to one of the tracked chains. In most use cases, their children won't be of interest.
fn track_newly_created_chains(
&self,
proposed_block: &ProposedBlock,
outcome: &BlockExecutionOutcome,
) {
if let Some(tracked_chains) = self.tracked_chains.as_ref() {
if !tracked_chains
.read()
.expect("Panics should not happen while holding a lock to `tracked_chains`")
.contains(&proposed_block.chain_id)
{
return; // The parent chain is not tracked; don't track the child.
}
let new_chain_ids = outcome
.created_blobs_ids()
.into_iter()
.filter(|blob_id| blob_id.blob_type == BlobType::ChainDescription)
.map(|blob_id| ChainId(blob_id.hash));
tracked_chains
.write()
.expect("Panics should not happen while holding a lock to `tracked_chains`")
.extend(new_chain_ids);
}
}
    /// Loads pending cross-chain requests, and adds `NewRound` notifications where appropriate.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id()
    ))]
    async fn create_network_actions(
        &self,
        old_round: Option<Round>,
    ) -> Result<NetworkActions, WorkerError> {
        #[cfg(with_metrics)]
        let _latency = metrics::CREATE_NETWORK_ACTIONS_LATENCY.measure_latency();
        let mut heights_by_recipient = BTreeMap::<_, Vec<_>>::new();
        // Only consider recipients we actually track, if tracking is restricted.
        let mut targets = self.chain.nonempty_outbox_chain_ids();
        if let Some(tracked_chains) = self.tracked_chains.as_ref() {
            let tracked_chains = tracked_chains
                .read()
                .expect("Panics should not happen while holding a lock to `tracked_chains`");
            targets.retain(|target| tracked_chains.contains(target));
        }
        // Collect, per recipient, the queued block heights from its outbox.
        let outboxes = self.chain.load_outboxes(&targets).await?;
        for (target, outbox) in targets.into_iter().zip(outboxes) {
            let heights = outbox.queue.elements().await?;
            heights_by_recipient.insert(target, heights);
        }
        let cross_chain_requests = self
            .create_cross_chain_requests(heights_by_recipient)
            .await?;
        let mut notifications = Vec::new();
        // If the round advanced past `old_round`, notify listeners of the new round.
        if let Some(old_round) = old_round {
            let round = self.chain.manager.current_round();
            if round > old_round {
                let height = self.chain.tip_state.get().next_block_height;
                notifications.push(Notification {
                    chain_id: self.chain_id(),
                    reason: Reason::NewRound { height, round },
                });
            }
        }
        Ok(NetworkActions {
            cross_chain_requests,
            notifications,
        })
    }
    /// Builds one `UpdateRecipient` cross-chain request per recipient, carrying the
    /// message bundles from the blocks at the given heights. Blocks are resolved via
    /// the in-memory cache where possible, otherwise read from storage.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        num_recipients = %heights_by_recipient.len()
    ))]
    async fn create_cross_chain_requests(
        &self,
        heights_by_recipient: BTreeMap<ChainId, Vec<BlockHeight>>,
    ) -> Result<Vec<CrossChainRequest>, WorkerError> {
        // Load all the certificates we will need, regardless of the medium.
        let heights = BTreeSet::from_iter(heights_by_recipient.values().flatten().copied());
        let next_block_height = self.chain.tip_state.get().next_block_height;
        // Heights below the tip are resolved via the confirmed log...
        let log_heights = heights
            .range(..next_block_height)
            .copied()
            .map(usize::try_from)
            .collect::<Result<Vec<_>, _>>()?;
        // Zipping with the full sorted `heights` set is sound here assuming
        // `multi_get` yields one entry per queried height, in order: those entries
        // correspond to the smallest elements of `heights`, and `zip` stops there.
        let mut hashes = self
            .chain
            .confirmed_log
            .multi_get(log_heights)
            .await?
            .into_iter()
            .zip(&heights)
            .map(|(maybe_hash, height)| {
                maybe_hash.ok_or_else(|| WorkerError::ConfirmedLogEntryNotFound {
                    height: *height,
                    chain_id: self.chain_id(),
                })
            })
            .collect::<Result<Vec<_>, _>>()?;
        // ...heights at or above the tip come from the preprocessed blocks map.
        let requested_heights: Vec<BlockHeight> = heights
            .range(next_block_height..)
            .copied()
            .collect::<Vec<BlockHeight>>();
        for (height, hash) in self
            .chain
            .preprocessed_blocks
            .multi_get_pairs(requested_heights)
            .await?
        {
            let hash = hash.ok_or_else(|| WorkerError::PreprocessedBlocksEntryNotFound {
                height,
                chain_id: self.chain_id(),
            })?;
            hashes.push(hash);
        }
        // Resolve each hash to a block, consulting the value cache first.
        let mut uncached_hashes = Vec::new();
        let mut height_to_blocks: HashMap<BlockHeight, Hashed<Block>> = HashMap::new();
        for hash in hashes {
            if let Some(hashed_block) = self.block_values.get(&hash) {
                height_to_blocks.insert(hashed_block.inner().header.height, hashed_block);
            } else {
                uncached_hashes.push(hash);
            }
        }
        if !uncached_hashes.is_empty() {
            let certificates = self
                .storage
                .read_certificates(uncached_hashes.clone())
                .await?;
            let certificates = match ResultReadCertificates::new(certificates, uncached_hashes) {
                ResultReadCertificates::Certificates(certificates) => certificates,
                ResultReadCertificates::InvalidHashes(hashes) => {
                    return Err(WorkerError::ReadCertificatesError(hashes))
                }
            };
            for cert in certificates {
                let hashed_block = cert.into_value().into_inner();
                let height = hashed_block.inner().header.height;
                // Populate the cache so the next request avoids the storage read.
                self.block_values.insert(Cow::Owned(hashed_block.clone()));
                height_to_blocks.insert(height, hashed_block);
            }
        }
        // Assemble one `UpdateRecipient` request per recipient.
        let mut cross_chain_requests = Vec::new();
        for (recipient, heights) in heights_by_recipient {
            let mut bundles = Vec::new();
            for height in heights {
                let hashed_block = height_to_blocks
                    .get(&height)
                    .ok_or_else(|| ChainError::InternalError("missing block".to_string()))?;
                bundles.extend(
                    hashed_block
                        .inner()
                        .message_bundles_for(recipient, hashed_block.hash()),
                );
            }
            let request = CrossChainRequest::UpdateRecipient {
                sender: self.chain.chain_id(),
                recipient,
                bundles,
            };
            cross_chain_requests.push(request);
        }
        Ok(cross_chain_requests)
    }
/// Returns true if there are no more outgoing messages in flight up to the given
/// block height.
#[instrument(skip_all, fields(
chain_id = %self.chain_id(),
height = %height
))]
async fn all_messages_to_tracked_chains_delivered_up_to(
&self,
height: BlockHeight,
) -> Result<bool, WorkerError> {
if self.chain.all_messages_delivered_up_to(height) {
return Ok(true);
}
let Some(tracked_chains) = self.tracked_chains.as_ref() else {
return Ok(false);
};
let mut targets = self.chain.nonempty_outbox_chain_ids();
{
let tracked_chains = tracked_chains.read().unwrap();
targets.retain(|target| tracked_chains.contains(target));
}
let outboxes = self.chain.load_outboxes(&targets).await?;
for outbox in outboxes {
let front = outbox.queue.front().await?;
if front.is_some_and(|key| key <= height) {
return Ok(false);
}
}
Ok(true)
}
    /// Processes a leader timeout issued for this multi-owner chain.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        height = %certificate.inner().height()
    ))]
    async fn process_timeout(
        &mut self,
        certificate: TimeoutCertificate,
    ) -> Result<(ChainInfoResponse, NetworkActions), WorkerError> {
        // Check that the chain is active and ready for this timeout.
        // Verify the certificate. Returns a catch-all error to make client code more robust.
        self.initialize_and_save_if_needed().await?;
        let (chain_epoch, committee) = self.chain.current_committee()?;
        certificate.check(committee)?;
        // A timeout for an already-validated height is stale; return current info unchanged.
        if self
            .chain
            .tip_state
            .get()
            .already_validated_block(certificate.inner().height())?
        {
            return Ok((self.chain_info_response(), NetworkActions::default()));
        }
        ensure!(
            certificate.inner().epoch() == chain_epoch,
            WorkerError::InvalidEpoch {
                chain_id: certificate.inner().chain_id(),
                chain_epoch,
                epoch: certificate.inner().epoch()
            }
        );
        // Record the timeout; passing `old_round` below lets `create_network_actions`
        // emit a `NewRound` notification if the round advanced as a result.
        let old_round = self.chain.manager.current_round();
        self.chain
            .manager
            .handle_timeout_certificate(certificate, self.storage.clock().current_time());
        self.save().await?;
        let actions = self.create_network_actions(Some(old_round)).await?;
        Ok((self.chain_info_response(), actions))
    }
    /// Tries to load all blobs published in this proposal.
    ///
    /// If they cannot be found, it creates an entry in `pending_proposed_blobs` so they can be
    /// submitted one by one.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        block_height = %proposal.content.block.height
    ))]
    async fn load_proposal_blobs(
        &mut self,
        proposal: &BlockProposal,
    ) -> Result<Vec<Blob>, WorkerError> {
        let owner = proposal.owner();
        let BlockProposal {
            content:
                ProposalContent {
                    block,
                    round,
                    outcome: _,
                },
            original_proposal,
            signature: _,
        } = proposal;
        let mut maybe_blobs = self
            .maybe_get_required_blobs(proposal.required_blob_ids(), None)
            .await?;
        let missing_blob_ids = missing_blob_ids(&maybe_blobs);
        if !missing_blob_ids.is_empty() {
            // Persist the blobs we did find under the proposer's entry, so the
            // rest can be submitted one by one, then report the missing ids.
            let chain = &mut self.chain;
            if chain.ownership().open_multi_leader_rounds {
                // TODO(#3203): Allow multiple pending proposals on permissionless chains.
                chain.pending_proposed_blobs.clear();
            }
            // NOTE(review): `validated` marks proposals wrapping a regular original
            // proposal — confirm the exact semantics against `update`.
            let validated = matches!(original_proposal, Some(OriginalProposal::Regular { .. }));
            chain
                .pending_proposed_blobs
                .try_load_entry_mut(&owner)
                .await?
                .update(*round, validated, maybe_blobs)?;
            self.save().await?;
            return Err(WorkerError::BlobsNotFound(missing_blob_ids));
        }
        // Keep only the blobs the block actually publishes, in publication order.
        let published_blobs = block
            .published_blob_ids()
            .iter()
            .filter_map(|blob_id| maybe_blobs.remove(blob_id).flatten())
            .collect::<Vec<_>>();
        Ok(published_blobs)
    }
    /// Processes a validated block issued for this multi-owner chain.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        block_height = %certificate.block().header.height
    ))]
    async fn process_validated_block(
        &mut self,
        certificate: ValidatedBlockCertificate,
    ) -> Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError> {
        let block = certificate.block();
        let header = &block.header;
        let height = header.height;
        // Check that the chain is active and ready for this validated block.
        // Verify the certificate. Returns a catch-all error to make client code more robust.
        self.initialize_and_save_if_needed().await?;
        let tip_state = self.chain.tip_state.get();
        ensure!(
            header.height == tip_state.next_block_height,
            ChainError::UnexpectedBlockHeight {
                expected_block_height: tip_state.next_block_height,
                found_block_height: header.height,
            }
        );
        let (epoch, committee) = self.chain.current_committee()?;
        check_block_epoch(epoch, header.chain_id, header.epoch)?;
        certificate.check(committee)?;
        let already_committed_block = self.chain.tip_state.get().already_validated_block(height)?;
        // Closure so the manager check only runs if the height check didn't short-circuit.
        let should_skip_validated_block = || {
            self.chain
                .manager
                .check_validated_block(&certificate)
                .map(|outcome| outcome == manager::Outcome::Skip)
        };
        if already_committed_block || should_skip_validated_block()? {
            // If we just processed the same pending block, return the chain info unchanged.
            return Ok((
                self.chain_info_response(),
                NetworkActions::default(),
                BlockOutcome::Skipped,
            ));
        }
        // Cache the block value for later certificate handling.
        self.block_values
            .insert(Cow::Borrowed(certificate.inner().inner()));
        let required_blob_ids = block.required_blob_ids();
        let maybe_blobs = self
            .maybe_get_required_blobs(required_blob_ids, Some(&block.created_blobs()))
            .await?;
        let missing_blob_ids = missing_blob_ids(&maybe_blobs);
        if !missing_blob_ids.is_empty() {
            // Stash what we have so the missing blobs can be submitted separately.
            self.chain
                .pending_validated_blobs
                .update(certificate.round, true, maybe_blobs)?;
            self.save().await?;
            return Err(WorkerError::BlobsNotFound(missing_blob_ids));
        }
        let blobs = maybe_blobs
            .into_iter()
            .filter_map(|(blob_id, maybe_blob)| Some((blob_id, maybe_blob?)))
            .collect();
        // Vote to confirm; `old_round` lets `create_network_actions` detect a round change.
        let old_round = self.chain.manager.current_round();
        self.chain.manager.create_final_vote(
            certificate,
            self.config.key_pair(),
            self.storage.clock().current_time(),
            blobs,
        )?;
        self.save().await?;
        let actions = self.create_network_actions(Some(old_round)).await?;
        Ok((self.chain_info_response(), actions, BlockOutcome::Processed))
    }
/// Processes a confirmed block (aka a commit).
#[instrument(skip_all, fields(
chain_id = %certificate.block().header.chain_id,
height = %certificate.block().header.height,
block_hash = %certificate.hash(),
))]
async fn process_confirmed_block(
&mut self,
certificate: ConfirmedBlockCertificate,
notify_when_messages_are_delivered: Option<oneshot::Sender<()>>,
) -> Result<(ChainInfoResponse, NetworkActions, BlockOutcome), WorkerError> {
let block = certificate.block();
let block_hash = certificate.hash();
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/chain_worker/mod.rs | linera-core/src/chain_worker/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A worker to handle a single chain.
mod actor;
mod config;
mod delivery_notifier;
mod state;
pub(super) use self::delivery_notifier::DeliveryNotifier;
#[cfg(test)]
pub(crate) use self::state::CrossChainUpdateHelper;
pub(crate) use self::{
actor::{ChainWorkerActor, ChainWorkerRequest, EventSubscriptionsResult},
config::ChainWorkerConfig,
state::BlockOutcome,
};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/environment/mod.rs | linera-core/src/environment/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
pub mod wallet;
use linera_base::util::traits::AutoTraits;
trait_set::trait_set! {
pub trait Network = crate::node::ValidatorNodeProvider + AutoTraits;
pub trait Signer = linera_base::crypto::Signer + AutoTraits;
// TODO(#5064): we shouldn't hard-code `Send` + `Sync` here
pub trait Storage = linera_storage::Storage + Clone + AutoTraits;
pub trait Wallet = wallet::Wallet + AutoTraits;
}
/// The bundle of capabilities a client needs: storage, network access, a signer,
/// and a wallet, each exposed through an associated type plus an accessor.
pub trait Environment: AutoTraits {
    /// The storage backend holding chain states.
    type Storage: Storage<Context = Self::StorageContext>;
    /// The validator node provider used to reach the network.
    type Network: Network<Node = Self::ValidatorNode>;
    /// The signer used to produce signatures on behalf of this client.
    type Signer: Signer;
    /// The wallet tracking the client's chains.
    type Wallet: Wallet;
    /// The concrete validator node type produced by [`Self::Network`].
    type ValidatorNode: crate::node::ValidatorNode + AutoTraits + Clone;
    // TODO(#5064): we shouldn't hard-code `Send` + `Sync` here
    type StorageContext: linera_views::context::Context<Extra: linera_execution::ExecutionRuntimeContext>
        + AutoTraits;
    /// Returns the storage backend.
    fn storage(&self) -> &Self::Storage;
    /// Returns the network provider.
    fn network(&self) -> &Self::Network;
    /// Returns the signer.
    fn signer(&self) -> &Self::Signer;
    /// Returns the wallet.
    fn wallet(&self) -> &Self::Wallet;
}
/// A plain-struct [`Environment`] implementation that simply bundles the four
/// components. Defaults to an in-memory signer and wallet.
pub struct Impl<
    Storage,
    Network,
    Signer = linera_base::crypto::InMemorySigner,
    Wallet = wallet::Memory,
> {
    pub storage: Storage,
    pub network: Network,
    pub signer: Signer,
    pub wallet: Wallet,
}
/// Forwarding implementation: each accessor returns the corresponding field.
impl<St: Storage, N: Network, Si: Signer, W: Wallet> Environment for Impl<St, N, Si, W> {
    type Storage = St;
    type Network = N;
    type Signer = Si;
    type Wallet = W;
    type ValidatorNode = N::Node;
    type StorageContext = St::Context;
    fn storage(&self) -> &St {
        &self.storage
    }
    fn network(&self) -> &N {
        &self.network
    }
    fn signer(&self) -> &Si {
        &self.signer
    }
    fn wallet(&self) -> &W {
        &self.wallet
    }
}
// Convenience aliases for the in-memory test environment; only compiled when the
// `with_testing` cfg flag is active.
cfg_if::cfg_if! {
    if #[cfg(with_testing)] {
        pub type TestStorage = linera_storage::DbStorage<linera_views::memory::MemoryDatabase, linera_storage::TestClock>;
        pub type TestNetwork = crate::test_utils::NodeProvider<TestStorage>;
        pub type TestSigner = linera_base::crypto::InMemorySigner;
        pub type TestWallet = crate::wallet::Memory;
        pub type Test = Impl<TestStorage, TestNetwork, TestSigner, TestWallet>;
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/environment/wallet/memory.rs | linera-core/src/environment/wallet/memory.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use futures::{Stream, StreamExt as _};
use linera_base::identifiers::ChainId;
use super::{Chain, Wallet};
/// A basic implementation of `Wallet` that doesn't persist anything and merely tracks the
/// chains in memory.
///
/// This can be used as-is as an ephemeral wallet for testing or ephemeral clients, or as
/// a building block for more complex wallets that layer persistence on top of it.
///
/// Backed by a `papaya` concurrent hash map, so all operations take `&self`.
#[derive(Default, Clone, serde::Serialize, serde::Deserialize)]
pub struct Memory(papaya::HashMap<ChainId, Chain>);
impl Memory {
    /// Returns a clone of the chain state stored under `id`, if any.
    pub fn get(&self, id: ChainId) -> Option<Chain> {
        // `pin()` produces a guard for accessing the concurrent papaya map.
        self.0.pin().get(&id).cloned()
    }
    /// Inserts `chain` under `id`, returning the previously stored value if any.
    pub fn insert(&self, id: ChainId, chain: Chain) -> Option<Chain> {
        self.0.pin().insert(id, chain).cloned()
    }
    /// Inserts only if `id` is absent. On conflict, returns the value that was
    /// not inserted (the argument), leaving the existing entry untouched.
    pub fn try_insert(&self, id: ChainId, chain: Chain) -> Option<Chain> {
        match self.0.pin().try_insert(id, chain) {
            Ok(_inserted) => None,
            Err(error) => Some(error.not_inserted),
        }
    }
    /// Removes and returns the chain stored under `id`, if any.
    pub fn remove(&self, id: ChainId) -> Option<Chain> {
        self.0.pin().remove(&id).cloned()
    }
    /// Returns a snapshot of all `(ChainId, Chain)` pairs.
    pub fn items(&self) -> Vec<(ChainId, Chain)> {
        self.0
            .pin()
            .iter()
            .map(|(id, chain)| (*id, chain.clone()))
            .collect::<Vec<_>>()
    }
    /// Returns the ids of all tracked chains.
    pub fn chain_ids(&self) -> Vec<ChainId> {
        self.0.pin().keys().copied().collect::<Vec<_>>()
    }
    /// Returns the ids of chains that have an owner set.
    pub fn owned_chain_ids(&self) -> Vec<ChainId> {
        self.0
            .pin()
            .iter()
            .filter_map(|(id, chain)| chain.owner.as_ref().map(|_| *id))
            .collect::<Vec<_>>()
    }
    /// Applies `mutate` to a copy of the chain under `chain_id` and writes the
    /// result back. Returns `None` if the chain does not exist.
    ///
    /// NOTE(review): papaya's `compute` may re-run the closure on contention, so
    /// `mutate` can be invoked more than once — confirm it is safe to repeat.
    pub fn mutate<R>(
        &self,
        chain_id: ChainId,
        mut mutate: impl FnMut(&mut Chain) -> R,
    ) -> Option<R> {
        use papaya::Operation::*;
        let mut outcome = None;
        self.0.pin().compute(chain_id, |chain| {
            if let Some((_, chain)) = chain {
                let mut chain = chain.clone();
                outcome = Some(mutate(&mut chain));
                Insert(chain)
            } else {
                Abort(())
            }
        });
        outcome
    }
}
impl Extend<(ChainId, Chain)> for Memory {
    /// Inserts every given `(id, chain)` pair, overwriting existing entries.
    fn extend<It: IntoIterator<Item = (ChainId, Chain)>>(&mut self, chains: It) {
        let map = self.0.pin();
        chains.into_iter().for_each(|(id, chain)| {
            let _ = map.insert(id, chain);
        });
    }
}
/// [`Wallet`] implementation that forwards to the synchronous in-memory methods
/// above; all operations are infallible.
impl Wallet for Memory {
    type Error = std::convert::Infallible;
    async fn get(&self, id: ChainId) -> Result<Option<Chain>, Self::Error> {
        Ok(self.get(id))
    }
    async fn insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error> {
        Ok(self.insert(id, chain))
    }
    async fn try_insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error> {
        Ok(self.try_insert(id, chain))
    }
    async fn remove(&self, id: ChainId) -> Result<Option<Chain>, Self::Error> {
        Ok(self.remove(id))
    }
    fn items(&self) -> impl Stream<Item = Result<(ChainId, Chain), Self::Error>> {
        // Streams over a snapshot taken by the inherent `items` method.
        futures::stream::iter(self.items()).map(Ok)
    }
    async fn modify(
        &self,
        id: ChainId,
        f: impl FnMut(&mut Chain) + Send,
    ) -> Result<Option<()>, Self::Error> {
        Ok(self.mutate(id, f))
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/environment/wallet/mod.rs | linera-core/src/environment/wallet/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::ops::Deref;
use futures::{Stream, StreamExt as _, TryStreamExt as _};
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, ChainDescription, Epoch, Timestamp},
identifiers::{AccountOwner, ChainId},
};
use crate::{client::PendingProposal, data_types::ChainInfo};
mod memory;
pub use memory::Memory;
#[derive(Default, Clone, serde::Serialize, serde::Deserialize)]
pub struct Chain {
    /// The account owner we use on this chain; `None` for chains we merely follow.
    pub owner: Option<AccountOwner>,
    /// Hash of the latest known block, if any block has been seen yet.
    pub block_hash: Option<CryptoHash>,
    /// The height the next block on this chain is expected to have.
    pub next_block_height: BlockHeight,
    /// Timestamp associated with the latest known chain state.
    pub timestamp: Timestamp,
    /// A block proposal we started but haven't finalized, if any.
    pub pending_proposal: Option<PendingProposal>,
    /// The chain's current epoch, when known.
    pub epoch: Option<Epoch>,
    /// If true, we only follow this chain's blocks without downloading sender chain blocks
    /// or participating in consensus rounds. Use this for chains we're interested in observing
    /// but don't intend to propose blocks for.
    #[serde(default)]
    pub follow_only: bool,
}
impl From<&ChainInfo> for Chain {
fn from(info: &ChainInfo) -> Self {
Self {
owner: None,
block_hash: info.block_hash,
next_block_height: info.next_block_height,
timestamp: info.timestamp,
pending_proposal: None,
epoch: Some(info.epoch),
follow_only: false,
}
}
}
impl From<ChainInfo> for Chain {
    /// Delegates to the by-reference conversion.
    fn from(info: ChainInfo) -> Self {
        (&info).into()
    }
}
impl From<&ChainDescription> for Chain {
fn from(description: &ChainDescription) -> Self {
Self::new(None, description.config().epoch, description.timestamp())
}
}
impl From<ChainDescription> for Chain {
    /// Delegates to the by-reference conversion.
    fn from(description: ChainDescription) -> Self {
        Self::from(&description)
    }
}
impl Chain {
/// Create a chain that we haven't interacted with before.
pub fn new(owner: Option<AccountOwner>, current_epoch: Epoch, now: Timestamp) -> Self {
Self {
owner,
block_hash: None,
timestamp: now,
next_block_height: BlockHeight::ZERO,
pending_proposal: None,
epoch: Some(current_epoch),
follow_only: false,
}
}
}
/// A trait for the wallet (i.e. set of chain states) tracked by the client.
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait Wallet {
    /// The error type produced by wallet operations.
    type Error: std::error::Error + Send + Sync;
    /// Returns the chain state for `id`, if tracked.
    async fn get(&self, id: ChainId) -> Result<Option<Chain>, Self::Error>;
    /// Removes and returns the chain state for `id`, if tracked.
    async fn remove(&self, id: ChainId) -> Result<Option<Chain>, Self::Error>;
    /// Streams all tracked `(ChainId, Chain)` pairs.
    fn items(&self) -> impl Stream<Item = Result<(ChainId, Chain), Self::Error>>;
    /// Inserts a chain, returning the previously stored state if any.
    async fn insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error>;
    /// Inserts a chain only if absent; on conflict returns the value that was not
    /// inserted (mirroring the in-memory implementation).
    async fn try_insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error>;
    /// Streams the ids of all tracked chains.
    fn chain_ids(&self) -> impl Stream<Item = Result<ChainId, Self::Error>> {
        self.items().map(|result| result.map(|kv| kv.0))
    }
    /// Streams the ids of chains that have an owner set.
    fn owned_chain_ids(&self) -> impl Stream<Item = Result<ChainId, Self::Error>> {
        self.items()
            .try_filter_map(|(id, chain)| async move { Ok(chain.owner.map(|_| id)) })
    }
    /// Modifies a chain in the wallet. Returns `Ok(None)` if the chain doesn't exist.
    async fn modify(
        &self,
        id: ChainId,
        f: impl FnMut(&mut Chain) + Send,
    ) -> Result<Option<()>, Self::Error>;
}
/// Blanket implementation: anything that dereferences to a `Wallet` (e.g. references and
/// smart pointers) is itself a `Wallet`, forwarding every method to the dereference target.
impl<W: Deref<Target: Wallet> + linera_base::util::traits::AutoTraits> Wallet for W {
    type Error = <W::Target as Wallet>::Error;
    async fn get(&self, id: ChainId) -> Result<Option<Chain>, Self::Error> {
        self.deref().get(id).await
    }
    async fn remove(&self, id: ChainId) -> Result<Option<Chain>, Self::Error> {
        self.deref().remove(id).await
    }
    fn items(&self) -> impl Stream<Item = Result<(ChainId, Chain), Self::Error>> {
        self.deref().items()
    }
    async fn insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error> {
        self.deref().insert(id, chain).await
    }
    async fn try_insert(&self, id: ChainId, chain: Chain) -> Result<Option<Chain>, Self::Error> {
        self.deref().try_insert(id, chain).await
    }
    // Also forward the defaulted methods, so implementors' overrides are respected.
    fn chain_ids(&self) -> impl Stream<Item = Result<ChainId, Self::Error>> {
        self.deref().chain_ids()
    }
    fn owned_chain_ids(&self) -> impl Stream<Item = Result<ChainId, Self::Error>> {
        self.deref().owned_chain_ids()
    }
    async fn modify(
        &self,
        id: ChainId,
        f: impl FnMut(&mut Chain) + Send,
    ) -> Result<Option<()>, Self::Error> {
        self.deref().modify(id, f).await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/received_log.rs | linera-core/src/client/received_log.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BTreeMap, BTreeSet};
use linera_base::{crypto::ValidatorPublicKey, data_types::BlockHeight, identifiers::ChainId};
use linera_chain::data_types::ChainAndHeight;
/// Struct keeping track of the blocks sending messages to some chain, from chains identified by
/// the keys in the map.
///
/// For each sender chain, the inner map records the block heights reported, and for each
/// height the set of validators that reported it.
#[derive(Clone)]
pub(super) struct ReceivedLogs(
    BTreeMap<ChainId, BTreeMap<BlockHeight, BTreeSet<ValidatorPublicKey>>>,
);
impl ReceivedLogs {
    /// Converts a set of logs received from validators into a single log.
    pub(super) fn from_received_result(
        result: Vec<(ValidatorPublicKey, Vec<ChainAndHeight>)>,
    ) -> Self {
        let mut combined = Self(BTreeMap::new());
        for (validator, received_log) in result {
            for entry in received_log {
                combined
                    .0
                    .entry(entry.chain_id)
                    .or_default()
                    .entry(entry.height)
                    .or_default()
                    .insert(validator);
            }
        }
        combined
    }

    fn from_iterator<I: IntoIterator<Item = (ChainAndHeight, ValidatorPublicKey)>>(
        iterator: I,
    ) -> Self {
        let mut log = Self(BTreeMap::new());
        for (chain_and_height, validator) in iterator {
            log.0
                .entry(chain_and_height.chain_id)
                .or_default()
                .entry(chain_and_height.height)
                .or_default()
                .insert(validator);
        }
        log
    }

    /// Returns a map that assigns to each chain ID the set of heights. The returned map contains
    /// no empty values.
    pub(super) fn heights_per_chain(&self) -> BTreeMap<ChainId, BTreeSet<BlockHeight>> {
        let mut result = BTreeMap::new();
        for (chain_id, heights) in &self.0 {
            result.insert(*chain_id, heights.keys().cloned().collect());
        }
        result
    }

    /// Returns whether a given validator should have a block at the given chain and
    /// height, according to this log.
    pub(super) fn validator_has_block(
        &self,
        validator: &ValidatorPublicKey,
        chain_id: ChainId,
        height: BlockHeight,
    ) -> bool {
        match self.0.get(&chain_id).and_then(|heights| heights.get(&height)) {
            Some(validators) => validators.contains(validator),
            None => false,
        }
    }

    /// Returns the number of chains that sent messages according to this log.
    pub(super) fn num_chains(&self) -> usize {
        self.0.len()
    }

    /// Returns the total number of certificates recorded in this log.
    pub(super) fn num_certs(&self) -> usize {
        let mut total = 0;
        for heights in self.0.values() {
            total += heights.len();
        }
        total
    }

    /// An iterator over the chains in the log.
    pub(super) fn chains(&self) -> impl Iterator<Item = &'_ ChainId> + '_ {
        self.0.keys()
    }

    /// Gets a mutable reference to the map of heights for the given chain.
    pub(super) fn get_chain_mut(
        &mut self,
        chain_id: &ChainId,
    ) -> Option<&mut BTreeMap<BlockHeight, BTreeSet<ValidatorPublicKey>>> {
        self.0.get_mut(chain_id)
    }

    /// Splits this `ReceivedLogs` into batches of size `batch_size`. Batches are sorted
    /// by chain ID and height.
    pub(super) fn into_batches(
        self,
        batch_size: usize,
        max_blocks_per_chain: usize,
    ) -> impl Iterator<Item = ReceivedLogs> {
        BatchingHelper::new(self.0, batch_size, max_blocks_per_chain)
    }
}
/// Iterator adapter lazily yielding batches of size `self.batch_size` from
/// `heights`.
/// Given sets of heights per chain for some chains, it will return a batch containing up to
/// `max_blocks_per_chain` heights from the first chain, up to `max_blocks_per_chain` from the
/// second chain, etc., up to `batch_size` in total. If it runs out of chains before the batch is
/// full, it will restart adding up to `max_blocks_per_chain` from the first chain.
/// This way we will get batches that are fairly balanced between the number of chains and number
/// of blocks per chain.
struct BatchingHelper {
    /// Chain IDs that still have unconsumed heights, in sorted (`BTreeMap` key) order.
    keys: Vec<ChainId>,
    /// Remaining heights per chain, each with the validators that reported it.
    heights: BTreeMap<ChainId, BTreeMap<BlockHeight, BTreeSet<ValidatorPublicKey>>>,
    /// Maximum total number of heights emitted per batch.
    batch_size: usize,
    /// Maximum number of consecutive heights taken from a single chain before moving on.
    max_blocks_per_chain: usize,
    /// Index into `keys` of the chain currently being drawn from.
    current_chain: usize,
    /// Heights taken from `current_chain` in the current per-chain run.
    current_taken_from_single_chain: usize,
    /// Heights emitted into the current batch so far.
    current_batch_counter: usize,
}
impl BatchingHelper {
    /// Creates a helper over `heights`; `keys` caches the chain IDs so we can cycle over
    /// chains by index while removing exhausted ones.
    // NOTE(review): assumes `batch_size >= 1` and `max_blocks_per_chain >= 1` — the `- 1`
    // arithmetic in `next_chain_and_height` would underflow on zero. Confirm callers.
    fn new(
        heights: BTreeMap<ChainId, BTreeMap<BlockHeight, BTreeSet<ValidatorPublicKey>>>,
        batch_size: usize,
        max_blocks_per_chain: usize,
    ) -> Self {
        let keys = heights.keys().copied().collect();
        Self {
            keys,
            heights,
            batch_size,
            max_blocks_per_chain,
            current_chain: 0,
            current_taken_from_single_chain: 0,
            current_batch_counter: 0,
        }
    }
    /// Pops the next (chain, height) entry in round-robin order, together with the set of
    /// validators that reported it. Returns `None` once all heights are consumed.
    fn next_chain_and_height(&mut self) -> Option<(ChainAndHeight, BTreeSet<ValidatorPublicKey>)> {
        if self.keys.is_empty() {
            return None;
        }
        // Skip (and drop) exhausted chains until we find one with heights left.
        let (chain_id, maybe_heights) = loop {
            let chain_id = self.keys[self.current_chain];
            if self.heights[&chain_id].is_empty() {
                self.heights.remove(&chain_id);
                self.keys.remove(self.current_chain);
                self.current_taken_from_single_chain = 0;
                // Removal shifted the remaining keys left; wrap the cursor if it fell off.
                if self.current_chain >= self.keys.len() {
                    self.current_chain = 0;
                }
                if self.keys.is_empty() {
                    return None;
                }
            } else {
                break (chain_id, self.heights.get_mut(&chain_id));
            }
        };
        // After `max_blocks_per_chain` entries in a row, advance (cyclically) to the next chain.
        if self.current_taken_from_single_chain < self.max_blocks_per_chain - 1 {
            self.current_taken_from_single_chain += 1;
        } else {
            self.current_taken_from_single_chain = 0;
            if self.current_chain < self.keys.len() - 1 {
                self.current_chain += 1;
            } else {
                self.current_chain = 0;
            }
        }
        // At a batch boundary, reset the per-chain run so the next batch starts fresh.
        if self.current_batch_counter < self.batch_size - 1 {
            self.current_batch_counter += 1;
        } else {
            self.current_taken_from_single_chain = 0;
            self.current_batch_counter = 0;
        }
        // Pop the smallest remaining height of the selected chain.
        maybe_heights
            .and_then(|heights| heights.pop_first())
            .map(|(height, validators)| (ChainAndHeight { chain_id, height }, validators))
    }
}
impl Iterator for BatchingHelper {
    type Item = ReceivedLogs;

    /// Assembles the next batch: up to `batch_size` (chain, height) entries, each expanded
    /// into one `(ChainAndHeight, validator)` pair per reporting validator. Yields `None`
    /// when no entries remain.
    fn next(&mut self) -> Option<Self::Item> {
        let mut pairs = Vec::new();
        for _ in 0..self.batch_size {
            let Some((chain_and_height, validators)) = self.next_chain_and_height() else {
                break;
            };
            for validator in validators {
                pairs.push((chain_and_height, validator));
            }
        }
        let batch = ReceivedLogs::from_iterator(pairs);
        if batch.num_chains() > 0 {
            Some(batch)
        } else {
            None
        }
    }
}
#[cfg(test)]
mod tests {
    use linera_base::{
        crypto::{CryptoHash, ValidatorKeypair},
        identifiers::ChainId,
    };
    use linera_chain::data_types::ChainAndHeight;
    use super::ReceivedLogs;
    // Two chains with roughly balanced heights: batching should interleave them.
    #[test]
    fn test_received_log_batching() {
        let (chain1, chain2) = {
            // make sure that chain1 is lexicographically earlier than chain2
            let chain_a = ChainId(CryptoHash::test_hash("chain_a"));
            let chain_b = ChainId(CryptoHash::test_hash("chain_b"));
            if chain_a < chain_b {
                (chain_a, chain_b)
            } else {
                (chain_b, chain_a)
            }
        };
        let validator = ValidatorKeypair::generate().public_key;
        let test_log = ReceivedLogs::from_received_result(vec![(
            validator,
            vec![
                (chain1, 1),
                (chain1, 2),
                (chain2, 1),
                (chain1, 3),
                (chain2, 2),
                (chain2, 3),
                (chain1, 4),
                (chain1, 5),
                (chain2, 4),
            ]
            .into_iter()
            .map(|(chain_id, height)| ChainAndHeight {
                chain_id,
                height: height.into(),
            })
            .collect(),
        )]);
        // Batch size 2, at most 1 block per chain: alternate chains on every entry.
        let batches = test_log.clone().into_batches(2, 1).collect::<Vec<_>>();
        assert_eq!(batches.len(), 5);
        assert_eq!(
            batches
                .iter()
                .map(|batch| batch.num_chains())
                .collect::<Vec<_>>(),
            vec![2, 2, 2, 2, 1]
        );
        let chains_heights = batches
            .into_iter()
            .map(|batch| {
                batch
                    .heights_per_chain()
                    .into_iter()
                    .flat_map(|(chain_id, heights)| {
                        heights.into_iter().map(move |height| (chain_id, height.0))
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        assert_eq!(
            chains_heights,
            vec![
                vec![(chain1, 1), (chain2, 1)],
                vec![(chain1, 2), (chain2, 2)],
                vec![(chain1, 3), (chain2, 3)],
                vec![(chain1, 4), (chain2, 4)],
                vec![(chain1, 5)]
            ]
        );
        // Check with 2 blocks per chain:
        let batches = test_log.into_batches(2, 2).collect::<Vec<_>>();
        assert_eq!(batches.len(), 5);
        assert_eq!(
            batches
                .iter()
                .map(|batch| batch.num_chains())
                .collect::<Vec<_>>(),
            vec![1, 1, 1, 1, 1]
        );
        let chains_heights = batches
            .into_iter()
            .map(|batch| {
                batch
                    .heights_per_chain()
                    .into_iter()
                    .flat_map(|(chain_id, heights)| {
                        heights.into_iter().map(move |height| (chain_id, height.0))
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        assert_eq!(
            chains_heights,
            vec![
                vec![(chain1, 1), (chain1, 2)],
                vec![(chain2, 1), (chain2, 2)],
                vec![(chain1, 3), (chain1, 4)],
                vec![(chain2, 3), (chain2, 4)],
                vec![(chain1, 5)]
            ]
        );
    }
    // One chain has many more heights than the other: once the short chain runs out,
    // batches should be filled from the remaining chain alone.
    #[test]
    fn test_received_log_batching_unbalanced() {
        let (chain1, chain2) = {
            // make sure that chain1 is lexicographically earlier than chain2
            let chain_a = ChainId(CryptoHash::test_hash("chain_a"));
            let chain_b = ChainId(CryptoHash::test_hash("chain_b"));
            if chain_a < chain_b {
                (chain_a, chain_b)
            } else {
                (chain_b, chain_a)
            }
        };
        let validator = ValidatorKeypair::generate().public_key;
        let test_log = ReceivedLogs::from_received_result(vec![(
            validator,
            vec![
                (chain1, 1),
                (chain1, 2),
                (chain2, 1),
                (chain1, 3),
                (chain2, 2),
                (chain1, 4),
                (chain1, 5),
                (chain1, 6),
                (chain1, 7),
            ]
            .into_iter()
            .map(|(chain_id, height)| ChainAndHeight {
                chain_id,
                height: height.into(),
            })
            .collect(),
        )]);
        let batches = test_log.clone().into_batches(2, 1).collect::<Vec<_>>();
        assert_eq!(batches.len(), 5);
        assert_eq!(
            batches
                .iter()
                .map(|batch| batch.num_chains())
                .collect::<Vec<_>>(),
            vec![2, 2, 1, 1, 1]
        );
        let chains_heights = batches
            .into_iter()
            .map(|batch| {
                batch
                    .heights_per_chain()
                    .into_iter()
                    .flat_map(|(chain_id, heights)| {
                        heights.into_iter().map(move |height| (chain_id, height.0))
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        assert_eq!(
            chains_heights,
            vec![
                vec![(chain1, 1), (chain2, 1)],
                vec![(chain1, 2), (chain2, 2)],
                vec![(chain1, 3), (chain1, 4)],
                vec![(chain1, 5), (chain1, 6)],
                vec![(chain1, 7)]
            ]
        );
        // Check with batches of 3, 2 blocks per chain
        let batches = test_log.into_batches(3, 2).collect::<Vec<_>>();
        assert_eq!(batches.len(), 3);
        assert_eq!(
            batches
                .iter()
                .map(|batch| batch.num_chains())
                .collect::<Vec<_>>(),
            vec![2, 2, 1]
        );
        let chains_heights = batches
            .into_iter()
            .map(|batch| {
                batch
                    .heights_per_chain()
                    .into_iter()
                    .flat_map(|(chain_id, heights)| {
                        heights.into_iter().map(move |height| (chain_id, height.0))
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        assert_eq!(
            chains_heights,
            vec![
                vec![(chain1, 1), (chain1, 2), (chain2, 1)],
                vec![(chain1, 3), (chain1, 4), (chain2, 2)],
                vec![(chain1, 5), (chain1, 6), (chain1, 7)],
            ]
        );
    }
    // Two validators with overlapping but unequal logs: the merged log must remember
    // exactly which validator reported which block.
    #[test]
    fn test_multiple_validators() {
        let (chain1, chain2) = {
            // make sure that chain1 is lexicographically earlier than chain2
            let chain_a = ChainId(CryptoHash::test_hash("chain_a"));
            let chain_b = ChainId(CryptoHash::test_hash("chain_b"));
            if chain_a < chain_b {
                (chain_a, chain_b)
            } else {
                (chain_b, chain_a)
            }
        };
        let validator1 = ValidatorKeypair::generate().public_key;
        let validator2 = ValidatorKeypair::generate().public_key;
        let test_log = ReceivedLogs::from_received_result(vec![
            (
                validator1,
                vec![
                    (chain1, 1),
                    (chain1, 2),
                    (chain2, 1),
                    (chain1, 3),
                    (chain2, 2),
                    (chain1, 4),
                    (chain1, 5),
                    (chain1, 6),
                    // validator 1 does not have (chain1, 7)
                ]
                .into_iter()
                .map(|(chain_id, height)| ChainAndHeight {
                    chain_id,
                    height: height.into(),
                })
                .collect(),
            ),
            (
                validator2,
                vec![
                    (chain1, 1),
                    (chain1, 2),
                    (chain1, 3),
                    (chain1, 4),
                    (chain1, 5),
                    (chain2, 1),
                    (chain1, 6),
                    (chain1, 7),
                    // validator2 does not have (chain2, 2)
                ]
                .into_iter()
                .map(|(chain_id, height)| ChainAndHeight {
                    chain_id,
                    height: height.into(),
                })
                .collect(),
            ),
        ]);
        let batches = test_log.clone().into_batches(2, 1).collect::<Vec<_>>();
        assert_eq!(batches.len(), 5);
        assert_eq!(
            batches
                .iter()
                .map(|batch| batch.num_chains())
                .collect::<Vec<_>>(),
            vec![2, 2, 1, 1, 1]
        );
        // Check that we know which validators have which blocks.
        assert!(batches[0].validator_has_block(&validator1, chain2, 1.into()));
        assert!(batches[0].validator_has_block(&validator2, chain2, 1.into()));
        assert!(batches[1].validator_has_block(&validator1, chain2, 2.into()));
        assert!(!batches[1].validator_has_block(&validator2, chain2, 2.into()));
        assert!(!batches[4].validator_has_block(&validator1, chain1, 7.into()));
        assert!(batches[4].validator_has_block(&validator2, chain1, 7.into()));
        let chains_heights = batches
            .into_iter()
            .map(|batch| {
                batch
                    .heights_per_chain()
                    .into_iter()
                    .flat_map(|(chain_id, heights)| {
                        heights.into_iter().map(move |height| (chain_id, height.0))
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();
        assert_eq!(
            chains_heights,
            vec![
                vec![(chain1, 1), (chain2, 1)],
                vec![(chain1, 2), (chain2, 2)],
                vec![(chain1, 3), (chain1, 4)],
                vec![(chain1, 5), (chain1, 6)],
                vec![(chain1, 7)]
            ]
        );
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/mod.rs | linera-core/src/client/mod.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
cmp::Ordering,
collections::{BTreeMap, BTreeSet, HashSet},
sync::{Arc, RwLock},
};
use custom_debug_derive::Debug;
use futures::{
future::Future,
stream::{self, AbortHandle, FuturesUnordered, StreamExt},
};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{ArithmeticError, Blob, BlockHeight, ChainDescription, Epoch, TimeDelta},
ensure,
identifiers::{AccountOwner, BlobId, BlobType, ChainId, GenericApplicationId, StreamId},
time::Duration,
};
#[cfg(not(target_arch = "wasm32"))]
use linera_base::{data_types::Bytecode, identifiers::ModuleId, vm::VmRuntime};
use linera_chain::{
data_types::{
BlockProposal, ChainAndHeight, IncomingBundle, LiteVote, MessageAction, ProposedBlock,
Transaction,
},
manager::LockingBlock,
types::{
Block, CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate,
LiteCertificate, ValidatedBlock, ValidatedBlockCertificate,
},
ChainError, ChainExecutionContext,
};
use linera_execution::committee::Committee;
use linera_storage::{ResultReadCertificates, Storage as _};
use rand::{
distributions::{Distribution, WeightedIndex},
seq::SliceRandom,
};
use received_log::ReceivedLogs;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument, trace, warn};
use crate::{
data_types::{ChainInfo, ChainInfoQuery, ChainInfoResponse, RoundTimeout},
environment::Environment,
local_node::{LocalChainInfoExt as _, LocalNodeClient, LocalNodeError},
node::{CrossChainMessageDelivery, NodeError, ValidatorNodeProvider as _},
notifier::{ChannelNotifier, Notifier as _},
remote_node::RemoteNode,
updater::{communicate_with_quorum, CommunicateAction, ValidatorUpdater},
worker::{Notification, ProcessableCertificate, Reason, WorkerError, WorkerState},
CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES,
};
pub mod chain_client;
pub use chain_client::ChainClient;
pub use crate::data_types::ClientOutcome;
#[cfg(test)]
#[path = "../unit_tests/client_tests.rs"]
mod client_tests;
pub mod requests_scheduler;
pub use requests_scheduler::{RequestsScheduler, RequestsSchedulerConfig, ScoringWeights};
mod received_log;
mod validator_trackers;
/// Prometheus latency histograms for the client's main operations; only compiled in when
/// the `with_metrics` configuration is enabled.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// Latency histogram for `process_inbox` (without the prepare step).
    pub static PROCESS_INBOX_WITHOUT_PREPARE_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "process_inbox_latency",
                "process_inbox latency",
                &[],
                exponential_bucket_latencies(500.0),
            )
        });
    /// Latency histogram for `prepare_chain`.
    pub static PREPARE_CHAIN_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "prepare_chain_latency",
            "prepare_chain latency",
            &[],
            exponential_bucket_latencies(500.0),
        )
    });
    /// Latency histogram for `synchronize_chain_state`.
    pub static SYNCHRONIZE_CHAIN_STATE_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "synchronize_chain_state_latency",
            "synchronize_chain_state latency",
            &[],
            exponential_bucket_latencies(500.0),
        )
    });
    /// Latency histogram for `execute_block`.
    pub static EXECUTE_BLOCK_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "execute_block_latency",
            "execute_block latency",
            &[],
            exponential_bucket_latencies(500.0),
        )
    });
    /// Latency histogram for `find_received_certificates`.
    pub static FIND_RECEIVED_CERTIFICATES_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "find_received_certificates_latency",
            "find_received_certificates latency",
            &[],
            exponential_bucket_latencies(500.0),
        )
    });
}
/// Default number of certificates requested per batch when downloading a chain's blocks.
pub static DEFAULT_CERTIFICATE_DOWNLOAD_BATCH_SIZE: u64 = 500;
/// Default batch size for downloading certificates from sender chains.
// NOTE(review): exact consumers of these defaults are outside this chunk — confirm usage.
pub static DEFAULT_SENDER_CERTIFICATE_DOWNLOAD_BATCH_SIZE: usize = 20_000;
/// Policies for automatically handling incoming messages.
#[derive(Clone, Debug)]
pub struct MessagePolicy {
    /// The blanket policy applied to all messages.
    blanket: BlanketMessagePolicy,
    /// A collection of chains which restrict the origin of messages to be
    /// accepted. `Option::None` means that messages from all chains are accepted. An empty
    /// `HashSet` denotes that messages from no chains are accepted.
    restrict_chain_ids_to: Option<HashSet<ChainId>>,
    /// A collection of applications: If `Some`, only bundles with at least one message by any
    /// of these applications will be accepted.
    reject_message_bundles_without_application_ids: Option<HashSet<GenericApplicationId>>,
    /// A collection of applications: If `Some`, only bundles all of whose messages are by these
    /// applications will be accepted.
    reject_message_bundles_with_other_application_ids: Option<HashSet<GenericApplicationId>>,
}
/// The blanket decision applied to every incoming message before the more specific
/// filters in [`MessagePolicy`] are considered.
#[derive(Default, Copy, Clone, Debug, clap::ValueEnum, serde::Deserialize, tsify::Tsify)]
pub enum BlanketMessagePolicy {
    /// Automatically accept all incoming messages. Reject them only if execution fails.
    #[default]
    Accept,
    /// Automatically reject tracked messages, ignore or skip untracked messages, but accept
    /// protected ones.
    Reject,
    /// Don't include any messages in blocks, and don't make any decision whether to accept or
    /// reject.
    Ignore,
}
impl MessagePolicy {
    /// Creates a policy from the blanket decision and the optional origin/application filters.
    pub fn new(
        blanket: BlanketMessagePolicy,
        restrict_chain_ids_to: Option<HashSet<ChainId>>,
        reject_message_bundles_without_application_ids: Option<HashSet<GenericApplicationId>>,
        reject_message_bundles_with_other_application_ids: Option<HashSet<GenericApplicationId>>,
    ) -> Self {
        Self {
            blanket,
            restrict_chain_ids_to,
            reject_message_bundles_without_application_ids,
            reject_message_bundles_with_other_application_ids,
        }
    }

    /// Creates the most permissive policy: accept everything, no filters.
    #[cfg(with_testing)]
    pub fn new_accept_all() -> Self {
        Self::new(BlanketMessagePolicy::Accept, None, None, None)
    }

    /// Applies the policy to one incoming bundle: returns `None` to drop it entirely, or
    /// the (possibly modified) bundle with its action decided.
    #[instrument(level = "trace", skip(self))]
    fn apply(&self, mut bundle: IncomingBundle) -> Option<IncomingBundle> {
        // Origin filter: if restricted, the sender chain must be in the allow-set.
        let origin_ok = self
            .restrict_chain_ids_to
            .as_ref()
            .map_or(true, |chain_ids| chain_ids.contains(&bundle.origin));
        if !origin_ok {
            return None;
        }
        // "Without" filter: at least one message must be from a listed application.
        let has_required_app = self
            .reject_message_bundles_without_application_ids
            .as_ref()
            .map_or(true, |app_ids| {
                bundle
                    .messages()
                    .any(|posted_msg| app_ids.contains(&posted_msg.message.application_id()))
            });
        if !has_required_app {
            return None;
        }
        // "With other" filter: every message must be from a listed application.
        let only_allowed_apps = self
            .reject_message_bundles_with_other_application_ids
            .as_ref()
            .map_or(true, |app_ids| {
                bundle
                    .messages()
                    .all(|posted_msg| app_ids.contains(&posted_msg.message.application_id()))
            });
        if !only_allowed_apps {
            return None;
        }
        // Under the blanket Reject policy: skip skippable bundles, reject unprotected ones.
        if self.is_reject() {
            if bundle.bundle.is_skippable() {
                return None;
            }
            if !bundle.bundle.is_protected() {
                bundle.action = MessageAction::Reject;
            }
        }
        Some(bundle)
    }

    /// Returns whether the blanket policy is `Ignore`.
    #[instrument(level = "trace", skip(self))]
    fn is_ignore(&self) -> bool {
        matches!(self.blanket, BlanketMessagePolicy::Ignore)
    }

    /// Returns whether the blanket policy is `Reject`.
    #[instrument(level = "trace", skip(self))]
    fn is_reject(&self) -> bool {
        matches!(self.blanket, BlanketMessagePolicy::Reject)
    }
}
/// Identifies which client operation a timing measurement belongs to; sent alongside the
/// measured value over the optional `timing_sender` channel.
#[derive(Debug, Clone, Copy)]
pub enum TimingType {
    /// Timing for executing a set of operations.
    ExecuteOperations,
    /// Timing for executing a block.
    ExecuteBlock,
    /// Timing for submitting a block proposal.
    SubmitBlockProposal,
    /// Timing for updating the validators.
    UpdateValidators,
}
/// Defines how we listen to a chain:
/// - do we care about every block notification?
/// - or do we only care about blocks containing events from some particular streams?
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ListeningMode {
    /// Listen to everything: all blocks for the chain and all blocks from sender chains,
    /// and participate in rounds.
    FullChain,
    /// Listen to all blocks for the chain, but don't download sender chain blocks or participate
    /// in rounds. Use this when interested in the chain's state but not intending to propose
    /// blocks (e.g., because we're not a chain owner).
    FollowChain,
    /// Only listen to blocks which contain events from those streams.
    EventsOnly(BTreeSet<StreamId>),
}
impl PartialOrd for ListeningMode {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (self, other) {
(ListeningMode::FullChain, ListeningMode::FullChain) => Some(Ordering::Equal),
(ListeningMode::FullChain, _) => Some(Ordering::Greater),
(_, ListeningMode::FullChain) => Some(Ordering::Less),
(ListeningMode::FollowChain, ListeningMode::FollowChain) => Some(Ordering::Equal),
(ListeningMode::FollowChain, ListeningMode::EventsOnly(_)) => Some(Ordering::Greater),
(ListeningMode::EventsOnly(_), ListeningMode::FollowChain) => Some(Ordering::Less),
(ListeningMode::EventsOnly(events_a), ListeningMode::EventsOnly(events_b)) => {
if events_a.is_superset(events_b) {
Some(Ordering::Greater)
} else if events_b.is_superset(events_a) {
Some(Ordering::Less)
} else {
None
}
}
}
}
}
impl ListeningMode {
    /// Returns whether a notification with this reason should be processed under this listening
    /// mode.
    pub fn is_relevant(&self, reason: &Reason) -> bool {
        match (reason, self) {
            // FullChain processes everything.
            (_, ListeningMode::FullChain) => true,
            // FollowChain processes new blocks on the chain itself, including blocks that
            // produced events.
            (Reason::NewBlock { .. }, ListeningMode::FollowChain) => true,
            (Reason::NewEvents { .. }, ListeningMode::FollowChain) => true,
            (_, ListeningMode::FollowChain) => false,
            // EventsOnly only processes events from relevant streams.
            (Reason::NewEvents { event_streams, .. }, ListeningMode::EventsOnly(relevant)) => {
                relevant.intersection(event_streams).next().is_some()
            }
            (_, ListeningMode::EventsOnly(_)) => false,
        }
    }
    /// Upgrades `self` to the least mode that covers both `self` and `other` (`None` leaves
    /// `self` unchanged). The arm ORDER below is significant: earlier arms absorb the cases
    /// where `self` is already at least as strong as `other`.
    pub fn extend(&mut self, other: Option<ListeningMode>) {
        match (self, other) {
            // Nothing to merge in.
            (_, None) => (),
            // FullChain is already maximal.
            (ListeningMode::FullChain, _) => (),
            // Anything below FullChain is upgraded to FullChain.
            (mode, Some(ListeningMode::FullChain)) => {
                *mode = ListeningMode::FullChain;
            }
            // FollowChain already covers FollowChain and EventsOnly (FullChain was handled above).
            (ListeningMode::FollowChain, _) => (),
            // Here `mode` can only be EventsOnly: upgrade it to FollowChain.
            (mode, Some(ListeningMode::FollowChain)) => {
                *mode = ListeningMode::FollowChain;
            }
            // Two EventsOnly modes: take the union of the stream sets.
            (
                ListeningMode::EventsOnly(self_events),
                Some(ListeningMode::EventsOnly(other_events)),
            ) => {
                self_events.extend(other_events);
            }
        }
    }
}
/// A builder that creates [`ChainClient`]s which share the cache and notifiers.
pub struct Client<Env: Environment> {
    /// The client's execution environment (provides storage, network, signer and wallet).
    environment: Env,
    /// Local node to manage the execution state and the local storage of the chains that we are
    /// tracking.
    pub local_node: LocalNodeClient<Env::Storage>,
    /// Manages the requests sent to validator nodes.
    requests_scheduler: RequestsScheduler<Env>,
    /// The admin chain ID.
    admin_id: ChainId,
    /// Chains that should be tracked by the client.
    // TODO(#2412): Merge with set of chains the client is receiving notifications from validators
    tracked_chains: Arc<RwLock<HashSet<ChainId>>>,
    /// References to clients waiting for chain notifications.
    notifier: Arc<ChannelNotifier<Notification>>,
    /// Chain state for the managed chains.
    chains: papaya::HashMap<ChainId, chain_client::State>,
    /// Configuration options.
    options: chain_client::Options,
}
impl<Env: Environment> Client<Env> {
    /// Creates a new `Client` with a new cache and notifiers.
    ///
    /// `tracked_chains` seeds the set of chains the local worker state will track;
    /// `chain_worker_ttl`/`sender_chain_worker_ttl` and the cache sizes are forwarded to the
    /// worker state, and `options` configures the chain clients created from this builder.
    #[instrument(level = "trace", skip_all)]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        environment: Env,
        admin_id: ChainId,
        long_lived_services: bool,
        tracked_chains: impl IntoIterator<Item = ChainId>,
        name: impl Into<String>,
        chain_worker_ttl: Duration,
        sender_chain_worker_ttl: Duration,
        options: chain_client::Options,
        block_cache_size: usize,
        execution_state_cache_size: usize,
        requests_scheduler_config: requests_scheduler::RequestsSchedulerConfig,
    ) -> Self {
        let tracked_chains = Arc::new(RwLock::new(tracked_chains.into_iter().collect()));
        // Build the worker state for the local node; the client tolerates inactive chains
        // and messages from deprecated epochs since it only mirrors validator state.
        let state = WorkerState::new_for_client(
            name.into(),
            environment.storage().clone(),
            tracked_chains.clone(),
            block_cache_size,
            execution_state_cache_size,
        )
        .with_long_lived_services(long_lived_services)
        .with_allow_inactive_chains(true)
        .with_allow_messages_from_deprecated_epochs(true)
        .with_chain_worker_ttl(chain_worker_ttl)
        .with_sender_chain_worker_ttl(sender_chain_worker_ttl);
        let local_node = LocalNodeClient::new(state);
        // The scheduler starts with no validator nodes; they are added later.
        let requests_scheduler = RequestsScheduler::new(vec![], requests_scheduler_config);
        Self {
            environment,
            local_node,
            requests_scheduler,
            chains: papaya::HashMap::new(),
            admin_id,
            tracked_chains,
            notifier: Arc::new(ChannelNotifier::default()),
            options,
        }
    }
    /// Returns the chain ID of the admin chain.
    pub fn admin_chain(&self) -> ChainId {
        self.admin_id
    }
    /// Returns the storage client used by this client's local node.
    pub fn storage_client(&self) -> &Env::Storage {
        self.environment.storage()
    }
    /// Returns the environment's network, used as the provider of validator node connections.
    pub fn validator_node_provider(&self) -> &Env::Network {
        self.environment.network()
    }
    /// Returns a reference to the client's [`Signer`][crate::environment::Signer].
    #[instrument(level = "trace", skip(self))]
    pub fn signer(&self) -> &Env::Signer {
        self.environment.signer()
    }
    /// Returns a reference to the client's [`Wallet`][crate::environment::Wallet].
    pub fn wallet(&self) -> &Env::Wallet {
        self.environment.wallet()
    }
    /// Adds a chain to the set of chains tracked by the local node.
    #[instrument(level = "trace", skip(self))]
    pub fn track_chain(&self, chain_id: ChainId) {
        // The lock is only held for the insertion; poisoning would mean another thread
        // panicked while holding it, which we treat as unrecoverable.
        self.tracked_chains
            .write()
            .expect("Panics should not happen while holding a lock to `tracked_chains`")
            .insert(chain_id);
    }
    /// Creates a new `ChainClient`.
    ///
    /// Registers a per-chain state entry (pending proposal + follow-only flag) if one does
    /// not exist yet, then builds a `ChainClient` handle sharing this `Client`.
    #[expect(clippy::too_many_arguments)]
    #[instrument(level = "trace", skip_all, fields(chain_id, next_block_height))]
    pub fn create_chain_client(
        self: &Arc<Self>,
        chain_id: ChainId,
        block_hash: Option<CryptoHash>,
        next_block_height: BlockHeight,
        pending_proposal: Option<PendingProposal>,
        preferred_owner: Option<AccountOwner>,
        timing_sender: Option<mpsc::UnboundedSender<(u64, TimingType)>>,
        follow_only: bool,
    ) -> ChainClient<Env> {
        // If the entry already exists we assume that the entry is more up to date than
        // the arguments: If they were read from the wallet file, they might be stale.
        self.chains.pin().get_or_insert_with(chain_id, || {
            chain_client::State::new(pending_proposal.clone(), follow_only)
        });
        ChainClient::new(
            self.clone(),
            chain_id,
            self.options.clone(),
            block_hash,
            next_block_height,
            preferred_owner,
            timing_sender,
        )
    }
    /// Returns whether the given chain is in follow-only mode.
    /// Chains with no registered state are reported as not follow-only.
    fn is_chain_follow_only(&self, chain_id: ChainId) -> bool {
        self.chains
            .pin()
            .get(&chain_id)
            .is_some_and(|state| state.is_follow_only())
    }
    /// Sets whether the given chain is in follow-only mode.
    /// If the chain has no registered state, this updates nothing.
    pub fn set_chain_follow_only(&self, chain_id: ChainId, follow_only: bool) {
        self.chains.pin().update(chain_id, |state| {
            // NOTE(review): `clone_for_update_unchecked` presumably bypasses some
            // consistency check on the state clone — confirm its contract before reuse.
            let mut state = state.clone_for_update_unchecked();
            state.set_follow_only(follow_only);
            state
        });
    }
    /// Fetches the chain description blob if needed, and returns the chain info.
    ///
    /// On a `BlobsNotFound` error from the local node, first synchronizes the admin chain,
    /// then fetches the missing blobs from `validators` and retries once.
    async fn fetch_chain_info(
        &self,
        chain_id: ChainId,
        validators: &[RemoteNode<Env::ValidatorNode>],
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        match self.local_node.chain_info(chain_id).await {
            Ok(info) => Ok(info),
            Err(LocalNodeError::BlobsNotFound(blob_ids)) => {
                // Make sure the admin chain is up to date.
                self.synchronize_chain_state(self.admin_id).await?;
                // If the chain is missing then the error is a WorkerError
                // and so a BlobsNotFound
                self.update_local_node_with_blobs_from(blob_ids, validators)
                    .await?;
                Ok(self.local_node.chain_info(chain_id).await?)
            }
            // Any other local-node error is propagated as-is.
            Err(err) => Err(err.into()),
        }
    }
fn weighted_select(
remaining_validators: &mut Vec<RemoteNode<Env::ValidatorNode>>,
remaining_weights: &mut Vec<u64>,
) -> Option<RemoteNode<Env::ValidatorNode>> {
if remaining_weights.is_empty() {
return None;
}
let dist = WeightedIndex::new(remaining_weights.clone()).unwrap();
let idx = dist.sample(&mut rand::thread_rng());
remaining_weights.remove(idx);
Some(remaining_validators.remove(idx))
}
    /// Downloads and processes all certificates up to (excluding) the specified height.
    ///
    /// Validators are tried one by one in a random, vote-weighted order until the local
    /// chain info reaches the target height; fails with `CannotDownloadCertificates` if no
    /// validator could provide the missing certificates.
    #[instrument(level = "trace", skip(self))]
    async fn download_certificates(
        &self,
        chain_id: ChainId,
        target_next_block_height: BlockHeight,
    ) -> Result<Box<ChainInfo>, chain_client::Error> {
        let (_, committee) = self.admin_committee().await?;
        let mut remaining_validators = self.make_nodes(&committee)?;
        let mut info = self
            .fetch_chain_info(chain_id, &remaining_validators)
            .await?;
        // Determining the weights of the validators
        let mut remaining_weights = remaining_validators
            .iter()
            .map(|validator| {
                // The nodes were built from this same committee, so the lookup must succeed.
                let validator_state = committee.validators.get(&validator.public_key).unwrap();
                validator_state.votes
            })
            .collect::<Vec<_>>();
        while let Some(remote_node) =
            Self::weighted_select(&mut remaining_validators, &mut remaining_weights)
        {
            // Stop as soon as the local node has caught up to the target.
            if target_next_block_height <= info.next_block_height {
                return Ok(info);
            }
            match self
                .download_certificates_from(&remote_node, chain_id, target_next_block_height)
                .await
            {
                // A failing validator is logged and skipped; we try the next one.
                Err(error) => info!(
                    remote_node = remote_node.address(),
                    %error,
                    "failed to download certificates from validator",
                ),
                Ok(Some(new_info)) => info = new_info,
                Ok(None) => {}
            }
        }
        ensure!(
            target_next_block_height <= info.next_block_height,
            chain_client::Error::CannotDownloadCertificates {
                chain_id,
                target_next_block_height,
            }
        );
        Ok(info)
    }
    /// Downloads and processes all certificates up to (excluding) the specified height from the
    /// given validator.
    ///
    /// Returns the chain info after the last processed certificate, or `None` if nothing
    /// was processed.
    #[instrument(level = "trace", skip_all)]
    async fn download_certificates_from(
        &self,
        remote_node: &RemoteNode<Env::ValidatorNode>,
        chain_id: ChainId,
        stop: BlockHeight,
    ) -> Result<Option<Box<ChainInfo>>, chain_client::Error> {
        let mut last_info = None;
        // First load any blocks from local storage, if available.
        let chain_info = self.local_node.chain_info(chain_id).await?;
        let mut next_height = chain_info.next_block_height;
        let hashes = self
            .local_node
            .get_preprocessed_block_hashes(chain_id, next_height, stop)
            .await?;
        let certificates = self
            .storage_client()
            .read_certificates(hashes.clone())
            .await?;
        // Any hash that did not resolve to a valid certificate is a storage-level error.
        let certificates = match ResultReadCertificates::new(certificates, hashes) {
            ResultReadCertificates::Certificates(certificates) => certificates,
            ResultReadCertificates::InvalidHashes(hashes) => {
                return Err(chain_client::Error::ReadCertificatesError(hashes))
            }
        };
        for certificate in certificates {
            last_info = Some(self.handle_certificate(certificate).await?.info);
        }
        // Now download the rest in batches from the remote node.
        while next_height < stop {
            // TODO(#2045): Analyze network errors instead of using a fixed batch size.
            let limit = u64::from(stop)
                .checked_sub(u64::from(next_height))
                .ok_or(ArithmeticError::Overflow)?
                .min(self.options.certificate_download_batch_size);
            let certificates = self
                .requests_scheduler
                .download_certificates(remote_node, chain_id, next_height, limit)
                .await?;
            // If the validator returned nothing processable, give up on this node.
            let Some(info) = self.process_certificates(remote_node, certificates).await? else {
                break;
            };
            // Each successful batch must make progress, or we would loop forever.
            assert!(info.next_block_height > next_height);
            next_height = info.next_block_height;
            last_info = Some(info);
        }
        Ok(last_info)
    }
async fn download_blobs(
&self,
remote_nodes: &[RemoteNode<Env::ValidatorNode>],
blob_ids: &[BlobId],
) -> Result<(), chain_client::Error> {
let blobs = &self
.requests_scheduler
.download_blobs(remote_nodes, blob_ids, self.options.blob_download_timeout)
.await?
.ok_or_else(|| {
chain_client::Error::RemoteNodeError(NodeError::BlobsNotFound(blob_ids.to_vec()))
})?;
self.local_node.store_blobs(blobs).await.map_err(Into::into)
}
/// Tries to process all the certificates, requesting any missing blobs from the given node.
/// Returns the chain info of the last successfully processed certificate.
#[instrument(level = "trace", skip_all)]
async fn process_certificates(
    &self,
    remote_node: &RemoteNode<Env::ValidatorNode>,
    certificates: Vec<ConfirmedBlockCertificate>,
) -> Result<Option<Box<ChainInfo>>, chain_client::Error> {
    let mut info = None;
    // Collect the blobs required by the whole batch up front, so one round of
    // downloads can cover all certificates.
    let required_blob_ids: Vec<_> = certificates
        .iter()
        .flat_map(|certificate| certificate.value().required_blob_ids())
        .collect();
    match self
        .local_node
        .read_blob_states_from_storage(&required_blob_ids)
        .await
    {
        Err(LocalNodeError::BlobsNotFound(blob_ids)) => {
            // Only fetch the blobs the local node reported as missing.
            self.download_blobs(&[remote_node.clone()], &blob_ids)
                .await?;
        }
        x => {
            // Propagate any other error; on success there is nothing to download.
            x?;
        }
    }
    for certificate in certificates {
        info = Some(
            match self.handle_certificate(certificate.clone()).await {
                Err(LocalNodeError::BlobsNotFound(blob_ids)) => {
                    // Handling can still surface missing blobs; fetch them from the
                    // same validator and retry this certificate once.
                    self.download_blobs(&[remote_node.clone()], &blob_ids)
                        .await?;
                    self.handle_certificate(certificate).await?
                }
                x => x?,
            }
            .info,
        );
    }
    // Done with all certificates.
    Ok(info)
}
/// Forwards a certificate to the local node for processing, passing along the
/// client's notifier so the outcome is broadcast to subscribers.
async fn handle_certificate<T: ProcessableCertificate>(
    &self,
    certificate: GenericCertificate<T>,
) -> Result<ChainInfoResponse, LocalNodeError> {
    let notifier = &self.notifier;
    self.local_node.handle_certificate(certificate, notifier).await
}
/// Queries the local node for the given chain's info, including its committees.
async fn chain_info_with_committees(
    &self,
    chain_id: ChainId,
) -> Result<Box<ChainInfo>, LocalNodeError> {
    let query = ChainInfoQuery::new(chain_id).with_committees();
    let response = self.local_node.handle_chain_info_query(query).await?;
    Ok(response.info)
}
/// Obtains all the committees trusted by the admin chain, together with the admin
/// chain's latest epoch.
#[instrument(level = "trace", skip_all)]
async fn admin_committees(
    &self,
) -> Result<(Epoch, BTreeMap<Epoch, Committee>), LocalNodeError> {
    let info = self.chain_info_with_committees(self.admin_id).await?;
    let epoch = info.epoch;
    let committees = info.into_committees()?;
    Ok((epoch, committees))
}
/// Obtains the committee for the latest epoch on the admin chain, together with that
/// epoch.
pub async fn admin_committee(&self) -> Result<(Epoch, Committee), LocalNodeError> {
    let info = self.chain_info_with_committees(self.admin_id).await?;
    let epoch = info.epoch;
    let committee = info.into_current_committee()?;
    Ok((epoch, committee))
}
/// Builds a [`RemoteNode`] handle for every validator in the latest admin-chain
/// committee.
async fn validator_nodes(
    &self,
) -> Result<Vec<RemoteNode<Env::ValidatorNode>>, chain_client::Error> {
    let (_epoch, committee) = self.admin_committee().await?;
    let nodes = self.make_nodes(&committee)?;
    Ok(nodes)
}
/// Creates a [`RemoteNode`] for each validator in the committee.
fn make_nodes(
    &self,
    committee: &Committee,
) -> Result<Vec<RemoteNode<Env::ValidatorNode>>, NodeError> {
    let provider = self.validator_node_provider();
    let nodes = provider
        .make_nodes(committee)?
        .map(|(public_key, node)| RemoteNode { public_key, node })
        .collect();
    Ok(nodes)
}
/// Ensures that the client has the `ChainDescription` blob corresponding to this
/// client's `ChainId`.
///
/// If the blob is missing locally, synchronizes the admin chain and fetches it from
/// the current validators.
pub async fn get_chain_description(
    &self,
    chain_id: ChainId,
) -> Result<ChainDescription, chain_client::Error> {
    // The description blob's ID is derived from the chain ID itself.
    let chain_desc_id = BlobId::new(chain_id.0, BlobType::ChainDescription);
    let blob = self
        .local_node
        .storage_client()
        .read_blob(chain_desc_id)
        .await?;
    if let Some(blob) = blob {
        // We have the blob - return it.
        return Ok(bcs::from_bytes(blob.bytes())?);
    };
    // Recover history from the current validators, according to the admin chain.
    self.synchronize_chain_state(self.admin_id).await?;
    let nodes = self.validator_nodes().await?;
    let blob = self
        .update_local_node_with_blobs_from(vec![chain_desc_id], &nodes)
        .await?
        .pop()
        .unwrap(); // Returns exactly as many blobs as passed-in IDs.
    Ok(bcs::from_bytes(blob.bytes())?)
}
/// Updates the latest block and next block height and round information from the chain info.
///
/// NOTE(review): relies on the concurrent map's `update` applying a cloned state
/// atomically; presumably the closure may run more than once under contention —
/// confirm against the map's API contract.
#[instrument(level = "trace", skip_all, fields(chain_id = format!("{:.8}", info.chain_id)))]
fn update_from_info(&self, info: &ChainInfo) {
    self.chains.pin().update(info.chain_id, |state| {
        // Work on a clone and return it as the chain's new state.
        let mut state = state.clone_for_update_unchecked();
        state.update_from_info(info);
        state
    });
}
/// Handles the certificate in the local node and records the resulting chain info in
/// the client's cached per-chain state.
#[instrument(level = "trace", skip_all)]
async fn process_certificate<T: ProcessableCertificate>(
    &self,
    certificate: Box<GenericCertificate<T>>,
) -> Result<(), LocalNodeError> {
    let response = self.handle_certificate(*certificate).await?;
    self.update_from_info(&response.info);
    Ok(())
}
/// Submits a validated block for finalization and returns the confirmed block certificate.
#[instrument(level = "trace", skip_all)]
pub(crate) async fn finalize_block(
    self: &Arc<Self>,
    committee: &Committee,
    certificate: ValidatedBlockCertificate,
) -> Result<ConfirmedBlockCertificate, chain_client::Error> {
    debug!(round = %certificate.round, "Submitting block for confirmation");
    // The confirmed value validators are expected to sign for this block.
    let hashed_value = ConfirmedBlock::new(certificate.inner().block().clone());
    let finalize_action = CommunicateAction::FinalizeBlock {
        certificate: Box::new(certificate),
        delivery: self.options.cross_chain_message_delivery,
    };
    // Gather a quorum of finalization signatures from the committee.
    let certificate = self
        .communicate_chain_action(committee, finalize_action, hashed_value)
        .await?;
    // Apply the freshly confirmed certificate locally; its signatures were already
    // checked while assembling the quorum.
    self.receive_certificate_with_checked_signatures(certificate.clone())
        .await?;
    Ok(certificate)
}
/// Submits a block proposal to the validators.
///
/// Returns the certificate assembled from a quorum of validator votes. Also warns
/// if a validity threshold of validators reported that our clock is skewed.
#[instrument(level = "trace", skip_all)]
pub(crate) async fn submit_block_proposal<T: ProcessableCertificate>(
    self: &Arc<Self>,
    committee: &Committee,
    proposal: Box<BlockProposal>,
    value: T,
) -> Result<GenericCertificate<T>, chain_client::Error> {
    use linera_storage::Clock as _;
    debug!(
        round = %proposal.content.round,
        "Submitting block proposal to validators"
    );
    // Check if the block timestamp is in the future and log INFO.
    let block_timestamp = proposal.content.block.timestamp;
    let local_time = self.local_node.storage_client().clock().current_time();
    if block_timestamp > local_time {
        info!(
            chain_id = %proposal.content.block.chain_id,
            %block_timestamp,
            %local_time,
            "Block timestamp is in the future; waiting for validators",
        );
    }
    // Create channel for clock skew reports from validators.
    let (clock_skew_sender, mut clock_skew_receiver) = mpsc::unbounded_channel();
    let submit_action = CommunicateAction::SubmitBlock {
        proposal,
        blob_ids: value.required_blob_ids().into_iter().collect(),
        clock_skew_sender,
    };
    // Spawn a task to monitor clock skew reports and warn if threshold is reached.
    // NOTE(review): the task's receive loop ends when all `clock_skew_sender` clones
    // are dropped — presumably after `communicate_chain_action` below completes;
    // confirm so the `await` on the handle cannot hang.
    let validity_threshold = committee.validity_threshold();
    let committee_clone = committee.clone();
    let clock_skew_check_handle = linera_base::task::spawn(async move {
        let mut skew_weight = 0u64;
        let mut min_skew = TimeDelta::MAX;
        let mut max_skew = TimeDelta::ZERO;
        while let Some((public_key, clock_skew)) = clock_skew_receiver.recv().await {
            // Only strictly positive skews are counted towards the threshold.
            if clock_skew.as_micros() > 0 {
                skew_weight += committee_clone.weight(&public_key);
                min_skew = min_skew.min(clock_skew);
                max_skew = max_skew.max(clock_skew);
                if skew_weight >= validity_threshold {
                    warn!(
                        skew_weight,
                        validity_threshold,
                        min_skew_ms = min_skew.as_micros() / 1000,
                        max_skew_ms = max_skew.as_micros() / 1000,
                        "A validity threshold of validators reported clock skew; \
                         consider checking your system clock",
                    );
                    return;
                }
            }
        }
    });
    let certificate = self
        .communicate_chain_action(committee, submit_action, value)
        .await?;
    clock_skew_check_handle.await;
    // Record the new certificate locally before returning it to the caller.
    self.process_certificate(Box::new(certificate.clone()))
        .await?;
    Ok(certificate)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/validator_trackers.rs | linera-core/src/client/validator_trackers.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BTreeMap, HashMap, VecDeque};
use linera_base::{crypto::ValidatorPublicKey, data_types::BlockHeight, identifiers::ChainId};
use linera_chain::data_types::ChainAndHeight;
use super::received_log::ReceivedLogs;
/// Keeps multiple `ValidatorTracker`s for multiple validators.
pub(super) struct ValidatorTrackers(BTreeMap<ValidatorPublicKey, ValidatorTracker>);
impl ValidatorTrackers {
    /// Creates a new `ValidatorTrackers`.
    ///
    /// `received_logs` holds each validator's received log; `trackers` holds the
    /// previously stored tracker value per validator (defaulting to 0 if absent).
    pub(super) fn new(
        received_logs: Vec<(ValidatorPublicKey, Vec<ChainAndHeight>)>,
        trackers: &HashMap<ValidatorPublicKey, u64>,
    ) -> Self {
        Self(
            received_logs
                .into_iter()
                .map(|(validator, log)| {
                    (
                        validator,
                        // A validator without a stored tracker starts at 0.
                        ValidatorTracker::new(*trackers.get(&validator).unwrap_or(&0), log),
                    )
                })
                .collect(),
        )
    }
    /// Updates all the trackers with the information that a particular certificate has
    /// been downloaded and processed.
    pub(super) fn downloaded_cert(&mut self, chain_and_height: ChainAndHeight) {
        for tracker in self.0.values_mut() {
            tracker.downloaded_cert(chain_and_height);
        }
    }
    /// Converts the `ValidatorTrackers` into a map of per-validator tracker values
    /// (indices into the validators' received logs).
    pub(super) fn to_map(&self) -> BTreeMap<ValidatorPublicKey, u64> {
        self.0
            .iter()
            .map(|(validator, tracker)| (*validator, tracker.current_tracker_value))
            .collect()
    }
    /// Compares validators' received logs of sender chains with local node information and returns
    /// a per-chain list of block heights that sent us messages we didn't see yet. Updates
    /// the trackers accordingly.
    ///
    /// Heights strictly below the local next height are removed from `received_logs`
    /// and counted as already downloaded for every validator's tracker.
    pub(super) fn filter_out_already_known(
        &mut self,
        received_logs: &mut ReceivedLogs,
        local_next_heights: BTreeMap<ChainId, BlockHeight>,
    ) {
        for (sender_chain_id, local_highest) in &local_next_heights {
            if let Some(remote_heights) = received_logs.get_chain_mut(sender_chain_id) {
                remote_heights.retain(|height, _| {
                    if height < local_highest {
                        // we consider all of the heights below our local next height
                        // to have been already downloaded, so we will increase the
                        // validators' trackers accordingly
                        self.downloaded_cert(ChainAndHeight {
                            chain_id: *sender_chain_id,
                            height: *height,
                        });
                        false
                    } else {
                        true
                    }
                });
            }
        }
    }
}
/// Manages a "tracker" of a single validator.
/// The received log is the list of chains and heights of blocks sending messages to a
/// particular receiver chain. The tracker is the index of the first entry in that log
/// that corresponds to a block that hasn't been processed yet.
/// In order to keep the tracker value up to date, we keep the part of the log starting
/// with the first entry corresponding to a not-yet-processed block, and a record of which
/// certificates we have already downloaded. Whenever we download a certificate corresponding to
/// the first block in the log, we increase the tracker and pop the blocks off the log,
/// until we hit one we haven't downloaded yet.
struct ValidatorTracker {
    // Index into the validator's received log of the first not-yet-processed entry.
    current_tracker_value: u64,
    // The remaining suffix of the received log, starting at `current_tracker_value`.
    to_be_downloaded: VecDeque<ChainAndHeight>,
    // Highest block height seen as downloaded, per sender chain.
    highest_downloaded: BTreeMap<ChainId, BlockHeight>,
}
impl ValidatorTracker {
    /// Creates a new `ValidatorTracker` from a stored tracker value and the received
    /// log suffix that starts at that value.
    fn new(tracker: u64, validator_log: Vec<ChainAndHeight>) -> Self {
        Self {
            current_tracker_value: tracker,
            to_be_downloaded: validator_log.into_iter().collect(),
            highest_downloaded: BTreeMap::new(),
        }
    }
    /// Marks a certificate at a particular height in a particular chain as downloaded,
    /// and updates the tracker accordingly.
    ///
    /// Only the per-chain maximum is recorded, so marking height `h` implicitly
    /// covers all lower heights of that chain (see the `>=` in `maximize_tracker`).
    fn downloaded_cert(&mut self, chain_and_height: ChainAndHeight) {
        let current_highest = self
            .highest_downloaded
            .entry(chain_and_height.chain_id)
            .or_insert(BlockHeight(0));
        *current_highest = chain_and_height.height.max(*current_highest);
        self.maximize_tracker();
    }
    /// Increases the tracker value to the first index that hasn't been downloaded yet.
    fn maximize_tracker(&mut self) {
        // Pop entries off the front as long as their height is covered by the highest
        // downloaded height recorded for their chain.
        while self.to_be_downloaded.front().is_some_and(|first_cert| {
            self.highest_downloaded
                .get(&first_cert.chain_id)
                .is_some_and(|max_downloaded_height| *max_downloaded_height >= first_cert.height)
        }) {
            let _first_cert = self.to_be_downloaded.pop_front().unwrap();
            self.current_tracker_value += 1;
        }
    }
}
#[cfg(test)]
mod test {
    use linera_base::{
        crypto::{CryptoHash, ValidatorKeypair},
        data_types::BlockHeight,
        identifiers::ChainId,
    };
    use linera_chain::data_types::ChainAndHeight;
    use super::{super::received_log::ReceivedLogs, ValidatorTracker, ValidatorTrackers};
    // Checks that the tracker only advances over an uninterrupted, fully downloaded
    // prefix of the received log.
    #[test]
    fn test_validator_tracker() {
        let chain1 = ChainId(CryptoHash::test_hash("chain1"));
        let chain2 = ChainId(CryptoHash::test_hash("chain2"));
        // Log: (chain1, 0), (chain2, 0), (chain1, 1).
        let mut tracker = ValidatorTracker::new(
            0,
            vec![(chain1, 0), (chain2, 0), (chain1, 1)]
                .into_iter()
                .map(|(chain_id, height)| ChainAndHeight {
                    chain_id,
                    height: height.into(),
                })
                .collect(),
        );
        tracker.downloaded_cert(ChainAndHeight {
            chain_id: chain1,
            height: 0.into(),
        });
        // Only the first log entry is covered so far.
        assert_eq!(tracker.current_tracker_value, 1);
        tracker.downloaded_cert(ChainAndHeight {
            chain_id: chain1,
            height: 1.into(),
        });
        // (chain2, 0) is still missing, so the tracker cannot move past index 1.
        assert_eq!(tracker.current_tracker_value, 1);
        tracker.downloaded_cert(ChainAndHeight {
            chain_id: chain2,
            height: 0.into(),
        });
        // Now the entire log prefix is downloaded.
        assert_eq!(tracker.current_tracker_value, 3);
    }
    // Checks that heights below the local next height are dropped from the received
    // log and counted as downloaded when updating the trackers.
    #[test]
    fn test_filter_out_already_known() {
        let chain1 = ChainId(CryptoHash::test_hash("chain1"));
        let chain2 = ChainId(CryptoHash::test_hash("chain2"));
        let validator = ValidatorKeypair::generate().public_key;
        let log: Vec<_> = vec![
            (chain1, 0),
            (chain2, 0),
            (chain1, 1),
            (chain1, 2),
            (chain2, 1),
            (chain1, 3),
            (chain2, 2),
        ]
        .into_iter()
        .map(|(chain_id, height)| ChainAndHeight {
            chain_id,
            height: height.into(),
        })
        .collect();
        let mut received_log = ReceivedLogs::from_received_result(vec![(validator, log.clone())]);
        assert_eq!(received_log.num_chains(), 2);
        assert_eq!(received_log.num_certs(), 7);
        let mut tracker = ValidatorTrackers::new(
            vec![(validator, log)],
            &vec![(validator, 0)].into_iter().collect(),
        );
        // Locally we already have everything below height 3 on both chains.
        let local_heights = vec![(chain1, BlockHeight(3)), (chain2, BlockHeight(3))]
            .into_iter()
            .collect();
        tracker.filter_out_already_known(&mut received_log, local_heights);
        assert_eq!(received_log.num_chains(), 2); // we do not remove empty chains
        assert_eq!(received_log.num_certs(), 1);
        // tracker should have shifted to point to (chain1, 3)
        assert_eq!(tracker.0[&validator].current_tracker_value, 5);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/chain_client/state.rs | linera-core/src/client/chain_client/state.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::BTreeSet, sync::Arc};
use linera_base::data_types::Blob;
use linera_chain::data_types::ProposedBlock;
use tokio::sync::Mutex;
use super::super::PendingProposal;
use crate::data_types::ChainInfo;
/// The state of our interaction with a particular chain: how far we have synchronized it and
/// whether we are currently attempting to propose a new block.
pub struct State {
    /// The block we are currently trying to propose for the next height, if any.
    ///
    /// This is always at the same height as `next_block_height`.
    pending_proposal: Option<PendingProposal>,
    /// A mutex that is held whilst we are performing operations that should not be
    /// attempted by multiple clients at the same time.
    ///
    /// This is an async (`tokio`) mutex, so its guard may be held across `.await`
    /// points.
    client_mutex: Arc<Mutex<()>>,
    /// If true, only download blocks for this chain without fetching manager values.
    /// Use this for chains we're interested in observing but don't intend to propose blocks for.
    follow_only: bool,
}
impl State {
    /// Creates a new chain state with the given pending proposal and follow-only flag.
    pub fn new(pending_proposal: Option<PendingProposal>, follow_only: bool) -> State {
        let client_mutex = Arc::default();
        State {
            pending_proposal,
            client_mutex,
            follow_only,
        }
    }
    /// Clones the state. This must only be used to update the state, and one of the two clones
    /// must be dropped.
    pub(crate) fn clone_for_update_unchecked(&self) -> State {
        State {
            pending_proposal: self.pending_proposal.clone(),
            client_mutex: Arc::clone(&self.client_mutex),
            follow_only: self.follow_only,
        }
    }
    /// Returns whether this chain is in follow-only mode.
    pub fn is_follow_only(&self) -> bool {
        self.follow_only
    }
    /// Sets whether this chain is in follow-only mode.
    pub fn set_follow_only(&mut self, follow_only: bool) {
        self.follow_only = follow_only;
    }
    /// Returns the proposal pending for this chain, if any.
    pub fn pending_proposal(&self) -> &Option<PendingProposal> {
        &self.pending_proposal
    }
    /// Records `block` and its published blobs as the pending proposal, unless a
    /// proposal at the same or a later height is already pending.
    pub(super) fn set_pending_proposal(&mut self, block: ProposedBlock, blobs: Vec<Blob>) {
        let already_pending = self
            .pending_proposal
            .as_ref()
            .is_some_and(|pending| pending.block.height >= block.height);
        if already_pending {
            tracing::error!(
                "Not setting pending block at {}, because we already have a pending proposal.",
                block.height
            );
            return;
        }
        // The supplied blobs must be exactly the blobs the block publishes.
        let blob_ids = BTreeSet::from_iter(blobs.iter().map(Blob::id));
        assert_eq!(block.published_blob_ids(), blob_ids);
        self.pending_proposal = Some(PendingProposal { block, blobs });
    }
    /// Drops the pending proposal if the chain has advanced past its height.
    pub(crate) fn update_from_info(&mut self, info: &ChainInfo) {
        let Some(pending) = &self.pending_proposal else {
            return;
        };
        if pending.block.height < info.next_block_height {
            tracing::debug!(
                "Clearing pending proposal: a block was committed at height {}",
                pending.block.height
            );
            self.clear_pending_proposal();
        }
    }
    /// Removes any pending proposal.
    pub(super) fn clear_pending_proposal(&mut self) {
        self.pending_proposal = None;
    }
    /// Returns a handle to the mutex guarding exclusive client operations.
    pub(super) fn client_mutex(&self) -> Arc<Mutex<()>> {
        Arc::clone(&self.client_mutex)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/chain_client/mod.rs | linera-core/src/client/chain_client/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod state;
use std::{
collections::{hash_map, BTreeMap, BTreeSet, HashMap},
convert::Infallible,
iter,
sync::Arc,
};
use custom_debug_derive::Debug;
use futures::{
future::{self, Either, FusedFuture, Future, FutureExt},
select,
stream::{self, AbortHandle, FusedStream, FuturesUnordered, StreamExt, TryStreamExt},
};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
abi::Abi,
crypto::{signer, AccountPublicKey, CryptoHash, Signer, ValidatorPublicKey},
data_types::{
Amount, ApplicationPermissions, ArithmeticError, Blob, BlobContent, BlockHeight,
ChainDescription, Epoch, Round, Timestamp,
},
ensure,
identifiers::{
Account, AccountOwner, ApplicationId, BlobId, BlobType, ChainId, EventId, IndexAndEvent,
ModuleId, StreamId,
},
ownership::{ChainOwnership, TimeoutConfig},
time::{Duration, Instant},
};
#[cfg(not(target_arch = "wasm32"))]
use linera_base::{data_types::Bytecode, vm::VmRuntime};
use linera_chain::{
data_types::{BlockProposal, ChainAndHeight, IncomingBundle, ProposedBlock, Transaction},
manager::LockingBlock,
types::{
Block, ConfirmedBlock, ConfirmedBlockCertificate, Timeout, TimeoutCertificate,
ValidatedBlock,
},
ChainError, ChainExecutionContext, ChainStateView,
};
use linera_execution::{
committee::Committee,
system::{
AdminOperation, OpenChainConfig, SystemOperation, EPOCH_STREAM_NAME,
REMOVED_EPOCH_STREAM_NAME,
},
ExecutionError, Operation, Query, QueryOutcome, QueryResponse, SystemQuery, SystemResponse,
};
use linera_storage::{Clock as _, ResultReadCertificates, Storage as _};
use linera_views::ViewError;
use rand::seq::SliceRandom;
use serde::Serialize;
pub use state::State;
use thiserror::Error;
use tokio::sync::{mpsc, OwnedRwLockReadGuard};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument, trace, warn, Instrument as _};
use super::{
received_log::ReceivedLogs, validator_trackers::ValidatorTrackers, AbortOnDrop, Client,
ExecuteBlockOutcome, ListeningMode, MessagePolicy, PendingProposal, ReceiveCertificateMode,
TimingType,
};
use crate::{
data_types::{ChainInfo, ChainInfoQuery, ClientOutcome, RoundTimeout},
environment::Environment,
local_node::{LocalChainInfoExt as _, LocalNodeClient, LocalNodeError},
node::{
CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode,
ValidatorNodeProvider as _,
},
remote_node::RemoteNode,
updater::{communicate_with_quorum, CommunicateAction, CommunicationError},
worker::{Notification, Reason, WorkerError},
};
/// Tunable parameters governing a `ChainClient`'s messaging behavior and its
/// communication with validators.
#[derive(Debug, Clone)]
pub struct Options {
    /// Maximum number of pending message bundles processed at a time in a block.
    pub max_pending_message_bundles: usize,
    /// The policy for automatically handling incoming messages.
    pub message_policy: MessagePolicy,
    /// Whether to block on cross-chain message delivery.
    pub cross_chain_message_delivery: CrossChainMessageDelivery,
    /// An additional delay, after reaching a quorum, to wait for additional validator signatures,
    /// as a fraction of time taken to reach quorum.
    pub quorum_grace_period: f64,
    /// The delay when downloading a blob, after which we try a second validator.
    pub blob_download_timeout: Duration,
    /// The delay when downloading a batch of certificates, after which we try a second validator.
    pub certificate_batch_download_timeout: Duration,
    /// Maximum number of certificates that we download at a time from one validator when
    /// synchronizing one of our chains.
    pub certificate_download_batch_size: u64,
    /// Maximum number of sender certificates we try to download and receive in one go
    /// when syncing sender chains.
    pub sender_certificate_download_batch_size: usize,
    /// Maximum number of tasks that can be joined concurrently using buffer_unordered.
    pub max_joined_tasks: usize,
}
#[cfg(with_testing)]
impl Options {
    /// Returns a configuration suitable for tests: accept-all message policy,
    /// non-blocking delivery, and one-second download timeouts.
    pub fn test_default() -> Self {
        use super::{
            DEFAULT_CERTIFICATE_DOWNLOAD_BATCH_SIZE, DEFAULT_SENDER_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
        };
        use crate::DEFAULT_QUORUM_GRACE_PERIOD;
        Self {
            max_pending_message_bundles: 10,
            message_policy: MessagePolicy::new_accept_all(),
            cross_chain_message_delivery: CrossChainMessageDelivery::NonBlocking,
            quorum_grace_period: DEFAULT_QUORUM_GRACE_PERIOD,
            blob_download_timeout: Duration::from_secs(1),
            certificate_batch_download_timeout: Duration::from_secs(1),
            certificate_download_batch_size: DEFAULT_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
            sender_certificate_download_batch_size: DEFAULT_SENDER_CERTIFICATE_DOWNLOAD_BATCH_SIZE,
            max_joined_tasks: 100,
        }
    }
}
/// Client to operate a chain by interacting with validators and the given local storage
/// implementation.
/// * The chain being operated is called the "local chain" or just the "chain".
/// * As a rule, operations are considered successful (and communication may stop) when
///   they succeeded in gathering a quorum of responses.
///
/// Cloning a `ChainClient` is cheap: the underlying [`Client`] is shared via `Arc`.
#[derive(Debug)]
pub struct ChainClient<Env: Environment> {
    /// The Linera [`Client`] that manages operations for this chain client.
    #[debug(skip)]
    pub(crate) client: Arc<Client<Env>>,
    /// The off-chain chain ID.
    chain_id: ChainId,
    /// The client options.
    #[debug(skip)]
    options: Options,
    /// The preferred owner of the chain used to sign proposals.
    /// `None` if we cannot propose on this chain.
    preferred_owner: Option<AccountOwner>,
    /// The next block height as read from the wallet.
    initial_next_block_height: BlockHeight,
    /// The last block hash as read from the wallet.
    initial_block_hash: Option<CryptoHash>,
    /// Optional timing sender for benchmarking.
    timing_sender: Option<mpsc::UnboundedSender<(u64, TimingType)>>,
}
impl<Env: Environment> Clone for ChainClient<Env> {
fn clone(&self) -> Self {
Self {
client: self.client.clone(),
chain_id: self.chain_id,
options: self.options.clone(),
preferred_owner: self.preferred_owner,
initial_next_block_height: self.initial_next_block_height,
initial_block_hash: self.initial_block_hash,
timing_sender: self.timing_sender.clone(),
}
}
}
/// Error type for [`ChainClient`].
///
/// Display messages are derived by `thiserror` from the `#[error]` attributes below.
#[derive(Debug, Error)]
pub enum Error {
    #[error("Local node operation failed: {0}")]
    LocalNodeError(#[from] LocalNodeError),
    #[error("Remote node operation failed: {0}")]
    RemoteNodeError(#[from] NodeError),
    #[error(transparent)]
    ArithmeticError(#[from] ArithmeticError),
    #[error("Missing certificates: {0:?}")]
    ReadCertificatesError(Vec<CryptoHash>),
    #[error("Missing confirmed block: {0:?}")]
    MissingConfirmedBlock(CryptoHash),
    #[error("JSON (de)serialization error: {0}")]
    JsonError(#[from] serde_json::Error),
    #[error("Chain operation failed: {0}")]
    ChainError(#[from] ChainError),
    #[error(transparent)]
    CommunicationError(#[from] CommunicationError<NodeError>),
    #[error("Internal error within chain client: {0}")]
    InternalError(&'static str),
    #[error(
        "Cannot accept a certificate from an unknown committee in the future. \
        Please synchronize the local view of the admin chain"
    )]
    CommitteeSynchronizationError,
    #[error("The local node is behind the trusted state in wallet and needs synchronization with validators")]
    WalletSynchronizationError,
    #[error("The state of the client is incompatible with the proposed block: {0}")]
    BlockProposalError(&'static str),
    #[error(
        "Cannot accept a certificate from a committee that was retired. \
        Try a newer certificate from the same origin"
    )]
    CommitteeDeprecationError,
    #[error("Protocol error within chain client: {0}")]
    ProtocolError(&'static str),
    #[error("Signer doesn't have key to sign for chain {0}")]
    CannotFindKeyForChain(ChainId),
    #[error("client is not configured to propose on chain {0}")]
    NoAccountKeyConfigured(ChainId),
    #[error("The chain client isn't owner on chain {0}")]
    NotAnOwner(ChainId),
    #[error(transparent)]
    ViewError(#[from] ViewError),
    #[error(
        "Failed to download certificates and update local node to the next height \
        {target_next_block_height} of chain {chain_id}"
    )]
    CannotDownloadCertificates {
        chain_id: ChainId,
        target_next_block_height: BlockHeight,
    },
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    #[error(
        "Unexpected quorum: validators voted for block hash {hash} in {round}, \
        expected block hash {expected_hash} in {expected_round}"
    )]
    UnexpectedQuorum {
        hash: CryptoHash,
        round: Round,
        expected_hash: CryptoHash,
        expected_round: Round,
    },
    #[error("signer error: {0:?}")]
    Signer(#[source] Box<dyn signer::Error>),
    #[error("Cannot revoke the current epoch {0}")]
    CannotRevokeCurrentEpoch(Epoch),
    #[error("Epoch is already revoked")]
    EpochAlreadyRevoked,
    #[error("Failed to download missing sender blocks from chain {chain_id} at height {height}")]
    CannotDownloadMissingSenderBlock {
        chain_id: ChainId,
        height: BlockHeight,
    },
}
impl From<Infallible> for Error {
    fn from(never: Infallible) -> Self {
        // `Infallible` has no values, so this conversion is statically unreachable.
        match never {}
    }
}
impl Error {
pub fn signer_failure(err: impl signer::Error + 'static) -> Self {
Self::Signer(Box::new(err))
}
}
impl<Env: Environment> ChainClient<Env> {
pub fn new(
client: Arc<Client<Env>>,
chain_id: ChainId,
options: Options,
initial_block_hash: Option<CryptoHash>,
initial_next_block_height: BlockHeight,
preferred_owner: Option<AccountOwner>,
timing_sender: Option<mpsc::UnboundedSender<(u64, TimingType)>>,
) -> Self {
ChainClient {
client,
chain_id,
options,
preferred_owner,
initial_block_hash,
initial_next_block_height,
timing_sender,
}
}
/// Returns whether this chain is in follow-only mode.
pub fn is_follow_only(&self) -> bool {
    self.client.is_chain_follow_only(self.chain_id)
}
/// Gets the client mutex from the chain's state.
///
/// Panics if this chain is unknown to the client ("Chain client constructed for
/// invalid chain").
#[instrument(level = "trace", skip(self))]
fn client_mutex(&self) -> Arc<tokio::sync::Mutex<()>> {
    self.client
        .chains
        .pin()
        .get(&self.chain_id)
        .expect("Chain client constructed for invalid chain")
        .client_mutex()
}
/// Gets the next pending block.
///
/// Returns a clone of the pending proposal stored in the chain's state, if any.
#[instrument(level = "trace", skip(self))]
pub fn pending_proposal(&self) -> Option<PendingProposal> {
    self.client
        .chains
        .pin()
        .get(&self.chain_id)
        .expect("Chain client constructed for invalid chain")
        .pending_proposal()
        .clone()
}
/// Updates the chain's state using a closure.
///
/// NOTE(review): `f` is bound as `Fn`, so the map's `update` may presumably invoke
/// it more than once under contention — keep it free of side effects; confirm
/// against the map's API contract.
#[instrument(level = "trace", skip(self, f))]
fn update_state<F>(&self, f: F)
where
    F: Fn(&mut State),
{
    let chains = self.client.chains.pin();
    chains
        .update(self.chain_id, |state| {
            // Mutate a clone and return it as the chain's new state.
            let mut state = state.clone_for_update_unchecked();
            f(&mut state);
            state
        })
        .expect("Chain client constructed for invalid chain");
}
/// Gets a reference to the client's signer instance.
#[instrument(level = "trace", skip(self))]
pub fn signer(&self) -> &impl Signer {
    self.client.signer()
}
/// Gets a mutable reference to the per-`ChainClient` options.
///
/// Changes affect only this `ChainClient` instance, not other clients sharing the
/// same underlying [`Client`].
#[instrument(level = "trace", skip(self))]
pub fn options_mut(&mut self) -> &mut Options {
    &mut self.options
}
/// Gets a reference to the per-`ChainClient` options.
#[instrument(level = "trace", skip(self))]
pub fn options(&self) -> &Options {
    &self.options
}
/// Gets the ID of the associated chain.
#[instrument(level = "trace", skip(self))]
pub fn chain_id(&self) -> ChainId {
    self.chain_id
}
/// Gets a clone of the timing sender for benchmarking.
pub fn timing_sender(&self) -> Option<mpsc::UnboundedSender<(u64, TimingType)>> {
    self.timing_sender.clone()
}
/// Gets the ID of the admin chain.
#[instrument(level = "trace", skip(self))]
pub fn admin_id(&self) -> ChainId {
    self.client.admin_id
}
/// Gets the currently preferred owner for signing the blocks.
/// `None` means this client cannot propose blocks on the chain.
#[instrument(level = "trace", skip(self))]
pub fn preferred_owner(&self) -> Option<AccountOwner> {
    self.preferred_owner
}
/// Sets the new, preferred owner for signing the blocks.
#[instrument(level = "trace", skip(self))]
pub fn set_preferred_owner(&mut self, preferred_owner: AccountOwner) {
    self.preferred_owner = Some(preferred_owner);
}
/// Unsets the preferred owner for signing the blocks.
#[instrument(level = "trace", skip(self))]
pub fn unset_preferred_owner(&mut self) {
    self.preferred_owner = None;
}
/// Obtains a `ChainStateView` for this client's chain.
#[instrument(level = "trace")]
pub async fn chain_state_view(
    &self,
) -> Result<OwnedRwLockReadGuard<ChainStateView<Env::StorageContext>>, LocalNodeError> {
    self.client.local_node.chain_state_view(self.chain_id).await
}
/// Returns chain IDs that this chain subscribes to.
///
/// Builds a map from publisher chain to the set of streams followed on it. Every
/// non-admin chain additionally follows the admin chain's epoch streams.
#[instrument(level = "trace", skip(self))]
pub async fn event_stream_publishers(
    &self,
) -> Result<BTreeMap<ChainId, BTreeSet<StreamId>>, LocalNodeError> {
    let subscriptions = self
        .client
        .local_node
        .get_event_subscriptions(self.chain_id)
        .await?;
    // Group the subscriptions by publisher chain.
    let mut publishers = subscriptions.into_iter().fold(
        BTreeMap::<ChainId, BTreeSet<StreamId>>::new(),
        |mut map, ((chain_id, stream_id), _)| {
            map.entry(chain_id).or_default().insert(stream_id);
            map
        },
    );
    if self.chain_id != self.client.admin_id {
        // NOTE(review): `insert` replaces any explicit subscriptions to admin-chain
        // streams with just the two system epoch streams — confirm this is intended.
        publishers.insert(
            self.client.admin_id,
            vec![
                StreamId::system(EPOCH_STREAM_NAME),
                StreamId::system(REMOVED_EPOCH_STREAM_NAME),
            ]
            .into_iter()
            .collect(),
        );
    }
    Ok(publishers)
}
/// Subscribes to notifications from this client's chain.
#[instrument(level = "trace")]
pub fn subscribe(&self) -> Result<NotificationStream, LocalNodeError> {
    self.subscribe_to(self.chain_id)
}
/// Subscribes to notifications from the specified chain.
#[instrument(level = "trace")]
pub fn subscribe_to(&self, chain_id: ChainId) -> Result<NotificationStream, LocalNodeError> {
    // Wrap the notifier's unbounded receiver in a pinned stream for the caller.
    Ok(Box::pin(UnboundedReceiverStream::new(
        self.client.notifier.subscribe(vec![chain_id]),
    )))
}
/// Returns the storage client used by this client's local node.
#[instrument(level = "trace")]
pub fn storage_client(&self) -> &Env::Storage {
    self.client.storage_client()
}
/// Obtains the basic `ChainInfo` data for the local chain.
#[instrument(level = "trace")]
pub async fn chain_info(&self) -> Result<Box<ChainInfo>, LocalNodeError> {
let query = ChainInfoQuery::new(self.chain_id);
let response = self
.client
.local_node
.handle_chain_info_query(query)
.await?;
self.client.update_from_info(&response.info);
Ok(response.info)
}
/// Obtains the basic `ChainInfo` data for the local chain, with chain manager values.
///
/// The query additionally requests the chain manager values and the committees.
/// Also refreshes the client's cached view of the chain from the response.
#[instrument(level = "trace")]
pub async fn chain_info_with_manager_values(&self) -> Result<Box<ChainInfo>, LocalNodeError> {
    let query = ChainInfoQuery::new(self.chain_id)
        .with_manager_values()
        .with_committees();
    let response = self.client.local_node.handle_chain_info_query(query).await?;
    // Keep the client's cached state in sync with what we just read.
    self.client.update_from_info(&response.info);
    Ok(response.info)
}
/// Returns the chain's description. Fetches it from the validators if necessary.
///
/// Pure delegation to the shared client, scoped to this client's chain ID.
pub async fn get_chain_description(&self) -> Result<ChainDescription, Error> {
    self.client.get_chain_description(self.chain_id).await
}
/// Obtains up to `self.options.max_pending_message_bundles` pending message bundles for the
/// local chain.
///
/// Returns an empty list immediately if the message policy ignores all messages.
/// Bundles are filtered through the message policy before the cap is applied.
#[instrument(level = "trace")]
async fn pending_message_bundles(&self) -> Result<Vec<IncomingBundle>, Error> {
    if self.options.message_policy.is_ignore() {
        // Ignore all messages.
        return Ok(Vec::new());
    }
    let query = ChainInfoQuery::new(self.chain_id).with_pending_message_bundles();
    let info = self
        .client
        .local_node
        .handle_chain_info_query(query)
        .await?
        .info;
    if self.preferred_owner.is_some_and(|owner| {
        info.manager
            .ownership
            .is_super_owner_no_regular_owners(&owner)
    }) {
        // There are only super owners; they are expected to sync manually.
        // Guard against operating on a chain that is behind what the wallet
        // has already recorded.
        ensure!(
            info.next_block_height >= self.initial_next_block_height,
            Error::WalletSynchronizationError
        );
    }
    Ok(info
        .requested_pending_message_bundles
        .into_iter()
        // The policy may drop or transform bundles; apply it before capping.
        .filter_map(|bundle| self.options.message_policy.apply(bundle))
        .take(self.options.max_pending_message_bundles)
        .collect())
}
/// Returns an `UpdateStreams` operation that updates this client's chain about new events
/// in any of the streams its applications are subscribing to. Returns `None` if there are no
/// new events.
#[instrument(level = "trace")]
async fn collect_stream_updates(&self) -> Result<Option<Operation>, Error> {
    // Load all our subscriptions.
    let subscription_map = self
        .client
        .local_node
        .get_event_subscriptions(self.chain_id)
        .await?;
    // Collect the indices of all new events.
    let futures = subscription_map
        .into_iter()
        // Honor the message policy's chain restriction, if any.
        .filter(|((chain_id, _), _)| {
            self.options
                .message_policy
                .restrict_chain_ids_to
                .as_ref()
                .is_none_or(|chain_set| chain_set.contains(chain_id))
        })
        .map(|((chain_id, stream_id), subscriptions)| {
            let client = self.client.clone();
            async move {
                let next_expected_index = client
                    .local_node
                    .get_next_expected_event(chain_id, stream_id.clone())
                    .await?;
                // Only report streams that advanced past what we already processed.
                if let Some(next_index) = next_expected_index
                    .filter(|next_index| *next_index > subscriptions.next_index)
                {
                    Ok(Some((chain_id, stream_id, next_index)))
                } else {
                    Ok::<_, Error>(None)
                }
            }
        });
    // Query all streams concurrently, bounded by `max_joined_tasks`.
    let updates = futures::stream::iter(futures)
        .buffer_unordered(self.options.max_joined_tasks)
        .try_collect::<Vec<_>>()
        .await?
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();
    if updates.is_empty() {
        return Ok(None);
    }
    Ok(Some(SystemOperation::UpdateStreams(updates).into()))
}
/// Obtains the basic `ChainInfo` for the local chain, including its committees.
#[instrument(level = "trace")]
async fn chain_info_with_committees(&self) -> Result<Box<ChainInfo>, LocalNodeError> {
    self.client.chain_info_with_committees(self.chain_id).await
}
/// Obtains the current epoch of the local chain as well as its set of trusted committees.
#[instrument(level = "trace")]
async fn epoch_and_committees(
    &self,
) -> Result<(Epoch, BTreeMap<Epoch, Committee>), LocalNodeError> {
    let info = self.chain_info_with_committees().await?;
    // Read the epoch before consuming `info` to extract the committees.
    let current_epoch = info.epoch;
    let trusted_committees = info.into_committees()?;
    Ok((current_epoch, trusted_committees))
}
/// Obtains the committee for the current epoch of the local chain.
///
/// If required blobs are missing locally, synchronizes the chain state from the
/// network once and retries; other errors are returned as-is.
#[instrument(level = "trace")]
pub async fn local_committee(&self) -> Result<Committee, Error> {
    let info = match self.chain_info_with_committees().await {
        Ok(info) => info,
        Err(LocalNodeError::BlobsNotFound(_)) => {
            // The committee blobs may not be available locally yet; fetch the
            // chain state from validators and query again.
            self.synchronize_chain_state(self.chain_id).await?;
            self.chain_info_with_committees().await?
        }
        Err(err) => return Err(err.into()),
    };
    Ok(info.into_current_committee()?)
}
/// Obtains the committee for the latest epoch on the admin chain.
///
/// Pure delegation to the shared client.
#[instrument(level = "trace")]
pub async fn admin_committee(&self) -> Result<(Epoch, Committee), LocalNodeError> {
    self.client.admin_committee().await
}
/// Obtains the identity of the current owner of the chain.
///
/// Returns an error if we don't have the private key for the identity.
///
/// # Errors
/// - `NoAccountKeyConfigured` if no preferred owner is set on this client.
/// - `InactiveChain` if the chain's ownership is not active.
/// - `NotAnOwner` if the preferred owner is neither a configured owner nor a
///   fallback owner.
/// - `CannotFindKeyForChain` if the signer holds no key for the preferred owner.
#[instrument(level = "trace")]
pub async fn identity(&self) -> Result<AccountOwner, Error> {
    let Some(preferred_owner) = self.preferred_owner else {
        return Err(Error::NoAccountKeyConfigured(self.chain_id));
    };
    let manager = self.chain_info().await?.manager;
    ensure!(
        manager.ownership.is_active(),
        LocalNodeError::InactiveChain(self.chain_id)
    );
    // In fallback mode, the local committee's validators also count as owners.
    let fallback_owners = if manager.ownership.has_fallback() {
        self.local_committee()
            .await?
            .account_keys_and_weights()
            .map(|(key, _)| AccountOwner::from(key))
            .collect()
    } else {
        BTreeSet::new()
    };
    let is_owner = manager.ownership.is_owner(&preferred_owner)
        || fallback_owners.contains(&preferred_owner);
    if !is_owner {
        warn!(
            chain_id = %self.chain_id,
            ownership = ?manager.ownership,
            ?fallback_owners,
            ?preferred_owner,
            "The preferred owner is not configured as an owner of this chain",
        );
        return Err(Error::NotAnOwner(self.chain_id));
    }
    // Being listed as an owner is not enough: we must also hold the signing key.
    let has_signer = self
        .signer()
        .contains_key(&preferred_owner)
        .await
        .map_err(Error::signer_failure)?;
    if !has_signer {
        warn!(%self.chain_id, ?preferred_owner,
            "Chain is one of the owners but its Signer instance doesn't contain the key",
        );
        return Err(Error::CannotFindKeyForChain(self.chain_id));
    }
    Ok(preferred_owner)
}
/// Prepares the chain for the next operation, i.e. makes sure we have synchronized it up to
/// its current height.
#[instrument(level = "trace")]
pub async fn prepare_chain(&self) -> Result<Box<ChainInfo>, Error> {
    #[cfg(with_metrics)]
    let _latency = super::metrics::PREPARE_CHAIN_LATENCY.measure_latency();
    // First catch local storage up to the height the wallet already knows about.
    let mut info = self.synchronize_to_known_height().await?;
    if self.preferred_owner.is_none_or(|owner| {
        !info
            .manager
            .ownership
            .is_super_owner_no_regular_owners(&owner)
    }) {
        // If we are not a super owner or there are regular owners, we could be missing recent
        // certificates created by other clients. Further synchronize blocks from the network.
        // This is a best-effort that depends on network conditions.
        info = self.client.synchronize_chain_state(self.chain_id).await?;
    }
    // If this chain is in a newer epoch than the admin committees we know about,
    // catch up on the admin chain to learn about the new committees.
    if info.epoch > self.client.admin_committees().await?.0 {
        self.client
            .synchronize_chain_state(self.client.admin_id)
            .await?;
    }
    self.client.update_from_info(&info);
    Ok(info)
}
/// Verifies that our local storage contains enough history compared to the
/// known block height. Otherwise, downloads the missing history from the
/// network.
///
/// The known height only differs if the wallet is ahead of storage.
async fn synchronize_to_known_height(&self) -> Result<Box<ChainInfo>, Error> {
    let info = self
        .client
        .download_certificates(self.chain_id, self.initial_next_block_height)
        .await?;
    if info.next_block_height == self.initial_next_block_height {
        // Check that our local node has the expected block hash.
        ensure!(
            self.initial_block_hash == info.block_hash,
            Error::InternalError("Invalid chain of blocks in local node")
        );
    }
    Ok(info)
}
/// Attempts to update all validators about the local chain.
///
/// Updates the old committee first (if given), then — if the committee has
/// changed — the new one as well.
#[instrument(level = "trace", skip(old_committee, latest_certificate))]
pub async fn update_validators(
    &self,
    old_committee: Option<&Committee>,
    latest_certificate: Option<ConfirmedBlockCertificate>,
) -> Result<(), Error> {
    let update_validators_start = linera_base::time::Instant::now();
    // Communicate the new certificate now.
    if let Some(old_committee) = old_committee {
        self.communicate_chain_updates(old_committee, latest_certificate.clone())
            .await?
    };
    if let Ok(new_committee) = self.local_committee().await {
        if Some(&new_committee) != old_committee {
            // If the configuration just changed, communicate to the new committee as well.
            // (This is actually more important than updating the previous committee.)
            self.communicate_chain_updates(&new_committee, latest_certificate)
                .await?;
        }
    }
    self.send_timing(update_validators_start, TimingType::UpdateValidators);
    Ok(())
}
/// Broadcasts certified blocks to validators.
///
/// Uses the client's configured cross-chain message delivery policy and the
/// chain's current next block height as the communication target.
#[instrument(level = "trace", skip(committee))]
pub async fn communicate_chain_updates(
    &self,
    committee: &Committee,
    latest_certificate: Option<ConfirmedBlockCertificate>,
) -> Result<(), Error> {
    let next_height = self.chain_info().await?.next_block_height;
    self.client
        .communicate_chain_updates(
            committee,
            self.chain_id,
            next_height,
            self.options.cross_chain_message_delivery,
            latest_certificate,
        )
        .await
}
/// Synchronizes all chains that any application on this chain subscribes to.
/// We always consider the admin chain a relevant publishing chain, for new epochs.
async fn synchronize_publisher_chains(&self) -> Result<(), Error> {
    let subscriptions = self
        .client
        .local_node
        .get_event_subscriptions(self.chain_id)
        .await?;
    // Deduplicate publisher chains; always include the admin chain, never ourselves.
    let mut chain_ids = BTreeSet::new();
    for ((chain_id, _), _) in &subscriptions {
        chain_ids.insert(*chain_id);
    }
    chain_ids.insert(self.client.admin_id);
    chain_ids.remove(&self.chain_id);
    // Synchronize all publishers concurrently, bounded by `max_joined_tasks`.
    let syncs = chain_ids
        .into_iter()
        .map(|chain_id| self.client.synchronize_chain_state(chain_id));
    let results = stream::iter(syncs)
        .buffer_unordered(self.options.max_joined_tasks)
        .collect::<Vec<_>>()
        .await;
    for result in results {
        result?;
    }
    Ok(())
}
/// Attempts to download new received certificates.
///
/// This is a best effort: it will only find certificates that have been confirmed
/// amongst sufficiently many validators of the current committee of the target
/// chain.
///
/// However, this should be the case whenever a sender's chain is still in use and
/// is regularly upgraded to new committees.
#[instrument(level = "trace")]
pub async fn find_received_certificates(
    &self,
    cancellation_token: Option<CancellationToken>,
) -> Result<(), Error> {
    debug!(chain_id = %self.chain_id, "starting find_received_certificates");
    #[cfg(with_metrics)]
    let _latency = super::metrics::FIND_RECEIVED_CERTIFICATES_LATENCY.measure_latency();
    // Use network information from the local chain.
    let chain_id = self.chain_id;
    let (_, committee) = self.admin_committee().await?;
    let nodes = self.client.make_nodes(&committee)?;
    // Trackers record, per validator, how far into its received log we have read.
    let trackers = self
        .client
        .local_node
        .get_received_certificate_trackers(chain_id)
        .await?;
    trace!("find_received_certificates: read trackers");
    let received_log_batches = Arc::new(std::sync::Mutex::new(Vec::new()));
    // Proceed to downloading received logs.
    let result = communicate_with_quorum(
        &nodes,
        &committee,
        |_| (),
        |remote_node| {
            let client = &self.client;
            // Resume from this validator's tracker, or from the start if unknown.
            let tracker = trackers.get(&remote_node.public_key).copied().unwrap_or(0);
            let received_log_batches = Arc::clone(&received_log_batches);
            Box::pin(async move {
                let batch = client
                    .get_received_log_from_validator(chain_id, &remote_node, tracker)
                    .await?;
                let mut batches = received_log_batches.lock().unwrap();
                batches.push((remote_node.public_key, batch));
                Ok(())
            })
        },
        self.options.quorum_grace_period,
    )
    .await;
    // A failed quorum is logged but not fatal: we still process whatever
    // batches were collected from the validators that did respond.
    if let Err(error) = result {
        error!(
            %error,
            "Failed to synchronize received_logs from at least a quorum of validators",
        );
    }
    let received_logs: Vec<_> = {
        let mut received_log_batches = received_log_batches.lock().unwrap();
        std::mem::take(received_log_batches.as_mut())
    };
    debug!(
        received_logs_len = %received_logs.len(),
        received_logs_total = %received_logs.iter().map(|x| x.1.len()).sum::<usize>(),
        "collected received logs"
    );
    let (received_logs, mut validator_trackers) = {
        (
            ReceivedLogs::from_received_result(received_logs.clone()),
            ValidatorTrackers::new(received_logs, &trackers),
        )
    };
    debug!(
        num_chains = %received_logs.num_chains(),
        num_certs = %received_logs.num_certs(),
        "find_received_certificates: total number of chains and certificates to sync",
    );
    let max_blocks_per_chain =
        self.options.sender_certificate_download_batch_size / self.options.max_joined_tasks * 2;
    // Download sender certificates batch by batch, persisting tracker progress
    // after each batch so an interrupted run resumes where it left off.
    for received_log in received_logs.into_batches(
        self.options.sender_certificate_download_batch_size,
        max_blocks_per_chain,
    ) {
        validator_trackers = self
            .receive_sender_certificates(
                received_log,
                validator_trackers,
                &nodes,
                cancellation_token.clone(),
            )
            .await?;
        self.update_received_certificate_trackers(&validator_trackers)
            .await;
    }
    info!("find_received_certificates finished");
    Ok(())
}
async fn update_received_certificate_trackers(&self, trackers: &ValidatorTrackers) {
let updated_trackers = trackers.to_map();
trace!(?updated_trackers, "updated tracker values");
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/node_info.rs | linera-core/src/client/requests_scheduler/node_info.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use custom_debug_derive::Debug;
use super::scoring::ScoringWeights;
use crate::{environment::Environment, remote_node::RemoteNode};
/// Tracks performance metrics and request capacity for a validator node using
/// Exponential Moving Averages (EMA) for adaptive scoring.
///
/// This struct wraps a `RemoteNode` with performance tracking that adapts quickly
/// to changing network conditions. The scoring system uses EMAs to weight recent
/// performance more heavily than historical data.
#[derive(Debug, Clone)]
pub(super) struct NodeInfo<Env: Environment> {
    /// The underlying validator node connection
    pub(super) node: RemoteNode<Env::ValidatorNode>,
    /// Exponential Moving Average of latency in milliseconds
    /// Adapts quickly to changes in response time
    ema_latency_ms: f64,
    /// Exponential Moving Average of success rate (0.0 to 1.0)
    /// Tracks recent success/failure patterns
    ema_success_rate: f64,
    /// Total number of requests processed (for monitoring and cold-start handling)
    total_requests: u64,
    /// Configuration for scoring weights
    /// NOTE(review): only `latency` and `success` are used by `calculate_score`.
    weights: ScoringWeights,
    /// EMA smoothing factor (0 < alpha < 1)
    /// Higher values give more weight to recent observations
    alpha: f64,
    /// Maximum expected latency in milliseconds for score normalization
    max_expected_latency_ms: f64,
}
impl<Env: Environment> NodeInfo<Env> {
    /// Creates a new `NodeInfo` with custom configuration.
    ///
    /// # Panics
    /// Panics if `alpha` is not strictly between 0 and 1.
    pub(super) fn with_config(
        node: RemoteNode<Env::ValidatorNode>,
        weights: ScoringWeights,
        alpha: f64,
        max_expected_latency_ms: f64,
    ) -> Self {
        assert!(alpha > 0.0 && alpha < 1.0, "Alpha must be in (0, 1) range");
        Self {
            node,
            // Optimistic priors: a moderate 100ms latency and a perfect success rate.
            ema_latency_ms: 100.0,
            ema_success_rate: 1.0,
            total_requests: 0,
            weights,
            alpha,
            max_expected_latency_ms,
        }
    }

    /// Calculates a normalized performance score (0.0 to 1.0); higher is better.
    ///
    /// Blends a latency component (inverse of the EMA latency, clamped at
    /// `max_expected_latency_ms`) with the EMA success rate using the configured
    /// weights, then damps the result for nodes that have served fewer than ten
    /// requests (cold start).
    pub(super) async fn calculate_score(&self) -> f64 {
        // Latency: lower is better, so invert the clamped, normalized EMA.
        let clamped_latency = self.ema_latency_ms.min(self.max_expected_latency_ms);
        let latency_score = 1.0 - (clamped_latency / self.max_expected_latency_ms);
        // Confidence ramps linearly from 0 to 1 over the first ten requests.
        let confidence_factor = (self.total_requests as f64 / 10.0).min(1.0);
        let raw_score = (self.weights.latency * latency_score)
            + (self.weights.success * self.ema_success_rate);
        // Scale into [raw/2, raw] depending on how much data backs the score.
        raw_score * (0.5 + 0.5 * confidence_factor)
    }

    /// Folds one observation into the EMA metrics.
    ///
    /// # Arguments
    /// - `success`: Whether the request completed successfully
    /// - `response_time_ms`: The request's response time in milliseconds
    ///
    /// EMA update rule: `new = alpha * observation + (1 - alpha) * old`, so
    /// recent observations dominate while older history decays geometrically.
    pub(super) fn update_metrics(&mut self, success: bool, response_time_ms: u64) {
        let alpha = self.alpha;
        let observed_latency = response_time_ms as f64;
        self.ema_latency_ms =
            (alpha * observed_latency) + ((1.0 - alpha) * self.ema_latency_ms);
        let observed_success = if success { 1.0 } else { 0.0 };
        self.ema_success_rate =
            (alpha * observed_success) + ((1.0 - alpha) * self.ema_success_rate);
        self.total_requests += 1;
    }

    /// Returns the current EMA success rate in [0, 1].
    pub(super) fn ema_success_rate(&self) -> f64 {
        self.ema_success_rate
    }

    /// Returns the total number of requests recorded via `update_metrics`.
    pub(super) fn total_requests(&self) -> u64 {
        self.total_requests
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/in_flight_tracker.rs | linera-core/src/client/requests_scheduler/in_flight_tracker.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashMap, fmt::Debug, sync::Arc};
use linera_base::time::{Duration, Instant};
use tokio::sync::broadcast;
use super::{
cache::SubsumingKey,
request::{RequestKey, RequestResult},
};
use crate::node::NodeError;
/// Tracks in-flight requests to deduplicate concurrent requests for the same data.
///
/// This structure manages a map of request keys to in-flight entries, each containing
/// broadcast senders for notifying waiters when a request completes, as well as timing
/// information and alternative data sources.
///
/// Cloning is cheap: the entry map is shared behind an `Arc`.
#[derive(Debug, Clone)]
pub(super) struct InFlightTracker<N> {
    /// Maps request keys to in-flight entries containing broadcast senders and metadata
    entries: Arc<tokio::sync::RwLock<HashMap<RequestKey, InFlightEntry<N>>>>,
    /// Maximum duration before an in-flight request is considered stale and deduplication is skipped
    timeout: Duration,
}
impl<N: Clone> InFlightTracker<N> {
    /// Creates a new `InFlightTracker` with the specified timeout.
    ///
    /// # Arguments
    /// - `timeout`: Maximum duration before an in-flight request is considered too old to deduplicate against
    pub(super) fn new(timeout: Duration) -> Self {
        Self {
            entries: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
            timeout,
        }
    }

    /// Attempts to subscribe to an existing in-flight request (exact or subsuming match).
    ///
    /// Searches for either an exact key match or a subsuming request (whose result would
    /// contain all the data needed by this request). Returns information about which type
    /// of match was found, along with subscription details.
    ///
    /// # Arguments
    /// - `key`: The request key to look up
    ///
    /// # Returns
    /// - `None`: No matching in-flight request found. Also returned if the found request is stale (exceeds timeout).
    /// - `Some(InFlightMatch::Exact(..))`: Exact key match found
    /// - `Some(InFlightMatch::Subsuming { key, outcome })`: Subsuming request found
    pub(super) async fn try_subscribe(&self, key: &RequestKey) -> Option<InFlightMatch> {
        let in_flight = self.entries.read().await;
        if let Some(entry) = in_flight.get(key) {
            let elapsed = Instant::now().duration_since(entry.started_at);
            if elapsed <= self.timeout {
                return Some(InFlightMatch::Exact(Subscribed(entry.sender.subscribe())));
            }
            // A stale exact match falls through: a fresh subsuming request
            // below may still be able to serve this key.
        }
        // Sometimes a request key may not have the exact match but may be subsumed by a larger one.
        for (in_flight_key, entry) in in_flight.iter() {
            if in_flight_key.subsumes(key) {
                let elapsed = Instant::now().duration_since(entry.started_at);
                if elapsed <= self.timeout {
                    return Some(InFlightMatch::Subsuming {
                        key: in_flight_key.clone(),
                        outcome: Subscribed(entry.sender.subscribe()),
                    });
                }
            }
        }
        None
    }

    /// Inserts a new in-flight request entry.
    ///
    /// Creates a new broadcast channel and in-flight entry for the given key,
    /// marking the start time as now.
    ///
    /// # Arguments
    /// - `key`: The request key to insert
    pub(super) async fn insert_new(&self, key: RequestKey) {
        // Capacity 1 suffices: exactly one result is broadcast per request.
        let (sender, _receiver) = broadcast::channel(1);
        let mut in_flight = self.entries.write().await;
        in_flight.insert(
            key,
            InFlightEntry {
                sender,
                started_at: Instant::now(),
                alternative_peers: Arc::new(tokio::sync::RwLock::new(Vec::new())),
            },
        );
    }

    /// Completes an in-flight request by removing it and broadcasting the result.
    ///
    /// Removes the entry for the given key and broadcasts the result to all waiting
    /// subscribers. Logs the number of waiters that received the notification.
    ///
    /// # Arguments
    /// - `key`: The request key to complete
    /// - `result`: The result to broadcast to waiters
    ///
    /// # Returns
    /// The number of waiters that were subscribed at completion time (0 if no
    /// entry existed for `key`).
    pub(super) async fn complete_and_broadcast(
        &self,
        key: &RequestKey,
        result: Arc<Result<RequestResult, NodeError>>,
    ) -> usize {
        let mut in_flight = self.entries.write().await;
        if let Some(entry) = in_flight.remove(key) {
            let waiter_count = entry.sender.receiver_count();
            tracing::trace!(
                key = ?key,
                waiters = waiter_count,
                "request completed; broadcasting result to waiters",
            );
            if waiter_count != 0 {
                // `send` only errors when there are no receivers; we checked,
                // but a waiter may have dropped concurrently, so just log.
                if let Err(err) = entry.sender.send(result) {
                    tracing::warn!(
                        key = ?key,
                        error = ?err,
                        "failed to broadcast result to waiters"
                    );
                }
            }
            return waiter_count;
        }
        0
    }

    /// Registers an alternative peer for an in-flight request.
    ///
    /// If an entry exists for the given key, registers the peer as an alternative source
    /// (if not already registered). Does nothing if no entry exists.
    ///
    /// # Arguments
    /// - `key`: The request key
    /// - `peer`: The peer to register as an alternative
    pub(super) async fn add_alternative_peer(&self, key: &RequestKey, peer: N)
    where
        N: PartialEq + Eq,
    {
        if let Some(entry) = self.entries.read().await.get(key) {
            // Register this peer as an alternative source if not already present
            {
                let mut alt_peers = entry.alternative_peers.write().await;
                if !alt_peers.contains(&peer) {
                    alt_peers.push(peer);
                }
            }
        }
    }

    /// Retrieves the list of alternative peers registered for an in-flight request.
    ///
    /// Returns a clone of the alternative peers list if an entry exists for the given key.
    ///
    /// # Arguments
    /// - `key`: The request key to look up
    ///
    /// # Returns
    /// - `Some(Vec<N>)`: List of alternative peers for the entry
    /// - `None`: No entry exists for `key`
    pub(super) async fn get_alternative_peers(&self, key: &RequestKey) -> Option<Vec<N>> {
        let in_flight = self.entries.read().await;
        let entry = in_flight.get(key)?;
        let peers = entry.alternative_peers.read().await;
        Some(peers.clone())
    }

    /// Removes a specific peer from the alternative peers list.
    ///
    /// Does nothing if no entry exists for `key` or the peer is not listed.
    ///
    /// # Arguments
    /// - `key`: The request key to look up
    /// - `peer`: The peer to remove from alternatives
    pub(super) async fn remove_alternative_peer(&self, key: &RequestKey, peer: &N)
    where
        N: PartialEq + Eq,
    {
        if let Some(entry) = self.entries.read().await.get(key) {
            let mut alt_peers = entry.alternative_peers.write().await;
            alt_peers.retain(|p| p != peer);
        }
    }

    /// Pops and returns the newest alternative peer from the list.
    ///
    /// Removes and returns the last peer from the alternative peers list (LIFO - newest first).
    /// Returns `None` if the entry doesn't exist or the list is empty.
    ///
    /// # Arguments
    /// - `key`: The request key to look up
    ///
    /// # Returns
    /// - `Some(N)`: The newest alternative peer
    /// - `None`: No entry exists or alternatives list is empty
    pub(super) async fn pop_alternative_peer(&self, key: &RequestKey) -> Option<N> {
        if let Some(entry) = self.entries.read().await.get(key) {
            let mut alt_peers = entry.alternative_peers.write().await;
            alt_peers.pop()
        } else {
            None
        }
    }
}
/// Type of in-flight request match found by [`InFlightTracker::try_subscribe`].
#[derive(Debug)]
pub(super) enum InFlightMatch {
    /// Exact key match found
    Exact(Subscribed),
    /// Subsuming key match found (larger request that contains this request)
    Subsuming {
        /// The key of the subsuming request
        key: RequestKey,
        /// Outcome of attempting to subscribe
        outcome: Subscribed,
    },
}
/// Successful subscription to an in-flight request: the wrapped receiver will
/// be notified with the shared result when the request completes.
#[derive(Debug)]
pub(super) struct Subscribed(pub(super) broadcast::Receiver<Arc<Result<RequestResult, NodeError>>>);
/// In-flight request entry that tracks when the request was initiated.
#[derive(Debug)]
pub(super) struct InFlightEntry<N> {
    /// Broadcast sender for notifying waiters when the request completes
    sender: broadcast::Sender<Arc<Result<RequestResult, NodeError>>>,
    /// Time when this request was initiated; used for staleness checks
    started_at: Instant,
    /// Alternative peers that can provide this data if the primary request fails
    /// (behind its own lock so it can be mutated while holding only a read lock
    /// on the entry map)
    alternative_peers: Arc<tokio::sync::RwLock<Vec<N>>>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/scoring.rs | linera-core/src/client/requests_scheduler/scoring.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Configurable weights for the scoring algorithm.
///
/// These weights determine the relative importance of different metrics
/// when calculating a node's performance score. All weights should sum to 1.0.
///
/// # Examples
///
/// ```ignore
/// // Prioritize response time and success rate equally
/// let balanced_weights = ScoringWeights { latency: 0.4, success: 0.4, load: 0.2 };
/// // Prioritize low latency above all else
/// let latency_focused = ScoringWeights { latency: 0.7, success: 0.2, load: 0.1 };
/// ```
#[derive(Debug, Clone, Copy)]
pub struct ScoringWeights {
    /// Weight for latency metric (lower latency = higher score)
    pub latency: f64,
    /// Weight for success rate metric (higher success = higher score)
    pub success: f64,
    /// Weight for load metric (lower load = higher score)
    pub load: f64,
}

impl Default for ScoringWeights {
    /// Balanced defaults: latency and success rate dominate, load is a tiebreaker.
    fn default() -> Self {
        ScoringWeights {
            latency: 0.4,
            success: 0.4,
            load: 0.2,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/mod.rs | linera-core/src/client/requests_scheduler/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module manages communication with validator nodes, including connection pooling,
//! load balancing, request deduplication, caching, and performance tracking.
mod cache;
mod in_flight_tracker;
mod node_info;
mod request;
mod scheduler;
mod scoring;
pub use scheduler::RequestsScheduler;
pub use scoring::ScoringWeights;
// Module constants - default values for RequestsSchedulerConfig
/// Default number of in-flight requests allowed.
/// NOTE(review): not referenced by `RequestsSchedulerConfig`; presumably
/// consumed by the scheduler itself — confirm.
pub const MAX_IN_FLIGHT_REQUESTS: usize = 100;
/// Default for `RequestsSchedulerConfig::max_accepted_latency_ms`.
pub const MAX_ACCEPTED_LATENCY_MS: f64 = 5000.0;
/// Default for `RequestsSchedulerConfig::cache_ttl_ms`.
pub const CACHE_TTL_MS: u64 = 2000;
/// Default for `RequestsSchedulerConfig::cache_max_size`.
pub const CACHE_MAX_SIZE: usize = 1000;
/// Default for `RequestsSchedulerConfig::max_request_ttl_ms`.
pub const MAX_REQUEST_TTL_MS: u64 = 200;
/// Default for `RequestsSchedulerConfig::alpha`.
pub const ALPHA_SMOOTHING_FACTOR: f64 = 0.1;
/// Default for `RequestsSchedulerConfig::retry_delay_ms`.
pub const STAGGERED_DELAY_MS: u64 = 150;

/// Configuration for the `RequestsScheduler`.
#[derive(Debug, Clone)]
pub struct RequestsSchedulerConfig {
    /// Maximum expected latency in milliseconds for score normalization
    pub max_accepted_latency_ms: f64,
    /// Time-to-live for cached responses in milliseconds
    pub cache_ttl_ms: u64,
    /// Maximum number of entries in the cache
    pub cache_max_size: usize,
    /// Maximum latency for an in-flight request before we stop deduplicating it (in milliseconds)
    pub max_request_ttl_ms: u64,
    /// Smoothing factor for Exponential Moving Averages (0 < alpha < 1)
    pub alpha: f64,
    /// Delay in milliseconds between starting requests to different peers.
    pub retry_delay_ms: u64,
}

impl Default for RequestsSchedulerConfig {
    /// Builds a configuration from the module-level default constants.
    fn default() -> Self {
        RequestsSchedulerConfig {
            max_accepted_latency_ms: MAX_ACCEPTED_LATENCY_MS,
            cache_ttl_ms: CACHE_TTL_MS,
            cache_max_size: CACHE_MAX_SIZE,
            max_request_ttl_ms: MAX_REQUEST_TTL_MS,
            alpha: ALPHA_SMOOTHING_FACTOR,
            retry_delay_ms: STAGGERED_DELAY_MS,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/request.rs | linera-core/src/client/requests_scheduler/request.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
data_types::{Blob, BlobContent, BlockHeight},
identifiers::{BlobId, ChainId},
};
use linera_chain::types::ConfirmedBlockCertificate;
use crate::client::requests_scheduler::cache::SubsumingKey;
/// Unique identifier for different types of download requests.
///
/// Used for request deduplication to avoid redundant downloads of the same data.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum RequestKey {
    /// Download certificates by specific heights
    Certificates {
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    },
    /// Download a blob by ID
    Blob(BlobId),
    /// Download a pending blob
    PendingBlob { chain_id: ChainId, blob_id: BlobId },
    /// Download certificate for a specific blob
    CertificateForBlob(BlobId),
}
impl RequestKey {
    /// Returns the chain ID associated with the request, if applicable.
    ///
    /// `Blob` and `CertificateForBlob` requests are not tied to a chain and
    /// yield `None`.
    pub(super) fn chain_id(&self) -> Option<ChainId> {
        match self {
            RequestKey::Certificates { chain_id, .. } => Some(*chain_id),
            RequestKey::PendingBlob { chain_id, .. } => Some(*chain_id),
            _ => None,
        }
    }

    /// Returns the block heights requested by a certificate request.
    ///
    /// # Returns
    /// - `Some(heights)` for `Certificates` requests, cloned in the order they
    ///   were originally requested (no sorting is performed)
    /// - `None` for non-certificate requests (`Blob`, `PendingBlob`, `CertificateForBlob`)
    fn heights(&self) -> Option<Vec<BlockHeight>> {
        match self {
            RequestKey::Certificates { heights, .. } => Some(heights.clone()),
            _ => None,
        }
    }
}
/// Result types that can be shared across deduplicated requests
#[derive(Debug, Clone)]
pub enum RequestResult {
    /// Certificates downloaded by height
    Certificates(Vec<ConfirmedBlockCertificate>),
    /// A blob lookup result (`None` if the blob was not found)
    Blob(Option<Blob>),
    /// Content of a pending blob
    BlobContent(BlobContent),
    /// A single certificate (boxed to keep the enum small)
    Certificate(Box<ConfirmedBlockCertificate>),
}
/// Marker trait for types that can be converted to/from `RequestResult`
/// for use in the requests cache.
pub trait Cacheable: TryFrom<RequestResult> + Into<RequestResult> {}
// Blanket impl: anything with the two conversions is automatically cacheable.
impl<T> Cacheable for T where T: TryFrom<RequestResult> + Into<RequestResult> {}
impl From<Option<Blob>> for RequestResult {
fn from(blob: Option<Blob>) -> Self {
RequestResult::Blob(blob)
}
}
impl From<Vec<ConfirmedBlockCertificate>> for RequestResult {
fn from(certs: Vec<ConfirmedBlockCertificate>) -> Self {
RequestResult::Certificates(certs)
}
}
impl From<BlobContent> for RequestResult {
fn from(content: BlobContent) -> Self {
RequestResult::BlobContent(content)
}
}
impl From<ConfirmedBlockCertificate> for RequestResult {
fn from(cert: ConfirmedBlockCertificate) -> Self {
RequestResult::Certificate(Box::new(cert))
}
}
// Unwrap a `RequestResult` back into its concrete payload; `Err(())` on a
// variant mismatch.
impl TryFrom<RequestResult> for Option<Blob> {
    type Error = ();
    fn try_from(result: RequestResult) -> Result<Self, Self::Error> {
        if let RequestResult::Blob(blob) = result {
            Ok(blob)
        } else {
            Err(())
        }
    }
}

impl TryFrom<RequestResult> for Vec<ConfirmedBlockCertificate> {
    type Error = ();
    fn try_from(result: RequestResult) -> Result<Self, Self::Error> {
        if let RequestResult::Certificates(certs) = result {
            Ok(certs)
        } else {
            Err(())
        }
    }
}

impl TryFrom<RequestResult> for BlobContent {
    type Error = ();
    fn try_from(result: RequestResult) -> Result<Self, Self::Error> {
        if let RequestResult::BlobContent(content) = result {
            Ok(content)
        } else {
            Err(())
        }
    }
}

impl TryFrom<RequestResult> for ConfirmedBlockCertificate {
    type Error = ();
    fn try_from(result: RequestResult) -> Result<Self, Self::Error> {
        if let RequestResult::Certificate(cert) = result {
            Ok(*cert)
        } else {
            Err(())
        }
    }
}
impl SubsumingKey<RequestResult> for super::request::RequestKey {
    /// A request subsumes another if it targets the same chain and its
    /// heights cover every height of the other request.
    ///
    /// NOTE(review): the single shared iterator below means the other
    /// request's heights must appear in the same relative order as ours —
    /// this assumes both `heights` vectors are ordered consistently
    /// (ascending). TODO: confirm this invariant holds for all callers.
    fn subsumes(&self, other: &Self) -> bool {
        // Different chains can't subsume each other
        if self.chain_id() != other.chain_id() {
            return false;
        }
        // Only certificate requests carry heights; anything else never subsumes.
        let (in_flight_req_heights, new_req_heights) = match (self.heights(), other.heights()) {
            (Some(range1), Some(range2)) => (range1, range2),
            _ => return false, // We subsume only certificate requests
        };
        // Walk our heights once; every requested height must be found,
        // consuming the iterator as we go.
        let mut in_flight_req_heights_iter = in_flight_req_heights.into_iter();
        new_req_heights
            .into_iter()
            .all(|new_height| in_flight_req_heights_iter.any(|h| h == new_height))
    }

    /// Extracts the subset of `result` matching `self`'s requested heights,
    /// given that `in_flight_request` subsumes `self`.
    ///
    /// Returns `None` for non-certificate results, when subsumption does not
    /// hold, or when any requested height is missing from `result`.
    fn try_extract_result(
        &self,
        in_flight_request: &RequestKey,
        result: &RequestResult,
    ) -> Option<RequestResult> {
        // Only certificate results can be extracted
        let certificates = match result {
            RequestResult::Certificates(certs) => certs,
            _ => return None,
        };
        if !in_flight_request.subsumes(self) {
            return None; // Can't extract if not subsumed
        }
        let requested_heights = self.heights()?;
        if requested_heights.is_empty() {
            return Some(RequestResult::Certificates(vec![])); // Nothing requested
        }
        // Single pass over the certificates: `find` advances the shared
        // iterator, implicitly skipping certificates below each requested
        // height. Like `subsumes`, this assumes both lists are sorted
        // ascending. (Replaces the previous `Vec::remove(0)` loop, which
        // was O(n²) in the number of requested heights.)
        let mut certificates_iter = certificates.iter();
        let mut collected = Vec::with_capacity(requested_heights.len());
        for height in &requested_heights {
            // Missing a requested height makes extraction impossible.
            let cert = certificates_iter.find(|cert| cert.value().height() == *height)?;
            collected.push(cert.clone());
        }
        Some(RequestResult::Certificates(collected))
    }
}
#[cfg(test)]
mod tests {
    //! Tests for `RequestKey` subsumption and for extracting a subset of
    //! certificates from a larger (subsuming) request's result.

    use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId};

    use super::{RequestKey, SubsumingKey};

    #[test]
    fn test_subsumes_complete_containment() {
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let large = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(11), BlockHeight(12), BlockHeight(13)],
        };
        let small = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12)],
        };
        // Subsumption is directional: the superset covers the subset only.
        assert!(large.subsumes(&small));
        assert!(!small.subsumes(&large));
    }

    #[test]
    fn test_subsumes_partial_containment() {
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12), BlockHeight(13)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12), BlockHeight(14)],
        };
        // Each request has a height the other lacks, so neither subsumes.
        assert!(!req1.subsumes(&req2));
        assert!(!req2.subsumes(&req1));
    }

    #[test]
    fn test_subsumes_different_chains() {
        let chain1 = ChainId(CryptoHash::test_hash("chain1"));
        let chain2 = ChainId(CryptoHash::test_hash("chain2"));
        let req1 = RequestKey::Certificates {
            chain_id: chain1,
            heights: vec![BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id: chain2,
            heights: vec![BlockHeight(12)],
        };
        // Identical heights on different chains never subsume.
        assert!(!req1.subsumes(&req2));
    }

    // Helper function to create a test certificate at a specific height
    fn make_test_cert(
        height: u64,
        chain_id: ChainId,
    ) -> linera_chain::types::ConfirmedBlockCertificate {
        use linera_base::{
            crypto::ValidatorKeypair,
            data_types::{Round, Timestamp},
        };
        use linera_chain::{
            block::ConfirmedBlock,
            data_types::{BlockExecutionOutcome, LiteValue, LiteVote},
            test::{make_first_block, BlockTestExt, VoteTestExt},
        };
        let keypair = ValidatorKeypair::generate();
        let mut proposed_block = make_first_block(chain_id).with_timestamp(Timestamp::from(height));
        // Set the correct height
        proposed_block.height = BlockHeight(height);
        // Create a Block from the proposed block with default execution outcome
        let block = BlockExecutionOutcome::default().with(proposed_block);
        // Create a ConfirmedBlock
        let confirmed_block = ConfirmedBlock::new(block);
        // Create a LiteVote and convert to Vote
        let lite_vote = LiteVote::new(
            LiteValue::new(&confirmed_block),
            Round::MultiLeader(0),
            &keypair.secret_key,
        );
        // Convert to full vote
        let vote = lite_vote.with_value(confirmed_block).unwrap();
        // Convert vote to certificate
        vote.into_certificate(keypair.secret_key.public())
    }

    #[test]
    fn test_try_extract_result_non_certificate_result() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12)],
        };
        // Non-certificate result should return None
        let blob_result = RequestResult::Blob(None);
        assert!(req1.try_extract_result(&req2, &blob_result).is_none());
    }

    #[test]
    fn test_try_extract_result_empty_request_range() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10)],
        };
        let certs = vec![make_test_cert(10, chain_id)];
        let result = RequestResult::Certificates(certs);
        // Empty request is always extractable, should return empty result
        match req1.try_extract_result(&req2, &result) {
            Some(RequestResult::Certificates(extracted_certs)) => {
                assert!(extracted_certs.is_empty());
            }
            _ => panic!("Expected Some empty Certificates result"),
        }
    }

    #[test]
    fn test_try_extract_result_empty_result_range() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12)],
        };
        let result = RequestResult::Certificates(vec![]); // Empty result
        // Empty result should return None
        assert!(req1.try_extract_result(&req2, &result).is_none());
    }

    #[test]
    fn test_try_extract_result_non_overlapping_ranges() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let new_req = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10)],
        };
        let in_flight_req = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(11)],
        };
        // Result does not contain all requested heights
        let certs = vec![make_test_cert(11, chain_id)];
        let result = RequestResult::Certificates(certs);
        // No overlap, should return None
        assert!(new_req
            .try_extract_result(&in_flight_req, &result)
            .is_none());
    }

    #[test]
    fn test_try_extract_result_partial_overlap_missing_start() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(11), BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(11), BlockHeight(12)],
        };
        // Result missing the first height (10)
        let certs = vec![make_test_cert(11, chain_id), make_test_cert(12, chain_id)];
        let result = RequestResult::Certificates(certs);
        // Missing start height, should return None
        assert!(req1.try_extract_result(&req2, &result).is_none());
    }

    #[test]
    fn test_try_extract_result_partial_overlap_missing_end() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(11), BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(11)],
        };
        // Result missing the last height (12)
        let certs = vec![make_test_cert(10, chain_id), make_test_cert(11, chain_id)];
        let result = RequestResult::Certificates(certs);
        // Missing end height, should return None
        assert!(req1.try_extract_result(&req2, &result).is_none());
    }

    #[test]
    fn test_try_extract_result_partial_overlap_missing_middle() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let new_req = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(12), BlockHeight(13)],
        };
        let in_flight_req = RequestKey::Certificates {
            chain_id,
            heights: vec![
                BlockHeight(10),
                BlockHeight(12),
                BlockHeight(13),
                BlockHeight(14),
            ],
        };
        // The result is missing height 12, which both requests need.
        let certs = vec![
            make_test_cert(10, chain_id),
            make_test_cert(13, chain_id),
            make_test_cert(14, chain_id),
        ];
        let result = RequestResult::Certificates(certs);
        assert!(new_req
            .try_extract_result(&in_flight_req, &result)
            .is_none());
        assert!(in_flight_req
            .try_extract_result(&new_req, &result)
            .is_none());
    }

    #[test]
    fn test_try_extract_result_exact_match() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(11), BlockHeight(12)],
        }; // [10, 11, 12]
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(11), BlockHeight(12)],
        };
        let certs = vec![
            make_test_cert(10, chain_id),
            make_test_cert(11, chain_id),
            make_test_cert(12, chain_id),
        ];
        let result = RequestResult::Certificates(certs.clone());
        // Exact match should return all certificates
        let extracted = req1.try_extract_result(&req2, &result);
        assert!(extracted.is_some());
        match extracted.unwrap() {
            RequestResult::Certificates(extracted_certs) => {
                assert_eq!(extracted_certs, certs);
            }
            _ => panic!("Expected Certificates result"),
        }
    }

    #[test]
    fn test_try_extract_result_superset_extraction() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12), BlockHeight(13)],
        };
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(12), BlockHeight(13)],
        };
        // Result has more certificates than requested
        let certs = vec![
            make_test_cert(10, chain_id),
            make_test_cert(11, chain_id),
            make_test_cert(12, chain_id),
            make_test_cert(13, chain_id),
            make_test_cert(14, chain_id),
        ];
        let result = RequestResult::Certificates(certs);
        // Should extract only the requested range [12, 13]
        let extracted = req1.try_extract_result(&req2, &result);
        assert!(extracted.is_some());
        match extracted.unwrap() {
            RequestResult::Certificates(extracted_certs) => {
                assert_eq!(extracted_certs.len(), 2);
                assert_eq!(extracted_certs[0].value().height(), BlockHeight(12));
                assert_eq!(extracted_certs[1].value().height(), BlockHeight(13));
            }
            _ => panic!("Expected Certificates result"),
        }
    }

    #[test]
    fn test_try_extract_result_single_height() {
        use super::RequestResult;
        let chain_id = ChainId(CryptoHash::test_hash("chain1"));
        let req1 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(15)],
        }; // [15]
        let req2 = RequestKey::Certificates {
            chain_id,
            heights: vec![BlockHeight(10), BlockHeight(15), BlockHeight(20)],
        };
        let certs = vec![
            make_test_cert(10, chain_id),
            make_test_cert(15, chain_id),
            make_test_cert(20, chain_id),
        ];
        let result = RequestResult::Certificates(certs);
        // Should extract only height 15
        let extracted = req1.try_extract_result(&req2, &result);
        assert!(extracted.is_some());
        match extracted.unwrap() {
            RequestResult::Certificates(extracted_certs) => {
                assert_eq!(extracted_certs.len(), 1);
                assert_eq!(extracted_certs[0].value().height(), BlockHeight(15));
            }
            _ => panic!("Expected Certificates result"),
        }
    }

    #[test]
    fn test_try_extract_result_different_chains() {
        use super::RequestResult;
        let chain1 = ChainId(CryptoHash::test_hash("chain1"));
        let chain2 = ChainId(CryptoHash::test_hash("chain2"));
        let req1 = RequestKey::Certificates {
            chain_id: chain1,
            heights: vec![BlockHeight(12)],
        };
        let req2 = RequestKey::Certificates {
            chain_id: chain2,
            heights: vec![BlockHeight(12)],
        };
        let certs = vec![make_test_cert(12, chain1)];
        let result = RequestResult::Certificates(certs);
        // Different chains should return None
        assert!(req1.try_extract_result(&req2, &result).is_none());
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/cache.rs | linera-core/src/client/requests_scheduler/cache.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashMap, sync::Arc};
use linera_base::time::{Duration, Instant};
#[cfg(with_metrics)]
use super::scheduler::metrics;
/// Cached result entry with timestamp for TTL expiration
#[derive(Debug, Clone)]
pub(super) struct CacheEntry<R> {
    /// The cached result; `Arc` so it can be shared cheaply with readers.
    result: Arc<R>,
    /// Insertion time, compared against the cache's TTL for expiration.
    cached_at: Instant,
}
/// Cache for request results with TTL-based expiration and LRU eviction.
///
/// This cache supports:
/// - Exact match lookups
/// - Subsumption-based lookups (larger requests can satisfy smaller ones)
/// - TTL-based expiration
/// - LRU eviction
///
/// Subsumption lookups scan all entries linearly, so `max_cache_size` also
/// bounds the per-lookup cost.
#[derive(Debug, Clone)]
pub(super) struct RequestsCache<K, R> {
    /// Cache of recently completed requests with their results and timestamps.
    /// Used to avoid re-executing requests for the same data within the TTL window.
    cache: Arc<tokio::sync::RwLock<HashMap<K, CacheEntry<R>>>>,
    /// Time-to-live for cached entries. Entries older than this duration are considered expired.
    cache_ttl: Duration,
    /// Maximum number of entries to store in the cache. When exceeded, oldest entries are evicted (LRU).
    max_cache_size: usize,
}
impl<K, R> RequestsCache<K, R>
where
    K: Eq + std::hash::Hash + std::fmt::Debug + Clone + SubsumingKey<R>,
    R: Clone + std::fmt::Debug,
{
    /// Creates a new `RequestsCache` with the specified TTL and maximum size.
    ///
    /// # Arguments
    /// - `cache_ttl`: Time-to-live for cached entries
    /// - `max_cache_size`: Maximum number of entries in the cache
    pub(super) fn new(cache_ttl: Duration, max_cache_size: usize) -> Self {
        Self {
            cache: Arc::new(tokio::sync::RwLock::new(HashMap::new())),
            cache_ttl,
            max_cache_size,
        }
    }

    /// Attempts to retrieve a cached result for the given key.
    ///
    /// This method performs both exact match lookups and subsumption-based lookups.
    /// If a larger request that contains all the data needed by this request is cached,
    /// we can extract the subset result instead of making a new request.
    ///
    /// Entries older than `cache_ttl` are skipped, so an expired result is
    /// never served even if it has not been evicted yet.
    ///
    /// # Returns
    /// - `Some(T)` if a fresh cached result is found (either exact or subsumed)
    /// - `None` if no suitable cached result exists
    pub(super) async fn get<T>(&self, key: &K) -> Option<T>
    where
        T: TryFrom<R>,
    {
        let cache = self.cache.read().await;
        let now = Instant::now();
        // Check cache for exact match first
        if let Some(entry) = cache.get(key) {
            if now.duration_since(entry.cached_at) <= self.cache_ttl {
                tracing::trace!(
                    key = ?key,
                    "cache hit (exact match) - returning cached result"
                );
                #[cfg(with_metrics)]
                metrics::REQUEST_CACHE_HIT.inc();
                return T::try_from((*entry.result).clone()).ok();
            }
        }
        // Check cache for subsuming requests
        for (cached_key, entry) in cache.iter() {
            if now.duration_since(entry.cached_at) > self.cache_ttl {
                continue; // Never serve expired entries.
            }
            if cached_key.subsumes(key) {
                if let Some(extracted) = key.try_extract_result(cached_key, &entry.result) {
                    tracing::trace!(
                        key = ?key,
                        "cache hit (subsumption) - extracted result from larger cached request"
                    );
                    #[cfg(with_metrics)]
                    metrics::REQUEST_CACHE_HIT.inc();
                    return T::try_from(extracted).ok();
                }
            }
        }
        None
    }

    /// Stores a result in the cache with LRU eviction if cache is full.
    ///
    /// Expired entries are evicted first. If the cache is still over capacity
    /// after inserting (i.e. all resident entries are fresh), the oldest
    /// entries by `cached_at` are dropped so `max_cache_size` is actually
    /// enforced rather than only being a trigger for TTL cleanup.
    ///
    /// # Arguments
    /// - `key`: The request key to cache
    /// - `result`: The result to cache
    pub(super) async fn store(&self, key: K, result: Arc<R>) {
        self.evict_expired_entries().await; // Clean up expired entries first
        let mut cache = self.cache.write().await;
        // Insert new entry
        cache.insert(
            key.clone(),
            CacheEntry {
                result,
                cached_at: Instant::now(),
            },
        );
        // Enforce the size bound: expired-entry eviction may not have freed
        // any space, so drop the oldest entries until we fit.
        while cache.len() > self.max_cache_size {
            let oldest_key = cache
                .iter()
                .min_by_key(|(_key, entry)| entry.cached_at)
                .map(|(key, _entry)| key.clone());
            let Some(oldest_key) = oldest_key else { break };
            cache.remove(&oldest_key);
        }
        tracing::trace!(
            key = ?key,
            "stored result in cache"
        );
    }

    /// Removes all cache entries that are older than the configured cache TTL.
    ///
    /// As a fast path, this is a no-op while the cache is below its maximum
    /// size; expired entries are then filtered lazily by [`Self::get`].
    ///
    /// # Returns
    /// The number of entries that were evicted
    async fn evict_expired_entries(&self) -> usize {
        let mut cache = self.cache.write().await;
        let now = Instant::now();
        // Not strictly smaller b/c we want to add a new entry after eviction.
        if cache.len() < self.max_cache_size {
            return 0; // No need to evict if under max size
        }
        let mut expired_keys = 0usize;
        cache.retain(|_key, entry| {
            if now.duration_since(entry.cached_at) > self.cache_ttl {
                expired_keys += 1;
                false
            } else {
                true
            }
        });
        if expired_keys > 0 {
            tracing::trace!(count = expired_keys, "evicted expired cache entries");
        }
        expired_keys
    }
}
/// Trait for request keys that support subsumption-based matching and result extraction.
///
/// Implemented by request keys so that [`RequestsCache`] can reuse the result
/// of a larger cached request to answer a smaller one.
pub(super) trait SubsumingKey<R> {
    /// Checks if this request fully subsumes another request.
    ///
    /// Request `self` subsumes request `other` if `self`'s result would contain all the data that
    /// `other`'s result would contain. This means `other`'s request is redundant if `self` is already
    /// in-flight or cached.
    fn subsumes(&self, other: &Self) -> bool;
    /// Attempts to extract a subset result for this request from a larger request's result.
    ///
    /// This is used when a request A subsumes this request B. We can extract B's result
    /// from A's result by filtering the certificates to only those requested by B.
    ///
    /// # Arguments
    /// - `from`: The key of the larger request that subsumes this one
    /// - `result`: The result from the larger request
    ///
    /// # Returns
    /// - `Some(result)` with the extracted subset if possible
    /// - `None` if extraction is not possible (wrong variant, different chain, etc.)
    fn try_extract_result(&self, from: &Self, result: &R) -> Option<R>;
}
#[cfg(test)]
mod tests {
    //! Tests for `RequestsCache` lookup, subsumption, and eviction behavior,
    //! using a simple integer-range key as a stand-in for real request keys.

    use std::sync::Arc;

    use linera_base::time::Duration;

    use super::*;

    // Mock key type for testing: represents a range request [start, end]
    #[derive(Debug, Clone, PartialEq, Eq, Hash)]
    struct RangeKey {
        start: u64,
        end: u64,
    }

    // Mock result type: vector of values in the range
    #[derive(Debug, Clone, PartialEq)]
    struct RangeResult(Vec<u64>);

    impl SubsumingKey<RangeResult> for RangeKey {
        fn subsumes(&self, other: &Self) -> bool {
            // This range subsumes another if it contains the other's range
            self.start <= other.start && self.end >= other.end
        }
        fn try_extract_result(&self, from: &Self, result: &RangeResult) -> Option<RangeResult> {
            if !from.subsumes(self) {
                return None;
            }
            // Extract values that fall within our range
            let filtered: Vec<u64> = result
                .0
                .iter()
                .filter(|&&v| v >= self.start && v <= self.end)
                .copied()
                .collect();
            Some(RangeResult(filtered))
        }
    }

    #[tokio::test]
    async fn test_cache_miss_on_empty_cache() {
        let cache: RequestsCache<RangeKey, RangeResult> =
            RequestsCache::new(Duration::from_secs(60), 10);
        let key = RangeKey { start: 0, end: 5 };
        let result: Option<RangeResult> = cache.get(&key).await;
        assert!(result.is_none());
    }

    #[tokio::test]
    async fn test_exact_match_hit() {
        let cache = RequestsCache::new(Duration::from_secs(60), 10);
        let key = RangeKey { start: 0, end: 5 };
        let result = RangeResult(vec![0, 1, 2, 3, 4, 5]);
        cache.store(key.clone(), Arc::new(result.clone())).await;
        let retrieved: Option<RangeResult> = cache.get(&key).await;
        assert_eq!(retrieved, Some(result));
    }

    #[tokio::test]
    async fn test_exact_match_takes_priority_over_subsumption() {
        let cache = RequestsCache::new(Duration::from_secs(60), 10);
        // Store a larger range
        let large_key = RangeKey { start: 0, end: 10 };
        let large_result = RangeResult(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
        cache
            .store(large_key.clone(), Arc::new(large_result.clone()))
            .await;
        // Store an exact match
        let exact_key = RangeKey { start: 2, end: 5 };
        let exact_result = RangeResult(vec![2, 3, 4, 5]);
        cache
            .store(exact_key.clone(), Arc::new(exact_result.clone()))
            .await;
        // Should get exact match, not extracted from larger range
        let retrieved: Option<RangeResult> = cache.get(&exact_key).await;
        assert_eq!(retrieved, Some(exact_result));
    }

    #[tokio::test]
    async fn test_subsumption_hit() {
        let cache = RequestsCache::new(Duration::from_secs(60), 10);
        // Store a larger range
        let large_key = RangeKey { start: 0, end: 10 };
        let large_result = RangeResult(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
        cache.store(large_key, Arc::new(large_result.clone())).await;
        // Request a subset
        let subset_key = RangeKey { start: 3, end: 7 };
        let retrieved: Option<RangeResult> = cache.get(&subset_key).await;
        assert_eq!(retrieved, Some(RangeResult(vec![3, 4, 5, 6, 7])));
    }

    #[tokio::test]
    async fn test_subsumption_miss_when_no_overlap() {
        let cache = RequestsCache::new(Duration::from_secs(60), 10);
        let key1 = RangeKey { start: 0, end: 5 };
        let result1 = RangeResult(vec![0, 1, 2, 3, 4, 5]);
        cache.store(key1, Arc::new(result1)).await;
        // Non-overlapping range
        let key2 = RangeKey { start: 10, end: 15 };
        let retrieved: Option<RangeResult> = cache.get(&key2).await;
        assert!(retrieved.is_none());
    }

    #[tokio::test]
    async fn test_eviction_when_exceeding_max_size() {
        let cache_size = 3u64;
        let cache = RequestsCache::new(Duration::from_millis(50), cache_size as usize);
        // Fill cache to max size with keys { i*10, i*10 }.
        for i in 0..cache_size {
            let key = RangeKey {
                start: i * 10,
                end: i * 10,
            };
            cache.store(key, Arc::new(RangeResult(vec![i * 10]))).await;
            // Small delay to ensure different timestamps
            tokio::time::sleep(tokio::time::Duration::from_millis(5)).await;
        }
        // Wait for first entry to expire
        tokio::time::sleep(tokio::time::Duration::from_millis(60)).await;
        // Cache is now at max size (3) with expired entries, so next store triggers eviction.
        let key_4 = RangeKey {
            start: 100,
            end: 100,
        };
        cache
            .store(key_4.clone(), Arc::new(RangeResult(vec![100])))
            .await;
        let cache_guard = cache.cache.read().await;
        // Expired entries should have been evicted. Note: the first inserted
        // key is `{ start: 0, end: 0 }` (previously this checked `{0, 5}`,
        // a key that was never inserted, making the assertion vacuous).
        let first_key = RangeKey { start: 0, end: 0 };
        assert!(!cache_guard.contains_key(&first_key));
        // Latest entries should still be there
        assert!(cache_guard.contains_key(&key_4));
    }

    #[tokio::test]
    async fn test_subsumption_with_extraction_failure_tries_next() {
        // Mock key that subsumes but extraction returns None
        #[derive(Debug, Clone, PartialEq, Eq, Hash)]
        struct FailingKey {
            id: u64,
            always_fail_extraction: bool,
        }
        #[derive(Debug, Clone, PartialEq)]
        struct SimpleResult(u64);
        impl SubsumingKey<SimpleResult> for FailingKey {
            fn subsumes(&self, other: &Self) -> bool {
                self.id >= other.id
            }
            fn try_extract_result(
                &self,
                from: &Self,
                _result: &SimpleResult,
            ) -> Option<SimpleResult> {
                if from.always_fail_extraction {
                    None
                } else {
                    Some(SimpleResult(self.id))
                }
            }
        }
        let cache = RequestsCache::<FailingKey, SimpleResult>::new(Duration::from_secs(60), 10);
        // Store entry that subsumes but fails extraction
        let failing_key = FailingKey {
            id: 10,
            always_fail_extraction: true,
        };
        cache.store(failing_key, Arc::new(SimpleResult(10))).await;
        // Store entry that subsumes and succeeds extraction
        let working_key = FailingKey {
            id: 20,
            always_fail_extraction: false,
        };
        cache.store(working_key, Arc::new(SimpleResult(20))).await;
        // Request should find the working one
        let target_key = FailingKey {
            id: 5,
            always_fail_extraction: false,
        };
        let retrieved: Option<SimpleResult> = cache.get(&target_key).await;
        assert!(retrieved.is_some());
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/src/client/requests_scheduler/scheduler.rs | linera-core/src/client/requests_scheduler/scheduler.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::BTreeMap,
future::Future,
sync::{
atomic::{AtomicU32, Ordering},
Arc,
},
};
use custom_debug_derive::Debug;
use futures::stream::{FuturesUnordered, StreamExt};
use linera_base::{
crypto::ValidatorPublicKey,
data_types::{Blob, BlobContent, BlockHeight},
identifiers::{BlobId, ChainId},
time::{Duration, Instant},
};
use linera_chain::types::ConfirmedBlockCertificate;
use rand::{
distributions::{Distribution, WeightedIndex},
prelude::SliceRandom as _,
};
use tracing::instrument;
use super::{
cache::{RequestsCache, SubsumingKey},
in_flight_tracker::{InFlightMatch, InFlightTracker},
node_info::NodeInfo,
request::{RequestKey, RequestResult},
scoring::ScoringWeights,
};
use crate::{
client::{
communicate_concurrently,
requests_scheduler::{in_flight_tracker::Subscribed, request::Cacheable},
RequestsSchedulerConfig,
},
environment::Environment,
node::{NodeError, ValidatorNode},
remote_node::RemoteNode,
};
#[cfg(with_metrics)]
pub(super) mod metrics {
    //! Prometheus metrics for the requests scheduler, compiled only when the
    //! `with_metrics` cfg is enabled.

    use std::sync::LazyLock;

    use linera_base::prometheus_util::{
        exponential_bucket_latencies, register_histogram_vec, register_int_counter,
        register_int_counter_vec,
    };
    use prometheus::{HistogramVec, IntCounter, IntCounterVec};

    /// Histogram of response times per validator (in milliseconds)
    pub(super) static VALIDATOR_RESPONSE_TIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "requests_scheduler_response_time_ms",
            "Response time for requests to validators in milliseconds",
            &["validator"],
            exponential_bucket_latencies(10000.0), // up to 10 seconds
        )
    });
    /// Counter of total requests made to each validator
    pub(super) static VALIDATOR_REQUEST_TOTAL: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "requests_scheduler_request_total",
            "Total number of requests made to each validator",
            &["validator"],
        )
    });
    /// Counter of successful requests per validator
    pub(super) static VALIDATOR_REQUEST_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "requests_scheduler_request_success",
            "Number of successful requests to each validator",
            &["validator"],
        )
    });
    /// Counter for requests that were resolved from the response cache.
    pub(super) static REQUEST_CACHE_DEDUPLICATION: LazyLock<IntCounter> = LazyLock::new(|| {
        register_int_counter(
            "requests_scheduler_request_deduplication_total",
            "Number of requests that were deduplicated by finding the result in the cache.",
        )
    });
    /// Counter for requests that were served from cache.
    /// `pub` (not `pub(super)`) because it is also incremented from the
    /// sibling `cache` module.
    pub static REQUEST_CACHE_HIT: LazyLock<IntCounter> = LazyLock::new(|| {
        register_int_counter(
            "requests_scheduler_request_cache_hit_total",
            "Number of requests that were served from cache",
        )
    });
}
/// Manages a pool of validator nodes with intelligent load balancing and performance tracking.
///
/// The `RequestsScheduler` maintains performance metrics for each validator node using
/// Exponential Moving Averages (EMA) and uses these metrics to make intelligent routing
/// decisions. It prevents node overload through request capacity limits and automatically
/// retries failed requests on alternative nodes.
///
/// # Examples
///
/// ```ignore
/// // Create with default scoring weights, tunables taken from the config.
/// let manager = RequestsScheduler::new(validator_nodes, config);
///
/// // Create with custom configuration prioritizing low latency
/// let latency_weights = ScoringWeights {
///     latency: 0.6,
///     success: 0.3,
///     load: 0.1,
/// };
/// let manager = RequestsScheduler::with_config(
///     validator_nodes,
///     latency_weights,            // custom scoring weights
///     0.2,                        // higher alpha for faster adaptation
///     3000.0,                     // max expected latency (3 seconds)
///     Duration::from_secs(60),    // 60 second cache TTL
///     200,                        // cache up to 200 entries
///     Duration::from_secs(10),    // stop deduplicating in-flight requests older than this
///     Duration::from_millis(100), // delay between starting requests to alternative peers
/// );
/// ```
#[derive(Debug, Clone)]
pub struct RequestsScheduler<Env: Environment> {
    /// Thread-safe map of validator nodes indexed by their public keys.
    /// Each node is wrapped with EMA-based performance tracking information.
    nodes: Arc<tokio::sync::RwLock<BTreeMap<ValidatorPublicKey, NodeInfo<Env>>>>,
    /// Default scoring weights applied to new nodes.
    weights: ScoringWeights,
    /// Default EMA smoothing factor for new nodes.
    alpha: f64,
    /// Default maximum expected latency in milliseconds for score normalization.
    max_expected_latency: f64,
    /// Delay between starting requests to alternative peers.
    retry_delay: Duration,
    /// Tracks in-flight requests to deduplicate concurrent requests for the same data.
    in_flight_tracker: InFlightTracker<RemoteNode<Env::ValidatorNode>>,
    /// Cache of recently completed requests with their results and timestamps.
    cache: RequestsCache<RequestKey, RequestResult>,
}
impl<Env: Environment> RequestsScheduler<Env> {
    /// Creates a new `RequestsScheduler` with the provided configuration.
    ///
    /// Uses default [`ScoringWeights`]; all other tunables (EMA alpha, latency
    /// bound, cache TTL/size, in-flight request TTL, retry delay) come from
    /// `config`, with millisecond fields converted into [`Duration`]s.
    pub fn new(
        nodes: impl IntoIterator<Item = RemoteNode<Env::ValidatorNode>>,
        config: RequestsSchedulerConfig,
    ) -> Self {
        Self::with_config(
            nodes,
            ScoringWeights::default(),
            config.alpha,
            config.max_accepted_latency_ms,
            Duration::from_millis(config.cache_ttl_ms),
            config.cache_max_size,
            Duration::from_millis(config.max_request_ttl_ms),
            Duration::from_millis(config.retry_delay_ms),
        )
    }
    /// Creates a new `RequestsScheduler` with custom configuration.
    ///
    /// # Arguments
    /// - `nodes`: Initial set of validator nodes
    /// - `weights`: Scoring weights for performance metrics
    /// - `alpha`: EMA smoothing factor (0 < alpha < 1)
    /// - `max_expected_latency_ms`: Maximum expected latency for score normalization
    /// - `cache_ttl`: Time-to-live for cached responses
    /// - `max_cache_size`: Maximum number of entries in the cache
    /// - `max_request_ttl`: Maximum latency for an in-flight request before we stop deduplicating it
    /// - `retry_delay`: Delay between starting requests to different peers
    ///
    /// # Panics
    /// Panics if `alpha` is not strictly between 0 and 1.
    #[allow(clippy::too_many_arguments)]
    pub fn with_config(
        nodes: impl IntoIterator<Item = RemoteNode<Env::ValidatorNode>>,
        weights: ScoringWeights,
        alpha: f64,
        max_expected_latency_ms: f64,
        cache_ttl: Duration,
        max_cache_size: usize,
        max_request_ttl: Duration,
        retry_delay: Duration,
    ) -> Self {
        assert!(alpha > 0.0 && alpha < 1.0, "Alpha must be in (0, 1) range");
        Self {
            nodes: Arc::new(tokio::sync::RwLock::new(
                nodes
                    .into_iter()
                    .map(|node| {
                        // Each node starts with the scheduler-wide defaults for
                        // scoring, smoothing, and latency normalization.
                        (
                            node.public_key,
                            NodeInfo::with_config(node, weights, alpha, max_expected_latency_ms),
                        )
                    })
                    .collect(),
            )),
            weights,
            alpha,
            max_expected_latency: max_expected_latency_ms,
            retry_delay,
            in_flight_tracker: InFlightTracker::new(max_request_ttl),
            cache: RequestsCache::new(cache_ttl, max_cache_size),
        }
    }
/// Executes an operation with an automatically selected peer, handling deduplication,
/// tracking, and peer selection.
///
/// This method provides a high-level API for executing operations against remote nodes
/// while leveraging the [`RequestsScheduler`]'s intelligent peer selection, performance tracking,
/// and request deduplication capabilities.
///
/// # Type Parameters
/// - `R`: The inner result type (what the operation returns on success)
/// - `F`: The async closure type that takes a `RemoteNode` and returns a future
/// - `Fut`: The future type returned by the closure
///
/// # Arguments
/// - `key`: Unique identifier for request deduplication
/// - `operation`: Async closure that takes a selected peer and performs the operation
///
/// # Returns
/// The result from the operation, potentially from cache or a deduplicated in-flight request
///
/// # Example
/// ```ignore
/// let result: Result<Vec<ConfirmedBlockCertificate>, NodeError> = requests_scheduler
/// .with_best(
/// RequestKey::Certificates { chain_id, start, limit },
/// |peer| async move {
/// peer.download_certificates_from(chain_id, start, limit).await
/// }
/// )
/// .await;
/// ```
#[allow(unused)]
async fn with_best<R, F, Fut>(&self, key: RequestKey, operation: F) -> Result<R, NodeError>
where
R: Cacheable + Clone + Send + 'static,
F: Fn(RemoteNode<Env::ValidatorNode>) -> Fut,
Fut: Future<Output = Result<R, NodeError>> + 'static,
{
// Select the best available peer
let peer = self
.select_best_peer()
.await
.ok_or_else(|| NodeError::WorkerError {
error: "No validators available".to_string(),
})?;
self.with_peer(key, peer, operation).await
}
    /// Executes an operation with a specific peer.
    ///
    /// Similar to [`with_best`](Self::with_best), but uses the provided peer directly
    /// instead of selecting the best available peer. This is useful when you need to
    /// query a specific validator node.
    ///
    /// The peer is first registered via `add_peer` and recorded as an
    /// alternative peer for `key` in the in-flight tracker before the
    /// deduplicated request is issued.
    ///
    /// # Type Parameters
    /// - `R`: The inner result type (what the operation returns on success)
    /// - `F`: The async closure type that takes a `RemoteNode` and returns a future
    /// - `Fut`: The future type returned by the closure
    ///
    /// # Arguments
    /// - `key`: Unique identifier for request deduplication
    /// - `peer`: The specific peer to use for the operation
    /// - `operation`: Async closure that takes the peer and performs the operation
    ///
    /// # Returns
    /// The result from the operation, potentially from cache or a deduplicated in-flight request
    async fn with_peer<R, F, Fut>(
        &self,
        key: RequestKey,
        peer: RemoteNode<Env::ValidatorNode>,
        operation: F,
    ) -> Result<R, NodeError>
    where
        R: Cacheable + Clone + Send + 'static,
        F: Fn(RemoteNode<Env::ValidatorNode>) -> Fut,
        Fut: Future<Output = Result<R, NodeError>> + 'static,
    {
        self.add_peer(peer.clone()).await;
        self.in_flight_tracker
            .add_alternative_peer(&key, peer.clone())
            .await;
        // Clone the nodes Arc so we can move it into the closure
        let nodes = self.nodes.clone();
        // Every attempt is routed through `Self::track_request` with the node
        // map, presumably to record per-node statistics — see `track_request`.
        self.deduplicated_request(key, peer, move |peer| {
            let fut = operation(peer.clone());
            let nodes = nodes.clone();
            async move { Self::track_request(nodes, peer, fut).await }
        })
        .await
    }
    /// Downloads a single blob, racing the given peers against each other.
    ///
    /// Peers are shuffled for load spreading, then queried via
    /// `communicate_concurrently` until one returns the blob (or all fail or the
    /// timeout elapses). Each per-peer attempt goes through
    /// [`with_peer`](Self::with_peer), so it benefits from caching and
    /// deduplication keyed on the blob ID.
    #[instrument(level = "trace", skip_all)]
    async fn download_blob(
        &self,
        peers: &[RemoteNode<Env::ValidatorNode>],
        blob_id: BlobId,
        timeout: Duration,
    ) -> Result<Option<Blob>, NodeError> {
        let key = RequestKey::Blob(blob_id);
        // Randomize the peer order so repeated calls don't always hit the same node.
        let mut peers = peers.to_vec();
        peers.shuffle(&mut rand::thread_rng());
        communicate_concurrently(
            &peers,
            async move |peer| {
                self.with_peer(key, peer, move |peer| async move {
                    peer.download_blob(blob_id).await
                })
                .await
            },
            // NOTE(review): assumes `communicate_concurrently` only invokes this
            // combiner with a non-empty error list — `unwrap` would panic otherwise.
            |errors| errors.last().cloned().unwrap(),
            timeout,
        )
        .await
        .map_err(|(_validator, error)| error)
    }
/// Downloads the blobs with the given IDs. This is done in one concurrent task per blob.
/// Uses intelligent peer selection based on scores and load balancing.
/// Returns `None` if it couldn't find all blobs.
#[instrument(level = "trace", skip_all)]
pub async fn download_blobs(
&self,
peers: &[RemoteNode<Env::ValidatorNode>],
blob_ids: &[BlobId],
timeout: Duration,
) -> Result<Option<Vec<Blob>>, NodeError> {
let mut stream = blob_ids
.iter()
.map(|blob_id| self.download_blob(peers, *blob_id, timeout))
.collect::<FuturesUnordered<_>>();
let mut blobs = Vec::new();
while let Some(maybe_blob) = stream.next().await {
blobs.push(maybe_blob?);
}
Ok(blobs.into_iter().collect::<Option<Vec<_>>>())
}
pub async fn download_certificates(
&self,
peer: &RemoteNode<Env::ValidatorNode>,
chain_id: ChainId,
start: BlockHeight,
limit: u64,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
let heights = (start.0..start.0 + limit)
.map(BlockHeight)
.collect::<Vec<_>>();
self.with_peer(
RequestKey::Certificates {
chain_id,
heights: heights.clone(),
},
peer.clone(),
move |peer| {
let heights = heights.clone();
async move {
Box::pin(peer.download_certificates_by_heights(chain_id, heights)).await
}
},
)
.await
}
pub async fn download_certificates_by_heights(
&self,
peer: &RemoteNode<Env::ValidatorNode>,
chain_id: ChainId,
heights: Vec<BlockHeight>,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
self.with_peer(
RequestKey::Certificates {
chain_id,
heights: heights.clone(),
},
peer.clone(),
move |peer| {
let heights = heights.clone();
async move {
peer.download_certificates_by_heights(chain_id, heights)
.await
}
},
)
.await
}
pub async fn download_certificate_for_blob(
&self,
peer: &RemoteNode<Env::ValidatorNode>,
blob_id: BlobId,
) -> Result<ConfirmedBlockCertificate, NodeError> {
self.with_peer(
RequestKey::CertificateForBlob(blob_id),
peer.clone(),
move |peer| async move { peer.download_certificate_for_blob(blob_id).await },
)
.await
}
pub async fn download_pending_blob(
&self,
peer: &RemoteNode<Env::ValidatorNode>,
chain_id: ChainId,
blob_id: BlobId,
) -> Result<BlobContent, NodeError> {
self.with_peer(
RequestKey::PendingBlob { chain_id, blob_id },
peer.clone(),
move |peer| async move { peer.node.download_pending_blob(chain_id, blob_id).await },
)
.await
}
/// Returns the alternative peers registered for an in-flight request, if any.
///
/// This can be used to retry a failed request with alternative data sources
/// that were registered during request deduplication.
pub async fn get_alternative_peers(
&self,
key: &RequestKey,
) -> Option<Vec<RemoteNode<Env::ValidatorNode>>> {
self.in_flight_tracker.get_alternative_peers(key).await
}
/// Returns current performance metrics for all managed nodes.
///
/// Each entry contains:
/// - Performance score (f64, normalized 0.0-1.0)
/// - EMA success rate (f64, 0.0-1.0)
/// - Total requests processed (u64)
///
/// Useful for monitoring and debugging node performance.
pub async fn get_node_scores(&self) -> BTreeMap<ValidatorPublicKey, (f64, f64, u64)> {
let nodes = self.nodes.read().await;
let mut result = BTreeMap::new();
for (key, info) in nodes.iter() {
let score = info.calculate_score().await;
result.insert(
*key,
(score, info.ema_success_rate(), info.total_requests()),
);
}
result
}
    /// Wraps a request operation with performance tracking and capacity management.
    ///
    /// This method:
    /// 1. Measures response time
    /// 2. Updates node metrics based on success/failure
    ///
    /// # Arguments
    /// - `nodes`: Arc to the nodes map for updating metrics
    /// - `peer`: The remote node to track metrics for
    /// - `operation`: Future that performs the actual request
    ///
    /// # Behavior
    /// Executes the provided future and tracks metrics for the given peer.
    /// Per-node metrics are only updated if the peer is still present in
    /// `nodes` when the operation completes.
    async fn track_request<T, Fut>(
        nodes: Arc<tokio::sync::RwLock<BTreeMap<ValidatorPublicKey, NodeInfo<Env>>>>,
        peer: RemoteNode<Env::ValidatorNode>,
        operation: Fut,
    ) -> Result<T, NodeError>
    where
        Fut: Future<Output = Result<T, NodeError>> + 'static,
    {
        let start_time = Instant::now();
        let public_key = peer.public_key;
        // Execute the operation
        let result = operation.await;
        // Update metrics and release slot
        let response_time_ms = start_time.elapsed().as_millis() as u64;
        let is_success = result.is_ok();
        {
            // Scope the write lock so it is dropped before Prometheus recording.
            let mut nodes_guard = nodes.write().await;
            if let Some(info) = nodes_guard.get_mut(&public_key) {
                info.update_metrics(is_success, response_time_ms);
                let score = info.calculate_score().await;
                tracing::trace!(
                    node = %public_key,
                    address = %info.node.node.address(),
                    success = %is_success,
                    response_time_ms = %response_time_ms,
                    score = %score,
                    total_requests = %info.total_requests(),
                    "Request completed"
                );
            }
        }
        // Record Prometheus metrics
        #[cfg(with_metrics)]
        {
            let validator_name = public_key.to_string();
            metrics::VALIDATOR_RESPONSE_TIME
                .with_label_values(&[&validator_name])
                .observe(response_time_ms as f64);
            metrics::VALIDATOR_REQUEST_TOTAL
                .with_label_values(&[&validator_name])
                .inc();
            if is_success {
                metrics::VALIDATOR_REQUEST_SUCCESS
                    .with_label_values(&[&validator_name])
                    .inc();
            }
        }
        result
    }
    /// Deduplicates concurrent requests for the same data.
    ///
    /// If a request for the same key is already in flight, this method waits for
    /// the existing request to complete and returns its result. Otherwise, it
    /// executes the operation and broadcasts the result to all waiting callers.
    ///
    /// This method also performs **subsumption-based deduplication**: if a larger
    /// request that contains all the data needed by this request is already cached
    /// or in flight, we can extract the subset result instead of making a new request.
    ///
    /// Every "join an existing request" path below falls through to executing an
    /// independent request if the shared result cannot be reused (conversion or
    /// extraction failure, failed request, or a dropped sender).
    ///
    /// # Arguments
    /// - `key`: Unique identifier for the request
    /// - `peer`: First peer to try if the request has to be executed
    /// - `operation`: Async closure that performs the actual request
    ///
    /// # Returns
    /// The result from either the in-flight request or the newly executed operation
    async fn deduplicated_request<T, F, Fut>(
        &self,
        key: RequestKey,
        peer: RemoteNode<Env::ValidatorNode>,
        operation: F,
    ) -> Result<T, NodeError>
    where
        T: Cacheable + Clone + Send + 'static,
        F: Fn(RemoteNode<Env::ValidatorNode>) -> Fut,
        Fut: Future<Output = Result<T, NodeError>> + 'static,
    {
        // Check cache for exact or subsuming match
        if let Some(result) = self.cache.get(&key).await {
            return Ok(result);
        }
        // Check if there's an in-flight request (exact or subsuming)
        if let Some(in_flight_match) = self.in_flight_tracker.try_subscribe(&key).await {
            match in_flight_match {
                InFlightMatch::Exact(Subscribed(mut receiver)) => {
                    tracing::trace!(
                        ?key,
                        "deduplicating request (exact match) - joining existing in-flight request"
                    );
                    #[cfg(with_metrics)]
                    metrics::REQUEST_CACHE_DEDUPLICATION.inc();
                    // Wait for result from existing request
                    match receiver.recv().await {
                        // The broadcast value is an untyped `RequestResult`; it
                        // has to be converted back into the concrete type `T`.
                        Ok(result) => match result.as_ref().clone() {
                            Ok(res) => match T::try_from(res) {
                                Ok(converted) => {
                                    tracing::trace!(
                                        ?key,
                                        "received result from deduplicated in-flight request"
                                    );
                                    return Ok(converted);
                                }
                                Err(_) => {
                                    tracing::warn!(
                                        ?key,
                                        "failed to convert result from deduplicated in-flight request, will execute independently"
                                    );
                                }
                            },
                            Err(error) => {
                                tracing::trace!(
                                    ?key,
                                    %error,
                                    "in-flight request failed",
                                );
                                // Fall through to execute a new request
                            }
                        },
                        Err(_) => {
                            tracing::trace!(?key, "in-flight request sender dropped");
                            // Fall through to execute a new request
                        }
                    }
                }
                InFlightMatch::Subsuming {
                    key: subsuming_key,
                    outcome: Subscribed(mut receiver),
                } => {
                    tracing::trace!(
                        ?key,
                        subsumed_by = ?subsuming_key,
                        "deduplicating request (subsumption) - joining larger in-flight request"
                    );
                    #[cfg(with_metrics)]
                    metrics::REQUEST_CACHE_DEDUPLICATION.inc();
                    // Wait for result from the subsuming request
                    match receiver.recv().await {
                        Ok(result) => {
                            match result.as_ref() {
                                Ok(res) => {
                                    // Carve our subset out of the larger result.
                                    if let Some(extracted) =
                                        key.try_extract_result(&subsuming_key, res)
                                    {
                                        tracing::trace!(
                                            ?key,
                                            "extracted subset result from larger in-flight request"
                                        );
                                        match T::try_from(extracted) {
                                            Ok(converted) => return Ok(converted),
                                            Err(_) => {
                                                tracing::trace!(
                                                    ?key,
                                                    "failed to convert extracted result, will execute independently"
                                                );
                                            }
                                        }
                                    } else {
                                        // Extraction failed, fall through to execute our own request
                                        tracing::trace!(
                                            ?key,
                                            "failed to extract from subsuming request, will execute independently"
                                        );
                                    }
                                }
                                Err(error) => {
                                    tracing::trace!(
                                        ?key,
                                        ?error,
                                        "subsuming in-flight request failed",
                                    );
                                    // Fall through to execute our own request
                                }
                            }
                        }
                        Err(_) => {
                            tracing::trace!(?key, "subsuming in-flight request sender dropped");
                        }
                    }
                }
            }
        };
        // Create new in-flight entry for this request
        self.in_flight_tracker.insert_new(key.clone()).await;
        // Remove the peer we're about to use from alternatives (it shouldn't retry with itself)
        self.in_flight_tracker
            .remove_alternative_peer(&key, &peer)
            .await;
        // Execute request with staggered parallel - first peer starts immediately,
        // alternatives are tried after stagger delays (even if first peer is slow but not failing)
        tracing::trace!(?key, ?peer, "executing staggered parallel request");
        let result = self
            .try_staggered_parallel(&key, peer, &operation, self.retry_delay)
            .await;
        let result_for_broadcast: Result<RequestResult, NodeError> = result.clone().map(Into::into);
        let shared_result = Arc::new(result_for_broadcast);
        // Broadcast result and clean up
        self.in_flight_tracker
            .complete_and_broadcast(&key, shared_result.clone())
            .await;
        // Only successful results are cached for future callers.
        if let Ok(success) = shared_result.as_ref() {
            self.cache
                .store(key.clone(), Arc::new(success.clone()))
                .await;
        }
        result
    }
    /// Tries alternative peers in staggered parallel fashion.
    ///
    /// Launches requests starting with the first peer, then dynamically pops alternative peers
    /// with a stagger delay between each. Returns the first successful result. This provides
    /// a balance between sequential (slow) and fully parallel (wasteful) approaches.
    ///
    /// # Arguments
    /// - `key`: The request key (for logging and popping alternatives)
    /// - `first_peer`: The initial peer to try first (at time 0)
    /// - `operation`: The operation to execute on each peer
    /// - `staggered_delay`: Base delay between starting each subsequent peer; the
    ///   delay before the n-th launch is `staggered_delay * n`
    ///
    /// # Returns
    /// The first successful result, or the last error if all fail
    async fn try_staggered_parallel<T, F, Fut>(
        &self,
        key: &RequestKey,
        first_peer: RemoteNode<Env::ValidatorNode>,
        operation: &F,
        staggered_delay: Duration,
    ) -> Result<T, NodeError>
    where
        T: 'static,
        F: Fn(RemoteNode<Env::ValidatorNode>) -> Fut,
        Fut: Future<Output = Result<T, NodeError>> + 'static,
    {
        use futures::{
            future::{select, Either},
            stream::{FuturesUnordered, StreamExt},
        };
        use linera_base::time::timer::sleep;
        let mut futures: FuturesUnordered<Fut> = FuturesUnordered::new();
        // Counts how many requests have been launched; used to scale the next delay.
        let peer_index = AtomicU32::new(0);
        let push_future = |futures: &mut FuturesUnordered<Fut>, fut: Fut| {
            futures.push(fut);
            peer_index.fetch_add(1, Ordering::SeqCst)
        };
        // Start the first peer immediately (no delay)
        push_future(&mut futures, operation(first_peer));
        let mut last_error = NodeError::UnexpectedMessage;
        let mut next_delay = Box::pin(sleep(staggered_delay * peer_index.load(Ordering::SeqCst)));
        // Phase 1: Race between futures completion and delays (while alternatives might exist)
        loop {
            // Exit condition: no futures running and can't start any more
            if futures.is_empty() {
                if let Some(peer) = self.in_flight_tracker.pop_alternative_peer(key).await {
                    push_future(&mut futures, operation(peer));
                    next_delay =
                        Box::pin(sleep(staggered_delay * peer_index.load(Ordering::SeqCst)));
                } else {
                    // No futures and no alternatives - we're done
                    break;
                }
            }
            // `futures.next()` is recreated every iteration; the pending requests
            // live inside `futures` itself, so none are lost when it is dropped.
            let next_result = Box::pin(futures.next());
            match select(next_result, next_delay).await {
                // A request completed
                Either::Left((Some(result), delay_fut)) => {
                    // Keep the delay future for next iteration
                    next_delay = delay_fut;
                    match result {
                        Ok(value) => {
                            tracing::trace!(?key, "staggered parallel request succeeded");
                            return Ok(value);
                        }
                        Err(error) => {
                            tracing::debug!(
                                ?key,
                                %error,
                                "staggered parallel request attempt failed"
                            );
                            last_error = error;
                            // Immediately try next alternative
                            if let Some(peer) =
                                self.in_flight_tracker.pop_alternative_peer(key).await
                            {
                                push_future(&mut futures, operation(peer));
                                next_delay = Box::pin(sleep(
                                    staggered_delay * peer_index.load(Ordering::SeqCst),
                                ));
                            }
                        }
                    }
                }
                // All running futures completed
                Either::Left((None, delay_fut)) => {
                    // Restore the delay future
                    next_delay = delay_fut;
                    // Will check at top of loop if we should try more alternatives
                    continue;
                }
                // Delay elapsed - try to start next peer
                Either::Right((_, _)) => {
                    if let Some(peer) = self.in_flight_tracker.pop_alternative_peer(key).await {
                        push_future(&mut futures, operation(peer));
                        next_delay =
                            Box::pin(sleep(staggered_delay * peer_index.load(Ordering::SeqCst)));
                    } else {
                        // No more alternatives - break out to phase 2
                        break;
                    }
                }
            }
        }
        // Phase 2: No more alternatives, just wait for remaining futures to complete
        while let Some(result) = futures.next().await {
            match result {
                Ok(value) => {
                    tracing::trace!(?key, "staggered parallel request succeeded");
                    return Ok(value);
                }
                Err(error) => {
                    tracing::debug!(
                        ?key,
                        %error,
                        "staggered parallel request attempt failed"
                    );
                    last_error = error;
                }
            }
        }
        // All attempts failed
        tracing::debug!(?key, "all staggered parallel retry attempts failed");
        Err(last_error)
    }
/// Returns all peers ordered by their score (highest first).
///
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/benches/recorder.rs | linera-core/benches/recorder.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashSet;
use criterion::measurement::{Measurement, ValueFormatter};
use prometheus::proto::MetricType;
/// A Criterion [`Measurement`] that reports the summed values of a selected set
/// of Prometheus counters instead of wall clock time.
///
/// Only counter families whose names appear in `counter_names` are summed.
pub struct BenchRecorderMeasurement {
    /// Names of the Prometheus counter families to sum.
    counter_names: HashSet<&'static str>,
}
impl BenchRecorderMeasurement {
    /// Creates a measurement that sums exactly the given counter names.
    pub fn new(counters: impl IntoIterator<Item = &'static str>) -> Self {
        let counter_names = counters.into_iter().collect();
        Self { counter_names }
    }
}
impl Measurement for BenchRecorderMeasurement {
    type Intermediate = u64;
    type Value = u64;

    /// Takes a snapshot of the selected counters by summing their current values.
    fn start(&self) -> Self::Intermediate {
        let mut snapshot: Self::Intermediate = 0;
        for family in prometheus::gather() {
            if !self.counter_names.contains(family.get_name()) {
                continue;
            }
            // Only counters can be summed meaningfully here.
            assert_eq!(family.get_field_type(), MetricType::COUNTER);
            for metric in family.get_metric() {
                snapshot += metric.get_counter().get_value() as Self::Intermediate;
            }
        }
        snapshot
    }

    /// The measured value is the counter delta since `start`.
    fn end(&self, intermediate: Self::Intermediate) -> Self::Value {
        self.start() - intermediate
    }

    fn add(&self, v1: &Self::Value, v2: &Self::Value) -> Self::Value {
        *v1 + *v2
    }

    fn zero(&self) -> Self::Value {
        0
    }

    fn to_f64(&self, value: &Self::Value) -> f64 {
        *value as f64
    }

    fn formatter(&self) -> &dyn criterion::measurement::ValueFormatter {
        // Formats raw counts without any unit suffix.
        struct CountFormatter;
        const COUNT_FORMATTER: CountFormatter = CountFormatter;
        impl ValueFormatter for CountFormatter {
            fn scale_values(&self, _typical_value: f64, _values: &mut [f64]) -> &'static str {
                ""
            }
            fn scale_throughputs(
                &self,
                _typical_value: f64,
                _throughput: &criterion::Throughput,
                _values: &mut [f64],
            ) -> &'static str {
                ""
            }
            fn scale_for_machines(&self, _values: &mut [f64]) -> &'static str {
                ""
            }
        }
        &COUNT_FORMATTER
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/benches/client_benchmarks.rs | linera-core/benches/client_benchmarks.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use criterion::{criterion_group, criterion_main, measurement::Measurement, BatchSize, Criterion};
use linera_base::{
crypto::InMemorySigner,
data_types::Amount,
identifiers::{Account, AccountOwner},
time::Duration,
};
use linera_core::test_utils::{ChainClient, MemoryStorageBuilder, StorageBuilder, TestBuilder};
use linera_storage::metrics::{
READ_CERTIFICATE_COUNTER, READ_CONFIRMED_BLOCK_COUNTER, WRITE_CERTIFICATE_COUNTER,
};
use linera_views::metrics::{LOAD_VIEW_COUNTER, SAVE_VIEW_COUNTER};
use prometheus::core::Collector;
use recorder::BenchRecorderMeasurement;
use tokio::runtime;
mod recorder;
/// Creates root chains 1 and 2; chain 1 starts with 10 tokens, chain 2 with none.
pub fn setup_claim_bench<B>() -> (ChainClient<B::Storage>, ChainClient<B::Storage>)
where
    B: StorageBuilder + Default,
{
    let storage = B::default();
    let signer = InMemorySigner::new(None);
    // Criterion setup functions must be synchronous, yet they run inside an
    // async context and our setup is async — so enter the current runtime and
    // block on the async part.
    let handle = runtime::Handle::current();
    let _guard = handle.enter();
    futures::executor::block_on(async move {
        let mut builder = TestBuilder::new(storage, 4, 1, signer).await.unwrap();
        let funded_chain = builder
            .add_root_chain(1, Amount::from_tokens(10))
            .await
            .unwrap();
        let empty_chain = builder.add_root_chain(2, Amount::ZERO).await.unwrap();
        (funded_chain, empty_chain)
    })
}
/// Sends a token from the first chain to the first chain's owner on chain 2,
/// then reclaims that amount, asserting chain 1's balance along the way.
pub async fn run_claim_bench<B>(
    (chain1, chain2): (ChainClient<B::Storage>, ChainClient<B::Storage>),
) where
    B: StorageBuilder,
{
    let owner = chain1.identity().await.unwrap();
    let amount = Amount::ONE;
    // Transfer one token from chain 1 to `owner`'s account on chain 2.
    let target = Account::new(chain2.chain_id(), owner);
    chain1
        .transfer_to_account(AccountOwner::CHAIN, amount, target)
        .await
        .unwrap()
        .unwrap();
    chain2.synchronize_from_validators().await.unwrap();
    chain2.process_inbox().await.unwrap();
    assert_eq!(
        chain1.local_balance().await.unwrap(),
        Amount::from_tokens(9)
    );
    // Claim the token back from chain 2 into chain 1's own account.
    let reclaim_target = Account::chain(chain1.chain_id());
    chain1
        .claim(owner, chain2.chain_id(), reclaim_target, amount)
        .await
        .unwrap()
        .unwrap();
    chain2.synchronize_from_validators().await.unwrap();
    chain2.process_inbox().await.unwrap().0.pop().unwrap();
    chain1.synchronize_from_validators().await.unwrap();
    chain1.process_inbox().await.unwrap();
    assert_eq!(
        chain1.local_balance().await.unwrap(),
        Amount::from_tokens(10)
    );
}
/// Registers the `claim` benchmark, rebuilding the chain setup for every iteration.
fn criterion_benchmark<M: Measurement + 'static>(c: &mut Criterion<M>) {
    c.bench_function("claim", |b| {
        let runtime = tokio::runtime::Runtime::new().unwrap();
        b.to_async(runtime).iter_batched(
            setup_claim_bench::<MemoryStorageBuilder>,
            run_claim_bench::<MemoryStorageBuilder>,
            BatchSize::PerIteration,
        )
    });
}
// Measure the benchmark in terms of storage/view counter increments
// (certificate and block reads/writes, view loads/saves) rather than
// wall-clock time, via `BenchRecorderMeasurement`.
criterion_group!(
    name = benches;
    config = Criterion::default()
        .measurement_time(Duration::from_secs(40))
        .with_measurement(BenchRecorderMeasurement::new(vec![
            READ_CONFIRMED_BLOCK_COUNTER.desc()[0].fq_name.as_str(),
            READ_CERTIFICATE_COUNTER.desc()[0].fq_name.as_str(), WRITE_CERTIFICATE_COUNTER.desc()[0].fq_name.as_str(),
            LOAD_VIEW_COUNTER.desc()[0].fq_name.as_str(), SAVE_VIEW_COUNTER.desc()[0].fq_name.as_str(),
        ]));
    targets = criterion_benchmark
);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-core/benches/hashing_benchmarks.rs | linera-core/benches/hashing_benchmarks.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use alloy_primitives::Keccak256;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use sha3::{Digest, Sha3_256};
/// Hashes a 32-byte buffer filled with `i` using Keccak-256, discarding the digest.
fn keccak(i: u8) {
    let input = [i; 32];
    let mut state = Keccak256::new();
    state.update(input);
    state.finalize();
}
/// Hashes a 32-byte buffer filled with `i`, discarding the digest.
///
/// NOTE(review): despite the name, this uses `Sha3_256` from the `sha3` crate,
/// i.e. SHA3-256 — not SHA-2's SHA-256. Callers inherit the same misnomer; the
/// name is kept to avoid breaking them.
fn sha256(i: u8) {
    let mut hasher = Sha3_256::new();
    hasher.update([i; 32]);
    hasher.finalize();
}
/// Criterion benchmark for Keccak-256 over a fixed 32-byte input.
fn keccak_benchmark(c: &mut Criterion) {
    c.bench_function("keccak", |b| {
        b.iter(|| keccak(black_box(20)))
    });
}
/// Criterion benchmark for `sha256` (actually SHA3-256) over a fixed 32-byte input.
fn sha256_benchmark(c: &mut Criterion) {
    c.bench_function("sha256", |b| {
        b.iter(|| sha256(black_box(20)))
    });
}
// Register both hashing benchmarks under a single Criterion group.
criterion_group!(benches, keccak_benchmark, sha256_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/plugins/src/lib.rs | linera-indexer/plugins/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Plugins for Linera indexer.
pub mod operations;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/plugins/src/template.rs | linera-indexer/plugins/src/template.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_indexer::{plugin, common::IndexerError};
use linera_views::{
map_view::MapView,
register_view::RegisterView,
views::{RootView, View},
};
// A plugin is centered around a `RootView`: the plugin's persistent state is
// the set of views declared in this struct.
#[derive(RootView)]
pub struct Template<C> {
    // A single persisted `String` value.
    view1: RegisterView<C, String>,
    // A persisted `u32 -> String` map.
    view2: MapView<C, u32, String>,
    // more views
}
// The `plugin` macro attribute wraps the `Template` in an `Arc<Mutex<Template>>` and does the necessary transformations.
// The `self` attribute always refers to the `Template` struct.
// NOTE: this file is a template with placeholder code (`...` and unqualified
// types); it documents the plugin shape and is not meant to compile as-is.
#[plugin]
impl Template<C> {
    // The `register` function is the only required function.
    // It is used to register the wanted information from a HashedCertificateValue
    async fn register(&self, value: &HashedCertificateValue) -> Result<(), IndexerError> {
        // register information
    }
    // Private functions are directly applied to the `Template` struct.
    async fn helper1(&self, ...) -> Result<.., IndexerError> {
        // handle some things on the `Template` struct
    }
    // Public functions are the ones accessible through the GraphQL server.
    pub async fn entrypoint1(&self) -> String {
        self.view1.get()
    }
    pub async fn entrypoint2(&self, key: u32) -> Result<String, IndexerError> {
        Ok(self.view2.get(&key).await?)
    }
    // Other functions are derived to fill the `linera_indexer::plugin::Plugin` trait.
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/plugins/src/operations.rs | linera-indexer/plugins/src/operations.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
cmp::{Ordering, PartialOrd},
sync::Arc,
};
use async_graphql::{OneofObject, SimpleObject};
use axum::Router;
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, doc_scalar, identifiers::ChainId};
use linera_chain::{
data_types::Transaction,
types::{CertificateValue as _, ConfirmedBlock},
};
use linera_execution::Operation;
use linera_indexer::{
common::IndexerError,
plugin::{load, route, sdl, Plugin},
};
use linera_views::{
context::{Context, ViewContext},
map_view::MapView,
store::{KeyValueDatabase, KeyValueStore},
views::RootView,
};
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use tracing::info;
/// Identifies one operation: the chain it belongs to, the block height at which
/// it was executed, and its transaction index within that block.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq)]
pub struct OperationKey {
    pub chain_id: ChainId,
    pub height: BlockHeight,
    pub index: usize,
}
doc_scalar!(OperationKey, "An operation key to index operations");
/// Orders operation keys by `(height, index)` within a single chain.
///
/// Keys belonging to different chains are incomparable (`None`): comparing
/// heights across chains is meaningless, and returning `Some(Ordering::Equal)`
/// for keys with different `chain_id`s would contradict the derived `PartialEq`
/// implementation, violating the `PartialOrd` contract. The only comparison in
/// this module (`last_key >= key` in `register_operation`) always compares keys
/// of the same chain, so its behavior is unchanged.
impl PartialOrd for OperationKey {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        if self.chain_id != other.chain_id {
            return None;
        }
        Some(
            self.height
                .cmp(&other.height)
                .then_with(|| self.index.cmp(&other.index)),
        )
    }
}
// An indexed operation together with its position in the per-chain operation
// list. (Plain `//` comments on purpose: `///` docs on a `SimpleObject` become
// GraphQL descriptions and would change the generated SDL.)
#[derive(Deserialize, Serialize, Clone, SimpleObject, Debug)]
pub struct ChainOperation {
    // Where the operation was executed.
    key: OperationKey,
    // Key of the previously registered operation on the same chain, if any.
    previous_operation: Option<OperationKey>,
    // 0-based sequence number of this operation within its chain.
    index: u64,
    // Hash of the confirmed block containing this operation.
    block: CryptoHash,
    // The operation itself.
    content: Operation,
}
/// Persistent state of the operations plugin.
#[derive(RootView)]
pub struct Operations<C> {
    /// Key of the most recently registered operation, per chain.
    last: MapView<C, ChainId, OperationKey>,
    /// Number of operations registered so far, per chain.
    count: MapView<C, ChainId, u64>,
    /// `ChainOperation`s indexed by their `OperationKey`.
    operations: MapView<C, OperationKey, ChainOperation>,
}
// How to address an operation: either by its exact key, or by asking for the
// most recent operation registered on a chain. (`//` comments on purpose:
// `///` docs on a `OneofObject` become GraphQL descriptions and would change
// the generated SDL.)
#[derive(OneofObject)]
pub enum OperationKeyKind {
    Key(OperationKey),
    Last(ChainId),
}
/// Implements helper functions on the `RootView`
impl<C> Operations<C>
where
    C: Context + Send + Sync + 'static + Clone,
{
    /// Registers an operation and update count and last entries for this chain ID.
    ///
    /// Idempotent with respect to replays: an operation whose key is less than
    /// or equal to the chain's last registered key is silently ignored. The
    /// caller is responsible for persisting the view afterwards.
    async fn register_operation(
        &mut self,
        key: OperationKey,
        block: CryptoHash,
        content: Operation,
    ) -> Result<(), IndexerError> {
        let last_operation = self.last.get(&key.chain_id).await?;
        match last_operation {
            // Already registered (or out of order): nothing to do.
            Some(last_key) if last_key >= key => Ok(()),
            // Otherwise the old `last` entry (possibly `None`) becomes this
            // operation's `previous_operation`, chaining operations into a list.
            previous_operation => {
                let index = self.count.get(&key.chain_id).await?.unwrap_or(0);
                let operation = ChainOperation {
                    key: key.clone(),
                    previous_operation,
                    index,
                    block,
                    content,
                };
                info!(
                    "register operation for {:?}:\n{:?}",
                    key.chain_id, operation
                );
                self.operations.insert(&key, operation.clone())?;
                self.count.insert(&key.chain_id, index + 1)?;
                Ok(self.last.insert(&key.chain_id, key.clone())?)
            }
        }
    }
}
/// The operations plugin: its [`Operations`] view shared behind an `Arc<Mutex<_>>`.
#[derive(Clone)]
pub struct OperationsPlugin<C>(Arc<Mutex<Operations<C>>>);
/// Name under which the plugin and its GraphQL route are registered.
static NAME: &str = "operations";
/// Implements `Plugin`
#[async_trait::async_trait]
impl<D> Plugin<D> for OperationsPlugin<ViewContext<(), D::Store>>
where
    D: KeyValueDatabase + Clone + Send + Sync + 'static,
    D::Store: KeyValueStore + Clone + Send + Sync + 'static,
    D::Error: From<bcs::Error> + Send + Sync + std::error::Error + 'static,
{
    fn name(&self) -> String {
        NAME.to_string()
    }

    async fn load(database: D) -> Result<Self, IndexerError>
    where
        Self: Sized,
    {
        Ok(Self(load(database, NAME).await?))
    }

    /// Registers every operation of the confirmed block, then persists the view.
    async fn register(&self, value: &ConfirmedBlock) -> Result<(), IndexerError> {
        let mut plugin = self.0.lock().await;
        let chain_id = value.chain_id();
        // Iterate over all transactions to find operations; the operation key
        // records each operation's actual transaction index within the block.
        for (transaction_index, transaction) in value.block().body.transactions.iter().enumerate() {
            if let Transaction::ExecuteOperation(operation) = transaction {
                let key = OperationKey {
                    chain_id,
                    height: value.height(),
                    index: transaction_index,
                };
                // `?` replaces the previous manual `match`/`return Err` expansion.
                plugin
                    .register_operation(key, value.hash(), operation.clone())
                    .await?;
            }
        }
        Ok(plugin.save().await?)
    }

    fn sdl(&self) -> String {
        sdl(self.clone())
    }

    fn route(&self, app: Router) -> Router {
        route(NAME, self.clone(), app)
    }
}
/// Implements `ObjectType`
// NOTE: the `///` comments on the methods below are GraphQL descriptions and
// are part of the generated schema; additional notes use `//` so the SDL stays
// unchanged.
#[async_graphql::Object(cache_control(no_cache))]
impl<C> OperationsPlugin<C>
where
    C: Context + Send + Sync + 'static + Clone,
{
    /// Gets the operation associated to its hash
    pub async fn operation(
        &self,
        key: OperationKeyKind,
    ) -> Result<Option<ChainOperation>, IndexerError> {
        let plugin = self.0.lock().await;
        // Resolve `Last(chain_id)` to the chain's most recent key, if any.
        let key = match key {
            OperationKeyKind::Last(chain_id) => match plugin.last.get(&chain_id).await? {
                None => return Ok(None),
                Some(key) => key,
            },
            OperationKeyKind::Key(key) => key,
        };
        Ok(plugin.operations.get(&key).await?)
    }
    /// Gets the operations in downward order from an operation hash or from the last block of a chain
    pub async fn operations(
        &self,
        from: OperationKeyKind,
        limit: Option<u32>,
    ) -> Result<Vec<ChainOperation>, IndexerError> {
        let plugin = self.0.lock().await;
        let mut key = match from {
            OperationKeyKind::Last(chain_id) => match plugin.last.get(&chain_id).await? {
                None => return Ok(Vec::new()),
                Some(key) => Some(key),
            },
            OperationKeyKind::Key(key) => Some(key),
        };
        let mut result = Vec::new();
        // Default page size when the client does not specify a limit.
        let limit = limit.unwrap_or(20);
        // Walk the `previous_operation` links backwards, newest first.
        for _ in 0..limit {
            let Some(next_key) = &key else { break };
            let operation = plugin.operations.get(next_key).await?;
            match operation {
                None => break,
                Some(op) => {
                    key.clone_from(&op.previous_operation);
                    result.push(op)
                }
            }
        }
        Ok(result)
    }
    /// Gets the number of operations registered for a chain
    pub async fn count(&self, chain_id: ChainId) -> Result<u64, IndexerError> {
        let plugin = self.0.lock().await;
        // A chain with no registered operations reports zero.
        Ok(plugin
            .count
            .get(&chain_id)
            .await
            .map(|opt| opt.unwrap_or(0))?)
    }
    /// Gets the hash of the last operation registered for a chain
    pub async fn last(&self, chain_id: ChainId) -> Result<Option<OperationKey>, IndexerError> {
        let plugin = self.0.lock().await;
        Ok(plugin.last.get(&chain_id).await?)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/graphql-client/src/lib.rs | linera-indexer/graphql-client/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A GraphQL client for the indexer.
pub mod indexer;
pub mod operations;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/graphql-client/src/operations.rs | linera-indexer/graphql-client/src/operations.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use graphql_client::GraphQLQuery;
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId};
use serde::{Deserialize, Serialize};
#[cfg(target_arch = "wasm32")]
pub type Operation = serde_json::Value;
#[cfg(not(target_arch = "wasm32"))]
pub use linera_execution::Operation;
/// Client-side mirror of the indexer's operation key: the chain, block height,
/// and transaction index of an operation.
#[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
pub struct OperationKey {
    pub chain_id: ChainId,
    pub height: BlockHeight,
    pub index: usize,
}
/// Query against the operations plugin; the concrete GraphQL document is
/// defined in `gql/operations_requests.graphql`. Presumably fetches a list of
/// operations — confirm against the request file.
#[derive(GraphQLQuery)]
#[graphql(
    schema_path = "gql/operations_schema.graphql",
    query_path = "gql/operations_requests.graphql",
    response_derives = "Debug, Serialize, Clone"
)]
pub struct Operations;
/// Query for the number of operations registered for a chain (see
/// `gql/operations_requests.graphql` for the concrete document).
#[derive(GraphQLQuery)]
#[graphql(
    schema_path = "gql/operations_schema.graphql",
    query_path = "gql/operations_requests.graphql",
    response_derives = "Debug, Serialize, Clone"
)]
pub struct OperationsCount;
/// Query for the key of the last operation registered for a chain (see
/// `gql/operations_requests.graphql` for the concrete document).
#[derive(GraphQLQuery)]
#[graphql(
    schema_path = "gql/operations_schema.graphql",
    query_path = "gql/operations_requests.graphql",
    response_derives = "Debug, Serialize, Clone"
)]
pub struct LastOperation;
/// Query for a single operation (see `gql/operations_requests.graphql` for the
/// concrete document).
#[derive(GraphQLQuery)]
#[graphql(
    schema_path = "gql/operations_schema.graphql",
    query_path = "gql/operations_requests.graphql",
    response_derives = "Debug, Serialize, Clone"
)]
pub struct GetOperation;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/graphql-client/src/indexer.rs | linera-indexer/graphql-client/src/indexer.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use graphql_client::GraphQLQuery;
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId};
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/indexer_schema.graphql",
query_path = "gql/indexer_requests.graphql",
response_derives = "Debug, Serialize, Clone"
)]
pub struct Plugins;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/indexer_schema.graphql",
query_path = "gql/indexer_requests.graphql",
response_derives = "Debug, Serialize, Clone"
)]
pub struct State;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/graphql-client/tests/test.rs | linera-indexer/graphql-client/tests/test.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::io::Read;
use linera_base::command::resolve_binary;
use tempfile::tempdir;
use tokio::process::Command;
#[test_log::test(tokio::test)]
async fn test_check_indexer_schema() {
let tmp_dir = tempdir().unwrap();
let path = resolve_binary("linera-indexer", "linera-indexer-example")
.await
.unwrap();
let mut command = Command::new(path);
let output = command
.current_dir(tmp_dir.path())
.arg("schema")
.output()
.await
.unwrap();
let indexer_schema = String::from_utf8(output.stdout).unwrap();
let mut file_base = std::fs::File::open("gql/indexer_schema.graphql").unwrap();
let mut graphql_schema = String::new();
file_base.read_to_string(&mut graphql_schema).unwrap();
assert_eq!(
graphql_schema, indexer_schema,
"\nGraphQL indexer schema has changed -> \
regenerate schema following steps in linera-indexer-graphql-client/README.md\n"
)
}
#[test_log::test(tokio::test)]
async fn test_check_indexer_operations_schema() {
let tmp_dir = tempdir().unwrap();
let path = resolve_binary("linera-indexer", "linera-indexer-example")
.await
.unwrap();
let mut command = Command::new(path);
let output = command
.current_dir(tmp_dir.path())
.args(["schema", "operations"])
.output()
.await
.unwrap();
let service_schema = String::from_utf8(output.stdout).unwrap();
let mut file_base = std::fs::File::open("gql/operations_schema.graphql").unwrap();
let mut graphql_schema = String::new();
file_base.read_to_string(&mut graphql_schema).unwrap();
assert_eq!(
graphql_schema, service_schema,
"\nGraphQL indexer operations schema has changed -> \
regenerate schema following steps in linera-indexer-graphql-client/README.md\n"
)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/example/src/main.rs | linera-indexer/example/src/main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An example of an indexer with the operations plugin.
use linera_indexer::{common::IndexerError, plugin::Plugin, rocks_db::RocksDbRunner};
use linera_indexer_plugins::operations::OperationsPlugin;
#[tokio::main]
async fn main() -> Result<(), IndexerError> {
let env_filter = tracing_subscriber::EnvFilter::builder()
.with_default_directive(tracing_subscriber::filter::LevelFilter::INFO.into())
.from_env_lossy();
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(env_filter)
.init();
let mut runner = RocksDbRunner::load().await?;
runner
.add_plugin(OperationsPlugin::load(runner.database.clone()).await?)
.await?;
runner.run().await
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/example/src/grpc_main.rs | linera-indexer/example/src/grpc_main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An example of a gRPC indexer server with multiple database backend support.
use clap::Parser;
use linera_indexer::{
common::IndexerError,
db::{postgres::PostgresDatabase, sqlite::SqliteDatabase},
grpc::IndexerGrpcServer,
};
#[derive(Parser, Debug)]
#[command(version = linera_version::VersionInfo::default_clap_str())]
#[command(group = clap::ArgGroup::new("database").required(true).multiple(false))]
struct Args {
/// The port of the gRPC indexer server
#[arg(long, default_value = "8081")]
port: u16,
/// Use in-memory SQLite database (data is lost on restart)
#[arg(long, group = "database")]
memory: bool,
/// Use SQLite database with file persistence
#[arg(long, group = "database", value_name = "PATH")]
sqlite: Option<String>,
/// Use PostgreSQL database
#[arg(long, group = "database", value_name = "URL")]
postgres: Option<String>,
}
#[tokio::main]
async fn main() -> Result<(), IndexerError> {
let env_filter = tracing_subscriber::EnvFilter::builder()
.with_default_directive(tracing_subscriber::filter::LevelFilter::INFO.into())
.from_env_lossy();
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(env_filter)
.init();
let args = Args::parse();
// Start server with the selected database backend
if args.memory {
tracing::info!("Starting indexer with in-memory SQLite database");
let database = SqliteDatabase::new("sqlite::memory:").await?;
let grpc_server = IndexerGrpcServer::new(database);
grpc_server
.serve(args.port)
.await
.map_err(IndexerError::Other)?;
} else if let Some(path) = args.sqlite {
tracing::info!(?path, "Starting indexer with SQLite database");
let database = SqliteDatabase::new(&path).await?;
let grpc_server = IndexerGrpcServer::new(database);
grpc_server
.serve(args.port)
.await
.map_err(IndexerError::Other)?;
} else if let Some(url) = args.postgres {
tracing::info!(?url, "Starting indexer with PostgreSQL database");
let database = PostgresDatabase::new(&url).await?;
let grpc_server = IndexerGrpcServer::new(database);
grpc_server
.serve(args.port)
.await
.map_err(IndexerError::Other)?;
} else {
return Err(IndexerError::Other(
"No database backend specified. Use --memory, --sqlite, or --postgres".into(),
));
}
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/example/tests/test.rs | linera-indexer/example/tests/test.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(any(
feature = "storage-service",
feature = "dynamodb",
feature = "scylladb"
))]
use std::{str::FromStr, sync::LazyLock, time::Duration};
use linera_base::{
command::resolve_binary,
data_types::Amount,
identifiers::{Account, AccountOwner, ChainId},
};
use linera_indexer_graphql_client::{
indexer::{plugins, state, Plugins, State},
operations::{get_operation, GetOperation, OperationKey},
};
use linera_service::cli_wrappers::{
local_net::{Database, LocalNetConfig, PathProvider, ProcessInbox},
LineraNet, LineraNetConfig, Network,
};
use linera_service_graphql_client::{block, request, transfer, Block, Transfer};
use test_case::test_case;
use tokio::{
process::{Child, Command},
sync::Mutex,
};
use tracing::{info, warn};
/// A static lock to prevent integration tests from running in parallel.
static INTEGRATION_TEST_GUARD: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
fn reqwest_client() -> reqwest::Client {
reqwest::Client::builder()
.timeout(Duration::from_secs(30))
.build()
.unwrap()
}
async fn run_indexer(path_provider: &PathProvider) -> anyhow::Result<Child> {
let port = 8081;
let path = resolve_binary("linera-indexer", "linera-indexer-example").await?;
let mut command = Command::new(path);
command
.current_dir(path_provider.path())
.kill_on_drop(true)
.args(["run-graph-ql"]);
let child = command.spawn()?;
let client = reqwest_client();
for i in 0..10 {
linera_base::time::timer::sleep(Duration::from_secs(i)).await;
let request = client
.get(format!("http://localhost:{}/", port))
.send()
.await;
if request.is_ok() {
info!("Indexer has started");
return Ok(child);
} else {
warn!("Waiting for indexer to start");
}
}
panic!("Failed to start indexer");
}
fn indexer_running(child: &mut Child) {
if let Some(status) = child.try_wait().unwrap() {
assert!(status.success());
}
}
async fn transfer(
client: &reqwest::Client,
from: ChainId,
to: Account,
amount: &str,
) -> anyhow::Result<()> {
let variables = transfer::Variables {
chain_id: from,
owner: AccountOwner::CHAIN,
recipient: transfer::Account {
chain_id: to.chain_id,
owner: to.owner,
},
amount: Amount::from_str(amount)?,
};
request::<Transfer, _>(client, "http://localhost:8080", variables).await?;
Ok(())
}
#[cfg(debug_assertions)]
const TRANSFER_DELAY_MILLIS: u64 = 1000;
#[cfg(not(debug_assertions))]
const TRANSFER_DELAY_MILLIS: u64 = 100;
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc); "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_operations_indexer(config: impl LineraNetConfig) -> anyhow::Result<()> {
// launching network, service and indexer
let _guard = INTEGRATION_TEST_GUARD.lock().await;
let (mut net, client) = config.instantiate().await?;
let mut node_service = client
.run_node_service(None, ProcessInbox::Automatic)
.await?;
let mut indexer = run_indexer(&client.path_provider).await?;
// check operations plugin
let req_client = reqwest_client();
let plugins = request::<Plugins, _>(&req_client, "http://localhost:8081", plugins::Variables)
.await?
.plugins;
assert_eq!(
plugins,
vec!["operations"],
"Indexer plugin 'operations' not loaded",
);
// making a few transfers
let node_chains = {
let wallet = client.load_wallet()?;
wallet.chain_ids()
};
let chain0 = node_chains[0];
let chain1 = Account::chain(node_chains[1]);
for _ in 0..10 {
transfer(&req_client, chain0, chain1, "0.1").await?;
linera_base::time::timer::sleep(Duration::from_millis(TRANSFER_DELAY_MILLIS)).await;
}
linera_base::time::timer::sleep(Duration::from_secs(2)).await;
// checking indexer state
let variables = block::Variables {
hash: None,
chain_id: chain0,
};
let last_block = request::<Block, _>(&req_client, "http://localhost:8080", variables)
.await?
.block
.unwrap_or_else(|| panic!("no block found"));
let last_hash = last_block.clone().hash;
let indexer_state = request::<State, _>(&req_client, "http://localhost:8081", state::Variables)
.await?
.state;
let indexer_hash =
indexer_state
.iter()
.find_map(|arg| if arg.chain == chain0 { arg.block } else { None });
assert_eq!(
Some(last_hash),
indexer_hash,
"Different states between service and indexer"
);
// checking indexer operation (updated for new transaction structure)
// Note: The transactions field is not exposed via GraphQL due to technical limitations,
// but we can still verify that the indexer correctly tracks operations
let variables = get_operation::Variables {
key: get_operation::OperationKeyKind::Last(chain0),
};
let indexer_operation =
request::<GetOperation, _>(&req_client, "http://localhost:8081/operations", variables)
.await?
.operation;
match indexer_operation {
Some(get_operation::GetOperationOperation { key, block, .. }) => {
assert_eq!(
(key, block),
(
OperationKey {
chain_id: chain0,
height: last_block.block.header.height,
index: 0
},
last_hash,
),
"service and indexer operations are different"
)
}
None => panic!("no operation found"),
}
indexer_running(&mut indexer);
node_service.ensure_is_running()?;
net.terminate().await?;
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/build.rs | linera-indexer/lib/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_prost_build::compile_protos("proto/indexer.proto")?;
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/scylla_db.rs | linera-indexer/lib/src/scylla_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_views::{
lru_prefix_cache::StorageCacheConfig,
scylla_db::{ScyllaDbDatabase, ScyllaDbStoreConfig, ScyllaDbStoreInternalConfig},
store::KeyValueDatabase,
};
use crate::{
common::IndexerError,
runner::{IndexerConfig, Runner},
};
#[derive(clap::Parser, Clone, Debug)]
#[command(version = linera_version::VersionInfo::default_clap_str())]
pub struct ScyllaDbConfig {
/// ScyllaDB address
#[arg(long, default_value = "localhost:9042")]
pub uri: String,
#[arg(long, default_value = "linera")]
pub table: String,
/// The maximal number of simultaneous queries to the database
#[arg(long)]
max_concurrent_queries: Option<usize>,
/// The maximal number of simultaneous stream queries to the database
#[arg(long, default_value = "10")]
pub max_stream_queries: usize,
/// The maximal memory used in the storage cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_size: usize,
/// The maximal size of a value entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_value_entry_size: usize,
/// The maximal size of a find-keys entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_find_keys_entry_size: usize,
/// The maximal size of a find-key-values entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_find_key_values_entry_size: usize,
/// The maximal number of entries in the storage cache.
#[arg(long, default_value = "1000")]
pub max_cache_entries: usize,
/// The maximal memory used in the value cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_value_size: usize,
/// The maximal memory used in the find_keys_by_prefix cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_find_keys_size: usize,
/// The maximal memory used in the find_key_values_by_prefix cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_find_key_values_size: usize,
/// The replication factor for the keyspace
#[arg(long, default_value = "1")]
pub replication_factor: u32,
}
pub type ScyllaDbRunner = Runner<ScyllaDbDatabase, ScyllaDbConfig>;
impl ScyllaDbRunner {
pub async fn load() -> Result<Self, IndexerError> {
let config = <IndexerConfig<ScyllaDbConfig> as clap::Parser>::parse();
let storage_cache_config = StorageCacheConfig {
max_cache_size: config.client.max_cache_size,
max_value_entry_size: config.client.max_value_entry_size,
max_find_keys_entry_size: config.client.max_find_keys_entry_size,
max_find_key_values_entry_size: config.client.max_find_key_values_entry_size,
max_cache_entries: config.client.max_cache_entries,
max_cache_value_size: config.client.max_cache_value_size,
max_cache_find_keys_size: config.client.max_cache_find_keys_size,
max_cache_find_key_values_size: config.client.max_cache_find_key_values_size,
};
let inner_config = ScyllaDbStoreInternalConfig {
uri: config.client.uri.clone(),
max_stream_queries: config.client.max_stream_queries,
max_concurrent_queries: config.client.max_concurrent_queries,
replication_factor: config.client.replication_factor,
};
let store_config = ScyllaDbStoreConfig {
inner_config,
storage_cache_config,
};
let namespace = config.client.table.clone();
let database = ScyllaDbDatabase::connect(&store_config, &namespace).await?;
Self::new(config, database).await
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/rocks_db.rs | linera-indexer/lib/src/rocks_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use clap::Parser as _;
use linera_views::{
lru_prefix_cache::StorageCacheConfig,
rocks_db::{
PathWithGuard, RocksDbDatabase, RocksDbSpawnMode, RocksDbStoreConfig,
RocksDbStoreInternalConfig,
},
store::KeyValueDatabase as _,
};
use crate::{
common::IndexerError,
runner::{IndexerConfig, Runner},
};
#[derive(clap::Parser, Clone, Debug)]
#[command(version = linera_version::VersionInfo::default_clap_str())]
pub struct RocksDbConfig {
/// RocksDB storage path
#[arg(long, default_value = "./indexer.db")]
pub storage: PathBuf,
#[arg(long, default_value = "linera")]
pub namespace: String,
/// The maximal number of simultaneous queries to the database
#[arg(long)]
max_concurrent_queries: Option<usize>,
/// The maximal number of simultaneous stream queries to the database
#[arg(long, default_value = "10")]
pub max_stream_queries: usize,
/// The maximal memory used in the storage cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_size: usize,
/// The maximal size of a value entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_value_entry_size: usize,
/// The maximal size of a find-keys entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_find_keys_entry_size: usize,
/// The maximal size of a find-key-values entry in the storage cache in bytes.
#[arg(long, default_value = "1000000")]
pub max_find_key_values_entry_size: usize,
/// The maximal number of entries in the storage cache.
#[arg(long, default_value = "1000")]
pub max_cache_entries: usize,
/// The maximal memory used in the value cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_value_size: usize,
/// The maximal memory used in the find_keys_by_prefix cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_find_keys_size: usize,
/// The maximal memory used in the find_key_values_by_prefix cache in bytes.
#[arg(long, default_value = "10000000")]
pub max_cache_find_key_values_size: usize,
}
pub type RocksDbRunner = Runner<RocksDbDatabase, RocksDbConfig>;
impl RocksDbRunner {
pub async fn load() -> Result<Self, IndexerError> {
let config = IndexerConfig::<RocksDbConfig>::parse();
let storage_cache_config = StorageCacheConfig {
max_cache_size: config.client.max_cache_size,
max_value_entry_size: config.client.max_value_entry_size,
max_find_keys_entry_size: config.client.max_find_keys_entry_size,
max_find_key_values_entry_size: config.client.max_find_key_values_entry_size,
max_cache_entries: config.client.max_cache_entries,
max_cache_value_size: config.client.max_cache_value_size,
max_cache_find_keys_size: config.client.max_cache_find_keys_size,
max_cache_find_key_values_size: config.client.max_cache_find_key_values_size,
};
let path_buf = config.client.storage.as_path().to_path_buf();
let path_with_guard = PathWithGuard::new(path_buf);
// The tests are run in single threaded mode, therefore we need
// to use the safe default value of SpawnBlocking.
let spawn_mode = RocksDbSpawnMode::SpawnBlocking;
let inner_config = RocksDbStoreInternalConfig {
spawn_mode,
path_with_guard,
max_stream_queries: config.client.max_stream_queries,
};
let store_config = RocksDbStoreConfig {
inner_config,
storage_cache_config,
};
let namespace = config.client.namespace.clone();
let database = RocksDbDatabase::maybe_create_and_connect(&store_config, &namespace).await?;
Self::new(config, database).await
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/lib.rs | linera-indexer/lib/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the linera-indexer library including:
//! - the indexer connection to the node service (service.rs)
//! - the block processing (indexer.rs)
//! - the generic plugin trait (plugin.rs)
//! - the runner struct (runner.rs)
//! - the gRPC server implementation (grpc_server.rs)
pub mod common;
pub mod indexer;
pub mod plugin;
pub mod runner;
pub mod service;
#[cfg(feature = "rocksdb")]
pub mod rocks_db;
#[cfg(feature = "scylladb")]
pub mod scylla_db;
pub mod db;
pub mod grpc;
// Generated protobuf types
pub mod indexer_api {
tonic::include_proto!("indexer.linera_indexer");
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/runner.rs | linera-indexer/lib/src/runner.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the trait for indexer runners.
use linera_base::identifiers::ChainId;
use linera_views::store::{KeyValueDatabase, KeyValueStore};
use tokio::select;
use tracing::{info, warn};
use crate::{common::IndexerError, indexer::Indexer, plugin::Plugin, service::Listener};
#[derive(clap::Parser, Debug, Clone)]
#[command(version = linera_version::VersionInfo::default_clap_str())]
pub enum IndexerCommand {
Schema {
plugin: Option<String>,
},
/// Legacy mode: Run GraphQL server with WebSocket client (deprecated)
#[deprecated(note = "Use RunGrpc instead")]
RunGraphQL {
#[command(flatten)]
listener: Listener,
/// The port of the indexer server
#[arg(long, default_value = "8081")]
port: u16,
/// Chains to index (default: the ones on the service wallet)
chains: Vec<ChainId>,
},
}
#[derive(clap::Parser, Debug, Clone)]
pub struct IndexerConfig<Config: clap::Args> {
#[command(flatten)]
pub client: Config,
#[command(subcommand)]
pub command: IndexerCommand,
}
pub struct Runner<D, Config: clap::Args>
where
D: KeyValueDatabase,
{
pub database: D,
pub config: IndexerConfig<Config>,
pub indexer: Indexer<D>,
}
impl<D, Config> Runner<D, Config>
where
Self: Send,
Config: Clone + std::fmt::Debug + Send + Sync + clap::Parser + clap::Args,
D: KeyValueDatabase + Clone + Send + Sync + 'static,
D::Store: KeyValueStore + Clone + Send + Sync + 'static,
D::Error: Send + Sync + std::error::Error + 'static,
{
/// Loads a new runner
pub async fn new(config: IndexerConfig<Config>, database: D) -> Result<Self, IndexerError>
where
Self: Sized,
{
let indexer = Indexer::load(database.clone()).await?;
Ok(Self {
database,
config,
indexer,
})
}
/// Registers a new plugin to the indexer
pub async fn add_plugin(
&mut self,
plugin: impl Plugin<D> + 'static,
) -> Result<(), IndexerError> {
self.indexer.add_plugin(plugin).await
}
/// Runs a server from the indexer and the plugins
async fn server(port: u16, indexer: &Indexer<D>) -> Result<(), IndexerError> {
let mut app = indexer.route(None);
for plugin in indexer.plugins.values() {
app = plugin.route(app);
}
axum::serve(
tokio::net::TcpListener::bind(format!("127.0.0.1:{}", port)).await?,
app,
)
.await?;
Ok(())
}
/// Runs a server and the chains listener
pub async fn run(&mut self) -> Result<(), IndexerError> {
let config = self.config.clone();
match config.clone().command {
IndexerCommand::Schema { plugin } => {
println!("{}", self.indexer.sdl(plugin)?);
Ok(())
}
#[allow(deprecated)]
IndexerCommand::RunGraphQL {
chains,
listener,
port,
} => {
warn!("Running in legacy GraphQL mode. Consider migrating to RunGrpc.");
info!("config: {:?}", config);
let chains = if chains.is_empty() {
listener.service.get_chains().await?
} else {
chains
};
let initialize_chains = chains
.iter()
.map(|chain_id| self.indexer.init(&listener, *chain_id));
futures::future::try_join_all(initialize_chains).await?;
let connections = {
chains
.into_iter()
.map(|chain_id| listener.listen(&self.indexer, chain_id))
};
select! {
result = Self::server(port, &self.indexer) => {
result.map(|()| warn!("GraphQL server stopped"))
}
(result, _, _) = futures::future::select_all(connections.map(Box::pin)) => {
result.map(|chain_id| {
warn!("Connection to {:?} notifications websocket stopped", chain_id)
})
}
}
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/service.rs | linera-indexer/lib/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the service client for the indexer.
use async_tungstenite::{
tokio::connect_async,
tungstenite::{client::IntoClientRequest, http::HeaderValue},
};
use futures::{
task::{FutureObj, Spawn, SpawnError},
StreamExt,
};
use graphql_client::reqwest::post_graphql;
use graphql_ws_client::{graphql::StreamingOperation, GraphQLClientClientBuilder};
use linera_base::{
crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId, time::Duration,
};
use linera_chain::types::ConfirmedBlock;
use linera_core::worker::Reason;
use linera_service_graphql_client::{block, chains, notifications, Block, Chains, Notifications};
use linera_views::store::{KeyValueDatabase, KeyValueStore};
use tokio::runtime::Handle;
use tracing::error;
use crate::{common::IndexerError, indexer::Indexer};
struct TokioSpawner(Handle);
impl Spawn for TokioSpawner {
fn spawn_obj(&self, obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
self.0.spawn(obj);
Ok(())
}
}
pub enum Protocol {
Http,
WebSocket,
}
fn reqwest_client() -> reqwest::Client {
reqwest::Client::builder()
.timeout(Duration::from_secs(30))
.build()
.unwrap()
}
#[derive(clap::Parser, Debug, Clone)]
#[command(version = linera_version::VersionInfo::default_clap_str())]
pub struct Service {
/// The port of the node service
#[arg(long, default_value = "8080")]
pub service_port: u16,
/// The address of the node service
#[arg(long, default_value = "localhost")]
pub service_address: String,
/// Use SSL/TLS
#[arg(long)]
pub tls: bool,
}
impl Service {
pub fn with_protocol(&self, protocol: Protocol) -> String {
let tls = if self.tls { "s" } else { "" };
let (protocol, suffix) = match protocol {
Protocol::Http => ("http", ""),
Protocol::WebSocket => ("ws", "/ws"),
};
format!(
"{}{}://{}:{}{}",
protocol, tls, self.service_address, self.service_port, suffix
)
}
pub fn websocket(&self) -> String {
self.with_protocol(Protocol::WebSocket)
}
pub fn http(&self) -> String {
self.with_protocol(Protocol::Http)
}
/// Gets one hashed value from the node service
pub async fn get_value(
&self,
chain_id: ChainId,
hash: Option<CryptoHash>,
) -> Result<ConfirmedBlock, IndexerError> {
let client = reqwest_client();
let variables = block::Variables { hash, chain_id };
let response = post_graphql::<Block, _>(&client, &self.http(), variables).await?;
response
.data
.ok_or_else(|| IndexerError::NullData(response.errors))?
.block
.ok_or_else(|| IndexerError::NotFound(hash))?
.try_into()
.map_err(IndexerError::ConversionError)
}
/// Gets chains
pub async fn get_chains(&self) -> Result<Vec<ChainId>, IndexerError> {
let client = reqwest_client();
let variables = chains::Variables;
let result = post_graphql::<Chains, _>(&client, &self.http(), variables).await?;
Ok(result
.data
.ok_or(IndexerError::NullData(result.errors))?
.chains
.list)
}
}
#[derive(clap::Parser, Debug, Clone)]
pub struct Listener {
#[command(flatten)]
pub service: Service,
/// The height at which the indexer should start
#[arg(long = "start", default_value = "0")]
pub start: BlockHeight,
}
impl Listener {
/// Connects to the WebSocket of the service node for a particular chain
pub async fn listen<D>(
&self,
indexer: &Indexer<D>,
chain_id: ChainId,
) -> Result<ChainId, IndexerError>
where
D: KeyValueDatabase + Clone + Send + Sync + 'static,
D::Store: KeyValueStore + Clone + Send + Sync + 'static,
D::Error: Send + Sync + std::error::Error + 'static,
{
let mut request = self.service.websocket().into_client_request()?;
request.headers_mut().insert(
"Sec-WebSocket-Protocol",
HeaderValue::from_str("graphql-transport-ws")?,
);
let (connection, _) = connect_async(request).await?;
let (sink, stream) = connection.split();
let mut client = GraphQLClientClientBuilder::new()
.build(stream, sink, TokioSpawner(Handle::current()))
.await?;
let operation: StreamingOperation<Notifications> =
StreamingOperation::new(notifications::Variables { chain_id });
let mut stream = client.streaming_operation(operation).await?;
while let Some(item) = stream.next().await {
match item {
Ok(response) => {
if let Some(data) = response.data {
if let Reason::NewBlock { hash, .. } = data.notifications.reason {
if let Ok(value) = self.service.get_value(chain_id, Some(hash)).await {
indexer.process(self, &value).await?;
}
}
} else {
error!("null data from GraphQL WebSocket")
}
}
Err(error) => error!("error in WebSocket stream: {}", error),
}
}
Ok(chain_id)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/common.rs | linera-indexer/lib/src/common.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::net::AddrParseError;
use async_graphql::http::GraphiQLSource;
use axum::{
http::Uri,
response::{self, IntoResponse},
};
use linera_base::crypto::CryptoHash;
use reqwest::header::InvalidHeaderValue;
use thiserror::Error;
use crate::db::sqlite::SqliteError;
#[derive(Error, Debug)]
pub enum IndexerError {
#[error(transparent)]
ViewError(#[from] linera_views::ViewError),
#[error(transparent)]
ReqwestError(#[from] reqwest::Error),
#[error(transparent)]
GraphQLError(#[from] graphql_ws_client::Error),
#[error(transparent)]
TungsteniteError(#[from] Box<async_tungstenite::tungstenite::Error>),
#[error(transparent)]
InvalidHeader(#[from] InvalidHeaderValue),
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error(transparent)]
ParserError(#[from] AddrParseError),
#[error("Null GraphQL data: {0:?}")]
NullData(Option<Vec<graphql_client::Error>>),
#[error("Block not found: {0:?}")]
NotFound(Option<CryptoHash>),
#[error("Unknown plugin: {0}")]
UnknownPlugin(String),
#[error(transparent)]
ConversionError(linera_service_graphql_client::ConversionError),
#[error("Plugin is already registered")]
PluginAlreadyRegistered,
#[error("Open exclusive error")]
OpenExclusiveError,
#[error("Other error: {0}")]
Other(#[from] Box<dyn std::error::Error + Send + Sync>),
#[cfg(feature = "rocksdb")]
#[error(transparent)]
RocksDbError(#[from] linera_views::rocks_db::RocksDbStoreError),
#[cfg(feature = "scylladb")]
#[error(transparent)]
ScyllaDbError(#[from] Box<linera_views::scylla_db::ScyllaDbStoreError>),
}
impl From<SqliteError> for IndexerError {
fn from(error: SqliteError) -> Self {
Self::Other(Box::new(error).into())
}
}
impl From<crate::db::postgres::PostgresError> for IndexerError {
fn from(error: crate::db::postgres::PostgresError) -> Self {
Self::Other(Box::new(error).into())
}
}
impl From<async_tungstenite::tungstenite::Error> for IndexerError {
fn from(error: async_tungstenite::tungstenite::Error) -> Self {
Box::new(error).into()
}
}
#[cfg(feature = "scylladb")]
impl From<linera_views::scylla_db::ScyllaDbStoreError> for IndexerError {
fn from(error: linera_views::scylla_db::ScyllaDbStoreError) -> Self {
Box::new(error).into()
}
}
pub async fn graphiql(uri: Uri) -> impl IntoResponse {
response::Html(GraphiQLSource::build().endpoint(uri.path()).finish())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/mock_database.rs | linera-indexer/lib/src/mock_database.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Mock database implementations for testing.
use std::{collections::HashMap, sync::RwLock};
use async_trait::async_trait;
use linera_base::{
    crypto::CryptoHash,
    data_types::{BlockHeight, Timestamp},
    identifiers::{BlobId, ChainId},
};
use linera_chain::data_types::IncomingBundle;
use crate::{
database_trait::{DatabaseTransaction, IndexerDatabase},
sqlite_db::{IncomingBundleInfo, PostedMessageInfo, SqliteError},
};
/// Mock database that fails on transaction operations for testing error paths
// Stateless unit struct: every trait method returns a canned stub value.
pub struct MockFailingDatabase;
impl MockFailingDatabase {
    /// Creates a new failing mock (no state to initialize).
    pub fn new() -> Self {
        Self
    }
}
impl Default for MockFailingDatabase {
    fn default() -> Self {
        Self::new()
    }
}
#[async_trait]
impl IndexerDatabase for MockFailingDatabase {
    /// Always fails so that callers exercise the transaction-creation error path.
    async fn begin_transaction(&self) -> Result<DatabaseTransaction<'_>, SqliteError> {
        // Always fail transaction creation for testing error paths
        Err(SqliteError::Serialization(
            "Mock: Cannot create real transaction".to_string(),
        ))
    }
    // No-op stub; unreachable in practice because `begin_transaction` always fails.
    async fn insert_block_tx(
        &self,
        _tx: &mut DatabaseTransaction<'_>,
        _hash: &CryptoHash,
        _chain_id: &ChainId,
        _height: BlockHeight,
        _timestamp: Timestamp,
        _data: &[u8],
    ) -> Result<(), SqliteError> {
        Ok(())
    }
    // No-op stub; see `insert_block_tx`.
    async fn commit_transaction(&self, _tx: DatabaseTransaction<'_>) -> Result<(), SqliteError> {
        Ok(())
    }
    /// Read accessors fail with a distinctive message so tests can assert the path taken.
    async fn get_block(&self, _hash: &CryptoHash) -> Result<Vec<u8>, SqliteError> {
        Err(SqliteError::Serialization(
            "Mock: get_block not implemented".to_string(),
        ))
    }
    async fn get_blob(&self, _blob_id: &BlobId) -> Result<Vec<u8>, SqliteError> {
        Err(SqliteError::Serialization(
            "Mock: get_blob not implemented".to_string(),
        ))
    }
    async fn get_latest_block_for_chain(
        &self,
        _chain_id: &ChainId,
    ) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
        Err(SqliteError::Serialization(
            "Mock: get_latest_block_for_chain not implemented".to_string(),
        ))
    }
    async fn get_blocks_for_chain_range(
        &self,
        _chain_id: &ChainId,
        _start_height: BlockHeight,
        _end_height: BlockHeight,
    ) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
        Err(SqliteError::Serialization(
            "Mock: get_blocks_for_chain_range not implemented".to_string(),
        ))
    }
    // Existence checks report "absent" so writers always attempt inserts.
    async fn blob_exists(&self, _blob_id: &BlobId) -> Result<bool, SqliteError> {
        Ok(false)
    }
    async fn block_exists(&self, _hash: &CryptoHash) -> Result<bool, SqliteError> {
        Ok(false)
    }
    // Bundle/message queries succeed with empty results.
    async fn get_incoming_bundles_for_block(
        &self,
        _block_hash: &CryptoHash,
    ) -> Result<Vec<(i64, IncomingBundleInfo)>, SqliteError> {
        Ok(vec![])
    }
    async fn get_posted_messages_for_bundle(
        &self,
        _bundle_id: i64,
    ) -> Result<Vec<PostedMessageInfo>, SqliteError> {
        Ok(vec![])
    }
    async fn get_bundles_from_origin_chain(
        &self,
        _origin_chain_id: &ChainId,
    ) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, SqliteError> {
        Ok(vec![])
    }
}
// Maps a block hash to its owning chain, height, and serialized block bytes.
type Blocks = HashMap<CryptoHash, (ChainId, BlockHeight, Vec<u8>)>;
/// A more sophisticated mock that actually works for successful paths
/// and stores data in internal HashMaps for testing verification
pub struct MockSuccessDatabase {
    /// Storage for blobs: BlobId -> blob data
    blobs: RwLock<HashMap<BlobId, Vec<u8>>>,
    /// Storage for blocks: CryptoHash -> (ChainId, BlockHeight, block data)
    blocks: RwLock<Blocks>,
}
impl Default for MockSuccessDatabase {
fn default() -> Self {
Self::new()
}
}
impl MockSuccessDatabase {
pub fn new() -> Self {
Self {
blobs: RwLock::new(HashMap::new()),
blocks: RwLock::new(HashMap::new()),
}
}
/// Get the count of stored blobs
pub fn blob_count(&self) -> usize {
self.blobs.read().unwrap().len()
}
/// Get the count of stored blocks
pub fn block_count(&self) -> usize {
self.blocks.read().unwrap().len()
}
}
#[async_trait]
impl IndexerDatabase for MockSuccessDatabase {
    /// Override the high-level method to succeed and store data
    // Writes go into the in-memory maps so tests can verify them via
    // `blob_count` / `block_count`. Not transactional: blobs land before the
    // block, which is fine for a single-threaded test mock.
    async fn store_block_with_blobs(
        &self,
        block_hash: &CryptoHash,
        chain_id: &ChainId,
        height: BlockHeight,
        block_data: &[u8],
        blobs: &[(BlobId, Vec<u8>)],
    ) -> Result<(), SqliteError> {
        // Store all blobs
        {
            let mut blob_storage = self.blobs.write().unwrap();
            for (blob_id, blob_data) in blobs {
                blob_storage.insert(*blob_id, blob_data.clone());
            }
        }
        // Store the block
        {
            let mut block_storage = self.blocks.write().unwrap();
            block_storage.insert(*block_hash, (*chain_id, height, block_data.to_vec()));
        }
        Ok(())
    }
    async fn begin_transaction(&self) -> Result<DatabaseTransaction<'_>, SqliteError> {
        // We can't create a real transaction, but for successful testing we can just
        // return an error that indicates we can't create a mock transaction
        Err(SqliteError::Serialization(
            "Mock: Cannot create real transaction".to_string(),
        ))
    }
    // Transaction-level stubs are inert; the override above bypasses them.
    async fn insert_block_tx(
        &self,
        _tx: &mut DatabaseTransaction<'_>,
        _hash: &CryptoHash,
        _chain_id: &ChainId,
        _height: BlockHeight,
        _timestamp: Timestamp,
        _data: &[u8],
    ) -> Result<(), SqliteError> {
        Ok(())
    }
    async fn commit_transaction(&self, _tx: DatabaseTransaction<'_>) -> Result<(), SqliteError> {
        Ok(())
    }
    // Read accessors return benign empty values (they do NOT consult the maps).
    async fn get_block(&self, _hash: &CryptoHash) -> Result<Vec<u8>, SqliteError> {
        Ok(vec![])
    }
    async fn get_blob(&self, _blob_id: &BlobId) -> Result<Vec<u8>, SqliteError> {
        Ok(vec![])
    }
    async fn get_latest_block_for_chain(
        &self,
        _chain_id: &ChainId,
    ) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
        Ok(None)
    }
    async fn get_blocks_for_chain_range(
        &self,
        _chain_id: &ChainId,
        _start_height: BlockHeight,
        _end_height: BlockHeight,
    ) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
        Ok(vec![])
    }
    async fn blob_exists(&self, _blob_id: &BlobId) -> Result<bool, SqliteError> {
        Ok(false)
    }
    async fn block_exists(&self, _hash: &CryptoHash) -> Result<bool, SqliteError> {
        Ok(false)
    }
    async fn get_incoming_bundles_for_block(
        &self,
        _block_hash: &CryptoHash,
    ) -> Result<Vec<(i64, IncomingBundleInfo)>, SqliteError> {
        Ok(vec![])
    }
    async fn get_posted_messages_for_bundle(
        &self,
        _bundle_id: i64,
    ) -> Result<Vec<PostedMessageInfo>, SqliteError> {
        Ok(vec![])
    }
    async fn get_bundles_from_origin_chain(
        &self,
        _origin_chain_id: &ChainId,
    ) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, SqliteError> {
        Ok(vec![])
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/indexer.rs | linera-indexer/lib/src/indexer.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the base component of linera-indexer.
use std::{collections::BTreeMap, sync::Arc};
use allocative::Allocative;
use async_graphql::{EmptyMutation, EmptySubscription, Schema, SimpleObject};
use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
use axum::{extract::Extension, routing::get, Router};
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ChainId};
use linera_chain::types::{CertificateValue as _, ConfirmedBlock};
use linera_views::{
context::{Context, ViewContext},
map_view::MapView,
register_view::RegisterView,
set_view::SetView,
store::{KeyValueDatabase, KeyValueStore},
views::{RootView, View},
};
use tokio::sync::Mutex;
use tower_http::cors::CorsLayer;
use tracing::info;
use crate::{
common::{graphiql, IndexerError},
plugin::Plugin,
service::Listener,
};
/// Persistent root view holding the indexer's own bookkeeping.
#[derive(RootView, Allocative)]
#[allocative(bound = "C")]
pub struct StateView<C> {
    // Latest processed block (hash, height) per chain.
    chains: MapView<C, ChainId, (CryptoHash, BlockHeight)>,
    // Names of the plugins registered with this indexer.
    plugins: SetView<C, String>,
    // Initialization flag — not read or written in this chunk; confirm usage elsewhere.
    initiated: RegisterView<C, bool>,
}
/// Cloneable, lock-protected handle to the [`StateView`].
#[derive(Clone)]
pub struct State<C>(Arc<Mutex<StateView<C>>>);
/// Query-only GraphQL schema rooted at the indexer [`State`].
type StateSchema<S> = Schema<State<ViewContext<(), S>>, EmptyMutation, EmptySubscription>;
/// The indexer: persisted state plus the set of registered plugins, keyed by name.
pub struct Indexer<D>
where
    D: KeyValueDatabase,
{
    pub state: State<ViewContext<(), D::Store>>,
    pub plugins: BTreeMap<String, Box<dyn Plugin<D>>>,
}
/// CLI-level command: run the indexer or just print its schema.
pub enum IndexerCommand {
    Run,
    Schema,
}
/// Where to stop when walking a chain backwards from a newly notified block.
#[derive(Debug)]
enum LatestBlock {
    // Stop upon reaching this already-indexed block hash.
    LatestHash(CryptoHash),
    // Stop once block heights fall to this configured start height.
    StartHeight(BlockHeight),
}
impl<D> Indexer<D>
where
    D: KeyValueDatabase + Clone + Send + Sync + 'static,
    D::Store: KeyValueStore + Clone + Send + Sync + 'static,
    D::Error: Send + Sync + std::error::Error + 'static,
{
    /// Loads the indexer using a database backend with an `indexer` prefix.
    pub async fn load(database: D) -> Result<Self, IndexerError> {
        let root_key = "indexer".as_bytes().to_vec();
        let store = database
            .open_exclusive(&root_key)
            .map_err(|_e| IndexerError::OpenExclusiveError)?;
        let context = ViewContext::create_root_context(store, ())
            .await
            .map_err(|e| IndexerError::ViewError(e.into()))?;
        let state = State(Arc::new(Mutex::new(StateView::load(context).await?)));
        Ok(Indexer {
            state,
            plugins: BTreeMap::new(),
        })
    }
    /// Processes one block: registers the block in all the plugins and saves the state of
    /// the indexer.
    pub async fn process_value(
        &self,
        state: &mut StateView<ViewContext<(), D::Store>>,
        value: &ConfirmedBlock,
    ) -> Result<(), IndexerError> {
        // Every plugin sees every block; a plugin error aborts processing.
        for plugin in self.plugins.values() {
            plugin.register(value).await?
        }
        let chain_id = value.chain_id();
        let hash = value.hash();
        let height = value.height();
        info!("save {:?}: {:?} ({})", chain_id, hash, height);
        // Record this block as the latest one handled for its chain, then persist.
        state
            .chains
            .insert(&chain_id, (value.hash(), value.height()))?;
        state.save().await.map_err(IndexerError::ViewError)
    }
    /// Processes a `NewBlock` notification: processes all blocks from the latest
    /// registered to the one in the notification in the corresponding chain.
    pub async fn process(
        &self,
        listener: &Listener,
        value: &ConfirmedBlock,
    ) -> Result<(), IndexerError> {
        let chain_id = value.chain_id();
        let hash = value.hash();
        let height = value.height();
        // Lock held for the whole catch-up so blocks are applied in order.
        let mut state = self.state.0.lock().await;
        if height < listener.start {
            return Ok(());
        };
        // Decide how far back to walk: to the last indexed hash, or to the start height.
        let latest_block = match state.chains.get(&chain_id).await? {
            None => LatestBlock::StartHeight(listener.start),
            Some((last_hash, last_height)) => {
                // Already indexed (same hash) or stale notification: nothing to do.
                if last_hash == hash || last_height >= height {
                    return Ok(());
                }
                LatestBlock::LatestHash(last_hash)
            }
        };
        info!("process {:?}: {:?} ({})", chain_id, hash, height);
        // Walk backwards via `previous_block_hash`, collecting the missing blocks
        // newest-first; fetch each predecessor from the service on demand.
        let mut values = Vec::new();
        let mut value = value.clone();
        loop {
            let header = &value.block().header;
            values.push(value.clone());
            if let Some(hash) = header.previous_block_hash {
                match latest_block {
                    LatestBlock::LatestHash(latest_hash) if latest_hash != hash => {
                        value = listener.service.get_value(chain_id, Some(hash)).await?;
                        continue;
                    }
                    LatestBlock::StartHeight(start) if header.height > start => {
                        value = listener.service.get_value(chain_id, Some(hash)).await?;
                        continue;
                    }
                    _ => break,
                }
            }
            break;
        }
        // Replay oldest-first (pop from the end of the newest-first list).
        while let Some(value) = values.pop() {
            self.process_value(&mut state, &value).await?
        }
        Ok(())
    }
    /// Catches up one chain from its current tip; a missing tip is not an error.
    pub async fn init(&self, listener: &Listener, chain_id: ChainId) -> Result<(), IndexerError> {
        match listener.service.get_value(chain_id, None).await {
            Ok(value) => self.process(listener, &value).await,
            Err(IndexerError::NotFound(_)) => Ok(()),
            Err(e) => Err(e),
        }
    }
    /// Produces the GraphQL schema for the indexer or for a certain plugin
    pub fn sdl(&self, plugin: Option<String>) -> Result<String, IndexerError> {
        match plugin {
            None => Ok(self.state.clone().schema().sdl()),
            Some(plugin) => match self.plugins.get(&plugin) {
                Some(plugin) => Ok(plugin.sdl()),
                None => Err(IndexerError::UnknownPlugin(plugin.to_string())),
            },
        }
    }
    /// Registers a new plugin in the indexer
    // NOTE(review): `insert` replaces any existing entry *before* the duplicate
    // check fires, so on `PluginAlreadyRegistered` the old plugin instance has
    // already been displaced — confirm this is intended.
    pub async fn add_plugin(
        &mut self,
        plugin: impl Plugin<D> + 'static,
    ) -> Result<(), IndexerError> {
        let name = plugin.name();
        self.plugins
            .insert(name.clone(), Box::new(plugin))
            .map_or_else(|| Ok(()), |_| Err(IndexerError::PluginAlreadyRegistered))?;
        let mut state = self.state.0.lock().await;
        Ok(state.plugins.insert(&name)?)
    }
    /// Handles queries made to the root of the indexer
    async fn handler(
        schema: Extension<StateSchema<D::Store>>,
        req: GraphQLRequest,
    ) -> GraphQLResponse {
        schema.execute(req.into_inner()).await.into()
    }
    /// Registers the handler to an Axum router
    pub fn route(&self, app: Option<Router>) -> Router {
        let app = app.unwrap_or_default();
        app.route("/", get(graphiql).post(Self::handler))
            .layer(Extension(self.state.clone().schema()))
            .layer(CorsLayer::permissive())
    }
}
/// Latest known block for one chain, as exposed over GraphQL.
#[derive(SimpleObject)]
pub struct HighestBlock {
    chain: ChainId,
    block: Option<CryptoHash>,
    height: Option<BlockHeight>,
}
#[async_graphql::Object(cache_control(no_cache))]
impl<C> State<C>
where
    C: Context + Clone + Send + Sync + 'static,
{
    /// Gets the plugins registered in the indexer
    pub async fn plugins(&self) -> Result<Vec<String>, IndexerError> {
        let state = self.0.lock().await;
        Ok(state.plugins.indices().await?)
    }
    /// Gets the latest blocks registered for each chain handled by the indexer
    pub async fn state(&self) -> Result<Vec<HighestBlock>, IndexerError> {
        let state = self.0.lock().await;
        let chains = state.chains.indices().await?;
        let mut result = Vec::new();
        for chain in chains {
            // Re-read each entry; `None` fields mean no block recorded for the chain.
            let block = state.chains.get(&chain).await?;
            result.push(HighestBlock {
                chain,
                block: block.map(|b| b.0),
                height: block.map(|b| b.1),
            });
        }
        Ok(result)
    }
}
impl<C> State<C>
where
    C: Context + Clone + Send + Sync + 'static,
{
    /// Builds the query-only GraphQL schema rooted at this state handle.
    pub fn schema(self) -> Schema<Self, EmptyMutation, EmptySubscription> {
        let builder = Schema::build(self, EmptyMutation, EmptySubscription);
        builder.finish()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/plugin.rs | linera-indexer/lib/src/plugin.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the trait for indexer plugins.
use std::sync::Arc;
use async_graphql::{EmptyMutation, EmptySubscription, ObjectType, Schema};
use axum::Router;
use linera_chain::types::ConfirmedBlock;
use linera_views::{
context::ViewContext,
store::{KeyValueDatabase, KeyValueStore},
views::View,
};
use tokio::sync::Mutex;
use crate::common::IndexerError;
/// Interface every indexer plugin implements: lifecycle, block ingestion,
/// and GraphQL exposure.
#[async_trait::async_trait]
pub trait Plugin<D>: Send + Sync
where
    D: KeyValueDatabase + Clone + Send + Sync + 'static,
    D::Store: KeyValueStore + Clone + Send + Sync + 'static,
    D::Error: From<bcs::Error> + Send + Sync + std::error::Error + 'static,
{
    /// Gets the name of the plugin
    fn name(&self) -> String
    where
        Self: Sized;
    /// Loads the plugin from a database
    async fn load(database: D) -> Result<Self, IndexerError>
    where
        Self: Sized;
    /// Main function of the plugin: registers the information required for a hashed value
    async fn register(&self, value: &ConfirmedBlock) -> Result<(), IndexerError>;
    /// Produces the GraphQL schema for the plugin
    fn sdl(&self) -> String;
    /// Registers the plugin to an Axum router
    fn route(&self, app: Router) -> Router;
}
/// Axum handler that executes a GraphQL request against a plugin schema.
async fn handler<Q: ObjectType + 'static>(
    schema: axum::extract::Extension<Schema<Q, EmptyMutation, EmptySubscription>>,
    req: async_graphql_axum::GraphQLRequest,
) -> async_graphql_axum::GraphQLResponse {
    let response = schema.execute(req.into_inner()).await;
    response.into()
}
/// Builds a query-only schema for the given plugin query root.
fn schema<Q: ObjectType + 'static>(query: Q) -> Schema<Q, EmptyMutation, EmptySubscription> {
    // `Schema::build(...).finish()` with no customization is the documented
    // equivalent of `Schema::new(...)`.
    Schema::build(query, EmptyMutation, EmptySubscription).finish()
}
/// Renders the SDL (schema definition language) for a plugin query root.
pub fn sdl<Q: ObjectType + 'static>(query: Q) -> String {
    let built = schema(query);
    built.sdl()
}
/// Mounts a plugin under `/{name}`: GraphiQL on GET, query execution on POST.
pub fn route<Q: ObjectType + 'static>(name: &str, query: Q, app: axum::Router) -> axum::Router {
    let path = format!("/{}", name);
    let service = axum::routing::get(crate::common::graphiql).post(handler::<Q>);
    app.route(&path, service)
        .layer(axum::extract::Extension(schema(query)))
        .layer(tower_http::cors::CorsLayer::permissive())
}
pub async fn load<D, V: View<Context = ViewContext<(), D::Store>>>(
database: D,
name: &str,
) -> Result<Arc<Mutex<V>>, IndexerError>
where
D: KeyValueDatabase + Clone + Send + Sync + 'static,
D::Store: KeyValueStore + Clone + Send + Sync + 'static,
D::Error: From<bcs::Error> + Send + Sync + std::error::Error + 'static,
{
let root_key = name.as_bytes().to_vec();
let store = database
.open_exclusive(&root_key)
.map_err(|_e| IndexerError::OpenExclusiveError)?;
let context = ViewContext::create_root_context(store, ())
.await
.map_err(|e| IndexerError::ViewError(e.into()))?;
let plugin = V::load(context).await?;
Ok(Arc::new(Mutex::new(plugin)))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/tests.rs | linera-indexer/lib/src/db/tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Mock database implementations for testing.
use std::{collections::HashMap, sync::RwLock};
use async_trait::async_trait;
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Timestamp},
identifiers::{BlobId, ChainId},
};
use sqlx::Sqlite;
use crate::{
db::{IncomingBundleInfo, IndexerDatabase, PostedMessageInfo},
grpc::ProcessingError,
};
/// Mock database that fails on transaction operations for testing error paths
// Stateless unit struct: every trait method returns a canned stub value.
pub struct MockFailingDatabase;
impl MockFailingDatabase {
    /// Creates a new failing mock (no state to initialize).
    pub fn new() -> Self {
        Self
    }
}
impl Default for MockFailingDatabase {
    fn default() -> Self {
        Self::new()
    }
}
/// Error type shared by the mocks; a single variant carrying a message.
pub enum MockDatabaseError {
    Serialization(String),
}
impl From<MockDatabaseError> for ProcessingError {
fn from(error: MockDatabaseError) -> Self {
match error {
MockDatabaseError::Serialization(msg) => ProcessingError::BlockDeserialization(msg),
}
}
}
#[async_trait]
impl IndexerDatabase for MockFailingDatabase {
    type Error = MockDatabaseError;
    // Concrete transaction type is irrelevant: `begin_transaction` never succeeds.
    type Transaction<'a> = sqlx::Transaction<'a, Sqlite>;
    /// Always fails so that callers exercise the transaction-creation error path.
    async fn begin_transaction(&self) -> Result<Self::Transaction<'_>, Self::Error> {
        // Always fail transaction creation for testing error paths
        Err(MockDatabaseError::Serialization(
            "Mock: Cannot create real transaction".to_string(),
        ))
    }
    // No-op stubs; unreachable in practice because `begin_transaction` always fails.
    async fn insert_blob_tx(
        &self,
        _tx: &mut Self::Transaction<'_>,
        _blob_id: &BlobId,
        _data: &[u8],
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    async fn insert_block_tx(
        &self,
        _tx: &mut Self::Transaction<'_>,
        _hash: &CryptoHash,
        _chain_id: &ChainId,
        _height: BlockHeight,
        _timestamp: Timestamp,
        _data: &[u8],
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    async fn commit_transaction(&self, _tx: Self::Transaction<'_>) -> Result<(), Self::Error> {
        Ok(())
    }
    /// Read accessors fail with distinctive messages so tests can assert the path taken.
    async fn get_block(&self, _hash: &CryptoHash) -> Result<Vec<u8>, Self::Error> {
        Err(MockDatabaseError::Serialization(
            "Mock: get_block not implemented".to_string(),
        ))
    }
    async fn get_blob(&self, _blob_id: &BlobId) -> Result<Vec<u8>, Self::Error> {
        Err(MockDatabaseError::Serialization(
            "Mock: get_blob not implemented".to_string(),
        ))
    }
    async fn get_latest_block_for_chain(
        &self,
        _chain_id: &ChainId,
    ) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error> {
        Err(MockDatabaseError::Serialization(
            "Mock: get_latest_block_for_chain not implemented".to_string(),
        ))
    }
    async fn get_blocks_for_chain_range(
        &self,
        _chain_id: &ChainId,
        _start_height: BlockHeight,
        _end_height: BlockHeight,
    ) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error> {
        Err(MockDatabaseError::Serialization(
            "Mock: get_blocks_for_chain_range not implemented".to_string(),
        ))
    }
    // Existence checks report "absent" so writers always attempt inserts.
    async fn blob_exists(&self, _blob_id: &BlobId) -> Result<bool, Self::Error> {
        Ok(false)
    }
    async fn block_exists(&self, _hash: &CryptoHash) -> Result<bool, Self::Error> {
        Ok(false)
    }
    // Bundle/message queries succeed with empty results.
    async fn get_incoming_bundles_for_block(
        &self,
        _block_hash: &CryptoHash,
    ) -> Result<Vec<(i64, IncomingBundleInfo)>, Self::Error> {
        Ok(vec![])
    }
    async fn get_posted_messages_for_bundle(
        &self,
        _bundle_id: i64,
    ) -> Result<Vec<PostedMessageInfo>, Self::Error> {
        Ok(vec![])
    }
    async fn get_bundles_from_origin_chain(
        &self,
        _origin_chain_id: &ChainId,
    ) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, Self::Error> {
        Ok(vec![])
    }
}
// Maps a block hash to its owning chain, height, timestamp, and serialized bytes.
type Blocks = HashMap<CryptoHash, (ChainId, BlockHeight, Timestamp, Vec<u8>)>;
/// A more sophisticated mock that actually works for successful paths
/// and stores data in internal HashMaps for testing verification
pub struct MockSuccessDatabase {
    /// Storage for blobs: BlobId -> blob data
    blobs: RwLock<HashMap<BlobId, Vec<u8>>>,
    /// Storage for blocks: CryptoHash -> (ChainId, BlockHeight, Timestamp, block data)
    blocks: RwLock<Blocks>,
}
impl Default for MockSuccessDatabase {
fn default() -> Self {
Self::new()
}
}
impl MockSuccessDatabase {
pub fn new() -> Self {
Self {
blobs: RwLock::new(HashMap::new()),
blocks: RwLock::new(HashMap::new()),
}
}
/// Get the count of stored blobs
pub fn blob_count(&self) -> usize {
self.blobs.read().unwrap().len()
}
/// Get the count of stored blocks
pub fn block_count(&self) -> usize {
self.blocks.read().unwrap().len()
}
}
#[async_trait]
impl IndexerDatabase for MockSuccessDatabase {
    type Error = MockDatabaseError;
    // Concrete transaction type is irrelevant: the high-level override below
    // bypasses the transaction machinery entirely.
    type Transaction<'a> = sqlx::Transaction<'a, Sqlite>;
    /// Override the high-level method to succeed and store data
    // Writes go into the in-memory maps so tests can verify them via
    // `blob_count` / `block_count`. Not transactional: blobs land before the
    // block, which is fine for a single-threaded test mock.
    async fn store_block_with_blobs(
        &self,
        block_hash: &CryptoHash,
        chain_id: &ChainId,
        height: BlockHeight,
        timestamp: Timestamp,
        block_data: &[u8],
        blobs: &[(BlobId, Vec<u8>)],
    ) -> Result<(), Self::Error> {
        // Store all blobs
        {
            let mut blob_storage = self.blobs.write().unwrap();
            for (blob_id, blob_data) in blobs {
                blob_storage.insert(*blob_id, blob_data.clone());
            }
        }
        // Store the block
        {
            let mut block_storage = self.blocks.write().unwrap();
            block_storage.insert(
                *block_hash,
                (*chain_id, height, timestamp, block_data.to_vec()),
            );
        }
        Ok(())
    }
    async fn begin_transaction(&self) -> Result<Self::Transaction<'_>, Self::Error> {
        // We can't create a real transaction, but for successful testing we can just
        // return an error that indicates we can't create a mock transaction
        Err(MockDatabaseError::Serialization(
            "Mock: Cannot create real transaction".to_string(),
        ))
    }
    // Transaction-level stubs are inert; the override above bypasses them.
    async fn insert_blob_tx(
        &self,
        _tx: &mut Self::Transaction<'_>,
        _blob_id: &BlobId,
        _data: &[u8],
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    async fn insert_block_tx(
        &self,
        _tx: &mut Self::Transaction<'_>,
        _hash: &CryptoHash,
        _chain_id: &ChainId,
        _height: BlockHeight,
        _timestamp: Timestamp,
        _data: &[u8],
    ) -> Result<(), Self::Error> {
        Ok(())
    }
    async fn commit_transaction(&self, _tx: Self::Transaction<'_>) -> Result<(), Self::Error> {
        Ok(())
    }
    // Read accessors return benign empty values (they do NOT consult the maps).
    async fn get_block(&self, _hash: &CryptoHash) -> Result<Vec<u8>, Self::Error> {
        Ok(vec![])
    }
    async fn get_blob(&self, _blob_id: &BlobId) -> Result<Vec<u8>, Self::Error> {
        Ok(vec![])
    }
    async fn get_latest_block_for_chain(
        &self,
        _chain_id: &ChainId,
    ) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error> {
        Ok(None)
    }
    async fn get_blocks_for_chain_range(
        &self,
        _chain_id: &ChainId,
        _start_height: BlockHeight,
        _end_height: BlockHeight,
    ) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error> {
        Ok(vec![])
    }
    async fn blob_exists(&self, _blob_id: &BlobId) -> Result<bool, Self::Error> {
        Ok(false)
    }
    async fn block_exists(&self, _hash: &CryptoHash) -> Result<bool, Self::Error> {
        Ok(false)
    }
    async fn get_incoming_bundles_for_block(
        &self,
        _block_hash: &CryptoHash,
    ) -> Result<Vec<(i64, IncomingBundleInfo)>, Self::Error> {
        Ok(vec![])
    }
    async fn get_posted_messages_for_bundle(
        &self,
        _bundle_id: i64,
    ) -> Result<Vec<PostedMessageInfo>, Self::Error> {
        Ok(vec![])
    }
    async fn get_bundles_from_origin_chain(
        &self,
        _origin_chain_id: &ChainId,
    ) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, Self::Error> {
        Ok(vec![])
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/mod.rs | linera-indexer/lib/src/db/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Database trait for the indexer.
#[cfg(test)]
pub(crate) mod tests;
pub mod common;
pub mod postgres;
pub mod sqlite;
use async_trait::async_trait;
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Timestamp},
identifiers::{BlobId, ChainId},
};
use linera_service_graphql_client::MessageAction;
/// Trait defining the database operations for the indexer
#[async_trait]
pub trait IndexerDatabase: Send + Sync {
    type Error;
    type Transaction<'a>: Send + Sync;
    /// Atomically store a block with its required blobs
    /// This is the high-level API that can be implemented in terms of the other methods
    // Default implementation: one transaction covering all blobs plus the block.
    // On any error the transaction is dropped uncommitted — this assumes the
    // backend rolls back dropped transactions (true for sqlx).
    async fn store_block_with_blobs(
        &self,
        block_hash: &CryptoHash,
        chain_id: &ChainId,
        height: BlockHeight,
        timestamp: Timestamp,
        block_data: &[u8],
        blobs: &[(BlobId, Vec<u8>)],
    ) -> Result<(), Self::Error> {
        // Start atomic transaction
        let mut tx = self.begin_transaction().await?;
        // Insert all blobs first
        for (blob_id, blob_data) in blobs {
            self.insert_blob_tx(&mut tx, blob_id, blob_data).await?;
        }
        // Insert the block
        self.insert_block_tx(&mut tx, block_hash, chain_id, height, timestamp, block_data)
            .await?;
        // Commit transaction - this is the only point where data becomes visible
        self.commit_transaction(tx).await?;
        Ok(())
    }
    /// Start a new transaction
    async fn begin_transaction(&self) -> Result<Self::Transaction<'_>, Self::Error>;
    /// Insert a blob within a transaction
    async fn insert_blob_tx(
        &self,
        tx: &mut Self::Transaction<'_>,
        blob_id: &BlobId,
        data: &[u8],
    ) -> Result<(), Self::Error>;
    /// Insert a block within a transaction
    async fn insert_block_tx(
        &self,
        tx: &mut Self::Transaction<'_>,
        hash: &CryptoHash,
        chain_id: &ChainId,
        height: BlockHeight,
        timestamp: Timestamp,
        data: &[u8],
    ) -> Result<(), Self::Error>;
    /// Commit a transaction
    async fn commit_transaction(&self, tx: Self::Transaction<'_>) -> Result<(), Self::Error>;
    /// Get a block by hash
    async fn get_block(&self, hash: &CryptoHash) -> Result<Vec<u8>, Self::Error>;
    /// Get a blob by blob_id
    async fn get_blob(&self, blob_id: &BlobId) -> Result<Vec<u8>, Self::Error>;
    /// Get the latest block for a chain
    async fn get_latest_block_for_chain(
        &self,
        chain_id: &ChainId,
    ) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error>;
    /// Get blocks for a chain within a height range
    async fn get_blocks_for_chain_range(
        &self,
        chain_id: &ChainId,
        start_height: BlockHeight,
        end_height: BlockHeight,
    ) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, Self::Error>;
    /// Check if a blob exists
    async fn blob_exists(&self, blob_id: &BlobId) -> Result<bool, Self::Error>;
    /// Check if a block exists
    async fn block_exists(&self, hash: &CryptoHash) -> Result<bool, Self::Error>;
    /// Get incoming bundles for a specific block
    async fn get_incoming_bundles_for_block(
        &self,
        block_hash: &CryptoHash,
    ) -> Result<Vec<(i64, IncomingBundleInfo)>, Self::Error>;
    /// Get posted messages for a specific bundle
    async fn get_posted_messages_for_bundle(
        &self,
        bundle_id: i64,
    ) -> Result<Vec<PostedMessageInfo>, Self::Error>;
    /// Get all bundles from a specific origin chain
    async fn get_bundles_from_origin_chain(
        &self,
        origin_chain_id: &ChainId,
    ) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, Self::Error>;
}
/// Information about an incoming bundle (denormalized for queries)
pub struct IncomingBundleInfo {
    // Position of the bundle within its block.
    pub bundle_index: usize,
    // Chain the bundle originated from.
    pub origin_chain_id: ChainId,
    pub action: MessageAction,
    // Height/timestamp/certificate hash of the source block.
    pub source_height: BlockHeight,
    pub source_timestamp: Timestamp,
    pub source_cert_hash: CryptoHash,
    pub transaction_index: u32,
}
/// Information about a posted message (with serialized complex fields)
pub struct PostedMessageInfo {
    pub message_index: u32,
    // Serialized owner — serialization format not visible here; confirm at the writer.
    pub authenticated_owner_data: Option<String>,
    pub grant_amount: String,
    pub refund_grant_to_data: Option<String>,
    // Textual `MessageKind`; see `message_kind_to_string` / `parse_message_kind`.
    pub message_kind: String,
    pub message_data: Vec<u8>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/common.rs | linera-indexer/lib/src/db/common.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Common utilities for database implementations.
use linera_base::data_types::Amount;
use linera_execution::{Message, MessageKind, SystemMessage};
/// Classification result for a Message with denormalized SystemMessage fields
// Only the fields relevant to the concrete message variant are `Some`;
// see `classify_message` for exactly which variant populates which fields.
pub struct MessageClassification {
    // "System" or "User".
    pub message_type: String,
    // Set for user messages only.
    pub application_id: Option<String>,
    // Set for system messages only ("Credit" or "Withdraw").
    pub system_message_type: Option<String>,
    pub system_target: Option<String>,
    pub system_amount: Option<Amount>,
    pub system_source: Option<String>,
    pub system_owner: Option<String>,
    pub system_recipient: Option<String>,
}
/// Classify a Message into database fields with denormalized SystemMessage fields
// Each match arm builds the full classification directly, so which fields are
// populated for which variant is visible at a glance.
pub fn classify_message(message: &Message) -> MessageClassification {
    match message {
        Message::System(SystemMessage::Credit {
            target,
            amount,
            source,
        }) => MessageClassification {
            message_type: "System".to_string(),
            application_id: None,
            system_message_type: Some("Credit".to_string()),
            system_target: Some(target.to_string()),
            system_amount: Some(*amount),
            system_source: Some(source.to_string()),
            system_owner: None,
            system_recipient: None,
        },
        Message::System(SystemMessage::Withdraw {
            owner,
            amount,
            recipient,
        }) => MessageClassification {
            message_type: "System".to_string(),
            application_id: None,
            system_message_type: Some("Withdraw".to_string()),
            system_target: None,
            system_amount: Some(*amount),
            system_source: None,
            system_owner: Some(owner.to_string()),
            system_recipient: Some(recipient.to_string()),
        },
        Message::User { application_id, .. } => MessageClassification {
            message_type: "User".to_string(),
            application_id: Some(application_id.to_string()),
            system_message_type: None,
            system_target: None,
            system_amount: None,
            system_source: None,
            system_owner: None,
            system_recipient: None,
        },
    }
}
/// Convert a [`MessageKind`] to its `Debug` representation (e.g. `"Simple"`).
pub fn message_kind_to_string(kind: &MessageKind) -> String {
    format!("{kind:?}")
}
/// Parse a [`MessageKind`] from the string produced by `message_kind_to_string`.
///
/// Returns `Err` with a descriptive message for any unrecognized input.
pub fn parse_message_kind(kind_str: &str) -> Result<MessageKind, String> {
    let kind = match kind_str {
        "Simple" => MessageKind::Simple,
        "Tracked" => MessageKind::Tracked,
        "Bouncing" => MessageKind::Bouncing,
        "Protected" => MessageKind::Protected,
        other => return Err(format!("Unknown message kind: {}", other)),
    };
    Ok(kind)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/postgres/consts.rs | linera-indexer/lib/src/db/postgres/consts.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! PostgreSQL schema definitions and constants.
/// SQL schema for creating the blocks table with denormalized fields
/// (header fields copied out for querying, aggregate counts for display,
/// and the original serialized bytes); idempotent via `IF NOT EXISTS`.
pub const CREATE_BLOCKS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS blocks (
hash TEXT PRIMARY KEY NOT NULL,
chain_id TEXT NOT NULL,
height BIGINT NOT NULL,
timestamp BIGINT NOT NULL,
-- Denormalized fields from BlockHeader
epoch BIGINT NOT NULL,
state_hash TEXT NOT NULL,
previous_block_hash TEXT,
authenticated_owner TEXT,
-- Aggregated counts for filtering and display
operation_count BIGINT NOT NULL DEFAULT 0,
incoming_bundle_count BIGINT NOT NULL DEFAULT 0,
message_count BIGINT NOT NULL DEFAULT 0,
event_count BIGINT NOT NULL DEFAULT 0,
blob_count BIGINT NOT NULL DEFAULT 0,
-- Original serialized block data for backward compatibility
data BYTEA NOT NULL,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_blocks_chain_height ON blocks(chain_id, height);
CREATE INDEX IF NOT EXISTS idx_blocks_epoch ON blocks(epoch);
CREATE INDEX IF NOT EXISTS idx_blocks_timestamp ON blocks(timestamp);
CREATE INDEX IF NOT EXISTS idx_blocks_state_hash ON blocks(state_hash);
"#;
/// SQL schema for creating the operations table; one row per operation,
/// linked to its block by `block_hash` and ordered by `operation_index`.
pub const CREATE_OPERATIONS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS operations (
id BIGSERIAL PRIMARY KEY,
block_hash TEXT NOT NULL,
operation_index BIGINT NOT NULL,
operation_type TEXT NOT NULL, -- 'System' or 'User'
application_id TEXT, -- For user operations
system_operation_type TEXT, -- For system operations (Transfer, OpenChain, etc.)
authenticated_owner TEXT,
data BYTEA NOT NULL, -- Serialized operation
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_operations_block_hash ON operations(block_hash);
CREATE INDEX IF NOT EXISTS idx_operations_type ON operations(operation_type);
CREATE INDEX IF NOT EXISTS idx_operations_application_id ON operations(application_id);
CREATE INDEX IF NOT EXISTS idx_operations_system_type ON operations(system_operation_type);
"#;
/// SQL schema for creating the outgoing messages table.
///
/// One row per message produced by a block, addressed by
/// (`block_hash`, `transaction_index`, `message_index`). The `system_*`
/// columns hold classified fields for system messages and are NULL for user
/// messages; `data` holds the serialized message content.
pub const CREATE_OUTGOING_MESSAGES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS outgoing_messages (
    id BIGSERIAL PRIMARY KEY,
    block_hash TEXT NOT NULL,
    transaction_index BIGINT NOT NULL,
    message_index BIGINT NOT NULL,
    destination_chain_id TEXT NOT NULL,
    authenticated_owner TEXT,
    grant_amount TEXT,
    message_kind TEXT NOT NULL, -- 'Simple', 'Tracked', 'Bouncing', 'Protected'
    message_type TEXT NOT NULL, -- 'System' or 'User'
    application_id TEXT, -- For user messages
    system_message_type TEXT, -- For system messages (Credit, Withdraw, etc.)
    system_target TEXT, -- Credit target
    system_amount TEXT, -- Credit/Withdraw amount
    system_source TEXT, -- Credit source
    system_owner TEXT, -- Withdraw owner
    system_recipient TEXT, -- Withdraw recipient
    data BYTEA NOT NULL, -- Serialized message content
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_block_hash ON outgoing_messages(block_hash);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_destination ON outgoing_messages(destination_chain_id);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_type ON outgoing_messages(message_type);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_application_id ON outgoing_messages(application_id);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_system_type ON outgoing_messages(system_message_type);
"#;

/// SQL schema for creating the events table.
///
/// One row per event emitted by a block; `stream_index` is the event's index
/// within its stream and `data` is the raw event payload.
pub const CREATE_EVENTS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS events (
    id BIGSERIAL PRIMARY KEY,
    block_hash TEXT NOT NULL,
    transaction_index BIGINT NOT NULL,
    event_index BIGINT NOT NULL,
    stream_id TEXT NOT NULL,
    stream_index BIGINT NOT NULL,
    data BYTEA NOT NULL, -- Event payload
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_events_block_hash ON events(block_hash);
CREATE INDEX IF NOT EXISTS idx_events_stream_id ON events(stream_id);
"#;

/// SQL schema for creating the oracle responses table.
///
/// `blob_hash` is populated only for `Blob` responses; `data` carries the
/// payload for the other response variants and may be NULL (e.g. `Assert`).
/// Note: the writer also stores 'Http', 'Assert', 'Round', 'Event' and
/// 'EventExists' response types beyond the two listed in the column comment.
pub const CREATE_ORACLE_RESPONSES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS oracle_responses (
    id BIGSERIAL PRIMARY KEY,
    block_hash TEXT NOT NULL,
    transaction_index BIGINT NOT NULL,
    response_index BIGINT NOT NULL,
    response_type TEXT NOT NULL, -- 'Service' or 'Blob'
    blob_hash TEXT, -- For blob responses
    data BYTEA, -- For service responses
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_oracle_responses_block_hash ON oracle_responses(block_hash);
CREATE INDEX IF NOT EXISTS idx_oracle_responses_type ON oracle_responses(response_type);
"#;
/// SQL schema for creating the blobs table with enhanced metadata.
///
/// Keyed by the blob's hash string. `block_hash`/`transaction_index`/
/// `application_id` are optional provenance columns; the current writer
/// (`insert_blob_tx`) only populates `hash`, `blob_type`, and `data`.
pub const CREATE_BLOBS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS blobs (
    hash TEXT PRIMARY KEY NOT NULL,
    blob_type TEXT NOT NULL, -- 'Data', 'ContractBytecode', 'ServiceBytecode', etc.
    application_id TEXT, -- If applicable
    block_hash TEXT, -- Block that created this blob
    transaction_index BIGINT, -- Transaction that created this blob
    data BYTEA NOT NULL,
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_blobs_type ON blobs(blob_type);
CREATE INDEX IF NOT EXISTS idx_blobs_block_hash ON blobs(block_hash);
CREATE INDEX IF NOT EXISTS idx_blobs_application_id ON blobs(application_id);
"#;

/// SQL schema for creating the incoming_bundles table.
///
/// One row per `ReceiveMessages` transaction in a block; `bundle_index` is
/// the transaction's index within the block. `source_*` columns describe the
/// block on the origin chain that produced the bundle. The surrogate `id`
/// is referenced by `posted_messages(bundle_id)`.
pub const CREATE_INCOMING_BUNDLES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS incoming_bundles (
    id BIGSERIAL PRIMARY KEY,
    block_hash TEXT NOT NULL,
    bundle_index BIGINT NOT NULL,
    origin_chain_id TEXT NOT NULL,
    action TEXT NOT NULL,
    source_height BIGINT NOT NULL,
    source_timestamp BIGINT NOT NULL,
    source_cert_hash TEXT NOT NULL,
    transaction_index BIGINT NOT NULL,
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_block_hash ON incoming_bundles(block_hash);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_origin_chain ON incoming_bundles(origin_chain_id);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_action ON incoming_bundles(action);
"#;

/// SQL schema for creating the posted_messages table.
///
/// One row per message inside an incoming bundle, linked to its parent via
/// `bundle_id`, so `CREATE_INCOMING_BUNDLES_TABLE` must run first. The
/// `system_*` classification columns mirror those of `outgoing_messages`.
pub const CREATE_POSTED_MESSAGES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS posted_messages (
    id BIGSERIAL PRIMARY KEY,
    bundle_id BIGINT NOT NULL,
    message_index BIGINT NOT NULL,
    authenticated_owner TEXT,
    grant_amount TEXT,
    refund_grant_to TEXT,
    message_kind TEXT NOT NULL,
    message_type TEXT NOT NULL, -- 'System' or 'User'
    application_id TEXT, -- For user messages
    system_message_type TEXT, -- For system messages (Credit, Withdraw, etc.)
    system_target TEXT, -- Credit target
    system_amount TEXT, -- Credit/Withdraw amount
    system_source TEXT, -- Credit source
    system_owner TEXT, -- Withdraw owner
    system_recipient TEXT, -- Withdraw recipient
    message_data BYTEA NOT NULL,
    created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (bundle_id) REFERENCES incoming_bundles(id)
);
CREATE INDEX IF NOT EXISTS idx_posted_messages_bundle_id ON posted_messages(bundle_id);
CREATE INDEX IF NOT EXISTS idx_posted_messages_kind ON posted_messages(message_kind);
CREATE INDEX IF NOT EXISTS idx_posted_messages_type ON posted_messages(message_type);
CREATE INDEX IF NOT EXISTS idx_posted_messages_system_type ON posted_messages(system_message_type);
"#;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/postgres/tests.rs | linera-indexer/lib/src/db/postgres/tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use dockertest::{waitfor, DockerTest, Image, Source, TestBodySpecification};
use linera_base::{
crypto::{CryptoHash, TestString},
data_types::{Amount, Blob, BlockHeight, Epoch, Timestamp},
hashed::Hashed,
identifiers::{ApplicationId, ChainId},
};
use linera_chain::{
block::{Block, BlockBody, BlockHeader},
data_types::{IncomingBundle, MessageAction, PostedMessage},
};
use linera_execution::{Message, MessageKind};
use linera_service_graphql_client::MessageBundle;
use crate::db::{postgres::PostgresDatabase, IndexerDatabase};
#[tokio::test]
async fn test_postgres_database_operations() {
    run_with_postgres(|database_url| async move {
        let db = PostgresDatabase::new(&database_url)
            .await
            .expect("Failed to create test database");

        // Store a blob inside an explicitly committed transaction.
        let blob = Blob::new_data(b"test blob content".to_vec());
        let id = blob.id();
        let serialized = bincode::serialize(&blob).unwrap();

        let mut txn = db.begin_transaction().await.unwrap();
        db.insert_blob_tx(&mut txn, &id, &serialized).await.unwrap();
        txn.commit().await.unwrap();

        // Reading it back must return exactly the bytes we wrote.
        assert_eq!(db.get_blob(&id).await.unwrap(), serialized);
    })
    .await;
}
#[tokio::test]
async fn test_atomic_transaction_behavior() {
    run_with_postgres(|database_url| async move {
        let db = PostgresDatabase::new(&database_url)
            .await
            .expect("Failed to create test database");

        let blob = Blob::new_data(b"test content".to_vec());
        let id = blob.id();
        let serialized = bincode::serialize(&blob).unwrap();

        // A transaction dropped without `commit` must roll back: the blob
        // should not be visible afterwards.
        {
            let mut txn = db.begin_transaction().await.unwrap();
            db.insert_blob_tx(&mut txn, &id, &serialized).await.unwrap();
        }
        assert!(db.get_blob(&id).await.is_err());

        // Committing the same insert makes the blob durable.
        let mut txn = db.begin_transaction().await.unwrap();
        db.insert_blob_tx(&mut txn, &id, &serialized).await.unwrap();
        txn.commit().await.unwrap();
        assert_eq!(db.get_blob(&id).await.unwrap(), serialized);
    })
    .await;
}
#[tokio::test]
async fn test_high_level_atomic_api() {
    run_with_postgres(|database_url| async move {
        let db = PostgresDatabase::new(&database_url)
            .await
            .expect("Failed to create test database");

        // Two blobs plus a block, stored together in one atomic call.
        let blob1 = Blob::new_data(b"test blob 1".to_vec());
        let blob2 = Blob::new_data(b"test blob 2".to_vec());
        let blob1_data = bincode::serialize(&blob1).unwrap();
        let blob2_data = bincode::serialize(&blob2).unwrap();

        let chain_id = ChainId(CryptoHash::new(blob2.content()));
        let height = BlockHeight(1);
        let timestamp = Timestamp::now();
        let block = create_test_block(chain_id, height);
        let block_hash = Hashed::new(block.clone()).hash();
        let block_data = bincode::serialize(&block).unwrap();

        let blobs = vec![
            (blob1.id(), blob1_data.clone()),
            (blob2.id(), blob2_data.clone()),
        ];
        db.store_block_with_blobs(
            &block_hash,
            &chain_id,
            height,
            timestamp,
            &block_data,
            &blobs,
        )
        .await
        .unwrap();

        // Everything written by the atomic call must be readable.
        assert_eq!(db.get_block(&block_hash).await.unwrap(), block_data);
        assert_eq!(db.get_blob(&blob1.id()).await.unwrap(), blob1_data);
        assert_eq!(db.get_blob(&blob2.id()).await.unwrap(), blob2_data);
    })
    .await;
}
// End-to-end check that inserting a block containing a `ReceiveMessages`
// transaction denormalizes its incoming bundle and posted message into the
// `incoming_bundles` / `posted_messages` tables, and that the query helpers
// return the same data.
#[tokio::test]
async fn test_incoming_bundles_storage_and_query() {
    run_with_postgres(|database_url| async move {
        let db = PostgresDatabase::new(&database_url)
            .await
            .expect("Failed to create test database");
        // First insert a test block that the bundle can reference.
        let mut test_block = create_test_block(
            ChainId(CryptoHash::new(&TestString::new("test_chain_id"))),
            BlockHeight(100),
        );
        // A single user message carried by the bundle.
        let incoming_bundle_message = PostedMessage {
            index: 0,
            authenticated_owner: None,
            grant: Amount::from_tokens(100),
            refund_grant_to: None,
            kind: MessageKind::Protected,
            message: Message::User {
                application_id: ApplicationId::new(CryptoHash::new(&TestString::new(
                    "test_app_id",
                ))),
                bytes: b"test_message_data".to_vec(),
            },
        };
        let origin_chain_id = ChainId(CryptoHash::new(&TestString::new("origin_chain")));
        let source_cert_hash = CryptoHash::new(&TestString::new("source_cert_hash"));
        let incoming_bundle = IncomingBundle {
            origin: origin_chain_id,
            bundle: MessageBundle {
                height: test_block.header.height,
                timestamp: Timestamp::now(),
                certificate_hash: source_cert_hash,
                transaction_index: 2,
                messages: vec![incoming_bundle_message.clone()],
            },
            action: MessageAction::Reject,
        };
        // Attach the bundle as the block's only transaction (index 0).
        test_block
            .body
            .transactions
            .push(linera_chain::data_types::Transaction::ReceiveMessages(
                incoming_bundle.clone(),
            ));
        let block_hash = Hashed::new(test_block.clone()).hash();
        let block_data = bincode::serialize(&test_block).unwrap();
        // Inserting the block also populates the bundle/message tables.
        let mut tx = db.begin_transaction().await.unwrap();
        db.insert_block_tx(
            &mut tx,
            &block_hash,
            &test_block.header.chain_id,
            test_block.header.height,
            test_block.header.timestamp,
            &block_data,
        )
        .await
        .unwrap();
        tx.commit().await.unwrap();
        // Test the query methods.
        let bundles = db
            .get_incoming_bundles_for_block(&block_hash)
            .await
            .unwrap();
        assert_eq!(bundles.len(), 1);
        let (queried_bundle_id, bundle_info) = &bundles[0];
        // bundle_index is the transaction's index within the block, hence 0.
        assert_eq!(bundle_info.bundle_index, 0);
        assert_eq!(bundle_info.origin_chain_id, origin_chain_id);
        assert_eq!(bundle_info.action, incoming_bundle.action);
        assert_eq!(bundle_info.source_height, incoming_bundle.bundle.height);
        assert_eq!(
            bundle_info.transaction_index,
            incoming_bundle.bundle.transaction_index
        );
        let messages = db
            .get_posted_messages_for_bundle(*queried_bundle_id)
            .await
            .unwrap();
        assert_eq!(messages.len(), 1);
        let message_info = &messages[0];
        assert_eq!(message_info.message_index, 0);
        // Grant is stored as a decimal string, kind as its string name.
        assert_eq!(
            message_info.grant_amount,
            Amount::from_tokens(100).to_string()
        );
        assert_eq!(
            message_info.message_kind,
            incoming_bundle_message.kind.to_string()
        );
        assert!(message_info.authenticated_owner_data.is_none());
        assert!(message_info.refund_grant_to_data.is_none());
        assert_eq!(
            message_info.message_data,
            bincode::serialize(&incoming_bundle_message.message).unwrap()
        );
        // Test querying by origin chain.
        let origin_bundles = db
            .get_bundles_from_origin_chain(&origin_chain_id)
            .await
            .unwrap();
        assert_eq!(origin_bundles.len(), 1);
        assert_eq!(origin_bundles[0].0, block_hash);
        assert_eq!(origin_bundles[0].1, *queried_bundle_id);
    })
    .await;
}
/// Helper function to run a test with a Postgres container.
///
/// Spins up a `postgres:16-alpine` container via `dockertest`, waits until
/// the server reports readiness on stderr, and invokes `test_fn` with a
/// `postgresql://` URL pointing at the container's published 5432 port.
async fn run_with_postgres<F, Fut>(test_fn: F)
where
    F: FnOnce(String) -> Fut + Send + 'static,
    Fut: std::future::Future<Output = ()> + Send + 'static,
{
    // Docker Desktop exposes its socket under the user's home directory
    // rather than the system /var/run/docker.sock; point DOCKER_HOST at it
    // when present so dockertest can connect.
    if let Ok(home) = std::env::var("HOME") {
        let docker_desktop_sock = if cfg!(target_os = "macos") {
            format!("{}/.docker/run/docker.sock", home)
        } else {
            // Fix: Docker Desktop for Linux places its socket at
            // ~/.docker/desktop/docker.sock. The previous
            // "{home}/var/run/docker.sock" path does not correspond to any
            // Docker installation, so the fallback never triggered.
            format!("{}/.docker/desktop/docker.sock", home)
        };
        if std::path::Path::new(&docker_desktop_sock).exists() {
            std::env::set_var("DOCKER_HOST", format!("unix://{}", docker_desktop_sock));
        }
    }
    let mut test = DockerTest::new().with_default_source(Source::DockerHub);
    let mut postgres_composition =
        TestBodySpecification::with_image(Image::with_repository("postgres").tag("16-alpine"))
            .set_publish_all_ports(true)
            .set_wait_for(Box::new(waitfor::MessageWait {
                message: "database system is ready to accept connections".to_string(),
                source: waitfor::MessageSource::Stderr,
                timeout: 30,
            }));
    postgres_composition.modify_env("POSTGRES_PASSWORD", "testpass");
    postgres_composition.modify_env("POSTGRES_USER", "testuser");
    postgres_composition.modify_env("POSTGRES_DB", "testdb");
    test.provide_container(postgres_composition);
    test.run_async(|ops| async move {
        let container = ops.handle("postgres");
        let (_, host_port) = container.host_port(5432).unwrap();
        let database_url = format!(
            "postgresql://testuser:testpass@localhost:{}/testdb",
            host_port
        );
        test_fn(database_url).await;
    })
    .await;
}
/// Builds a minimal valid `Block` for tests: an empty body plus a header
/// whose hash fields are filled with deterministic placeholder hashes.
fn create_test_block(chain_id: ChainId, height: BlockHeight) -> Block {
    // Deterministic placeholder hash derived from a label.
    let test_hash = |label: &str| CryptoHash::new(&TestString::new(label));
    Block {
        header: BlockHeader {
            chain_id,
            epoch: Epoch::ZERO,
            height,
            timestamp: Timestamp::now(),
            state_hash: test_hash("test_state_hash"),
            previous_block_hash: None,
            authenticated_owner: None,
            transactions_hash: test_hash("transactions_hash"),
            messages_hash: test_hash("messages_hash"),
            previous_message_blocks_hash: test_hash("prev_msg_blocks_hash"),
            previous_event_blocks_hash: test_hash("prev_event_blocks_hash"),
            oracle_responses_hash: test_hash("oracle_responses_hash"),
            events_hash: test_hash("events_hash"),
            blobs_hash: test_hash("blobs_hash"),
            operation_results_hash: test_hash("operation_results_hash"),
        },
        body: BlockBody {
            transactions: Vec::new(),
            messages: Vec::new(),
            previous_message_blocks: Default::default(),
            previous_event_blocks: Default::default(),
            oracle_responses: Vec::new(),
            events: Vec::new(),
            blobs: Vec::new(),
            operation_results: Vec::new(),
        },
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/postgres/mod.rs | linera-indexer/lib/src/db/postgres/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! PostgreSQL database module for storing blocks and blobs.
mod consts;
#[cfg(test)]
mod tests;
use std::str::FromStr;
use async_trait::async_trait;
use consts::{
CREATE_BLOBS_TABLE, CREATE_BLOCKS_TABLE, CREATE_EVENTS_TABLE, CREATE_INCOMING_BUNDLES_TABLE,
CREATE_OPERATIONS_TABLE, CREATE_ORACLE_RESPONSES_TABLE, CREATE_OUTGOING_MESSAGES_TABLE,
CREATE_POSTED_MESSAGES_TABLE,
};
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Event, OracleResponse, Timestamp},
identifiers::{BlobId, ChainId},
};
use linera_chain::{
block::Block,
data_types::{IncomingBundle, MessageAction, PostedMessage},
};
use linera_execution::{Message, Operation, OutgoingMessage, SystemOperation};
use sqlx::{
postgres::{PgPool, PgPoolOptions},
Postgres, Row, Transaction,
};
use thiserror::Error;
use crate::db::{
common::{classify_message, message_kind_to_string, parse_message_kind},
IncomingBundleInfo, IndexerDatabase, PostedMessageInfo,
};
/// Errors returned by [`PostgresDatabase`] operations.
#[derive(Error, Debug)]
pub enum PostgresError {
    /// Any underlying sqlx connection/query failure.
    #[error("Database error: {0}")]
    Database(#[from] sqlx::Error),
    /// A bincode (de)serialization failure; carries a human-readable message.
    #[error("Serialization error: {0}")]
    Serialization(String),
    /// No row exists in `blocks` for the requested hash.
    #[error("Block not found: {0}")]
    BlockNotFound(CryptoHash),
    /// No row exists in `blobs` for the requested blob id.
    #[error("Blob not found: {0}")]
    BlobNotFound(BlobId),
}
/// PostgreSQL-backed indexer database: a connection pool plus schema and
/// query helpers for blocks, blobs, and their denormalized tables.
pub struct PostgresDatabase {
    // Shared sqlx connection pool (capped at 5 connections; see `new`).
    pool: PgPool,
}
impl PostgresDatabase {
/// Create a new PostgreSQL database connection pool and initialize the
/// schema (idempotent `CREATE ... IF NOT EXISTS` statements).
///
/// # Errors
/// Returns [`PostgresError::Database`] if the pool cannot connect, or any
/// error from schema initialization.
pub async fn new(database_url: &str) -> Result<Self, PostgresError> {
    // Fix: the previous code logged the full URL, which leaks embedded
    // credentials ("postgresql://user:password@host/db") into logs. Redact
    // the userinfo section (everything between "://" and the last '@')
    // before logging.
    let redacted_url = match (database_url.find("://"), database_url.rfind('@')) {
        (Some(scheme_end), Some(at)) if at > scheme_end => format!(
            "{}://***@{}",
            &database_url[..scheme_end],
            &database_url[at + 1..]
        ),
        _ => database_url.to_string(),
    };
    tracing::info!(database_url = %redacted_url, "connecting to PostgreSQL database");
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect(database_url)
        .await
        .map_err(PostgresError::Database)?;
    let db = Self { pool };
    db.initialize_schema().await?;
    Ok(db)
}
/// Initialize the database schema.
///
/// All tables and indexes are created idempotently
/// (`CREATE ... IF NOT EXISTS`), so this is safe to run on every startup.
async fn initialize_schema(&self) -> Result<(), PostgresError> {
    // Helper to execute multi-statement SQL by splitting on semicolons.
    // NOTE(review): this assumes no statement contains an embedded ';'
    // (e.g. inside a string literal or function body). The constants in
    // `consts` currently satisfy that; keep it in mind when editing them.
    async fn execute_multi(pool: &PgPool, sql: &str) -> Result<(), PostgresError> {
        for statement in sql.split(';') {
            let trimmed = statement.trim();
            if !trimmed.is_empty() {
                sqlx::query(trimmed).execute(pool).await?;
            }
        }
        Ok(())
    }
    // Create core tables first: the tables below declare foreign keys
    // referencing blocks(hash).
    execute_multi(&self.pool, CREATE_BLOCKS_TABLE).await?;
    execute_multi(&self.pool, CREATE_BLOBS_TABLE).await?;
    // Create denormalized tables for block data
    execute_multi(&self.pool, CREATE_OPERATIONS_TABLE).await?;
    execute_multi(&self.pool, CREATE_OUTGOING_MESSAGES_TABLE).await?;
    execute_multi(&self.pool, CREATE_EVENTS_TABLE).await?;
    execute_multi(&self.pool, CREATE_ORACLE_RESPONSES_TABLE).await?;
    // Create message-related tables; posted_messages references
    // incoming_bundles(id), so the bundles table must come first.
    execute_multi(&self.pool, CREATE_INCOMING_BUNDLES_TABLE).await?;
    execute_multi(&self.pool, CREATE_POSTED_MESSAGES_TABLE).await?;
    Ok(())
}
/// Start a new transaction
async fn begin_transaction(&self) -> Result<Transaction<'_, Postgres>, PostgresError> {
Ok(self.pool.begin().await?)
}
/// Commit a transaction
async fn commit_transaction(&self, tx: Transaction<'_, Postgres>) -> Result<(), PostgresError> {
tx.commit().await.map_err(PostgresError::Database)
}
/// Insert a blob within a transaction.
///
/// The insert is a no-op if a row with the same hash already exists.
async fn insert_blob_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    blob_id: &BlobId,
    data: &[u8],
) -> Result<(), PostgresError> {
    // For now we don't have block_hash and application_id context here;
    // these could be passed as optional parameters in the future.
    sqlx::query("INSERT INTO blobs (hash, blob_type, data) VALUES ($1, $2, $3) ON CONFLICT (hash) DO NOTHING")
        .bind(blob_id.hash.to_string())
        .bind(format!("{:?}", blob_id.blob_type))
        .bind(data)
        .execute(&mut **tx)
        .await?;
    Ok(())
}
/// Insert a block within a transaction.
///
/// Writes the main `blocks` row (upsert keyed on `hash`) and then
/// denormalizes the block's contents into the `operations`,
/// `incoming_bundles`/`posted_messages`, `outgoing_messages`, `events`, and
/// `oracle_responses` tables. `data` must be a bincode-serialized `Block`;
/// it is deserialized here to extract the denormalized fields.
async fn insert_block_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    hash: &CryptoHash,
    chain_id: &ChainId,
    height: BlockHeight,
    timestamp: Timestamp,
    data: &[u8],
) -> Result<(), PostgresError> {
    // Deserialize the block to extract denormalized data
    let block: Block = bincode::deserialize(data).map_err(|e| {
        PostgresError::Serialization(format!("Failed to deserialize block: {}", e))
    })?;
    // Count aggregated data (messages/events are nested per transaction,
    // so they are flattened with a sum).
    let operation_count = block.body.operations().count();
    let incoming_bundle_count = block.body.incoming_bundles().count();
    let message_count = block.body.messages.iter().map(|v| v.len()).sum::<usize>();
    let event_count = block.body.events.iter().map(|v| v.len()).sum::<usize>();
    let blob_count = block.body.blobs.len();
    // Insert main block record with denormalized fields
    let hash_str = hash.to_string();
    let chain_id_str = chain_id.to_string();
    let state_hash_str = block.header.state_hash.to_string();
    let previous_block_hash_str = block.header.previous_block_hash.map(|h| h.to_string());
    let authenticated_owner_str = block.header.authenticated_owner.map(|s| s.to_string());
    // Upsert: re-inserting the same block hash overwrites all columns.
    sqlx::query(
        r#"
        INSERT INTO blocks
        (hash, chain_id, height, timestamp, epoch, state_hash, previous_block_hash,
         authenticated_owner, operation_count, incoming_bundle_count, message_count,
         event_count, blob_count, data)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
        ON CONFLICT (hash) DO UPDATE SET
        chain_id = EXCLUDED.chain_id,
        height = EXCLUDED.height,
        timestamp = EXCLUDED.timestamp,
        epoch = EXCLUDED.epoch,
        state_hash = EXCLUDED.state_hash,
        previous_block_hash = EXCLUDED.previous_block_hash,
        authenticated_owner = EXCLUDED.authenticated_owner,
        operation_count = EXCLUDED.operation_count,
        incoming_bundle_count = EXCLUDED.incoming_bundle_count,
        message_count = EXCLUDED.message_count,
        event_count = EXCLUDED.event_count,
        blob_count = EXCLUDED.blob_count,
        data = EXCLUDED.data
        "#,
    )
    .bind(&hash_str)
    .bind(&chain_id_str)
    .bind(height.0 as i64)
    .bind(timestamp.micros() as i64)
    .bind(block.header.epoch.0 as i64)
    .bind(&state_hash_str)
    .bind(&previous_block_hash_str)
    .bind(&authenticated_owner_str)
    .bind(operation_count as i64)
    .bind(incoming_bundle_count as i64)
    .bind(message_count as i64)
    .bind(event_count as i64)
    .bind(blob_count as i64)
    .bind(data)
    .execute(&mut **tx)
    .await?;
    // Insert operations and incoming bundles. `index` here is the
    // transaction's index within the block, used as `operation_index` /
    // `bundle_index` regardless of the transaction kind.
    for (index, transaction) in block.body.transactions.iter().enumerate() {
        match transaction {
            linera_chain::data_types::Transaction::ExecuteOperation(operation) => {
                self.insert_operation_tx(
                    tx,
                    hash,
                    index,
                    operation,
                    block.header.authenticated_owner,
                )
                .await?;
            }
            linera_chain::data_types::Transaction::ReceiveMessages(bundle) => {
                // Bundle row first (its generated id links the messages).
                let bundle_id = self
                    .insert_incoming_bundle_tx(tx, hash, index, bundle)
                    .await?;
                for message in &bundle.bundle.messages {
                    self.insert_bundle_message_tx(tx, bundle_id, message)
                        .await?;
                }
            }
        }
    }
    // Insert outgoing messages (nested: one Vec per transaction).
    for (txn_index, messages) in block.body.messages.iter().enumerate() {
        for (msg_index, message) in messages.iter().enumerate() {
            self.insert_outgoing_message_tx(tx, hash, txn_index, msg_index, message)
                .await?;
        }
    }
    // Insert events (same per-transaction nesting).
    for (txn_index, events) in block.body.events.iter().enumerate() {
        for (event_index, event) in events.iter().enumerate() {
            self.insert_event_tx(tx, hash, txn_index, event_index, event)
                .await?;
        }
    }
    // Insert oracle responses (same per-transaction nesting).
    for (txn_index, responses) in block.body.oracle_responses.iter().enumerate() {
        for (response_index, response) in responses.iter().enumerate() {
            self.insert_oracle_response_tx(tx, hash, txn_index, response_index, response)
                .await?;
        }
    }
    Ok(())
}
/// Insert an operation within a transaction.
///
/// Classifies the operation as 'System' (storing the variant name in
/// `system_operation_type`) or 'User' (storing the application id), and
/// stores the bincode-serialized operation in `data`. `operation_index` is
/// the transaction's index within the block.
async fn insert_operation_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    block_hash: &CryptoHash,
    operation_index: usize,
    operation: &Operation,
    authenticated_owner: Option<linera_base::identifiers::AccountOwner>,
) -> Result<(), PostgresError> {
    let block_hash_str = block_hash.to_string();
    let authenticated_owner_str = authenticated_owner.map(|s| s.to_string());
    // Map each system-operation variant to the string stored in the
    // `system_operation_type` column; user operations store their
    // application id instead. These strings are part of the stored schema —
    // do not rename them without a migration.
    let (operation_type, application_id, system_operation_type) = match operation {
        Operation::System(sys_op) => {
            let sys_op_type = match sys_op.as_ref() {
                SystemOperation::Transfer { .. } => "Transfer",
                SystemOperation::Claim { .. } => "Claim",
                SystemOperation::OpenChain { .. } => "OpenChain",
                SystemOperation::CloseChain => "CloseChain",
                SystemOperation::ChangeApplicationPermissions { .. } => {
                    "ChangeApplicationPermissions"
                }
                SystemOperation::CreateApplication { .. } => "CreateApplication",
                SystemOperation::PublishModule { .. } => "PublishModule",
                SystemOperation::PublishDataBlob { .. } => "PublishDataBlob",
                SystemOperation::Admin(_) => "Admin",
                SystemOperation::ProcessNewEpoch(_) => "ProcessNewEpoch",
                SystemOperation::ProcessRemovedEpoch(_) => "ProcessRemovedEpoch",
                SystemOperation::UpdateStreams(_) => "UpdateStreams",
                SystemOperation::ChangeOwnership { .. } => "ChangeOwnership",
                SystemOperation::VerifyBlob { .. } => "VerifyBlob",
            };
            ("System", None, Some(sys_op_type))
        }
        Operation::User { application_id, .. } => {
            ("User", Some(application_id.to_string()), None)
        }
    };
    let data = bincode::serialize(operation).map_err(|e| {
        PostgresError::Serialization(format!("Failed to serialize operation: {}", e))
    })?;
    sqlx::query(
        r#"
        INSERT INTO operations
        (block_hash, operation_index, operation_type, application_id, system_operation_type, authenticated_owner, data)
        VALUES ($1, $2, $3, $4, $5, $6, $7)
        "#,
    )
    .bind(&block_hash_str)
    .bind(operation_index as i64)
    .bind(operation_type)
    .bind(application_id)
    .bind(system_operation_type)
    .bind(&authenticated_owner_str)
    .bind(&data)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Insert an outgoing message within a transaction.
///
/// `classify_message` (shared with `insert_bundle_message_tx`) extracts the
/// type/application/system-field columns; the serialized message body goes
/// into `data`. Bind order must match the column list exactly — keep the two
/// in sync when editing.
async fn insert_outgoing_message_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    block_hash: &CryptoHash,
    transaction_index: usize,
    message_index: usize,
    message: &OutgoingMessage,
) -> Result<(), PostgresError> {
    let block_hash_str = block_hash.to_string();
    let destination_chain_id_str = message.destination.to_string();
    let authenticated_owner_str = message.authenticated_owner.map(|s| s.to_string());
    let message_kind_str = message_kind_to_string(&message.kind);
    let classification = classify_message(&message.message);
    let data = Self::serialize_message(&message.message)?;
    sqlx::query(
        r#"
        INSERT INTO outgoing_messages
        (block_hash, transaction_index, message_index, destination_chain_id, authenticated_owner,
         grant_amount, message_kind, message_type, application_id, system_message_type,
         system_target, system_amount, system_source, system_owner, system_recipient, data)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
        "#,
    )
    .bind(&block_hash_str)
    .bind(transaction_index as i64)
    .bind(message_index as i64)
    .bind(&destination_chain_id_str)
    .bind(&authenticated_owner_str)
    .bind(message.grant.to_string())
    .bind(&message_kind_str)
    .bind(classification.message_type)
    .bind(classification.application_id)
    .bind(classification.system_message_type)
    .bind(classification.system_target)
    .bind(classification.system_amount.map(|a| a.to_string()))
    .bind(classification.system_source)
    .bind(classification.system_owner)
    .bind(classification.system_recipient)
    .bind(&data)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Insert a single block event into the `events` table.
async fn insert_event_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    block_hash: &CryptoHash,
    transaction_index: usize,
    event_index: usize,
    event: &Event,
) -> Result<(), PostgresError> {
    sqlx::query(
        r#"
        INSERT INTO events
        (block_hash, transaction_index, event_index, stream_id, stream_index, data)
        VALUES ($1, $2, $3, $4, $5, $6)
        "#,
    )
    .bind(block_hash.to_string())
    .bind(transaction_index as i64)
    .bind(event_index as i64)
    .bind(event.stream_id.to_string())
    .bind(event.index as i64)
    .bind(&event.value)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Insert an oracle response within a transaction.
///
/// Each variant maps to a `response_type` string; `blob_hash` is populated
/// only for `Blob` responses, and `data` carries the payload for the other
/// variants (bincode-serialized where the payload is structured). `Assert`
/// stores neither.
async fn insert_oracle_response_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    block_hash: &CryptoHash,
    transaction_index: usize,
    response_index: usize,
    response: &OracleResponse,
) -> Result<(), PostgresError> {
    let block_hash_str = block_hash.to_string();
    let (response_type, blob_hash, data): (&str, Option<String>, Option<Vec<u8>>) =
        match response {
            // Service responses are already raw bytes — stored as-is.
            OracleResponse::Service(service_data) => {
                ("Service", None, Some(service_data.clone()))
            }
            OracleResponse::Blob(blob_id) => ("Blob", Some(blob_id.hash.to_string()), None),
            OracleResponse::Http(http_response) => {
                let serialized = bincode::serialize(http_response).map_err(|e| {
                    PostgresError::Serialization(format!(
                        "Failed to serialize HTTP response: {}",
                        e
                    ))
                })?;
                ("Http", None, Some(serialized))
            }
            OracleResponse::Assert => ("Assert", None, None),
            OracleResponse::Round(round) => {
                let serialized = bincode::serialize(round).map_err(|e| {
                    PostgresError::Serialization(format!("Failed to serialize round: {}", e))
                })?;
                ("Round", None, Some(serialized))
            }
            // Event responses store the (stream_id, index) pair serialized
            // together as the payload.
            OracleResponse::Event(stream_id, index) => {
                let serialized = bincode::serialize(&(stream_id, index)).map_err(|e| {
                    PostgresError::Serialization(format!("Failed to serialize event: {}", e))
                })?;
                ("Event", None, Some(serialized))
            }
            OracleResponse::EventExists(event_exists) => {
                let serialized = bincode::serialize(event_exists).map_err(|e| {
                    PostgresError::Serialization(format!(
                        "Failed to serialize event exists: {}",
                        e
                    ))
                })?;
                ("EventExists", None, Some(serialized))
            }
        };
    sqlx::query(
        r#"
        INSERT INTO oracle_responses
        (block_hash, transaction_index, response_index, response_type, blob_hash, data)
        VALUES ($1, $2, $3, $4, $5, $6)
        "#,
    )
    .bind(&block_hash_str)
    .bind(transaction_index as i64)
    .bind(response_index as i64)
    .bind(response_type)
    .bind(blob_hash)
    .bind(data)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Insert an incoming bundle within a transaction and return the bundle ID.
///
/// The returned `id` is the BIGSERIAL key generated by Postgres (via
/// `RETURNING id`) and is used to link the bundle's posted messages.
/// `bundle_index` is the transaction's index within the block.
async fn insert_incoming_bundle_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    block_hash: &CryptoHash,
    bundle_index: usize,
    incoming_bundle: &IncomingBundle,
) -> Result<i64, PostgresError> {
    let block_hash_str = block_hash.to_string();
    let origin_chain_str = incoming_bundle.origin.to_string();
    // Stored as a plain string; queried back by `action` in the indexes.
    let action_str = match incoming_bundle.action {
        MessageAction::Accept => "Accept",
        MessageAction::Reject => "Reject",
    };
    let source_cert_hash_str = incoming_bundle.bundle.certificate_hash.to_string();
    let result = sqlx::query(
        r#"
        INSERT INTO incoming_bundles
        (block_hash, bundle_index, origin_chain_id, action, source_height, source_timestamp, source_cert_hash, transaction_index)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
        RETURNING id
        "#
    )
    .bind(&block_hash_str)
    .bind(bundle_index as i64)
    .bind(&origin_chain_str)
    .bind(action_str)
    .bind(incoming_bundle.bundle.height.0 as i64)
    .bind(incoming_bundle.bundle.timestamp.micros() as i64)
    .bind(&source_cert_hash_str)
    .bind(incoming_bundle.bundle.transaction_index as i64)
    .fetch_one(&mut **tx)
    .await?;
    Ok(result.get("id"))
}
/// Insert a posted message within a transaction.
///
/// `bundle_id` must be the surrogate key previously returned by
/// `insert_incoming_bundle_tx`. Classification mirrors
/// `insert_outgoing_message_tx`; bind order must match the column list.
async fn insert_bundle_message_tx(
    &self,
    tx: &mut Transaction<'_, Postgres>,
    bundle_id: i64,
    message: &PostedMessage,
) -> Result<(), PostgresError> {
    let authenticated_owner_str = message.authenticated_owner.map(|s| s.to_string());
    let refund_grant_to = message.refund_grant_to.as_ref().map(|s| format!("{s}"));
    let message_kind_str = message_kind_to_string(&message.kind);
    let classification = classify_message(&message.message);
    let message_data = Self::serialize_message(&message.message)?;
    sqlx::query(
        r#"
        INSERT INTO posted_messages
        (bundle_id, message_index, authenticated_owner, grant_amount, refund_grant_to,
         message_kind, message_type, application_id, system_message_type,
         system_target, system_amount, system_source, system_owner, system_recipient, message_data)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
        "#
    )
    .bind(bundle_id)
    .bind(message.index as i64)
    .bind(authenticated_owner_str)
    .bind(message.grant.to_string())
    .bind(refund_grant_to)
    .bind(&message_kind_str)
    .bind(classification.message_type)
    .bind(classification.application_id)
    .bind(classification.system_message_type)
    .bind(classification.system_target)
    .bind(classification.system_amount.map(|a| a.to_string()))
    .bind(classification.system_source)
    .bind(classification.system_owner)
    .bind(classification.system_recipient)
    .bind(&message_data)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Get the serialized bytes of a block by its hash.
///
/// Returns [`PostgresError::BlockNotFound`] if no such row exists.
pub async fn get_block(&self, hash: &CryptoHash) -> Result<Vec<u8>, PostgresError> {
    sqlx::query("SELECT data FROM blocks WHERE hash = $1")
        .bind(hash.to_string())
        .fetch_optional(&self.pool)
        .await?
        .map(|row| row.get("data"))
        .ok_or(PostgresError::BlockNotFound(*hash))
}
/// Get the serialized bytes of a blob by its id (keyed on the blob's hash).
///
/// Returns [`PostgresError::BlobNotFound`] if no such row exists.
pub async fn get_blob(&self, blob_id: &BlobId) -> Result<Vec<u8>, PostgresError> {
    sqlx::query("SELECT data FROM blobs WHERE hash = $1")
        .bind(blob_id.hash.to_string())
        .fetch_optional(&self.pool)
        .await?
        .map(|row| row.get("data"))
        .ok_or(PostgresError::BlobNotFound(*blob_id))
}
/// Return the highest block stored for `chain_id`, if any, as
/// `(hash, height, serialized block)`.
///
/// # Errors
/// `PostgresError::Serialization` if the stored hash text cannot be
/// parsed back into a `CryptoHash`.
pub async fn get_latest_block_for_chain(
    &self,
    chain_id: &ChainId,
) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, PostgresError> {
    let chain_key = chain_id.to_string();
    let maybe_row = sqlx::query(
        "SELECT hash, height, data FROM blocks WHERE chain_id = $1 ORDER BY height DESC LIMIT 1"
    )
    .bind(&chain_key)
    .fetch_optional(&self.pool)
    .await?;
    // No rows simply means the chain has no blocks yet.
    let Some(row) = maybe_row else {
        return Ok(None);
    };
    let hash: CryptoHash = row
        .get::<String, _>("hash")
        .parse()
        .map_err(|_| PostgresError::Serialization("Invalid hash format".to_string()))?;
    let height = BlockHeight(row.get::<i64, _>("height") as u64);
    let data: Vec<u8> = row.get("data");
    Ok(Some((hash, height, data)))
}
/// Return all blocks of `chain_id` with height in the inclusive range
/// `[start_height, end_height]`, ordered by ascending height.
///
/// # Errors
/// `PostgresError::Serialization` if any stored hash fails to parse.
pub async fn get_blocks_for_chain_range(
    &self,
    chain_id: &ChainId,
    start_height: BlockHeight,
    end_height: BlockHeight,
) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, PostgresError> {
    let chain_key = chain_id.to_string();
    let rows = sqlx::query(
        "SELECT hash, height, data FROM blocks WHERE chain_id = $1 AND height >= $2 AND height <= $3 ORDER BY height ASC"
    )
    .bind(&chain_key)
    .bind(start_height.0 as i64)
    .bind(end_height.0 as i64)
    .fetch_all(&self.pool)
    .await?;
    // Convert each row, short-circuiting on the first parse failure.
    rows.into_iter()
        .map(|row| {
            let hash: CryptoHash = row
                .get::<String, _>("hash")
                .parse()
                .map_err(|_| PostgresError::Serialization("Invalid hash format".to_string()))?;
            let height = BlockHeight(row.get::<i64, _>("height") as u64);
            let data: Vec<u8> = row.get("data");
            Ok((hash, height, data))
        })
        .collect()
}
/// Report whether a blob with this id is already stored.
pub async fn blob_exists(&self, blob_id: &BlobId) -> Result<bool, PostgresError> {
    let key = blob_id.hash.to_string();
    let found = sqlx::query("SELECT 1 FROM blobs WHERE hash = $1 LIMIT 1")
        .bind(&key)
        .fetch_optional(&self.pool)
        .await?
        .is_some();
    Ok(found)
}
/// Report whether a block with this hash is already stored.
pub async fn block_exists(&self, hash: &CryptoHash) -> Result<bool, PostgresError> {
    let key = hash.to_string();
    let found = sqlx::query("SELECT 1 FROM blocks WHERE hash = $1 LIMIT 1")
        .bind(&key)
        .fetch_optional(&self.pool)
        .await?
        .is_some();
    Ok(found)
}
/// Get incoming bundles for a specific block
///
/// Returns each bundle's database row id (usable as the `bundle_id` key of
/// `get_posted_messages_for_bundle`) paired with its parsed metadata,
/// ordered by `bundle_index`.
///
/// # Errors
/// `PostgresError::Serialization` if a stored chain id, action string, or
/// certificate hash cannot be parsed back into its typed form.
pub async fn get_incoming_bundles_for_block(
    &self,
    block_hash: &CryptoHash,
) -> Result<Vec<(i64, IncomingBundleInfo)>, PostgresError> {
    let block_hash_str = block_hash.to_string();
    let rows = sqlx::query(
        r#"
        SELECT id, bundle_index, origin_chain_id, action, source_height,
               source_timestamp, source_cert_hash, transaction_index
        FROM incoming_bundles
        WHERE block_hash = $1
        ORDER BY bundle_index ASC
        "#,
    )
    .bind(&block_hash_str)
    .fetch_all(&self.pool)
    .await?;
    let mut bundles = Vec::new();
    for row in rows {
        let bundle_id: i64 = row.get("id");
        // Columns are stored as TEXT/INTEGER; convert each back to its
        // domain type, failing with a Serialization error on bad data.
        let bundle_info = IncomingBundleInfo {
            bundle_index: row.get::<i64, _>("bundle_index") as usize,
            origin_chain_id: row
                .get::<String, _>("origin_chain_id")
                .parse()
                .map_err(|_| PostgresError::Serialization("Invalid chain ID".to_string()))?,
            // Only the two known action strings are accepted.
            action: match row.get::<String, _>("action").as_str() {
                "Accept" => MessageAction::Accept,
                "Reject" => MessageAction::Reject,
                _ => return Err(PostgresError::Serialization("Invalid action".to_string())),
            },
            source_height: BlockHeight(row.get::<i64, _>("source_height") as u64),
            source_timestamp: Timestamp::from(row.get::<i64, _>("source_timestamp") as u64),
            source_cert_hash: row
                .get::<String, _>("source_cert_hash")
                .parse()
                .map_err(|_| PostgresError::Serialization("Invalid cert hash".to_string()))?,
            transaction_index: row.get::<i64, _>("transaction_index") as u32,
        };
        bundles.push((bundle_id, bundle_info));
    }
    Ok(bundles)
}
/// Fetch all posted messages belonging to one incoming bundle, ordered by
/// message index.
///
/// `bundle_id` is the database row id returned by the bundle queries.
pub async fn get_posted_messages_for_bundle(
    &self,
    bundle_id: i64,
) -> Result<Vec<PostedMessageInfo>, PostgresError> {
    let rows = sqlx::query(
        r#"
        SELECT message_index, authenticated_owner, grant_amount, refund_grant_to,
               message_kind, message_data
        FROM posted_messages
        WHERE bundle_id = $1
        ORDER BY message_index ASC
        "#,
    )
    .bind(bundle_id)
    .fetch_all(&self.pool)
    .await?;
    // Every column maps straight into the info struct, so no conversion
    // can fail here.
    let messages = rows
        .into_iter()
        .map(|row| PostedMessageInfo {
            message_index: row.get::<i64, _>("message_index") as u32,
            authenticated_owner_data: row.get("authenticated_owner"),
            grant_amount: row.get("grant_amount"),
            refund_grant_to_data: row.get("refund_grant_to"),
            message_kind: row.get("message_kind"),
            message_data: row.get("message_data"),
        })
        .collect();
    Ok(messages)
}
/// Get all bundles from a specific origin chain
///
/// Returns, for every bundle received from `origin_chain_id`: the hash of
/// the block that received it, the bundle's database row id, and its parsed
/// metadata, ordered by source height and then bundle index.
///
/// # Errors
/// `PostgresError::Serialization` if a stored hash, chain id, or action
/// string cannot be parsed back into its typed form.
pub async fn get_bundles_from_origin_chain(
    &self,
    origin_chain_id: &ChainId,
) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, PostgresError> {
    let origin_chain_str = origin_chain_id.to_string();
    let rows = sqlx::query(
        r#"
        SELECT block_hash, id, bundle_index, origin_chain_id, action, source_height,
               source_timestamp, source_cert_hash, transaction_index
        FROM incoming_bundles
        WHERE origin_chain_id = $1
        ORDER BY source_height ASC, bundle_index ASC
        "#,
    )
    .bind(&origin_chain_str)
    .fetch_all(&self.pool)
    .await?;
    let mut bundles = Vec::new();
    for row in rows {
        let block_hash: CryptoHash = row
            .get::<String, _>("block_hash")
            .parse()
            .map_err(|_| PostgresError::Serialization("Invalid block hash".to_string()))?;
        let bundle_id: i64 = row.get("id");
        // Convert the TEXT/INTEGER columns back to domain types; any
        // malformed value becomes a Serialization error.
        let bundle_info = IncomingBundleInfo {
            bundle_index: row.get::<i64, _>("bundle_index") as usize,
            origin_chain_id: row
                .get::<String, _>("origin_chain_id")
                .parse()
                .map_err(|_| PostgresError::Serialization("Invalid chain ID".to_string()))?,
            action: match row.get::<String, _>("action").as_str() {
                "Accept" => MessageAction::Accept,
                "Reject" => MessageAction::Reject,
                _ => return Err(PostgresError::Serialization("Invalid action".to_string())),
            },
            source_height: BlockHeight(row.get::<i64, _>("source_height") as u64),
            source_timestamp: Timestamp::from(row.get::<i64, _>("source_timestamp") as u64),
            source_cert_hash: row
                .get::<String, _>("source_cert_hash")
                .parse()
                .map_err(|_| PostgresError::Serialization("Invalid cert hash".to_string()))?,
            transaction_index: row.get::<i64, _>("transaction_index") as u32,
        };
        bundles.push((block_hash, bundle_id, bundle_info));
    }
    Ok(bundles)
}
/// Fetch and deserialize all operations of one block, paired with their
/// position in the block, ordered by operation index.
///
/// # Errors
/// `PostgresError::Serialization` if a stored operation fails to
/// deserialize.
pub async fn get_operations_for_block(
    &self,
    block_hash: &CryptoHash,
) -> Result<Vec<(usize, Operation)>, PostgresError> {
    let key = block_hash.to_string();
    let rows = sqlx::query(
        r#"
        SELECT operation_index, data
        FROM operations
        WHERE block_hash = $1
        ORDER BY operation_index ASC
        "#,
    )
    .bind(&key)
    .fetch_all(&self.pool)
    .await?;
    rows.into_iter()
        .map(|row| {
            let index = row.get::<i64, _>("operation_index") as usize;
            let bytes: Vec<u8> = row.get("data");
            let operation: Operation = bincode::deserialize(&bytes).map_err(|e| {
                PostgresError::Serialization(format!("Failed to deserialize operation: {}", e))
            })?;
            Ok((index, operation))
        })
        .collect()
}
/// Get outgoing messages for a specific block
pub async fn get_outgoing_messages_for_block(
&self,
block_hash: &CryptoHash,
) -> Result<Vec<OutgoingMessage>, PostgresError> {
let block_hash_str = block_hash.to_string();
let rows = sqlx::query(
r#"
SELECT destination_chain_id, authenticated_owner, grant_amount, message_kind, data
FROM outgoing_messages
WHERE block_hash = $1
ORDER BY transaction_index, message_index ASC
"#,
)
.bind(&block_hash_str)
.fetch_all(&self.pool)
.await?;
let mut messages = Vec::new();
for row in rows {
let destination_str: String = row.get("destination_chain_id");
let destination = destination_str
.parse()
.map_err(|_| PostgresError::Serialization("Invalid chain ID".to_string()))?;
let authenticated_owner_str: Option<String> = row.get("authenticated_owner");
let authenticated_owner = authenticated_owner_str.and_then(|s| s.parse().ok());
let grant_amount: String = row.get("grant_amount");
let grant = linera_base::data_types::Amount::from_str(grant_amount.as_str())
.map_err(|_| PostgresError::Serialization("Invalid grant amount".to_string()))?;
let kind_str: String = row.get("message_kind");
let kind =
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/sqlite/consts.rs | linera-indexer/lib/src/db/sqlite/consts.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! SQLite schema definitions and constants.
/// SQL schema for creating the blocks table with denormalized fields.
///
/// One row per block: header fields (epoch, state hash, previous block
/// hash, authenticated owner) and per-block aggregate counts are
/// denormalized so list/filter queries avoid deserializing `data`, which
/// keeps the full serialized block.
pub const CREATE_BLOCKS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS blocks (
    hash TEXT PRIMARY KEY NOT NULL,
    chain_id TEXT NOT NULL,
    height INTEGER NOT NULL,
    timestamp INTEGER NOT NULL,
    -- Denormalized fields from BlockHeader
    epoch INTEGER NOT NULL,
    state_hash TEXT NOT NULL,
    previous_block_hash TEXT,
    authenticated_owner TEXT,
    -- Aggregated counts for filtering and display
    operation_count INTEGER NOT NULL DEFAULT 0,
    incoming_bundle_count INTEGER NOT NULL DEFAULT 0,
    message_count INTEGER NOT NULL DEFAULT 0,
    event_count INTEGER NOT NULL DEFAULT 0,
    blob_count INTEGER NOT NULL DEFAULT 0,
    -- Original serialized block data for backward compatibility
    data BLOB NOT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX IF NOT EXISTS idx_blocks_chain_height ON blocks(chain_id, height);
CREATE INDEX IF NOT EXISTS idx_blocks_chain_id ON blocks(chain_id);
CREATE INDEX IF NOT EXISTS idx_blocks_epoch ON blocks(epoch);
CREATE INDEX IF NOT EXISTS idx_blocks_timestamp ON blocks(timestamp);
CREATE INDEX IF NOT EXISTS idx_blocks_state_hash ON blocks(state_hash);
"#;
/// SQL schema for creating the operations table.
///
/// One row per operation in a block, keyed back to `blocks.hash`.
/// System/user classification columns are indexed for filtering.
pub const CREATE_OPERATIONS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS operations (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    block_hash TEXT NOT NULL,
    operation_index INTEGER NOT NULL,
    operation_type TEXT NOT NULL, -- 'System' or 'User'
    application_id TEXT, -- For user operations
    system_operation_type TEXT, -- For system operations (Transfer, OpenChain, etc.)
    authenticated_owner TEXT,
    data BLOB NOT NULL, -- Serialized operation
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_operations_block_hash ON operations(block_hash);
CREATE INDEX IF NOT EXISTS idx_operations_type ON operations(operation_type);
CREATE INDEX IF NOT EXISTS idx_operations_application_id ON operations(application_id);
CREATE INDEX IF NOT EXISTS idx_operations_system_type ON operations(system_operation_type);
"#;
/// SQL schema for creating the outgoing messages table.
///
/// One row per message produced by a block's transactions, with classified
/// system-message fields denormalized for querying.
pub const CREATE_OUTGOING_MESSAGES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS outgoing_messages (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    block_hash TEXT NOT NULL,
    transaction_index INTEGER NOT NULL,
    message_index INTEGER NOT NULL,
    destination_chain_id TEXT NOT NULL,
    authenticated_owner TEXT,
    grant_amount TEXT,
    message_kind TEXT NOT NULL, -- 'Simple', 'Tracked', 'Bouncing', 'Protected'
    message_type TEXT NOT NULL, -- 'System' or 'User'
    application_id TEXT, -- For user messages
    system_message_type TEXT, -- For system messages (Credit, Withdraw, etc.)
    system_target TEXT, -- Credit target
    system_amount TEXT, -- Credit/Withdraw amount
    system_source TEXT, -- Credit source
    system_owner TEXT, -- Withdraw owner
    system_recipient TEXT, -- Withdraw recipient
    data BLOB NOT NULL, -- Serialized message content
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_block_hash ON outgoing_messages(block_hash);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_destination ON outgoing_messages(destination_chain_id);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_type ON outgoing_messages(message_type);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_application_id ON outgoing_messages(application_id);
CREATE INDEX IF NOT EXISTS idx_outgoing_messages_system_type ON outgoing_messages(system_message_type);
"#;
/// SQL schema for creating the events table.
///
/// One row per event emitted by a block's transactions, keyed by block
/// hash and stream id.
pub const CREATE_EVENTS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS events (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    block_hash TEXT NOT NULL,
    transaction_index INTEGER NOT NULL,
    event_index INTEGER NOT NULL,
    stream_id TEXT NOT NULL,
    stream_index INTEGER NOT NULL,
    data BLOB NOT NULL, -- Event payload
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_events_block_hash ON events(block_hash);
CREATE INDEX IF NOT EXISTS idx_events_stream_id ON events(stream_id);
"#;
/// SQL schema for creating the oracle responses table.
///
/// One row per oracle response recorded in a block; service responses
/// carry `data`, blob responses carry `blob_hash`.
pub const CREATE_ORACLE_RESPONSES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS oracle_responses (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    block_hash TEXT NOT NULL,
    transaction_index INTEGER NOT NULL,
    response_index INTEGER NOT NULL,
    response_type TEXT NOT NULL, -- 'Service' or 'Blob'
    blob_hash TEXT, -- For blob responses
    data BLOB, -- For service responses
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_oracle_responses_block_hash ON oracle_responses(block_hash);
CREATE INDEX IF NOT EXISTS idx_oracle_responses_type ON oracle_responses(response_type);
"#;
/// SQL schema for creating the blobs table with enhanced metadata.
///
/// Blobs are keyed by content hash; the creating block/transaction and
/// application id columns are optional context.
pub const CREATE_BLOBS_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS blobs (
    hash TEXT PRIMARY KEY NOT NULL,
    blob_type TEXT NOT NULL, -- 'Data', 'ContractBytecode', 'ServiceBytecode', etc.
    application_id TEXT, -- If applicable
    block_hash TEXT, -- Block that created this blob
    transaction_index INTEGER, -- Transaction that created this blob
    data BLOB NOT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_blobs_type ON blobs(blob_type);
CREATE INDEX IF NOT EXISTS idx_blobs_block_hash ON blobs(block_hash);
CREATE INDEX IF NOT EXISTS idx_blobs_application_id ON blobs(application_id);
"#;
/// SQL schema for creating the incoming_bundles table.
///
/// One row per message bundle received by a block; `posted_messages`
/// rows reference `incoming_bundles.id`.
pub const CREATE_INCOMING_BUNDLES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS incoming_bundles (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    block_hash TEXT NOT NULL,
    bundle_index INTEGER NOT NULL,
    origin_chain_id TEXT NOT NULL,
    action TEXT NOT NULL,
    source_height INTEGER NOT NULL,
    source_timestamp INTEGER NOT NULL,
    source_cert_hash TEXT NOT NULL,
    transaction_index INTEGER NOT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (block_hash) REFERENCES blocks(hash)
);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_block_hash ON incoming_bundles(block_hash);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_origin_chain ON incoming_bundles(origin_chain_id);
CREATE INDEX IF NOT EXISTS idx_incoming_bundles_action ON incoming_bundles(action);
"#;
/// SQL schema for creating the posted_messages table.
///
/// One row per message inside an incoming bundle, keyed by
/// `incoming_bundles.id`, with classified system-message fields
/// denormalized for querying.
pub const CREATE_POSTED_MESSAGES_TABLE: &str = r#"
CREATE TABLE IF NOT EXISTS posted_messages (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    bundle_id INTEGER NOT NULL,
    message_index INTEGER NOT NULL,
    authenticated_owner TEXT,
    grant_amount TEXT,
    refund_grant_to TEXT,
    message_kind TEXT NOT NULL,
    message_type TEXT NOT NULL, -- 'System' or 'User'
    application_id TEXT, -- For user messages
    system_message_type TEXT, -- For system messages (Credit, Withdraw, etc.)
    system_target TEXT, -- Credit target
    system_amount TEXT, -- Credit/Withdraw amount
    system_source TEXT, -- Credit source
    system_owner TEXT, -- Withdraw owner
    system_recipient TEXT, -- Withdraw recipient
    message_data BLOB NOT NULL,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (bundle_id) REFERENCES incoming_bundles(id)
);
CREATE INDEX IF NOT EXISTS idx_posted_messages_bundle_id ON posted_messages(bundle_id);
CREATE INDEX IF NOT EXISTS idx_posted_messages_kind ON posted_messages(message_kind);
CREATE INDEX IF NOT EXISTS idx_posted_messages_type ON posted_messages(message_type);
CREATE INDEX IF NOT EXISTS idx_posted_messages_system_type ON posted_messages(system_message_type);
"#;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/sqlite/tests.rs | linera-indexer/lib/src/db/sqlite/tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{CryptoHash, TestString},
data_types::{Amount, Blob, BlockHeight, Epoch, Timestamp},
hashed::Hashed,
identifiers::{ApplicationId, ChainId},
};
use linera_chain::{
block::{Block, BlockBody, BlockHeader},
data_types::{IncomingBundle, MessageAction, PostedMessage},
};
use linera_execution::{Message, MessageKind};
use linera_service_graphql_client::MessageBundle;
use crate::db::{sqlite::SqliteDatabase, IndexerDatabase};
#[tokio::test]
async fn test_sqlite_database_operations() {
    let db = create_test_database().await;

    // Round-trip a blob through an explicit, committed transaction.
    let blob = Blob::new_data(b"test blob content".to_vec());
    let blob_id = blob.id();
    let serialized = bincode::serialize(&blob).unwrap();

    let mut tx = db.begin_transaction().await.unwrap();
    db.insert_blob_tx(&mut tx, &blob_id, &serialized)
        .await
        .unwrap();
    tx.commit().await.unwrap();

    // The stored bytes must come back unchanged.
    assert_eq!(db.get_blob(&blob_id).await.unwrap(), serialized);

    // Block storage would require a full ConfirmedBlockCertificate;
    // this test only exercises the basic database operations.
}
#[tokio::test]
async fn test_atomic_transaction_behavior() {
    let db = create_test_database().await;

    let blob = Blob::new_data(b"test content".to_vec());
    let blob_id = blob.id();
    let serialized = bincode::serialize(&blob).unwrap();

    // An uncommitted transaction must be rolled back when dropped.
    {
        let mut tx = db.begin_transaction().await.unwrap();
        db.insert_blob_tx(&mut tx, &blob_id, &serialized)
            .await
            .unwrap();
        // `tx` goes out of scope here without commit.
    }
    assert!(
        db.get_blob(&blob_id).await.is_err(),
        "rolled-back blob should not be readable"
    );

    // A committed transaction must persist the blob.
    let mut tx = db.begin_transaction().await.unwrap();
    db.insert_blob_tx(&mut tx, &blob_id, &serialized)
        .await
        .unwrap();
    tx.commit().await.unwrap();

    assert_eq!(db.get_blob(&blob_id).await.unwrap(), serialized);
}
#[tokio::test]
async fn test_high_level_atomic_api() {
    let db = create_test_database().await;

    // Two blobs plus one block, all stored through the atomic API.
    let blob1 = Blob::new_data(b"test blob 1".to_vec());
    let blob2 = Blob::new_data(b"test blob 2".to_vec());
    let blob1_bytes = bincode::serialize(&blob1).unwrap();
    let blob2_bytes = bincode::serialize(&blob2).unwrap();

    let chain_id = ChainId(CryptoHash::new(blob2.content()));
    let height = BlockHeight(1);
    let timestamp = Timestamp::now();
    let block = create_test_block(chain_id, height);
    let block_hash = Hashed::new(block.clone()).hash();
    let block_bytes = bincode::serialize(&block).unwrap();

    let blobs = vec![
        (blob1.id(), blob1_bytes.clone()),
        (blob2.id(), blob2_bytes.clone()),
    ];

    db.store_block_with_blobs(
        &block_hash,
        &chain_id,
        height,
        timestamp,
        &block_bytes,
        &blobs,
    )
    .await
    .unwrap();

    // Everything stored atomically must be readable afterwards.
    assert_eq!(db.get_block(&block_hash).await.unwrap(), block_bytes);
    assert_eq!(db.get_blob(&blob1.id()).await.unwrap(), blob1_bytes);
    assert_eq!(db.get_blob(&blob2.id()).await.unwrap(), blob2_bytes);
}
#[tokio::test]
async fn test_incoming_bundles_storage_and_query() {
    let db = create_test_database().await;
    // Test that we can create the database schema with the new tables
    // The tables should be created in initialize_schema()
    // Verify the new tables exist by trying to query them
    let bundles_result = sqlx::query("SELECT COUNT(*) FROM incoming_bundles")
        .fetch_one(&db.pool)
        .await;
    assert!(
        bundles_result.is_ok(),
        "incoming_bundles table should exist"
    );
    let messages_result = sqlx::query("SELECT COUNT(*) FROM posted_messages")
        .fetch_one(&db.pool)
        .await;
    assert!(
        messages_result.is_ok(),
        "posted_messages table should exist"
    );
    // First insert a test block that the bundle can reference
    let mut test_block = create_test_block(
        ChainId(CryptoHash::new(&TestString::new("test_chain_id"))),
        BlockHeight(100),
    );
    // A single user message carried by the bundle; its fields are checked
    // against the queried PostedMessageInfo below.
    let incoming_bundle_message = PostedMessage {
        index: 0,
        authenticated_owner: None,
        grant: Amount::from_tokens(100),
        refund_grant_to: None,
        kind: MessageKind::Protected,
        message: Message::User {
            application_id: ApplicationId::new(CryptoHash::new(&TestString::new("test_app_id"))),
            bytes: b"test_message_data".to_vec(),
        },
    };
    let origin_chain_id = ChainId(CryptoHash::new(&TestString::new("origin_chain")));
    let source_cert_hash = CryptoHash::new(&TestString::new("source_cert_hash"));
    let incoming_bundle = IncomingBundle {
        origin: origin_chain_id,
        bundle: MessageBundle {
            height: test_block.header.height,
            timestamp: Timestamp::now(),
            certificate_hash: source_cert_hash,
            transaction_index: 2,
            messages: vec![incoming_bundle_message.clone()],
        },
        action: MessageAction::Reject,
    };
    // Attach the bundle to the block; insert_block_tx denormalizes it into
    // the incoming_bundles / posted_messages tables.
    test_block
        .body
        .transactions
        .push(linera_chain::data_types::Transaction::ReceiveMessages(
            incoming_bundle.clone(),
        ));
    let block_hash = Hashed::new(test_block.clone()).hash();
    let block_data = bincode::serialize(&test_block).unwrap();
    let mut tx = db.begin_transaction().await.unwrap();
    db.insert_block_tx(
        &mut tx,
        &block_hash,
        &test_block.header.chain_id,
        test_block.header.height,
        test_block.header.timestamp,
        &block_data,
    )
    .await
    .unwrap();
    tx.commit().await.unwrap();
    // Test the query methods
    let bundles = db
        .get_incoming_bundles_for_block(&block_hash)
        .await
        .unwrap();
    assert_eq!(bundles.len(), 1);
    let (queried_bundle_id, bundle_info) = &bundles[0];
    assert_eq!(bundle_info.bundle_index, 0);
    assert_eq!(bundle_info.origin_chain_id, origin_chain_id);
    assert_eq!(bundle_info.action, incoming_bundle.action);
    assert_eq!(bundle_info.source_height, incoming_bundle.bundle.height);
    assert_eq!(
        bundle_info.transaction_index,
        incoming_bundle.bundle.transaction_index
    );
    // Posted messages are retrieved via the bundle's row id.
    let messages = db
        .get_posted_messages_for_bundle(*queried_bundle_id)
        .await
        .unwrap();
    assert_eq!(messages.len(), 1);
    let message_info = &messages[0];
    assert_eq!(message_info.message_index, 0);
    assert_eq!(
        message_info.grant_amount,
        Amount::from_tokens(100).to_string()
    );
    assert_eq!(
        message_info.message_kind,
        incoming_bundle_message.kind.to_string()
    );
    assert!(message_info.authenticated_owner_data.is_none());
    assert!(message_info.refund_grant_to_data.is_none());
    assert_eq!(
        message_info.message_data,
        bincode::serialize(&incoming_bundle_message.message).unwrap()
    );
    // Test querying by origin chain
    let origin_bundles = db
        .get_bundles_from_origin_chain(&origin_chain_id)
        .await
        .unwrap();
    assert_eq!(origin_bundles.len(), 1);
    assert_eq!(origin_bundles[0].0, block_hash);
    assert_eq!(origin_bundles[0].1, *queried_bundle_id);
}
/// Build a fresh in-memory SQLite database for a test.
async fn create_test_database() -> SqliteDatabase {
    let db = SqliteDatabase::new("sqlite::memory:").await;
    db.expect("Failed to create test database")
}
/// Build a minimal `Block` for tests.
///
/// All content hashes in the header are fixed dummy test hashes and the
/// body starts empty; callers may push transactions into
/// `body.transactions` before hashing the block.
fn create_test_block(chain_id: ChainId, height: BlockHeight) -> Block {
    Block {
        header: BlockHeader {
            chain_id,
            epoch: Epoch::ZERO,
            height,
            timestamp: Timestamp::now(),
            state_hash: CryptoHash::new(&TestString::new("test_state_hash")),
            previous_block_hash: None,
            authenticated_owner: None,
            transactions_hash: CryptoHash::new(&TestString::new("transactions_hash")),
            messages_hash: CryptoHash::new(&TestString::new("messages_hash")),
            previous_message_blocks_hash: CryptoHash::new(&TestString::new("prev_msg_blocks_hash")),
            previous_event_blocks_hash: CryptoHash::new(&TestString::new("prev_event_blocks_hash")),
            oracle_responses_hash: CryptoHash::new(&TestString::new("oracle_responses_hash")),
            events_hash: CryptoHash::new(&TestString::new("events_hash")),
            blobs_hash: CryptoHash::new(&TestString::new("blobs_hash")),
            operation_results_hash: CryptoHash::new(&TestString::new("operation_results_hash")),
        },
        body: BlockBody {
            transactions: vec![],
            messages: vec![],
            previous_message_blocks: Default::default(),
            previous_event_blocks: Default::default(),
            oracle_responses: vec![],
            events: vec![],
            blobs: vec![],
            operation_results: vec![],
        },
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/db/sqlite/mod.rs | linera-indexer/lib/src/db/sqlite/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! SQLite database module for storing blocks and blobs.
mod consts;
#[cfg(test)]
mod tests;
use std::str::FromStr;
use async_trait::async_trait;
use consts::{
CREATE_BLOBS_TABLE, CREATE_BLOCKS_TABLE, CREATE_EVENTS_TABLE, CREATE_INCOMING_BUNDLES_TABLE,
CREATE_OPERATIONS_TABLE, CREATE_ORACLE_RESPONSES_TABLE, CREATE_OUTGOING_MESSAGES_TABLE,
CREATE_POSTED_MESSAGES_TABLE,
};
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Event, OracleResponse, Timestamp},
identifiers::{BlobId, ChainId},
};
use linera_chain::{
block::Block,
data_types::{IncomingBundle, MessageAction, PostedMessage},
};
use linera_execution::{Message, Operation, OutgoingMessage, SystemOperation};
use sqlx::{
sqlite::{SqlitePool, SqlitePoolOptions},
Row, Sqlite, Transaction,
};
use thiserror::Error;
use crate::db::{
common::{classify_message, message_kind_to_string, parse_message_kind},
IncomingBundleInfo, IndexerDatabase, PostedMessageInfo,
};
/// Errors produced by SQLite indexer database operations.
#[derive(Error, Debug)]
pub enum SqliteError {
    /// Failure from the underlying sqlx/SQLite layer.
    #[error("Database error: {0}")]
    Database(#[from] sqlx::Error),
    /// A value could not be serialized, deserialized, or parsed.
    #[error("Serialization error: {0}")]
    Serialization(String),
    /// No block with the given hash is stored.
    #[error("Block not found: {0}")]
    BlockNotFound(CryptoHash),
    /// No blob with the given id is stored.
    #[error("Blob not found: {0}")]
    BlobNotFound(BlobId),
}
/// SQLite-backed indexer database.
pub struct SqliteDatabase {
    // Shared connection pool (max 5 connections; see `new`).
    pool: SqlitePool,
}
impl SqliteDatabase {
/// Create a new SQLite database connection
pub async fn new(database_url: &str) -> Result<Self, SqliteError> {
if !database_url.contains("memory") {
match std::fs::exists(database_url) {
Ok(true) => {
tracing::info!(?database_url, "opening existing SQLite database");
}
Ok(false) => {
tracing::info!(?database_url, "creating new SQLite database");
// Create the database file if it doesn't exist
std::fs::File::create(database_url).unwrap_or_else(|e| {
panic!(
"failed to create SQLite database file: {}, error: {}",
database_url, e
)
});
}
Err(e) => {
panic!(
"failed to check SQLite database existence. file: {}, error: {}",
database_url, e
)
}
}
}
let pool = SqlitePoolOptions::new()
.max_connections(5)
.connect(database_url)
.await
.map_err(SqliteError::Database)?;
let db = Self { pool };
db.initialize_schema().await?;
Ok(db)
}
/// Initialize the database schema.
///
/// Executes every `CREATE TABLE` / `CREATE INDEX` statement; all use
/// `IF NOT EXISTS`, so re-running on an existing database is safe.
async fn initialize_schema(&self) -> Result<(), SqliteError> {
    // Order matters only for readability: core tables first, then the
    // denormalized block-data tables, then the message-related tables.
    let statements = [
        CREATE_BLOCKS_TABLE,
        CREATE_BLOBS_TABLE,
        CREATE_OPERATIONS_TABLE,
        CREATE_OUTGOING_MESSAGES_TABLE,
        CREATE_EVENTS_TABLE,
        CREATE_ORACLE_RESPONSES_TABLE,
        CREATE_INCOMING_BUNDLES_TABLE,
        CREATE_POSTED_MESSAGES_TABLE,
    ];
    for statement in statements {
        sqlx::query(statement).execute(&self.pool).await?;
    }
    Ok(())
}
/// Start a new database transaction on the pool.
async fn begin_transaction(&self) -> Result<Transaction<'_, Sqlite>, SqliteError> {
    let tx = self.pool.begin().await?;
    Ok(tx)
}
/// Commit a transaction
async fn commit_transaction(&self, tx: Transaction<'_, Sqlite>) -> Result<(), SqliteError> {
tx.commit().await.map_err(SqliteError::Database)
}
/// Insert a blob within a transaction.
///
/// Uses `INSERT OR IGNORE`, so storing an already-present blob is a no-op.
async fn insert_blob_tx(
    &self,
    tx: &mut Transaction<'_, Sqlite>,
    blob_id: &BlobId,
    data: &[u8],
) -> Result<(), SqliteError> {
    let hash = blob_id.hash.to_string();
    let kind = format!("{:?}", blob_id.blob_type);
    // Creating-block and application-id context is not available at this
    // call site, so those columns are left NULL for now.
    sqlx::query("INSERT OR IGNORE INTO blobs (hash, blob_type, data) VALUES (?1, ?2, ?3)")
        .bind(&hash)
        .bind(&kind)
        .bind(data)
        .execute(&mut **tx)
        .await?;
    Ok(())
}
/// Insert a block within a transaction.
///
/// Deserializes `data` back into a [`Block`] and denormalizes it: the main
/// `blocks` row (header fields plus aggregate counts) is written first, then
/// per-item rows for operations, incoming bundles and their messages,
/// outgoing messages, events, and oracle responses.
///
/// Uses `INSERT OR REPLACE` for the main row, so re-inserting the same
/// block hash overwrites it. Note: the per-item child rows are inserted
/// unconditionally, so callers are expected not to re-insert an existing
/// block within the same database (TODO confirm — no dedup is visible here).
///
/// # Errors
/// `SqliteError::Serialization` if `data` does not deserialize into a
/// `Block`; database errors from any insert.
async fn insert_block_tx(
    &self,
    tx: &mut Transaction<'_, Sqlite>,
    hash: &CryptoHash,
    chain_id: &ChainId,
    height: BlockHeight,
    timestamp: Timestamp,
    data: &[u8],
) -> Result<(), SqliteError> {
    // Deserialize the block to extract denormalized data
    let block: Block = bincode::deserialize(data).map_err(|e| {
        SqliteError::Serialization(format!("Failed to deserialize block: {}", e))
    })?;
    // Count aggregated data
    let operation_count = block.body.operations().count();
    let incoming_bundle_count = block.body.incoming_bundles().count();
    let message_count = block.body.messages.iter().map(|v| v.len()).sum::<usize>();
    let event_count = block.body.events.iter().map(|v| v.len()).sum::<usize>();
    let blob_count = block.body.blobs.len();
    // Insert main block record with denormalized fields
    let hash_str = hash.to_string();
    let chain_id_str = chain_id.to_string();
    let state_hash_str = block.header.state_hash.to_string();
    let previous_block_hash_str = block.header.previous_block_hash.map(|h| h.to_string());
    let authenticated_owner_str = block.header.authenticated_owner.map(|s| s.to_string());
    sqlx::query(
        r#"
        INSERT OR REPLACE INTO blocks
        (hash, chain_id, height, timestamp, epoch, state_hash, previous_block_hash,
         authenticated_owner, operation_count, incoming_bundle_count, message_count,
         event_count, blob_count, data)
        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)
        "#,
    )
    .bind(&hash_str)
    .bind(&chain_id_str)
    .bind(height.0 as i64)
    .bind(timestamp.micros() as i64)
    .bind(block.header.epoch.0 as i64)
    .bind(&state_hash_str)
    .bind(&previous_block_hash_str)
    .bind(&authenticated_owner_str)
    .bind(operation_count as i64)
    .bind(incoming_bundle_count as i64)
    .bind(message_count as i64)
    .bind(event_count as i64)
    .bind(blob_count as i64)
    .bind(data)
    .execute(&mut **tx)
    .await?;
    // Insert operations and incoming bundles: each transaction is either an
    // executed operation or a received bundle (with its posted messages).
    for (index, transaction) in block.body.transactions.iter().enumerate() {
        match transaction {
            linera_chain::data_types::Transaction::ExecuteOperation(operation) => {
                self.insert_operation_tx(
                    tx,
                    hash,
                    index,
                    operation,
                    block.header.authenticated_owner,
                )
                .await?;
            }
            linera_chain::data_types::Transaction::ReceiveMessages(bundle) => {
                let bundle_id = self
                    .insert_incoming_bundle_tx(tx, hash, index, bundle)
                    .await?;
                for message in &bundle.bundle.messages {
                    self.insert_bundle_message_tx(tx, bundle_id, message)
                        .await?;
                }
            }
        }
    }
    // Insert outgoing messages (indexed by originating transaction).
    for (txn_index, messages) in block.body.messages.iter().enumerate() {
        for (msg_index, message) in messages.iter().enumerate() {
            self.insert_outgoing_message_tx(tx, hash, txn_index, msg_index, message)
                .await?;
        }
    }
    // Insert events (indexed by originating transaction).
    for (txn_index, events) in block.body.events.iter().enumerate() {
        for (event_index, event) in events.iter().enumerate() {
            self.insert_event_tx(tx, hash, txn_index, event_index, event)
                .await?;
        }
    }
    // Insert oracle responses (indexed by originating transaction).
    for (txn_index, responses) in block.body.oracle_responses.iter().enumerate() {
        for (response_index, response) in responses.iter().enumerate() {
            self.insert_oracle_response_tx(tx, hash, txn_index, response_index, response)
                .await?;
        }
    }
    Ok(())
}
/// Insert an operation within a transaction.
///
/// Classifies the operation for the denormalized columns: system operations
/// get a `system_operation_type` label and no application id; user
/// operations get their `application_id` and no system label. The full
/// operation is also stored serialized in `data`.
///
/// # Errors
/// `SqliteError::Serialization` if the operation fails to serialize;
/// database errors from the insert.
async fn insert_operation_tx(
    &self,
    tx: &mut Transaction<'_, Sqlite>,
    block_hash: &CryptoHash,
    operation_index: usize,
    operation: &Operation,
    authenticated_owner: Option<linera_base::identifiers::AccountOwner>,
) -> Result<(), SqliteError> {
    let block_hash_str = block_hash.to_string();
    let authenticated_owner_str = authenticated_owner.map(|s| s.to_string());
    // Map each variant to its label; this match is exhaustive so adding a
    // SystemOperation variant forces an update here.
    let (operation_type, application_id, system_operation_type) = match operation {
        Operation::System(sys_op) => {
            let sys_op_type = match sys_op.as_ref() {
                SystemOperation::Transfer { .. } => "Transfer",
                SystemOperation::Claim { .. } => "Claim",
                SystemOperation::OpenChain { .. } => "OpenChain",
                SystemOperation::CloseChain => "CloseChain",
                SystemOperation::ChangeApplicationPermissions { .. } => {
                    "ChangeApplicationPermissions"
                }
                SystemOperation::CreateApplication { .. } => "CreateApplication",
                SystemOperation::PublishModule { .. } => "PublishModule",
                SystemOperation::PublishDataBlob { .. } => "PublishDataBlob",
                SystemOperation::Admin(_) => "Admin",
                SystemOperation::ProcessNewEpoch(_) => "ProcessNewEpoch",
                SystemOperation::ProcessRemovedEpoch(_) => "ProcessRemovedEpoch",
                SystemOperation::UpdateStreams(_) => "UpdateStreams",
                SystemOperation::ChangeOwnership { .. } => "ChangeOwnership",
                SystemOperation::VerifyBlob { .. } => "VerifyBlob",
            };
            ("System", None, Some(sys_op_type))
        }
        Operation::User { application_id, .. } => {
            ("User", Some(application_id.to_string()), None)
        }
    };
    let data = bincode::serialize(operation).map_err(|e| {
        SqliteError::Serialization(format!("Failed to serialize operation: {}", e))
    })?;
    sqlx::query(
        r#"
        INSERT INTO operations
        (block_hash, operation_index, operation_type, application_id, system_operation_type, authenticated_owner, data)
        VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)
        "#,
    )
    .bind(&block_hash_str)
    .bind(operation_index as i64)
    .bind(operation_type)
    .bind(application_id)
    .bind(system_operation_type)
    .bind(&authenticated_owner_str)
    .bind(&data)
    .execute(&mut **tx)
    .await?;
    Ok(())
}
/// Insert an outgoing message within a transaction
async fn insert_outgoing_message_tx(
&self,
tx: &mut Transaction<'_, Sqlite>,
block_hash: &CryptoHash,
transaction_index: usize,
message_index: usize,
message: &OutgoingMessage,
) -> Result<(), SqliteError> {
let block_hash_str = block_hash.to_string();
let destination_chain_id_str = message.destination.to_string();
let authenticated_owner_str = message.authenticated_owner.map(|s| s.to_string());
let message_kind_str = message_kind_to_string(&message.kind);
let classification = classify_message(&message.message);
let data = Self::serialize_message(&message.message)?;
sqlx::query(
r#"
INSERT INTO outgoing_messages
(block_hash, transaction_index, message_index, destination_chain_id, authenticated_owner,
grant_amount, message_kind, message_type, application_id, system_message_type,
system_target, system_amount, system_source, system_owner, system_recipient, data)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)
"#,
)
.bind(&block_hash_str)
.bind(transaction_index as i64)
.bind(message_index as i64)
.bind(&destination_chain_id_str)
.bind(&authenticated_owner_str)
.bind(message.grant.to_string())
.bind(&message_kind_str)
.bind(classification.message_type)
.bind(classification.application_id)
.bind(classification.system_message_type)
.bind(classification.system_target)
.bind(classification.system_amount.map(|a| a.to_string()))
.bind(classification.system_source)
.bind(classification.system_owner)
.bind(classification.system_recipient)
.bind(&data)
.execute(&mut **tx)
.await?;
Ok(())
}
/// Insert an event within a transaction
async fn insert_event_tx(
&self,
tx: &mut Transaction<'_, Sqlite>,
block_hash: &CryptoHash,
transaction_index: usize,
event_index: usize,
event: &Event,
) -> Result<(), SqliteError> {
let block_hash_str = block_hash.to_string();
let stream_id_str = event.stream_id.to_string();
sqlx::query(
r#"
INSERT INTO events
(block_hash, transaction_index, event_index, stream_id, stream_index, data)
VALUES (?1, ?2, ?3, ?4, ?5, ?6)
"#,
)
.bind(&block_hash_str)
.bind(transaction_index as i64)
.bind(event_index as i64)
.bind(&stream_id_str)
.bind(event.index as i64)
.bind(&event.value)
.execute(&mut **tx)
.await?;
Ok(())
}
/// Insert an oracle response within a transaction
async fn insert_oracle_response_tx(
&self,
tx: &mut Transaction<'_, Sqlite>,
block_hash: &CryptoHash,
transaction_index: usize,
response_index: usize,
response: &OracleResponse,
) -> Result<(), SqliteError> {
let block_hash_str = block_hash.to_string();
let (response_type, blob_hash, data): (&str, Option<String>, Option<Vec<u8>>) =
match response {
OracleResponse::Service(service_data) => {
("Service", None, Some(service_data.clone()))
}
OracleResponse::Blob(blob_id) => ("Blob", Some(blob_id.hash.to_string()), None),
OracleResponse::Http(http_response) => {
let serialized = bincode::serialize(http_response).map_err(|e| {
SqliteError::Serialization(format!(
"Failed to serialize HTTP response: {}",
e
))
})?;
("Http", None, Some(serialized))
}
OracleResponse::Assert => ("Assert", None, None),
OracleResponse::Round(round) => {
let serialized = bincode::serialize(round).map_err(|e| {
SqliteError::Serialization(format!("Failed to serialize round: {}", e))
})?;
("Round", None, Some(serialized))
}
OracleResponse::Event(stream_id, index) => {
let serialized = bincode::serialize(&(stream_id, index)).map_err(|e| {
SqliteError::Serialization(format!("Failed to serialize event: {}", e))
})?;
("Event", None, Some(serialized))
}
OracleResponse::EventExists(event_exists) => {
let serialized = bincode::serialize(event_exists).map_err(|e| {
SqliteError::Serialization(format!(
"Failed to serialize event exists: {}",
e
))
})?;
("EventExists", None, Some(serialized))
}
};
sqlx::query(
r#"
INSERT INTO oracle_responses
(block_hash, transaction_index, response_index, response_type, blob_hash, data)
VALUES (?1, ?2, ?3, ?4, ?5, ?6)
"#,
)
.bind(&block_hash_str)
.bind(transaction_index as i64)
.bind(response_index as i64)
.bind(response_type)
.bind(blob_hash)
.bind(data)
.execute(&mut **tx)
.await?;
Ok(())
}
/// Insert an incoming bundle within a transaction and return the bundle ID
async fn insert_incoming_bundle_tx(
&self,
tx: &mut Transaction<'_, Sqlite>,
block_hash: &CryptoHash,
bundle_index: usize,
incoming_bundle: &IncomingBundle,
) -> Result<i64, SqliteError> {
let block_hash_str = block_hash.to_string();
let origin_chain_str = incoming_bundle.origin.to_string();
let action_str = match incoming_bundle.action {
MessageAction::Accept => "Accept",
MessageAction::Reject => "Reject",
};
let source_cert_hash_str = incoming_bundle.bundle.certificate_hash.to_string();
let result = sqlx::query(
r#"
INSERT INTO incoming_bundles
(block_hash, bundle_index, origin_chain_id, action, source_height, source_timestamp, source_cert_hash, transaction_index)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)
"#
)
.bind(&block_hash_str)
.bind(bundle_index as i64)
.bind(&origin_chain_str)
.bind(action_str)
.bind(incoming_bundle.bundle.height.0 as i64)
.bind(incoming_bundle.bundle.timestamp.micros() as i64)
.bind(&source_cert_hash_str)
.bind(incoming_bundle.bundle.transaction_index as i64)
.execute(&mut **tx)
.await?;
Ok(result.last_insert_rowid())
}
/// Insert a posted message within a transaction
async fn insert_bundle_message_tx(
&self,
tx: &mut Transaction<'_, Sqlite>,
bundle_id: i64,
message: &PostedMessage,
) -> Result<(), SqliteError> {
let authenticated_owner_str = message.authenticated_owner.map(|s| s.to_string());
let refund_grant_to = message.refund_grant_to.as_ref().map(|s| format!("{s}"));
let message_kind_str = message_kind_to_string(&message.kind);
let classification = classify_message(&message.message);
let message_data = Self::serialize_message(&message.message)?;
sqlx::query(
r#"
INSERT INTO posted_messages
(bundle_id, message_index, authenticated_owner, grant_amount, refund_grant_to,
message_kind, message_type, application_id, system_message_type,
system_target, system_amount, system_source, system_owner, system_recipient, message_data)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15)
"#
)
.bind(bundle_id)
.bind(message.index as i64)
.bind(authenticated_owner_str)
.bind(message.grant.to_string())
.bind(refund_grant_to)
.bind(&message_kind_str)
.bind(classification.message_type)
.bind(classification.application_id)
.bind(classification.system_message_type)
.bind(classification.system_target)
.bind(classification.system_amount.map(|a| a.to_string()))
.bind(classification.system_source)
.bind(classification.system_owner)
.bind(classification.system_recipient)
.bind(&message_data)
.execute(&mut **tx)
.await?;
Ok(())
}
/// Get a block by hash
pub async fn get_block(&self, hash: &CryptoHash) -> Result<Vec<u8>, SqliteError> {
let hash_str = hash.to_string();
let row = sqlx::query("SELECT data FROM blocks WHERE hash = ?1")
.bind(&hash_str)
.fetch_optional(&self.pool)
.await?;
match row {
Some(row) => Ok(row.get("data")),
None => Err(SqliteError::BlockNotFound(*hash)),
}
}
/// Get a blob by blob_id
pub async fn get_blob(&self, blob_id: &BlobId) -> Result<Vec<u8>, SqliteError> {
let blob_id_str = blob_id.hash.to_string();
let row = sqlx::query("SELECT data FROM blobs WHERE hash = ?1")
.bind(&blob_id_str)
.fetch_optional(&self.pool)
.await?;
match row {
Some(row) => Ok(row.get("data")),
None => Err(SqliteError::BlobNotFound(*blob_id)),
}
}
/// Get the latest block for a chain
pub async fn get_latest_block_for_chain(
&self,
chain_id: &ChainId,
) -> Result<Option<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
let chain_id_str = chain_id.to_string();
let row = sqlx::query(
"SELECT hash, height, data FROM blocks WHERE chain_id = ?1 ORDER BY height DESC LIMIT 1"
)
.bind(&chain_id_str)
.fetch_optional(&self.pool)
.await?;
match row {
Some(row) => {
let hash_str: String = row.get("hash");
let height: i64 = row.get("height");
let data: Vec<u8> = row.get("data");
let hash = hash_str
.parse()
.map_err(|_| SqliteError::Serialization("Invalid hash format".to_string()))?;
Ok(Some((hash, BlockHeight(height as u64), data)))
}
None => Ok(None),
}
}
/// Get blocks for a chain within a height range
pub async fn get_blocks_for_chain_range(
&self,
chain_id: &ChainId,
start_height: BlockHeight,
end_height: BlockHeight,
) -> Result<Vec<(CryptoHash, BlockHeight, Vec<u8>)>, SqliteError> {
let chain_id_str = chain_id.to_string();
let rows = sqlx::query(
"SELECT hash, height, data FROM blocks WHERE chain_id = ?1 AND height >= ?2 AND height <= ?3 ORDER BY height ASC"
)
.bind(&chain_id_str)
.bind(start_height.0 as i64)
.bind(end_height.0 as i64)
.fetch_all(&self.pool)
.await?;
let mut result = Vec::new();
for row in rows {
let hash_str: String = row.get("hash");
let height: i64 = row.get("height");
let data: Vec<u8> = row.get("data");
let hash = hash_str
.parse()
.map_err(|_| SqliteError::Serialization("Invalid hash format".to_string()))?;
result.push((hash, BlockHeight(height as u64), data));
}
Ok(result)
}
/// Check if a blob exists
pub async fn blob_exists(&self, blob_id: &BlobId) -> Result<bool, SqliteError> {
let blob_id_str = blob_id.hash.to_string();
let row = sqlx::query("SELECT 1 FROM blobs WHERE hash = ?1 LIMIT 1")
.bind(&blob_id_str)
.fetch_optional(&self.pool)
.await?;
Ok(row.is_some())
}
/// Check if a block exists
pub async fn block_exists(&self, hash: &CryptoHash) -> Result<bool, SqliteError> {
let hash_str = hash.to_string();
let row = sqlx::query("SELECT 1 FROM blocks WHERE hash = ?1 LIMIT 1")
.bind(&hash_str)
.fetch_optional(&self.pool)
.await?;
Ok(row.is_some())
}
/// Get incoming bundles for a specific block
pub async fn get_incoming_bundles_for_block(
&self,
block_hash: &CryptoHash,
) -> Result<Vec<(i64, IncomingBundleInfo)>, SqliteError> {
let block_hash_str = block_hash.to_string();
let rows = sqlx::query(
r#"
SELECT id, bundle_index, origin_chain_id, action, source_height,
source_timestamp, source_cert_hash, transaction_index
FROM incoming_bundles
WHERE block_hash = ?1
ORDER BY bundle_index ASC
"#,
)
.bind(&block_hash_str)
.fetch_all(&self.pool)
.await?;
let mut bundles = Vec::new();
for row in rows {
let bundle_id: i64 = row.get("id");
let bundle_info = IncomingBundleInfo {
bundle_index: row.get::<i64, _>("bundle_index") as usize,
origin_chain_id: row
.get::<String, _>("origin_chain_id")
.parse()
.map_err(|_| SqliteError::Serialization("Invalid chain ID".to_string()))?,
action: match row.get::<String, _>("action").as_str() {
"Accept" => MessageAction::Accept,
"Reject" => MessageAction::Reject,
_ => return Err(SqliteError::Serialization("Invalid action".to_string())),
},
source_height: BlockHeight(row.get::<i64, _>("source_height") as u64),
source_timestamp: Timestamp::from(row.get::<i64, _>("source_timestamp") as u64),
source_cert_hash: row
.get::<String, _>("source_cert_hash")
.parse()
.map_err(|_| SqliteError::Serialization("Invalid cert hash".to_string()))?,
transaction_index: row.get::<i64, _>("transaction_index") as u32,
};
bundles.push((bundle_id, bundle_info));
}
Ok(bundles)
}
/// Get posted messages for a specific bundle
pub async fn get_posted_messages_for_bundle(
&self,
bundle_id: i64,
) -> Result<Vec<PostedMessageInfo>, SqliteError> {
let rows = sqlx::query(
r#"
SELECT message_index, authenticated_owner, grant_amount, refund_grant_to,
message_kind, message_data
FROM posted_messages
WHERE bundle_id = ?1
ORDER BY message_index ASC
"#,
)
.bind(bundle_id)
.fetch_all(&self.pool)
.await?;
let mut messages = Vec::new();
for row in rows {
let message_info = PostedMessageInfo {
message_index: row.get::<i64, _>("message_index") as u32,
authenticated_owner_data: row.get("authenticated_owner"),
grant_amount: row.get("grant_amount"),
refund_grant_to_data: row.get("refund_grant_to"),
message_kind: row.get("message_kind"),
message_data: row.get("message_data"),
};
messages.push(message_info);
}
Ok(messages)
}
/// Get all bundles from a specific origin chain
pub async fn get_bundles_from_origin_chain(
&self,
origin_chain_id: &ChainId,
) -> Result<Vec<(CryptoHash, i64, IncomingBundleInfo)>, SqliteError> {
let origin_chain_str = origin_chain_id.to_string();
let rows = sqlx::query(
r#"
SELECT block_hash, id, bundle_index, origin_chain_id, action, source_height,
source_timestamp, source_cert_hash, transaction_index
FROM incoming_bundles
WHERE origin_chain_id = ?1
ORDER BY source_height ASC, bundle_index ASC
"#,
)
.bind(&origin_chain_str)
.fetch_all(&self.pool)
.await?;
let mut bundles = Vec::new();
for row in rows {
let block_hash: CryptoHash = row
.get::<String, _>("block_hash")
.parse()
.map_err(|_| SqliteError::Serialization("Invalid block hash".to_string()))?;
let bundle_id: i64 = row.get("id");
let bundle_info = IncomingBundleInfo {
bundle_index: row.get::<i64, _>("bundle_index") as usize,
origin_chain_id: row
.get::<String, _>("origin_chain_id")
.parse()
.map_err(|_| SqliteError::Serialization("Invalid chain ID".to_string()))?,
action: match row.get::<String, _>("action").as_str() {
"Accept" => MessageAction::Accept,
"Reject" => MessageAction::Reject,
_ => return Err(SqliteError::Serialization("Invalid action".to_string())),
},
source_height: BlockHeight(row.get::<i64, _>("source_height") as u64),
source_timestamp: Timestamp::from(row.get::<i64, _>("source_timestamp") as u64),
source_cert_hash: row
.get::<String, _>("source_cert_hash")
.parse()
.map_err(|_| SqliteError::Serialization("Invalid cert hash".to_string()))?,
transaction_index: row.get::<i64, _>("transaction_index") as u32,
};
bundles.push((block_hash, bundle_id, bundle_info));
}
Ok(bundles)
}
/// Get operations for a specific block
pub async fn get_operations_for_block(
&self,
block_hash: &CryptoHash,
) -> Result<Vec<(usize, Operation)>, SqliteError> {
let block_hash_str = block_hash.to_string();
let rows = sqlx::query(
r#"
SELECT operation_index, data
FROM operations
WHERE block_hash = ?1
ORDER BY operation_index ASC
"#,
)
.bind(&block_hash_str)
.fetch_all(&self.pool)
.await?;
let mut operations = Vec::new();
for row in rows {
let index = row.get::<i64, _>("operation_index") as usize;
let data: Vec<u8> = row.get("data");
let operation: Operation = bincode::deserialize(&data).map_err(|e| {
SqliteError::Serialization(format!("Failed to deserialize operation: {}", e))
})?;
operations.push((index, operation));
}
Ok(operations)
}
/// Get outgoing messages for a specific block
pub async fn get_outgoing_messages_for_block(
&self,
block_hash: &CryptoHash,
) -> Result<Vec<OutgoingMessage>, SqliteError> {
let block_hash_str = block_hash.to_string();
let rows = sqlx::query(
r#"
SELECT destination_chain_id, authenticated_owner, grant_amount, message_kind, data
FROM outgoing_messages
WHERE block_hash = ?1
ORDER BY transaction_index, message_index ASC
"#,
)
.bind(&block_hash_str)
.fetch_all(&self.pool)
.await?;
let mut messages = Vec::new();
for row in rows {
let destination_str: String = row.get("destination_chain_id");
let destination = destination_str
.parse()
.map_err(|_| SqliteError::Serialization("Invalid chain ID".to_string()))?;
let authenticated_owner_str: Option<String> = row.get("authenticated_owner");
let authenticated_owner = authenticated_owner_str.and_then(|s| s.parse().ok());
let grant_amount: String = row.get("grant_amount");
let grant = linera_base::data_types::Amount::from_str(grant_amount.as_str())
.map_err(|_| SqliteError::Serialization("Invalid grant amount".to_string()))?;
let kind_str: String = row.get("message_kind");
let kind = parse_message_kind(kind_str.as_str()).map_err(SqliteError::Serialization)?;
let message_bytes: Vec<u8> = row.get("data");
let message = Self::deserialize_message(message_bytes.as_slice())?;
messages.push(OutgoingMessage {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/grpc/tests.rs | linera-indexer/lib/src/grpc/tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use super::*;
use crate::{
db::tests::{MockFailingDatabase, MockSuccessDatabase},
indexer_api::{element::Payload, Element},
};
fn test_blob_element() -> Element {
let test_blob = Blob::new_data(b"test blob content".to_vec());
let blob_data = bincode::serialize(&test_blob).unwrap();
Element {
payload: Some(Payload::Blob(crate::indexer_api::Blob { bytes: blob_data })),
}
}
// Create a protobuf message that is not a valid ConfiredBlockCertificate instance.
fn invalid_block_element() -> Element {
Element {
payload: Some(Payload::Block(crate::indexer_api::Block {
bytes: b"fake_block_certificate_data".to_vec(),
})),
}
}
// Create a valid block element with minimal data.
fn valid_block_element() -> Element {
valid_block_element_with_chain_id("test_chain")
}
fn valid_block_element_with_chain_id(chain_suffix: &str) -> Element {
use std::collections::BTreeMap;
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Epoch, Round, Timestamp},
identifiers::{ChainId, StreamId},
};
use linera_chain::{
block::{Block, ConfirmedBlock},
data_types::{BlockExecutionOutcome, ProposedBlock},
};
// Create a simple test ChainId with unique suffix
let chain_id = ChainId(CryptoHash::test_hash(chain_suffix));
// Create a minimal proposed block (genesis block)
let proposed_block = ProposedBlock {
epoch: Epoch::ZERO,
chain_id,
transactions: vec![],
previous_block_hash: None,
height: BlockHeight::ZERO,
authenticated_owner: None,
timestamp: Timestamp::default(),
};
// Create a minimal block execution outcome with proper BTreeMap types
let outcome = BlockExecutionOutcome {
messages: vec![],
state_hash: CryptoHash::default(),
oracle_responses: vec![],
events: vec![],
blobs: vec![],
operation_results: vec![],
previous_event_blocks: BTreeMap::<StreamId, (CryptoHash, BlockHeight)>::new(),
previous_message_blocks: BTreeMap::<ChainId, (CryptoHash, BlockHeight)>::new(),
};
let block = Block::new(proposed_block, outcome);
let confirmed_block = ConfirmedBlock::new(block);
let certificate = ConfirmedBlockCertificate::new(confirmed_block, Round::Fast, vec![]);
let block_data = bincode::serialize(&certificate).unwrap();
Element {
payload: Some(Payload::Block(crate::indexer_api::Block {
bytes: block_data,
})),
}
}
#[tokio::test]
async fn test_process_element_blob_success() {
let database = MockSuccessDatabase::new();
let mut pending_blobs = HashMap::new();
let element = test_blob_element();
let result = IndexerGrpcServer::process_element(&database, &mut pending_blobs, element).await;
// Processing blob returns `Ok(None)` (no ACK).
assert!(matches!(result, Ok(None)));
// Blob should be added to pending blobs
assert_eq!(pending_blobs.len(), 1);
}
#[tokio::test]
async fn test_process_element_invalid_block() {
let database = MockFailingDatabase::new();
let mut pending_blobs = HashMap::new();
match IndexerGrpcServer::process_element(&database, &mut pending_blobs, test_blob_element())
.await
{
Ok(None) => {}
_ => panic!("Expected Ok(None)"),
};
let element = invalid_block_element();
let result = IndexerGrpcServer::process_element(&database, &mut pending_blobs, element).await;
// Should return an error due to deserialization failure
assert!(result.is_err());
match result.unwrap_err() {
ProcessingError::BlockDeserialization(_) => {}
_ => panic!("Expected BlockDeserialization error"),
}
assert_eq!(
pending_blobs.len(),
1,
"Pending blobs should remain after block failure"
);
}
#[tokio::test]
async fn test_process_element_empty_payload() {
let database = MockFailingDatabase::new();
let mut pending_blobs = HashMap::new();
let element = Element { payload: None };
match IndexerGrpcServer::process_element(&database, &mut pending_blobs, element).await {
Err(ProcessingError::EmptyPayload) => {}
_ => panic!("Expected EmptyPayload error"),
}
}
#[tokio::test]
async fn test_process_element_invalid_blob() {
let database = MockFailingDatabase::new();
let mut pending_blobs = HashMap::new();
// Create element with invalid blob data
let element = Element {
payload: Some(Payload::Blob(crate::indexer_api::Blob {
bytes: vec![0x00, 0x01, 0x02], // Invalid blob data
})),
};
match IndexerGrpcServer::process_element(&database, &mut pending_blobs, element).await {
Err(ProcessingError::BlobDeserialization(_)) => {}
_ => panic!("Expected BlobDeserialization error"),
}
}
#[tokio::test]
async fn test_process_valid_block() {
use std::sync::Arc;
// Use MockSuccessDatabase to test successful paths and ACK behavior
let database = Arc::new(MockSuccessDatabase::new());
let mut pending_blobs = HashMap::new();
// First add a blob to pending_blobs
let blob_element = test_blob_element();
let blob_result =
IndexerGrpcServer::process_element(&*database, &mut pending_blobs, blob_element).await;
assert!(
matches!(blob_result, Ok(None)),
"Blobs should return Ok(None)"
);
assert_eq!(pending_blobs.len(), 1, "Pending blobs should have 1 blob");
let block_element = invalid_block_element();
let block_result =
IndexerGrpcServer::process_element(&*database, &mut pending_blobs, block_element).await;
assert!(
block_result.is_err(),
"Invalid block should return an error"
);
match block_result.unwrap_err() {
ProcessingError::BlockDeserialization(_) => {
// This should fail due to invalid block deserialization
}
_ => panic!("Expected BlockDeserialization error"),
}
assert_eq!(
pending_blobs.len(),
1,
"Pending blobs should remain after block failure"
);
// Valid block should produce ACK
let block_element = valid_block_element();
let block_result =
IndexerGrpcServer::process_element(&*database, &mut pending_blobs, block_element).await;
assert!(
matches!(block_result, Ok(Some(()))),
"Valid blocks should return Ok(Some(())) ACK"
);
assert!(
pending_blobs.is_empty(),
"Pending blobs should be cleared after block"
);
}
// === STREAM PROCESSING TESTS (Integration Tests) ===
#[tokio::test]
async fn test_process_stream_end_to_end_mixed_elements() {
use std::sync::Arc;
use futures::StreamExt;
use tokio_stream;
use tonic::{Code, Status};
let database = Arc::new(MockSuccessDatabase::new());
// Create test elements - same blob content will have same BlobId
let blob1 = test_blob_element();
let blob2 = test_blob_element();
let blob3 = test_blob_element();
let valid_block1 = valid_block_element_with_chain_id("test_chain_1");
let invalid_block = invalid_block_element();
let valid_block2 = valid_block_element_with_chain_id("test_chain_2");
// Create a mixed stream of elements: blobs and blocks
let elements = vec![
Ok(blob1), // Blob #1 - should not produce ACK
Ok(blob2), // Blob #2 - should not produce ACK
Ok(valid_block1), // Valid Block #1 - should produce ACK and store blobs 1&2
Ok(invalid_block), // Invalid Block - should produce ERROR, no storage
Ok(blob3), // Blob #3 - should not produce ACK
Ok(valid_block2), // Valid Block #2 - should produce ACK and store blob 3
];
// Verify initial state - no data stored
assert_eq!(database.blob_count(), 0, "Database should start empty");
assert_eq!(database.block_count(), 0, "Database should start empty");
// Create a BoxStream from the elements
let input_stream = tokio_stream::iter(elements).boxed();
// Call the process_stream method
let output_stream = IndexerGrpcServer::process_stream(database.clone(), input_stream);
// Collect all results from the output stream
let results: Vec<Result<(), Status>> = output_stream.collect().await;
// === VERIFY OUTPUT STREAM RESPONSES ===
// Verify we get exactly 3 responses:
// 1. ACK for first valid block (processes 2 blobs + block 1)
// 2. ERROR for invalid block (deserialization fails)
// 3. ACK for second valid block (processes 1 blob + block 2)
// (No responses for the individual blobs)
assert_eq!(results.len(), 3, "Expected exactly 3 responses from stream");
// Verify the first result is a successful ACK
assert!(
matches!(results[0], Ok(())),
"First valid block should produce successful ACK"
);
// Verify the second result is an error for invalid block
// Verify the error details
if let Err(status) = &results[1] {
assert_eq!(
status.code(),
Code::InvalidArgument,
"Invalid block should return InvalidArgument status"
);
assert!(
status.message().contains("Invalid block"),
"Error message should mention invalid block"
);
}
// Verify the third result is a successful ACK
assert!(
matches!(results[2], Ok(())),
"Second valid block should produce successful ACK"
);
// After processing the stream, we should have:
// - 1 blob stored (all blobs have same content/BlobId, so they overwrite each other)
// - 2 blocks stored (valid_block1 and valid_block2 with different chain IDs)
// - invalid_block should NOT be stored (deserialization failed)
assert_eq!(
database.blob_count(),
1,
"Should have 1 unique blob stored (all blobs have same content/BlobId)"
);
assert_eq!(
database.block_count(),
2,
"Should have 2 valid blocks stored (invalid block should not be stored)"
);
}
#[tokio::test]
async fn test_process_stream_database_failure() {
use std::sync::Arc;
use futures::StreamExt;
use tokio_stream;
use tonic::{Code, Status};
// Use MockFailingDatabase to simulate database failures during block storage
let database = Arc::new(MockFailingDatabase::new());
// Create test elements: blobs + valid block (which will fail at database level)
let blob1 = test_blob_element();
let blob2 = test_blob_element();
let blob3 = test_blob_element();
let valid_block = valid_block_element(); // This will fail when trying to store
// Create a stream: blobs followed by a valid block that will fail at DB level
let elements = vec![
Ok(blob1), // Blob #1 - should not produce ACK, stored in pending_blobs
Ok(blob2), // Blob #2 - should not produce ACK, stored in pending_blobs
Ok(blob3), // Blob #3 - should not produce ACK, stored in pending_blobs
Ok(valid_block), // Valid Block - should produce ERROR due to database failure
];
// Create a BoxStream from the elements
let input_stream = tokio_stream::iter(elements).boxed();
// Call the process_stream method
let output_stream = IndexerGrpcServer::process_stream(database.clone(), input_stream);
// Collect all results from the output stream
let results: Vec<Result<(), Status>> = output_stream.collect().await;
// === VERIFY OUTPUT STREAM RESPONSES ===
// Verify we get exactly 1 response: ERROR for the block (database failure)
// (No responses for the individual blobs, they just accumulate in pending_blobs)
assert_eq!(results.len(), 1, "Expected exactly 1 response from stream");
// Verify the result is an error due to database failure
assert!(results[0].is_err(), "Block should produce database error");
// Verify the error details
if let Err(status) = &results[0] {
assert_eq!(
status.code(),
Code::InvalidArgument,
"MockFailingDatabase returns SqliteError::Serialization which maps to InvalidArgument"
);
assert!(
status
.message()
.contains("Mock: Cannot create real transaction"),
"Error message should contain database failure details, got: {}",
status.message()
);
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-indexer/lib/src/grpc/mod.rs | linera-indexer/lib/src/grpc/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! gRPC server implementation for the indexer.
#[cfg(test)]
mod tests;
use std::{collections::HashMap, pin::Pin, sync::Arc};
use async_trait::async_trait;
use futures::{stream::BoxStream, Stream, StreamExt};
use linera_base::{data_types::Blob, identifiers::BlobId};
use linera_chain::types::ConfirmedBlockCertificate;
use tonic::{transport::Server, Request, Response, Status, Streaming};
use tracing::{error, info, warn};
use crate::{
db::{sqlite::SqliteError, IndexerDatabase},
indexer_api::{
element::Payload,
indexer_server::{Indexer, IndexerServer},
Element,
},
};
/// Error type for processing elements in the indexer
#[derive(Debug, thiserror::Error)]
pub enum ProcessingError {
#[error("Failed to deserialize blob: {0}")]
BlobDeserialization(#[from] bincode::Error),
#[error("Failed to deserialize block: {0}")]
BlockDeserialization(String),
#[error("Failed to serialize blob: {0}")]
BlobSerialization(bincode::Error),
#[error("Failed to serialize block: {0}")]
BlockSerialization(bincode::Error),
#[error("Database error: {0}")]
DatabaseSqlite(#[from] SqliteError),
#[error("Database error: {0}")]
DatabasePostgres(#[from] crate::db::postgres::PostgresError),
#[error("Empty element payload")]
EmptyPayload,
}
pub struct IndexerGrpcServer<D> {
database: Arc<D>,
}
impl<D> IndexerGrpcServer<D> {
pub fn new(database: D) -> Self {
Self {
database: Arc::new(database),
}
}
}
impl<D: IndexerDatabase + 'static> IndexerGrpcServer<D>
where
    D::Error: Into<ProcessingError>,
{
    /// Start the gRPC indexer server
    ///
    /// Binds to all interfaces (`0.0.0.0`) on the given port and serves until
    /// the tonic server stops or returns an error.
    pub async fn serve(self, port: u16) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let addr = format!("0.0.0.0:{}", port).parse()?;
        info!("Starting gRPC indexer server on {}", addr);
        Server::builder()
            .add_service(IndexerServer::new(self))
            .serve(addr)
            .await?;
        Ok(())
    }
    /// Process the entire stream and return responses
    ///
    /// Drives the input with `stream::unfold`, carrying `(input_stream,
    /// database, pending_blobs)` as loop state. Blobs are buffered into
    /// `pending_blobs` and yield no output item; each block yields exactly one
    /// `Ok(())` ACK (or an error `Status`). The output ends when the input does.
    fn process_stream(
        database: Arc<D>,
        stream: BoxStream<'static, Result<Element, Status>>,
    ) -> impl Stream<Item = Result<(), Status>>
    where
        D::Error: Into<ProcessingError>,
    {
        futures::stream::unfold(
            (stream, database, HashMap::<BlobId, Vec<u8>>::new()),
            |(mut input_stream, database, mut pending_blobs)| async move {
                loop {
                    match input_stream.next().await {
                        Some(Ok(element)) => {
                            match Self::process_element(&database, &mut pending_blobs, element)
                                .await
                            {
                                Ok(Some(())) => {
                                    // If processing was successful, return an ACK
                                    info!("Processed element successfully");
                                    return Some((Ok(()), (input_stream, database, pending_blobs)));
                                }
                                Err(error) => {
                                    // If there was an error, return it
                                    // NOTE(review): the stream continues after an error;
                                    // `pending_blobs` is kept as-is for subsequent elements.
                                    let status = Status::from(error);
                                    error!("Error processing element: {status:?}");
                                    return Some((
                                        Err(status),
                                        (input_stream, database, pending_blobs),
                                    ));
                                }
                                Ok(None) => {
                                    // If processing was a blob, we just continue without returning a response
                                }
                            }
                        }
                        Some(Err(e)) => {
                            // Transport-level error from the client stream: forward it.
                            error!("Error receiving element: {}", e);
                            return Some((Err(e), (input_stream, database, pending_blobs)));
                        }
                        None => {
                            // Stream ended
                            return None;
                        }
                    }
                }
            },
        )
    }
    /// Process a single element and return a response if needed.
    /// This handles both blobs and blocks.
    /// For blobs, it stores them in `pending_blobs` and returns `Ok(None)`.
    /// For blocks, it processes them and returns `Ok(Some(()))` on success or `Err(ProcessingError)` on failure.
    async fn process_element(
        database: &D,
        pending_blobs: &mut HashMap<BlobId, Vec<u8>>,
        element: Element,
    ) -> Result<Option<()>, ProcessingError>
    where
        D::Error: Into<ProcessingError>,
    {
        match element.payload {
            Some(Payload::Blob(proto_blob)) => {
                // Convert protobuf blob to linera blob
                let blob = Blob::try_from(proto_blob)?;
                let blob_id = blob.id();
                let blob_data =
                    bincode::serialize(&blob).map_err(ProcessingError::BlobSerialization)?;
                info!("Received blob: {}", blob_id);
                pending_blobs.insert(blob_id, blob_data);
                Ok(None) // No response for blobs, just store them
            }
            Some(Payload::Block(proto_block)) => {
                // Convert protobuf block to linera block first
                let block_cert = ConfirmedBlockCertificate::try_from(proto_block)
                    .map_err(|e| ProcessingError::BlockDeserialization(e.to_string()))?;
                // Extract block metadata
                let block_hash = block_cert.hash();
                let chain_id = block_cert.inner().chain_id();
                let height = block_cert.inner().height();
                let timestamp = block_cert.inner().timestamp();
                info!(
                    "Received block: {} for chain: {} at height: {}",
                    block_hash, chain_id, height
                );
                // Serialize block BEFORE taking any database locks
                let block_data =
                    bincode::serialize(&block_cert).map_err(ProcessingError::BlockSerialization)?;
                // Convert pending blobs to the format expected by the high-level API
                let blobs: Vec<(BlobId, Vec<u8>)> = pending_blobs
                    .iter()
                    .map(|(blob_id, blob_data)| (*blob_id, blob_data.clone()))
                    .collect();
                // Use the high-level atomic API - this manages all locking internally
                database
                    .store_block_with_blobs(
                        &block_hash,
                        &chain_id,
                        height,
                        timestamp,
                        &block_data,
                        &blobs,
                    )
                    .await
                    .map_err(Into::into)?;
                info!(
                    "Successfully committed block {} with {} blobs",
                    block_hash,
                    pending_blobs.len()
                );
                // All buffered blobs were committed with this block; start fresh.
                pending_blobs.clear();
                Ok(Some(()))
            }
            None => {
                warn!("Received empty element");
                Err(ProcessingError::EmptyPayload)
            }
        }
    }
}
#[async_trait]
impl<D: IndexerDatabase + 'static> Indexer for IndexerGrpcServer<D>
where
    D::Error: Into<ProcessingError>,
{
    type IndexBatchStream = Pin<Box<dyn Stream<Item = Result<(), Status>> + Send + 'static>>;
    /// Handles a bidirectional `IndexBatch` call: elements stream in, ACKs stream out.
    async fn index_batch(
        &self,
        request: Request<Streaming<Element>>,
    ) -> Result<Response<Self::IndexBatchStream>, Status> {
        let input = request.into_inner().boxed();
        let responses = Self::process_stream(Arc::clone(&self.database), input);
        Ok(Response::new(Box::pin(responses)))
    }
}
impl From<SqliteError> for Status {
fn from(error: SqliteError) -> Self {
match error {
SqliteError::Database(e) => Status::internal(format!("Database error: {}", e)),
SqliteError::Serialization(e) => {
Status::invalid_argument(format!("Serialization error: {}", e))
}
SqliteError::BlockNotFound(hash) => {
Status::not_found(format!("Block not found: {}", hash))
}
SqliteError::BlobNotFound(hash) => {
Status::not_found(format!("Blob not found: {}", hash))
}
}
}
}
impl From<crate::db::postgres::PostgresError> for Status {
fn from(error: crate::db::postgres::PostgresError) -> Self {
use crate::db::postgres::PostgresError;
match error {
PostgresError::Database(e) => Status::internal(format!("Database error: {}", e)),
PostgresError::Serialization(e) => {
Status::invalid_argument(format!("Serialization error: {}", e))
}
PostgresError::BlockNotFound(hash) => {
Status::not_found(format!("Block not found: {}", hash))
}
PostgresError::BlobNotFound(hash) => {
Status::not_found(format!("Blob not found: {}", hash))
}
}
}
}
impl From<ProcessingError> for Status {
fn from(error: ProcessingError) -> Self {
match error {
ProcessingError::BlobDeserialization(e) => {
Status::invalid_argument(format!("Invalid blob: {}", e))
}
ProcessingError::BlockDeserialization(e) => {
Status::invalid_argument(format!("Invalid block: {}", e))
}
ProcessingError::BlobSerialization(e) => {
Status::internal(format!("Failed to serialize blob: {}", e))
}
ProcessingError::BlockSerialization(e) => {
Status::internal(format!("Failed to serialize block: {}", e))
}
ProcessingError::DatabaseSqlite(e) => e.into(),
ProcessingError::DatabasePostgres(e) => e.into(),
ProcessingError::EmptyPayload => Status::invalid_argument("Empty element"),
}
}
}
/// Type conversions between protobuf and linera types
impl TryFrom<crate::indexer_api::Block> for ConfirmedBlockCertificate {
    type Error = bincode::Error;
    /// Decodes a certificate from the raw `bincode` bytes carried in the protobuf message.
    fn try_from(value: crate::indexer_api::Block) -> Result<Self, Self::Error> {
        bincode::deserialize(&value.bytes)
    }
}
impl TryFrom<crate::indexer_api::Blob> for Blob {
    type Error = bincode::Error;
    /// Decodes a blob from the raw `bincode` bytes carried in the protobuf message.
    fn try_from(value: crate::indexer_api::Blob) -> Result<Self, Self::Error> {
        bincode::deserialize(&value.bytes)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/crowd-funding/src/contract.rs | examples/crowd-funding/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use crowd_funding::{CrowdFundingAbi, InstantiationArgument, Message, Operation};
use fungible::FungibleTokenAbi;
use linera_sdk::{
abis::fungible::FungibleOperation,
linera_base_types::{Account, AccountOwner, Amount, ApplicationId, WithContractAbi},
views::{RootView, View},
Contract, ContractRuntime,
};
use state::{CrowdFundingState, Status};
/// The crowd-funding application's contract.
pub struct CrowdFundingContract {
    /// Persistent campaign state (status, pledges, instantiation parameters).
    state: CrowdFundingState,
    /// Runtime handle used for cross-application calls and cross-chain messages.
    runtime: ContractRuntime<Self>,
}
// Generates the contract entry points for this type.
linera_sdk::contract!(CrowdFundingContract);
impl WithContractAbi for CrowdFundingContract {
    type Abi = CrowdFundingAbi;
}
impl Contract for CrowdFundingContract {
    type Message = Message;
    type InstantiationArgument = InstantiationArgument;
    // The parameters identify the fungible token used for pledges.
    type Parameters = ApplicationId<fungible::FungibleTokenAbi>;
    type EventValue = ();
    /// Loads the campaign state from storage for this execution.
    async fn load(runtime: ContractRuntime<Self>) -> Self {
        let state = CrowdFundingState::load(runtime.root_view_storage_context())
            .await
            .expect("Failed to load state");
        CrowdFundingContract { state, runtime }
    }
    /// Stores the campaign parameters; panics if the deadline is already past.
    async fn instantiate(&mut self, argument: InstantiationArgument) {
        // Validate that the application parameters were configured correctly.
        let _ = self.runtime.application_parameters();
        self.state.instantiation_argument.set(Some(argument));
        let deadline = self.instantiation_argument().deadline;
        assert!(
            deadline > self.runtime.system_time(),
            "Crowd-funding campaign cannot start after its deadline"
        );
    }
    async fn execute_operation(&mut self, operation: Operation) -> Self::Response {
        match operation {
            Operation::Pledge { owner, amount } => {
                // On the campaign (creator) chain the pledge is credited directly;
                // on any other chain, funds are transferred and a message is sent.
                if self.runtime.chain_id() == self.runtime.application_creator_chain_id() {
                    self.execute_pledge_with_account(owner, amount).await;
                } else {
                    self.execute_pledge_with_transfer(owner, amount);
                }
            }
            Operation::Collect => self.collect_pledges(),
            Operation::Cancel => self.cancel_campaign().await,
        }
    }
    async fn execute_message(&mut self, message: Message) {
        match message {
            Message::PledgeWithAccount { owner, amount } => {
                // Pledge attributions are only valid on the campaign chain.
                assert_eq!(
                    self.runtime.chain_id(),
                    self.runtime.application_creator_chain_id(),
                    "Action can only be executed on the chain that created the crowd-funding \
                    campaign"
                );
                self.execute_pledge_with_account(owner, amount).await;
            }
        }
    }
    /// Persists the campaign state at the end of execution.
    async fn store(mut self) {
        self.state.save().await.expect("Failed to save state");
    }
}
impl CrowdFundingContract {
    /// Returns the [`ApplicationId`] of the fungible token used for pledges.
    fn fungible_id(&mut self) -> ApplicationId<FungibleTokenAbi> {
        // TODO(#723): We should be able to pull the fungible ID from the
        // `required_application_ids` of the application description.
        self.runtime.application_parameters()
    }
    /// Adds a pledge from a local account to the remote campaign chain.
    fn execute_pledge_with_transfer(&mut self, owner: AccountOwner, amount: Amount) {
        assert!(amount > Amount::ZERO, "Pledge is empty");
        // The campaign chain.
        let chain_id = self.runtime.application_creator_chain_id();
        // First, move the funds to the campaign chain (under the same owner).
        // TODO(#589): Simplify this when the messaging system guarantees atomic delivery
        // of all messages created in the same operation/message.
        let target_account = Account { chain_id, owner };
        let call = FungibleOperation::Transfer {
            owner,
            amount,
            target_account,
        };
        let fungible_id = self.fungible_id();
        self.runtime
            .call_application(/* authenticated by owner */ true, fungible_id, &call);
        // Second, schedule the attribution of the funds to the (remote) campaign.
        self.runtime
            .prepare_message(Message::PledgeWithAccount { owner, amount })
            .with_authentication()
            .send_to(chain_id);
    }
    /// Adds a pledge from a local account to the campaign chain.
    async fn execute_pledge_with_account(&mut self, owner: AccountOwner, amount: Amount) {
        assert!(amount > Amount::ZERO, "Pledge is empty");
        // Pull the tokens into the application's custody, then record the pledge.
        self.receive_from_account(owner, amount);
        self.finish_pledge(owner, amount).await
    }
    /// Marks a pledge in the application state, so that it can be returned if the campaign is
    /// cancelled.
    async fn finish_pledge(&mut self, source: AccountOwner, amount: Amount) {
        match self.state.status.get() {
            // Active: accumulate so the pledge can be refunded on cancellation.
            Status::Active => self
                .state
                .pledges
                .get_mut_or_default(&source)
                .await
                .expect("view access should not fail")
                .saturating_add_assign(amount),
            // Complete: forward late pledges straight to the campaign owner.
            Status::Complete => self.send_to(amount, self.instantiation_argument().owner),
            Status::Cancelled => panic!("Crowd-funding campaign has been cancelled"),
        }
    }
    /// Collects all pledges and completes the campaign if the target has been reached.
    fn collect_pledges(&mut self) {
        let total = self.balance();
        match self.state.status.get() {
            Status::Active => {
                assert!(
                    total >= self.instantiation_argument().target,
                    "Crowd-funding campaign has not reached its target yet"
                );
            }
            // Already complete: collecting again just forwards any new funds.
            Status::Complete => (),
            Status::Cancelled => panic!("Crowd-funding campaign has been cancelled"),
        }
        self.send_to(total, self.instantiation_argument().owner);
        self.state.pledges.clear();
        self.state.status.set(Status::Complete);
    }
    /// Cancels the campaign if the deadline has passed, refunding all pledges.
    async fn cancel_campaign(&mut self) {
        assert!(
            !self.state.status.get().is_complete(),
            "Crowd-funding campaign has already been completed"
        );
        // TODO(#728): Remove this.
        #[cfg(not(test))]
        assert!(
            self.runtime.system_time() >= self.instantiation_argument().deadline,
            "Crowd-funding campaign has not reached its deadline yet"
        );
        // Snapshot the pledges first, since the refunds below mutate state.
        let mut pledges = Vec::new();
        self.state
            .pledges
            .for_each_index_value(|pledger, amount| {
                let amount = amount.into_owned();
                pledges.push((pledger, amount));
                Ok(())
            })
            .await
            .expect("view iteration should not fail");
        for (pledger, amount) in pledges {
            self.send_to(amount, pledger);
        }
        // Any remaining balance in custody goes to the campaign owner.
        let balance = self.balance();
        self.send_to(balance, self.instantiation_argument().owner);
        self.state.status.set(Status::Cancelled);
    }
    /// Queries the token application to determine the total amount of tokens in custody.
    fn balance(&mut self) -> Amount {
        let owner = self.runtime.application_id().into();
        let fungible_id = self.fungible_id();
        let response =
            self.runtime
                .call_application(true, fungible_id, &FungibleOperation::Balance { owner });
        match response {
            fungible::FungibleResponse::Balance(balance) => balance,
            response => panic!("Unexpected response from fungible token application: {response:?}"),
        }
    }
    /// Transfers `amount` tokens from the funds in custody to the `owner`'s account.
    fn send_to(&mut self, amount: Amount, owner: AccountOwner) {
        let target_account = Account {
            chain_id: self.runtime.chain_id(),
            owner,
        };
        let transfer = FungibleOperation::Transfer {
            owner: self.runtime.application_id().into(),
            amount,
            target_account,
        };
        let fungible_id = self.fungible_id();
        self.runtime.call_application(true, fungible_id, &transfer);
    }
    /// Calls into the Fungible Token application to receive tokens from the given account.
    fn receive_from_account(&mut self, owner: AccountOwner, amount: Amount) {
        let target_account = Account {
            chain_id: self.runtime.chain_id(),
            owner: self.runtime.application_id().into(),
        };
        let transfer = FungibleOperation::Transfer {
            owner,
            amount,
            target_account,
        };
        let fungible_id = self.fungible_id();
        self.runtime.call_application(true, fungible_id, &transfer);
    }
    /// Returns the campaign's instantiation parameters.
    ///
    /// # Panics
    /// If called before `instantiate` has run on this chain.
    pub fn instantiation_argument(&self) -> &InstantiationArgument {
        self.state
            .instantiation_argument
            .get()
            .as_ref()
            .expect("Application is not running on the host chain or was not instantiated yet")
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/crowd-funding/src/lib.rs | examples/crowd-funding/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Crowd-funding Example Application */
use async_graphql::{Request, Response, SimpleObject};
use linera_sdk::{
graphql::GraphQLMutationRoot,
linera_base_types::{AccountOwner, Amount, ContractAbi, ServiceAbi, Timestamp},
};
use serde::{Deserialize, Serialize};
/// Marker type tying together the contract and service interfaces of the
/// crowd-funding application.
pub struct CrowdFundingAbi;
impl ContractAbi for CrowdFundingAbi {
    type Operation = Operation;
    type Response = ();
}
impl ServiceAbi for CrowdFundingAbi {
    // The service speaks GraphQL: requests in, responses out.
    type Query = Request;
    type QueryResponse = Response;
}
/// The instantiation data required to create a crowd-funding campaign.
///
/// Rendered as JSON by its `Display` implementation.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, SimpleObject)]
pub struct InstantiationArgument {
    /// The receiver of the pledges of a successful campaign.
    pub owner: AccountOwner,
    /// The deadline of the campaign, after which it can be cancelled if it hasn't met its target.
    pub deadline: Timestamp,
    /// The funding target of the campaign.
    pub target: Amount,
}
impl std::fmt::Display for InstantiationArgument {
    /// Formats the argument as its JSON serialization.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let json = serde_json::to_string(self).expect("Serialization failed");
        f.write_str(&json)
    }
}
/// Operations that can be executed by the application.
///
/// `GraphQLMutationRoot` derives the service's GraphQL mutations from these variants.
#[derive(Debug, Deserialize, Serialize, GraphQLMutationRoot)]
pub enum Operation {
    /// Pledge some tokens to the campaign (from an account on the current chain to the campaign chain).
    Pledge { owner: AccountOwner, amount: Amount },
    /// Collect the pledges after the campaign has reached its target (campaign chain only).
    Collect,
    /// Cancel the campaign and refund all pledges after the campaign has reached its deadline (campaign chain only).
    Cancel,
}
/// Messages that can be exchanged across chains from the same application instance.
#[derive(Debug, Deserialize, Serialize)]
pub enum Message {
    /// Pledge some tokens to the campaign (from an account on the receiver chain).
    /// Sent alongside the token transfer from the pledger's chain.
    PledgeWithAccount { owner: AccountOwner, amount: Amount },
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/crowd-funding/src/state.rs | examples/crowd-funding/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use async_graphql::scalar;
use crowd_funding::InstantiationArgument;
use linera_sdk::{
linera_base_types::{AccountOwner, Amount},
views::{linera_views, MapView, RegisterView, RootView, ViewStorageContext},
};
use serde::{Deserialize, Serialize};
/// The status of a crowd-funding campaign.
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
pub enum Status {
    /// The campaign is active and can receive pledges.
    #[default]
    Active,
    /// The campaign has ended successfully and can still receive additional pledges.
    Complete,
    /// The campaign was cancelled, all pledges have been returned and no more pledges can be made.
    Cancelled,
}
// Expose `Status` to GraphQL as a JSON scalar rather than a full object type.
scalar!(Status);
/// The crowd-funding campaign's state.
#[derive(RootView, async_graphql::SimpleObject)]
#[view(context = ViewStorageContext)]
pub struct CrowdFundingState {
    /// The status of the campaign.
    pub status: RegisterView<Status>,
    /// The map of pledges that will be collected if the campaign succeeds.
    pub pledges: MapView<AccountOwner, Amount>,
    /// The instantiation data that determines the details of the campaign.
    /// `None` until `instantiate` has run on this chain.
    pub instantiation_argument: RegisterView<Option<InstantiationArgument>>,
}
#[allow(dead_code)]
impl Status {
    /// Returns `true` if the campaign status is [`Status::Complete`].
    pub fn is_complete(&self) -> bool {
        match self {
            Status::Complete => true,
            Status::Active | Status::Cancelled => false,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/crowd-funding/src/service.rs | examples/crowd-funding/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use std::sync::Arc;
use async_graphql::{EmptySubscription, Request, Response, Schema};
use crowd_funding::Operation;
use linera_sdk::{
graphql::GraphQLMutationRoot as _,
linera_base_types::{ApplicationId, WithServiceAbi},
views::View,
Service, ServiceRuntime,
};
use state::CrowdFundingState;
/// The crowd-funding service: read-only GraphQL access to the campaign state.
pub struct CrowdFundingService {
    /// Campaign state shared with the GraphQL schema.
    state: Arc<CrowdFundingState>,
    /// Runtime handle shared with the mutation root.
    runtime: Arc<ServiceRuntime<Self>>,
}
// Generates the service entry points for this type.
linera_sdk::service!(CrowdFundingService);
impl WithServiceAbi for CrowdFundingService {
    type Abi = crowd_funding::CrowdFundingAbi;
}
impl Service for CrowdFundingService {
    type Parameters = ApplicationId<fungible::FungibleTokenAbi>;
    /// Loads the campaign state for read-only queries.
    async fn new(runtime: ServiceRuntime<Self>) -> Self {
        let state = CrowdFundingState::load(runtime.root_view_storage_context())
            .await
            .expect("Failed to load state");
        CrowdFundingService {
            state: Arc::new(state),
            runtime: Arc::new(runtime),
        }
    }
    /// Answers a GraphQL request: queries read the state; mutations are
    /// derived from `Operation` via its `GraphQLMutationRoot` derive.
    async fn handle_query(&self, request: Request) -> Response {
        let schema = Schema::build(
            self.state.clone(),
            Operation::mutation_root(self.runtime.clone()),
            EmptySubscription,
        )
        .finish();
        schema.execute(request).await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/crowd-funding/tests/campaign_lifecycle.rs | examples/crowd-funding/tests/campaign_lifecycle.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Integration tests for the Crowd-funding application.
#![cfg(not(target_arch = "wasm32"))]
use std::iter;
use crowd_funding::{CrowdFundingAbi, InstantiationArgument, Operation};
use fungible::FungibleTokenAbi;
use linera_sdk::{
linera_base_types::{
AccountOwner, AccountSecretKey, Amount, ApplicationId, Ed25519SecretKey,
Secp256k1SecretKey, Timestamp,
},
test::TestValidator,
};
/// Test creating a campaign and collecting pledges.
///
/// Creates a campaign on a `campaign_chain` and sets up the fungible token to use with three
/// backer chains. Pledges part of each backer's balance to the campaign and then completes it,
/// collecting the pledges. The final balance of each backer and the campaign owner is checked.
#[tokio::test(flavor = "multi_thread")]
async fn collect_pledges() {
    let initial_amount = Amount::from_tokens(100);
    let target_amount = Amount::from_tokens(220);
    let pledge_amount = Amount::from_tokens(75);
    // Build a validator with the crowd-funding module compiled from this crate.
    let (validator, module_id) = TestValidator::with_current_module::<
        CrowdFundingAbi,
        ApplicationId<FungibleTokenAbi>,
        InstantiationArgument,
    >()
    .await;
    let fungible_chain_owner = AccountSecretKey::Ed25519(Ed25519SecretKey::generate());
    let fungible_publisher_chain = validator.new_chain_with_keypair(fungible_chain_owner).await;
    let campaign_chain_owner = AccountSecretKey::Secp256k1(Secp256k1SecretKey::generate());
    let mut campaign_chain = validator.new_chain_with_keypair(campaign_chain_owner).await;
    let campaign_account = AccountOwner::from(campaign_chain.public_key());
    let fungible_module_id = fungible_publisher_chain
        .publish_bytecode_files_in("../fungible")
        .await;
    // Three backer chains, each funded with `initial_amount` tokens.
    let (token_id, backers) = fungible::create_with_accounts(
        &validator,
        fungible_module_id,
        iter::repeat_n(initial_amount, 3),
    )
    .await;
    let campaign_state = InstantiationArgument {
        owner: campaign_account,
        // Deadline far in the future so the campaign stays active.
        deadline: Timestamp::from(u64::MAX),
        target: target_amount,
    };
    let campaign_id = campaign_chain
        .create_application(
            module_id,
            token_id,
            campaign_state,
            vec![token_id.forget_abi()],
        )
        .await;
    let mut pledges_and_transfers = Vec::new();
    for (backer_chain, backer_account, _balance) in &backers {
        let pledge_certificate = backer_chain
            .add_block(|block| {
                block.with_operation(
                    campaign_id,
                    Operation::Pledge {
                        owner: *backer_account,
                        amount: pledge_amount,
                    },
                );
            })
            .await;
        // A remote pledge emits two messages: the token transfer and the pledge attribution.
        assert_eq!(pledge_certificate.outgoing_message_count(), 2);
        pledges_and_transfers.push(pledge_certificate);
    }
    // Deliver all pledge messages to the campaign chain.
    campaign_chain
        .add_block(|block| {
            for certificate in &pledges_and_transfers {
                block.with_messages_from(certificate);
            }
        })
        .await;
    // The campaign owner has received nothing before `Collect`.
    assert_eq!(
        fungible::query_account(token_id, &campaign_chain, campaign_account).await,
        None
    );
    campaign_chain
        .add_block(|block| {
            block.with_operation(campaign_id, Operation::Collect);
        })
        .await;
    // After collecting, the owner holds the sum of all pledges.
    assert_eq!(
        fungible::query_account(token_id, &campaign_chain, campaign_account).await,
        Some(pledge_amount.saturating_mul(backers.len() as u128)),
    );
    for (backer_chain, backer_account, initial_amount) in backers {
        assert_eq!(
            fungible::query_account(token_id, &backer_chain, backer_account).await,
            Some(initial_amount.saturating_sub(pledge_amount)),
        );
        assert_eq!(
            fungible::query_account(token_id, &campaign_chain, backer_account).await,
            None,
        );
    }
}
/// Test creating a campaign and cancelling it.
///
/// Creates a campaign on a `campaign_chain` and sets up the fungible token to use with three
/// backer chains. Pledges part of each backer's balance to the campaign and then cancels it
/// after the deadline has passed, refunding all pledges. The final balance of each backer and
/// the campaign owner is checked.
#[tokio::test(flavor = "multi_thread")]
async fn cancel_successful_campaign() {
    let initial_amount = Amount::from_tokens(100);
    let target_amount = Amount::from_tokens(220);
    let pledge_amount = Amount::from_tokens(75);
    let (validator, module_id) = TestValidator::with_current_module::<
        CrowdFundingAbi,
        ApplicationId<FungibleTokenAbi>,
        InstantiationArgument,
    >()
    .await;
    let fungible_publisher_chain = validator.new_chain().await;
    let mut campaign_chain = validator.new_chain().await;
    let campaign_account = AccountOwner::from(campaign_chain.public_key());
    let fungible_module_id = fungible_publisher_chain
        .publish_bytecode_files_in("../fungible")
        .await;
    // Three backer chains, each funded with `initial_amount` tokens.
    let (token_id, backers) = fungible::create_with_accounts(
        &validator,
        fungible_module_id,
        iter::repeat_n(initial_amount, 3),
    )
    .await;
    let campaign_state = InstantiationArgument {
        owner: campaign_account,
        // Early deadline so the campaign can be cancelled at timestamp 20 below.
        deadline: Timestamp::from(10),
        target: target_amount,
    };
    let campaign_id = campaign_chain
        .create_application(
            module_id,
            token_id,
            campaign_state,
            vec![token_id.forget_abi()],
        )
        .await;
    let mut pledges_and_transfers = Vec::new();
    for (backer_chain, backer_account, _balance) in &backers {
        let pledge_certificate = backer_chain
            .add_block(|block| {
                block.with_operation(
                    campaign_id,
                    Operation::Pledge {
                        owner: *backer_account,
                        amount: pledge_amount,
                    },
                );
            })
            .await;
        // A remote pledge emits two messages: the token transfer and the pledge attribution.
        assert_eq!(pledge_certificate.outgoing_message_count(), 2);
        pledges_and_transfers.push(pledge_certificate);
    }
    campaign_chain
        .add_block(|block| {
            for certificate in &pledges_and_transfers {
                block.with_messages_from(certificate);
            }
        })
        .await;
    assert_eq!(
        fungible::query_account(token_id, &campaign_chain, campaign_account).await,
        None
    );
    // Cancel after the deadline (block timestamp 20 > deadline 10).
    campaign_chain
        .add_block(|block| {
            block
                .with_timestamp(Timestamp::from(20))
                .with_operation(campaign_id, Operation::Cancel);
        })
        .await;
    // The owner receives nothing; the pledges are refunded instead.
    assert_eq!(
        fungible::query_account(token_id, &campaign_chain, campaign_account).await,
        None,
    );
    for (backer_chain, backer_account, initial_amount) in backers {
        assert_eq!(
            fungible::query_account(token_id, &backer_chain, backer_account).await,
            Some(initial_amount.saturating_sub(pledge_amount)),
        );
        // Refunds land on the campaign chain under each backer's account.
        assert_eq!(
            fungible::query_account(token_id, &campaign_chain, backer_account).await,
            Some(pledge_amount),
        );
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/controller/src/contract.rs | examples/controller/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use std::collections::HashSet;
use async_graphql::ComplexObject;
use linera_sdk::{
abis::controller::{
ControllerAbi, ControllerCommand, ManagedServiceId, Operation, Worker, WorkerCommand,
},
graphql::GraphQLMutationRoot,
linera_base_types::{AccountOwner, ChainId, WithContractAbi},
views::{RootView, View},
Contract, ContractRuntime,
};
use serde::{Deserialize, Serialize};
use self::state::ControllerState;
/// The controller application's contract: tracks registered workers, service
/// assignments, and admin configuration, and routes commands to and from the
/// controller (creator) chain.
pub struct ControllerContract {
    /// Persistent controller state.
    state: ControllerState,
    /// Runtime handle for cross-chain messages and permission checks.
    runtime: ContractRuntime<Self>,
}
// Generates the contract entry points for this type.
linera_sdk::contract!(ControllerContract);
impl WithContractAbi for ControllerContract {
    type Abi = ControllerAbi;
}
/// Cross-chain messages exchanged by the controller application.
#[derive(Clone, Debug, Deserialize, Serialize, GraphQLMutationRoot)]
pub enum Message {
    // -- Message to the controller chain --
    /// Forwards an owner's worker command from a worker chain to the controller chain.
    ExecuteWorkerCommand {
        owner: AccountOwner,
        command: WorkerCommand,
    },
    /// Forwards an admin's controller command to the controller chain.
    ExecuteControllerCommand {
        admin: AccountOwner,
        command: ControllerCommand,
    },
    // -- Messages sent to the workers from the controller chain --
    /// Clears the worker's local registration and service assignments.
    Reset,
    /// Adds the service to the worker's set of locally running services.
    Start {
        service_id: ManagedServiceId,
    },
    /// Removes the service from the worker's set of locally running services.
    Stop {
        service_id: ManagedServiceId,
    },
    /// Adds the chain to the worker's set of followed chains.
    FollowChain {
        chain_id: ChainId,
    },
    /// Removes the chain from the worker's set of followed chains.
    ForgetChain {
        chain_id: ChainId,
    },
}
impl Contract for ControllerContract {
    type Message = Message;
    type Parameters = ();
    type InstantiationArgument = ();
    type EventValue = ();
    /// Loads the controller state from storage for this execution.
    async fn load(runtime: ContractRuntime<Self>) -> Self {
        let state = ControllerState::load(runtime.root_view_storage_context())
            .await
            .expect("Failed to load state");
        ControllerContract { state, runtime }
    }
    async fn instantiate(&mut self, _argument: Self::InstantiationArgument) {
        // validate that the application parameters were configured correctly.
        self.runtime.application_parameters();
    }
    /// Authenticates the signer, then either executes the command locally (when
    /// already on the controller chain) or forwards it there as a message.
    async fn execute_operation(&mut self, operation: Self::Operation) -> Self::Response {
        match operation {
            Operation::ExecuteWorkerCommand { owner, command } => {
                self.runtime
                    .check_account_permission(owner)
                    .expect("Failed to authenticate owner for ExecuteWorkerCommand operation");
                // Update this worker chain's own registration first.
                self.prepare_worker_command_locally(owner, &command).await;
                let creator_chain_id = self.runtime.application_creator_chain_id();
                if self.runtime.chain_id() == creator_chain_id {
                    self.execute_worker_command_locally(owner, command, creator_chain_id)
                        .await;
                } else {
                    self.runtime
                        .prepare_message(Message::ExecuteWorkerCommand { owner, command })
                        .send_to(creator_chain_id);
                }
            }
            Operation::ExecuteControllerCommand { admin, command } => {
                self.runtime
                    .check_account_permission(admin)
                    .expect("Failed to authenticate admin for ExecuteControllerCommand operation");
                let creator_chain_id = self.runtime.application_creator_chain_id();
                if self.runtime.chain_id() == creator_chain_id {
                    self.execute_controller_command_locally(admin, command)
                        .await;
                } else {
                    self.runtime
                        .prepare_message(Message::ExecuteControllerCommand { admin, command })
                        .send_to(creator_chain_id);
                }
            }
        }
    }
    async fn execute_message(&mut self, message: Self::Message) {
        match message {
            Message::ExecuteWorkerCommand { owner, command } => {
                // Bug fix: the panic message previously named `ExecuteAdminCommand`,
                // which is not a variant of this application's messages.
                assert_eq!(
                    self.runtime.chain_id(),
                    self.runtime.application_creator_chain_id(),
                    "ExecuteWorkerCommand can only be executed on the chain that created the PM engine"
                );
                let origin_chain_id = self.runtime.message_origin_chain_id().expect(
                    "Incoming message origin chain ID has to be available when executing a message",
                );
                self.execute_worker_command_locally(owner, command, origin_chain_id)
                    .await;
            }
            Message::ExecuteControllerCommand { admin, command } => {
                assert_eq!(
                    self.runtime.chain_id(),
                    self.runtime.application_creator_chain_id(),
                    "ExecuteControllerCommand can only be executed on the chain that created the PM engine"
                );
                self.execute_controller_command_locally(admin, command)
                    .await;
            }
            Message::Reset => {
                // Controller told this worker to drop all local assignments.
                self.state.local_worker.set(None);
                self.state.local_services.clear();
            }
            Message::Start { service_id } => {
                self.state
                    .local_services
                    .insert(&service_id)
                    .expect("storage");
            }
            Message::Stop { service_id } => {
                self.state
                    .local_services
                    .remove(&service_id)
                    .expect("storage");
            }
            Message::FollowChain { chain_id } => {
                self.state.local_chains.insert(&chain_id).expect("storage");
            }
            Message::ForgetChain { chain_id } => {
                self.state.local_chains.remove(&chain_id).expect("storage");
            }
        }
    }
    /// Persists the controller state at the end of execution.
    async fn store(mut self) {
        self.state.save().await.expect("Failed to save state");
    }
}
impl ControllerContract {
    /// Applies a worker command to this chain's local registration before it is
    /// executed on (or forwarded to) the controller chain.
    async fn prepare_worker_command_locally(
        &mut self,
        owner: AccountOwner,
        command: &WorkerCommand,
    ) {
        match command {
            WorkerCommand::RegisterWorker { capabilities } => {
                assert!(
                    self.state.local_worker.get().is_none(),
                    "Cannot register worker twice"
                );
                let worker = Worker {
                    owner,
                    capabilities: capabilities.clone(),
                };
                self.state.local_worker.set(Some(worker));
            }
            WorkerCommand::DeregisterWorker => {
                assert!(
                    self.state.local_worker.get().is_some(),
                    "Cannot unregister worker that is not registered"
                );
                self.state.local_worker.set(None);
            }
        }
    }
    /// Applies a worker command to the controller chain's worker registry,
    /// keyed by the chain the command originated from.
    async fn execute_worker_command_locally(
        &mut self,
        owner: AccountOwner,
        command: WorkerCommand,
        origin_chain_id: ChainId,
    ) {
        match command {
            WorkerCommand::RegisterWorker { capabilities } => {
                let worker = Worker {
                    owner,
                    capabilities,
                };
                self.state
                    .workers
                    .insert(&origin_chain_id, worker)
                    .expect("storage");
            }
            WorkerCommand::DeregisterWorker => {
                self.state
                    .workers
                    .remove(&origin_chain_id)
                    .expect("storage");
            }
        }
    }
    /// Executes an admin command on the controller chain after checking that
    /// `admin` is authorized.
    async fn execute_controller_command_locally(
        &mut self,
        admin: AccountOwner,
        command: ControllerCommand,
    ) {
        let Some(admins) = self.state.admins.get() else {
            // No admin list was set yet. Hence, everyone is an owner. However, messages
            // are disallowed.
            assert!(
                self.runtime.message_origin_chain_id().is_none(),
                "Refusing to execute remote control command",
            );
            return;
        };
        assert!(
            admins.contains(&admin),
            "Controller command can only be executed by an authorized admin account. Got {admin}"
        );
        match command {
            ControllerCommand::SetAdmins { admins } => {
                self.state
                    .admins
                    .set(admins.map(|v| v.into_iter().collect()));
            }
            ControllerCommand::RemoveWorker { worker_id } => {
                // Drop the worker from the registry, tell it to reset, and
                // reconcile every service it was assigned to.
                self.state.workers.remove(&worker_id).expect("storage");
                self.runtime
                    .prepare_message(Message::Reset)
                    .send_to(worker_id);
                let services_ids = self.state.services.indices().await.expect("storage");
                for id in services_ids {
                    let mut workers = self
                        .state
                        .services
                        .get(&id)
                        .await
                        .expect("storage")
                        .expect("value should be present");
                    if workers.remove(&worker_id) {
                        self.update_service(id, workers).await;
                    }
                }
            }
            ControllerCommand::UpdateService {
                service_id,
                workers,
            } => {
                self.update_service(service_id, workers.into_iter().collect())
                    .await;
            }
            ControllerCommand::RemoveService { service_id } => {
                // An empty worker set removes the service entirely.
                self.update_service(service_id, HashSet::new()).await;
            }
            ControllerCommand::UpdateAllServices { services } => {
                // Replace the full assignment: update the listed services, then
                // clear every service not mentioned in the new map.
                let mut previous_ids = self
                    .state
                    .services
                    .indices()
                    .await
                    .expect("storage")
                    .into_iter()
                    .collect::<HashSet<_>>();
                for (id, workers) in services {
                    previous_ids.remove(&id);
                    self.update_service(id, workers.into_iter().collect()).await;
                }
                for id in previous_ids {
                    self.update_service(id, HashSet::new()).await;
                }
            }
        }
    }
    /// Reconciles a service's worker assignment: sends `Start` to newly added
    /// workers, `Stop` to removed ones, and persists (or deletes) the entry.
    async fn update_service(
        &mut self,
        service_id: ManagedServiceId,
        new_workers: HashSet<ChainId>,
    ) {
        let existing_workers = self
            .state
            .services
            .get(&service_id)
            .await
            .expect("storage")
            .unwrap_or_default();
        let message = Message::Start { service_id };
        for worker in &new_workers {
            if !existing_workers.contains(worker) {
                self.runtime
                    .prepare_message(message.clone())
                    .send_to(*worker);
            }
        }
        let message = Message::Stop { service_id };
        for worker in &existing_workers {
            if !new_workers.contains(worker) {
                self.runtime
                    .prepare_message(message.clone())
                    .send_to(*worker);
            }
        }
        if new_workers.is_empty() {
            self.state.services.remove(&service_id).expect("storage");
        } else {
            self.state
                .services
                .insert(&service_id, new_workers)
                .expect("storage");
        }
    }
}
/// This implementation is only nonempty in the service.
// NOTE(review): `ControllerState` is declared with `#[graphql(complex)]`, so a
// `ComplexObject` impl must exist wherever the type is compiled; the contract
// side only needs this empty placeholder — confirm against state.rs.
#[ComplexObject]
impl ControllerState {}
#[cfg(test)]
mod tests {
    use futures::FutureExt as _;
    use linera_sdk::{util::BlockingWait, views::View, Contract, ContractRuntime};

    use super::{ControllerContract, ControllerState};

    /// Loads the state from the mock store, builds the contract, and runs
    /// `instantiate` synchronously (it must not actually await).
    fn setup_contract() -> ControllerContract {
        let runtime = ContractRuntime::new().with_application_parameters(());
        let state = ControllerState::load(runtime.root_view_storage_context())
            .blocking_wait()
            .expect("Failed to read from mock key value store");
        let mut contract = ControllerContract { state, runtime };
        contract
            .instantiate(())
            .now_or_never()
            .expect("Initialization of application state should not await anything");
        contract
    }

    /// Instantiation must complete without panicking.
    #[test]
    fn instantiate() {
        let _app = setup_contract();
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/controller/src/state.rs | examples/controller/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashSet;
use linera_sdk::{
abis::controller::{ManagedServiceId, Worker},
linera_base_types::{AccountOwner, ChainId},
views::{linera_views, MapView, RegisterView, RootView, SetView, ViewStorageContext},
};
/// The state of the service controller application.
// The same view type serves both roles: worker chains use only the `local_*`
// fields below, while the controller chain maintains the admin/worker/service
// registries. Field doc comments double as GraphQL field descriptions.
#[derive(RootView, async_graphql::SimpleObject)]
#[graphql(complex)]
#[view(context = ViewStorageContext)]
pub struct ControllerState {
    // -- Worker chain only --
    /// The description of this worker as we registered it.
    pub local_worker: RegisterView<Option<Worker>>,
    /// The services currently running locally.
    pub local_services: SetView<ManagedServiceId>,
    /// The chains currently followed locally (besides ours and the active service
    /// chains).
    pub local_chains: SetView<ChainId>,
    // -- Controller chain only --
    /// The admin account owners (user or application) allowed to update services.
    // `None` means no admin list was set yet; the contract then treats everyone
    // as an owner but refuses remote (cross-chain) control commands.
    pub admins: RegisterView<Option<HashSet<AccountOwner>>>,
    /// All the workers declared in the network.
    pub workers: MapView<ChainId, Worker>,
    /// All the services currently defined and where they run. If services run on several
    /// workers, the chain is configured so that workers can collaborate to produce blocks
    /// (e.g. only one worker is actively producing blocks while the other waits as a
    /// backup).
    // NOTE: Currently, services should run on a single worker at a time.
    pub services: MapView<ManagedServiceId, HashSet<ChainId>>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/controller/src/service.rs | examples/controller/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use std::sync::Arc;
use async_graphql::{ComplexObject, Context, EmptySubscription, Schema};
use linera_sdk::{
abis::controller::{ControllerAbi, LocalWorkerState, ManagedService, Operation},
graphql::GraphQLMutationRoot,
linera_base_types::{DataBlobHash, WithServiceAbi},
views::View,
Service, ServiceRuntime,
};
use self::state::ControllerState;
/// GraphQL service front-end of the controller application.
pub struct ControllerService {
    // Application state, shared read-only with the GraphQL schema.
    state: Arc<ControllerState>,
    // Runtime handle; also injected into the schema as context data for resolvers.
    runtime: Arc<ServiceRuntime<Self>>,
}

linera_sdk::service!(ControllerService);

impl WithServiceAbi for ControllerService {
    type Abi = ControllerAbi;
}
impl Service for ControllerService {
    type Parameters = ();

    /// Loads the application state from storage and wraps state and runtime
    /// in `Arc`s so they can be shared with the GraphQL schema.
    async fn new(runtime: ServiceRuntime<Self>) -> Self {
        let context = runtime.root_view_storage_context();
        let state = ControllerState::load(context)
            .await
            .expect("Failed to load state");
        Self {
            state: Arc::new(state),
            runtime: Arc::new(runtime),
        }
    }

    /// Builds a fresh schema per query, with `Operation` as the mutation root
    /// and the runtime exposed as context data, then executes the request.
    async fn handle_query(&self, query: Self::Query) -> Self::QueryResponse {
        let mutation = Operation::mutation_root(self.runtime.clone());
        let schema = Schema::build(self.state.clone(), mutation, EmptySubscription)
            .data(self.runtime.clone())
            .finish();
        schema.execute(query).await
    }
}
// Resolvers that need the service runtime (fetched from the schema's context
// data). Doc comments on the methods are surfaced as GraphQL descriptions.
#[ComplexObject]
impl ControllerState {
    /// Retrieve information on a given service.
    async fn read_service(
        &self,
        ctx: &Context<'_>,
        service_id: DataBlobHash,
    ) -> Option<ManagedService> {
        let runtime = ctx
            .data::<Arc<ServiceRuntime<ControllerService>>>()
            .unwrap();
        // An unparseable blob yields `null` rather than an error.
        bcs::from_bytes(&runtime.read_data_blob(service_id)).ok()
    }

    /// Retrieve the local worker state.
    async fn local_worker_state(&self, ctx: &Context<'_>) -> LocalWorkerState {
        let runtime = ctx
            .data::<Arc<ServiceRuntime<ControllerService>>>()
            .unwrap();
        let local_worker = self.local_worker.get().clone();
        // Resolve each locally running service ID to its blob contents.
        let local_services = self
            .local_services
            .indices()
            .await
            .expect("storage")
            .into_iter()
            .map(|id| bcs::from_bytes(&runtime.read_data_blob(id)).ok())
            .collect::<Option<_>>()
            .expect("Service IDs should be valid data blobs");
        let local_chains = self
            .local_chains
            .indices()
            .await
            .expect("storage")
            .into_iter()
            .collect();
        LocalWorkerState {
            local_worker,
            local_services,
            local_chains,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use linera_sdk::{
        abis::controller::{LocalWorkerState, ManagedService, Worker},
        linera_base_types::{
            AccountOwner, ApplicationId, BlobContent, ChainId, CryptoHash, DataBlobHash,
        },
        util::BlockingWait,
        views::View,
        Service, ServiceRuntime,
    };
    use serde_json::json;

    use super::{ControllerService, ControllerState};

    /// Builds a `ControllerService` backed by the mock key-value store.
    fn create_service() -> ControllerService {
        let runtime = ServiceRuntime::<ControllerService>::new();
        let state = ControllerState::load(runtime.root_view_storage_context())
            .blocking_wait()
            .expect("Failed to read from mock key value store");
        ControllerService {
            state: Arc::new(state),
            runtime: Arc::new(runtime),
        }
    }

    /// A fresh service reports an entirely empty `LocalWorkerState`.
    #[test]
    fn query_local_worker_state_empty() {
        let service = create_service();
        let response = service
            .handle_query(async_graphql::Request::new("{ localWorkerState }"))
            .blocking_wait()
            .data
            .into_json()
            .expect("Response should be JSON");
        let state: LocalWorkerState = serde_json::from_value(response["localWorkerState"].clone())
            .expect("Should deserialize LocalWorkerState");
        assert!(state.local_worker.is_none());
        assert!(state.local_services.is_empty());
        assert!(state.local_chains.is_empty());
    }

    /// `readService` returns a stored `ManagedService` blob by its hash.
    #[test]
    fn query_read_service() {
        let service = create_service();
        // Create a test ManagedService and store it as a data blob.
        let managed_service = ManagedService {
            application_id: ApplicationId::default(),
            name: "test-service".to_string(),
            chain_id: ChainId::default(),
            requirements: vec!["API_KEY".to_string()],
        };
        let service_bytes = bcs::to_bytes(&managed_service).expect("Failed to serialize service");
        // The blob's ID is the hash of its content; register it with the mock
        // runtime before querying.
        let service_id = DataBlobHash(CryptoHash::new(&BlobContent::new_data(
            service_bytes.clone(),
        )));
        service.runtime.set_blob(service_id, service_bytes);
        // Query the service.
        let response = service
            .handle_query(async_graphql::Request::new(format!(
                r#"{{ readService(serviceId: "{}") }}"#,
                service_id.0
            )))
            .blocking_wait()
            .data
            .into_json()
            .expect("Response should be JSON");
        let result: ManagedService = serde_json::from_value(response["readService"].clone())
            .expect("Should deserialize ManagedService");
        assert_eq!(result.name, "test-service");
        assert_eq!(result.chain_id, ChainId::default());
        assert_eq!(result.requirements, vec!["API_KEY"]);
    }

    /// `readService` yields `null` (not an error) when the blob fails to parse.
    #[test]
    fn query_read_service_empty_blob() {
        let service = create_service();
        // Create a service ID pointing to an empty blob.
        let empty_bytes = vec![];
        let blob_id = DataBlobHash(CryptoHash::new(&BlobContent::new_data(empty_bytes.clone())));
        service.runtime.set_blob(blob_id, empty_bytes);
        // Query should return null for empty/unparseable blob.
        let response = service
            .handle_query(async_graphql::Request::new(format!(
                r#"{{ readService(serviceId: "{}") }}"#,
                blob_id.0
            )))
            .blocking_wait()
            .data
            .into_json()
            .expect("Response should be JSON");
        assert_eq!(response["readService"], json!(null));
    }

    /// End-to-end `localWorkerState` query against state populated with a
    /// worker, one service blob, and two followed chains.
    #[test]
    fn query_local_worker_state_populated() {
        let runtime = ServiceRuntime::<ControllerService>::new();
        let mut state = ControllerState::load(runtime.root_view_storage_context())
            .blocking_wait()
            .expect("Failed to read from mock key value store");
        // Set up a local worker.
        let worker = Worker {
            owner: AccountOwner::Address32(CryptoHash::test_hash("owner")),
            capabilities: vec!["GPU".to_string(), "API_KEY".to_string()],
        };
        state.local_worker.set(Some(worker));
        // Create and store a managed service as a blob.
        let managed_service = ManagedService {
            application_id: ApplicationId::default(),
            name: "test-engine".to_string(),
            chain_id: ChainId::default(),
            requirements: vec!["DATABASE_URL".to_string()],
        };
        let service_bytes = bcs::to_bytes(&managed_service).expect("Failed to serialize");
        let service_id = DataBlobHash(CryptoHash::new(&BlobContent::new_data(
            service_bytes.clone(),
        )));
        state
            .local_services
            .insert(&service_id)
            .expect("Failed to insert service");
        // Add some local chains.
        let chain1 = ChainId(CryptoHash::test_hash("chain1"));
        let chain2 = ChainId(CryptoHash::test_hash("chain2"));
        state
            .local_chains
            .insert(&chain1)
            .expect("Failed to insert chain");
        state
            .local_chains
            .insert(&chain2)
            .expect("Failed to insert chain");
        // Register the blob before handing the runtime to the service, so the
        // resolver can read it back.
        let runtime = Arc::new(runtime);
        runtime.set_blob(service_id, service_bytes);
        let service = ControllerService {
            state: Arc::new(state),
            runtime,
        };
        let response = service
            .handle_query(async_graphql::Request::new("{ localWorkerState }"))
            .blocking_wait()
            .data
            .into_json()
            .expect("Response should be JSON");
        let state: LocalWorkerState = serde_json::from_value(response["localWorkerState"].clone())
            .expect("Should deserialize LocalWorkerState");
        // Verify worker.
        let local_worker = state.local_worker.expect("Worker should be present");
        assert_eq!(local_worker.capabilities, vec!["GPU", "API_KEY"]);
        // Verify services.
        assert_eq!(state.local_services.len(), 1);
        assert_eq!(state.local_services[0].name, "test-engine");
        // Verify chains.
        assert_eq!(state.local_chains.len(), 2);
        assert!(state.local_chains.contains(&chain1));
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/examples/controller/tests/single_chain.rs | examples/controller/tests/single_chain.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Integration testing for the controller application.
#![cfg(not(target_arch = "wasm32"))]
use linera_sdk::{
abis::controller::ControllerAbi,
test::{QueryOutcome, TestValidator},
};
/// A freshly instantiated controller application has no registered local worker.
#[tokio::test(flavor = "multi_thread")]
async fn single_chain_test() {
    // Spin up a validator running the current module and deploy the app.
    let (validator, module_id) =
        TestValidator::with_current_module::<ControllerAbi, (), ()>().await;
    let mut chain = validator.new_chain().await;
    let app_id = chain.create_application(module_id, (), (), vec![]).await;

    // Query the localWorker field - should be null for a freshly instantiated app
    let query = "query { localWorker { owner } }";
    let QueryOutcome { response, .. } = chain.graphql_query(app_id, query).await;
    assert!(response["localWorker"].is_null());
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.