repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/controller.rs | linera-service/src/controller.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
};
use futures::{lock::Mutex, stream::StreamExt, FutureExt};
use linera_base::identifiers::{ApplicationId, ChainId};
use linera_client::chain_listener::{ClientContext, ListenerCommand};
use linera_core::{
client::{ChainClient, ListeningMode},
node::NotificationStream,
worker::Reason,
};
use linera_sdk::abis::controller::{LocalWorkerState, Operation, WorkerCommand};
use serde_json::json;
use tokio::{
select,
sync::mpsc::{self, UnboundedSender},
};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info};
use crate::task_processor::{OperatorMap, TaskProcessor};
/// An update message sent to a TaskProcessor to change its set of applications.
#[derive(Debug)]
pub struct Update {
    /// The full new set of applications the processor should handle; an empty
    /// vector tells the processor to stop processing applications.
    pub application_ids: Vec<ApplicationId>,
}
/// Handle to a spawned `TaskProcessor` task, used to push [`Update`]s to it.
struct ProcessorHandle {
    // A failed send on this channel means the processor task has stopped.
    update_sender: mpsc::UnboundedSender<Update>,
}
/// Watches a controller chain and manages one `TaskProcessor` per service chain
/// declared in the controller application's state.
pub struct Controller<Ctx: ClientContext> {
    /// The chain running the controller application.
    chain_id: ChainId,
    /// The controller application being queried and operated on.
    controller_id: ApplicationId,
    /// Shared client context, used to create chain clients for service chains.
    context: Arc<Mutex<Ctx>>,
    /// Client for the controller chain itself.
    chain_client: ChainClient<Ctx::Environment>,
    /// Token that shuts down this controller and (via child tokens) its processors.
    cancellation_token: CancellationToken,
    /// Stream of notifications from the controller chain.
    notifications: NotificationStream,
    /// Operators available locally; their keys are advertised as capabilities.
    operators: OperatorMap,
    /// One handle per service chain with a running processor.
    processors: BTreeMap<ChainId, ProcessorHandle>,
    /// Channel to instruct the chain listener to start/stop listening to chains.
    command_sender: UnboundedSender<ListenerCommand>,
}
impl<Ctx> Controller<Ctx>
where
    Ctx: ClientContext + Send + Sync + 'static,
    Ctx::Environment: 'static,
    <Ctx::Environment as linera_core::Environment>::Storage: Clone,
{
    /// Creates a new controller for `chain_id` / `controller_id`.
    ///
    /// Subscribes to the chain client's notification stream immediately;
    /// panics if the subscription fails.
    pub fn new(
        chain_id: ChainId,
        controller_id: ApplicationId,
        context: Arc<Mutex<Ctx>>,
        chain_client: ChainClient<Ctx::Environment>,
        cancellation_token: CancellationToken,
        operators: OperatorMap,
        command_sender: UnboundedSender<ListenerCommand>,
    ) -> Self {
        let notifications = chain_client.subscribe().expect("client subscription");
        Self {
            chain_id,
            controller_id,
            context,
            chain_client,
            cancellation_token,
            notifications,
            operators,
            processors: BTreeMap::new(),
            command_sender,
        }
    }

    /// Main loop: processes the controller state once up front, then again on
    /// every `NewBlock` notification, until the cancellation token fires.
    pub async fn run(mut self) {
        info!(
            "Watching for notifications for controller chain {}",
            self.chain_id
        );
        // Process once at startup so we don't wait for the first block.
        self.process_controller_state().await;
        loop {
            select! {
                Some(notification) = self.notifications.next() => {
                    // Only new blocks can change the application state we care about.
                    if let Reason::NewBlock { .. } = notification.reason {
                        debug!("Processing notification on controller chain {}", self.chain_id);
                        self.process_controller_state().await;
                    }
                }
                _ = self.cancellation_token.cancelled().fuse() => {
                    break;
                }
            }
        }
        debug!("Notification stream ended.");
    }

    /// Reads the controller application's state and reconciles the local set of
    /// task processors with it: registers this worker if needed, spawns/updates
    /// processors for active service chains, winds down processors for chains
    /// that disappeared, and tells the chain listener which chains changed.
    async fn process_controller_state(&mut self) {
        let state = match self.query_controller_state().await {
            Ok(state) => state,
            Err(error) => {
                // Transient query errors are logged; we retry on the next block.
                error!("Error reading controller state: {error}");
                return;
            }
        };
        let Some(worker) = state.local_worker else {
            // Worker needs to be registered.
            self.register_worker().await;
            return;
        };
        assert_eq!(
            worker.owner,
            self.chain_client
                .preferred_owner()
                .expect("The current wallet should own the chain being watched"),
            "We should be registered with the current account owner."
        );
        // Build a map of ChainId -> Vec<ApplicationId> from local_services
        let mut chain_apps: BTreeMap<ChainId, Vec<ApplicationId>> = BTreeMap::new();
        for service in &state.local_services {
            chain_apps
                .entry(service.chain_id)
                .or_default()
                .push(service.application_id);
        }
        // Snapshot the previously-known chains before we spawn new processors,
        // so `new_chains` below is computed against the old set.
        let old_chains: BTreeSet<_> = self.processors.keys().cloned().collect();
        // Update or spawn processors for each chain
        for (service_chain_id, application_ids) in chain_apps {
            if let Err(err) = self
                .update_or_spawn_processor(service_chain_id, application_ids)
                .await
            {
                error!("Error updating or spawning processor: {err}");
                return;
            }
        }
        // Send empty updates to processors for chains no longer in the state
        // This effectively tells them to stop processing applications
        let active_chains: std::collections::BTreeSet<_> =
            state.local_services.iter().map(|s| s.chain_id).collect();
        let stale_chains: BTreeSet<_> = self
            .processors
            .keys()
            .filter(|chain_id| !active_chains.contains(chain_id))
            .cloned()
            .collect();
        for chain_id in &stale_chains {
            if let Some(handle) = self.processors.get(chain_id) {
                let update = Update {
                    application_ids: Vec::new(),
                };
                if handle.update_sender.send(update).is_err() {
                    // Processor has stopped, remove it
                    self.processors.remove(chain_id);
                }
            }
        }
        // Ask the listener to follow chains that are active but were not known
        // before this reconciliation, and to drop the stale ones.
        let new_chains: BTreeMap<_, _> = active_chains
            .difference(&old_chains)
            .map(|chain_id| (*chain_id, ListeningMode::FullChain))
            .collect();
        if let Err(err) = self
            .command_sender
            .send(ListenerCommand::Listen(new_chains))
        {
            error!(%err, "error sending a command to chain listener");
        }
        if let Err(err) = self
            .command_sender
            .send(ListenerCommand::StopListening(stale_chains))
        {
            error!(%err, "error sending a command to chain listener");
        }
    }

    /// Submits a `RegisterWorker` operation to the controller application,
    /// advertising the locally available operator keys as capabilities.
    async fn register_worker(&mut self) {
        let capabilities = self.operators.keys().cloned().collect();
        let command = WorkerCommand::RegisterWorker { capabilities };
        let owner = self
            .chain_client
            .preferred_owner()
            .expect("The current wallet should own the chain being watched");
        // The command is BCS-encoded as a user operation for the controller app.
        let bytes =
            bcs::to_bytes(&Operation::ExecuteWorkerCommand { owner, command }).expect("bcs bytes");
        let operation = linera_execution::Operation::User {
            application_id: self.controller_id,
            bytes,
        };
        if let Err(e) = self
            .chain_client
            .execute_operations(vec![operation], vec![])
            .await
        {
            // TODO: handle leader timeouts
            error!("Failed to execute worker on-chain registration: {e}");
        }
    }

    /// Sends `application_ids` to the processor for `service_chain_id`, spawning
    /// a new processor if none exists or if the existing one has stopped.
    async fn update_or_spawn_processor(
        &mut self,
        service_chain_id: ChainId,
        application_ids: Vec<ApplicationId>,
    ) -> Result<(), anyhow::Error> {
        if let Some(handle) = self.processors.get(&service_chain_id) {
            // Processor exists, send update
            let update = Update {
                application_ids: application_ids.clone(),
            };
            if handle.update_sender.send(update).is_err() {
                // Processor has stopped, remove and respawn
                self.processors.remove(&service_chain_id);
                self.spawn_processor(service_chain_id, application_ids)
                    .await?;
            }
        } else {
            // No processor for this chain, spawn one
            self.spawn_processor(service_chain_id, application_ids)
                .await?;
        }
        Ok(())
    }

    /// Spawns a new `TaskProcessor` task for `service_chain_id` and records its
    /// update-channel handle. The processor gets a child cancellation token so
    /// it shuts down together with this controller.
    async fn spawn_processor(
        &mut self,
        service_chain_id: ChainId,
        application_ids: Vec<ApplicationId>,
    ) -> Result<(), anyhow::Error> {
        info!(
            "Spawning TaskProcessor for chain {} with applications {:?}",
            service_chain_id, application_ids
        );
        let (update_sender, update_receiver) = mpsc::unbounded_channel();
        let chain_client = self
            .context
            .lock()
            .await
            .make_chain_client(service_chain_id)
            .await?;
        let processor = TaskProcessor::new(
            service_chain_id,
            application_ids,
            chain_client,
            self.cancellation_token.child_token(),
            self.operators.clone(),
            Some(update_receiver),
        );
        tokio::spawn(processor.run());
        self.processors
            .insert(service_chain_id, ProcessorHandle { update_sender });
        Ok(())
    }

    /// Queries the controller application via GraphQL for its `localWorkerState`
    /// and deserializes the `data.localWorkerState` field of the JSON response.
    async fn query_controller_state(&mut self) -> Result<LocalWorkerState, anyhow::Error> {
        let query = "query { localWorkerState }";
        let bytes = serde_json::to_vec(&json!({"query": query}))?;
        let query = linera_execution::Query::User {
            application_id: self.controller_id,
            bytes,
        };
        let linera_execution::QueryOutcome {
            response,
            operations: _,
        } = self.chain_client.query_application(query, None).await?;
        let linera_execution::QueryResponse::User(response) = response else {
            anyhow::bail!("cannot get a system response for a user query");
        };
        let mut response: serde_json::Value = serde_json::from_slice(&response)?;
        let state = serde_json::from_value(response["data"]["localWorkerState"].take())?;
        Ok(state)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/storage.rs | linera-service/src/storage.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{fmt, path::PathBuf, str::FromStr};
use anyhow::{anyhow, bail};
use async_trait::async_trait;
use linera_client::config::GenesisConfig;
use linera_execution::WasmRuntime;
use linera_storage::{DbStorage, Storage, DEFAULT_NAMESPACE};
#[cfg(feature = "storage-service")]
use linera_storage_service::{
client::StorageServiceDatabase,
common::{StorageServiceStoreConfig, StorageServiceStoreInternalConfig},
};
#[cfg(feature = "dynamodb")]
use linera_views::dynamo_db::{DynamoDbDatabase, DynamoDbStoreConfig, DynamoDbStoreInternalConfig};
#[cfg(feature = "rocksdb")]
use linera_views::rocks_db::{
PathWithGuard, RocksDbDatabase, RocksDbSpawnMode, RocksDbStoreConfig,
RocksDbStoreInternalConfig,
};
use linera_views::{
lru_prefix_cache::StorageCacheConfig,
memory::{MemoryDatabase, MemoryStoreConfig},
store::{KeyValueDatabase, KeyValueStore},
};
use serde::{Deserialize, Serialize};
use tracing::error;
#[cfg(all(feature = "rocksdb", feature = "scylladb"))]
use {
linera_storage::ChainStatesFirstAssignment,
linera_views::backends::dual::{DualDatabase, DualStoreConfig},
std::path::Path,
};
#[cfg(feature = "scylladb")]
use {
linera_views::scylla_db::{ScyllaDbDatabase, ScyllaDbStoreConfig, ScyllaDbStoreInternalConfig},
std::num::NonZeroU16,
tracing::debug,
};
/// Command-line options shared by every storage backend (concurrency limits,
/// cache sizing, and replication factor).
#[derive(Clone, Debug, clap::Parser)]
pub struct CommonStorageOptions {
    /// The maximal number of simultaneous queries to the database
    #[arg(long, global = true)]
    pub storage_max_concurrent_queries: Option<usize>,
    /// The maximal number of simultaneous stream queries to the database
    #[arg(long, default_value = "10", global = true)]
    pub storage_max_stream_queries: usize,
    /// The maximal memory used in the storage cache.
    #[arg(long, default_value = "10000000", global = true)]
    pub storage_max_cache_size: usize,
    /// The maximal size of a value entry in the storage cache.
    #[arg(long, default_value = "1000000", global = true)]
    pub storage_max_value_entry_size: usize,
    /// The maximal size of a find-keys entry in the storage cache.
    #[arg(long, default_value = "1000000", global = true)]
    pub storage_max_find_keys_entry_size: usize,
    /// The maximal size of a find-key-values entry in the storage cache.
    #[arg(long, default_value = "1000000", global = true)]
    pub storage_max_find_key_values_entry_size: usize,
    /// The maximal number of entries in the storage cache.
    #[arg(long, default_value = "1000", global = true)]
    pub storage_max_cache_entries: usize,
    /// The maximal memory used in the value cache.
    #[arg(long, default_value = "10000000", global = true)]
    pub storage_max_cache_value_size: usize,
    /// The maximal memory used in the find_keys_by_prefix cache.
    #[arg(long, default_value = "10000000", global = true)]
    pub storage_max_cache_find_keys_size: usize,
    /// The maximal memory used in the find_key_values_by_prefix cache.
    #[arg(long, default_value = "10000000", global = true)]
    pub storage_max_cache_find_key_values_size: usize,
    /// The replication factor for the keyspace
    #[arg(long, default_value = "1", global = true)]
    pub storage_replication_factor: u32,
}
impl CommonStorageOptions {
    /// Assembles the cache configuration from the corresponding
    /// `storage_max_*` command-line options.
    pub fn storage_cache_config(&self) -> StorageCacheConfig {
        // Destructure once so each option maps directly onto its cache field.
        let &Self {
            storage_max_cache_size: max_cache_size,
            storage_max_value_entry_size: max_value_entry_size,
            storage_max_find_keys_entry_size: max_find_keys_entry_size,
            storage_max_find_key_values_entry_size: max_find_key_values_entry_size,
            storage_max_cache_entries: max_cache_entries,
            storage_max_cache_value_size: max_cache_value_size,
            storage_max_cache_find_keys_size: max_cache_find_keys_size,
            storage_max_cache_find_key_values_size: max_cache_find_key_values_size,
            ..
        } = self;
        StorageCacheConfig {
            max_cache_size,
            max_value_entry_size,
            max_find_keys_entry_size,
            max_find_key_values_entry_size,
            max_cache_entries,
            max_cache_value_size,
            max_cache_find_keys_size,
            max_cache_find_key_values_size,
        }
    }
}
/// The configuration of the key value store in use.
///
/// This is the fully-resolved form: an [`InnerStorageConfig`] combined with the
/// [`CommonStorageOptions`] via `StorageConfig::add_common_storage_options`.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum StoreConfig {
    /// The memory key value store
    Memory {
        config: MemoryStoreConfig,
        namespace: String,
        // Memory stores are re-initialized from this genesis file on every run.
        genesis_path: PathBuf,
    },
    /// The storage service key-value store
    #[cfg(feature = "storage-service")]
    StorageService {
        config: StorageServiceStoreConfig,
        namespace: String,
    },
    /// The RocksDB key value store
    #[cfg(feature = "rocksdb")]
    RocksDb {
        config: RocksDbStoreConfig,
        namespace: String,
    },
    /// The DynamoDB key value store
    #[cfg(feature = "dynamodb")]
    DynamoDb {
        config: DynamoDbStoreConfig,
        namespace: String,
    },
    /// The ScyllaDB key value store
    #[cfg(feature = "scylladb")]
    ScyllaDb {
        config: ScyllaDbStoreConfig,
        namespace: String,
    },
    /// The combined RocksDB + ScyllaDB key value store.
    #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
    DualRocksDbScyllaDb {
        config: DualStoreConfig<RocksDbStoreConfig, ScyllaDbStoreConfig>,
        namespace: String,
    },
}
/// The description of a storage implementation.
///
/// This is the backend-selection part of a [`StorageConfig`], parsed from the
/// command-line storage string (e.g. `rocksdb:dir:spawn_mode:namespace`).
#[derive(Clone, Debug)]
#[cfg_attr(any(test), derive(Eq, PartialEq))]
pub enum InnerStorageConfig {
    /// The memory description.
    Memory {
        /// The path to the genesis configuration. This is needed because we reinitialize
        /// memory databases from the genesis config everytime.
        genesis_path: PathBuf,
    },
    /// The storage service description.
    #[cfg(feature = "storage-service")]
    Service {
        /// The endpoint used.
        endpoint: String,
    },
    /// The RocksDB description.
    #[cfg(feature = "rocksdb")]
    RocksDb {
        /// The path used.
        path: PathBuf,
        /// Whether to use `block_in_place` or `spawn_blocking`.
        spawn_mode: RocksDbSpawnMode,
    },
    /// The DynamoDB description.
    #[cfg(feature = "dynamodb")]
    DynamoDb {
        /// Whether to use the DynamoDB Local system
        use_dynamodb_local: bool,
    },
    /// The ScyllaDB description.
    #[cfg(feature = "scylladb")]
    ScyllaDb {
        /// The URI for accessing the database.
        uri: String,
    },
    /// The combined RocksDB + ScyllaDB description.
    #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
    DualRocksDbScyllaDb {
        /// The path used.
        path_with_guard: PathWithGuard,
        /// Whether to use `block_in_place` or `spawn_blocking`.
        spawn_mode: RocksDbSpawnMode,
        /// The URI for accessing the database.
        uri: String,
    },
}
/// The description of a storage implementation, together with its namespace.
#[derive(Clone, Debug)]
#[cfg_attr(any(test), derive(Eq, PartialEq))]
pub struct StorageConfig {
    /// The inner storage config.
    pub inner_storage_config: InnerStorageConfig,
    /// The namespace used
    pub namespace: String,
}
// URI-style scheme prefixes recognized by `StorageConfig::from_str`.
const MEMORY: &str = "memory:";
#[cfg(feature = "storage-service")]
const STORAGE_SERVICE: &str = "service:";
#[cfg(feature = "rocksdb")]
const ROCKS_DB: &str = "rocksdb:";
#[cfg(feature = "dynamodb")]
const DYNAMO_DB: &str = "dynamodb:";
#[cfg(feature = "scylladb")]
const SCYLLA_DB: &str = "scylladb:";
#[cfg(all(feature = "rocksdb", feature = "scylladb"))]
const DUAL_ROCKS_DB_SCYLLA_DB: &str = "dualrocksdbscylladb:";
impl FromStr for StorageConfig {
    type Err = anyhow::Error;

    /// Parses a storage specification string of one of the forms:
    /// * `memory:GENESIS_PATH[:NAMESPACE]`
    /// * `service:tcp:HOST:PORT:NAMESPACE` (feature `storage-service`)
    /// * `rocksdb:DIR[:SPAWN_MODE[:NAMESPACE]]` (feature `rocksdb`)
    /// * `dynamodb:TABLE[:env|dynamodb_local]` (feature `dynamodb`)
    /// * `scylladb:[tcp:HOST:PORT][:tableNAMESPACE]` (feature `scylladb`)
    /// * `dualrocksdbscylladb:DIR:SPAWN_MODE:tcp:HOST:PORT[:NAMESPACE]`
    ///   (features `rocksdb` + `scylladb`)
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        if let Some(s) = input.strip_prefix(MEMORY) {
            let parts = s.split(':').collect::<Vec<_>>();
            if parts.len() == 1 {
                // Only a genesis path: use the default namespace.
                let genesis_path = parts[0].to_string().into();
                let namespace = DEFAULT_NAMESPACE.to_string();
                let inner_storage_config = InnerStorageConfig::Memory { genesis_path };
                return Ok(StorageConfig {
                    inner_storage_config,
                    namespace,
                });
            }
            if parts.len() != 2 {
                bail!("We should have one genesis config path and one optional namespace");
            }
            let genesis_path = parts[0].to_string().into();
            let namespace = parts[1].to_string();
            let inner_storage_config = InnerStorageConfig::Memory { genesis_path };
            return Ok(StorageConfig {
                inner_storage_config,
                namespace,
            });
        }
        #[cfg(feature = "storage-service")]
        if let Some(s) = input.strip_prefix(STORAGE_SERVICE) {
            if s.is_empty() {
                bail!(
                    "For Storage service, the formatting has to be service:endpoint:namespace,\
                    example service:tcp:127.0.0.1:7878:table_do_my_test"
                );
            }
            let parts = s.split(':').collect::<Vec<_>>();
            if parts.len() != 4 {
                // Four parts: protocol, host, port and namespace.
                bail!("We should have a protocol, a hostname, a port and a namespace");
            }
            let protocol = parts[0];
            if protocol != "tcp" {
                bail!("Only allowed protocol is tcp");
            }
            // Re-join host and port into a single `host:port` endpoint string.
            let endpoint = parts[1];
            let port = parts[2];
            let mut endpoint = endpoint.to_string();
            endpoint.push(':');
            endpoint.push_str(port);
            let endpoint = endpoint.to_string();
            let namespace = parts[3].to_string();
            let inner_storage_config = InnerStorageConfig::Service { endpoint };
            return Ok(StorageConfig {
                inner_storage_config,
                namespace,
            });
        }
        #[cfg(feature = "rocksdb")]
        if let Some(s) = input.strip_prefix(ROCKS_DB) {
            if s.is_empty() {
                bail!(
                    "For RocksDB, the formatting has to be rocksdb:directory or rocksdb:directory:spawn_mode:namespace");
            }
            let parts = s.split(':').collect::<Vec<_>>();
            if parts.len() == 1 {
                // Only a directory: default spawn mode and namespace.
                let path = parts[0].to_string().into();
                let namespace = DEFAULT_NAMESPACE.to_string();
                let spawn_mode = RocksDbSpawnMode::SpawnBlocking;
                let inner_storage_config = InnerStorageConfig::RocksDb { path, spawn_mode };
                return Ok(StorageConfig {
                    inner_storage_config,
                    namespace,
                });
            }
            if parts.len() == 2 || parts.len() == 3 {
                let path = parts[0].to_string().into();
                let spawn_mode = match parts[1] {
                    "spawn_blocking" => Ok(RocksDbSpawnMode::SpawnBlocking),
                    "block_in_place" => Ok(RocksDbSpawnMode::BlockInPlace),
                    // "runtime" picks the mode from the current Tokio runtime flavor.
                    "runtime" => Ok(RocksDbSpawnMode::get_spawn_mode_from_runtime()),
                    _ => Err(anyhow!("Failed to parse {} as a spawn_mode", parts[1])),
                }?;
                let namespace = if parts.len() == 2 {
                    DEFAULT_NAMESPACE.to_string()
                } else {
                    parts[2].to_string()
                };
                let inner_storage_config = InnerStorageConfig::RocksDb { path, spawn_mode };
                return Ok(StorageConfig {
                    inner_storage_config,
                    namespace,
                });
            }
            bail!("We should have one, two or three parts");
        }
        #[cfg(feature = "dynamodb")]
        if let Some(s) = input.strip_prefix(DYNAMO_DB) {
            let mut parts = s.splitn(2, ':');
            let namespace = parts
                .next()
                .ok_or_else(|| anyhow!("Missing DynamoDB table name, e.g. {DYNAMO_DB}TABLE"))?
                .to_string();
            // Optional second part selects DynamoDB Local; default ("env") uses
            // the endpoint from the environment.
            let use_dynamodb_local = match parts.next() {
                None | Some("env") => false,
                Some("dynamodb_local") => true,
                Some(unknown) => {
                    bail!(
                        "Invalid DynamoDB endpoint {unknown:?}. \
                        Expected {DYNAMO_DB}TABLE:[env|dynamodb_local]"
                    );
                }
            };
            let inner_storage_config = InnerStorageConfig::DynamoDb { use_dynamodb_local };
            return Ok(StorageConfig {
                inner_storage_config,
                namespace,
            });
        }
        #[cfg(feature = "scylladb")]
        if let Some(s) = input.strip_prefix(SCYLLA_DB) {
            let mut uri: Option<String> = None;
            let mut namespace: Option<String> = None;
            let parse_error: &'static str = "Correct format is tcp:db_hostname:port.";
            if !s.is_empty() {
                // The URI (`tcp:host:port`) and namespace (`table...`) segments
                // may appear in either order, each at most once.
                let mut parts = s.split(':');
                while let Some(part) = parts.next() {
                    match part {
                        "tcp" => {
                            let address = parts.next().ok_or_else(|| {
                                anyhow!("Failed to find address for {s}. {parse_error}")
                            })?;
                            let port_str = parts.next().ok_or_else(|| {
                                anyhow!("Failed to find port for {s}. {parse_error}")
                            })?;
                            let port = NonZeroU16::from_str(port_str).map_err(|_| {
                                anyhow!(
                                    "Failed to parse port {port_str} for {s}. {parse_error}",
                                )
                            })?;
                            if uri.is_some() {
                                bail!("The uri has already been assigned");
                            }
                            uri = Some(format!("{}:{}", &address, port));
                        }
                        _ if part.starts_with("table") => {
                            if namespace.is_some() {
                                bail!("The namespace has already been assigned");
                            }
                            namespace = Some(part.to_string());
                        }
                        _ => {
                            bail!("the entry \"{part}\" is not matching");
                        }
                    }
                }
            }
            let uri = uri.unwrap_or("localhost:9042".to_string());
            let namespace = namespace.unwrap_or(DEFAULT_NAMESPACE.to_string());
            let inner_storage_config = InnerStorageConfig::ScyllaDb { uri };
            debug!("ScyllaDB connection info: {:?}", inner_storage_config);
            return Ok(StorageConfig {
                inner_storage_config,
                namespace,
            });
        }
        #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
        if let Some(s) = input.strip_prefix(DUAL_ROCKS_DB_SCYLLA_DB) {
            let parts = s.split(':').collect::<Vec<_>>();
            if parts.len() != 5 && parts.len() != 6 {
                bail!(
                    "For DualRocksDbScyllaDb, the formatting has to be dualrocksdbscylladb:directory:mode:tcp:hostname:port:namespace"
                );
            }
            let path = Path::new(parts[0]);
            let path = path.to_path_buf();
            let path_with_guard = PathWithGuard::new(path);
            let spawn_mode = match parts[1] {
                "spawn_blocking" => Ok(RocksDbSpawnMode::SpawnBlocking),
                "block_in_place" => Ok(RocksDbSpawnMode::BlockInPlace),
                "runtime" => Ok(RocksDbSpawnMode::get_spawn_mode_from_runtime()),
                _ => Err(anyhow!("Failed to parse {} as a spawn_mode", parts[1])),
            }?;
            let protocol = parts[2];
            if protocol != "tcp" {
                bail!("The only allowed protocol is tcp");
            }
            let address = parts[3];
            let port_str = parts[4];
            let port = NonZeroU16::from_str(port_str)
                .map_err(|_| anyhow!("Failed to parse port {port_str} for {s}"))?;
            let uri = format!("{}:{}", &address, port);
            let inner_storage_config = InnerStorageConfig::DualRocksDbScyllaDb {
                path_with_guard,
                spawn_mode,
                uri,
            };
            let namespace = if parts.len() == 5 {
                DEFAULT_NAMESPACE.to_string()
            } else {
                parts[5].to_string()
            };
            return Ok(StorageConfig {
                inner_storage_config,
                namespace,
            });
        }
        // Nothing matched: list the backends compiled into this binary to help
        // the user, then fail.
        error!("available storage: memory");
        #[cfg(feature = "storage-service")]
        error!("Also available is linera-storage-service");
        #[cfg(feature = "rocksdb")]
        error!("Also available is RocksDB");
        #[cfg(feature = "dynamodb")]
        error!("Also available is DynamoDB");
        #[cfg(feature = "scylladb")]
        error!("Also available is ScyllaDB");
        #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
        error!("Also available is DualRocksDbScyllaDb");
        Err(anyhow!("The input has not matched: {input}"))
    }
}
impl StorageConfig {
    /// Appends a per-shard subdirectory (`shard_<n>`) to the RocksDB path of a
    /// dual RocksDB/ScyllaDB configuration and creates it on disk. A no-op for
    /// every other backend.
    pub fn maybe_append_shard_path(&mut self, _shard: usize) -> std::io::Result<()> {
        match &mut self.inner_storage_config {
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            InnerStorageConfig::DualRocksDbScyllaDb {
                path_with_guard,
                spawn_mode: _,
                uri: _,
            } => {
                let shard_str = format!("shard_{}", _shard);
                path_with_guard.path_buf.push(shard_str);
                std::fs::create_dir_all(&path_with_guard.path_buf)
            }
            _ => Ok(()),
        }
    }

    /// The addition of the common config to get a full configuration
    ///
    /// Combines this backend description with the shared command-line options
    /// (concurrency limits, cache sizing, replication factor) to produce a
    /// runnable [`StoreConfig`].
    pub fn add_common_storage_options(
        &self,
        options: &CommonStorageOptions,
    ) -> Result<StoreConfig, anyhow::Error> {
        let namespace = self.namespace.clone();
        match &self.inner_storage_config {
            InnerStorageConfig::Memory { genesis_path } => {
                let config = MemoryStoreConfig {
                    max_stream_queries: options.storage_max_stream_queries,
                    kill_on_drop: false,
                };
                let genesis_path = genesis_path.clone();
                Ok(StoreConfig::Memory {
                    config,
                    namespace,
                    genesis_path,
                })
            }
            #[cfg(feature = "storage-service")]
            InnerStorageConfig::Service { endpoint } => {
                let inner_config = StorageServiceStoreInternalConfig {
                    endpoint: endpoint.clone(),
                    max_concurrent_queries: options.storage_max_concurrent_queries,
                    max_stream_queries: options.storage_max_stream_queries,
                };
                let config = StorageServiceStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                Ok(StoreConfig::StorageService { config, namespace })
            }
            #[cfg(feature = "rocksdb")]
            InnerStorageConfig::RocksDb { path, spawn_mode } => {
                let path_with_guard = PathWithGuard::new(path.to_path_buf());
                let inner_config = RocksDbStoreInternalConfig {
                    spawn_mode: *spawn_mode,
                    path_with_guard,
                    max_stream_queries: options.storage_max_stream_queries,
                };
                let config = RocksDbStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                Ok(StoreConfig::RocksDb { config, namespace })
            }
            #[cfg(feature = "dynamodb")]
            InnerStorageConfig::DynamoDb { use_dynamodb_local } => {
                let inner_config = DynamoDbStoreInternalConfig {
                    use_dynamodb_local: *use_dynamodb_local,
                    max_concurrent_queries: options.storage_max_concurrent_queries,
                    max_stream_queries: options.storage_max_stream_queries,
                };
                let config = DynamoDbStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                Ok(StoreConfig::DynamoDb { config, namespace })
            }
            #[cfg(feature = "scylladb")]
            InnerStorageConfig::ScyllaDb { uri } => {
                let inner_config = ScyllaDbStoreInternalConfig {
                    uri: uri.clone(),
                    max_stream_queries: options.storage_max_stream_queries,
                    max_concurrent_queries: options.storage_max_concurrent_queries,
                    replication_factor: options.storage_replication_factor,
                };
                let config = ScyllaDbStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                Ok(StoreConfig::ScyllaDb { config, namespace })
            }
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            InnerStorageConfig::DualRocksDbScyllaDb {
                path_with_guard,
                spawn_mode,
                uri,
            } => {
                // The dual store pairs a RocksDB config (first) with a ScyllaDB
                // config (second); both share the same cache options.
                let inner_config = RocksDbStoreInternalConfig {
                    spawn_mode: *spawn_mode,
                    path_with_guard: path_with_guard.clone(),
                    max_stream_queries: options.storage_max_stream_queries,
                };
                let first_config = RocksDbStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                let inner_config = ScyllaDbStoreInternalConfig {
                    uri: uri.clone(),
                    max_stream_queries: options.storage_max_stream_queries,
                    max_concurrent_queries: options.storage_max_concurrent_queries,
                    replication_factor: options.storage_replication_factor,
                };
                let second_config = ScyllaDbStoreConfig {
                    inner_config,
                    storage_cache_config: options.storage_cache_config(),
                };
                let config = DualStoreConfig {
                    first_config,
                    second_config,
                };
                Ok(StoreConfig::DualRocksDbScyllaDb { config, namespace })
            }
        }
    }
}
/// Renders the configuration back into the string syntax accepted by
/// `StorageConfig::from_str`.
impl fmt::Display for StorageConfig {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let namespace = &self.namespace;
        match &self.inner_storage_config {
            #[cfg(feature = "storage-service")]
            InnerStorageConfig::Service { endpoint } => {
                write!(f, "service:tcp:{}:{}", endpoint, namespace)
            }
            InnerStorageConfig::Memory { genesis_path } => {
                write!(f, "memory:{}:{}", genesis_path.display(), namespace)
            }
            #[cfg(feature = "rocksdb")]
            InnerStorageConfig::RocksDb { path, spawn_mode } => {
                let spawn_mode = spawn_mode.to_string();
                write!(f, "rocksdb:{}:{}:{}", path.display(), spawn_mode, namespace)
            }
            #[cfg(feature = "dynamodb")]
            InnerStorageConfig::DynamoDb { use_dynamodb_local } => match use_dynamodb_local {
                true => write!(f, "dynamodb:{}:dynamodb_local", namespace),
                false => write!(f, "dynamodb:{}:env", namespace),
            },
            #[cfg(feature = "scylladb")]
            InnerStorageConfig::ScyllaDb { uri } => {
                write!(f, "scylladb:tcp:{}:{}", uri, namespace)
            }
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            InnerStorageConfig::DualRocksDbScyllaDb {
                path_with_guard,
                spawn_mode,
                uri,
            } => {
                write!(
                    f,
                    "dualrocksdbscylladb:{}:{}:tcp:{}:{}",
                    path_with_guard.path_buf.display(),
                    spawn_mode,
                    uri,
                    namespace
                )
            }
        }
    }
}
/// A job that can be run on top of a fully-connected [`Storage`] instance.
#[async_trait]
pub trait Runnable {
    /// The value produced by running the job.
    type Output;

    async fn run<S>(self, storage: S) -> Self::Output
    where
        S: Storage + Clone + Send + Sync + 'static;
}
/// A job that runs against a raw key-value database (before any `Storage`
/// wrapper is built), given the database's config and a namespace.
#[async_trait]
pub trait RunnableWithStore {
    /// The value produced by running the job.
    type Output;

    async fn run<D>(
        self,
        config: D::Config,
        namespace: String,
    ) -> Result<Self::Output, anyhow::Error>
    where
        D: KeyValueDatabase + Clone + Send + Sync + 'static,
        D::Store: KeyValueStore + Clone + Send + Sync + 'static,
        D::Error: Send + Sync;
}
impl StoreConfig {
    /// Connects to the configured backend, wraps it into a [`DbStorage`], and
    /// runs `job` on it. For the memory backend the storage is re-initialized
    /// from the genesis configuration first, since it is empty on every start.
    pub async fn run_with_storage<Job>(
        self,
        wasm_runtime: Option<WasmRuntime>,
        allow_application_logs: bool,
        job: Job,
    ) -> Result<Job::Output, anyhow::Error>
    where
        Job: Runnable,
    {
        match self {
            StoreConfig::Memory {
                config,
                namespace,
                genesis_path,
            } => {
                let mut storage = DbStorage::<MemoryDatabase, _>::maybe_create_and_connect(
                    &config,
                    &namespace,
                    wasm_runtime,
                )
                .await?
                .with_allow_application_logs(allow_application_logs);
                let genesis_config = crate::util::read_json::<GenesisConfig>(genesis_path)?;
                // Memory storage must be initialized every time.
                genesis_config.initialize_storage(&mut storage).await?;
                Ok(job.run(storage).await)
            }
            #[cfg(feature = "storage-service")]
            StoreConfig::StorageService { config, namespace } => {
                let storage = DbStorage::<StorageServiceDatabase, _>::connect(
                    &config,
                    &namespace,
                    wasm_runtime,
                )
                .await?
                .with_allow_application_logs(allow_application_logs);
                Ok(job.run(storage).await)
            }
            #[cfg(feature = "rocksdb")]
            StoreConfig::RocksDb { config, namespace } => {
                let storage =
                    DbStorage::<RocksDbDatabase, _>::connect(&config, &namespace, wasm_runtime)
                        .await?
                        .with_allow_application_logs(allow_application_logs);
                Ok(job.run(storage).await)
            }
            #[cfg(feature = "dynamodb")]
            StoreConfig::DynamoDb { config, namespace } => {
                let storage =
                    DbStorage::<DynamoDbDatabase, _>::connect(&config, &namespace, wasm_runtime)
                        .await?
                        .with_allow_application_logs(allow_application_logs);
                Ok(job.run(storage).await)
            }
            #[cfg(feature = "scylladb")]
            StoreConfig::ScyllaDb { config, namespace } => {
                let storage =
                    DbStorage::<ScyllaDbDatabase, _>::connect(&config, &namespace, wasm_runtime)
                        .await?
                        .with_allow_application_logs(allow_application_logs);
                Ok(job.run(storage).await)
            }
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            StoreConfig::DualRocksDbScyllaDb { config, namespace } => {
                let storage = DbStorage::<
                    DualDatabase<RocksDbDatabase, ScyllaDbDatabase, ChainStatesFirstAssignment>,
                    _,
                >::connect(&config, &namespace, wasm_runtime)
                .await?
                .with_allow_application_logs(allow_application_logs);
                Ok(job.run(storage).await)
            }
        }
    }

    /// Runs `job` directly against the configured key-value database, without
    /// building a `DbStorage`. Not supported for the memory backend, which has
    /// no persistent store to administer.
    #[allow(unused_variables)]
    pub async fn run_with_store<Job>(self, job: Job) -> Result<Job::Output, anyhow::Error>
    where
        Job: RunnableWithStore,
    {
        match self {
            StoreConfig::Memory { .. } => {
                Err(anyhow!("Cannot run admin operations on the memory store"))
            }
            #[cfg(feature = "storage-service")]
            StoreConfig::StorageService { config, namespace } => {
                Ok(job.run::<StorageServiceDatabase>(config, namespace).await?)
            }
            #[cfg(feature = "rocksdb")]
            StoreConfig::RocksDb { config, namespace } => {
                Ok(job.run::<RocksDbDatabase>(config, namespace).await?)
            }
            #[cfg(feature = "dynamodb")]
            StoreConfig::DynamoDb { config, namespace } => {
                Ok(job.run::<DynamoDbDatabase>(config, namespace).await?)
            }
            #[cfg(feature = "scylladb")]
            StoreConfig::ScyllaDb { config, namespace } => {
                Ok(job.run::<ScyllaDbDatabase>(config, namespace).await?)
            }
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            StoreConfig::DualRocksDbScyllaDb { config, namespace } => Ok(job
                .run::<DualDatabase<RocksDbDatabase, ScyllaDbDatabase, ChainStatesFirstAssignment>>(
                    config, namespace,
                )
                .await?),
        }
    }

    /// Initializes the configured store from the genesis configuration.
    pub async fn initialize(self, config: &GenesisConfig) -> Result<(), anyhow::Error> {
        self.run_with_store(InitializeStorageJob(config)).await
    }
}
/// Admin job that creates the namespace (if needed) and writes the genesis
/// configuration into a freshly-connected store.
struct InitializeStorageJob<'a>(&'a GenesisConfig);

#[async_trait]
impl RunnableWithStore for InitializeStorageJob<'_> {
    type Output = ();

    async fn run<D>(
        self,
        config: D::Config,
        namespace: String,
    ) -> Result<Self::Output, anyhow::Error>
    where
        D: KeyValueDatabase + Clone + Send + Sync + 'static,
        D::Store: KeyValueStore + Clone + Send + Sync + 'static,
        D::Error: Send + Sync,
    {
        // No Wasm runtime is needed just to write the genesis state.
        let mut storage =
            DbStorage::<D, _>::maybe_create_and_connect(&config, &namespace, None).await?;
        self.0.initialize_storage(&mut storage).await?;
        Ok(())
    }
}
#[test]
fn test_memory_storage_config_from_str() {
    // A bare genesis path falls back to the default namespace.
    let default_ns = StorageConfig::from_str("memory:path/to/genesis.json").unwrap();
    assert_eq!(
        default_ns,
        StorageConfig {
            inner_storage_config: InnerStorageConfig::Memory {
                genesis_path: PathBuf::from("path/to/genesis.json")
            },
            namespace: DEFAULT_NAMESPACE.into()
        }
    );
    // An explicit namespace after the genesis path is honored.
    let explicit_ns = StorageConfig::from_str("memory:path/to/genesis.json:namespace").unwrap();
    assert_eq!(
        explicit_ns,
        StorageConfig {
            inner_storage_config: InnerStorageConfig::Memory {
                genesis_path: PathBuf::from("path/to/genesis.json")
            },
            namespace: "namespace".into()
        }
    );
    // "memory" without the trailing colon is not a valid scheme.
    assert!(StorageConfig::from_str("memory").is_err());
}
#[cfg(feature = "storage-service")]
#[test]
fn test_shared_store_config_from_str() {
    // Well-formed: protocol, host, port and namespace.
    let parsed = StorageConfig::from_str("service:tcp:127.0.0.1:8942:linera").unwrap();
    let expected = StorageConfig {
        inner_storage_config: InnerStorageConfig::Service {
            endpoint: "127.0.0.1:8942".to_string(),
        },
        namespace: "linera".into(),
    };
    assert_eq!(parsed, expected);
    // Missing namespace and non-numeric port are both rejected.
    for bad in ["service:tcp:127.0.0.1:8942", "service:tcp:127.0.0.1:linera"] {
        assert!(StorageConfig::from_str(bad).is_err());
    }
}
#[cfg(feature = "rocksdb")]
#[test]
fn test_rocks_db_storage_config_from_str() {
assert!(StorageConfig::from_str("rocksdb_foo.db").is_err());
assert_eq!(
StorageConfig::from_str("rocksdb:foo.db").unwrap(),
StorageConfig {
inner_storage_config: InnerStorageConfig::RocksDb {
path: "foo.db".into(),
spawn_mode: RocksDbSpawnMode::SpawnBlocking,
},
namespace: DEFAULT_NAMESPACE.to_string()
}
);
assert_eq!(
StorageConfig::from_str("rocksdb:foo.db:block_in_place").unwrap(),
StorageConfig {
inner_storage_config: InnerStorageConfig::RocksDb {
path: "foo.db".into(),
spawn_mode: RocksDbSpawnMode::BlockInPlace,
},
namespace: DEFAULT_NAMESPACE.to_string()
}
);
assert_eq!(
StorageConfig::from_str("rocksdb:foo.db:block_in_place:chosen_namespace").unwrap(),
StorageConfig {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/util.rs | linera-service/src/util.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
io::{BufRead, BufReader, Write},
num::ParseIntError,
path::Path,
time::Duration,
};
use anyhow::{bail, Context as _, Result};
use async_graphql::http::GraphiQLSource;
use axum::response::{self, IntoResponse};
use http::Uri;
#[cfg(test)]
use linera_base::command::parse_version_message;
use linera_base::data_types::TimeDelta;
pub use linera_client::util::*;
use tracing::debug;
// Exported for readme e2e tests.
pub static DEFAULT_PAUSE_AFTER_LINERA_SERVICE_SECS: &str = "3";
pub static DEFAULT_PAUSE_AFTER_GQL_MUTATIONS_SECS: &str = "3";
/// Extension trait for [`tokio::process::Child`].
pub trait ChildExt: std::fmt::Debug {
    /// Returns `Ok(())` if the child process is still running, and an error
    /// describing its exit status if it has already terminated.
    fn ensure_is_running(&mut self) -> Result<()>;
}
impl ChildExt for tokio::process::Child {
    fn ensure_is_running(&mut self) -> Result<()> {
        // `try_wait` returns `Some(status)` only if the process has exited;
        // it does not block.
        if let Some(status) = self.try_wait().context("try_wait child process")? {
            bail!(
                "Child process {:?} already exited with status: {}",
                self,
                status
            );
        }
        debug!("Child process {:?} is running as expected.", self);
        Ok(())
    }
}
/// Opens the file at `path` and deserializes its contents from JSON into a
/// value of type `T`. Errors if the file cannot be opened or does not
/// deserialize.
pub fn read_json<T: serde::de::DeserializeOwned>(path: impl Into<std::path::PathBuf>) -> Result<T> {
    Ok(serde_json::from_reader(fs_err::File::open(path)?)?)
}
/// Expands to the name of the enclosing test function, obtained by stripping
/// the trailing `::{{closure}}` from `stdext::function_name!()`.
/// Panics (via `expect`) if the suffix is absent, i.e. if invoked outside a
/// test body.
#[cfg(with_testing)]
#[macro_export]
macro_rules! test_name {
    () => {
        stdext::function_name!()
            .strip_suffix("::{{closure}}")
            .expect("should be called from the body of a test")
    };
}
/// A Markdown document read from a buffered source, from which embedded
/// `bash` and `gql` fenced code blocks can be extracted as a shell script.
pub struct Markdown<B> {
    // Buffered reader over the raw Markdown text.
    buffer: B,
}
impl Markdown<BufReader<fs_err::File>> {
    /// Opens the Markdown file at `path` for buffered reading.
    pub fn new(path: impl AsRef<Path>) -> std::io::Result<Self> {
        let buffer = BufReader::new(fs_err::File::open(path.as_ref())?);
        Ok(Self { buffer })
    }
}
impl<B> Markdown<B>
where
    B: BufRead,
{
    /// Extracts the embedded code blocks from the Markdown text and writes
    /// them to `output` as one runnable bash script.
    ///
    /// Fenced `bash` blocks are copied verbatim, except those whose opening
    /// fence ends with `ignore`, which are skipped. If
    /// `pause_after_linera_service` is set, a `sleep` line is inserted after
    /// each line containing `linera service`.
    ///
    /// Fenced blocks opened with `gql,uri=<URI>` are converted into a `curl`
    /// command that POSTs the quoted query to `<URI>` and extracts `.data`
    /// from the JSON response with `jq`. If `pause_after_gql_mutations` is
    /// set, a `sleep` is appended after queries starting with `mutation`.
    #[expect(clippy::while_let_on_iterator)]
    pub fn extract_bash_script_to(
        self,
        mut output: impl Write,
        pause_after_linera_service: Option<Duration>,
        pause_after_gql_mutations: Option<Duration>,
    ) -> std::io::Result<()> {
        let mut lines = self.buffer.lines();
        while let Some(line) = lines.next() {
            let line = line?;
            if line.starts_with("```bash") {
                if line.ends_with("ignore") {
                    // Do not consume the block here: its body lines match no
                    // fence prefix, so the outer loop skips them as plain text.
                    continue;
                } else {
                    let mut quote = String::new();
                    // Accumulate lines until the closing fence.
                    while let Some(line) = lines.next() {
                        let line = line?;
                        if line.starts_with("```") {
                            break;
                        }
                        quote += &line;
                        quote += "\n";
                        if let Some(pause) = pause_after_linera_service {
                            if line.contains("linera service") {
                                quote += &format!("sleep {}\n", pause.as_secs());
                            }
                        }
                    }
                    writeln!(output, "{}", quote)?;
                }
            } else if let Some(uri) = line.strip_prefix("```gql,uri=") {
                let mut quote = String::new();
                while let Some(line) = lines.next() {
                    let line = line?;
                    if line.starts_with("```") {
                        break;
                    }
                    quote += &line;
                    quote += "\n";
                }
                // Escape double quotes so the query survives shell quoting.
                writeln!(output, "QUERY=\"{}\"", quote.replace('"', "\\\""))?;
                writeln!(
                    output,
                    "JSON_QUERY=$( jq -n --arg q \"$QUERY\" '{{\"query\": $q}}' )"
                )?;
                writeln!(
                    output,
                    "QUERY_RESULT=$( \
                    curl -w '\\n' -g -X POST \
                    -H \"Content-Type: application/json\" \
                    -d \"$JSON_QUERY\" {uri} \
                    | tee /dev/stderr \
                    | jq -e .data \
                    )"
                )?;
                if let Some(pause) = pause_after_gql_mutations {
                    // Hack: let's add a pause after mutations.
                    if quote.starts_with("mutation") {
                        writeln!(output, "sleep {}\n", pause.as_secs())?;
                    }
                }
            }
        }
        output.flush()?;
        Ok(())
    }
}
/// Returns an HTML response constructing the GraphiQL web page for the given URI.
pub(crate) async fn graphiql(uri: Uri) -> impl IntoResponse {
    // Patch the generated page: bump the pinned React major version and
    // replace the legacy `ReactDOM.render` call with React 18's `createRoot`.
    let source = GraphiQLSource::build()
        .endpoint(uri.path())
        .subscription_endpoint("/ws")
        .finish()
        .replace("@17", "@18")
        .replace(
            "ReactDOM.render(",
            "ReactDOM.createRoot(document.getElementById(\"graphiql\")).render(",
        );
    response::Html(source)
}
/// Parses a decimal string as a number of milliseconds and returns the
/// corresponding [`Duration`]. Errors if `s` is not a valid unsigned integer.
pub fn parse_millis(s: &str) -> Result<Duration, ParseIntError> {
    let millis: u64 = s.parse()?;
    Ok(Duration::from_millis(millis))
}
/// Parses a decimal string as a number of milliseconds and returns the
/// corresponding [`TimeDelta`]. Errors if `s` is not a valid unsigned integer.
pub fn parse_millis_delta(s: &str) -> Result<TimeDelta, ParseIntError> {
    Ok(TimeDelta::from_millis(s.parse()?))
}
/// Validates that `s` consists solely of ASCII alphanumeric characters and
/// returns an owned copy of it on success.
///
/// The empty string is accepted, since it contains no offending character.
pub fn parse_ascii_alphanumeric_string(s: &str) -> Result<String, &'static str> {
    match s.chars().find(|c| !c.is_ascii_alphanumeric()) {
        None => Ok(s.to_string()),
        Some(_) => Err("Expecting ASCII alphanumeric characters"),
    }
}
/// Checks the condition five times with increasing delays. Returns `true` if it is met.
#[cfg(with_testing)]
pub async fn eventually<F>(condition: impl Fn() -> F) -> bool
where
    F: std::future::Future<Output = bool>,
{
    // Sleeps 0, 1, 2, 3 and 4 seconds before the successive attempts.
    for i in 0..5 {
        linera_base::time::timer::sleep(linera_base::time::Duration::from_secs(i)).await;
        if condition().await {
            return true;
        }
    }
    false
}
/// Exercises `parse_version_message` on multi-line, single-line and empty
/// inputs, including one where the version token runs into following text.
#[test]
fn test_parse_version_message() {
    let s = "something\n . . . version12\nother things";
    assert_eq!(parse_version_message(s), "version12");
    let s = "something\n . . . version12other things";
    assert_eq!(parse_version_message(s), "things");
    let s = "something . . . version12 other things";
    assert_eq!(parse_version_message(s), "");
    let s = "";
    assert_eq!(parse_version_message(s), "");
}
/// Verifies that `extract_bash_script_to` copies plain `bash` blocks but
/// skips blocks whose opening fence is tagged `ignore`.
#[test]
fn test_ignore() {
    let readme = r#"
first line
```bash
some bash
```
second line
```bash
some other bash
```
third line
```bash,ignore
this will be ignored
```
"#;
    let buffer = std::io::Cursor::new(readme);
    let markdown = Markdown { buffer };
    let mut script = Vec::new();
    markdown
        .extract_bash_script_to(&mut script, None, None)
        .unwrap();
    // Each extracted block ends with a blank line; the ignored one is absent.
    let expected = "some bash\n\nsome other bash\n\n";
    assert_eq!(String::from_utf8_lossy(&script), expected);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/wallet.rs | linera-service/src/wallet.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
iter::IntoIterator,
sync::{Arc, RwLock},
};
use futures::{stream, Stream};
use linera_base::{
data_types::{ChainDescription, ChainOrigin},
identifiers::{AccountOwner, ChainId},
};
use linera_client::config::GenesisConfig;
use linera_core::wallet;
use linera_persistent as persistent;
/// The serialized contents of a wallet file.
#[derive(serde::Serialize, serde::Deserialize)]
struct Data {
    // The chains tracked by this wallet.
    pub chains: wallet::Memory,
    // The chain used by default, if any. Wrapped in `Arc<RwLock<_>>` so it
    // can be updated through `&self`.
    default: Arc<RwLock<Option<ChainId>>>,
    // The genesis configuration of the network this wallet belongs to.
    genesis_config: GenesisConfig,
}
/// Information gathered about a single chain, for display purposes.
struct ChainDetails {
    // Whether this is the wallet's default chain.
    is_default: bool,
    // Whether this is the network's admin chain.
    is_admin: bool,
    // The chain's origin, if it appears in the genesis configuration.
    origin: Option<ChainOrigin>,
    chain_id: ChainId,
    // The wallet's record for this chain.
    user_chain: wallet::Chain,
}
impl ChainDetails {
    /// Collects display information about the chain with the given ID.
    ///
    /// # Panics
    ///
    /// Panics if the wallet does not contain a chain with this ID.
    fn new(chain_id: ChainId, wallet: &Data) -> Self {
        let Some(user_chain) = wallet.chains.get(chain_id) else {
            panic!("Chain {} not found.", chain_id);
        };
        ChainDetails {
            is_default: Some(chain_id) == *wallet.default.read().unwrap(),
            is_admin: chain_id == wallet.genesis_config.admin_id(),
            chain_id,
            // The origin is only known for chains listed in the genesis
            // configuration.
            origin: wallet
                .genesis_config
                .chains
                .iter()
                .find(|description| description.id() == chain_id)
                .map(ChainDescription::origin),
            user_chain,
        }
    }
    /// Prints a human-readable paragraph describing this chain to stdout.
    fn print_paragraph(&self) {
        println!("-----------------------");
        println!("{:<20} {}", "Chain ID:", self.chain_id);
        let mut tags = Vec::new();
        if self.is_default {
            tags.push("DEFAULT");
        }
        if self.is_admin {
            tags.push("ADMIN");
        }
        if self.user_chain.follow_only {
            tags.push("FOLLOW-ONLY");
        }
        if !tags.is_empty() {
            println!("{:<20} {}", "Tags:", tags.join(", "));
        }
        // Root chains and chains of unknown origin have no parent to show.
        match self.origin {
            Some(ChainOrigin::Root(_)) | None => {
                println!("{:<20} -", "Parent chain:");
            }
            Some(ChainOrigin::Child { parent, .. }) => {
                println!("{:<20} {parent}", "Parent chain:");
            }
        }
        if let Some(owner) = &self.user_chain.owner {
            println!("{:<20} {owner}", "Default owner:");
        } else {
            println!("{:<20} No owner key", "Default owner:");
        }
        println!("{:<20} {}", "Timestamp:", self.user_chain.timestamp);
        println!("{:<20} {}", "Blocks:", self.user_chain.next_block_height);
        if let Some(epoch) = self.user_chain.epoch {
            println!("{:<20} {epoch}", "Epoch:");
        } else {
            println!("{:<20} -", "Epoch:");
        }
        if let Some(hash) = self.user_chain.block_hash {
            println!("{:<20} {hash}", "Latest block hash:");
        }
        if self.user_chain.pending_proposal.is_some() {
            println!("{:<20} present", "Pending proposal:");
        }
    }
}
/// A client wallet: the set of tracked chains, persisted to a file on disk.
pub struct Wallet(persistent::File<Data>);
// TODO(#5081): `persistent` is no longer necessary here, we can move the locking
// logic right here
// Delegates the `linera_core::Wallet` trait to the inherent methods below.
impl linera_core::Wallet for Wallet {
    type Error = persistent::file::Error;
    async fn get(&self, id: ChainId) -> Result<Option<wallet::Chain>, Self::Error> {
        Ok(self.get(id))
    }
    async fn remove(&self, id: ChainId) -> Result<Option<wallet::Chain>, Self::Error> {
        self.remove(id)
    }
    fn items(&self) -> impl Stream<Item = Result<(ChainId, wallet::Chain), Self::Error>> {
        stream::iter(self.items().into_iter().map(Ok))
    }
    async fn insert(
        &self,
        id: ChainId,
        chain: wallet::Chain,
    ) -> Result<Option<wallet::Chain>, Self::Error> {
        self.insert(id, chain)
    }
    async fn try_insert(
        &self,
        id: ChainId,
        chain: wallet::Chain,
    ) -> Result<Option<wallet::Chain>, Self::Error> {
        // NOTE(review): the inherent `try_insert` already calls `save`, so
        // this second `save` looks redundant — confirm before removing.
        let chain = self.try_insert(id, chain)?;
        self.save()?;
        Ok(chain)
    }
    async fn modify(
        &self,
        id: ChainId,
        f: impl FnMut(&mut wallet::Chain) + Send,
    ) -> Result<Option<()>, Self::Error> {
        self.mutate(id, f).transpose()
    }
}
impl Extend<(ChainId, wallet::Chain)> for Wallet {
    fn extend<It: IntoIterator<Item = (ChainId, wallet::Chain)>>(&mut self, chains: It) {
        for (id, chain) in chains {
            // Only chains that were actually inserted (i.e. not already
            // present) are candidates for becoming the default chain.
            if self.0.chains.try_insert(id, chain).is_none() {
                self.try_set_default(id);
            }
        }
        // NOTE(review): unlike the inherent insertion methods, this does not
        // call `save`; presumably the caller persists afterwards — confirm.
    }
}
impl Wallet {
pub fn get(&self, id: ChainId) -> Option<wallet::Chain> {
self.0.chains.get(id)
}
pub fn remove(&self, id: ChainId) -> Result<Option<wallet::Chain>, persistent::file::Error> {
let chain = self.0.chains.remove(id);
{
let mut default = self.0.default.write().unwrap();
if *default == Some(id) {
*default = None;
}
}
self.0.save()?;
Ok(chain)
}
pub fn items(&self) -> Vec<(ChainId, wallet::Chain)> {
self.0.chains.items()
}
fn try_set_default(&self, id: ChainId) {
let mut guard = self.0.default.write().unwrap();
if guard.is_none() {
*guard = Some(id);
}
}
pub fn insert(
&self,
id: ChainId,
chain: wallet::Chain,
) -> Result<Option<wallet::Chain>, persistent::file::Error> {
let has_owner = chain.owner.is_some();
let old_chain = self.0.chains.insert(id, chain.clone());
if has_owner {
self.try_set_default(id);
}
self.0.save()?;
Ok(old_chain)
}
pub fn try_insert(
&self,
id: ChainId,
chain: wallet::Chain,
) -> Result<Option<wallet::Chain>, persistent::file::Error> {
let chain = self.0.chains.try_insert(id, chain);
if chain.is_none() {
self.try_set_default(id);
}
self.save()?;
Ok(chain)
}
pub fn create(
path: &std::path::Path,
genesis_config: GenesisConfig,
) -> Result<Self, persistent::file::Error> {
Ok(Self(persistent::File::new(
path,
Data {
chains: wallet::Memory::default(),
default: Arc::new(RwLock::new(None)),
genesis_config,
},
)?))
}
pub fn read(path: &std::path::Path) -> Result<Self, persistent::file::Error> {
Ok(Self(persistent::File::read(path)?))
}
pub fn genesis_config(&self) -> &GenesisConfig {
&self.0.genesis_config
}
pub fn genesis_admin_chain(&self) -> ChainId {
self.0.genesis_config.admin_id()
}
// TODO(#5082): now that wallets only store chains, not keys, there's not much point in
// allowing wallets with no default chain (i.e. no chains)
pub fn default_chain(&self) -> Option<ChainId> {
*self.0.default.read().unwrap()
}
pub fn pretty_print(&self, chain_ids: Vec<ChainId>) {
let chain_ids: Vec<_> = chain_ids.into_iter().collect();
let total_chains = chain_ids.len();
let plural_s = if total_chains == 1 { "" } else { "s" };
tracing::info!("Found {total_chains} chain{plural_s}");
let mut chains = chain_ids
.into_iter()
.map(|chain_id| ChainDetails::new(chain_id, &self.0))
.collect::<Vec<_>>();
// Print first the default, then the admin chain, then other root chains, and finally the
// child chains.
chains.sort_unstable_by_key(|chain| {
let root_id = chain
.origin
.and_then(|origin| origin.root())
.unwrap_or(u32::MAX);
let chain_id = chain.chain_id;
(!chain.is_default, !chain.is_admin, root_id, chain_id)
});
for chain in chains {
chain.print_paragraph();
}
println!("------------------------");
}
pub fn set_default_chain(&mut self, id: ChainId) -> Result<(), persistent::file::Error> {
assert!(self.0.chains.get(id).is_some());
*self.0.default.write().unwrap() = Some(id);
self.0.save()
}
pub fn mutate<R>(
&self,
chain_id: ChainId,
mutate: impl FnMut(&mut wallet::Chain) -> R,
) -> Option<Result<R, persistent::file::Error>> {
self.0
.chains
.mutate(chain_id, mutate)
.map(|outcome| self.0.save().map(|()| outcome))
}
pub fn forget_keys(&self, chain_id: ChainId) -> anyhow::Result<AccountOwner> {
self.mutate(chain_id, |chain| {
// Without keys we can no longer propose blocks, so switch to follow-only mode.
chain.follow_only = true;
chain.owner.take()
})
.ok_or(anyhow::anyhow!("nonexistent chain `{chain_id}`"))??
.ok_or(anyhow::anyhow!("keypair not found for chain `{chain_id}`"))
}
pub fn forget_chain(&self, chain_id: ChainId) -> anyhow::Result<wallet::Chain> {
let chain = self
.0
.chains
.remove(chain_id)
.ok_or(anyhow::anyhow!("nonexistent chain `{chain_id}`"))?;
self.0.save()?;
Ok(chain)
}
pub fn save(&self) -> Result<(), persistent::file::Error> {
self.0.save()
}
pub fn num_chains(&self) -> usize {
self.0.chains.items().len()
}
pub fn chain_ids(&self) -> Vec<ChainId> {
self.0.chains.chain_ids()
}
/// Returns the list of all chain IDs for which we have a secret key.
pub fn owned_chain_ids(&self) -> Vec<ChainId> {
self.0.chains.owned_chain_ids()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/project.rs | linera-service/src/project.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
io::Write,
path::{Path, PathBuf},
process::Command,
};
use anyhow::{ensure, Context, Result};
use cargo_toml::Manifest;
use convert_case::{Case, Casing};
use current_platform::CURRENT_PLATFORM;
use fs_err::File;
use tracing::debug;
/// A Linera application project on disk.
pub struct Project {
    // The project's root directory.
    root: PathBuf,
}
impl Project {
    /// Creates a new project named `name` in a fresh directory: initializes a
    /// git repository and writes the Cargo manifest, toolchain file, source
    /// templates and a test template.
    ///
    /// `linera_root`, if given, points at a local Linera checkout to use for
    /// SDK dependencies instead of the published crates.
    pub fn create_new(name: &str, linera_root: Option<&Path>) -> Result<Self> {
        ensure!(
            !name.contains(std::path::is_separator),
            "Project name {name} should not contain path-separators",
        );
        let root = PathBuf::from(name);
        ensure!(
            !root.exists(),
            "Directory {} already exists",
            root.display(),
        );
        ensure!(
            root.extension().is_none(),
            "Project name {name} should not have a file extension",
        );
        debug!("Creating directory at {}", root.display());
        fs_err::create_dir_all(&root)?;
        debug!("Creating the source directory");
        let source_directory = Self::create_source_directory(&root)?;
        debug!("Creating the tests directory");
        let test_directory = Self::create_test_directory(&root)?;
        debug!("Initializing git repository");
        Self::initialize_git_repository(&root)?;
        debug!("Writing Cargo.toml");
        Self::create_cargo_toml(&root, name, linera_root)?;
        debug!("Writing rust-toolchain.toml");
        Self::create_rust_toolchain(&root)?;
        debug!("Writing state.rs");
        Self::create_state_file(&source_directory, name)?;
        debug!("Writing lib.rs");
        Self::create_lib_file(&source_directory, name)?;
        debug!("Writing contract.rs");
        Self::create_contract_file(&source_directory, name)?;
        debug!("Writing service.rs");
        Self::create_service_file(&source_directory, name)?;
        debug!("Writing single_chain.rs");
        Self::create_test_file(&test_directory, name)?;
        Ok(Self { root })
    }
    /// Wraps an existing project directory; errors if it does not exist.
    pub fn from_existing_project(root: PathBuf) -> Result<Self> {
        ensure!(
            root.exists(),
            "could not find project at {}",
            root.display()
        );
        Ok(Self { root })
    }
    /// Runs the unit and integration tests of an application.
    pub fn test(&self) -> Result<()> {
        // Tests run on the host platform, not the Wasm target.
        let tests = Command::new("cargo")
            .arg("test")
            .args(["--target", CURRENT_PLATFORM])
            .current_dir(&self.root)
            .spawn()?
            .wait()?;
        ensure!(tests.success(), "tests failed");
        Ok(())
    }
    /// Finds the workspace for a given crate. If the workspace
    /// does not exist, returns the path of the crate.
    fn workspace_root(&self) -> Result<&Path> {
        // Walk up from the project root looking for a Cargo.toml that
        // declares a `[workspace]` section.
        let mut current_path = self.root.as_path();
        loop {
            let toml_path = current_path.join("Cargo.toml");
            if toml_path.exists() {
                let toml = Manifest::from_path(toml_path)?;
                if toml.workspace.is_some() {
                    return Ok(current_path);
                }
            }
            match current_path.parent() {
                None => {
                    break;
                }
                Some(parent) => current_path = parent,
            }
        }
        Ok(self.root.as_path())
    }
    /// Creates the project's `src` directory.
    fn create_source_directory(project_root: &Path) -> Result<PathBuf> {
        let source_directory = project_root.join("src");
        fs_err::create_dir(&source_directory)?;
        Ok(source_directory)
    }
    /// Creates the project's `tests` directory.
    fn create_test_directory(project_root: &Path) -> Result<PathBuf> {
        let test_directory = project_root.join("tests");
        fs_err::create_dir(&test_directory)?;
        Ok(test_directory)
    }
    /// Runs `git init` in the project root and writes a `.gitignore` that
    /// excludes the build directory.
    fn initialize_git_repository(project_root: &Path) -> Result<()> {
        let output = Command::new("git")
            .args([
                "init",
                project_root
                    .to_str()
                    .context("project name contains non UTF-8 characters")?,
            ])
            .output()?;
        ensure!(
            output.status.success(),
            "failed to initialize git repository at {}",
            &project_root.display()
        );
        Self::write_string_to_file(&project_root.join(".gitignore"), "/target")
    }
    /// Renders the Cargo.toml template with the project's binary names and
    /// SDK dependencies and writes it to the project root.
    fn create_cargo_toml(
        project_root: &Path,
        project_name: &str,
        linera_root: Option<&Path>,
    ) -> Result<()> {
        let toml_path = project_root.join("Cargo.toml");
        let (linera_sdk_dep, linera_sdk_dev_dep) = Self::linera_sdk_dependencies(linera_root);
        // Crate/binary names use underscores even if the project name uses dashes.
        let binary_root_name = project_name.replace('-', "_");
        let contract_binary_name = format!("{binary_root_name}_contract");
        let service_binary_name = format!("{binary_root_name}_service");
        let toml_contents = format!(
            include_str!("../template/Cargo.toml.template"),
            project_name = project_name,
            contract_binary_name = contract_binary_name,
            service_binary_name = service_binary_name,
            linera_sdk_dep = linera_sdk_dep,
            linera_sdk_dev_dep = linera_sdk_dev_dep,
        );
        Self::write_string_to_file(&toml_path, &toml_contents)
    }
    /// Writes the pinned `rust-toolchain.toml` from the bundled template.
    fn create_rust_toolchain(project_root: &Path) -> Result<()> {
        Self::write_string_to_file(
            &project_root.join("rust-toolchain.toml"),
            include_str!("../template/rust-toolchain.toml.template"),
        )
    }
    /// Renders and writes the `src/state.rs` template.
    fn create_state_file(source_directory: &Path, project_name: &str) -> Result<()> {
        let project_name = project_name.to_case(Case::Pascal);
        let state_path = source_directory.join("state.rs");
        let file_content = format!(
            include_str!("../template/state.rs.template"),
            project_name = project_name
        );
        Self::write_string_to_file(&state_path, &file_content)
    }
    /// Renders and writes the `src/lib.rs` template.
    fn create_lib_file(source_directory: &Path, project_name: &str) -> Result<()> {
        let project_name = project_name.to_case(Case::Pascal);
        let state_path = source_directory.join("lib.rs");
        let file_content = format!(
            include_str!("../template/lib.rs.template"),
            project_name = project_name
        );
        Self::write_string_to_file(&state_path, &file_content)
    }
    /// Renders and writes the `src/contract.rs` template.
    fn create_contract_file(source_directory: &Path, name: &str) -> Result<()> {
        let project_name = name.to_case(Case::Pascal);
        let contract_path = source_directory.join("contract.rs");
        let contract_contents = format!(
            include_str!("../template/contract.rs.template"),
            module_name = name.replace('-', "_"),
            project_name = project_name
        );
        Self::write_string_to_file(&contract_path, &contract_contents)
    }
    /// Renders and writes the `src/service.rs` template.
    fn create_service_file(source_directory: &Path, name: &str) -> Result<()> {
        let project_name = name.to_case(Case::Pascal);
        let service_path = source_directory.join("service.rs");
        let service_contents = format!(
            include_str!("../template/service.rs.template"),
            module_name = name.replace('-', "_"),
            project_name = project_name
        );
        Self::write_string_to_file(&service_path, &service_contents)
    }
    /// Renders and writes the `tests/single_chain.rs` template.
    fn create_test_file(test_directory: &Path, name: &str) -> Result<()> {
        let project_name = name.to_case(Case::Pascal);
        let test_path = test_directory.join("single_chain.rs");
        let test_contents = format!(
            include_str!("../template/tests/single_chain.rs.template"),
            project_name = name.replace('-', "_"),
            project_abi = project_name,
        );
        Self::write_string_to_file(&test_path, &test_contents)
    }
    /// Creates (or truncates) the file at `path` and writes `content` to it.
    fn write_string_to_file(path: &Path, content: &str) -> Result<()> {
        let mut file = File::create(path)?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }
    /// Resolves [`linera_sdk`] and [`linera_views`] dependencies.
    fn linera_sdk_dependencies(linera_root: Option<&Path>) -> (String, String) {
        match linera_root {
            Some(path) => Self::linera_sdk_testing_dependencies(path),
            None => Self::linera_sdk_production_dependencies(),
        }
    }
    /// Resolves [`linera_sdk`] and [`linera_views`] dependencies in testing mode.
    fn linera_sdk_testing_dependencies(linera_root: &Path) -> (String, String) {
        // We're putting the Cargo.toml file one level above the current directory.
        let linera_root = PathBuf::from("..").join(linera_root);
        let linera_sdk_path = linera_root.join("linera-sdk");
        let linera_sdk_dep = format!(
            "linera-sdk = {{ path = \"{}\" }}",
            linera_sdk_path.display()
        );
        let linera_sdk_dev_dep = format!(
            "linera-sdk = {{ path = \"{}\", features = [\"test\", \"wasmer\"] }}",
            linera_sdk_path.display()
        );
        (linera_sdk_dep, linera_sdk_dev_dep)
    }
    /// Adds [`linera_sdk`] dependencies in production mode.
    fn linera_sdk_production_dependencies() -> (String, String) {
        // Pin the published SDK to this binary's own version.
        let version = env!("CARGO_PKG_VERSION");
        let linera_sdk_dep = format!("linera-sdk = \"{}\"", version);
        let linera_sdk_dev_dep = format!(
            "linera-sdk = {{ version = \"{}\", features = [\"test\", \"wasmer\"] }}",
            version
        );
        (linera_sdk_dep, linera_sdk_dev_dep)
    }
    /// Builds the project for `wasm32-unknown-unknown` in release mode and
    /// returns the paths of the contract and service Wasm binaries.
    ///
    /// `name`, if given, overrides the package name used to derive the binary
    /// names.
    pub fn build(&self, name: Option<String>) -> Result<(PathBuf, PathBuf), anyhow::Error> {
        let name = match name {
            Some(name) => name,
            None => self.project_package_name()?.replace('-', "_"),
        };
        let contract_name = format!("{}_contract", name);
        let service_name = format!("{}_service", name);
        let cargo_build = Command::new("cargo")
            .arg("build")
            .arg("--release")
            .args(["--target", "wasm32-unknown-unknown"])
            .current_dir(&self.root)
            .spawn()?
            .wait()?;
        ensure!(cargo_build.success(), "build failed");
        // Artifacts land in the enclosing workspace's target directory, if any.
        let build_path = self
            .workspace_root()?
            .join("target/wasm32-unknown-unknown/release");
        Ok((
            build_path.join(contract_name).with_extension("wasm"),
            build_path.join(service_name).with_extension("wasm"),
        ))
    }
    /// Returns the package name declared in the project's Cargo.toml.
    fn project_package_name(&self) -> Result<String> {
        let manifest = Manifest::from_path(self.cargo_toml_path())?;
        let name = manifest
            .package
            .context("Cargo.toml is missing `[package]`")?
            .name;
        Ok(name)
    }
    /// Returns the path of the project's Cargo.toml.
    fn cargo_toml_path(&self) -> PathBuf {
        self.root.join("Cargo.toml")
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/server.rs | linera-service/src/server.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![recursion_limit = "256"]
// Use jemalloc as the global allocator when the `jemalloc` feature is enabled.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
// jemalloc configuration for memory profiling with jemalloc_pprof
// prof:true,prof_active:true - Enable profiling from start
// lg_prof_sample:19 - Sample every 512KB for good detail/overhead balance
// Linux/other platforms: use unprefixed malloc (with unprefixed_malloc_on_supported_platforms)
#[cfg(all(feature = "memory-profiling", not(target_os = "macos")))]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
// macOS: use prefixed malloc (without unprefixed_malloc_on_supported_platforms)
#[cfg(all(feature = "memory-profiling", target_os = "macos"))]
#[allow(non_upper_case_globals)]
#[export_name = "_rjem_malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
use std::{
borrow::Cow,
num::NonZeroU16,
path::{Path, PathBuf},
time::Duration,
};
use anyhow::{bail, Context};
use async_trait::async_trait;
use futures::{stream::FuturesUnordered, FutureExt as _, StreamExt, TryFutureExt as _};
use linera_base::{
crypto::{CryptoRng, Ed25519SecretKey},
listen_for_shutdown_signals,
};
use linera_client::config::{CommitteeConfig, ValidatorConfig, ValidatorServerConfig};
use linera_core::{worker::WorkerState, JoinSetExt as _, CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES};
use linera_execution::{WasmRuntime, WithWasmDefault};
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_persistent::{self as persistent, Persist};
use linera_rpc::{
config::{
CrossChainConfig, ExporterServiceConfig, NetworkProtocol, NotificationConfig, ProxyConfig,
ShardConfig, ShardId, TlsConfig, ValidatorInternalNetworkConfig,
ValidatorPublicNetworkConfig,
},
grpc, simple,
};
use linera_sdk::linera_base_types::{AccountSecretKey, ValidatorKeypair};
use linera_service::{
storage::{CommonStorageOptions, Runnable, StorageConfig},
util,
};
use linera_storage::Storage;
use serde::Deserialize;
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
/// Everything needed to run one or all shards of a single validator.
struct ServerContext {
    // The validator's server configuration, including its secret key.
    server_config: ValidatorServerConfig,
    // Configuration for cross-chain requests.
    cross_chain_config: CrossChainConfig,
    // Configuration for notifications.
    notification_config: NotificationConfig,
    // If set, run only this shard; otherwise run all shards.
    shard: Option<usize>,
    // Blocks up to this far in the future are accepted; the validator waits
    // until their timestamp before voting.
    block_time_grace_period: Duration,
    // Idle time after which a chain worker frees its memory.
    chain_worker_ttl: Duration,
    // Capacity of the block cache.
    block_cache_size: usize,
    // Capacity of the execution state cache.
    execution_state_cache_size: usize,
    // Maximum number of `received_log` entries in chain info responses.
    chain_info_max_received_log_entries: usize,
}
impl ServerContext {
    /// Builds the worker state for shard `shard_id`, backed by `storage`,
    /// and returns it together with the shard's ID and configuration.
    fn make_shard_state<S>(
        &self,
        local_ip_addr: &str,
        shard_id: ShardId,
        storage: S,
    ) -> (WorkerState<S>, ShardId, ShardConfig)
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let shard = self.server_config.internal_network.shard(shard_id);
        info!("Shard booted on {}", shard.host);
        info!(
            "Public key: {}",
            self.server_config.validator_secret.public()
        );
        let state = WorkerState::new(
            format!("Shard {} @ {}:{}", shard_id, local_ip_addr, shard.port),
            Some(self.server_config.validator_secret.copy()),
            storage,
            self.block_cache_size,
            self.execution_state_cache_size,
        )
        .with_allow_inactive_chains(false)
        .with_allow_messages_from_deprecated_epochs(false)
        .with_block_time_grace_period(self.block_time_grace_period)
        .with_chain_worker_ttl(self.chain_worker_ttl)
        .with_chain_info_max_received_log_entries(self.chain_info_max_received_log_entries);
        (state, shard_id, shard.clone())
    }
    /// Spawns one server per shard state over the simple transport
    /// `protocol`, plus a metrics endpoint per shard when metrics are
    /// enabled. Returns the join set owning the spawned tasks.
    fn spawn_simple<S>(
        &self,
        listen_address: &str,
        states: Vec<(WorkerState<S>, ShardId, ShardConfig)>,
        protocol: simple::TransportProtocol,
        shutdown_signal: CancellationToken,
    ) -> JoinSet<()>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let mut join_set = JoinSet::new();
        let handles = FuturesUnordered::new();
        let internal_network = self
            .server_config
            .internal_network
            .clone_with_protocol(protocol);
        for (state, shard_id, shard) in states {
            let internal_network = internal_network.clone();
            let cross_chain_config = self.cross_chain_config.clone();
            let listen_address = listen_address.to_owned();
            #[cfg(with_metrics)]
            if let Some(port) = shard.metrics_port {
                monitoring_server::start_metrics(
                    (listen_address.clone(), port),
                    shutdown_signal.clone(),
                );
            }
            let server_handle = simple::Server::new(
                internal_network,
                listen_address,
                shard.port,
                state,
                shard_id,
                cross_chain_config,
            )
            .spawn(shutdown_signal.clone(), &mut join_set);
            // Log failures of individual shard servers without aborting the rest.
            handles.push(
                server_handle
                    .join()
                    .inspect_err(move |error| {
                        error!("Error running server for shard {shard_id}: {error:?}")
                    })
                    .map(|_| ()),
            );
        }
        join_set.spawn_task(handles.collect::<()>());
        join_set
    }
    /// Spawns one gRPC server per shard state, plus a metrics endpoint per
    /// shard when metrics are enabled. Returns the join set owning the
    /// spawned tasks.
    fn spawn_grpc<S>(
        &self,
        listen_address: &str,
        states: Vec<(WorkerState<S>, ShardId, ShardConfig)>,
        shutdown_signal: CancellationToken,
    ) -> JoinSet<()>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let mut join_set = JoinSet::new();
        let handles = FuturesUnordered::new();
        for (state, shard_id, shard) in states {
            #[cfg(with_metrics)]
            if let Some(port) = shard.metrics_port {
                monitoring_server::start_metrics(
                    (listen_address.to_string(), port),
                    shutdown_signal.clone(),
                );
            }
            let server_handle = grpc::GrpcServer::spawn(
                listen_address.to_string(),
                shard.port,
                state,
                shard_id,
                self.server_config.internal_network.clone(),
                self.cross_chain_config.clone(),
                self.notification_config.clone(),
                shutdown_signal.clone(),
                &mut join_set,
            );
            // Log failures of individual shard servers without aborting the rest.
            handles.push(
                server_handle
                    .join()
                    .inspect_err(move |error| {
                        error!("Error running server for shard {shard_id}: {error:?}")
                    })
                    .map(|_| ()),
            );
        }
        join_set.spawn_task(handles.collect::<()>());
        join_set
    }
    /// Returns the local address to bind listeners to.
    fn get_listen_address() -> String {
        // Allow local IP address to be different from the public one.
        "0.0.0.0".to_string()
    }
}
#[async_trait]
impl Runnable for ServerContext {
    type Output = anyhow::Result<()>;
    /// Runs either the single configured shard or all shards of the
    /// validator over the configured internal protocol, until a shutdown
    /// signal is received or all server tasks finish.
    async fn run<S>(self, storage: S) -> anyhow::Result<()>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let shutdown_notifier = CancellationToken::new();
        let listen_address = Self::get_listen_address();
        tokio::spawn(listen_for_shutdown_signals(shutdown_notifier.clone()));
        // Run the server
        let states = match self.shard {
            Some(shard) => {
                info!("Running shard number {}", shard);
                vec![self.make_shard_state(&listen_address, shard, storage)]
            }
            None => {
                info!("Running all shards");
                let num_shards = self.server_config.internal_network.shards.len();
                (0..num_shards)
                    .map(|shard| self.make_shard_state(&listen_address, shard, storage.clone()))
                    .collect()
            }
        };
        let mut join_set = match self.server_config.internal_network.protocol {
            NetworkProtocol::Simple(protocol) => {
                self.spawn_simple(&listen_address, states, protocol, shutdown_notifier)
            }
            NetworkProtocol::Grpc(tls_config) => match tls_config {
                TlsConfig::ClearText => self.spawn_grpc(&listen_address, states, shutdown_notifier),
                TlsConfig::Tls => bail!("TLS not supported between proxy and shards."),
            },
        };
        join_set.await_all_tasks().await;
        Ok(())
    }
}
// Top-level command-line options for the `linera-server` binary.
// NOTE: `///` doc comments on clap fields become user-visible help text, so
// review notes here use regular `//` comments to leave the CLI output intact.
#[derive(clap::Parser)]
#[command(
    name = "linera-server",
    about = "Server implementation (aka validator shard) for the Linera blockchain",
    version = linera_version::VersionInfo::default_clap_str(),
)]
struct ServerOptions {
    /// Subcommands. Acceptable values are run and generate.
    #[command(subcommand)]
    command: ServerCommand,
    /// The number of Tokio worker threads to use.
    #[arg(long, env = "LINERA_SERVER_TOKIO_THREADS")]
    tokio_threads: Option<usize>,
    /// The number of Tokio blocking threads to use.
    #[arg(long, env = "LINERA_SERVER_TOKIO_BLOCKING_THREADS")]
    tokio_blocking_threads: Option<usize>,
    /// Size of the block cache (default: 5000)
    #[arg(long, env = "LINERA_BLOCK_CACHE_SIZE", default_value = "5000")]
    block_cache_size: usize,
    /// Size of the execution state cache (default: 10000)
    #[arg(
        long,
        env = "LINERA_EXECUTION_STATE_CACHE_SIZE",
        default_value = "10000"
    )]
    execution_state_cache_size: usize,
}
// Description of a single validator, deserialized from its configuration
// file and used by `make_server_config` to generate the server config.
#[derive(Debug, PartialEq, Eq, Deserialize)]
struct ValidatorOptions {
    /// Path to the file containing the server configuration of this Linera validator (including its secret key)
    server_config_path: PathBuf,
    /// The host of the validator (IP address or hostname)
    host: String,
    /// The port of the validator
    port: u16,
    /// The server configurations for the linera-exporter.
    #[serde(default)]
    block_exporters: Vec<ExporterServiceConfig>,
    /// The network protocol for the frontend.
    external_protocol: NetworkProtocol,
    /// The network protocol for workers.
    internal_protocol: NetworkProtocol,
    /// The public name and the port of each of the shards
    shards: Vec<ShardConfig>,
    /// The name and the port of the proxies
    proxies: Vec<ProxyConfig>,
}
/// Generates a fresh server configuration file for one validator.
///
/// Draws a new validator keypair and an Ed25519 account key from `rng`,
/// builds the public and internal network configurations from `options`, and
/// writes the resulting [`ValidatorServerConfig`] (including the secret key)
/// to the file at `path`.
fn make_server_config<R: CryptoRng>(
    path: &Path,
    rng: &mut R,
    options: ValidatorOptions,
) -> anyhow::Result<persistent::File<ValidatorServerConfig>> {
    let validator_keypair = ValidatorKeypair::generate_from(rng);
    let account_secret = AccountSecretKey::Ed25519(Ed25519SecretKey::generate_from(rng));
    let public_key = validator_keypair.public_key;
    let network = ValidatorPublicNetworkConfig {
        protocol: options.external_protocol,
        host: options.host,
        port: options.port,
    };
    let internal_network = ValidatorInternalNetworkConfig {
        public_key,
        protocol: options.internal_protocol,
        shards: options.shards,
        block_exporters: options.block_exporters,
        proxies: options.proxies,
    };
    let validator = ValidatorConfig {
        network,
        public_key,
        account_key: account_secret.public(),
    };
    Ok(persistent::File::new(
        path,
        ValidatorServerConfig {
            validator,
            validator_secret: validator_keypair.secret_key,
            internal_network,
        },
    )?)
}
#[derive(clap::Parser)]
enum ServerCommand {
    /// Runs a service for each shard of the Linera validator
#[command(name = "run")]
Run {
/// Path to the file containing the server configuration of this Linera validator (including its secret key)
#[arg(long = "server")]
server_config_path: PathBuf,
/// Storage configuration for the blockchain history, chain states and binary blobs.
#[arg(long = "storage")]
storage_config: StorageConfig,
/// Common storage options.
#[command(flatten)]
common_storage_options: Box<CommonStorageOptions>,
/// Configuration for cross-chain requests
#[command(flatten)]
cross_chain_config: CrossChainConfig,
/// Configuration for notifications
#[command(flatten)]
notification_config: NotificationConfig,
/// Runs a specific shard (from 0 to shards-1)
#[arg(long)]
shard: Option<usize>,
/// Blocks with a timestamp this far in the future will still be accepted, but the validator
/// will wait until that timestamp before voting.
#[arg(long = "block-time-grace-period-ms", default_value = "500", value_parser = util::parse_millis)]
block_time_grace_period: Duration,
/// The WebAssembly runtime to use.
#[arg(long)]
wasm_runtime: Option<WasmRuntime>,
/// The duration in milliseconds after which an idle chain worker will free its memory.
#[arg(
long = "chain-worker-ttl-ms",
default_value = "30000",
value_parser = util::parse_millis
)]
chain_worker_ttl: Duration,
/// Maximum size for received_log entries in chain info responses. This should
/// generally only be increased from the default value.
#[arg(
long,
default_value_t = CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES,
env = "LINERA_SERVER_CHAIN_INFO_MAX_RECEIVED_LOG_ENTRIES",
)]
chain_info_max_received_log_entries: usize,
/// OpenTelemetry OTLP exporter endpoint (requires opentelemetry feature).
#[arg(long, env = "LINERA_OTLP_EXPORTER_ENDPOINT")]
otlp_exporter_endpoint: Option<String>,
},
/// Act as a trusted third-party and generate all server configurations
#[command(name = "generate")]
Generate {
/// Configuration file of each validator in the committee
#[arg(long, num_args(0..))]
validators: Vec<PathBuf>,
/// Path where to write the description of the Linera committee
#[arg(long)]
committee: Option<PathBuf>,
/// Force this command to generate keys using a PRNG and a given seed. USE FOR
/// TESTING ONLY.
#[arg(long)]
testing_prng_seed: Option<u64>,
},
/// Replaces the configurations of the shards by following the given template.
#[command(name = "edit-shards")]
EditShards {
/// Path to the file containing the server configuration of this Linera validator.
#[arg(long = "server")]
server_config_path: PathBuf,
/// The number N of shard configs to generate, possibly starting with zeroes. If
/// `N` was written using `D` digits, we will replace the first occurrence of the
/// string `"%" * D` (`%` repeated D times) by the shard number.
#[arg(long)]
num_shards: String,
/// The host of the validator (IP address or hostname), possibly containing `%`
/// for digits of the shard number.
#[arg(long)]
host: String,
/// The port of the main endpoint, possibly containing `%` for digits of the shard
/// number.
#[arg(long)]
port: String,
/// The port for the metrics endpoint, possibly containing `%` for digits of the
/// shard number.
#[arg(long)]
metrics_port: Option<String>,
},
}
/// Entry point: parses the CLI options, builds a Tokio runtime matching the
/// requested thread counts, and drives the selected server command to completion.
fn main() {
    let options = <ServerOptions as clap::Parser>::parse();
    // Exactly one worker thread means a current-thread runtime; anything else
    // (including unspecified) gets the multi-threaded runtime.
    let mut builder = match options.tokio_threads {
        Some(1) => tokio::runtime::Builder::new_current_thread(),
        tokio_threads => {
            let mut multi = tokio::runtime::Builder::new_multi_thread();
            if let Some(threads) = tokio_threads {
                multi.worker_threads(threads);
            }
            multi
        }
    };
    if let Some(blocking_threads) = options.tokio_blocking_threads {
        builder.max_blocking_threads(blocking_threads);
    }
    builder
        .enable_all()
        .build()
        .expect("Failed to create Tokio runtime")
        .block_on(run(options))
}
/// Returns the OTLP exporter endpoint to use based on the [`ServerCommand`] that will run,
/// or `None` if the command does not export telemetry.
fn otlp_exporter_endpoint_for(command: &ServerCommand) -> Option<&str> {
    match command {
        ServerCommand::Run {
            otlp_exporter_endpoint,
            ..
        } => otlp_exporter_endpoint.as_deref(),
        // `generate` and `edit-shards` are offline configuration tools: no telemetry.
        ServerCommand::Generate { .. } | ServerCommand::EditShards { .. } => None,
    }
}
/// Returns the log file name to use based on the [`ServerCommand`] that will run.
///
/// For `run`, the name embeds the validator's public key (read from the server
/// config file) and, when given, the shard number; other commands log to "server".
fn log_file_name_for(command: &ServerCommand) -> Cow<'static, str> {
    match command {
        ServerCommand::Generate { .. } | ServerCommand::EditShards { .. } => "server".into(),
        ServerCommand::Run {
            shard,
            server_config_path,
            ..
        } => {
            let server_config: ValidatorServerConfig =
                util::read_json(server_config_path).expect("Failed to read server config");
            let public_key = &server_config.validator.public_key;
            let name = match shard {
                Some(shard) => format!("validator-{public_key}-shard-{shard}"),
                None => format!("validator-{public_key}"),
            };
            name.into()
        }
    }
}
/// Initializes tracing/telemetry and executes the parsed [`ServerCommand`].
async fn run(options: ServerOptions) {
    linera_service::tracing::opentelemetry::init(
        &log_file_name_for(&options.command),
        otlp_exporter_endpoint_for(&options.command),
    );
    match options.command {
        ServerCommand::Run {
            server_config_path,
            storage_config,
            common_storage_options,
            cross_chain_config,
            notification_config,
            shard,
            block_time_grace_period,
            wasm_runtime,
            chain_worker_ttl,
            chain_info_max_received_log_entries,
            // Already consumed by `otlp_exporter_endpoint_for` above.
            otlp_exporter_endpoint: _,
        } => {
            linera_version::VERSION_INFO.log();
            let server_config: ValidatorServerConfig =
                util::read_json(&server_config_path).expect("Failed to read server config");
            // Bundle everything the storage runner needs to start the shard service(s).
            let job = ServerContext {
                server_config,
                cross_chain_config,
                notification_config,
                shard,
                block_time_grace_period,
                chain_worker_ttl,
                block_cache_size: options.block_cache_size,
                execution_state_cache_size: options.execution_state_cache_size,
                chain_info_max_received_log_entries,
            };
            let wasm_runtime = wasm_runtime.with_wasm_default();
            let store_config = storage_config
                .add_common_storage_options(&common_storage_options)
                .unwrap();
            // Validators should not output contract logs.
            let allow_application_logs = false;
            store_config
                .run_with_storage(wasm_runtime, allow_application_logs, job)
                .boxed()
                .await
                .unwrap()
                .unwrap();
        }
        ServerCommand::Generate {
            validators,
            committee,
            testing_prng_seed,
        } => {
            let mut config_validators = Vec::new();
            // Per the CLI docs, a seed forces a deterministic PRNG (testing only).
            let mut rng = Box::<dyn CryptoRng>::from(testing_prng_seed);
            for options_path in validators {
                let options_string = fs_err::tokio::read_to_string(options_path)
                    .await
                    .expect("Unable to read validator options file");
                let options: ValidatorOptions =
                    toml::from_str(&options_string).unwrap_or_else(|_| {
                        panic!("Invalid options file format: \n {}", options_string)
                    });
                let path = options.server_config_path.clone();
                let mut server = make_server_config(&path, &mut rng, options)
                    .expect("Unable to open server config file");
                Persist::persist(&mut server)
                    .await
                    .expect("Unable to write server config file");
                info!("Wrote server config {}", path.to_str().unwrap());
                // Print "<public_key>,<account_key>" for the caller to collect.
                println!(
                    "{},{}",
                    server.validator.public_key, server.validator.account_key
                );
                config_validators.push(Persist::into_value(server).validator);
            }
            // Optionally write the assembled committee description as well.
            if let Some(committee) = committee {
                let mut config = persistent::File::new(
                    &committee,
                    CommitteeConfig {
                        validators: config_validators,
                    },
                )
                .expect("Unable to open committee configuration");
                Persist::persist(&mut config)
                    .await
                    .expect("Unable to write committee description");
                info!("Wrote committee config {}", committee.to_str().unwrap());
            }
        }
        ServerCommand::EditShards {
            server_config_path,
            num_shards,
            host,
            port,
            metrics_port,
        } => {
            // Rewrite only the shard list; the rest of the config is preserved.
            let mut server_config =
                persistent::File::<ValidatorServerConfig>::read(&server_config_path)
                    .expect("Failed to read server config");
            let shards = generate_shard_configs(num_shards, host, port, metrics_port)
                .expect("Failed to generate shard configs");
            server_config.internal_network.shards = shards;
            Persist::persist(&mut server_config)
                .await
                .expect("Failed to write updated server config");
        }
    }
}
/// Generates shard configurations by substituting the shard number into the
/// `host`, `port` and `metrics_port` templates.
///
/// The digit count `D` of `num_shards` determines the placeholder: the first
/// occurrence of `"%"` repeated `D` times in each template is replaced by the
/// zero-padded, 1-based shard number.
///
/// # Errors
/// Fails if `num_shards` does not parse as a non-zero integer, or if a
/// substituted port template does not parse as a port number.
fn generate_shard_configs(
    num_shards: String,
    host: String,
    port: String,
    metrics_port: Option<String>,
) -> anyhow::Result<Vec<ShardConfig>> {
    let len = num_shards.len();
    let num_shards = num_shards
        .parse::<NonZeroU16>()
        .context("Failed to parse the number of shards")?;
    // The placeholder is exactly as wide as `num_shards` was written, e.g.
    // "02" -> "%%", so shard numbers are zero-padded consistently.
    let pattern = "%".repeat(len);
    let mut shards = Vec::with_capacity(num_shards.get() as usize);
    for i in 1u16..=num_shards.into() {
        let index = format!("{i:0len$}");
        let host = host.replacen(&pattern, &index, 1);
        let port = port
            .replacen(&pattern, &index, 1)
            .parse()
            .context("Failed to decode port into an integer")?;
        let metrics_port = metrics_port
            .as_ref()
            .map(|port| {
                port.replacen(&pattern, &index, 1)
                    .parse()
                    .context("Failed to decode metrics port into an integer")
            })
            .transpose()?;
        shards.push(ShardConfig {
            host,
            port,
            metrics_port,
        });
    }
    Ok(shards)
}
#[cfg(test)]
mod test {
    use linera_rpc::simple::TransportProtocol;
    use super::*;
    // Checks that a complete validator options TOML file deserializes into the
    // expected `ValidatorOptions`, including nested proxies, shards and exporters.
    #[test]
    fn test_validator_options() {
        let toml_str = r#"
            server_config_path = "server.json"
            host = "host"
            port = 9000
            external_protocol = { Simple = "Tcp" }
            internal_protocol = { Simple = "Udp" }

            [[proxies]]
            host = "proxy"
            public_port = 20100
            private_port = 20200
            metrics_port = 21100

            [[shards]]
            host = "host1"
            port = 9001
            metrics_port = 5001

            [[shards]]
            host = "host2"
            port = 9002
            metrics_port = 5002

            [[block_exporters]]
            host = "exporter"
            port = 12000
        "#;
        let options: ValidatorOptions = toml::from_str(toml_str).unwrap();
        assert_eq!(
            options,
            ValidatorOptions {
                server_config_path: "server.json".into(),
                external_protocol: NetworkProtocol::Simple(TransportProtocol::Tcp),
                internal_protocol: NetworkProtocol::Simple(TransportProtocol::Udp),
                host: "host".into(),
                port: 9000,
                proxies: vec![ProxyConfig {
                    host: "proxy".into(),
                    public_port: 20100,
                    private_port: 20200,
                    metrics_port: 21100,
                }],
                block_exporters: vec![ExporterServiceConfig {
                    host: "exporter".into(),
                    port: 12000
                }],
                shards: vec![
                    ShardConfig {
                        host: "host1".into(),
                        port: 9001,
                        metrics_port: Some(5001),
                    },
                    ShardConfig {
                        host: "host2".into(),
                        port: 9002,
                        metrics_port: Some(5002),
                    },
                ],
            }
        );
    }
    // Checks placeholder substitution for a two-digit shard count, and that a
    // digit count narrower than the `%%` placeholders fails (the leftover `%`
    // makes the port template unparsable).
    #[test]
    fn test_generate_shard_configs() {
        assert_eq!(
            generate_shard_configs(
                "02".into(),
                "host%%".into(),
                "10%%".into(),
                Some("11%%".into())
            )
            .unwrap(),
            vec![
                ShardConfig {
                    host: "host01".into(),
                    port: 1001,
                    metrics_port: Some(1101),
                },
                ShardConfig {
                    host: "host02".into(),
                    port: 1002,
                    metrics_port: Some(1102),
                },
            ],
        );
        assert!(generate_shard_configs(
            "2".into(),
            "host%%".into(),
            "10%%".into(),
            Some("11%%".into())
        )
        .is_err());
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/proxy/grpc.rs | linera-service/src/proxy/grpc.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// `tracing::instrument` is not compatible with this nightly Clippy lint
#![allow(unknown_lints)]
use std::{
fmt::Debug,
marker::PhantomData,
net::SocketAddr,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use anyhow::Result;
use async_trait::async_trait;
use futures::{future::BoxFuture, FutureExt as _};
use linera_base::identifiers::ChainId;
use linera_core::{
data_types::{CertificatesByHeightRequest, ChainInfo, ChainInfoQuery},
node::NodeError,
notifier::ChannelNotifier,
JoinSetExt as _,
};
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_rpc::{
config::{ProxyConfig, ShardConfig, TlsConfig, ValidatorInternalNetworkConfig},
grpc::{
api::{
self,
notifier_service_server::{NotifierService, NotifierServiceServer},
validator_node_server::{ValidatorNode, ValidatorNodeServer},
validator_worker_client::ValidatorWorkerClient,
BlobContent, BlobId, BlobIds, BlockProposal, Certificate, CertificatesBatchRequest,
CertificatesBatchResponse, ChainInfoResult, CryptoHash, HandlePendingBlobRequest,
LiteCertificate, NetworkDescription, Notification, PendingBlobRequest,
PendingBlobResult, RawCertificate, RawCertificatesBatch, SubscriptionRequest,
VersionInfo,
},
pool::GrpcConnectionPool,
GrpcProtoConversionError, GrpcProxyable, GRPC_CHUNKED_MESSAGE_FILL_LIMIT,
GRPC_MAX_MESSAGE_SIZE,
},
};
use linera_sdk::{linera_base_types::Blob, views::ViewError};
use linera_storage::{ResultReadCertificates, Storage};
use prost::Message;
use tokio::{select, task::JoinSet};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tokio_util::sync::CancellationToken;
use tonic::{
transport::{Channel, Identity, Server, ServerTlsConfig},
Request, Response, Status,
};
use tonic_web::GrpcWebLayer;
use tower::{builder::ServiceBuilder, Layer, Service};
use tracing::{debug, info, instrument, Instrument as _, Level};
/// Prometheus counters and histograms recorded by the proxy middleware and handlers.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{
        linear_bucket_interval, register_histogram_vec, register_int_counter_vec,
    };
    use prometheus::{HistogramVec, IntCounterVec};
    /// Latency of proxied requests, in milliseconds (observed by the middleware).
    pub static PROXY_REQUEST_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "proxy_request_latency",
            "Proxy request latency",
            &[],
            linear_bucket_interval(1.0, 50.0, 2000.0),
        )
    });
    /// Total number of requests seen by the middleware (success or failure).
    pub static PROXY_REQUEST_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec("proxy_request_count", "Proxy request count", &[])
    });
    /// Successful proxied requests, labeled by RPC method name.
    pub static PROXY_REQUEST_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "proxy_request_success",
            "Proxy request success",
            &["method_name"],
        )
    });
    /// Failed proxied requests, labeled by RPC method name.
    pub static PROXY_REQUEST_ERROR: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "proxy_request_error",
            "Proxy request error",
            &["method_name"],
        )
    });
}
/// A `tower` layer that wraps services so every request is timed and counted
/// in the Prometheus metrics (when built with metrics support).
#[derive(Clone)]
pub struct PrometheusMetricsMiddlewareLayer;
/// The service produced by [`PrometheusMetricsMiddlewareLayer`]: delegates to
/// an inner service while recording request latency and counts.
#[derive(Clone)]
pub struct PrometheusMetricsMiddlewareService<T> {
    // The wrapped inner service that actually handles requests.
    service: T,
}
impl<S> Layer<S> for PrometheusMetricsMiddlewareLayer {
    type Service = PrometheusMetricsMiddlewareService<S>;
    /// Wraps `inner` so that each request it handles is timed and counted.
    fn layer(&self, inner: S) -> Self::Service {
        PrometheusMetricsMiddlewareService { service: inner }
    }
}
impl<S, Req> Service<Req> for PrometheusMetricsMiddlewareService<S>
where
    S::Future: Send + 'static,
    S: Service<Req> + std::marker::Send,
{
    type Response = S::Response;
    type Error = S::Error;
    // Boxed so the async block below (which owns the timing state) can be returned.
    type Future = BoxFuture<'static, Result<S::Response, S::Error>>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated unchanged to the wrapped service.
        self.service.poll_ready(cx)
    }
    fn call(&mut self, request: Req) -> Self::Future {
        // Start the clock before dispatching, so latency covers the full inner call.
        #[cfg(with_metrics)]
        let start = linera_base::time::Instant::now();
        let future = self.service.call(request);
        async move {
            let response = future.await?;
            // Only successful responses are recorded; errors short-circuit via `?` above.
            #[cfg(with_metrics)]
            {
                // Latency is recorded in milliseconds.
                metrics::PROXY_REQUEST_LATENCY
                    .with_label_values(&[])
                    .observe(start.elapsed().as_secs_f64() * 1000.0);
                metrics::PROXY_REQUEST_COUNT.with_label_values(&[]).inc();
            }
            Ok(response)
        }
        .boxed()
    }
}
/// The gRPC proxy that fronts a validator: it forwards client requests to the
/// shard responsible for each chain. Cheaply cloneable via the inner [`Arc`].
#[derive(Clone)]
pub struct GrpcProxy<S>(Arc<GrpcProxyInner<S>>);
/// Shared state behind [`GrpcProxy`]'s `Arc`.
struct GrpcProxyInner<S> {
    // Topology of shards, proxies and block exporters for this validator.
    internal_config: ValidatorInternalNetworkConfig,
    // Pooled gRPC channels to the worker shards.
    worker_connection_pool: GrpcConnectionPool,
    // Fan-out of per-chain notifications to subscribed clients.
    notifier: ChannelNotifier<Result<Notification, Status>>,
    // Whether the public server uses TLS or clear text.
    tls: TlsConfig,
    // Storage backend used for direct reads (blobs, certificates, ...).
    storage: S,
    // Index of this proxy in `internal_config.proxies`.
    id: usize,
}
impl<S> GrpcProxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates a proxy with a fresh connection pool and notifier.
    /// `id` selects this proxy's entry in `internal_config.proxies`.
    pub fn new(
        internal_config: ValidatorInternalNetworkConfig,
        connect_timeout: Duration,
        timeout: Duration,
        tls: TlsConfig,
        storage: S,
        id: usize,
    ) -> Self {
        Self(Arc::new(GrpcProxyInner {
            internal_config,
            worker_connection_pool: GrpcConnectionPool::default()
                .with_connect_timeout(connect_timeout)
                .with_timeout(timeout),
            notifier: ChannelNotifier::default(),
            tls,
            storage,
            id,
        }))
    }
    /// Wraps `self` as the public `ValidatorNode` gRPC service with raised message-size limits.
    fn as_validator_node(&self) -> ValidatorNodeServer<Self> {
        ValidatorNodeServer::new(self.clone())
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE)
    }
    /// Returns this proxy's own entry in the internal network configuration.
    /// Panics if `id` is out of range for the configured proxies.
    fn config(&self) -> &ProxyConfig {
        self.0
            .internal_config
            .proxies
            .get(self.0.id)
            .expect("No proxy config provided.")
    }
    /// Wraps `self` as the internal notifier gRPC service.
    fn as_notifier_service(&self) -> NotifierServiceServer<Self> {
        NotifierServiceServer::new(self.clone())
    }
    /// Address the public server binds to: all interfaces, configured public port.
    fn public_address(&self) -> SocketAddr {
        SocketAddr::from(([0, 0, 0, 0], self.config().public_port))
    }
    /// Address the metrics endpoint binds to.
    fn metrics_address(&self) -> SocketAddr {
        SocketAddr::from(([0, 0, 0, 0], self.config().metrics_port))
    }
    /// Address the internal (worker-facing) server binds to.
    fn internal_address(&self) -> SocketAddr {
        SocketAddr::from(([0, 0, 0, 0], self.config().private_port))
    }
    /// Resolves the shard responsible for the message's chain ID, or `None`
    /// if the message carries no chain ID.
    fn shard_for(&self, proxyable: &impl GrpcProxyable) -> Option<ShardConfig> {
        Some(
            self.0
                .internal_config
                .get_shard_for(proxyable.chain_id()?)
                .clone(),
        )
    }
    /// Returns a pooled worker client for the given shard, with raised message-size limits.
    fn worker_client_for_shard(
        &self,
        shard: &ShardConfig,
    ) -> Result<ValidatorWorkerClient<Channel>> {
        let address = shard.http_address();
        let channel = self.0.worker_connection_pool.channel(address)?;
        let client = ValidatorWorkerClient::new(channel)
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
        Ok(client)
    }
    /// Runs the proxy. If either the public server or private server dies for whatever
    /// reason we'll kill the proxy.
    #[instrument(
        name = "GrpcProxy::run",
        skip_all,
        fields(
            public_address = %self.public_address(),
            internal_address = %self.internal_address(),
            metrics_address = %self.metrics_address(),
        ),
        err,
    )]
    pub async fn run(self, shutdown_signal: CancellationToken) -> Result<()> {
        info!("Starting proxy");
        let mut join_set = JoinSet::new();
        #[cfg(with_metrics)]
        monitoring_server::start_metrics(self.metrics_address(), shutdown_signal.clone());
        let (health_reporter, health_service) = tonic_health::server::health_reporter();
        health_reporter
            .set_serving::<ValidatorNodeServer<GrpcProxy<S>>>()
            .await;
        // Internal server: only the notifier service, reachable by workers.
        let internal_server = join_set.spawn_task(
            Server::builder()
                .add_service(self.as_notifier_service())
                .serve(self.internal_address())
                .in_current_span(),
        );
        let reflection_service = tonic_reflection::server::Builder::configure()
            .register_encoded_file_descriptor_set(linera_rpc::FILE_DESCRIPTOR_SET)
            .build_v1()?;
        // Public server: health, validator-node and reflection services, with
        // metrics middleware, permissive CORS and gRPC-Web support.
        let public_server = join_set.spawn_task(
            self.public_server()?
                .max_concurrent_streams(
                    // we subtract one to make sure
                    // that the value is not
                    // interpreted as "not set"
                    Some(u32::MAX - 1),
                )
                .layer(
                    ServiceBuilder::new()
                        .layer(PrometheusMetricsMiddlewareLayer)
                        .into_inner(),
                )
                .layer(
                    // enable
                    // [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS)
                    // for the proxy to originate anywhere
                    tower_http::cors::CorsLayer::permissive(),
                )
                .layer(GrpcWebLayer::new())
                .accept_http1(true)
                .add_service(health_service)
                .add_service(self.as_validator_node())
                .add_service(reflection_service)
                .serve_with_shutdown(self.public_address(), shutdown_signal.cancelled_owned())
                .in_current_span(),
        );
        // Whichever server finishes first (or fails) terminates the proxy.
        select! {
            internal_res = internal_server => internal_res??,
            public_res = public_server => public_res??,
        }
        Ok(())
    }
    /// Pre-configures the public server with no services attached.
    /// If a certificate and key are defined, creates a TLS server.
    fn public_server(&self) -> Result<Server> {
        match self.0.tls {
            TlsConfig::Tls => {
                use linera_rpc::{CERT_PEM, KEY_PEM};
                let identity = Identity::from_pem(CERT_PEM, KEY_PEM);
                let tls_config = ServerTlsConfig::new().identity(identity);
                Ok(Server::builder().tls_config(tls_config)?)
            }
            TlsConfig::ClearText => Ok(Server::builder()),
        }
    }
    /// Resolves the worker client for a proxyable request and unwraps the
    /// request body, mapping routing failures onto gRPC statuses.
    #[instrument(skip_all, fields(remote_addr = ?request.remote_addr(), chain_id = ?request.get_ref().chain_id()))]
    fn worker_client<R>(
        &self,
        request: Request<R>,
    ) -> Result<(ValidatorWorkerClient<Channel>, R), Status>
    where
        R: Debug + GrpcProxyable,
    {
        debug!("proxying request from {:?}", request.remote_addr());
        let inner = request.into_inner();
        let shard = self
            .shard_for(&inner)
            .ok_or_else(|| Status::not_found("could not find shard for message"))?;
        let client = self
            .worker_client_for_shard(&shard)
            .map_err(|_| Status::internal("could not connect to shard"))?;
        Ok((client, inner))
    }
    /// Bumps the per-method success/error counter for the outcome (when built
    /// with metrics) and passes the result through unchanged.
    #[allow(clippy::result_large_err)]
    fn log_and_return_proxy_request_outcome(
        result: Result<Response<ChainInfoResult>, Status>,
        method_name: &str,
    ) -> Result<Response<ChainInfoResult>, Status> {
        #![allow(unused_variables)]
        match result {
            Ok(chain_info_result) => {
                #[cfg(with_metrics)]
                metrics::PROXY_REQUEST_SUCCESS
                    .with_label_values(&[method_name])
                    .inc();
                Ok(chain_info_result)
            }
            Err(status) => {
                #[cfg(with_metrics)]
                metrics::PROXY_REQUEST_ERROR
                    .with_label_values(&[method_name])
                    .inc();
                Err(status)
            }
        }
    }
    /// Returns the appropriate gRPC status for the given [`ViewError`].
    fn view_error_to_status(err: ViewError) -> Status {
        let mut status = match &err {
            ViewError::BcsError(_) => Status::invalid_argument(err.to_string()),
            ViewError::StoreError { .. }
            | ViewError::TokioJoinError(_)
            | ViewError::TryLockError(_)
            | ViewError::InconsistentEntries
            | ViewError::PostLoadValuesError
            | ViewError::IoError(_) => Status::internal(err.to_string()),
            ViewError::KeyTooLong | ViewError::ArithmeticError(_) => {
                Status::out_of_range(err.to_string())
            }
            ViewError::NotFound(_) | ViewError::MissingEntries(_) => {
                Status::not_found(err.to_string())
            }
        };
        // Attach the original error as the status source for downstream inspection.
        status.set_source(Arc::new(err));
        status
    }
}
#[async_trait]
impl<S> ValidatorNode for GrpcProxy<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
type SubscribeStream = UnboundedReceiverStream<Result<Notification, Status>>;
#[instrument(skip_all, err(Display), fields(method = "handle_block_proposal"))]
async fn handle_block_proposal(
&self,
request: Request<BlockProposal>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_block_proposal(inner).await,
"handle_block_proposal",
)
}
#[instrument(skip_all, err(Display), fields(method = "handle_lite_certificate"))]
async fn handle_lite_certificate(
&self,
request: Request<LiteCertificate>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_lite_certificate(inner).await,
"handle_lite_certificate",
)
}
#[instrument(
skip_all,
err(Display),
fields(method = "handle_confirmed_certificate")
)]
async fn handle_confirmed_certificate(
&self,
request: Request<api::HandleConfirmedCertificateRequest>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_confirmed_certificate(inner).await,
"handle_confirmed_certificate",
)
}
#[instrument(
skip_all,
err(Display),
fields(method = "handle_validated_certificate")
)]
async fn handle_validated_certificate(
&self,
request: Request<api::HandleValidatedCertificateRequest>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_validated_certificate(inner).await,
"handle_validated_certificate",
)
}
#[instrument(skip_all, err(Display), fields(method = "handle_timeout_certificate"))]
async fn handle_timeout_certificate(
&self,
request: Request<api::HandleTimeoutCertificateRequest>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_timeout_certificate(inner).await,
"handle_timeout_certificate",
)
}
#[instrument(skip_all, err(Display), fields(method = "handle_chain_info_query"))]
async fn handle_chain_info_query(
&self,
request: Request<api::ChainInfoQuery>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
Self::log_and_return_proxy_request_outcome(
client.handle_chain_info_query(inner).await,
"handle_chain_info_query",
)
}
#[instrument(skip_all, err(Display), fields(method = "subscribe"))]
async fn subscribe(
&self,
request: Request<SubscriptionRequest>,
) -> Result<Response<Self::SubscribeStream>, Status> {
let subscription_request = request.into_inner();
let chain_ids = subscription_request
.chain_ids
.into_iter()
.map(ChainId::try_from)
.collect::<Result<Vec<ChainId>, _>>()?;
// The empty notification seems to be needed in some cases to force
// completion of HTTP2 headers.
let rx = self
.0
.notifier
.subscribe_with_ack(chain_ids, Ok(Notification::default()));
Ok(Response::new(UnboundedReceiverStream::new(rx)))
}
#[instrument(skip_all, err(Display))]
async fn get_version_info(
&self,
_request: Request<()>,
) -> Result<Response<VersionInfo>, Status> {
// We assume each shard is running the same version as the proxy
Ok(Response::new(linera_version::VersionInfo::default().into()))
}
#[instrument(skip_all, err(Display), fields(method = "get_network_description"))]
async fn get_network_description(
&self,
_request: Request<()>,
) -> Result<Response<NetworkDescription>, Status> {
let description = self
.0
.storage
.read_network_description()
.await
.map_err(Self::view_error_to_status)?
.ok_or_else(|| Status::not_found("Cannot find network description in the database"))?;
Ok(Response::new(description.into()))
}
#[instrument(skip_all, err(Display), fields(method = "get_shard_info"))]
async fn get_shard_info(
&self,
request: Request<api::ChainId>,
) -> Result<Response<api::ShardInfo>, Status> {
let chain_id = request.into_inner().try_into()?;
let shard_id = self.0.internal_config.get_shard_id(chain_id);
let total_shards = self.0.internal_config.shards.len();
let shard_info = api::ShardInfo {
shard_id: shard_id as u64,
total_shards: total_shards as u64,
};
Ok(Response::new(shard_info))
}
#[instrument(skip_all, err(Display), fields(method = "upload_blob"))]
async fn upload_blob(&self, request: Request<BlobContent>) -> Result<Response<BlobId>, Status> {
let content: linera_sdk::linera_base_types::BlobContent =
request.into_inner().try_into()?;
let blob = Blob::new(content);
let id = blob.id();
let result = self.0.storage.maybe_write_blobs(&[blob]).await;
if !result.map_err(Self::view_error_to_status)?[0] {
return Err(Status::not_found("Blob not found"));
}
Ok(Response::new(id.try_into()?))
}
#[instrument(skip_all, err(Display), fields(method = "download_blob"))]
async fn download_blob(
&self,
request: Request<BlobId>,
) -> Result<Response<BlobContent>, Status> {
let blob_id = request.into_inner().try_into()?;
let blob = self
.0
.storage
.read_blob(blob_id)
.await
.map_err(Self::view_error_to_status)?;
let blob = blob.ok_or_else(|| Status::not_found(format!("Blob not found {}", blob_id)))?;
Ok(Response::new(blob.into_content().try_into()?))
}
#[instrument(skip_all, err(Display), fields(method = "download_pending_blob"))]
async fn download_pending_blob(
&self,
request: Request<PendingBlobRequest>,
) -> Result<Response<PendingBlobResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
#[cfg_attr(not(with_metrics), expect(clippy::needless_match))]
match client.download_pending_blob(inner).await {
Ok(blob_result) => {
#[cfg(with_metrics)]
metrics::PROXY_REQUEST_SUCCESS
.with_label_values(&["download_pending_blob"])
.inc();
Ok(blob_result)
}
Err(status) => {
#[cfg(with_metrics)]
metrics::PROXY_REQUEST_ERROR
.with_label_values(&["download_pending_blob"])
.inc();
Err(status)
}
}
}
#[instrument(skip_all, err(Display), fields(method = "handle_pending_blob"))]
async fn handle_pending_blob(
&self,
request: Request<HandlePendingBlobRequest>,
) -> Result<Response<ChainInfoResult>, Status> {
let (mut client, inner) = self.worker_client(request)?;
#[cfg_attr(not(with_metrics), expect(clippy::needless_match))]
match client.handle_pending_blob(inner).await {
Ok(blob_result) => {
#[cfg(with_metrics)]
metrics::PROXY_REQUEST_SUCCESS
.with_label_values(&["handle_pending_blob"])
.inc();
Ok(blob_result)
}
Err(status) => {
#[cfg(with_metrics)]
metrics::PROXY_REQUEST_ERROR
.with_label_values(&["handle_pending_blob"])
.inc();
Err(status)
}
}
}
#[instrument(skip_all, err(Display), fields(method = "download_certificate"))]
async fn download_certificate(
&self,
request: Request<CryptoHash>,
) -> Result<Response<Certificate>, Status> {
let hash = request.into_inner().try_into()?;
let certificate: linera_chain::types::Certificate = self
.0
.storage
.read_certificate(hash)
.await
.map_err(Self::view_error_to_status)?
.ok_or(Status::not_found(hash.to_string()))?
.into();
Ok(Response::new(certificate.try_into()?))
}
#[instrument(skip_all, err(Display), fields(method = "download_certificates"))]
async fn download_certificates(
&self,
request: Request<CertificatesBatchRequest>,
) -> Result<Response<CertificatesBatchResponse>, Status> {
let hashes: Vec<linera_base::crypto::CryptoHash> = request
.into_inner()
.hashes
.into_iter()
.map(linera_base::crypto::CryptoHash::try_from)
.collect::<Result<Vec<linera_base::crypto::CryptoHash>, _>>()?;
// Use 70% of the max message size as a buffer capacity.
// Leave 30% as overhead.
let mut grpc_message_limiter: GrpcMessageLimiter<linera_chain::types::Certificate> =
GrpcMessageLimiter::new(GRPC_CHUNKED_MESSAGE_FILL_LIMIT);
let mut returned_certificates = vec![];
'outer: for batch in hashes.chunks(100) {
let certificates = self
.0
.storage
.read_certificates(batch.to_vec())
.await
.map_err(Self::view_error_to_status)?;
let certificates = match ResultReadCertificates::new(certificates, batch.to_vec()) {
ResultReadCertificates::Certificates(certificates) => certificates,
ResultReadCertificates::InvalidHashes(hashes) => {
return Err(Status::not_found(format!("{:?}", hashes)))
}
};
for certificate in certificates {
if grpc_message_limiter.fits::<Certificate>(certificate.clone().into())? {
returned_certificates.push(linera_chain::types::Certificate::from(certificate));
} else {
break 'outer;
}
}
}
Ok(Response::new(CertificatesBatchResponse::try_from(
returned_certificates,
)?))
}
#[instrument(
skip_all,
err(Display),
fields(method = "download_certificates_by_heights")
)]
async fn download_certificates_by_heights(
&self,
request: Request<api::DownloadCertificatesByHeightsRequest>,
) -> Result<Response<CertificatesBatchResponse>, Status> {
let original_request: CertificatesByHeightRequest = request.into_inner().try_into()?;
let chain_info_request = ChainInfoQuery::new(original_request.chain_id)
.with_sent_certificate_hashes_by_heights(original_request.heights);
// Use handle_chain_info_query to get the certificate hashes
let chain_info_response = self
.handle_chain_info_query(Request::new(chain_info_request.try_into()?))
.await?;
// Extract the ChainInfoResult from the response
let chain_info_result = chain_info_response.into_inner();
// Extract the certificate hashes from the ChainInfo
let hashes = match chain_info_result.inner {
Some(api::chain_info_result::Inner::ChainInfoResponse(response)) => {
let chain_info: ChainInfo =
bincode::deserialize(&response.chain_info).map_err(|e| {
Status::internal(format!("Failed to deserialize ChainInfo: {}", e))
})?;
chain_info.requested_sent_certificate_hashes
}
Some(api::chain_info_result::Inner::Error(error)) => {
let error =
bincode::deserialize(&error).unwrap_or_else(|err| NodeError::GrpcError {
error: format!("failed to unmarshal error message: {}", err),
});
return Err(Status::internal(format!(
"Chain info query failed: {error}"
)));
}
None => {
return Err(Status::internal("Empty chain info result"));
}
};
// Use download_certificates to get the actual certificates
let certificates_request = CertificatesBatchRequest {
hashes: hashes.into_iter().map(|h| h.into()).collect(),
};
self.download_certificates(Request::new(certificates_request))
.await
}
    /// Returns the raw (undeserialized) certificates for the given chain heights.
    ///
    /// First resolves the heights to certificate hashes by querying the shard via
    /// `handle_chain_info_query`, then reads the raw certificate bytes from local
    /// storage. The response is truncated (not an error) as soon as the next
    /// certificate would exceed the gRPC message-size budget; callers are expected
    /// to re-request the remaining heights.
    #[instrument(skip_all, err(Display))]
    async fn download_raw_certificates_by_heights(
        &self,
        request: Request<api::DownloadCertificatesByHeightsRequest>,
    ) -> Result<Response<api::RawCertificatesBatch>, Status> {
        let original_request: CertificatesByHeightRequest = request.into_inner().try_into()?;
        let chain_info_request = ChainInfoQuery::new(original_request.chain_id)
            .with_sent_certificate_hashes_by_heights(original_request.heights);
        // Use handle_chain_info_query to get the certificate hashes
        let chain_info_response = self
            .handle_chain_info_query(Request::new(chain_info_request.try_into()?))
            .await?;
        // Extract the ChainInfoResult from the response
        let chain_info_result = chain_info_response.into_inner();
        // Extract the certificate hashes from the ChainInfo
        let hashes = match chain_info_result.inner {
            Some(api::chain_info_result::Inner::ChainInfoResponse(response)) => {
                let chain_info: ChainInfo =
                    bincode::deserialize(&response.chain_info).map_err(|e| {
                        Status::internal(format!("Failed to deserialize ChainInfo: {}", e))
                    })?;
                chain_info.requested_sent_certificate_hashes
            }
            Some(api::chain_info_result::Inner::Error(error)) => {
                // A shard-side error is itself bincode-encoded; fall back to a
                // generic gRPC error if even that payload cannot be decoded.
                let error =
                    bincode::deserialize(&error).unwrap_or_else(|err| NodeError::GrpcError {
                        error: format!("failed to unmarshal error message: {}", err),
                    });
                return Err(Status::internal(format!(
                    "Chain info query failed: {error}"
                )));
            }
            None => {
                return Err(Status::internal("Empty chain info result"));
            }
        };
        // Use 70% of the max message size as a buffer capacity.
        // Leave 30% as overhead.
        let mut grpc_message_limiter: GrpcMessageLimiter<linera_chain::types::Certificate> =
            GrpcMessageLimiter::new(GRPC_CHUNKED_MESSAGE_FILL_LIMIT);
        let mut returned_certificates = vec![];
        // Read storage in batches of 100 hashes; stop at the first certificate that
        // would overflow the message budget ('outer breaks out of both loops).
        'outer: for batch in hashes.chunks(100) {
            let certificates: Vec<(Vec<u8>, Vec<u8>)> = self
                .0
                .storage
                .read_certificates_raw(batch.to_vec())
                .await
                .map_err(Self::view_error_to_status)?
                .into_iter()
                .collect();
            for (lite_cert_bytes, confirmed_block_bytes) in certificates {
                if grpc_message_limiter
                    .fits_raw(lite_cert_bytes.len() + confirmed_block_bytes.len())
                {
                    returned_certificates.push(RawCertificate {
                        lite_certificate: lite_cert_bytes,
                        confirmed_block: confirmed_block_bytes,
                    });
                } else {
                    break 'outer;
                }
            }
        }
        Ok(Response::new(RawCertificatesBatch {
            certificates: returned_certificates,
        }))
    }
#[instrument(skip_all, err(level = Level::WARN), fields(
method = "blob_last_used_by"
))]
async fn blob_last_used_by(
&self,
request: Request<BlobId>,
) -> Result<Response<CryptoHash>, Status> {
let blob_id = request.into_inner().try_into()?;
let blob_state = self
.0
.storage
.read_blob_state(blob_id)
.await
.map_err(Self::view_error_to_status)?;
let blob_state =
blob_state.ok_or_else(|| Status::not_found(format!("Blob not found {}", blob_id)))?;
let last_used_by = blob_state
.last_used_by
.ok_or_else(|| Status::not_found(format!("Blob not found {}", blob_id)))?;
Ok(Response::new(last_used_by.into()))
}
#[instrument(skip_all, err(level = Level::WARN), fields(
method = "blob_last_used_by_certificate"
))]
async fn blob_last_used_by_certificate(
&self,
request: Request<BlobId>,
) -> Result<Response<Certificate>, Status> {
let cert_hash = self.blob_last_used_by(request).await?;
let request = Request::new(cert_hash.into_inner());
self.download_certificate(request).await
}
#[instrument(skip_all, err(level = Level::WARN), fields(
method = "missing_blob_ids"
))]
async fn missing_blob_ids(
&self,
request: Request<BlobIds>,
) -> Result<Response<BlobIds>, Status> {
let blob_ids: Vec<linera_base::identifiers::BlobId> = request.into_inner().try_into()?;
let missing_blob_ids = self
.0
.storage
.missing_blobs(&blob_ids)
.await
.map_err(Self::view_error_to_status)?;
Ok(Response::new(missing_blob_ids.try_into()?))
}
}
#[async_trait]
impl<S> NotifierService for GrpcProxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Forwards an incoming notification to all subscribers of its chain.
    #[instrument(skip_all, err(Display), fields(method = "notify"))]
    async fn notify(&self, request: Request<Notification>) -> Result<Response<()>, Status> {
        let notification = request.into_inner();
        // The chain ID is a required field; reject requests that omit it.
        let Some(proto_chain_id) = notification.chain_id.clone() else {
            return Err(Status::invalid_argument("Missing field: chain_id."));
        };
        let chain_id = proto_chain_id.try_into()?;
        self.0.notifier.notify_chain(&chain_id, &Ok(notification));
        Ok(Response::new(()))
    }
}
/// A message limiter that keeps track of the remaining capacity in bytes.
///
/// Used while assembling batched gRPC responses so the serialized payload stays
/// below the configured message-size budget.
struct GrpcMessageLimiter<T> {
    // Number of bytes still available in the current message.
    remaining: usize,
    // Ties the limiter to the element type it budgets for; stores no data.
    _phantom: PhantomData<T>,
}
impl<T> GrpcMessageLimiter<T> {
fn new(limit: usize) -> Self {
Self {
remaining: limit,
_phantom: PhantomData,
}
}
#[cfg(test)]
fn empty() -> Self {
Self::new(0)
}
// Returns true if the element, after serialising to proto bytes, fits within the remaining capacity.
fn fits<U>(&mut self, el: T) -> Result<bool, GrpcProtoConversionError>
where
U: TryFrom<T, Error = GrpcProtoConversionError> + Message,
{
let required = U::try_from(el).map(|proto| proto.encoded_len())?;
Ok(self.fits_raw(required))
}
/// Adds the given number of bytes to the remaining capacity.
///
/// Returns whether we managed to fit the element.
fn fits_raw(&mut self, bytes_len: usize) -> bool {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/proxy/main.rs | linera-service/src/proxy/main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// Use jemalloc as the process-wide allocator when the `jemalloc` feature is enabled.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
// jemalloc configuration for memory profiling with jemalloc_pprof
// prof:true,prof_active:true - Enable profiling from start
// lg_prof_sample:19 - Sample every 512KB for good detail/overhead balance
// Linux/other platforms: use unprefixed malloc (with unprefixed_malloc_on_supported_platforms)
// jemalloc reads this exported symbol at startup to configure profiling.
#[cfg(all(feature = "memory-profiling", not(target_os = "macos")))]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
// macOS: use prefixed malloc (without unprefixed_malloc_on_supported_platforms)
// Same configuration, but under the `_rjem_`-prefixed symbol used on macOS.
#[cfg(all(feature = "memory-profiling", target_os = "macos"))]
#[allow(non_upper_case_globals)]
#[export_name = "_rjem_malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
use std::{net::SocketAddr, path::PathBuf, time::Duration};
use anyhow::{anyhow, bail, ensure, Result};
use async_trait::async_trait;
use futures::{FutureExt as _, SinkExt, StreamExt};
use linera_base::listen_for_shutdown_signals;
use linera_client::config::ValidatorServerConfig;
use linera_core::{node::NodeError, JoinSetExt as _};
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_rpc::{
config::{
NetworkProtocol, ShardConfig, ValidatorInternalNetworkPreConfig,
ValidatorPublicNetworkPreConfig,
},
simple::{MessageHandler, TransportProtocol},
RpcMessage,
};
use linera_sdk::linera_base_types::Blob;
use linera_service::{
storage::{CommonStorageOptions, Runnable, StorageConfig},
util,
};
use linera_storage::{ResultReadCertificates, Storage};
use tokio::task::JoinSet;
use tokio_util::sync::CancellationToken;
use tracing::{error, info, instrument};
mod grpc;
use grpc::GrpcProxy;
// NOTE: the `///` doc comments on this struct's fields double as clap help text;
// changing them changes the CLI `--help` output.
/// Options for running the proxy.
#[derive(clap::Parser, Debug, Clone)]
#[command(
    name = "Linera Proxy",
    about = "A proxy to redirect incoming requests to Linera Server shards",
    version = linera_version::VersionInfo::default_clap_str(),
)]
pub struct ProxyOptions {
    /// Path to server configuration.
    config_path: PathBuf,
    /// Timeout for sending queries (ms)
    #[arg(long = "send-timeout-ms",
        default_value = "4000",
        value_parser = util::parse_millis,
        env = "LINERA_PROXY_SEND_TIMEOUT")]
    send_timeout: Duration,
    /// Timeout for receiving responses (ms)
    #[arg(long = "recv-timeout-ms",
        default_value = "4000",
        value_parser = util::parse_millis,
        env = "LINERA_PROXY_RECV_TIMEOUT")]
    recv_timeout: Duration,
    /// The number of Tokio worker threads to use.
    #[arg(long, env = "LINERA_PROXY_TOKIO_THREADS")]
    tokio_threads: Option<usize>,
    /// The number of Tokio blocking threads to use.
    #[arg(long, env = "LINERA_PROXY_TOKIO_BLOCKING_THREADS")]
    tokio_blocking_threads: Option<usize>,
    /// Storage configuration for the blockchain history, chain states and binary blobs.
    #[arg(long = "storage")]
    storage_config: StorageConfig,
    /// Common storage options.
    #[command(flatten)]
    common_storage_options: CommonStorageOptions,
    /// Runs a specific proxy instance.
    // `None` defaults to instance 0 (see `ProxyContext::from_options`).
    #[arg(long)]
    id: Option<usize>,
    /// OpenTelemetry OTLP exporter endpoint (requires opentelemetry feature).
    #[arg(long, env = "LINERA_OTLP_EXPORTER_ENDPOINT")]
    otlp_exporter_endpoint: Option<String>,
}
/// A Linera Proxy, either gRPC or over 'Simple Transport', meaning TCP or UDP.
/// The proxy can be configured to have a gRPC ingress and egress, or a combination
/// of TCP / UDP ingress and egress.
enum Proxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // TCP/UDP ingress and egress (boxed, which keeps the enum itself small).
    Simple(Box<SimpleProxy<S>>),
    // gRPC ingress and egress.
    Grpc(GrpcProxy<S>),
}
/// Configuration needed to construct a [`Proxy`], derived from [`ProxyOptions`].
struct ProxyContext {
    // Full validator server configuration loaded from the config path.
    config: ValidatorServerConfig,
    // Timeout for sending queries to a shard.
    send_timeout: Duration,
    // Timeout for receiving responses from a shard.
    recv_timeout: Duration,
    // Index of this proxy instance within the internal network's proxy list.
    id: usize,
}
impl ProxyContext {
    /// Builds a proxy context from parsed command-line options, loading the
    /// validator server configuration from the configured path.
    pub fn from_options(options: &ProxyOptions) -> Result<Self> {
        let config = util::read_json(&options.config_path)?;
        // Default to the first proxy instance when no explicit id was given.
        let id = options.id.unwrap_or(0);
        let context = Self {
            config,
            send_timeout: options.send_timeout,
            recv_timeout: options.recv_timeout,
            id,
        };
        Ok(context)
    }
}
#[async_trait]
impl Runnable for ProxyContext {
    type Output = Result<(), anyhow::Error>;
    /// Entry point invoked by the storage runner once a `Storage` backend exists.
    ///
    /// Installs a shutdown-signal listener, builds the concrete proxy flavor from
    /// this context, and runs it until completion or shutdown.
    async fn run<S>(self, storage: S) -> Result<(), anyhow::Error>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let shutdown_notifier = CancellationToken::new();
        // Cancel the token when a shutdown signal arrives, so either proxy flavor
        // can terminate gracefully.
        tokio::spawn(listen_for_shutdown_signals(shutdown_notifier.clone()));
        let proxy = Proxy::from_context(self, storage)?;
        match proxy {
            Proxy::Simple(simple_proxy) => simple_proxy.run(shutdown_notifier).await,
            Proxy::Grpc(grpc_proxy) => grpc_proxy.run(shutdown_notifier).await,
        }
    }
}
impl<S> Proxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Constructs and configures the [`Proxy`] given [`ProxyContext`].
    ///
    /// The internal (shard-facing) and external (client-facing) networks must use
    /// the same protocol family: both gRPC, or both 'Simple' (TCP/UDP). Mixed
    /// combinations are rejected with an error.
    fn from_context(context: ProxyContext, storage: S) -> Result<Self> {
        let internal_protocol = context.config.internal_network.protocol;
        let external_protocol = context.config.validator.network.protocol;
        let proxy = match (internal_protocol, external_protocol) {
            // gRPC on both sides; `tls` comes from the public network config.
            (NetworkProtocol::Grpc { .. }, NetworkProtocol::Grpc(tls)) => {
                Self::Grpc(GrpcProxy::new(
                    context.config.internal_network,
                    context.send_timeout,
                    context.recv_timeout,
                    tls,
                    storage,
                    context.id,
                ))
            }
            // TCP/UDP on both sides; each side keeps its own transport choice.
            (
                NetworkProtocol::Simple(internal_transport),
                NetworkProtocol::Simple(public_transport),
            ) => Self::Simple(Box::new(SimpleProxy {
                internal_config: context
                    .config
                    .internal_network
                    .clone_with_protocol(internal_transport),
                public_config: context
                    .config
                    .validator
                    .network
                    .clone_with_protocol(public_transport),
                send_timeout: context.send_timeout,
                recv_timeout: context.recv_timeout,
                storage,
                id: context.id,
            })),
            _ => {
                bail!(
                    "network protocol mismatch: cannot have {} and {} ",
                    internal_protocol,
                    external_protocol,
                );
            }
        };
        Ok(proxy)
    }
}
/// State for a proxy speaking the 'Simple' (TCP/UDP) transport on both the
/// public and the internal network.
#[derive(Debug, Clone)]
pub struct SimpleProxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Network configuration facing external clients.
    public_config: ValidatorPublicNetworkPreConfig<TransportProtocol>,
    // Network configuration facing the validator's own shards and proxies.
    internal_config: ValidatorInternalNetworkPreConfig<TransportProtocol>,
    // Timeout for sending a query to a shard.
    send_timeout: Duration,
    // Timeout for awaiting a shard's response.
    recv_timeout: Duration,
    // Storage backend used to answer local queries (blobs, certificates, ...).
    storage: S,
    // Index of this proxy instance in `internal_config.proxies`.
    id: usize,
}
#[async_trait]
impl<S> MessageHandler for SimpleProxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Handles one inbound RPC message: answers "local" messages from the proxy's
    /// own storage/config, and forwards everything else to the shard responsible
    /// for the message's chain. Failures are logged and yield no response.
    #[instrument(skip_all, fields(chain_id = ?message.target_chain_id()))]
    async fn handle_message(&mut self, message: RpcMessage) -> Option<RpcMessage> {
        if message.is_local_message() {
            match self.try_local_message(message).await {
                Ok(maybe_response) => {
                    return maybe_response;
                }
                Err(error) => {
                    error!(error = %error, "Failed to handle local message");
                    return None;
                }
            }
        }
        // Non-local messages must carry a chain ID to be routable to a shard.
        let Some(chain_id) = message.target_chain_id() else {
            error!("Can't proxy message without chain ID");
            return None;
        };
        let shard = self.internal_config.get_shard_for(chain_id).clone();
        let protocol = self.internal_config.protocol;
        // `shard` is cloned into the call because the error path below still
        // needs `shard.address()` for the log message.
        match Self::try_proxy_message(
            message,
            shard.clone(),
            protocol,
            self.send_timeout,
            self.recv_timeout,
        )
        .await
        {
            Ok(maybe_response) => maybe_response,
            Err(error) => {
                error!(%error, "Failed to proxy message to {}", shard.address());
                None
            }
        }
    }
}
impl<S> SimpleProxy<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Runs the proxy server on the public listen address until shutdown, then
    /// waits for all spawned tasks to finish.
    // NOTE(review): metrics are started on the same address as the public
    // listener (`get_listen_address`); `metrics_port()` is only used for the log
    // field above — confirm this is intentional.
    #[instrument(name = "SimpleProxy::run", skip_all, fields(port = self.public_config.port, metrics_port = self.metrics_port()), err)]
    async fn run(self, shutdown_signal: CancellationToken) -> Result<()> {
        info!("Starting proxy");
        let mut join_set = JoinSet::new();
        let address = self.get_listen_address();
        #[cfg(with_metrics)]
        monitoring_server::start_metrics(address, shutdown_signal.clone());
        self.public_config
            .protocol
            .spawn_server(address, self, shutdown_signal, &mut join_set)
            .join()
            .await?;
        join_set.await_all_tasks().await;
        Ok(())
    }
    // Public port of this proxy instance; panics if `id` is out of range, since
    // a misconfigured instance id is unrecoverable.
    fn port(&self) -> u16 {
        self.internal_config
            .proxies
            .get(self.id)
            .unwrap_or_else(|| panic!("proxy with id {} must be present", self.id))
            .public_port
    }
    // Metrics port of this proxy instance; same panic policy as `port`.
    fn metrics_port(&self) -> u16 {
        self.internal_config
            .proxies
            .get(self.id)
            .unwrap_or_else(|| panic!("proxy with id {} must be present", self.id))
            .metrics_port
    }
    // Binds on all interfaces at the instance's public port.
    fn get_listen_address(&self) -> SocketAddr {
        SocketAddr::from(([0, 0, 0, 0], self.port()))
    }
    /// Sends `message` to the given shard and awaits at most one response, with
    /// separate send/receive timeouts. A closed connection yields `Ok(None)`.
    async fn try_proxy_message(
        message: RpcMessage,
        shard: ShardConfig,
        protocol: TransportProtocol,
        send_timeout: Duration,
        recv_timeout: Duration,
    ) -> Result<Option<RpcMessage>> {
        let mut connection = protocol.connect((shard.host, shard.port)).await?;
        // First `?` is the timeout, second is the send itself.
        linera_base::time::timer::timeout(send_timeout, connection.send(message)).await??;
        // `transpose` turns Option<Result<..>> into Result<Option<..>> so a
        // stream error propagates while end-of-stream becomes `None`.
        let message = linera_base::time::timer::timeout(recv_timeout, connection.next())
            .await?
            .transpose()?;
        Ok(message)
    }
    /// Answers messages the proxy can serve itself (version/network info, blob
    /// and certificate lookups) from local storage and configuration. Messages
    /// that must be handled by a shard or are responses return
    /// `NodeError::UnexpectedMessage`.
    async fn try_local_message(&self, message: RpcMessage) -> Result<Option<RpcMessage>> {
        use RpcMessage::*;
        match message {
            VersionInfoQuery => {
                // We assume each shard is running the same version as the proxy
                Ok(Some(RpcMessage::VersionInfoResponse(
                    linera_version::VersionInfo::default().into(),
                )))
            }
            NetworkDescriptionQuery => {
                let description = self
                    .storage
                    .read_network_description()
                    .await?
                    .ok_or_else(|| anyhow!("Cannot find network description in the database"))?;
                Ok(Some(RpcMessage::NetworkDescriptionResponse(Box::new(
                    description,
                ))))
            }
            ShardInfoQuery(chain_id) => {
                let shard_id = self.internal_config.get_shard_id(chain_id);
                let total_shards = self.internal_config.shards.len();
                let shard_info = linera_rpc::ShardInfo {
                    shard_id,
                    total_shards,
                };
                Ok(Some(RpcMessage::ShardInfoResponse(shard_info)))
            }
            UploadBlob(content) => {
                let blob = Blob::new(*content);
                let id = blob.id();
                // `maybe_write_blobs` returns one flag per blob; `false` means the
                // write did not happen.
                ensure!(
                    self.storage.maybe_write_blobs(&[blob]).await?[0],
                    "Blob not found"
                );
                Ok(Some(RpcMessage::UploadBlobResponse(Box::new(id))))
            }
            DownloadBlob(blob_id) => {
                let blob = self.storage.read_blob(*blob_id).await?;
                let blob = blob.ok_or_else(|| anyhow!("Blob not found {}", blob_id))?;
                let content = blob.into_content();
                Ok(Some(RpcMessage::DownloadBlobResponse(Box::new(content))))
            }
            DownloadConfirmedBlock(hash) => {
                let block = self.storage.read_confirmed_block(*hash).await?;
                let block = block.ok_or_else(|| anyhow!("Missing confirmed block {hash}"))?;
                Ok(Some(RpcMessage::DownloadConfirmedBlockResponse(Box::new(
                    block,
                ))))
            }
            DownloadCertificates(hashes) => {
                let certificates = self.storage.read_certificates(hashes.clone()).await?;
                let certificates = match ResultReadCertificates::new(certificates, hashes) {
                    ResultReadCertificates::Certificates(certificates) => certificates,
                    ResultReadCertificates::InvalidHashes(hashes) => {
                        bail!("Missing certificates: {hashes:?}")
                    }
                };
                Ok(Some(RpcMessage::DownloadCertificatesResponse(certificates)))
            }
            DownloadCertificatesByHeights(chain_id, heights) => {
                // Ask the responsible shard for the certificate hashes at the
                // requested heights, then read the certificates locally.
                let shard = self.internal_config.get_shard_for(chain_id).clone();
                let protocol = self.internal_config.protocol;
                let chain_info_query = RpcMessage::ChainInfoQuery(Box::new(
                    linera_core::data_types::ChainInfoQuery::new(chain_id)
                        .with_sent_certificate_hashes_by_heights(heights),
                ));
                let hashes = match Self::try_proxy_message(
                    chain_info_query,
                    shard.clone(),
                    protocol,
                    self.send_timeout,
                    self.recv_timeout,
                )
                .await
                {
                    Ok(Some(RpcMessage::ChainInfoResponse(response))) => {
                        response.info.requested_sent_certificate_hashes
                    }
                    _ => bail!("Failed to retrieve sent certificate hashes"),
                };
                let certificates = self.storage.read_certificates(hashes.clone()).await?;
                let certificates = match ResultReadCertificates::new(certificates, hashes) {
                    ResultReadCertificates::Certificates(certificates) => certificates,
                    ResultReadCertificates::InvalidHashes(hashes) => {
                        bail!("Missing certificates: {hashes:?}")
                    }
                };
                Ok(Some(RpcMessage::DownloadCertificatesByHeightsResponse(
                    certificates,
                )))
            }
            BlobLastUsedBy(blob_id) => {
                let blob_state = self.storage.read_blob_state(*blob_id).await?;
                let blob_state = blob_state.ok_or_else(|| anyhow!("Blob not found {}", blob_id))?;
                let last_used_by = blob_state
                    .last_used_by
                    .ok_or_else(|| anyhow!("Blob not found {}", blob_id))?;
                Ok(Some(RpcMessage::BlobLastUsedByResponse(Box::new(
                    last_used_by,
                ))))
            }
            BlobLastUsedByCertificate(blob_id) => {
                let blob_state = self.storage.read_blob_state(*blob_id).await?;
                let blob_state = blob_state.ok_or_else(|| anyhow!("Blob not found {}", blob_id))?;
                let last_used_by = blob_state
                    .last_used_by
                    .ok_or_else(|| anyhow!("Blob not found {}", blob_id))?;
                let certificate = self
                    .storage
                    .read_certificate(last_used_by)
                    .await?
                    .ok_or_else(|| anyhow!("Certificate not found {}", last_used_by))?;
                Ok(Some(RpcMessage::BlobLastUsedByCertificateResponse(
                    Box::new(certificate),
                )))
            }
            MissingBlobIds(blob_ids) => Ok(Some(RpcMessage::MissingBlobIdsResponse(
                self.storage.missing_blobs(&blob_ids).await?,
            ))),
            // Everything below is either shard-bound or a response; a proxy
            // receiving one of these as a "local" message is a protocol violation.
            BlockProposal(_)
            | LiteCertificate(_)
            | TimeoutCertificate(_)
            | ConfirmedCertificate(_)
            | ValidatedCertificate(_)
            | ChainInfoQuery(_)
            | CrossChainRequest(_)
            | Vote(_)
            | Error(_)
            | ChainInfoResponse(_)
            | VersionInfoResponse(_)
            | NetworkDescriptionResponse(_)
            | ShardInfoResponse(_)
            | DownloadBlobResponse(_)
            | DownloadPendingBlob(_)
            | DownloadPendingBlobResponse(_)
            | HandlePendingBlob(_)
            | BlobLastUsedByResponse(_)
            | BlobLastUsedByCertificateResponse(_)
            | MissingBlobIdsResponse(_)
            | DownloadConfirmedBlockResponse(_)
            | DownloadCertificatesResponse(_)
            | UploadBlobResponse(_)
            | DownloadCertificatesByHeightsResponse(_) => {
                Err(anyhow::Error::from(NodeError::UnexpectedMessage))
            }
        }
    }
}
/// Parses CLI options, builds a Tokio runtime accordingly, and runs the proxy.
fn main() -> Result<()> {
    let options = <ProxyOptions as clap::Parser>::parse();
    // Exactly one worker thread maps to the current-thread runtime; any other
    // setting (including the default) uses the multi-threaded runtime.
    let mut builder = match options.tokio_threads {
        Some(1) => tokio::runtime::Builder::new_current_thread(),
        Some(threads) => {
            let mut multi = tokio::runtime::Builder::new_multi_thread();
            multi.worker_threads(threads);
            multi
        }
        None => tokio::runtime::Builder::new_multi_thread(),
    };
    if let Some(blocking_threads) = options.tokio_blocking_threads {
        builder.max_blocking_threads(blocking_threads);
    }
    builder.enable_all().build()?.block_on(options.run())
}
impl ProxyOptions {
    /// Runs the proxy: initializes tracing (named after the validator's public
    /// key), resolves the storage configuration, and hands control to
    /// [`ProxyContext`] via the storage runner.
    async fn run(&self) -> Result<()> {
        // Read the server config to derive the tracing service name. Propagate a
        // proper error instead of panicking on a missing or malformed file.
        // NOTE(review): `ProxyContext::from_options` reads the same file again
        // below; consider passing the parsed config through instead.
        let server_config: ValidatorServerConfig = util::read_json(&self.config_path)?;
        let public_key = &server_config.validator.public_key;
        linera_service::tracing::opentelemetry::init(
            &format!("validator-{public_key}-proxy"),
            self.otlp_exporter_endpoint.as_deref(),
        );
        let store_config = self
            .storage_config
            .add_common_storage_options(&self.common_storage_options)?;
        // Proxies are part of validator infrastructure and should not output contract logs.
        let allow_application_logs = false;
        store_config
            .run_with_storage(
                None,
                allow_application_logs,
                ProxyContext::from_options(self)?,
            )
            .boxed()
            .await?
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/tracing/chrome.rs | linera-service/src/tracing/chrome.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use tracing_subscriber::{layer::SubscriberExt as _, util::SubscriberInitExt as _};
/// Guard that flushes Chrome trace file when dropped.
///
/// Store this guard in a variable that lives for the duration of your program.
/// When it's dropped, the trace file will be completed and closed.
// Re-exported alias so callers don't have to depend on `tracing_chrome` directly.
pub type ChromeTraceGuard = tracing_chrome::FlushGuard;
/// Builds a tracing subscriber that writes Chrome trace events to `writer`.
///
/// The returned subscriber is intended to be installed with `with_default`
/// (rather than globally) to avoid clashing with other subscribers; the
/// returned guard flushes the trace output when dropped.
pub fn build(
    log_name: &str,
    writer: impl std::io::Write + Send + 'static,
) -> (impl tracing::Subscriber + Send + Sync, ChromeTraceGuard) {
    // Standard env-driven layers: filter, optional log file, stderr.
    let config = crate::tracing::get_env_config(log_name);
    let file_layer = config.maybe_log_file_layer();
    let stderr_layer = config.stderr_layer();
    // Chrome trace layer plus the guard that flushes it on drop.
    let (chrome_layer, flush_guard) = tracing_chrome::ChromeLayerBuilder::new()
        .writer(writer)
        .build();
    let subscriber = tracing_subscriber::registry()
        .with(chrome_layer)
        .with(config.env_filter)
        .with(file_layer)
        .with(stderr_layer);
    (subscriber, flush_guard)
}
/// Initializes global tracing with the Chrome Trace JSON exporter.
///
/// Returns a guard that must be kept alive for the duration of the program;
/// dropping it flushes and completes the trace data. The output can be viewed
/// in Chrome at `chrome://tracing` or in Perfetto at <https://ui.perfetto.dev>.
///
/// Uses `try_init()` so that an already-installed global subscriber does not
/// cause a panic; in that case tracing may not work as expected.
pub fn init(log_name: &str, writer: impl std::io::Write + Send + 'static) -> ChromeTraceGuard {
    let (subscriber, flush_guard) = build(log_name, writer);
    // Ignore failure: a global subscriber may already be installed.
    drop(subscriber.try_init());
    flush_guard
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/tracing/opentelemetry.rs | linera-service/src/tracing/opentelemetry.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! OpenTelemetry integration for tracing with OTLP export and Chrome trace export.
use opentelemetry::{global, trace::TracerProvider};
use opentelemetry_otlp::{SpanExporter, WithExportConfig};
#[cfg(with_testing)]
use opentelemetry_sdk::trace::InMemorySpanExporter;
use opentelemetry_sdk::{trace::SdkTracerProvider, Resource};
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_subscriber::{
filter::{filter_fn, FilterFn},
layer::Layer,
prelude::__tracing_subscriber_SubscriberExt as _,
util::SubscriberInitExt,
};
/// Builds a filter that drops any span declaring an `opentelemetry.skip` field.
///
/// The tracing metadata API only exposes which fields a span *declares*, not
/// their values, so a span is excluded whenever the field is present at all,
/// regardless of its value. Non-span events are filtered out as well.
///
/// Usage examples:
/// ```ignore
/// // Always skip this span
/// #[tracing::instrument(fields(opentelemetry.skip = true))]
/// fn internal_helper() { }
///
/// // "Conditionally" skip based on a parameter — note that because the field
/// // must be declared on the span, the span is excluded no matter what value
/// // `should_skip` has at runtime.
/// #[tracing::instrument(fields(opentelemetry.skip = should_skip))]
/// fn my_function(should_skip: bool) { }
/// ```
fn opentelemetry_skip_filter() -> FilterFn<impl Fn(&tracing::Metadata<'_>) -> bool> {
    filter_fn(|metadata| {
        // Keep only spans, and only those that do not declare the skip field.
        metadata.is_span() && metadata.fields().field("opentelemetry.skip").is_none()
    })
}
/// Initializes tracing with a custom OpenTelemetry tracer provider.
///
/// This is an internal function used by both production and test code.
/// Installs the provider globally, wraps its tracer in an OpenTelemetry layer
/// (filtered by `opentelemetry_skip_filter`), and registers the global tracing
/// subscriber together with the usual env-driven layers.
fn init_with_tracer_provider(log_name: &str, tracer_provider: SdkTracerProvider) {
    // Make the provider available process-wide (e.g. for context propagation).
    global::set_tracer_provider(tracer_provider.clone());
    let tracer = tracer_provider.tracer("linera");
    let opentelemetry_layer =
        OpenTelemetryLayer::new(tracer).with_filter(opentelemetry_skip_filter());
    let config = crate::tracing::get_env_config(log_name);
    let maybe_log_file_layer = config.maybe_log_file_layer();
    let stderr_layer = config.stderr_layer();
    // `init` (not `try_init`): this will panic if a global subscriber is
    // already installed.
    tracing_subscriber::registry()
        .with(opentelemetry_layer)
        .with(config.env_filter)
        .with(maybe_log_file_layer)
        .with(stderr_layer)
        .init();
}
/// Builds an OpenTelemetry layer with the opentelemetry.skip filter.
///
/// This is used for testing to avoid setting the global subscriber.
/// Returns the layer, exporter, and tracer provider (which must be kept alive and shutdown).
///
/// The in-memory exporter lets tests inspect exported spans; the simple (non-
/// batching) exporter is used so spans are available immediately.
#[cfg(with_testing)]
pub fn build_opentelemetry_layer_with_test_exporter(
    log_name: &str,
) -> (
    impl tracing_subscriber::Layer<tracing_subscriber::Registry>,
    InMemorySpanExporter,
    SdkTracerProvider,
) {
    let exporter = InMemorySpanExporter::default();
    // Keep a handle for the caller; the other clone is consumed by the provider.
    let exporter_clone = exporter.clone();
    let resource = Resource::builder()
        .with_service_name(log_name.to_string())
        .build();
    let tracer_provider = SdkTracerProvider::builder()
        .with_resource(resource)
        .with_simple_exporter(exporter)
        .with_sampler(opentelemetry_sdk::trace::Sampler::AlwaysOn)
        .build();
    // NOTE: despite not installing a subscriber, this still mutates the global
    // tracer provider.
    global::set_tracer_provider(tracer_provider.clone());
    let tracer = tracer_provider.tracer("linera");
    let opentelemetry_layer =
        OpenTelemetryLayer::new(tracer).with_filter(opentelemetry_skip_filter());
    (opentelemetry_layer, exporter_clone, tracer_provider)
}
/// Initializes tracing with OpenTelemetry OTLP exporter.
///
/// Exports traces using the OTLP protocol to any OpenTelemetry-compatible backend.
/// Requires the `opentelemetry` feature.
/// Only enables OpenTelemetry if LINERA_OTLP_EXPORTER_ENDPOINT env var is set.
/// This prevents DNS errors in environments where OpenTelemetry is not deployed.
///
/// The explicit `otlp_endpoint` parameter takes precedence over the environment
/// variable; empty strings are treated as "not provided". Without an endpoint,
/// this falls back to plain `crate::tracing::init`.
pub fn init(log_name: &str, otlp_endpoint: Option<&str>) {
    // Check if OpenTelemetry endpoint is configured via parameter or env var
    let endpoint = match otlp_endpoint {
        Some(ep) if !ep.is_empty() => ep.to_string(),
        _ => match std::env::var("LINERA_OTLP_EXPORTER_ENDPOINT") {
            Ok(ep) if !ep.is_empty() => ep,
            _ => {
                // eprintln! rather than tracing: no subscriber is installed yet.
                eprintln!(
                    "LINERA_OTLP_EXPORTER_ENDPOINT not set and no endpoint provided. \
                    Falling back to standard tracing without OpenTelemetry support."
                );
                crate::tracing::init(log_name);
                return;
            }
        },
    };
    let resource = Resource::builder()
        .with_service_name(log_name.to_string())
        .build();
    let exporter = SpanExporter::builder()
        .with_tonic()
        .with_endpoint(endpoint)
        .build()
        .expect("Failed to create OTLP exporter");
    // Batch exporter: spans are buffered and shipped asynchronously.
    let tracer_provider = SdkTracerProvider::builder()
        .with_resource(resource)
        .with_batch_exporter(exporter)
        .with_sampler(opentelemetry_sdk::trace::Sampler::AlwaysOn)
        .build();
    init_with_tracer_provider(log_name, tracer_provider);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/tracing/mod.rs | linera-service/src/tracing/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides unified handling for tracing subscribers within Linera binaries.
pub mod chrome;
pub mod opentelemetry;
use std::{
env,
fs::{File, OpenOptions},
path::Path,
sync::Arc,
};
use is_terminal::IsTerminal as _;
use tracing::Subscriber;
use tracing_subscriber::{
fmt::{
self,
format::{FmtSpan, Format, Full},
time::FormatTime,
FormatFields, MakeWriter,
},
layer::{Layer, SubscriberExt as _},
registry::LookupSpan,
util::SubscriberInitExt,
EnvFilter,
};
/// Tracing configuration gathered from environment variables by `get_env_config`.
pub(crate) struct EnvConfig {
    // Verbosity filter built from `RUST_LOG` (defaults to INFO).
    pub(crate) env_filter: EnvFilter,
    // Span life-cycle events to log, from `RUST_LOG_SPAN_EVENTS`.
    span_events: FmtSpan,
    // Output format selector from `RUST_LOG_FORMAT` (`plain`, `json`, `pretty`).
    format: Option<String>,
    // Whether stderr output may use ANSI colors.
    color_output: bool,
    // Base name of the optional log file (see `open_log_file`).
    log_name: String,
}
impl EnvConfig {
    /// Builds the stderr logging layer, honoring the configured span events,
    /// output format, and color setting.
    pub(crate) fn stderr_layer<S>(&self) -> Box<dyn Layer<S> + Send + Sync>
    where
        S: Subscriber + for<'span> LookupSpan<'span>,
    {
        let layer = fmt::layer()
            .with_span_events(self.span_events.clone())
            .with_writer(std::io::stderr)
            .with_ansi(self.color_output);
        prepare_formatted_layer(self.format.as_deref(), layer)
    }
    /// Builds the log-file layer, or `None` when `LINERA_LOG_DIR` is unset.
    /// File output never uses ANSI colors.
    pub(crate) fn maybe_log_file_layer<S>(&self) -> Option<Box<dyn Layer<S> + Send + Sync>>
    where
        S: Subscriber + for<'span> LookupSpan<'span>,
    {
        let file_writer = open_log_file(&self.log_name)?;
        let layer = fmt::layer()
            .with_span_events(self.span_events.clone())
            .with_writer(Arc::new(file_writer))
            .with_ansi(false);
        Some(prepare_formatted_layer(self.format.as_deref(), layer))
    }
}
/// Initializes tracing in a standard way.
///
/// The environment variables `RUST_LOG`, `RUST_LOG_SPAN_EVENTS`, and `RUST_LOG_FORMAT`
/// can be used to control the verbosity, the span event verbosity, and the output format,
/// respectively.
///
/// The `LINERA_LOG_DIR` environment variable can be used to configure a directory to
/// store log files. If it is set, a file named `log_name` with the `log` extension is
/// created in the directory.
pub fn init(log_name: &str) {
    let config = get_env_config(log_name);
    // Optional file layer plus the always-present stderr layer.
    let file_layer = config.maybe_log_file_layer();
    let stderr = config.stderr_layer();
    tracing_subscriber::registry()
        .with(config.env_filter)
        .with(file_layer)
        .with(stderr)
        .init();
}
/// Reads the tracing-related environment variables into an `EnvConfig`.
pub(crate) fn get_env_config(log_name: &str) -> EnvConfig {
    // `RUST_LOG` controls verbosity; missing or invalid directives fall back to INFO.
    let env_filter = EnvFilter::builder()
        .with_default_directive(tracing_subscriber::filter::LevelFilter::INFO.into())
        .from_env_lossy();
    // `RUST_LOG_SPAN_EVENTS` selects which span life-cycle events are logged.
    let span_events = match std::env::var("RUST_LOG_SPAN_EVENTS") {
        Ok(events) => fmt_span_from_str(&events),
        Err(_) => FmtSpan::NONE,
    };
    let format = std::env::var("RUST_LOG_FORMAT").ok();
    // Honor the NO_COLOR convention, and only colorize interactive terminals.
    let no_color = std::env::var("NO_COLOR").is_ok_and(|x| !x.is_empty());
    let color_output = !no_color && std::io::stderr().is_terminal();
    EnvConfig {
        env_filter,
        span_events,
        format,
        color_output,
        log_name: log_name.to_owned(),
    }
}
/// Opens a log file for writing.
///
/// The location of the file is determined by the `LINERA_LOG_DIR` environment variable,
/// and its name by the `log_name` parameter (with a `log` extension).
///
/// Returns [`None`] if the `LINERA_LOG_DIR` environment variable is not set.
pub(crate) fn open_log_file(log_name: &str) -> Option<File> {
    let log_directory = env::var_os("LINERA_LOG_DIR")?;
    let log_file_path = Path::new(&log_directory)
        .join(log_name)
        .with_extension("log");
    // Append so that restarts extend, rather than truncate, an existing log.
    let file = OpenOptions::new()
        .append(true)
        .create(true)
        .open(log_file_path)
        .expect("Failed to open log file for writing");
    Some(file)
}
/// Applies a requested `formatting` to the log output of the provided `layer`.
///
/// `formatting` is the value of `RUST_LOG_FORMAT`; `None` defaults to `plain`.
///
/// Returns a boxed [`Layer`] with the formatting applied to the original `layer`.
///
/// # Panics
///
/// Panics if `formatting` is not one of `plain`, `json`, or `pretty`.
pub(crate) fn prepare_formatted_layer<S, N, W, T>(
    formatting: Option<&str>,
    layer: fmt::Layer<S, N, Format<Full, T>, W>,
) -> Box<dyn Layer<S> + Send + Sync>
where
    S: Subscriber + for<'span> LookupSpan<'span>,
    N: for<'writer> FormatFields<'writer> + Send + Sync + 'static,
    W: for<'writer> MakeWriter<'writer> + Send + Sync + 'static,
    T: FormatTime + Send + Sync + 'static,
{
    match formatting.unwrap_or("plain") {
        "json" => layer.json().boxed(),
        "pretty" => layer.pretty().boxed(),
        "plain" => layer.boxed(),
        format => {
            // Previously the message omitted `plain`, even though it is accepted
            // (and is the default).
            panic!(
                "Invalid RUST_LOG_FORMAT: `{format}`. Valid values are `plain`, `json`, or \
                 `pretty`."
            )
        }
    }
}
/// Parses a comma-separated `RUST_LOG_SPAN_EVENTS` value into an `FmtSpan` mask.
///
/// Unrecognized event names contribute nothing (they are silently ignored).
pub(crate) fn fmt_span_from_str(events: &str) -> FmtSpan {
    events
        .split(',')
        .map(|event| match event {
            "new" => FmtSpan::NEW,
            "enter" => FmtSpan::ENTER,
            "exit" => FmtSpan::EXIT,
            "close" => FmtSpan::CLOSE,
            "active" => FmtSpan::ACTIVE,
            "full" => FmtSpan::FULL,
            _ => FmtSpan::NONE,
        })
        .fold(FmtSpan::NONE, |acc, span| acc | span)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/tests.rs | linera-service/src/exporter/tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(any(
feature = "dynamodb",
feature = "scylladb",
feature = "storage-service",
))]
use anyhow::Result;
use linera_base::time::Duration;
use linera_core::{data_types::ChainInfoQuery, node::ValidatorNode};
use linera_rpc::config::ExporterServiceConfig;
use linera_service::{
cli_wrappers::{
local_net::{Database, ExportersSetup, LocalNet, LocalNetConfig},
LineraNetConfig, Network,
},
config::{BlockExporterConfig, Destination, DestinationConfig, LimitsConfig},
test_name,
};
use test_case::test_case;
// Integration test: runs a local net with one block exporter configured to push
// blocks to a second (late-joining) validator, then checks that validator
// learned about a newly produced block.
#[cfg_attr(feature = "storage-service", test_case(Database::Service, Network::Grpc ; "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(Database::ScyllaDb, Network::Grpc ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(Database::DynamoDb, Network::Grpc ; "aws_grpc"))]
#[test_log::test(tokio::test)]
async fn test_linera_exporter(database: Database, network: Network) -> Result<()> {
    tracing::info!("Starting test {}", test_name!());
    let num_shards = 1;
    let num_initial_validators = 1;
    // This is based on the formula for proxy_public_port in local_net.rs
    let port = LocalNet::first_public_port() + num_shards;
    // The exporter's destination is validator 1's proxy, started further below.
    let destination = Destination::Validator {
        endpoint: "127.0.0.1".to_owned(),
        port: port as u16,
    };
    let destination_config = DestinationConfig {
        committee_destination: false,
        destinations: vec![destination],
    };
    let block_exporter_config = BlockExporterConfig {
        destination_config,
        id: 0,
        service_config: ExporterServiceConfig {
            host: "".to_owned(),
            port: 0,
        },
        limits: LimitsConfig::default(),
        metrics_port: 1234,
    };
    let config = LocalNetConfig {
        num_initial_validators,
        num_shards,
        block_exporters: ExportersSetup::Local(vec![block_exporter_config]),
        ..LocalNetConfig::new_test(database, network)
    };
    let (mut net, client) = config.instantiate().await?;
    net.generate_validator_config(1).await?;
    // Start a new validator.
    net.start_validator(1).await?;
    let chain = client.default_chain().expect("Client has no default chain");
    // Trigger a block export and sync.
    client
        .transfer_with_silent_logs(1.into(), chain, chain)
        .await?;
    // NOTE(review): fixed 4s wait for the export to propagate — potentially
    // flaky on slow CI; a polling loop would be more robust.
    tokio::time::sleep(Duration::from_secs(4)).await;
    let validator_client = net.validator_client(1)?;
    let chain_info = validator_client
        .handle_chain_info_query(ChainInfoQuery::new(chain))
        .await?;
    // Check that the block exporter has exported the block.
    assert!(chain_info.info.next_block_height == 1.into());
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/state.rs | linera-service/src/exporter/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::HashMap,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
};
use linera_base::{
data_types::BlockHeight,
identifiers::{BlobId, ChainId},
};
use linera_sdk::{
ensure,
views::{RootView, View},
};
use linera_service::config::DestinationId;
use linera_views::{
context::Context, log_view::LogView, map_view::MapView, register_view::RegisterView,
set_view::SetView, views::ClonableView,
};
use serde::{Deserialize, Serialize};
use crate::common::{BlockId, CanonicalBlock, ExporterError, LiteBlockId};
/// State of the linera exporter as a view.
#[derive(Debug, RootView, ClonableView)]
pub struct BlockExporterStateView<C> {
/// Ordered collection of block hashes from all microchains, as indexed by the exporter.
canonical_state: LogView<C, CanonicalBlock>,
/// The global blob state.
/// These blobs have been seen, processed
/// and indexed by the exporter at least once.
blob_state: SetView<C, BlobId>,
/// Tracks the highest block already processed with its hash.
chain_states: MapView<C, ChainId, LiteBlockId>,
/// The exporter state per destination.
destination_states: RegisterView<C, DestinationStates>,
}
impl<C> BlockExporterStateView<C>
where
C: Context + Clone + Send + Sync + 'static,
{
pub async fn initiate(
context: C,
destinations: Vec<DestinationId>,
) -> Result<(Self, LogView<C, CanonicalBlock>, DestinationStates), ExporterError> {
let mut view = BlockExporterStateView::load(context)
.await
.map_err(ExporterError::StateError)?;
let stored_destinations = {
let pinned = view.destination_states.get().states.pin();
pinned.iter().map(|(id, _)| id).cloned().collect::<Vec<_>>()
};
tracing::info!(
init_destinations=?destinations,
?stored_destinations,
"initialized exporter state with destinations",
);
if view.destination_states.get().states.is_empty() {
let states = DestinationStates::new(destinations);
view.destination_states.set(states);
}
let states = view.destination_states.get().clone();
let canonical_state = view.canonical_state.clone_unchecked()?;
Ok((view, canonical_state, states))
}
pub fn index_blob(&mut self, blob: BlobId) -> Result<(), ExporterError> {
Ok(self.blob_state.insert(&blob)?)
}
pub async fn index_block(&mut self, block: BlockId) -> Result<bool, ExporterError> {
if let Some(last_processed) = self.chain_states.get_mut(&block.chain_id).await? {
let expected_block_height = last_processed
.height
.try_add_one()
.map_err(|e| ExporterError::GenericError(e.into()))?;
if block.height == expected_block_height {
*last_processed = block.into();
return Ok(true);
}
tracing::warn!(
?expected_block_height,
?block,
"attempted to index a block out of order",
);
Ok(false)
} else {
Err(ExporterError::UnprocessedChain)
}
}
pub async fn initialize_chain(&mut self, block: BlockId) -> Result<(), ExporterError> {
ensure!(
block.height == BlockHeight::ZERO,
ExporterError::BadInitialization
);
if self.chain_states.contains_key(&block.chain_id).await? {
Err(ExporterError::ChainAlreadyExists(block.chain_id))?
}
let chain_id = block.chain_id;
self.chain_states.insert(&chain_id, block.into())?;
Ok(())
}
pub async fn get_chain_status(
&self,
chain_id: &ChainId,
) -> Result<Option<LiteBlockId>, ExporterError> {
Ok(self.chain_states.get(chain_id).await?)
}
pub async fn is_blob_indexed(&self, blob: BlobId) -> Result<bool, ExporterError> {
Ok(self.blob_state.contains(&blob).await?)
}
pub fn set_destination_states(&mut self, destination_states: DestinationStates) {
self.destination_states.set(destination_states);
}
}
#[derive(Debug, Clone)]
pub(super) struct DestinationStates {
states: Arc<papaya::HashMap<DestinationId, Arc<AtomicU64>>>,
}
impl Default for DestinationStates {
fn default() -> Self {
Self {
states: Arc::new(papaya::HashMap::new()),
}
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename = "DestinationStates")]
struct SerializableDestinationStates {
states: HashMap<DestinationId, u64>,
}
impl Serialize for DestinationStates {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let states = {
let pinned = self.states.pin();
pinned
.iter()
.map(|(key, value)| (key.clone(), value.load(Ordering::Acquire)))
.collect::<HashMap<_, _>>()
};
SerializableDestinationStates::serialize(
&SerializableDestinationStates { states },
serializer,
)
}
}
impl<'de> Deserialize<'de> for DestinationStates {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let SerializableDestinationStates { states } =
SerializableDestinationStates::deserialize(deserializer)?;
let map = papaya::HashMap::new();
{
let pinned = map.pin();
for (id, state) in states {
pinned.insert(id, Arc::new(AtomicU64::new(state)));
}
}
Ok(Self {
states: Arc::from(map),
})
}
}
impl DestinationStates {
fn new(destinations: Vec<DestinationId>) -> Self {
let states = destinations
.into_iter()
.map(|id| (id, Arc::new(AtomicU64::new(0))))
.collect::<papaya::HashMap<_, _>>();
Self {
states: Arc::from(states),
}
}
pub fn load_state(&self, id: &DestinationId) -> Arc<AtomicU64> {
let pinned = self.states.pin();
pinned
.get(id)
.unwrap_or_else(|| panic!("{:?} not found in DestinationStates", id))
.clone()
}
pub fn get(&self, id: &DestinationId) -> Option<Arc<AtomicU64>> {
self.states.pin().get(id).cloned()
}
pub fn insert(&mut self, id: DestinationId, state: Arc<AtomicU64>) {
self.states.pin().insert(id, state);
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/test_utils.rs | linera-service/src/exporter/test_utils.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use async_trait::async_trait;
use futures::{stream, StreamExt};
use linera_base::{
crypto::CryptoHash,
data_types::{
Amount, Blob, BlockHeight, ChainDescription, ChainOrigin, Epoch, InitialChainConfig,
OracleResponse, Round, Timestamp,
},
identifiers::{BlobId, ChainId},
};
use linera_chain::{
data_types::BlockExecutionOutcome,
manager::ChainManagerInfo,
test::{make_child_block, make_first_block, BlockTestExt},
types::{CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate},
};
use linera_core::{
data_types::{ChainInfo, ChainInfoResponse},
node::NodeError,
};
use linera_execution::{Operation, SystemOperation};
use linera_rpc::{
grpc::api::{
validator_node_server::{ValidatorNode, ValidatorNodeServer},
BlobIds,
},
HandleConfirmedCertificateRequest,
};
use linera_service::config::DestinationKind;
use linera_storage::Storage;
use tokio_stream::{wrappers::UnboundedReceiverStream, Stream};
use tokio_util::sync::CancellationToken;
use tonic::{transport::Server, Request, Response, Status, Streaming};
use crate::{
common::{get_address, BlockId, CanonicalBlock},
runloops::indexer_api::{
element::Payload,
indexer_server::{Indexer, IndexerServer},
Element,
},
};
#[derive(Clone, Default)]
pub(crate) struct DummyIndexer {
pub(crate) fault_guard: Arc<AtomicBool>,
pub(crate) blobs: Arc<papaya::HashSet<BlobId>>,
pub(crate) state: Arc<papaya::HashSet<CryptoHash>>,
}
impl DummyIndexer {
pub(crate) async fn start(
self,
port: u16,
cancellation_token: CancellationToken,
) -> Result<(), anyhow::Error> {
let endpoint = get_address(port);
let (health_reporter, health_service) = tonic_health::server::health_reporter();
health_reporter.set_serving::<IndexerServer<Self>>().await;
Server::builder()
.add_service(health_service)
.add_service(IndexerServer::new(self))
.serve_with_shutdown(endpoint, cancellation_token.cancelled_owned())
.await
.expect("a running indexer");
Ok(())
}
}
#[async_trait]
impl Indexer for DummyIndexer {
type IndexBatchStream = Pin<Box<dyn Stream<Item = Result<(), Status>> + Send + 'static>>;
async fn index_batch(
&self,
request: Request<Streaming<Element>>,
) -> Result<Response<Self::IndexBatchStream>, Status> {
let stream = request.into_inner();
let is_faulty = self.fault_guard.clone();
let state_moved = self.state.clone();
let blobs_state_moved = self.blobs.clone();
let output = stream::unfold(stream, move |mut stream| {
let is_faulty_moved = is_faulty.clone();
let moved_state = state_moved.clone();
let moved_blobs_state = blobs_state_moved.clone();
async move {
while let Some(result) = stream.next().await {
if is_faulty_moved.load(Ordering::Acquire) {
return Some((Err(Status::from_error("err".into())), stream));
}
match result {
Ok(Element {
payload: Some(Payload::Block(indexer_block)),
}) => match TryInto::<ConfirmedBlockCertificate>::try_into(indexer_block) {
Ok(block) => {
moved_state.pin().insert(block.hash());
return Some((Ok(()), stream));
}
Err(e) => return Some((Err(Status::from_error(e)), stream)),
},
Ok(Element {
payload: Some(Payload::Blob(indexer_blob)),
}) => {
let blob = Blob::try_from(indexer_blob).unwrap();
moved_blobs_state.pin().insert(blob.id());
}
Ok(_) => continue,
Err(e) => return Some((Err(e), stream)),
}
}
None
}
});
Ok(Response::new(Box::pin(output)))
}
}
#[derive(Clone, Default)]
pub(crate) struct DummyValidator {
pub(crate) validator_port: u16,
pub(crate) fault_guard: Arc<AtomicBool>,
pub(crate) blobs: Arc<papaya::HashSet<BlobId>>,
pub(crate) state: Arc<papaya::HashSet<CryptoHash>>,
// Tracks whether a block has been received multiple times.
pub(crate) duplicate_blocks: Arc<papaya::HashMap<CryptoHash, u64>>,
}
impl DummyValidator {
pub fn new(port: u16) -> Self {
Self {
validator_port: port,
fault_guard: Arc::new(AtomicBool::new(false)),
blobs: Arc::new(papaya::HashSet::new()),
state: Arc::new(papaya::HashSet::new()),
duplicate_blocks: Arc::new(papaya::HashMap::new()),
}
}
pub(crate) async fn start(
self,
port: u16,
cancellation_token: CancellationToken,
) -> Result<(), anyhow::Error> {
let endpoint = get_address(port);
let (health_reporter, health_service) = tonic_health::server::health_reporter();
health_reporter
.set_serving::<ValidatorNodeServer<Self>>()
.await;
Server::builder()
.add_service(health_service)
.add_service(ValidatorNodeServer::new(self))
.serve_with_shutdown(endpoint, cancellation_token.cancelled_owned())
.await
.expect("a running validator");
Ok(())
}
}
#[async_trait]
impl ValidatorNode for DummyValidator {
type SubscribeStream =
UnboundedReceiverStream<Result<linera_rpc::grpc::api::Notification, Status>>;
async fn handle_confirmed_certificate(
&self,
request: Request<linera_rpc::grpc::api::HandleConfirmedCertificateRequest>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
if self.fault_guard.load(Ordering::Acquire) {
return Err(Status::from_error("err".into()));
}
let req = HandleConfirmedCertificateRequest::try_from(request.into_inner())
.map_err(Status::from)?;
let mut missing_blobs = Vec::new();
let created_blobs = req.certificate.inner().block().created_blob_ids();
for blob in req.certificate.inner().required_blob_ids() {
if !self.blobs.pin().contains(&blob) && !created_blobs.contains(&blob) {
missing_blobs.push(blob);
}
}
let chain_info = ChainInfo {
chain_id: ChainId(CryptoHash::test_hash("test")),
epoch: Epoch::ZERO,
description: None,
manager: ChainManagerInfo::default().into(),
chain_balance: Amount::ONE,
block_hash: None,
timestamp: Timestamp::now(),
next_block_height: BlockHeight::ZERO,
state_hash: None,
requested_owner_balance: None,
requested_committees: None,
requested_pending_message_bundles: vec![],
requested_sent_certificate_hashes: vec![],
count_received_log: 0,
requested_received_log: vec![],
};
let response = if missing_blobs.is_empty() {
let response = ChainInfoResponse::new(chain_info, None).try_into()?;
for blob in created_blobs {
self.blobs.pin().insert(blob);
}
if !self.state.pin().insert(req.certificate.hash()) {
tracing::warn!(validator=?self.validator_port, certificate=?req.certificate.hash(), "duplicate block received");
let pinned = self.duplicate_blocks.pin();
pinned.update_or_insert(req.certificate.hash(), |count| count + 1, 2);
}
response
} else {
NodeError::BlobsNotFound(missing_blobs).try_into()?
};
Ok(Response::new(response))
}
async fn upload_blob(
&self,
request: Request<linera_rpc::grpc::api::BlobContent>,
) -> Result<Response<linera_rpc::grpc::api::BlobId>, Status> {
if self.fault_guard.load(Ordering::Acquire) {
return Err(Status::from_error("err".into()));
}
let content: linera_sdk::linera_base_types::BlobContent =
request.into_inner().try_into()?;
let blob = Blob::new(content);
let id = blob.id();
self.blobs.pin().insert(id);
Ok(Response::new(id.try_into()?))
}
async fn handle_block_proposal(
&self,
_request: Request<linera_rpc::grpc::api::BlockProposal>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn handle_lite_certificate(
&self,
_request: Request<linera_rpc::grpc::api::LiteCertificate>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn handle_validated_certificate(
&self,
_request: Request<linera_rpc::grpc::api::HandleValidatedCertificateRequest>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn handle_timeout_certificate(
&self,
_request: Request<linera_rpc::grpc::api::HandleTimeoutCertificateRequest>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn handle_chain_info_query(
&self,
_request: Request<linera_rpc::grpc::api::ChainInfoQuery>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn subscribe(
&self,
_request: Request<linera_rpc::grpc::api::SubscriptionRequest>,
) -> Result<Response<Self::SubscribeStream>, Status> {
unimplemented!()
}
async fn get_version_info(
&self,
_request: Request<()>,
) -> Result<Response<linera_rpc::grpc::api::VersionInfo>, Status> {
unimplemented!()
}
async fn get_network_description(
&self,
_request: Request<()>,
) -> Result<Response<linera_rpc::grpc::api::NetworkDescription>, Status> {
unimplemented!()
}
async fn download_blob(
&self,
_request: Request<linera_rpc::grpc::api::BlobId>,
) -> Result<Response<linera_rpc::grpc::api::BlobContent>, Status> {
unimplemented!()
}
async fn download_pending_blob(
&self,
_request: Request<linera_rpc::grpc::api::PendingBlobRequest>,
) -> Result<Response<linera_rpc::grpc::api::PendingBlobResult>, Status> {
unimplemented!()
}
async fn handle_pending_blob(
&self,
_request: Request<linera_rpc::grpc::api::HandlePendingBlobRequest>,
) -> Result<Response<linera_rpc::grpc::api::ChainInfoResult>, Status> {
unimplemented!()
}
async fn download_certificate(
&self,
_request: Request<linera_rpc::grpc::api::CryptoHash>,
) -> Result<Response<linera_rpc::grpc::api::Certificate>, Status> {
unimplemented!()
}
async fn download_certificates(
&self,
_request: Request<linera_rpc::grpc::api::CertificatesBatchRequest>,
) -> Result<Response<linera_rpc::grpc::api::CertificatesBatchResponse>, Status> {
unimplemented!()
}
async fn download_certificates_by_heights(
&self,
_request: Request<linera_rpc::grpc::api::DownloadCertificatesByHeightsRequest>,
) -> Result<Response<linera_rpc::grpc::api::CertificatesBatchResponse>, Status> {
unimplemented!()
}
async fn download_raw_certificates_by_heights(
&self,
_request: Request<linera_rpc::grpc::api::DownloadCertificatesByHeightsRequest>,
) -> Result<Response<linera_rpc::grpc::api::RawCertificatesBatch>, Status> {
unimplemented!()
}
async fn blob_last_used_by(
&self,
_request: Request<linera_rpc::grpc::api::BlobId>,
) -> Result<Response<linera_rpc::grpc::api::CryptoHash>, Status> {
unimplemented!()
}
async fn blob_last_used_by_certificate(
&self,
_request: Request<linera_rpc::grpc::api::BlobId>,
) -> Result<Response<linera_rpc::grpc::api::Certificate>, Status> {
unimplemented!()
}
async fn missing_blob_ids(
&self,
_request: Request<BlobIds>,
) -> Result<Response<BlobIds>, Status> {
unimplemented!()
}
async fn get_shard_info(
&self,
_request: Request<linera_rpc::grpc::api::ChainId>,
) -> Result<Response<linera_rpc::grpc::api::ShardInfo>, Status> {
unimplemented!()
}
}
#[async_trait]
pub trait TestDestination {
fn kind(&self) -> DestinationKind;
fn blobs(&self) -> &papaya::HashSet<BlobId>;
fn set_faulty(&self);
fn unset_faulty(&self);
fn state(&self) -> &papaya::HashSet<CryptoHash>;
async fn start(
self,
port: u16,
cancellation_token: CancellationToken,
) -> Result<(), anyhow::Error>;
}
#[async_trait]
impl TestDestination for DummyIndexer {
fn kind(&self) -> DestinationKind {
DestinationKind::Indexer
}
fn blobs(&self) -> &papaya::HashSet<BlobId> {
self.blobs.as_ref()
}
fn set_faulty(&self) {
self.fault_guard.store(true, Ordering::Release);
}
fn unset_faulty(&self) {
self.fault_guard.store(false, Ordering::Release);
}
fn state(&self) -> &papaya::HashSet<CryptoHash> {
self.state.as_ref()
}
async fn start(
self,
port: u16,
cancellation_token: CancellationToken,
) -> Result<(), anyhow::Error> {
self.start(port, cancellation_token).await
}
}
#[async_trait]
impl TestDestination for DummyValidator {
fn kind(&self) -> DestinationKind {
DestinationKind::Validator
}
fn blobs(&self) -> &papaya::HashSet<BlobId> {
self.blobs.as_ref()
}
fn set_faulty(&self) {
self.fault_guard.store(true, Ordering::Release);
}
fn unset_faulty(&self) {
self.fault_guard.store(false, Ordering::Release);
}
fn state(&self) -> &papaya::HashSet<CryptoHash> {
self.state.as_ref()
}
async fn start(
self,
port: u16,
cancellation_token: CancellationToken,
) -> Result<(), anyhow::Error> {
self.start(port, cancellation_token).await
}
}
/// Creates a chain state with two blocks, each containing blobs.
pub(crate) async fn make_simple_state_with_blobs<S: Storage>(
storage: &S,
) -> (BlockId, Vec<CanonicalBlock>) {
let chain_description = ChainDescription::new(
ChainOrigin::Root(0),
InitialChainConfig {
ownership: Default::default(),
epoch: Default::default(),
balance: Default::default(),
application_permissions: Default::default(),
min_active_epoch: Default::default(),
max_active_epoch: Default::default(),
},
Timestamp::now(),
);
let blob_1 = Blob::new_data("1".as_bytes());
let blob_2 = Blob::new_data("2".as_bytes());
let blob_3 = Blob::new_data("3".as_bytes());
let chain_id = chain_description.id();
let chain_blob = Blob::new_chain_description(&chain_description);
let block_1 = ConfirmedBlock::new(
BlockExecutionOutcome {
blobs: vec![vec![blob_1.clone()]],
..Default::default()
}
.with(make_first_block(chain_id).with_operation(Operation::system(
SystemOperation::PublishDataBlob {
blob_hash: CryptoHash::new(blob_2.content()),
},
))),
);
let block_2 = ConfirmedBlock::new(
BlockExecutionOutcome {
oracle_responses: vec![vec![OracleResponse::Blob(blob_3.id())]],
..Default::default()
}
.with(
make_child_block(&block_1)
.with_operation(Operation::system(SystemOperation::PublishDataBlob {
blob_hash: CryptoHash::new(blob_2.content()),
}))
.with_operation(Operation::system(SystemOperation::PublishDataBlob {
blob_hash: CryptoHash::new(blob_1.content()),
})),
),
);
storage
.write_blobs(&[chain_blob.clone(), blob_1, blob_2.clone(), blob_3.clone()])
.await
.unwrap();
storage
.write_blobs_and_certificate(
&[],
&ConfirmedBlockCertificate::new(block_1.clone(), Round::Fast, vec![]),
)
.await
.unwrap();
storage
.write_blobs_and_certificate(
&[],
&ConfirmedBlockCertificate::new(block_2.clone(), Round::Fast, vec![]),
)
.await
.unwrap();
let notification = BlockId::from_confirmed_block(&block_2);
let state = vec![
CanonicalBlock::new(block_1.inner().hash(), &[blob_2.id(), chain_blob.id()]),
CanonicalBlock::new(block_2.inner().hash(), &[blob_3.id()]),
];
(notification, state)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/exporter_service.rs | linera-service/src/exporter/exporter_service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use async_trait::async_trait;
use linera_core::worker::Reason;
use linera_rpc::grpc::api::{
notifier_service_server::{NotifierService, NotifierServiceServer},
Notification,
};
use tokio::sync::mpsc::UnboundedSender;
use tokio_util::sync::CancellationToken;
use tonic::{transport::Server, Request, Response, Status};
use tracing::info;
use crate::common::{get_address, BadNotificationKind, BlockId, ExporterError};
pub(crate) struct ExporterService {
block_processor_sender: UnboundedSender<BlockId>,
}
#[async_trait]
impl NotifierService for ExporterService {
async fn notify(&self, request: Request<Notification>) -> Result<Response<()>, Status> {
let notification = request.into_inner();
let block_id =
match parse_notification(notification).map_err(|e| Status::from_error(e.into())) {
Ok(block_id) => {
tracing::debug!(
?block_id,
"received new block notification from notifier service"
);
block_id
}
Err(status) => {
// We assume errors when parsing are not critical and just log them.
tracing::warn!(error=?status, "received bad notification");
return Ok(Response::new(()));
}
};
#[cfg(with_metrics)]
crate::metrics::EXPORTER_NOTIFICATION_QUEUE_LENGTH.inc();
self.block_processor_sender
.send(block_id)
.expect("sender should never fail");
Ok(Response::new(()))
}
}
impl ExporterService {
pub(crate) fn new(sender: UnboundedSender<BlockId>) -> ExporterService {
ExporterService {
block_processor_sender: sender,
}
}
pub async fn run(
self,
cancellation_token: CancellationToken,
port: u16,
) -> core::result::Result<(), ExporterError> {
info!("Linera exporter is running.");
self.start_notification_server(port, cancellation_token)
.await
}
async fn start_notification_server(
self,
port: u16,
cancellation_token: CancellationToken,
) -> core::result::Result<(), ExporterError> {
let endpoint = get_address(port);
info!(
"Starting linera_exporter_service on endpoint = {}",
endpoint
);
let (health_reporter, health_service) = tonic_health::server::health_reporter();
health_reporter
.set_serving::<NotifierServiceServer<Self>>()
.await;
Server::builder()
.add_service(health_service)
.add_service(NotifierServiceServer::new(self))
.serve_with_shutdown(endpoint, cancellation_token.cancelled_owned())
.await
.expect("a running notification server");
Ok(())
}
}
fn parse_notification(notification: Notification) -> core::result::Result<BlockId, ExporterError> {
let chain_id = notification
.chain_id
.ok_or(BadNotificationKind::InvalidChainId { inner: None })?
.try_into()
.map_err(|err| BadNotificationKind::InvalidChainId { inner: Some(err) })?;
let reason = bincode::deserialize::<Reason>(¬ification.reason)
.map_err(|err| BadNotificationKind::InvalidReason { inner: Some(err) })?;
if let Reason::NewBlock { height, hash } = reason {
return Ok(BlockId::new(chain_id, hash, height));
}
Err(BadNotificationKind::InvalidReason { inner: None })?
}
#[cfg(test)]
mod test {
use linera_base::{crypto::CryptoHash, identifiers::ChainId, port::get_free_port};
use linera_core::worker::Notification;
use linera_rpc::grpc::api::notifier_service_client::NotifierServiceClient;
use linera_service::cli_wrappers::local_net::LocalNet;
use tokio::sync::mpsc::unbounded_channel;
use super::*;
#[test_log::test(tokio::test)]
async fn test_notification_server() -> anyhow::Result<()> {
let port = get_free_port().await?;
let endpoint = format!("127.0.0.1:{port}");
let cancellation_token = CancellationToken::new();
let (tx, mut rx) = unbounded_channel();
let server = ExporterService::new(tx);
let server_handle = tokio::spawn(server.run(cancellation_token.clone(), port));
LocalNet::ensure_grpc_server_has_started("test server", port as usize, "http").await?;
let mut client = NotifierServiceClient::connect(format!("http://{endpoint}")).await?;
let reason = Reason::NewBlock {
height: 4.into(),
hash: CryptoHash::test_hash("s"),
};
let request = Notification {
chain_id: ChainId::default(),
reason,
};
assert!(client
.notify(Request::new(request.try_into().unwrap()))
.await
.is_ok());
let expected_block_id =
BlockId::new(ChainId::default(), CryptoHash::test_hash("s"), 4.into());
assert!(rx
.recv()
.await
.is_some_and(|block_id| block_id == expected_block_id));
cancellation_token.cancel();
assert!(server_handle.await.is_ok());
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/storage.rs | linera-service/src/exporter/storage.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::BTreeMap,
marker::PhantomData,
sync::{atomic::AtomicU64, Arc},
};
use futures::future::try_join_all;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
crypto::CryptoHash,
data_types::{Blob, BlockHeight},
identifiers::{BlobId, ChainId},
};
use linera_chain::types::ConfirmedBlockCertificate;
use linera_sdk::{ensure, views::View};
use linera_service::config::{DestinationId, LimitsConfig};
use linera_storage::Storage;
use linera_views::{
batch::Batch, context::Context, log_view::LogView, store::WritableKeyValueStore as _,
views::ClonableView,
};
use mini_moka::unsync::Cache as LfuCache;
use quick_cache::{sync::Cache as FifoCache, Weighter};
#[cfg(with_metrics)]
use crate::metrics;
use crate::{
common::{BlockId, CanonicalBlock, ExporterError, LiteBlockId},
state::{BlockExporterStateView, DestinationStates},
};
const NUM_OF_BLOBS: usize = 20;
pub(super) struct ExporterStorage<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
shared_storage: SharedStorage<S::BlockExporterContext, S>,
}
type BlobCache = FifoCache<BlobId, Arc<Blob>, BlobCacheWeighter>;
type BlockCache = FifoCache<CryptoHash, Arc<ConfirmedBlockCertificate>, BlockCacheWeighter>;
struct SharedStorage<C, S>
where
S: Storage + Clone + Send + Sync + 'static,
{
storage: S,
destination_states: DestinationStates,
shared_canonical_state: CanonicalState<C>,
blobs_cache: Arc<BlobCache>,
blocks_cache: Arc<BlockCache>,
}
pub(super) struct BlockProcessorStorage<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
blob_state_cache: LfuCache<BlobId, ()>,
chain_states_cache: LfuCache<ChainId, LiteBlockId>,
shared_storage: SharedStorage<S::BlockExporterContext, S>,
// Handle on the persistent storage where the exporter state is pushed to periodically.
exporter_state_view: BlockExporterStateView<<S as Storage>::BlockExporterContext>,
}
impl<C, S> SharedStorage<C, S>
where
C: Context + Send + Sync + 'static,
S: Storage + Clone + Send + Sync + 'static,
{
fn new(
storage: S,
state_context: LogView<C, CanonicalBlock>,
destination_states: DestinationStates,
limits: LimitsConfig,
) -> Self {
let shared_canonical_state =
CanonicalState::new((limits.auxiliary_cache_size_mb / 3).into(), state_context);
let blobs_cache = Arc::new(FifoCache::with_weighter(
limits.blob_cache_items_capacity as usize,
(limits.blob_cache_weight_mb as u64) * 1024 * 1024,
CacheWeighter::default(),
));
let blocks_cache = Arc::new(FifoCache::with_weighter(
limits.block_cache_items_capacity as usize,
(limits.block_cache_weight_mb as u64) * 1024 * 1024,
CacheWeighter::default(),
));
Self {
storage,
shared_canonical_state,
blobs_cache,
blocks_cache,
destination_states,
}
}
async fn get_block(
&self,
hash: CryptoHash,
) -> Result<Arc<ConfirmedBlockCertificate>, ExporterError> {
match self.blocks_cache.get_value_or_guard_async(&hash).await {
Ok(value) => Ok(value),
Err(guard) => {
#[cfg(with_metrics)]
metrics::GET_CERTIFICATE_HISTOGRAM.measure_latency();
let block = self.storage.read_certificate(hash).await?;
let block = block.ok_or_else(|| ExporterError::ReadCertificateError(hash))?;
let heaped_block = Arc::new(block);
let _ = guard.insert(heaped_block.clone());
Ok(heaped_block)
}
}
}
async fn get_blob(&self, blob_id: BlobId) -> Result<Arc<Blob>, ExporterError> {
match self.blobs_cache.get_value_or_guard_async(&blob_id).await {
Ok(blob) => Ok(blob),
Err(guard) => {
#[cfg(with_metrics)]
metrics::GET_BLOB_HISTOGRAM.measure_latency();
let blob = self.storage.read_blob(blob_id).await?.unwrap();
let heaped_blob = Arc::new(blob);
let _ = guard.insert(heaped_blob.clone());
Ok(heaped_blob)
}
}
}
async fn get_blobs(&self, blobs: &[BlobId]) -> Result<Vec<Arc<Blob>>, ExporterError> {
let tasks = blobs.iter().map(|id| self.get_blob(*id));
let results = try_join_all(tasks).await?;
Ok(results)
}
fn push_block(&mut self, block: CanonicalBlock) {
self.shared_canonical_state.push(block)
}
fn clone(&mut self) -> Result<Self, ExporterError> {
Ok(Self {
storage: self.storage.clone(),
shared_canonical_state: self.shared_canonical_state.clone()?,
blobs_cache: self.blobs_cache.clone(),
blocks_cache: self.blocks_cache.clone(),
destination_states: self.destination_states.clone(),
})
}
}
impl<S> ExporterStorage<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
fn new(shared_storage: SharedStorage<S::BlockExporterContext, S>) -> Self {
Self { shared_storage }
}
pub(crate) async fn get_block_with_blob_ids(
&self,
index: usize,
) -> Result<(Arc<ConfirmedBlockCertificate>, Vec<BlobId>), ExporterError> {
let block = self
.shared_storage
.shared_canonical_state
.get(index)
.await?;
Ok((
self.shared_storage.get_block(block.block_hash).await?,
block.blobs.into(),
))
}
pub(crate) async fn get_block_with_blobs(
&self,
index: usize,
) -> Result<(Arc<ConfirmedBlockCertificate>, Vec<Arc<Blob>>), ExporterError> {
let canonical_block = self
.shared_storage
.shared_canonical_state
.get(index)
.await?;
let block_task = self.shared_storage.get_block(canonical_block.block_hash);
let blobs_task = self.shared_storage.get_blobs(&canonical_block.blobs);
let (block, blobs) = tokio::try_join!(block_task, blobs_task)?;
Ok((block, blobs))
}
pub(crate) async fn get_blob(&self, blob_id: BlobId) -> Result<Arc<Blob>, ExporterError> {
self.shared_storage.get_blob(blob_id).await
}
pub(crate) fn load_destination_state(&self, id: &DestinationId) -> Arc<AtomicU64> {
self.shared_storage.destination_states.load_state(id)
}
pub(crate) fn clone(&mut self) -> Result<Self, ExporterError> {
Ok(ExporterStorage::new(self.shared_storage.clone()?))
}
pub(crate) fn get_latest_index(&self) -> usize {
self.shared_storage.shared_canonical_state.latest_index()
}
}
impl<S> BlockProcessorStorage<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
pub(super) async fn load(
storage: S,
id: u32,
destinations: Vec<DestinationId>,
limits: LimitsConfig,
) -> Result<(Self, ExporterStorage<S>), ExporterError> {
let context = storage.block_exporter_context(id).await?;
let (view, canonical_state, destination_states) =
BlockExporterStateView::initiate(context, destinations).await?;
let chain_states_cache_capacity =
((limits.auxiliary_cache_size_mb / 3) as u64 * 1024 * 1024)
/ (size_of::<CryptoHash>() + size_of::<LiteBlockId>()) as u64;
let chain_states_cache = LfuCache::builder()
.max_capacity(chain_states_cache_capacity)
.build();
let blob_state_cache_capacity = ((limits.auxiliary_cache_size_mb / 3) as u64 * 1024 * 1024)
/ (size_of::<BlobId>() as u64);
let blob_state_cache = LfuCache::builder()
.max_capacity(blob_state_cache_capacity)
.build();
let mut shared_storage =
SharedStorage::new(storage, canonical_state, destination_states, limits);
let exporter_storage = ExporterStorage::new(shared_storage.clone()?);
Ok((
Self {
shared_storage,
chain_states_cache,
exporter_state_view: view,
blob_state_cache,
},
exporter_storage,
))
}
pub(super) async fn get_block(
&self,
hash: CryptoHash,
) -> Result<Arc<ConfirmedBlockCertificate>, ExporterError> {
self.shared_storage.get_block(hash).await
}
pub(super) async fn get_blob(&self, blob: BlobId) -> Result<Arc<Blob>, ExporterError> {
self.shared_storage.get_blob(blob).await
}
pub(super) async fn is_blob_indexed(&mut self, blob: BlobId) -> Result<bool, ExporterError> {
match self.blob_state_cache.get(&blob) {
Some(_) => Ok(true),
None => self.exporter_state_view.is_blob_indexed(blob).await,
}
}
pub(super) async fn is_block_indexed(
&mut self,
block_id: &BlockId,
) -> Result<bool, ExporterError> {
if let Some(status) = self.chain_states_cache.get(&block_id.chain_id) {
return Ok(status.height >= block_id.height);
}
if let Some(status) = self
.exporter_state_view
.get_chain_status(&block_id.chain_id)
.await?
{
let result = status.height >= block_id.height;
self.chain_states_cache.insert(block_id.chain_id, status);
return Ok(result);
}
Err(ExporterError::UnprocessedChain)
}
pub(super) async fn index_chain(&mut self, block_id: &BlockId) -> Result<(), ExporterError> {
ensure!(
block_id.height == BlockHeight::ZERO,
ExporterError::BadInitialization
);
self.exporter_state_view.initialize_chain(*block_id).await?;
self.chain_states_cache
.insert(block_id.chain_id, (*block_id).into());
Ok(())
}
pub(super) async fn index_block(&mut self, block_id: &BlockId) -> Result<bool, ExporterError> {
if block_id.height == BlockHeight::ZERO {
self.index_chain(block_id).await?;
return Ok(true);
}
if self.exporter_state_view.index_block(*block_id).await? {
self.chain_states_cache
.insert(block_id.chain_id, (*block_id).into());
return Ok(true);
}
Ok(false)
}
pub(super) fn index_blob(&mut self, blob: BlobId) -> Result<(), ExporterError> {
self.exporter_state_view.index_blob(blob)?;
self.blob_state_cache.insert(blob, ());
Ok(())
}
pub(super) fn push_block(&mut self, block: CanonicalBlock) {
self.shared_storage.push_block(block)
}
pub(super) fn new_committee(&mut self, committee_destinations: Vec<DestinationId>) {
committee_destinations.into_iter().for_each(|id| {
let state = match self.shared_storage.destination_states.get(&id) {
None => {
tracing::info!(id=?id, "adding new committee member");
#[cfg(with_metrics)]
{
metrics::DESTINATION_STATE_COUNTER
.with_label_values(&[id.address()])
.reset();
}
Arc::new(AtomicU64::new(0))
}
Some(state) => state.clone(),
};
self.shared_storage.destination_states.insert(id, state);
});
}
pub(super) async fn save(&mut self) -> Result<(), ExporterError> {
let mut batch = Batch::new();
self.shared_storage
.shared_canonical_state
.flush(&mut batch)?;
self.exporter_state_view
.set_destination_states(self.shared_storage.destination_states.clone());
self.exporter_state_view.pre_save(&mut batch)?;
#[cfg(with_metrics)]
metrics::SAVE_HISTOGRAM.measure_latency();
if let Err(e) = self
.exporter_state_view
.context()
.store()
.write_batch(batch)
.await
{
Err(ExporterError::ViewError(e.into()))?;
};
self.exporter_state_view.post_save();
// clear the shared state only after persisting it
// only matters for the shared updates buffer
self.shared_storage.shared_canonical_state.clear();
Ok(())
}
}
/// A view of the canonical state that is used to store blocks in the exporter.
struct CanonicalState<C> {
/// The number of blocks in the canonical state.
count: usize,
/// The (persistent) storage view that is used to access the canonical state.
state_context: LogView<C, CanonicalBlock>,
/// A cache that stores the canonical blocks.
/// This cache is used to speed up access to the canonical state.
/// It uses a FIFO eviction policy and is limited by the size of the cache.
/// The cache is used to avoid reading the canonical state from the persistent storage
/// for every request.
state_cache: Arc<FifoCache<usize, CanonicalBlock, CanonicalStateCacheWeighter>>,
/// A buffer that stores the updates to the canonical state.
///
/// This buffer is used to temporarily hold updates to the canonical state before they are
/// flushed to the persistent storage.
state_updates_buffer: Arc<papaya::HashMap<usize, CanonicalBlock>>,
}
impl<C> CanonicalState<C>
where
C: Context + Send + Sync + 'static,
{
fn new(cache_size: usize, state_context: LogView<C, CanonicalBlock>) -> Self {
let cache_size = cache_size * 1024 * 1024;
let items_capacity = cache_size
/ (size_of::<usize>()
+ size_of::<CanonicalBlock>()
+ NUM_OF_BLOBS * size_of::<BlobId>());
Self {
count: state_context.count(),
state_cache: Arc::new(FifoCache::with_weighter(
items_capacity,
cache_size as u64,
CacheWeighter::default(),
)),
state_updates_buffer: Arc::new(papaya::HashMap::new()),
state_context,
}
}
fn clone(&mut self) -> Result<Self, ExporterError> {
Ok(Self {
count: self.count,
state_cache: self.state_cache.clone(),
state_updates_buffer: self.state_updates_buffer.clone(),
state_context: self.state_context.clone_unchecked()?,
})
}
/// Returns the latest index of the canonical state.
fn latest_index(&self) -> usize {
self.count
}
async fn get(&self, index: usize) -> Result<CanonicalBlock, ExporterError> {
match self.state_cache.get_value_or_guard_async(&index).await {
Ok(value) => Ok(value),
Err(guard) => {
let block = if let Some(entry) = {
let pinned = self.state_updates_buffer.pin();
pinned.get(&index).cloned()
} {
entry
} else {
#[cfg(with_metrics)]
metrics::GET_CANONICAL_BLOCK_HISTOGRAM.measure_latency();
self.state_context
.get(index)
.await?
.ok_or(ExporterError::UnprocessedBlock)?
};
let _ = guard.insert(block.clone());
Ok(block)
}
}
}
fn push(&mut self, value: CanonicalBlock) {
let index = self.next_index();
self.state_updates_buffer.pin().insert(index, value.clone());
self.state_cache.insert(index, value);
}
fn flush(&mut self, batch: &mut Batch) -> Result<(), ExporterError> {
for (_, value) in self
.state_updates_buffer
.pin()
.iter()
.map(|(key, value)| (*key, value.clone()))
.collect::<BTreeMap<_, _>>()
{
self.state_context.push(value);
}
self.state_context.pre_save(batch)?;
self.state_context.post_save();
Ok(())
}
fn clear(&mut self) {
self.state_updates_buffer.pin().clear();
self.state_context.rollback();
}
fn next_index(&mut self) -> usize {
let next = self.count;
self.count += 1;
next
}
}
#[derive(Clone)]
struct CacheWeighter<Q, V> {
key: PhantomData<Q>,
value: PhantomData<V>,
}
impl Weighter<BlobId, Arc<Blob>> for BlobCacheWeighter {
fn weight(&self, _key: &BlobId, val: &Arc<Blob>) -> u64 {
(size_of::<BlobId>()
+ size_of::<Arc<Blob>>()
+ 2 * size_of::<usize>() // two reference counts in Arc, just a micro-optimization
+ size_of::<Blob>()
+ val.bytes().len()) as u64
}
}
impl Weighter<CryptoHash, Arc<ConfirmedBlockCertificate>> for BlockCacheWeighter {
fn weight(&self, _key: &CryptoHash, _val: &Arc<ConfirmedBlockCertificate>) -> u64 {
(size_of::<CryptoHash>()
+ 2 * size_of::<usize>()
+ size_of::<Arc<ConfirmedBlockCertificate>>()
+ 1_000_000) as u64 // maximum block size in testnet resource control policy
}
}
impl Weighter<usize, CanonicalBlock> for CanonicalStateCacheWeighter {
fn weight(&self, _key: &usize, _val: &CanonicalBlock) -> u64 {
(size_of::<usize>() + size_of::<CanonicalBlock>() + NUM_OF_BLOBS * size_of::<BlobId>())
as u64
}
}
impl<Q, V> Default for CacheWeighter<Q, V> {
fn default() -> Self {
Self {
key: PhantomData,
value: PhantomData,
}
}
}
type BlobCacheWeighter = CacheWeighter<BlobId, Arc<Blob>>;
type BlockCacheWeighter = CacheWeighter<CryptoHash, Arc<ConfirmedBlockCertificate>>;
type CanonicalStateCacheWeighter = CacheWeighter<usize, CanonicalBlock>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/main.rs | linera-service/src/exporter/main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{path::PathBuf, time::Duration};
use anyhow::Result;
use async_trait::async_trait;
use common::{ExporterCancellationSignal, ExporterError};
use exporter_service::ExporterService;
use futures::FutureExt;
use linera_base::listen_for_shutdown_signals;
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_rpc::NodeOptions;
use linera_service::{
config::BlockExporterConfig,
storage::{CommonStorageOptions, Runnable, StorageConfig},
util,
};
use linera_storage::Storage;
use runloops::start_block_processor_task;
use tokio_util::sync::CancellationToken;
mod common;
mod exporter_service;
#[cfg(with_metrics)]
mod metrics;
mod runloops;
mod state;
mod storage;
#[cfg(test)]
mod test_utils;
#[cfg(test)]
mod tests;
#[cfg(not(feature = "metrics"))]
const IS_WITH_METRICS: bool = false;
#[cfg(feature = "metrics")]
const IS_WITH_METRICS: bool = true;
/// Options for running the linera block exporter.
#[derive(clap::Parser, Debug, Clone)]
#[command(
name = "Linera Exporter",
version = linera_version::VersionInfo::default_clap_str(),
)]
struct ExporterOptions {
/// Path to the TOML file describing the configuration for the block exporter.
#[arg(long)]
config_path: PathBuf,
/// Storage configuration for the blockchain history, chain states and binary blobs.
#[arg(long = "storage")]
storage_config: StorageConfig,
/// Common storage options.
#[command(flatten)]
common_storage_options: CommonStorageOptions,
/// Maximum number of threads to use for exporters
#[arg(long, default_value = "16")]
max_exporter_threads: usize,
/// Timeout in milliseconds for sending queries.
#[arg(long = "send-timeout-ms", default_value = "4000", value_parser = util::parse_millis)]
pub send_timeout: Duration,
/// Timeout in milliseconds for receiving responses.
#[arg(long = "recv-timeout-ms", default_value = "4000", value_parser = util::parse_millis)]
pub recv_timeout: Duration,
/// Delay increment for retrying to connect to a destination.
#[arg(
long = "retry-delay-ms",
default_value = "1000",
value_parser = util::parse_millis
)]
pub retry_delay: Duration,
/// Number of times to retry connecting to a destination.
#[arg(long, default_value = "10")]
pub max_retries: u32,
/// Port for the metrics server.
#[arg(long)]
pub metrics_port: Option<u16>,
}
struct ExporterContext {
node_options: NodeOptions,
config: BlockExporterConfig,
}
#[async_trait]
impl Runnable for ExporterContext {
type Output = Result<(), ExporterError>;
async fn run<S>(self, storage: S) -> Self::Output
where
S: Storage + Clone + Send + Sync + 'static,
{
let shutdown_notifier = CancellationToken::new();
tokio::spawn(listen_for_shutdown_signals(shutdown_notifier.clone()));
#[cfg(with_metrics)]
monitoring_server::start_metrics(self.config.metrics_address(), shutdown_notifier.clone());
let (sender, handle) = start_block_processor_task(
storage,
ExporterCancellationSignal::new(shutdown_notifier.clone()),
self.config.limits,
self.node_options,
self.config.id,
self.config.destination_config,
);
let service = ExporterService::new(sender);
let mut block_processor_task = tokio::task::spawn_blocking(move || handle.join().unwrap());
tokio::select! {
result = service.run(shutdown_notifier, self.config.service_config.port) => {
result?;
block_processor_task.await.expect("block processor task panicked")
}
result = &mut block_processor_task => {
result.expect("block processor task panicked")
}
}
}
}
impl ExporterContext {
fn new(node_options: NodeOptions, config: BlockExporterConfig) -> ExporterContext {
Self {
config,
node_options,
}
}
}
fn main() -> Result<()> {
linera_service::tracing::init("linera-exporter");
let options = <ExporterOptions as clap::Parser>::parse();
options.run()
}
impl ExporterOptions {
fn run(&self) -> anyhow::Result<()> {
let config_string = fs_err::read_to_string(&self.config_path)
.expect("Unable to read the configuration file");
let mut config: BlockExporterConfig =
toml::from_str(&config_string).expect("Invalid configuration file format");
let node_options = NodeOptions {
send_timeout: self.send_timeout,
recv_timeout: self.recv_timeout,
retry_delay: self.retry_delay,
max_retries: self.max_retries,
};
if let Some(port) = self.metrics_port {
if IS_WITH_METRICS {
tracing::info!("overriding metrics port to {}", port);
config.metrics_port = port;
} else {
tracing::warn!(
"Metrics are not enabled in this build, ignoring metrics port configuration."
);
}
}
let context = ExporterContext::new(node_options, config);
let runtime = tokio::runtime::Builder::new_multi_thread()
.thread_name("block-exporter-worker")
.worker_threads(self.max_exporter_threads)
.enable_all()
.build()?;
let future = async {
let store_config = self
.storage_config
.add_common_storage_options(&self.common_storage_options)
.unwrap();
// Exporters are part of validator infrastructure and should not output contract logs.
let allow_application_logs = false;
store_config
.run_with_storage(None, allow_application_logs, context)
.boxed()
.await
};
runtime.block_on(future)?.map_err(|e| e.into())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/common.rs | linera-service/src/exporter/common.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{future::IntoFuture, net::SocketAddr};
use bincode::ErrorKind;
use custom_debug_derive::Debug;
use linera_base::{
crypto::CryptoHash,
data_types::BlockHeight,
identifiers::{BlobId, ChainId},
};
use linera_chain::{data_types::IncomingBundle, types::ConfirmedBlock};
use linera_rpc::grpc::{GrpcError, GrpcProtoConversionError};
use linera_sdk::views::ViewError;
use serde::{Deserialize, Serialize};
use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned};
use tonic::Status;
#[derive(thiserror::Error, Debug)]
pub(crate) enum ExporterError {
#[error("received an invalid notification.")]
BadNotification(BadNotificationKind),
#[error("unable to load the exporter state: {0}")]
StateError(ViewError),
#[error("Missing certificate: {0}")]
ReadCertificateError(CryptoHash),
#[error("generic storage error: {0}")]
ViewError(#[from] ViewError),
#[error("block not processed by the block processor yet")]
UnprocessedBlock,
#[error("chain not yet processed by the block exporter")]
UnprocessedChain,
#[error("chain should be initialized from block zero")]
BadInitialization,
#[error("trying to re-initialize the chain: {0}")]
ChainAlreadyExists(ChainId),
#[error("unable to establish an underlying stream")]
SynchronizationFailed(Box<Status>),
#[error(transparent)]
GrpcError(#[from] GrpcError),
#[error("generic error: {0}")]
GenericError(Box<dyn std::error::Error + Send + Sync + 'static>),
}
#[derive(Debug)]
pub(crate) enum BadNotificationKind {
InvalidChainId {
#[debug(skip_if = Option::is_none)]
inner: Option<GrpcProtoConversionError>,
},
InvalidReason {
#[debug(skip_if = Option::is_none)]
inner: Option<Box<ErrorKind>>,
},
}
impl From<BadNotificationKind> for ExporterError {
fn from(value: BadNotificationKind) -> Self {
ExporterError::BadNotification(value)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct CanonicalBlock {
pub blobs: Box<[BlobId]>,
pub block_hash: CryptoHash,
}
impl CanonicalBlock {
pub(crate) fn new(hash: CryptoHash, blobs: &[BlobId]) -> CanonicalBlock {
CanonicalBlock {
block_hash: hash,
blobs: blobs.to_vec().into_boxed_slice(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, Copy)]
pub(crate) struct BlockId {
pub hash: CryptoHash,
pub chain_id: ChainId,
pub height: BlockHeight,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub(crate) struct LiteBlockId {
pub hash: CryptoHash,
pub height: BlockHeight,
}
impl BlockId {
pub(crate) fn new(chain_id: ChainId, hash: CryptoHash, height: BlockHeight) -> BlockId {
BlockId {
hash,
chain_id,
height,
}
}
pub(crate) fn from_incoming_bundle(incoming_bundle: &IncomingBundle) -> Self {
Self {
hash: incoming_bundle.bundle.certificate_hash,
chain_id: incoming_bundle.origin,
height: incoming_bundle.bundle.height,
}
}
pub(crate) fn from_confirmed_block(block: &ConfirmedBlock) -> BlockId {
BlockId::new(block.chain_id(), block.inner().hash(), block.height())
}
}
impl LiteBlockId {
pub(crate) fn new(height: BlockHeight, hash: CryptoHash) -> LiteBlockId {
LiteBlockId { hash, height }
}
}
impl From<BlockId> for LiteBlockId {
fn from(value: BlockId) -> Self {
LiteBlockId::new(value.height, value.hash)
}
}
#[derive(Clone)]
pub(crate) struct ExporterCancellationSignal {
token: CancellationToken,
}
impl ExporterCancellationSignal {
pub(crate) fn new(token: CancellationToken) -> Self {
Self { token }
}
}
impl IntoFuture for ExporterCancellationSignal {
type Output = ();
type IntoFuture = WaitForCancellationFutureOwned;
fn into_future(self) -> Self::IntoFuture {
self.token.cancelled_owned()
}
}
pub(crate) fn get_address(port: u16) -> SocketAddr {
SocketAddr::from(([0, 0, 0, 0], port))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/metrics.rs | linera-service/src/exporter/metrics.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::LazyLock;
use linera_base::prometheus_util::{self};
use prometheus::{Histogram, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec};
pub(crate) static GET_BLOB_HISTOGRAM: LazyLock<Histogram> = LazyLock::new(|| {
prometheus_util::register_histogram(
"get_blob_histogram_ms",
"Time it took to read a blob from the storage",
None,
)
});
pub(crate) static GET_CERTIFICATE_HISTOGRAM: LazyLock<Histogram> = LazyLock::new(|| {
prometheus_util::register_histogram(
"get_certificate_histogram",
"Time it took to read a certificate from the storage",
None,
)
});
pub(crate) static GET_CANONICAL_BLOCK_HISTOGRAM: LazyLock<Histogram> = LazyLock::new(|| {
prometheus_util::register_histogram(
"get_canonical_block_histogram_ms",
"Time it took to read a canonical block from the storage",
None,
)
});
pub(crate) static SAVE_HISTOGRAM: LazyLock<Histogram> = LazyLock::new(|| {
prometheus_util::register_histogram(
"block_processor_state_save_histogram_ms",
"Time it took to save the exporter state to the storage",
None,
)
});
pub(crate) static DISPATCH_BLOCK_HISTOGRAM: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"dispatch_block_histogram_ms",
"Time it took to dispatch a block to a destination",
&["destination"],
None,
)
});
pub(crate) static DISPATCH_BLOB_HISTOGRAM: LazyLock<HistogramVec> = LazyLock::new(|| {
prometheus_util::register_histogram_vec(
"dispatch_blob_histogram_ms",
"Time it took to dispatch a blob to a validator destination",
&["destination"],
None,
)
});
pub(crate) static DESTINATION_STATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
prometheus_util::register_int_counter_vec(
"destination_state_counter",
"Current state (height) of the destination as seen by the exporter",
&["destination"],
)
});
pub(crate) static VALIDATOR_EXPORTER_QUEUE_LENGTH: LazyLock<IntGaugeVec> = LazyLock::new(|| {
prometheus_util::register_int_gauge_vec(
"validator_exporter_queue_length",
"Length of the block queue for validator exporters",
&["destination"],
)
});
pub(crate) static EXPORTER_NOTIFICATION_QUEUE_LENGTH: LazyLock<IntGauge> = LazyLock::new(|| {
prometheus_util::register_int_gauge(
"exporter_notification_queue_length",
"Length of the notification queue for the exporter service",
)
});
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/logging_exporter.rs | linera-service/src/exporter/runloops/logging_exporter.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{future::IntoFuture, io::Write, path::Path};
use linera_chain::types::CertificateValue;
use tokio::select;
use crate::storage::ExporterStorage;
/// A logging exporter that writes logs to a file.
///
/// This exporter does not track any state or process data; it simply logs messages to a specified file.
/// It will export events as they occur, never exporting past ones,
/// which can be useful for debugging and monitoring purposes.
pub(crate) struct LoggingExporter {
file: std::fs::File,
}
impl LoggingExporter {
/// Creates a new `LoggingExporter` that logs to the specified file.
pub fn new(log_file: &Path) -> Self {
let file = std::fs::File::create(log_file).expect("Failed to create log file");
LoggingExporter { file }
}
pub(crate) async fn run_with_shutdown<S, F: IntoFuture<Output = ()>>(
self,
shutdown_signal: F,
storage: ExporterStorage<S>,
) -> anyhow::Result<()>
where
S: linera_storage::Storage + Clone + Send + Sync + 'static,
{
let shutdown_signal_future = shutdown_signal.into_future();
let mut pinned_shutdown_signal = Box::pin(shutdown_signal_future);
select! {
_ = &mut pinned_shutdown_signal => {
tracing::info!("logging exporter shutdown signal received, exiting.");
}
_ = self.start_logger(storage) => {
}
}
Ok(())
}
async fn start_logger<S>(mut self, storage: ExporterStorage<S>) -> anyhow::Result<()>
where
S: linera_storage::Storage + Clone + Send + Sync + 'static,
{
let mut canonical_chain_height = storage.get_latest_index();
tracing::info!(
"starting logging exporter at height {}",
canonical_chain_height
);
loop {
if let Ok((block, blobs)) = storage.get_block_with_blobs(canonical_chain_height).await {
let inner = block.inner();
writeln!(
self.file,
"Block ID: {}, Chain: {}, Height: {}, State Hash: {}, Authenticated Owner: {}",
inner.hash(),
inner.chain_id(),
inner.height(),
inner.block().header.state_hash,
inner
.block()
.header
.authenticated_owner
.map_or_else(|| "N/A".into(), |signer| signer.to_string()),
)?;
for blob in blobs {
writeln!(self.file, "\tBlob ID: {}", blob.id(),)?;
}
canonical_chain_height += 1;
} else {
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/mod.rs | linera-service/src/exporter/runloops/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::future::{Future, IntoFuture};
use block_processor::BlockProcessor;
use indexer::indexer_exporter::Exporter as IndexerExporter;
use linera_rpc::NodeOptions;
use linera_service::config::{DestinationConfig, LimitsConfig};
use linera_storage::Storage;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use validator_exporter::Exporter as ValidatorExporter;
use crate::{
common::{BlockId, ExporterError},
runloops::task_manager::ExportersTracker,
storage::BlockProcessorStorage,
};
mod block_processor;
mod indexer;
mod logging_exporter;
mod task_manager;
mod validator_exporter;
#[cfg(test)]
pub use indexer::indexer_api;
pub(crate) fn start_block_processor_task<S, F>(
storage: S,
shutdown_signal: F,
limits: LimitsConfig,
options: NodeOptions,
block_exporter_id: u32,
destination_config: DestinationConfig,
) -> (
UnboundedSender<BlockId>,
std::thread::JoinHandle<Result<(), ExporterError>>,
)
where
S: Storage + Clone + Send + Sync + 'static,
F: IntoFuture<Output = ()> + Clone + Send + Sync + 'static,
<F as IntoFuture>::IntoFuture: Future<Output = ()> + Send + Sync + 'static,
{
let (task_sender, queue_front) = unbounded_channel();
let new_block_queue = NewBlockQueue {
queue_rear: task_sender.clone(),
queue_front,
};
let handle = std::thread::spawn(move || {
start_block_processor(
storage,
shutdown_signal,
limits,
options,
block_exporter_id,
new_block_queue,
destination_config,
)
});
(task_sender, handle)
}
struct NewBlockQueue {
pub(crate) queue_rear: UnboundedSender<BlockId>,
pub(crate) queue_front: UnboundedReceiver<BlockId>,
}
impl NewBlockQueue {
async fn recv(&mut self) -> Option<BlockId> {
let block = self.queue_front.recv().await;
#[cfg(with_metrics)]
crate::metrics::EXPORTER_NOTIFICATION_QUEUE_LENGTH.dec();
block
}
fn push_back(&self, block_id: BlockId) {
self.queue_rear
.send(block_id)
.expect("sender should never fail");
#[cfg(with_metrics)]
crate::metrics::EXPORTER_NOTIFICATION_QUEUE_LENGTH.inc();
}
}
#[tokio::main(flavor = "current_thread")]
async fn start_block_processor<S, F>(
storage: S,
shutdown_signal: F,
limits: LimitsConfig,
options: NodeOptions,
block_exporter_id: u32,
new_block_queue: NewBlockQueue,
destination_config: DestinationConfig,
) -> Result<(), ExporterError>
where
S: Storage + Clone + Send + Sync + 'static,
F: IntoFuture<Output = ()> + Clone + Send + Sync + 'static,
<F as IntoFuture>::IntoFuture: Future<Output = ()> + Send + Sync + 'static,
{
let destination_ids = destination_config
.destinations
.iter()
.map(|destination| destination.id())
.collect::<Vec<_>>();
let (block_processor_storage, mut exporter_storage) =
BlockProcessorStorage::load(storage.clone(), block_exporter_id, destination_ids, limits)
.await?;
let tracker = ExportersTracker::new(
options,
limits.work_queue_size.into(),
shutdown_signal.clone(),
exporter_storage.clone()?,
destination_config.destinations.clone(),
);
let mut block_processor = BlockProcessor::new(
tracker,
block_processor_storage,
new_block_queue,
destination_config.committee_destination,
);
block_processor
.run_with_shutdown(shutdown_signal, limits.persistence_period_ms)
.await?;
block_processor.pool_state().join_all().await;
Ok(())
}
#[cfg(test)]
mod test {
use std::{collections::BTreeMap, sync::atomic::Ordering, time::Duration};
use linera_base::{
crypto::{AccountPublicKey, Secp256k1PublicKey},
data_types::{
Blob, BlobContent, ChainDescription, ChainOrigin, Epoch, InitialChainConfig, Round,
Timestamp,
},
port::get_free_port,
};
use linera_chain::{
data_types::BlockExecutionOutcome,
test::{make_child_block, make_first_block, BlockTestExt},
types::{CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate},
};
use linera_execution::{
committee::{Committee, ValidatorState},
system::AdminOperation,
Operation, ResourceControlPolicy, SystemOperation,
};
use linera_rpc::{config::TlsConfig, NodeOptions};
use linera_service::{
cli_wrappers::local_net::LocalNet,
config::{Destination, DestinationConfig, DestinationKind, LimitsConfig},
};
use linera_storage::{DbStorage, Storage};
use linera_views::{memory::MemoryDatabase, ViewError};
use test_case::test_case;
use tokio::time::sleep;
use tokio_util::sync::CancellationToken;
use super::start_block_processor_task;
use crate::{
common::{get_address, BlockId, CanonicalBlock},
state::BlockExporterStateView,
test_utils::{make_simple_state_with_blobs, DummyIndexer, DummyValidator, TestDestination},
ExporterCancellationSignal,
};
#[test_case(DummyIndexer::default())]
#[test_case(DummyValidator::default())]
#[test_log::test(tokio::test)]
async fn test_destinations<T>(destination: T) -> Result<(), anyhow::Error>
where
T: TestDestination + Clone + Send + 'static,
{
let port = get_free_port().await?;
let cancellation_token = CancellationToken::new();
tokio::spawn(destination.clone().start(port, cancellation_token.clone()));
LocalNet::ensure_grpc_server_has_started("test server", port as usize, "http").await?;
let signal = ExporterCancellationSignal::new(cancellation_token.clone());
let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
let destination_address = match destination.kind() {
DestinationKind::Indexer => Destination::Indexer {
port,
tls: TlsConfig::ClearText,
endpoint: "127.0.0.1".to_owned(),
},
DestinationKind::Validator => Destination::Validator {
port,
endpoint: "127.0.0.1".to_owned(),
},
DestinationKind::Logging => {
unreachable!("Logging destination is not supported in tests")
}
};
// make some blocks
let (notification, state) = make_simple_state_with_blobs(&storage).await;
let (notifier, handle) = start_block_processor_task(
storage,
signal,
LimitsConfig::default(),
NodeOptions {
send_timeout: Duration::from_millis(4000),
recv_timeout: Duration::from_millis(4000),
retry_delay: Duration::from_millis(1000),
max_retries: 10,
},
0,
DestinationConfig {
committee_destination: false,
destinations: vec![destination_address],
},
);
assert!(
notifier.send(notification).is_ok(),
"notifier should work as long as there exists a receiver to receive notifications"
);
sleep(Duration::from_secs(4)).await;
for CanonicalBlock { blobs, block_hash } in state {
assert!(destination.state().pin().contains(&block_hash));
for blob in blobs {
assert!(destination.blobs().pin().contains(&blob));
}
}
cancellation_token.cancel();
handle.join().unwrap()?;
Ok(())
}
#[test_log::test(tokio::test)]
async fn test_restart_persistence_and_faulty_destination_restart() -> Result<(), anyhow::Error>
{
let mut destinations = Vec::new();
let cancellation_token = CancellationToken::new();
let _indexer = spawn_dummy_indexer(&mut destinations, &cancellation_token).await?;
let faulty_indexer = spawn_faulty_indexer(&mut destinations, &cancellation_token).await?;
let validator = spawn_dummy_validator(&mut destinations, &cancellation_token).await?;
let faulty_validator =
spawn_faulty_validator(&mut destinations, &cancellation_token).await?;
let child = cancellation_token.child_token();
let signal = ExporterCancellationSignal::new(child.clone());
let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
let (notification, _state) = make_simple_state_with_blobs(&storage).await;
let (notifier, _handle) = start_block_processor_task(
storage.clone(),
signal,
LimitsConfig {
persistence_period_ms: 3000,
..Default::default()
},
NodeOptions {
send_timeout: Duration::from_millis(4000),
recv_timeout: Duration::from_millis(4000),
retry_delay: Duration::from_millis(1000),
max_retries: 10,
},
0,
DestinationConfig {
committee_destination: false,
destinations: destinations.clone(),
},
);
assert!(
notifier.send(notification).is_ok(),
"notifier should work as long as there exists a receiver to receive notifications"
);
sleep(Duration::from_secs(4)).await;
child.cancel();
// handle.join().unwrap()?;
let context = storage.block_exporter_context(0).await?;
let destination_ids = destinations.iter().map(|d| d.id()).collect::<Vec<_>>();
let (_, _, destination_states) =
BlockExporterStateView::initiate(context.clone(), destination_ids.clone()).await?;
for (i, destination) in destination_ids.iter().enumerate() {
let state = destination_states.load_state(destination);
// We created destinations such that odd ones were faulty.
if i % 2 == 0 {
assert_eq!(state.load(Ordering::Acquire), 2);
} else {
assert_eq!(state.load(Ordering::Acquire), 0);
}
}
assert!(validator.duplicate_blocks.is_empty());
tracing::info!("restarting block processor task with faulty destinations fixed");
faulty_indexer.unset_faulty();
faulty_validator.unset_faulty();
let child = cancellation_token.child_token();
let signal = ExporterCancellationSignal::new(child.clone());
// restart
let (_notifier, handle) = start_block_processor_task(
storage.clone(),
signal,
LimitsConfig {
persistence_period_ms: 3000,
..Default::default()
},
NodeOptions {
send_timeout: Duration::from_millis(4000),
recv_timeout: Duration::from_millis(4000),
retry_delay: Duration::from_millis(1000),
max_retries: 10,
},
0,
DestinationConfig {
destinations: destinations.clone(),
committee_destination: false,
},
);
sleep(Duration::from_secs(4)).await;
child.cancel();
handle.join().unwrap()?;
let (_, _, destination_states) =
BlockExporterStateView::initiate(context.clone(), destination_ids).await?;
for destination in destinations {
assert_eq!(
destination_states
.load_state(&destination.id())
.load(Ordering::Acquire),
2
);
}
assert!(validator.duplicate_blocks.is_empty());
assert!(faulty_validator.duplicate_blocks.is_empty());
Ok(())
}
/// End-to-end test of `committee_destination` mode: the block processor takes
/// its validator destinations from committees published on-chain (not from a
/// static list) and follows committee changes — adding a validator starts a
/// new exporter, removing one stops deliveries to it.
#[test_log::test(tokio::test)]
async fn test_committee_destination() -> anyhow::Result<()> {
    tracing::info!("Starting test_committee_destination test");
    let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
    let test_chain = TestChain::new(storage.clone());
    let cancellation_token = CancellationToken::new();
    let child = cancellation_token.child_token();
    let signal = ExporterCancellationSignal::new(child.clone());
    let mut destinations = vec![];
    // First committee member: a dummy validator server listening locally.
    let dummy_validator = spawn_dummy_validator(&mut destinations, &cancellation_token).await?;
    let destination = destinations[0].clone();
    let validator_state = ValidatorState {
        network_address: destination.address(),
        votes: 0,
        account_public_key: AccountPublicKey::test_key(0),
    };
    // `committee_destination: true` with an empty static list: every exporter
    // destination must be discovered from the published committee blobs.
    let (notifier, block_processor_handle) = start_block_processor_task(
        storage.clone(),
        signal,
        LimitsConfig {
            persistence_period_ms: 3000,
            ..Default::default()
        },
        NodeOptions {
            send_timeout: Duration::from_millis(4000),
            recv_timeout: Duration::from_millis(4000),
            retry_delay: Duration::from_millis(1000),
            max_retries: 10,
        },
        0,
        DestinationConfig {
            committee_destination: true,
            destinations: vec![],
        },
    );
    let mut single_validator = BTreeMap::new();
    single_validator.insert(Secp256k1PublicKey::test_key(0), validator_state);
    // Publish the first committee (one validator) and notify the processor.
    let confirmed_certificate = test_chain
        .publish_committee(&single_validator, None)
        .await
        .expect("Failed to publish committee");
    let first_notification = BlockId::from_confirmed_block(confirmed_certificate.value());
    notifier.send(first_notification)?;
    sleep(Duration::from_secs(4)).await;
    {
        let pinned = dummy_validator.state.pin();
        assert!(pinned.contains(&first_notification.hash));
    }
    // We expect the validator to receive the confirmed certificate only once.
    {
        let pinned = dummy_validator.duplicate_blocks.pin();
        assert!(pinned.get(&first_notification.hash).is_none());
    }
    ///////////
    // Add new validator to the committee.
    ///////////
    let second_dummy = spawn_dummy_validator(&mut destinations, &cancellation_token).await?;
    let destination = destinations[1].clone();
    let validator_state = ValidatorState {
        network_address: destination.address(),
        votes: 0,
        account_public_key: AccountPublicKey::test_key(1),
    };
    let mut two_validators = single_validator.clone();
    two_validators.insert(Secp256k1PublicKey::test_key(1), validator_state);
    // Publish a two-validator committee chained onto the previous block.
    let add_validator_certificate = test_chain
        .publish_committee(&two_validators, Some(confirmed_certificate.value().clone()))
        .await
        .expect("Failed to publish new committee");
    let second_notification = BlockId::from_confirmed_block(add_validator_certificate.value());
    notifier.send(second_notification)?;
    sleep(Duration::from_secs(4)).await;
    {
        let pinned = second_dummy.state.pin();
        assert!(pinned.contains(&second_notification.hash));
    }
    // We expect the new validator to receive the new confirmed certificate only once.
    {
        let pinned = second_dummy.duplicate_blocks.pin();
        assert!(pinned.get(&second_notification.hash).is_none());
    }
    // The first certificate should not be duplicated.
    {
        let pinned = second_dummy.duplicate_blocks.pin();
        assert!(pinned.get(&first_notification.hash).is_none());
    }
    // The first validator should receive the new committee as well.
    {
        let pinned = dummy_validator.state.pin();
        assert!(pinned.contains(&second_notification.hash));
    }
    // We expect the first validator to receive the new confirmed certificate only once.
    {
        let pinned = dummy_validator.duplicate_blocks.pin();
        assert!(pinned.get(&second_notification.hash).is_none());
    }
    ///////////
    // Remove the validator from the committee.
    ///////////
    let mut new_validators = two_validators.clone();
    new_validators.remove(&Secp256k1PublicKey::test_key(0));
    let remove_validator_certificate = test_chain
        .publish_committee(
            &new_validators,
            Some(add_validator_certificate.value().clone()),
        )
        .await
        .expect("Failed to publish new committee");
    let third_notification =
        BlockId::from_confirmed_block(remove_validator_certificate.value());
    notifier.send(third_notification)?;
    sleep(Duration::from_secs(4)).await;
    // The first validator was removed, so it should not receive the new
    // confirmed certificate.
    {
        let pinned = dummy_validator.state.pin();
        assert!(!pinned.contains(&third_notification.hash));
    }
    // And it should have no duplicate record of it either.
    {
        let pinned = dummy_validator.duplicate_blocks.pin();
        assert!(pinned.get(&third_notification.hash).is_none());
    }
    // The second validator should receive the new confirmed certificate.
    {
        let pinned = second_dummy.state.pin();
        assert!(pinned.contains(&third_notification.hash));
    }
    // We expect the second validator to receive the new confirmed certificate only once.
    {
        let pinned = second_dummy.duplicate_blocks.pin();
        assert!(pinned.get(&third_notification.hash).is_none());
    }
    cancellation_token.cancel();
    block_processor_handle.join().unwrap()?;
    Ok(())
}
/// Test helper bundling a root-chain description with a storage handle, used
/// to author and persist blocks (e.g. committee publications) for that chain.
struct TestChain<S> {
    // Description of the single (root) chain this helper writes blocks for.
    chain_description: ChainDescription,
    // Storage where blobs and certificates are written.
    storage: S,
}
impl<S> TestChain<S> {
    /// Creates a helper for a fresh root chain (origin `Root(0)`) with default
    /// ownership/balance and both epoch bounds pinned to `Epoch::ZERO`.
    fn new(storage: S) -> Self {
        let chain_description = ChainDescription::new(
            ChainOrigin::Root(0),
            InitialChainConfig {
                ownership: Default::default(),
                epoch: Default::default(),
                balance: Default::default(),
                application_permissions: Default::default(),
                min_active_epoch: Epoch::ZERO,
                max_active_epoch: Epoch::ZERO,
            },
            Timestamp::now(),
        );
        Self {
            chain_description,
            storage,
        }
    }

    /// Constructs a new block whose blob contains the given committee.
    ///
    /// Builds a `CreateCommittee` operation — chained onto `prev_block` with
    /// epoch `parent + 1` if given, otherwise the chain's first block at
    /// `Epoch::ZERO` — writes the chain-description and committee blobs along
    /// with the resulting certificate, and returns the certificate.
    async fn publish_committee(
        &self,
        validators: &BTreeMap<Secp256k1PublicKey, ValidatorState>,
        prev_block: Option<ConfirmedBlock>,
    ) -> Result<ConfirmedBlockCertificate, ViewError>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let committee = Committee::new(validators.clone(), ResourceControlPolicy::testnet());
        let chain_id = self.chain_description.id();
        let chain_blob = Blob::new_chain_description(&self.chain_description);
        let committee_blob = Blob::new(BlobContent::new_committee(bcs::to_bytes(&committee)?));
        let proposed_block = if let Some(parent_block) = prev_block {
            make_child_block(&parent_block).with_operation(Operation::System(Box::new(
                SystemOperation::Admin(AdminOperation::CreateCommittee {
                    epoch: parent_block.epoch().try_add_one().unwrap(),
                    blob_hash: committee_blob.id().hash,
                }),
            )))
        } else {
            make_first_block(chain_id).with_operation(Operation::System(Box::new(
                SystemOperation::Admin(AdminOperation::CreateCommittee {
                    epoch: Epoch::ZERO,
                    blob_hash: committee_blob.id().hash,
                }),
            )))
        };
        let blobs = vec![chain_blob, committee_blob];
        let block = BlockExecutionOutcome {
            // Nested: presumably one inner `Vec` of blobs per transaction —
            // TODO confirm against `BlockExecutionOutcome`'s definition.
            blobs: vec![blobs.clone()],
            ..Default::default()
        }
        .with(proposed_block);
        let confirmed_block = ConfirmedBlock::new(block);
        // Test-only certificate: fast round, no validator signatures.
        let certificate = ConfirmedBlockCertificate::new(confirmed_block, Round::Fast, vec![]);
        self.storage
            .write_blobs_and_certificate(blobs.as_ref(), &certificate)
            .await?;
        Ok(certificate)
    }
}
/// Starts a `DummyIndexer` gRPC server on a free local port, waits until it
/// accepts connections, and appends its address to `destinations`.
async fn spawn_dummy_indexer(
    destinations: &mut Vec<Destination>,
    token: &CancellationToken,
) -> anyhow::Result<DummyIndexer> {
    let port = get_free_port().await?;
    let indexer = DummyIndexer::default();
    tokio::spawn(indexer.clone().start(port, token.clone()));
    // Block until the server is reachable so callers cannot race it.
    LocalNet::ensure_grpc_server_has_started("dummy indexer", port as usize, "http").await?;
    destinations.push(Destination::Indexer {
        port,
        tls: TlsConfig::ClearText,
        endpoint: "127.0.0.1".to_owned(),
    });
    Ok(indexer)
}
/// Starts a `DummyValidator` gRPC server on a free local port, waits until it
/// accepts connections, and appends its address to `destinations`.
async fn spawn_dummy_validator(
    destinations: &mut Vec<Destination>,
    token: &CancellationToken,
) -> anyhow::Result<DummyValidator> {
    let port = get_free_port().await?;
    let validator = DummyValidator::new(port);
    tokio::spawn(validator.clone().start(port, token.clone()));
    LocalNet::ensure_grpc_server_has_started("dummy validator", port as usize, "http").await?;
    destinations.push(Destination::Validator {
        port,
        endpoint: get_address(port as u16).ip().to_string(),
    });
    Ok(validator)
}
/// Like `spawn_dummy_indexer`, but the indexer is switched into faulty mode
/// before being started.
async fn spawn_faulty_indexer(
    destinations: &mut Vec<Destination>,
    token: &CancellationToken,
) -> anyhow::Result<DummyIndexer> {
    let port = get_free_port().await?;
    let indexer = DummyIndexer::default();
    indexer.set_faulty();
    tokio::spawn(indexer.clone().start(port, token.clone()));
    LocalNet::ensure_grpc_server_has_started("faulty indexer", port as usize, "http").await?;
    destinations.push(Destination::Indexer {
        port,
        tls: TlsConfig::ClearText,
        endpoint: "127.0.0.1".to_owned(),
    });
    Ok(indexer)
}
/// Like `spawn_dummy_validator`, but faulty mode is enabled before start.
///
/// NOTE(review): unlike `spawn_dummy_validator`, this uses
/// `DummyValidator::default()` (not `new(port)`) and a hard-coded
/// `127.0.0.1` endpoint — confirm both differences are intentional.
async fn spawn_faulty_validator(
    destinations: &mut Vec<Destination>,
    token: &CancellationToken,
) -> anyhow::Result<DummyValidator> {
    let port = get_free_port().await?;
    let validator = DummyValidator::default();
    validator.set_faulty();
    tokio::spawn(validator.clone().start(port, token.clone()));
    LocalNet::ensure_grpc_server_has_started("faulty validator", port as usize, "http").await?;
    destinations.push(Destination::Validator {
        port,
        endpoint: "127.0.0.1".to_owned(),
    });
    Ok(validator)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/task_manager.rs | linera-service/src/exporter/runloops/task_manager.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{HashMap, HashSet},
future::{Future, IntoFuture},
path::Path,
sync::Arc,
};
use linera_rpc::{grpc::GrpcNodeProvider, NodeOptions};
use linera_service::config::{Destination, DestinationId, DestinationKind};
use linera_storage::Storage;
use crate::{runloops::logging_exporter::LoggingExporter, storage::ExporterStorage};
/// This type manages tasks like spawning different exporters on the different
/// threads, discarding the committees and joining every thread properly at the
/// end for graceful shutdown of the process.
pub(super) struct ExportersTracker<F, S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Factory shared by every exporter task this tracker spawns.
    exporters_builder: ExporterBuilder<F>,
    storage: ExporterStorage<S>,
    // Destinations configured statically at startup; never shut down here.
    startup_destinations: HashSet<DestinationId>,
    // Destinations discovered from the current committee; replaced on change.
    current_committee_destinations: HashSet<DestinationId>,
    // Handles to all the exporter tasks spawned. Allows to join them later and shut down the thread gracefully.
    join_handles: HashMap<DestinationId, tokio::task::JoinHandle<anyhow::Result<()>>>,
}
impl<F, S> ExportersTracker<F, S>
where
    S: Storage + Clone + Send + Sync + 'static,
    F: IntoFuture<Output = ()> + Clone + Send + Sync + 'static,
    <F as IntoFuture>::IntoFuture: Future<Output = ()> + Send + Sync + 'static,
{
    /// Creates a tracker over the statically configured `startup_destinations`
    /// with no committee destinations and no running tasks yet.
    pub(super) fn new(
        node_options: NodeOptions,
        work_queue_size: usize,
        shutdown_signal: F,
        storage: ExporterStorage<S>,
        startup_destinations: Vec<Destination>,
    ) -> Self {
        let exporters_builder =
            ExporterBuilder::new(node_options, work_queue_size, shutdown_signal);
        Self {
            exporters_builder,
            storage,
            startup_destinations: startup_destinations
                .into_iter()
                .map(|destination| destination.id())
                .collect(),
            current_committee_destinations: HashSet::new(),
            join_handles: HashMap::new(),
        }
    }

    /// Spawns an exporter task for every destination configured at startup.
    // The per-method `where` clauses previously repeated here were redundant:
    // the bounds already hold for the entire `impl` block.
    pub(super) fn start_startup_exporters(&mut self) {
        for id in self.startup_destinations.clone() {
            // `id` is already owned here; the extra `id.clone()` was redundant.
            self.spawn(id)
        }
    }

    /// Spawns exporter tasks for the given committee members, skipping any
    /// destination that is already running (startup or current committee).
    pub(super) fn start_committee_exporters(&mut self, destination_ids: Vec<DestinationId>) {
        for destination in destination_ids {
            if !self.startup_destinations.contains(&destination)
                && !self.current_committee_destinations.contains(&destination)
            {
                self.current_committee_destinations
                    .insert(destination.clone());
                tracing::info!(id=?destination, "starting committee exporter");
                self.spawn(destination);
            } else {
                tracing::info!(id=?destination, "skipping already running committee exporter");
            }
        }
    }

    /// Shuts down block exporters for destinations that are not in the new committee.
    pub(super) fn shutdown_old_committee(&mut self, new_committee: Vec<DestinationId>) {
        let new_committee: HashSet<DestinationId> = new_committee.into_iter().collect();
        // Collect first: we cannot mutate the set while iterating a borrow.
        let stale: Vec<DestinationId> = self
            .current_committee_destinations
            .difference(&new_committee)
            .cloned()
            .collect();
        for id in stale {
            tracing::info!(id=?id, "shutting down old committee member");
            if let Some(abort_handle) = self.join_handles.remove(&id) {
                abort_handle.abort();
            }
            // Forget the destination so it can be restarted if it rejoins a
            // later committee. Previously it was left in the set, making
            // `start_committee_exporters` skip it forever.
            self.current_committee_destinations.remove(&id);
        }
    }

    /// Awaits every remaining exporter task for a graceful shutdown, logging
    /// (not propagating) task-level errors.
    pub(super) async fn join_all(self) {
        for (id, handle) in self.join_handles {
            // Wait for all tasks to finish. `unwrap` panics only if a task
            // itself panicked (aborted handles were removed above).
            if let Err(e) = handle.await.unwrap() {
                tracing::error!(id=?id, error=?e, "failed to join task");
            }
        }
    }

    // Spawns a single exporter for `id` and records its join handle.
    fn spawn(&mut self, id: DestinationId) {
        let exporter_builder = &self.exporters_builder;
        let storage = self.storage.clone().expect("Failed to clone storage");
        let join_handle = exporter_builder.spawn(id.clone(), storage);
        self.join_handles.insert(id, join_handle);
    }
}
/// All the data required by a thread to spawn different tasks
/// on its runtime, join the thread, handle the committees etc.
pub(super) struct ExporterBuilder<F> {
    // Node options forwarded to indexer exporters.
    options: NodeOptions,
    // Bound on each exporter's in-flight work queue.
    work_queue_size: usize,
    // gRPC node provider shared by all validator exporters.
    node_provider: Arc<GrpcNodeProvider>,
    // Cloneable shutdown future handed to every spawned exporter.
    shutdown_signal: F,
}
impl<F> ExporterBuilder<F>
where
    F: IntoFuture<Output = ()> + Clone + Send + Sync + 'static,
    <F as IntoFuture>::IntoFuture: Future<Output = ()> + Send + Sync + 'static,
{
    /// Builds an `ExporterBuilder`, wrapping a shared gRPC node provider
    /// constructed from `options`.
    pub(super) fn new(options: NodeOptions, work_queue_size: usize, shutdown_signal: F) -> Self {
        Self {
            node_provider: Arc::new(GrpcNodeProvider::new(options)),
            options,
            work_queue_size,
            shutdown_signal,
        }
    }

    /// Spawns the exporter task matching the destination's kind and returns
    /// its join handle.
    pub(super) fn spawn<S>(
        &self,
        id: DestinationId,
        storage: ExporterStorage<S>,
    ) -> tokio::task::JoinHandle<anyhow::Result<()>>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        // Every exporter races its work against the same shutdown future.
        let shutdown = self.shutdown_signal.clone();
        match id.kind() {
            DestinationKind::Indexer => tokio::task::spawn(
                super::IndexerExporter::new(id.clone(), self.work_queue_size, self.options)
                    .run_with_shutdown(shutdown, storage),
            ),
            DestinationKind::Validator => tokio::task::spawn(
                super::ValidatorExporter::new(
                    id.clone(),
                    self.node_provider.clone(),
                    self.work_queue_size,
                )
                .run_with_shutdown(shutdown, storage),
            ),
            DestinationKind::Logging => tokio::task::spawn(
                LoggingExporter::new(Path::new(id.address()))
                    .run_with_shutdown(shutdown, storage),
            ),
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/validator_exporter.rs | linera-service/src/exporter/runloops/validator_exporter.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
future::IntoFuture,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Duration,
};
use futures::{future::try_join_all, stream::FuturesOrdered};
use linera_base::identifiers::BlobId;
use linera_chain::types::ConfirmedBlockCertificate;
use linera_core::node::{
CrossChainMessageDelivery, NodeError, ValidatorNode, ValidatorNodeProvider,
};
use linera_rpc::grpc::{GrpcClient, GrpcNodeProvider};
use linera_service::config::DestinationId;
use linera_storage::Storage;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio_stream::StreamExt;
use crate::{
common::{BlockId, ExporterError},
storage::ExporterStorage,
};
/// Run-loop configuration for exporting confirmed blocks to one validator
/// destination.
pub(crate) struct Exporter {
    // Shared provider used to build the gRPC client for the destination.
    node_provider: Arc<GrpcNodeProvider>,
    destination_id: DestinationId,
    // Bound on the number of prefetched certificates kept in flight.
    work_queue_size: usize,
}
impl Exporter {
    /// Creates an exporter run loop for a single validator destination.
    pub(super) fn new(
        destination_id: DestinationId,
        node_provider: Arc<GrpcNodeProvider>,
        work_queue_size: usize,
    ) -> Self {
        Self {
            node_provider,
            destination_id,
            work_queue_size,
        }
    }

    /// Runs the export pipeline until `shutdown_signal` resolves.
    ///
    /// Wires two halves over a bounded channel of `work_queue_size` entries:
    /// a `TaskQueue` that prefetches certificates starting from the
    /// destination's persisted position, and an `ExportTask` that dispatches
    /// them to the validator. The `biased` select polls the shutdown signal
    /// first on every iteration.
    pub(super) async fn run_with_shutdown<S, F: IntoFuture<Output = ()>>(
        self,
        shutdown_signal: F,
        mut storage: ExporterStorage<S>,
    ) -> anyhow::Result<()>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        let address = self.destination_id.address().to_owned();
        let destination_state = storage.load_destination_state(&self.destination_id);
        let node = self.node_provider.make_node(&address)?;
        let (mut task_queue, task_receiver) = TaskQueue::new(
            self.work_queue_size,
            // Resume from the number of blocks already dispatched to this
            // destination.
            destination_state.load(Ordering::Acquire) as usize,
            storage.clone()?,
        );
        let export_task = ExportTask::new(node, storage.clone()?, destination_state);
        tokio::select! {
            biased;
            _ = shutdown_signal => {},
            res = task_queue.run() => res?,
            res = export_task.run(task_receiver) => res?,
        };
        Ok(())
    }
}
/// Consumer half of the validator export pipeline: dispatches certificates
/// (and missing blobs) to one validator node and advances its progress state.
struct ExportTask<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // gRPC client for the destination validator.
    node: GrpcClient,
    storage: ExporterStorage<S>,
    // Shared per-destination counter of successfully dispatched blocks.
    destination_state: Arc<AtomicU64>,
}
impl<S> ExportTask<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Bundles a validator client, storage access and the shared dispatch
    /// counter for one destination.
    fn new(
        node: GrpcClient,
        storage: ExporterStorage<S>,
        destination_state: Arc<AtomicU64>,
    ) -> ExportTask<S> {
        ExportTask {
            node,
            storage,
            destination_state,
        }
    }

    /// Dispatches certificates received from the `TaskQueue`, in order.
    ///
    /// If the validator reports missing blobs, uploads exactly the requested
    /// blobs (restricted to those referenced by the block) and retries the
    /// dispatch once. Any other node error aborts the task with that error.
    async fn run(
        &self,
        mut receiver: Receiver<(Arc<ConfirmedBlockCertificate>, Vec<BlobId>)>,
    ) -> anyhow::Result<()> {
        while let Some((block, blobs_ids)) = receiver.recv().await {
            #[cfg(with_metrics)]
            crate::metrics::VALIDATOR_EXPORTER_QUEUE_LENGTH
                .with_label_values(&[self.node.address()])
                .set(receiver.len() as i64);
            match self.dispatch_block((*block).clone()).await {
                Ok(_) => {}
                Err(NodeError::BlobsNotFound(blobs_to_maybe_send)) => {
                    // Send only blobs that both belong to this block and were
                    // asked for by the validator.
                    let blobs = blobs_ids
                        .into_iter()
                        .filter(|id| blobs_to_maybe_send.contains(id))
                        .collect();
                    self.upload_blobs(blobs).await?;
                    self.dispatch_block((*block).clone()).await?
                }
                Err(e) => Err(e)?,
            }
            // Progress advances only after a successful dispatch.
            self.increment_destination_state();
        }
        Ok(())
    }

    // Bumps the per-destination dispatch counter; the `Release` store pairs
    // with the `Acquire` load used when resuming from this state.
    fn increment_destination_state(&self) {
        let _ = self.destination_state.fetch_add(1, Ordering::Release);
        #[cfg(with_metrics)]
        crate::metrics::DESTINATION_STATE_COUNTER
            .with_label_values(&[self.node.address()])
            .inc();
    }

    /// Uploads the given blobs to the validator concurrently, failing fast on
    /// the first storage or upload error.
    async fn upload_blobs(&self, blobs: Vec<BlobId>) -> anyhow::Result<()> {
        let tasks = blobs.iter().map(|id| async {
            match self.storage.get_blob(*id).await {
                Err(e) => Err(e),
                Ok(blob) => {
                    tracing::info!(
                        blob_id=?blob.id(),
                        "dispatching blob",
                    );
                    #[cfg(with_metrics)]
                    let start = linera_base::time::Instant::now();
                    let result = self
                        .node
                        .upload_blob((*blob).clone().into())
                        .await
                        .map(|_| ())
                        .map_err(|e| ExporterError::GenericError(e.into()));
                    #[cfg(with_metrics)]
                    crate::metrics::DISPATCH_BLOB_HISTOGRAM
                        .with_label_values(&[self.node.address()])
                        .observe(start.elapsed().as_secs_f64() * 1000.0);
                    result
                }
            }
        });
        let _ = try_join_all(tasks).await?;
        Ok(())
    }

    /// Sends one confirmed-block certificate to the validator with
    /// non-blocking cross-chain message delivery, recording dispatch latency
    /// when metrics are enabled.
    async fn dispatch_block(
        &self,
        certificate: ConfirmedBlockCertificate,
    ) -> Result<(), NodeError> {
        let delivery = CrossChainMessageDelivery::NonBlocking;
        let block_id = BlockId::from_confirmed_block(certificate.value());
        tracing::info!(?block_id, "dispatching block");
        #[cfg(with_metrics)]
        let start = linera_base::time::Instant::now();
        // Boxed — presumably to keep the enclosing future small; confirm.
        match Box::pin(
            self.node
                .handle_confirmed_certificate(certificate, delivery),
        )
        .await
        {
            Ok(_) => {
                #[cfg(with_metrics)]
                crate::metrics::DISPATCH_BLOCK_HISTOGRAM
                    .with_label_values(&[self.node.address()])
                    .observe(start.elapsed().as_secs_f64() * 1000.0);
            }
            Err(e) => {
                tracing::error!(error=%e, ?block_id, "error when dispatching block");
                #[cfg(with_metrics)]
                crate::metrics::DISPATCH_BLOCK_HISTOGRAM
                    .with_label_values(&[self.node.address()])
                    .observe(start.elapsed().as_secs_f64() * 1000.0);
                Err(e)?
            }
        }
        Ok(())
    }
}
/// Producer half of the validator export pipeline: prefetches certificates
/// from exporter storage in height order and feeds them to the `ExportTask`.
struct TaskQueue<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Number of block fetches kept in flight at once.
    queue_size: usize,
    // Index of the first block to fetch (resume position).
    start_height: usize,
    storage: ExporterStorage<S>,
    // Bounded channel to the consuming `ExportTask`.
    buffer: Sender<(Arc<ConfirmedBlockCertificate>, Vec<BlobId>)>,
}
impl<S> TaskQueue<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates a queue that prefetches up to `queue_size` blocks starting at
    /// `start_height`, returning the queue and the consumer's receiver.
    #[expect(clippy::type_complexity)]
    fn new(
        queue_size: usize,
        start_height: usize,
        storage: ExporterStorage<S>,
    ) -> (
        TaskQueue<S>,
        Receiver<(Arc<ConfirmedBlockCertificate>, Vec<BlobId>)>,
    ) {
        let (sender, receiver) = tokio::sync::mpsc::channel(queue_size);
        let queue = Self {
            queue_size,
            start_height,
            storage,
            buffer: sender,
        };
        (queue, receiver)
    }

    /// Streams certificates in height order to the consumer, keeping
    /// `queue_size` fetches in flight (`FuturesOrdered` preserves order).
    async fn run(&mut self) -> anyhow::Result<()> {
        let mut index = self.start_height;
        let mut futures = FuturesOrdered::new();
        // Prime the pipeline.
        while futures.len() < self.queue_size {
            futures.push_back(self.get_block_task(index));
            index += 1;
        }
        while let Some(certificate) = futures.next().await.transpose()? {
            // A send error means the consumer (`ExportTask`) is gone; stop
            // fetching instead of silently discarding blocks (the previous
            // `let _ = …` ignored the error and kept looping).
            if self.buffer.send(certificate).await.is_err() {
                break;
            }
            futures.push_back(self.get_block_task(index));
            index += 1;
        }
        Ok(())
    }

    /// Fetches block `index` with its blob ids, polling once per second while
    /// the block is not yet processed.
    async fn get_block_task(
        &self,
        index: usize,
    ) -> Result<(Arc<ConfirmedBlockCertificate>, Vec<BlobId>), ExporterError> {
        loop {
            match self.storage.get_block_with_blob_ids(index).await {
                Ok(block_with_blobs_ids) => return Ok(block_with_blobs_ids),
                Err(ExporterError::UnprocessedBlock) => {
                    tokio::time::sleep(Duration::from_secs(1)).await
                }
                Err(e) => return Err(e),
            }
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/indexer/client.rs | linera-service/src/exporter/runloops/indexer/client.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(with_metrics)]
use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
use futures::StreamExt;
use linera_base::time::Duration;
use linera_rpc::{
grpc::{transport::Options, GrpcError, GRPC_MAX_MESSAGE_SIZE},
NodeOptions,
};
use tokio::{sync::mpsc::Sender, time::sleep};
use tokio_stream::wrappers::ReceiverStream;
use tonic::{
transport::{Channel, Endpoint},
Request, Streaming,
};
#[cfg(with_metrics)]
use super::indexer_api::element::Payload;
use super::indexer_api::{indexer_client::IndexerClient as IndexerClientInner, Element};
use crate::ExporterError;
/// Thin wrapper around the generated indexer gRPC client, carrying retry
/// settings and (when metrics are enabled) latency bookkeeping.
pub(super) struct IndexerClient {
    // Maximum number of connection retries before giving up.
    max_retries: u32,
    // Base delay between retries, scaled by the attempt number.
    retry_delay: Duration,
    client: IndexerClientInner<Channel>,
    #[cfg(with_metrics)]
    // Tracks timestamps of when we start exporting a Block to an indexer.
    sent_latency: Arc<Mutex<VecDeque<linera_base::time::Instant>>>,
    #[cfg(with_metrics)]
    // Destination address used as a metrics label.
    address: String,
}
impl IndexerClient {
    /// Creates a client for the indexer at `address`, taking retry behavior
    /// and message-size limits from `options`.
    pub(super) fn new(address: &str, options: NodeOptions) -> Result<Self, GrpcError> {
        let channel = create_channel(address, (&options).into())?;
        let client = IndexerClientInner::new(channel)
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
        Ok(Self {
            client,
            retry_delay: options.retry_delay,
            max_retries: options.max_retries,
            #[cfg(with_metrics)]
            sent_latency: Arc::from(Mutex::new(VecDeque::new())),
            #[cfg(with_metrics)]
            address: address.to_string(),
        })
    }

    // try to make a streaming connection with the destination
    /// Establishes the bidirectional `index_batch` stream, retrying with a
    /// linearly increasing delay up to `max_retries` attempts.
    ///
    /// Returns the bounded sender for outgoing elements and the stream of
    /// per-element ACKs; fails with `ExporterError::SynchronizationFailed`
    /// once retries are exhausted.
    pub(super) async fn setup_indexer_client(
        &mut self,
        queue_size: usize,
    ) -> Result<(Sender<Element>, Streaming<()>), ExporterError> {
        let mut retry_count = 0;
        loop {
            let (sender, receiver) = tokio::sync::mpsc::channel(queue_size);
            #[cfg(with_metrics)]
            let request = {
                // NOTE(review): `.into_inner()` below returns the underlying
                // `ReceiverStream`, which appears to discard this `map`
                // adapter (and its latency bookkeeping) — confirm intent.
                let stream = ReceiverStream::new(receiver).map(|element: Element| {
                    if let Some(Payload::Block(_)) = &element.payload {
                        use linera_base::time::Instant;
                        self.sent_latency.lock().unwrap().push_back(Instant::now());
                    }
                    element
                });
                Request::new(stream.into_inner())
            };
            #[cfg(not(with_metrics))]
            let request = Request::new(ReceiverStream::new(receiver));
            match self.client.index_batch(request).await {
                Ok(res) => {
                    let ack_stream = res.into_inner().map(|response: Result<(), tonic::Status>| {
                        #[cfg(with_metrics)]
                        {
                            // We assume that indexer responds with ACKs only after storing a block
                            // and that it doesn't ACK blocks out of order.
                            let start_time = self
                                .sent_latency
                                .lock()
                                .unwrap()
                                .pop_front()
                                .expect("have timer waiting");
                            crate::metrics::DISPATCH_BLOCK_HISTOGRAM
                                .with_label_values(&[&self.address])
                                .observe(start_time.elapsed().as_secs_f64() * 1000.0);
                        }
                        response
                    });
                    return Ok((sender, ack_stream.into_inner()));
                }
                Err(e) => {
                    if retry_count > self.max_retries {
                        tracing::error!(
                            retry_count,
                            max_retries = self.max_retries,
                            error = %e,
                            "Failed to connect to indexer after exhausting all retries, exporter task will exit"
                        );
                        return Err(ExporterError::SynchronizationFailed(e.into()));
                    }
                    // `retry_count + 1` so the first retry already waits one
                    // `retry_delay`; previously the first delay was zero.
                    let delay = self.retry_delay.saturating_mul(retry_count + 1);
                    tracing::warn!(
                        retry_count,
                        max_retries = self.max_retries,
                        retry_delay_ms = delay.as_millis(),
                        error = %e,
                        // The scaling is linear, not exponential; the old
                        // message was misleading.
                        "Failed to connect to indexer, retrying with linear backoff"
                    );
                    sleep(delay).await;
                    retry_count += 1;
                }
            }
        }
    }
}
/// Builds a lazily-connecting tonic channel for `address` with webpki TLS
/// roots, the optional connect/request timeouts from `options`, and fixed
/// keep-alive settings.
fn create_channel(address: &str, options: Options) -> Result<Channel, tonic::transport::Error> {
    let mut endpoint = Endpoint::from_shared(address.to_string())?
        .tls_config(tonic::transport::channel::ClientTlsConfig::default().with_webpki_roots())?;
    if let Some(connect_timeout) = options.connect_timeout {
        endpoint = endpoint.connect_timeout(connect_timeout);
    }
    if let Some(request_timeout) = options.timeout {
        endpoint = endpoint.timeout(request_timeout);
    }
    let endpoint = endpoint
        .http2_keep_alive_interval(Duration::from_secs(20))
        .keep_alive_timeout(Duration::from_secs(10))
        .tcp_keepalive(Some(Duration::from_secs(20)))
        .keep_alive_while_idle(true)
        .tcp_nodelay(false);
    // `connect_lazy`: the connection is only attempted on first use.
    Ok(endpoint.connect_lazy())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/indexer/mod.rs | linera-service/src/exporter/runloops/indexer/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// gRPC transport client for the indexer service.
mod client;
// `TryFrom` conversions between domain types and protobuf elements.
mod conversions;
// The exporter run loop that streams blocks and blobs to an indexer.
pub(crate) mod indexer_exporter;

/// Generated protobuf/gRPC bindings for the `indexer.linera_indexer` package.
pub mod indexer_api {
    tonic::include_proto!("indexer.linera_indexer");
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/indexer/conversions.rs | linera-service/src/exporter/runloops/indexer/conversions.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use bincode::Error;
use linera_base::data_types::Blob;
use linera_chain::types::ConfirmedBlockCertificate;
use super::indexer_api::{self, element::Payload, Block, Element};
impl TryFrom<Arc<ConfirmedBlockCertificate>> for Element {
    type Error = Error;

    /// Serializes the certificate with `bincode` and wraps the bytes in a
    /// `Payload::Block` element.
    fn try_from(value: Arc<ConfirmedBlockCertificate>) -> Result<Self, Self::Error> {
        bincode::serialize(value.as_ref()).map(|bytes| Element {
            payload: Some(Payload::Block(Block { bytes })),
        })
    }
}
impl TryFrom<Arc<Blob>> for Element {
    type Error = Error;

    /// Serializes the blob with `bincode` and wraps the bytes in a
    /// `Payload::Blob` element.
    fn try_from(value: Arc<Blob>) -> Result<Self, Self::Error> {
        bincode::serialize(value.as_ref()).map(|bytes| Element {
            payload: Some(Payload::Blob(indexer_api::Blob { bytes })),
        })
    }
}
impl TryFrom<indexer_api::Block> for ConfirmedBlockCertificate {
    type Error = Error;

    /// Deserializes the wire-format bytes back into a certificate.
    fn try_from(value: indexer_api::Block) -> Result<ConfirmedBlockCertificate, Self::Error> {
        let indexer_api::Block { bytes } = value;
        bincode::deserialize(&bytes)
    }
}
impl TryFrom<indexer_api::Blob> for Blob {
    type Error = Error;

    /// Deserializes the wire-format bytes back into a blob.
    fn try_from(value: indexer_api::Blob) -> Result<Self, Self::Error> {
        let indexer_api::Blob { bytes } = value;
        bincode::deserialize(&bytes)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/indexer/indexer_exporter.rs | linera-service/src/exporter/runloops/indexer/indexer_exporter.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
future::IntoFuture,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Duration,
};
use futures::{stream::FuturesOrdered, StreamExt};
use linera_base::data_types::Blob;
use linera_chain::types::ConfirmedBlockCertificate;
use linera_rpc::NodeOptions;
use linera_service::config::DestinationId;
use linera_storage::Storage;
use tokio::{select, sync::mpsc::Sender, time::sleep};
use tonic::Streaming;
use super::indexer_api::Element;
use crate::{
common::BlockId, runloops::indexer::client::IndexerClient, storage::ExporterStorage,
ExporterError,
};
/// Run-loop configuration for exporting blocks and blobs to one indexer
/// destination.
pub(crate) struct Exporter {
    // Connection/retry options for the indexer client.
    options: NodeOptions,
    // Bound on in-flight prefetches and the outgoing stream buffer.
    work_queue_size: usize,
    destination_id: DestinationId,
}
impl Exporter {
    /// Creates an exporter run loop for a single indexer destination.
    pub(crate) fn new(
        destination_id: DestinationId,
        work_queue_size: usize,
        options: NodeOptions,
    ) -> Exporter {
        Self {
            options,
            destination_id,
            work_queue_size,
        }
    }

    /// Runs the indexer export pipeline until `shutdown_signal` resolves.
    ///
    /// Each outer-loop iteration (re-)establishes a bidirectional stream to
    /// the indexer and races three things under a `biased` select:
    /// the shutdown signal (checked first), `ExportTaskQueue::run` pushing
    /// blobs and blocks, and `AcknowledgementTask::run` advancing the
    /// persisted position on each ACK. On stream errors or unexpected
    /// closure it sleeps one second and reconnects, resuming from the last
    /// acknowledged position.
    pub(crate) async fn run_with_shutdown<S, F: IntoFuture<Output = ()>>(
        self,
        shutdown_signal: F,
        mut storage: ExporterStorage<S>,
    ) -> anyhow::Result<()>
    where
        S: Storage + Clone + Send + Sync + 'static,
    {
        // Pin once so the same signal future can be polled across iterations.
        let shutdown_signal_future = shutdown_signal.into_future();
        let mut pinned_shutdown_signal = Box::pin(shutdown_signal_future);
        let address = self.destination_id.address();
        let mut client = IndexerClient::new(address, self.options)?;
        let destination_state = storage.load_destination_state(&self.destination_id);
        // NOTE(review): this load uses `SeqCst` while the one below uses
        // `Acquire` — confirm whether the difference is intentional.
        tracing::info!(start_index=&destination_state.load(Ordering::SeqCst), indexer_address=%address, "starting indexer exporter");
        loop {
            let (outgoing_stream, incoming_stream) =
                client.setup_indexer_client(self.work_queue_size).await?;
            let mut streamer = ExportTaskQueue::new(
                self.work_queue_size,
                // Resume from the number of elements already acknowledged.
                destination_state.load(Ordering::Acquire) as usize,
                outgoing_stream,
                storage.clone()?,
            );
            let mut acknowledgement_task =
                AcknowledgementTask::new(incoming_stream, destination_state.clone());
            select! {
                biased;
                _ = &mut pinned_shutdown_signal => {break},
                res = streamer.run() => {
                    if let Err(e) = res {
                        tracing::error!("unexpected error: {e}, re-trying to establish a stream");
                        sleep(Duration::from_secs(1)).await;
                    }
                },
                res = acknowledgement_task.run() => {
                    match res {
                        Err(e) => {
                            tracing::error!("unexpected error: {e}, re-trying to establish a stream");
                        }
                        Ok(_) => {
                            tracing::error!("stream closed unexpectedly, retrying to establish a stream");
                        }
                    }
                    sleep(Duration::from_secs(1)).await;
                },
            }
        }
        Ok(())
    }
}
/// Consumes the indexer's ACK stream and advances the shared per-destination
/// progress counter for every acknowledged element.
struct AcknowledgementTask {
    // Server-side stream of empty ACK messages.
    incoming: Streaming<()>,
    // Shared counter also read when (re)starting the export stream.
    destination_state: Arc<AtomicU64>,
}
impl AcknowledgementTask {
    /// Pairs the server→client ACK stream with the shared progress counter it
    /// advances.
    fn new(incoming: Streaming<()>, destination_state: Arc<AtomicU64>) -> Self {
        Self {
            incoming,
            destination_state,
        }
    }

    /// Consumes ACKs until the stream ends (returns `Ok`) or errors, bumping
    /// the persisted position once per acknowledged element.
    async fn run(&mut self) -> anyhow::Result<()> {
        loop {
            match self.incoming.message().await? {
                Some(()) => self.increment_destination_state(),
                None => return Ok(()),
            }
        }
    }

    // The `Release` store pairs with the `Acquire` load performed when a new
    // export stream is started from this state.
    fn increment_destination_state(&self) {
        let _ = self.destination_state.fetch_add(1, Ordering::Release);
    }
}
/// Prefetches blocks (with their blobs) from exporter storage in height order
/// and pushes them, blobs first, onto the outgoing indexer stream.
struct ExportTaskQueue<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Number of fetches kept in flight at once.
    queue_size: usize,
    // Index of the first block to stream (resume position).
    start_height: usize,
    // Outgoing gRPC stream of wire `Element`s.
    buffer: CanonicalBlockStream,
    storage: ExporterStorage<S>,
}
impl<S> ExportTaskQueue<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates a queue that streams blocks starting at `start_height`,
    /// keeping up to `queue_size` fetches in flight.
    fn new(
        queue_size: usize,
        start_height: usize,
        sender: CanonicalBlockStream,
        storage: ExporterStorage<S>,
    ) -> ExportTaskQueue<S> {
        Self {
            queue_size,
            start_height,
            storage,
            buffer: sender,
        }
    }

    /// Streams each block's blobs followed by the block itself, in height
    /// order (`FuturesOrdered` preserves completion order).
    async fn run(&mut self) -> anyhow::Result<()> {
        let mut index = self.start_height;
        let mut futures = FuturesOrdered::new();
        // Prime the prefetch pipeline.
        while futures.len() < self.queue_size {
            futures.push_back(self.get_block_with_blobs_task(index));
            index += 1;
        }
        while let Some((block, blobs)) = futures.next().await.transpose()? {
            for blob in blobs {
                tracing::info!(
                    blob_id=?blob.id(),
                    "dispatching blob"
                );
                // Propagate serialization failures instead of panicking
                // (previously `.try_into().unwrap()`).
                self.buffer.send(blob.try_into()?).await?
            }
            let block_id = BlockId::from_confirmed_block(block.value());
            tracing::info!(?block_id, "dispatching block");
            self.buffer.send(block.try_into()?).await?;
            futures.push_back(self.get_block_with_blobs_task(index));
            index += 1;
        }
        Ok(())
    }

    /// Fetches block `index` together with its blobs, polling once per second
    /// while the block is not yet processed.
    async fn get_block_with_blobs_task(
        &self,
        index: usize,
    ) -> Result<(Arc<ConfirmedBlockCertificate>, Vec<Arc<Blob>>), ExporterError> {
        loop {
            match self.storage.get_block_with_blobs(index).await {
                Ok(res) => return Ok(res),
                Err(ExporterError::UnprocessedBlock) => {
                    tokio::time::sleep(Duration::from_secs(1)).await
                }
                Err(e) => return Err(e),
            }
        }
    }
}
type CanonicalBlockStream = Sender<Element>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/block_processor/walker.rs | linera-service/src/exporter/runloops/block_processor/walker.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashSet;
use linera_base::identifiers::{BlobId, BlobType};
use linera_chain::types::{CertificateValue, ConfirmedBlock};
use linera_execution::{system::AdminOperation, Operation, SystemOperation};
use linera_storage::Storage;
use crate::{
common::{BlockId, CanonicalBlock},
storage::BlockProcessorStorage,
ExporterError,
};
/// Depth-first traversal state used to topologically sort a block and all of
/// its not-yet-indexed dependencies before handing them to exporter storage.
pub(super) struct Walker<'a, S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Explicit DFS stack of blocks whose dependencies are being resolved.
    path: Vec<NodeVisitor>,
    // Blocks fully processed during this walk; guards against cycles/repeats.
    visited: HashSet<BlockId>,
    // Committee blob found while walking, if any (see `walk`).
    new_committee_blob: Option<BlobId>,
    storage: &'a mut BlockProcessorStorage<S>,
}
impl<'a, S> Walker<'a, S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Creates a walker with an empty stack and no visited blocks.
    pub(super) fn new(storage: &'a mut BlockProcessorStorage<S>) -> Self {
        Self {
            storage,
            path: Vec::new(),
            visited: HashSet::new(),
            new_committee_blob: None,
        }
    }
    /// Walks through the block's dependencies in a depth wise manner
    /// resolving, sorting and indexing all of them along the way.
    ///
    /// Returns the committee blob carried by the walked block, if any.
    pub(super) async fn walk(mut self, block: BlockId) -> Result<Option<BlobId>, ExporterError> {
        if self.is_block_indexed(&block).await? {
            return Ok(None);
        }
        let node_visitor = self.get_processed_block_node(&block).await?;
        self.path.push(node_visitor);
        // Iterative DFS: a node is re-pushed until every dependency has been
        // resolved, so dependencies are always indexed before dependents.
        while let Some(mut node_visitor) = self.path.pop() {
            if self.visited.contains(&node_visitor.node.block) {
                continue;
            }
            // resolve block dependencies
            if let Some(dependency) = node_visitor.next_dependency() {
                self.path.push(node_visitor);
                if !self.is_block_indexed(&dependency).await? {
                    let dependency_node = self.get_processed_block_node(&dependency).await?;
                    self.path.push(dependency_node);
                }
                continue;
            }
            // all the block dependencies have been resolved for this block
            // now just resolve the blobs
            let mut blobs_to_send = Vec::new();
            let mut blobs_to_index_block_with = Vec::new();
            for id in node_visitor.node.required_blobs {
                if !self.is_blob_indexed(id).await? {
                    blobs_to_index_block_with.push(id);
                    // Blobs created by the block itself travel inside the
                    // block, so they are indexed but not sent separately.
                    if !node_visitor.node.created_blobs.contains(&id) {
                        blobs_to_send.push(id);
                    }
                }
            }
            let block_id = node_visitor.node.block;
            // Only push the block downstream when `index_block` reports `true`
            // (presumably "newly indexed" — contract lives in storage).
            if self.index_block(&block_id).await? {
                let block_to_push = CanonicalBlock::new(block_id.hash, &blobs_to_send);
                self.storage.push_block(block_to_push);
                for blob in blobs_to_index_block_with {
                    let _ = self.storage.index_blob(blob);
                }
            }
            // NOTE(review): overwritten on every processed node, so only the
            // committee blob of the last node (the walk's root block) is
            // returned — confirm dependency blocks cannot carry one.
            self.new_committee_blob = node_visitor.node.new_committee_blob;
            self.visited.insert(block_id);
        }
        Ok(self.new_committee_blob)
    }
    /// Loads the block from storage and reduces it to a DFS node.
    async fn get_processed_block_node(
        &self,
        block_id: &BlockId,
    ) -> Result<NodeVisitor, ExporterError> {
        let block = self.storage.get_block(block_id.hash).await?;
        let processed_block = ProcessedBlock::process_block(block.value());
        let node = NodeVisitor::new(processed_block);
        Ok(node)
    }
    /// Checks whether the block is already indexed; an unprocessed chain just
    /// means "not indexed yet" rather than an error.
    async fn is_block_indexed(&mut self, block_id: &BlockId) -> Result<bool, ExporterError> {
        match self.storage.is_block_indexed(block_id).await {
            Ok(ok) => Ok(ok),
            Err(ExporterError::UnprocessedChain) => Ok(false),
            Err(e) => Err(e),
        }
    }
    /// Records the block as indexed; forwards storage's boolean result.
    async fn index_block(&mut self, block_id: &BlockId) -> Result<bool, ExporterError> {
        self.storage.index_block(block_id).await
    }
    /// Checks whether the blob is already indexed in exporter storage.
    async fn is_blob_indexed(&mut self, blob_id: BlobId) -> Result<bool, ExporterError> {
        self.storage.is_blob_indexed(blob_id).await
    }
}
/// Cursor over a processed block's dependency list, used on the DFS stack.
struct NodeVisitor {
    node: ProcessedBlock,
    // Index into `node.dependencies` of the next dependency to visit.
    next_dependency: usize,
}
impl NodeVisitor {
fn new(processed_block: ProcessedBlock) -> Self {
Self {
next_dependency: 0,
node: processed_block,
}
}
fn next_dependency(&mut self) -> Option<BlockId> {
if let Some(block_id) = self.node.dependencies.get(self.next_dependency) {
self.next_dependency += 1;
return Some(*block_id);
}
None
}
}
/// A confirmed block reduced to what the walker needs: identity, blob
/// requirements, and the blocks it depends on.
#[derive(Debug)]
struct ProcessedBlock {
    block: BlockId,
    // blobs created by this block
    // used for filtering which blobs
    // we won't need to send separately
    // as these blobs are part of the block itself.
    created_blobs: Vec<BlobId>,
    // all the blobs required by this block
    required_blobs: Vec<BlobId>,
    // Parent block (if any) followed by the senders of incoming bundles.
    dependencies: Vec<BlockId>,
    // Blob of a `CreateCommittee` system operation in this block, if present.
    new_committee_blob: Option<BlobId>,
}
impl ProcessedBlock {
    /// Reduces a confirmed block to its walker-relevant parts: identity,
    /// dependencies (parent and message-sender blocks), blob requirements,
    /// and any `CreateCommittee` blob it carries.
    fn process_block(block: &ConfirmedBlock) -> Self {
        let block_id = BlockId::new(block.chain_id(), block.hash(), block.height());
        let mut dependencies = Vec::new();
        // The parent block (absent only at the chain's first height) is a
        // dependency.
        if let Some(parent_hash) = block.block().header.previous_block_hash {
            let height = block_id
                .height
                .try_sub_one()
                .expect("parent only exists if child's height is greater than zero");
            let parent = BlockId::new(block_id.chain_id, parent_hash, height);
            dependencies.push(parent);
        }
        // Every incoming message bundle makes its sender block a dependency.
        let message_senders = block
            .block()
            .body
            .incoming_bundles()
            .map(BlockId::from_incoming_bundle);
        dependencies.extend(message_senders);
        // First `CreateCommittee` admin operation found in the block, if any.
        let new_committee = block.block().body.operations().find_map(|m| {
            if let Operation::System(boxed) = m {
                if let SystemOperation::Admin(AdminOperation::CreateCommittee {
                    blob_hash, ..
                }) = boxed.as_ref()
                {
                    let committee_blob = BlobId::new(*blob_hash, BlobType::Committee);
                    return Some(committee_blob);
                }
            }
            None
        });
        Self {
            dependencies,
            block: block_id,
            new_committee_blob: new_committee,
            required_blobs: block.required_blob_ids().into_iter().collect(),
            created_blobs: block.block().created_blob_ids().into_iter().collect(),
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/exporter/runloops/block_processor/mod.rs | linera-service/src/exporter/runloops/block_processor/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::HashMap,
future::{Future, IntoFuture},
time::{Duration, Instant},
};
use linera_base::crypto::CryptoHash;
use linera_execution::committee::Committee;
use linera_service::config::DestinationId;
use linera_storage::Storage;
use tokio::time::{interval, MissedTickBehavior};
use crate::{
common::ExporterError,
runloops::{block_processor::walker::Walker, ExportersTracker, NewBlockQueue},
storage::BlockProcessorStorage,
};
mod walker;
/// Drives the exporter: pops newly confirmed blocks off the queue, walks and
/// indexes their dependency graphs, and manages per-destination exporters.
pub(super) struct BlockProcessor<F, T>
where
    T: Storage + Clone + Send + Sync + 'static,
{
    exporters_tracker: ExportersTracker<F, T>,
    storage: BlockProcessorStorage<T>,
    // Queue of incoming block IDs; failed items are re-queued at its rear.
    new_block_queue: NewBlockQueue,
    // When true, a `CreateCommittee` blob triggers rotation of the validator
    // destinations.
    committee_destination_update: bool,
    // Temporary solution.
    // Tracks certificates that failed to be read from storage
    // along with the time of the failure to avoid retrying for too long.
    retried_certs: HashMap<CryptoHash, (u8, Instant)>,
}
impl<S, T> BlockProcessor<S, T>
where
    T: Storage + Clone + Send + Sync + 'static,
    S: IntoFuture<Output = ()> + Clone + Send + Sync + 'static,
    <S as IntoFuture>::IntoFuture: Future<Output = ()> + Send + Sync + 'static,
{
    /// Assembles a processor from its collaborators; no work starts until
    /// `run_with_shutdown` is called.
    pub(super) fn new(
        exporters_tracker: ExportersTracker<S, T>,
        storage: BlockProcessorStorage<T>,
        new_block_queue: NewBlockQueue,
        committee_destination_update: bool,
    ) -> Self {
        Self {
            storage,
            exporters_tracker,
            committee_destination_update,
            new_block_queue,
            retried_certs: HashMap::new(),
        }
    }
    /// Consumes the processor, handing back the exporter tracker.
    pub(super) fn pool_state(self) -> ExportersTracker<S, T> {
        self.exporters_tracker
    }
    /// Main loop: persists state every `persistence_period` milliseconds,
    /// walks each incoming block notification, and handles committee updates
    /// and retryable errors. Exits (after a final save) when
    /// `shutdown_signal` resolves, or returns early on a fatal error.
    pub(super) async fn run_with_shutdown<F>(
        &mut self,
        shutdown_signal: F,
        persistence_period: u32,
    ) -> Result<(), ExporterError>
    where
        F: IntoFuture<Output = ()>,
    {
        let shutdown_signal_future = shutdown_signal.into_future();
        let mut pinned_shutdown_signal = Box::pin(shutdown_signal_future);
        let mut interval = interval(Duration::from_millis(persistence_period.into()));
        interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
        self.exporters_tracker.start_startup_exporters();
        loop {
            tokio::select! {
                biased;
                _ = &mut pinned_shutdown_signal => break,
                _ = interval.tick() => self.storage.save().await?,
                Some(next_block_notification) = self.new_block_queue.recv() => {
                    let walker = Walker::new(&mut self.storage);
                    match walker.walk(next_block_notification).await {
                        Ok(Some(new_committee_blob)) if self.committee_destination_update => {
                            tracing::info!(?new_committee_blob, "new committee blob found, updating the committee destination.");
                            let blob = match self.storage.get_blob(new_committee_blob).await {
                                Ok(blob) => blob,
                                Err(error) => {
                                    tracing::error!(
                                        blob_id=?new_committee_blob,
                                        ?error,
                                        "failed to read the committee blob from storage"
                                    );
                                    return Err(error);
                                },
                            };
                            let committee: Committee = match bcs::from_bytes(blob.bytes()) {
                                Ok(committee) => committee,
                                Err(error) => {
                                    // A malformed blob is skipped rather than fatal.
                                    tracing::error!(
                                        blob_id=?new_committee_blob,
                                        ?error,
                                        "failed to deserialize the committee blob"
                                    );
                                    continue;
                                }
                            };
                            // Rotate destinations: stop exporters for the old committee,
                            // record the new one, then start exporters for it.
                            let committee_destinations = committee.validator_addresses().map(|(_, address)| DestinationId::validator(address.to_owned())).collect::<Vec<_>>();
                            self.exporters_tracker.shutdown_old_committee(committee_destinations.clone());
                            self.storage.new_committee(committee_destinations.clone());
                            self.exporters_tracker.start_committee_exporters(committee_destinations.clone());
                        },
                        Ok(Some(_)) => {
                            tracing::info!(block=?next_block_notification, "New committee blob found but exporter is not configured \
                                to update the committee destination, skipping.");
                        },
                        Ok(None) => {
                            // No committee blob found, continue processing.
                        },
                        // this error variant is safe to retry as this block is already confirmed so this error will
                        // originate from things like missing dependencies or io error.
                        // Other error variants are either safe to skip or unreachable.
                        Err(ExporterError::ViewError(_)) => {
                            // return the block to the back of the task queue to process again later
                            self.new_block_queue.push_back(next_block_notification);
                        },
                        Err(ExporterError::ReadCertificateError(hash)) => {
                            match self.retried_certs.remove(&hash) {
                                // We retry only if the time elapsed since the first attempt is
                                // less than 1 second. The assumption is that Scylla cannot
                                // be inconsistent for too long.
                                Some((retries, first_attempt)) => {
                                    let elapsed = Instant::now().duration_since(first_attempt);
                                    if retries < 3 || elapsed < Duration::from_secs(1) {
                                        tracing::warn!(?hash, retry=retries+1, "retrying to read certificate");
                                        // NOTE(review): entries re-inserted here are never removed
                                        // when a later read succeeds — confirm the map cannot grow
                                        // unboundedly under transient failures.
                                        self.retried_certs.insert(hash, (retries + 1, first_attempt));
                                        self.new_block_queue.push_back(next_block_notification);
                                    } else {
                                        tracing::error!(?hash, "certificate is missing from the database");
                                        return Err(ExporterError::ReadCertificateError(hash));
                                    }
                                },
                                None => {
                                    tracing::warn!(?hash, retry=1, "retrying to read certificate");
                                    self.retried_certs.insert(hash, (1, Instant::now()));
                                    self.new_block_queue.push_back(next_block_notification);
                                }
                            }
                        },
                        // Non-retryable but non-fatal conditions: log and move on.
                        Err(error @ (ExporterError::UnprocessedChain
                            | ExporterError::BadInitialization
                            | ExporterError::ChainAlreadyExists(_))
                        ) => {
                            tracing::error!(
                                ?error,
                                block_hash=?next_block_notification.hash,
                                "error when resolving block with hash"
                            );
                        },
                        Err(error) => {
                            tracing::error!(?error, "unexpected error");
                            return Err(error);
                        }
                    }
                },
            }
        }
        // Final persistence pass before shutdown.
        self.storage.save().await?;
        Ok(())
    }
}
#[cfg(test)]
mod test {
use linera_base::{
crypto::CryptoHash,
data_types::{Round, Timestamp},
identifiers::ChainId,
time::Duration,
};
use linera_chain::{
data_types::{BlockExecutionOutcome, IncomingBundle, MessageBundle},
test::{make_child_block, make_first_block, BlockTestExt},
types::{CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate},
};
use linera_rpc::NodeOptions;
use linera_sdk::test::MessageAction;
use linera_service::config::LimitsConfig;
use linera_storage::{DbStorage, Storage, TestClock};
use linera_views::memory::MemoryDatabase;
use tokio::sync::mpsc::unbounded_channel;
use tokio_util::sync::CancellationToken;
use crate::{
common::BlockId,
runloops::{BlockProcessor, ExportersTracker, NewBlockQueue},
storage::BlockProcessorStorage,
test_utils::make_simple_state_with_blobs,
ExporterCancellationSignal,
};
    // Two interleaved chains: blocks must come out in dependency order across
    // both chains (see `make_state` for the scenario).
    #[test_log::test(tokio::test)]
    async fn test_topological_sort() -> anyhow::Result<()> {
        let (tx, rx) = unbounded_channel();
        let new_block_queue = NewBlockQueue {
            queue_rear: tx.clone(),
            queue_front: rx,
        };
        let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
        let (block_processor_storage, mut exporter_storage) =
            BlockProcessorStorage::load(storage.clone(), 0, vec![], LimitsConfig::default())
                .await?;
        let token = CancellationToken::new();
        let signal = ExporterCancellationSignal::new(token.clone());
        let exporters_tracker = ExportersTracker::<
            ExporterCancellationSignal,
            DbStorage<MemoryDatabase, TestClock>,
        >::new(
            NodeOptions::default(),
            0,
            signal.clone(),
            exporter_storage.clone()?,
            vec![],
        );
        let mut block_processor = BlockProcessor::new(
            exporters_tracker,
            block_processor_storage,
            new_block_queue,
            false,
        );
        let (block_ids, state) = make_state(&storage).await;
        for id in block_ids {
            let _ = tx.send(id);
        }
        // Run the processor until it drains the queue or the timeout fires.
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(10)) => {},
            _ = block_processor.run_with_shutdown(signal, 5) => {},
        }
        // ordered pairs of (chain_id, block_height)
        let expected_state = [
            (1, 0),
            (0, 0),
            (0, 1),
            (1, 1),
            (1, 2),
            (0, 2),
            (0, 3),
            (1, 3),
        ];
        for (i, (x, y)) in expected_state.into_iter().enumerate() {
            let hash = exporter_storage.get_block_with_blob_ids(i).await?.0.hash();
            assert_eq!(hash, state[x][y]);
        }
        Ok(())
    }
    // A scenario to test topological sort with.
    // Populates the storage with two chains, each with a height of four blocks.
    // Blocks have a dependency on the blocks of the chains that came before
    // chronologically during creation.
    // Returns the notifications to feed the processor and, per chain, the
    // ordered block hashes.
    async fn make_state<S: Storage>(storage: &S) -> (Vec<BlockId>, Vec<Vec<CryptoHash>>) {
        let mut notifications = Vec::new();
        let chain_id_a = ChainId(CryptoHash::test_hash("0"));
        let chain_id_b = ChainId(CryptoHash::test_hash("1"));
        let mut chain_a = Vec::new();
        let mut chain_b = Vec::new();
        for i in 0..4 {
            if i == 0 {
                // Genesis blocks for both chains.
                let block_a = ConfirmedBlock::new(
                    BlockExecutionOutcome::default().with(make_first_block(chain_id_a)),
                );
                let block_b = ConfirmedBlock::new(
                    BlockExecutionOutcome::default().with(make_first_block(chain_id_b)),
                );
                chain_a.push(block_a);
                chain_b.push(block_b);
                continue;
            }
            let block_a = ConfirmedBlock::new(
                BlockExecutionOutcome::default().with(make_child_block(chain_a.last().unwrap())),
            );
            chain_a.push(block_a);
            // Odd heights on chain B receive a message from chain A, creating
            // a cross-chain dependency; only those blocks are notified.
            let block_b = if i % 2 == 0 {
                ConfirmedBlock::new(
                    BlockExecutionOutcome::default()
                        .with(make_child_block(chain_b.iter().last().unwrap())),
                )
            } else {
                let incoming_bundle = IncomingBundle {
                    origin: chain_id_a,
                    bundle: MessageBundle {
                        height: (i as u64).into(),
                        timestamp: Timestamp::now(),
                        certificate_hash: chain_a.get(i as usize).unwrap().hash(),
                        transaction_index: 0,
                        messages: vec![],
                    },
                    action: MessageAction::Accept,
                };
                let block_b = ConfirmedBlock::new(BlockExecutionOutcome::default().with(
                    make_child_block(chain_b.last().unwrap()).with_incoming_bundle(incoming_bundle),
                ));
                let block_id = BlockId::from_confirmed_block(&block_b);
                notifications.push(block_id);
                block_b
            };
            chain_b.push(block_b);
        }
        for block in chain_a.iter().chain(chain_b.iter()) {
            let cert = ConfirmedBlockCertificate::new(block.clone(), Round::Fast, vec![]);
            storage
                .write_blobs_and_certificate(&[], &cert)
                .await
                .unwrap();
        }
        (
            notifications,
            vec![
                chain_a.iter().map(|block| block.inner().hash()).collect(),
                chain_b.iter().map(|block| block.inner().hash()).collect(),
            ],
        )
    }
    // Three chains linked by messages A -> B -> C; a single notification for
    // the tip of C must pull in all transitive dependencies in order.
    #[test_log::test(tokio::test)]
    async fn test_topological_sort_2() -> anyhow::Result<()> {
        let (tx, rx) = unbounded_channel();
        let new_block_queue = NewBlockQueue {
            queue_rear: tx.clone(),
            queue_front: rx,
        };
        let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
        let (block_processor_storage, mut exporter_storage) =
            BlockProcessorStorage::load(storage.clone(), 0, vec![], LimitsConfig::default())
                .await?;
        let token = CancellationToken::new();
        let signal = ExporterCancellationSignal::new(token.clone());
        let exporters_tracker = ExportersTracker::<
            ExporterCancellationSignal,
            DbStorage<MemoryDatabase, TestClock>,
        >::new(
            NodeOptions::default(),
            0,
            signal.clone(),
            exporter_storage.clone()?,
            vec![],
        );
        let mut block_processor = BlockProcessor::new(
            exporters_tracker,
            block_processor_storage,
            new_block_queue,
            false,
        );
        let (block_id, state) = make_state_2(&storage).await;
        let _ = tx.send(block_id);
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(10)) => {},
            _ = block_processor.run_with_shutdown(signal, 5) => {},
        }
        // Expected ordered pairs of (chain index, block height).
        let expected_state = [(2, 0), (1, 0), (0, 0), (0, 1), (1, 1), (2, 1)];
        for (i, (x, y)) in expected_state.into_iter().enumerate() {
            let hash = exporter_storage.get_block_with_blob_ids(i).await?.0.hash();
            assert_eq!(hash, state[x][y]);
        }
        Ok(())
    }
    // A scenario to test topological sort with.
    // Populates the storage with three chains, each with a height of two blocks.
    // Blocks have a dependency on the blocks of the chains that came before
    // chronologically during creation.
    // Returns the single tip notification plus, per chain, the block hashes.
    async fn make_state_2<S: Storage>(storage: &S) -> (BlockId, Vec<Vec<CryptoHash>>) {
        let chain_id_a = ChainId(CryptoHash::test_hash("0"));
        let chain_id_b = ChainId(CryptoHash::test_hash("1"));
        let chain_id_c = ChainId(CryptoHash::test_hash("2"));
        // Builds an incoming bundle referencing `sender_block`, making it a
        // dependency of the receiving block.
        let get_bundle = |sender_block: &ConfirmedBlock| IncomingBundle {
            origin: sender_block.chain_id(),
            bundle: MessageBundle {
                height: sender_block.height(),
                timestamp: Timestamp::now(),
                certificate_hash: sender_block.inner().hash(),
                transaction_index: 0,
                messages: vec![],
            },
            action: MessageAction::Accept,
        };
        let mut state = Vec::new();
        let block_1_a = ConfirmedBlock::new(
            BlockExecutionOutcome::default().with(make_first_block(chain_id_a)),
        );
        let block_2_a = ConfirmedBlock::new(
            BlockExecutionOutcome::default().with(make_child_block(&block_1_a)),
        );
        let block_1_b = ConfirmedBlock::new(
            BlockExecutionOutcome::default().with(make_first_block(chain_id_b)),
        );
        // B's second block depends on A's second block via a message.
        let block_2_b = ConfirmedBlock::new(
            BlockExecutionOutcome::default()
                .with(make_child_block(&block_1_b).with_incoming_bundle(get_bundle(&block_2_a))),
        );
        let block_1_c = ConfirmedBlock::new(
            BlockExecutionOutcome::default().with(make_first_block(chain_id_c)),
        );
        // C's second block depends on B's second block via a message.
        let block_2_c = ConfirmedBlock::new(
            BlockExecutionOutcome::default()
                .with(make_child_block(&block_1_c).with_incoming_bundle(get_bundle(&block_2_b))),
        );
        let notification = BlockId::from_confirmed_block(&block_2_c);
        state.push(vec![block_1_a, block_2_a]);
        state.push(vec![block_1_b, block_2_b]);
        state.push(vec![block_1_c, block_2_c]);
        for block in state.iter().flatten() {
            let cert = ConfirmedBlockCertificate::new(block.clone(), Round::Fast, vec![]);
            storage
                .write_blobs_and_certificate(&[], &cert)
                .await
                .unwrap()
        }
        (
            notification,
            state
                .iter()
                .map(|chain| chain.iter().map(|block| block.inner().hash()).collect())
                .collect(),
        )
    }
    // Single chain, linear history: notifying the tip must index every
    // ancestor in height order.
    #[tokio::test]
    async fn test_topological_sort_3() -> anyhow::Result<()> {
        let (tx, rx) = unbounded_channel();
        let new_block_queue = NewBlockQueue {
            queue_rear: tx.clone(),
            queue_front: rx,
        };
        let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
        let (block_processor_storage, mut exporter_storage) =
            BlockProcessorStorage::load(storage.clone(), 0, vec![], LimitsConfig::default())
                .await?;
        let token = CancellationToken::new();
        let signal = ExporterCancellationSignal::new(token.clone());
        let exporters_tracker = ExportersTracker::<
            ExporterCancellationSignal,
            DbStorage<MemoryDatabase, TestClock>,
        >::new(
            NodeOptions::default(),
            0,
            signal.clone(),
            exporter_storage.clone()?,
            vec![],
        );
        let mut block_processor = BlockProcessor::new(
            exporters_tracker,
            block_processor_storage,
            new_block_queue,
            false,
        );
        let (block_id, state) = make_state_3(&storage).await;
        let _ = tx.send(block_id);
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(10)) => {},
            _ = block_processor.run_with_shutdown(signal, 5) => {},
        }
        for (index, expected_hash) in state.iter().enumerate() {
            let sorted_hash = exporter_storage
                .get_block_with_blob_ids(index)
                .await?
                .0
                .hash();
            assert_eq!(*expected_hash, sorted_hash);
        }
        Ok(())
    }
    // A simple single-chain scenario with four blocks.
    // Returns the tip notification and the chain's hashes in height order.
    async fn make_state_3<S: Storage>(storage: &S) -> (BlockId, Vec<CryptoHash>) {
        let chain_id = ChainId(CryptoHash::test_hash("0"));
        let mut chain = Vec::new();
        for i in 0..4 {
            if i == 0 {
                let block = ConfirmedBlock::new(
                    BlockExecutionOutcome::default().with(make_first_block(chain_id)),
                );
                chain.push(block);
                continue;
            }
            let block = ConfirmedBlock::new(
                BlockExecutionOutcome::default().with(make_child_block(chain.last().unwrap())),
            );
            chain.push(block);
        }
        let notification = BlockId::from_confirmed_block(chain.last().unwrap());
        for block in &chain {
            let cert = ConfirmedBlockCertificate::new(block.clone(), Round::Fast, vec![]);
            storage
                .write_blobs_and_certificate(&[], &cert)
                .await
                .unwrap();
        }
        (
            notification,
            chain.iter().map(|block| block.inner().hash()).collect(),
        )
    }
    // Single chain with a self-message (block 1 -> block 3): the cycle-free
    // dependency on an already-walked ancestor must not break the order.
    #[tokio::test]
    async fn test_topological_sort_4() -> anyhow::Result<()> {
        let (tx, rx) = unbounded_channel();
        let new_block_queue = NewBlockQueue {
            queue_rear: tx.clone(),
            queue_front: rx,
        };
        let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
        let (block_processor_storage, mut exporter_storage) =
            BlockProcessorStorage::load(storage.clone(), 0, vec![], LimitsConfig::default())
                .await?;
        let token = CancellationToken::new();
        let signal = ExporterCancellationSignal::new(token.clone());
        let exporters_tracker = ExportersTracker::<
            ExporterCancellationSignal,
            DbStorage<MemoryDatabase, TestClock>,
        >::new(
            NodeOptions::default(),
            0,
            signal.clone(),
            exporter_storage.clone()?,
            vec![],
        );
        let mut block_processor = BlockProcessor::new(
            exporters_tracker,
            block_processor_storage,
            new_block_queue,
            false,
        );
        let (block_id, state) = make_state_4(&storage).await;
        let _ = tx.send(block_id);
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(10)) => {},
            _ = block_processor.run_with_shutdown(signal, 5) => {},
        }
        for (index, expected_hash) in state.iter().enumerate() {
            let sorted_hash = exporter_storage
                .get_block_with_blob_ids(index)
                .await?
                .0
                .hash();
            assert_eq!(*expected_hash, sorted_hash);
        }
        Ok(())
    }
    // A simple single-chain scenario with four blocks.
    // A message to the same chain is sent from the second
    // block and received by the last block.
    async fn make_state_4<S: Storage>(storage: &S) -> (BlockId, Vec<CryptoHash>) {
        let chain_id = ChainId(CryptoHash::test_hash("0"));
        let mut chain = Vec::new();
        for i in 0..4 {
            if i == 0 {
                let block = ConfirmedBlock::new(
                    BlockExecutionOutcome::default().with(make_first_block(chain_id)),
                );
                chain.push(block);
                continue;
            }
            // The last block receives the self-message sent at height 1.
            let block = if i == 3 {
                let sender_block = chain.get(1).expect("we are at height 4");
                let incoming_bundle = IncomingBundle {
                    origin: chain_id,
                    bundle: MessageBundle {
                        height: sender_block.height(),
                        timestamp: Timestamp::now(),
                        certificate_hash: sender_block.inner().hash(),
                        transaction_index: 0,
                        messages: vec![],
                    },
                    action: MessageAction::Accept,
                };
                ConfirmedBlock::new(BlockExecutionOutcome::default().with(
                    make_child_block(chain.last().unwrap()).with_incoming_bundle(incoming_bundle),
                ))
            } else {
                ConfirmedBlock::new(
                    BlockExecutionOutcome::default().with(make_child_block(chain.last().unwrap())),
                )
            };
            chain.push(block);
        }
        let notification = BlockId::from_confirmed_block(chain.last().unwrap());
        for block in &chain {
            let cert = ConfirmedBlockCertificate::new(block.clone(), Round::Fast, vec![]);
            storage
                .write_blobs_and_certificate(&[], &cert)
                .await
                .unwrap();
        }
        (
            notification,
            chain.iter().map(|block| block.inner().hash()).collect(),
        )
    }
    // Tests a simple scenario for a chain with two blocks
    // and some blobs: each indexed block must carry exactly
    // the expected blob IDs, in order.
    #[tokio::test]
    async fn test_topological_sort_5() -> anyhow::Result<()> {
        let (tx, rx) = unbounded_channel();
        let new_block_queue = NewBlockQueue {
            queue_rear: tx.clone(),
            queue_front: rx,
        };
        let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(None).await;
        let (block_processor_storage, mut exporter_storage) =
            BlockProcessorStorage::load(storage.clone(), 0, vec![], LimitsConfig::default())
                .await?;
        let token = CancellationToken::new();
        let signal = ExporterCancellationSignal::new(token.clone());
        let exporters_tracker = ExportersTracker::<
            ExporterCancellationSignal,
            DbStorage<MemoryDatabase, TestClock>,
        >::new(
            NodeOptions::default(),
            0,
            signal.clone(),
            exporter_storage.clone()?,
            vec![],
        );
        let mut block_processor = BlockProcessor::new(
            exporters_tracker,
            block_processor_storage,
            new_block_queue,
            false,
        );
        let (block_id, expected_state) = make_simple_state_with_blobs(&storage).await;
        let _ = tx.send(block_id);
        tokio::select! {
            _ = tokio::time::sleep(Duration::from_secs(10)) => {},
            _ = block_processor.run_with_shutdown(signal, 5) => {},
        }
        for (i, block_with_blobs) in expected_state.iter().enumerate() {
            let (actual_block, actual_blobs) = exporter_storage.get_block_with_blobs(i).await?;
            assert_eq!(actual_block.hash(), block_with_blobs.block_hash);
            assert!(!actual_blobs.is_empty());
            assert_eq!(actual_blobs.len(), block_with_blobs.blobs.len());
            assert!(actual_blobs
                .iter()
                .map(|blob| blob.id())
                .eq(block_with_blobs.blobs.iter().copied()));
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/command.rs | linera-service/src/cli/command.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Cow, num::NonZeroU16, path::PathBuf};
use chrono::{DateTime, Utc};
use linera_base::{
crypto::{AccountPublicKey, CryptoHash, ValidatorPublicKey},
data_types::{Amount, BlockHeight, Epoch},
identifiers::{Account, AccountOwner, ApplicationId, ChainId, ModuleId, StreamId},
time::Duration,
vm::VmRuntime,
};
use linera_client::{
chain_listener::ChainListenerConfig,
client_options::{
ApplicationPermissionsConfig, ChainOwnershipConfig, ResourceControlPolicyConfig,
},
util,
};
use linera_rpc::config::CrossChainConfig;
use crate::{cli::validator, task_processor::parse_operator};
// Defaults for the benchmark CLI flags declared in `BenchmarkOptions` below.
const DEFAULT_TOKENS_PER_CHAIN: Amount = Amount::from_millis(100);
const DEFAULT_TRANSACTIONS_PER_BLOCK: usize = 1;
const DEFAULT_WRAP_UP_MAX_IN_FLIGHT: usize = 5;
const DEFAULT_NUM_CHAINS: usize = 10;
const DEFAULT_BPS: usize = 10;
/// Specification for a validator to be added to the committee.
///
/// Parsed from the comma-separated form
/// `public_key,account_key,address,votes` (see the `FromStr` impl).
#[derive(Clone, Debug)]
pub struct ValidatorToAdd {
    // Validator public key.
    pub public_key: ValidatorPublicKey,
    // Account public key associated with the validator.
    pub account_key: AccountPublicKey,
    // Network address of the validator.
    pub address: String,
    // Number of votes assigned to the validator.
    pub votes: u64,
}
impl std::str::FromStr for ValidatorToAdd {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split(',').collect();
anyhow::ensure!(
parts.len() == 4,
"Validator spec must be in format: public_key,account_key,address,votes"
);
Ok(ValidatorToAdd {
public_key: parts[0].parse()?,
account_key: parts[1].parse()?,
address: parts[2].to_string(),
votes: parts[3].parse()?,
})
}
}
// Options shared by the benchmark subcommands; flattened into both `Single`
// and `Multi` via clap. The `///` field docs below double as CLI help text.
#[derive(Clone, clap::Args, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BenchmarkOptions {
    /// How many chains to use.
    #[arg(long, default_value_t = DEFAULT_NUM_CHAINS)]
    pub num_chains: usize,
    /// How many tokens to assign to each newly created chain.
    /// These need to cover the transaction fees per chain for the benchmark.
    #[arg(long, default_value_t = DEFAULT_TOKENS_PER_CHAIN)]
    pub tokens_per_chain: Amount,
    /// How many transactions to put in each block.
    #[arg(long, default_value_t = DEFAULT_TRANSACTIONS_PER_BLOCK)]
    pub transactions_per_block: usize,
    /// The application ID of a fungible token on the wallet's default chain.
    /// If none is specified, the benchmark uses the native token.
    #[arg(long)]
    pub fungible_application_id: Option<linera_base::identifiers::ApplicationId>,
    /// The fixed BPS (Blocks Per Second) rate that block proposals will be sent at.
    #[arg(long, default_value_t = DEFAULT_BPS)]
    pub bps: usize,
    /// If provided, will close the chains after the benchmark is finished. Keep in mind that
    /// closing the chains might take a while, and will increase the validator latency while
    /// they're being closed.
    #[arg(long)]
    pub close_chains: bool,
    /// A comma-separated list of host:port pairs to query for health metrics.
    /// If provided, the benchmark will check these endpoints for validator health
    /// and terminate if any validator is unhealthy.
    /// Example: "127.0.0.1:21100,validator-1.some-network.linera.net:21100"
    #[arg(long)]
    pub health_check_endpoints: Option<String>,
    /// The maximum number of in-flight requests to validators when wrapping up the benchmark.
    /// While wrapping up, this controls the concurrency level when processing inboxes and
    /// closing chains.
    #[arg(long, default_value_t = DEFAULT_WRAP_UP_MAX_IN_FLIGHT)]
    pub wrap_up_max_in_flight: usize,
    /// Confirm before starting the benchmark.
    #[arg(long)]
    pub confirm_before_start: bool,
    /// How long to run the benchmark for. If not provided, the benchmark will run until
    /// it is interrupted.
    #[arg(long)]
    pub runtime_in_seconds: Option<u64>,
    /// The delay between chains, in milliseconds. For example, if set to 200ms, the first
    /// chain will start, then the second will start 200 ms after the first one, the third
    /// 200 ms after the second one, and so on.
    /// This is used for slowly ramping up the TPS, so we don't pound the validators with the full
    /// TPS all at once.
    #[arg(long)]
    pub delay_between_chains_ms: Option<u64>,
    /// Path to YAML file containing chain IDs to send transfers to.
    /// If not provided, only transfers between chains in the same wallet.
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Transaction distribution mode. If false (default), distributes transactions evenly
    /// across chains within each block. If true, sends all transactions in each block
    /// to a single chain, rotating through chains for subsequent blocks.
    #[arg(long)]
    pub single_destination_per_block: bool,
}
impl Default for BenchmarkOptions {
fn default() -> Self {
Self {
num_chains: DEFAULT_NUM_CHAINS,
tokens_per_chain: DEFAULT_TOKENS_PER_CHAIN,
transactions_per_block: DEFAULT_TRANSACTIONS_PER_BLOCK,
wrap_up_max_in_flight: DEFAULT_WRAP_UP_MAX_IN_FLIGHT,
fungible_application_id: None,
bps: DEFAULT_BPS,
close_chains: false,
health_check_endpoints: None,
confirm_before_start: false,
runtime_in_seconds: None,
delay_between_chains_ms: None,
config_path: None,
single_destination_per_block: false,
}
}
}
// Benchmark subcommands; the `///` docs below double as CLI help text.
#[derive(Clone, clap::Subcommand, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum BenchmarkCommand {
    /// Start a single benchmark process, maintaining a given TPS.
    Single {
        #[command(flatten)]
        options: BenchmarkOptions,
    },
    /// Run multiple benchmark processes in parallel.
    Multi {
        #[command(flatten)]
        options: BenchmarkOptions,
        /// The number of benchmark processes to run in parallel.
        #[arg(long, default_value = "1")]
        processes: usize,
        /// The faucet (which implicitly defines the network)
        #[arg(long, env = "LINERA_FAUCET_URL")]
        faucet: String,
        /// If specified, a directory with a random name will be created in this directory, and the
        /// client state will be stored there.
        /// If not specified, a temporary directory will be used for each client.
        #[arg(long)]
        client_state_dir: Option<String>,
        /// The delay between starting the benchmark processes, in seconds.
        /// If --cross-wallet-transfers is true, this will be ignored.
        #[arg(long, default_value = "10")]
        delay_between_processes: u64,
        /// Whether to send transfers between chains in different wallets.
        #[arg(long)]
        cross_wallet_transfers: bool,
    },
}
impl BenchmarkCommand {
pub fn transactions_per_block(&self) -> usize {
match self {
Self::Single { options } => options.transactions_per_block,
Self::Multi { options, .. } => options.transactions_per_block,
}
}
}
#[cfg(feature = "kubernetes")]
use crate::cli_wrappers::local_kubernetes_net::BuildMode;
use crate::util::{
DEFAULT_PAUSE_AFTER_GQL_MUTATIONS_SECS, DEFAULT_PAUSE_AFTER_LINERA_SERVICE_SECS,
};
#[derive(Clone, clap::Subcommand)]
pub enum ClientCommand {
/// Transfer funds
Transfer {
/// Sending chain ID (must be one of our chains)
#[arg(long = "from")]
sender: Account,
/// Recipient account
#[arg(long = "to")]
recipient: Account,
/// Amount to transfer
amount: Amount,
},
/// Open (i.e. activate) a new chain deriving the UID from an existing one.
OpenChain {
/// Chain ID (must be one of our chains).
#[arg(long = "from")]
chain_id: Option<ChainId>,
/// The new owner (otherwise create a key pair and remember it)
#[arg(long = "owner")]
owner: Option<AccountOwner>,
/// The initial balance of the new chain. This is subtracted from the parent chain's
/// balance.
#[arg(long = "initial-balance", default_value = "0")]
balance: Amount,
/// Whether to create a super owner for the new chain.
#[arg(long)]
super_owner: bool,
},
/// Open (i.e. activate) a new multi-owner chain deriving the UID from an existing one.
OpenMultiOwnerChain {
/// Chain ID (must be one of our chains).
#[arg(long = "from")]
chain_id: Option<ChainId>,
#[clap(flatten)]
ownership_config: ChainOwnershipConfig,
#[clap(flatten)]
application_permissions_config: ApplicationPermissionsConfig,
/// The initial balance of the new chain. This is subtracted from the parent chain's
/// balance.
#[arg(long = "initial-balance", default_value = "0")]
balance: Amount,
},
/// Display who owns the chain, and how the owners work together proposing blocks.
ShowOwnership {
/// The ID of the chain whose owners will be changed.
#[clap(long)]
chain_id: Option<ChainId>,
},
/// Change who owns the chain, and how the owners work together proposing blocks.
///
/// Specify the complete set of new owners, by public key. Existing owners that are
/// not included will be removed.
ChangeOwnership {
/// The ID of the chain whose owners will be changed.
#[clap(long)]
chain_id: Option<ChainId>,
#[clap(flatten)]
ownership_config: ChainOwnershipConfig,
},
/// Change the preferred owner of a chain.
SetPreferredOwner {
/// The ID of the chain whose preferred owner will be changed.
#[clap(long)]
chain_id: Option<ChainId>,
/// The new preferred owner.
#[arg(long)]
owner: AccountOwner,
},
/// Changes the application permissions configuration.
ChangeApplicationPermissions {
/// The ID of the chain to which the new permissions will be applied.
#[arg(long)]
chain_id: Option<ChainId>,
#[clap(flatten)]
application_permissions_config: ApplicationPermissionsConfig,
},
/// Close an existing chain.
///
/// A closed chain cannot execute operations or accept messages anymore.
/// It can still reject incoming messages, so they bounce back to the sender.
CloseChain {
/// Chain ID (must be one of our chains)
chain_id: ChainId,
},
/// Print out the network description.
ShowNetworkDescription,
/// Read the current native-token balance of the given account directly from the local
/// state.
///
/// NOTE: The local balance does not reflect messages that are waiting to be picked in
/// the local inbox, or that have not been synchronized from validators yet. Use
/// `linera sync` then either `linera query-balance` or `linera process-inbox &&
/// linera local-balance` for a consolidated balance.
LocalBalance {
/// The account to read, written as `CHAIN-ID:OWNER` or simply `CHAIN-ID` for the
/// chain balance. By default, we read the chain balance of the default chain in
/// the wallet.
account: Option<Account>,
},
/// Simulate the execution of one block made of pending messages from the local inbox,
/// then read the native-token balance of the account from the local state.
///
/// NOTE: The balance does not reflect messages that have not been synchronized from
/// validators yet. Call `linera sync` first to do so.
QueryBalance {
/// The account to query, written as `CHAIN-ID:OWNER` or simply `CHAIN-ID` for the
/// chain balance. By default, we read the chain balance of the default chain in
/// the wallet.
account: Option<Account>,
},
/// (DEPRECATED) Synchronize the local state of the chain with a quorum validators, then query the
/// local balance.
///
/// This command is deprecated. Use `linera sync && linera query-balance` instead.
SyncBalance {
/// The account to query, written as `CHAIN-ID:OWNER` or simply `CHAIN-ID` for the
/// chain balance. By default, we read the chain balance of the default chain in
/// the wallet.
account: Option<Account>,
},
/// Synchronize the local state of the chain with a quorum validators.
Sync {
/// The chain to synchronize with validators. If omitted, synchronizes the
/// default chain of the wallet.
chain_id: Option<ChainId>,
},
/// Process all pending incoming messages from the inbox of the given chain by creating as many
/// blocks as needed to execute all (non-failing) messages. Failing messages will be
/// marked as rejected and may bounce to their sender depending on their configuration.
ProcessInbox {
/// The chain to process. If omitted, uses the default chain of the wallet.
chain_id: Option<ChainId>,
},
/// Query validators for shard information about a specific chain.
QueryShardInfo {
/// The chain to query shard information for.
chain_id: ChainId,
},
/// Deprecates all committees up to and including the specified one.
RevokeEpochs { epoch: Epoch },
/// View or update the resource control policy
ResourceControlPolicy {
/// Set the price per unit of Wasm fuel.
#[arg(long)]
wasm_fuel_unit: Option<Amount>,
/// Set the price per unit of EVM fuel.
#[arg(long)]
evm_fuel_unit: Option<Amount>,
/// Set the price per read operation.
#[arg(long)]
read_operation: Option<Amount>,
/// Set the price per write operation.
#[arg(long)]
write_operation: Option<Amount>,
/// Set the price per byte read from runtime.
#[arg(long)]
byte_runtime: Option<Amount>,
/// Set the price per byte read.
#[arg(long)]
byte_read: Option<Amount>,
/// Set the price per byte written.
#[arg(long)]
byte_written: Option<Amount>,
/// Set the base price to read a blob.
#[arg(long)]
blob_read: Option<Amount>,
/// Set the base price to publish a blob.
#[arg(long)]
blob_published: Option<Amount>,
/// Set the price to read a blob, per byte.
#[arg(long)]
blob_byte_read: Option<Amount>,
/// The price to publish a blob, per byte.
#[arg(long)]
blob_byte_published: Option<Amount>,
/// Set the price per byte stored.
#[arg(long)]
byte_stored: Option<Amount>,
/// Set the base price of sending an operation from a block..
#[arg(long)]
operation: Option<Amount>,
/// Set the additional price for each byte in the argument of a user operation.
#[arg(long)]
operation_byte: Option<Amount>,
/// Set the base price of sending a message from a block..
#[arg(long)]
message: Option<Amount>,
/// Set the additional price for each byte in the argument of a user message.
#[arg(long)]
message_byte: Option<Amount>,
/// Set the price per query to a service as an oracle.
#[arg(long)]
service_as_oracle_query: Option<Amount>,
/// Set the price for performing an HTTP request.
#[arg(long)]
http_request: Option<Amount>,
/// Set the maximum amount of Wasm fuel per block.
#[arg(long)]
maximum_wasm_fuel_per_block: Option<u64>,
/// Set the maximum amount of EVM fuel per block.
#[arg(long)]
maximum_evm_fuel_per_block: Option<u64>,
/// Set the maximum time in milliseconds that a block can spend executing services as oracles.
#[arg(long)]
maximum_service_oracle_execution_ms: Option<u64>,
/// Set the maximum size of a block, in bytes.
#[arg(long)]
maximum_block_size: Option<u64>,
/// Set the maximum size of data blobs, compressed bytecode and other binary blobs,
/// in bytes.
#[arg(long)]
maximum_blob_size: Option<u64>,
/// Set the maximum number of published blobs per block.
#[arg(long)]
maximum_published_blobs: Option<u64>,
/// Set the maximum size of decompressed contract or service bytecode, in bytes.
#[arg(long)]
maximum_bytecode_size: Option<u64>,
/// Set the maximum size of a block proposal, in bytes.
#[arg(long)]
maximum_block_proposal_size: Option<u64>,
/// Set the maximum read data per block.
#[arg(long)]
maximum_bytes_read_per_block: Option<u64>,
/// Set the maximum write data per block.
#[arg(long)]
maximum_bytes_written_per_block: Option<u64>,
/// Set the maximum size of oracle responses.
#[arg(long)]
maximum_oracle_response_bytes: Option<u64>,
/// Set the maximum size in bytes of a received HTTP response.
#[arg(long)]
maximum_http_response_bytes: Option<u64>,
/// Set the maximum amount of time allowed to wait for an HTTP response.
#[arg(long)]
http_request_timeout_ms: Option<u64>,
/// Set the list of hosts that contracts and services can send HTTP requests to.
#[arg(long)]
http_request_allow_list: Option<Vec<String>>,
},
/// Run benchmarks to test network performance.
#[command(subcommand)]
Benchmark(BenchmarkCommand),
/// Create genesis configuration for a Linera deployment.
/// Create initial user chains and print information to be used for initialization of validator setup.
/// This will also create an initial wallet for the owner of the initial "root" chains.
CreateGenesisConfig {
/// Sets the file describing the public configurations of all validators
#[arg(long = "committee")]
committee_config_path: PathBuf,
/// The output config path to be consumed by the server
#[arg(long = "genesis")]
genesis_config_path: PathBuf,
/// Known initial balance of the chain
#[arg(long, default_value = "0")]
initial_funding: Amount,
/// The start timestamp: no blocks can be created before this time.
#[arg(long)]
start_timestamp: Option<DateTime<Utc>>,
/// Number of initial (aka "root") chains to create in addition to the admin chain.
num_other_initial_chains: u32,
/// Configure the resource control policy (notably fees) according to pre-defined
/// settings.
#[arg(long, default_value = "no-fees")]
policy_config: ResourceControlPolicyConfig,
/// Set the price per unit of Wasm fuel.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
wasm_fuel_unit_price: Option<Amount>,
/// Set the price per unit of EVM fuel.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
evm_fuel_unit_price: Option<Amount>,
/// Set the price per read operation.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
read_operation_price: Option<Amount>,
/// Set the price per write operation.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
write_operation_price: Option<Amount>,
/// Set the price per byte read from runtime.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
byte_runtime_price: Option<Amount>,
/// Set the price per byte read.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
byte_read_price: Option<Amount>,
/// Set the price per byte written.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
byte_written_price: Option<Amount>,
/// Set the base price to read a blob.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
blob_read_price: Option<Amount>,
/// Set the base price to publish a blob.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
blob_published_price: Option<Amount>,
/// Set the price to read a blob, per byte.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
blob_byte_read_price: Option<Amount>,
/// Set the price to publish a blob, per byte.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
blob_byte_published_price: Option<Amount>,
/// Set the price per byte stored.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
byte_stored_price: Option<Amount>,
/// Set the base price of sending an operation from a block..
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
operation_price: Option<Amount>,
/// Set the additional price for each byte in the argument of a user operation.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
operation_byte_price: Option<Amount>,
/// Set the base price of sending a message from a block..
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
message_price: Option<Amount>,
/// Set the additional price for each byte in the argument of a user message.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
message_byte_price: Option<Amount>,
/// Set the price per query to a service as an oracle.
#[arg(long)]
service_as_oracle_query_price: Option<Amount>,
/// Set the price for performing an HTTP request.
#[arg(long)]
http_request_price: Option<Amount>,
/// Set the maximum amount of Wasm fuel per block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_wasm_fuel_per_block: Option<u64>,
/// Set the maximum amount of EVM fuel per block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_evm_fuel_per_block: Option<u64>,
/// Set the maximum time in milliseconds that a block can spend executing services as oracles.
#[arg(long)]
maximum_service_oracle_execution_ms: Option<u64>,
/// Set the maximum size of a block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_block_size: Option<u64>,
/// Set the maximum size of decompressed contract or service bytecode, in bytes.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_bytecode_size: Option<u64>,
/// Set the maximum size of data blobs, compressed bytecode and other binary blobs,
/// in bytes.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_blob_size: Option<u64>,
/// Set the maximum number of published blobs per block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_published_blobs: Option<u64>,
/// Set the maximum size of a block proposal, in bytes.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_block_proposal_size: Option<u64>,
/// Set the maximum read data per block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_bytes_read_per_block: Option<u64>,
/// Set the maximum write data per block.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_bytes_written_per_block: Option<u64>,
/// Set the maximum size of oracle responses.
/// (This will overwrite value from `--policy-config`)
#[arg(long)]
maximum_oracle_response_bytes: Option<u64>,
/// Set the maximum size in bytes of a received HTTP response.
#[arg(long)]
maximum_http_response_bytes: Option<u64>,
/// Set the maximum amount of time allowed to wait for an HTTP response.
#[arg(long)]
http_request_timeout_ms: Option<u64>,
/// Set the list of hosts that contracts and services can send HTTP requests to.
#[arg(long)]
http_request_allow_list: Option<Vec<String>>,
/// Force this wallet to generate keys using a PRNG and a given seed. USE FOR
/// TESTING ONLY.
#[arg(long)]
testing_prng_seed: Option<u64>,
/// A unique name to identify this network.
#[arg(long)]
network_name: Option<String>,
},
/// Watch the network for notifications.
Watch {
/// The chain ID to watch.
chain_id: Option<ChainId>,
/// Show all notifications from all validators.
#[arg(long)]
raw: bool,
},
/// Run a GraphQL service to explore and extend the chains of the wallet.
Service {
#[command(flatten)]
config: ChainListenerConfig,
/// The port on which to run the server
#[arg(long)]
port: NonZeroU16,
/// The port to expose metrics on.
#[cfg(with_metrics)]
#[arg(long)]
metrics_port: NonZeroU16,
/// Application IDs of operator applications to watch.
/// When specified, a task processor is started alongside the node service.
#[arg(long = "operator-application-ids")]
operator_application_ids: Vec<ApplicationId>,
/// A controller to execute a dynamic set of applications running on a dynamic set of
/// chains.
#[arg(long = "controller-id")]
controller_application_id: Option<ApplicationId>,
/// Supported operators and their binary paths.
/// Format: `name=path` or just `name` (uses name as path).
/// Example: `--operators my-operator=/path/to/binary`
#[arg(long = "operators", value_parser = parse_operator)]
operators: Vec<(String, PathBuf)>,
},
/// Run a GraphQL service that exposes a faucet where users can claim tokens.
/// This gives away the chain's tokens, and is mainly intended for testing.
Faucet {
/// The chain that gives away its tokens.
chain_id: Option<ChainId>,
/// The port on which to run the server
#[arg(long, default_value = "8080")]
port: u16,
/// The port for prometheus to scrape.
#[cfg(with_metrics)]
#[arg(long, default_value = "9090")]
metrics_port: u16,
/// The number of tokens to send to each new chain.
#[arg(long)]
amount: Amount,
/// The end timestamp: The faucet will rate-limit the token supply so it runs out of money
/// no earlier than this.
#[arg(long)]
limit_rate_until: Option<DateTime<Utc>>,
/// Configuration for the faucet chain listener.
#[command(flatten)]
config: ChainListenerConfig,
/// Path to the persistent storage file for faucet mappings.
#[arg(long)]
storage_path: PathBuf,
/// Maximum number of operations to include in a single block (default: 100).
#[arg(long, default_value = "100")]
max_batch_size: usize,
},
/// Publish module.
PublishModule {
/// Path to the Wasm file for the application "contract" bytecode.
contract: PathBuf,
/// Path to the Wasm file for the application "service" bytecode.
service: PathBuf,
/// The virtual machine runtime to use.
#[arg(long, default_value = "wasm")]
vm_runtime: VmRuntime,
/// An optional chain ID to publish the module. The default chain of the wallet
/// is used otherwise.
publisher: Option<ChainId>,
},
/// Print events from a specific chain and stream from a specified index.
ListEventsFromIndex {
/// The chain to query. If omitted, query the default chain of the wallet.
chain_id: Option<ChainId>,
/// The stream being considered.
#[arg(long)]
stream_id: StreamId,
/// Index of the message to start with
#[arg(long, default_value = "0")]
start_index: u32,
},
/// Publish a data blob of binary data.
PublishDataBlob {
/// Path to data blob file to be published.
blob_path: PathBuf,
/// An optional chain ID to publish the blob. The default chain of the wallet
/// is used otherwise.
publisher: Option<ChainId>,
},
// TODO(#2490): Consider removing or renaming this.
/// Verify that a data blob is readable.
ReadDataBlob {
/// The hash of the content.
hash: CryptoHash,
/// An optional chain ID to verify the blob. The default chain of the wallet
/// is used otherwise.
reader: Option<ChainId>,
},
/// Create an application.
CreateApplication {
/// The module ID of the application to create.
module_id: ModuleId,
/// An optional chain ID to host the application. The default chain of the wallet
/// is used otherwise.
creator: Option<ChainId>,
/// The shared parameters as JSON string.
#[arg(long)]
json_parameters: Option<String>,
/// Path to a JSON file containing the shared parameters.
#[arg(long)]
json_parameters_path: Option<PathBuf>,
/// The instantiation argument as a JSON string.
#[arg(long)]
json_argument: Option<String>,
/// Path to a JSON file containing the instantiation argument.
#[arg(long)]
json_argument_path: Option<PathBuf>,
/// The list of required dependencies of application, if any.
#[arg(long, num_args(0..))]
required_application_ids: Option<Vec<ApplicationId>>,
},
/// Create an application, and publish the required module.
PublishAndCreate {
/// Path to the Wasm file for the application "contract" bytecode.
contract: PathBuf,
/// Path to the Wasm file for the application "service" bytecode.
service: PathBuf,
/// The virtual machine runtime to use.
#[arg(long, default_value = "wasm")]
vm_runtime: VmRuntime,
/// An optional chain ID to publish the module. The default chain of the wallet
/// is used otherwise.
publisher: Option<ChainId>,
/// The shared parameters as JSON string.
#[arg(long)]
json_parameters: Option<String>,
/// Path to a JSON file containing the shared parameters.
#[arg(long)]
json_parameters_path: Option<PathBuf>,
/// The instantiation argument as a JSON string.
#[arg(long)]
json_argument: Option<String>,
/// Path to a JSON file containing the instantiation argument.
#[arg(long)]
json_argument_path: Option<PathBuf>,
/// The list of required dependencies of application, if any.
#[arg(long, num_args(0..))]
required_application_ids: Option<Vec<ApplicationId>>,
},
/// Create an unassigned key pair.
Keygen,
/// Link the owner to the chain.
/// Expects that the caller has a private key corresponding to the `public_key`,
/// otherwise block proposals will fail when signing with it.
Assign {
/// The owner to assign.
#[arg(long)]
owner: AccountOwner,
/// The ID of the chain.
#[arg(long)]
chain_id: ChainId,
},
/// Retry a block we unsuccessfully tried to propose earlier.
///
/// As long as a block is pending most other commands will fail, since it is unsafe to propose
/// multiple blocks at the same height.
RetryPendingBlock {
/// The chain with the pending block. If not specified, the wallet's default chain is used.
chain_id: Option<ChainId>,
},
/// Show the contents of the wallet.
#[command(subcommand)]
Wallet(WalletCommand),
/// Show the information about a chain.
#[command(subcommand)]
Chain(ChainCommand),
/// Manage Linera projects.
#[command(subcommand)]
Project(ProjectCommand),
/// Manage a local Linera Network.
#[command(subcommand)]
Net(NetCommand),
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/options.rs | linera-service/src/cli/options.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{env, path::PathBuf};
use anyhow::{anyhow, bail, Error};
use linera_base::crypto::InMemorySigner;
use linera_client::{client_context::ClientContext, config::GenesisConfig};
use linera_execution::{WasmRuntime, WithWasmDefault as _};
use linera_persistent as persistent;
use linera_service::{
cli::command::ClientCommand,
storage::{CommonStorageOptions, Runnable, RunnableWithStore, StorageConfig},
Wallet,
};
use tracing::{debug, info};
#[derive(Clone, clap::Parser)]
#[command(
name = "linera",
version = linera_version::VersionInfo::default_clap_str(),
about = "Client implementation and command-line tool for the Linera blockchain",
)]
pub struct Options {
/// Common options.
#[command(flatten)]
pub client_options: linera_client::Options,
/// Sets the file storing the private state of user chains (an empty one will be created if missing)
#[arg(long = "wallet")]
pub wallet_state_path: Option<PathBuf>,
/// Sets the file storing the keystore state.
#[arg(long = "keystore")]
pub keystore_path: Option<PathBuf>,
/// Given an ASCII alphanumeric parameter `X`, read the wallet state and the wallet
/// storage config from the environment variables `LINERA_WALLET_{X}` and
/// `LINERA_STORAGE_{X}` instead of `LINERA_WALLET` and
/// `LINERA_STORAGE`.
#[arg(long, short = 'w', value_parser = crate::util::parse_ascii_alphanumeric_string)]
pub with_wallet: Option<String>,
/// Enable OpenTelemetry Chrome JSON exporter for trace data analysis.
#[arg(long)]
pub chrome_trace_exporter: bool,
/// Output file path for Chrome trace JSON format.
/// Can be visualized in chrome://tracing or Perfetto UI.
#[arg(long, env = "LINERA_CHROME_TRACE_FILE")]
pub chrome_trace_file: Option<String>,
/// OpenTelemetry OTLP exporter endpoint (requires opentelemetry feature).
#[arg(long, env = "LINERA_OTLP_EXPORTER_ENDPOINT")]
pub otlp_exporter_endpoint: Option<String>,
/// Storage configuration for the blockchain history.
#[arg(long = "storage", global = true)]
pub storage_config: Option<String>,
/// Common storage options.
#[command(flatten)]
pub common_storage_options: CommonStorageOptions,
/// The WebAssembly runtime to use.
#[arg(long)]
pub wasm_runtime: Option<WasmRuntime>,
/// Output log messages from contract execution.
#[arg(long = "with_application_logs")]
pub application_logs: bool,
/// The number of Tokio worker threads to use.
#[arg(long, env = "LINERA_CLIENT_TOKIO_THREADS")]
pub tokio_threads: Option<usize>,
/// The number of Tokio blocking threads to use.
#[arg(long, env = "LINERA_CLIENT_TOKIO_BLOCKING_THREADS")]
pub tokio_blocking_threads: Option<usize>,
/// Size of the block cache (default: 5000)
#[arg(long, env = "LINERA_BLOCK_CACHE_SIZE", default_value = "5000")]
pub block_cache_size: usize,
/// Size of the execution state cache (default: 10000)
#[arg(
long,
env = "LINERA_EXECUTION_STATE_CACHE_SIZE",
default_value = "10000"
)]
pub execution_state_cache_size: usize,
/// Subcommand.
#[command(subcommand)]
pub command: ClientCommand,
}
impl Options {
pub fn init() -> Self {
<Options as clap::Parser>::parse()
}
pub async fn create_client_context<S, Si>(
&self,
storage: S,
wallet: Wallet,
signer: Si,
) -> anyhow::Result<
ClientContext<linera_core::environment::Impl<S, linera_rpc::NodeProvider, Si, Wallet>>,
>
where
S: linera_core::environment::Storage,
Si: linera_core::environment::Signer,
{
let genesis_config = wallet.genesis_config().clone();
let default_chain = wallet.default_chain();
Ok(ClientContext::new(
storage,
wallet,
signer,
&self.client_options,
default_chain,
genesis_config,
self.block_cache_size,
self.execution_state_cache_size,
)
.await?)
}
pub async fn run_with_storage<R: Runnable>(&self, job: R) -> Result<R::Output, Error> {
let storage_config = self.storage_config()?;
debug!("Running command using storage configuration: {storage_config}");
let store_config =
storage_config.add_common_storage_options(&self.common_storage_options)?;
let output = Box::pin(store_config.run_with_storage(
self.wasm_runtime.with_wasm_default(),
self.application_logs,
job,
))
.await?;
Ok(output)
}
pub async fn run_with_store<R: RunnableWithStore>(&self, job: R) -> Result<R::Output, Error> {
let storage_config = self.storage_config()?;
debug!("Running command using storage configuration: {storage_config}");
let store_config =
storage_config.add_common_storage_options(&self.common_storage_options)?;
let output = Box::pin(store_config.run_with_store(job)).await?;
Ok(output)
}
pub async fn initialize_storage(&self) -> Result<(), Error> {
let storage_config = self.storage_config()?;
debug!("Initializing storage using configuration: {storage_config}");
let store_config =
storage_config.add_common_storage_options(&self.common_storage_options)?;
let wallet = self.wallet()?;
store_config.initialize(wallet.genesis_config()).await?;
Ok(())
}
pub fn wallet(&self) -> Result<Wallet, Error> {
Ok(Wallet::read(&self.wallet_path()?)?)
}
pub fn signer(&self) -> Result<persistent::File<InMemorySigner>, Error> {
Ok(persistent::File::read(&self.keystore_path()?)?)
}
pub fn suffix(&self) -> String {
self.with_wallet
.as_ref()
.map(|x| format!("_{}", x))
.unwrap_or_default()
}
pub fn config_path() -> Result<PathBuf, Error> {
let mut config_dir = dirs::config_dir().ok_or_else(|| anyhow!(
"Default wallet directory is not supported in this platform: please specify storage and wallet paths"
))?;
config_dir.push("linera");
if !config_dir.exists() {
debug!("Creating default wallet directory {}", config_dir.display());
fs_err::create_dir_all(&config_dir)?;
}
info!("Using default wallet directory {}", config_dir.display());
Ok(config_dir)
}
pub fn storage_config(&self) -> Result<StorageConfig, Error> {
if let Some(config) = &self.storage_config {
return config.parse();
}
let suffix = self.suffix();
let storage_env_var = env::var(format!("LINERA_STORAGE{suffix}")).ok();
if let Some(config) = storage_env_var {
return config.parse();
}
cfg_if::cfg_if! {
if #[cfg(feature = "rocksdb")] {
let spawn_mode =
linera_views::rocks_db::RocksDbSpawnMode::get_spawn_mode_from_runtime();
let inner_storage_config = linera_service::storage::InnerStorageConfig::RocksDb {
path: Self::config_path()?.join("wallet.db"),
spawn_mode,
};
let namespace = linera_storage::DEFAULT_NAMESPACE.to_string();
Ok(StorageConfig {
inner_storage_config,
namespace,
})
} else {
bail!("Cannot apply default storage because the feature 'rocksdb' was not selected");
}
}
}
pub fn wallet_path(&self) -> Result<PathBuf, Error> {
if let Some(path) = &self.wallet_state_path {
return Ok(path.clone());
}
let suffix = self.suffix();
let wallet_env_var = env::var(format!("LINERA_WALLET{suffix}")).ok();
if let Some(path) = wallet_env_var {
return Ok(path.parse()?);
}
let config_path = Self::config_path()?;
Ok(config_path.join("wallet.json"))
}
pub fn keystore_path(&self) -> Result<PathBuf, Error> {
if let Some(path) = &self.keystore_path {
return Ok(path.clone());
}
let suffix = self.suffix();
let keystore_env_var = env::var(format!("LINERA_KEYSTORE{suffix}")).ok();
if let Some(path) = keystore_env_var {
return Ok(path.parse()?);
}
let config_path = Self::config_path()?;
Ok(config_path.join("keystore.json"))
}
pub fn create_wallet(&self, genesis_config: GenesisConfig) -> Result<Wallet, Error> {
let wallet_path = self.wallet_path()?;
if wallet_path.exists() {
bail!("Wallet already exists: {}", wallet_path.display());
}
let wallet = Wallet::create(&wallet_path, genesis_config)?;
wallet.save()?;
Ok(wallet)
}
pub fn create_keystore(
&self,
testing_prng_seed: Option<u64>,
) -> Result<persistent::File<InMemorySigner>, Error> {
let keystore_path = self.keystore_path()?;
if keystore_path.exists() {
bail!("Keystore already exists: {}", keystore_path.display());
}
Ok(persistent::File::read_or_create(&keystore_path, || {
Ok(InMemorySigner::new(testing_prng_seed))
})?)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/mod.rs | linera-service/src/cli/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper module for the Linera CLI binary.
pub mod command;
pub mod net_up_utils;
pub mod validator;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/net_up_utils.rs | linera-service/src/cli/net_up_utils.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{num::NonZeroU16, str::FromStr};
use colored::Colorize as _;
use linera_base::{data_types::Amount, listen_for_shutdown_signals, time::Duration};
use linera_client::client_options::ResourceControlPolicyConfig;
use linera_rpc::config::CrossChainConfig;
#[cfg(feature = "storage-service")]
use linera_storage_service::{
child::{StorageService, StorageServiceGuard},
common::get_service_storage_binary,
};
use tokio_util::sync::CancellationToken;
use tracing::info;
#[cfg(feature = "kubernetes")]
use {
crate::cli_wrappers::local_kubernetes_net::{BuildMode, LocalKubernetesNetConfig},
std::path::PathBuf,
};
use crate::{
cli_wrappers::{
local_net::{
Database, ExportersSetup, InnerStorageConfigBuilder, LocalNetConfig, PathProvider,
},
ClientWrapper, FaucetService, LineraNet, LineraNetConfig, Network, NetworkConfig,
},
storage::{InnerStorageConfig, StorageConfig},
};
struct StorageConfigProvider {
/// The storage config.
config: StorageConfig,
#[cfg(feature = "storage-service")]
_service_guard: Option<StorageServiceGuard>,
}
impl StorageConfigProvider {
pub async fn new(storage: &Option<String>) -> anyhow::Result<StorageConfigProvider> {
match storage {
#[cfg(feature = "storage-service")]
None => {
let service_endpoint = linera_base::port::get_free_endpoint().await?;
let binary = get_service_storage_binary().await?.display().to_string();
let service = StorageService::new(&service_endpoint, binary);
let _service_guard = service.run().await?;
let _service_guard = Some(_service_guard);
let inner_storage_config = InnerStorageConfig::Service {
endpoint: service_endpoint,
};
let namespace = "table_default".to_string();
let config = StorageConfig {
inner_storage_config,
namespace,
};
Ok(StorageConfigProvider {
config,
_service_guard,
})
}
#[cfg(not(feature = "storage-service"))]
None => {
panic!("When storage is not selected, the storage-service needs to be enabled");
}
#[cfg(feature = "storage-service")]
Some(storage) => {
let config = StorageConfig::from_str(storage)?;
Ok(StorageConfigProvider {
config,
_service_guard: None,
})
}
#[cfg(not(feature = "storage-service"))]
Some(storage) => {
let config = StorageConfig::from_str(storage)?;
Ok(StorageConfigProvider { config })
}
}
}
pub fn inner_storage_config(&self) -> &InnerStorageConfig {
&self.config.inner_storage_config
}
pub fn namespace(&self) -> &str {
&self.config.namespace
}
pub fn database(&self) -> anyhow::Result<Database> {
match self.config.inner_storage_config {
InnerStorageConfig::Memory { .. } => anyhow::bail!("Not possible to work with memory"),
#[cfg(feature = "rocksdb")]
InnerStorageConfig::RocksDb { .. } => {
anyhow::bail!("Not possible to work with RocksDB")
}
#[cfg(feature = "storage-service")]
InnerStorageConfig::Service { .. } => Ok(Database::Service),
#[cfg(feature = "dynamodb")]
InnerStorageConfig::DynamoDb { .. } => Ok(Database::DynamoDb),
#[cfg(feature = "scylladb")]
InnerStorageConfig::ScyllaDb { .. } => Ok(Database::ScyllaDb),
#[cfg(all(feature = "rocksdb", feature = "scylladb"))]
InnerStorageConfig::DualRocksDbScyllaDb { .. } => Ok(Database::DualRocksDbScyllaDb),
}
}
}
/// Starts a local Linera network on Kubernetes and blocks until shutdown.
///
/// Validates the topology arguments, spawns the cluster via
/// [`LocalKubernetesNetConfig`], optionally starts a faucet, and then waits
/// for a shutdown signal before tearing everything down.
///
/// # Panics
/// Panics on invalid topology (no validators/proxies/shards), when
/// `faucet_chain` is given without `with_faucet`, or when
/// `with_block_exporter` is set with a zero `num_block_exporters`.
#[expect(clippy::too_many_arguments)]
#[cfg(feature = "kubernetes")]
pub async fn handle_net_up_kubernetes(
    num_other_initial_chains: u32,
    initial_amount: u128,
    num_initial_validators: usize,
    num_proxies: usize,
    num_shards: usize,
    testing_prng_seed: Option<u64>,
    binaries: &Option<Option<PathBuf>>,
    no_build: bool,
    docker_image_name: String,
    build_mode: BuildMode,
    policy_config: ResourceControlPolicyConfig,
    with_faucet: bool,
    faucet_chain: Option<u32>,
    faucet_port: NonZeroU16,
    faucet_amount: Amount,
    with_block_exporter: bool,
    num_block_exporters: usize,
    indexer_image_name: String,
    explorer_image_name: String,
    dual_store: bool,
    path: &Option<String>,
) -> anyhow::Result<()> {
    assert!(
        num_initial_validators >= 1,
        "The local test network must have at least one validator."
    );
    assert!(
        num_proxies >= 1,
        "The local test network must have at least one proxy."
    );
    assert!(
        num_shards >= 1,
        "The local test network must have at least one shard per validator."
    );
    // --faucet-chain only makes sense when a faucet is actually started.
    if faucet_chain.is_some() {
        assert!(
            with_faucet,
            "--faucet-chain must be provided only with --with-faucet"
        );
    }
    // Shutdown signals cancel the token we wait on at the end.
    let shutdown_notifier = CancellationToken::new();
    tokio::spawn(listen_for_shutdown_signals(shutdown_notifier.clone()));
    // Without --with-block-exporter, force the exporter count to zero.
    let num_block_exporters = if with_block_exporter {
        assert!(
            num_block_exporters > 0,
            "If --with-block-exporter is provided, --num-block-exporters must be greater than 0"
        );
        num_block_exporters
    } else {
        0
    };
    let config = LocalKubernetesNetConfig {
        network: Network::Grpc,
        testing_prng_seed,
        num_other_initial_chains,
        initial_amount: Amount::from_tokens(initial_amount),
        num_initial_validators,
        num_proxies,
        num_shards,
        binaries: binaries.clone().into(),
        no_build,
        docker_image_name,
        build_mode,
        policy_config,
        num_block_exporters,
        indexer_image_name,
        explorer_image_name,
        dual_store,
        path_provider: PathProvider::from_path_option(path)?,
    };
    let (mut net, client) = config.instantiate().await?;
    let faucet_service = print_messages_and_create_faucet(
        client,
        with_faucet,
        faucet_chain,
        faucet_port,
        faucet_amount,
        num_other_initial_chains,
    )
    .await?;
    wait_for_shutdown(shutdown_notifier, &mut net, faucet_service).await
}
/// Starts a local Linera network backed by the configured storage and blocks
/// until shutdown.
///
/// Validates the topology arguments, resolves the storage backend (possibly
/// spawning a storage-service child process), spawns the network via
/// [`LocalNetConfig`], optionally starts a faucet, and then waits for a
/// shutdown signal before tearing everything down.
///
/// # Panics
/// Panics on invalid topology or when `external_protocol` is neither "grpc"
/// nor "grpcs".
#[expect(clippy::too_many_arguments)]
pub async fn handle_net_up_service(
    num_other_initial_chains: u32,
    initial_amount: u128,
    num_initial_validators: usize,
    num_shards: usize,
    testing_prng_seed: Option<u64>,
    policy_config: ResourceControlPolicyConfig,
    cross_chain_config: CrossChainConfig,
    with_block_exporter: bool,
    block_exporter_address: String,
    block_exporter_port: NonZeroU16,
    path: &Option<String>,
    storage: &Option<String>,
    external_protocol: String,
    with_faucet: bool,
    faucet_chain: Option<u32>,
    faucet_port: NonZeroU16,
    faucet_amount: Amount,
) -> anyhow::Result<()> {
    assert!(
        num_initial_validators >= 1,
        "The local test network must have at least one validator."
    );
    assert!(
        num_shards >= 1,
        "The local test network must have at least one shard per validator."
    );
    // Shutdown signals cancel the token we wait on at the end.
    let shutdown_notifier = CancellationToken::new();
    tokio::spawn(listen_for_shutdown_signals(shutdown_notifier.clone()));
    // `storage` also owns the service guard (if any) until this call returns.
    let storage = StorageConfigProvider::new(storage).await?;
    let storage_config = storage.inner_storage_config().clone();
    let namespace = storage.namespace().to_string();
    let database = storage.database()?;
    let storage_config_builder = InnerStorageConfigBuilder::ExistingConfig { storage_config };
    let external = match external_protocol.as_str() {
        "grpc" => Network::Grpc,
        "grpcs" => Network::Grpcs,
        _ => panic!("Only allowed options are grpc and grpcs"),
    };
    let internal = Network::Grpc;
    let network = NetworkConfig { external, internal };
    let path_provider = PathProvider::from_path_option(path)?;
    let num_proxies = 1; // Local networks currently support exactly 1 proxy.
    let block_exporters = ExportersSetup::new(
        with_block_exporter,
        block_exporter_address,
        block_exporter_port,
    );
    let config = LocalNetConfig {
        network,
        database,
        testing_prng_seed,
        namespace,
        num_other_initial_chains,
        initial_amount: Amount::from_tokens(initial_amount),
        num_initial_validators,
        num_shards,
        num_proxies,
        policy_config,
        cross_chain_config,
        storage_config_builder,
        path_provider,
        block_exporters,
    };
    let (mut net, client) = config.instantiate().await?;
    let faucet_service = print_messages_and_create_faucet(
        client,
        with_faucet,
        faucet_chain,
        faucet_port,
        faucet_amount,
        num_other_initial_chains,
    )
    .await?;
    wait_for_shutdown(shutdown_notifier, &mut net, faucet_service).await
}
/// Blocks until a shutdown is requested, then tears down the faucet service
/// (if one is running) followed by the local network itself.
async fn wait_for_shutdown(
    shutdown_notifier: CancellationToken,
    net: &mut impl LineraNet,
    faucet_service: Option<FaucetService>,
) -> anyhow::Result<()> {
    // Park here until a shutdown signal cancels the token.
    shutdown_notifier.cancelled().await;
    eprintln!();
    // The faucet must go down before the network it talks to.
    if let Some(faucet) = faucet_service {
        eprintln!("Terminating the faucet service");
        faucet.terminate().await?;
    }
    eprintln!("Terminating the local test network");
    net.terminate().await?;
    eprintln!("Done.");
    Ok(())
}
/// Prints the environment variables needed to use the new network's admin
/// wallet and, if requested, starts a faucet service on `faucet_port`.
///
/// Returns the running [`FaucetService`] (so the caller can terminate it on
/// shutdown), or `None` when `with_faucet` is false.
///
/// # Panics
/// Panics when `faucet_chain` is `Some(idx)` but `idx` is not strictly
/// smaller than `num_other_initial_chains`, or when no non-admin chain exists.
async fn print_messages_and_create_faucet(
    client: ClientWrapper,
    with_faucet: bool,
    faucet_chain: Option<u32>,
    faucet_port: NonZeroU16,
    faucet_amount: Amount,
    num_other_initial_chains: u32,
) -> Result<Option<FaucetService>, anyhow::Error> {
    // Make time to (hopefully) display the message after the tracing logs.
    linera_base::time::timer::sleep(Duration::from_secs(1)).await;
    // Create the wallet for the initial "root" chains.
    info!("Local test network successfully started.");
    eprintln!(
        "To use the admin wallet of this test network, you may set \
        the environment variables LINERA_WALLET, LINERA_KEYSTORE, \
        and LINERA_STORAGE as follows.\n"
    );
    println!(
        "{}",
        format!(
            "export LINERA_WALLET=\"{}\"",
            client.wallet_path().display(),
        )
        .bold(),
    );
    println!(
        "{}",
        format!(
            "export LINERA_KEYSTORE=\"{}\"",
            client.keystore_path().display(),
        )
        .bold()
    );
    println!(
        "{}",
        format!("export LINERA_STORAGE=\"{}\"", client.storage_path()).bold(),
    );
    let wallet: crate::wallet::Wallet = client.load_wallet()?;
    let chains: Vec<_> = wallet.chain_ids();
    // Run the faucet, if requested.
    let faucet_service = if with_faucet {
        let faucet_chain = if let Some(faucet_chain_idx) = faucet_chain {
            assert!(
                num_other_initial_chains > faucet_chain_idx,
                "num_other_initial_chains must be strictly greater than the faucet chain index if \
                with_faucet is true"
            );
            // This picks a lexicographically faucet_chain_idx-th non-admin chain.
            Some(
                chains
                    .into_iter()
                    .filter(|chain_id| *chain_id != wallet.genesis_admin_chain())
                    .nth(faucet_chain_idx as usize)
                    .expect("there should be at least one non-admin chain"),
            )
        } else {
            // No explicit chain index: let the faucet choose its own chain.
            None
        };
        eprintln!("To connect to this network, you can use the following faucet URL:");
        println!(
            "{}",
            format!("export LINERA_FAUCET_URL=\"http://localhost:{faucet_port}\"").bold(),
        );
        let service = client
            .run_faucet(Some(faucet_port.into()), faucet_chain, faucet_amount)
            .await?;
        Some(service)
    } else {
        None
    };
    println!();
    eprintln!(
        "\nREADY!\nPress ^C to terminate the local test network and clean the temporary directory."
    );
    Ok(faucet_service)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/validator.rs | linera-service/src/cli/validator.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Validator management commands.
use std::{collections::HashMap, num::NonZero, str::FromStr};
use anyhow::Context as _;
use futures::stream::TryStreamExt as _;
use linera_base::{
crypto::{AccountPublicKey, ValidatorPublicKey},
identifiers::ChainId,
};
use linera_client::{chain_listener::ClientContext as _, client_context::ClientContext};
use linera_core::{data_types::ClientOutcome, node::ValidatorNodeProvider, Wallet as _};
use linera_execution::committee::{Committee, ValidatorState};
use serde::{Deserialize, Serialize};
/// A strictly positive number of votes, used as a validator's voting weight.
///
/// (The previous doc comment here described a `ClientContext` type alias and
/// did not match this type.)
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Votes(pub NonZero<u64>);
impl Default for Votes {
    /// Defaults to a single vote.
    fn default() -> Self {
        Self(nonzero_lit::u64!(1))
    }
}
impl FromStr for Votes {
    type Err = <NonZero<u64> as FromStr>::Err;
    /// Parses a positive integer; zero and non-numeric input are rejected by
    /// the underlying `NonZero<u64>` parser.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Votes(s.parse()?))
    }
}
/// Specification for a validator to add or modify.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Spec {
    /// The validator's consensus public key.
    pub public_key: ValidatorPublicKey,
    /// The account public key used for payments and rewards.
    pub account_key: AccountPublicKey,
    /// The network address where the validator can be reached.
    pub network_address: url::Url,
    /// The validator's voting weight; defaults to one vote when omitted.
    #[serde(default)]
    pub votes: Votes,
}
/// Represents an update to a validator's configuration in batch operations.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Change {
    /// The account public key used for payments and rewards.
    pub account_key: AccountPublicKey,
    /// The network address where the validator can be reached.
    pub address: url::Url,
    /// The validator's voting weight; defaults to one vote when omitted.
    #[serde(default)]
    pub votes: Votes,
}
/// Structure for batch validator operations from JSON file.
/// Maps validator public keys to their desired state:
/// - `null` means remove the validator
/// - `{accountKey, address, votes}` means add or modify the validator
/// - Keys not present in the map are left unchanged
pub type BatchFile = HashMap<ValidatorPublicKey, Option<Change>>;
/// Structure for batch validator queries from JSON file.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryBatch {
    /// The validators to query.
    pub validators: Vec<Spec>,
}
/// Validator subcommands.
// NOTE: each variant's CLI help text comes from the doc comment on the
// corresponding argument struct below; adding `///` comments to the variants
// here would override that help, so only regular comments are used.
#[derive(Debug, Clone, clap::Subcommand)]
pub enum Command {
    Add(Add),
    BatchQuery(BatchQuery),
    Update(Update),
    List(List),
    Query(Query),
    Remove(Remove),
    Sync(Sync),
}
/// Add a validator to the committee.
///
/// Adds a new validator with the specified public key, account key, network address,
/// and voting weight. The validator must not already exist in the committee.
#[derive(Debug, Clone, clap::Parser)]
pub struct Add {
    /// Public key of the validator to add
    #[arg(long)]
    public_key: ValidatorPublicKey,
    /// Account public key for receiving payments and rewards
    #[arg(long)]
    account_key: AccountPublicKey,
    /// Network address where the validator can be reached (e.g., grpcs://host:port)
    #[arg(long)]
    address: url::Url,
    /// Voting weight for consensus (default: 1)
    // NOTE(review): `required = false` with no `default_value_t` — confirm clap
    // actually falls back to `Votes::default()` when `--votes` is omitted.
    #[arg(long, required = false)]
    votes: Votes,
    /// Skip online connectivity verification before adding
    #[arg(long)]
    skip_online_check: bool,
}
/// Query multiple validators using a JSON specification file.
///
/// Reads validator specifications from a JSON file and queries their state.
/// The JSON should contain an array of validator objects with publicKey and networkAddress.
#[derive(Debug, Clone, clap::Parser)]
pub struct BatchQuery {
    /// Path to JSON file containing validator query specifications
    file: clio::Input,
    /// Chain ID to query (defaults to default chain)
    #[arg(long)]
    chain_id: Option<ChainId>,
}
/// Apply multiple validator changes from JSON input.
///
/// Reads a JSON object mapping validator public keys to their desired state:
/// - Key with state object (address, votes, accountKey): add or modify validator
/// - Key with null: remove validator
/// - Keys not present: unchanged
///
/// Input can be provided via file path, stdin pipe, or shell redirect.
#[derive(Debug, Clone, clap::Parser)]
pub struct Update {
    /// Path to JSON file with validator changes (omit or use "-" for stdin)
    // NOTE(review): presumably `clio::Input` defaults to stdin when the
    // argument is omitted — verify against clio's Default impl.
    #[arg(required = false)]
    file: clio::Input,
    /// Preview changes without applying them
    #[arg(long)]
    dry_run: bool,
    /// Skip confirmation prompt (use with caution)
    #[arg(long, short = 'y')]
    yes: bool,
    /// Skip online connectivity checks for validators being added or modified
    #[arg(long)]
    skip_online_check: bool,
}
/// List all validators in the committee.
///
/// Displays the current validator set with their network addresses, voting weights,
/// and connection status. Optionally filter by minimum voting weight.
#[derive(Debug, Clone, clap::Parser)]
pub struct List {
    /// Chain ID to query (defaults to default chain)
    #[arg(long)]
    chain_id: Option<ChainId>,
    /// Only show validators with at least this many votes
    #[arg(long)]
    min_votes: Option<u64>,
}
/// Query a single validator's state and connectivity.
///
/// Connects to a validator at the specified network address and queries its
/// view of the blockchain state, including block height and committee information.
#[derive(Debug, Clone, clap::Parser)]
pub struct Query {
    /// Network address of the validator (e.g., grpcs://host:port)
    address: String,
    /// Chain ID to query about (defaults to default chain)
    #[arg(long)]
    chain_id: Option<ChainId>,
    /// Expected public key of the validator (for verification)
    #[arg(long)]
    public_key: Option<ValidatorPublicKey>,
}
/// Remove a validator from the committee.
///
/// Removes the validator with the specified public key from the committee.
/// The validator will no longer participate in consensus.
#[derive(Debug, Clone, clap::Parser)]
pub struct Remove {
    /// Public key of the validator to remove
    #[arg(long)]
    public_key: ValidatorPublicKey,
}
/// Synchronize chain state to a validator.
///
/// Pushes the current chain state from local storage to a validator node,
/// ensuring the validator has up-to-date information about specified chains.
#[derive(Debug, Clone, clap::Parser)]
pub struct Sync {
    /// Network address of the validator to sync (e.g., grpcs://host:port)
    address: String,
    /// Chain IDs to synchronize (defaults to all chains in wallet)
    #[arg(long)]
    chains: Vec<ChainId>,
    /// Verify validator is online before syncing
    #[arg(long)]
    check_online: bool,
}
/// Parse a batch operations file or stdin.
/// Reads from the provided clio::Input, which handles both files and stdin transparently.
fn parse_batch_file(input: clio::Input) -> anyhow::Result<BatchFile> {
    let batch: BatchFile = serde_json::from_reader(input)?;
    Ok(batch)
}
/// Parse a validator query batch file.
fn parse_query_batch_file(input: clio::Input) -> anyhow::Result<QueryBatch> {
    let parsed: QueryBatch = serde_json::from_reader(input)?;
    Ok(parsed)
}
impl Command {
/// Main entry point for handling validator commands.
pub async fn run(
&self,
context: &mut ClientContext<
impl linera_core::Environment<ValidatorNode = linera_rpc::Client>,
>,
) -> anyhow::Result<()> {
use Command::*;
match self {
Add(command) => command.run(context).await,
BatchQuery(command) => command.run(context).await,
Update(command) => command.run(context).await,
List(command) => command.run(context).await,
Query(command) => command.run(context).await,
Remove(command) => command.run(context).await,
Sync(command) => Box::pin(command.run(context)).await,
}
}
}
impl Add {
    /// Adds a validator to the committee on the admin chain.
    ///
    /// Unless `--skip-online-check` was given, first verifies the validator is
    /// reachable, runs a compatible version, and belongs to the same network.
    /// Then stages a new committee containing the validator.
    async fn run(
        &self,
        context: &mut ClientContext<impl linera_core::Environment>,
    ) -> anyhow::Result<()> {
        tracing::info!("Starting operation to add validator");
        let time_start = std::time::Instant::now();
        // Check validator is online if requested
        if !self.skip_online_check {
            let node = context
                .make_node_provider()
                .make_node(self.address.as_str())?;
            context
                .check_compatible_version_info(self.address.as_str(), &node)
                .await?;
            context
                .check_matching_network_description(self.address.as_str(), &node)
                .await?;
        }
        let admin_id = context.admin_chain();
        let chain_client = context.make_chain_client(admin_id).await?;
        // Synchronize the chain state
        chain_client.synchronize_chain_state(admin_id).await?;
        let maybe_certificate = context
            .apply_client_command(&chain_client, |chain_client| {
                // Clone what the (possibly retried) async closure needs to own.
                let me = self.clone();
                let chain_client = chain_client.clone();
                async move {
                    // Create the new committee.
                    let mut committee = chain_client.local_committee().await?;
                    let policy = committee.policy().clone();
                    let mut validators = committee.validators().clone();
                    // `insert` overwrites any existing entry for this key.
                    validators.insert(
                        me.public_key,
                        ValidatorState {
                            network_address: me.address.to_string(),
                            votes: me.votes.0.get(),
                            account_public_key: me.account_key,
                        },
                    );
                    committee = Committee::new(validators, policy);
                    chain_client
                        .stage_new_committee(committee)
                        .await
                        .map(|outcome| outcome.map(Some))
                }
            })
            .await
            .context("Failed to stage committee")?;
        // `None` means there was nothing to commit.
        let Some(certificate) = maybe_certificate else {
            return Ok(());
        };
        tracing::info!("Created new committee:\n{:?}", certificate);
        let time_total = time_start.elapsed();
        tracing::info!("Operation confirmed after {} ms", time_total.as_millis());
        Ok(())
    }
}
impl BatchQuery {
    /// Queries every validator listed in the JSON batch file about a chain.
    ///
    /// Uses `--chain-id` if given, otherwise the wallet's default chain.
    /// Prints each validator's results and returns an error if any validator
    /// reported at least one issue.
    async fn run(
        &self,
        context: &mut ClientContext<impl linera_core::Environment>,
    ) -> anyhow::Result<()> {
        // Bug fix: `{file}` inside a plain string literal passed to `context`
        // is NOT interpolated, so the old error message contained the literal
        // text "{file}". Build the message explicitly with `format!`, matching
        // the style used by `Update::run`.
        let batch = parse_query_batch_file(self.file.clone())
            .with_context(|| format!("parsing query batch file `{}`", self.file))?;
        let chain_id = self.chain_id.unwrap_or_else(|| context.default_chain());
        println!(
            "Querying {} validators about chain {chain_id}.\n",
            batch.validators.len()
        );
        let node_provider = context.make_node_provider();
        let mut has_errors = false;
        for spec in batch.validators {
            let node = node_provider.make_node(spec.network_address.as_str())?;
            let results = context
                .query_validator(
                    spec.network_address.as_str(),
                    &node,
                    chain_id,
                    Some(&spec.public_key),
                )
                .await;
            if !results.errors().is_empty() {
                has_errors = true;
                for error in results.errors() {
                    tracing::error!("Validator {}: {}", spec.public_key, error);
                }
            }
            results.print(
                Some(&spec.public_key),
                Some(spec.network_address.as_str()),
                None,
                None,
            );
        }
        if has_errors {
            anyhow::bail!("Found issues while querying validators");
        }
        Ok(())
    }
}
impl Update {
    /// Applies a batch of validator additions, modifications, and removals
    /// read from JSON (file or stdin).
    ///
    /// Classifies each entry against the current committee, prints a recap,
    /// honors `--dry-run`, asks for interactive confirmation unless `--yes`
    /// was given, optionally checks that added/modified validators are
    /// online, and finally stages a single new committee containing all
    /// changes.
    async fn run(
        &self,
        context: &mut ClientContext<impl linera_core::Environment>,
    ) -> anyhow::Result<()> {
        tracing::info!("Starting batch update operation");
        let time_start = std::time::Instant::now();
        // Parse the batch file or stdin
        let batch = parse_batch_file(self.file.clone())
            .with_context(|| format!("parsing batch file `{}`", self.file))?;
        if batch.is_empty() {
            tracing::warn!("No validator changes specified in input.");
            return Ok(());
        }
        // Separate operations by type for logging and validation
        let mut adds = Vec::new();
        let mut modifies = Vec::new();
        let mut removes = Vec::new();
        // Get current committee to determine if operation is add or modify
        let admin_id = context.client().admin_chain();
        let chain_client = context.make_chain_client(admin_id).await?;
        let current_committee = chain_client.local_committee().await?;
        let current_validators = current_committee.validators();
        for (public_key, change_opt) in &batch {
            match change_opt {
                None => {
                    // null = removal
                    removes.push(*public_key);
                }
                Some(spec) => {
                    // Already in the committee => modification; else addition.
                    if current_validators.contains_key(public_key) {
                        modifies.push((public_key, spec));
                    } else {
                        adds.push((public_key, spec));
                    }
                }
            }
        }
        // Display recap of changes
        println!(
            "\n╔══════════════════════════════════════════════════════════════════════════════╗"
        );
        println!(
            "║                             VALIDATOR BATCH UPDATE RECAP                     ║"
        );
        println!(
            "╚══════════════════════════════════════════════════════════════════════════════╝\n"
        );
        println!("Summary:");
        println!("  • {} validator(s) to add", adds.len());
        println!("  • {} validator(s) to modify", modifies.len());
        println!("  • {} validator(s) to remove", removes.len());
        println!();
        if !adds.is_empty() {
            println!("Validators to ADD:");
            for (pk, spec) in &adds {
                println!("  + {}", pk);
                println!("    Address: {}", spec.address);
                println!("    Account Key: {}", spec.account_key);
                println!("    Votes: {}", spec.votes.0.get());
            }
            println!();
        }
        if !modifies.is_empty() {
            println!("Validators to MODIFY:");
            for (pk, spec) in &modifies {
                println!("  * {}", pk);
                println!("    New Address: {}", spec.address);
                println!("    New Account Key: {}", spec.account_key);
                println!("    New Votes: {}", spec.votes.0.get());
            }
            println!();
        }
        if !removes.is_empty() {
            println!("Validators to REMOVE:");
            for pk in &removes {
                println!("  - {}", pk);
            }
            println!();
        }
        if self.dry_run {
            println!(
                "═════════════════════════════════════════════════════════════════════════════"
            );
            println!("DRY RUN MODE: No changes will be applied");
            println!(
                "═════════════════════════════════════════════════════════════════════════════\n"
            );
            return Ok(());
        }
        // Confirmation prompt (unless --yes flag is set)
        if !self.yes {
            println!(
                "═════════════════════════════════════════════════════════════════════════════"
            );
            println!("⚠️  WARNING: This operation will modify the validator committee.");
            println!("   Changes are permanent and will be broadcast to the network.");
            println!(
                "═════════════════════════════════════════════════════════════════════════════\n"
            );
            println!("Do you want to proceed? Type 'YES' (uppercase) to confirm: ");
            use std::io::{self, Write};
            io::stdout().flush()?;
            let mut input = String::new();
            io::stdin()
                .read_line(&mut input)
                .context("Failed to read confirmation input")?;
            let input = input.trim();
            if input != "YES" {
                println!("\nOperation cancelled. (Expected 'YES', got '{}')", input);
                return Ok(());
            }
            println!("\nConfirmed. Proceeding with batch update...\n");
        }
        // Check all validators are online if requested
        if !self.skip_online_check {
            let node_provider = context.make_node_provider();
            tracing::info!("Checking validators are online...");
            for (_, spec) in adds.iter().chain(modifies.iter()) {
                let address = &spec.address;
                let node = node_provider.make_node(address.as_str())?;
                context
                    .check_compatible_version_info(address.as_str(), &node)
                    .await?;
                context
                    .check_matching_network_description(address.as_str(), &node)
                    .await?;
            }
        }
        let admin_id = context.admin_chain();
        let chain_client = context.make_chain_client(admin_id).await?;
        // Synchronize the chain state
        chain_client.synchronize_chain_state(admin_id).await?;
        // Clone so the (possibly retried) closure can own its own copy.
        let batch_clone = batch.clone();
        let maybe_certificate = context
            .apply_client_command(&chain_client, |chain_client| {
                let chain_client = chain_client.clone();
                let batch = batch_clone.clone();
                async move {
                    // Get current committee
                    let mut committee = chain_client.local_committee().await?;
                    let policy = committee.policy().clone();
                    let mut validators = committee.validators().clone();
                    // Apply operations based on the batch specification
                    for (public_key, change_opt) in &batch {
                        if let Some(spec) = change_opt {
                            // Update object - add or modify validator
                            let address = &spec.address;
                            let votes = spec.votes.0.get();
                            let account_key = spec.account_key;
                            let exists = validators.contains_key(public_key);
                            validators.insert(
                                *public_key,
                                ValidatorState {
                                    network_address: address.to_string(),
                                    votes,
                                    account_public_key: account_key,
                                },
                            );
                            if exists {
                                tracing::info!(
                                    "Modified validator {} @ {} ({} votes)",
                                    public_key,
                                    address,
                                    votes
                                );
                            } else {
                                tracing::info!(
                                    "Added validator {} @ {} ({} votes)",
                                    public_key,
                                    address,
                                    votes
                                );
                            }
                        } else {
                            // null - remove validator
                            if validators.remove(public_key).is_none() {
                                tracing::warn!(
                                    "Validator {} does not exist; skipping remove",
                                    public_key
                                );
                            } else {
                                tracing::info!("Removed validator {}", public_key);
                            }
                        }
                    }
                    // Create new committee
                    committee = Committee::new(validators, policy);
                    chain_client
                        .stage_new_committee(committee)
                        .await
                        .map(|outcome| outcome.map(Some))
                }
            })
            .await
            .context("Failed to stage committee")?;
        let Some(certificate) = maybe_certificate else {
            tracing::info!("No changes applied");
            return Ok(());
        };
        tracing::info!("Created new committee:\n{:?}", certificate);
        let time_total = time_start.elapsed();
        tracing::info!("Batch update confirmed after {} ms", time_total.as_millis());
        Ok(())
    }
}
impl List {
    /// Lists the committee's validators and queries each of them about a chain.
    ///
    /// Prints the local node's full view first, then each validator's
    /// differences from it, and fails if any validator reported errors.
    /// Validators below `--min-votes` are skipped.
    async fn run(
        &self,
        context: &mut ClientContext<impl linera_core::Environment>,
    ) -> anyhow::Result<()> {
        let chain_id = self.chain_id.unwrap_or_else(|| context.default_chain());
        println!("Querying validators about chain {chain_id}.\n");
        let local_results = context.query_local_node(chain_id).await?;
        let chain_client = context.make_chain_client(chain_id).await?;
        tracing::info!("Querying validators about chain {}", chain_id);
        let result = chain_client.local_committee().await;
        // Persist wallet updates before the `?` below can return early.
        context.update_wallet_from_client(&chain_client).await?;
        let committee = result.context("Failed to get local committee")?;
        tracing::info!(
            "Using the local set of validators: {:?}",
            committee.validators()
        );
        let node_provider = context.make_node_provider();
        let mut validator_results = Vec::new();
        for (name, state) in committee.validators() {
            if self.min_votes.is_some_and(|votes| state.votes < votes) {
                continue; // Skip validator with little voting weight.
            }
            let address = &state.network_address;
            let node = node_provider.make_node(address)?;
            let results = context
                .query_validator(address, &node, chain_id, Some(name))
                .await;
            validator_results.push((name, address, state.votes, results));
        }
        // Collect errors keyed by (validator, address) for the final report.
        let mut faulty_validators = std::collections::BTreeMap::<_, Vec<_>>::new();
        for (name, address, _votes, results) in &validator_results {
            for error in results.errors() {
                tracing::error!("{}", error);
                faulty_validators
                    .entry((*name, *address))
                    .or_default()
                    .push(error);
            }
        }
        // Print local node results first (everything)
        println!("Local Node:");
        local_results.print(None, None, None, None);
        println!();
        // Print validator results (only differences from local node)
        for (name, address, votes, results) in &validator_results {
            results.print(
                Some(name),
                Some(address),
                Some(*votes),
                Some(&local_results),
            );
        }
        if !faulty_validators.is_empty() {
            println!("\nFaulty validators:");
            for ((name, address), errors) in faulty_validators {
                println!("  {} at {}: {} error(s)", name, address, errors.len());
            }
            anyhow::bail!("Found faulty validators");
        }
        Ok(())
    }
}
impl Query {
async fn run(
&self,
context: &mut ClientContext<impl linera_core::Environment>,
) -> anyhow::Result<()> {
let node = context.make_node_provider().make_node(&self.address)?;
let chain_id = self.chain_id.unwrap_or_else(|| context.default_chain());
println!("Querying validator about chain {chain_id}.\n");
let results = context
.query_validator(&self.address, &node, chain_id, self.public_key.as_ref())
.await;
for error in results.errors() {
tracing::error!("{}", error);
}
results.print(self.public_key.as_ref(), Some(&self.address), None, None);
if !results.errors().is_empty() {
anyhow::bail!(
"Found one or several issue(s) while querying validator {}",
self.address
);
}
Ok(())
}
}
impl Remove {
    /// Removes a validator from the committee on the admin chain.
    ///
    /// If the validator is not part of the current committee, logs an error
    /// and returns successfully without staging a new committee.
    async fn run(
        &self,
        context: &mut ClientContext<impl linera_core::Environment>,
    ) -> anyhow::Result<()> {
        tracing::info!("Starting operation to remove validator");
        let time_start = std::time::Instant::now();
        let admin_id = context.admin_chain();
        let chain_client = context.make_chain_client(admin_id).await?;
        // Synchronize the chain state
        chain_client.synchronize_chain_state(admin_id).await?;
        let maybe_certificate = context
            .apply_client_command(&chain_client, |chain_client| {
                let chain_client = chain_client.clone();
                async move {
                    // Create the new committee.
                    let mut committee = chain_client.local_committee().await?;
                    let policy = committee.policy().clone();
                    let mut validators = committee.validators().clone();
                    if validators.remove(&self.public_key).is_none() {
                        // Nothing to do: report and commit no certificate.
                        tracing::error!("Validator {} does not exist; aborting.", self.public_key);
                        return Ok(ClientOutcome::Committed(None));
                    }
                    committee = Committee::new(validators, policy);
                    chain_client
                        .stage_new_committee(committee)
                        .await
                        .map(|outcome| outcome.map(Some))
                }
            })
            .await
            .context("Failed to stage committee")?;
        // `None` covers both "validator absent" and "nothing committed".
        let Some(certificate) = maybe_certificate else {
            return Ok(());
        };
        tracing::info!("Created new committee:\n{:?}", certificate);
        let time_total = time_start.elapsed();
        tracing::info!("Operation confirmed after {} ms", time_total.as_millis());
        Ok(())
    }
}
impl Sync {
    /// Pushes local chain state to a single validator.
    ///
    /// Syncs the chains given via `--chains`, or every chain in the wallet
    /// when none are specified. With `--check-online`, first verifies the
    /// validator's version and network description.
    async fn run(
        &self,
        context: &mut ClientContext<
            impl linera_core::Environment<ValidatorNode = linera_rpc::Client>,
        >,
    ) -> anyhow::Result<()> {
        tracing::info!("Starting sync operation for validator at {}", self.address);
        // Check validator is online if requested
        if self.check_online {
            let node_provider = context.make_node_provider();
            let node = node_provider.make_node(&self.address)?;
            context
                .check_compatible_version_info(&self.address, &node)
                .await?;
            context
                .check_matching_network_description(&self.address, &node)
                .await?;
        }
        // If no chains specified, use all chains from wallet
        let chains_to_sync = if self.chains.is_empty() {
            context.wallet().chain_ids().try_collect().await?
        } else {
            self.chains.clone()
        };
        tracing::info!(
            "Syncing {} chains to validator {}",
            chains_to_sync.len(),
            self.address
        );
        // Create validator node
        let node_provider = context.make_node_provider();
        let validator = node_provider.make_node(&self.address)?;
        // Sync each chain
        for chain_id in chains_to_sync {
            tracing::info!("Syncing chain {} to {}", chain_id, self.address);
            let chain = context.make_chain_client(chain_id).await?;
            // Boxed like the matching Box::pin in Command::run — presumably
            // because this future is large.
            Box::pin(chain.sync_validator(validator.clone())).await?;
            tracing::info!("Chain {} synced successfully", chain_id);
        }
        tracing::info!("Sync operation completed successfully");
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use std::io::Write;
use tempfile::NamedTempFile;
use super::*;
#[test]
fn test_parse_batch_file_valid() {
// Generate correct JSON format using test keys
let pk0 = ValidatorPublicKey::test_key(0);
let pk1 = ValidatorPublicKey::test_key(1);
let pk2 = ValidatorPublicKey::test_key(2);
let mut batch = BatchFile::new();
// Add operation - validator with full spec
batch.insert(
pk0,
Some(Change {
account_key: AccountPublicKey::test_key(0),
address: "grpcs://validator1.example.com:443".parse().unwrap(),
votes: Votes(NonZero::new(100).unwrap()),
}),
);
// Modify operation - validator with full spec (would be modify if validator exists)
batch.insert(
pk1,
Some(Change {
account_key: AccountPublicKey::test_key(1),
address: "grpcs://validator2.example.com:443".parse().unwrap(),
votes: Votes(NonZero::new(150).unwrap()),
}),
);
// Remove operation - null
batch.insert(pk2, None);
let json = serde_json::to_string(&batch).unwrap();
let mut temp_file = NamedTempFile::new().unwrap();
temp_file.write_all(json.as_bytes()).unwrap();
temp_file.flush().unwrap();
let input = clio::Input::new(temp_file.path().to_str().unwrap()).unwrap();
let result = parse_batch_file(input);
assert!(
result.is_ok(),
"Failed to parse batch file: {:?}",
result.err()
);
let parsed_batch = result.unwrap();
assert_eq!(parsed_batch.len(), 3);
// Check pk0 (add)
assert!(parsed_batch.contains_key(&pk0));
let spec0 = parsed_batch.get(&pk0).unwrap().as_ref().unwrap();
assert_eq!(spec0.votes.0.get(), 100);
// Check pk1 (modify)
assert!(parsed_batch.contains_key(&pk1));
let spec1 = parsed_batch.get(&pk1).unwrap().as_ref().unwrap();
assert_eq!(spec1.votes.0.get(), 150);
// Check pk2 (remove with null)
assert!(parsed_batch.contains_key(&pk2));
assert!(parsed_batch.get(&pk2).unwrap().is_none());
}
#[test]
fn test_parse_batch_file_empty() {
let json = r#"{}"#;
let mut temp_file = NamedTempFile::new().unwrap();
temp_file.write_all(json.as_bytes()).unwrap();
temp_file.flush().unwrap();
let input = clio::Input::new(temp_file.path().to_str().unwrap()).unwrap();
let result = parse_batch_file(input);
assert!(result.is_ok());
let batch = result.unwrap();
assert_eq!(batch.len(), 0);
}
#[test]
fn test_parse_query_batch_file_valid() {
// Generate correct JSON format using test keys
let spec1 = Spec {
public_key: ValidatorPublicKey::test_key(0),
account_key: AccountPublicKey::test_key(0),
network_address: "grpcs://validator1.example.com:443".parse().unwrap(),
votes: Votes(NonZero::new(100).unwrap()),
};
let spec2 = Spec {
public_key: ValidatorPublicKey::test_key(1),
account_key: AccountPublicKey::test_key(1),
network_address: "grpcs://validator2.example.com:443".parse().unwrap(),
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli/main.rs | linera-service/src/cli/main.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![recursion_limit = "256"]
#[cfg(feature = "jemalloc")]
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
// jemalloc configuration for memory profiling with jemalloc_pprof
// prof:true,prof_active:true - Enable profiling from start
// lg_prof_sample:19 - Sample every 512KB for good detail/overhead balance
// Linux/other platforms: use unprefixed malloc (with unprefixed_malloc_on_supported_platforms)
#[cfg(all(feature = "memory-profiling", not(target_os = "macos")))]
#[allow(non_upper_case_globals)]
#[export_name = "malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
// macOS: use prefixed malloc (without unprefixed_malloc_on_supported_platforms)
#[cfg(all(feature = "memory-profiling", target_os = "macos"))]
#[allow(non_upper_case_globals)]
#[export_name = "_rjem_malloc_conf"]
pub static malloc_conf: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0";
mod options;
use std::{
collections::{BTreeMap, BTreeSet},
env,
path::PathBuf,
process,
sync::Arc,
};
use anyhow::{bail, ensure, Context, Error};
use async_trait::async_trait;
use chrono::Utc;
use clap_complete::generate;
use colored::Colorize;
use futures::{lock::Mutex, FutureExt as _, StreamExt as _};
use linera_base::{
crypto::Signer,
data_types::{ApplicationPermissions, Timestamp},
identifiers::{AccountOwner, ChainId},
listen_for_shutdown_signals,
ownership::ChainOwnership,
time::{Duration, Instant},
};
use linera_client::{
benchmark::BenchmarkConfig,
chain_listener::{ChainListener, ChainListenerConfig, ClientContext as _},
config::{CommitteeConfig, GenesisConfig},
};
use linera_core::{
client::{chain_client, ListeningMode},
data_types::ClientOutcome,
node::{ValidatorNode, ValidatorNodeProvider},
wallet,
worker::Reason,
JoinSetExt as _, LocalNodeError,
};
use linera_execution::committee::Committee;
use linera_faucet_server::{FaucetConfig, FaucetService};
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_persistent::{self as persistent, Persist, PersistExt as _};
use linera_service::{
cli::{
command::{
BenchmarkCommand, BenchmarkOptions, ChainCommand, ClientCommand, DatabaseToolCommand,
NetCommand, ProjectCommand, WalletCommand,
},
net_up_utils,
},
cli_wrappers::{self, local_net::PathProvider, ClientWrapper, Network, OnClientDrop},
controller::Controller,
node_service::NodeService,
project::{self, Project},
storage::{Runnable, RunnableWithStore},
task_processor::TaskProcessor,
util,
};
use linera_storage::{DbStorage, Storage};
use linera_views::store::{KeyValueDatabase, KeyValueStore};
use options::Options;
use serde_json::Value;
use tempfile::NamedTempFile;
use tokio::{
io::AsyncWriteExt,
process::{ChildStdin, Command},
sync::{mpsc, oneshot},
task::JoinSet,
time,
};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn, Instrument as _};
struct Job(Options);
/// Check if an error is retryable (HTTP 502, 503, 504, timeouts, connection errors)
fn is_retryable_error(err: &anyhow::Error) -> bool {
// Check for reqwest errors in the error chain
if let Some(reqwest_err) = err.downcast_ref::<reqwest::Error>() {
// Check for retryable HTTP status codes (502, 503, 504)
if let Some(status) = reqwest_err.status() {
return status == reqwest::StatusCode::BAD_GATEWAY
|| status == reqwest::StatusCode::SERVICE_UNAVAILABLE
|| status == reqwest::StatusCode::GATEWAY_TIMEOUT;
}
// Check for connection errors or timeouts
return reqwest_err.is_timeout() || reqwest_err.is_connect();
}
false
}
/// Retry a faucet operation with exponential backoff
async fn retry_faucet_operation<F, Fut, T>(operation: F) -> anyhow::Result<T>
where
F: Fn() -> Fut,
Fut: std::future::Future<Output = anyhow::Result<T>>,
{
let max_retries = 5;
let mut attempt = 0;
loop {
attempt += 1;
match operation().await {
Ok(result) => return Ok(result),
Err(err) if attempt < max_retries && is_retryable_error(&err) => {
let backoff_ms = 100 * 2_u64.pow(attempt - 1);
warn!(
"Faucet operation failed with retryable error (attempt {}/{}): {:?}. Retrying after {}ms",
attempt, max_retries, err, backoff_ms
);
tokio::time::sleep(Duration::from_millis(backoff_ms)).await;
}
Err(err) => return Err(err),
}
}
}
fn read_json(string: Option<String>, path: Option<PathBuf>) -> anyhow::Result<Vec<u8>> {
let value = match (string, path) {
(Some(_), Some(_)) => bail!("cannot have both a json string and file"),
(Some(s), None) => serde_json::from_str(&s)?,
(None, Some(path)) => {
let s = fs_err::read_to_string(path)?;
serde_json::from_str(&s)?
}
(None, None) => Value::Null,
};
Ok(serde_json::to_vec(&value)?)
}
#[async_trait]
impl Runnable for Job {
type Output = anyhow::Result<()>;
async fn run<S>(self, storage: S) -> anyhow::Result<()>
where
S: Storage + Clone + Send + Sync + 'static,
{
let Job(options) = self;
let mut wallet = options.wallet()?;
let mut signer = options.signer()?;
let command = options.command.clone();
use ClientCommand::*;
match command {
Transfer {
sender,
recipient,
amount,
} => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_client = context.make_chain_client(sender.chain_id).await?;
info!(
"Starting transfer of {} native tokens from {} to {}",
amount, sender, recipient
);
let time_start = Instant::now();
let certificate = context
.apply_client_command(&chain_client, |chain_client| {
let chain_client = chain_client.clone();
async move {
chain_client
.transfer_to_account(sender.owner, amount, recipient)
.await
}
})
.await
.context("Failed to make transfer")?;
let time_total = time_start.elapsed();
info!("Transfer confirmed after {} ms", time_total.as_millis());
debug!("{:?}", certificate);
}
OpenChain {
chain_id,
owner,
balance,
super_owner,
} => {
let new_owner = owner.unwrap_or_else(|| signer.generate_new().into());
signer.persist().await?;
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_id = chain_id.unwrap_or_else(|| context.default_chain());
let chain_client = context.make_chain_client(chain_id).await?;
info!("Opening a new chain from existing chain {}", chain_id);
let time_start = Instant::now();
let (description, certificate) = context
.apply_client_command(&chain_client, |chain_client| {
let ownership = if super_owner {
ChainOwnership::single_super(new_owner)
} else {
ChainOwnership::single(new_owner)
};
let chain_client = chain_client.clone();
async move {
chain_client
.open_chain(ownership, ApplicationPermissions::default(), balance)
.await
}
})
.await
.context("Failed to open chain")?;
let timestamp = certificate.block().header.timestamp;
let epoch = certificate.block().header.epoch;
let id = description.id();
context
.update_wallet_for_new_chain(id, Some(new_owner), timestamp, epoch)
.await?;
let time_total = time_start.elapsed();
info!(
"Opening a new chain confirmed after {} ms",
time_total.as_millis()
);
debug!("{:?}", certificate);
// Print the new chain ID, and owner on stdout for scripting purposes.
println!("{}", id);
println!("{}", new_owner);
}
OpenMultiOwnerChain {
chain_id,
balance,
ownership_config,
application_permissions_config,
} => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_id = chain_id.unwrap_or_else(|| context.default_chain());
let chain_client = context.make_chain_client(chain_id).await?;
info!(
"Opening a new multi-owner chain from existing chain {}",
chain_id
);
let time_start = Instant::now();
let ownership = ChainOwnership::try_from(ownership_config)?;
let application_permissions =
ApplicationPermissions::from(application_permissions_config);
let (description, certificate) = context
.apply_client_command(&chain_client, |chain_client| {
let ownership = ownership.clone();
let application_permissions = application_permissions.clone();
let chain_client = chain_client.clone();
async move {
chain_client
.open_chain(ownership, application_permissions, balance)
.await
}
})
.await
.context("Failed to open chain")?;
let id = description.id();
// No owner. This chain can be assigned explicitly using the assign command.
let owner = None;
let timestamp = certificate.block().header.timestamp;
let epoch = certificate.block().header.epoch;
context
.update_wallet_for_new_chain(id, owner, timestamp, epoch)
.await?;
let time_total = time_start.elapsed();
info!(
"Opening a new multi-owner chain confirmed after {} ms",
time_total.as_millis()
);
debug!("{:?}", certificate);
// Print the new chain ID on stdout for scripting purposes.
println!("{}", id);
}
ShowOwnership { chain_id } => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let ownership = context.ownership(chain_id).await?;
let json = serde_json::to_string_pretty(&ownership)?;
println!("{}", json);
}
ChangeOwnership {
chain_id,
ownership_config,
} => {
ensure!(
!ownership_config.super_owners.is_empty()
|| !ownership_config.owners.is_empty(),
"This command requires at least one owner or super owner to be set. \
To close a chain, use `close-chain`. To show the current config, use `show-ownership`."
);
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
context.change_ownership(chain_id, ownership_config).await?
}
SetPreferredOwner { chain_id, owner } => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
context.set_preferred_owner(chain_id, owner).await?
}
ChangeApplicationPermissions {
chain_id,
application_permissions_config,
} => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_id = chain_id.unwrap_or_else(|| context.default_chain());
let chain_client = context.make_chain_client(chain_id).await?;
info!("Changing application permissions for chain {}", chain_id);
let time_start = Instant::now();
let application_permissions =
ApplicationPermissions::from(application_permissions_config);
let certificate = context
.apply_client_command(&chain_client, |chain_client| {
let application_permissions = application_permissions.clone();
let chain_client = chain_client.clone();
async move {
chain_client
.change_application_permissions(application_permissions)
.await
}
})
.await
.context("Failed to change application permissions")?;
let time_total = time_start.elapsed();
info!(
"Changing application permissions confirmed after {} ms",
time_total.as_millis()
);
debug!("{:?}", certificate);
}
CloseChain { chain_id } => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_client = context.make_chain_client(chain_id).await?;
info!("Closing chain {}", chain_id);
let time_start = Instant::now();
let result = context
.apply_client_command(&chain_client, |chain_client| {
let chain_client = chain_client.clone();
async move { chain_client.close_chain().await }
})
.await;
let certificate = match result {
Ok(Some(certificate)) => certificate,
Ok(None) => {
info!("Chain is already closed; nothing to do.");
return Ok(());
}
Err(error) => Err(error).context("Failed to close chain")?,
};
let time_total = time_start.elapsed();
info!(
"Closing chain confirmed after {} ms",
time_total.as_millis()
);
debug!("{:?}", certificate);
}
ShowNetworkDescription => {
let network_description = storage.read_network_description().await?;
let json = serde_json::to_string_pretty(&network_description)?;
println!("{}", json);
}
LocalBalance { account } => {
let context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let account = account.unwrap_or_else(|| context.default_account());
let chain_client = context.make_chain_client(account.chain_id).await?;
info!("Reading the balance of {} from the local state", account);
let time_start = Instant::now();
let balance = chain_client.local_owner_balance(account.owner).await?;
let time_total = time_start.elapsed();
info!("Local balance obtained after {} ms", time_total.as_millis());
println!("{}", balance);
}
QueryBalance { account } => {
let context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let account = account.unwrap_or_else(|| context.default_account());
let chain_client = context.make_chain_client(account.chain_id).await?;
info!(
"Evaluating the local balance of {account} by staging execution of known \
incoming messages"
);
let time_start = Instant::now();
let balance = chain_client.query_owner_balance(account.owner).await?;
let time_total = time_start.elapsed();
info!("Balance obtained after {} ms", time_total.as_millis());
println!("{}", balance);
}
SyncBalance { account } => {
let context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let account = account.unwrap_or_else(|| context.default_account());
let chain_client = context.make_chain_client(account.chain_id).await?;
info!("Synchronizing chain information and querying the local balance");
warn!("This command is deprecated. Use `linera sync && linera query-balance` instead.");
let time_start = Instant::now();
chain_client.synchronize_from_validators().await?;
let result = chain_client.query_owner_balance(account.owner).await;
context.update_wallet_from_client(&chain_client).await?;
let balance = result.context("Failed to synchronize from validators")?;
let time_total = time_start.elapsed();
info!(
"Synchronizing balance confirmed after {} ms",
time_total.as_millis()
);
println!("{}", balance);
}
Sync { chain_id } => {
let context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_id = chain_id.unwrap_or_else(|| context.default_chain());
let chain_client = context.make_chain_client(chain_id).await?;
info!("Synchronizing chain information");
let time_start = Instant::now();
chain_client.synchronize_from_validators().await?;
context.update_wallet_from_client(&chain_client).await?;
let time_total = time_start.elapsed();
info!(
"Synchronized chain information in {} ms",
time_total.as_millis()
);
}
ProcessInbox { chain_id } => {
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
let chain_id = chain_id.unwrap_or_else(|| context.default_chain());
let follow_only = context
.wallet()
.get(chain_id)
.is_some_and(|chain| chain.follow_only);
if follow_only {
anyhow::bail!(
"Cannot process inbox for follow-only chain {chain_id}. \
Use `linera assign` to take ownership of the chain first."
);
}
let chain_client = context.make_chain_client(chain_id).await?;
info!("Processing the inbox of chain {}", chain_id);
let time_start = Instant::now();
let certificates = context.process_inbox(&chain_client).await?;
let time_total = time_start.elapsed();
info!(
"Processed incoming messages with {} blocks in {} ms",
certificates.len(),
time_total.as_millis()
);
}
QueryShardInfo { chain_id } => {
let context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
println!("Querying validators for shard information about chain {chain_id}.\n");
let chain_client = context.make_chain_client(chain_id).await?;
let result = chain_client.local_committee().await;
context.update_wallet_from_client(&chain_client).await?;
let committee = result.context("Failed to get local committee")?;
let node_provider = context.make_node_provider();
println!("Chain ID: {}", chain_id);
println!("Validator Shard Information:\n");
for (name, state) in committee.validators() {
let address = &state.network_address;
let node = node_provider.make_node(address)?;
match node.get_shard_info(chain_id).await {
Ok(shard_info) => {
println!(" Validator: {}", name);
println!(" Address: {}", address);
println!(" Total Shards: {}", shard_info.total_shards);
println!(" Shard ID for chain: {}", shard_info.shard_id);
println!();
}
Err(e) => {
println!(" Validator: {}", name);
println!(" Address: {}", address);
println!(" Error: Failed to get shard info - {}", e);
println!();
}
}
}
}
command @ ResourceControlPolicy { .. } => {
info!("Starting operations to change resource control policy");
let time_start = Instant::now();
let mut context = options
.create_client_context(storage, wallet, signer.into_value())
.await?;
// ResourceControlPolicy doesn't need version checks
let admin_id = context.admin_chain();
let chain_client = context.make_chain_client(admin_id).await?;
// Synchronize the chain state to make sure we're applying the changes to the
// latest committee.
chain_client.synchronize_chain_state(admin_id).await?;
let maybe_certificate = context
.apply_client_command(&chain_client, |chain_client| {
let chain_client = chain_client.clone();
let command = command.clone();
async move {
// Update resource control policy
let mut committee = chain_client.local_committee().await.unwrap();
let mut policy = committee.policy().clone();
let validators = committee.validators().clone();
match command {
ResourceControlPolicy {
wasm_fuel_unit,
evm_fuel_unit,
read_operation,
write_operation,
byte_runtime,
byte_read,
byte_written,
blob_read,
blob_published,
blob_byte_read,
blob_byte_published,
byte_stored,
operation,
operation_byte,
message,
message_byte,
service_as_oracle_query,
http_request,
maximum_wasm_fuel_per_block,
maximum_evm_fuel_per_block,
maximum_service_oracle_execution_ms,
maximum_block_size,
maximum_blob_size,
maximum_published_blobs,
maximum_bytecode_size,
maximum_block_proposal_size,
maximum_bytes_read_per_block,
maximum_bytes_written_per_block,
maximum_oracle_response_bytes,
maximum_http_response_bytes,
http_request_timeout_ms,
http_request_allow_list,
} => {
let existing_policy = policy.clone();
policy = linera_execution::ResourceControlPolicy {
wasm_fuel_unit: wasm_fuel_unit
.unwrap_or(existing_policy.wasm_fuel_unit),
evm_fuel_unit: evm_fuel_unit
.unwrap_or(existing_policy.evm_fuel_unit),
read_operation: read_operation
.unwrap_or(existing_policy.read_operation),
write_operation: write_operation
.unwrap_or(existing_policy.write_operation),
byte_runtime: byte_runtime
.unwrap_or(existing_policy.byte_runtime),
byte_read: byte_read.unwrap_or(existing_policy.byte_read),
byte_written: byte_written
.unwrap_or(existing_policy.byte_written),
blob_read: blob_read.unwrap_or(existing_policy.blob_read),
blob_published: blob_published
.unwrap_or(existing_policy.blob_published),
blob_byte_read: blob_byte_read
.unwrap_or(existing_policy.blob_byte_read),
blob_byte_published: blob_byte_published
.unwrap_or(existing_policy.blob_byte_published),
byte_stored: byte_stored
.unwrap_or(existing_policy.byte_stored),
operation: operation.unwrap_or(existing_policy.operation),
operation_byte: operation_byte
.unwrap_or(existing_policy.operation_byte),
message: message.unwrap_or(existing_policy.message),
message_byte: message_byte
.unwrap_or(existing_policy.message_byte),
service_as_oracle_query: service_as_oracle_query
.unwrap_or(existing_policy.service_as_oracle_query),
http_request: http_request
.unwrap_or(existing_policy.http_request),
maximum_wasm_fuel_per_block: maximum_wasm_fuel_per_block
.unwrap_or(existing_policy.maximum_wasm_fuel_per_block),
maximum_evm_fuel_per_block: maximum_evm_fuel_per_block
.unwrap_or(existing_policy.maximum_evm_fuel_per_block),
maximum_service_oracle_execution_ms:
maximum_service_oracle_execution_ms.unwrap_or(
existing_policy.maximum_service_oracle_execution_ms,
),
maximum_block_size: maximum_block_size
.unwrap_or(existing_policy.maximum_block_size),
maximum_bytecode_size: maximum_bytecode_size
.unwrap_or(existing_policy.maximum_bytecode_size),
maximum_blob_size: maximum_blob_size
.unwrap_or(existing_policy.maximum_blob_size),
maximum_published_blobs: maximum_published_blobs
.unwrap_or(existing_policy.maximum_published_blobs),
maximum_block_proposal_size: maximum_block_proposal_size
.unwrap_or(existing_policy.maximum_block_proposal_size),
maximum_bytes_read_per_block: maximum_bytes_read_per_block
.unwrap_or(
existing_policy.maximum_bytes_read_per_block,
),
maximum_bytes_written_per_block:
maximum_bytes_written_per_block.unwrap_or(
existing_policy.maximum_bytes_written_per_block,
),
maximum_oracle_response_bytes:
maximum_oracle_response_bytes.unwrap_or(
existing_policy.maximum_oracle_response_bytes,
),
maximum_http_response_bytes: maximum_http_response_bytes
.unwrap_or(existing_policy.maximum_http_response_bytes),
http_request_timeout_ms: http_request_timeout_ms
.unwrap_or(existing_policy.http_request_timeout_ms),
http_request_allow_list: http_request_allow_list
.map(BTreeSet::from_iter)
.unwrap_or(existing_policy.http_request_allow_list),
};
info!("{policy}");
if committee.policy() == &policy {
return Ok(ClientOutcome::Committed(None));
}
}
_ => unreachable!(),
}
committee = Committee::new(validators, policy);
chain_client
.stage_new_committee(committee)
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/local_kubernetes_net.rs | linera-service/src/cli_wrappers/local_kubernetes_net.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use anyhow::{anyhow, bail, ensure, Result};
use async_trait::async_trait;
use futures::{future, lock::Mutex};
use k8s_openapi::api::core::v1::Pod;
use kube::{api::ListParams, Api, Client};
use linera_base::{
command::{resolve_binary, CommandExt},
data_types::Amount,
};
use linera_client::client_options::ResourceControlPolicyConfig;
use tokio::{process::Command, task::JoinSet};
use crate::cli_wrappers::{
docker::{BuildArg, DockerImage, Dockerfile},
helmfile::{HelmFile, DEFAULT_BLOCK_EXPORTER_PORT},
kind::KindCluster,
kubectl::KubectlInstance,
local_net::PathProvider,
util::get_github_root,
ClientWrapper, LineraNet, LineraNetConfig, Network, OnClientDrop,
};
#[derive(Clone, clap::Parser, clap::ValueEnum, Debug, Default)]
pub enum BuildMode {
Debug,
#[default]
Release,
}
impl std::str::FromStr for BuildMode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
clap::ValueEnum::from_str(s, true)
}
}
impl std::fmt::Display for BuildMode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
/// The information needed to start a [`LocalKubernetesNet`].
pub struct LocalKubernetesNetConfig {
pub network: Network,
pub testing_prng_seed: Option<u64>,
pub num_other_initial_chains: u32,
pub initial_amount: Amount,
pub num_initial_validators: usize,
pub num_proxies: usize,
pub num_shards: usize,
pub binaries: BuildArg,
pub no_build: bool,
pub docker_image_name: String,
pub build_mode: BuildMode,
pub policy_config: ResourceControlPolicyConfig,
pub num_block_exporters: usize,
pub indexer_image_name: String,
pub explorer_image_name: String,
pub dual_store: bool,
pub path_provider: PathProvider,
}
/// A set of Linera validators running locally as native processes.
#[derive(Clone)]
pub struct LocalKubernetesNet {
network: Network,
testing_prng_seed: Option<u64>,
next_client_id: usize,
binaries: BuildArg,
no_build: bool,
docker_image_name: String,
build_mode: BuildMode,
kubectl_instance: Arc<Mutex<KubectlInstance>>,
kind_clusters: Vec<KindCluster>,
num_initial_validators: usize,
num_proxies: usize,
num_shards: usize,
num_block_exporters: usize,
indexer_image_name: String,
explorer_image_name: String,
dual_store: bool,
path_provider: PathProvider,
}
#[async_trait]
impl LineraNetConfig for LocalKubernetesNetConfig {
type Net = LocalKubernetesNet;
async fn instantiate(self) -> Result<(Self::Net, ClientWrapper)> {
ensure!(
self.num_initial_validators > 0,
"There should be at least one initial validator"
);
let clusters = future::join_all((0..self.num_initial_validators).map(|_| async {
KindCluster::create()
.await
.expect("Creating kind cluster should not fail")
}))
.await;
let mut net = LocalKubernetesNet::new(
self.network,
self.testing_prng_seed,
self.binaries,
self.no_build,
self.docker_image_name,
self.build_mode,
KubectlInstance::new(Vec::new()),
clusters,
self.num_initial_validators,
self.num_proxies,
self.num_shards,
self.num_block_exporters,
self.indexer_image_name,
self.explorer_image_name,
self.dual_store,
self.path_provider,
)?;
let client = net.make_client().await;
net.generate_initial_validator_config().await.unwrap();
client
.create_genesis_config(
self.num_other_initial_chains,
self.initial_amount,
self.policy_config,
Some(vec!["localhost".to_owned()]),
)
.await
.unwrap();
net.run().await.unwrap();
Ok((net, client))
}
}
#[async_trait]
impl LineraNet for Arc<Mutex<LocalKubernetesNet>> {
async fn ensure_is_running(&mut self) -> Result<()> {
let self_clone = self.clone();
let mut self_lock = self_clone.lock().await;
self_lock.ensure_is_running().await
}
async fn make_client(&mut self) -> ClientWrapper {
let self_clone = self.clone();
let mut self_lock = self_clone.lock().await;
self_lock.make_client().await
}
async fn terminate(&mut self) -> Result<()> {
// Users are responsible for killing the clusters if they want to
Ok(())
}
}
#[async_trait]
impl LineraNet for LocalKubernetesNet {
async fn ensure_is_running(&mut self) -> Result<()> {
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::namespaced(client, "default");
let list_params = ListParams::default().labels("app=proxy");
for pod in pods.list(&list_params).await? {
if let Some(status) = pod.status {
if let Some(phase) = status.phase {
if phase != "Running" {
bail!(
"Validator {} is not Running",
pod.metadata
.name
.expect("Fetching pod name should not fail")
);
}
}
}
}
let list_params = ListParams::default().labels("app=shards");
for pod in pods.list(&list_params).await? {
if let Some(status) = pod.status {
if let Some(phase) = status.phase {
if phase != "Running" {
bail!(
"Shard {} is not Running",
pod.metadata
.name
.expect("Fetching pod name should not fail")
);
}
}
}
}
Ok(())
}
async fn make_client(&mut self) -> ClientWrapper {
let client = ClientWrapper::new(
self.path_provider.clone(),
self.network,
self.testing_prng_seed,
self.next_client_id,
OnClientDrop::LeakChains,
);
if let Some(seed) = self.testing_prng_seed {
self.testing_prng_seed = Some(seed + 1);
}
self.next_client_id += 1;
client
}
async fn terminate(&mut self) -> Result<()> {
let mut kubectl_instance = self.kubectl_instance.lock().await;
let mut errors = Vec::new();
for port_forward_child in &mut kubectl_instance.port_forward_children {
if let Err(e) = port_forward_child.kill().await {
errors.push(e.into());
}
}
for kind_cluster in &mut self.kind_clusters {
if let Err(e) = kind_cluster.delete().await {
errors.push(e);
}
}
if errors.is_empty() {
Ok(())
} else {
let err_str = if errors.len() > 1 {
"Multiple errors"
} else {
"One error"
};
Err(errors
.into_iter()
.fold(anyhow!("{err_str} occurred"), |acc, e: anyhow::Error| {
acc.context(e)
}))
}
}
}
impl LocalKubernetesNet {
#[expect(clippy::too_many_arguments)]
fn new(
network: Network,
testing_prng_seed: Option<u64>,
binaries: BuildArg,
no_build: bool,
docker_image_name: String,
build_mode: BuildMode,
kubectl_instance: KubectlInstance,
kind_clusters: Vec<KindCluster>,
num_initial_validators: usize,
num_proxies: usize,
num_shards: usize,
num_block_exporters: usize,
indexer_image_name: String,
explorer_image_name: String,
dual_store: bool,
path_provider: PathProvider,
) -> Result<Self> {
Ok(Self {
network,
testing_prng_seed,
next_client_id: 0,
binaries,
no_build,
docker_image_name,
build_mode,
kubectl_instance: Arc::new(Mutex::new(kubectl_instance)),
kind_clusters,
num_initial_validators,
num_proxies,
num_shards,
num_block_exporters,
indexer_image_name,
explorer_image_name,
dual_store,
path_provider,
})
}
async fn command_for_binary(&self, name: &'static str) -> Result<Command> {
let path = resolve_binary(name, env!("CARGO_PKG_NAME")).await?;
let mut command = Command::new(path);
command.current_dir(self.path_provider.path());
Ok(command)
}
fn configuration_string(&self, validator_number: usize) -> Result<String> {
let path = self
.path_provider
.path()
.join(format!("validator_{validator_number}.toml"));
let public_port = 19100 + validator_number;
let private_port = 20100;
let metrics_port = 21100;
let protocol = self.network.toml();
let host = self.network.localhost();
let mut content = format!(
r#"
server_config_path = "server_{validator_number}.json"
host = "{host}"
port = {public_port}
external_protocol = {protocol}
internal_protocol = {protocol}
"#
);
for proxy_id in 0..self.num_proxies {
content.push_str(&format!(
r#"
[[proxies]]
host = "proxy-{proxy_id}.proxy-internal.default.svc.cluster.local"
public_port = {public_port}
private_port = {private_port}
metrics_port = {metrics_port}
"#
));
}
for shard_id in 0..self.num_shards {
content.push_str(&format!(
r#"
[[shards]]
host = "shards-{shard_id}.shards.default.svc.cluster.local"
port = {public_port}
metrics_port = {metrics_port}
"#
));
}
if self.num_block_exporters > 0 {
for exporter_num in 0..self.num_block_exporters {
let block_exporter_port = DEFAULT_BLOCK_EXPORTER_PORT;
let block_exporter_host =
format!("linera-block-exporter-{exporter_num}.linera-block-exporter");
let config_content = format!(
r#"
[[block_exporters]]
host = "{block_exporter_host}"
port = {block_exporter_port}
"#
);
content.push_str(&config_content);
}
}
fs_err::write(&path, content)?;
path.into_os_string().into_string().map_err(|error| {
anyhow!(
"could not parse OS string into string: {}",
error.to_string_lossy()
)
})
}
async fn generate_initial_validator_config(&mut self) -> Result<()> {
let mut command = self.command_for_binary("linera-server").await?;
command.arg("generate");
if let Some(seed) = self.testing_prng_seed {
command.arg("--testing-prng-seed").arg(seed.to_string());
self.testing_prng_seed = Some(seed + 1);
}
command.arg("--validators");
for validator_number in 0..self.num_initial_validators {
command.arg(&self.configuration_string(validator_number)?);
}
command
.args(["--committee", "committee.json"])
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
    /// Brings up the local Kubernetes network: builds (or reuses) the Docker
    /// images, copies the genesis configuration into the Helm working
    /// directory, and initializes every kind cluster — image loading, Helm
    /// sync and proxy port-forwarding — concurrently, one future per
    /// validator.
    async fn run(&mut self) -> Result<()> {
        let github_root = get_github_root().await?;
        // Build Docker images
        let (docker_image_name, indexer_image_name, explorer_image_name) = if self.no_build {
            (
                self.docker_image_name.clone(),
                self.indexer_image_name.clone(),
                self.explorer_image_name.clone(),
            )
        } else {
            // Build the main image and, when block exporters are enabled, the
            // indexer and explorer images concurrently.
            let mut join_set = JoinSet::new();
            join_set.spawn(DockerImage::build(
                self.docker_image_name.clone(),
                self.binaries.clone(),
                github_root.clone(),
                self.build_mode.clone(),
                self.dual_store,
                Dockerfile::Main,
            ));
            if self.num_block_exporters > 0 {
                join_set.spawn(DockerImage::build(
                    self.indexer_image_name.clone(),
                    self.binaries.clone(),
                    github_root.clone(),
                    self.build_mode.clone(),
                    self.dual_store,
                    Dockerfile::Indexer,
                ));
                join_set.spawn(DockerImage::build(
                    self.explorer_image_name.clone(),
                    self.binaries.clone(),
                    github_root.clone(),
                    self.build_mode.clone(),
                    self.dual_store,
                    Dockerfile::Explorer,
                ));
            }
            // Fail if any of the builds failed.
            join_set
                .join_all()
                .await
                .into_iter()
                .collect::<Result<Vec<_>>>()?;
            (
                self.docker_image_name.clone(),
                self.indexer_image_name.clone(),
                self.explorer_image_name.clone(),
            )
        };
        let base_dir = github_root
            .join("kubernetes")
            .join("linera-validator")
            .join("working");
        fs_err::copy(
            self.path_provider.path().join("genesis.json"),
            base_dir.join("genesis.json"),
        )?;
        // Clone everything the per-validator futures capture by value.
        let kubectl_instance_clone = self.kubectl_instance.clone();
        let path_provider_path_clone = self.path_provider.path().to_path_buf();
        let num_proxies = self.num_proxies;
        let num_shards = self.num_shards;
        let mut validators_initialization_futures = Vec::new();
        for (validator_number, kind_cluster) in self.kind_clusters.iter().cloned().enumerate() {
            let base_dir = base_dir.clone();
            let github_root = github_root.clone();
            let kubectl_instance = kubectl_instance_clone.clone();
            let path_provider_path = path_provider_path_clone.clone();
            let docker_image_name = docker_image_name.clone();
            let indexer_image_name = indexer_image_name.clone();
            let explorer_image_name = explorer_image_name.clone();
            let dual_store = self.dual_store;
            let num_block_exporters = self.num_block_exporters;
            let future = async move {
                let cluster_id = kind_cluster.id();
                // Make the freshly built images available inside the cluster.
                kind_cluster.load_docker_image(&docker_image_name).await?;
                if num_block_exporters > 0 {
                    kind_cluster.load_docker_image(&indexer_image_name).await?;
                    kind_cluster.load_docker_image(&explorer_image_name).await?;
                }
                // Copy this validator's server configuration into the Helm
                // working directory.
                let server_config_filename = format!("server_{}.json", validator_number);
                fs_err::copy(
                    path_provider_path.join(&server_config_filename),
                    base_dir.join(&server_config_filename),
                )?;
                HelmFile::sync(
                    validator_number,
                    &github_root,
                    num_proxies,
                    num_shards,
                    cluster_id,
                    docker_image_name,
                    num_block_exporters > 0,
                    num_block_exporters,
                    indexer_image_name,
                    explorer_image_name,
                    dual_store,
                )
                .await?;
                let mut kubectl_instance = kubectl_instance.lock().await;
                let proxy_service = "svc/proxy";
                // Each validator's proxy is exposed on a distinct local port.
                let local_port = 19100 + validator_number;
                kubectl_instance.port_forward(
                    proxy_service,
                    &format!("{local_port}:19100"),
                    cluster_id,
                )?;
                Result::<(), anyhow::Error>::Ok(())
            };
            validators_initialization_futures.push(future);
        }
        // Run all per-validator initializations concurrently; fail on the
        // first error.
        future::join_all(validators_initialization_futures)
            .await
            .into_iter()
            .collect()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/docker.rs | linera-service/src/cli_wrappers/docker.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use anyhow::{Context, Result};
use linera_base::command::{current_binary_parent, CommandExt};
use pathdiff::diff_paths;
use tokio::process::Command;
use crate::cli_wrappers::local_kubernetes_net::BuildMode;
/// Selects which Dockerfile drives a given image build.
pub enum Dockerfile {
    /// Builds with `docker/Dockerfile`.
    Main,
    /// Builds with `docker/Dockerfile.indexer`.
    Indexer,
    /// Builds with `docker/Dockerfile.explorer`.
    Explorer,
}

impl Dockerfile {
    /// Returns the Dockerfile's path, relative to the repository root.
    pub fn path(&self) -> &'static str {
        match self {
            Self::Main => "docker/Dockerfile",
            Self::Indexer => "docker/Dockerfile.indexer",
            Self::Explorer => "docker/Dockerfile.explorer",
        }
    }
}
/// A built Docker image, identified by its name.
pub struct DockerImage {
    // The image name, as passed to `docker build -t`.
    name: String,
}
impl DockerImage {
    /// Returns the image name, as passed to `docker build -t`.
    pub fn name(&self) -> &String {
        &self.name
    }

    /// Builds the Docker image `name` using `dockerfile`, running
    /// `docker build` from the repository root `github_root`.
    ///
    /// For [`Dockerfile::Main`], `binaries` selects where the Linera binaries
    /// come from (a given directory, the directory of the current executable,
    /// or a build inside the container), and `build_mode` / `dual_store`
    /// tune the corresponding `--build-arg`s. Returns the image handle once
    /// the build succeeded.
    pub async fn build(
        name: String,
        binaries: BuildArg,
        github_root: PathBuf,
        build_mode: BuildMode,
        dual_store: bool,
        dockerfile: Dockerfile,
    ) -> Result<Self> {
        let mut command = Command::new("docker");
        command
            // Borrow instead of cloning: `github_root` is still needed below.
            .current_dir(&github_root)
            .arg("build")
            .args(["-f", dockerfile.path()]);
        if let Dockerfile::Main = dockerfile {
            let build_arg = match binaries {
                BuildArg::Directory(bin_path) => {
                    // Get the binaries from the specified path
                    let bin_path = diff_paths(bin_path, &github_root)
                        .context("Getting relative path failed")?;
                    let bin_path_str = bin_path.to_str().context("Getting str failed")?;
                    format!("binaries={bin_path_str}")
                }
                BuildArg::ParentDirectory => {
                    // Get the binaries from current_binary_parent
                    let parent_path = current_binary_parent()
                        .expect("Fetching current binaries path should not fail");
                    let bin_path = diff_paths(parent_path, &github_root)
                        .context("Getting relative path failed")?;
                    let bin_path_str = bin_path.to_str().context("Getting str failed")?;
                    format!("binaries={bin_path_str}")
                }
                BuildArg::Build => {
                    // Build inside the Docker container
                    let arch = std::env::consts::ARCH;
                    // Translate architecture for Docker build arg
                    let docker_arch = match arch {
                        "arm" => "aarch",
                        _ => arch,
                    };
                    format!("target={}-unknown-linux-gnu", docker_arch)
                }
            };
            command.args(["--build-arg", &build_arg]);
            match build_mode {
                // Release is the default, so no need to add any arguments
                BuildMode::Release => {}
                BuildMode::Debug => {
                    command.args(["--build-arg", "build_folder=debug"]);
                    command.args(["--build-arg", "build_flag="]);
                }
            }
            if dual_store {
                command.args(["--build-arg", "build_features=rocksdb,scylladb,metrics"]);
            }
            // Embed version metadata only in non-testing builds.
            #[cfg(not(with_testing))]
            command
                .args([
                    "--build-arg",
                    &format!(
                        "git_commit={}",
                        linera_version::VersionInfo::get()?.git_commit
                    ),
                ])
                .args([
                    "--build-arg",
                    &format!(
                        "build_date={}",
                        // Same format as $(TZ=UTC date)
                        chrono::Utc::now().format("%a %b %d %T UTC %Y")
                    ),
                ]);
        }
        command
            .arg(".")
            .args(["-t", &name])
            .spawn_and_wait()
            .await?;
        // Move `name` into the handle only once the build succeeded; this
        // avoids the redundant copy a `name.to_owned()` up front would make.
        Ok(Self { name })
    }
}
/// Which binaries to use in the Docker container.
#[derive(Clone)]
pub enum BuildArg {
    /// Build the binaries within the container.
    Build,
    /// Look for the binaries in the parent directory of the current binary.
    ParentDirectory,
    /// Look for the binaries in the specified path.
    Directory(PathBuf),
}

impl From<Option<Option<PathBuf>>> for BuildArg {
    /// Maps the optional CLI value to a build strategy: an explicit path
    /// means `Directory`, a bare flag means `ParentDirectory`, and absence
    /// means `Build`.
    fn from(arg: Option<Option<PathBuf>>) -> Self {
        match arg {
            Some(Some(path)) => Self::Directory(path),
            Some(None) => Self::ParentDirectory,
            None => Self::Build,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/kind.rs | linera-service/src/cli_wrappers/kind.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use linera_base::command::CommandExt;
use rand::Rng;
use tokio::process::Command;
/// A handle to a `kind` (Kubernetes-in-Docker) cluster.
#[derive(Clone)]
pub struct KindCluster {
    // Random id, also used as the `--name` argument for `kind` commands.
    id: u32,
}
impl KindCluster {
    /// Picks a random cluster id in `0..99999`.
    fn get_random_cluster_id() -> u32 {
        rand::thread_rng().gen_range(0..99999)
    }

    /// Creates a new `kind` cluster named after a freshly generated random id.
    pub async fn create() -> Result<Self> {
        let cluster = Self {
            id: Self::get_random_cluster_id(),
        };
        Command::new("kind")
            .args(["create", "cluster"])
            .args(["--name", cluster.id().to_string().as_str()])
            .spawn_and_wait()
            .await?;
        Ok(cluster)
    }

    /// Returns the cluster's id.
    pub fn id(&self) -> u32 {
        self.id
    }

    /// Deletes this cluster via `kind delete cluster`.
    pub async fn delete(&self) -> Result<()> {
        Command::new("kind")
            .args(["delete", "cluster"])
            .args(["--name", &self.id.to_string()])
            .spawn_and_wait()
            .await
    }

    /// Loads the given local Docker image into this cluster
    /// (`kind load docker-image`).
    pub async fn load_docker_image(&self, docker_image: &str) -> Result<()> {
        Command::new("kind")
            .args(["load", "docker-image", docker_image])
            .args(["--name", self.id.to_string().as_str()])
            .spawn_and_wait()
            .await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/util.rs | linera-service/src/cli_wrappers/util.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::path::PathBuf;
use anyhow::Result;
use linera_base::command::CommandExt;
use tokio::process::Command;
/// Returns the root of the enclosing git repository, as reported by
/// `git rev-parse --show-toplevel`.
///
/// # Errors
///
/// Fails if the `git` command cannot be spawned or exits unsuccessfully.
pub async fn get_github_root() -> Result<PathBuf> {
    let github_root = Command::new("git")
        .arg("rev-parse")
        .arg("--show-toplevel")
        .spawn_and_wait_for_stdout()
        .await?;
    // Trim the trailing newline (including a `\r` on Windows) instead of
    // panicking when it is absent; also avoids an extra allocation.
    Ok(PathBuf::from(github_root.trim_end()))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/mod.rs | linera-service/src/cli_wrappers/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper module to call the binaries of `linera-service` with appropriate command-line
//! arguments.
#[cfg(feature = "kubernetes")]
/// How to run Docker operations
pub mod docker;
#[cfg(feature = "kubernetes")]
/// How to run helmfile operations
mod helmfile;
#[cfg(feature = "kubernetes")]
/// How to run kind operations
mod kind;
#[cfg(feature = "kubernetes")]
/// How to run `kubectl` operations
mod kubectl;
#[cfg(feature = "kubernetes")]
/// How to run Linera validators locally as a Kubernetes deployment.
pub mod local_kubernetes_net;
/// How to run Linera validators locally as native processes.
pub mod local_net;
#[cfg(all(with_testing, feature = "remote-net"))]
/// How to connect to running GCP devnet.
pub mod remote_net;
#[cfg(feature = "kubernetes")]
/// Utility functions for the wrappers
mod util;
/// How to run a Linera wallet and its GraphQL service.
mod wallet;
use anyhow::Result;
use async_trait::async_trait;
pub use linera_faucet_client::Faucet;
#[cfg(with_testing)]
pub use wallet::NotificationsExt;
pub use wallet::{ApplicationWrapper, ClientWrapper, FaucetService, NodeService, OnClientDrop};
/// The information needed to start a Linera net of a particular kind.
#[async_trait]
pub trait LineraNetConfig {
    /// The concrete net type this configuration instantiates.
    type Net: LineraNet + Sized + Send + Sync + 'static;

    /// Starts the net and returns it together with a client wrapper for it.
    async fn instantiate(self) -> Result<(Self::Net, ClientWrapper)>;
}
/// A running Linera net.
#[async_trait]
pub trait LineraNet {
    /// Checks that the net is still running, returning an error otherwise.
    async fn ensure_is_running(&mut self) -> Result<()>;

    /// Creates a new client for this net.
    async fn make_client(&mut self) -> ClientWrapper;

    /// Shuts the net down.
    async fn terminate(&mut self) -> Result<()>;
}
/// Network protocol in use
#[derive(Copy, Clone)]
pub enum Network {
    Grpc,
    Grpcs,
    Tcp,
    Udp,
}

/// Network protocol in use outside and inside a Linera net.
#[derive(Copy, Clone)]
pub struct NetworkConfig {
    /// The internal network (e.g. proxy to validator)
    pub internal: Network,
    /// The external network (e.g. proxy to the exterior)
    pub external: Network,
}

impl Network {
    /// Renders the protocol as the TOML fragment used in configuration files.
    fn toml(&self) -> &'static str {
        match self {
            Self::Udp => "{ Simple = \"Udp\" }",
            Self::Tcp => "{ Simple = \"Tcp\" }",
            Self::Grpcs => "{ Grpc = \"Tls\" }",
            Self::Grpc => "{ Grpc = \"ClearText\" }",
        }
    }

    /// Returns the short lowercase name of the protocol.
    pub fn short(&self) -> &'static str {
        match self {
            Self::Udp => "udp",
            Self::Tcp => "tcp",
            Self::Grpcs => "grpcs",
            Self::Grpc => "grpc",
        }
    }

    /// Maps the TLS gRPC variant to its clear-text equivalent; all other
    /// protocols are returned unchanged.
    pub fn drop_tls(&self) -> Self {
        match self {
            Self::Grpcs => Self::Grpc,
            Self::Grpc => Self::Grpc,
            Self::Tcp => Self::Tcp,
            Self::Udp => Self::Udp,
        }
    }

    /// Returns the host name to use for local connections.
    pub fn localhost(&self) -> &'static str {
        match self {
            Self::Grpc | Self::Grpcs => "localhost",
            Self::Tcp | Self::Udp => "127.0.0.1",
        }
    }

    /// Returns the protocol schema to use in the node address tuple.
    pub fn schema(&self) -> &'static str {
        match self {
            Self::Grpc | Self::Grpcs => "grpc",
            Self::Tcp => "tcp",
            Self::Udp => "udp",
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/wallet.rs | linera-service/src/cli_wrappers/wallet.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
borrow::Cow,
collections::BTreeMap,
env,
marker::PhantomData,
mem,
path::{Path, PathBuf},
pin::Pin,
process::Stdio,
str::FromStr,
sync,
time::Duration,
};
use anyhow::{bail, ensure, Context, Result};
use async_graphql::InputType;
use async_tungstenite::tungstenite::{client::IntoClientRequest as _, http::HeaderValue};
use futures::{SinkExt as _, Stream, StreamExt as _, TryStreamExt as _};
use heck::ToKebabCase;
use linera_base::{
abi::ContractAbi,
command::{resolve_binary, CommandExt},
crypto::{CryptoHash, InMemorySigner},
data_types::{Amount, BlockHeight, Bytecode, Epoch},
identifiers::{
Account, AccountOwner, ApplicationId, ChainId, IndexAndEvent, ModuleId, StreamId,
},
vm::VmRuntime,
};
use linera_client::client_options::ResourceControlPolicyConfig;
use linera_core::worker::Notification;
use linera_execution::committee::Committee;
use linera_faucet_client::Faucet;
use serde::{de::DeserializeOwned, ser::Serialize};
use serde_command_opts::to_args;
use serde_json::{json, Value};
use tempfile::TempDir;
use tokio::{
io::{AsyncBufReadExt, BufReader},
process::{Child, Command},
sync::oneshot,
task::JoinHandle,
};
#[cfg(with_testing)]
use {
futures::FutureExt as _,
linera_core::worker::Reason,
std::{collections::BTreeSet, future::Future},
};
use crate::{
cli::command::BenchmarkCommand,
cli_wrappers::{
local_net::{PathProvider, ProcessInbox},
Network,
},
util::{self, ChildExt},
wallet::Wallet,
};
/// The name of the environment variable that allows specifying additional arguments to be passed
/// to the node-service command of the client.
const CLIENT_SERVICE_ENV: &str = "LINERA_CLIENT_SERVICE_PARAMS";

/// Builds an HTTP client with a 30-second request timeout.
fn reqwest_client() -> reqwest::Client {
    reqwest::ClientBuilder::new()
        .timeout(Duration::from_secs(30))
        .build()
        .unwrap()
}
/// Wrapper to run a Linera client command.
pub struct ClientWrapper {
    // Lazily resolved path to the `linera` binary, cached after first use.
    binary_path: sync::Mutex<Option<PathBuf>>,
    // Seed forwarded via `--testing-prng-seed` when set.
    testing_prng_seed: Option<u64>,
    // Storage specification, e.g. `rocksdb:<dir>/client_<id>.db`.
    storage: String,
    // Wallet file name (`wallet_<id>.json`).
    wallet: String,
    // Keystore file name (`keystore_<id>.json`).
    keystore: String,
    // Value passed as `--max-pending-message-bundles`.
    max_pending_message_bundles: usize,
    // Network protocol in use (see `Network`).
    network: Network,
    pub path_provider: PathProvider,
    // What to do with the wallet's chains when this wrapper is dropped.
    on_drop: OnClientDrop,
    // Extra command-line arguments appended to every invocation.
    extra_args: Vec<String>,
}
/// Action to perform when the [`ClientWrapper`] is dropped.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum OnClientDrop {
/// Close all the chains on the wallet.
CloseChains,
/// Do not close any chains, leaving them active.
LeakChains,
}
impl ClientWrapper {
    /// Creates a client wrapper with the default extra arguments
    /// (`--wait-for-outgoing-messages`).
    pub fn new(
        path_provider: PathProvider,
        network: Network,
        testing_prng_seed: Option<u64>,
        id: usize,
        on_drop: OnClientDrop,
    ) -> Self {
        Self::new_with_extra_args(
            path_provider,
            network,
            testing_prng_seed,
            id,
            on_drop,
            vec!["--wait-for-outgoing-messages".to_string()],
        )
    }
pub fn new_with_extra_args(
path_provider: PathProvider,
network: Network,
testing_prng_seed: Option<u64>,
id: usize,
on_drop: OnClientDrop,
extra_args: Vec<String>,
) -> Self {
let storage = format!(
"rocksdb:{}/client_{}.db",
path_provider.path().display(),
id
);
let wallet = format!("wallet_{}.json", id);
let keystore = format!("keystore_{}.json", id);
Self {
binary_path: sync::Mutex::new(None),
testing_prng_seed,
storage,
wallet,
keystore,
max_pending_message_bundles: 10_000,
network,
path_provider,
on_drop,
extra_args,
}
}
/// Runs `linera project new`.
pub async fn project_new(&self, project_name: &str, linera_root: &Path) -> Result<TempDir> {
let tmp = TempDir::new()?;
let mut command = self.command().await?;
command
.current_dir(tmp.path())
.arg("project")
.arg("new")
.arg(project_name)
.arg("--linera-root")
.arg(linera_root)
.spawn_and_wait_for_stdout()
.await?;
Ok(tmp)
}
/// Runs `linera project publish`.
pub async fn project_publish<T: Serialize>(
&self,
path: PathBuf,
required_application_ids: Vec<String>,
publisher: impl Into<Option<ChainId>>,
argument: &T,
) -> Result<String> {
let json_parameters = serde_json::to_string(&())?;
let json_argument = serde_json::to_string(argument)?;
let mut command = self.command().await?;
command
.arg("project")
.arg("publish-and-create")
.arg(path)
.args(publisher.into().iter().map(ChainId::to_string))
.args(["--json-parameters", &json_parameters])
.args(["--json-argument", &json_argument]);
if !required_application_ids.is_empty() {
command.arg("--required-application-ids");
command.args(required_application_ids);
}
let stdout = command.spawn_and_wait_for_stdout().await?;
Ok(stdout.trim().to_string())
}
/// Runs `linera project test`.
pub async fn project_test(&self, path: &Path) -> Result<()> {
self.command()
.await
.context("failed to create project test command")?
.current_dir(path)
.arg("project")
.arg("test")
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
async fn command_with_envs_and_arguments(
&self,
envs: &[(&str, &str)],
arguments: impl IntoIterator<Item = Cow<'_, str>>,
) -> Result<Command> {
let mut command = self.command_binary().await?;
command.current_dir(self.path_provider.path());
for (key, value) in envs {
command.env(key, value);
}
for argument in arguments {
command.arg(&*argument);
}
Ok(command)
}
async fn command_with_envs(&self, envs: &[(&str, &str)]) -> Result<Command> {
self.command_with_envs_and_arguments(envs, self.command_arguments())
.await
}
async fn command_with_arguments(
&self,
arguments: impl IntoIterator<Item = Cow<'_, str>>,
) -> Result<Command> {
self.command_with_envs_and_arguments(
&[(
"RUST_LOG",
&std::env::var("RUST_LOG").unwrap_or(String::from("linera=debug")),
)],
arguments,
)
.await
}
async fn command(&self) -> Result<Command> {
self.command_with_envs(&[(
"RUST_LOG",
&std::env::var("RUST_LOG").unwrap_or(String::from("linera=debug")),
)])
.await
}
fn required_command_arguments(&self) -> impl Iterator<Item = Cow<'_, str>> + '_ {
[
"--wallet".into(),
self.wallet.as_str().into(),
"--keystore".into(),
self.keystore.as_str().into(),
"--storage".into(),
self.storage.as_str().into(),
"--send-timeout-ms".into(),
"500000".into(),
"--recv-timeout-ms".into(),
"500000".into(),
]
.into_iter()
.chain(self.extra_args.iter().map(|s| s.as_str().into()))
}
/// Returns an iterator over the arguments that should be added to all command invocations.
fn command_arguments(&self) -> impl Iterator<Item = Cow<'_, str>> + '_ {
self.required_command_arguments().chain([
"--max-pending-message-bundles".into(),
self.max_pending_message_bundles.to_string().into(),
])
}
/// Returns the [`Command`] instance configured to run the appropriate binary.
///
/// The path is resolved once and cached inside `self` for subsequent usages.
async fn command_binary(&self) -> Result<Command> {
match self.command_with_cached_binary_path() {
Some(command) => Ok(command),
None => {
let resolved_path = resolve_binary("linera", env!("CARGO_PKG_NAME")).await?;
let command = Command::new(&resolved_path);
self.set_cached_binary_path(resolved_path);
Ok(command)
}
}
}
/// Returns a [`Command`] instance configured with the cached `binary_path`, if available.
fn command_with_cached_binary_path(&self) -> Option<Command> {
let binary_path = self.binary_path.lock().unwrap();
binary_path.as_ref().map(Command::new)
}
/// Sets the cached `binary_path` with the `new_binary_path`.
///
/// # Panics
///
/// If the cache is already set to a different value. In theory the two threads calling
/// `command_binary` can race and resolve the binary path twice, but they should always be the
/// same path.
fn set_cached_binary_path(&self, new_binary_path: PathBuf) {
let mut binary_path = self.binary_path.lock().unwrap();
if binary_path.is_none() {
*binary_path = Some(new_binary_path);
} else {
assert_eq!(*binary_path, Some(new_binary_path));
}
}
/// Runs `linera create-genesis-config`.
pub async fn create_genesis_config(
&self,
num_other_initial_chains: u32,
initial_funding: Amount,
policy_config: ResourceControlPolicyConfig,
http_allow_list: Option<Vec<String>>,
) -> Result<()> {
let mut command = self.command().await?;
command
.args([
"create-genesis-config",
&num_other_initial_chains.to_string(),
])
.args(["--initial-funding", &initial_funding.to_string()])
.args(["--committee", "committee.json"])
.args(["--genesis", "genesis.json"])
.args([
"--policy-config",
&policy_config.to_string().to_kebab_case(),
]);
if let Some(allow_list) = http_allow_list {
command
.arg("--http-request-allow-list")
.arg(allow_list.join(","));
}
if let Some(seed) = self.testing_prng_seed {
command.arg("--testing-prng-seed").arg(seed.to_string());
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera wallet init`. The genesis config is read from `genesis.json`, or from the
/// faucet if provided.
pub async fn wallet_init(&self, faucet: Option<&'_ Faucet>) -> Result<()> {
let mut command = self.command().await?;
command.args(["wallet", "init"]);
match faucet {
None => command.args(["--genesis", "genesis.json"]),
Some(faucet) => command.args(["--faucet", faucet.url()]),
};
if let Some(seed) = self.testing_prng_seed {
command.arg("--testing-prng-seed").arg(seed.to_string());
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera wallet request-chain`.
pub async fn request_chain(
&self,
faucet: &Faucet,
set_default: bool,
) -> Result<(ChainId, AccountOwner)> {
let mut command = self.command().await?;
command.args(["wallet", "request-chain", "--faucet", faucet.url()]);
if set_default {
command.arg("--set-default");
}
let stdout = command.spawn_and_wait_for_stdout().await?;
let mut lines = stdout.split_whitespace();
let chain_id: ChainId = lines.next().context("missing chain ID")?.parse()?;
let owner = lines.next().context("missing chain owner")?.parse()?;
Ok((chain_id, owner))
}
/// Runs `linera wallet publish-and-create`.
#[expect(clippy::too_many_arguments)]
pub async fn publish_and_create<
A: ContractAbi,
Parameters: Serialize,
InstantiationArgument: Serialize,
>(
&self,
contract: PathBuf,
service: PathBuf,
vm_runtime: VmRuntime,
parameters: &Parameters,
argument: &InstantiationArgument,
required_application_ids: &[ApplicationId],
publisher: impl Into<Option<ChainId>>,
) -> Result<ApplicationId<A>> {
let json_parameters = serde_json::to_string(parameters)?;
let json_argument = serde_json::to_string(argument)?;
let mut command = self.command().await?;
let vm_runtime = format!("{}", vm_runtime);
command
.arg("publish-and-create")
.args([contract, service])
.args(["--vm-runtime", &vm_runtime.to_lowercase()])
.args(publisher.into().iter().map(ChainId::to_string))
.args(["--json-parameters", &json_parameters])
.args(["--json-argument", &json_argument]);
if !required_application_ids.is_empty() {
command.arg("--required-application-ids");
command.args(
required_application_ids
.iter()
.map(ApplicationId::to_string),
);
}
let stdout = command.spawn_and_wait_for_stdout().await?;
Ok(stdout.trim().parse::<ApplicationId>()?.with_abi())
}
/// Runs `linera publish-module`.
pub async fn publish_module<Abi, Parameters, InstantiationArgument>(
&self,
contract: PathBuf,
service: PathBuf,
vm_runtime: VmRuntime,
publisher: impl Into<Option<ChainId>>,
) -> Result<ModuleId<Abi, Parameters, InstantiationArgument>> {
let stdout = self
.command()
.await?
.arg("publish-module")
.args([contract, service])
.args(["--vm-runtime", &format!("{}", vm_runtime).to_lowercase()])
.args(publisher.into().iter().map(ChainId::to_string))
.spawn_and_wait_for_stdout()
.await?;
let module_id: ModuleId = stdout.trim().parse()?;
Ok(module_id.with_abi())
}
/// Runs `linera create-application`.
pub async fn create_application<
Abi: ContractAbi,
Parameters: Serialize,
InstantiationArgument: Serialize,
>(
&self,
module_id: &ModuleId<Abi, Parameters, InstantiationArgument>,
parameters: &Parameters,
argument: &InstantiationArgument,
required_application_ids: &[ApplicationId],
creator: impl Into<Option<ChainId>>,
) -> Result<ApplicationId<Abi>> {
let json_parameters = serde_json::to_string(parameters)?;
let json_argument = serde_json::to_string(argument)?;
let mut command = self.command().await?;
command
.arg("create-application")
.arg(module_id.forget_abi().to_string())
.args(["--json-parameters", &json_parameters])
.args(["--json-argument", &json_argument])
.args(creator.into().iter().map(ChainId::to_string));
if !required_application_ids.is_empty() {
command.arg("--required-application-ids");
command.args(
required_application_ids
.iter()
.map(ApplicationId::to_string),
);
}
let stdout = command.spawn_and_wait_for_stdout().await?;
Ok(stdout.trim().parse::<ApplicationId>()?.with_abi())
}
/// Runs `linera service`.
pub async fn run_node_service(
&self,
port: impl Into<Option<u16>>,
process_inbox: ProcessInbox,
) -> Result<NodeService> {
self.run_node_service_with_options(port, process_inbox, &[], &[])
.await
}
/// Runs `linera service` with optional task processor configuration.
    /// Runs `linera service` with optional task processor configuration.
    ///
    /// `port` defaults to 8080. Each entry of `operator_application_ids` and
    /// `operators` is forwarded as a repeated `--operator-application-ids`
    /// resp. `--operators name=path` flag, and extra arguments can be
    /// injected via the `LINERA_CLIENT_SERVICE_PARAMS` environment variable.
    /// Polls the service over HTTP with increasing delays and gives up after
    /// ten attempts.
    pub async fn run_node_service_with_options(
        &self,
        port: impl Into<Option<u16>>,
        process_inbox: ProcessInbox,
        operator_application_ids: &[ApplicationId],
        operators: &[(String, PathBuf)],
    ) -> Result<NodeService> {
        let port = port.into().unwrap_or(8080);
        let mut command = self.command().await?;
        command.arg("service");
        if let ProcessInbox::Skip = process_inbox {
            command.arg("--listener-skip-process-inbox");
        }
        // Allow callers to inject extra service parameters via the environment.
        if let Ok(var) = env::var(CLIENT_SERVICE_ENV) {
            command.args(var.split_whitespace());
        }
        for app_id in operator_application_ids {
            command.args(["--operator-application-ids", &app_id.to_string()]);
        }
        for (name, path) in operators {
            command.args(["--operators", &format!("{}={}", name, path.display())]);
        }
        let child = command
            .args(["--port".to_string(), port.to_string()])
            .spawn_into()?;
        let client = reqwest_client();
        // Wait for the service to come up: sleep 0s, 1s, ..., 9s between probes.
        for i in 0..10 {
            linera_base::time::timer::sleep(Duration::from_secs(i)).await;
            let request = client
                .get(format!("http://localhost:{}/", port))
                .send()
                .await;
            if request.is_ok() {
                tracing::info!("Node service has started");
                return Ok(NodeService::new(port, child));
            } else {
                tracing::warn!("Waiting for node service to start");
            }
        }
        bail!("Failed to start node service");
    }
/// Runs `linera validator query`
pub async fn query_validator(&self, address: &str) -> Result<CryptoHash> {
let mut command = self.command().await?;
command.arg("validator").arg("query").arg(address);
let stdout = command.spawn_and_wait_for_stdout().await?;
// Parse the genesis config hash from the output.
// It's on a line like "Genesis config hash: <hash>"
let hash = stdout
.lines()
.find_map(|line| {
line.strip_prefix("Genesis config hash: ")
.and_then(|hash_str| hash_str.trim().parse().ok())
})
.context("error while parsing the result of `linera validator query`")?;
Ok(hash)
}
/// Runs `linera validator list`.
pub async fn query_validators(&self, chain_id: Option<ChainId>) -> Result<()> {
let mut command = self.command().await?;
command.arg("validator").arg("list");
if let Some(chain_id) = chain_id {
command.args(["--chain-id", &chain_id.to_string()]);
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera sync-validator`.
pub async fn sync_validator(
&self,
chain_ids: impl IntoIterator<Item = &ChainId>,
validator_address: impl Into<String>,
) -> Result<()> {
let mut command = self.command().await?;
command
.arg("validator")
.arg("sync")
.arg(validator_address.into());
let mut chain_ids = chain_ids.into_iter().peekable();
if chain_ids.peek().is_some() {
command
.arg("--chains")
.args(chain_ids.map(ChainId::to_string));
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera faucet`.
pub async fn run_faucet(
&self,
port: impl Into<Option<u16>>,
chain_id: Option<ChainId>,
amount: Amount,
) -> Result<FaucetService> {
let port = port.into().unwrap_or(8080);
let temp_dir = tempfile::tempdir()
.context("Failed to create temporary directory for faucet storage")?;
let storage_path = temp_dir.path().join("faucet_storage.sqlite");
let mut command = self.command().await?;
let command = command
.arg("faucet")
.args(["--port".to_string(), port.to_string()])
.args(["--amount".to_string(), amount.to_string()])
.args([
"--storage-path".to_string(),
storage_path.to_string_lossy().to_string(),
]);
if let Some(chain_id) = chain_id {
command.arg(chain_id.to_string());
}
let child = command.spawn_into()?;
let client = reqwest_client();
for i in 0..10 {
linera_base::time::timer::sleep(Duration::from_secs(i)).await;
let request = client
.get(format!("http://localhost:{}/", port))
.send()
.await;
if request.is_ok() {
tracing::info!("Faucet has started");
return Ok(FaucetService::new(port, child, temp_dir));
} else {
tracing::debug!("Waiting for faucet to start");
}
}
bail!("Failed to start faucet");
}
/// Runs `linera local-balance`.
pub async fn local_balance(&self, account: Account) -> Result<Amount> {
let stdout = self
.command()
.await?
.arg("local-balance")
.arg(account.to_string())
.spawn_and_wait_for_stdout()
.await?;
let amount = stdout
.trim()
.parse()
.context("error while parsing the result of `linera local-balance`")?;
Ok(amount)
}
/// Runs `linera query-balance`.
pub async fn query_balance(&self, account: Account) -> Result<Amount> {
let stdout = self
.command()
.await?
.arg("query-balance")
.arg(account.to_string())
.spawn_and_wait_for_stdout()
.await?;
let amount = stdout
.trim()
.parse()
.context("error while parsing the result of `linera query-balance`")?;
Ok(amount)
}
/// Runs `linera sync`.
pub async fn sync(&self, chain_id: ChainId) -> Result<()> {
self.command()
.await?
.arg("sync")
.arg(chain_id.to_string())
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
/// Runs `linera process-inbox`.
pub async fn process_inbox(&self, chain_id: ChainId) -> Result<()> {
self.command()
.await?
.arg("process-inbox")
.arg(chain_id.to_string())
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
/// Runs `linera transfer`.
pub async fn transfer(&self, amount: Amount, from: ChainId, to: ChainId) -> Result<()> {
self.command()
.await?
.arg("transfer")
.arg(amount.to_string())
.args(["--from", &from.to_string()])
.args(["--to", &to.to_string()])
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
/// Runs `linera transfer` with no logging.
pub async fn transfer_with_silent_logs(
&self,
amount: Amount,
from: ChainId,
to: ChainId,
) -> Result<()> {
self.command()
.await?
.env("RUST_LOG", "off")
.arg("transfer")
.arg(amount.to_string())
.args(["--from", &from.to_string()])
.args(["--to", &to.to_string()])
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
/// Runs `linera transfer` with owner accounts.
pub async fn transfer_with_accounts(
&self,
amount: Amount,
from: Account,
to: Account,
) -> Result<()> {
self.command()
.await?
.arg("transfer")
.arg(amount.to_string())
.args(["--from", &from.to_string()])
.args(["--to", &to.to_string()])
.spawn_and_wait_for_stdout()
.await?;
Ok(())
}
    /// Translates `args` into CLI arguments for `linera benchmark` and adds
    /// them to `command`.
    ///
    /// `to_args` serializes the command as a flat list starting with the
    /// subcommand name followed by an "options" marker and then key/value
    /// pairs; the pairs become `--key value` flags, with boolean values
    /// rendered as a bare `--key` for "true" and omitted entirely for
    /// "false". Also sets `--max-pending-message-bundles` from the
    /// benchmark's transactions-per-block setting.
    fn benchmark_command_internal(command: &mut Command, args: BenchmarkCommand) -> Result<()> {
        let mut formatted_args = to_args(&args)?;
        let subcommand = formatted_args.remove(0);
        // The subcommand is followed by the flattened options, which are preceded by "options".
        // So remove that as well.
        formatted_args.remove(0);
        let options = formatted_args
            .chunks_exact(2)
            .flat_map(|pair| {
                let option = format!("--{}", pair[0]);
                match pair[1].as_str() {
                    "true" => vec![option],
                    "false" => vec![],
                    _ => vec![option, pair[1].clone()],
                }
            })
            .collect::<Vec<_>>();
        command
            .args([
                "--max-pending-message-bundles",
                &args.transactions_per_block().to_string(),
            ])
            .arg("benchmark")
            .arg(subcommand)
            .args(options);
        Ok(())
    }
async fn benchmark_command_with_envs(
&self,
args: BenchmarkCommand,
envs: &[(&str, &str)],
) -> Result<Command> {
let mut command = self
.command_with_envs_and_arguments(envs, self.required_command_arguments())
.await?;
Self::benchmark_command_internal(&mut command, args)?;
Ok(command)
}
async fn benchmark_command(&self, args: BenchmarkCommand) -> Result<Command> {
let mut command = self
.command_with_arguments(self.required_command_arguments())
.await?;
Self::benchmark_command_internal(&mut command, args)?;
Ok(command)
}
/// Runs `linera benchmark`.
pub async fn benchmark(&self, args: BenchmarkCommand) -> Result<()> {
let mut command = self.benchmark_command(args).await?;
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
    /// Runs `linera benchmark`, but detached: don't wait for the command to finish, just spawn it
    /// and return the child process, and the handles to the stdout and stderr.
    ///
    /// The `tx` one-shot channel is fired exactly once, when the benchmark
    /// prints "Ready to start benchmark" on stderr.
    pub async fn benchmark_detached(
        &self,
        args: BenchmarkCommand,
        tx: oneshot::Sender<()>,
    ) -> Result<(Child, JoinHandle<()>, JoinHandle<()>)> {
        // Spawn with all three standard streams piped so that output can be
        // forwarded; `kill_on_drop` ensures the process dies with the `Child`.
        let mut child = self
            .benchmark_command_with_envs(args, &[("RUST_LOG", "linera=info")])
            .await?
            .kill_on_drop(true)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()?;
        let pid = child.id().expect("failed to get pid");
        // Forward every stdout line, prefixed with the benchmark's PID.
        let stdout = child.stdout.take().expect("stdout not open");
        let stdout_handle = tokio::spawn(async move {
            let mut lines = BufReader::new(stdout).lines();
            while let Ok(Some(line)) = lines.next_line().await {
                println!("benchmark{{pid={pid}}} {line}");
            }
        });
        // Watch stderr for the readiness marker. The first occurrence fires
        // the one-shot signal (`tx.take()` guarantees it is sent only once);
        // every other line is forwarded like stdout.
        let stderr = child.stderr.take().expect("stderr not open");
        let stderr_handle = tokio::spawn(async move {
            let mut lines = BufReader::new(stderr).lines();
            let mut tx = Some(tx);
            while let Ok(Some(line)) = lines.next_line().await {
                if line.contains("Ready to start benchmark") {
                    tx.take()
                        .expect("Should only send signal once")
                        .send(())
                        .expect("failed to send ready signal to main thread");
                } else {
                    println!("benchmark{{pid={pid}}} {line}");
                }
            }
        });
        Ok((child, stdout_handle, stderr_handle))
    }
async fn open_chain_internal(
&self,
from: ChainId,
owner: Option<AccountOwner>,
initial_balance: Amount,
super_owner: bool,
) -> Result<(ChainId, AccountOwner)> {
let mut command = self.command().await?;
command
.arg("open-chain")
.args(["--from", &from.to_string()])
.args(["--initial-balance", &initial_balance.to_string()]);
if let Some(owner) = owner {
command.args(["--owner", &owner.to_string()]);
}
if super_owner {
command.arg("--super-owner");
}
let stdout = command.spawn_and_wait_for_stdout().await?;
let mut split = stdout.split('\n');
let chain_id = ChainId::from_str(split.next().context("no chain ID in output")?)?;
let new_owner = AccountOwner::from_str(split.next().context("no owner in output")?)?;
if let Some(owner) = owner {
assert_eq!(owner, new_owner);
}
Ok((chain_id, new_owner))
}
/// Runs `linera open-chain --super-owner`.
pub async fn open_chain_super_owner(
&self,
from: ChainId,
owner: Option<AccountOwner>,
initial_balance: Amount,
) -> Result<(ChainId, AccountOwner)> {
self.open_chain_internal(from, owner, initial_balance, true)
.await
}
/// Runs `linera open-chain`.
pub async fn open_chain(
&self,
from: ChainId,
owner: Option<AccountOwner>,
initial_balance: Amount,
) -> Result<(ChainId, AccountOwner)> {
self.open_chain_internal(from, owner, initial_balance, false)
.await
}
/// Runs `linera open-chain` then `linera assign`.
pub async fn open_and_assign(
&self,
client: &ClientWrapper,
initial_balance: Amount,
) -> Result<ChainId> {
let our_chain = self
.load_wallet()?
.default_chain()
.context("no default chain found")?;
let owner = client.keygen().await?;
let (new_chain, _) = self
.open_chain(our_chain, Some(owner), initial_balance)
.await?;
client.assign(owner, new_chain).await?;
Ok(new_chain)
}
pub async fn open_multi_owner_chain(
&self,
from: ChainId,
owners: Vec<AccountOwner>,
weights: Vec<u64>,
multi_leader_rounds: u32,
balance: Amount,
base_timeout_ms: u64,
) -> Result<ChainId> {
let mut command = self.command().await?;
command
.arg("open-multi-owner-chain")
.args(["--from", &from.to_string()])
.arg("--owners")
.args(owners.iter().map(AccountOwner::to_string))
.args(["--base-timeout-ms", &base_timeout_ms.to_string()]);
if !weights.is_empty() {
command
.arg("--owner-weights")
.args(weights.iter().map(u64::to_string));
};
command
.args(["--multi-leader-rounds", &multi_leader_rounds.to_string()])
.args(["--initial-balance", &balance.to_string()]);
let stdout = command.spawn_and_wait_for_stdout().await?;
let mut split = stdout.split('\n');
let chain_id = ChainId::from_str(split.next().context("no chain ID in output")?)?;
Ok(chain_id)
}
pub async fn change_ownership(
&self,
chain_id: ChainId,
super_owners: Vec<AccountOwner>,
owners: Vec<AccountOwner>,
) -> Result<()> {
let mut command = self.command().await?;
command
.arg("change-ownership")
.args(["--chain-id", &chain_id.to_string()]);
if !super_owners.is_empty() {
command
.arg("--super-owners")
.args(super_owners.iter().map(AccountOwner::to_string));
}
if !owners.is_empty() {
command
.arg("--owners")
.args(owners.iter().map(AccountOwner::to_string));
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera wallet follow-chain CHAIN_ID`.
pub async fn follow_chain(&self, chain_id: ChainId, sync: bool) -> Result<()> {
let mut command = self.command().await?;
command
.args(["wallet", "follow-chain"])
.arg(chain_id.to_string());
if sync {
command.arg("--sync");
}
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera wallet forget-chain CHAIN_ID`.
pub async fn forget_chain(&self, chain_id: ChainId) -> Result<()> {
let mut command = self.command().await?;
command
.args(["wallet", "forget-chain"])
.arg(chain_id.to_string());
command.spawn_and_wait_for_stdout().await?;
Ok(())
}
/// Runs `linera wallet set-default CHAIN_ID`.
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/kubectl.rs | linera-service/src/cli_wrappers/kubectl.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use anyhow::{Context, Result};
use tokio::process::{Child, Command};
/// Wrapper around `kubectl` invocations that tracks spawned port-forward
/// child processes.
pub struct KubectlInstance {
    /// Children spawned by [`Self::port_forward`], kept so they can be
    /// managed or killed later.
    pub port_forward_children: Vec<Child>,
}

impl KubectlInstance {
    /// Creates an instance tracking the given port-forward children.
    pub fn new(port_forward_children: Vec<Child>) -> Self {
        Self {
            port_forward_children,
        }
    }

    /// Spawns `kubectl port-forward RESOURCE PORTS --context kind-CLUSTER_ID`
    /// and records the child process.
    pub fn port_forward(&mut self, resource: &str, ports: &str, cluster_id: u32) -> Result<()> {
        let mut command = Command::new("kubectl");
        command
            .arg("port-forward")
            .arg(resource)
            .arg(ports)
            .args(["--context", &format!("kind-{}", cluster_id)]);
        let child = command.spawn().context("Port forwarding failed")?;
        self.port_forward_children.push(child);
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/remote_net.rs | linera-service/src/cli_wrappers/remote_net.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{env, sync::Arc};
use anyhow::Result;
use async_trait::async_trait;
use linera_base::data_types::Amount;
use linera_persistent::{self as persistent, Persist};
use tempfile::{tempdir, TempDir};
use super::{
local_net::PathProvider, ClientWrapper, Faucet, LineraNet, LineraNetConfig, Network,
OnClientDrop,
};
/// Configuration for running tests against an externally-provided Linera network.
pub struct RemoteNetTestingConfig {
    /// Faucet used to obtain the initial chain and tokens.
    faucet: Faucet,
    /// Whether chains should be closed when their client is dropped.
    close_chains: OnClientDrop,
}

impl RemoteNetTestingConfig {
    /// Creates a new [`RemoteNetTestingConfig`] for running tests with an external Linera
    /// network.
    ///
    /// The faucet URL is obtained from the `LINERA_FAUCET_URL` environment variable.
    /// If `close_chains` is true, chains will be closed on drop, otherwise they will be left active.
    pub fn new(close_chains: OnClientDrop) -> Self {
        let faucet_url = env::var("LINERA_FAUCET_URL")
            .expect("Missing `LINERA_FAUCET_URL` environment variable");
        Self {
            faucet: Faucet::new(faucet_url),
            close_chains,
        }
    }
}
#[async_trait]
impl LineraNetConfig for RemoteNetTestingConfig {
    type Net = RemoteNet;

    async fn instantiate(self) -> Result<(Self::Net, ClientWrapper)> {
        let mut net = RemoteNet::new(None, &self.faucet, self.close_chains)
            .await
            .expect("Creating RemoteNet should not fail");
        let client = net.make_client().await;
        // Initialize the wallet and claim the first chain from the faucet.
        // NOTE(review): an earlier comment claimed the tests assume "2 chains
        // with 10 tokens each", which does not match the code below (two more
        // chains opened with 100 tokens each) — confirm the intended chain
        // count and balances against the tests.
        client.wallet_init(Some(&self.faucet)).await?;
        client.request_chain(&self.faucet, true).await?;
        // Open two additional chains funded from the first one.
        for _ in 0..2 {
            client
                .open_and_assign(&client, Amount::from_tokens(100))
                .await
                .unwrap();
        }
        Ok((net, client))
    }
}
/// Remote net
#[derive(Clone)]
pub struct RemoteNet {
    // Protocol used to reach the remote validators; set to gRPC in `new`.
    network: Network,
    // Seed for deterministic key generation; bumped once per created client.
    testing_prng_seed: Option<u64>,
    // Monotonically increasing id handed to each new `ClientWrapper`.
    next_client_id: usize,
    // Temporary directory holding the cached `genesis.json`; shared with clients.
    tmp_dir: Arc<TempDir>,
    // Whether clients close their chains when dropped.
    close_chains: OnClientDrop,
}
#[async_trait]
impl LineraNet for RemoteNet {
    async fn ensure_is_running(&mut self) -> Result<()> {
        // Leaving this just returning for now.
        // We would have to connect to each validator in the remote net then run
        // ensure_connected_cluster_is_running
        Ok(())
    }

    async fn make_client(&mut self) -> ClientWrapper {
        let client = ClientWrapper::new(
            PathProvider::TemporaryDirectory {
                tmp_dir: self.tmp_dir.clone(),
            },
            self.network,
            self.testing_prng_seed,
            self.next_client_id,
            self.close_chains,
        );
        // Advance the deterministic seed (when set) and the client id so the
        // next client gets distinct values.
        if let Some(seed) = self.testing_prng_seed {
            self.testing_prng_seed = Some(seed + 1);
        }
        self.next_client_id += 1;
        client
    }

    async fn terminate(&mut self) -> Result<()> {
        // We're not killing the remote net :)
        Ok(())
    }
}
impl RemoteNet {
    /// Connects to an existing remote network, caching its genesis
    /// configuration (fetched from the faucet) in a temporary directory.
    async fn new(
        testing_prng_seed: Option<u64>,
        faucet: &Faucet,
        close_chains: OnClientDrop,
    ) -> Result<Self> {
        let tmp_dir = Arc::new(tempdir()?);
        // Persist the genesis config to disk so clients can read it.
        let genesis = faucet.genesis_config().await?;
        persistent::File::new(tmp_dir.path().join("genesis.json").as_path(), genesis)?
            .persist()
            .await?;
        Ok(Self {
            network: Network::Grpc,
            testing_prng_seed,
            next_client_id: 0,
            tmp_dir,
            close_chains,
        })
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/local_net.rs | linera-service/src/cli_wrappers/local_net.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(with_testing)]
use std::sync::LazyLock;
use std::{
collections::BTreeMap,
env,
num::NonZeroU16,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use anyhow::{anyhow, bail, ensure, Context, Result};
#[cfg(with_testing)]
use async_lock::RwLock;
use async_trait::async_trait;
use linera_base::{
command::{resolve_binary, CommandExt},
data_types::Amount,
};
use linera_client::client_options::ResourceControlPolicyConfig;
use linera_core::node::ValidatorNodeProvider;
use linera_rpc::config::{CrossChainConfig, ExporterServiceConfig, TlsConfig};
#[cfg(all(feature = "storage-service", with_testing))]
use linera_storage_service::common::storage_service_test_endpoint;
#[cfg(all(feature = "rocksdb", feature = "scylladb", with_testing))]
use linera_views::rocks_db::{RocksDbDatabase, RocksDbSpawnMode};
#[cfg(all(feature = "scylladb", with_testing))]
use linera_views::{scylla_db::ScyllaDbDatabase, store::TestKeyValueDatabase as _};
use tempfile::{tempdir, TempDir};
use tokio::process::{Child, Command};
use tonic::transport::{channel::ClientTlsConfig, Endpoint};
use tonic_health::pb::{
health_check_response::ServingStatus, health_client::HealthClient, HealthCheckRequest,
};
use tracing::{error, info, warn};
use crate::{
cli_wrappers::{
ClientWrapper, LineraNet, LineraNetConfig, Network, NetworkConfig, OnClientDrop,
},
config::{BlockExporterConfig, Destination, DestinationConfig},
storage::{InnerStorageConfig, StorageConfig},
util::ChildExt,
};
/// Maximum allowed number of shards over all validators.
const MAX_NUMBER_SHARDS: usize = 1000;

/// Whether a client should process its message inbox automatically.
// NOTE(review): no uses of this enum are visible in this part of the file —
// confirm the intended semantics at the call sites.
pub enum ProcessInbox {
    Skip,
    Automatic,
}

/// Next port to hand out to test node services; see `get_node_port`.
#[cfg(with_testing)]
static PORT_PROVIDER: LazyLock<RwLock<u16>> = LazyLock::new(|| RwLock::new(7080));
/// Base port offset for the local test network, configurable via the
/// `TEST_OFFSET_PORT` environment variable. Defaults to 9000 when the
/// variable is unset or not a valid `usize`.
fn test_offset_port() -> usize {
    match std::env::var("TEST_OFFSET_PORT") {
        Ok(value) => value.parse::<usize>().unwrap_or(9000),
        Err(_) => 9000,
    }
}
/// Provides a port for the node service. Increment the port numbers.
#[cfg(with_testing)]
pub async fn get_node_port() -> u16 {
    // Take the next port from the shared counter, then advance it.
    let mut next_port = PORT_PROVIDER.write().await;
    let allocated = *next_port;
    *next_port += 1;
    info!("get_node_port returning port_ret={}", allocated);
    // Fail loudly if the port is already taken on this machine.
    assert!(port_selector::is_free(allocated));
    allocated
}
/// Builds a test [`InnerStorageConfig`] for the chosen database backend.
/// Panics at runtime if the matching cargo feature was not compiled in.
#[cfg(with_testing)]
async fn make_testing_config(database: Database) -> Result<InnerStorageConfig> {
    match database {
        Database::Service => {
            #[cfg(feature = "storage-service")]
            {
                // The endpoint is read from the LINERA_STORAGE_SERVICE variable.
                let endpoint = storage_service_test_endpoint()
                    .expect("Reading LINERA_STORAGE_SERVICE environment variable");
                Ok(InnerStorageConfig::Service { endpoint })
            }
            #[cfg(not(feature = "storage-service"))]
            panic!("Database::Service is selected without the feature storage_service");
        }
        Database::DynamoDb => {
            #[cfg(feature = "dynamodb")]
            {
                // Tests always target a local DynamoDB instance.
                let use_dynamodb_local = true;
                Ok(InnerStorageConfig::DynamoDb { use_dynamodb_local })
            }
            #[cfg(not(feature = "dynamodb"))]
            panic!("Database::DynamoDb is selected without the feature dynamodb");
        }
        Database::ScyllaDb => {
            #[cfg(feature = "scylladb")]
            {
                let config = ScyllaDbDatabase::new_test_config().await?;
                Ok(InnerStorageConfig::ScyllaDb {
                    uri: config.inner_config.uri,
                })
            }
            #[cfg(not(feature = "scylladb"))]
            panic!("Database::ScyllaDb is selected without the feature scylladb");
        }
        Database::DualRocksDbScyllaDb => {
            #[cfg(all(feature = "rocksdb", feature = "scylladb"))]
            {
                // Combined setup: a RocksDB path plus a ScyllaDB URI.
                let rocksdb_config = RocksDbDatabase::new_test_config().await?;
                let scylla_config = ScyllaDbDatabase::new_test_config().await?;
                let spawn_mode = RocksDbSpawnMode::get_spawn_mode_from_runtime();
                Ok(InnerStorageConfig::DualRocksDbScyllaDb {
                    path_with_guard: rocksdb_config.inner_config.path_with_guard,
                    spawn_mode,
                    uri: scylla_config.inner_config.uri,
                })
            }
            #[cfg(not(all(feature = "rocksdb", feature = "scylladb")))]
            panic!("Database::DualRocksDbScyllaDb is selected without the features rocksdb and scylladb");
        }
    }
}
/// Describes how to obtain an [`InnerStorageConfig`]: either a test
/// configuration generated on the fly, or an existing config supplied by the
/// caller.
pub enum InnerStorageConfigBuilder {
    #[cfg(with_testing)]
    TestConfig,
    ExistingConfig {
        storage_config: InnerStorageConfig,
    },
}

impl InnerStorageConfigBuilder {
    /// Resolves this builder into a concrete [`InnerStorageConfig`].
    /// `database` is only consulted for the `TestConfig` variant.
    #[cfg_attr(not(with_testing), expect(unused_variables))]
    pub async fn build(self, database: Database) -> Result<InnerStorageConfig> {
        match self {
            #[cfg(with_testing)]
            InnerStorageConfigBuilder::TestConfig => make_testing_config(database).await,
            InnerStorageConfigBuilder::ExistingConfig { storage_config } => Ok(storage_config),
        }
    }
}
/// Path used for the run can come from a path whose lifetime is controlled
/// by an external user or as a temporary directory
#[derive(Clone)]
pub enum PathProvider {
    /// A caller-managed directory; the caller controls its lifetime.
    ExternalPath { path_buf: PathBuf },
    /// A temporary directory removed once the last `Arc` clone is dropped.
    TemporaryDirectory { tmp_dir: Arc<TempDir> },
}

impl PathProvider {
    /// Returns the directory this provider points at.
    pub fn path(&self) -> &Path {
        match self {
            PathProvider::ExternalPath { path_buf } => path_buf.as_path(),
            PathProvider::TemporaryDirectory { tmp_dir } => tmp_dir.path(),
        }
    }

    /// Creates a provider backed by a fresh temporary directory.
    pub fn create_temporary_directory() -> Result<Self> {
        let tmp_dir = Arc::new(tempdir()?);
        Ok(PathProvider::TemporaryDirectory { tmp_dir })
    }

    /// Builds a provider from an optional path: `Some(path)` wraps the given
    /// external path, `None` falls back to a fresh temporary directory.
    pub fn from_path_option(path: &Option<String>) -> anyhow::Result<Self> {
        match path {
            // Reuse the canonical constructor instead of duplicating its logic.
            None => Self::create_temporary_directory(),
            Some(path) => Ok(PathProvider::ExternalPath {
                path_buf: PathBuf::from(path),
            }),
        }
    }
}
/// The information needed to start a [`LocalNet`].
pub struct LocalNetConfig {
    /// Storage backend used by all validators.
    pub database: Database,
    /// Internal and external protocol configuration.
    pub network: NetworkConfig,
    /// Seed for deterministic key generation; bumped as it is consumed.
    pub testing_prng_seed: Option<u64>,
    /// Base namespace for per-validator storage namespaces.
    pub namespace: String,
    /// Number of extra chains to create in the genesis configuration.
    pub num_other_initial_chains: u32,
    /// Initial token balance for the genesis chains.
    pub initial_amount: Amount,
    /// Number of validators to launch.
    pub num_initial_validators: usize,
    /// Number of shards per validator.
    pub num_shards: usize,
    /// Number of proxies per validator.
    pub num_proxies: usize,
    /// Resource-control policy used in the genesis configuration.
    pub policy_config: ResourceControlPolicyConfig,
    /// Cross-chain messaging configuration passed to each server.
    pub cross_chain_config: CrossChainConfig,
    /// How to obtain the storage configuration.
    pub storage_config_builder: InnerStorageConfigBuilder,
    /// Directory where wallets and validator config files are written.
    pub path_provider: PathProvider,
    /// Block exporter setup (locally spawned or already-running remote ones).
    pub block_exporters: ExportersSetup,
}
/// The setup for the block exporters.
#[derive(Clone, PartialEq)]
pub enum ExportersSetup {
    /// Block exporters are meant to be started and managed by the testing framework.
    Local(Vec<BlockExporterConfig>),
    /// Block exporters are already started and we just need to connect to them.
    Remote(Vec<ExporterServiceConfig>),
}

impl ExportersSetup {
    /// Builds a setup from CLI-style options: with `with_block_exporter` set,
    /// connect to a single already-running exporter at the given address and
    /// port; otherwise, no exporters at all.
    pub fn new(
        with_block_exporter: bool,
        block_exporter_address: String,
        block_exporter_port: NonZeroU16,
    ) -> ExportersSetup {
        if with_block_exporter {
            let exporter_config =
                ExporterServiceConfig::new(block_exporter_address, block_exporter_port.into());
            ExportersSetup::Remote(vec![exporter_config])
        } else {
            ExportersSetup::Local(vec![])
        }
    }
}
/// A set of Linera validators running locally as native processes.
pub struct LocalNet {
    /// Internal and external protocol configuration.
    network: NetworkConfig,
    /// Seed for deterministic key generation; bumped as it is consumed.
    testing_prng_seed: Option<u64>,
    /// Id handed to the next `ClientWrapper`.
    next_client_id: usize,
    /// Number of validators in the initial committee.
    num_initial_validators: usize,
    /// Number of proxies per validator.
    num_proxies: usize,
    /// Number of shards per validator.
    num_shards: usize,
    /// `(validator_key, account_key)` pairs printed by `linera-server
    /// generate`, keyed by validator index.
    validator_keys: BTreeMap<usize, (String, String)>,
    /// Child processes of each currently-running validator.
    running_validators: BTreeMap<usize, Validator>,
    /// Storage configs created by `initialize_storage`, per validator.
    initialized_validator_storages: BTreeMap<usize, StorageConfig>,
    /// Base namespace; each validator uses `{namespace}_server_{n}_db`.
    common_namespace: String,
    /// Storage backend configuration shared by all validators.
    common_storage_config: InnerStorageConfig,
    /// Cross-chain messaging configuration passed to each server.
    cross_chain_config: CrossChainConfig,
    /// Directory where all configuration files are written.
    path_provider: PathProvider,
    /// Block exporter setup.
    block_exporters: ExportersSetup,
}
/// The name of the environment variable that allows specifying additional arguments to be passed
/// to the binary when starting a server.
const SERVER_ENV: &str = "LINERA_SERVER_PARAMS";

/// Description of the database engine to use inside a local Linera network.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum Database {
    /// Linera storage service (requires the `storage-service` feature).
    Service,
    /// Local DynamoDB (requires the `dynamodb` feature).
    DynamoDb,
    /// ScyllaDB (requires the `scylladb` feature).
    ScyllaDb,
    /// RocksDB in front of ScyllaDB (requires both features).
    DualRocksDbScyllaDb,
}
/// The processes of a running validator.
struct Validator {
    proxies: Vec<Child>,
    servers: Vec<Child>,
    exporters: Vec<Child>,
}

impl Validator {
    /// Creates a record with no running processes.
    fn new() -> Self {
        Self {
            proxies: vec![],
            servers: vec![],
            exporters: vec![],
        }
    }

    /// Kills every process belonging to this validator: proxies, shard
    /// servers, and block exporters.
    async fn terminate(&mut self) -> Result<()> {
        for proxy in &mut self.proxies {
            proxy.kill().await.context("terminating validator proxy")?;
        }
        for server in &mut self.servers {
            server
                .kill()
                .await
                .context("terminating validator server")?;
        }
        // Bug fix: block exporters were previously not killed here, leaking
        // the child processes even though `ensure_is_running` tracks them.
        for exporter in &mut self.exporters {
            exporter
                .kill()
                .await
                .context("terminating block exporter")?;
        }
        Ok(())
    }

    /// Registers a running proxy process.
    fn add_proxy(&mut self, proxy: Child) {
        self.proxies.push(proxy)
    }

    /// Registers a running shard server process.
    fn add_server(&mut self, server: Child) {
        self.servers.push(server)
    }

    /// Kills and forgets the server with the given index.
    #[cfg(with_testing)]
    async fn terminate_server(&mut self, index: usize) -> Result<()> {
        let mut server = self.servers.remove(index);
        server
            .kill()
            .await
            .context("terminating validator server")?;
        Ok(())
    }

    /// Registers a running block exporter process.
    fn add_block_exporter(&mut self, exporter: Child) {
        self.exporters.push(exporter);
    }

    /// Fails if any of this validator's processes has exited.
    fn ensure_is_running(&mut self) -> Result<()> {
        for proxy in &mut self.proxies {
            proxy.ensure_is_running()?;
        }
        for child in &mut self.servers {
            child.ensure_is_running()?;
        }
        for exporter in &mut self.exporters {
            exporter.ensure_is_running()?;
        }
        Ok(())
    }
}
#[cfg(with_testing)]
impl LocalNetConfig {
    /// Returns a configuration with defaults for integration tests: four
    /// validators with four shards and one proxy each, a fixed PRNG seed, a
    /// random storage namespace, and a fresh temporary directory.
    pub fn new_test(database: Database, network: Network) -> Self {
        Self {
            database,
            network: NetworkConfig {
                // TLS is only used on the external side in tests.
                internal: network.drop_tls(),
                external: network,
            },
            num_other_initial_chains: 2,
            initial_amount: Amount::from_tokens(1_000_000),
            policy_config: ResourceControlPolicyConfig::Testnet,
            cross_chain_config: CrossChainConfig::default(),
            testing_prng_seed: Some(37),
            namespace: linera_views::random::generate_test_namespace(),
            num_initial_validators: 4,
            num_shards: 4,
            num_proxies: 1,
            storage_config_builder: InnerStorageConfigBuilder::TestConfig,
            path_provider: PathProvider::create_temporary_directory().unwrap(),
            block_exporters: ExportersSetup::Local(vec![]),
        }
    }
}
#[async_trait]
impl LineraNetConfig for LocalNetConfig {
    type Net = LocalNet;

    async fn instantiate(self) -> Result<(Self::Net, ClientWrapper)> {
        // Validate the topology before doing any expensive setup.
        ensure!(
            self.num_initial_validators > 0,
            "There should be at least one initial validator"
        );
        let total_number_shards = self.num_initial_validators * self.num_shards;
        // Bug fix: the message previously reported `self.num_shards` (shards
        // per validator) instead of the total that is actually checked.
        ensure!(
            total_number_shards <= MAX_NUMBER_SHARDS,
            "Total number of shards ({}) exceeds maximum allowed ({})",
            total_number_shards,
            MAX_NUMBER_SHARDS
        );
        let storage_config = self.storage_config_builder.build(self.database).await?;
        let mut net = LocalNet::new(
            self.network,
            self.testing_prng_seed,
            self.namespace,
            self.num_initial_validators,
            self.num_proxies,
            self.num_shards,
            storage_config,
            self.cross_chain_config,
            self.path_provider,
            self.block_exporters,
        );
        let client = net.make_client().await;
        net.generate_initial_validator_config().await?;
        client
            .create_genesis_config(
                self.num_other_initial_chains,
                self.initial_amount,
                self.policy_config,
                Some(vec!["localhost".to_owned()]),
            )
            .await?;
        net.run().await?;
        Ok((net, client))
    }
}
#[async_trait]
impl LineraNet for LocalNet {
    async fn ensure_is_running(&mut self) -> Result<()> {
        // Propagate the first failure from any validator's processes.
        for validator in self.running_validators.values_mut() {
            validator.ensure_is_running().context("in local network")?;
        }
        Ok(())
    }

    async fn make_client(&mut self) -> ClientWrapper {
        let client = ClientWrapper::new(
            self.path_provider.clone(),
            self.network.external,
            self.testing_prng_seed,
            self.next_client_id,
            OnClientDrop::LeakChains,
        );
        // Bump the seed (when set) and the id so the next client differs.
        self.testing_prng_seed = self.testing_prng_seed.map(|seed| seed + 1);
        self.next_client_id += 1;
        client
    }

    async fn terminate(&mut self) -> Result<()> {
        for validator in self.running_validators.values_mut() {
            validator.terminate().await.context("in local network")?
        }
        Ok(())
    }
}
impl LocalNet {
#[expect(clippy::too_many_arguments)]
fn new(
network: NetworkConfig,
testing_prng_seed: Option<u64>,
common_namespace: String,
num_initial_validators: usize,
num_proxies: usize,
num_shards: usize,
common_storage_config: InnerStorageConfig,
cross_chain_config: CrossChainConfig,
path_provider: PathProvider,
block_exporters: ExportersSetup,
) -> Self {
Self {
network,
testing_prng_seed,
next_client_id: 0,
num_initial_validators,
num_proxies,
num_shards,
validator_keys: BTreeMap::new(),
running_validators: BTreeMap::new(),
initialized_validator_storages: BTreeMap::new(),
common_namespace,
common_storage_config,
cross_chain_config,
path_provider,
block_exporters,
}
}
async fn command_for_binary(&self, name: &'static str) -> Result<Command> {
let path = resolve_binary(name, env!("CARGO_PKG_NAME")).await?;
let mut command = Command::new(path);
command.current_dir(self.path_provider.path());
Ok(command)
}
#[cfg(with_testing)]
pub fn genesis_config(&self) -> Result<linera_client::config::GenesisConfig> {
let path = self.path_provider.path();
crate::util::read_json(path.join("genesis.json"))
}
    /// Port of shard `shard` of validator `validator` (base offset + 0).
    fn shard_port(&self, validator: usize, shard: usize) -> usize {
        test_offset_port() + validator * self.num_shards + shard + 1
    }

    /// Internal (validator-facing) port of a proxy (base offset + 1000).
    fn proxy_internal_port(&self, validator: usize, proxy_id: usize) -> usize {
        test_offset_port() + 1000 + validator * self.num_proxies + proxy_id + 1
    }

    /// Metrics port of a shard server (base offset + 2000).
    fn shard_metrics_port(&self, validator: usize, shard: usize) -> usize {
        test_offset_port() + 2000 + validator * self.num_shards + shard + 1
    }

    /// Metrics port of a proxy (base offset + 3000).
    fn proxy_metrics_port(&self, validator: usize, proxy_id: usize) -> usize {
        test_offset_port() + 3000 + validator * self.num_proxies + proxy_id + 1
    }

    /// gRPC port of a block exporter (base offset + 3000).
    // NOTE(review): this shares the +3000 range with `proxy_metrics_port`,
    // but is indexed by `num_shards` instead of `num_proxies`; with several
    // proxies or exporters the two schemes can collide — confirm intended.
    fn block_exporter_port(&self, validator: usize, exporter_id: usize) -> usize {
        test_offset_port() + 3000 + validator * self.num_shards + exporter_id + 1
    }

    /// Public (client-facing) port of a proxy (base offset + 4000).
    pub fn proxy_public_port(&self, validator: usize, proxy_id: usize) -> usize {
        test_offset_port() + 4000 + validator * self.num_proxies + proxy_id + 1
    }

    /// The first public proxy port, i.e. `proxy_public_port(0, 0)`.
    pub fn first_public_port() -> usize {
        test_offset_port() + 4000 + 1
    }

    /// Metrics port of a block exporter (base offset + 4000).
    // NOTE(review): this shares the +4000 range with `proxy_public_port` —
    // confirm exporter ids cannot overlap with proxy public ports.
    fn block_exporter_metrics_port(exporter_id: usize) -> usize {
        test_offset_port() + 4000 + exporter_id + 1
    }
    /// Writes `validator_{n}.toml` describing validator `n`'s public
    /// endpoint, proxies, shards, and block exporters, and returns the file's
    /// path as a `String` (for passing on a command line).
    fn configuration_string(&self, server_number: usize) -> Result<String> {
        let n = server_number;
        let path = self
            .path_provider
            .path()
            .join(format!("validator_{n}.toml"));
        // The validator's advertised port is its first proxy's public port.
        let port = self.proxy_public_port(n, 0);
        let external_protocol = self.network.external.toml();
        let internal_protocol = self.network.internal.toml();
        let external_host = self.network.external.localhost();
        let internal_host = self.network.internal.localhost();
        let mut content = format!(
            r#"
        server_config_path = "server_{n}.json"
        host = "{external_host}"
        port = {port}
        external_protocol = {external_protocol}
        internal_protocol = {internal_protocol}
        "#
        );
        // One `[[proxies]]` entry per proxy.
        for k in 0..self.num_proxies {
            let public_port = self.proxy_public_port(n, k);
            let internal_port = self.proxy_internal_port(n, k);
            let metrics_port = self.proxy_metrics_port(n, k);
            // In the local network, the validator ingress is
            // the proxy - so the `public_port` is the validator
            // port.
            content.push_str(&format!(
                r#"
        [[proxies]]
        host = "{internal_host}"
        public_port = {public_port}
        private_port = {internal_port}
        metrics_port = {metrics_port}
        "#
            ));
        }
        // One `[[shards]]` entry per shard.
        for k in 0..self.num_shards {
            let shard_port = self.shard_port(n, k);
            let shard_metrics_port = self.shard_metrics_port(n, k);
            content.push_str(&format!(
                r#"
        [[shards]]
        host = "{internal_host}"
        port = {shard_port}
        metrics_port = {shard_metrics_port}
        "#
            ));
        }
        match self.block_exporters {
            ExportersSetup::Local(ref exporters) => {
                // Locally-managed exporters: reference them in the validator's
                // config AND write each exporter's own TOML config file.
                for (j, exporter) in exporters.iter().enumerate() {
                    let host = Network::Grpc.localhost();
                    let port = self.block_exporter_port(n, j);
                    let config_content = format!(
                        r#"
        [[block_exporters]]
        host = "{host}"
        port = {port}
        "#
                    );
                    content.push_str(&config_content);
                    let exporter_config = self.generate_block_exporter_config(
                        n,
                        j as u32,
                        &exporter.destination_config,
                    );
                    let config_path = self
                        .path_provider
                        .path()
                        .join(format!("exporter_config_{n}:{j}.toml"));
                    fs_err::write(&config_path, &exporter_config)?;
                }
            }
            ExportersSetup::Remote(ref exporters) => {
                // Remotely-managed exporters: only reference their endpoints.
                for exporter in exporters {
                    let host = exporter.host.clone();
                    let port = exporter.port;
                    let config_content = format!(
                        r#"
        [[block_exporters]]
        host = "{host}"
        port = {port}
        "#
                    );
                    content.push_str(&config_content);
                }
            }
        }
        fs_err::write(&path, content)?;
        // Convert the path to a `String`; fails only on non-UTF-8 paths.
        path.into_os_string().into_string().map_err(|error| {
            anyhow!(
                "could not parse OS string into string: {}",
                error.to_string_lossy()
            )
        })
    }
    /// Renders the TOML configuration for one block exporter: its id, metrics
    /// port, gRPC service endpoint, and the destinations it forwards blocks to.
    fn generate_block_exporter_config(
        &self,
        validator: usize,
        exporter_id: u32,
        destination_config: &DestinationConfig,
    ) -> String {
        let n = validator;
        let host = Network::Grpc.localhost();
        let port = self.block_exporter_port(n, exporter_id as usize);
        let metrics_port = Self::block_exporter_metrics_port(exporter_id as usize);
        let mut config = format!(
            r#"
        id = {exporter_id}
        metrics_port = {metrics_port}
        [service_config]
        host = "{host}"
        port = {port}
        "#
        );
        let DestinationConfig {
            destinations,
            committee_destination,
        } = destination_config;
        // Optionally enable forwarding to the current committee members.
        if *committee_destination {
            let destination_string_to_push = r#"
        [destination_config]
        committee_destination = true
        "#
            .to_string();
            config.push_str(&destination_string_to_push);
        }
        // Each explicit destination becomes a
        // `[[destination_config.destinations]]` entry whose `kind` field
        // selects the receiver type.
        for destination in destinations {
            let destination_string_to_push = match destination {
                Destination::Indexer {
                    tls,
                    endpoint,
                    port,
                } => {
                    let tls = match tls {
                        TlsConfig::ClearText => "ClearText",
                        TlsConfig::Tls => "Tls",
                    };
                    format!(
                        r#"
        [[destination_config.destinations]]
        tls = "{tls}"
        endpoint = "{endpoint}"
        port = {port}
        kind = "Indexer"
        "#
                    )
                }
                Destination::Validator { endpoint, port } => {
                    format!(
                        r#"
        [[destination_config.destinations]]
        endpoint = "{endpoint}"
        port = {port}
        kind = "Validator"
        "#
                    )
                }
                Destination::Logging { file_name } => {
                    format!(
                        r#"
        [[destination_config.destinations]]
        file_name = "{file_name}"
        kind = "Logging"
        "#
                    )
                }
            };
            config.push_str(&destination_string_to_push);
        }
        config
    }
async fn generate_initial_validator_config(&mut self) -> Result<()> {
let mut command = self.command_for_binary("linera-server").await?;
command.arg("generate");
if let Some(seed) = self.testing_prng_seed {
command.arg("--testing-prng-seed").arg(seed.to_string());
self.testing_prng_seed = Some(seed + 1);
}
command.arg("--validators");
for i in 0..self.num_initial_validators {
command.arg(&self.configuration_string(i)?);
}
let output = command
.args(["--committee", "committee.json"])
.spawn_and_wait_for_stdout()
.await?;
self.validator_keys = output
.split_whitespace()
.map(str::to_string)
.map(|keys| keys.split(',').map(str::to_string).collect::<Vec<_>>())
.enumerate()
.map(|(i, keys)| {
let validator_key = keys[0].to_string();
let account_key = keys[1].to_string();
(i, (validator_key, account_key))
})
.collect();
Ok(())
}
async fn run_proxy(&mut self, validator: usize, proxy_id: usize) -> Result<Child> {
let storage = self
.initialized_validator_storages
.get(&validator)
.expect("initialized storage");
let child = self
.command_for_binary("linera-proxy")
.await?
.arg(format!("server_{}.json", validator))
.args(["--storage", &storage.to_string()])
.args(["--id", &proxy_id.to_string()])
.spawn_into()?;
let port = self.proxy_public_port(validator, proxy_id);
let nickname = format!("validator proxy {validator}");
match self.network.external {
Network::Grpc => {
Self::ensure_grpc_server_has_started(&nickname, port, "http").await?;
let nickname = format!("validator proxy {validator}");
Self::ensure_grpc_server_has_started(&nickname, port, "http").await?;
}
Network::Grpcs => {
let nickname = format!("validator proxy {validator}");
Self::ensure_grpc_server_has_started(&nickname, port, "https").await?;
}
Network::Tcp => {
Self::ensure_simple_server_has_started(&nickname, port, "tcp").await?;
}
Network::Udp => {
Self::ensure_simple_server_has_started(&nickname, port, "udp").await?;
}
}
Ok(child)
}
async fn run_exporter(&mut self, validator: usize, exporter_id: u32) -> Result<Child> {
let config_path = format!("exporter_config_{validator}:{exporter_id}.toml");
let storage = self
.initialized_validator_storages
.get(&validator)
.expect("initialized storage");
tracing::debug!(config=?config_path, storage=?storage.to_string(), "starting block exporter");
let child = self
.command_for_binary("linera-exporter")
.await?
.args(["--config-path", &config_path])
.args(["--storage", &storage.to_string()])
.spawn_into()?;
match self.network.internal {
Network::Grpc => {
let port = self.block_exporter_port(validator, exporter_id as usize);
let nickname = format!("block exporter {validator}:{exporter_id}");
Self::ensure_grpc_server_has_started(&nickname, port, "http").await?;
}
Network::Grpcs => {
let port = self.block_exporter_port(validator, exporter_id as usize);
let nickname = format!("block exporter {validator}:{exporter_id}");
Self::ensure_grpc_server_has_started(&nickname, port, "https").await?;
}
Network::Tcp | Network::Udp => {
unreachable!("Only allowed options are grpc and grpcs")
}
}
tracing::info!("block exporter started {validator}:{exporter_id}");
Ok(child)
}
pub async fn ensure_grpc_server_has_started(
nickname: &str,
port: usize,
scheme: &str,
) -> Result<()> {
let endpoint = match scheme {
"http" => Endpoint::new(format!("http://localhost:{port}"))
.context("endpoint should always parse")?,
"https" => {
use linera_rpc::CERT_PEM;
let certificate = tonic::transport::Certificate::from_pem(CERT_PEM);
let tls_config = ClientTlsConfig::new().ca_certificate(certificate);
Endpoint::new(format!("https://localhost:{port}"))
.context("endpoint should always parse")?
.tls_config(tls_config)?
}
_ => bail!("Only supported scheme are http and https"),
};
let connection = endpoint.connect_lazy();
let mut client = HealthClient::new(connection);
linera_base::time::timer::sleep(Duration::from_millis(100)).await;
for i in 0..10 {
linera_base::time::timer::sleep(Duration::from_millis(i * 500)).await;
let result = client.check(HealthCheckRequest::default()).await;
if result.is_ok() && result.unwrap().get_ref().status() == ServingStatus::Serving {
info!(?port, "Successfully started {nickname}");
return Ok(());
} else {
warn!("Waiting for {nickname} to start");
}
}
bail!("Failed to start {nickname}");
}
/// Waits for a "simple" (TCP/UDP) proxy or server to come up by repeatedly
/// asking it for its version information, with linearly increasing back-off.
async fn ensure_simple_server_has_started(
    nickname: &str,
    port: usize,
    protocol: &str,
) -> Result<()> {
    use linera_core::node::ValidatorNode as _;
    let options = linera_rpc::NodeOptions {
        send_timeout: Duration::from_secs(5),
        recv_timeout: Duration::from_secs(5),
        retry_delay: Duration::from_secs(1),
        max_retries: 1,
    };
    let provider = linera_rpc::simple::SimpleNodeProvider::new(options);
    let address = format!("{protocol}:127.0.0.1:{port}");
    // All "simple" services (i.e. proxy and "server") are based on `RpcMessage` and
    // support `VersionInfoQuery`.
    let node = provider.make_node(&address)?;
    linera_base::time::timer::sleep(Duration::from_millis(100)).await;
    for attempt in 0..10 {
        linera_base::time::timer::sleep(Duration::from_millis(attempt * 500)).await;
        if node.get_version_info().await.is_ok() {
            info!("Successfully started {nickname}");
            return Ok(());
        }
        warn!("Waiting for {nickname} to start");
    }
    bail!("Failed to start {nickname}");
}
/// Runs `linera storage initialize` for the given validator and records the
/// resulting storage configuration so the shards can reuse it later.
async fn initialize_storage(&mut self, validator: usize) -> Result<()> {
    // Each validator gets its own namespace within the shared storage backend.
    let storage = StorageConfig {
        inner_storage_config: self.common_storage_config.clone(),
        namespace: format!("{}_server_{}_db", self.common_namespace, validator),
    };
    let mut command = self.command_for_binary("linera").await?;
    // Extra flags may be injected through the SERVER_ENV environment variable.
    if let Ok(extra_args) = env::var(SERVER_ENV) {
        command.args(extra_args.split_whitespace());
    }
    command
        .args(["storage", "initialize"])
        .args(["--storage", &storage.to_string()])
        .args(["--genesis", "genesis.json"])
        .spawn_and_wait_for_stdout()
        .await?;
    self.initialized_validator_storages
        .insert(validator, storage);
    Ok(())
}
// Spawns a `linera-server run` process for one shard of the given validator
// and blocks until it answers health checks, returning the child handle.
// Requires `initialize_storage` to have run for this validator first.
async fn run_server(&mut self, validator: usize, shard: usize) -> Result<Child> {
    let mut storage = self
        .initialized_validator_storages
        .get(&validator)
        .expect("initialized storage")
        .clone();
    // For the storage backends with a local directory, make sure that we don't reuse
    // the same directory for all the shards.
    storage.maybe_append_shard_path(shard)?;
    let mut command = self.command_for_binary("linera-server").await?;
    // Extra flags may be injected through the SERVER_ENV environment variable.
    if let Ok(var) = env::var(SERVER_ENV) {
        command.args(var.split_whitespace());
    }
    command
        .arg("run")
        .args(["--storage", &storage.to_string()])
        .args(["--server", &format!("server_{}.json", validator)])
        .args(["--shard", &shard.to_string()])
        .args(self.cross_chain_config.to_args());
    let child = command.spawn_into()?;
    let port = self.shard_port(validator, shard);
    let nickname = format!("validator server {validator}:{shard}");
    // Probe the freshly spawned process with the protocol-appropriate health check.
    match self.network.internal {
        Network::Grpc => {
            Self::ensure_grpc_server_has_started(&nickname, port, "http").await?;
        }
        Network::Grpcs => {
            Self::ensure_grpc_server_has_started(&nickname, port, "https").await?;
        }
        Network::Tcp => {
            Self::ensure_simple_server_has_started(&nickname, port, "tcp").await?;
        }
        Network::Udp => {
            Self::ensure_simple_server_has_started(&nickname, port, "udp").await?;
        }
    }
    Ok(child)
}
/// Brings up every initially configured validator, in index order.
async fn run(&mut self) -> Result<()> {
    let initial_count = self.num_initial_validators;
    for index in 0..initial_count {
        self.start_validator(index).await?;
    }
    Ok(())
}
/// Start a validator.
pub async fn start_validator(&mut self, index: usize) -> Result<()> {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/cli_wrappers/helmfile.rs | linera-service/src/cli_wrappers/helmfile.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::path::Path;
use anyhow::Result;
use fs_extra::dir::CopyOptions;
use linera_base::command::CommandExt;
use tokio::process::Command;
pub const DEFAULT_BLOCK_EXPORTER_PORT: u16 = 8882;
pub struct HelmFile;
impl HelmFile {
    /// Deploys (or updates) a validator release into the kind cluster by
    /// running `helmfile sync` against a throwaway copy of the chart, with
    /// all parameters passed through `LINERA_HELMFILE_*` environment variables.
    #[expect(clippy::too_many_arguments)]
    pub async fn sync(
        server_config_id: usize,
        github_root: &Path,
        num_proxies: usize,
        num_shards: usize,
        cluster_id: u32,
        docker_image_name: String,
        with_block_exporter: bool,
        num_block_exporters: usize,
        indexer_image_name: String,
        explorer_image_name: String,
        dual_store: bool,
    ) -> Result<()> {
        // Copy the chart into a temporary directory so the checkout stays pristine.
        let chart_dir = format!("{}/kubernetes/linera-validator", github_root.display());
        let temp_dir = tempfile::tempdir()?;
        fs_extra::copy_items(&[&chart_dir], temp_dir.path(), &CopyOptions::new())?;
        let mut command = Command::new("helmfile");
        command.current_dir(temp_dir.path().join("linera-validator"));
        if dual_store {
            command.env(
                "LINERA_HELMFILE_SET_STORAGE",
                "dualrocksdbscylladb:/linera.db:spawn_blocking:tcp:scylla-client.scylla.svc.cluster.local:9042",
            );
            command.env("LINERA_HELMFILE_SET_DUAL_STORE", "true");
        }
        if with_block_exporter {
            command.env("LINERA_HELMFILE_SET_EXPLORER_ENABLED", "true");
            command.env(
                "LINERA_HELMFILE_NUM_BLOCK_EXPORTERS",
                num_block_exporters.to_string(),
            );
            command.env(
                "LINERA_HELMFILE_BLOCK_EXPORTER_PORT",
                DEFAULT_BLOCK_EXPORTER_PORT.to_string(),
            );
            command.env("LINERA_HELMFILE_INDEXER_IMAGE", indexer_image_name);
            command.env("LINERA_HELMFILE_EXPLORER_IMAGE", explorer_image_name);
        }
        let server_config = format!("working/server_{server_config_id}.json");
        let kube_context = format!("kind-{}", cluster_id);
        command
            .env("LINERA_HELMFILE_SET_SERVER_CONFIG", server_config)
            .env("LINERA_HELMFILE_SET_NUM_PROXIES", num_proxies.to_string())
            .env("LINERA_HELMFILE_SET_NUM_SHARDS", num_shards.to_string())
            .env("LINERA_HELMFILE_LINERA_IMAGE", docker_image_name)
            .env("LINERA_HELMFILE_SET_KUBE_CONTEXT", kube_context)
            .arg("sync")
            .arg("--wait")
            .spawn_and_wait()
            .await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/local.rs | linera-service/tests/local.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{path::PathBuf, process::Command};
use anyhow::Result;
use linera_base::command::resolve_binary;
use linera_service::cli_wrappers::{local_net::PathProvider, ClientWrapper, Network, OnClientDrop};
mod common;
#[test_log::test(tokio::test)]
async fn test_project_new() -> Result<()> {
    let _rustflags_override = common::override_disable_warnings_as_errors();
    let path_provider = PathProvider::create_temporary_directory()?;
    let wallet_id = 0;
    let client = ClientWrapper::new(
        path_provider,
        Network::Grpc,
        None,
        wallet_id,
        OnClientDrop::LeakChains,
    );
    // The repo root is the parent of this crate's manifest directory.
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let linera_root = manifest_dir
        .parent()
        .expect("CARGO_MANIFEST_DIR should not be at the root");
    // Scaffold a fresh project and make sure it builds.
    let tmp_dir = client.project_new("init-test", linera_root).await?;
    let project_dir = tmp_dir.path().join("init-test");
    client
        .build_application(project_dir.as_path(), "init-test", false)
        .await?;
    // The generated project must be correctly formatted and pass its own tests.
    for cargo_args in [vec!["fmt", "--check"], vec!["test"]] {
        let mut child = Command::new("cargo")
            .args(&cargo_args)
            .current_dir(project_dir.as_path())
            .spawn()?;
        assert!(child.wait()?.success());
    }
    Ok(())
}
#[test_log::test(tokio::test)]
async fn test_project_test() -> Result<()> {
    // Run the bundled "counter" example's test suite through the client wrapper.
    let path_provider = PathProvider::create_temporary_directory()?;
    let wallet_id = 0;
    let client = ClientWrapper::new(
        path_provider,
        Network::Grpc,
        None,
        wallet_id,
        OnClientDrop::LeakChains,
    );
    let example_path = ClientWrapper::example_path("counter")?;
    client.project_test(&example_path).await?;
    Ok(())
}
#[test_log::test(tokio::test)]
async fn test_resolve_binary() -> Result<()> {
    let package = env!("CARGO_PKG_NAME");
    // Known workspace binaries must resolve...
    for binary in ["linera", "linera-proxy"] {
        resolve_binary(binary, package).await?;
    }
    // ...while a nonexistent one must not.
    assert!(resolve_binary("linera-spaceship", package).await.is_err());
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/opentelemetry.rs | linera-service/tests/opentelemetry.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use tracing::{info_span, instrument};
use tracing_subscriber::{layer::SubscriberExt as _, registry::Registry};
// Instrumented function without any skip marker; its span is expected to be
// exported by the OpenTelemetry layer in the test below.
#[instrument]
fn span_with_export() {
    tracing::info!("This span should be exported");
}
// Instrumented function carrying the `opentelemetry.skip` field; the layer
// under test must filter this span out of the export.
#[instrument(skip_all, fields(opentelemetry.skip = true))]
fn span_without_export() {
    tracing::info!("This span should NOT be exported");
}
// Verifies that the OpenTelemetry layer exports ordinary spans but filters out
// any span marked with `opentelemetry.skip = true`, whether the span comes from
// an `#[instrument]` attribute or is created manually.
#[test]
fn test_opentelemetry_filters_skip() {
    let (opentelemetry_layer, exporter, tracer_provider) =
        linera_service::tracing::opentelemetry::build_opentelemetry_layer_with_test_exporter(
            "test_opentelemetry",
        );
    let subscriber = Registry::default().with(opentelemetry_layer);
    // Emit all four spans while the test subscriber is installed.
    tracing::subscriber::with_default(subscriber, || {
        span_with_export();
        span_without_export();
        let manual_exported_span = info_span!("manual_exported").entered();
        tracing::info!("Manual span without skip");
        drop(manual_exported_span);
        let manual_skipped_span = info_span!("manual_skipped", opentelemetry.skip = true).entered();
        tracing::info!("Manual span with opentelemetry.skip");
        drop(manual_skipped_span);
    });
    // Dropping the provider flushes any pending spans into the test exporter.
    drop(tracer_provider);
    let exported_spans = exporter
        .get_finished_spans()
        .expect("Failed to get exported spans");
    let span_names: Vec<String> = exported_spans.iter().map(|s| s.name.to_string()).collect();
    assert!(
        span_names.contains(&"span_with_export".to_string()),
        "Regular span should be exported to OpenTelemetry. Found spans: {:?}",
        span_names
    );
    assert!(
        !span_names.contains(&"span_without_export".to_string()),
        "Span with opentelemetry.skip should NOT be exported to OpenTelemetry. Found spans: {:?}",
        span_names
    );
    assert!(
        span_names.contains(&"manual_exported".to_string()),
        "Manual span without skip should be exported. Found spans: {:?}",
        span_names
    );
    assert!(
        !span_names.contains(&"manual_skipped".to_string()),
        "Manual span with opentelemetry.skip should NOT be exported. Found spans: {:?}",
        span_names
    );
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/wallet.rs | linera-service/tests/wallet.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::time::Duration;
use linera_base::{
crypto::InMemorySigner,
data_types::{Amount, Blob, BlockHeight, Epoch},
};
use linera_chain::data_types::ProposedBlock;
use linera_client::{client_context::ClientContext, config::GenesisConfig};
use linera_core::{
client::{Client, PendingProposal},
join_set_ext::JoinSet,
test_utils::{MemoryStorageBuilder, StorageBuilder, TestBuilder},
wallet,
};
use linera_rpc::{NodeOptions, NodeProvider};
use linera_service::Wallet;
/// Builds a `ClientContext` for tests, wiring the given storage, wallet and
/// signer into a fresh `Client` with test-friendly timeouts and options.
///
/// NOTE(review): the timeouts, retry counts and worker TTLs below are fixed
/// test defaults; tune with care since they influence flakiness under load.
pub async fn new_test_client_context(
    storage: impl linera_core::environment::Storage,
    wallet: Wallet,
    signer: impl linera_core::environment::Signer,
    block_cache_size: usize,
    execution_state_cache_size: usize,
) -> anyhow::Result<ClientContext<impl linera_core::Environment>> {
    use linera_core::{client::chain_client, node::CrossChainMessageDelivery};
    let send_recv_timeout = Duration::from_millis(4000);
    let retry_delay = Duration::from_millis(1000);
    let max_retries = 10;
    let chain_worker_ttl = Duration::from_secs(30);
    let sender_chain_worker_ttl = Duration::from_secs(1);
    let node_options = NodeOptions {
        send_timeout: send_recv_timeout,
        recv_timeout: send_recv_timeout,
        retry_delay,
        max_retries,
    };
    let chain_ids: Vec<_> = wallet.chain_ids();
    // Human-readable node name summarizing which chains this client tracks.
    let name = match chain_ids.len() {
        0 => "Client node".to_string(),
        1 => format!("Client node for {:.8}", chain_ids[0]),
        n => format!("Client node for {:.8} and {} others", chain_ids[0], n - 1),
    };
    let genesis_config = wallet.genesis_config().clone();
    Ok(ClientContext {
        default_chain: wallet.default_chain(),
        client: Client::new(
            linera_core::environment::Impl {
                storage,
                network: NodeProvider::new(node_options),
                signer,
                wallet,
            },
            genesis_config.admin_id(),
            false, // TODO(review): confirm which flag of `Client::new` this toggles
            chain_ids,
            name,
            chain_worker_ttl,
            sender_chain_worker_ttl,
            chain_client::Options {
                // Blocking delivery makes cross-chain messaging synchronous in tests.
                cross_chain_message_delivery: CrossChainMessageDelivery::Blocking,
                ..chain_client::Options::test_default()
            },
            block_cache_size,
            execution_state_cache_size,
            linera_core::client::RequestsSchedulerConfig::default(),
        )
        .into(),
        genesis_config,
        send_timeout: send_recv_timeout,
        recv_timeout: send_recv_timeout,
        retry_delay,
        max_retries,
        chain_listeners: JoinSet::default(),
        client_metrics: None,
    })
}
/// Tests whether we can correctly save a wallet that contains pending blobs.
#[test_log::test(tokio::test)]
async fn test_save_wallet_with_pending_blobs() -> anyhow::Result<()> {
    let storage_builder = MemoryStorageBuilder::default();
    // `Some(42)` — presumably a deterministic RNG seed; confirm against the API.
    let mut signer = InMemorySigner::new(Some(42));
    let new_pubkey = signer.generate_new();
    let clock = storage_builder.clock().clone();
    let mut builder = TestBuilder::new(storage_builder, 4, 1, signer.clone()).await?;
    builder.add_root_chain(0, Amount::ONE).await?;
    let chain_id = builder.admin_id();
    let genesis_config = GenesisConfig::new_testing(&builder);
    // Create a fresh `linera` config directory to hold the wallet file.
    let tmp_dir = tempfile::tempdir()?;
    let mut config_dir = tmp_dir.keep();
    config_dir.push("linera");
    if !config_dir.exists() {
        tracing::debug!("{} does not exist, creating", config_dir.display());
        fs_err::create_dir(&config_dir)?;
        tracing::debug!("{} created.", config_dir.display());
    }
    let wallet_path = config_dir.join("wallet.json");
    if wallet_path.exists() {
        return Err(anyhow::anyhow!("Wallet already exists!"));
    }
    let wallet = Wallet::create(&wallet_path, genesis_config)?;
    let admin_description = builder.admin_description().unwrap().clone();
    // Insert a chain whose pending proposal carries a data blob; saving the
    // wallet below must serialize this blob without error.
    wallet
        .insert(
            admin_description.id(),
            wallet::Chain {
                owner: Some(new_pubkey.into()),
                timestamp: clock.current_time(),
                pending_proposal: Some(PendingProposal {
                    block: ProposedBlock {
                        chain_id,
                        epoch: Epoch::ZERO,
                        transactions: vec![],
                        height: BlockHeight::ZERO,
                        timestamp: clock.current_time(),
                        authenticated_owner: None,
                        previous_block_hash: None,
                    },
                    blobs: vec![Blob::new_data(b"blob".to_vec())],
                }),
                ..admin_description.into()
            },
        )
        .expect("wallet should be empty");
    wallet.save()?;
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/readme_test.rs | linera-service/tests/readme_test.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(feature = "storage-service")]
mod guard;
use std::{env, path::PathBuf};
use guard::INTEGRATION_TEST_GUARD;
use linera_service::{
test_name,
util::{
parse_secs, Markdown, DEFAULT_PAUSE_AFTER_GQL_MUTATIONS_SECS,
DEFAULT_PAUSE_AFTER_LINERA_SERVICE_SECS,
},
};
use tempfile::tempdir;
use tokio::process::Command;
// Extracts the bash snippets from each listed README, concatenates them into a
// script, and runs it from the repository root; the test passes iff the script
// exits successfully.
#[test_case::test_case(".." ; "main")]
#[test_case::test_case("../examples/amm" ; "amm")]
#[test_case::test_case("../examples/counter" ; "counter")]
#[test_case::test_case("../examples/crowd-funding" ; "crowd funding")]
#[test_case::test_case("../examples/fungible" ; "fungible")]
#[test_case::test_case("../examples/gen-nft" ; "gen-nft")]
#[test_case::test_case("../examples/how-to/perform-http-requests" ; "how-to-perform-http-requests")]
#[test_case::test_case("../examples/hex-game" ; "hex-game")]
#[test_case::test_case("../examples/llm" ; "llm")]
#[test_case::test_case("../examples/native-fungible" ; "native-fungible")]
#[test_case::test_case("../examples/non-fungible" ; "non-fungible")]
#[test_case::test_case("../examples/matching-engine" ; "matching engine")]
#[test_case::test_case("../linera-sdk/tests/fixtures/meta-counter" ; "meta counter")]
#[test_case::test_case("../examples/rfq" ; "requests for quotes")]
#[test_case::test_case("../examples/social" ; "social")]
#[test_log::test(tokio::test)]
async fn test_script_in_readme(path: &str) -> std::io::Result<()> {
    // Serialize integration tests that share local network resources.
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {} for path {}", test_name!(), path);
    let file = Markdown::new(PathBuf::from(path).join("README.md"))?;
    let tmp_dir = tempdir()?;
    let path = tmp_dir.path().join("test.sh");
    let mut script = fs_err::File::create(&path)?;
    // Pauses give background services time to settle between script steps.
    let pause_after_linera_service = parse_secs(DEFAULT_PAUSE_AFTER_LINERA_SERVICE_SECS).unwrap();
    let pause_after_gql_mutations = parse_secs(DEFAULT_PAUSE_AFTER_GQL_MUTATIONS_SECS).unwrap();
    file.extract_bash_script_to(
        &mut script,
        Some(pause_after_linera_service),
        Some(pause_after_gql_mutations),
    )?;
    // `-e` aborts on the first failing command; `-x` echoes commands for logs.
    let mut command = Command::new("bash");
    command
        // Run from the root of the repo.
        .current_dir("..")
        .arg("-e")
        .arg("-x")
        .arg(script.path());
    if env::var_os("RUST_LOG").is_none() {
        // Increase log verbosity to verify that services can write to stderr.
        command.env("RUST_LOG", "linera_execution::wasm=debug");
    }
    let status = command.status().await?;
    assert!(status.success());
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/local_net_tests.rs | linera-service/tests/local_net_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(any(
feature = "dynamodb",
feature = "scylladb",
feature = "storage-service",
))]
mod common;
mod guard;
use std::{env, path::PathBuf, time::Duration};
use anyhow::Result;
use guard::INTEGRATION_TEST_GUARD;
use linera_base::{
crypto::Secp256k1SecretKey,
data_types::{Amount, BlockHeight, Epoch},
identifiers::{Account, AccountOwner},
vm::VmRuntime,
};
use linera_core::{data_types::ChainInfoQuery, node::ValidatorNode};
use linera_sdk::linera_base_types::AccountSecretKey;
use linera_service::{
cli_wrappers::{
local_net::{get_node_port, Database, LocalNetConfig, ProcessInbox},
ClientWrapper, LineraNet, LineraNetConfig, Network, NotificationsExt,
},
test_name,
util::eventually,
};
use test_case::test_case;
#[cfg(feature = "ethereum")]
use {alloy_primitives::U256, linera_service::cli_wrappers::ApplicationWrapper};
#[cfg(feature = "storage-service")]
use {
linera_base::port::get_free_port, linera_service::cli_wrappers::Faucet, std::process::Command,
};
/// Returns the account owner associated with the given client's wallet.
///
/// # Panics
///
/// Panics if the client wrapper has no owner configured; tests that call this
/// always create the client with an owner.
fn get_fungible_account_owner(client: &ClientWrapper) -> AccountOwner {
    // `expect` states the invariant instead of a bare `unwrap`.
    client
        .get_owner()
        .expect("client wallet should have an owner")
}
// End-to-end reconfiguration test: starting from four validators, it adds two
// more in one epoch change, removes validators in stages, revokes the old
// epochs, and finally checks that transfers and inbox processing still work
// with the surviving committee.
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Udp) ; "scylladb_udp"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Tcp) ; "storage_service_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Tcp) ; "scylladb_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Tcp) ; "aws_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Udp) ; "aws_udp"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_reconfiguration(config: LocalNetConfig) -> Result<()> {
    // Serialize integration tests that share local network resources.
    let _guard: tokio::sync::MutexGuard<'_, ()> = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let network = config.network.external;
    let (mut net, client) = config.instantiate().await?;
    // Set up a faucet on its own chain, funded from the default client.
    let faucet_client = net.make_client().await;
    faucet_client.wallet_init(None).await?;
    let faucet_chain = client
        .open_and_assign(&faucet_client, Amount::from_tokens(1_000u128))
        .await?;
    let mut faucet_service = faucet_client
        .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
        .await?;
    faucet_service.ensure_is_running()?;
    let faucet = faucet_service.instance();
    // The network starts with four validators.
    assert_eq!(faucet.current_validators().await?.len(), 4);
    let client_2 = net.make_client().await;
    client_2.wallet_init(None).await?;
    let chain_1 = client
        .load_wallet()?
        .default_chain()
        .expect("should have a default chain");
    let chain_2 = client
        .open_and_assign(&client_2, Amount::from_tokens(3))
        .await?;
    let port = get_node_port().await;
    // A node service with a notification stream is only available over gRPC.
    let mut node_service_2 = match network {
        Network::Grpc | Network::Grpcs => {
            let service = client_2.run_node_service(port, ProcessInbox::Skip).await?;
            let notifications = service.notifications(chain_1).await?;
            Some((service, notifications))
        }
        Network::Tcp | Network::Udp => None,
    };
    client.query_validators(None).await?;
    let address = format!(
        "{}:127.0.0.1:{}",
        network.short(),
        net.proxy_public_port(0, 0)
    );
    assert_eq!(
        client.query_validator(&address).await?,
        net.genesis_config()?.hash()
    );
    // Restart the first shard for the 4th validator.
    // TODO(#2286): The proxy currently only re-establishes the connection with gRPC.
    if matches!(network, Network::Grpc) {
        net.terminate_server(3, 0).await?;
        net.start_server(3, 0).await?;
    }
    // Create configurations for two more validators
    net.generate_validator_config(4).await?;
    net.generate_validator_config(5).await?;
    // Start the validators
    net.start_validator(4).await?;
    net.start_validator(5).await?;
    let address = format!(
        "{}:127.0.0.1:{}",
        network.short(),
        net.proxy_public_port(4, 0)
    );
    assert_eq!(
        client.query_validator(&address).await?,
        net.genesis_config()?.hash()
    );
    // Add 5th and 6th validators in a single epoch using change-validators
    let key_4 = net.validator_keys(4).unwrap();
    let key_5 = net.validator_keys(5).unwrap();
    client
        .change_validators(
            &[
                (
                    key_4.0.clone(),
                    key_4.1.clone(),
                    net.proxy_public_port(4, 0),
                    100,
                ),
                (
                    key_5.0.clone(),
                    key_5.1.clone(),
                    net.proxy_public_port(5, 0),
                    100,
                ),
            ],
            &[],
            &[],
        )
        .await?;
    client.query_validators(None).await?;
    client.query_validators(Some(chain_1)).await?;
    if matches!(network, Network::Grpc) {
        assert!(
            eventually(|| async { faucet.current_validators().await.unwrap().len() == 6 }).await
        );
    }
    // Remove 5th validator
    client
        .remove_validator(&net.validator_keys(4).unwrap().0)
        .await?;
    net.remove_validator(4)?;
    if matches!(network, Network::Grpc) {
        assert!(
            eventually(|| async { faucet.current_validators().await.unwrap().len() == 5 }).await
        )
    }
    client.query_validators(None).await?;
    client.query_validators(Some(chain_1)).await?;
    // Let chain_2 catch up on the epoch-change events, then revoke epoch 1.
    if let Some((service, notifications)) = &mut node_service_2 {
        let admin_height = client
            .load_wallet()?
            .get(chain_1)
            .unwrap()
            .next_block_height;
        let event_height = admin_height.try_sub_one()?;
        notifications.wait_for_events(event_height).await?;
        assert!(!service.process_inbox(&chain_2).await?.is_empty());
        client.revoke_epochs(Epoch(1)).await?;
        notifications.wait_for_events(None).await?;
        assert!(!service.process_inbox(&chain_2).await.unwrap().is_empty());
        let committees = service.query_committees(&chain_2).await?;
        let epochs = committees.into_keys().collect::<Vec<_>>();
        // Only the newest epoch should remain after the revocation.
        assert_eq!(&epochs, &[Epoch(2)]);
    } else {
        client_2.process_inbox(chain_2).await?;
        client.revoke_epochs(Epoch(1)).await?;
        client_2.process_inbox(chain_2).await?;
    }
    // Remove the first 4 validators in a single epoch using change-validators.
    let validators_to_remove: Vec<String> = (0..4)
        .map(|i| net.validator_keys(i).unwrap().0.clone())
        .collect();
    client
        .change_validators(&[], &[], &validators_to_remove)
        .await?;
    if let Some((service, notifications)) = &mut node_service_2 {
        notifications.wait_for_events(None).await?;
        assert!(!service.process_inbox(&chain_2).await.unwrap().is_empty());
        client.revoke_epochs(Epoch(2)).await?;
        notifications.wait_for_events(None).await?;
        assert!(!service.process_inbox(&chain_2).await?.is_empty());
        let committees = service.query_committees(&chain_2).await?;
        let epochs = committees.into_keys().collect::<Vec<_>>();
        assert_eq!(&epochs, &[Epoch(3)]);
    } else {
        client_2.process_inbox(chain_2).await?;
        client.revoke_epochs(Epoch(2)).await?;
        client_2.process_inbox(chain_2).await?;
    }
    for i in 0..4 {
        net.remove_validator(i)?;
    }
    // With only the new committee left, a transfer must still go through.
    let recipient =
        AccountOwner::from(AccountSecretKey::Secp256k1(Secp256k1SecretKey::generate()).public());
    let account_recipient = Account::new(chain_2, recipient);
    client
        .transfer_with_accounts(
            Amount::from_tokens(5),
            Account::chain(chain_1),
            account_recipient,
        )
        .await?;
    if let Some((service, notifications)) = &mut node_service_2 {
        let height = client
            .load_wallet()?
            .get(chain_1)
            .unwrap()
            .next_block_height
            .try_sub_one()?;
        notifications.wait_for_block(height).await?;
        assert!(!service.process_inbox(&chain_2).await?.is_empty());
        let balance = service.balance(&account_recipient).await?;
        assert_eq!(balance, Amount::from_tokens(5));
        let committees = service.query_committees(&chain_2).await?;
        let epochs = committees.into_keys().collect::<Vec<_>>();
        assert_eq!(&epochs, &[Epoch(3)]);
        service.ensure_is_running()?;
    } else {
        client_2.sync(chain_2).await?;
        client_2.process_inbox(chain_2).await?;
        assert_eq!(
            client_2.local_balance(account_recipient).await?,
            Amount::from_tokens(5),
        );
    }
    // Finally, a brand-new client created via the faucet should be functional.
    if matches!(network, Network::Grpc) {
        let client = net.make_client().await;
        client.wallet_init(Some(&faucet)).await?;
        let (chain_id, _owner) = client.request_chain(&faucet, true).await?;
        let port = get_node_port().await;
        let service = client
            .run_node_service(port, ProcessInbox::Automatic)
            .await?;
        service
            .publish_data_blob(&chain_id, b"blob bytes".to_vec())
            .await?;
    }
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Test if it's possible to receive epoch change messages for past epochs.
///
/// The epoch change messages are protected, and can't be rejected.
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Tcp) ; "storage_service_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Tcp) ; "scylladb_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Tcp) ; "aws_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Udp) ; "aws_udp"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_receipt_of_old_create_committee_messages(
    config: LocalNetConfig,
) -> Result<()> {
    // Serialize integration tests that share local network resources.
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let network = config.network.external;
    let (mut net, client) = config.instantiate().await?;
    // Set up a faucet on its own chain, funded from the default client.
    let faucet_client = net.make_client().await;
    faucet_client.wallet_init(None).await?;
    let faucet_chain = client
        .open_and_assign(&faucet_client, Amount::from_tokens(1_000u128))
        .await?;
    if matches!(network, Network::Grpc) {
        let mut faucet_service = faucet_client
            .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
            .await?;
        faucet_service.ensure_is_running()?;
        let faucet = faucet_service.instance();
        // The network starts with four validators.
        assert_eq!(faucet.current_validators().await?.len(), 4);
        faucet_service.terminate().await?;
    }
    client.query_validators(None).await?;
    // Start a new validator
    net.generate_validator_config(4).await?;
    net.start_validator(4).await?;
    let address = format!(
        "{}:127.0.0.1:{}",
        network.short(),
        net.proxy_public_port(4, 0)
    );
    assert_eq!(
        client.query_validator(&address).await?,
        net.genesis_config()?.hash()
    );
    // Add 5th validator to the network
    client
        .set_validator(
            net.validator_keys(4).unwrap(),
            net.proxy_public_port(4, 0),
            100,
        )
        .await?;
    client.query_validators(None).await?;
    // Ensure the faucet is on the new epoch
    faucet_client.process_inbox(faucet_chain).await?;
    let mut faucet_service = faucet_client
        .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
        .await?;
    faucet_service.ensure_is_running()?;
    let faucet = faucet_service.instance();
    if matches!(network, Network::Grpc) {
        assert_eq!(faucet.current_validators().await?.len(), 5);
    }
    // Create a new chain starting on the new epoch
    let new_owner = client.keygen().await?;
    let chain_id = faucet.claim(&new_owner).await?.id();
    client.assign(new_owner, chain_id).await?;
    // Attempt to receive the existing epoch change message
    client.process_inbox(chain_id).await?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Test if it's possible to receive epoch change messages for past epochs, even if they have been
/// deprecated.
///
/// The epoch change messages are protected, and can't be rejected.
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Tcp) ; "storage_service_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Tcp) ; "scylladb_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Tcp) ; "aws_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Udp) ; "aws_udp"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_receipt_of_old_remove_committee_messages(
    config: LocalNetConfig,
) -> Result<()> {
    // Serialize integration tests that share local network resources.
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let network = config.network.external;
    let (mut net, client) = config.instantiate().await?;
    // Set up a faucet on its own chain, funded from the default client.
    let faucet_client = net.make_client().await;
    faucet_client.wallet_init(None).await?;
    let faucet_chain = client
        .open_and_assign(&faucet_client, Amount::from_tokens(1_000u128))
        .await?;
    if matches!(network, Network::Grpc) {
        let mut faucet_service = faucet_client
            .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
            .await?;
        faucet_service.ensure_is_running()?;
        let faucet = faucet_service.instance();
        // The network starts with four validators.
        assert_eq!(faucet.current_validators().await?.len(), 4);
        faucet_service.terminate().await?;
    }
    client.query_validators(None).await?;
    // Start a new validator
    net.generate_validator_config(4).await?;
    net.start_validator(4).await?;
    let address = format!(
        "{}:127.0.0.1:{}",
        network.short(),
        net.proxy_public_port(4, 0)
    );
    assert_eq!(
        client.query_validator(&address).await?,
        net.genesis_config()?.hash()
    );
    // Add 5th validator to the network
    client
        .set_validator(
            net.validator_keys(4).unwrap(),
            net.proxy_public_port(4, 0),
            100,
        )
        .await?;
    client.query_validators(None).await?;
    // Ensure the faucet is on the new epoch before removing the old ones.
    faucet_client.process_inbox(faucet_chain).await?;
    client.revoke_epochs(Epoch::ZERO).await?;
    faucet_client.process_inbox(faucet_chain).await?;
    if matches!(network, Network::Grpc) {
        let mut faucet_service = faucet_client
            .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
            .await?;
        faucet_service.ensure_is_running()?;
        let faucet = faucet_service.instance();
        assert_eq!(faucet.current_validators().await?.len(), 5);
        faucet_service.terminate().await?;
    }
    // We need the epoch before the latest to still be active, so that it can send all the epoch
    // change messages in a batch where the latest message is signed by a committee that the
    // receiving chain trusts.
    // Start another new validator
    net.generate_validator_config(5).await?;
    net.start_validator(5).await?;
    let address = format!(
        "{}:127.0.0.1:{}",
        network.short(),
        net.proxy_public_port(5, 0)
    );
    assert_eq!(
        client.query_validator(&address).await?,
        net.genesis_config()?.hash()
    );
    // Add 6th validator to the network
    client
        .set_validator(
            net.validator_keys(5).unwrap(),
            net.proxy_public_port(5, 0),
            100,
        )
        .await?;
    client.query_validators(None).await?;
    // Ensure the faucet is on the new epoch
    faucet_client.process_inbox(faucet_chain).await?;
    let mut faucet_service = faucet_client
        .run_faucet(None, Some(faucet_chain), Amount::from_tokens(2))
        .await?;
    faucet_service.ensure_is_running()?;
    let faucet = faucet_service.instance();
    if matches!(network, Network::Grpc) {
        assert_eq!(faucet.current_validators().await?.len(), 6);
    }
    // Create a new chain starting on the new epoch
    let new_owner = client.keygen().await?;
    let chain_id = faucet.claim(&new_owner).await?.id();
    client.assign(new_owner, chain_id).await?;
    // Attempt to receive the existing epoch change messages
    client.process_inbox(chain_id).await?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
// Verifies that a node service re-establishes its notification stream after a
// validator is killed and restarted: new blocks must eventually be observed
// through the reconnected stream.
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_retry_notification_stream(config: LocalNetConfig) -> Result<()> {
    // Serialize integration tests that share local network resources.
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let (mut net, client1) = config.instantiate().await?;
    let (chain, chain1) = {
        let wallet = client1.load_wallet()?;
        let chains = wallet.owned_chain_ids();
        (chains[0], chains[1])
    };
    let client2 = net.make_client().await;
    let mut height = 0;
    client2.wallet_init(None).await?;
    client2.follow_chain(chain, false).await?;
    client2.set_default_chain(chain).await?;
    // Listen for updates on root chain 0. There are no blocks on that chain yet.
    let port = get_node_port().await;
    let mut node_service2 = client2.run_node_service(port, ProcessInbox::Skip).await?;
    let response = node_service2
        .query_node(format!(
            "query {{ chain(chainId:\"{chain}\") {{ tipState {{ nextBlockHeight }} }} }}"
        ))
        .await?;
    assert_eq!(
        response["chain"]["tipState"]["nextBlockHeight"].as_u64(),
        Some(height)
    );
    // Oh no! The first validator has an outage and gets restarted!
    net.remove_validator(0)?;
    net.restart_validator(0).await?;
    // The node service should try to reconnect.
    'success: {
        for i in 0..10 {
            // Add a new block on the chain, triggering a notification.
            client1
                .transfer(Amount::from_tokens(1), chain, chain1)
                .await?;
            // Linear back-off between polls of the node service.
            linera_base::time::timer::sleep(Duration::from_secs(i)).await;
            height += 1;
            let response = node_service2
                .query_node(format!(
                    "query {{ chain(chainId:\"{chain}\") {{ tipState {{ nextBlockHeight }} }} }}"
                ))
                .await?;
            if response["chain"]["tipState"]["nextBlockHeight"].as_u64() == Some(height) {
                break 'success;
            }
        }
        panic!("Failed to re-establish notification stream");
    }
    node_service2.ensure_is_running()?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Scaffolds a fresh project with `linera project new` and publishes it to a minimal
/// local network.
#[cfg_attr(feature = "storage-service", test_case(Database::Service, Network::Grpc ; "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(Database::ScyllaDb, Network::Grpc ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(Database::DynamoDb, Network::Grpc ; "aws_grpc"))]
#[test_log::test(tokio::test)]
async fn test_project_publish(database: Database, network: Network) -> Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let _rustflags_override = common::override_disable_warnings_as_errors();
    // A single validator with one shard is enough to exercise publishing.
    let net_config = LocalNetConfig {
        num_initial_validators: 1,
        num_shards: 1,
        ..LocalNetConfig::new_test(database, network)
    };
    let (mut net, client) = net_config.instantiate().await?;
    // Create the new project next to the Linera workspace root so it can use
    // path dependencies.
    let manifest_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let workspace_root = manifest_path
        .parent()
        .expect("CARGO_MANIFEST_DIR should not be at the root");
    let tmp_dir = client.project_new("init-test", workspace_root).await?;
    let new_project_dir = tmp_dir.path().join("init-test");
    client
        .project_publish(new_project_dir, vec![], None, &0)
        .await?;
    // A node service must come up cleanly afterwards.
    let service_port = get_node_port().await;
    let mut node_service = client
        .run_node_service(service_port, ProcessInbox::Skip)
        .await?;
    node_service.ensure_is_running()?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Publishes the bundled `counter` example project to a minimal local network.
#[cfg_attr(feature = "storage-service", test_case(Database::Service, Network::Grpc ; "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(Database::ScyllaDb, Network::Grpc ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(Database::DynamoDb, Network::Grpc ; "aws_grpc"))]
#[test_log::test(tokio::test)]
async fn test_example_publish(database: Database, network: Network) -> Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    // A single validator with one shard is enough to exercise publishing.
    let net_config = LocalNetConfig {
        num_initial_validators: 1,
        num_shards: 1,
        ..LocalNetConfig::new_test(database, network)
    };
    let (mut net, client) = net_config.instantiate().await?;
    // Publish the `counter` example shipped with the repository.
    let counter_dir = ClientWrapper::example_path("counter")?;
    client
        .project_publish(counter_dir, vec![], None, &0)
        .await?;
    // A node service must come up cleanly afterwards.
    let service_port = get_node_port().await;
    let mut node_service = client
        .run_node_service(service_port, ProcessInbox::Skip)
        .await?;
    node_service.ensure_is_running()?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Test if the wallet file is correctly locked when used.
///
/// Holding the `Wallet` returned by `Wallet::read` keeps the file lock, so a second
/// client operation on the same wallet must fail until the wallet is dropped.
#[cfg(feature = "storage-service")]
// TODO(#2053): this test passes only if the wallet hasn't been saved
#[ignore]
#[test_log::test(tokio::test)]
async fn test_storage_service_wallet_lock() -> Result<()> {
    // `drop` is in the prelude; the previous explicit `use std::mem::drop;` was redundant.
    let config = LocalNetConfig::new_test(Database::Service, Network::Grpc);
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let (mut net, client) = config.instantiate().await?;
    // Reading the wallet takes the file lock; keep it alive in `lock`.
    let wallet = linera_service::Wallet::read(&client.wallet_path())?;
    let chain_id = wallet.default_chain().unwrap();
    let lock = wallet;
    // While the lock is held, the client cannot operate on the wallet.
    assert!(client.process_inbox(chain_id).await.is_err());
    // Releasing the lock lets the client proceed.
    drop(lock);
    assert!(client.process_inbox(chain_id).await.is_ok());
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Spawns `linera net up --with-faucet` as a child process, waits for its "READY!"
/// marker on stderr, snapshots the exported environment lines on stdout, checks the
/// faucet answers, then shuts the network down with SIGINT.
#[test_log::test(tokio::test)]
#[cfg(feature = "storage-service")]
async fn test_storage_service_linera_net_up_simple() -> Result<()> {
    use std::{
        io::{BufRead, BufReader},
        process::Stdio,
    };
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let port = get_free_port().await?;
    let mut command = Command::new(env!("CARGO_BIN_EXE_linera"));
    command.args([
        "net",
        "up",
        "--with-faucet",
        "--faucet-chain",
        "1",
        "--faucet-port",
        &port.to_string(),
    ]);
    let mut child = command
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()?;
    let stdout = BufReader::new(child.stdout.take().unwrap());
    let stderr = BufReader::new(child.stderr.take().unwrap());
    let mut lines = stderr.lines();
    // Wait until the runner announces readiness on stderr.
    let mut is_ready = false;
    eprintln!("waiting for network to be ready");
    for line in &mut lines {
        let line = line?;
        eprintln!("[net up]: {line}");
        if line.starts_with("READY!") {
            is_ready = true;
            break;
        }
    }
    // Reaching EOF without "READY!" means the network never came up.
    // (Previously an `if !is_ready { assert!(is_ready, ..) } else { .. }` — the
    // unconditional assert expresses the same behavior directly.)
    assert!(is_ready, "unexpected EOF for stderr");
    eprintln!("network is ready");
    // Echo faucet stderr for debugging and to empty the buffer.
    std::thread::spawn(move || {
        for line in lines {
            eprintln!("[net up] {}", line.unwrap());
        }
    });
    // Snapshot only the variable names of the `export FOO=...` lines printed on stdout.
    insta::assert_snapshot!(stdout
        .lines()
        .map_while(|line| {
            println!("{line:?}");
            line.unwrap()
                .split_once("=")
                .map(|x| x.0.to_owned())
                .filter(|line| line.starts_with("export"))
        })
        .collect::<Vec<_>>()
        .join("\n"));
    // Test faucet.
    let faucet = Faucet::new(format!("http://localhost:{}/", port));
    faucet.version_info().await.unwrap();
    // Send SIGINT to the child process.
    Command::new("kill")
        .args(["-s", "INT", &child.id().to_string()])
        .output()?;
    assert!(child.wait()?.success());
    Ok(())
}
/// Runs the built-in benchmark twice: once transferring the native token and once using
/// the `fungible` example application.
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Tcp) ; "storage_service_tcp"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Tcp) ; "scylladb_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Tcp) ; "aws_tcp"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_benchmark(mut config: LocalNetConfig) -> Result<()> {
    use std::collections::BTreeMap;
    use fungible::{FungibleTokenAbi, InitialState, Parameters};
    use linera_service::cli::command::{BenchmarkCommand, BenchmarkOptions};
    config.num_other_initial_chains = 2;
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let (mut net, client) = config.instantiate().await?;
    assert_eq!(client.load_wallet()?.num_chains(), 3);
    // Launch local benchmark using some additional chains.
    client
        .benchmark(BenchmarkCommand::Single {
            options: BenchmarkOptions {
                num_chains: 2,
                transactions_per_block: 10,
                bps: 2,
                runtime_in_seconds: Some(5),
                close_chains: true,
                ..Default::default()
            },
        })
        .await?;
    // With `close_chains: true`, the temporary benchmark chains are gone again.
    assert_eq!(client.load_wallet()?.num_chains(), 3);
    // Now we run the benchmark again, with the fungible token application instead of the
    // native token.
    let account_owner = get_fungible_account_owner(&client);
    let accounts = BTreeMap::from([(account_owner, Amount::from_tokens(1_000_000))]);
    let state = InitialState { accounts };
    let (contract, service) = client.build_example("fungible").await?;
    let params = Parameters::new("FUN");
    let application_id = client
        .publish_and_create::<FungibleTokenAbi, Parameters, InitialState>(
            contract,
            service,
            VmRuntime::Wasm,
            // Restored from mojibake (`¶ms`): the parameters reference.
            &params,
            &state,
            &[],
            None,
        )
        .await?;
    client
        .benchmark(BenchmarkCommand::Single {
            options: BenchmarkOptions {
                num_chains: 2,
                transactions_per_block: 10,
                bps: 2,
                runtime_in_seconds: Some(5),
                fungible_application_id: Some(application_id.forget_abi()),
                close_chains: true,
                ..Default::default()
            },
        })
        .await?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Tests if the `sync-validator` command uploads missing certificates to a validator.
// TODO(#3258): Fix test for simple-net
// #[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Udp) ; "scylladb_udp"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
// #[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Tcp) ; "storage_service_tcp"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
// #[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Tcp) ; "scylladb_tcp"))]
// #[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Tcp) ; "aws_tcp"))]
// #[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Udp) ; "aws_udp"))]
#[test_log::test(tokio::test)]
async fn test_sync_validator(config: LocalNetConfig) -> Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    const BLOCKS_TO_CREATE: usize = 5;
    const LAGGING_VALIDATOR_INDEX: usize = 0;
    let (mut net, client) = config.instantiate().await?;
    // Stop a validator to force it to lag behind the others
    net.stop_validator(LAGGING_VALIDATOR_INDEX).await?;
    // Create some blocks
    let sender_chain = client.default_chain().expect("Client has no default chain");
    let (receiver_chain, _) = client
        .open_chain(sender_chain, None, Amount::from_tokens(1_000))
        .await?;
    for amount in 1..=BLOCKS_TO_CREATE {
        client
            .transfer(
                Amount::from_tokens(amount as u128),
                sender_chain,
                receiver_chain,
            )
            .await?;
    }
    // Restart the stopped validator
    net.restart_validator(LAGGING_VALIDATOR_INDEX).await?;
    let lagging_validator = net.validator_client(LAGGING_VALIDATOR_INDEX)?;
    // The restarted validator has seen none of the blocks created while it was down.
    let state_before_sync = lagging_validator
        .handle_chain_info_query(ChainInfoQuery::new(sender_chain))
        .await?;
    assert_eq!(state_before_sync.info.next_block_height, BlockHeight::ZERO);
    // Synchronize the validator
    let validator_address = net.validator_address(LAGGING_VALIDATOR_INDEX);
    client
        .sync_validator([&sender_chain], validator_address)
        .await
        .expect("Missing lagging validator name");
    // After syncing, the validator knows the `open_chain` block plus the
    // `BLOCKS_TO_CREATE` transfer blocks — hence the `+ 1`.
    let state_after_sync = lagging_validator
        .handle_chain_info_query(ChainInfoQuery::new(sender_chain))
        .await?;
    assert_eq!(
        state_after_sync.info.next_block_height,
        BlockHeight(BLOCKS_TO_CREATE as u64 + 1)
    );
    // NOTE(review): unlike the other tests, `net` is not explicitly terminated here —
    // confirm that shutdown on drop is intended.
    Ok(())
}
/// Tests if a validator can process blocks on a child chain without syncing the parent
/// chain.
// #[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Udp) ; "scylladb_udp"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_service_grpc"))]
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/linera_net_tests.rs | linera-service/tests/linera_net_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(any(
feature = "dynamodb",
feature = "scylladb",
feature = "storage-service",
feature = "remote-net"
))]
mod guard;
use std::env;
use anyhow::Result;
use async_graphql::InputType;
use futures::{
channel::mpsc,
future::{self, Either},
StreamExt,
};
use guard::INTEGRATION_TEST_GUARD;
#[cfg(with_revm)]
use linera_base::vm::{EvmInstantiation, EvmOperation, EvmQuery};
use linera_base::{
crypto::{CryptoHash, Secp256k1SecretKey},
data_types::Amount,
identifiers::{Account, AccountOwner, ApplicationId, ChainId},
time::{Duration, Instant},
vm::VmRuntime,
};
use linera_core::worker::{Notification, Reason};
use linera_sdk::{
abis::fungible::FungibleTokenAbi,
linera_base_types::{AccountSecretKey, BlobContent, BlockHeight, DataBlobHash},
};
#[cfg(any(
feature = "dynamodb",
feature = "scylladb",
feature = "storage-service",
))]
use linera_service::cli_wrappers::local_net::{Database, LocalNetConfig};
#[cfg(any(
feature = "dynamodb",
feature = "scylladb",
feature = "storage-service",
))]
use linera_service::cli_wrappers::Network;
#[cfg(feature = "remote-net")]
use linera_service::cli_wrappers::{remote_net::RemoteNetTestingConfig, OnClientDrop::*};
use linera_service::{
cli_wrappers::{
local_net::{get_node_port, ProcessInbox},
ApplicationWrapper, ClientWrapper, LineraNet, LineraNetConfig, NotificationsExt,
},
test_name,
};
use serde_json::{json, Value};
use test_case::test_case;
#[cfg(with_revm)]
use {
alloy_primitives::{Address, Bytes, Log, B256, U256},
alloy_sol_types::{sol, SolCall, SolValue},
linera_execution::test_utils::solidity::{
get_evm_contract_path, load_solidity_example_by_name, read_evm_address_entry,
read_evm_u256_entry, read_evm_u64_entry, temporary_write_evm_module,
},
linera_sdk::abis::evm::EvmAbi,
};
/// Asserts that `app`'s EVM-side `get_balance(address)` view reports exactly `balance`.
#[cfg(with_revm)]
async fn assert_contract_balance(
    app: &ApplicationWrapper<EvmAbi>,
    address: Address,
    balance: Amount,
) -> anyhow::Result<()> {
    sol! {
        function get_balance(address account);
    }
    // Encode the Solidity call and send it as a read-only EVM query.
    let call = get_balanceCall { account: address };
    let response = app
        .run_json_query(EvmQuery::Query(call.abi_encode()))
        .await?;
    // Compare against the expected balance in EVM (U256) representation.
    let expected: U256 = balance.into();
    assert_eq!(read_evm_u256_entry(response), expected);
    Ok(())
}
/// The environment variable name to specify the number of iterations in the performance-related
/// tests.
const LINERA_TEST_ITERATIONS: &str = "LINERA_TEST_ITERATIONS";

/// Reads the iteration-count override from the environment.
///
/// Returns `None` when the variable is unset; panics when it is set but is not valid
/// Unicode or does not parse as a number.
fn test_iterations() -> Option<usize> {
    let value = match env::var(LINERA_TEST_ITERATIONS) {
        Ok(value) => value,
        // Unset is the normal case: the tests fall back to their default counts.
        Err(env::VarError::NotPresent) => return None,
        Err(env::VarError::NotUnicode(_)) => {
            panic!("{LINERA_TEST_ITERATIONS} must be valid Unicode")
        }
    };
    let iterations = value.parse().unwrap_or_else(|error| {
        panic!("{LINERA_TEST_ITERATIONS} is not a valid number: {error}")
    });
    Some(iterations)
}
/// Returns the wallet owner of the given client.
///
/// Panics if the client has no owner configured.
fn get_account_owner(client: &ClientWrapper) -> AccountOwner {
    client.get_owner().unwrap()
}
/// GraphQL client wrapper for the `native-fungible` example application.
struct NativeFungibleApp(ApplicationWrapper<FungibleTokenAbi>);
impl NativeFungibleApp {
    /// Queries the balance of `account_owner`; a missing entry is treated as zero.
    async fn get_amount(&self, account_owner: &AccountOwner) -> Amount {
        let query = format!(
            "accounts {{ entry(key: {}) {{ value }} }}",
            account_owner.to_value()
        );
        let response_body = self.0.query(&query).await.unwrap();
        let amount_option = serde_json::from_value::<Option<Amount>>(
            response_body["accounts"]["entry"]["value"].clone(),
        )
        .unwrap();
        amount_option.unwrap_or(Amount::ZERO)
    }
    /// Asserts that each listed account holds exactly the given balance.
    async fn assert_balances(&self, accounts: impl IntoIterator<Item = (AccountOwner, Amount)>) {
        for (account_owner, amount) in accounts {
            let value = self.get_amount(&account_owner).await;
            assert_eq!(value, amount);
        }
    }
    /// Returns all `(owner, balance)` entries stored by the application.
    async fn entries(&self) -> Vec<native_fungible::AccountEntry> {
        let query = "accounts { entries { key, value } }";
        let response_body = self.0.query(&query).await.unwrap();
        serde_json::from_value(response_body["accounts"]["entries"].clone()).unwrap()
    }
    /// Asserts that the stored entries contain each listed account with the given balance.
    async fn assert_entries(&self, accounts: impl IntoIterator<Item = (AccountOwner, Amount)>) {
        let entries: std::collections::BTreeMap<AccountOwner, Amount> = self
            .entries()
            .await
            .into_iter()
            .map(|entry| (entry.key, entry.value))
            .collect();
        for (account_owner, amount) in accounts {
            assert_eq!(entries[&account_owner], amount);
        }
    }
    /// Returns the owners that currently have an account entry.
    async fn keys(&self) -> Vec<AccountOwner> {
        let query = "accounts { keys }";
        let response_body = self.0.query(&query).await.unwrap();
        serde_json::from_value(response_body["accounts"]["keys"].clone()).unwrap()
    }
    /// Asserts that every listed owner appears among the stored account keys.
    async fn assert_keys(&self, accounts: impl IntoIterator<Item = AccountOwner>) {
        let keys = self.keys().await;
        for account_owner in accounts {
            assert!(keys.contains(&account_owner));
        }
    }
    /// Runs the `transfer` mutation, moving `amount_transfer` to `destination`.
    async fn transfer(
        &self,
        account_owner: &AccountOwner,
        amount_transfer: Amount,
        destination: Account,
    ) -> Value {
        let mutation = format!(
            "transfer(owner: {}, amount: \"{}\", targetAccount: {})",
            account_owner.to_value(),
            amount_transfer,
            destination.to_value(),
        );
        self.0.mutate(mutation).await.unwrap()
    }
    /// Runs `num_operations` identical `transfer` mutations in a single request.
    async fn repeated_transfer(
        &self,
        account_owner: &AccountOwner,
        amount_transfer: Amount,
        destination: Account,
        num_operations: usize,
    ) -> Value {
        let mutation = format!(
            "transfer(owner: {}, amount: \"{}\", targetAccount: {})",
            account_owner.to_value(),
            amount_transfer,
            destination.to_value(),
        );
        let mutations = vec![mutation; num_operations];
        self.0.multiple_mutate(&mutations).await.unwrap()
    }
    /// Runs the `claim` mutation, pulling `amount` from `source` into `target`.
    async fn claim(&self, source: Account, target: Account, amount: Amount) {
        // Claiming tokens from chain1 to chain2.
        let mutation = format!(
            "claim(sourceAccount: {}, amount: \"{}\", targetAccount: {})",
            source.to_value(),
            amount,
            target.to_value()
        );
        self.0.mutate(mutation).await.unwrap();
    }
}
/// GraphQL client wrapper for the `fungible` example application.
struct FungibleApp(ApplicationWrapper<fungible::FungibleTokenAbi>);

impl FungibleApp {
    /// Fetches the balance of `account_owner`; absent entries count as zero.
    async fn get_amount(&self, account_owner: &AccountOwner) -> Amount {
        let query = format!(
            "accounts {{ entry(key: {}) {{ value }} }}",
            account_owner.to_value()
        );
        let response = self.0.query(&query).await.unwrap();
        let entry = response["accounts"]["entry"]["value"].clone();
        let amount = serde_json::from_value::<Option<Amount>>(entry).unwrap();
        amount.unwrap_or(Amount::ZERO)
    }

    /// Asserts that every listed account holds exactly the expected balance.
    async fn assert_balances(&self, accounts: impl IntoIterator<Item = (AccountOwner, Amount)>) {
        for (owner, expected) in accounts {
            let actual = self.get_amount(&owner).await;
            assert_eq!(actual, expected);
        }
    }

    /// Fetches the allowance `owner` granted to `spender`; absent entries count as zero.
    async fn get_allowance(&self, owner: &AccountOwner, spender: &AccountOwner) -> Amount {
        let key = fungible::OwnerSpender::new(*owner, *spender);
        let query = format!(
            "allowances {{ entry(key: {}) {{ value }} }}",
            key.to_value()
        );
        let response = self.0.query(&query).await.unwrap();
        let entry = response["allowances"]["entry"]["value"].clone();
        let allowance = serde_json::from_value::<Option<Amount>>(entry).unwrap();
        allowance.unwrap_or(Amount::ZERO)
    }

    /// Asserts that the allowance from `owner` to `spender` equals `allowance`.
    async fn assert_allowance(
        &self,
        owner: &AccountOwner,
        spender: &AccountOwner,
        allowance: Amount,
    ) {
        assert_eq!(self.get_allowance(owner, spender).await, allowance);
    }

    /// Runs the `approve` mutation, granting `spender` an allowance on `owner`'s funds.
    async fn approve(
        &self,
        owner: &AccountOwner,
        spender: &AccountOwner,
        allowance: Amount,
    ) -> Value {
        let mutation = format!(
            "approve(owner: {}, spender: {}, allowance: \"{}\")",
            owner.to_value(),
            spender.to_value(),
            allowance,
        );
        self.0.mutate(mutation).await.unwrap()
    }

    /// Runs the `transfer` mutation, moving tokens from `account_owner` to `destination`.
    async fn transfer(
        &self,
        account_owner: &AccountOwner,
        amount_transfer: Amount,
        destination: Account,
    ) -> Value {
        let mutation = format!(
            "transfer(owner: {}, amount: \"{}\", targetAccount: {})",
            account_owner.to_value(),
            amount_transfer,
            destination.to_value(),
        );
        self.0.mutate(mutation).await.unwrap()
    }

    /// Runs the `transferFrom` mutation: `spender` moves `owner`'s tokens to `destination`.
    async fn transfer_from(
        &self,
        owner: &AccountOwner,
        spender: &AccountOwner,
        amount_transfer: Amount,
        destination: Account,
    ) -> Value {
        let mutation = format!(
            "transferFrom(owner: {}, spender: {}, amount: \"{}\", targetAccount: {})",
            owner.to_value(),
            spender.to_value(),
            amount_transfer,
            destination.to_value(),
        );
        self.0.mutate(mutation).await.unwrap()
    }
}
/// GraphQL client wrapper for the `non-fungible` example application.
struct NonFungibleApp(ApplicationWrapper<non_fungible::NonFungibleTokenAbi>);
impl NonFungibleApp {
    /// Recomputes the base64 token ID the application derives for an NFT minted
    /// from these inputs, for comparison against service responses.
    pub fn create_token_id(
        chain_id: &ChainId,
        application_id: &ApplicationId,
        name: &String,
        minter: &AccountOwner,
        hash: &DataBlobHash,
        num_minted_nfts: u64,
    ) -> String {
        use base64::engine::{general_purpose::STANDARD_NO_PAD, Engine as _};
        let token_id_vec = non_fungible::Nft::create_token_id(
            chain_id,
            application_id,
            name,
            minter,
            hash,
            num_minted_nfts,
        )
        .expect("Creating token ID should not fail");
        STANDARD_NO_PAD.encode(token_id_vec.id)
    }
    /// Queries a single NFT by its token ID.
    async fn get_nft(&self, token_id: &String) -> Result<non_fungible::NftOutput> {
        let query = format!(
            "nft(tokenId: {}) {{ tokenId, owner, name, minter, payload }}",
            token_id.to_value()
        );
        let response_body = self.0.query(&query).await?;
        Ok(serde_json::from_value(response_body["nft"].clone())?)
    }
    /// Returns the token IDs currently owned by `owner`.
    async fn get_owned_nfts(&self, owner: &AccountOwner) -> Result<Vec<String>> {
        let query = format!("ownedTokenIdsByOwner(owner: {})", owner.to_value());
        let response_body = self.0.query(&query).await?;
        Ok(serde_json::from_value(
            response_body["ownedTokenIdsByOwner"].clone(),
        )?)
    }
    /// Runs the `mint` mutation, creating an NFT backed by the given data blob.
    async fn mint(&self, minter: &AccountOwner, name: &String, blob_hash: &DataBlobHash) -> Value {
        let mutation = format!(
            "mint(minter: {}, name: {}, blobHash: {})",
            minter.to_value(),
            name.to_value(),
            blob_hash.to_value(),
        );
        self.0.mutate(mutation).await.unwrap()
    }
    /// Runs the `transfer` mutation, moving the NFT to `target_account`.
    async fn transfer(
        &self,
        source_owner: &AccountOwner,
        token_id: &String,
        target_account: &Account,
    ) -> Value {
        let mutation = format!(
            "transfer(sourceOwner: {}, tokenId: {}, targetAccount: {})",
            source_owner.to_value(),
            token_id.to_value(),
            target_account.to_value(),
        );
        self.0.mutate(mutation).await.unwrap()
    }
    /// Runs the `claim` mutation, pulling the NFT from `source_account` into
    /// `target_account`.
    async fn claim(
        &self,
        source_account: &Account,
        token_id: &String,
        target_account: &Account,
    ) -> Value {
        // Claiming tokens from chain1 to chain2.
        let mutation = format!(
            "claim(sourceAccount: {}, tokenId: {}, targetAccount: {})",
            source_account.to_value(),
            token_id.to_value(),
            target_account.to_value()
        );
        self.0.mutate(mutation).await.unwrap()
    }
}
/// GraphQL client wrapper for the `matching-engine` example application.
struct MatchingEngineApp(ApplicationWrapper<matching_engine::MatchingEngineAbi>);

impl MatchingEngineApp {
    /// Returns the order IDs currently associated with `account_owner`.
    async fn get_account_info(
        &self,
        account_owner: &AccountOwner,
    ) -> Vec<matching_engine::OrderId> {
        let key = account_owner.to_value();
        let query = format!("accountInfo {{ entry(key: {key}) {{ value {{ orders }} }} }}");
        let response = self.0.query(query).await.unwrap();
        let orders = response["accountInfo"]["entry"]["value"]["orders"].clone();
        serde_json::from_value(orders).unwrap()
    }

    /// Submits `order` to the matching engine via the `executeOrder` mutation.
    async fn order(&self, order: matching_engine::Order) -> Value {
        let order = order.to_value();
        let mutation = format!("executeOrder(order: {order})");
        self.0.mutate(mutation).await.unwrap()
    }
}
/// GraphQL client wrapper for the `amm` example application.
struct AmmApp(ApplicationWrapper<amm::AmmAbi>);

impl AmmApp {
    /// Swaps `input_amount` of the token at `input_token_idx` on behalf of `owner`.
    async fn swap(
        &self,
        owner: AccountOwner,
        input_token_idx: u32,
        input_amount: Amount,
    ) -> Result<Value> {
        let owner = owner.to_value();
        let mutation = format!(
            "swap(owner: {owner}, inputTokenIdx: {input_token_idx}, inputAmount: \"{input_amount}\")"
        );
        self.0.mutate(mutation).await
    }

    /// Adds liquidity for `owner`, bounded by the two maximum token amounts.
    async fn add_liquidity(
        &self,
        owner: AccountOwner,
        max_token0_amount: Amount,
        max_token1_amount: Amount,
    ) -> Result<Value> {
        let owner = owner.to_value();
        let mutation = format!(
            "addLiquidity(owner: {owner}, maxToken0Amount: \"{max_token0_amount}\", maxToken1Amount: \"{max_token1_amount}\")"
        );
        self.0.mutate(mutation).await
    }

    /// Removes `token_to_remove_amount` of the token at `token_to_remove_idx` for `owner`.
    async fn remove_liquidity(
        &self,
        owner: AccountOwner,
        token_to_remove_idx: u32,
        token_to_remove_amount: Amount,
    ) -> Result<Value> {
        let owner = owner.to_value();
        let mutation = format!(
            "removeLiquidity(owner: {owner}, tokenToRemoveIdx: {token_to_remove_idx}, tokenToRemoveAmount: \"{token_to_remove_amount}\")"
        );
        self.0.mutate(mutation).await
    }

    /// Removes all liquidity previously added by `owner`.
    async fn remove_all_added_liquidity(&self, owner: AccountOwner) -> Result<Value> {
        let owner = owner.to_value();
        let mutation = format!("removeAllAddedLiquidity(owner: {owner})");
        self.0.mutate(mutation).await
    }
}
/// Wraps a Solidity call into an `EvmQuery` operation that carries no tokens.
#[cfg(with_revm)]
fn get_zero_operation(operation: impl alloy_sol_types::SolCall) -> Result<EvmQuery, bcs::Error> {
    EvmOperation::new(Amount::ZERO, operation.abi_encode()).to_evm_query()
}
/// Builds an `EvmQuery` containing `num_operations` copies of the given zero-value call.
#[cfg(with_revm)]
fn get_zero_operations(
    operation: impl alloy_sol_types::SolCall,
    num_operations: usize,
) -> Result<EvmQuery, bcs::Error> {
    // Serialize once, then replicate the encoded bytes.
    let encoded = EvmOperation::new(Amount::ZERO, operation.abi_encode()).to_bytes()?;
    Ok(EvmQuery::Operations(vec![encoded; num_operations]))
}
/// Deploys a simple EVM counter contract, reads its initial value, increments it with an
/// operation, and checks the new value.
#[cfg(with_revm)]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_test_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "remote-net", test_case(RemoteNetTestingConfig::new(CloseChains) ; "remote_net_grpc"))]
#[test_log::test(tokio::test)]
async fn test_evm_end_to_end_counter(config: impl LineraNetConfig) -> Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let (mut net, client) = config.instantiate().await?;
    // Solidity ABI of the counter contract's constructor, operation and view.
    sol! {
        struct ConstructorArgs {
            uint64 initial_value;
        }
        function increment(uint64 input);
        function get_value();
    }
    let original_counter_value = 35;
    let constructor_argument = ConstructorArgs {
        initial_value: original_counter_value,
    };
    let constructor_argument = constructor_argument.abi_encode();
    let increment = 5;
    let chain = client.load_wallet()?.default_chain().unwrap();
    let (evm_contract, _dir) = get_evm_contract_path("tests/fixtures/evm_example_counter.sol")?;
    let instantiation_argument = EvmInstantiation::default();
    // EVM modules use the same bytecode for contract and service.
    let application_id = client
        .publish_and_create::<EvmAbi, Vec<u8>, EvmInstantiation>(
            evm_contract.clone(),
            evm_contract,
            VmRuntime::Evm,
            &constructor_argument,
            &instantiation_argument,
            &[],
            None,
        )
        .await?;
    let port = get_node_port().await;
    let mut node_service = client.run_node_service(port, ProcessInbox::Skip).await?;
    let application = node_service.make_application(&chain, &application_id)?;
    // Read the counter: it must equal the constructor's initial value.
    let query = get_valueCall {};
    let query = query.abi_encode();
    let query = EvmQuery::Query(query);
    let result = application.run_json_query(query.clone()).await?;
    let counter_value = read_evm_u64_entry(result);
    assert_eq!(counter_value, original_counter_value);
    // Increment via an operation, then re-read and verify the sum.
    let operation = incrementCall { input: increment };
    let operation = get_zero_operation(operation)?;
    application.run_json_query(operation).await?;
    let result = application.run_json_query(query).await?;
    let counter_value = read_evm_u64_entry(result);
    assert_eq!(counter_value, original_counter_value + increment);
    node_service.ensure_is_running()?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
/// Deploys an EVM factory contract that creates child counter contracts, then checks that
/// the children are addressable as Linera applications and that Linera- and EVM-side
/// balances agree.
#[cfg(with_revm)]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_test_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "remote-net", test_case(RemoteNetTestingConfig::new(CloseChains) ; "remote_net_grpc"))]
#[test_log::test(tokio::test)]
async fn test_evm_end_to_end_child_subcontract(config: impl LineraNetConfig) -> Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    tracing::info!("Starting test {}", test_name!());
    let (mut net, client) = config.instantiate().await?;
    let account_owner1 = client.get_owner().unwrap();
    // Solidity ABI of the factory and its child counters.
    sol! {
        function createCounter(uint256 initialValue);
        function get_address(uint256 index);
        function get_value();
        function increment();
    }
    let constructor_argument = Vec::new();
    let chain_id = client.load_wallet()?.default_chain().unwrap();
    let account_chain = Account::chain(chain_id);
    let account1 = Account {
        chain_id,
        owner: account_owner1,
    };
    // Fund the owner account so the factory instantiation can carry tokens.
    client
        .transfer_with_accounts(Amount::from_tokens(50), account_chain, account1)
        .await?;
    let module = load_solidity_example_by_name(
        "tests/fixtures/evm_child_subcontract.sol",
        "CounterFactory",
    )?;
    let (evm_contract, _dir) = temporary_write_evm_module(module)?;
    // Instantiate the factory with 27 tokens attached.
    let start_value = Amount::from_tokens(27);
    let instantiation_argument = EvmInstantiation {
        value: start_value.into(),
        argument: vec![],
    };
    let application_id = client
        .publish_and_create::<EvmAbi, Vec<u8>, EvmInstantiation>(
            evm_contract.clone(),
            evm_contract,
            VmRuntime::Evm,
            &constructor_argument,
            &instantiation_argument,
            &[],
            None,
        )
        .await?;
    let port = get_node_port().await;
    let mut node_service = client.run_node_service(port, ProcessInbox::Skip).await?;
    let application = node_service.make_application(&chain_id, &application_id)?;
    let address_app = application_id.evm_address();
    // Creating the subcontracts
    let operation0 = createCounterCall {
        initialValue: U256::from(42),
    };
    let operation0 = get_zero_operation(operation0)?;
    application.run_json_query(operation0).await?;
    let operation1 = createCounterCall {
        initialValue: U256::from(149),
    };
    let operation1 = get_zero_operation(operation1)?;
    application.run_json_query(operation1).await?;
    // Look up the EVM addresses the factory assigned to its two children.
    let query0 = get_addressCall {
        index: U256::from(0),
    };
    let query0 = EvmQuery::Query(query0.abi_encode());
    let address0 = application.run_json_query(query0).await?;
    let address0 = read_evm_address_entry(address0);
    let query1 = get_addressCall {
        index: U256::from(1),
    };
    let query1 = EvmQuery::Query(query1.abi_encode());
    let address1 = application.run_json_query(query1).await?;
    let address1 = read_evm_address_entry(address1);
    assert_ne!(address0, address1);
    // Creating the applications
    // The balance in Linera and EVM have to match.
    let application0 = ApplicationId::from(address0).with_abi::<EvmAbi>();
    let application0 = node_service.make_application(&chain_id, &application0)?;
    let application1 = ApplicationId::from(address1).with_abi::<EvmAbi>();
    let application1 = node_service.make_application(&chain_id, &application1)?;
    // Each child counter must report the initial value it was created with.
    let query = get_valueCall {};
    let query = EvmQuery::Query(query.abi_encode());
    let result = application0.run_json_query(query.clone()).await?;
    assert_eq!(read_evm_u256_entry(result), U256::from(42));
    let result = application1.run_json_query(query).await?;
    assert_eq!(read_evm_u256_entry(result), U256::from(149));
    // Created contracts have balance of 1.
    let account0 = Account {
        chain_id,
        owner: address0.into(),
    };
    let account1 = Account {
        chain_id,
        owner: address1.into(),
    };
    assert_eq!(node_service.balance(&account0).await?, Amount::ONE);
    assert_eq!(node_service.balance(&account1).await?, Amount::ONE);
    // Every contract's EVM-side view must agree: 1 token per child, 25 left on
    // the factory (27 minus the two children's endowments).
    assert_contract_balance(&application0, address0, Amount::ONE).await?;
    assert_contract_balance(&application0, address1, Amount::ONE).await?;
    assert_contract_balance(&application0, address_app, Amount::from_tokens(25)).await?;
    assert_contract_balance(&application1, address0, Amount::ONE).await?;
    assert_contract_balance(&application1, address1, Amount::ONE).await?;
    assert_contract_balance(&application1, address_app, Amount::from_tokens(25)).await?;
    assert_contract_balance(&application, address0, Amount::ONE).await?;
    assert_contract_balance(&application, address1, Amount::ONE).await?;
    assert_contract_balance(&application, address_app, Amount::from_tokens(25)).await?;
    node_service.ensure_is_running()?;
    net.ensure_is_running().await?;
    net.terminate().await?;
    Ok(())
}
#[cfg(with_revm)]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_test_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "remote-net", test_case(RemoteNetTestingConfig::new(CloseChains) ; "remote_net_grpc"))]
#[test_log::test(tokio::test)]
async fn test_evm_end_to_end_balance_and_transfer(config: impl LineraNetConfig) -> Result<()> {
let _guard = INTEGRATION_TEST_GUARD.lock().await;
tracing::info!("Starting test {}", test_name!());
let (mut net, client_a) = config.instantiate().await?;
let client_b = net.make_client().await;
client_b.wallet_init(None).await?;
let client_c = net.make_client().await;
client_c.wallet_init(None).await?;
let chain_a = client_a.load_wallet()?.default_chain().unwrap();
let chain_b = client_a
.open_and_assign(&client_b, Amount::from_tokens(50))
.await?;
let chain_c = client_a
.open_and_assign(&client_c, Amount::from_tokens(50))
.await?;
let account_chain_a = Account::chain(chain_a);
let account_owner1 = client_a.get_owner().unwrap();
let account_owner2 = client_a.keygen().await?;
let address1 = account_owner1.to_evm_address().unwrap();
let address2 = account_owner2.to_evm_address().unwrap();
let account_a_1 = Account {
chain_id: chain_a,
owner: account_owner1,
};
let account_a_2 = Account {
chain_id: chain_a,
owner: account_owner2,
};
client_a
.transfer_with_accounts(Amount::from_tokens(50), account_chain_a, account_a_1)
.await?;
client_a
.transfer_with_accounts(Amount::from_tokens(50), account_chain_a, account_a_2)
.await?;
sol! {
function send_cash(address recipient, uint256 amount);
function null_operation();
}
// The balance in Linera and EVM have to match.
let constructor_argument = Vec::new();
let (evm_contract, _dir) =
get_evm_contract_path("tests/fixtures/evm_balance_and_transfer.sol")?;
let start_value = Amount::from_tokens(4);
let instantiation_argument = EvmInstantiation {
value: start_value.into(),
argument: vec![],
};
let application_id = client_a
.publish_and_create::<EvmAbi, Vec<u8>, EvmInstantiation>(
evm_contract.clone(),
evm_contract,
VmRuntime::Evm,
&constructor_argument,
&instantiation_argument,
&[],
None,
)
.await?;
let account_owner_app: AccountOwner = application_id.into();
let address_app = account_owner_app.to_evm_address().unwrap();
let account_a_app = Account {
chain_id: chain_a,
owner: account_owner_app,
};
let port_a = get_node_port().await;
let port_b = get_node_port().await;
let port_c = get_node_port().await;
let mut node_service_a = client_a
.run_node_service(port_a, ProcessInbox::Skip)
.await?;
let mut node_service_b = client_b
.run_node_service(port_b, ProcessInbox::Skip)
.await?;
let mut node_service_c = client_c
.run_node_service(port_c, ProcessInbox::Skip)
.await?;
let balance_a_1 = node_service_a.balance(&account_a_1).await?;
let balance_a_2 = node_service_a.balance(&account_a_2).await?;
let balance_a_app = node_service_a.balance(&account_a_app).await?;
assert_eq!(balance_a_1, Amount::from_tokens(46));
assert_eq!(balance_a_2, Amount::from_tokens(50));
assert_eq!(balance_a_app, Amount::from_tokens(4));
let app_a = node_service_a.make_application(&chain_a, &application_id)?;
let app_b = node_service_b.make_application(&chain_b, &application_id)?;
let app_c = node_service_c.make_application(&chain_c, &application_id)?;
// Checking the balances on input
assert_contract_balance(&app_a, address1, balance_a_1).await?;
assert_contract_balance(&app_a, address2, balance_a_2).await?;
assert_contract_balance(&app_a, address_app, balance_a_app).await?;
// Transferring amount
let amount = Amount::from_tokens(1);
let operation = send_cashCall {
recipient: address2,
amount: amount.into(),
};
let operation = get_zero_operation(operation)?;
app_a.run_json_query(operation).await?;
// Checking the balances of app_a
let balance_a_1_after = node_service_a.balance(&account_a_1).await?;
let balance_a_2_after = node_service_a.balance(&account_a_2).await?;
let balance_a_app_after = node_service_a.balance(&account_a_app).await?;
assert_eq!(balance_a_1_after, balance_a_1);
assert_eq!(balance_a_2_after, balance_a_2 + amount);
assert_eq!(balance_a_app_after, balance_a_app - amount);
assert_contract_balance(&app_a, address1, balance_a_1_after).await?;
assert_contract_balance(&app_a, address2, balance_a_2_after).await?;
assert_contract_balance(&app_a, address_app, balance_a_app_after).await?;
// Doing an operation with a non-zero amount
let operation = null_operationCall {};
let amount_operation = Amount::from(2);
let operation = EvmOperation::new(amount_operation, operation.abi_encode());
let operation = EvmQuery::Operation(operation.to_bytes()?);
app_a.run_json_query(operation).await?;
let balance_a_app_after2 = node_service_a.balance(&account_a_app).await?;
assert_eq!(balance_a_app_after2, balance_a_app_after + amount_operation);
// Creating app_b via null_operation and checking balances.
let account_b_1 = Account {
chain_id: chain_b,
owner: account_owner1,
};
let account_b_2 = Account {
chain_id: chain_b,
owner: account_owner2,
};
let account_b_app = Account {
chain_id: chain_b,
owner: account_owner_app,
};
let operation = null_operationCall {};
let operation = get_zero_operation(operation)?;
app_b.run_json_query(operation).await?;
assert_eq!(node_service_b.balance(&account_b_1).await?, Amount::ZERO);
assert_eq!(node_service_b.balance(&account_b_2).await?, Amount::ZERO);
assert_eq!(node_service_b.balance(&account_b_app).await?, Amount::ZERO);
assert_contract_balance(&app_b, address1, Amount::ZERO).await?;
assert_contract_balance(&app_b, address2, Amount::ZERO).await?;
assert_contract_balance(&app_b, address_app, Amount::ZERO).await?;
// Creating app_b via service calls and checking balances.
assert_contract_balance(&app_c, address1, Amount::ZERO).await?;
assert_contract_balance(&app_c, address2, Amount::ZERO).await?;
assert_contract_balance(&app_c, address_app, Amount::ZERO).await?;
// Winding down
node_service_a.ensure_is_running()?;
node_service_b.ensure_is_running()?;
node_service_c.ensure_is_running()?;
net.ensure_is_running().await?;
net.terminate().await?;
Ok(())
}
#[cfg(with_revm)]
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc) ; "storage_test_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "aws_grpc"))]
#[cfg_attr(feature = "remote-net", test_case(RemoteNetTestingConfig::new(CloseChains) ; "remote_net_grpc"))]
#[test_log::test(tokio::test)]
async fn test_evm_event(config: impl LineraNetConfig) -> Result<()> {
use linera_base::identifiers::{GenericApplicationId, StreamId, StreamName};
let _guard = INTEGRATION_TEST_GUARD.lock().await;
tracing::info!("Starting test {}", test_name!());
let (mut net, client) = config.instantiate().await?;
sol! {
struct ConstructorArgs {
uint64 start_value;
}
function increment(uint64 input);
}
let start_value = 35;
let constructor_argument = ConstructorArgs { start_value };
let constructor_argument = constructor_argument.abi_encode();
let increment = 5;
let chain = client.load_wallet()?.default_chain().unwrap();
let (evm_contract, _dir) = get_evm_contract_path("tests/fixtures/evm_example_log.sol")?;
let instantiation_argument = EvmInstantiation::default();
let application_id = client
.publish_and_create::<EvmAbi, Vec<u8>, EvmInstantiation>(
evm_contract.clone(),
evm_contract,
VmRuntime::Evm,
&constructor_argument,
&instantiation_argument,
&[],
None,
)
.await?;
let port = get_node_port().await;
let mut node_service = client.run_node_service(port, ProcessInbox::Skip).await?;
let application = node_service.make_application(&chain, &application_id)?;
let application_id = GenericApplicationId::User(application_id.forget_abi());
let stream_name = bcs::to_bytes("ethereum_event")?;
let stream_name = StreamName(stream_name);
let stream_id = StreamId {
application_id,
stream_name,
};
let mut start_index = 0;
let indices_and_events = node_service
.events_from_index(&chain, &stream_id, start_index)
.await?;
let index_and_event = indices_and_events[0].clone();
assert_eq!(index_and_event.index, 0);
let (origin, block_height, log) =
bcs::from_bytes::<(String, u64, Log)>(&index_and_event.event)?;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/common/mod.rs | linera-service/tests/common/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::env;
/// Restores the `RUSTFLAGS` environment variable to make warnings fail as errors.
pub struct RestoreVarOnDrop;
impl Drop for RestoreVarOnDrop {
fn drop(&mut self) {
env::set_var("RUSTFLAGS", "-D warnings");
}
}
/// Clears the `RUSTFLAGS` environment variable, if it was configured to make warnings fail as
/// errors.
///
/// The returned [`RestoreVarOnDrop`] restores the environment variable to its original value when
/// it is dropped.
pub fn override_disable_warnings_as_errors() -> Option<RestoreVarOnDrop> {
if matches!(env::var("RUSTFLAGS"), Ok(value) if value == "-D warnings") {
env::set_var("RUSTFLAGS", "");
Some(RestoreVarOnDrop)
} else {
None
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/tests/guard/mod.rs | linera-service/tests/guard/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::LazyLock;
use tokio::sync::Mutex;
/// A static lock to prevent integration tests from running in parallel.
pub static INTEGRATION_TEST_GUARD: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/benches/transfers.rs | linera-service/benches/transfers.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use criterion::{criterion_group, criterion_main, Criterion};
use futures::{
stream::{self, FuturesUnordered},
Stream, StreamExt,
};
use linera_base::{
crypto::{AccountSecretKey, Ed25519SecretKey, EvmSecretKey, Secp256k1SecretKey},
data_types::Amount,
identifiers::{Account, AccountOwner},
time::{Duration, Instant},
};
use linera_sdk::test::{ActiveChain, TestValidator};
use tokio::runtime::Runtime;
/// Benchmarks several transactions transferring tokens across chains.
fn cross_chain_native_token_transfers(criterion: &mut Criterion) {
let chain_count = 40;
let accounts_per_chain = 1;
let transfers_per_account = 40;
criterion.bench_function("same_chain_native_token_transfers", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
let mut total_time = Duration::ZERO;
for _ in 0..iterations {
let chains = Box::pin(setup_native_token_balances(
chain_count,
accounts_per_chain,
transfers_per_account,
))
.await;
let transfers = prepare_transfers(chains, transfers_per_account);
let measurement = Instant::now();
transfers.collect::<()>().await;
total_time += measurement.elapsed();
}
total_time
})
});
let metrics = prometheus::TextEncoder::new()
.encode_to_string(&prometheus::gather())
.expect("Failed to format collected metrics");
println!("METRICS");
println!("{metrics}");
}
/// Provides each chain used in the benchmark with enough tokens to transfer.
async fn setup_native_token_balances(
chain_count: usize,
accounts_per_chain: usize,
transfers_per_account: usize,
) -> Vec<ActiveChain> {
let initial_balance = transfers_per_account as u128;
let validator = TestValidator::new().await;
let chains = stream::iter(0..chain_count)
.then(|idx| {
let key_pair = match idx % 3 {
0 => AccountSecretKey::Secp256k1(Secp256k1SecretKey::generate()),
1 => AccountSecretKey::Ed25519(Ed25519SecretKey::generate()),
_ => AccountSecretKey::EvmSecp256k1(EvmSecretKey::generate()),
};
validator.new_chain_with_keypair(key_pair)
})
.collect::<Vec<_>>()
.await;
let admin_chain = validator.get_chain(&validator.admin_chain_id());
for chain in &chains {
let recipient = Account {
chain_id: chain.id(),
owner: AccountOwner::from(chain.public_key()),
};
// TODO: Support benchmarking chains with multiple owner accounts
assert_eq!(accounts_per_chain, 1);
admin_chain
.add_block(|block| {
block.with_native_token_transfer(
AccountOwner::CHAIN,
recipient,
Amount::from_tokens(initial_balance),
);
})
.await;
chain.handle_received_messages().await;
}
chains
}
/// Returns a stream that concurrently adds blocks to all `chains` to transfer tokens.
fn prepare_transfers(
chains: Vec<ActiveChain>,
transfers_per_account: usize,
) -> impl Stream<Item = ()> {
let accounts = chains
.iter()
.map(|chain| Account {
chain_id: chain.id(),
owner: AccountOwner::from(chain.public_key()),
})
.collect::<Vec<_>>();
let chain_transfers = chains
.into_iter()
.enumerate()
.map(|(index, chain)| {
let chain_id = chain.id();
let sender = AccountOwner::from(chain.public_key());
let transfers = accounts
.iter()
.copied()
.filter(move |recipient| recipient.chain_id != chain_id)
.cycle()
.skip(index)
.take(transfers_per_account)
.map(move |recipient| (sender, recipient))
.collect::<Vec<_>>();
(chain, transfers)
})
.collect::<Vec<_>>();
chain_transfers
.into_iter()
.map(move |(chain, transfers)| async move {
tokio::spawn(async move {
for (sender, recipient) in transfers {
chain
.add_block(|block| {
block.with_native_token_transfer(sender, recipient, Amount::ONE);
})
.await;
}
})
.await
.unwrap();
})
.collect::<FuturesUnordered<_>>()
}
criterion_group!(benches, cross_chain_native_token_transfers);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/build.rs | linera-version/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
include!("src/serde_pretty/type.rs");
include!("src/version_info/type.rs");
fn main() {
let VersionInfo {
crate_version:
Pretty {
value:
CrateVersion {
major,
minor,
patch,
},
..
},
git_commit,
git_dirty,
rpc_hash,
graphql_hash,
wit_hash,
} = {
let mut paths = vec![];
let version_info = VersionInfo::trace_get(
std::path::Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()),
&mut paths,
)
.unwrap();
for path in paths {
println!("cargo:rerun-if-changed={}", path.display());
}
version_info
};
let static_code = quote::quote! {
VersionInfo {
crate_version: crate::serde_pretty::Pretty::new(
CrateVersion { major: #major, minor: #minor, patch: #patch },
),
git_commit: ::std::borrow::Cow::Borrowed(#git_commit),
git_dirty: #git_dirty,
rpc_hash: ::std::borrow::Cow::Borrowed(#rpc_hash),
graphql_hash: ::std::borrow::Cow::Borrowed(#graphql_hash),
wit_hash: ::std::borrow::Cow::Borrowed(#wit_hash),
}
};
let out_path = std::path::Path::new(&std::env::var_os("OUT_DIR").unwrap()).join("static.rs");
fs_err::write(&out_path, static_code.to_string().as_bytes()).unwrap_or_else(|e| {
panic!(
"failed to write output file `{}`: {}",
out_path.display(),
e
)
});
println!("cargo:rustc-cfg=linera_version_building");
println!(
"cargo:rustc-env=LINERA_VERSION_STATIC_PATH={}",
out_path.display()
)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/lib.rs | linera-version/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
This crate is in charge of extracting version information from the Linera build, for
troubleshooting information and version compatibility checks.
*/
mod serde_pretty;
pub use serde_pretty::*;
mod version_info;
pub use version_info::*;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/main.rs | linera-version/src/main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_version::VersionInfo;
fn main() -> anyhow::Result<()> {
serde_json::to_writer_pretty(std::io::stdout(), &VersionInfo::get()?.api_hashes())?;
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/version_info/type.rs | linera-version/src/version_info/type.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{io::Read as _, path::PathBuf};
#[cfg(linera_version_building)]
use crate::serde_pretty::Pretty;
#[cfg_attr(linera_version_building, derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CrateVersion {
pub major: u32,
pub minor: u32,
pub patch: u32,
}
impl From<semver::Version> for CrateVersion {
fn from(
semver::Version {
major,
minor,
patch,
..
}: semver::Version,
) -> Self {
Self {
major: major as u32,
minor: minor as u32,
patch: patch as u32,
}
}
}
impl From<CrateVersion> for semver::Version {
fn from(
CrateVersion {
major,
minor,
patch,
}: CrateVersion,
) -> Self {
Self::new(major as u64, minor as u64, patch as u64)
}
}
pub type Hash = std::borrow::Cow<'static, str>;
#[cfg_attr(linera_version_building, derive(serde::Deserialize, serde::Serialize))]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
/// The version info of a build of Linera.
pub struct VersionInfo {
/// The crate version
pub crate_version: Pretty<CrateVersion, semver::Version>,
/// The git commit hash
pub git_commit: Hash,
/// Whether the git checkout was dirty
pub git_dirty: bool,
/// A hash of the RPC API
pub rpc_hash: Hash,
/// A hash of the GraphQL API
pub graphql_hash: Hash,
/// A hash of the WIT API
pub wit_hash: Hash,
}
#[cfg(linera_version_building)]
async_graphql::scalar!(VersionInfo);
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("failed to interpret cargo-metadata: {0}")]
CargoMetadata(#[from] cargo_metadata::Error),
#[error("no such package: {0}")]
NoSuchPackage(String),
#[error("I/O error: {0}")]
IoError(#[from] std::io::Error),
#[error("glob error: {0}")]
Glob(#[from] glob::GlobError),
#[error("pattern error: {0}")]
Pattern(#[from] glob::PatternError),
#[error("JSON error: {0}")]
JsonError(#[from] serde_json::Error),
}
struct Outcome {
status: std::process::ExitStatus,
output: String,
}
fn get_hash(
relevant_paths: &mut Vec<PathBuf>,
metadata: &cargo_metadata::Metadata,
package: &str,
glob: &str,
) -> Result<String, Error> {
use base64::engine::{general_purpose::STANDARD_NO_PAD, Engine as _};
use sha3::Digest as _;
let package_root = get_package_root(metadata, package)
.ok_or_else(|| Error::NoSuchPackage(package.to_owned()))?;
let mut hasher = sha3::Sha3_256::new();
let mut buffer = [0u8; 4096];
let package_glob = format!("{}/{}", package_root.display(), glob);
let mut n_file = 0;
for path in glob::glob(&package_glob)? {
let path = path?;
let mut file = fs_err::File::open(&path)?;
relevant_paths.push(path);
n_file += 1;
while file.read(&mut buffer)? != 0 {
hasher.update(buffer);
}
}
assert!(n_file > 0);
Ok(STANDARD_NO_PAD.encode(hasher.finalize()))
}
fn run(cmd: &str, args: &[&str]) -> Result<Outcome, Error> {
let mut cmd = std::process::Command::new(cmd);
let mut child = cmd
.args(args)
.stdout(std::process::Stdio::piped())
.spawn()?;
let mut output = String::new();
child.stdout.take().unwrap().read_to_string(&mut output)?;
Ok(Outcome {
status: child.wait()?,
output,
})
}
fn get_package<'r>(
metadata: &'r cargo_metadata::Metadata,
package_name: &str,
) -> Option<&'r cargo_metadata::Package> {
metadata.packages.iter().find(|p| p.name == package_name)
}
fn get_package_root<'r>(
metadata: &'r cargo_metadata::Metadata,
package_name: &str,
) -> Option<&'r std::path::Path> {
Some(
get_package(metadata, package_name)?
.targets
.first()
.expect("package must have at least one target")
.src_path
.ancestors()
.find(|p| p.join("Cargo.toml").exists())
.expect("package should have a Cargo.toml")
.as_std_path(),
)
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct CargoVcsInfo {
path_in_vcs: PathBuf,
git: CargoVcsInfoGit,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
struct CargoVcsInfoGit {
sha1: String,
}
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct ApiHashes {
pub rpc: String,
pub graphql: String,
pub wit: String,
}
impl VersionInfo {
pub fn get() -> Result<Self, Error> {
Self::trace_get(
std::path::Path::new(env!("CARGO_MANIFEST_DIR")),
&mut vec![],
)
}
fn trace_get(crate_dir: &std::path::Path, paths: &mut Vec<PathBuf>) -> Result<Self, Error> {
let metadata = cargo_metadata::MetadataCommand::new()
.current_dir(crate_dir)
.exec()?;
let crate_version = Pretty::new(
get_package(&metadata, env!("CARGO_PKG_NAME"))
.expect("this package must be in the dependency tree")
.version
.clone()
.into(),
);
let cargo_vcs_info_path = crate_dir.join(".cargo_vcs_info.json");
let api_hashes_path = crate_dir.join("api-hashes.json");
let mut git_dirty = false;
let git_commit = if let Ok(git_commit) = std::env::var("GIT_COMMIT") {
git_commit
} else if cargo_vcs_info_path.is_file() {
let cargo_vcs_info: CargoVcsInfo =
serde_json::from_reader(std::fs::File::open(cargo_vcs_info_path)?)?;
cargo_vcs_info.git.sha1
} else {
let git_outcome = run("git", &["rev-parse", "HEAD"])?;
if git_outcome.status.success() {
git_dirty = run("git", &["diff-index", "--quiet", "HEAD"])?
.status
.code()
== Some(1);
git_outcome.output[..10].to_owned()
} else {
format!("v{}", crate_version)
}
}
.into();
let api_hashes: ApiHashes = serde_json::from_reader(fs_err::File::open(api_hashes_path)?)?;
let rpc_hash = get_hash(
paths,
&metadata,
"linera-rpc",
"tests/snapshots/format__format.yaml.snap",
)
.unwrap_or(api_hashes.rpc)
.into();
let graphql_hash = get_hash(
paths,
&metadata,
"linera-service-graphql-client",
"gql/*.graphql",
)
.unwrap_or(api_hashes.graphql)
.into();
let wit_hash = get_hash(paths, &metadata, "linera-sdk", "wit/*.wit")
.unwrap_or(api_hashes.wit)
.into();
Ok(Self {
crate_version,
git_commit,
git_dirty,
rpc_hash,
graphql_hash,
wit_hash,
})
}
pub fn api_hashes(&self) -> ApiHashes {
ApiHashes {
rpc: self.rpc_hash.clone().into_owned(),
wit: self.wit_hash.clone().into_owned(),
graphql: self.graphql_hash.clone().into_owned(),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/version_info/mod.rs | linera-version/src/version_info/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod r#type;
pub use r#type::*;
pub static VERSION_INFO: VersionInfo = include!(env!("LINERA_VERSION_STATIC_PATH"));
use crate::serde_pretty::Pretty;
impl std::fmt::Display for VersionInfo {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
formatter,
"\
Linera protocol: v{crate_version}\n\
RPC API hash: {rpc_hash}\n\
GraphQL API hash: {graphql_hash}\n\
WIT API hash: {wit_hash}\n\
Source code: {repo}/tree/{git_commit}{git_dirty}\n\
",
repo = env!("CARGO_PKG_REPOSITORY"),
crate_version = self.crate_version,
rpc_hash = self.rpc_hash,
graphql_hash = self.graphql_hash,
wit_hash = self.wit_hash,
git_commit = self.git_commit,
git_dirty = if self.git_dirty { " (dirty)" } else { "" }
)
}
}
impl CrateVersion {
/// Whether this version is known to be API-compatible with `other`.
/// Note that this relation _is not_ symmetric.
pub fn is_compatible_with(&self, other: &CrateVersion) -> bool {
if self.major == 0 {
// Cargo conventions decree that if the major version is 0, minor versions
// denote backwards-incompatible changes and patch versions denote
// backwards-compatible changes.
self.minor == other.minor && self.patch <= other.patch
} else {
self.major == other.major && self.minor <= other.minor
}
}
}
async_graphql::scalar!(
CrateVersion,
"CrateVersion",
"The version of the Linera crates used in this build"
);
async_graphql::scalar!(
Pretty<CrateVersion, semver::Version>,
"CrateVersion",
"The version of the Linera crates used in this build"
);
impl VersionInfo {
/// Print a human-readable listing of the version information at `info` level.
pub fn log(&self) {
for line in format!("{self}").lines() {
tracing::info!("{line}");
}
}
/// A static string corresponding to `VersionInfo::default().to_string()` preceded by
/// a newline. The newline is meant for `clap` as in `#[command(version =
/// linera_version::VersionInfo::default_clap_str())]`
pub fn default_clap_str() -> &'static str {
use std::sync::LazyLock;
static STRING: LazyLock<String> = LazyLock::new(|| format!("\n{}", VersionInfo::default()));
STRING.as_str()
}
/// Whether this version is known to be (remote!) API-compatible with `other`.
/// Note that this relation _is not_ symmetric.
/// It also may give false negatives.
pub fn is_compatible_with(&self, other: &VersionInfo) -> bool {
self.api_hashes() == other.api_hashes()
|| self
.crate_version
.value
.is_compatible_with(&other.crate_version.value)
}
}
impl Default for VersionInfo {
fn default() -> Self {
VERSION_INFO.clone()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/serde_pretty/type.rs | linera-version/src/serde_pretty/type.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Pretty<T, Repr> {
pub value: T,
_phantom: std::marker::PhantomData<fn(Repr) -> Repr>,
}
impl<T, Repr> Pretty<T, Repr> {
pub const fn new(value: T) -> Self {
Pretty {
value,
_phantom: std::marker::PhantomData,
}
}
pub fn repr(self) -> Repr
where
Repr: From<T>,
{
Repr::from(self.value)
}
}
impl<T, Repr> std::fmt::Display for Pretty<T, Repr>
where
T: Clone,
Repr: std::fmt::Display + From<T>,
{
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "{}", Repr::from(self.value.clone()))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/src/serde_pretty/mod.rs | linera-version/src/serde_pretty/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod r#type;
pub use r#type::*;
use serde::{de::Deserialize, ser::Serialize};
impl<T, Repr> From<T> for Pretty<T, Repr> {
fn from(value: T) -> Self {
Self::new(value)
}
}
impl<T, Repr> Serialize for Pretty<T, Repr>
where
T: Serialize + Clone,
Repr: Serialize + From<T>,
{
fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
if serializer.is_human_readable() {
Repr::from(self.value.clone()).serialize(serializer)
} else {
self.value.serialize(serializer)
}
}
}
impl<'de, T, Repr> serde::de::Deserialize<'de> for Pretty<T, Repr>
where
T: Deserialize<'de> + From<Repr>,
Repr: Deserialize<'de>,
{
fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
if deserializer.is_human_readable() {
Ok(Pretty::new(Repr::deserialize(deserializer)?.into()))
} else {
Ok(Pretty::new(T::deserialize(deserializer)?))
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-version/tests/up_to_date.rs | linera-version/tests/up_to_date.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_version::VersionInfo;
#[test]
fn up_to_date() {
assert_eq!(
VersionInfo::get().unwrap().api_hashes(),
VersionInfo::default().api_hashes(),
"`linera-version` API hash cache out of date.\n\
Please update `linera-version/api-hashes.json` by running:\n\
$ cargo run -p linera-version > linera-version/api-hashes.json"
);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service-graphql-client/src/lib.rs | linera-service-graphql-client/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A GraphQL client for the node service.
mod service;
pub mod utils;
pub use service::*;
pub use utils::*;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service-graphql-client/src/service.rs | linera-service-graphql-client/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use graphql_client::GraphQLQuery;
use linera_base::{
crypto::CryptoHash,
data_types::{Amount, Blob, BlockHeight, ChainDescription, OracleResponse, Round, Timestamp},
identifiers::{AccountOwner, BlobId, ChainId, GenericApplicationId, StreamName},
};
use thiserror::Error;
pub type JSONObject = serde_json::Value;
#[cfg(target_arch = "wasm32")]
mod types {
use linera_base::data_types::Round;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use super::{BlockHeight, ChainId, CryptoHash};
pub type ChainManager = Value;
pub type ChainOwnership = Value;
pub type Epoch = Value;
pub type MessageBundle = Value;
pub type MessageKind = Value;
pub type Message = Value;
pub type MessageAction = Value;
pub type Operation = Value;
pub type Origin = Value;
pub type ApplicationDescription = Value;
pub type OperationResult = Value;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Notification {
pub chain_id: ChainId,
pub reason: Reason,
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
#[expect(clippy::enum_variant_names)]
pub enum Reason {
NewBlock {
height: BlockHeight,
hash: CryptoHash,
},
NewIncomingBundle {
origin: Origin,
height: BlockHeight,
},
NewRound {
height: BlockHeight,
round: Round,
},
}
}
#[cfg(not(target_arch = "wasm32"))]
mod types {
pub use linera_base::{
data_types::{ApplicationDescription, Epoch},
ownership::ChainOwnership,
};
pub use linera_chain::{
data_types::{IncomingBundle, MessageAction, MessageBundle, OperationResult, Transaction},
manager::ChainManager,
};
pub use linera_core::worker::{Notification, Reason};
pub use linera_execution::{Message, MessageKind, Operation, SystemOperation};
}
pub use types::*;
pub type ApplicationId = String;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone"
)]
pub struct Chain;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone"
)]
pub struct Chains;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone, PartialEq"
)]
pub struct Applications;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone, PartialEq"
)]
pub struct Blocks;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone, PartialEq"
)]
pub struct Block;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone, PartialEq"
)]
pub struct Notifications;
#[derive(GraphQLQuery)]
#[graphql(
schema_path = "gql/service_schema.graphql",
query_path = "gql/service_requests.graphql",
response_derives = "Debug, Serialize, Clone"
)]
pub struct Transfer;
#[derive(Error, Debug)]
pub enum ConversionError {
#[error(transparent)]
Serde(#[from] serde_json::Error),
#[error("Unexpected certificate type: {0}")]
UnexpectedCertificateType(String),
}
#[cfg(not(target_arch = "wasm32"))]
mod from {
use linera_base::{
data_types::{ApplicationPermissions, Event, TimeDelta},
identifiers::{Account, ApplicationId as RealApplicationId, ModuleId, StreamId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_chain::{
block::{Block, BlockBody, BlockHeader},
types::ConfirmedBlock,
};
use linera_execution::{
system::{AdminOperation, OpenChainConfig},
OutgoingMessage,
};
use super::*;
    /// Convert GraphQL system operation metadata to a SystemOperation object.
    ///
    /// The operation kind is carried in the `system_operation_type` discriminant
    /// string; the payload for each kind lives in a matching optional field.
    /// A missing or malformed payload is reported as
    /// [`ConversionError::UnexpectedCertificateType`].
    fn convert_system_operation(
        system_op: block::BlockBlockBlockBodyTransactionMetadataOperationSystemOperation,
    ) -> Result<SystemOperation, ConversionError> {
        match system_op.system_operation_type.as_str() {
            "Transfer" => {
                let transfer = system_op.transfer.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing transfer metadata for Transfer operation".to_string(),
                    )
                })?;
                Ok(SystemOperation::Transfer {
                    owner: transfer.owner,
                    recipient: Account {
                        chain_id: transfer.recipient.chain_id,
                        owner: transfer.recipient.owner,
                    },
                    amount: transfer.amount,
                })
            }
            "Claim" => {
                let claim = system_op.claim.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing claim metadata for Claim operation".to_string(),
                    )
                })?;
                Ok(SystemOperation::Claim {
                    owner: claim.owner,
                    target_id: claim.target_id,
                    recipient: Account {
                        chain_id: claim.recipient.chain_id,
                        owner: claim.recipient.owner,
                    },
                    amount: claim.amount,
                })
            }
            "OpenChain" => {
                let open_chain = system_op.open_chain.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing open_chain metadata for OpenChain operation".to_string(),
                    )
                })?;
                // Ownership and permissions are transported as embedded JSON strings.
                let ownership: ChainOwnership =
                    serde_json::from_str(&open_chain.ownership.ownership_json)
                        .map_err(ConversionError::Serde)?;
                let application_permissions: ApplicationPermissions =
                    serde_json::from_str(&open_chain.application_permissions.permissions_json)
                        .map_err(ConversionError::Serde)?;
                Ok(SystemOperation::OpenChain(OpenChainConfig {
                    ownership,
                    balance: open_chain.balance,
                    application_permissions,
                }))
            }
            // CloseChain carries no payload.
            "CloseChain" => Ok(SystemOperation::CloseChain),
            "ChangeOwnership" => {
                let change_ownership = system_op.change_ownership.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing change_ownership metadata for ChangeOwnership operation"
                            .to_string(),
                    )
                })?;
                // All `*_ms` fields arrive as decimal strings; parse them to u64
                // milliseconds and build microsecond-based `TimeDelta`s.
                let timeout_config = TimeoutConfig {
                    fast_round_duration: change_ownership
                        .timeout_config
                        .fast_round_ms
                        .as_ref()
                        .map(|s| {
                            s.parse::<u64>().map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid fast_round_ms value".to_string(),
                                )
                            })
                        })
                        .transpose()?
                        .map(|ms| TimeDelta::from_micros(ms * 1000)),
                    base_timeout: TimeDelta::from_micros(
                        change_ownership
                            .timeout_config
                            .base_timeout_ms
                            .parse::<u64>()
                            .map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid base_timeout_ms value".to_string(),
                                )
                            })?
                            * 1000,
                    ),
                    timeout_increment: TimeDelta::from_micros(
                        change_ownership
                            .timeout_config
                            .timeout_increment_ms
                            .parse::<u64>()
                            .map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid timeout_increment_ms value".to_string(),
                                )
                            })?
                            * 1000,
                    ),
                    fallback_duration: TimeDelta::from_micros(
                        change_ownership
                            .timeout_config
                            .fallback_duration_ms
                            .parse::<u64>()
                            .map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid fallback_duration_ms value".to_string(),
                                )
                            })?
                            * 1000,
                    ),
                };
                Ok(SystemOperation::ChangeOwnership {
                    super_owners: change_ownership.super_owners,
                    // Owner weights are also transported as strings.
                    owners: change_ownership
                        .owners
                        .into_iter()
                        .map(|ow| {
                            let weight = ow.weight.parse::<u64>().map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid owner weight value".to_string(),
                                )
                            })?;
                            Ok((ow.owner, weight))
                        })
                        .collect::<Result<Vec<_>, ConversionError>>()?,
                    first_leader: change_ownership.first_leader,
                    multi_leader_rounds: change_ownership.multi_leader_rounds as u32,
                    open_multi_leader_rounds: change_ownership.open_multi_leader_rounds,
                    timeout_config,
                })
            }
            "ChangeApplicationPermissions" => {
                let change_permissions =
                    system_op.change_application_permissions.ok_or_else(|| {
                        ConversionError::UnexpectedCertificateType(
                            "Missing change_application_permissions metadata".to_string(),
                        )
                    })?;
                let permissions: ApplicationPermissions =
                    serde_json::from_str(&change_permissions.permissions.permissions_json)
                        .map_err(ConversionError::Serde)?;
                Ok(SystemOperation::ChangeApplicationPermissions(permissions))
            }
            "PublishModule" => {
                let publish_module = system_op.publish_module.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing publish_module metadata for PublishModule operation".to_string(),
                    )
                })?;
                let module_id: ModuleId = publish_module.module_id.parse().map_err(|_| {
                    ConversionError::UnexpectedCertificateType(
                        "Invalid module_id format".to_string(),
                    )
                })?;
                Ok(SystemOperation::PublishModule { module_id })
            }
            "PublishDataBlob" => {
                let publish_data_blob = system_op.publish_data_blob.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing publish_data_blob metadata".to_string(),
                    )
                })?;
                Ok(SystemOperation::PublishDataBlob {
                    blob_hash: publish_data_blob.blob_hash,
                })
            }
            "VerifyBlob" => {
                let verify_blob = system_op.verify_blob.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing verify_blob metadata for VerifyBlob operation".to_string(),
                    )
                })?;
                let blob_id: BlobId = verify_blob.blob_id.parse().map_err(|_| {
                    ConversionError::UnexpectedCertificateType("Invalid blob_id format".to_string())
                })?;
                Ok(SystemOperation::VerifyBlob { blob_id })
            }
            "CreateApplication" => {
                let create_application = system_op.create_application.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing create_application metadata".to_string(),
                    )
                })?;
                let module_id: ModuleId = create_application.module_id.parse().map_err(|_| {
                    ConversionError::UnexpectedCertificateType(
                        "Invalid module_id format".to_string(),
                    )
                })?;
                // Binary payloads are hex-encoded in the GraphQL response.
                let parameters = hex::decode(create_application.parameters_hex).map_err(|_| {
                    ConversionError::UnexpectedCertificateType(
                        "Invalid hex in parameters_hex".to_string(),
                    )
                })?;
                let instantiation_argument =
                    hex::decode(create_application.instantiation_argument_hex).map_err(|_| {
                        ConversionError::UnexpectedCertificateType(
                            "Invalid hex in instantiation_argument_hex".to_string(),
                        )
                    })?;
                let required_application_ids = create_application
                    .required_application_ids
                    .into_iter()
                    .map(|id| {
                        id.parse::<RealApplicationId>().map_err(|_| {
                            ConversionError::UnexpectedCertificateType(
                                "Invalid required_application_id format".to_string(),
                            )
                        })
                    })
                    .collect::<Result<Vec<_>, _>>()?;
                Ok(SystemOperation::CreateApplication {
                    module_id,
                    parameters,
                    instantiation_argument,
                    required_application_ids,
                })
            }
            "Admin" => {
                let admin = system_op.admin.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing admin metadata for Admin operation".to_string(),
                    )
                })?;
                // Admin operations have their own nested discriminant string.
                let admin_op = match admin.admin_operation_type.as_str() {
                    "PublishCommitteeBlob" => {
                        let blob_hash = admin.blob_hash.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing blob_hash for PublishCommitteeBlob".to_string(),
                            )
                        })?;
                        AdminOperation::PublishCommitteeBlob { blob_hash }
                    }
                    "CreateCommittee" => {
                        let epoch_val = admin.epoch.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing epoch for CreateCommittee".to_string(),
                            )
                        })?;
                        let epoch = Epoch(epoch_val as u32);
                        let blob_hash = admin.blob_hash.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing blob_hash for CreateCommittee".to_string(),
                            )
                        })?;
                        AdminOperation::CreateCommittee { epoch, blob_hash }
                    }
                    "RemoveCommittee" => {
                        let epoch_val = admin.epoch.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing epoch for RemoveCommittee".to_string(),
                            )
                        })?;
                        let epoch = Epoch(epoch_val as u32);
                        AdminOperation::RemoveCommittee { epoch }
                    }
                    _ => {
                        return Err(ConversionError::UnexpectedCertificateType(format!(
                            "Unknown admin operation type: {}",
                            admin.admin_operation_type
                        )));
                    }
                };
                Ok(SystemOperation::Admin(admin_op))
            }
            "ProcessNewEpoch" => {
                let epoch_val = system_op.epoch.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing epoch for ProcessNewEpoch operation".to_string(),
                    )
                })?;
                Ok(SystemOperation::ProcessNewEpoch(Epoch(epoch_val as u32)))
            }
            "ProcessRemovedEpoch" => {
                let epoch_val = system_op.epoch.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing epoch for ProcessRemovedEpoch operation".to_string(),
                    )
                })?;
                Ok(SystemOperation::ProcessRemovedEpoch(Epoch(
                    epoch_val as u32,
                )))
            }
            "UpdateStreams" => {
                let update_streams = system_op.update_streams.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing update_streams metadata for UpdateStreams operation".to_string(),
                    )
                })?;
                let streams = update_streams
                    .into_iter()
                    .map(|stream| {
                        let stream_id_parsed: StreamId =
                            stream.stream_id.parse().map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid stream_id format".to_string(),
                                )
                            })?;
                        Ok((stream.chain_id, stream_id_parsed, stream.next_index as u32))
                    })
                    .collect::<Result<Vec<_>, ConversionError>>()?;
                Ok(SystemOperation::UpdateStreams(streams))
            }
            _ => Err(ConversionError::UnexpectedCertificateType(format!(
                "Unknown system operation type: {}",
                system_op.system_operation_type
            ))),
        }
    }
    /// Convert GraphQL transaction metadata to a Transaction object.
    ///
    /// `transaction_type` selects between an incoming message bundle
    /// (`"ReceiveMessages"`) and an executed operation (`"ExecuteOperation"`);
    /// any other discriminant is rejected.
    fn convert_transaction_metadata(
        metadata: block::BlockBlockBlockBodyTransactionMetadata,
    ) -> Result<Transaction, ConversionError> {
        match metadata.transaction_type.as_str() {
            "ReceiveMessages" => {
                let incoming_bundle = metadata.incoming_bundle.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing incoming_bundle for ReceiveMessages transaction".to_string(),
                    )
                })?;
                let bundle = IncomingBundle {
                    origin: incoming_bundle.origin,
                    bundle: MessageBundle {
                        height: incoming_bundle.bundle.height,
                        timestamp: incoming_bundle.bundle.timestamp,
                        certificate_hash: incoming_bundle.bundle.certificate_hash,
                        transaction_index: incoming_bundle.bundle.transaction_index as u32,
                        messages: incoming_bundle
                            .bundle
                            .messages
                            .into_iter()
                            .map(|msg| linera_chain::data_types::PostedMessage {
                                authenticated_owner: msg.authenticated_owner,
                                grant: msg.grant,
                                refund_grant_to: msg.refund_grant_to.map(|rgt| Account {
                                    chain_id: rgt.chain_id,
                                    owner: rgt.owner,
                                }),
                                kind: msg.kind,
                                index: msg.index as u32,
                                message: msg.message,
                            })
                            .collect(),
                    },
                    action: incoming_bundle.action,
                };
                Ok(Transaction::ReceiveMessages(bundle))
            }
            "ExecuteOperation" => {
                let graphql_operation = metadata.operation.ok_or_else(|| {
                    ConversionError::UnexpectedCertificateType(
                        "Missing operation for ExecuteOperation transaction".to_string(),
                    )
                })?;
                // Operations are themselves discriminated: system or user-defined.
                let operation = match graphql_operation.operation_type.as_str() {
                    "System" => {
                        let system_op = graphql_operation.system_operation.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing system_operation for System operation".to_string(),
                            )
                        })?;
                        let sys_op = convert_system_operation(system_op)?;
                        Operation::System(Box::new(sys_op))
                    }
                    "User" => {
                        let application_id = graphql_operation.application_id.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing application_id for User operation".to_string(),
                            )
                        })?;
                        let bytes_hex = graphql_operation.user_bytes_hex.ok_or_else(|| {
                            ConversionError::UnexpectedCertificateType(
                                "Missing user_bytes_hex for User operation".to_string(),
                            )
                        })?;
                        // Convert hex string to bytes
                        let bytes = hex::decode(bytes_hex).map_err(|_| {
                            ConversionError::UnexpectedCertificateType(
                                "Invalid hex in user_bytes_hex".to_string(),
                            )
                        })?;
                        Operation::User {
                            application_id: application_id.parse().map_err(|_| {
                                ConversionError::UnexpectedCertificateType(
                                    "Invalid application_id format".to_string(),
                                )
                            })?,
                            bytes,
                        }
                    }
                    _ => {
                        return Err(ConversionError::UnexpectedCertificateType(format!(
                            "Unknown operation type: {}",
                            graphql_operation.operation_type
                        )));
                    }
                };
                Ok(Transaction::ExecuteOperation(operation))
            }
            _ => Err(ConversionError::UnexpectedCertificateType(format!(
                "Unknown transaction type: {}",
                metadata.transaction_type
            ))),
        }
    }
impl From<block::BlockBlockBlockBodyMessages> for OutgoingMessage {
fn from(val: block::BlockBlockBlockBodyMessages) -> Self {
let block::BlockBlockBlockBodyMessages {
destination,
authenticated_owner,
grant,
refund_grant_to,
kind,
message,
} = val;
OutgoingMessage {
destination,
authenticated_owner,
grant,
refund_grant_to: refund_grant_to.map(|rgt| Account {
chain_id: rgt.chain_id,
owner: rgt.owner,
}),
kind,
message,
}
}
}
    impl TryFrom<block::BlockBlockBlock> for Block {
        type Error = ConversionError;
        /// Reassembles a [`Block`] from its GraphQL representation: header
        /// fields map across one-to-one, while the body needs transaction
        /// metadata conversion and JSON decoding for the per-chain maps.
        fn try_from(val: block::BlockBlockBlock) -> Result<Self, Self::Error> {
            let block::BlockBlockBlock { header, body } = val;
            let block::BlockBlockBlockHeader {
                chain_id,
                epoch,
                height,
                timestamp,
                authenticated_owner,
                previous_block_hash,
                state_hash,
                transactions_hash,
                messages_hash,
                previous_message_blocks_hash,
                previous_event_blocks_hash,
                oracle_responses_hash,
                events_hash,
                blobs_hash,
                operation_results_hash,
            } = header;
            let block::BlockBlockBlockBody {
                messages,
                previous_message_blocks,
                previous_event_blocks,
                oracle_responses,
                events,
                blobs,
                operation_results,
                transaction_metadata,
            } = body;
            let block_header = BlockHeader {
                chain_id,
                epoch,
                height,
                timestamp,
                authenticated_owner,
                previous_block_hash,
                state_hash,
                transactions_hash,
                messages_hash,
                previous_message_blocks_hash,
                previous_event_blocks_hash,
                oracle_responses_hash,
                events_hash,
                blobs_hash,
                operation_results_hash,
            };
            // Convert GraphQL transaction metadata to Transaction objects
            let transactions = transaction_metadata
                .into_iter()
                .map(convert_transaction_metadata)
                .collect::<Result<Vec<_>, _>>()?;
            let block_body = BlockBody {
                transactions,
                // Per-transaction message lists, converted element-wise.
                messages: messages
                    .into_iter()
                    .map(|messages| messages.into_iter().map(Into::into).collect())
                    .collect::<Vec<Vec<_>>>(),
                // These two maps are transported as opaque JSON values.
                previous_message_blocks: serde_json::from_value(previous_message_blocks)
                    .map_err(ConversionError::Serde)?,
                previous_event_blocks: serde_json::from_value(previous_event_blocks)
                    .map_err(ConversionError::Serde)?,
                oracle_responses: oracle_responses.into_iter().collect(),
                events: events
                    .into_iter()
                    .map(|events| events.into_iter().map(Into::into).collect())
                    .collect(),
                blobs: blobs
                    .into_iter()
                    .map(|blobs| blobs.into_iter().collect())
                    .collect(),
                operation_results,
            };
            Ok(Block {
                header: block_header,
                body: block_body,
            })
        }
    }
impl From<block::BlockBlockBlockBodyEvents> for Event {
fn from(event: block::BlockBlockBlockBodyEvents) -> Self {
Event {
stream_id: event.stream_id.into(),
index: event.index as u32,
value: event.value.into_iter().map(|byte| byte as u8).collect(),
}
}
}
impl From<block::BlockBlockBlockBodyEventsStreamId> for StreamId {
fn from(stream_id: block::BlockBlockBlockBodyEventsStreamId) -> Self {
StreamId {
application_id: stream_id.application_id,
stream_name: stream_id.stream_name,
}
}
}
impl TryFrom<block::BlockBlock> for ConfirmedBlock {
type Error = ConversionError;
fn try_from(val: block::BlockBlock) -> Result<Self, Self::Error> {
match (val.status.as_str(), val.block) {
("confirmed", block) => Ok(ConfirmedBlock::new(block.try_into()?)),
_ => Err(ConversionError::UnexpectedCertificateType(val.status)),
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service-graphql-client/src/utils.rs | linera-service-graphql-client/src/utils.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use graphql_client::{reqwest::post_graphql, GraphQLQuery};
use reqwest::Client;
use thiserror::Error;
/// Errors returned by the GraphQL request helper.
#[derive(Error, Debug)]
pub enum Error {
    /// The underlying HTTP round trip failed.
    #[error(transparent)]
    ReqwestError(#[from] reqwest::Error),
    /// The server answered, but with GraphQL-level errors instead of data.
    #[error("GraphQL errors: {0:?}")]
    GraphQLError(Vec<graphql_client::Error>),
}
impl From<Option<Vec<graphql_client::Error>>> for Error {
    /// Wraps the (possibly absent) error list of a GraphQL response;
    /// `None` becomes an empty list.
    fn from(val: Option<Vec<graphql_client::Error>>) -> Self {
        match val {
            Some(errors) => Self::GraphQLError(errors),
            None => Self::GraphQLError(Vec::new()),
        }
    }
}
/// Sends the GraphQL query or mutation `T` with the given `variables` to `url`
/// and returns the decoded response data.
///
/// # Errors
///
/// * [`Error::ReqwestError`] if the HTTP round trip fails.
/// * [`Error::GraphQLError`] if the response carries no `data` field; the
///   server-reported GraphQL errors (if any) are attached.
pub async fn request<T, V>(
    client: &Client,
    url: &str,
    variables: V,
) -> Result<T::ResponseData, Error>
where
    T: GraphQLQuery<Variables = V> + Send + Unpin + 'static,
    V: Send + Unpin,
{
    let response = post_graphql::<T, _>(client, url, variables).await?;
    match response.data {
        // No data at all means the request failed at the GraphQL level.
        None => Err(response.errors.into()),
        Some(data) => Ok(data),
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service-graphql-client/tests/test.rs | linera-service-graphql-client/tests/test.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(any(
feature = "storage-service",
feature = "dynamodb",
feature = "scylladb"
))]
use std::{collections::BTreeMap, str::FromStr, sync::LazyLock, time::Duration};
use fungible::{FungibleTokenAbi, InitialState};
use linera_base::{
data_types::Amount,
identifiers::{Account, AccountOwner, ChainId},
vm::VmRuntime,
};
use linera_service::cli_wrappers::{
local_net::{Database, LocalNetConfig, ProcessInbox},
LineraNet, LineraNetConfig, Network,
};
use linera_service_graphql_client::{
block, blocks, chains, request, transfer, Block, Blocks, Chains, Transfer,
};
use test_case::test_case;
use tokio::sync::Mutex;
/// A static lock to prevent integration tests from running in parallel.
pub static INTEGRATION_TEST_GUARD: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));
/// Builds the HTTP client shared by the test requests, with a 30-second
/// timeout on every call.
fn reqwest_client() -> reqwest::Client {
    let builder = reqwest::ClientBuilder::new().timeout(Duration::from_secs(30));
    builder.build().unwrap()
}
/// Issues a `Transfer` GraphQL mutation moving `amount` tokens from the chain
/// balance of `from` to the account `to`.
///
/// Test helper: panics if `amount` does not parse or if the request fails.
async fn transfer(client: &reqwest::Client, url: &str, from: ChainId, to: Account, amount: &str) {
    let variables = transfer::Variables {
        chain_id: from,
        // Transfer from the chain's own balance rather than a user account.
        owner: AccountOwner::CHAIN,
        recipient: transfer::Account {
            chain_id: to.chain_id,
            owner: to.owner,
        },
        amount: Amount::from_str(amount).unwrap(),
    };
    request::<Transfer, _>(client, url, variables)
        .await
        .unwrap();
}
/// End-to-end check of the GraphQL client queries against a freshly started
/// local network: publishes the `fungible` example application, performs ten
/// transfers, then exercises the `Chains`, `Blocks` and `Block` queries.
#[cfg_attr(feature = "storage-service", test_case(LocalNetConfig::new_test(Database::Service, Network::Grpc); "storage_service_grpc"))]
#[cfg_attr(feature = "scylladb", test_case(LocalNetConfig::new_test(Database::ScyllaDb, Network::Grpc) ; "scylladb_grpc"))]
#[cfg_attr(feature = "dynamodb", test_case(LocalNetConfig::new_test(Database::DynamoDb, Network::Grpc) ; "dynamodb_grpc"))]
#[test_log::test(tokio::test)]
async fn test_end_to_end_queries(config: impl LineraNetConfig) -> anyhow::Result<()> {
    let _guard = INTEGRATION_TEST_GUARD.lock().await;
    let (mut net, client) = config.instantiate().await?;
    let owner = client.get_owner().unwrap();
    // (default chain, sorted list of all chain ids) as recorded in the wallet.
    let mut node_chains = {
        let wallet = client.load_wallet()?;
        (wallet.default_chain(), wallet.chain_ids())
    };
    node_chains.1.sort();
    let chain0 = node_chains.0.unwrap();
    // publishing an application
    let (contract, service) = client.build_example("fungible").await?;
    let vm_runtime = VmRuntime::Wasm;
    let accounts = BTreeMap::from([(owner, Amount::from_tokens(9))]);
    let state = InitialState { accounts };
    let params = fungible::Parameters::new("FUN");
    let _application_id = client
        .publish_and_create::<FungibleTokenAbi, fungible::Parameters, InitialState>(
            contract,
            service,
            vm_runtime,
            // Fixed: this argument was corrupted to `¶ms` (mojibake for `&params`).
            &params,
            &state,
            &[],
            None,
        )
        .await?;
    let mut node_service = client
        .run_node_service(None, ProcessInbox::Automatic)
        .await?;
    let req_client = &reqwest_client();
    let url = &format!("http://localhost:{}/", node_service.port());
    // sending a few transfers
    let chain1 = Account::chain(node_chains.1[1]);
    for _ in 0..10 {
        transfer(req_client, url, chain0, chain1, "0.1").await;
    }
    // check chains query
    let mut chains = request::<Chains, _>(req_client, url, chains::Variables)
        .await?
        .chains;
    chains.list.sort();
    assert_eq!((chains.default, chains.list), node_chains);
    // check blocks query
    let blocks = request::<Blocks, _>(
        req_client,
        url,
        blocks::Variables {
            chain_id: chain0,
            from: None,
            limit: None,
        },
    )
    .await?
    .blocks;
    // One block per transfer issued above.
    assert_eq!(blocks.len(), 10);
    // check block query
    let _block = request::<Block, _>(
        &reqwest_client(),
        &format!("http://localhost:{}/", node_service.port()),
        block::Variables {
            chain_id: chain0,
            hash: None,
        },
    )
    .await?
    .block;
    node_service.ensure_is_running()?;
    net.terminate().await?;
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service-graphql-client/tests/test_check_service_schema.rs | linera-service-graphql-client/tests/test_check_service_schema.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::io::Read;
use linera_base::command::resolve_binary;
use tempfile::tempdir;
use tokio::process::Command;
/// Checks that the committed `gql/service_schema.graphql` file matches the
/// schema currently exported by the `linera-schema-export` binary, failing
/// with regeneration instructions otherwise.
#[test_log::test(tokio::test)]
async fn test_check_service_schema() {
    let tmp_dir = tempdir().unwrap();
    let path = resolve_binary("linera-schema-export", "linera-service")
        .await
        .unwrap();
    let mut command = Command::new(path);
    // Export the live schema by running the binary in a scratch directory;
    // the schema is printed on stdout.
    let output = command.current_dir(tmp_dir.path()).output().await.unwrap();
    let service_schema = String::from_utf8(output.stdout).unwrap();
    // Read the checked-in reference schema.
    let mut file_base = std::fs::File::open("gql/service_schema.graphql").unwrap();
    let mut graphql_schema = String::new();
    file_base.read_to_string(&mut graphql_schema).unwrap();
    similar_asserts::assert_eq!(
        graphql_schema,
        service_schema,
        "\nGraphQL service schema has changed -> \
        regenerate schema following steps in linera-service-graphql-client/README.md\n"
    )
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/build.rs | linera-base/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Build script: declares the `cfg` aliases used throughout the crate to
/// abbreviate common target/feature combinations (e.g. `#[cfg(with_metrics)]`).
fn main() {
    cfg_aliases::cfg_aliases! {
        // wasm32 with the `web` feature: browser builds.
        web: { all(target_arch = "wasm32", feature = "web") },
        // wasm32 without `web` — presumably the on-chain Wasm target.
        chain: { all(target_arch = "wasm32", not(web)) },
        with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
        with_reqwest: { feature = "reqwest" },
        with_testing: { any(test, feature = "test") },
        with_revm: { any(test, feature = "revm") },
        // the old version of `getrandom` we pin here is available on all targets, but
        // using it will panic if no suitable source of entropy is found
        with_getrandom: { any(web, not(target_arch = "wasm32")) },
    };
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/vm.rs | linera-base/src/vm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! The virtual machines being supported.
use std::str::FromStr;
use allocative::Allocative;
use async_graphql::scalar;
use derive_more::Display;
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::data_types::Amount;
#[derive(
Clone,
Copy,
Default,
Display,
Hash,
PartialEq,
Eq,
PartialOrd,
Ord,
Serialize,
Deserialize,
WitType,
WitStore,
WitLoad,
Debug,
Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
/// The virtual machine runtime
pub enum VmRuntime {
/// The Wasm virtual machine
#[default]
Wasm,
/// The Evm virtual machine
Evm,
}
impl FromStr for VmRuntime {
    type Err = InvalidVmRuntime;

    /// Parses the lowercase runtime names `"wasm"` and `"evm"`; anything else
    /// is returned inside [`InvalidVmRuntime`].
    fn from_str(string: &str) -> Result<Self, Self::Err> {
        if string == "wasm" {
            Ok(VmRuntime::Wasm)
        } else if string == "evm" {
            Ok(VmRuntime::Evm)
        } else {
            Err(InvalidVmRuntime(string.to_owned()))
        }
    }
}
scalar!(VmRuntime);
/// Error caused by invalid VM runtimes
#[derive(Clone, Debug, Error)]
#[error("{0:?} is not a valid virtual machine runtime")]
pub struct InvalidVmRuntime(String);
/// The possible types of queries for an EVM contract
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum EvmQuery {
/// A read-only query.
Query(Vec<u8>),
/// A request to schedule an operation that can mutate the application state.
Operation(Vec<u8>),
/// A request to schedule operations that can mutate the application state.
Operations(Vec<Vec<u8>>),
}
/// An EVM operation containing a value and argument data.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct EvmOperation {
/// The amount being transferred.
pub value: alloy_primitives::U256,
/// The encoded argument data.
pub argument: Vec<u8>,
}
impl EvmOperation {
    /// Creates an EVM operation with a specified amount and function input.
    pub fn new(amount: Amount, argument: Vec<u8>) -> Self {
        Self {
            value: amount.into(),
            argument,
        }
    }
    /// Converts the input to a `Vec<u8>` if possible.
    ///
    /// The operation is BCS-serialized; fails only if BCS serialization fails.
    pub fn to_bytes(&self) -> Result<Vec<u8>, bcs::Error> {
        bcs::to_bytes(&self)
    }
    /// Creates an `EvmQuery` from the input.
    ///
    /// The BCS-serialized operation is wrapped in [`EvmQuery::Operation`].
    pub fn to_evm_query(&self) -> Result<EvmQuery, bcs::Error> {
        Ok(EvmQuery::Operation(self.to_bytes()?))
    }
}
/// The instantiation argument to EVM smart contracts.
/// `value` is the amount being transferred.
#[derive(Default, Serialize, Deserialize)]
pub struct EvmInstantiation {
/// The initial value put in the instantiation of the contract.
pub value: alloy_primitives::U256,
/// The input to the `fn instantiate` of the EVM smart contract.
pub argument: Vec<u8>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/hashed.rs | linera-base/src/hashed.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A wrapper for hashable types to memoize the hash.
use std::borrow::Cow;
use allocative::Allocative;
use custom_debug_derive::Debug;
use serde::{Deserialize, Serialize};
use crate::crypto::{BcsHashable, CryptoHash};
/// Wrapper type around hashed instance of `T` type.
/// Wrapper type around hashed instance of `T` type.
#[derive(Debug, Allocative)]
pub struct Hashed<T> {
    // The wrapped value whose hash is memoized below.
    value: T,
    /// Hash of the value (used as key for storage).
    hash: CryptoHash,
}
impl<T> Hashed<T> {
/// Creates an instance of [`Hashed`] with the given `hash` value.
///
/// Note on usage: This method is unsafe because it allows the caller to create a Hashed
/// with a hash that doesn't match the value. This is necessary for the rewrite state when
/// signers sign over old `Certificate` type.
pub fn unchecked_new(value: T, hash: CryptoHash) -> Self {
Self { value, hash }
}
/// Creates an instance of [`Hashed`] with the given `value`.
///
/// Note: Contrary to its `unchecked_new` counterpart, this method is safe because it
/// calculates the hash from the value.
pub fn new<'de>(value: T) -> Self
where
T: BcsHashable<'de>,
{
let hash = CryptoHash::new(&value);
Self { value, hash }
}
/// Returns the hash.
pub fn hash(&self) -> CryptoHash {
self.hash
}
/// Returns a reference to the value, without the hash.
pub fn inner(&self) -> &T {
&self.value
}
/// Consumes the hashed value and returns the value without the hash.
pub fn into_inner(self) -> T {
self.value
}
}
impl<T: Serialize> Serialize for Hashed<T> {
    /// Serializes only the inner value; the hash is recomputed on
    /// deserialization (see the `Deserialize` impl below).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.value.serialize(serializer)
    }
}
impl<'de, T: BcsHashable<'de>> Deserialize<'de> for Hashed<T> {
    /// Deserializes the inner value and rehashes it via [`Hashed::new`], so the
    /// memoized hash is always consistent with the value.
    fn deserialize<D>(deserializer: D) -> Result<Hashed<T>, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        Ok(Hashed::new(T::deserialize(deserializer)?))
    }
}
impl<T: Clone> Clone for Hashed<T> {
    fn clone(&self) -> Self {
        Self {
            value: self.value.clone(),
            // Reuse the memoized hash instead of recomputing it.
            hash: self.hash,
        }
    }
}
impl<T: async_graphql::OutputType> async_graphql::TypeName for Hashed<T> {
    /// GraphQL type name is `Hashed` prefixed to the inner type's name,
    /// e.g. `HashedConfirmedBlock`.
    fn type_name() -> Cow<'static, str> {
        format!("Hashed{}", T::type_name()).into()
    }
}
// NOTE: only `//` comments here — `///` docs on these items would become field
// descriptions in the exported GraphQL schema.
#[async_graphql::Object(cache_control(no_cache), name_type)]
impl<T: async_graphql::OutputType + Clone> Hashed<T> {
    // Exposed in GraphQL as `hash`: the memoized hash of the value.
    #[graphql(derived(name = "hash"))]
    async fn _hash(&self) -> CryptoHash {
        self.hash()
    }
    // Exposed in GraphQL as `value`: a clone of the wrapped value.
    #[graphql(derived(name = "value"))]
    async fn _value(&self) -> T {
        self.inner().clone()
    }
}
impl<T> PartialEq for Hashed<T> {
    // Equality is decided by the memoized hash alone; the wrapped values are
    // never compared directly, so `T: PartialEq` is not required.
    fn eq(&self, other: &Self) -> bool {
        self.hash() == other.hash()
    }
}
impl<T> Eq for Hashed<T> {}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/task_processor.rs | linera-base/src/task_processor.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types related to the task processor features in the node service.
use async_graphql::scalar;
use serde::{Deserialize, Serialize};
use crate::data_types::Timestamp;
/// The off-chain actions requested by the service of an on-chain application.
///
/// On-chain applications should be ready to respond to GraphQL queries of the form:
/// ```ignore
/// query {
/// nextActions(lastRequestedCallback: Timestamp, now: Timestamp!): ProcessorActions!
/// }
///
/// query {
/// processTaskOutcome(outcome: TaskOutcome!)
/// }
/// ```
#[derive(Default, Debug, Serialize, Deserialize)]
pub struct ProcessorActions {
/// The application is requesting to be called back no later than the given timestamp.
pub request_callback: Option<Timestamp>,
/// The application is requesting the execution of the given tasks.
pub execute_tasks: Vec<Task>,
}
scalar!(ProcessorActions);
/// An off-chain task requested by an on-chain application.
#[derive(Debug, Serialize, Deserialize)]
pub struct Task {
/// The operator handling the task.
pub operator: String,
/// The input argument in JSON.
pub input: String,
}
/// The result of executing an off-chain operator.
#[derive(Debug, Serialize, Deserialize)]
pub struct TaskOutcome {
/// The operator handling the task.
pub operator: String,
/// The JSON output.
pub output: String,
}
scalar!(TaskOutcome);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/lib.rs | linera-base/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides a common set of types and library functions that are shared
//! between the Linera protocol (compiled from Rust to native code) and Linera
//! applications (compiled from Rust to Wasm).
#![deny(missing_docs)]
#![allow(async_fn_in_trait)]
use std::fmt;
#[doc(hidden)]
pub use async_trait::async_trait;
#[cfg(all(not(target_arch = "wasm32"), unix))]
use tokio::signal::unix;
#[cfg(not(target_arch = "wasm32"))]
use {::tracing::debug, tokio_util::sync::CancellationToken};
pub mod abi;
#[cfg(not(target_arch = "wasm32"))]
pub mod command;
pub mod crypto;
pub mod data_types;
pub mod dyn_convert;
mod graphql;
pub mod hashed;
pub mod http;
pub mod identifiers;
mod limited_writer;
pub mod ownership;
#[cfg(not(target_arch = "wasm32"))]
pub mod port;
#[cfg(with_metrics)]
pub mod prometheus_util;
#[cfg(not(chain))]
pub mod task;
pub mod task_processor;
pub mod time;
#[cfg(test)]
mod unit_tests;
pub mod util;
pub mod vm;
pub use graphql::BcsHexParseError;
#[doc(hidden)]
pub use {async_graphql, bcs, hex};
/// A macro for asserting that a condition is true, returning an error if it is not.
///
/// # Examples
///
/// ```
/// # use linera_base::ensure;
/// fn divide(x: i32, y: i32) -> Result<i32, String> {
/// ensure!(y != 0, "division by zero");
/// Ok(x / y)
/// }
///
/// assert_eq!(divide(10, 2), Ok(5));
/// assert_eq!(divide(10, 0), Err(String::from("division by zero")));
/// ```
#[macro_export]
macro_rules! ensure {
($cond:expr, $e:expr) => {
if !($cond) {
return Err($e.into());
}
};
}
/// Formats a byte sequence as a hexadecimal string, and elides bytes in the middle if it is longer
/// than 32 bytes.
///
/// This function is intended to be used with the `#[debug(with = "hex_debug")]` field
/// annotation of `custom_debug_derive::Debug`.
///
/// # Examples
///
/// ```
/// # use linera_base::hex_debug;
/// use custom_debug_derive::Debug;
///
/// #[derive(Debug)]
/// struct Message {
/// #[debug(with = "hex_debug")]
/// bytes: Vec<u8>,
/// }
///
/// let msg = Message {
/// bytes: vec![0x12, 0x34, 0x56, 0x78],
/// };
///
/// assert_eq!(format!("{:?}", msg), "Message { bytes: 12345678 }");
///
/// let long_msg = Message {
/// bytes: b" 10 20 30 40 50".to_vec(),
/// };
///
/// assert_eq!(
/// format!("{:?}", long_msg),
/// "Message { bytes: 20202020202020203130202020202020..20202020343020202020202020203530 }"
/// );
/// ```
pub fn hex_debug<T: AsRef<[u8]>>(bytes: &T, f: &mut fmt::Formatter) -> fmt::Result {
    // Keep at most this many bytes from each end when eliding the middle.
    const ELIDE_AFTER: usize = 16;
    let data = bytes.as_ref();
    if data.len() > 2 * ELIDE_AFTER {
        // Too long: show the first and last `ELIDE_AFTER` bytes with `..` in between.
        let head = &data[..ELIDE_AFTER];
        let tail = &data[data.len() - ELIDE_AFTER..];
        write!(f, "{}..{}", hex::encode(head), hex::encode(tail))
    } else {
        // Short enough to print in full.
        write!(f, "{}", hex::encode(data))
    }
}
/// Applies `hex_debug` to a slice of byte vectors.
///
/// # Examples
///
/// ```
/// # use linera_base::hex_vec_debug;
/// use custom_debug_derive::Debug;
///
/// #[derive(Debug)]
/// struct Messages {
/// #[debug(with = "hex_vec_debug")]
/// byte_vecs: Vec<Vec<u8>>,
/// }
///
/// let msgs = Messages {
/// byte_vecs: vec![vec![0x12, 0x34, 0x56, 0x78], vec![0x9A]],
/// };
///
/// assert_eq!(
/// format!("{:?}", msgs),
/// "Messages { byte_vecs: [12345678, 9a] }"
/// );
/// ```
#[expect(clippy::ptr_arg)] // This only works with custom_debug_derive if it's &Vec.
pub fn hex_vec_debug(list: &Vec<Vec<u8>>, f: &mut fmt::Formatter) -> fmt::Result {
    // Render as a comma-separated list of hex strings, e.g. `[12345678, 9a]`.
    write!(f, "[")?;
    let mut first = true;
    for bytes in list {
        if !first {
            write!(f, ", ")?;
        }
        first = false;
        hex_debug(bytes, f)?;
    }
    write!(f, "]")
}
/// Helper function for allocative.
pub fn visit_allocative_simple<T>(_: &T, visitor: &mut allocative::Visitor<'_>) {
    // Records only the shallow `size_of::<T>()`; heap data owned by `T` is not
    // traversed, so this is intended for fields treated as opaque/simple.
    visitor.visit_simple_sized::<T>();
}
/// Listens for shutdown signals, and notifies the [`CancellationToken`] if one is
/// received.
#[cfg(not(target_arch = "wasm32"))]
pub async fn listen_for_shutdown_signals(shutdown_sender: CancellationToken) {
    // The drop guard cancels the token when this function returns *or* when the
    // task is dropped, so the token fires even on abnormal exit paths.
    let _shutdown_guard = shutdown_sender.drop_guard();
    #[cfg(unix)]
    {
        // On Unix, treat SIGINT, SIGTERM and SIGHUP as shutdown requests.
        let mut sigint =
            unix::signal(unix::SignalKind::interrupt()).expect("Failed to set up SIGINT handler");
        let mut sigterm =
            unix::signal(unix::SignalKind::terminate()).expect("Failed to set up SIGTERM handler");
        let mut sighup =
            unix::signal(unix::SignalKind::hangup()).expect("Failed to set up SIGHUP handler");
        tokio::select! {
            _ = sigint.recv() => debug!("Received SIGINT"),
            _ = sigterm.recv() => debug!("Received SIGTERM"),
            _ = sighup.recv() => debug!("Received SIGHUP"),
        }
    }
    #[cfg(windows)]
    {
        // Windows only has the Ctrl+C notification.
        tokio::signal::ctrl_c()
            .await
            .expect("Failed to set up Ctrl+C handler");
        debug!("Received Ctrl+C");
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/command.rs | linera-base/src/command.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Command functionality used for spawning child processes.
use std::{
path::{Path, PathBuf},
process::Stdio,
};
use anyhow::{bail, ensure, Context, Result};
use async_trait::async_trait;
use tokio::process::Command;
use tracing::{debug, error};
/// Attempts to resolve the path and test the version of the given binary against our
/// package version.
///
/// This is meant for binaries of the Linera repository. We use the current running binary
/// to locate the parent directory where to look for the given name.
pub async fn resolve_binary(name: &'static str, package: &'static str) -> Result<PathBuf> {
    // Search next to the currently running executable.
    let current_binary = std::env::current_exe()?;
    resolve_binary_in_same_directory_as(&current_binary, name, package).await
}
/// Obtains the current binary parent
pub fn current_binary_parent() -> Result<PathBuf> {
    let current_binary = std::env::current_exe()?;
    binary_parent(&current_binary)
}
/// Retrieves the path from the binary parent.
pub fn binary_parent(current_binary: &Path) -> Result<PathBuf> {
    // Canonicalize first so relative paths and symlinks resolve to the real
    // on-disk location before the parent directory is taken.
    let mut current_binary_parent = current_binary
        .canonicalize()
        .with_context(|| format!("Failed to canonicalize '{}'", current_binary.display()))?;
    current_binary_parent.pop();
    #[cfg(with_testing)]
    // Test binaries are typically in target/debug/deps while crate binaries are in target/debug
    // (same thing for target/release).
    let current_binary_parent = if current_binary_parent.ends_with("target/debug/deps")
        || current_binary_parent.ends_with("target/release/deps")
    {
        PathBuf::from(current_binary_parent.parent().unwrap())
    } else {
        current_binary_parent
    };
    Ok(current_binary_parent)
}
/// Same as [`resolve_binary`] but gives the option to specify a binary path to use as
/// reference. The path may be relative or absolute but it must point to a valid file on
/// disk.
///
/// Returns an error if the binary cannot be located, its directory cannot be
/// resolved, or its `--version` output does not match this package's version.
pub async fn resolve_binary_in_same_directory_as<P: AsRef<Path>>(
    current_binary: P,
    name: &'static str,
    package: &'static str,
) -> Result<PathBuf> {
    let current_binary = current_binary.as_ref();
    debug!(
        "Resolving binary {name} based on the current binary path: {}",
        current_binary.display()
    );
    // Propagate the error instead of panicking: this function already returns a
    // `Result`, so a bad reference path should surface as `Err`, not abort.
    let current_binary_parent = binary_parent(current_binary)
        .context("Failed to determine the directory of the current binary")?;
    let binary = current_binary_parent.join(name);
    let version = format!("v{}", env!("CARGO_PKG_VERSION"));
    if !binary.exists() {
        error!(
            "Cannot find a binary {name} in the directory {}. \
             Consider using `cargo install {package}` or `cargo build -p {package}`",
            current_binary_parent.display()
        );
        bail!("Failed to resolve binary {name}");
    }
    // Quick version check: run `<binary> --version` and compare against our own version.
    debug!("Checking the version of {}", binary.display());
    let version_message = Command::new(&binary)
        .arg("--version")
        .output()
        .await
        .with_context(|| {
            format!(
                "Failed to execute and retrieve version from the binary {name} in directory {}",
                current_binary_parent.display()
            )
        })?
        .stdout;
    let version_message = String::from_utf8_lossy(&version_message);
    let found_version = parse_version_message(&version_message);
    if version != found_version {
        error!("The binary {name} in directory {} should have version {version} (found {found_version}). \
                Consider using `cargo install {package} --version '{version}'` or `cargo build -p {package}`",
            current_binary_parent.display()
        );
        bail!("Incorrect version for binary {name}");
    }
    debug!("{} has version {version}", binary.display());
    Ok(binary)
}
/// Obtains the version from the message.
pub fn parse_version_message(message: &str) -> String {
    // The version is expected to be the last space-separated token on the
    // *second* line of the `--version` output; missing lines yield "".
    message
        .lines()
        .nth(1)
        .unwrap_or_default()
        .trim()
        .rsplit(' ')
        .next()
        .expect("splitting strings gives non-empty lists")
        .to_string()
}
/// Extension trait for [`tokio::process::Command`].
#[async_trait]
pub trait CommandExt: std::fmt::Debug {
    /// Similar to [`tokio::process::Command::spawn`] but sets `kill_on_drop` to `true`.
    /// Errors are tagged with a description of the command.
    fn spawn_into(&mut self) -> anyhow::Result<tokio::process::Child>;
    /// Similar to [`tokio::process::Command::output`] but does not capture `stderr` and
    /// returns the `stdout` as a string. Errors are tagged with a description of the
    /// command.
    async fn spawn_and_wait_for_stdout(&mut self) -> anyhow::Result<String>;
    /// Spawns and waits for process to finish executing.
    /// Will not wait for stdout, use `spawn_and_wait_for_stdout` for that
    async fn spawn_and_wait(&mut self) -> anyhow::Result<()>;
    /// Description used for error reporting.
    fn description(&self) -> String {
        // Relies on the `Debug` supertrait bound to render the full command line.
        format!("While executing {:?}", self)
    }
}
#[async_trait]
impl CommandExt for tokio::process::Command {
    fn spawn_into(&mut self) -> anyhow::Result<tokio::process::Child> {
        // Ensure the child is killed if the handle is dropped before it exits.
        self.kill_on_drop(true);
        debug!("Spawning {:?}", self);
        let child = tokio::process::Command::spawn(self).with_context(|| self.description())?;
        Ok(child)
    }
    async fn spawn_and_wait_for_stdout(&mut self) -> anyhow::Result<String> {
        debug!("Spawning and waiting for {:?}", self);
        // Capture stdout for the return value; let stderr stream through to the
        // parent so diagnostics remain visible while the child runs.
        self.stdout(Stdio::piped());
        self.stderr(Stdio::inherit());
        self.kill_on_drop(true);
        let child = self.spawn().with_context(|| self.description())?;
        let output = child
            .wait_with_output()
            .await
            .with_context(|| self.description())?;
        // A non-zero exit status is treated as an error, not a valid result.
        ensure!(
            output.status.success(),
            "{}: got non-zero error code {}. Stderr: \n{:?}\n",
            self.description(),
            output.status,
            String::from_utf8(output.stderr),
        );
        String::from_utf8(output.stdout).with_context(|| self.description())
    }
    async fn spawn_and_wait(&mut self) -> anyhow::Result<()> {
        debug!("Spawning and waiting for {:?}", self);
        self.kill_on_drop(true);
        let mut child = self.spawn().with_context(|| self.description())?;
        let status = child.wait().await.with_context(|| self.description())?;
        ensure!(
            status.success(),
            "{}: got non-zero error code {}",
            self.description(),
            status
        );
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/http.rs | linera-base/src/http.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types used when performing HTTP requests.
use allocative::Allocative;
use custom_debug_derive::Debug;
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Serialize};
use crate::hex_debug;
/// An HTTP request.
#[derive(Clone, Debug, Eq, PartialEq, WitLoad, WitStore, WitType)]
#[witty(name = "http-request")]
pub struct Request {
    /// The [`Method`] used for the HTTP request.
    pub method: Method,
    /// The URL this request is addressed to.
    pub url: String,
    /// The headers that should be included in the request.
    pub headers: Vec<Header>,
    /// The body of the request.
    #[debug(with = "hex_debug")]
    pub body: Vec<u8>,
}
impl Request {
    /// Creates an HTTP GET [`Request`] for a `url`.
    pub fn get(url: impl Into<String>) -> Self {
        Self {
            method: Method::Get,
            url: url.into(),
            headers: Vec::new(),
            body: Vec::new(),
        }
    }
    /// Creates an HTTP POST [`Request`] for a `url` with a `payload` that's arbitrary bytes.
    pub fn post(url: impl Into<String>, payload: impl Into<Vec<u8>>) -> Self {
        Self {
            method: Method::Post,
            url: url.into(),
            headers: Vec::new(),
            body: payload.into(),
        }
    }
    /// Creates an HTTP POST [`Request`] for a `url` with a body that's the `payload` serialized to
    /// JSON.
    pub fn post_json(
        url: impl Into<String>,
        payload: &impl Serialize,
    ) -> Result<Self, serde_json::Error> {
        let body = serde_json::to_vec(payload)?;
        Ok(Self {
            method: Method::Post,
            url: url.into(),
            headers: vec![Header::new("Content-Type", b"application/json")],
            body,
        })
    }
    /// Adds a header to this [`Request`].
    pub fn with_header(mut self, name: impl Into<String>, value: impl Into<Vec<u8>>) -> Self {
        self.headers.push(Header::new(name, value));
        self
    }
}
/// The method used in an HTTP request.
#[derive(Clone, Copy, Debug, Eq, PartialEq, WitLoad, WitStore, WitType)]
#[witty(name = "http-method")]
pub enum Method {
    /// A GET request.
    Get,
    /// A POST request.
    Post,
    /// A PUT request.
    Put,
    /// A DELETE request.
    Delete,
    /// A HEAD request.
    Head,
    /// An OPTIONS request.
    Options,
    /// A CONNECT request.
    Connect,
    /// A PATCH request.
    Patch,
    /// A TRACE request.
    Trace,
}
#[cfg(with_reqwest)]
impl From<Method> for reqwest::Method {
fn from(method: Method) -> Self {
match method {
Method::Get => reqwest::Method::GET,
Method::Post => reqwest::Method::POST,
Method::Put => reqwest::Method::PUT,
Method::Delete => reqwest::Method::DELETE,
Method::Head => reqwest::Method::HEAD,
Method::Options => reqwest::Method::OPTIONS,
Method::Connect => reqwest::Method::CONNECT,
Method::Patch => reqwest::Method::PATCH,
Method::Trace => reqwest::Method::TRACE,
}
}
}
/// A response for an HTTP request.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
Hash,
PartialEq,
Serialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
#[witty(name = "http-response")]
pub struct Response {
/// The status code of the HTTP response.
pub status: u16,
/// The headers included in the response.
pub headers: Vec<Header>,
/// The body of the response.
#[debug(with = "hex_debug")]
#[serde(with = "serde_bytes")]
pub body: Vec<u8>,
}
impl Response {
    /// Creates an HTTP [`Response`] with a user defined `status_code`.
    pub fn new(status_code: u16) -> Self {
        Self {
            status: status_code,
            headers: Vec::new(),
            body: Vec::new(),
        }
    }
    /// Creates an HTTP [`Response`] with an OK status code and the provided `body`.
    pub fn ok(body: impl Into<Vec<u8>>) -> Self {
        Self {
            status: 200,
            headers: Vec::new(),
            body: body.into(),
        }
    }
    /// Creates an HTTP [`Response`] with an Unauthorized status code.
    pub fn unauthorized() -> Self {
        Self::new(401)
    }
    /// Adds a header to this [`Response`].
    pub fn with_header(mut self, name: impl Into<String>, value: impl Into<Vec<u8>>) -> Self {
        self.headers.push(Header::new(name, value));
        self
    }
}
/// A header for an HTTP request or response.
#[derive(
Clone,
Debug,
Deserialize,
Eq,
Hash,
PartialEq,
Serialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
#[witty(name = "http-header")]
pub struct Header {
/// The header name.
pub name: String,
/// The value of the header.
#[debug(with = "hex_debug")]
#[serde(with = "serde_bytes")]
pub value: Vec<u8>,
}
impl Header {
/// Creates a new [`Header`] with the provided `name` and `value`.
pub fn new(name: impl Into<String>, value: impl Into<Vec<u8>>) -> Self {
Header {
name: name.into(),
value: value.into(),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/identifiers.rs | linera-base/src/identifiers.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Core identifiers used by the Linera protocol.
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use allocative::Allocative;
#[cfg(with_revm)]
use alloy_primitives::{Address, B256};
use anyhow::{anyhow, Context};
use async_graphql::{InputObject, SimpleObject};
use custom_debug_derive::Debug;
use derive_more::{Display, FromStr};
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::{
bcs_scalar,
crypto::{
AccountPublicKey, CryptoError, CryptoHash, Ed25519PublicKey, EvmPublicKey,
Secp256k1PublicKey,
},
data_types::{BlobContent, ChainDescription},
doc_scalar, hex_debug,
vm::VmRuntime,
};
/// An account owner.
#[derive(
Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd, WitLoad, WitStore, WitType, Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
// TODO(#5166) we can be more specific here
#[cfg_attr(
web,
derive(tsify::Tsify),
tsify(from_wasm_abi, into_wasm_abi, type = "string")
)]
pub enum AccountOwner {
/// Short addresses reserved for the protocol.
Reserved(u8),
/// 32-byte account address.
Address32(CryptoHash),
/// 20-byte account EVM-compatible address.
Address20([u8; 20]),
}
impl fmt::Debug for AccountOwner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Reserved(byte) => f.debug_tuple("Reserved").field(byte).finish(),
Self::Address32(hash) => write!(f, "Address32({:?})", hash),
Self::Address20(bytes) => write!(f, "Address20({})", hex::encode(bytes)),
}
}
}
impl AccountOwner {
    /// Returns the default chain address.
    pub const CHAIN: AccountOwner = AccountOwner::Reserved(0);
    /// Tests if the account is the chain address.
    pub fn is_chain(&self) -> bool {
        *self == AccountOwner::CHAIN
    }
    /// The size of the `AccountOwner`.
    pub fn size(&self) -> u32 {
        // Size in bytes of the address payload for each variant.
        match self {
            AccountOwner::Reserved(_) => 1,
            AccountOwner::Address20(_) => 20,
            AccountOwner::Address32(_) => 32,
        }
    }
    /// Gets the EVM address if possible
    #[cfg(with_revm)]
    pub fn to_evm_address(&self) -> Option<Address> {
        if let AccountOwner::Address20(address) = self {
            Some(Address::from(address))
        } else {
            None
        }
    }
}
#[cfg(with_revm)]
impl From<Address> for AccountOwner {
fn from(address: Address) -> Self {
let address = address.into_array();
AccountOwner::Address20(address)
}
}
#[cfg(with_testing)]
impl From<CryptoHash> for AccountOwner {
fn from(address: CryptoHash) -> Self {
AccountOwner::Address32(address)
}
}
/// An account.
#[derive(
Debug,
PartialEq,
Eq,
Hash,
Copy,
Clone,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
SimpleObject,
InputObject,
Allocative,
)]
#[graphql(name = "AccountOutput", input_name = "Account")]
#[cfg_attr(web, derive(tsify::Tsify), tsify(from_wasm_abi, into_wasm_abi))]
pub struct Account {
/// The chain of the account.
pub chain_id: ChainId,
/// The owner of the account.
pub owner: AccountOwner,
}
impl Account {
    /// Creates a new [`Account`] with the given chain ID and owner.
    pub fn new(chain_id: ChainId, owner: AccountOwner) -> Self {
        Self { chain_id, owner }
    }
    /// Creates an [`Account`] representing the balance shared by a chain's owners.
    pub fn chain(chain_id: ChainId) -> Self {
        Self::new(chain_id, AccountOwner::CHAIN)
    }
    /// An address used exclusively for tests
    #[cfg(with_testing)]
    pub fn burn_address(chain_id: ChainId) -> Self {
        // Derived from a fixed test hash, so it is stable across runs.
        Self::new(chain_id, CryptoHash::test_hash("burn").into())
    }
}
impl fmt::Display for Account {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.chain_id, self.owner)
}
}
impl std::str::FromStr for Account {
    type Err = anyhow::Error;
    fn from_str(string: &str) -> Result<Self, Self::Err> {
        // Split into at most two pieces: the chain ID and an optional owner part.
        let mut parts = string.splitn(2, ':');
        let chain_part = parts.next().context(
            "Expecting an account formatted as `chain-id` or `chain-id:owner-type:address`",
        )?;
        let chain_id = chain_part.parse()?;
        match parts.next() {
            // `chain-id:owner` form: parse the owner explicitly.
            Some(owner_string) => Ok(Account::new(chain_id, owner_string.parse::<AccountOwner>()?)),
            // Bare `chain-id`: use the shared chain balance account.
            None => Ok(Account::chain(chain_id)),
        }
    }
}
/// The unique identifier (UID) of a chain. This is currently computed as the hash value
/// of a [`ChainDescription`].
#[derive(
Eq,
PartialEq,
Ord,
PartialOrd,
Copy,
Clone,
Hash,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
#[cfg_attr(with_testing, derive(Default))]
#[cfg_attr(web, derive(tsify::Tsify), tsify(from_wasm_abi, into_wasm_abi))]
pub struct ChainId(pub CryptoHash);
/// The type of the blob.
/// Should be a 1:1 mapping of the types in `Blob`.
#[derive(
Eq,
PartialEq,
Ord,
PartialOrd,
Clone,
Copy,
Hash,
Debug,
Serialize,
Deserialize,
WitType,
WitStore,
WitLoad,
Default,
Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
pub enum BlobType {
/// A generic data blob.
#[default]
Data,
/// A blob containing compressed contract Wasm bytecode.
ContractBytecode,
/// A blob containing compressed service Wasm bytecode.
ServiceBytecode,
/// A blob containing compressed EVM bytecode.
EvmBytecode,
/// A blob containing an application description.
ApplicationDescription,
/// A blob containing a committee of validators.
Committee,
/// A blob containing a chain description.
ChainDescription,
}
impl BlobType {
    /// Returns whether the blob is of [`BlobType::Committee`] variant.
    pub fn is_committee_blob(&self) -> bool {
        // Deliberately exhaustive (no `_` arm): adding a new `BlobType` variant
        // forces this method to be revisited at compile time.
        match self {
            BlobType::Data
            | BlobType::ContractBytecode
            | BlobType::ServiceBytecode
            | BlobType::EvmBytecode
            | BlobType::ApplicationDescription
            | BlobType::ChainDescription => false,
            BlobType::Committee => true,
        }
    }
}
impl fmt::Display for BlobType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl std::str::FromStr for BlobType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
serde_json::from_str(&format!("\"{s}\""))
.with_context(|| format!("Invalid BlobType: {}", s))
}
}
/// A content-addressed blob ID i.e. the hash of the `BlobContent`.
#[derive(
Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash, Debug, WitType, WitStore, WitLoad, Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary, Default))]
pub struct BlobId {
/// The type of the blob.
pub blob_type: BlobType,
/// The hash of the blob.
pub hash: CryptoHash,
}
impl BlobId {
/// Creates a new `BlobId` from a `CryptoHash`. This must be a hash of the blob's bytes!
pub fn new(hash: CryptoHash, blob_type: BlobType) -> Self {
Self { hash, blob_type }
}
}
impl fmt::Display for BlobId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}", self.blob_type, self.hash)?;
Ok(())
}
}
impl std::str::FromStr for BlobId {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Expect exactly `<blob-type>:<hash>`; any other number of segments is invalid.
        let parts: Vec<&str> = s.split(':').collect();
        let [type_part, hash_part] = parts[..] else {
            return Err(anyhow!("Invalid blob ID: {}", s));
        };
        let blob_type = BlobType::from_str(type_part).context("Invalid BlobType!")?;
        let hash = CryptoHash::from_str(hash_part).context("Invalid hash!")?;
        Ok(BlobId { blob_type, hash })
    }
}
#[derive(Serialize, Deserialize)]
#[serde(rename = "BlobId")]
struct BlobIdHelper {
hash: CryptoHash,
blob_type: BlobType,
}
impl Serialize for BlobId {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
if serializer.is_human_readable() {
serializer.serialize_str(&self.to_string())
} else {
let helper = BlobIdHelper {
hash: self.hash,
blob_type: self.blob_type,
};
helper.serialize(serializer)
}
}
}
impl<'a> Deserialize<'a> for BlobId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'a>,
{
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
Self::from_str(&s).map_err(serde::de::Error::custom)
} else {
let helper = BlobIdHelper::deserialize(deserializer)?;
Ok(BlobId::new(helper.hash, helper.blob_type))
}
}
}
/// Hash of a data blob.
#[derive(
Eq, Hash, PartialEq, Debug, Serialize, Deserialize, Clone, Copy, WitType, WitLoad, WitStore,
)]
pub struct DataBlobHash(pub CryptoHash);
impl From<DataBlobHash> for BlobId {
fn from(hash: DataBlobHash) -> BlobId {
BlobId::new(hash.0, BlobType::Data)
}
}
// TODO(#5166) we can be more specific here (and also more generic)
#[cfg_attr(web, wasm_bindgen::prelude::wasm_bindgen(typescript_custom_section))]
const _: &str = "export type ApplicationId = string;";
/// A unique identifier for a user application from a blob.
#[derive(Debug, WitLoad, WitStore, WitType, Allocative)]
#[cfg_attr(with_testing, derive(Default, test_strategy::Arbitrary))]
#[allocative(bound = "A")]
pub struct ApplicationId<A = ()> {
/// The hash of the `ApplicationDescription` this refers to.
pub application_description_hash: CryptoHash,
#[witty(skip)]
#[debug(skip)]
#[allocative(skip)]
_phantom: PhantomData<A>,
}
/// A unique identifier for an application.
#[derive(
Eq,
PartialEq,
Ord,
PartialOrd,
Copy,
Clone,
Hash,
Debug,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
#[cfg_attr(web, derive(tsify::Tsify), tsify(from_wasm_abi, into_wasm_abi))]
pub enum GenericApplicationId {
/// The system application.
System,
/// A user application.
User(ApplicationId),
}
impl fmt::Display for GenericApplicationId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
GenericApplicationId::System => Display::fmt("System", f),
GenericApplicationId::User(application_id) => {
Display::fmt("User:", f)?;
Display::fmt(&application_id, f)
}
}
}
}
impl std::str::FromStr for GenericApplicationId {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Mirrors the `Display` impl: either the literal `System`, or `User:<id>`.
        match s {
            "System" => Ok(GenericApplicationId::System),
            _ => match s.strip_prefix("User:") {
                Some(rest) => Ok(GenericApplicationId::User(ApplicationId::from_str(rest)?)),
                None => Err(anyhow!("Invalid parsing of GenericApplicationId")),
            },
        }
    }
}
impl GenericApplicationId {
    /// Returns the `ApplicationId`, or `None` if it is `System`.
    pub fn user_application_id(&self) -> Option<&ApplicationId> {
        match self {
            GenericApplicationId::User(app_id) => Some(app_id),
            GenericApplicationId::System => None,
        }
    }
}
impl<A> From<ApplicationId<A>> for AccountOwner {
fn from(app_id: ApplicationId<A>) -> Self {
if app_id.is_evm() {
let hash_bytes = app_id.application_description_hash.as_bytes();
AccountOwner::Address20(hash_bytes[..20].try_into().unwrap())
} else {
AccountOwner::Address32(app_id.application_description_hash)
}
}
}
impl From<AccountPublicKey> for AccountOwner {
fn from(public_key: AccountPublicKey) -> Self {
match public_key {
AccountPublicKey::Ed25519(public_key) => public_key.into(),
AccountPublicKey::Secp256k1(public_key) => public_key.into(),
AccountPublicKey::EvmSecp256k1(public_key) => public_key.into(),
}
}
}
impl From<ApplicationId> for GenericApplicationId {
fn from(application_id: ApplicationId) -> Self {
GenericApplicationId::User(application_id)
}
}
impl From<Secp256k1PublicKey> for AccountOwner {
fn from(public_key: Secp256k1PublicKey) -> Self {
AccountOwner::Address32(CryptoHash::new(&public_key))
}
}
impl From<Ed25519PublicKey> for AccountOwner {
fn from(public_key: Ed25519PublicKey) -> Self {
AccountOwner::Address32(CryptoHash::new(&public_key))
}
}
impl From<EvmPublicKey> for AccountOwner {
fn from(public_key: EvmPublicKey) -> Self {
AccountOwner::Address20(alloy_primitives::Address::from_public_key(&public_key.0).into())
}
}
/// A unique identifier for a module.
#[derive(Debug, WitLoad, WitStore, WitType, Allocative)]
#[cfg_attr(with_testing, derive(Default, test_strategy::Arbitrary))]
pub struct ModuleId<Abi = (), Parameters = (), InstantiationArgument = ()> {
/// The hash of the blob containing the contract bytecode.
pub contract_blob_hash: CryptoHash,
/// The hash of the blob containing the service bytecode.
pub service_blob_hash: CryptoHash,
/// The virtual machine being used.
pub vm_runtime: VmRuntime,
#[witty(skip)]
#[debug(skip)]
_phantom: PhantomData<(Abi, Parameters, InstantiationArgument)>,
}
/// The name of an event stream.
#[derive(
Clone,
Debug,
Eq,
Hash,
Ord,
PartialEq,
PartialOrd,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
pub struct StreamName(
#[serde(with = "serde_bytes")]
#[debug(with = "hex_debug")]
pub Vec<u8>,
);
impl<T> From<T> for StreamName
where
T: Into<Vec<u8>>,
{
fn from(name: T) -> Self {
StreamName(name.into())
}
}
impl fmt::Display for StreamName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&hex::encode(&self.0), f)
}
}
impl std::str::FromStr for StreamName {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Stream names round-trip through hex (see the `Display` impl).
        Ok(StreamName(hex::decode(s)?))
    }
}
/// An event stream ID.
#[derive(
Clone,
Debug,
Eq,
Hash,
Ord,
PartialEq,
PartialOrd,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
SimpleObject,
InputObject,
Allocative,
)]
#[graphql(input_name = "StreamIdInput")]
pub struct StreamId {
/// The application that can add events to this stream.
pub application_id: GenericApplicationId,
/// The name of this stream: an application can have multiple streams with different names.
pub stream_name: StreamName,
}
impl StreamId {
    /// Creates a system stream ID with the given name.
    pub fn system(name: impl Into<StreamName>) -> Self {
        Self {
            application_id: GenericApplicationId::System,
            stream_name: name.into(),
        }
    }
}
/// The result of an `events_from_index`.
#[derive(
Debug,
Eq,
PartialEq,
Ord,
PartialOrd,
Clone,
Hash,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
SimpleObject,
)]
pub struct IndexAndEvent {
/// The index of the found event.
pub index: u32,
/// The event being returned.
pub event: Vec<u8>,
}
impl fmt::Display for StreamId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&self.application_id, f)?;
Display::fmt(":", f)?;
Display::fmt(&self.stream_name, f)
}
}
impl std::str::FromStr for StreamId {
    type Err = anyhow::Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Split on the *last* colon: the application-ID part may itself contain a
        // colon (e.g. `User:<id>`), while the hex stream name cannot.
        let parts = s.rsplit_once(":");
        if let Some((part0, part1)) = parts {
            let application_id =
                GenericApplicationId::from_str(part0).context("Invalid GenericApplicationId!")?;
            let stream_name = StreamName::from_str(part1).context("Invalid StreamName!")?;
            Ok(StreamId {
                application_id,
                stream_name,
            })
        } else {
            // Fixed copy-paste in the error message: this parses a stream ID, not a blob ID.
            Err(anyhow!("Invalid stream ID: {}", s))
        }
    }
}
/// An event identifier.
#[derive(
Debug,
PartialEq,
Eq,
Hash,
Clone,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
SimpleObject,
Allocative,
)]
pub struct EventId {
/// The ID of the chain that generated this event.
pub chain_id: ChainId,
/// The ID of the stream this event belongs to.
pub stream_id: StreamId,
/// The event index, i.e. the number of events in the stream before this one.
pub index: u32,
}
impl fmt::Display for EventId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}:{}:{}", self.chain_id, self.stream_id, self.index)
}
}
impl StreamName {
/// Turns the stream name into bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.0
}
}
// Cannot use #[derive(Clone)] because it requires `A: Clone`.
impl<Abi, Parameters, InstantiationArgument> Clone
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn clone(&self) -> Self {
*self
}
}
impl<Abi, Parameters, InstantiationArgument> Copy
for ModuleId<Abi, Parameters, InstantiationArgument>
{
}
impl<Abi, Parameters, InstantiationArgument> PartialEq
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn eq(&self, other: &Self) -> bool {
let ModuleId {
contract_blob_hash,
service_blob_hash,
vm_runtime,
_phantom,
} = other;
self.contract_blob_hash == *contract_blob_hash
&& self.service_blob_hash == *service_blob_hash
&& self.vm_runtime == *vm_runtime
}
}
impl<Abi, Parameters, InstantiationArgument> Eq
for ModuleId<Abi, Parameters, InstantiationArgument>
{
}
impl<Abi, Parameters, InstantiationArgument> PartialOrd
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<Abi, Parameters, InstantiationArgument> Ord
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let ModuleId {
contract_blob_hash,
service_blob_hash,
vm_runtime,
_phantom,
} = other;
(
self.contract_blob_hash,
self.service_blob_hash,
self.vm_runtime,
)
.cmp(&(*contract_blob_hash, *service_blob_hash, *vm_runtime))
}
}
impl<Abi, Parameters, InstantiationArgument> Hash
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn hash<H: Hasher>(&self, state: &mut H) {
let ModuleId {
contract_blob_hash: contract_blob_id,
service_blob_hash: service_blob_id,
vm_runtime: vm_runtime_id,
_phantom,
} = self;
contract_blob_id.hash(state);
service_blob_id.hash(state);
vm_runtime_id.hash(state);
}
}
#[derive(Serialize, Deserialize)]
#[serde(rename = "ModuleId")]
struct SerializableModuleId {
contract_blob_hash: CryptoHash,
service_blob_hash: CryptoHash,
vm_runtime: VmRuntime,
}
impl<Abi, Parameters, InstantiationArgument> Serialize
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
let serializable_module_id = SerializableModuleId {
contract_blob_hash: self.contract_blob_hash,
service_blob_hash: self.service_blob_hash,
vm_runtime: self.vm_runtime,
};
if serializer.is_human_readable() {
let bytes =
bcs::to_bytes(&serializable_module_id).map_err(serde::ser::Error::custom)?;
serializer.serialize_str(&hex::encode(bytes))
} else {
SerializableModuleId::serialize(&serializable_module_id, serializer)
}
}
}
impl<'de, Abi, Parameters, InstantiationArgument> Deserialize<'de>
for ModuleId<Abi, Parameters, InstantiationArgument>
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
let module_id_bytes = hex::decode(s).map_err(serde::de::Error::custom)?;
let serializable_module_id: SerializableModuleId =
bcs::from_bytes(&module_id_bytes).map_err(serde::de::Error::custom)?;
Ok(ModuleId {
contract_blob_hash: serializable_module_id.contract_blob_hash,
service_blob_hash: serializable_module_id.service_blob_hash,
vm_runtime: serializable_module_id.vm_runtime,
_phantom: PhantomData,
})
} else {
let serializable_module_id = SerializableModuleId::deserialize(deserializer)?;
Ok(ModuleId {
contract_blob_hash: serializable_module_id.contract_blob_hash,
service_blob_hash: serializable_module_id.service_blob_hash,
vm_runtime: serializable_module_id.vm_runtime,
_phantom: PhantomData,
})
}
}
}
impl ModuleId {
    /// Creates a module ID from contract/service hashes and the VM runtime to use.
    pub fn new(
        contract_blob_hash: CryptoHash,
        service_blob_hash: CryptoHash,
        vm_runtime: VmRuntime,
    ) -> Self {
        ModuleId {
            contract_blob_hash,
            service_blob_hash,
            vm_runtime,
            _phantom: PhantomData,
        }
    }
    /// Specializes a module ID for a given ABI.
    pub fn with_abi<Abi, Parameters, InstantiationArgument>(
        self,
    ) -> ModuleId<Abi, Parameters, InstantiationArgument> {
        ModuleId {
            contract_blob_hash: self.contract_blob_hash,
            service_blob_hash: self.service_blob_hash,
            vm_runtime: self.vm_runtime,
            _phantom: PhantomData,
        }
    }
    /// Gets the `BlobId` of the contract
    pub fn contract_bytecode_blob_id(&self) -> BlobId {
        match self.vm_runtime {
            VmRuntime::Wasm => BlobId::new(self.contract_blob_hash, BlobType::ContractBytecode),
            VmRuntime::Evm => BlobId::new(self.contract_blob_hash, BlobType::EvmBytecode),
        }
    }
    /// Gets the `BlobId` of the service
    pub fn service_bytecode_blob_id(&self) -> BlobId {
        match self.vm_runtime {
            VmRuntime::Wasm => BlobId::new(self.service_blob_hash, BlobType::ServiceBytecode),
            // NOTE(review): uses `contract_blob_hash`, not `service_blob_hash`. This is
            // consistent with `bytecode_blob_ids` below (EVM modules expose a single
            // bytecode blob), but confirm it is intentional rather than a copy-paste slip.
            VmRuntime::Evm => BlobId::new(self.contract_blob_hash, BlobType::EvmBytecode),
        }
    }
    /// Gets all bytecode `BlobId`s of the module
    pub fn bytecode_blob_ids(&self) -> Vec<BlobId> {
        match self.vm_runtime {
            VmRuntime::Wasm => vec![
                BlobId::new(self.contract_blob_hash, BlobType::ContractBytecode),
                BlobId::new(self.service_blob_hash, BlobType::ServiceBytecode),
            ],
            // EVM has a single bytecode blob, keyed by the contract hash.
            VmRuntime::Evm => vec![BlobId::new(self.contract_blob_hash, BlobType::EvmBytecode)],
        }
    }
}
impl<Abi, Parameters, InstantiationArgument> ModuleId<Abi, Parameters, InstantiationArgument> {
    /// Forgets the ABI of a module ID (if any).
    pub fn forget_abi(self) -> ModuleId {
        let ModuleId {
            contract_blob_hash,
            service_blob_hash,
            vm_runtime,
            _phantom,
        } = self;
        ModuleId {
            contract_blob_hash,
            service_blob_hash,
            vm_runtime,
            _phantom: PhantomData,
        }
    }
}
// Cannot use #[derive(Clone)] because it requires `A: Clone`.
impl<A> Clone for ApplicationId<A> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<A> Copy for ApplicationId<A> {}
// The manual impls below look only at `application_description_hash`; the ABI
// parameter `A` is a phantom marker and never affects equality, ordering or
// hashing.
impl<A: PartialEq> PartialEq for ApplicationId<A> {
    fn eq(&self, other: &Self) -> bool {
        self.application_description_hash == other.application_description_hash
    }
}
impl<A: Eq> Eq for ApplicationId<A> {}
impl<A: PartialOrd> PartialOrd for ApplicationId<A> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.application_description_hash
            .partial_cmp(&other.application_description_hash)
    }
}
impl<A: Ord> Ord for ApplicationId<A> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.application_description_hash
            .cmp(&other.application_description_hash)
    }
}
impl<A> Hash for ApplicationId<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.application_description_hash.hash(state);
    }
}
/// Serialization helper mirroring [`ApplicationId`] without the ABI phantom marker.
#[derive(Serialize, Deserialize)]
#[serde(rename = "ApplicationId")]
struct SerializableApplicationId {
    pub application_description_hash: CryptoHash,
}
impl<A> Serialize for ApplicationId<A> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
if serializer.is_human_readable() {
let bytes = bcs::to_bytes(&SerializableApplicationId {
application_description_hash: self.application_description_hash,
})
.map_err(serde::ser::Error::custom)?;
serializer.serialize_str(&hex::encode(bytes))
} else {
SerializableApplicationId::serialize(
&SerializableApplicationId {
application_description_hash: self.application_description_hash,
},
serializer,
)
}
}
}
impl<'de, A> Deserialize<'de> for ApplicationId<A> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
let application_id_bytes = hex::decode(s).map_err(serde::de::Error::custom)?;
let application_id: SerializableApplicationId =
bcs::from_bytes(&application_id_bytes).map_err(serde::de::Error::custom)?;
Ok(ApplicationId {
application_description_hash: application_id.application_description_hash,
_phantom: PhantomData,
})
} else {
let value = SerializableApplicationId::deserialize(deserializer)?;
Ok(ApplicationId {
application_description_hash: value.application_description_hash,
_phantom: PhantomData,
})
}
}
}
impl ApplicationId {
    /// Creates an application ID from the application description hash.
    pub fn new(application_description_hash: CryptoHash) -> Self {
        Self {
            application_description_hash,
            _phantom: PhantomData,
        }
    }
    /// Converts the application ID to the ID of the blob containing the
    /// `ApplicationDescription`.
    pub fn description_blob_id(self) -> BlobId {
        BlobId::new(
            self.application_description_hash,
            BlobType::ApplicationDescription,
        )
    }
    /// Specializes an application ID for a given ABI.
    pub fn with_abi<A>(self) -> ApplicationId<A> {
        ApplicationId {
            application_description_hash: self.application_description_hash,
            _phantom: PhantomData,
        }
    }
}
impl<A> ApplicationId<A> {
/// Forgets the ABI of an application ID (if any).
pub fn forget_abi(self) -> ApplicationId {
ApplicationId {
application_description_hash: self.application_description_hash,
_phantom: PhantomData,
}
}
}
impl<A> ApplicationId<A> {
    /// Returns whether the `ApplicationId` is the one of an EVM application.
    ///
    /// EVM IDs store a 20-byte Ethereum address in the first 20 bytes of the
    /// hash and zero-pad the remaining 12 (see `From<Address>` below).
    pub fn is_evm(&self) -> bool {
        let bytes = self.application_description_hash.as_bytes();
        bytes.0[20..].iter().all(|byte| *byte == 0)
    }
}
#[cfg(with_revm)]
impl From<Address> for ApplicationId {
    fn from(address: Address) -> ApplicationId {
        // Place the 20-byte Ethereum address in the first 20 bytes of the
        // 32-byte hash and zero-pad the rest; `is_evm` relies on this layout.
        let mut arr = [0_u8; 32];
        arr[..20].copy_from_slice(address.as_slice());
        ApplicationId {
            application_description_hash: arr.into(),
            _phantom: PhantomData,
        }
    }
}
#[cfg(with_revm)]
impl<A> ApplicationId<A> {
    /// Converts the `ApplicationId` into an Ethereum Address.
    ///
    /// Inverse of `From<Address>`: takes the first 20 bytes of the hash.
    pub fn evm_address(&self) -> Address {
        let bytes = self.application_description_hash.as_bytes();
        let bytes = bytes.0.as_ref();
        Address::from_slice(&bytes[0..20])
    }
    /// Converts the `ApplicationId` into an Ethereum-compatible 32-byte array.
    pub fn bytes32(&self) -> B256 {
        *self.application_description_hash.as_bytes()
    }
}
/// Serialization helper for [`AccountOwner`].
///
/// Used only for non-human-readable formats; human-readable ones go through
/// the `Display`/`FromStr` string form instead.
#[derive(Serialize, Deserialize)]
#[serde(rename = "AccountOwner")]
enum SerializableAccountOwner {
    Reserved(u8),
    Address32(CryptoHash),
    Address20([u8; 20]),
}
impl Serialize for AccountOwner {
    /// Human-readable formats use the canonical `0x…` string form; binary
    /// formats use the [`SerializableAccountOwner`] enum.
    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        if serializer.is_human_readable() {
            serializer.serialize_str(&self.to_string())
        } else {
            let inner = match self {
                AccountOwner::Reserved(value) => SerializableAccountOwner::Reserved(*value),
                AccountOwner::Address32(value) => SerializableAccountOwner::Address32(*value),
                AccountOwner::Address20(value) => SerializableAccountOwner::Address20(*value),
            };
            inner.serialize(serializer)
        }
    }
}
impl<'de> Deserialize<'de> for AccountOwner {
fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
let value = Self::from_str(&s).map_err(serde::de::Error::custom)?;
Ok(value)
} else {
let value = SerializableAccountOwner::deserialize(deserializer)?;
match value {
SerializableAccountOwner::Reserved(value) => Ok(AccountOwner::Reserved(value)),
SerializableAccountOwner::Address32(value) => Ok(AccountOwner::Address32(value)),
SerializableAccountOwner::Address20(value) => Ok(AccountOwner::Address20(value)),
}
}
}
}
impl fmt::Display for AccountOwner {
    /// Renders every variant as `0x` followed by the hex payload: 2 digits for
    /// `Reserved`, 64 for `Address32`, 40 for `Address20`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AccountOwner::Reserved(value) => write!(f, "0x{}", hex::encode([*value])),
            AccountOwner::Address32(value) => write!(f, "0x{}", value),
            AccountOwner::Address20(value) => write!(f, "0x{}", hex::encode(value)),
        }
    }
}
impl std::str::FromStr for AccountOwner {
    type Err = anyhow::Error;
    /// Parses the `0x…` form produced by `Display`: 64 hex digits ->
    /// `Address32`, 40 -> `Address20`, 2 -> `Reserved`. Anything else (or a
    /// missing `0x` prefix) is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if let Some(s) = s.strip_prefix("0x") {
            if s.len() == 64 {
                // A 64-digit payload that is not a valid hash falls through to
                // the generic error below instead of reporting the hash error.
                if let Ok(hash) = CryptoHash::from_str(s) {
                    return Ok(AccountOwner::Address32(hash));
                }
            } else if s.len() == 40 {
                let address = hex::decode(s)?;
                // 40 hex digits always decode to 20 bytes; this guard is belt-and-braces.
                if address.len() != 20 {
                    anyhow::bail!("Invalid address length: {}", s);
                }
                let address = <[u8; 20]>::try_from(address.as_slice()).unwrap();
                return Ok(AccountOwner::Address20(address));
            }
            if s.len() == 2 {
                // Single reserved byte, e.g. "0x00".
                let bytes = hex::decode(s)?;
                if bytes.len() == 1 {
                    let value = u8::from_be_bytes(bytes.try_into().expect("one byte"));
                    return Ok(AccountOwner::Reserved(value));
                }
            }
        }
        anyhow::bail!("Invalid address value: {}", s);
    }
}
impl fmt::Display for ChainId {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/abi.rs | linera-base/src/abi.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the notion of Application Binary Interface (ABI) for Linera
//! applications across Wasm and native architectures.
use std::fmt::Debug;
use serde::{de::DeserializeOwned, Serialize};
// ANCHOR: abi
/// A trait that includes all the types exported by a Linera application (both contract
/// and service).
pub trait Abi: ContractAbi + ServiceAbi {}
// ANCHOR_END: abi
// T::Parameters is duplicated for simplicity but it must match.
// NOTE(review): neither `ContractAbi` nor `ServiceAbi` declares a `Parameters`
// associated type in this file; the comment above may be stale — confirm.
impl<T> Abi for T where T: ContractAbi + ServiceAbi {}
// ANCHOR: contract_abi
/// A trait that includes all the types exported by a Linera application contract.
pub trait ContractAbi {
    /// The type of operation executed by the application.
    ///
    /// Operations are transactions directly added to a block by the creator (and signer)
    /// of the block. Users typically use operations to start interacting with an
    /// application on their own chain.
    type Operation: Serialize + DeserializeOwned + Send + Sync + Debug + 'static;
    /// The response type of an application call.
    type Response: Serialize + DeserializeOwned + Send + Sync + Debug + 'static;
    /// How the `Operation` is deserialized. Defaults to BCS.
    ///
    /// Errors are rendered as human-readable strings including the offending bytes.
    fn deserialize_operation(operation: Vec<u8>) -> Result<Self::Operation, String> {
        bcs::from_bytes(&operation)
            .map_err(|e| format!("BCS deserialization error {e:?} for operation {operation:?}"))
    }
    /// How the `Operation` is serialized. Defaults to BCS.
    fn serialize_operation(operation: &Self::Operation) -> Result<Vec<u8>, String> {
        bcs::to_bytes(operation)
            .map_err(|e| format!("BCS serialization error {e:?} for operation {operation:?}"))
    }
    /// How the `Response` is deserialized. Defaults to BCS.
    fn deserialize_response(response: Vec<u8>) -> Result<Self::Response, String> {
        bcs::from_bytes(&response)
            .map_err(|e| format!("BCS deserialization error {e:?} for response {response:?}"))
    }
    /// How the `Response` is serialized. Defaults to BCS.
    ///
    /// Note: takes the `Response` by value, unlike `serialize_operation` which borrows.
    fn serialize_response(response: Self::Response) -> Result<Vec<u8>, String> {
        bcs::to_bytes(&response)
            .map_err(|e| format!("BCS serialization error {e:?} for response {response:?}"))
    }
}
// ANCHOR_END: contract_abi
// ANCHOR: service_abi
/// A trait that includes all the types exported by a Linera application service.
///
/// Unlike [`ContractAbi`], no serialization helpers are provided here; services
/// exchange `Query`/`QueryResponse` values directly.
pub trait ServiceAbi {
    /// The type of a query receivable by the application's service.
    type Query: Serialize + DeserializeOwned + Send + Sync + Debug + 'static;
    /// The response type of the application's service.
    type QueryResponse: Serialize + DeserializeOwned + Send + Sync + Debug + 'static;
}
// ANCHOR_END: service_abi
/// Marker trait to help importing contract types.
pub trait WithContractAbi {
    /// The contract types to import.
    type Abi: ContractAbi;
}
// Forward the associated types so that implementing `WithContractAbi` is
// sufficient to make a type usable as a `ContractAbi`.
impl<A> ContractAbi for A
where
    A: WithContractAbi,
{
    type Operation = <<A as WithContractAbi>::Abi as ContractAbi>::Operation;
    type Response = <<A as WithContractAbi>::Abi as ContractAbi>::Response;
}
/// Marker trait to help importing service types.
pub trait WithServiceAbi {
    /// The service types to import.
    type Abi: ServiceAbi;
}
// Same forwarding for the service half of the ABI.
impl<A> ServiceAbi for A
where
    A: WithServiceAbi,
{
    type Query = <<A as WithServiceAbi>::Abi as ServiceAbi>::Query;
    type QueryResponse = <<A as WithServiceAbi>::Abi as ServiceAbi>::QueryResponse;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/time.rs | linera-base/src/time.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Abstractions over time that can be used natively or on the Web.
*/
cfg_if::cfg_if! {
    if #[cfg(web)] {
        // This must remain conditional as otherwise it pulls in JavaScript symbols
        // on-chain (on any Wasm target).
        pub use web_time::*;
        // NOTE(review): confirm `linera_kywasmtime` is the intended timer shim
        // crate name for the Web target.
        pub use linera_kywasmtime as timer;
    } else {
        // Native targets: std's clock types and Tokio's timer utilities.
        pub use std::time::*;
        pub use tokio::time as timer;
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/limited_writer.rs | linera-base/src/limited_writer.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::io::{self, Write};
use thiserror::Error;
use crate::ensure;
/// Error returned when a write would exceed the configured byte limit.
#[derive(Debug)]
pub struct LimitedWriterError;

impl std::fmt::Display for LimitedWriterError {
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        formatter.write_str("Writer limit exceeded")
    }
}

impl std::error::Error for LimitedWriterError {}

/// Custom writer that enforces a byte limit.
pub struct LimitedWriter<W: Write> {
    // The wrapped writer all accepted output is forwarded to.
    inner: W,
    // Maximum total number of bytes that may be written.
    limit: usize,
    // Number of bytes successfully written so far.
    written: usize,
}

impl<W: Write> LimitedWriter<W> {
    /// Wraps `inner`, rejecting any write that would push the total past `limit` bytes.
    pub fn new(inner: W, limit: usize) -> Self {
        Self {
            inner,
            limit,
            written: 0,
        }
    }
}

impl<W: Write> Write for LimitedWriter<W> {
    /// Forwards the whole buffer if it fits in the remaining budget; otherwise
    /// fails with [`LimitedWriterError`] without writing anything.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let fits = match self.limit.checked_sub(self.written) {
            Some(remaining) => buf.len() <= remaining,
            None => false,
        };
        if !fits {
            return Err(io::Error::other(LimitedWriterError));
        }
        // Forward to the inner writer and account for what it accepted.
        let count = self.inner.write(buf)?;
        self.written += count;
        Ok(count)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_limited_writer() {
        let mut out_buffer = Vec::new();
        let mut writer = LimitedWriter::new(&mut out_buffer, 5);
        // Two writes that exactly exhaust the 5-byte budget succeed...
        assert_eq!(writer.write(b"foo").unwrap(), 3);
        assert_eq!(writer.write(b"ba").unwrap(), 2);
        // ...and one more byte is rejected, carrying the sentinel error type.
        assert!(writer
            .write(b"r")
            .unwrap_err()
            .downcast::<LimitedWriterError>()
            .is_ok());
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/prometheus_util.rs | linera-base/src/prometheus_util.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines utility functions for interacting with Prometheus (logging metrics, etc)
use prometheus::{
exponential_buckets, histogram_opts, linear_buckets, register_histogram,
register_histogram_vec, register_int_counter, register_int_counter_vec, register_int_gauge,
register_int_gauge_vec, Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge,
IntGaugeVec, Opts,
};
use crate::time::Instant;
const LINERA_NAMESPACE: &str = "linera";
/// Wrapper around Prometheus `register_int_counter_vec!` macro which also sets the `linera` namespace
pub fn register_int_counter_vec(
    name: &str,
    description: &str,
    label_names: &[&str],
) -> IntCounterVec {
    let counter_opts = Opts::new(name, description).namespace(LINERA_NAMESPACE);
    // Registration only fails on duplicate or invalid metric names, which is a
    // programming error, hence the panic. (Message fixed: this creates an
    // IntCounterVec, not an IntCounter.)
    register_int_counter_vec!(counter_opts, label_names).expect("IntCounterVec can be created")
}
/// Wrapper around Prometheus `register_int_counter!` macro which also sets the `linera` namespace
pub fn register_int_counter(name: &str, description: &str) -> IntCounter {
    let counter_opts = Opts::new(name, description).namespace(LINERA_NAMESPACE);
    // Panics on duplicate/invalid metric names — a programming error.
    register_int_counter!(counter_opts).expect("IntCounter can be created")
}
/// Wrapper around Prometheus `register_histogram_vec!` macro which also sets the `linera` namespace
pub fn register_histogram_vec(
    name: &str,
    description: &str,
    label_names: &[&str],
    buckets: Option<Vec<f64>>,
) -> HistogramVec {
    // With no explicit buckets, the macro falls back to Prometheus defaults.
    let histogram_opts = match buckets {
        Some(buckets) => histogram_opts!(name, description, buckets).namespace(LINERA_NAMESPACE),
        None => histogram_opts!(name, description).namespace(LINERA_NAMESPACE),
    };
    register_histogram_vec!(histogram_opts, label_names).expect("Histogram can be created")
}
/// Wrapper around Prometheus `register_histogram!` macro which also sets the `linera` namespace
pub fn register_histogram(name: &str, description: &str, buckets: Option<Vec<f64>>) -> Histogram {
    // With no explicit buckets, the macro falls back to Prometheus defaults.
    let histogram_opts = if let Some(buckets) = buckets {
        histogram_opts!(name, description, buckets).namespace(LINERA_NAMESPACE)
    } else {
        histogram_opts!(name, description).namespace(LINERA_NAMESPACE)
    };
    register_histogram!(histogram_opts).expect("Histogram can be created")
}
/// Wrapper around Prometheus `register_int_gauge!` macro which also sets the `linera` namespace
pub fn register_int_gauge(name: &str, description: &str) -> IntGauge {
    let gauge_opts = Opts::new(name, description).namespace(LINERA_NAMESPACE);
    // Panics on duplicate/invalid metric names — a programming error.
    register_int_gauge!(gauge_opts).expect("IntGauge can be created")
}
/// Wrapper around Prometheus `register_int_gauge_vec!` macro which also sets the `linera` namespace
pub fn register_int_gauge_vec(name: &str, description: &str, label_names: &[&str]) -> IntGaugeVec {
    let gauge_opts = Opts::new(name, description).namespace(LINERA_NAMESPACE);
    // Panic message fixed: this creates an IntGaugeVec, not an IntGauge.
    register_int_gauge_vec!(gauge_opts, label_names).expect("IntGaugeVec can be created")
}
/// Construct the bucket interval exponentially starting from a value and an ending value.
///
/// Buckets grow by a factor of 3; if the last generated bucket falls short of
/// `end_value`, `end_value` itself is appended so the interval is fully covered.
pub fn exponential_bucket_interval(start_value: f64, end_value: f64) -> Option<Vec<f64>> {
    let quot = end_value / start_value;
    let factor = 3.0_f64;
    let count_approx = quot.ln() / factor.ln();
    // Clamp to at least one bucket: for narrow intervals (end < start * sqrt(3))
    // the rounded count would be 0, and `exponential_buckets` errors on count 0,
    // which would turn into a panic below.
    let count = (count_approx.round().max(1.0)) as usize;
    let mut buckets = exponential_buckets(start_value, factor, count)
        .expect("Exponential buckets creation should not fail!");
    if let Some(last) = buckets.last() {
        if *last < end_value {
            buckets.push(end_value);
        }
    }
    Some(buckets)
}
/// Construct the latencies exponentially starting from 0.001 and ending at the maximum latency
pub fn exponential_bucket_latencies(max_latency: f64) -> Option<Vec<f64>> {
    // 0.001 gives millisecond resolution at the low end when latencies are in seconds.
    exponential_bucket_interval(0.001_f64, max_latency)
}
/// Construct the bucket interval linearly starting from a value and an ending value.
pub fn linear_bucket_interval(start_value: f64, width: f64, end_value: f64) -> Option<Vec<f64>> {
    // Number of `width`-sized steps; the generated buckets always stop strictly
    // below `end_value`, so appending it below never duplicates a bound.
    let count = (end_value - start_value) / width;
    let count = count.round() as usize;
    let mut buckets = linear_buckets(start_value, width, count)
        .expect("Linear buckets creation should not fail!");
    // Terminate the histogram exactly at `end_value`.
    buckets.push(end_value);
    Some(buckets)
}
/// The unit of measurement for latency metrics.
enum MeasurementUnit {
    /// Measure latency in milliseconds.
    Milliseconds,
    /// Measure latency in microseconds.
    Microseconds,
}
/// A guard for an active latency measurement.
///
/// Finishes the measurement when dropped, and then updates the `Metric`.
pub struct ActiveMeasurementGuard<'metric, Metric>
where
    Metric: MeasureLatency,
{
    // When the measurement started.
    start: Instant,
    // The metric to update; `None` once the measurement has been finished.
    metric: Option<&'metric Metric>,
    // Unit the elapsed time is converted to before being recorded.
    unit: MeasurementUnit,
}
impl<Metric> ActiveMeasurementGuard<'_, Metric>
where
    Metric: MeasureLatency,
{
    /// Finishes the measurement, updates the `Metric` and returns the measured latency in
    /// the unit specified when the measurement was started.
    pub fn finish(mut self) -> f64 {
        self.finish_by_ref()
    }
    /// Finishes the measurement without taking ownership of this [`ActiveMeasurementGuard`],
    /// updates the `Metric` and returns the measured latency in the unit specified when
    /// the measurement was started.
    fn finish_by_ref(&mut self) -> f64 {
        // `take` marks the guard as finished, making the `Drop` call a no-op.
        match self.metric.take() {
            Some(metric) => {
                let latency = match self.unit {
                    MeasurementUnit::Milliseconds => self.start.elapsed().as_secs_f64() * 1000.0,
                    MeasurementUnit::Microseconds => {
                        self.start.elapsed().as_secs_f64() * 1_000_000.0
                    }
                };
                metric.finish_measurement(latency);
                latency
            }
            None => {
                // This is getting called from `Drop` after `finish` has already been
                // executed; nothing to record, and this return value is discarded.
                f64::NAN
            }
        }
    }
}
impl<Metric> Drop for ActiveMeasurementGuard<'_, Metric>
where
    Metric: MeasureLatency,
{
    // Ensures the measurement is recorded even if `finish` was never called.
    fn drop(&mut self) {
        self.finish_by_ref();
    }
}
/// An extension trait for metrics that can be used to measure latencies.
pub trait MeasureLatency: Sized {
    /// Starts measuring the latency in milliseconds, finishing when the returned
    /// [`ActiveMeasurementGuard`] is dropped.
    fn measure_latency(&self) -> ActiveMeasurementGuard<'_, Self>;
    /// Starts measuring the latency in microseconds, finishing when the returned
    /// [`ActiveMeasurementGuard`] is dropped.
    fn measure_latency_us(&self) -> ActiveMeasurementGuard<'_, Self>;
    /// Updates the metric with measured latency in `milliseconds`.
    ///
    /// NOTE(review): despite the parameter name, the guard passes the value in
    /// whichever unit was selected at `measure_latency*` time (ms or us).
    fn finish_measurement(&self, milliseconds: f64);
}
impl MeasureLatency for HistogramVec {
    fn measure_latency(&self) -> ActiveMeasurementGuard<'_, Self> {
        ActiveMeasurementGuard {
            start: Instant::now(),
            metric: Some(self),
            unit: MeasurementUnit::Milliseconds,
        }
    }
    fn measure_latency_us(&self) -> ActiveMeasurementGuard<'_, Self> {
        ActiveMeasurementGuard {
            start: Instant::now(),
            metric: Some(self),
            unit: MeasurementUnit::Microseconds,
        }
    }
    fn finish_measurement(&self, milliseconds: f64) {
        // NOTE(review): records against the unlabeled child — assumes the vec
        // was registered with an empty label set; verify at call sites.
        self.with_label_values(&[]).observe(milliseconds);
    }
}
impl MeasureLatency for Histogram {
    fn measure_latency(&self) -> ActiveMeasurementGuard<'_, Self> {
        ActiveMeasurementGuard {
            start: Instant::now(),
            metric: Some(self),
            unit: MeasurementUnit::Milliseconds,
        }
    }
    fn measure_latency_us(&self) -> ActiveMeasurementGuard<'_, Self> {
        ActiveMeasurementGuard {
            start: Instant::now(),
            metric: Some(self),
            unit: MeasurementUnit::Microseconds,
        }
    }
    fn finish_measurement(&self, milliseconds: f64) {
        self.observe(milliseconds);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Helper function for approximate floating point comparison
    fn assert_float_vec_eq(left: &[f64], right: &[f64]) {
        const EPSILON: f64 = 1e-10;
        assert_eq!(left.len(), right.len(), "Vectors have different lengths");
        for (i, (l, r)) in left.iter().zip(right.iter()).enumerate() {
            assert!(
                (l - r).abs() < EPSILON,
                "Vectors differ at index {}: {} != {}",
                i,
                l,
                r
            );
        }
    }
    #[test]
    fn test_linear_bucket_interval() {
        // Case 1: Width divides range evenly - small values
        let buckets = linear_bucket_interval(0.05, 0.01, 0.1).unwrap();
        assert_float_vec_eq(&buckets, &[0.05, 0.06, 0.07, 0.08, 0.09, 0.1]);
        // Case 2: Width divides range evenly - large values
        let buckets = linear_bucket_interval(100.0, 50.0, 500.0).unwrap();
        assert_float_vec_eq(
            &buckets,
            &[
                100.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0, 450.0, 500.0,
            ],
        );
        // Case 3: Width doesn't divide range evenly - small values
        // (the final bucket is the appended `end_value`, not a multiple of `width`)
        let buckets = linear_bucket_interval(0.05, 0.12, 0.5).unwrap();
        assert_float_vec_eq(&buckets, &[0.05, 0.17, 0.29, 0.41, 0.5]);
        // Case 4: Width doesn't divide range evenly - large values
        let buckets = linear_bucket_interval(100.0, 150.0, 500.0).unwrap();
        assert_float_vec_eq(&buckets, &[100.0, 250.0, 400.0, 500.0]);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/unit_tests.rs | linera-base/src/unit_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Unit tests for `linera-base` types.
use std::fmt::Debug;
use linera_witty::{Layout, WitLoad, WitStore};
use test_case::test_case;
use crate::{
crypto::{AccountPublicKey, CryptoHash},
data_types::{Amount, BlockHeight, Resources, SendMessageRequest, TimeDelta, Timestamp},
identifiers::{Account, AccountOwner, ApplicationId, ChainId, ModuleId},
ownership::{ChainOwnership, TimeoutConfig},
vm::VmRuntime,
};
/// Test roundtrip of types used in the WIT interface.
#[test_case(CryptoHash::test_hash("hash"); "of_crypto_hash")]
#[test_case(AccountPublicKey::test_key(255); "of_public_key")]
#[test_case(Amount::from_tokens(500); "of_amount")]
#[test_case(BlockHeight(1095); "of_block_height")]
#[test_case(Timestamp::from(6_400_003); "of_timestamp")]
#[test_case(resources_test_case(); "of_resources")]
#[test_case(send_message_request_test_case(); "of_send_message_request")]
#[test_case(AccountOwner::from(CryptoHash::test_hash("owner")); "of_owner")]
#[test_case(account_test_case(); "of_account")]
#[test_case(ChainId(CryptoHash::test_hash("chain_id")); "of_chain_id")]
#[test_case(application_id_test_case(); "of_application_id")]
#[test_case(module_id_test_case(); "of_module_id")]
#[test_case(timeout_config_test_case(); "of_timeout_config")]
#[test_case(chain_ownership_test_case(); "of_chain_ownership")]
#[test_case([5u8; 20]; "array20")]
fn test_wit_roundtrip<T>(input: T)
where
T: Debug + Eq + WitLoad + WitStore,
<T::Layout as Layout>::Flat: Copy + Debug + Eq,
{
linera_witty::test::test_memory_roundtrip(&input).expect("Memory WIT roundtrip test failed");
linera_witty::test::test_flattening_roundtrip(&input)
.expect("Flattening WIT roundtrip test failed");
}
/// Creates a dummy [`Resources`] instance to use for the WIT roundtrip test.
fn resources_test_case() -> Resources {
Resources {
bytes_runtime: 40,
bytes_to_read: 1_474_560,
bytes_to_write: 571,
blobs_to_read: 71,
blobs_to_publish: 73,
blob_bytes_to_read: 67,
blob_bytes_to_publish: 71,
wasm_fuel: 1_000,
evm_fuel: 1_000,
message_size: 4,
messages: 93,
read_operations: 12,
write_operations: 2,
storage_size_delta: 700_000_000,
service_as_oracle_queries: 7,
http_requests: 3,
}
}
/// Creates a dummy [`SendMessageRequest`] instance to use for the WIT roundtrip test.
fn send_message_request_test_case() -> SendMessageRequest<Vec<u8>> {
SendMessageRequest {
authenticated: true,
is_tracked: false,
destination: ChainId(CryptoHash::test_hash("chain_id_0")),
grant: Resources {
bytes_runtime: 0,
bytes_to_read: 200,
bytes_to_write: 0,
blobs_to_read: 100,
blobs_to_publish: 1000,
blob_bytes_to_read: 10,
blob_bytes_to_publish: 100,
wasm_fuel: 8,
evm_fuel: 8,
message_size: 1,
messages: 0,
read_operations: 1,
write_operations: 0,
storage_size_delta: 0,
service_as_oracle_queries: 0,
http_requests: 0,
},
message: (0..=255).cycle().take(2_000).collect(),
}
}
/// Creates a dummy [`Account`] instance to use for the WIT roundtrip test.
fn account_test_case() -> Account {
Account {
chain_id: ChainId(CryptoHash::test_hash("chain_id_10")),
owner: AccountOwner::from(CryptoHash::test_hash("account")),
}
}
/// Creates a dummy [`ApplicationId`] instance to use for the WIT roundtrip test.
fn application_id_test_case() -> ApplicationId {
ApplicationId::new(CryptoHash::test_hash("application description"))
}
/// Creates a dummy [`ModuleId`] instance to use for the WIT roundtrip test.
fn module_id_test_case() -> ModuleId {
ModuleId::new(
CryptoHash::test_hash("another contract bytecode"),
CryptoHash::test_hash("another service bytecode"),
VmRuntime::Wasm,
)
}
/// Creates a dummy [`TimeoutConfig`] instance to use for the WIT roundtrip test.
fn timeout_config_test_case() -> TimeoutConfig {
TimeoutConfig {
fast_round_duration: Some(TimeDelta::from_micros(20)),
base_timeout: TimeDelta::from_secs(4),
timeout_increment: TimeDelta::from_millis(125),
fallback_duration: TimeDelta::from_secs(1_000),
}
}
/// Creates a dummy [`ChainOwnership`] instance to use for the WIT roundtrip test.
fn chain_ownership_test_case() -> ChainOwnership {
let super_owners = ["Alice", "Bob"]
.into_iter()
.map(|owner_name| AccountOwner::from(CryptoHash::test_hash(owner_name)))
.collect();
let owners = ["Carol", "Dennis", "Eve"]
.into_iter()
.enumerate()
.map(|(index, owner_name)| {
(
AccountOwner::from(CryptoHash::test_hash(owner_name)),
index as u64,
)
})
.collect();
ChainOwnership {
super_owners,
first_leader: Some(AccountOwner::from(CryptoHash::test_hash("Fred"))),
owners,
multi_leader_rounds: 5,
open_multi_leader_rounds: false,
timeout_config: TimeoutConfig {
fast_round_duration: None,
base_timeout: TimeDelta::ZERO,
timeout_increment: TimeDelta::from_secs(3_600),
fallback_duration: TimeDelta::from_secs(10_000),
},
}
}
#[test]
fn account_owner_debug_format() {
assert_eq!(&format!("{:?}", AccountOwner::Reserved(10)), "Reserved(10)");
let addr32 = AccountOwner::Address32(CryptoHash::from([10u8; 32]));
let debug32 = "Address32(0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a)";
assert_eq!(&format!("{addr32:?}"), debug32);
let addr20 = AccountOwner::Address20([10u8; 20]);
let debug20 = "Address20(0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a)";
assert_eq!(&format!("{addr20:?}"), debug20);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/port.rs | linera-base/src/port.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Functionality for obtaining some free port.
use anyhow::{bail, Result};
use port_selector::random_free_tcp_port;
use crate::time::Duration;
/// Provides a port that is currently not used
///
/// Makes up to nine attempts, backing off by an increasing number of seconds
/// between attempts.
///
/// # Errors
/// Fails if no free TCP port could be obtained after all attempts.
pub async fn get_free_port() -> Result<u16> {
    const MAX_ATTEMPTS: u64 = 9;
    for i in 1..=MAX_ATTEMPTS {
        if let Some(port) = random_free_tcp_port() {
            return Ok(port);
        }
        // Don't sleep after the final failed attempt — we are about to give up
        // anyway (the original slept up to 9 extra seconds before bailing).
        if i < MAX_ATTEMPTS {
            crate::time::timer::sleep(Duration::from_secs(i)).await;
        }
    }
    bail!("Failed to obtain a port");
}
/// Provides a local endpoint that is currently available
pub async fn get_free_endpoint() -> Result<String> {
    let port = get_free_port().await?;
    Ok(format!("127.0.0.1:{port}"))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/ownership.rs | linera-base/src/ownership.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Structures defining the set of owners and super owners, as well as the consensus
//! round types and timeouts for chains.
use std::{
collections::{BTreeMap, BTreeSet},
iter,
};
use allocative::Allocative;
use custom_debug_derive::Debug;
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::{
data_types::{Round, TimeDelta},
doc_scalar,
identifiers::AccountOwner,
};
/// The timeout configuration: how long fast, multi-leader and single-leader rounds last.
#[derive(
PartialEq,
Eq,
Clone,
Hash,
Debug,
Serialize,
Deserialize,
WitLoad,
WitStore,
WitType,
Allocative,
)]
pub struct TimeoutConfig {
/// The duration of the fast round.
#[debug(skip_if = Option::is_none)]
pub fast_round_duration: Option<TimeDelta>,
/// The duration of the first single-leader and all multi-leader rounds.
pub base_timeout: TimeDelta,
/// The duration by which the timeout increases after each single-leader round.
pub timeout_increment: TimeDelta,
/// The age of an incoming tracked or protected message after which the validators start
/// transitioning the chain to fallback mode.
pub fallback_duration: TimeDelta,
}
impl Default for TimeoutConfig {
    fn default() -> Self {
        Self {
            // No fast round by default.
            fast_round_duration: None,
            base_timeout: TimeDelta::from_secs(10),
            timeout_increment: TimeDelta::from_secs(1),
            // This is `MAX` because the validators are not currently expected to start clients for
            // every chain with an old tracked message in the inbox.
            fallback_duration: TimeDelta::MAX,
        }
    }
}
/// Represents the owner(s) of a chain.
#[derive(
    PartialEq,
    Eq,
    Clone,
    Hash,
    Debug,
    Default,
    Serialize,
    Deserialize,
    WitLoad,
    WitStore,
    WitType,
    Allocative,
)]
pub struct ChainOwnership {
    /// Super owners can propose fast blocks in the first round, and regular blocks in any round.
    #[debug(skip_if = BTreeSet::is_empty)]
    pub super_owners: BTreeSet<AccountOwner>,
    /// The regular owners, with their weights that determine how often they are round leader.
    #[debug(skip_if = BTreeMap::is_empty)]
    pub owners: BTreeMap<AccountOwner, u64>,
    /// The leader of the first single-leader round. If not set, this is random like other rounds.
    pub first_leader: Option<AccountOwner>,
    /// The number of rounds in which all owners are allowed to propose blocks.
    /// These are the rounds `MultiLeader(0)` through `MultiLeader(multi_leader_rounds - 1)`.
    pub multi_leader_rounds: u32,
    /// Whether the multi-leader rounds are unrestricted, i.e. not limited to chain owners.
    /// This should only be `true` on chains with restrictive application permissions and an
    /// application-based mechanism to select block proposers.
    pub open_multi_leader_rounds: bool,
    /// The timeout configuration: how long fast, multi-leader and single-leader rounds last.
    pub timeout_config: TimeoutConfig,
}
impl ChainOwnership {
    /// Creates a `ChainOwnership` with a single super owner.
    pub fn single_super(owner: AccountOwner) -> Self {
        ChainOwnership {
            super_owners: BTreeSet::from_iter([owner]),
            owners: BTreeMap::new(),
            first_leader: None,
            multi_leader_rounds: 2,
            open_multi_leader_rounds: false,
            timeout_config: TimeoutConfig::default(),
        }
    }

    /// Creates a `ChainOwnership` with a single regular owner (weight 100).
    pub fn single(owner: AccountOwner) -> Self {
        ChainOwnership {
            super_owners: BTreeSet::new(),
            owners: BTreeMap::from_iter([(owner, 100)]),
            first_leader: None,
            multi_leader_rounds: 2,
            open_multi_leader_rounds: false,
            timeout_config: TimeoutConfig::default(),
        }
    }

    /// Creates a `ChainOwnership` with the specified regular owners and weights.
    pub fn multiple(
        owners_and_weights: impl IntoIterator<Item = (AccountOwner, u64)>,
        multi_leader_rounds: u32,
        timeout_config: TimeoutConfig,
    ) -> Self {
        ChainOwnership {
            super_owners: BTreeSet::new(),
            owners: BTreeMap::from_iter(owners_and_weights),
            first_leader: None,
            multi_leader_rounds,
            open_multi_leader_rounds: false,
            timeout_config,
        }
    }

    /// Adds a regular owner with the given weight.
    pub fn with_regular_owner(mut self, owner: AccountOwner, weight: u64) -> Self {
        self.owners.insert(owner, weight);
        self
    }

    /// Fixes the given owner as the leader of the first single-leader round on all heights.
    pub fn with_first_leader(mut self, owner: AccountOwner) -> Self {
        self.first_leader = Some(owner);
        self
    }

    /// Returns whether there are any owners or super owners or it is a public chain.
    pub fn is_active(&self) -> bool {
        if !self.super_owners.is_empty() || !self.owners.is_empty() {
            return true;
        }
        // A chain that instantly falls back to validators is a public chain.
        self.timeout_config.fallback_duration == TimeDelta::ZERO
    }

    /// Returns `true` if this is an owner, a super owner, or the fixed first leader.
    pub fn is_owner(&self, owner: &AccountOwner) -> bool {
        self.first_leader.as_ref() == Some(owner)
            || self.super_owners.contains(owner)
            || self.owners.contains_key(owner)
    }

    /// Returns `true` if this is an owner or if `open_multi_leader_rounds`.
    pub fn is_multi_leader_owner(&self, owner: &AccountOwner) -> bool {
        if self.open_multi_leader_rounds {
            return true;
        }
        self.super_owners.contains(owner) || self.owners.contains_key(owner)
    }

    /// Returns the duration of the given round, or `None` if it never times out.
    pub fn round_timeout(&self, round: Round) -> Option<TimeDelta> {
        let config = &self.timeout_config;
        match round {
            // The fast round only times out if there are regular owners.
            Round::Fast if self.owners.is_empty() => None,
            Round::Fast => config.fast_round_duration,
            // Only the last multi-leader round has a timeout.
            Round::MultiLeader(r) => (r.saturating_add(1) == self.multi_leader_rounds)
                .then_some(config.base_timeout),
            // Single-leader and validator rounds last the base timeout plus one increment
            // per preceding round of the same kind.
            Round::SingleLeader(r) | Round::Validator(r) => {
                let increment = config.timeout_increment.saturating_mul(u64::from(r));
                Some(config.base_timeout.saturating_add(increment))
            }
        }
    }

    /// Returns the first consensus round for this configuration.
    pub fn first_round(&self) -> Round {
        if !self.super_owners.is_empty() {
            Round::Fast
        } else if self.owners.is_empty() {
            Round::Validator(0)
        } else if self.multi_leader_rounds == 0 {
            Round::SingleLeader(0)
        } else {
            Round::MultiLeader(0)
        }
    }

    /// Returns an iterator over all super owners, followed by all owners.
    pub fn all_owners(&self) -> impl Iterator<Item = &AccountOwner> {
        self.super_owners.iter().chain(self.owners.keys())
    }

    /// Returns whether fallback mode is enabled on this chain, i.e. the fallback duration
    /// is less than `TimeDelta::MAX`.
    pub fn has_fallback(&self) -> bool {
        self.timeout_config.fallback_duration != TimeDelta::MAX
    }

    /// Returns the round following the specified one, if any.
    pub fn next_round(&self, round: Round) -> Option<Round> {
        match round {
            Round::Fast if self.multi_leader_rounds == 0 => Some(Round::SingleLeader(0)),
            Round::Fast => Some(Round::MultiLeader(0)),
            // After the last multi-leader round (or on overflow), single-leader rounds begin.
            Round::MultiLeader(r) => Some(match r.checked_add(1) {
                Some(next) if next < self.multi_leader_rounds => Round::MultiLeader(next),
                _ => Round::SingleLeader(0),
            }),
            // After the single-leader rounds are exhausted, validator rounds begin.
            Round::SingleLeader(r) => Some(match r.checked_add(1) {
                Some(next) => Round::SingleLeader(next),
                None => Round::Validator(0),
            }),
            // Validator rounds run until the round number overflows.
            Round::Validator(r) => r.checked_add(1).map(Round::Validator),
        }
    }

    /// Returns whether the given owner is a super owner and there are no regular owners.
    pub fn is_super_owner_no_regular_owners(&self, owner: &AccountOwner) -> bool {
        self.super_owners.contains(owner) && self.owners.is_empty()
    }
}
/// Errors that can happen when attempting to close a chain.
// The `#[error]` attributes double as the `Display` messages via `thiserror`.
#[derive(Clone, Copy, Debug, Error, WitStore, WitType)]
pub enum CloseChainError {
    /// The application wasn't allowed to close the chain.
    #[error("Unauthorized attempt to close the chain")]
    NotPermitted,
}
/// Errors that can happen when attempting to change the application permissions.
#[derive(Clone, Copy, Debug, Error, WitStore, WitType)]
pub enum ChangeApplicationPermissionsError {
    /// The application wasn't allowed to change the application permissions.
    #[error("Unauthorized attempt to change the application permissions")]
    NotPermitted,
}
/// Errors that can happen when verifying the authentication of an operation over an
/// account.
#[derive(Clone, Copy, Debug, Error, WitStore, WitType)]
pub enum AccountPermissionError {
    /// Operations on this account are not permitted in the current execution context.
    #[error("Unauthorized attempt to access account owned by {0}")]
    NotPermitted(AccountOwner),
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::{Ed25519SecretKey, Secp256k1SecretKey};
    // Exercises `round_timeout` for each round type with both a super owner and a
    // regular owner configured.
    #[test]
    fn test_ownership_round_timeouts() {
        let super_pub_key = Ed25519SecretKey::generate().public();
        let super_owner = AccountOwner::from(super_pub_key);
        let pub_key = Secp256k1SecretKey::generate().public();
        let owner = AccountOwner::from(pub_key);
        let ownership = ChainOwnership {
            super_owners: BTreeSet::from_iter([super_owner]),
            owners: BTreeMap::from_iter([(owner, 100)]),
            first_leader: Some(owner),
            multi_leader_rounds: 10,
            open_multi_leader_rounds: false,
            timeout_config: TimeoutConfig {
                fast_round_duration: Some(TimeDelta::from_secs(5)),
                base_timeout: TimeDelta::from_secs(10),
                timeout_increment: TimeDelta::from_secs(1),
                fallback_duration: TimeDelta::from_secs(60 * 60),
            },
        };
        // The fast round times out because there is a regular owner.
        assert_eq!(
            ownership.round_timeout(Round::Fast),
            Some(TimeDelta::from_secs(5))
        );
        // Only the last multi-leader round (index 9 of 10) has a timeout.
        assert_eq!(ownership.round_timeout(Round::MultiLeader(8)), None);
        assert_eq!(
            ownership.round_timeout(Round::MultiLeader(9)),
            Some(TimeDelta::from_secs(10))
        );
        // Single-leader timeouts grow by one increment per round.
        assert_eq!(
            ownership.round_timeout(Round::SingleLeader(0)),
            Some(TimeDelta::from_secs(10))
        );
        assert_eq!(
            ownership.round_timeout(Round::SingleLeader(1)),
            Some(TimeDelta::from_secs(11))
        );
        assert_eq!(
            ownership.round_timeout(Round::SingleLeader(8)),
            Some(TimeDelta::from_secs(18))
        );
    }
}
// Registers `ChainOwnership` as a scalar with the given description via `crate::doc_scalar`.
doc_scalar!(ChainOwnership, "Represents the owner(s) of a chain");
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/task.rs | linera-base/src/task.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Abstractions over tasks that can be used natively or on the Web.
*/
use futures::{future, Future, FutureExt as _};
/// The type of a future awaiting another task.
// An alias for `futures::future::RemoteHandle`; both `spawn` variants below return it.
pub type NonBlockingFuture<R> = future::RemoteHandle<R>;
/// Spawns a new task on the Tokio runtime, potentially on the current thread.
///
/// The returned handle resolves to the task's output when awaited.
#[cfg(not(web))]
pub fn spawn<F: Future<Output: Send> + Send + 'static>(future: F) -> NonBlockingFuture<F::Output> {
    let (task, handle) = future.remote_handle();
    // Detach the Tokio join handle; completion is observed through the returned handle.
    tokio::task::spawn(task);
    handle
}
/// Spawns a new task on the current thread (the only option on the Web).
///
/// The returned handle resolves to the task's output when awaited.
#[cfg(web)]
pub fn spawn<F: Future + 'static>(future: F) -> NonBlockingFuture<F::Output> {
    let (task, handle) = future.remote_handle();
    // Drive the task on the browser's event loop; completion is observed via the handle.
    wasm_bindgen_futures::spawn_local(task);
    handle
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/dyn_convert.rs | linera-base/src/dyn_convert.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Object-safe conversion traits.
*/
/// An object-safe version of `std::convert::Into`.
///
/// Unlike `Into`, this trait can be used as a trait object (`Box<dyn DynInto<T>>`),
/// because the conversion consumes a `Box<Self>` rather than `Self`.
pub trait DynInto<To> {
    /// Converts a boxed object into the target type.
    fn into_box(self: Box<Self>) -> To;
}

/// Blanket implementation: anything convertible via `Into` is also dynamically convertible.
impl<To, From: Into<To>> DynInto<To> for From {
    fn into_box(self: Box<From>) -> To {
        Into::into(*self)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/data_types.rs | linera-base/src/data_types.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Core data-types used in the Linera protocol.
#[cfg(with_testing)]
use std::ops;
use std::{
fmt::{self, Display},
fs,
hash::Hash,
io, iter,
num::ParseIntError,
path::Path,
str::FromStr,
sync::Arc,
};
use allocative::{Allocative, Visitor};
use alloy_primitives::U256;
use async_graphql::{InputObject, SimpleObject};
use custom_debug_derive::Debug;
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_with::{serde_as, Bytes};
use thiserror::Error;
#[cfg(with_metrics)]
use crate::prometheus_util::MeasureLatency as _;
use crate::{
crypto::{BcsHashable, CryptoError, CryptoHash},
doc_scalar, hex_debug, http,
identifiers::{
ApplicationId, BlobId, BlobType, ChainId, EventId, GenericApplicationId, ModuleId, StreamId,
},
limited_writer::{LimitedWriter, LimitedWriterError},
ownership::ChainOwnership,
time::{Duration, SystemTime},
vm::VmRuntime,
};
/// A non-negative amount of tokens.
///
/// This is a fixed-point fraction, with [`Amount::DECIMAL_PLACES`] digits after the point.
/// [`Amount::ONE`] is one whole token, divisible into `10.pow(Amount::DECIMAL_PLACES)` parts.
#[derive(
    Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Default, Debug, WitType, WitLoad, WitStore,
)]
#[cfg_attr(
    all(with_testing, not(target_arch = "wasm32")),
    derive(test_strategy::Arbitrary)
)]
// The wrapped `u128` counts attotokens (`Amount::ONE == 10^18`).
pub struct Amount(u128);
impl Allocative for Amount {
    // `Amount` holds no heap data, so only its inline size is reported.
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_simple_sized::<Self>();
    }
}
// Helper wrappers so both serde encodings report the type name "Amount":
// a decimal string for human-readable formats, the raw `u128` for binary ones.
#[derive(Serialize, Deserialize)]
#[serde(rename = "Amount")]
struct AmountString(String);
#[derive(Serialize, Deserialize)]
#[serde(rename = "Amount")]
struct AmountU128(u128);
impl Serialize for Amount {
    /// Serializes as a decimal string for human-readable formats and as the raw `u128`
    /// for binary formats.
    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        if !serializer.is_human_readable() {
            AmountU128(self.0).serialize(serializer)
        } else {
            let repr = self.to_string();
            AmountString(repr).serialize(serializer)
        }
    }
}
impl<'de> Deserialize<'de> for Amount {
fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
if deserializer.is_human_readable() {
let AmountString(s) = AmountString::deserialize(deserializer)?;
s.parse().map_err(serde::de::Error::custom)
} else {
Ok(Amount(AmountU128::deserialize(deserializer)?.0))
}
}
}
impl From<Amount> for U256 {
    // Infallible: any `u128` fits into 256 bits.
    fn from(amount: Amount) -> U256 {
        U256::from(amount.0)
    }
}
/// Error converting from `U256` to `Amount`.
/// This can fail since `Amount` is a `u128`.
#[derive(Error, Debug)]
#[error("Failed to convert U256 to Amount. {0} has more than 128 bits")]
pub struct AmountConversionError(U256);
impl TryFrom<U256> for Amount {
    type Error = AmountConversionError;
    // Fails with `AmountConversionError` when the value does not fit into 128 bits.
    fn try_from(value: U256) -> Result<Amount, Self::Error> {
        let value = u128::try_from(&value).map_err(|_| AmountConversionError(value))?;
        Ok(Amount(value))
    }
}
/// A block height to identify blocks in a chain.
// A `u64` newtype; the arithmetic helpers come from `impl_wrapped_number!` below.
#[derive(
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Copy,
    Clone,
    Hash,
    Default,
    Debug,
    Serialize,
    Deserialize,
    WitType,
    WitLoad,
    WitStore,
    Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
pub struct BlockHeight(pub u64);
/// An identifier for successive attempts to decide a value in a consensus protocol.
///
/// The derived `Ord` orders rounds by variant first — `Fast` before `MultiLeader`
/// before `SingleLeader` before `Validator` — then by round number.
#[derive(
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Copy,
    Clone,
    Hash,
    Default,
    Debug,
    Serialize,
    Deserialize,
    Allocative,
)]
#[cfg_attr(with_testing, derive(test_strategy::Arbitrary))]
pub enum Round {
    /// The initial fast round.
    #[default]
    Fast,
    /// The N-th multi-leader round.
    MultiLeader(u32),
    /// The N-th single-leader round.
    SingleLeader(u32),
    /// The N-th round where the validators rotate as leaders.
    Validator(u32),
}
/// A duration in microseconds.
// Internally stores whole microseconds in a `u64`.
#[derive(
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Copy,
    Clone,
    Hash,
    Default,
    Debug,
    Serialize,
    Deserialize,
    WitType,
    WitLoad,
    WitStore,
    Allocative,
)]
pub struct TimeDelta(u64);
impl TimeDelta {
    /// Returns the given number of microseconds as a [`TimeDelta`].
    pub const fn from_micros(micros: u64) -> Self {
        TimeDelta(micros)
    }
    /// Returns the given number of milliseconds as a [`TimeDelta`],
    /// saturating at `u64::MAX` microseconds.
    pub const fn from_millis(millis: u64) -> Self {
        TimeDelta(millis.saturating_mul(1_000))
    }
    /// Returns the given number of seconds as a [`TimeDelta`],
    /// saturating at `u64::MAX` microseconds.
    pub const fn from_secs(secs: u64) -> Self {
        TimeDelta(secs.saturating_mul(1_000_000))
    }
    /// Returns the given duration, rounded to the nearest microsecond and capped to the maximum
    /// [`TimeDelta`] value.
    pub fn from_duration(duration: Duration) -> Self {
        TimeDelta::from_micros(u64::try_from(duration.as_micros()).unwrap_or(u64::MAX))
    }
    /// Returns this [`TimeDelta`] as a number of microseconds.
    pub const fn as_micros(&self) -> u64 {
        self.0
    }
    /// Returns this [`TimeDelta`] as a [`Duration`].
    pub const fn as_duration(&self) -> Duration {
        Duration::from_micros(self.as_micros())
    }
}
/// A timestamp, in microseconds since the Unix epoch.
// Like `TimeDelta`, this wraps a `u64` microsecond count.
#[derive(
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Copy,
    Clone,
    Hash,
    Default,
    Debug,
    Serialize,
    Deserialize,
    WitType,
    WitLoad,
    WitStore,
    Allocative,
)]
pub struct Timestamp(u64);
impl Timestamp {
    /// Returns the current time according to the system clock.
    ///
    /// Panics if the system clock is set before the Unix epoch; saturates at `u64::MAX`
    /// microseconds if the elapsed time does not fit in 64 bits.
    pub fn now() -> Timestamp {
        Timestamp(
            SystemTime::UNIX_EPOCH
                .elapsed()
                .expect("system time should be after Unix epoch")
                .as_micros()
                .try_into()
                .unwrap_or(u64::MAX),
        )
    }
    /// Returns the number of microseconds since the Unix epoch.
    pub const fn micros(&self) -> u64 {
        self.0
    }
    /// Returns the [`TimeDelta`] between `other` and `self`, or zero if `other` is not earlier
    /// than `self`.
    pub const fn delta_since(&self, other: Timestamp) -> TimeDelta {
        TimeDelta::from_micros(self.0.saturating_sub(other.0))
    }
    /// Returns the [`Duration`] between `other` and `self`, or zero if `other` is not
    /// earlier than `self`.
    pub const fn duration_since(&self, other: Timestamp) -> Duration {
        Duration::from_micros(self.0.saturating_sub(other.0))
    }
    /// Returns the timestamp that is `duration` later than `self`.
    pub const fn saturating_add(&self, duration: TimeDelta) -> Timestamp {
        Timestamp(self.0.saturating_add(duration.0))
    }
    /// Returns the timestamp that is `duration` earlier than `self`.
    pub const fn saturating_sub(&self, duration: TimeDelta) -> Timestamp {
        Timestamp(self.0.saturating_sub(duration.0))
    }
    /// Returns a timestamp `micros` microseconds earlier than `self`, or the lowest possible value
    /// if it would underflow.
    pub const fn saturating_sub_micros(&self, micros: u64) -> Timestamp {
        Timestamp(self.0.saturating_sub(micros))
    }
}
impl From<u64> for Timestamp {
    // Interprets the integer as microseconds since the Unix epoch.
    fn from(t: u64) -> Timestamp {
        Timestamp(t)
    }
}
impl Display for Timestamp {
    /// Formats the timestamp as a UTC date-time, falling back to the raw microsecond
    /// count when it is out of range for `chrono`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let seconds = (self.0 / 1_000_000) as i64;
        let nanos = ((self.0 % 1_000_000) * 1_000) as u32;
        match chrono::DateTime::from_timestamp(seconds, nanos) {
            Some(date_time) => date_time.naive_utc().fmt(f),
            None => self.0.fmt(f),
        }
    }
}
/// Resources that an application may spend during the execution of transaction or an
/// application call.
///
/// All quantities are plain counters; `Default` yields an all-zero claim.
#[derive(
    Clone, Copy, Debug, Default, Deserialize, Eq, PartialEq, Serialize, WitLoad, WitStore, WitType,
)]
pub struct Resources {
    /// An amount of Wasm execution fuel.
    pub wasm_fuel: u64,
    /// An amount of EVM execution fuel.
    pub evm_fuel: u64,
    /// A number of read operations to be executed.
    pub read_operations: u32,
    /// A number of write operations to be executed.
    pub write_operations: u32,
    /// A number of bytes read from runtime.
    pub bytes_runtime: u32,
    /// A number of bytes to read.
    pub bytes_to_read: u32,
    /// A number of bytes to write.
    pub bytes_to_write: u32,
    /// A number of blobs to read.
    pub blobs_to_read: u32,
    /// A number of blobs to publish.
    pub blobs_to_publish: u32,
    /// A number of blob bytes to read.
    pub blob_bytes_to_read: u32,
    /// A number of blob bytes to publish.
    pub blob_bytes_to_publish: u32,
    /// A number of messages to be sent.
    pub messages: u32,
    /// The size of the messages to be sent.
    // TODO(#1531): Account for the type of message to be sent.
    pub message_size: u32,
    /// An increase in the amount of storage space.
    pub storage_size_delta: u32,
    /// A number of service-as-oracle requests to be performed.
    pub service_as_oracle_queries: u32,
    /// A number of HTTP requests to be performed.
    pub http_requests: u32,
    // TODO(#1532): Account for the system calls that we plan on calling.
    // TODO(#1533): Allow declaring calls to other applications instead of having to count them here.
}
/// A request to send a message.
// Over the Wit boundary the generic `Message` payload is specialized to raw bytes
// (see `into_raw` below).
#[derive(Clone, Debug, Deserialize, Serialize, WitLoad, WitType)]
#[cfg_attr(with_testing, derive(Eq, PartialEq, WitStore))]
#[witty_specialize_with(Message = Vec<u8>)]
pub struct SendMessageRequest<Message> {
    /// The destination of the message.
    pub destination: ChainId,
    /// Whether the message is authenticated.
    pub authenticated: bool,
    /// Whether the message is tracked.
    pub is_tracked: bool,
    /// The grant resources forwarded with the message.
    pub grant: Resources,
    /// The message itself.
    pub message: Message,
}
impl<Message> SendMessageRequest<Message>
where
    Message: Serialize,
{
    /// Serializes the internal `Message` type into raw bytes.
    ///
    /// Panics if BCS serialization of the message fails.
    pub fn into_raw(self) -> SendMessageRequest<Vec<u8>> {
        let SendMessageRequest {
            destination,
            authenticated,
            is_tracked,
            grant,
            message,
        } = self;
        SendMessageRequest {
            destination,
            authenticated,
            is_tracked,
            grant,
            message: bcs::to_bytes(&message).expect("Failed to serialize message"),
        }
    }
}
/// An error type for arithmetic errors.
// Variant docs are intentionally omitted; the `thiserror` messages serve as documentation.
#[derive(Debug, Error)]
#[allow(missing_docs)]
pub enum ArithmeticError {
    #[error("Number overflow")]
    Overflow,
    #[error("Number underflow")]
    Underflow,
}
// Generates the arithmetic API shared by `Amount`, `BlockHeight` and `TimeDelta`:
// checked (`try_*`), saturating, and in-place operations over the wrapped unsigned integer.
macro_rules! impl_wrapped_number {
    ($name:ident, $wrapped:ident) => {
        impl $name {
            /// The zero value.
            pub const ZERO: Self = Self(0);
            /// The maximum value.
            pub const MAX: Self = Self($wrapped::MAX);
            /// Checked addition.
            pub fn try_add(self, other: Self) -> Result<Self, ArithmeticError> {
                let val = self
                    .0
                    .checked_add(other.0)
                    .ok_or(ArithmeticError::Overflow)?;
                Ok(Self(val))
            }
            /// Checked increment.
            pub fn try_add_one(self) -> Result<Self, ArithmeticError> {
                let val = self.0.checked_add(1).ok_or(ArithmeticError::Overflow)?;
                Ok(Self(val))
            }
            /// Saturating addition.
            pub const fn saturating_add(self, other: Self) -> Self {
                let val = self.0.saturating_add(other.0);
                Self(val)
            }
            /// Checked subtraction.
            pub fn try_sub(self, other: Self) -> Result<Self, ArithmeticError> {
                let val = self
                    .0
                    .checked_sub(other.0)
                    .ok_or(ArithmeticError::Underflow)?;
                Ok(Self(val))
            }
            /// Checked decrement.
            pub fn try_sub_one(self) -> Result<Self, ArithmeticError> {
                let val = self.0.checked_sub(1).ok_or(ArithmeticError::Underflow)?;
                Ok(Self(val))
            }
            /// Saturating subtraction.
            pub const fn saturating_sub(self, other: Self) -> Self {
                let val = self.0.saturating_sub(other.0);
                Self(val)
            }
            /// Returns the absolute difference between `self` and `other`.
            pub fn abs_diff(self, other: Self) -> Self {
                Self(self.0.abs_diff(other.0))
            }
            /// Checked in-place addition.
            pub fn try_add_assign(&mut self, other: Self) -> Result<(), ArithmeticError> {
                self.0 = self
                    .0
                    .checked_add(other.0)
                    .ok_or(ArithmeticError::Overflow)?;
                Ok(())
            }
            /// Checked in-place increment.
            pub fn try_add_assign_one(&mut self) -> Result<(), ArithmeticError> {
                self.0 = self.0.checked_add(1).ok_or(ArithmeticError::Overflow)?;
                Ok(())
            }
            /// Saturating in-place addition.
            pub const fn saturating_add_assign(&mut self, other: Self) {
                self.0 = self.0.saturating_add(other.0);
            }
            /// Checked in-place subtraction.
            pub fn try_sub_assign(&mut self, other: Self) -> Result<(), ArithmeticError> {
                self.0 = self
                    .0
                    .checked_sub(other.0)
                    .ok_or(ArithmeticError::Underflow)?;
                Ok(())
            }
            /// Saturating division.
            ///
            /// Note that division by zero yields `MAX` rather than panicking.
            pub fn saturating_div(&self, other: $wrapped) -> Self {
                Self(self.0.checked_div(other).unwrap_or($wrapped::MAX))
            }
            /// Saturating multiplication.
            pub const fn saturating_mul(&self, other: $wrapped) -> Self {
                Self(self.0.saturating_mul(other))
            }
            /// Checked multiplication.
            pub fn try_mul(self, other: $wrapped) -> Result<Self, ArithmeticError> {
                let val = self.0.checked_mul(other).ok_or(ArithmeticError::Overflow)?;
                Ok(Self(val))
            }
            /// Checked in-place multiplication.
            pub fn try_mul_assign(&mut self, other: $wrapped) -> Result<(), ArithmeticError> {
                self.0 = self.0.checked_mul(other).ok_or(ArithmeticError::Overflow)?;
                Ok(())
            }
        }
        impl From<$name> for $wrapped {
            fn from(value: $name) -> Self {
                value.0
            }
        }
        // Cannot directly create values for a wrapped type, except for testing.
        #[cfg(with_testing)]
        impl From<$wrapped> for $name {
            fn from(value: $wrapped) -> Self {
                Self(value)
            }
        }
        // The operators below can panic on overflow and are test-only conveniences.
        #[cfg(with_testing)]
        impl ops::Add for $name {
            type Output = Self;
            fn add(self, other: Self) -> Self {
                Self(self.0 + other.0)
            }
        }
        #[cfg(with_testing)]
        impl ops::Sub for $name {
            type Output = Self;
            fn sub(self, other: Self) -> Self {
                Self(self.0 - other.0)
            }
        }
        #[cfg(with_testing)]
        impl ops::Mul<$wrapped> for $name {
            type Output = Self;
            fn mul(self, other: $wrapped) -> Self {
                Self(self.0 * other)
            }
        }
    };
}
impl TryFrom<BlockHeight> for usize {
type Error = ArithmeticError;
fn try_from(height: BlockHeight) -> Result<usize, ArithmeticError> {
usize::try_from(height.0).map_err(|_| ArithmeticError::Overflow)
}
}
// Instantiate the shared arithmetic helpers for each wrapper type.
impl_wrapped_number!(Amount, u128);
impl_wrapped_number!(BlockHeight, u64);
impl_wrapped_number!(TimeDelta, u64);
impl Display for Amount {
    // Renders the fixed-point value with full support for width, fill, alignment,
    // precision and the `+` flag.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print the wrapped integer, padded with zeros to cover a digit before the decimal point.
        let places = Amount::DECIMAL_PLACES as usize;
        let min_digits = places + 1;
        let decimals = format!("{:0min_digits$}", self.0);
        let integer_part = &decimals[..(decimals.len() - places)];
        // Trailing zeros of the fraction are dropped before applying the precision.
        let fractional_part = decimals[(decimals.len() - places)..].trim_end_matches('0');
        // For now, we never trim non-zero digits so we don't lose any precision.
        let precision = f.precision().unwrap_or(0).max(fractional_part.len());
        let sign = if f.sign_plus() && self.0 > 0 { "+" } else { "" };
        // The amount of padding: desired width minus sign, point and number of digits.
        let pad_width = f.width().map_or(0, |w| {
            w.saturating_sub(precision)
                .saturating_sub(sign.len() + integer_part.len() + 1)
        });
        // Split the fill between left and right according to the alignment flag;
        // the default is right-aligned, i.e. all padding on the left.
        let left_pad = match f.align() {
            None | Some(fmt::Alignment::Right) => pad_width,
            Some(fmt::Alignment::Center) => pad_width / 2,
            Some(fmt::Alignment::Left) => 0,
        };
        for _ in 0..left_pad {
            write!(f, "{}", f.fill())?;
        }
        // Zero-pad the fractional part on the right up to the requested precision.
        write!(f, "{sign}{integer_part}.{fractional_part:0<precision$}")?;
        for _ in left_pad..pad_width {
            write!(f, "{}", f.fill())?;
        }
        Ok(())
    }
}
/// Errors that can arise when parsing an [`Amount`] from a string.
#[derive(Error, Debug)]
#[allow(missing_docs)]
pub enum ParseAmountError {
    #[error("cannot parse amount")]
    Parse,
    #[error("cannot represent amount: number too high")]
    TooHigh,
    #[error("cannot represent amount: too many decimal places after the point")]
    TooManyDigits,
}
impl FromStr for Amount {
    type Err = ParseAmountError;
    // Parses a decimal token amount like "1", "+0.5" or "1_000.25" into attotokens.
    fn from_str(src: &str) -> Result<Self, Self::Err> {
        let mut result: u128 = 0;
        // Once a '.' is seen, this counts how many decimal digits may still follow.
        let mut decimals: Option<u8> = None;
        let mut chars = src.trim().chars().peekable();
        // Accept an optional leading '+'; '-' is rejected below since amounts are unsigned.
        if chars.peek() == Some(&'+') {
            chars.next();
        }
        for char in chars {
            match char {
                // '_' is allowed as a digit group separator and is ignored.
                '_' => {}
                '.' if decimals.is_some() => return Err(ParseAmountError::Parse),
                '.' => decimals = Some(Amount::DECIMAL_PLACES),
                char => {
                    let digit = u128::from(char.to_digit(10).ok_or(ParseAmountError::Parse)?);
                    if let Some(d) = &mut decimals {
                        *d = d.checked_sub(1).ok_or(ParseAmountError::TooManyDigits)?;
                    }
                    result = result
                        .checked_mul(10)
                        .and_then(|r| r.checked_add(digit))
                        .ok_or(ParseAmountError::TooHigh)?;
                }
            }
        }
        // Scale up by the remaining decimal places so the result is in attotokens.
        // NOTE(review): an empty or "."-only input parses as zero rather than an error —
        // confirm this is intended.
        result = result
            .checked_mul(10u128.pow(decimals.unwrap_or(Amount::DECIMAL_PLACES) as u32))
            .ok_or(ParseAmountError::TooHigh)?;
        Ok(Amount(result))
    }
}
impl Display for BlockHeight {
    // Delegates to `u64`, so width/fill/alignment flags apply to the inner value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.0.fmt(f)
    }
}
impl FromStr for BlockHeight {
    type Err = ParseIntError;

    /// Parses a decimal string as a block height.
    fn from_str(src: &str) -> Result<Self, Self::Err> {
        src.parse::<u64>().map(Self)
    }
}
impl Display for Round {
    /// Formats the round as a human-readable description, e.g. "multi-leader round 3".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Round::Fast => f.write_str("fast round"),
            Round::MultiLeader(r) => write!(f, "multi-leader round {r}"),
            Round::SingleLeader(r) => write!(f, "single-leader round {r}"),
            Round::Validator(r) => write!(f, "validator round {r}"),
        }
    }
}
impl Round {
    /// Whether the round is a multi-leader round.
    pub fn is_multi_leader(&self) -> bool {
        self.multi_leader().is_some()
    }
    /// Returns the round number if this is a multi-leader round, `None` otherwise.
    pub fn multi_leader(&self) -> Option<u32> {
        if let Round::MultiLeader(number) = self {
            Some(*number)
        } else {
            None
        }
    }
    /// Returns whether this is a validator round.
    pub fn is_validator(&self) -> bool {
        matches!(self, Round::Validator(_))
    }
    /// Whether the round is the fast round.
    pub fn is_fast(&self) -> bool {
        matches!(self, Round::Fast)
    }
    /// The index of a round amongst the rounds of the same category.
    /// The fast round always has index 0.
    pub fn number(&self) -> u32 {
        match *self {
            Round::Fast => 0,
            Round::MultiLeader(r) | Round::SingleLeader(r) | Round::Validator(r) => r,
        }
    }
    /// The category of the round as a string.
    pub fn type_name(&self) -> &'static str {
        match self {
            Round::Fast => "fast",
            Round::MultiLeader(_) => "multi",
            Round::SingleLeader(_) => "single",
            Round::Validator(_) => "validator",
        }
    }
}
impl<'a> iter::Sum<&'a Amount> for Amount {
    /// Sums amounts, saturating at `Amount::MAX` instead of overflowing.
    fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
        let mut total = Self::ZERO;
        for amount in iter {
            total = total.saturating_add(*amount);
        }
        total
    }
}
impl Amount {
    /// The base-10 exponent representing how much a token can be divided.
    pub const DECIMAL_PLACES: u8 = 18;
    /// One token.
    pub const ONE: Amount = Amount(10u128.pow(Amount::DECIMAL_PLACES as u32));
    /// Returns an `Amount` corresponding to that many tokens, or `Amount::MAX` if saturated.
    pub const fn from_tokens(tokens: u128) -> Amount {
        Self::ONE.saturating_mul(tokens)
    }
    /// Returns an `Amount` corresponding to that many millitokens, or `Amount::MAX` if saturated.
    pub const fn from_millis(millitokens: u128) -> Amount {
        Amount(10u128.pow(Amount::DECIMAL_PLACES as u32 - 3)).saturating_mul(millitokens)
    }
    /// Returns an `Amount` corresponding to that many microtokens, or `Amount::MAX` if saturated.
    pub const fn from_micros(microtokens: u128) -> Amount {
        Amount(10u128.pow(Amount::DECIMAL_PLACES as u32 - 6)).saturating_mul(microtokens)
    }
    /// Returns an `Amount` corresponding to that many nanotokens, or `Amount::MAX` if saturated.
    pub const fn from_nanos(nanotokens: u128) -> Amount {
        Amount(10u128.pow(Amount::DECIMAL_PLACES as u32 - 9)).saturating_mul(nanotokens)
    }
    /// Returns an `Amount` corresponding to that many attotokens.
    /// This is exact: attotokens are the smallest representable unit.
    pub const fn from_attos(attotokens: u128) -> Amount {
        Amount(attotokens)
    }
    /// Returns the number of attotokens.
    pub const fn to_attos(self) -> u128 {
        self.0
    }
    /// Helper function to obtain the 64 most significant bits of the balance.
    pub const fn upper_half(self) -> u64 {
        (self.0 >> 64) as u64
    }
    /// Helper function to obtain the 64 least significant bits of the balance.
    pub const fn lower_half(self) -> u64 {
        self.0 as u64
    }
    /// Divides this by the other amount. If the other is 0, it returns `u128::MAX`.
    pub fn saturating_ratio(self, other: Amount) -> u128 {
        self.0.checked_div(other.0).unwrap_or(u128::MAX)
    }
    /// Returns whether this amount is 0.
    pub fn is_zero(&self) -> bool {
        *self == Amount::ZERO
    }
}
/// What created a chain.
#[derive(
    Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Debug, Serialize, Deserialize, Allocative,
)]
pub enum ChainOrigin {
    /// The chain was created by the genesis configuration.
    // NOTE(review): the `u32` is presumably the chain's index within the genesis
    // configuration — confirm against the genesis code.
    Root(u32),
    /// The chain was created by a call from another chain.
    Child {
        /// The parent of this chain.
        parent: ChainId,
        /// The block height in the parent at which this chain was created.
        block_height: BlockHeight,
        /// The index of this chain among chains created at the same block height in the parent
        /// chain.
        chain_index: u32,
    },
}
impl ChainOrigin {
    /// Whether the chain was created by another chain.
    pub fn is_child(&self) -> bool {
        // A chain is a child exactly when it has no root index.
        self.root().is_none()
    }
    /// Returns the root chain number, if this is a root chain.
    pub fn root(&self) -> Option<u32> {
        if let ChainOrigin::Root(index) = self {
            Some(*index)
        } else {
            None
        }
    }
}
/// A number identifying the configuration of the chain (aka the committee).
// `Serialize`/`Deserialize` are implemented manually below to use a string encoding
// in human-readable formats.
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Default, Debug, Allocative)]
pub struct Epoch(pub u32);
impl Epoch {
    /// The zero epoch.
    pub const ZERO: Epoch = Epoch(0);
}
impl Serialize for Epoch {
    /// Serializes as a decimal string for human-readable formats, and as a newtype
    /// struct named "Epoch" wrapping the raw `u32` otherwise.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if !serializer.is_human_readable() {
            serializer.serialize_newtype_struct("Epoch", &self.0)
        } else {
            let repr = self.0.to_string();
            serializer.serialize_str(&repr)
        }
    }
}
impl<'de> Deserialize<'de> for Epoch {
    // Mirrors the `Serialize` impl: decimal string in human-readable formats,
    // a newtype struct named "Epoch" otherwise.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            Ok(Epoch(u32::from_str(&s).map_err(serde::de::Error::custom)?))
        } else {
            // Local helper so the binary encoding carries the type name "Epoch".
            #[derive(Deserialize)]
            #[serde(rename = "Epoch")]
            struct EpochDerived(u32);
            let value = EpochDerived::deserialize(deserializer)?;
            Ok(Self(value.0))
        }
    }
}
impl std::fmt::Display for Epoch {
    /// Formats the epoch as its decimal number.
    ///
    /// Delegates to `u32`'s `Display` impl so that width, fill and alignment flags are
    /// honored, consistently with the `Display` impls for `BlockHeight` and `Timestamp`
    /// in this module (the previous `write!(f, "{}", ..)` form discarded those flags).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        std::fmt::Display::fmt(&self.0, f)
    }
}
impl std::str::FromStr for Epoch {
    type Err = CryptoError;

    /// Parses a decimal string as an epoch number.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let number: u32 = s.parse()?;
        Ok(Epoch(number))
    }
}
impl From<u32> for Epoch {
    // Cheap wrapper construction; the inverse is the public `.0` field.
    fn from(value: u32) -> Self {
        Epoch(value)
    }
}
impl Epoch {
    /// Tries to return an epoch with a number increased by one. Returns an error if an overflow
    /// happens.
    #[inline]
    pub fn try_add_one(self) -> Result<Self, ArithmeticError> {
        self.0
            .checked_add(1)
            .map(Self)
            .ok_or(ArithmeticError::Overflow)
    }
    /// Tries to return an epoch with a number decreased by one. Returns an error if an underflow
    /// happens.
    pub fn try_sub_one(self) -> Result<Self, ArithmeticError> {
        self.0
            .checked_sub(1)
            .map(Self)
            .ok_or(ArithmeticError::Underflow)
    }
    /// Tries to add one to this epoch's number. Returns an error if an overflow happens.
    #[inline]
    pub fn try_add_assign_one(&mut self) -> Result<(), ArithmeticError> {
        // Only overwrite `self` once the increment is known to succeed.
        *self = self.try_add_one()?;
        Ok(())
    }
}
/// The initial configuration for a new chain.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub struct InitialChainConfig {
/// The ownership configuration of the new chain.
pub ownership: ChainOwnership,
/// The epoch in which the chain is created.
pub epoch: Epoch,
/// The lowest number of an active epoch at the time of creation of the chain.
pub min_active_epoch: Epoch,
/// The highest number of an active epoch at the time of creation of the chain.
pub max_active_epoch: Epoch,
/// The initial chain balance.
pub balance: Amount,
/// The initial application permissions.
pub application_permissions: ApplicationPermissions,
}
/// Initial chain configuration and chain origin.
#[derive(Eq, PartialEq, Clone, Hash, Debug, Serialize, Deserialize, Allocative)]
pub struct ChainDescription {
    // How the chain came into existence (see `ChainOrigin`, e.g. created by another chain).
    origin: ChainOrigin,
    // When the chain was created.
    timestamp: Timestamp,
    // The chain's initial configuration (ownership, epochs, balance, permissions).
    config: InitialChainConfig,
}
impl ChainDescription {
    /// Creates a new [`ChainDescription`].
    pub fn new(origin: ChainOrigin, config: InitialChainConfig, timestamp: Timestamp) -> Self {
        Self {
            origin,
            config,
            timestamp,
        }
    }
    /// Returns the [`ChainId`] based on this [`ChainDescription`].
    pub fn id(&self) -> ChainId {
        // The chain ID is a function of the full description (via `From`).
        ChainId::from(self)
    }
    /// Returns the [`ChainOrigin`] describing who created this chain.
    pub fn origin(&self) -> ChainOrigin {
        self.origin
    }
    /// Returns a reference to the [`InitialChainConfig`] of the chain.
    pub fn config(&self) -> &InitialChainConfig {
        &self.config
    }
    /// Returns the timestamp of when the chain was created.
    pub fn timestamp(&self) -> Timestamp {
        self.timestamp
    }
    /// Whether the chain was created by another chain.
    pub fn is_child(&self) -> bool {
        self.origin.is_child()
    }
}
// Marker impl: a `ChainDescription` can be hashed via its canonical BCS serialization.
impl BcsHashable<'_> for ChainDescription {}
/// A description of the current Linera network to be stored in every node's database.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct NetworkDescription {
/// The name of the network.
pub name: String,
/// Hash of the network's genesis config.
pub genesis_config_hash: CryptoHash,
/// Genesis timestamp.
pub genesis_timestamp: Timestamp,
/// Hash of the blob containing the genesis committee.
pub genesis_committee_blob_hash: CryptoHash,
/// The chain ID of the admin chain.
pub admin_chain_id: ChainId,
}
/// Permissions for applications on a chain.
#[derive(
Default,
Debug,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
Clone,
Serialize,
Deserialize,
WitType,
WitLoad,
WitStore,
InputObject,
Allocative,
)]
pub struct ApplicationPermissions {
/// If this is `None`, all system operations and application operations are allowed.
/// If it is `Some`, only operations from the specified applications are allowed, and
/// no system operations.
#[debug(skip_if = Option::is_none)]
pub execute_operations: Option<Vec<ApplicationId>>,
/// At least one operation or incoming message from each of these applications must occur in
/// every block.
#[graphql(default)]
#[debug(skip_if = Vec::is_empty)]
pub mandatory_applications: Vec<ApplicationId>,
/// These applications are allowed to close the current chain.
#[graphql(default)]
#[debug(skip_if = Vec::is_empty)]
pub close_chain: Vec<ApplicationId>,
/// These applications are allowed to change the application permissions.
#[graphql(default)]
#[debug(skip_if = Vec::is_empty)]
pub change_application_permissions: Vec<ApplicationId>,
/// These applications are allowed to perform calls to services as oracles.
#[graphql(default)]
#[debug(skip_if = Option::is_none)]
pub call_service_as_oracle: Option<Vec<ApplicationId>>,
/// These applications are allowed to perform HTTP requests.
#[graphql(default)]
#[debug(skip_if = Option::is_none)]
pub make_http_requests: Option<Vec<ApplicationId>>,
}
impl ApplicationPermissions {
/// Creates new `ApplicationPermissions` where the given application is the only one
/// whose operations are allowed and mandatory, and it can also close the chain.
pub fn new_single(app_id: ApplicationId) -> Self {
Self {
execute_operations: Some(vec![app_id]),
mandatory_applications: vec![app_id],
close_chain: vec![app_id],
change_application_permissions: vec![app_id],
call_service_as_oracle: Some(vec![app_id]),
make_http_requests: Some(vec![app_id]),
}
}
/// Creates new `ApplicationPermissions` where the given applications are the only ones
/// whose operations are allowed and mandatory, and they can also close the chain.
pub fn new_multiple(app_ids: Vec<ApplicationId>) -> Self {
Self {
execute_operations: Some(app_ids.clone()),
mandatory_applications: app_ids.clone(),
close_chain: app_ids.clone(),
change_application_permissions: app_ids.clone(),
call_service_as_oracle: Some(app_ids.clone()),
make_http_requests: Some(app_ids),
}
}
/// Returns whether operations with the given application ID are allowed on this chain.
pub fn can_execute_operations(&self, app_id: &GenericApplicationId) -> bool {
match (app_id, &self.execute_operations) {
(_, None) => true,
(GenericApplicationId::System, Some(_)) => false,
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/graphql.rs | linera-base/src/graphql.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Defines a GraphQL scalar with a description string.
///
/// This is equivalent to `scalar!` but always uses the stringified identifier as the name.
#[macro_export]
macro_rules! doc_scalar {
    ($ty:ty, $desc:literal) => {
        // Delegates to async-graphql's internal scalar macro: name = stringified
        // type, description = `$desc`. The trailing `None` is presumably the
        // `specified_by_url` argument — confirm against `async_graphql::scalar_internal`.
        $crate::async_graphql::scalar_internal!(
            $ty,
            ::std::stringify!($ty),
            ::std::option::Option::Some(::std::string::ToString::to_string($desc)),
            ::std::option::Option::None
        );
    };
}
/// An error trying to parse the hex-digits of a BCS-encoded value.
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum BcsHexParseError {
    // The hex decoded fine, but the bytes were not valid BCS for the target type.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    // The input string was not valid hexadecimal.
    #[error("Invalid hexadecimal: {0}")]
    Hex(#[from] hex::FromHexError),
}
/// Defines a GraphQL scalar type using the hex-representation of the value's BCS-serialized form.
///
/// This is a modified implementation of [`async_graphql::scalar`].
/// In addition, it implements `Display` and `FromStr`, also using hex-representation.
#[macro_export]
macro_rules! bcs_scalar {
($ty:ty, $desc:literal) => {
impl $crate::async_graphql::ScalarType for $ty {
fn parse(
value: $crate::async_graphql::Value,
) -> $crate::async_graphql::InputValueResult<Self> {
let hex: String = $crate::async_graphql::from_value(value)?;
let bytes = $crate::hex::decode(&hex)?;
let result = $crate::bcs::from_bytes(&bytes)?;
::std::result::Result::Ok(result)
}
fn to_value(&self) -> $crate::async_graphql::Value {
let ::std::result::Result::Ok(bytes) = $crate::bcs::to_bytes(self) else {
return $crate::async_graphql::Value::Null;
};
let hex = $crate::hex::encode(&bytes);
$crate::async_graphql::to_value(hex)
.unwrap_or_else(|_| $crate::async_graphql::Value::Null)
}
}
impl $crate::async_graphql::InputType for $ty {
type RawValueType = Self;
fn type_name() -> ::std::borrow::Cow<'static, ::std::primitive::str> {
::std::borrow::Cow::Borrowed(::std::stringify!($ty))
}
fn create_type_info(
registry: &mut $crate::async_graphql::registry::Registry,
) -> ::std::string::String {
registry.create_input_type::<$ty, _>(
$crate::async_graphql::registry::MetaTypeId::Scalar,
|_| $crate::async_graphql::registry::MetaType::Scalar {
name: ::std::borrow::ToOwned::to_owned(::std::stringify!($ty)),
description: ::std::option::Option::Some(
::std::string::ToString::to_string($desc),
),
is_valid: ::std::option::Option::Some(::std::sync::Arc::new(|value| {
<$ty as $crate::async_graphql::ScalarType>::is_valid(value)
})),
visible: ::std::option::Option::None,
inaccessible: false,
tags: ::std::default::Default::default(),
specified_by_url: ::std::option::Option::None,
directive_invocations: ::std::default::Default::default(),
requires_scopes: ::std::default::Default::default(),
},
)
}
fn parse(
value: ::std::option::Option<$crate::async_graphql::Value>,
) -> $crate::async_graphql::InputValueResult<Self> {
<$ty as $crate::async_graphql::ScalarType>::parse(value.unwrap_or_default())
}
fn to_value(&self) -> $crate::async_graphql::Value {
<$ty as $crate::async_graphql::ScalarType>::to_value(self)
}
fn as_raw_value(&self) -> ::std::option::Option<&Self::RawValueType> {
::std::option::Option::Some(self)
}
}
impl $crate::async_graphql::OutputType for $ty {
fn type_name() -> ::std::borrow::Cow<'static, ::std::primitive::str> {
::std::borrow::Cow::Borrowed(::std::stringify!($ty))
}
fn create_type_info(
registry: &mut $crate::async_graphql::registry::Registry,
) -> ::std::string::String {
registry.create_output_type::<$ty, _>(
$crate::async_graphql::registry::MetaTypeId::Scalar,
|_| $crate::async_graphql::registry::MetaType::Scalar {
name: ::std::borrow::ToOwned::to_owned(::std::stringify!($ty)),
description: ::std::option::Option::Some(
::std::string::ToString::to_string($desc),
),
is_valid: ::std::option::Option::Some(::std::sync::Arc::new(|value| {
<$ty as $crate::async_graphql::ScalarType>::is_valid(value)
})),
visible: ::std::option::Option::None,
inaccessible: false,
tags: ::std::default::Default::default(),
specified_by_url: ::std::option::Option::None,
directive_invocations: ::std::default::Default::default(),
requires_scopes: ::std::default::Default::default(),
},
)
}
async fn resolve(
&self,
_: &$crate::async_graphql::ContextSelectionSet<'_>,
_field: &$crate::async_graphql::Positioned<
$crate::async_graphql::parser::types::Field,
>,
) -> $crate::async_graphql::ServerResult<$crate::async_graphql::Value> {
::std::result::Result::Ok($crate::async_graphql::ScalarType::to_value(self))
}
}
impl ::std::fmt::Display for $ty {
fn fmt(
&self,
f: &mut ::std::fmt::Formatter<'_>,
) -> ::std::result::Result<(), ::std::fmt::Error> {
match $crate::bcs::to_bytes(self) {
::std::result::Result::Ok(bytes) => {
::std::fmt::Display::fmt(&$crate::hex::encode(&bytes), f)
}
::std::result::Result::Err(_) => {
::std::write!(f, "invalid {}", ::std::stringify!($ty))
}
}
}
}
impl ::std::str::FromStr for $ty {
type Err = $crate::BcsHexParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = $crate::hex::decode(s)?;
::std::result::Result::Ok($crate::bcs::from_bytes(&bytes)?)
}
}
};
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/util/future.rs | linera-base/src/util/future.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Utilities for working with `Future`s.
*/
use std::{future::Future, pin::Pin};
use sync_wrapper::SyncFuture;
/// An extension trait to box futures and make them `Sync`.
pub trait FutureSyncExt: Future + Sized {
    /// Wraps the future so that it implements `Sync`.
    fn make_sync(self) -> SyncFuture<Self> {
        SyncFuture::new(self)
    }
    /// Boxes and pins the future without losing `Sync`ness.
    fn boxed_sync(self) -> Pin<Box<SyncFuture<Self>>> {
        Box::pin(self.make_sync())
    }
}
// Blanket implementation: every future gets these combinators for free.
impl<F: Future> FutureSyncExt for F {}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/util/mod.rs | linera-base/src/util/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Utilities used throughout the Linera codebase.
*/
pub mod future;
pub mod traits;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/util/traits.rs | linera-base/src/util/traits.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
Utilities for building traits that work in both single-threaded and multi-threaded
contexts.
*/
/// A trait that extends `Send` and `Sync` if not compiling for the Web.
#[cfg(web)]
pub trait AutoTraits: 'static {}
// On Web targets the bound relaxes to just `'static`, so every such type qualifies.
#[cfg(web)]
impl<T: 'static> AutoTraits for T {}
#[cfg(not(web))]
trait_set::trait_set! {
    /// A trait that extends `Send` and `Sync` if not compiling for the Web.
    pub trait AutoTraits = Send + Sync + 'static;
}
trait_set::trait_set! {
    /// Precomposed `std::error::Error + AutoTraits`, for use in type-erased error
    /// objects.
    pub trait DynError = std::error::Error + AutoTraits;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/signer.rs | linera-base/src/crypto/signer.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
An interface for cryptographic signers that can be used by the Linera client to sign blocks.
*/
use std::error::Error as StdError;
pub use in_mem::InMemorySigner;
use super::CryptoHash;
use crate::{crypto::AccountSignature, identifiers::AccountOwner};
// Platform-dependent marker trait: on the Web it is satisfied by every type;
// elsewhere it additionally requires `Send + Sync`.
cfg_if::cfg_if! {
    if #[cfg(web)] {
        #[doc(hidden)]
        pub trait TaskSendable {}
        impl<T> TaskSendable for T {}
    } else {
        #[doc(hidden)]
        pub trait TaskSendable: Send + Sync {}
        impl<T: Send + Sync> TaskSendable for T {}
    }
}
/// Errors that can be returned from signers.
pub trait Error: StdError + TaskSendable {}
// Blanket impl: any `std::error::Error` that is also `TaskSendable` is a signer error.
impl<T: StdError + TaskSendable> Error for T {}
// Lets a boxed `dyn Error` itself be used as a `std::error::Error`,
// forwarding `source` to the inner error.
impl StdError for Box<dyn Error + '_> {
    fn source(&self) -> Option<&(dyn StdError + 'static)> {
        (**self).source()
    }
}
/// A trait for signing keys.
// NOTE(review): `trait_variant::make(Send)` makes the async methods' futures `Send`
// on non-Web targets — confirm against the `trait-variant` crate documentation.
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait Signer {
    /// The type of errors arising from operations on this `Signer`.
    type Error: Error;
    /// Creates a signature for the given `value` using the provided `owner`.
    // DEV: We sign `CryptoHash` type, rather than `&[u8]` to make sure we don't sign
    // things accidentally. See [`CryptoHash::new`] for how the type's name is included
    // in the resulting hash, providing the canonicity of the hashing process.
    async fn sign(
        &self,
        owner: &AccountOwner,
        value: &CryptoHash,
    ) -> Result<AccountSignature, Self::Error>;
    /// Returns whether the given `owner` is a known signer.
    async fn contains_key(&self, owner: &AccountOwner) -> Result<bool, Self::Error>;
}
/// In-memory implementation of the [`Signer`] trait.
mod in_mem {
use std::{
collections::BTreeMap,
sync::{Arc, RwLock},
};
use serde::{Deserialize, Serialize};
#[cfg(with_getrandom)]
use crate::crypto::{AccountPublicKey, CryptoRng};
use crate::{
crypto::{AccountSecretKey, AccountSignature, CryptoHash, Signer},
identifiers::AccountOwner,
};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("no key found for the given owner")]
NoSuchOwner,
}
/// In-memory signer.
#[derive(Clone)]
pub struct InMemorySigner(Arc<RwLock<InMemSignerInner>>);
#[cfg(not(with_getrandom))]
impl Default for InMemorySigner {
fn default() -> Self {
Self::new()
}
}
impl InMemorySigner {
/// Creates a new [`InMemorySigner`] seeded with `prng_seed`.
/// If `prng_seed` is `None`, an `OsRng` will be used.
#[cfg(with_getrandom)]
pub fn new(prng_seed: Option<u64>) -> Self {
InMemorySigner(Arc::new(RwLock::new(InMemSignerInner::new(prng_seed))))
}
/// Creates a new [`InMemorySigner`].
#[cfg(not(with_getrandom))]
pub fn new() -> Self {
InMemorySigner(Arc::new(RwLock::new(InMemSignerInner::new())))
}
/// Generates a new key pair from Signer's RNG. Use with care.
#[cfg(with_getrandom)]
pub fn generate_new(&mut self) -> AccountPublicKey {
let mut inner = self.0.write().unwrap();
let secret = AccountSecretKey::generate_from(&mut inner.rng_state.prng);
if inner.rng_state.testing_seed.is_some() {
// Generate a new testing seed for the case when we need to store the PRNG state.
// It provides a "forward-secrecy" property for the testing seed.
// We do not do that for the case when `testing_seed` is `None`, because
// we default to the usage of OsRng in that case.
inner.rng_state.testing_seed = Some(inner.rng_state.prng.next_u64());
}
let public = secret.public();
let owner = AccountOwner::from(public);
inner.keys.insert(owner, secret);
public
}
/// Returns the public key corresponding to the given `owner`.
pub fn keys(&self) -> Vec<(AccountOwner, Vec<u8>)> {
let inner = self.0.read().unwrap();
inner.keys()
}
}
#[derive(Debug, Deserialize, Serialize)]
struct Inner {
keys: Vec<(AccountOwner, String)>,
#[cfg(with_getrandom)]
prng_seed: Option<u64>,
}
/// In-memory signer.
struct InMemSignerInner {
keys: BTreeMap<AccountOwner, AccountSecretKey>,
#[cfg(with_getrandom)]
rng_state: RngState,
}
#[cfg(with_getrandom)]
struct RngState {
prng: Box<dyn CryptoRng>,
#[cfg(with_getrandom)]
testing_seed: Option<u64>,
}
#[cfg(with_getrandom)]
impl RngState {
fn new(prng_seed: Option<u64>) -> Self {
let prng: Box<dyn CryptoRng> = prng_seed.into();
RngState {
prng,
#[cfg(with_getrandom)]
testing_seed: prng_seed,
}
}
}
impl InMemSignerInner {
/// Creates a new `InMemSignerInner` seeded with `prng_seed`.
/// If `prng_seed` is `None`, an `OsRng` will be used.
#[cfg(with_getrandom)]
pub fn new(prng_seed: Option<u64>) -> Self {
InMemSignerInner {
keys: BTreeMap::new(),
rng_state: RngState::new(prng_seed),
}
}
/// Creates a new `InMemSignerInner`.
#[cfg(not(with_getrandom))]
pub fn new() -> Self {
InMemSignerInner {
keys: BTreeMap::new(),
}
}
pub fn keys(&self) -> Vec<(AccountOwner, Vec<u8>)> {
self.keys
.iter()
.map(|(owner, secret)| {
let bytes = serde_json::to_vec(secret).expect("serialization should not fail");
(*owner, bytes)
})
.collect()
}
}
impl Signer for InMemorySigner {
type Error = Error;
/// Creates a signature for the given `value` using the provided `owner`.
async fn sign(
&self,
owner: &AccountOwner,
value: &CryptoHash,
) -> Result<AccountSignature, Error> {
let inner = self.0.read().unwrap();
if let Some(secret) = inner.keys.get(owner) {
let signature = secret.sign_prehash(*value);
Ok(signature)
} else {
Err(Error::NoSuchOwner)
}
}
/// Returns whether the given `owner` is a known signer.
async fn contains_key(&self, owner: &AccountOwner) -> Result<bool, Error> {
Ok(self.0.read().unwrap().keys.contains_key(owner))
}
}
impl FromIterator<(AccountOwner, AccountSecretKey)> for InMemorySigner {
fn from_iter<T>(input: T) -> Self
where
T: IntoIterator<Item = (AccountOwner, AccountSecretKey)>,
{
InMemorySigner(Arc::new(RwLock::new(InMemSignerInner {
keys: BTreeMap::from_iter(input),
#[cfg(with_getrandom)]
rng_state: RngState::new(None),
})))
}
}
impl Default for InMemSignerInner {
fn default() -> Self {
#[cfg(with_getrandom)]
let signer = InMemSignerInner::new(None);
#[cfg(not(with_getrandom))]
let signer = InMemSignerInner::new();
signer
}
}
impl Serialize for InMemorySigner {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let inner = self.0.read().unwrap();
InMemSignerInner::serialize(&*inner, serializer)
}
}
impl<'de> Deserialize<'de> for InMemorySigner {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let inner = InMemSignerInner::deserialize(deserializer)?;
Ok(InMemorySigner(Arc::new(RwLock::new(inner))))
}
}
impl Serialize for InMemSignerInner {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
#[cfg(with_getrandom)]
let prng_seed = self.rng_state.testing_seed;
let keys_as_strings = self
.keys()
.into_iter()
.map(|(owner, bytes)| (owner, hex::encode(bytes)))
.collect::<Vec<_>>();
let inner = Inner {
keys: keys_as_strings,
#[cfg(with_getrandom)]
prng_seed,
};
Inner::serialize(&inner, serializer)
}
}
impl<'de> Deserialize<'de> for InMemSignerInner {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let inner = Inner::deserialize(deserializer)?;
let keys = inner
.keys
.into_iter()
.map(|(owner, secret_hex)| {
let secret_bytes =
hex::decode(&secret_hex).map_err(serde::de::Error::custom)?;
let secret =
serde_json::from_slice(&secret_bytes).map_err(serde::de::Error::custom)?;
Ok((owner, secret))
})
.collect::<Result<BTreeMap<_, _>, _>>()?;
let signer = InMemSignerInner {
keys,
#[cfg(with_getrandom)]
rng_state: RngState::new(inner.prng_seed),
};
Ok(signer)
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/mod.rs | linera-base/src/crypto/mod.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Define the cryptographic primitives used by the Linera protocol.
mod ed25519;
mod hash;
#[allow(dead_code)]
mod secp256k1;
pub mod signer;
use std::{fmt::Display, io, num::ParseIntError, str::FromStr};
use allocative::Allocative;
use alloy_primitives::FixedBytes;
use custom_debug_derive::Debug;
pub use ed25519::{Ed25519PublicKey, Ed25519SecretKey, Ed25519Signature};
pub use hash::*;
use linera_witty::{WitLoad, WitStore, WitType};
pub use secp256k1::{
evm::{EvmPublicKey, EvmSecretKey, EvmSignature},
Secp256k1PublicKey, Secp256k1SecretKey, Secp256k1Signature,
};
use serde::{Deserialize, Serialize};
pub use signer::*;
use thiserror::Error;
use crate::{hex_debug, identifiers::AccountOwner, visit_allocative_simple};
/// The public key of a validator.
pub type ValidatorPublicKey = secp256k1::Secp256k1PublicKey;
/// The private key of a validator.
pub type ValidatorSecretKey = secp256k1::Secp256k1SecretKey;
/// The signature of a validator.
pub type ValidatorSignature = secp256k1::Secp256k1Signature;
/// The key pair of a validator.
pub type ValidatorKeypair = secp256k1::Secp256k1KeyPair;
/// Signature scheme used for the public key.
#[derive(Serialize, Deserialize, Debug, Copy, Clone, Eq, PartialEq)]
pub enum SignatureScheme {
/// Ed25519
Ed25519,
/// secp256k1
Secp256k1,
/// EVM secp256k1
EvmSecp256k1,
}
/// The public key of a chain owner.
/// The corresponding private key is allowed to propose blocks
/// on the chain and transfer account's tokens.
#[derive(
Serialize,
Deserialize,
Debug,
Eq,
PartialEq,
Ord,
PartialOrd,
Copy,
Clone,
Hash,
WitType,
WitLoad,
WitStore,
Allocative,
)]
pub enum AccountPublicKey {
/// Ed25519 public key.
Ed25519(#[allocative(visit = visit_allocative_simple)] ed25519::Ed25519PublicKey),
/// secp256k1 public key.
Secp256k1(#[allocative(visit = visit_allocative_simple)] secp256k1::Secp256k1PublicKey),
/// EVM secp256k1 public key.
EvmSecp256k1(#[allocative(visit = visit_allocative_simple)] secp256k1::evm::EvmPublicKey),
}
/// The private key of a chain owner.
#[derive(Serialize, Deserialize)]
pub enum AccountSecretKey {
/// Ed25519 secret key.
Ed25519(ed25519::Ed25519SecretKey),
/// secp256k1 secret key.
Secp256k1(secp256k1::Secp256k1SecretKey),
/// EVM secp256k1 secret key.
EvmSecp256k1(secp256k1::evm::EvmSecretKey),
}
/// The signature of a chain owner.
#[derive(Eq, PartialEq, Copy, Clone, Debug, Serialize, Deserialize, Allocative)]
pub enum AccountSignature {
/// Ed25519 signature.
Ed25519 {
/// Signature of the value.
#[allocative(visit = visit_allocative_simple)]
signature: ed25519::Ed25519Signature,
/// Public key of the signer.
#[allocative(visit = visit_allocative_simple)]
public_key: ed25519::Ed25519PublicKey,
},
/// secp256k1 signature.
Secp256k1 {
/// Signature of the value.
#[allocative(visit = visit_allocative_simple)]
signature: secp256k1::Secp256k1Signature,
/// Public key of the signer.
#[allocative(visit = visit_allocative_simple)]
public_key: secp256k1::Secp256k1PublicKey,
},
/// EVM secp256k1 signature.
EvmSecp256k1 {
/// Signature of the value.
#[allocative(visit = visit_allocative_simple)]
signature: secp256k1::evm::EvmSignature,
/// EVM address of the signer.
#[debug(with = "hex_debug")]
#[allocative(visit = visit_allocative_simple)]
address: [u8; 20],
},
}
impl AccountSecretKey {
/// Returns the public key corresponding to this secret key.
pub fn public(&self) -> AccountPublicKey {
match self {
AccountSecretKey::Ed25519(secret) => AccountPublicKey::Ed25519(secret.public()),
AccountSecretKey::Secp256k1(secret) => AccountPublicKey::Secp256k1(secret.public()),
AccountSecretKey::EvmSecp256k1(secret) => {
AccountPublicKey::EvmSecp256k1(secret.public())
}
}
}
/// Copies the secret key.
pub fn copy(&self) -> Self {
match self {
AccountSecretKey::Ed25519(secret) => AccountSecretKey::Ed25519(secret.copy()),
AccountSecretKey::Secp256k1(secret) => AccountSecretKey::Secp256k1(secret.copy()),
AccountSecretKey::EvmSecp256k1(secret) => AccountSecretKey::EvmSecp256k1(secret.copy()),
}
}
/// Creates a signature for the `value` using provided `secret`.
pub fn sign<'de, T>(&self, value: &T) -> AccountSignature
where
T: BcsSignable<'de>,
{
match self {
AccountSecretKey::Ed25519(secret) => {
let signature = Ed25519Signature::new(value, secret);
let public_key = secret.public();
AccountSignature::Ed25519 {
signature,
public_key,
}
}
AccountSecretKey::Secp256k1(secret) => {
let signature = secp256k1::Secp256k1Signature::new(value, secret);
let public_key = secret.public();
AccountSignature::Secp256k1 {
signature,
public_key,
}
}
AccountSecretKey::EvmSecp256k1(secret) => {
let signature = secp256k1::evm::EvmSignature::new(CryptoHash::new(value), secret);
let address: [u8; 20] = secret.address().into();
AccountSignature::EvmSecp256k1 { signature, address }
}
}
}
/// Creates a signature for the `value`.
pub fn sign_prehash(&self, value: CryptoHash) -> AccountSignature {
match self {
AccountSecretKey::Ed25519(secret) => {
let signature = Ed25519Signature::sign_prehash(secret, value);
let public_key = secret.public();
AccountSignature::Ed25519 {
signature,
public_key,
}
}
AccountSecretKey::Secp256k1(secret) => {
let signature = secp256k1::Secp256k1Signature::sign_prehash(secret, value);
let public_key = secret.public();
AccountSignature::Secp256k1 {
signature,
public_key,
}
}
AccountSecretKey::EvmSecp256k1(secret) => {
let signature = secp256k1::evm::EvmSignature::sign_prehash(secret, value);
let address: [u8; 20] = secret.address().into();
AccountSignature::EvmSecp256k1 { signature, address }
}
}
}
#[cfg(all(with_testing, with_getrandom))]
/// Generates a new key pair using the operating system's RNG.
pub fn generate() -> Self {
AccountSecretKey::Ed25519(Ed25519SecretKey::generate())
}
#[cfg(all(with_getrandom, not(feature = "revm")))]
/// Generates a new Ed25519 key pair from the given RNG. Use with care.
pub fn generate_from<R: CryptoRng>(rng: &mut R) -> Self {
AccountSecretKey::Ed25519(Ed25519SecretKey::generate_from(rng))
}
#[cfg(all(with_getrandom, feature = "revm"))]
/// Generates a new Evm Secp256k1 key pair from the given RNG. Use with care.
pub fn generate_from<R: CryptoRng>(rng: &mut R) -> Self {
AccountSecretKey::EvmSecp256k1(EvmSecretKey::generate_from(rng))
}
}
impl AccountPublicKey {
/// Returns the signature scheme of the public key.
pub fn scheme(&self) -> SignatureScheme {
match self {
AccountPublicKey::Ed25519(_) => SignatureScheme::Ed25519,
AccountPublicKey::Secp256k1(_) => SignatureScheme::Secp256k1,
AccountPublicKey::EvmSecp256k1(_) => SignatureScheme::EvmSecp256k1,
}
}
/// Returns the byte representation of the public key.
pub fn as_bytes(&self) -> Vec<u8> {
bcs::to_bytes(&self).expect("serialization to bytes should not fail")
}
/// Parses the byte representation of the public key.
///
/// Returns error if the byte slice has incorrect length or the flag is not recognized.
pub fn from_slice(bytes: &[u8]) -> Result<Self, CryptoError> {
bcs::from_bytes(bytes).map_err(CryptoError::PublicKeyParseError)
}
/// A fake public key used for testing.
#[cfg(with_testing)]
pub fn test_key(name: u8) -> Self {
AccountPublicKey::Ed25519(Ed25519PublicKey::test_key(name))
}
}
impl AccountSignature {
/// Verifies the signature for the `value` using the provided `public_key`.
pub fn verify<'de, T>(&self, value: &T) -> Result<(), CryptoError>
where
T: BcsSignable<'de> + std::fmt::Debug,
{
match self {
AccountSignature::Ed25519 {
signature,
public_key,
} => signature.check(value, *public_key),
AccountSignature::Secp256k1 {
signature,
public_key,
} => signature.check(value, *public_key),
AccountSignature::EvmSecp256k1 {
signature,
address: sender_address,
} => {
signature.check_with_recover(value, *sender_address)?;
Ok(())
}
}
}
/// Returns byte representation of the signatures.
pub fn to_bytes(&self) -> Vec<u8> {
bcs::to_bytes(&self).expect("serialization to bytes should not fail")
}
/// Parses the byte representation of the signature.
pub fn from_slice(bytes: &[u8]) -> Result<Self, CryptoError> {
bcs::from_bytes(bytes).map_err(CryptoError::SignatureParseError)
}
/// Returns the [`AccountOwner`] of the account that signed the value.
pub fn owner(&self) -> AccountOwner {
match self {
AccountSignature::Ed25519 { public_key, .. } => AccountOwner::from(*public_key),
AccountSignature::Secp256k1 { public_key, .. } => AccountOwner::from(*public_key),
AccountSignature::EvmSecp256k1 { address, .. } => AccountOwner::Address20(*address),
}
}
}
impl FromStr for AccountPublicKey {
    type Err = CryptoError;
    /// Parses a hex-encoded, BCS-serialized public key (the inverse of `Display`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let bytes = hex::decode(s)?;
        Self::from_slice(&bytes)
    }
}
impl Display for AccountPublicKey {
    /// Formats the key as the hex encoding of its BCS bytes.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let encoded = hex::encode(self.as_bytes());
        f.write_str(&encoded)
    }
}
impl TryFrom<&[u8]> for AccountSignature {
type Error = CryptoError;
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
AccountSignature::from_slice(bytes)
}
}
/// Error type for cryptographic errors.
#[derive(Error, Debug)]
#[allow(missing_docs)]
pub enum CryptoError {
#[error("Signature for object {type_name} is not valid: {error}")]
InvalidSignature { error: String, type_name: String },
#[error("Signature from validator is missing")]
MissingValidatorSignature,
#[error(transparent)]
NonHexDigits(#[from] hex::FromHexError),
#[error(
"Byte slice has length {0} but a `CryptoHash` requires exactly {expected} bytes",
expected = FixedBytes::<32>::len_bytes(),
)]
IncorrectHashSize(usize),
#[error(
"Byte slice has length {len} but a {scheme} `PublicKey` requires exactly {expected} bytes"
)]
IncorrectPublicKeySize {
scheme: &'static str,
len: usize,
expected: usize,
},
#[error(
"byte slice has length {len} but a {scheme} `Signature` requires exactly {expected} bytes"
)]
IncorrectSignatureBytes {
scheme: &'static str,
len: usize,
expected: usize,
},
#[error("Could not parse integer: {0}")]
ParseIntError(#[from] ParseIntError),
#[error("secp256k1 error: {0}")]
Secp256k1Error(k256::ecdsa::Error),
#[error("could not parse public key: {0}: point at infinity")]
Secp256k1PointAtInfinity(String),
#[error("could not parse public key: {0}")]
PublicKeyParseError(bcs::Error),
#[error("could not parse signature: {0}")]
SignatureParseError(bcs::Error),
}
#[cfg(with_getrandom)]
/// Wrapper around [`rand::CryptoRng`] and [`rand::RngCore`].
pub trait CryptoRng: rand::CryptoRng + rand::RngCore + Send + Sync {}
#[cfg(with_getrandom)]
impl<T: rand::CryptoRng + rand::RngCore + Send + Sync> CryptoRng for T {}
#[cfg(with_getrandom)]
impl From<Option<u64>> for Box<dyn CryptoRng> {
fn from(seed: Option<u64>) -> Self {
use rand::SeedableRng;
match seed {
Some(seed) => Box::new(rand::rngs::StdRng::seed_from_u64(seed)),
None => Box::new(rand::rngs::OsRng),
}
}
}
/// Something that we know how to hash.
pub trait Hashable<Hasher> {
    /// Send the content of `Self` to the given hasher.
    fn write(&self, hasher: &mut Hasher);
}
/// Something that we know how to hash and sign.
pub trait HasTypeName {
    /// The name of the type.
    fn type_name() -> &'static str;
}
/// Activate the blanket implementation of `Hashable` based on serde and BCS.
/// * We use `serde_name` to extract a seed from the name of structs and enums.
/// * We use `BCS` to generate canonical bytes suitable for hashing.
pub trait BcsHashable<'de>: Serialize + Deserialize<'de> {}
/// Activate the blanket implementation of `Signable` based on serde and BCS.
/// * We use `serde_name` to extract a seed from the name of structs and enums.
/// * We use `BCS` to generate canonical bytes suitable for signing.
pub trait BcsSignable<'de>: Serialize + Deserialize<'de> {}
// Everything signable is also hashable.
impl<'de, T: BcsSignable<'de>> BcsHashable<'de> for T {}
// Blanket `Hashable` implementation: feed the type name (acting as a domain
// separator) followed by the canonical BCS serialization of the value into
// the hasher. Changing this layout would change every hash in the protocol.
impl<'de, T, Hasher> Hashable<Hasher> for T
where
    T: BcsHashable<'de>,
    Hasher: io::Write,
{
    fn write(&self, hasher: &mut Hasher) {
        let name = <Self as HasTypeName>::type_name();
        // Note: This assumes that names never contain the separator `::`.
        write!(hasher, "{}::", name).expect("Hasher should not fail");
        bcs::serialize_into(hasher, &self).expect("Message serialization should not fail");
    }
}
// Raw byte slices are hashed as-is, without a type-name prefix.
impl<Hasher> Hashable<Hasher> for [u8]
where
    Hasher: io::Write,
{
    fn write(&self, hasher: &mut Hasher) {
        hasher.write_all(self).expect("Hasher should not fail");
    }
}
// The type name is recovered from serde metadata via `serde_name`; panics for
// types that are not structs or enums (e.g. bare primitives).
impl<'de, T> HasTypeName for T
where
    T: BcsHashable<'de>,
{
    fn type_name() -> &'static str {
        serde_name::trace_name::<Self>().expect("Self must be a struct or an enum")
    }
}
/// A BCS-signable struct for testing.
#[cfg(with_testing)]
#[derive(Debug, Serialize, Deserialize)]
pub struct TestString(pub String);
#[cfg(with_testing)]
impl TestString {
    /// Creates a new `TestString` with the given string.
    pub fn new(s: impl Into<String>) -> Self {
        Self(s.into())
    }
}
#[cfg(with_testing)]
// Opting into `BcsSignable` makes `TestString` hashable and signable in tests.
impl BcsSignable<'_> for TestString {}
/// Interprets `bytes` as four consecutive little-endian unsigned 64-bit words.
///
/// Panics if `bytes` holds fewer than 32 bytes.
pub(crate) fn le_bytes_to_u64_array(bytes: &[u8]) -> [u64; 4] {
    let mut words = [0u64; 4];
    for (index, word) in words.iter_mut().enumerate() {
        let start = index * 8;
        *word = u64::from_le_bytes(bytes[start..start + 8].try_into().expect("incorrect indices"));
    }
    words
}
/// Interprets `bytes` as four consecutive big-endian unsigned 64-bit words.
///
/// Panics if `bytes` holds fewer than 32 bytes.
pub(crate) fn be_bytes_to_u64_array(bytes: &[u8]) -> [u64; 4] {
    let mut words = [0u64; 4];
    for (index, word) in words.iter_mut().enumerate() {
        let start = index * 8;
        *word = u64::from_be_bytes(bytes[start..start + 8].try_into().expect("incorrect indices"));
    }
    words
}
/// Serializes the four `integers` into 32 bytes using little-endian byte order.
pub(crate) fn u64_array_to_le_bytes(integers: [u64; 4]) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    for (index, word) in integers.iter().enumerate() {
        let start = index * 8;
        bytes[start..start + 8].copy_from_slice(&word.to_le_bytes());
    }
    bytes
}
/// Serializes the four `integers` into 32 bytes using big-endian byte order.
pub fn u64_array_to_be_bytes(integers: [u64; 4]) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    for (index, word) in integers.iter().enumerate() {
        let start = index * 8;
        bytes[start..start + 8].copy_from_slice(&word.to_be_bytes());
    }
    bytes
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::{ed25519::Ed25519SecretKey, secp256k1::Secp256k1KeyPair};
    /// Checks the big-endian packing against a hand-computed byte layout and
    /// verifies that `be_bytes_to_u64_array` inverts `u64_array_to_be_bytes`.
    #[test]
    fn test_u64_array_to_be_bytes() {
        let input = [
            0x0123456789ABCDEF,
            0xFEDCBA9876543210,
            0x0011223344556677,
            0x8899AABBCCDDEEFF,
        ];
        let expected_output = [
            0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54,
            0x32, 0x10, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB,
            0xCC, 0xDD, 0xEE, 0xFF,
        ];
        let output = u64_array_to_be_bytes(input);
        assert_eq!(output, expected_output);
        assert_eq!(input, be_bytes_to_u64_array(&u64_array_to_be_bytes(input)));
    }
    /// Same as above for the little-endian packing functions.
    #[test]
    fn test_u64_array_to_le_bytes() {
        let input = [
            0x0123456789ABCDEF,
            0xFEDCBA9876543210,
            0x0011223344556677,
            0x8899AABBCCDDEEFF,
        ];
        let expected_output = [
            0xEF, 0xCD, 0xAB, 0x89, 0x67, 0x45, 0x23, 0x01, 0x10, 0x32, 0x54, 0x76, 0x98, 0xBA,
            0xDC, 0xFE, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, 0xFF, 0xEE, 0xDD, 0xCC,
            0xBB, 0xAA, 0x99, 0x88,
        ];
        let output = u64_array_to_le_bytes(input);
        assert_eq!(output, expected_output);
        assert_eq!(input, le_bytes_to_u64_array(&u64_array_to_le_bytes(input)));
    }
    /// Public keys must survive a bytes round trip for every supported scheme.
    // NOTE(review): unlike `roundtrip_signature_bytes_repr` below, this test
    // does not cover the `EvmSecp256k1` variant — confirm whether that is
    // intentional or an oversight.
    #[test]
    fn roundtrip_account_pk_bytes_repr() {
        fn roundtrip_test(secret: AccountSecretKey) {
            let public = secret.public();
            let bytes = public.as_bytes();
            let parsed = AccountPublicKey::from_slice(&bytes).unwrap();
            assert_eq!(public, parsed);
        }
        roundtrip_test(AccountSecretKey::Ed25519(Ed25519SecretKey::generate()));
        roundtrip_test(AccountSecretKey::Secp256k1(
            Secp256k1KeyPair::generate().secret_key,
        ));
    }
    /// Signatures must survive a bytes round trip for every supported scheme.
    #[test]
    fn roundtrip_signature_bytes_repr() {
        fn roundtrip_test(secret: AccountSecretKey) {
            let test_string = TestString::new("test");
            let signature = secret.sign(&test_string);
            let bytes = signature.to_bytes();
            let parsed = AccountSignature::from_slice(&bytes).unwrap();
            assert_eq!(signature, parsed);
        }
        roundtrip_test(AccountSecretKey::Ed25519(Ed25519SecretKey::generate()));
        roundtrip_test(AccountSecretKey::Secp256k1(
            Secp256k1KeyPair::generate().secret_key,
        ));
        roundtrip_test(AccountSecretKey::EvmSecp256k1(EvmSecretKey::generate()));
    }
    /// Public keys must also round-trip through their `Display`/`FromStr`
    /// (hex) representation.
    #[test]
    fn roundtrip_display_from_str_pk() {
        fn test(secret: AccountSecretKey) {
            let public = secret.public();
            let display = public.to_string();
            let parsed = AccountPublicKey::from_str(&display).unwrap();
            assert_eq!(public, parsed);
        }
        test(AccountSecretKey::Ed25519(Ed25519SecretKey::generate()));
        test(AccountSecretKey::Secp256k1(
            Secp256k1KeyPair::generate().secret_key,
        ));
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/hash.rs | linera-base/src/crypto/hash.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines hashing primitives used by the Linera protocol.
#[cfg(with_testing)]
use std::ops::RangeInclusive;
use std::{borrow::Cow, fmt, io, str::FromStr};
use allocative::{Allocative, Visitor};
#[cfg(with_testing)]
use alloy_primitives::FixedBytes;
use alloy_primitives::{Keccak256, B256};
use linera_witty::{
GuestPointer, HList, InstanceWithMemory, Layout, Memory, Runtime, RuntimeError, RuntimeMemory,
WitLoad, WitStore, WitType,
};
#[cfg(with_testing)]
use proptest::{
collection::{vec, VecStrategy},
prelude::{Arbitrary, Strategy},
strategy,
};
use serde::{Deserialize, Serialize};
use crate::{
crypto::{BcsHashable, CryptoError, Hashable},
doc_scalar,
};
/// A Keccak256 value.
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Copy, Hash)]
#[cfg_attr(
    web,
    derive(tsify::Tsify),
    tsify(from_wasm_abi, into_wasm_abi, type = "string")
)]
#[cfg_attr(with_testing, derive(Default))]
pub struct CryptoHash(B256);
// Allocative support: the hash is a plain 32-byte inline value with no heap
// allocations to report.
impl Allocative for CryptoHash {
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_simple_sized::<Self>();
    }
}
impl CryptoHash {
    /// Computes a hash.
    ///
    /// The bytes fed to Keccak256 are the type name followed by the BCS
    /// serialization of `value` (see the blanket `Hashable` impl).
    pub fn new<'de, T: BcsHashable<'de>>(value: &T) -> Self {
        let mut hasher = Keccak256Ext(Keccak256::new());
        value.write(&mut hasher);
        CryptoHash(hasher.0.finalize())
    }
    /// Reads the bytes of the hash value.
    pub fn as_bytes(&self) -> &B256 {
        &self.0
    }
    /// Force the last 12 bytes of the hash to be zeroes. This is currently used for EVM compatibility
    // Keeps only the first 20 bytes — presumably the size of an EVM address.
    pub fn make_evm_compatible(&mut self) {
        self.0[20..32].fill(0);
    }
    /// Returns the hash of `TestString(s)`, for testing purposes.
    #[cfg(with_testing)]
    pub fn test_hash(s: impl Into<String>) -> Self {
        use crate::crypto::TestString;
        CryptoHash::new(&TestString::new(s))
    }
}
/// Temporary struct to extend `Keccak256` with `io::Write`.
struct Keccak256Ext(Keccak256);
impl io::Write for Keccak256Ext {
    // Feeding bytes into the hasher cannot fail and always consumes the whole buffer.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.update(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
/// A vector of cryptographic hashes.
/// This is used to represent a hash of a list of hashes.
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Hash, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Default))]
pub struct CryptoHashVec(pub Vec<CryptoHash>);
impl BcsHashable<'_> for CryptoHashVec {}
impl Serialize for CryptoHash {
    // Human-readable formats (e.g. JSON) receive the hex string; binary
    // formats (e.g. BCS) receive the raw 32 bytes as a newtype. The newtype
    // name is part of the serialized schema and must not change.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            serializer.serialize_str(&self.to_string())
        } else {
            serializer.serialize_newtype_struct("CryptoHash", &self.as_bytes().0)
        }
    }
}
impl<'de> Deserialize<'de> for CryptoHash {
    // Mirror of `Serialize`: hex string for human-readable formats, 32-byte
    // newtype for binary formats.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = Self::from_str(&s).map_err(serde::de::Error::custom)?;
            Ok(value)
        } else {
            // Helper struct so the binary encoding matches the serialized
            // newtype `CryptoHash`.
            #[derive(Deserialize)]
            #[serde(rename = "CryptoHash")]
            struct Foo([u8; 32]);
            let value = Foo::deserialize(deserializer)?;
            Ok(Self(value.0.into()))
        }
    }
}
// Parses the hex representation produced by `Display`.
impl FromStr for CryptoHash {
    type Err = CryptoError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let value = hex::decode(s)?;
        (value.as_slice()).try_into()
    }
}
// Fallible conversion from raw bytes; rejects any slice that is not exactly
// 32 bytes long.
impl TryFrom<&[u8]> for CryptoHash {
    type Error = CryptoError;
    fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
        if value.len() != B256::len_bytes() {
            return Err(CryptoError::IncorrectHashSize(value.len()));
        }
        Ok(Self(B256::from_slice(value)))
    }
}
impl From<CryptoHash> for [u8; 32] {
    fn from(crypto_hash: CryptoHash) -> Self {
        crypto_hash.0 .0
    }
}
impl From<[u8; 32]> for CryptoHash {
    fn from(bytes: [u8; 32]) -> Self {
        CryptoHash(B256::from(bytes))
    }
}
// The `[u64; 4]` conversions use big-endian packing; they must stay inverse
// to each other (see `u64_array_to_be_bytes`/`be_bytes_to_u64_array`).
impl From<[u64; 4]> for CryptoHash {
    fn from(integers: [u64; 4]) -> Self {
        CryptoHash(crate::crypto::u64_array_to_be_bytes(integers).into())
    }
}
impl From<CryptoHash> for [u64; 4] {
    fn from(crypto_hash: CryptoHash) -> Self {
        crate::crypto::be_bytes_to_u64_array(crypto_hash.0.as_ref())
    }
}
impl fmt::Display for CryptoHash {
    /// Formats the hash as lowercase hex.
    ///
    /// An explicit format precision (e.g. `{:.8}`) limits the output to that
    /// many hex characters, so only the bytes needed to honor it are encoded.
    /// The byte count is clamped to the hash length so that an oversized
    /// precision prints the full hash instead of panicking on an
    /// out-of-bounds slice (previously `{:.65}` or larger would panic).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prec = f.precision().unwrap_or(self.0.len() * 2);
        // Round up to whole bytes; `fmt` below re-applies the precision to
        // truncate an odd trailing nibble.
        let num_bytes = prec.div_ceil(2).min(self.0.len());
        hex::encode(&self.0[..num_bytes]).fmt(f)
    }
}
impl fmt::Debug for CryptoHash {
    // Debug prints the full 32-byte hash in hex, without truncation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", hex::encode(self.0))
    }
}
impl WitType for CryptoHash {
const SIZE: u32 = <(u64, u64, u64, u64) as WitType>::SIZE;
type Layout = <(u64, u64, u64, u64) as WitType>::Layout;
type Dependencies = HList![];
fn wit_type_name() -> Cow<'static, str> {
"crypto-hash".into()
}
fn wit_type_declaration() -> Cow<'static, str> {
concat!(
" record crypto-hash {\n",
" part1: u64,\n",
" part2: u64,\n",
" part3: u64,\n",
" part4: u64,\n",
" }\n",
)
.into()
}
}
impl WitLoad for CryptoHash {
fn load<Instance>(
memory: &Memory<'_, Instance>,
location: GuestPointer,
) -> Result<Self, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let (part1, part2, part3, part4) = WitLoad::load(memory, location)?;
Ok(CryptoHash::from([part1, part2, part3, part4]))
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &Memory<'_, Instance>,
) -> Result<Self, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let (part1, part2, part3, part4) = WitLoad::lift_from(flat_layout, memory)?;
Ok(CryptoHash::from([part1, part2, part3, part4]))
}
}
impl WitStore for CryptoHash {
fn store<Instance>(
&self,
memory: &mut Memory<'_, Instance>,
location: GuestPointer,
) -> Result<(), RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let [part1, part2, part3, part4] = (*self).into();
(part1, part2, part3, part4).store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut Memory<'_, Instance>,
) -> Result<<Self::Layout as Layout>::Flat, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let [part1, part2, part3, part4] = (*self).into();
(part1, part2, part3, part4).lower(memory)
}
}
#[cfg(with_testing)]
impl Arbitrary for CryptoHash {
type Parameters = ();
type Strategy = strategy::Map<VecStrategy<RangeInclusive<u8>>, fn(Vec<u8>) -> CryptoHash>;
fn arbitrary_with((): Self::Parameters) -> Self::Strategy {
vec(u8::MIN..=u8::MAX, FixedBytes::<32>::len_bytes())
.prop_map(|vector| CryptoHash(B256::from_slice(&vector[..])))
}
}
doc_scalar!(CryptoHash, "A Keccak256 value");
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/ed25519.rs | linera-base/src/crypto/ed25519.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines Ed25519 signature primitives used by the Linera protocol.
use std::{borrow::Cow, fmt, str::FromStr};
use ed25519_dalek::{self as dalek, Signer, Verifier};
use linera_witty::{
GuestPointer, HList, InstanceWithMemory, Layout, Memory, Runtime, RuntimeError, RuntimeMemory,
WitLoad, WitStore, WitType,
};
use serde::{Deserialize, Serialize};
use super::{
le_bytes_to_u64_array, u64_array_to_le_bytes, BcsHashable, BcsSignable, CryptoError,
CryptoHash, HasTypeName, Hashable,
};
use crate::doc_scalar;
/// The label for the Ed25519 scheme.
const ED25519_SCHEME_LABEL: &str = "Ed25519";
/// An Ed25519 secret key.
// Deliberately neither `Clone` nor `Copy`; see `Ed25519SecretKey::copy`.
pub struct Ed25519SecretKey(pub(crate) dalek::SigningKey);
/// An Ed25519 signature public key.
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash)]
pub struct Ed25519PublicKey(pub [u8; dalek::PUBLIC_KEY_LENGTH]);
/// An Ed25519 signature.
#[derive(Eq, PartialEq, Copy, Clone)]
pub struct Ed25519Signature(pub dalek::Signature);
impl Ed25519SecretKey {
    #[cfg(all(with_getrandom, with_testing))]
    /// Generates a new key pair using the operating system's RNG.
    ///
    /// If you want control over the RNG, use [`Ed25519SecretKey::generate_from`].
    pub fn generate() -> Self {
        let mut rng = rand::rngs::OsRng;
        Self::generate_from(&mut rng)
    }
    #[cfg(with_getrandom)]
    /// Generates a new key pair from the given RNG. Use with care.
    pub fn generate_from<R: super::CryptoRng>(rng: &mut R) -> Self {
        let keypair = dalek::SigningKey::generate(rng);
        Ed25519SecretKey(keypair)
    }
    /// Obtains the public key of a key pair.
    pub fn public(&self) -> Ed25519PublicKey {
        Ed25519PublicKey(self.0.verifying_key().to_bytes())
    }
    /// Copies the key pair, **including the secret key**.
    ///
    /// The `Clone` and `Copy` traits are deliberately not implemented for `KeyPair` to prevent
    /// accidental copies of secret keys.
    pub fn copy(&self) -> Ed25519SecretKey {
        Ed25519SecretKey(self.0.clone())
    }
}
impl Ed25519PublicKey {
    /// A fake public key used for testing.
    // All 32 bytes are set to `name`; this is an identity placeholder and is
    // presumably not a valid curve point — do not use it for verification.
    #[cfg(with_testing)]
    pub fn test_key(name: u8) -> Ed25519PublicKey {
        let addr = [name; dalek::PUBLIC_KEY_LENGTH];
        Ed25519PublicKey(addr)
    }
    /// Returns bytes of the public key.
    pub fn as_bytes(&self) -> Vec<u8> {
        self.0.to_vec()
    }
    /// Parses bytes to a public key.
    ///
    /// Returns error if input bytes are not of the correct length.
    // Only the length is validated here; curve-point validity is checked
    // later by `dalek::VerifyingKey::from_bytes` at verification time.
    pub fn from_slice(bytes: &[u8]) -> Result<Self, CryptoError> {
        let key = bytes
            .try_into()
            .map_err(|_| CryptoError::IncorrectPublicKeySize {
                scheme: ED25519_SCHEME_LABEL,
                len: bytes.len(),
                expected: dalek::PUBLIC_KEY_LENGTH,
            })?;
        Ok(Ed25519PublicKey(key))
    }
}
impl Serialize for Ed25519PublicKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
if serializer.is_human_readable() {
serializer.serialize_str(&hex::encode(self.as_bytes()))
} else {
serializer.serialize_newtype_struct("Ed25519PublicKey", &self.0)
}
}
}
impl<'de> Deserialize<'de> for Ed25519PublicKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
let value = hex::decode(s).map_err(serde::de::Error::custom)?;
Ok(Self::from_slice(&value).map_err(serde::de::Error::custom)?)
} else {
#[derive(Deserialize)]
#[serde(rename = "Ed25519PublicKey")]
struct PublicKey([u8; dalek::PUBLIC_KEY_LENGTH]);
let value = PublicKey::deserialize(deserializer)?;
Ok(Self(value.0))
}
}
}
impl Serialize for Ed25519SecretKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
// This is only used for JSON configuration.
assert!(serializer.is_human_readable());
serializer.serialize_str(&hex::encode(self.0.to_bytes()))
}
}
impl<'de> Deserialize<'de> for Ed25519SecretKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
// This is only used for JSON configuration.
assert!(deserializer.is_human_readable());
let s = String::deserialize(deserializer)?;
let value = hex::decode(s).map_err(serde::de::Error::custom)?;
let key =
dalek::SigningKey::from_bytes(value[..].try_into().map_err(serde::de::Error::custom)?);
Ok(Ed25519SecretKey(key))
}
}
impl FromStr for Ed25519PublicKey {
type Err = CryptoError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let value = hex::decode(s)?;
(value.as_slice()).try_into()
}
}
impl TryFrom<&[u8]> for Ed25519PublicKey {
type Error = CryptoError;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
if value.len() != dalek::PUBLIC_KEY_LENGTH {
return Err(CryptoError::IncorrectPublicKeySize {
scheme: ED25519_SCHEME_LABEL,
len: value.len(),
expected: dalek::PUBLIC_KEY_LENGTH,
});
}
let mut pubkey = [0u8; dalek::PUBLIC_KEY_LENGTH];
pubkey.copy_from_slice(value);
Ok(Ed25519PublicKey(pubkey))
}
}
impl From<[u64; 4]> for Ed25519PublicKey {
fn from(integers: [u64; 4]) -> Self {
Ed25519PublicKey(u64_array_to_le_bytes(integers))
}
}
impl From<Ed25519PublicKey> for [u64; 4] {
fn from(pub_key: Ed25519PublicKey) -> Self {
le_bytes_to_u64_array(&pub_key.0)
}
}
impl fmt::Display for Ed25519PublicKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", hex::encode(&self.0[..]))
}
}
impl fmt::Debug for Ed25519PublicKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", hex::encode(&self.0[..8]))
}
}
impl WitType for Ed25519PublicKey {
const SIZE: u32 = <(u64, u64, u64, u64) as WitType>::SIZE;
type Layout = <(u64, u64, u64, u64) as WitType>::Layout;
type Dependencies = HList![];
fn wit_type_name() -> Cow<'static, str> {
"ed25519-public-key".into()
}
fn wit_type_declaration() -> Cow<'static, str> {
concat!(
" record ed25519-public-key {\n",
" part1: u64,\n",
" part2: u64,\n",
" part3: u64,\n",
" part4: u64,\n",
" }\n",
)
.into()
}
}
impl WitLoad for Ed25519PublicKey {
fn load<Instance>(
memory: &Memory<'_, Instance>,
location: GuestPointer,
) -> Result<Self, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let (part1, part2, part3, part4) = WitLoad::load(memory, location)?;
Ok(Self::from([part1, part2, part3, part4]))
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as Layout>::Flat,
memory: &Memory<'_, Instance>,
) -> Result<Self, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let (part1, part2, part3, part4) = WitLoad::lift_from(flat_layout, memory)?;
Ok(Self::from([part1, part2, part3, part4]))
}
}
impl WitStore for Ed25519PublicKey {
fn store<Instance>(
&self,
memory: &mut Memory<'_, Instance>,
location: GuestPointer,
) -> Result<(), RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let [part1, part2, part3, part4] = (*self).into();
(part1, part2, part3, part4).store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut Memory<'_, Instance>,
) -> Result<<Self::Layout as Layout>::Flat, RuntimeError>
where
Instance: InstanceWithMemory,
<Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
{
let [part1, part2, part3, part4] = (*self).into();
(part1, part2, part3, part4).lower(memory)
}
}
impl BcsHashable<'_> for Ed25519PublicKey {}
impl Ed25519Signature {
    /// Computes a signature.
    ///
    /// The signed message is the 32-byte Keccak prehash of `value`, not the
    /// serialized value itself.
    pub fn new<'de, T>(value: &T, secret: &Ed25519SecretKey) -> Self
    where
        T: BcsSignable<'de>,
    {
        Self::sign_prehash(secret, CryptoHash::new(value))
    }
    /// Computes a signature from a prehash.
    pub fn sign_prehash(secret: &Ed25519SecretKey, prehash: CryptoHash) -> Self {
        let signature = secret.0.sign(&prehash.as_bytes().0);
        Ed25519Signature(signature)
    }
    /// Parses bytes to a signature.
    ///
    /// Returns error if input slice is not 64 bytes.
    pub fn from_slice(bytes: &[u8]) -> Result<Self, CryptoError> {
        let sig = dalek::Signature::from_slice(bytes).map_err(|_| {
            CryptoError::IncorrectSignatureBytes {
                scheme: ED25519_SCHEME_LABEL,
                len: bytes.len(),
                expected: dalek::SIGNATURE_LENGTH,
            }
        })?;
        Ok(Ed25519Signature(sig))
    }
    // Verifies against the Keccak prehash, matching `new`/`sign_prehash`.
    fn check_internal<'de, T>(
        &self,
        value: &T,
        author: Ed25519PublicKey,
    ) -> Result<(), dalek::SignatureError>
    where
        T: BcsSignable<'de>,
    {
        let prehash = CryptoHash::new(value).as_bytes().0;
        let public_key = dalek::VerifyingKey::from_bytes(&author.0)?;
        public_key.verify(&prehash, &self.0)
    }
    /// Checks a signature.
    pub fn check<'de, T>(&self, value: &T, author: Ed25519PublicKey) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        self.check_internal(value, author)
            .map_err(|error| CryptoError::InvalidSignature {
                error: error.to_string(),
                type_name: T::type_name().to_string(),
            })
    }
    // NOTE(review): this batch path verifies signatures over the *serialized*
    // message bytes, whereas `new`/`sign_prehash`/`check_internal` use the
    // 32-byte Keccak prehash. Signatures produced by `new` would therefore
    // not verify here — confirm before resurrecting this (currently unused)
    // API.
    fn verify_batch_internal<'a, 'de, T, I>(
        value: &'a T,
        votes: I,
    ) -> Result<(), dalek::SignatureError>
    where
        T: BcsSignable<'de>,
        I: IntoIterator<Item = (&'a Ed25519PublicKey, &'a Ed25519Signature)>,
    {
        let mut msg = Vec::new();
        value.write(&mut msg);
        let mut messages = Vec::new();
        let mut signatures = Vec::new();
        let mut public_keys = Vec::new();
        for (addr, sig) in votes {
            messages.push(msg.as_slice());
            signatures.push(sig.0);
            public_keys.push(dalek::VerifyingKey::from_bytes(&addr.0)?);
        }
        dalek::verify_batch(&messages[..], &signatures[..], &public_keys[..])
    }
    /// Verifies a batch of signatures.
    // NOTE: This is unused now since we don't use ed25519 in consensus layer.
    #[allow(unused)]
    pub fn verify_batch<'a, 'de, T, I>(value: &'a T, votes: I) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de>,
        I: IntoIterator<Item = (&'a Ed25519PublicKey, &'a Ed25519Signature)>,
    {
        Ed25519Signature::verify_batch_internal(value, votes).map_err(|error| {
            CryptoError::InvalidSignature {
                error: format!("batched {}", error),
                type_name: T::type_name().to_string(),
            }
        })
    }
    /// Returns bytes of the signature.
    pub fn as_bytes(&self) -> Vec<u8> {
        self.0.to_bytes().to_vec()
    }
}
impl Serialize for Ed25519Signature {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
if serializer.is_human_readable() {
serializer.serialize_str(&hex::encode(self.0.to_bytes()))
} else {
serializer.serialize_newtype_struct("Ed25519Signature", &self.0)
}
}
}
impl<'de> Deserialize<'de> for Ed25519Signature {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
if deserializer.is_human_readable() {
let s = String::deserialize(deserializer)?;
let value = hex::decode(s).map_err(serde::de::Error::custom)?;
Self::from_slice(&value).map_err(serde::de::Error::custom)
} else {
#[derive(Deserialize)]
#[serde(rename = "Ed25519Signature")]
struct Signature(dalek::Signature);
let value = Signature::deserialize(deserializer)?;
Ok(Self(value.0))
}
}
}
impl fmt::Display for Ed25519Signature {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = hex::encode(self.0.to_bytes());
write!(f, "{}", s)
}
}
impl fmt::Debug for Ed25519Signature {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", hex::encode(&self.0.to_bytes()[0..8]))
}
}
doc_scalar!(Ed25519Signature, "An Ed25519 signature value");
doc_scalar!(Ed25519PublicKey, "A Ed25519 signature public key");
#[cfg(with_testing)]
mod tests {
#[test]
fn test_signatures() {
use serde::{Deserialize, Serialize};
use crate::crypto::{
ed25519::{Ed25519SecretKey, Ed25519Signature},
BcsSignable, TestString,
};
#[derive(Debug, Serialize, Deserialize)]
struct Foo(String);
impl BcsSignable<'_> for Foo {}
let key1 = Ed25519SecretKey::generate();
let addr1 = key1.public();
let key2 = Ed25519SecretKey::generate();
let addr2 = key2.public();
let ts = TestString("hello".into());
let tsx = TestString("hellox".into());
let foo = Foo("hello".into());
let s = Ed25519Signature::new(&ts, &key1);
assert!(s.check(&ts, addr1).is_ok());
assert!(s.check(&ts, addr2).is_err());
assert!(s.check(&tsx, addr1).is_err());
assert!(s.check(&foo, addr1).is_err());
}
#[test]
fn test_public_key_serialization() {
use crate::crypto::ed25519::Ed25519PublicKey;
let key_in = Ed25519PublicKey::test_key(0);
let s = serde_json::to_string(&key_in).unwrap();
let key_out: Ed25519PublicKey = serde_json::from_str(&s).unwrap();
assert_eq!(key_out, key_in);
let s = bcs::to_bytes(&key_in).unwrap();
let key_out: Ed25519PublicKey = bcs::from_bytes(&s).unwrap();
assert_eq!(key_out, key_in);
}
#[test]
fn test_secret_key_serialization() {
use crate::crypto::ed25519::Ed25519SecretKey;
let key_in = Ed25519SecretKey::generate();
let s = serde_json::to_string(&key_in).unwrap();
let key_out: Ed25519SecretKey = serde_json::from_str(&s).unwrap();
assert_eq!(key_out.0.to_bytes(), key_in.0.to_bytes());
}
#[test]
fn test_signature_serialization() {
use crate::crypto::ed25519::Ed25519Signature;
let sig_in = Ed25519Signature::from_slice(&[0u8; 64]).unwrap();
let s = serde_json::to_string(&sig_in).unwrap();
let sig_out: Ed25519Signature = serde_json::from_str(&s).unwrap();
assert_eq!(sig_out.0.to_bytes(), sig_in.0.to_bytes());
let s = bcs::to_bytes(&sig_in).unwrap();
let sig_out: Ed25519Signature = bcs::from_bytes(&s).unwrap();
assert_eq!(sig_out, sig_in);
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/secp256k1/evm.rs | linera-base/src/crypto/secp256k1/evm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines EIP-191 compatible signature primitives used by the Linera protocol.
use std::{
borrow::Cow,
fmt,
hash::{Hash, Hasher},
str::FromStr,
};
use alloy_primitives::{eip191_hash_message, Signature};
use k256::{
ecdsa::{SigningKey, VerifyingKey},
elliptic_curve::sec1::FromEncodedPoint,
EncodedPoint,
};
use linera_witty::{
GuestPointer, HList, InstanceWithMemory, Layout, Memory, Runtime, RuntimeError, RuntimeMemory,
WitLoad, WitStore, WitType,
};
use serde::{Deserialize, Serialize};
use super::{BcsHashable, BcsSignable, CryptoError, CryptoHash, HasTypeName};
use crate::doc_scalar;
/// Name of the secp256k1 scheme.
const EVM_SECP256K1_SCHEME_LABEL: &str = "evm_secp256k1";
/// Length of secp256k1 compressed public key.
const EVM_SECP256K1_PUBLIC_KEY_SIZE: usize = 33;
/// Length of secp256k1 signature.
const EVM_SECP256K1_SIGNATURE_SIZE: usize = 65;
/// A secp256k1 secret key.
pub struct EvmSecretKey(pub SigningKey);
impl Eq for EvmSecretKey {}
impl PartialEq for EvmSecretKey {
fn eq(&self, other: &Self) -> bool {
self.0.to_bytes() == other.0.to_bytes()
}
}
/// A secp256k1 public key.
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct EvmPublicKey(pub VerifyingKey);
impl Hash for EvmPublicKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.to_encoded_point(true).as_bytes().hash(state);
}
}
/// Secp256k1 public/secret key pair.
#[derive(Debug, PartialEq, Eq)]
pub struct EvmKeyPair {
/// Secret key.
pub secret_key: EvmSecretKey,
/// Public key.
pub public_key: EvmPublicKey,
}
/// A secp256k1 signature.
#[derive(Eq, PartialEq, Copy, Clone)]
pub struct EvmSignature(pub Signature);
impl FromStr for EvmSignature {
type Err = CryptoError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// If the string starts with "0x", we remove it before decoding.
let bytes = hex::decode(s.strip_prefix("0x").unwrap_or(s))?;
Self::from_slice(&bytes)
}
}
impl EvmPublicKey {
    /// A fake public key used for testing.
    // Derived deterministically from `seed`, so equal seeds give equal keys.
    #[cfg(with_testing)]
    pub fn test_key(seed: u8) -> Self {
        use rand::SeedableRng;
        let mut rng = rand::rngs::StdRng::seed_from_u64(seed as u64);
        let sk = k256::SecretKey::random(&mut rng);
        Self(sk.public_key().into())
    }
    /// Returns the bytes of the public key in compressed representation.
    pub fn as_bytes(&self) -> [u8; EVM_SECP256K1_PUBLIC_KEY_SIZE] {
        // UNWRAP: We already have valid key so conversion should not fail.
        self.0.to_encoded_point(true).as_bytes().try_into().unwrap()
    }
    /// Decodes the bytes into the public key.
    /// Expects the bytes to be of compressed representation.
    ///
    /// Panics if the encoding can't be done in a constant time.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, CryptoError> {
        // Any malformed encoding is reported as a size error carrying the
        // actual length, even if the length itself was correct.
        let encoded_point =
            EncodedPoint::from_bytes(bytes).map_err(|_| CryptoError::IncorrectPublicKeySize {
                scheme: EVM_SECP256K1_SCHEME_LABEL,
                len: bytes.len(),
                expected: EVM_SECP256K1_PUBLIC_KEY_SIZE,
            })?;
        match k256::PublicKey::from_encoded_point(&encoded_point).into_option() {
            Some(public_key) => Ok(Self(public_key.into())),
            None => {
                // A syntactically valid encoding of the point at infinity is
                // not a usable public key.
                let error = CryptoError::Secp256k1PointAtInfinity(hex::encode(bytes));
                Err(error)
            }
        }
    }
    /// Returns an EVM address for the public key.
    pub fn address(&self) -> alloy_primitives::Address {
        alloy_primitives::Address::from_public_key(&self.0)
    }
    /// Recovers the public key from the signature and the value.
    ///
    /// This function turns the `value` into a `CryptoHash`, then hashes it using EIP-191
    /// and finally recovers the public key from the signature.
    // NOTE(review): the EIP-191 prefixing appears to happen inside alloy's
    // `recover_from_msg` (cf. the `eip191_hash_message` import) — confirm.
    pub fn recover_from_msg<'de, T>(
        signature: &EvmSignature,
        value: &T,
    ) -> Result<Self, CryptoError>
    where
        T: BcsSignable<'de>,
    {
        let message = CryptoHash::new(value).as_bytes().0;
        let public_key =
            signature
                .0
                .recover_from_msg(message)
                .map_err(|_| CryptoError::InvalidSignature {
                    error: "Failed to recover public key from signature".to_string(),
                    type_name: Self::type_name().to_string(),
                })?;
        Ok(EvmPublicKey(public_key))
    }
}
impl fmt::Debug for EvmSecretKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<redacted for secp256k1 secret key>")
}
}
impl Serialize for EvmSecretKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
// This is only used for JSON configuration.
assert!(serializer.is_human_readable());
serializer.serialize_str(&hex::encode(self.0.to_bytes()))
}
}
impl<'de> Deserialize<'de> for EvmSecretKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
// This is only used for JSON configuration.
assert!(deserializer.is_human_readable());
let str = String::deserialize(deserializer)?;
let bytes = hex::decode(&str).map_err(serde::de::Error::custom)?;
let sk = SigningKey::from_slice(&bytes).map_err(serde::de::Error::custom)?;
Ok(EvmSecretKey(sk))
}
}
#[cfg(with_testing)]
impl FromStr for EvmSecretKey {
type Err = CryptoError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = hex::decode(s)?;
let sk = SigningKey::from_slice(&bytes).expect("Failed to create secret key");
Ok(EvmSecretKey(sk))
}
}
impl Serialize for EvmPublicKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
if serializer.is_human_readable() {
serializer.serialize_str(&hex::encode(self.as_bytes()))
} else {
let compact_pk = serde_utils::CompressedPublicKey(self.as_bytes());
serializer.serialize_newtype_struct("EvmPublicKey", &compact_pk)
}
}
}
impl<'de> Deserialize<'de> for EvmPublicKey {
    /// Deserializes a public key, mirroring `Serialize`: hex string for
    /// human-readable formats, fixed-size 33-byte newtype otherwise.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = hex::decode(s).map_err(serde::de::Error::custom)?;
            Ok(EvmPublicKey::from_bytes(&value).map_err(serde::de::Error::custom)?)
        } else {
            // Local mirror type so the newtype name matches what `serialize` wrote.
            #[derive(Deserialize)]
            #[serde(rename = "EvmPublicKey")]
            struct PublicKey(serde_utils::CompressedPublicKey);
            let compact = PublicKey::deserialize(deserializer)?;
            Ok(EvmPublicKey::from_bytes(&compact.0 .0).map_err(serde::de::Error::custom)?)
        }
    }
}
impl FromStr for EvmPublicKey {
type Err = CryptoError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
hex::decode(s.strip_prefix("0x").unwrap_or(s))?
.as_slice()
.try_into()
}
}
impl TryFrom<&[u8]> for EvmPublicKey {
type Error = CryptoError;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
Self::from_bytes(value)
}
}
impl fmt::Display for EvmPublicKey {
    /// Formats the key as lowercase hex of its compressed bytes.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str(&hex::encode(self.as_bytes()))
    }
}
impl fmt::Debug for EvmPublicKey {
    /// Shows only the first 9 bytes (18 hex characters) followed by `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = &self.as_bytes()[..9];
        write!(formatter, "{}..", hex::encode(prefix))
    }
}
// Marker: the public key may be BCS-serialized and hashed.
impl BcsHashable<'_> for EvmPublicKey {}
impl WitType for EvmPublicKey {
    // The 33 compressed bytes are transported over WIT as four u64 words plus
    // one trailing byte, so size/layout are borrowed from that tuple.
    const SIZE: u32 = <(u64, u64, u64, u64, u8) as WitType>::SIZE;
    type Layout = <(u64, u64, u64, u64, u8) as WitType>::Layout;
    type Dependencies = HList![];
    fn wit_type_name() -> Cow<'static, str> {
        "evm-secp256k1-public-key".into()
    }
    fn wit_type_declaration() -> Cow<'static, str> {
        concat!(
            "    record evm-secp256k1-public-key {\n",
            "        part1: u64,\n",
            "        part2: u64,\n",
            "        part3: u64,\n",
            "        part4: u64,\n",
            "        part5: u8\n",
            "    }\n",
        )
        .into()
    }
}
impl WitLoad for EvmPublicKey {
    /// Loads the five tuple parts from guest memory and reassembles the key.
    fn load<Instance>(
        memory: &Memory<'_, Instance>,
        location: GuestPointer,
    ) -> Result<Self, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = WitLoad::load(memory, location)?;
        Ok(Self::from((part1, part2, part3, part4, part5)))
    }
    /// Lifts the five tuple parts from the flat layout and reassembles the key.
    fn lift_from<Instance>(
        flat_layout: <Self::Layout as Layout>::Flat,
        memory: &Memory<'_, Instance>,
    ) -> Result<Self, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = WitLoad::lift_from(flat_layout, memory)?;
        Ok(Self::from((part1, part2, part3, part4, part5)))
    }
}
impl WitStore for EvmPublicKey {
    /// Stores the key into guest memory as its five tuple parts.
    fn store<Instance>(
        &self,
        memory: &mut Memory<'_, Instance>,
        location: GuestPointer,
    ) -> Result<(), RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = (*self).into();
        (part1, part2, part3, part4, part5).store(memory, location)
    }
    /// Lowers the key into the flat layout of its five tuple parts.
    fn lower<Instance>(
        &self,
        memory: &mut Memory<'_, Instance>,
    ) -> Result<<Self::Layout as Layout>::Flat, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = (*self).into();
        (part1, part2, part3, part4, part5).lower(memory)
    }
}
impl From<(u64, u64, u64, u64, u8)> for EvmPublicKey {
    /// Reassembles the 33-byte compressed key from four big-endian `u64` words
    /// plus a trailing byte (the inverse of the tuple conversion below).
    ///
    /// Panics if the bytes do not form a valid compressed secp256k1 point —
    /// presumably WIT transport only carries values produced from valid keys;
    /// TODO(review): confirm.
    fn from((part1, part2, part3, part4, part5): (u64, u64, u64, u64, u8)) -> Self {
        let mut bytes = [0u8; EVM_SECP256K1_PUBLIC_KEY_SIZE];
        bytes[0..8].copy_from_slice(&part1.to_be_bytes());
        bytes[8..16].copy_from_slice(&part2.to_be_bytes());
        bytes[16..24].copy_from_slice(&part3.to_be_bytes());
        bytes[24..32].copy_from_slice(&part4.to_be_bytes());
        bytes[32] = part5;
        Self::from_bytes(&bytes).unwrap()
    }
}
impl From<EvmPublicKey> for (u64, u64, u64, u64, u8) {
    /// Splits the 33-byte compressed key into four big-endian `u64` words and
    /// a trailing byte, for WIT transport.
    fn from(key: EvmPublicKey) -> Self {
        let bytes = key.as_bytes();
        let word = |range: std::ops::Range<usize>| {
            u64::from_be_bytes(bytes[range].try_into().unwrap())
        };
        (
            word(0..8),
            word(8..16),
            word(16..24),
            word(24..32),
            bytes[32],
        )
    }
}
impl EvmKeyPair {
    /// Generates a new key pair.
    #[cfg(all(with_getrandom, with_testing))]
    pub fn generate() -> Self {
        let mut rng = rand::rngs::OsRng;
        Self::generate_from(&mut rng)
    }
    /// Generates a new key pair from the given RNG. Use with care.
    #[cfg(with_getrandom)]
    pub fn generate_from<R: crate::crypto::CryptoRng>(rng: &mut R) -> Self {
        // The public key is derived from the fresh secret key so the two halves
        // always match.
        let secret_key = EvmSecretKey(SigningKey::random(rng));
        let public_key = secret_key.public();
        EvmKeyPair {
            secret_key,
            public_key,
        }
    }
}
impl EvmSecretKey {
    /// Returns a public key for the given secret key.
    pub fn public(&self) -> EvmPublicKey {
        EvmPublicKey(*self.0.verifying_key())
    }
    /// Copies the key pair, **including the secret key**.
    ///
    /// The `Clone` and `Copy` traits are deliberately not implemented for `EvmSecretKey` to prevent
    /// accidental copies of secret keys.
    pub fn copy(&self) -> Self {
        Self(self.0.clone())
    }
    /// Generates a new key pair.
    #[cfg(all(with_getrandom, with_testing))]
    pub fn generate() -> Self {
        let mut rng = rand::rngs::OsRng;
        Self::generate_from(&mut rng)
    }
    /// Generates a new key pair from the given RNG. Use with care.
    #[cfg(with_getrandom)]
    pub fn generate_from<R: crate::crypto::CryptoRng>(rng: &mut R) -> Self {
        EvmSecretKey(SigningKey::random(rng))
    }
    /// Returns an EVM address for the public key.
    pub fn address(&self) -> alloy_primitives::Address {
        // Delegates address derivation (keccak of the public key) to alloy.
        alloy_primitives::Address::from_private_key(&self.0)
    }
}
impl EvmSignature {
    /// Computes a secp256k1 signature for `prehash` using the given `secret`.
    pub fn new(prehash: CryptoHash, secret: &EvmSecretKey) -> Self {
        Self::sign_prehash(secret, prehash)
    }
    /// Computes a signature from a prehash.
    ///
    /// The hash is first wrapped in the EIP-191 "personal message" envelope so
    /// that signatures match what EVM wallets produce (see the
    /// `eip191_compatibility` test, whose fixture was generated in MetaMask).
    pub fn sign_prehash(secret: &EvmSecretKey, prehash: CryptoHash) -> Self {
        let message = eip191_hash_message(prehash.as_bytes().0).0;
        // The recovery id is kept alongside (r, s) so the public key can later
        // be recovered from the signature alone.
        let (signature, rid) = secret
            .0
            .sign_prehash_recoverable(&message)
            .expect("Failed to sign prehashed data"); // NOTE: This is a critical error we don't control.
        EvmSignature((signature, rid).into())
    }
    /// Checks a signature.
    pub fn check<'de, T>(&self, value: &T, author: EvmPublicKey) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        // Hash the BCS serialization of `value`; verification re-applies the
        // EIP-191 envelope inside `verify_inner`.
        let prehash = CryptoHash::new(value).as_bytes().0;
        self.verify_inner::<T>(prehash, author)
    }
    /// Checks a signature against a recovered public key.
    ///
    /// Recovers the signer's public key from the signature over `value` and
    /// verifies that its EVM address equals `sender_address`.
    pub fn check_with_recover<'de, T>(
        &self,
        value: &T,
        sender_address: [u8; 20],
    ) -> Result<EvmPublicKey, CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        let msg = CryptoHash::new(value).as_bytes().0;
        // NOTE(review): `recover_from_msg` is assumed to apply the same EIP-191
        // envelope used when signing; the `public_key_recovery` test exercises
        // the round trip.
        let recovered_public_key = match self.0.recover_from_msg(msg) {
            Ok(public_key) => EvmPublicKey(public_key),
            Err(_) => {
                return Err(CryptoError::InvalidSignature {
                    error: "Failed to recover public key from signature".to_string(),
                    type_name: T::type_name().to_string(),
                });
            }
        };
        if recovered_public_key.address() != alloy_primitives::Address::new(sender_address) {
            return Err(CryptoError::InvalidSignature {
                error: "Recovered public key does not match sender address".to_string(),
                type_name: T::type_name().to_string(),
            });
        }
        Ok(recovered_public_key)
    }
    /// Verifies a batch of signatures.
    ///
    /// Returns an error on first failed signature.
    pub fn verify_batch<'a, 'de, T, I>(value: &'a T, votes: I) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
        I: IntoIterator<Item = &'a (EvmPublicKey, EvmSignature)>,
    {
        // The hash of `value` is computed once and shared across all checks.
        let prehash = CryptoHash::new(value).as_bytes().0;
        for (author, signature) in votes {
            signature.verify_inner::<T>(prehash, *author)?;
        }
        Ok(())
    }
    /// Returns the byte representation of the signature.
    pub fn as_bytes(&self) -> [u8; EVM_SECP256K1_SIGNATURE_SIZE] {
        self.0.as_bytes()
    }
    /// Verifies `self` over `prehash` for `author`, re-applying the EIP-191
    /// envelope that `sign_prehash` used.
    fn verify_inner<'de, T>(
        &self,
        prehash: [u8; 32],
        author: EvmPublicKey,
    ) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        use k256::ecdsa::signature::hazmat::PrehashVerifier;
        let message_hash = eip191_hash_message(prehash).0;
        author
            .0
            .verify_prehash(
                &message_hash,
                // Convert the alloy signature into k256 form for verification.
                &self.0.to_k256().map_err(CryptoError::Secp256k1Error)?,
            )
            .map_err(|error| CryptoError::InvalidSignature {
                error: error.to_string(),
                type_name: T::type_name().to_string(),
            })
    }
    /// Creates a signature from the bytes.
    /// Expects the signature to be serialized in raw-bytes form.
    pub fn from_slice<A: AsRef<[u8]>>(bytes: A) -> Result<Self, CryptoError> {
        let bytes = bytes.as_ref();
        let sig = alloy_primitives::Signature::from_raw(bytes).map_err(|_| {
            CryptoError::IncorrectSignatureBytes {
                scheme: EVM_SECP256K1_SCHEME_LABEL,
                len: bytes.len(),
                expected: EVM_SECP256K1_SIGNATURE_SIZE,
            }
        })?;
        Ok(EvmSignature(sig))
    }
}
impl Serialize for EvmSignature {
    /// Serializes the signature: hex string for human-readable formats,
    /// otherwise a fixed-size 65-byte newtype (see `serde_utils`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            serializer.serialize_str(&hex::encode(self.as_bytes()))
        } else {
            let compact = serde_utils::CompactSignature(self.as_bytes());
            serializer.serialize_newtype_struct("EvmSignature", &compact)
        }
    }
}
impl<'de> Deserialize<'de> for EvmSignature {
    /// Deserializes a signature, mirroring `Serialize`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = hex::decode(s).map_err(serde::de::Error::custom)?;
            Self::from_slice(&value).map_err(serde::de::Error::custom)
        } else {
            // Local mirror type so the newtype name matches what `serialize` wrote.
            #[derive(Deserialize)]
            #[serde(rename = "EvmSignature")]
            struct Signature(serde_utils::CompactSignature);
            let value = Signature::deserialize(deserializer)?;
            Self::from_slice(value.0 .0.as_ref()).map_err(serde::de::Error::custom)
        }
    }
}
impl fmt::Display for EvmSignature {
    /// Formats the signature as lowercase hex of its raw bytes.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str(&hex::encode(self.as_bytes()))
    }
}
impl fmt::Debug for EvmSignature {
    /// Shows only the first 9 bytes (18 hex characters) followed by `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = &self.as_bytes()[..9];
        write!(formatter, "{}..", hex::encode(prefix))
    }
}
doc_scalar!(EvmSignature, "A secp256k1 signature value");
doc_scalar!(EvmPublicKey, "A secp256k1 public key value");
mod serde_utils {
    // Helper newtypes giving fixed-length serde representations for the
    // 65-byte signature and 33-byte compressed public key.
    use serde::{Deserialize, Serialize};
    use serde_with::serde_as;
    use super::{EVM_SECP256K1_PUBLIC_KEY_SIZE, EVM_SECP256K1_SIGNATURE_SIZE};
    /// Wrapper around compact signature serialization
    /// so that we can implement custom serializer for it that uses fixed length.
    // Serde treats arrays larger than 32 as variable length arrays, and adds the length as a prefix.
    // Since we want a fixed size representation, we wrap it in this helper struct and use serde_as.
    #[serde_as]
    #[derive(Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CompactSignature(#[serde_as(as = "[_; 65]")] pub [u8; EVM_SECP256K1_SIGNATURE_SIZE]);
    /// Fixed-length wrapper for the compressed public key bytes.
    #[serde_as]
    #[derive(Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CompressedPublicKey(
        #[serde_as(as = "[_; 33]")] pub [u8; EVM_SECP256K1_PUBLIC_KEY_SIZE],
    );
}
#[cfg(with_testing)]
mod tests {
    // Unit tests for the EVM-flavored secp256k1 primitives: EIP-191 wallet
    // compatibility, signing/checking, serde round-trips, and key recovery.
    #[test]
    fn eip191_compatibility() {
        use std::str::FromStr;
        use crate::crypto::{CryptoHash, EvmSecretKey, EvmSignature};
        // Generated in MetaMask.
        let secret_key = "f77a21701522a03b01c111ad2d2cdaf2b8403b47507ee0aec3c2e52b765d7a66";
        let signer = EvmSecretKey::from_str(secret_key).unwrap();
        let crypto_hash = CryptoHash::from_str(
            "c520e2b24b05e70c39c36d4aa98e9129ac0079ea002d4c382e6996ea11946d1e",
        )
        .unwrap();
        let signature = EvmSignature::new(crypto_hash, &signer);
        // The expected signature was produced by MetaMask over the same hash.
        let js_signature = EvmSignature::from_str("0xe257048813b851f812ba6e508e972d8bb09504824692b027ca95d31301dbe8c7103a2f35ce9950d031d260f412dcba09c24027288872a67abe261c0a3e55c9121b").unwrap();
        assert_eq!(signature, js_signature);
    }
    #[test]
    fn test_signatures() {
        use serde::{Deserialize, Serialize};
        use crate::crypto::{
            secp256k1::evm::{EvmKeyPair, EvmSignature},
            BcsSignable, CryptoHash, TestString,
        };
        // A distinct signable type: its signature must not validate for
        // `TestString`, even with identical content.
        #[derive(Debug, Serialize, Deserialize)]
        struct Foo(String);
        impl BcsSignable<'_> for Foo {}
        let keypair1 = EvmKeyPair::generate();
        let keypair2 = EvmKeyPair::generate();
        let ts = TestString("hello".into());
        let ts_cryptohash = CryptoHash::new(&ts);
        let tsx = TestString("hellox".into());
        let foo = Foo("hello".into());
        let s = EvmSignature::new(ts_cryptohash, &keypair1.secret_key);
        assert!(s.check(&ts, keypair1.public_key).is_ok());
        assert!(s.check(&ts, keypair2.public_key).is_err());
        assert!(s.check(&tsx, keypair1.public_key).is_err());
        assert!(s.check(&foo, keypair1.public_key).is_err());
    }
    #[test]
    fn test_public_key_serialization() {
        use crate::crypto::secp256k1::evm::EvmPublicKey;
        let key_in = EvmPublicKey::test_key(0);
        // Human-readable (JSON) round trip.
        let s = serde_json::to_string(&key_in).unwrap();
        let key_out: EvmPublicKey = serde_json::from_str(&s).unwrap();
        assert_eq!(key_out, key_in);
        // Binary (BCS) round trip.
        let s = bcs::to_bytes(&key_in).unwrap();
        let key_out: EvmPublicKey = bcs::from_bytes(&s).unwrap();
        assert_eq!(key_out, key_in);
    }
    #[test]
    fn test_secret_key_serialization() {
        use crate::crypto::secp256k1::evm::{EvmKeyPair, EvmSecretKey};
        // Secret keys only support human-readable serialization (JSON).
        let key_in = EvmKeyPair::generate().secret_key;
        let s = serde_json::to_string(&key_in).unwrap();
        let key_out: EvmSecretKey = serde_json::from_str(&s).unwrap();
        assert_eq!(key_out, key_in);
    }
    #[test]
    fn test_signature_serialization() {
        use crate::crypto::{
            secp256k1::evm::{EvmKeyPair, EvmSignature},
            CryptoHash, TestString,
        };
        let keypair = EvmKeyPair::generate();
        let prehash = CryptoHash::new(&TestString("hello".into()));
        let sig = EvmSignature::new(prehash, &keypair.secret_key);
        // JSON and BCS round trips.
        let s = serde_json::to_string(&sig).unwrap();
        let sig2: EvmSignature = serde_json::from_str(&s).unwrap();
        assert_eq!(sig, sig2);
        let s = bcs::to_bytes(&sig).unwrap();
        let sig2: EvmSignature = bcs::from_bytes(&s).unwrap();
        assert_eq!(sig, sig2);
    }
    #[test]
    fn public_key_from_str() {
        use std::str::FromStr;
        use crate::crypto::secp256k1::evm::EvmPublicKey;
        // Display -> FromStr round trip.
        let key = EvmPublicKey::test_key(0);
        let s = key.to_string();
        let key2 = EvmPublicKey::from_str(s.as_str()).unwrap();
        assert_eq!(key, key2);
    }
    #[test]
    fn bytes_repr_compact_public_key() {
        use crate::crypto::secp256k1::evm::{EvmPublicKey, EVM_SECP256K1_PUBLIC_KEY_SIZE};
        let key_in: EvmPublicKey = EvmPublicKey::test_key(0);
        let bytes = key_in.as_bytes();
        assert!(
            bytes.len() == EVM_SECP256K1_PUBLIC_KEY_SIZE,
            "::to_bytes() should return compressed representation"
        );
        let key_out = EvmPublicKey::from_bytes(&bytes).unwrap();
        assert_eq!(key_in, key_out);
    }
    #[test]
    fn human_readable_ser() {
        use crate::crypto::{
            secp256k1::evm::{EvmKeyPair, EvmSignature},
            CryptoHash, TestString,
        };
        let key_pair = EvmKeyPair::generate();
        let prehash = CryptoHash::new(&TestString("hello".into()));
        let sig = EvmSignature::new(prehash, &key_pair.secret_key);
        let s = serde_json::to_string(&sig).unwrap();
        let sig2: EvmSignature = serde_json::from_str(&s).unwrap();
        assert_eq!(sig, sig2);
    }
    #[test]
    fn public_key_recovery() {
        use crate::crypto::{
            secp256k1::evm::{EvmKeyPair, EvmPublicKey, EvmSignature},
            CryptoHash, TestString,
        };
        let key_pair = EvmKeyPair::generate();
        let address = key_pair.public_key.address();
        let msg = TestString("hello".into());
        let prehash = CryptoHash::new(&msg);
        let sig = EvmSignature::new(prehash, &key_pair.secret_key);
        // Recovery must reproduce both the address and the public key.
        sig.check_with_recover(&msg, address.0 .0).unwrap();
        let public_key = EvmPublicKey::recover_from_msg(&sig, &msg).unwrap();
        assert_eq!(public_key, key_pair.public_key);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/src/crypto/secp256k1/mod.rs | linera-base/src/crypto/secp256k1/mod.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Defines secp256k1 signature primitives used by the Linera protocol.
pub mod evm;
use std::{
borrow::Cow,
fmt,
hash::{Hash, Hasher},
str::FromStr,
};
use allocative::{Allocative, Visitor};
use k256::{
ecdsa::{Signature, SigningKey, VerifyingKey},
elliptic_curve::sec1::FromEncodedPoint,
EncodedPoint,
};
use linera_witty::{
GuestPointer, HList, InstanceWithMemory, Layout, Memory, Runtime, RuntimeError, RuntimeMemory,
WitLoad, WitStore, WitType,
};
use serde::{Deserialize, Serialize};
use super::{BcsHashable, BcsSignable, CryptoError, CryptoHash, HasTypeName};
use crate::doc_scalar;
/// Name of the secp256k1 scheme.
const SECP256K1_SCHEME_LABEL: &str = "secp256k1";
/// Length of secp256k1 compressed public key.
const SECP256K1_PUBLIC_KEY_SIZE: usize = 33;
/// Length of secp256k1 signature.
const SECP256K1_SIGNATURE_SIZE: usize = 64;
/// A secp256k1 secret key.
///
/// Deliberately neither `Clone` nor `Copy`; use the explicit `copy` method
/// when a duplicate is genuinely required.
#[derive(Eq, PartialEq)]
pub struct Secp256k1SecretKey(pub SigningKey);
/// A secp256k1 public key.
///
/// Serialized externally in compressed (33-byte) SEC1 form; see `as_bytes`.
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone)]
pub struct Secp256k1PublicKey(pub VerifyingKey);
impl Allocative for Secp256k1PublicKey {
    // The key holds no heap data, so a simple sized visit suffices.
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_simple_sized::<Self>();
    }
}
impl Hash for Secp256k1PublicKey {
    // Hash the canonical compressed encoding so equal keys hash equally,
    // consistent with the derived `PartialEq`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.to_encoded_point(true).as_bytes().hash(state);
    }
}
/// Secp256k1 public/secret key pair.
///
/// Constructed via `generate`/`generate_from`, which derive the public key
/// from the secret key so the two halves always match.
#[derive(Debug, PartialEq, Eq)]
pub struct Secp256k1KeyPair {
    /// Secret key.
    pub secret_key: Secp256k1SecretKey,
    /// Public key.
    pub public_key: Secp256k1PublicKey,
}
/// A secp256k1 signature.
///
/// 64 bytes (r || s); serialized via the fixed-length `serde_utils` wrapper.
#[derive(Eq, PartialEq, Copy, Clone)]
pub struct Secp256k1Signature(pub Signature);
impl Allocative for Secp256k1Signature {
    // The signature holds no heap data, so a simple sized visit suffices.
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_simple_sized::<Self>();
    }
}
impl Secp256k1PublicKey {
    /// A fake public key used for testing.
    ///
    /// Deterministic: the same `seed` always yields the same key.
    #[cfg(all(with_testing, not(target_arch = "wasm32")))]
    pub fn test_key(seed: u8) -> Self {
        use rand::SeedableRng;
        let mut rng = rand::rngs::StdRng::seed_from_u64(seed as u64);
        let sk = k256::SecretKey::random(&mut rng);
        Self(sk.public_key().into())
    }
    /// Returns the bytes of the public key in compressed representation.
    pub fn as_bytes(&self) -> [u8; SECP256K1_PUBLIC_KEY_SIZE] {
        // UNWRAP: We already have valid key so conversion should not fail.
        self.0.to_encoded_point(true).as_bytes().try_into().unwrap()
    }
    /// Decodes the bytes into the public key.
    /// Expects the bytes to be of compressed representation.
    ///
    /// Panics if the encoding can't be done in a constant time.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, CryptoError> {
        let encoded_point =
            EncodedPoint::from_bytes(bytes).map_err(|_| CryptoError::IncorrectPublicKeySize {
                scheme: SECP256K1_SCHEME_LABEL,
                len: bytes.len(),
                expected: SECP256K1_PUBLIC_KEY_SIZE,
            })?;
        // A well-formed encoding can still fail to decode (point at infinity).
        match k256::PublicKey::from_encoded_point(&encoded_point).into_option() {
            Some(public_key) => Ok(Self(public_key.into())),
            None => {
                let error = CryptoError::Secp256k1PointAtInfinity(hex::encode(bytes));
                Err(error)
            }
        }
    }
}
impl fmt::Debug for Secp256k1SecretKey {
    /// Debug output never contains key material; secret keys are redacted.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("<redacted for Secp256k1 secret key>")
    }
}
impl Serialize for Secp256k1SecretKey {
    /// Serializes the secret key as a hex string.
    ///
    /// Only human-readable formats are supported (asserted below) so that the
    /// secret key cannot accidentally end up in a binary wire format.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        // This is only used for JSON configuration.
        assert!(serializer.is_human_readable());
        serializer.serialize_str(&hex::encode(self.0.to_bytes()))
    }
}
impl<'de> Deserialize<'de> for Secp256k1SecretKey {
    /// Deserializes a secret key from its hex encoding (the inverse of `Serialize`).
    ///
    /// Only human-readable formats are supported (asserted below).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        // This is only used for JSON configuration.
        assert!(deserializer.is_human_readable());
        let str = String::deserialize(deserializer)?;
        let bytes = hex::decode(&str).map_err(serde::de::Error::custom)?;
        let sk = k256::ecdsa::SigningKey::from_slice(&bytes).map_err(serde::de::Error::custom)?;
        Ok(Secp256k1SecretKey(sk))
    }
}
impl Serialize for Secp256k1PublicKey {
    /// Serializes the public key: hex string for human-readable formats,
    /// otherwise a fixed-size 33-byte newtype (see `serde_utils`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            serializer.serialize_str(&hex::encode(self.as_bytes()))
        } else {
            let compact_pk = serde_utils::CompressedPublicKey(self.as_bytes());
            serializer.serialize_newtype_struct("Secp256k1PublicKey", &compact_pk)
        }
    }
}
impl<'de> Deserialize<'de> for Secp256k1PublicKey {
    /// Deserializes a public key, mirroring `Serialize`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = hex::decode(s).map_err(serde::de::Error::custom)?;
            Ok(Secp256k1PublicKey::from_bytes(&value).map_err(serde::de::Error::custom)?)
        } else {
            // Local mirror type so the newtype name matches what `serialize` wrote.
            #[derive(Deserialize)]
            #[serde(rename = "Secp256k1PublicKey")]
            struct PublicKey(serde_utils::CompressedPublicKey);
            let compact = PublicKey::deserialize(deserializer)?;
            Ok(Secp256k1PublicKey::from_bytes(&compact.0 .0).map_err(serde::de::Error::custom)?)
        }
    }
}
impl FromStr for Secp256k1PublicKey {
    type Err = CryptoError;

    /// Parses a public key from its hex encoding.
    ///
    /// Accepts an optional `0x` prefix for consistency with the EVM variant's
    /// parser; bare hex (the `Display` output) parses exactly as before.
    ///
    /// # Errors
    /// Returns a [`CryptoError`] if the string is not valid hex or does not
    /// encode a valid compressed secp256k1 point.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        hex::decode(s.strip_prefix("0x").unwrap_or(s))?
            .as_slice()
            .try_into()
    }
}
impl TryFrom<&[u8]> for Secp256k1PublicKey {
type Error = CryptoError;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
Self::from_bytes(value)
}
}
impl fmt::Display for Secp256k1PublicKey {
    /// Formats the key as lowercase hex of its compressed bytes.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str(&hex::encode(self.as_bytes()))
    }
}
impl fmt::Debug for Secp256k1PublicKey {
    /// Shows only the first 9 bytes (18 hex characters) followed by `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = &self.as_bytes()[..9];
        write!(formatter, "{}..", hex::encode(prefix))
    }
}
// Marker: the public key may be BCS-serialized and hashed.
impl BcsHashable<'_> for Secp256k1PublicKey {}
impl WitType for Secp256k1PublicKey {
    // The 33 compressed bytes are transported over WIT as four u64 words plus
    // one trailing byte, so size/layout are borrowed from that tuple.
    const SIZE: u32 = <(u64, u64, u64, u64, u8) as WitType>::SIZE;
    type Layout = <(u64, u64, u64, u64, u8) as WitType>::Layout;
    type Dependencies = HList![];
    fn wit_type_name() -> Cow<'static, str> {
        "secp256k1-public-key".into()
    }
    fn wit_type_declaration() -> Cow<'static, str> {
        concat!(
            "    record secp256k1-public-key {\n",
            "        part1: u64,\n",
            "        part2: u64,\n",
            "        part3: u64,\n",
            "        part4: u64,\n",
            "        part5: u8\n",
            "    }\n",
        )
        .into()
    }
}
impl WitLoad for Secp256k1PublicKey {
    /// Loads the five tuple parts from guest memory and reassembles the key.
    fn load<Instance>(
        memory: &Memory<'_, Instance>,
        location: GuestPointer,
    ) -> Result<Self, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = WitLoad::load(memory, location)?;
        Ok(Self::from((part1, part2, part3, part4, part5)))
    }
    /// Lifts the five tuple parts from the flat layout and reassembles the key.
    fn lift_from<Instance>(
        flat_layout: <Self::Layout as Layout>::Flat,
        memory: &Memory<'_, Instance>,
    ) -> Result<Self, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = WitLoad::lift_from(flat_layout, memory)?;
        Ok(Self::from((part1, part2, part3, part4, part5)))
    }
}
impl WitStore for Secp256k1PublicKey {
    /// Stores the key into guest memory as its five tuple parts.
    fn store<Instance>(
        &self,
        memory: &mut Memory<'_, Instance>,
        location: GuestPointer,
    ) -> Result<(), RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = (*self).into();
        (part1, part2, part3, part4, part5).store(memory, location)
    }
    /// Lowers the key into the flat layout of its five tuple parts.
    fn lower<Instance>(
        &self,
        memory: &mut Memory<'_, Instance>,
    ) -> Result<<Self::Layout as Layout>::Flat, RuntimeError>
    where
        Instance: InstanceWithMemory,
        <Instance::Runtime as Runtime>::Memory: RuntimeMemory<Instance>,
    {
        let (part1, part2, part3, part4, part5) = (*self).into();
        (part1, part2, part3, part4, part5).lower(memory)
    }
}
impl From<(u64, u64, u64, u64, u8)> for Secp256k1PublicKey {
    /// Reassembles the 33-byte compressed key from four big-endian `u64` words
    /// plus a trailing byte (the inverse of the tuple conversion below).
    ///
    /// Panics if the bytes do not form a valid compressed secp256k1 point —
    /// presumably WIT transport only carries values produced from valid keys;
    /// TODO(review): confirm.
    fn from((part1, part2, part3, part4, part5): (u64, u64, u64, u64, u8)) -> Self {
        let mut bytes = [0u8; SECP256K1_PUBLIC_KEY_SIZE];
        bytes[0..8].copy_from_slice(&part1.to_be_bytes());
        bytes[8..16].copy_from_slice(&part2.to_be_bytes());
        bytes[16..24].copy_from_slice(&part3.to_be_bytes());
        bytes[24..32].copy_from_slice(&part4.to_be_bytes());
        bytes[32] = part5;
        Self::from_bytes(&bytes).unwrap()
    }
}
impl From<Secp256k1PublicKey> for (u64, u64, u64, u64, u8) {
    /// Splits the 33-byte compressed key into four big-endian `u64` words and
    /// a trailing byte, for WIT transport.
    fn from(key: Secp256k1PublicKey) -> Self {
        let bytes = key.as_bytes();
        let word = |range: std::ops::Range<usize>| {
            u64::from_be_bytes(bytes[range].try_into().unwrap())
        };
        (
            word(0..8),
            word(8..16),
            word(16..24),
            word(24..32),
            bytes[32],
        )
    }
}
impl Secp256k1KeyPair {
    /// Generates a new key pair.
    #[cfg(all(with_getrandom, with_testing))]
    pub fn generate() -> Self {
        let mut rng = rand::rngs::OsRng;
        Self::generate_from(&mut rng)
    }
    /// Generates a new key pair from the given RNG. Use with care.
    #[cfg(with_getrandom)]
    pub fn generate_from<R: super::CryptoRng>(rng: &mut R) -> Self {
        // The public key is derived from the fresh secret key so the two halves
        // always match.
        let secret_key = Secp256k1SecretKey(SigningKey::random(rng));
        let public_key = secret_key.public();
        Secp256k1KeyPair {
            secret_key,
            public_key,
        }
    }
}
impl Secp256k1SecretKey {
    /// Returns a public key for the given secret key.
    pub fn public(&self) -> Secp256k1PublicKey {
        Secp256k1PublicKey(*self.0.verifying_key())
    }
    /// Copies the key pair, **including the secret key**.
    ///
    /// The `Clone` and `Copy` traits are deliberately not implemented for `Secp256k1SecretKey` to prevent
    /// accidental copies of secret keys.
    pub fn copy(&self) -> Self {
        Self(self.0.clone())
    }
    /// Generates a new key pair.
    #[cfg(all(with_getrandom, with_testing))]
    pub fn generate() -> Self {
        let mut rng = rand::rngs::OsRng;
        Self::generate_from(&mut rng)
    }
    /// Generates a new key pair from the given RNG. Use with care.
    #[cfg(with_getrandom)]
    pub fn generate_from<R: super::CryptoRng>(rng: &mut R) -> Self {
        Secp256k1SecretKey(SigningKey::random(rng))
    }
}
impl Secp256k1Signature {
    /// Computes a secp256k1 signature for `value` using the given `secret`.
    /// It first serializes the `T` type and then creates the `CryptoHash` from the serialized bytes.
    pub fn new<'de, T>(value: &T, secret: &Secp256k1SecretKey) -> Self
    where
        T: BcsSignable<'de>,
    {
        Self::sign_prehash(secret, CryptoHash::new(value))
    }
    /// Computes a signature from a prehash.
    ///
    /// Signs the 32-byte hash directly — unlike `EvmSignature`, no EIP-191
    /// envelope is applied and no recovery id is retained.
    pub fn sign_prehash(secret: &Secp256k1SecretKey, prehash: CryptoHash) -> Self {
        use k256::ecdsa::signature::hazmat::PrehashSigner;
        let (signature, _rid) = secret
            .0
            .sign_prehash(&prehash.as_bytes().0)
            .expect("Failed to sign prehashed data"); // NOTE: This is a critical error we don't control.
        Secp256k1Signature(signature)
    }
    /// Checks a signature.
    pub fn check<'de, T>(&self, value: &T, author: Secp256k1PublicKey) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        let prehash = CryptoHash::new(value).as_bytes().0;
        self.verify_inner::<T>(prehash, author)
    }
    /// Verifies a batch of signatures.
    ///
    /// Returns an error on first failed signature.
    pub fn verify_batch<'a, 'de, T, I>(value: &'a T, votes: I) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
        I: IntoIterator<Item = &'a (Secp256k1PublicKey, Secp256k1Signature)>,
    {
        // The hash of `value` is computed once and shared across all checks.
        let prehash = CryptoHash::new(value).as_bytes().0;
        for (author, signature) in votes {
            signature.verify_inner::<T>(prehash, *author)?;
        }
        Ok(())
    }
    /// Returns the byte representation of the signature.
    pub fn as_bytes(&self) -> [u8; SECP256K1_SIGNATURE_SIZE] {
        self.0.to_bytes().into()
    }
    /// Verifies `self` over the raw `prehash` for `author`.
    fn verify_inner<'de, T>(
        &self,
        prehash: [u8; 32],
        author: Secp256k1PublicKey,
    ) -> Result<(), CryptoError>
    where
        T: BcsSignable<'de> + fmt::Debug,
    {
        use k256::ecdsa::signature::hazmat::PrehashVerifier;
        author
            .0
            .verify_prehash(&prehash, &self.0)
            .map_err(|error| CryptoError::InvalidSignature {
                error: error.to_string(),
                type_name: T::type_name().to_string(),
            })
    }
    /// Creates a signature from the bytes.
    /// Expects the signature to be serialized in raw-bytes form.
    pub fn from_slice<A: AsRef<[u8]>>(bytes: A) -> Result<Self, CryptoError> {
        let sig = k256::ecdsa::Signature::from_slice(bytes.as_ref())
            .map_err(CryptoError::Secp256k1Error)?;
        Ok(Secp256k1Signature(sig))
    }
}
impl Serialize for Secp256k1Signature {
    /// Serializes the signature: hex string for human-readable formats,
    /// otherwise a fixed-size 64-byte newtype (see `serde_utils`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            serializer.serialize_str(&hex::encode(self.as_bytes()))
        } else {
            let compact = serde_utils::CompactSignature(self.as_bytes());
            serializer.serialize_newtype_struct("Secp256k1Signature", &compact)
        }
    }
}
impl<'de> Deserialize<'de> for Secp256k1Signature {
    /// Deserializes a signature, mirroring `Serialize`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = hex::decode(s).map_err(serde::de::Error::custom)?;
            Self::from_slice(&value).map_err(serde::de::Error::custom)
        } else {
            // Local mirror type so the newtype name matches what `serialize` wrote.
            #[derive(Deserialize)]
            #[serde(rename = "Secp256k1Signature")]
            struct Signature(serde_utils::CompactSignature);
            let value = Signature::deserialize(deserializer)?;
            Self::from_slice(value.0 .0.as_ref()).map_err(serde::de::Error::custom)
        }
    }
}
impl fmt::Display for Secp256k1Signature {
    /// Formats the signature as lowercase hex of its raw bytes.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str(&hex::encode(self.as_bytes()))
    }
}
impl fmt::Debug for Secp256k1Signature {
    /// Shows only the first 9 bytes (18 hex characters) followed by `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let prefix = &self.as_bytes()[..9];
        write!(formatter, "{}..", hex::encode(prefix))
    }
}
doc_scalar!(Secp256k1Signature, "A secp256k1 signature value");
doc_scalar!(Secp256k1PublicKey, "A secp256k1 public key value");
mod serde_utils {
    // Helper newtypes giving fixed-length serde representations for the
    // 64-byte signature and 33-byte compressed public key.
    use serde::{Deserialize, Serialize};
    use serde_with::serde_as;
    use super::{SECP256K1_PUBLIC_KEY_SIZE, SECP256K1_SIGNATURE_SIZE};
    /// Wrapper around compact signature serialization
    /// so that we can implement custom serializer for it that uses fixed length.
    // Serde treats arrays larger than 32 as variable length arrays, and adds the length as a prefix.
    // Since we want a fixed size representation, we wrap it in this helper struct and use serde_as.
    #[serde_as]
    #[derive(Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CompactSignature(#[serde_as(as = "[_; 64]")] pub [u8; SECP256K1_SIGNATURE_SIZE]);
    /// Fixed-length wrapper for the compressed public key bytes.
    #[serde_as]
    #[derive(Serialize, Deserialize)]
    #[serde(transparent)]
    pub struct CompressedPublicKey(#[serde_as(as = "[_; 33]")] pub [u8; SECP256K1_PUBLIC_KEY_SIZE]);
}
#[cfg(with_testing)]
mod tests {
    // Unit tests for the plain secp256k1 primitives: signing/checking and
    // serde round-trips in both human-readable and binary formats.
    #[test]
    fn test_signatures() {
        use serde::{Deserialize, Serialize};
        use crate::crypto::{
            secp256k1::{Secp256k1KeyPair, Secp256k1Signature},
            BcsSignable, TestString,
        };
        // A distinct signable type: its signature must not validate for
        // `TestString`, even with identical content.
        #[derive(Debug, Serialize, Deserialize)]
        struct Foo(String);
        impl BcsSignable<'_> for Foo {}
        let keypair1 = Secp256k1KeyPair::generate();
        let keypair2 = Secp256k1KeyPair::generate();
        let ts = TestString("hello".into());
        let tsx = TestString("hellox".into());
        let foo = Foo("hello".into());
        let s = Secp256k1Signature::new(&ts, &keypair1.secret_key);
        assert!(s.check(&ts, keypair1.public_key).is_ok());
        assert!(s.check(&ts, keypair2.public_key).is_err());
        assert!(s.check(&tsx, keypair1.public_key).is_err());
        assert!(s.check(&foo, keypair1.public_key).is_err());
    }
    #[test]
    fn test_public_key_serialization() {
        use crate::crypto::secp256k1::Secp256k1PublicKey;
        let key_in = Secp256k1PublicKey::test_key(0);
        // Human-readable (JSON) round trip.
        let s = serde_json::to_string(&key_in).unwrap();
        let key_out: Secp256k1PublicKey = serde_json::from_str(&s).unwrap();
        assert_eq!(key_out, key_in);
        // Binary (BCS) round trip.
        let s = bcs::to_bytes(&key_in).unwrap();
        let key_out: Secp256k1PublicKey = bcs::from_bytes(&s).unwrap();
        assert_eq!(key_out, key_in);
    }
    #[test]
    fn test_secret_key_serialization() {
        use crate::crypto::secp256k1::{Secp256k1KeyPair, Secp256k1SecretKey};
        // Secret keys only support human-readable serialization (JSON).
        let key_in = Secp256k1KeyPair::generate().secret_key;
        let s = serde_json::to_string(&key_in).unwrap();
        let key_out: Secp256k1SecretKey = serde_json::from_str(&s).unwrap();
        assert_eq!(key_out, key_in);
    }
    #[test]
    fn test_signature_serialization() {
        use crate::crypto::{
            secp256k1::{Secp256k1KeyPair, Secp256k1Signature},
            TestString,
        };
        let keypair = Secp256k1KeyPair::generate();
        let sig = Secp256k1Signature::new(&TestString("hello".into()), &keypair.secret_key);
        // JSON and BCS round trips.
        let s = serde_json::to_string(&sig).unwrap();
        let sig2: Secp256k1Signature = serde_json::from_str(&s).unwrap();
        assert_eq!(sig, sig2);
        let s = bcs::to_bytes(&sig).unwrap();
        let sig2: Secp256k1Signature = bcs::from_bytes(&s).unwrap();
        assert_eq!(sig, sig2);
    }
    #[test]
    fn public_key_from_str() {
        use std::str::FromStr;
        use crate::crypto::secp256k1::Secp256k1PublicKey;
        // Display -> FromStr round trip.
        let key = Secp256k1PublicKey::test_key(0);
        let s = key.to_string();
        let key2 = Secp256k1PublicKey::from_str(s.as_str()).unwrap();
        assert_eq!(key, key2);
    }
    #[test]
    fn bytes_repr_compact_public_key() {
        use crate::crypto::secp256k1::{Secp256k1PublicKey, SECP256K1_PUBLIC_KEY_SIZE};
        let key_in: Secp256k1PublicKey = Secp256k1PublicKey::test_key(0);
        let bytes = key_in.as_bytes();
        assert!(
            bytes.len() == SECP256K1_PUBLIC_KEY_SIZE,
            "::to_bytes() should return compressed representation"
        );
        let key_out = Secp256k1PublicKey::from_bytes(&bytes).unwrap();
        assert_eq!(key_in, key_out);
    }
    #[test]
    fn human_readable_ser() {
        use crate::crypto::{
            secp256k1::{Secp256k1KeyPair, Secp256k1Signature},
            TestString,
        };
        let key_pair = Secp256k1KeyPair::generate();
        let sig = Secp256k1Signature::new(&TestString("hello".into()), &key_pair.secret_key);
        let s = serde_json::to_string(&sig).unwrap();
        let sig2: Secp256k1Signature = serde_json::from_str(&s).unwrap();
        assert_eq!(sig, sig2);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-base/tests/command_tests.rs | linera-base/tests/command_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// TODO(#2239): these tests fail to build
#![cfg(any())]
use std::path::Path;
use linera_base::util;
#[test_log::test(tokio::test)]
async fn test_resolve_binary_with_test_default() {
let path = util::resolve_binary("linera", "linera-service")
.await
.unwrap();
assert!(path.exists());
// Since we're in a test, we can use the environment variables `CARGO_BIN_EXE_*`.
assert_eq!(path, Path::new(env!("CARGO_BIN_EXE_linera")));
}
#[test_log::test(tokio::test)]
async fn test_resolve_binary_from_relative_path() {
let debug_or_release = Path::new(env!("CARGO_BIN_EXE_linera"))
.parent()
.unwrap()
.file_name()
.unwrap();
let path = util::resolve_binary_in_same_directory_as(
Path::new("../target").join(debug_or_release).join("linera"),
"linera-proxy",
"linera-service",
)
.await
.unwrap();
assert!(path.exists());
assert_eq!(path, Path::new(env!("CARGO_BIN_EXE_linera-proxy")));
}
#[test_log::test(tokio::test)]
async fn test_resolve_binary_from_absolute_path() {
let path = util::resolve_binary_in_same_directory_as(
env!("CARGO_BIN_EXE_linera"),
"linera-proxy",
"linera-service",
)
.await
.unwrap();
assert!(path.exists());
assert_eq!(path, Path::new(env!("CARGO_BIN_EXE_linera-proxy")));
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-storage/build.rs | linera-storage/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() {
cfg_aliases::cfg_aliases! {
with_testing: { any(test, feature = "test") },
with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
with_wasmer: { all(any(feature = "web", not(target_arch = "wasm32")), feature = "wasmer") },
with_wasmtime: { all(not(target_arch = "wasm32"), feature = "wasmtime") },
with_wasm_runtime: { any(with_wasmer, with_wasmtime) },
with_revm: { feature = "revm" },
web: { all(target_arch = "wasm32", feature = "web") },
};
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-storage/src/lib.rs | linera-storage/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module defines the storage abstractions for individual chains and certificates.
mod db_storage;
use std::sync::Arc;
use async_trait::async_trait;
use itertools::Itertools;
use linera_base::{
crypto::CryptoHash,
data_types::{
ApplicationDescription, Blob, ChainDescription, CompressedBytecode, NetworkDescription,
TimeDelta, Timestamp,
},
identifiers::{ApplicationId, BlobId, ChainId, EventId, IndexAndEvent, StreamId},
vm::VmRuntime,
};
use linera_chain::{
types::{ConfirmedBlock, ConfirmedBlockCertificate},
ChainError, ChainStateView,
};
#[cfg(with_revm)]
use linera_execution::{
evm::revm::{EvmContractModule, EvmServiceModule},
EvmRuntime,
};
use linera_execution::{
BlobState, ExecutionError, ExecutionRuntimeConfig, ExecutionRuntimeContext, TransactionTracker,
UserContractCode, UserServiceCode, WasmRuntime,
};
#[cfg(with_wasm_runtime)]
use linera_execution::{WasmContractModule, WasmServiceModule};
use linera_views::{context::Context, views::RootView, ViewError};
#[cfg(with_metrics)]
pub use crate::db_storage::metrics;
#[cfg(with_testing)]
pub use crate::db_storage::TestClock;
pub use crate::db_storage::{ChainStatesFirstAssignment, DbStorage, WallClock};
/// The default namespace to be used when none is specified
pub const DEFAULT_NAMESPACE: &str = "default";
/// Communicate with a persistent storage using the "views" abstraction.
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
pub trait Storage: linera_base::util::traits::AutoTraits + Sized {
/// The low-level storage implementation in use by the core protocol (chain workers etc).
type Context: Context<Extra = ChainRuntimeContext<Self>> + Clone + 'static;
/// The clock type being used.
type Clock: Clock;
/// The low-level storage implementation in use by the block exporter.
type BlockExporterContext: Context<Extra = u32> + Clone;
/// Returns the current wall clock time.
fn clock(&self) -> &Self::Clock;
fn thread_pool(&self) -> &Arc<linera_execution::ThreadPool>;
/// Loads the view of a chain state.
///
/// # Notes
///
/// Each time this method is called, a new [`ChainStateView`] is created. If there are multiple
/// instances of the same chain active at any given moment, they will race to access persistent
/// storage. This can lead to invalid states and data corruption.
async fn load_chain(&self, id: ChainId) -> Result<ChainStateView<Self::Context>, ViewError>;
/// Tests the existence of a blob with the given blob ID.
async fn contains_blob(&self, blob_id: BlobId) -> Result<bool, ViewError>;
/// Returns what blobs from the input are missing from storage.
async fn missing_blobs(&self, blob_ids: &[BlobId]) -> Result<Vec<BlobId>, ViewError>;
/// Tests existence of a blob state with the given blob ID.
async fn contains_blob_state(&self, blob_id: BlobId) -> Result<bool, ViewError>;
/// Reads the hashed certificate value with the given hash.
async fn read_confirmed_block(
&self,
hash: CryptoHash,
) -> Result<Option<ConfirmedBlock>, ViewError>;
/// Reads the blob with the given blob ID.
async fn read_blob(&self, blob_id: BlobId) -> Result<Option<Blob>, ViewError>;
/// Reads the blobs with the given blob IDs.
async fn read_blobs(&self, blob_ids: &[BlobId]) -> Result<Vec<Option<Blob>>, ViewError>;
/// Reads the blob state with the given blob ID.
async fn read_blob_state(&self, blob_id: BlobId) -> Result<Option<BlobState>, ViewError>;
/// Reads the blob states with the given blob IDs.
async fn read_blob_states(
&self,
blob_ids: &[BlobId],
) -> Result<Vec<Option<BlobState>>, ViewError>;
/// Writes the given blob.
async fn write_blob(&self, blob: &Blob) -> Result<(), ViewError>;
/// Writes blobs and certificate
async fn write_blobs_and_certificate(
&self,
blobs: &[Blob],
certificate: &ConfirmedBlockCertificate,
) -> Result<(), ViewError>;
/// Writes the given blobs, but only if they already have a blob state. Returns `true` for the
/// blobs that were written.
async fn maybe_write_blobs(&self, blobs: &[Blob]) -> Result<Vec<bool>, ViewError>;
/// Attempts to write the given blob state. Returns the latest `Epoch` to have used this blob.
async fn maybe_write_blob_states(
&self,
blob_ids: &[BlobId],
blob_state: BlobState,
) -> Result<(), ViewError>;
/// Writes several blobs.
async fn write_blobs(&self, blobs: &[Blob]) -> Result<(), ViewError>;
/// Tests existence of the certificate with the given hash.
async fn contains_certificate(&self, hash: CryptoHash) -> Result<bool, ViewError>;
/// Reads the certificate with the given hash.
async fn read_certificate(
&self,
hash: CryptoHash,
) -> Result<Option<ConfirmedBlockCertificate>, ViewError>;
/// Reads a number of certificates
async fn read_certificates<I: IntoIterator<Item = CryptoHash> + Send>(
&self,
hashes: I,
) -> Result<Vec<Option<ConfirmedBlockCertificate>>, ViewError>;
/// Reads certificates by hashes.
///
/// Returns a vector of tuples where the first element is a lite certificate
/// and the second element is confirmed block.
///
/// It does not check if all hashes all returned.
async fn read_certificates_raw<I: IntoIterator<Item = CryptoHash> + Send>(
&self,
hashes: I,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ViewError>;
/// Reads the event with the given ID.
async fn read_event(&self, id: EventId) -> Result<Option<Vec<u8>>, ViewError>;
/// Tests existence of the event with the given ID.
async fn contains_event(&self, id: EventId) -> Result<bool, ViewError>;
/// Lists all the events from a starting index
async fn read_events_from_index(
&self,
chain_id: &ChainId,
stream_id: &StreamId,
start_index: u32,
) -> Result<Vec<IndexAndEvent>, ViewError>;
/// Writes a vector of events.
async fn write_events(
&self,
events: impl IntoIterator<Item = (EventId, Vec<u8>)> + Send,
) -> Result<(), ViewError>;
/// Reads the network description.
async fn read_network_description(&self) -> Result<Option<NetworkDescription>, ViewError>;
/// Writes the network description.
async fn write_network_description(
&self,
information: &NetworkDescription,
) -> Result<(), ViewError>;
/// Initializes a chain in a simple way (used for testing and to create a genesis state).
///
/// # Notes
///
/// This method creates a new [`ChainStateView`] instance. If there are multiple instances of
/// the same chain active at any given moment, they will race to access persistent storage.
/// This can lead to invalid states and data corruption.
async fn create_chain(&self, description: ChainDescription) -> Result<(), ChainError>
where
ChainRuntimeContext<Self>: ExecutionRuntimeContext,
{
let id = description.id();
// Store the description blob.
self.write_blob(&Blob::new_chain_description(&description))
.await?;
let mut chain = self.load_chain(id).await?;
assert!(!chain.is_active(), "Attempting to create a chain twice");
let current_time = self.clock().current_time();
chain.initialize_if_needed(current_time).await?;
chain.save().await?;
Ok(())
}
/// Selects the WebAssembly runtime to use for applications (if any).
fn wasm_runtime(&self) -> Option<WasmRuntime>;
/// Creates a [`UserContractCode`] instance using the bytecode in storage referenced
/// by the `application_description`.
async fn load_contract(
&self,
application_description: &ApplicationDescription,
txn_tracker: &TransactionTracker,
) -> Result<UserContractCode, ExecutionError> {
let contract_bytecode_blob_id = application_description.contract_bytecode_blob_id();
let content = match txn_tracker.get_blob_content(&contract_bytecode_blob_id) {
Some(content) => content.clone(),
None => self
.read_blob(contract_bytecode_blob_id)
.await?
.ok_or(ExecutionError::BlobsNotFound(vec![
contract_bytecode_blob_id,
]))?
.into_content(),
};
let compressed_contract_bytecode = CompressedBytecode {
compressed_bytes: content.into_arc_bytes(),
};
#[cfg_attr(not(any(with_wasm_runtime, with_revm)), allow(unused_variables))]
let contract_bytecode = self
.thread_pool()
.run_send((), move |()| async move {
compressed_contract_bytecode.decompress()
})
.await
.await??;
match application_description.module_id.vm_runtime {
VmRuntime::Wasm => {
cfg_if::cfg_if! {
if #[cfg(with_wasm_runtime)] {
let Some(wasm_runtime) = self.wasm_runtime() else {
panic!("A Wasm runtime is required to load user applications.");
};
Ok(WasmContractModule::new(contract_bytecode, wasm_runtime)
.await?
.into())
} else {
panic!(
"A Wasm runtime is required to load user applications. \
Please enable the `wasmer` or the `wasmtime` feature flags \
when compiling `linera-storage`."
);
}
}
}
VmRuntime::Evm => {
cfg_if::cfg_if! {
if #[cfg(with_revm)] {
let evm_runtime = EvmRuntime::Revm;
Ok(EvmContractModule::new(contract_bytecode, evm_runtime)?
.into())
} else {
panic!(
"An Evm runtime is required to load user applications. \
Please enable the `revm` feature flag \
when compiling `linera-storage`."
);
}
}
}
}
}
/// Creates a [`linera-sdk::UserContract`] instance using the bytecode in storage referenced
/// by the `application_description`.
async fn load_service(
&self,
application_description: &ApplicationDescription,
txn_tracker: &TransactionTracker,
) -> Result<UserServiceCode, ExecutionError> {
let service_bytecode_blob_id = application_description.service_bytecode_blob_id();
let content = match txn_tracker.get_blob_content(&service_bytecode_blob_id) {
Some(content) => content.clone(),
None => self
.read_blob(service_bytecode_blob_id)
.await?
.ok_or(ExecutionError::BlobsNotFound(vec![
service_bytecode_blob_id,
]))?
.into_content(),
};
let compressed_service_bytecode = CompressedBytecode {
compressed_bytes: content.into_arc_bytes(),
};
#[cfg_attr(not(any(with_wasm_runtime, with_revm)), allow(unused_variables))]
let service_bytecode = self
.thread_pool()
.run_send((), move |()| async move {
compressed_service_bytecode.decompress()
})
.await
.await??;
match application_description.module_id.vm_runtime {
VmRuntime::Wasm => {
cfg_if::cfg_if! {
if #[cfg(with_wasm_runtime)] {
let Some(wasm_runtime) = self.wasm_runtime() else {
panic!("A Wasm runtime is required to load user applications.");
};
Ok(WasmServiceModule::new(service_bytecode, wasm_runtime)
.await?
.into())
} else {
panic!(
"A Wasm runtime is required to load user applications. \
Please enable the `wasmer` or the `wasmtime` feature flags \
when compiling `linera-storage`."
);
}
}
}
VmRuntime::Evm => {
cfg_if::cfg_if! {
if #[cfg(with_revm)] {
let evm_runtime = EvmRuntime::Revm;
Ok(EvmServiceModule::new(service_bytecode, evm_runtime)?
.into())
} else {
panic!(
"An Evm runtime is required to load user applications. \
Please enable the `revm` feature flag \
when compiling `linera-storage`."
);
}
}
}
}
}
async fn block_exporter_context(
&self,
block_exporter_id: u32,
) -> Result<Self::BlockExporterContext, ViewError>;
/// Lists the blob IDs in storage.
async fn list_blob_ids(&self) -> Result<Vec<BlobId>, ViewError>;
/// Lists the chain IDs in storage.
async fn list_chain_ids(&self) -> Result<Vec<ChainId>, ViewError>;
/// Lists the event IDs in storage.
async fn list_event_ids(&self) -> Result<Vec<EventId>, ViewError>;
}
/// The result of processing the obtained read certificates.
pub enum ResultReadCertificates {
Certificates(Vec<ConfirmedBlockCertificate>),
InvalidHashes(Vec<CryptoHash>),
}
impl ResultReadCertificates {
/// Creating the processed read certificates.
pub fn new(
certificates: Vec<Option<ConfirmedBlockCertificate>>,
hashes: Vec<CryptoHash>,
) -> Self {
let (certificates, invalid_hashes) = certificates
.into_iter()
.zip(hashes)
.partition_map::<Vec<_>, Vec<_>, _, _, _>(|(certificate, hash)| match certificate {
Some(cert) => itertools::Either::Left(cert),
None => itertools::Either::Right(hash),
});
if invalid_hashes.is_empty() {
Self::Certificates(certificates)
} else {
Self::InvalidHashes(invalid_hashes)
}
}
}
/// An implementation of `ExecutionRuntimeContext` suitable for the core protocol.
#[derive(Clone)]
pub struct ChainRuntimeContext<S> {
storage: S,
chain_id: ChainId,
thread_pool: Arc<linera_execution::ThreadPool>,
execution_runtime_config: ExecutionRuntimeConfig,
user_contracts: Arc<papaya::HashMap<ApplicationId, UserContractCode>>,
user_services: Arc<papaya::HashMap<ApplicationId, UserServiceCode>>,
}
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
impl<S: Storage> ExecutionRuntimeContext for ChainRuntimeContext<S> {
fn chain_id(&self) -> ChainId {
self.chain_id
}
fn thread_pool(&self) -> &Arc<linera_execution::ThreadPool> {
&self.thread_pool
}
fn execution_runtime_config(&self) -> linera_execution::ExecutionRuntimeConfig {
self.execution_runtime_config
}
fn user_contracts(&self) -> &Arc<papaya::HashMap<ApplicationId, UserContractCode>> {
&self.user_contracts
}
fn user_services(&self) -> &Arc<papaya::HashMap<ApplicationId, UserServiceCode>> {
&self.user_services
}
async fn get_user_contract(
&self,
description: &ApplicationDescription,
txn_tracker: &TransactionTracker,
) -> Result<UserContractCode, ExecutionError> {
let application_id = description.into();
let pinned = self.user_contracts.pin_owned();
if let Some(contract) = pinned.get(&application_id) {
return Ok(contract.clone());
}
let contract = self.storage.load_contract(description, txn_tracker).await?;
pinned.insert(application_id, contract.clone());
Ok(contract)
}
async fn get_user_service(
&self,
description: &ApplicationDescription,
txn_tracker: &TransactionTracker,
) -> Result<UserServiceCode, ExecutionError> {
let application_id = description.into();
let pinned = self.user_services.pin_owned();
if let Some(service) = pinned.get(&application_id) {
return Ok(service.clone());
}
let service = self.storage.load_service(description, txn_tracker).await?;
pinned.insert(application_id, service.clone());
Ok(service)
}
async fn get_blob(&self, blob_id: BlobId) -> Result<Option<Blob>, ViewError> {
self.storage.read_blob(blob_id).await
}
async fn get_event(&self, event_id: EventId) -> Result<Option<Vec<u8>>, ViewError> {
self.storage.read_event(event_id).await
}
async fn get_network_description(&self) -> Result<Option<NetworkDescription>, ViewError> {
self.storage.read_network_description().await
}
async fn contains_blob(&self, blob_id: BlobId) -> Result<bool, ViewError> {
self.storage.contains_blob(blob_id).await
}
async fn contains_event(&self, event_id: EventId) -> Result<bool, ViewError> {
self.storage.contains_event(event_id).await
}
#[cfg(with_testing)]
async fn add_blobs(
&self,
blobs: impl IntoIterator<Item = Blob> + Send,
) -> Result<(), ViewError> {
let blobs = Vec::from_iter(blobs);
self.storage.write_blobs(&blobs).await
}
#[cfg(with_testing)]
async fn add_events(
&self,
events: impl IntoIterator<Item = (EventId, Vec<u8>)> + Send,
) -> Result<(), ViewError> {
self.storage.write_events(events).await
}
}
/// A clock that can be used to get the current `Timestamp`.
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
pub trait Clock {
fn current_time(&self) -> Timestamp;
async fn sleep(&self, delta: TimeDelta);
async fn sleep_until(&self, timestamp: Timestamp);
}
#[cfg(test)]
mod tests {
use std::collections::BTreeMap;
use linera_base::{
crypto::{AccountPublicKey, CryptoHash},
data_types::{
Amount, ApplicationPermissions, Blob, BlockHeight, ChainDescription, ChainOrigin,
Epoch, InitialChainConfig, NetworkDescription, Round, Timestamp,
},
identifiers::{BlobId, BlobType, ChainId, EventId, StreamId},
ownership::ChainOwnership,
};
use linera_chain::{
block::{Block, ConfirmedBlock},
data_types::{BlockExecutionOutcome, ProposedBlock},
};
use linera_execution::BlobState;
#[cfg(feature = "dynamodb")]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(feature = "scylladb")]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{memory::MemoryDatabase, ViewError};
use test_case::test_case;
use super::*;
use crate::db_storage::DbStorage;
/// Generic test function to test Storage trait features
async fn test_storage_chain_exporter<S: Storage + Sync>(storage: &S) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
// Test clock functionality
let _current_time = storage.clock().current_time();
let test_chain_id = ChainId(CryptoHash::test_hash("test_chain"));
// Test loading a chain (this creates a chain state view)
let _chain_view = storage.load_chain(test_chain_id).await?;
// Test block exporter context
let _block_exporter_context = storage.block_exporter_context(0).await?;
Ok(())
}
async fn test_storage_blob<S: Storage + Sync>(storage: &S) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
// Create test blobs
let chain_description = ChainDescription::new(
ChainOrigin::Root(0),
InitialChainConfig {
ownership: ChainOwnership::single(AccountPublicKey::test_key(0).into()),
epoch: Epoch::ZERO,
min_active_epoch: Epoch::ZERO,
max_active_epoch: Epoch::ZERO,
balance: Amount::ZERO,
application_permissions: ApplicationPermissions::default(),
},
Timestamp::from(0),
);
let test_blob1 = Blob::new_chain_description(&chain_description);
let test_blob2 = Blob::new_data(vec![10, 20, 30]);
let test_blob3 = Blob::new_data(vec![40, 50, 60]);
// Testing blobs existence
let blob_id1 = test_blob1.id();
let blob_id2 = test_blob2.id();
let blob_id3 = test_blob3.id();
// Test blob existence before writing
assert!(!storage.contains_blob(blob_id1).await?);
assert!(!storage.contains_blob(blob_id2).await?);
assert!(!storage.contains_blob(blob_id3).await?);
// Test single blob write
storage.write_blob(&test_blob1).await?;
assert!(storage.contains_blob(blob_id1).await?);
// Test multiple blob write (write_blobs)
storage
.write_blobs(&[test_blob2.clone(), test_blob3.clone()])
.await?;
assert!(storage.contains_blob(blob_id2).await?);
assert!(storage.contains_blob(blob_id3).await?);
// Test single blob read
let read_blob = storage.read_blob(blob_id1).await?;
assert_eq!(read_blob, Some(test_blob1.clone()));
// Test multiple blob read (read_blobs)
let blob_ids = vec![blob_id1, blob_id2, blob_id3];
let read_blobs = storage.read_blobs(&blob_ids).await?;
assert_eq!(read_blobs.len(), 3);
// Verify each blob was read correctly
assert_eq!(read_blobs[0], Some(test_blob1.clone()));
assert_eq!(read_blobs[1], Some(test_blob2));
assert_eq!(read_blobs[2], Some(test_blob3));
// Test missing blobs detection
let missing_blob_id = BlobId::new(CryptoHash::test_hash("missing"), BlobType::Data);
let missing_blobs = storage.missing_blobs(&[blob_id1, missing_blob_id]).await?;
assert_eq!(missing_blobs, vec![missing_blob_id]);
// Test maybe_write_blobs (should return false as blobs don't have blob states yet)
let write_results = storage.maybe_write_blobs(&[test_blob1.clone()]).await?;
assert_eq!(write_results, vec![false]);
// Test blob state operations
let blob_state1 = BlobState {
last_used_by: None,
chain_id: ChainId(CryptoHash::test_hash("chain1")),
block_height: BlockHeight(0),
epoch: Some(Epoch::ZERO),
};
let blob_state2 = BlobState {
last_used_by: Some(CryptoHash::test_hash("cert")),
chain_id: ChainId(CryptoHash::test_hash("chain2")),
block_height: BlockHeight(1),
epoch: Some(Epoch::from(1)),
};
// Test blob state existence before writing
assert!(!storage.contains_blob_state(blob_id1).await?);
assert!(!storage.contains_blob_state(blob_id2).await?);
// Test blob state writing
storage
.maybe_write_blob_states(&[blob_id1], blob_state1.clone())
.await?;
storage
.maybe_write_blob_states(&[blob_id2], blob_state2.clone())
.await?;
// Test blob state existence after writing
assert!(storage.contains_blob_state(blob_id1).await?);
assert!(storage.contains_blob_state(blob_id2).await?);
// Test single blob state read
let read_blob_state = storage.read_blob_state(blob_id1).await?;
assert_eq!(read_blob_state, Some(blob_state1.clone()));
// Test multiple blob state read (read_blob_states)
let read_blob_states = storage.read_blob_states(&[blob_id1, blob_id2]).await?;
assert_eq!(read_blob_states.len(), 2);
// Verify blob states
assert_eq!(read_blob_states[0], Some(blob_state1));
assert_eq!(read_blob_states[1], Some(blob_state2));
// Test maybe_write_blobs now that blob states exist (should return true)
let write_results = storage.maybe_write_blobs(&[test_blob1.clone()]).await?;
assert_eq!(write_results, vec![true]);
Ok(())
}
async fn test_storage_certificate<S: Storage + Sync>(storage: &S) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
let cert_hash = CryptoHash::test_hash("certificate");
// Test certificate existence (should be false initially)
assert!(!storage.contains_certificate(cert_hash).await?);
// Test reading non-existent certificate
assert!(storage.read_certificate(cert_hash).await?.is_none());
// Test reading multiple certificates
let cert_hashes = vec![cert_hash, CryptoHash::test_hash("cert2")];
let certs_result = storage.read_certificates(cert_hashes.clone()).await?;
assert_eq!(certs_result.len(), 2);
assert!(certs_result[0].is_none());
assert!(certs_result[1].is_none());
// Test raw certificate reading
let raw_certs_result = storage.read_certificates_raw(cert_hashes).await?;
assert!(raw_certs_result.is_empty()); // No certificates exist
// Test confirmed block reading
let block_hash = CryptoHash::test_hash("block");
let block_result = storage.read_confirmed_block(block_hash).await?;
assert!(block_result.is_none());
// Test write_blobs_and_certificate functionality
// Create test blobs
let test_blob1 = Blob::new_data(vec![1, 2, 3]);
let test_blob2 = Blob::new_data(vec![4, 5, 6]);
let blobs = vec![test_blob1, test_blob2];
// Create a test certificate using the working pattern from linera-indexer tests
let chain_id = ChainId(CryptoHash::test_hash("test_chain_cert"));
// Create a minimal proposed block (genesis block)
let proposed_block = ProposedBlock {
epoch: Epoch::ZERO,
chain_id,
transactions: vec![],
previous_block_hash: None,
height: BlockHeight::ZERO,
authenticated_owner: None,
timestamp: Timestamp::default(),
};
// Create a minimal block execution outcome with proper BTreeMap types
let outcome = BlockExecutionOutcome {
messages: vec![],
state_hash: CryptoHash::default(),
oracle_responses: vec![],
events: vec![],
blobs: vec![],
operation_results: vec![],
previous_event_blocks: BTreeMap::new(),
previous_message_blocks: BTreeMap::new(),
};
let block = Block::new(proposed_block, outcome);
let confirmed_block = ConfirmedBlock::new(block);
let certificate = ConfirmedBlockCertificate::new(confirmed_block, Round::Fast, vec![]);
// Test writing blobs and certificate together
storage
.write_blobs_and_certificate(&blobs, &certificate)
.await?;
// Verify the certificate was written
let cert_hash = certificate.hash();
assert!(storage.contains_certificate(cert_hash).await?);
// Verify the certificate can be read back
let read_certificate = storage.read_certificate(cert_hash).await?;
assert!(read_certificate.is_some());
assert_eq!(read_certificate.unwrap().hash(), cert_hash);
// Verify the blobs were written
for blob in &blobs {
assert!(storage.contains_blob(blob.id()).await?);
}
Ok(())
}
async fn test_storage_event<S: Storage + Sync>(storage: &S) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
let chain_id = ChainId(CryptoHash::test_hash("test_chain"));
let stream_id = StreamId::system("test_stream");
// Test multiple events
let event_id1 = EventId {
chain_id,
stream_id: stream_id.clone(),
index: 0,
};
let event_id2 = EventId {
chain_id,
stream_id: stream_id.clone(),
index: 1,
};
let event_id3 = EventId {
chain_id,
stream_id: stream_id.clone(),
index: 2,
};
let event_data1 = vec![1, 2, 3];
let event_data2 = vec![4, 5, 6];
let event_data3 = vec![7, 8, 9];
// Test event existence before writing
assert!(!storage.contains_event(event_id1.clone()).await?);
assert!(!storage.contains_event(event_id2.clone()).await?);
// Write multiple events
storage
.write_events([
(event_id1.clone(), event_data1.clone()),
(event_id2.clone(), event_data2.clone()),
(event_id3.clone(), event_data3.clone()),
])
.await?;
// Test event existence after writing
assert!(storage.contains_event(event_id1.clone()).await?);
assert!(storage.contains_event(event_id2.clone()).await?);
assert!(storage.contains_event(event_id3.clone()).await?);
// Test individual event reading
let read_event1 = storage.read_event(event_id1).await?;
assert_eq!(read_event1, Some(event_data1));
let read_event2 = storage.read_event(event_id2).await?;
assert_eq!(read_event2, Some(event_data2));
// Test reading events from index
let events_from_index = storage
.read_events_from_index(&chain_id, &stream_id, 1)
.await?;
assert!(events_from_index.len() >= 2); // Should contain events at index 1 and 2
Ok(())
}
async fn test_storage_network_description<S: Storage + Sync>(
storage: &S,
) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
let admin_chain_id = ChainId(CryptoHash::test_hash("test_chain_second"));
let network_desc = NetworkDescription {
name: "test_network".to_string(),
genesis_config_hash: CryptoHash::test_hash("genesis_config"),
genesis_timestamp: Timestamp::from(0),
genesis_committee_blob_hash: CryptoHash::test_hash("committee"),
admin_chain_id,
};
// Test reading non-existent network description
assert!(storage.read_network_description().await?.is_none());
// Write network description
storage.write_network_description(&network_desc).await?;
// Test reading existing network description
let read_desc = storage.read_network_description().await?;
assert_eq!(read_desc, Some(network_desc));
Ok(())
}
/// Generic test function to test Storage trait features
#[test_case(DbStorage::<MemoryDatabase, _>::make_test_storage(None).await; "memory")]
#[cfg_attr(feature = "dynamodb", test_case(DbStorage::<DynamoDbDatabase, _>::make_test_storage(None).await; "dynamo_db"))]
#[cfg_attr(feature = "scylladb", test_case(DbStorage::<ScyllaDbDatabase, _>::make_test_storage(None).await; "scylla_db"))]
#[test_log::test(tokio::test)]
async fn test_storage_features<S: Storage + Sync>(storage: S) -> Result<(), ViewError>
where
S::Context: Send + Sync,
{
test_storage_chain_exporter(&storage).await?;
test_storage_blob(&storage).await?;
test_storage_certificate(&storage).await?;
test_storage_event(&storage).await?;
test_storage_network_description(&storage).await?;
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-storage/src/db_storage.rs | linera-storage/src/db_storage.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::BTreeMap, fmt::Debug, sync::Arc};
use async_trait::async_trait;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
crypto::CryptoHash,
data_types::{Blob, NetworkDescription, TimeDelta, Timestamp},
identifiers::{ApplicationId, BlobId, ChainId, EventId, IndexAndEvent, StreamId},
};
use linera_chain::{
types::{CertificateValue, ConfirmedBlock, ConfirmedBlockCertificate, LiteCertificate},
ChainStateView,
};
use linera_execution::{
BlobState, ExecutionRuntimeConfig, UserContractCode, UserServiceCode, WasmRuntime,
};
use linera_views::{
backends::dual::{DualStoreRootKeyAssignment, StoreInUse},
batch::Batch,
context::ViewContext,
store::{
KeyValueDatabase, KeyValueStore, ReadableKeyValueStore as _, WritableKeyValueStore as _,
},
views::View,
ViewError,
};
use serde::{Deserialize, Serialize};
use tracing::instrument;
#[cfg(with_testing)]
use {
futures::channel::oneshot::{self, Receiver},
linera_views::{random::generate_test_namespace, store::TestKeyValueDatabase},
std::cmp::Reverse,
};
use crate::{ChainRuntimeContext, Clock, Storage};
#[cfg(with_metrics)]
pub mod metrics {
    //! Prometheus metrics recorded by the storage layer.
    //!
    //! All metrics here are label-less; call sites increment them with
    //! `.with_label_values(&[])`.

    use std::sync::LazyLock;

    use linera_base::prometheus_util::{
        exponential_bucket_latencies, register_histogram_vec, register_int_counter_vec,
    };
    use prometheus::{HistogramVec, IntCounterVec};

    /// The metric counting how often a blob is tested for existence from storage
    pub(super) static CONTAINS_BLOB_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "contains_blob",
            "The metric counting how often a blob is tested for existence from storage",
            &[],
        )
    });

    /// The metric counting how often multiple blobs are tested for existence from storage
    pub(super) static CONTAINS_BLOBS_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "contains_blobs",
            "The metric counting how often multiple blobs are tested for existence from storage",
            &[],
        )
    });

    /// The metric counting how often a blob state is tested for existence from storage
    pub(super) static CONTAINS_BLOB_STATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "contains_blob_state",
            "The metric counting how often a blob state is tested for existence from storage",
            &[],
        )
    });

    /// The metric counting how often a certificate is tested for existence from storage.
    pub(super) static CONTAINS_CERTIFICATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "contains_certificate",
            "The metric counting how often a certificate is tested for existence from storage",
            &[],
        )
    });

    /// The metric counting how often a hashed certificate value is read from storage.
    #[doc(hidden)]
    pub static READ_CONFIRMED_BLOCK_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_confirmed_block",
            "The metric counting how often a hashed confirmed block is read from storage",
            &[],
        )
    });

    /// The metric counting how often a blob is read from storage.
    #[doc(hidden)]
    pub(super) static READ_BLOB_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_blob",
            "The metric counting how often a blob is read from storage",
            &[],
        )
    });

    /// The metric counting how often a blob state is read from storage.
    #[doc(hidden)]
    pub(super) static READ_BLOB_STATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_blob_state",
            "The metric counting how often a blob state is read from storage",
            &[],
        )
    });

    /// The metric counting how often blob states are read from storage.
    #[doc(hidden)]
    pub(super) static READ_BLOB_STATES_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_blob_states",
            "The metric counting how often blob states are read from storage",
            &[],
        )
    });

    /// The metric counting how often a blob is written to storage.
    #[doc(hidden)]
    pub(super) static WRITE_BLOB_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "write_blob",
            "The metric counting how often a blob is written to storage",
            &[],
        )
    });

    /// The metric counting how often a certificate is read from storage.
    #[doc(hidden)]
    pub static READ_CERTIFICATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_certificate",
            "The metric counting how often a certificate is read from storage",
            &[],
        )
    });

    /// The metric counting how often certificates are read from storage.
    #[doc(hidden)]
    pub(super) static READ_CERTIFICATES_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_certificates",
            // Fixed typo in the exported help text ("certificate are").
            "The metric counting how often certificates are read from storage",
            &[],
        )
    });

    /// The metric counting how often a certificate is written to storage.
    #[doc(hidden)]
    pub static WRITE_CERTIFICATE_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "write_certificate",
            "The metric counting how often a certificate is written to storage",
            &[],
        )
    });

    /// The latency to load a chain state.
    #[doc(hidden)]
    pub(crate) static LOAD_CHAIN_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "load_chain_latency",
            "The latency to load a chain state",
            &[],
            exponential_bucket_latencies(10.0),
        )
    });

    /// The metric counting how often an event is read from storage.
    #[doc(hidden)]
    pub(super) static READ_EVENT_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "read_event",
            "The metric counting how often an event is read from storage",
            &[],
        )
    });

    /// The metric counting how often an event is tested for existence from storage
    pub(super) static CONTAINS_EVENT_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "contains_event",
            "The metric counting how often an event is tested for existence from storage",
            &[],
        )
    });

    /// The metric counting how often an event is written to storage.
    #[doc(hidden)]
    pub(super) static WRITE_EVENT_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "write_event",
            "The metric counting how often an event is written to storage",
            &[],
        )
    });

    /// The metric counting how often the network description is read from storage.
    // NOTE(review): the registered name is "network_description" while the write
    // counterpart is "write_network_description"; a "read_" prefix would be more
    // consistent, but renaming the metric would break existing dashboards, so it
    // is only flagged here.
    #[doc(hidden)]
    pub(super) static READ_NETWORK_DESCRIPTION: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "network_description",
            "The metric counting how often the network description is read from storage",
            &[],
        )
    });

    /// The metric counting how often the network description is written to storage.
    #[doc(hidden)]
    pub(super) static WRITE_NETWORK_DESCRIPTION: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "write_network_description",
            "The metric counting how often the network description is written to storage",
            &[],
        )
    });
}
/// The key used for blobs. The Blob ID itself is contained in the root key.
const BLOB_KEY: &[u8] = &[0];
/// The key used for blob states. The Blob ID itself is contained in the root key.
const BLOB_STATE_KEY: &[u8] = &[1];
/// The key used for lite certificates. The cryptohash itself is contained in the root key.
const LITE_CERTIFICATE_KEY: &[u8] = &[2];
/// The key used for confirmed blocks. The cryptohash itself is contained in the root key.
const BLOCK_KEY: &[u8] = &[3];
/// The key used for the network description.
const NETWORK_DESCRIPTION_KEY: &[u8] = &[4];

/// Returns the two per-partition keys that make up a stored certificate:
/// the lite certificate key followed by the confirmed-block key.
fn get_block_keys() -> Vec<Vec<u8>> {
    [LITE_CERTIFICATE_KEY, BLOCK_KEY]
        .iter()
        .map(|key| key.to_vec())
        .collect()
}
/// A write batch that spans several store partitions.
///
/// The outer map is keyed by partition root key (a serialized [`RootKey`]);
/// each entry holds the `(key, value)` pairs to write under that partition.
#[derive(Default)]
#[allow(clippy::type_complexity)]
struct MultiPartitionBatch {
    keys_value_bytes: BTreeMap<Vec<u8>, Vec<(Vec<u8>, Vec<u8>)>>,
}
impl MultiPartitionBatch {
    /// Creates an empty batch.
    fn new() -> Self {
        Self::default()
    }

    /// Queues several `(key, value)` pairs under the partition `root_key`.
    fn put_key_values(&mut self, root_key: Vec<u8>, key_values: Vec<(Vec<u8>, Vec<u8>)>) {
        self.keys_value_bytes
            .entry(root_key)
            .or_default()
            .extend(key_values);
    }

    /// Queues a single `(key, value)` pair under the partition `root_key`.
    fn put_key_value(&mut self, root_key: Vec<u8>, key: Vec<u8>, value: Vec<u8>) {
        self.put_key_values(root_key, vec![(key, value)]);
    }

    /// Queues a blob write under its blob-id partition.
    fn add_blob(&mut self, blob: &Blob) -> Result<(), ViewError> {
        #[cfg(with_metrics)]
        metrics::WRITE_BLOB_COUNTER.with_label_values(&[]).inc();
        self.put_key_value(
            RootKey::BlobId(blob.id()).bytes(),
            BLOB_KEY.to_vec(),
            blob.bytes().to_vec(),
        );
        Ok(())
    }

    /// Queues a blob-state write under the blob-id partition.
    fn add_blob_state(&mut self, blob_id: BlobId, blob_state: &BlobState) -> Result<(), ViewError> {
        self.put_key_value(
            RootKey::BlobId(blob_id).bytes(),
            BLOB_STATE_KEY.to_vec(),
            bcs::to_bytes(blob_state)?,
        );
        Ok(())
    }

    /// Queues both halves of a certificate (lite certificate and confirmed
    /// block) under the block-hash partition.
    fn add_certificate(
        &mut self,
        certificate: &ConfirmedBlockCertificate,
    ) -> Result<(), ViewError> {
        #[cfg(with_metrics)]
        metrics::WRITE_CERTIFICATE_COUNTER
            .with_label_values(&[])
            .inc();
        let root_key = RootKey::BlockHash(certificate.hash()).bytes();
        let pairs = vec![
            (
                LITE_CERTIFICATE_KEY.to_vec(),
                bcs::to_bytes(&certificate.lite_certificate())?,
            ),
            (BLOCK_KEY.to_vec(), bcs::to_bytes(&certificate.value())?),
        ];
        self.put_key_values(root_key, pairs);
        Ok(())
    }

    /// Queues an event payload under the event's chain partition.
    fn add_event(&mut self, event_id: EventId, value: Vec<u8>) -> Result<(), ViewError> {
        #[cfg(with_metrics)]
        metrics::WRITE_EVENT_COUNTER.with_label_values(&[]).inc();
        let key = to_event_key(&event_id);
        self.put_key_value(RootKey::Event(event_id.chain_id).bytes(), key, value);
        Ok(())
    }

    /// Queues a write of the network description.
    fn add_network_description(
        &mut self,
        information: &NetworkDescription,
    ) -> Result<(), ViewError> {
        #[cfg(with_metrics)]
        metrics::WRITE_NETWORK_DESCRIPTION
            .with_label_values(&[])
            .inc();
        self.put_key_value(
            RootKey::NetworkDescription.bytes(),
            NETWORK_DESCRIPTION_KEY.to_vec(),
            bcs::to_bytes(information)?,
        );
        Ok(())
    }
}
/// Main implementation of the [`Storage`] trait.
#[derive(Clone)]
pub struct DbStorage<Database, Clock = WallClock> {
    // Underlying key-value database, shared between clones of this storage.
    database: Arc<Database>,
    // Clock implementation; `WallClock` by default, `TestClock` under testing.
    clock: Clock,
    // Shared thread pool, exposed via `thread_pool()`.
    thread_pool: Arc<linera_execution::ThreadPool>,
    // Optional Wasm runtime -- presumably `None` disables Wasm execution;
    // TODO confirm against the constructors (not visible in this chunk).
    wasm_runtime: Option<WasmRuntime>,
    // Cache of user contract code, keyed by application id.
    user_contracts: Arc<papaya::HashMap<ApplicationId, UserContractCode>>,
    // Cache of user service code, keyed by application id.
    user_services: Arc<papaya::HashMap<ApplicationId, UserServiceCode>>,
    // Copied into each `ChainRuntimeContext` created by `load_chain`.
    execution_runtime_config: ExecutionRuntimeConfig,
}
/// The partition ("root key") under which a piece of data is stored.
///
/// BCS serializes the variant index first, so the `*_TAG` constants below
/// depend on the declaration order of these variants. Do NOT reorder or
/// insert variants: that would change the on-disk key layout.
#[derive(Debug, Serialize, Deserialize)]
enum RootKey {
    NetworkDescription,
    BlockExporterState(u32),
    // Variant index 2 -- must stay in sync with `CHAIN_ID_TAG`.
    ChainState(ChainId),
    BlockHash(CryptoHash),
    // Variant index 4 -- must stay in sync with `BLOB_ID_TAG`.
    BlobId(BlobId),
    // Variant index 5 -- must stay in sync with `EVENT_ID_TAG`.
    Event(ChainId),
}
/// BCS variant index of [`RootKey::ChainState`]; used to recognize
/// chain-state partitions from the first byte of a serialized root key.
const CHAIN_ID_TAG: u8 = 2;
/// BCS variant index of [`RootKey::BlobId`].
const BLOB_ID_TAG: u8 = 4;
/// BCS variant index of [`RootKey::Event`].
const EVENT_ID_TAG: u8 = 5;
impl RootKey {
    /// BCS-serializes this root key into the partition prefix used by the store.
    fn bytes(&self) -> Vec<u8> {
        // Serializing a plain enum of identifiers cannot fail.
        bcs::to_bytes(self).expect("serializing a RootKey should never fail")
    }
}
/// The per-partition portion of an `EventId`.
///
/// The chain id is already encoded in the partition root key
/// ([`RootKey::Event`]), so only the stream id and index appear in the
/// per-partition key (see `to_event_key`).
#[derive(Debug, Serialize, Deserialize)]
struct RestrictedEventId {
    pub stream_id: StreamId,
    pub index: u32,
}
/// Builds the per-partition key for an event: the BCS encoding of its
/// stream id followed by its index (the chain id lives in the root key).
fn to_event_key(event_id: &EventId) -> Vec<u8> {
    bcs::to_bytes(&RestrictedEventId {
        stream_id: event_id.stream_id.clone(),
        index: event_id.index,
    })
    .expect("serializing a RestrictedEventId should never fail")
}
/// Returns whether `root_key` addresses a chain-state partition, i.e.
/// whether it starts with the BCS tag of [`RootKey::ChainState`].
/// An empty key is not a chain state.
fn is_chain_state(root_key: &[u8]) -> bool {
    root_key.first() == Some(&CHAIN_ID_TAG)
}
/// An implementation of [`DualStoreRootKeyAssignment`] that stores the
/// chain states into the first store.
#[derive(Clone, Copy)]
pub struct ChainStatesFirstAssignment;

impl DualStoreRootKeyAssignment for ChainStatesFirstAssignment {
    fn assigned_store(root_key: &[u8]) -> Result<StoreInUse, bcs::Error> {
        // `is_chain_state` returns false for an empty key, so the empty-key
        // case falls through to the second store as before.
        let store = if is_chain_state(root_key) {
            StoreInUse::First
        } else {
            StoreInUse::Second
        };
        Ok(store)
    }
}
/// A `Clock` implementation using the system clock.
#[derive(Clone)]
pub struct WallClock;

#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
impl Clock for WallClock {
    /// Returns the current wall-clock time.
    fn current_time(&self) -> Timestamp {
        Timestamp::now()
    }

    /// Sleeps for `delta` using the platform timer.
    async fn sleep(&self, delta: TimeDelta) {
        linera_base::time::timer::sleep(delta.as_duration()).await
    }

    /// Sleeps until `timestamp`; returns immediately if it is already in the past.
    async fn sleep_until(&self, timestamp: Timestamp) {
        let delta = timestamp.delta_since(Timestamp::now());
        if delta > TimeDelta::ZERO {
            self.sleep(delta).await
        }
    }
}
/// Shared mutable state behind a `TestClock`.
#[cfg(with_testing)]
#[derive(Default)]
struct TestClockInner {
    // The simulated current time.
    time: Timestamp,
    // Pending sleepers keyed by wake-up time. `Reverse` sorts the map by
    // descending timestamp, so `split_off(&Reverse(now))` in `set` extracts
    // exactly the sleepers whose target time is <= now.
    sleeps: BTreeMap<Reverse<Timestamp>, Vec<oneshot::Sender<()>>>,
    /// Optional callback that decides whether to auto-advance for a given target timestamp.
    /// Returns `true` if the clock should auto-advance to that time.
    sleep_callback: Option<Box<dyn Fn(Timestamp) -> bool + Send + Sync>>,
}
#[cfg(with_testing)]
impl TestClockInner {
    /// Advances the clock to `time` and wakes every sleeper whose target
    /// time has been reached.
    fn set(&mut self, time: Timestamp) {
        self.time = time;
        // Keys are `Reverse(timestamp)`, so this removes and returns the
        // entries with timestamp <= `time`.
        let senders = self.sleeps.split_off(&Reverse(time));
        for sender in senders.into_values().flatten() {
            // A dropped receiver just means the sleeper went away.
            let _ = sender.send(());
        }
    }

    /// Registers a sleep of `delta` measured from the current simulated time.
    fn add_sleep(&mut self, delta: TimeDelta) -> Receiver<()> {
        let target_time = self.time.saturating_add(delta);
        self.add_sleep_until(target_time)
    }

    /// Registers a sleep that completes once the simulated clock reaches
    /// `time`. If the configured `sleep_callback` approves, the clock
    /// auto-advances instead of blocking the sleeper.
    fn add_sleep_until(&mut self, time: Timestamp) -> Receiver<()> {
        let (sender, receiver) = oneshot::channel();
        let should_auto_advance = self
            .sleep_callback
            .as_ref()
            .is_some_and(|callback| callback(time));
        if should_auto_advance && time > self.time {
            // Auto-advance mode: immediately advance the clock and complete the sleep.
            // `set` only wakes previously registered sleepers, so this new
            // one is completed explicitly below.
            self.set(time);
            let _ = sender.send(());
        } else if self.time >= time {
            // Target time already reached: complete immediately.
            let _ = sender.send(());
        } else {
            self.sleeps.entry(Reverse(time)).or_default().push(sender);
        }
        receiver
    }
}
/// A clock implementation that uses a stored number of microseconds and that can be updated
/// explicitly. All clones share the same time, and setting it in one clone updates all the others.
// The shared state lives behind an `Arc<Mutex<..>>`; `lock()` panics if the
// mutex was poisoned by a panicking holder.
#[cfg(with_testing)]
#[derive(Clone, Default)]
pub struct TestClock(Arc<std::sync::Mutex<TestClockInner>>);
#[cfg(with_testing)]
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
impl Clock for TestClock {
    /// Returns the simulated current time.
    fn current_time(&self) -> Timestamp {
        self.lock().time
    }

    /// Waits until the simulated clock has advanced by `delta`.
    /// A zero delta completes immediately without registering a sleeper.
    async fn sleep(&self, delta: TimeDelta) {
        if delta == TimeDelta::ZERO {
            return;
        }
        let pending = self.lock().add_sleep(delta);
        // The channel result is ignored: a dropped sender also wakes us up.
        let _ = pending.await;
    }

    /// Waits until the simulated clock reaches `timestamp`.
    async fn sleep_until(&self, timestamp: Timestamp) {
        let pending = self.lock().add_sleep_until(timestamp);
        let _ = pending.await;
    }
}
#[cfg(with_testing)]
impl TestClock {
    /// Creates a new clock with its time set to 0, i.e. the Unix epoch.
    pub fn new() -> Self {
        Self(Arc::default())
    }

    /// Sets the current time.
    pub fn set(&self, time: Timestamp) {
        self.lock().set(time);
    }

    /// Advances the current time by the specified delta.
    pub fn add(&self, delta: TimeDelta) {
        let mut inner = self.lock();
        let advanced = inner.time.saturating_add(delta);
        inner.set(advanced);
    }

    /// Returns the current time according to the test clock.
    pub fn current_time(&self) -> Timestamp {
        self.lock().time
    }

    /// Sets a callback that decides whether to auto-advance for each sleep call.
    ///
    /// The callback receives the target timestamp and should return `true` if the clock
    /// should auto-advance to that time, or `false` if the sleep should block normally.
    pub fn set_sleep_callback<F>(&self, callback: F)
    where
        F: Fn(Timestamp) -> bool + Send + Sync + 'static,
    {
        self.lock().sleep_callback = Some(Box::new(callback));
    }

    /// Clears the sleep callback.
    pub fn clear_sleep_callback(&self) {
        self.lock().sleep_callback = None;
    }

    /// Acquires the shared inner state, panicking if the mutex was poisoned.
    fn lock(&self) -> std::sync::MutexGuard<TestClockInner> {
        self.0.lock().expect("poisoned TestClock mutex")
    }
}
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
impl<Database, C> Storage for DbStorage<Database, C>
where
    Database: KeyValueDatabase + Clone + Send + Sync + 'static,
    Database::Store: KeyValueStore + Clone + Send + Sync + 'static,
    C: Clock + Clone + Send + Sync + 'static,
    Database::Error: Send + Sync,
{
    // View context for chain states; carries a per-chain `ChainRuntimeContext`.
    type Context = ViewContext<ChainRuntimeContext<Self>, Database::Store>;
    type Clock = C;
    type BlockExporterContext = ViewContext<u32, Database::Store>;

    /// Returns the clock backing this storage.
    fn clock(&self) -> &C {
        &self.clock
    }

    /// Returns the shared thread pool.
    fn thread_pool(&self) -> &Arc<linera_execution::ThreadPool> {
        &self.thread_pool
    }
/// Loads the state view of the given chain, opening its partition with
/// exclusive access.
#[instrument(level = "trace", skip_all, fields(chain_id = %chain_id))]
async fn load_chain(
    &self,
    chain_id: ChainId,
) -> Result<ChainStateView<Self::Context>, ViewError> {
    #[cfg(with_metrics)]
    let _metric = metrics::LOAD_CHAIN_LATENCY.measure_latency();
    // Chain states get their own partition and are opened exclusively,
    // unlike the shared read paths below.
    let store = self
        .database
        .open_exclusive(&RootKey::ChainState(chain_id).bytes())?;
    let runtime_context = ChainRuntimeContext {
        storage: self.clone(),
        thread_pool: self.thread_pool.clone(),
        chain_id,
        execution_runtime_config: self.execution_runtime_config,
        user_contracts: self.user_contracts.clone(),
        user_services: self.user_services.clone(),
    };
    let context = ViewContext::create_root_context(store, runtime_context).await?;
    ChainStateView::load(context).await
}
/// Checks whether the blob identified by `blob_id` exists in storage.
#[instrument(level = "trace", skip_all, fields(%blob_id))]
async fn contains_blob(&self, blob_id: BlobId) -> Result<bool, ViewError> {
    let partition = RootKey::BlobId(blob_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let found = store.contains_key(BLOB_KEY).await?;
    #[cfg(with_metrics)]
    metrics::CONTAINS_BLOB_COUNTER.with_label_values(&[]).inc();
    Ok(found)
}
/// Returns the subset of `blob_ids` not present in storage, preserving the
/// input order.
#[instrument(skip_all, fields(blob_count = blob_ids.len()))]
async fn missing_blobs(&self, blob_ids: &[BlobId]) -> Result<Vec<BlobId>, ViewError> {
    let mut missing = Vec::new();
    for &blob_id in blob_ids {
        let partition = RootKey::BlobId(blob_id).bytes();
        let store = self.database.open_shared(&partition)?;
        if !store.contains_key(BLOB_KEY).await? {
            missing.push(blob_id);
        }
    }
    #[cfg(with_metrics)]
    metrics::CONTAINS_BLOBS_COUNTER.with_label_values(&[]).inc();
    Ok(missing)
}
/// Checks whether a blob state is stored for `blob_id`.
#[instrument(skip_all, fields(%blob_id))]
async fn contains_blob_state(&self, blob_id: BlobId) -> Result<bool, ViewError> {
    let partition = RootKey::BlobId(blob_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let found = store.contains_key(BLOB_STATE_KEY).await?;
    #[cfg(with_metrics)]
    metrics::CONTAINS_BLOB_STATE_COUNTER
        .with_label_values(&[])
        .inc();
    Ok(found)
}
/// Reads the confirmed block stored under `hash`, if any.
#[instrument(skip_all, fields(%hash))]
async fn read_confirmed_block(
    &self,
    hash: CryptoHash,
) -> Result<Option<ConfirmedBlock>, ViewError> {
    let store = self
        .database
        .open_shared(&RootKey::BlockHash(hash).bytes())?;
    let block = store.read_value(BLOCK_KEY).await?;
    #[cfg(with_metrics)]
    metrics::READ_CONFIRMED_BLOCK_COUNTER
        .with_label_values(&[])
        .inc();
    Ok(block)
}
/// Reads the blob with the given id, if present. Increments
/// `READ_BLOB_COUNTER` once per call.
#[instrument(skip_all, fields(%blob_id))]
async fn read_blob(&self, blob_id: BlobId) -> Result<Option<Blob>, ViewError> {
    let partition = RootKey::BlobId(blob_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let bytes = store.read_value_bytes(BLOB_KEY).await?;
    #[cfg(with_metrics)]
    metrics::READ_BLOB_COUNTER.with_label_values(&[]).inc();
    match bytes {
        // The id is trusted here because the bytes were stored under it.
        Some(blob_bytes) => Ok(Some(Blob::new_with_id_unchecked(blob_id, blob_bytes))),
        None => Ok(None),
    }
}
/// Reads several blobs, returning one `Option<Blob>` per input id, in order.
#[instrument(skip_all, fields(blob_ids_len = %blob_ids.len()))]
async fn read_blobs(&self, blob_ids: &[BlobId]) -> Result<Vec<Option<Blob>>, ViewError> {
    if blob_ids.is_empty() {
        return Ok(Vec::new());
    }
    let mut blobs = Vec::with_capacity(blob_ids.len());
    for blob_id in blob_ids {
        // `read_blob` already increments `READ_BLOB_COUNTER` once per blob.
        // The previous extra `inc_by(blob_ids.len())` here double-counted
        // every read going through this method, so it was removed.
        blobs.push(self.read_blob(*blob_id).await?);
    }
    Ok(blobs)
}
/// Reads the blob state stored for `blob_id`, if any.
#[instrument(skip_all, fields(%blob_id))]
async fn read_blob_state(&self, blob_id: BlobId) -> Result<Option<BlobState>, ViewError> {
    let partition = RootKey::BlobId(blob_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let blob_state = store.read_value::<BlobState>(BLOB_STATE_KEY).await?;
    #[cfg(with_metrics)]
    metrics::READ_BLOB_STATE_COUNTER
        .with_label_values(&[])
        .inc();
    Ok(blob_state)
}
/// Reads the blob states for all `blob_ids`, in order.
#[instrument(skip_all, fields(blob_ids_len = %blob_ids.len()))]
async fn read_blob_states(
    &self,
    blob_ids: &[BlobId],
) -> Result<Vec<Option<BlobState>>, ViewError> {
    if blob_ids.is_empty() {
        return Ok(Vec::new());
    }
    let mut states = Vec::with_capacity(blob_ids.len());
    for blob_id in blob_ids {
        states.push(self.read_blob_state(*blob_id).await?);
    }
    // `READ_BLOB_STATES_COUNTER` (plural) is a separate metric from the
    // per-item `READ_BLOB_STATE_COUNTER` bumped inside `read_blob_state`.
    #[cfg(with_metrics)]
    metrics::READ_BLOB_STATES_COUNTER
        .with_label_values(&[])
        .inc_by(blob_ids.len() as u64);
    Ok(states)
}
/// Writes a single blob to storage.
#[instrument(skip_all, fields(blob_id = %blob.id()))]
async fn write_blob(&self, blob: &Blob) -> Result<(), ViewError> {
    let mut batch = MultiPartitionBatch::new();
    batch.add_blob(blob)?;
    self.write_batch(batch).await
}
/// Writes `blob_state` for each blob id, but only where it is strictly
/// newer (greater epoch) than the state already stored, or where no state
/// is stored yet.
#[instrument(skip_all, fields(blob_ids_len = %blob_ids.len()))]
async fn maybe_write_blob_states(
    &self,
    blob_ids: &[BlobId],
    blob_state: BlobState,
) -> Result<(), ViewError> {
    if blob_ids.is_empty() {
        return Ok(());
    }
    // First pass: read the currently stored state of every blob id.
    let mut maybe_blob_states = Vec::new();
    for blob_id in blob_ids {
        let root_key = RootKey::BlobId(*blob_id).bytes();
        let store = self.database.open_shared(&root_key)?;
        let maybe_blob_state = store.read_value::<BlobState>(BLOB_STATE_KEY).await?;
        maybe_blob_states.push(maybe_blob_state);
    }
    // Second pass: queue an update wherever the incoming state wins.
    let mut batch = MultiPartitionBatch::new();
    for (maybe_blob_state, blob_id) in maybe_blob_states.iter().zip(blob_ids) {
        match maybe_blob_state {
            None => {
                batch.add_blob_state(*blob_id, &blob_state)?;
            }
            Some(state) => {
                if state.epoch < blob_state.epoch {
                    batch.add_blob_state(*blob_id, &blob_state)?;
                }
            }
        }
    }
    // We tolerate race conditions because two active chains are likely to
    // be both from the latest epoch, and otherwise failing to pick the
    // more recent blob state has limited impact.
    self.write_batch(batch).await?;
    Ok(())
}
/// Persists only the blobs that already have a blob state in storage, and
/// reports for each input blob whether it was persisted.
#[instrument(skip_all, fields(blobs_len = %blobs.len()))]
async fn maybe_write_blobs(&self, blobs: &[Blob]) -> Result<Vec<bool>, ViewError> {
    if blobs.is_empty() {
        return Ok(Vec::new());
    }
    let mut batch = MultiPartitionBatch::new();
    let mut blob_states = Vec::new();
    for blob in blobs {
        let root_key = RootKey::BlobId(blob.id()).bytes();
        let store = self.database.open_shared(&root_key)?;
        let has_state = store.contains_key(BLOB_STATE_KEY).await?;
        blob_states.push(has_state);
        if has_state {
            batch.add_blob(blob)?;
        }
    }
    // NOTE(review): the state check and the write are not atomic; this
    // appears to accept the same kind of race tolerated by
    // `maybe_write_blob_states` -- confirm with the storage design notes.
    self.write_batch(batch).await?;
    Ok(blob_states)
}
/// Writes all given blobs in a single batch.
#[instrument(skip_all, fields(blobs_len = %blobs.len()))]
async fn write_blobs(&self, blobs: &[Blob]) -> Result<(), ViewError> {
    if blobs.is_empty() {
        return Ok(());
    }
    let mut batch = MultiPartitionBatch::new();
    blobs.iter().try_for_each(|blob| batch.add_blob(blob))?;
    self.write_batch(batch).await
}
/// Atomically batches the given blobs together with a certificate and
/// writes them to storage.
#[instrument(skip_all, fields(blobs_len = %blobs.len()))]
async fn write_blobs_and_certificate(
    &self,
    blobs: &[Blob],
    certificate: &ConfirmedBlockCertificate,
) -> Result<(), ViewError> {
    let mut batch = MultiPartitionBatch::new();
    blobs.iter().try_for_each(|blob| batch.add_blob(blob))?;
    batch.add_certificate(certificate)?;
    self.write_batch(batch).await
}
/// Checks whether a full certificate (lite certificate AND confirmed
/// block) is stored under `hash`.
#[instrument(skip_all, fields(%hash))]
async fn contains_certificate(&self, hash: CryptoHash) -> Result<bool, ViewError> {
    let store = self
        .database
        .open_shared(&RootKey::BlockHash(hash).bytes())?;
    let found = store.contains_keys(&get_block_keys()).await?;
    #[cfg(with_metrics)]
    metrics::CONTAINS_CERTIFICATE_COUNTER
        .with_label_values(&[])
        .inc();
    // Both halves must be present for the certificate to count as stored.
    Ok(found[0] && found[1])
}
/// Reads the certificate stored under `hash`, reassembling it from its
/// lite-certificate and confirmed-block halves.
#[instrument(skip_all, fields(%hash))]
async fn read_certificate(
    &self,
    hash: CryptoHash,
) -> Result<Option<ConfirmedBlockCertificate>, ViewError> {
    let store = self
        .database
        .open_shared(&RootKey::BlockHash(hash).bytes())?;
    let values = store.read_multi_values_bytes(&get_block_keys()).await?;
    #[cfg(with_metrics)]
    metrics::READ_CERTIFICATE_COUNTER
        .with_label_values(&[])
        .inc();
    Self::deserialize_certificate(&values, hash)
}
/// Reads, for each hash, the (lite certificate, confirmed block) pair and
/// reassembles it into a full certificate; `None` entries correspond to
/// missing certificates.
#[instrument(skip_all)]
async fn read_certificates<I: IntoIterator<Item = CryptoHash> + Send>(
    &self,
    hashes: I,
) -> Result<Vec<Option<ConfirmedBlockCertificate>>, ViewError> {
    let hashes = hashes.into_iter().collect::<Vec<_>>();
    if hashes.is_empty() {
        return Ok(Vec::new());
    }
    // Presumably one root key per hash, in the same order as `hashes` --
    // TODO confirm against `get_root_keys_for_certificates` (not visible here).
    let root_keys = Self::get_root_keys_for_certificates(&hashes);
    let mut values = Vec::new();
    for root_key in root_keys {
        let store = self.database.open_shared(&root_key)?;
        // Each partition contributes exactly two entries (lite certificate,
        // then block), keeping `values` aligned with `hashes` in pairs.
        values.extend(store.read_multi_values_bytes(&get_block_keys()).await?);
    }
    #[cfg(with_metrics)]
    metrics::READ_CERTIFICATES_COUNTER
        .with_label_values(&[])
        .inc_by(hashes.len() as u64);
    let mut certificates = Vec::new();
    for (pair, hash) in values.chunks_exact(2).zip(hashes) {
        let certificate = Self::deserialize_certificate(pair, hash)?;
        certificates.push(certificate);
    }
    Ok(certificates)
}
/// Reads certificates by hashes.
///
/// Returns a vector of tuples where the first element is a lite certificate
/// and the second element is a confirmed block, both as raw bytes.
///
/// It does not check that all hashes are returned: pairs with either half
/// missing are silently skipped.
#[instrument(skip_all)]
async fn read_certificates_raw<I: IntoIterator<Item = CryptoHash> + Send>(
    &self,
    hashes: I,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ViewError> {
    let hashes = hashes.into_iter().collect::<Vec<_>>();
    if hashes.is_empty() {
        return Ok(Vec::new());
    }
    let root_keys = Self::get_root_keys_for_certificates(&hashes);
    let mut values = Vec::new();
    for root_key in root_keys {
        let store = self.database.open_shared(&root_key)?;
        // Two entries per partition: lite certificate, then confirmed block.
        values.extend(store.read_multi_values_bytes(&get_block_keys()).await?);
    }
    #[cfg(with_metrics)]
    metrics::READ_CERTIFICATES_COUNTER
        .with_label_values(&[])
        .inc_by(hashes.len() as u64);
    Ok(values
        .chunks_exact(2)
        .filter_map(|chunk| {
            // Skip incomplete certificates (either half missing).
            let lite_cert_bytes = chunk[0].as_ref()?;
            let confirmed_block_bytes = chunk[1].as_ref()?;
            Some((lite_cert_bytes.clone(), confirmed_block_bytes.clone()))
        })
        .collect())
}
/// Reads the payload of the event identified by `event_id`, if present.
#[instrument(skip_all, fields(event_id = ?event_id))]
async fn read_event(&self, event_id: EventId) -> Result<Option<Vec<u8>>, ViewError> {
    let key = to_event_key(&event_id);
    let partition = RootKey::Event(event_id.chain_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let payload = store.read_value_bytes(&key).await?;
    #[cfg(with_metrics)]
    metrics::READ_EVENT_COUNTER.with_label_values(&[]).inc();
    Ok(payload)
}
/// Checks whether the event identified by `event_id` is stored.
#[instrument(skip_all, fields(event_id = ?event_id))]
async fn contains_event(&self, event_id: EventId) -> Result<bool, ViewError> {
    let key = to_event_key(&event_id);
    let partition = RootKey::Event(event_id.chain_id).bytes();
    let store = self.database.open_shared(&partition)?;
    let found = store.contains_key(&key).await?;
    #[cfg(with_metrics)]
    metrics::CONTAINS_EVENT_COUNTER.with_label_values(&[]).inc();
    Ok(found)
}
/// Reads all events of `stream_id` on `chain_id` whose index is at least
/// `start_index`, returning `(index, payload)` pairs.
#[instrument(skip_all, fields(chain_id = %chain_id, stream_id = %stream_id, start_index = %start_index))]
async fn read_events_from_index(
    &self,
    chain_id: &ChainId,
    stream_id: &StreamId,
    start_index: u32,
) -> Result<Vec<IndexAndEvent>, ViewError> {
    let root_key = RootKey::Event(*chain_id).bytes();
    let store = self.database.open_shared(&root_key)?;
    let mut keys = Vec::new();
    let mut indices = Vec::new();
    // Events of one stream share the BCS-encoded stream id as key prefix;
    // the remainder of each key is the BCS-encoded `u32` index.
    let prefix = bcs::to_bytes(stream_id).expect("serializing a StreamId should never fail");
    for short_key in store.find_keys_by_prefix(&prefix).await? {
        let index = bcs::from_bytes::<u32>(&short_key)?;
        if index >= start_index {
            let mut key = prefix.clone();
            key.extend(short_key);
            keys.push(key);
            indices.push(index);
        }
    }
    let values = store.read_multi_values_bytes(&keys).await?;
    let mut returned_values = Vec::with_capacity(indices.len());
    for (index, value) in indices.into_iter().zip(values) {
        // The keys were just listed from the same store, so the value should
        // still be present. The previous anonymous `unwrap()` is replaced by
        // an `expect` stating the invariant, so a violation (e.g. a
        // concurrent deletion) produces a diagnosable panic message.
        let event = value.expect("event listed by prefix scan is missing from storage");
        returned_values.push(IndexAndEvent { index, event });
    }
    Ok(returned_values)
}
#[instrument(skip_all)]
async fn write_events(
&self,
events: impl IntoIterator<Item = (EventId, Vec<u8>)> + Send,
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-metrics/src/lib.rs | linera-metrics/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A library for Linera server metrics.
pub mod monitoring_server;
#[cfg(feature = "memory-profiling")]
pub mod memory_profiler;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.