repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/unit_tests/runtime_tests.rs | linera-execution/src/unit_tests/runtime_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Resource consumption unit tests.
#![cfg(with_tokio_multi_thread)]
use std::{
any::Any,
sync::{Arc, Mutex},
};
use futures::{channel::mpsc, StreamExt};
use linera_base::{crypto::CryptoHash, data_types::BlockHeight, identifiers::ApplicationId};
use linera_views::batch::Batch;
use super::{ApplicationStatus, SyncRuntimeHandle, SyncRuntimeInternal, WithContext};
use crate::{
execution_state_actor::ExecutionRequest,
runtime::{LoadedApplication, ResourceController, SyncRuntime},
test_utils::{create_dummy_user_application_description, dummy_chain_description},
ContractRuntime, UserContractInstance,
};
/// Tests that dropping a [`SyncRuntime`] does not leak memory.
///
/// The fake application holds a clone of the runtime's own `Arc`, creating a
/// reference cycle; dropping the runtime must break that cycle so the weak
/// reference can no longer be upgraded.
#[test_log::test(tokio::test)]
async fn test_dropping_sync_runtime_clears_loaded_applications() -> anyhow::Result<()> {
    let (internal_runtime, _receiver) = create_runtime();
    let handle = SyncRuntimeHandle::from(internal_runtime);
    let weak_reference = Arc::downgrade(&handle.0);

    let cyclic_application = create_fake_application_with_runtime(&handle);
    {
        let mut guard = handle.0.try_lock().expect("Failed to lock runtime");
        guard
            .loaded_applications
            .insert(create_dummy_application_id(), cyclic_application);
    }

    drop(SyncRuntime(Some(handle)));

    assert!(weak_reference.upgrade().is_none());
    Ok(())
}
/// Tests that [`SyncRuntime::into_inner`] fails while applications are still
/// loaded, since extracting the inner runtime at that point would leak the
/// reference cycle kept alive by the fake application.
#[test_log::test(tokio::test)]
async fn test_into_inner_without_clearing_applications() {
    let (internal_runtime, _receiver) = create_runtime();
    let handle = SyncRuntimeHandle::from(internal_runtime);

    let cyclic_application = create_fake_application_with_runtime(&handle);
    handle
        .0
        .try_lock()
        .expect("Failed to lock runtime")
        .loaded_applications
        .insert(create_dummy_application_id(), cyclic_application);

    let runtime = SyncRuntime(Some(handle));
    assert!(runtime.into_inner().is_none());
}
/// Tests that [`SyncRuntime::into_inner`] succeeds once the loaded applications
/// have been cleared, and that no strong references to the internal runtime
/// remain afterwards.
#[test_log::test(tokio::test)]
async fn test_into_inner_after_clearing_applications() {
    let (internal_runtime, _receiver) = create_runtime();
    let handle = SyncRuntimeHandle::from(internal_runtime);
    let weak_reference = Arc::downgrade(&handle.0);

    let cyclic_application = create_fake_application_with_runtime(&handle);
    {
        let mut guard = handle.0.try_lock().expect("Failed to lock runtime");
        guard
            .loaded_applications
            .insert(create_dummy_application_id(), cyclic_application);
        guard.loaded_applications.clear();
    }

    assert!(SyncRuntime(Some(handle)).into_inner().is_some());
    assert!(weak_reference.upgrade().is_none());
}
/// Test writing a batch of changes.
///
/// Ensure that resource consumption counts are updated correctly.
///
/// Runs on the multi-threaded runtime so the spawned task below can answer the
/// request while `write_batch` waits for the reply — assumes `write_batch` blocks
/// the calling thread; TODO confirm.
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_write_batch() {
    let (runtime, mut execution_state_receiver) = create_contract_runtime();
    let mut runtime = SyncRuntimeHandle::from(runtime);

    // Build a batch containing one operation of each kind. The expected byte count
    // is the sum of the lengths of every key and value placed in the batch.
    let mut batch = Batch::new();
    let write_key = vec![1, 2, 3, 4, 5];
    let write_data = vec![6, 7, 8, 9];
    let delete_key = vec![10, 11, 12];
    let delete_key_prefix = vec![13, 14, 15, 16, 17, 18];
    let expected_bytes_count =
        write_key.len() + write_data.len() + delete_key.len() + delete_key_prefix.len();
    batch.put_key_value_bytes(write_key, write_data);
    batch.delete_key(delete_key);
    batch.delete_key_prefix(delete_key_prefix);
    let expected_write_count = batch.operations.len();
    let expected_application_id = runtime.inner().current_application().id;
    let expected_batch = batch.clone();

    // Simulate the execution state actor: receive the `WriteBatch` request, check
    // that it carries the expected application ID and batch, then acknowledge it
    // through the callback so `write_batch` below can return.
    tokio::spawn(async move {
        let request = execution_state_receiver
            .next()
            .await
            .expect("Missing expected request to write a batch");

        let ExecutionRequest::WriteBatch {
            id,
            batch,
            callback,
        } = request
        else {
            panic!("Expected a `ExecutionRequest::WriteBatch` but got {request:?} instead");
        };

        assert_eq!(id, expected_application_id);
        assert_eq!(batch, expected_batch);

        callback
            .send(())
            .expect("Failed to notify that writing the batch finished");
    });

    runtime
        .write_batch(batch)
        .expect("Failed to write test batch");

    // The tracker must have counted one write operation per batch entry and the
    // total number of key/value bytes placed in the batch.
    assert_eq!(
        runtime.inner().resource_controller.tracker.write_operations,
        expected_write_count as u32
    );
    assert_eq!(
        runtime.inner().resource_controller.tracker.bytes_written,
        expected_bytes_count as u64
    );
}
/// Creates a [`SyncRuntimeInternal`] instance for contracts, returning it together
/// with the receiving end of the channel the runtime uses to send requests to the
/// [`ExecutionStateView`] actor.
fn create_contract_runtime() -> (
    SyncRuntimeInternal<UserContractInstance>,
    mpsc::UnboundedReceiver<ExecutionRequest>,
) {
    let (mut contract_runtime, request_receiver) = create_runtime();
    contract_runtime.push_application(create_dummy_application());
    (contract_runtime, request_receiver)
}
/// Creates a [`SyncRuntimeInternal`] instance for custom `Application` types (which can
/// be invalid types).
///
/// Returns the [`SyncRuntimeInternal`] instance and the receiver endpoint for the requests the
/// runtime sends to the [`ExecutionStateView`] actor.
fn create_runtime<Application: WithContext>() -> (
    SyncRuntimeInternal<Application>,
    mpsc::UnboundedReceiver<ExecutionRequest>,
)
where
    Application::UserContext: Default,
{
    let chain_id = dummy_chain_description(0).id();
    let (execution_state_sender, execution_state_receiver) = mpsc::unbounded();
    let resource_controller = ResourceController::default();
    // NOTE(review): the positional arguments below are opaque at this call site;
    // the annotations are assumptions to confirm against the signature of
    // `SyncRuntimeInternal::new`.
    let runtime = SyncRuntimeInternal::new(
        chain_id,
        BlockHeight(0),
        Some(0),            // presumably the index of the executing transaction — confirm
        None,
        execution_state_sender,
        None,
        None,
        resource_controller,
        Default::default(), // presumably a default `UserContext` — confirm
        true,
    );
    (runtime, execution_state_receiver)
}
/// Creates an [`ApplicationStatus`] for a dummy application, deriving its ID from
/// the dummy application description.
fn create_dummy_application() -> ApplicationStatus {
    let (description, _contract, _service) = create_dummy_user_application_description(0);

    ApplicationStatus {
        id: From::from(&description),
        caller_id: None,
        signer: None,
        description,
    }
}
/// Creates a dummy [`ApplicationId`] from a fixed test hash.
fn create_dummy_application_id() -> ApplicationId {
    let description_hash = CryptoHash::test_hash("application description");
    ApplicationId::new(description_hash)
}
/// Creates a fake application instance whose "instance" is simply a clone of the
/// `runtime`'s internal `Arc`, forming the reference cycle used by the leak tests.
fn create_fake_application_with_runtime(
    runtime: &SyncRuntimeHandle<Arc<dyn Any + Send + Sync>>,
) -> LoadedApplication<Arc<dyn Any + Send + Sync>> {
    let (description, _contract, _service) = create_dummy_user_application_description(0);
    let instance: Arc<dyn Any + Send + Sync> = runtime.0.clone();

    LoadedApplication {
        instance: Arc::new(Mutex::new(instance)),
        description,
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/entrypoints.rs | linera-execution/src/wasm/entrypoints.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Wasm entrypoints for contracts and services.
use linera_base::data_types::StreamUpdate;
use linera_witty::wit_import;
/// WIT entrypoints for application contracts.
///
/// The `wit_import` attribute generates host-side code to call these functions
/// inside a guest Wasm module; the function names and signatures define the WIT
/// interface, so they must stay in sync with the guest bindings.
#[wit_import(package = "linera:app")]
pub trait ContractEntrypoints {
    /// Entrypoint invoked with the serialized instantiation argument.
    fn instantiate(argument: Vec<u8>);
    /// Entrypoint invoked with a serialized operation; returns serialized response bytes.
    fn execute_operation(operation: Vec<u8>) -> Vec<u8>;
    /// Entrypoint invoked with a serialized incoming message.
    fn execute_message(message: Vec<u8>);
    /// Entrypoint invoked with the [`StreamUpdate`]s the contract should process.
    fn process_streams(streams: Vec<StreamUpdate>);
    /// Entrypoint invoked to finalize — presumably at the end of a block's
    /// execution; TODO confirm exact timing.
    fn finalize();
}
/// WIT entrypoints for application services.
///
/// Like [`ContractEntrypoints`], this trait's name and signatures define the WIT
/// interface generated by `wit_import`.
#[wit_import(package = "linera:app")]
pub trait ServiceEntrypoints {
    /// Entrypoint invoked with a serialized query; returns serialized response bytes.
    fn handle_query(argument: Vec<u8>) -> Vec<u8>;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/runtime_api.rs | linera-execution/src/wasm/runtime_api.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{any::Any, collections::HashMap, marker::PhantomData};
use linera_base::{
data_types::{
Amount, ApplicationPermissions, BlockHeight, Bytecode, SendMessageRequest, Timestamp,
},
http,
identifiers::{Account, AccountOwner, ApplicationId, ChainId, StreamName},
ownership::{ChainOwnership, ChangeApplicationPermissionsError, CloseChainError},
vm::VmRuntime,
};
use linera_views::batch::{Batch, WriteOperation};
use linera_witty::{wit_export, Instance, RuntimeError};
use tracing::log;
use super::WasmExecutionError;
use crate::{BaseRuntime, ContractRuntime, DataBlobHash, ExecutionError, ModuleId, ServiceRuntime};
/// Common host data used as the `UserData` of the system API implementations.
pub struct RuntimeApiData<Runtime> {
    // The runtime that actually executes the system API calls.
    runtime: Runtime,
    // Promises registered via `register_promise`, keyed by the ID handed out to the
    // guest; values are type-erased because different API calls register promises
    // of different types.
    active_promises: HashMap<u32, Box<dyn Any + Send + Sync>>,
    // The ID that will be assigned to the next registered promise.
    promise_counter: u32,
}
impl<Runtime> RuntimeApiData<Runtime> {
    /// Creates a new [`RuntimeApiData`] using the provided `runtime` to execute the
    /// system APIs.
    pub fn new(runtime: Runtime) -> Self {
        RuntimeApiData {
            runtime,
            active_promises: HashMap::new(),
            promise_counter: 0,
        }
    }

    /// Returns a mutable reference to the system API `Runtime`.
    pub fn runtime_mut(&mut self) -> &mut Runtime {
        &mut self.runtime
    }

    /// Registers a `promise` internally, returning an ID that is unique for the
    /// lifetime of this [`RuntimeApiData`].
    fn register_promise<Promise>(&mut self, promise: Promise) -> u32
    where
        Promise: Send + Sync + 'static,
    {
        let id = self.promise_counter;
        self.promise_counter = id + 1;
        self.active_promises.insert(id, Box::new(promise));
        id
    }

    /// Removes and returns the `Promise` registered under `promise_id`.
    ///
    /// Fails with [`WasmExecutionError::UnknownPromise`] if no promise has that ID,
    /// and with [`WasmExecutionError::IncorrectPromise`] if the stored promise has a
    /// different type than the one requested.
    fn take_promise<Promise>(&mut self, promise_id: u32) -> Result<Promise, RuntimeError>
    where
        Promise: Send + Sync + 'static,
    {
        let Some(type_erased_promise) = self.active_promises.remove(&promise_id) else {
            return Err(RuntimeError::Custom(
                WasmExecutionError::UnknownPromise.into(),
            ));
        };

        match type_erased_promise.downcast::<Promise>() {
            Ok(promise) => Ok(*promise),
            Err(_) => Err(RuntimeError::Custom(
                WasmExecutionError::IncorrectPromise.into(),
            )),
        }
    }
}
/// An implementation of the runtime API used to access the common behaviour and
/// the view storage for both contracts and services.
///
/// The `PhantomData` ties the API to a specific `Caller` instance type without
/// storing one; all state lives in the caller's [`RuntimeApiData`].
#[derive(Default)]
pub struct BaseRuntimeApi<Caller>(PhantomData<Caller>);
#[wit_export(package = "linera:app")]
impl<Caller, Runtime> BaseRuntimeApi<Caller>
where
    Caller: Instance<UserData = RuntimeApiData<Runtime>>,
    Runtime: BaseRuntime + 'static,
{
    // Every method below delegates to the `Runtime` stored in the caller's user
    // data, mapping execution errors into `RuntimeError::Custom`.

    /// Returns the ID of the current chain.
    fn get_chain_id(caller: &mut Caller) -> Result<ChainId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .chain_id()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the height of the current block that is executing.
    fn get_block_height(caller: &mut Caller) -> Result<BlockHeight, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .block_height()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the ID of the current application.
    fn get_application_id(caller: &mut Caller) -> Result<ApplicationId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .application_id()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the chain ID of the current application creator.
    fn get_application_creator_chain_id(caller: &mut Caller) -> Result<ChainId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .application_creator_chain_id()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the application parameters provided when the application was created.
    fn application_parameters(caller: &mut Caller) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .application_parameters()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Retrieves the owner configuration for the current chain.
    fn get_chain_ownership(caller: &mut Caller) -> Result<ChainOwnership, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .chain_ownership()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Retrieves the current system time, i.e. the timestamp of the block in which this is called.
    fn read_system_timestamp(caller: &mut Caller) -> Result<Timestamp, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_system_timestamp()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the current chain balance.
    fn read_chain_balance(caller: &mut Caller) -> Result<Amount, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_chain_balance()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the balance of one of the accounts on this chain.
    fn read_owner_balance(
        caller: &mut Caller,
        owner: AccountOwner,
    ) -> Result<Amount, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_owner_balance(owner)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the balances of all accounts on the chain.
    fn read_owner_balances(
        caller: &mut Caller,
    ) -> Result<Vec<(AccountOwner, Amount)>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_owner_balances()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the owners of accounts on this chain.
    fn read_balance_owners(caller: &mut Caller) -> Result<Vec<AccountOwner>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_balance_owners()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Makes an HTTP request to the given URL and returns the response body.
    fn perform_http_request(
        caller: &mut Caller,
        request: http::Request,
    ) -> Result<http::Response, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .perform_http_request(request)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Rejects the transaction if the current time at block validation is `>= timestamp`. Note
    /// that block validation happens at or after the block timestamp, but isn't necessarily the
    /// same.
    fn assert_before(caller: &mut Caller, timestamp: Timestamp) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .assert_before(timestamp)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Reads a data blob from storage.
    fn read_data_blob(caller: &mut Caller, hash: DataBlobHash) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_data_blob(hash)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Asserts the existence of a data blob with the given hash.
    fn assert_data_blob_exists(
        caller: &mut Caller,
        hash: DataBlobHash,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .assert_data_blob_exists(hash)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Logs a `message` with the provided information `level`.
    ///
    /// Messages are dropped silently when the runtime does not allow application
    /// logs.
    fn log(caller: &mut Caller, message: String, level: log::Level) -> Result<(), RuntimeError> {
        let allowed = caller
            .user_data_mut()
            .runtime
            .allow_application_logs()
            .map_err(|error| RuntimeError::Custom(error.into()))?;

        if !allowed {
            return Ok(());
        }

        #[cfg(web)]
        {
            // Send log through the execution channel to the main thread
            caller
                .user_data_mut()
                .runtime
                .send_log(message.clone(), level);
        }

        // Also use tracing for native builds (and as a fallback on web)
        match level {
            log::Level::Trace => tracing::trace!("{message}"),
            log::Level::Debug => tracing::debug!("{message}"),
            log::Level::Info => tracing::info!("{message}"),
            log::Level::Warn => tracing::warn!("{message}"),
            log::Level::Error => tracing::error!("{message}"),
        }

        Ok(())
    }

    // The `*_new`/`*_wait` pairs below implement a two-step, promise-based storage
    // API: `*_new` starts the operation and registers a promise, returning its ID
    // to the guest; `*_wait` redeems the ID for the result.

    /// Creates a new promise to check if the `key` is in storage.
    fn contains_key_new(caller: &mut Caller, key: Vec<u8>) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .contains_key_new(key)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to check if the `key` is in storage.
    fn contains_key_wait(caller: &mut Caller, promise_id: u32) -> Result<bool, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .contains_key_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new promise to check if the `keys` are in storage.
    fn contains_keys_new(caller: &mut Caller, keys: Vec<Vec<u8>>) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .contains_keys_new(keys)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to check if the `keys` are in storage.
    fn contains_keys_wait(caller: &mut Caller, promise_id: u32) -> Result<Vec<bool>, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .contains_keys_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new promise to read multiple entries from storage.
    fn read_multi_values_bytes_new(
        caller: &mut Caller,
        keys: Vec<Vec<u8>>,
    ) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .read_multi_values_bytes_new(keys)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to read multiple entries from storage.
    fn read_multi_values_bytes_wait(
        caller: &mut Caller,
        promise_id: u32,
    ) -> Result<Vec<Option<Vec<u8>>>, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .read_multi_values_bytes_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new promise to read a single entry from storage.
    fn read_value_bytes_new(caller: &mut Caller, key: Vec<u8>) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .read_value_bytes_new(key)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to read a single entry from storage.
    fn read_value_bytes_wait(
        caller: &mut Caller,
        promise_id: u32,
    ) -> Result<Option<Vec<u8>>, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .read_value_bytes_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new promise to search for keys that start with the `key_prefix`.
    fn find_keys_new(caller: &mut Caller, key_prefix: Vec<u8>) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .find_keys_by_prefix_new(key_prefix)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to search for keys that start with the `key_prefix`.
    fn find_keys_wait(caller: &mut Caller, promise_id: u32) -> Result<Vec<Vec<u8>>, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .find_keys_by_prefix_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new promise to search for entries whose keys start with the `key_prefix`.
    fn find_key_values_new(caller: &mut Caller, key_prefix: Vec<u8>) -> Result<u32, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data
            .runtime
            .find_key_values_by_prefix_new(key_prefix)
            .map_err(|error| RuntimeError::Custom(error.into()))?;
        Ok(data.register_promise(promise))
    }

    /// Waits for the promise to search for entries whose keys start with the `key_prefix`.
    #[expect(clippy::type_complexity)]
    fn find_key_values_wait(
        caller: &mut Caller,
        promise_id: u32,
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, RuntimeError> {
        let mut data = caller.user_data_mut();
        let promise = data.take_promise(promise_id)?;
        data.runtime
            .find_key_values_by_prefix_wait(&promise)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }
}
/// An implementation of the system API made available to contracts.
///
/// The `PhantomData` ties the API to a specific `Caller` instance type without
/// storing one; all state lives in the caller's [`RuntimeApiData`].
#[derive(Default)]
pub struct ContractRuntimeApi<Caller>(PhantomData<Caller>);
#[wit_export(package = "linera:app")]
impl<Caller, Runtime> ContractRuntimeApi<Caller>
where
    Caller: Instance<UserData = RuntimeApiData<Runtime>>,
    Runtime: ContractRuntime + 'static,
{
    // Every method below delegates to the `Runtime` stored in the caller's user
    // data, mapping execution errors into `RuntimeError::Custom`.

    /// Returns the authenticated owner for this execution, if there is one.
    fn authenticated_owner(caller: &mut Caller) -> Result<Option<AccountOwner>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .authenticated_owner()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns `Some(true)` if the incoming message was rejected from the original destination and
    /// is now bouncing back, `Some(false)` if the message is currently being delivered to
    /// its original destination, or [`None`] if not executing an incoming message.
    fn message_is_bouncing(caller: &mut Caller) -> Result<Option<bool>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .message_is_bouncing()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the chain ID where the current message originated from, or [`None`] if not executing
    /// an incoming message.
    fn message_origin_chain_id(caller: &mut Caller) -> Result<Option<ChainId>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .message_origin_chain_id()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns the authenticated caller ID, if the caller configured it for the current
    /// call context (NOTE(review): the original doc sentence was truncated — confirm the
    /// exact condition against `ContractRuntime::authenticated_caller_id`).
    fn authenticated_caller_id(caller: &mut Caller) -> Result<Option<ApplicationId>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .authenticated_caller_id()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Schedules a message to be sent to this application on another chain.
    fn send_message(
        caller: &mut Caller,
        message: SendMessageRequest<Vec<u8>>,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .send_message(message)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Transfers an `amount` of native tokens from `source` owner account (or the current chain's
    /// balance) to `destination`.
    fn transfer(
        caller: &mut Caller,
        source: AccountOwner,
        destination: Account,
        amount: Amount,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .transfer(source, destination, amount)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Claims an `amount` of native tokens from a `source` account to a `destination` account.
    fn claim(
        caller: &mut Caller,
        source: Account,
        destination: Account,
        amount: Amount,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .claim(source, destination, amount)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Opens a new chain, configuring it with the provided `chain_ownership`,
    /// `application_permissions` and initial `balance` (debited from the current chain).
    fn open_chain(
        caller: &mut Caller,
        chain_ownership: ChainOwnership,
        application_permissions: ApplicationPermissions,
        balance: Amount,
    ) -> Result<ChainId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .open_chain(chain_ownership, application_permissions, balance)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Closes the current chain. Returns an error if the application doesn't have
    /// permission to do so.
    ///
    /// The permission failure is reported as a recoverable `Ok(Err(..))` so the
    /// guest can handle it; any other error aborts execution.
    fn close_chain(caller: &mut Caller) -> Result<Result<(), CloseChainError>, RuntimeError> {
        match caller.user_data_mut().runtime.close_chain() {
            Ok(()) => Ok(Ok(())),
            Err(ExecutionError::UnauthorizedApplication(_)) => {
                Ok(Err(CloseChainError::NotPermitted))
            }
            Err(error) => Err(RuntimeError::Custom(error.into())),
        }
    }

    /// Changes the application permissions for the current chain. Returns an error if the
    /// application doesn't have permission to do so.
    ///
    /// As with `close_chain`, the permission failure is reported as a recoverable
    /// `Ok(Err(..))`; any other error aborts execution.
    fn change_application_permissions(
        caller: &mut Caller,
        application_permissions: ApplicationPermissions,
    ) -> Result<Result<(), ChangeApplicationPermissionsError>, RuntimeError> {
        match caller
            .user_data_mut()
            .runtime
            .change_application_permissions(application_permissions)
        {
            Ok(()) => Ok(Ok(())),
            Err(ExecutionError::UnauthorizedApplication(_)) => {
                Ok(Err(ChangeApplicationPermissionsError::NotPermitted))
            }
            Err(error) => Err(RuntimeError::Custom(error.into())),
        }
    }

    /// Creates a new application on the chain, based on the supplied bytecode and
    /// parameters.
    fn create_application(
        caller: &mut Caller,
        module_id: ModuleId,
        parameters: Vec<u8>,
        argument: Vec<u8>,
        required_application_ids: Vec<ApplicationId>,
    ) -> Result<ApplicationId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .create_application(module_id, parameters, argument, required_application_ids)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Creates a new data blob and returns its hash.
    fn create_data_blob(caller: &mut Caller, bytes: Vec<u8>) -> Result<DataBlobHash, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .create_data_blob(bytes)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Publishes a module with contract and service bytecode and returns the module ID.
    fn publish_module(
        caller: &mut Caller,
        contract: Bytecode,
        service: Bytecode,
        vm_runtime: VmRuntime,
    ) -> Result<ModuleId, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .publish_module(contract, service, vm_runtime)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Calls another application.
    fn try_call_application(
        caller: &mut Caller,
        authenticated: bool,
        callee_id: ApplicationId,
        argument: Vec<u8>,
    ) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .try_call_application(authenticated, callee_id, argument)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Adds a new item to an event stream. Returns the new event's index in the stream.
    fn emit(caller: &mut Caller, name: StreamName, value: Vec<u8>) -> Result<u32, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .emit(name, value)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Reads an event from a stream. Returns the event's value.
    ///
    /// Returns an error if the event doesn't exist.
    fn read_event(
        caller: &mut Caller,
        chain_id: ChainId,
        name: StreamName,
        index: u32,
    ) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .read_event(chain_id, name, index)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Subscribes this application to an event stream.
    fn subscribe_to_events(
        caller: &mut Caller,
        chain_id: ChainId,
        application_id: ApplicationId,
        name: StreamName,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .subscribe_to_events(chain_id, application_id, name)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Unsubscribes this application from an event stream.
    fn unsubscribe_from_events(
        caller: &mut Caller,
        chain_id: ChainId,
        application_id: ApplicationId,
        name: StreamName,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .unsubscribe_from_events(chain_id, application_id, name)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Queries a service and returns the response.
    fn query_service(
        caller: &mut Caller,
        application_id: ApplicationId,
        query: Vec<u8>,
    ) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .query_service(application_id, query)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Consume some fuel.
    ///
    /// This is intended for the metering instrumentation, but if the user wants to donate
    /// some extra fuel, more power to them!
    fn consume_fuel(caller: &mut Caller, fuel: u64) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime_mut()
            .consume_fuel(fuel, VmRuntime::Wasm)
            .map_err(|e| RuntimeError::Custom(e.into()))
    }

    /// Returns the multi-leader round in which this block was validated.
    fn validation_round(caller: &mut Caller) -> Result<Option<u32>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime_mut()
            .validation_round()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Writes a batch of `operations` to storage.
    fn write_batch(
        caller: &mut Caller,
        operations: Vec<WriteOperation>,
    ) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime_mut()
            .write_batch(Batch { operations })
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Returns true if the corresponding contract uses a zero amount of storage.
    fn has_empty_storage(
        caller: &mut Caller,
        application: ApplicationId,
    ) -> Result<bool, RuntimeError> {
        caller
            .user_data_mut()
            .runtime_mut()
            .has_empty_storage(application)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }
}
/// An implementation of the system API made available to services.
///
/// The `PhantomData` ties the API to a specific `Caller` instance type without
/// storing one; all state lives in the caller's [`RuntimeApiData`].
#[derive(Default)]
pub struct ServiceRuntimeApi<Caller>(PhantomData<Caller>);
#[wit_export(package = "linera:app")]
impl<Caller, Runtime> ServiceRuntimeApi<Caller>
where
    Caller: Instance<UserData = RuntimeApiData<Runtime>>,
    Runtime: ServiceRuntime + 'static,
{
    // Every method below delegates to the `Runtime` stored in the caller's user
    // data, mapping execution errors into `RuntimeError::Custom`.

    /// Schedules an operation to be included in the block being built by this query.
    fn schedule_operation(caller: &mut Caller, operation: Vec<u8>) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .schedule_operation(operation)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Queries another application.
    fn try_query_application(
        caller: &mut Caller,
        application: ApplicationId,
        argument: Vec<u8>,
    ) -> Result<Vec<u8>, RuntimeError> {
        caller
            .user_data_mut()
            .runtime
            .try_query_application(application, argument)
            .map_err(|error| RuntimeError::Custom(error.into()))
    }

    /// Checks if the service has exceeded its execution time limit.
    ///
    /// This is called by the metering instrumentation, but the fuel consumed argument is
    /// ignored.
    fn check_execution_time(caller: &mut Caller, _fuel_consumed: u64) -> Result<(), RuntimeError> {
        caller
            .user_data_mut()
            .runtime_mut()
            .check_execution_time()
            .map_err(|error| RuntimeError::Custom(error.into()))
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/mod.rs | linera-execution/src/wasm/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Support for user applications compiled as WebAssembly (Wasm) modules.
//!
//! Requires a WebAssembly runtime to be selected and enabled using one of the following features:
//!
//! - `wasmer` enables the [Wasmer](https://wasmer.io/) runtime
//! - `wasmtime` enables the [Wasmtime](https://wasmtime.dev/) runtime
#![cfg(with_wasm_runtime)]
mod entrypoints;
mod module_cache;
#[macro_use]
mod runtime_api;
#[cfg(with_wasmer)]
mod wasmer;
#[cfg(with_wasmtime)]
mod wasmtime;
#[cfg(with_fs)]
use std::path::Path;
use linera_base::data_types::Bytecode;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use thiserror::Error;
use wasm_instrument::{gas_metering, parity_wasm};
#[cfg(with_wasmer)]
use wasmer::{WasmerContractInstance, WasmerServiceInstance};
#[cfg(with_wasmtime)]
use wasmtime::{WasmtimeContractInstance, WasmtimeServiceInstance};
pub use self::{
entrypoints::{ContractEntrypoints, ServiceEntrypoints},
runtime_api::{BaseRuntimeApi, ContractRuntimeApi, RuntimeApiData, ServiceRuntimeApi},
};
use crate::{
ContractSyncRuntimeHandle, ExecutionError, ServiceSyncRuntimeHandle, UserContractInstance,
UserContractModule, UserServiceInstance, UserServiceModule, WasmRuntime,
};
#[cfg(with_metrics)]
mod metrics {
    //! Prometheus metrics recorded by the Wasm module instantiation paths.

    use std::sync::LazyLock;

    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;

    /// Histogram tracking the latency of Wasm contract instantiation.
    pub static CONTRACT_INSTANTIATION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "wasm_contract_instantiation_latency",
            "Wasm contract instantiation latency",
            &[],
            exponential_bucket_latencies(1.0),
        )
    });

    /// Histogram tracking the latency of Wasm service instantiation.
    pub static SERVICE_INSTANTIATION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "wasm_service_instantiation_latency",
            "Wasm service instantiation latency",
            &[],
            exponential_bucket_latencies(1.0),
        )
    });
}
/// A user contract in a compiled WebAssembly module.
#[derive(Clone)]
pub enum WasmContractModule {
#[cfg(with_wasmer)]
Wasmer {
engine: ::wasmer::Engine,
module: ::wasmer::Module,
},
#[cfg(with_wasmtime)]
Wasmtime { module: ::wasmtime::Module },
}
impl WasmContractModule {
/// Creates a new [`WasmContractModule`] using the WebAssembly module with the provided bytecode.
pub async fn new(
contract_bytecode: Bytecode,
runtime: WasmRuntime,
) -> Result<Self, WasmExecutionError> {
let contract_bytecode = add_metering(contract_bytecode)?;
match runtime {
#[cfg(with_wasmer)]
WasmRuntime::Wasmer => Self::from_wasmer(contract_bytecode).await,
#[cfg(with_wasmtime)]
WasmRuntime::Wasmtime => Self::from_wasmtime(contract_bytecode).await,
}
}
/// Creates a new [`WasmContractModule`] using the WebAssembly module in `contract_bytecode_file`.
#[cfg(with_fs)]
pub async fn from_file(
contract_bytecode_file: impl AsRef<Path>,
runtime: WasmRuntime,
) -> Result<Self, WasmExecutionError> {
Self::new(
Bytecode::load_from_file(contract_bytecode_file)
.map_err(anyhow::Error::from)
.map_err(WasmExecutionError::LoadContractModule)?,
runtime,
)
.await
}
}
impl UserContractModule for WasmContractModule {
fn instantiate(
&self,
runtime: ContractSyncRuntimeHandle,
) -> Result<UserContractInstance, ExecutionError> {
#[cfg(with_metrics)]
let _instantiation_latency = metrics::CONTRACT_INSTANTIATION_LATENCY.measure_latency();
let instance: UserContractInstance = match self {
#[cfg(with_wasmtime)]
WasmContractModule::Wasmtime { module } => {
Box::new(WasmtimeContractInstance::prepare(module, runtime)?)
}
#[cfg(with_wasmer)]
WasmContractModule::Wasmer { engine, module } => Box::new(
WasmerContractInstance::prepare(engine.clone(), module, runtime)?,
),
};
Ok(instance)
}
}
/// A user service in a compiled WebAssembly module.
#[derive(Clone)]
pub enum WasmServiceModule {
#[cfg(with_wasmer)]
Wasmer { module: ::wasmer::Module },
#[cfg(with_wasmtime)]
Wasmtime { module: ::wasmtime::Module },
}
impl WasmServiceModule {
/// Creates a new [`WasmServiceModule`] using the WebAssembly module with the provided bytecode.
pub async fn new(
service_bytecode: Bytecode,
runtime: WasmRuntime,
) -> Result<Self, WasmExecutionError> {
match runtime {
#[cfg(with_wasmer)]
WasmRuntime::Wasmer => Self::from_wasmer(service_bytecode).await,
#[cfg(with_wasmtime)]
WasmRuntime::Wasmtime => Self::from_wasmtime(service_bytecode).await,
}
}
/// Creates a new [`WasmServiceModule`] using the WebAssembly module in `service_bytecode_file`.
#[cfg(with_fs)]
pub async fn from_file(
service_bytecode_file: impl AsRef<Path>,
runtime: WasmRuntime,
) -> Result<Self, WasmExecutionError> {
Self::new(
Bytecode::load_from_file(service_bytecode_file)
.map_err(anyhow::Error::from)
.map_err(WasmExecutionError::LoadServiceModule)?,
runtime,
)
.await
}
}
impl UserServiceModule for WasmServiceModule {
fn instantiate(
&self,
runtime: ServiceSyncRuntimeHandle,
) -> Result<UserServiceInstance, ExecutionError> {
#[cfg(with_metrics)]
let _instantiation_latency = metrics::SERVICE_INSTANTIATION_LATENCY.measure_latency();
let instance: UserServiceInstance = match self {
#[cfg(with_wasmtime)]
WasmServiceModule::Wasmtime { module } => {
Box::new(WasmtimeServiceInstance::prepare(module, runtime)?)
}
#[cfg(with_wasmer)]
WasmServiceModule::Wasmer { module } => {
Box::new(WasmerServiceInstance::prepare(module, runtime)?)
}
};
Ok(instance)
}
}
/// Instrument the [`Bytecode`] to add fuel metering.
pub fn add_metering(bytecode: Bytecode) -> Result<Bytecode, WasmExecutionError> {
struct WasmtimeRules;
impl gas_metering::Rules for WasmtimeRules {
/// Calculates the fuel cost of a WebAssembly [`Operator`].
///
/// The rules try to follow the hardcoded [rules in the Wasmtime runtime
/// engine](https://docs.rs/wasmtime/5.0.0/wasmtime/struct.Store.html#method.add_fuel).
fn instruction_cost(
&self,
instruction: &parity_wasm::elements::Instruction,
) -> Option<u32> {
use parity_wasm::elements::Instruction::*;
Some(match instruction {
Nop | Drop | Block(_) | Loop(_) | Unreachable | Else | End => 0,
_ => 1,
})
}
fn memory_grow_cost(&self) -> gas_metering::MemoryGrowCost {
gas_metering::MemoryGrowCost::Free
}
fn call_per_local_cost(&self) -> u32 {
0
}
}
let instrumented_module = gas_metering::inject(
parity_wasm::deserialize_buffer(&bytecode.bytes)?,
gas_metering::host_function::Injector::new(
"linera:app/contract-runtime-api",
"consume-fuel",
),
&WasmtimeRules,
)
.map_err(|_| WasmExecutionError::InstrumentModule)?;
Ok(Bytecode::new(instrumented_module.into_bytes()?))
}
#[cfg(web)]
const _: () = {
use js_sys::wasm_bindgen::JsValue;
use web_thread_select as web_thread;
impl web_thread::AsJs for WasmServiceModule {
fn to_js(&self) -> Result<JsValue, JsValue> {
match self {
#[cfg(with_wasmer)]
Self::Wasmer { module } => Ok(::wasmer::Module::clone(module).into()),
}
}
fn from_js(value: JsValue) -> Result<Self, JsValue> {
// TODO(#2775): be generic over possible implementations
cfg_if::cfg_if! {
if #[cfg(with_wasmer)] {
Ok(Self::Wasmer {
module: value.try_into()?,
})
} else {
Err(value)
}
}
}
}
impl web_thread::Post for WasmServiceModule {}
impl web_thread::AsJs for WasmContractModule {
fn to_js(&self) -> Result<JsValue, JsValue> {
match self {
#[cfg(with_wasmer)]
Self::Wasmer { module, engine: _ } => Ok(::wasmer::Module::clone(module).into()),
}
}
fn from_js(value: JsValue) -> Result<Self, JsValue> {
// TODO(#2775): be generic over possible implementations
cfg_if::cfg_if! {
if #[cfg(with_wasmer)] {
Ok(Self::Wasmer {
module: value.try_into()?,
engine: Default::default(),
})
} else {
Err(value)
}
}
}
}
impl web_thread::Post for WasmContractModule {}
};
/// Errors that can occur when executing a user application in a WebAssembly module.
#[derive(Debug, Error)]
pub enum WasmExecutionError {
#[error("Failed to load contract Wasm module: {_0}")]
LoadContractModule(#[source] anyhow::Error),
#[error("Failed to load service Wasm module: {_0}")]
LoadServiceModule(#[source] anyhow::Error),
#[error("Failed to instrument Wasm module to add fuel metering")]
InstrumentModule,
#[error("Invalid Wasm module: {0}")]
InvalidBytecode(#[from] wasm_instrument::parity_wasm::SerializationError),
#[cfg(with_wasmer)]
#[error("Failed to instantiate Wasm module: {_0}")]
InstantiateModuleWithWasmer(#[from] Box<::wasmer::InstantiationError>),
#[cfg(with_wasmtime)]
#[error("Failed to create and configure Wasmtime runtime: {_0}")]
CreateWasmtimeEngine(#[source] anyhow::Error),
#[cfg(with_wasmer)]
#[error(
"Failed to execute Wasm module in Wasmer. This may be caused by panics or insufficient fuel. {0}"
)]
ExecuteModuleInWasmer(#[from] ::wasmer::RuntimeError),
#[cfg(with_wasmtime)]
#[error("Failed to execute Wasm module in Wasmtime: {0}")]
ExecuteModuleInWasmtime(#[from] ::wasmtime::Trap),
#[error("Failed to execute Wasm module: {0}")]
ExecuteModule(#[from] linera_witty::RuntimeError),
#[error("Attempt to wait for an unknown promise")]
UnknownPromise,
#[error("Attempt to call incorrect `wait` function for a promise")]
IncorrectPromise,
}
#[cfg(with_wasmer)]
impl From<::wasmer::InstantiationError> for WasmExecutionError {
fn from(instantiation_error: ::wasmer::InstantiationError) -> Self {
WasmExecutionError::InstantiateModuleWithWasmer(Box::new(instantiation_error))
}
}
/// This assumes that the current directory is one of the crates.
#[cfg(with_testing)]
pub mod test {
use std::{path::Path, sync::LazyLock};
#[cfg(with_fs)]
use super::{WasmContractModule, WasmRuntime, WasmServiceModule};
fn build_applications_in_directory(dir: &str) -> Result<(), std::io::Error> {
let output = std::process::Command::new("cargo")
.current_dir(dir)
.args(["build", "--release", "--target", "wasm32-unknown-unknown"])
.output()?;
if !output.status.success() {
panic!(
"Failed to build applications in directory {dir}.\n\n\
stdout:\n-------\n{}\n\n\
stderr:\n-------\n{}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr),
);
}
Ok(())
}
fn build_applications() -> Result<(), std::io::Error> {
for dir in ["../examples", "../linera-sdk/tests/fixtures"] {
build_applications_in_directory(dir)?;
}
Ok(())
}
pub fn get_example_bytecode_paths(name: &str) -> Result<(String, String), std::io::Error> {
let name = name.replace('-', "_");
static INSTANCE: LazyLock<()> = LazyLock::new(|| build_applications().unwrap());
LazyLock::force(&INSTANCE);
for dir in ["../examples", "../linera-sdk/tests/fixtures"] {
let prefix = format!("{dir}/target/wasm32-unknown-unknown/release");
let file_contract = format!("{prefix}/{name}_contract.wasm");
let file_service = format!("{prefix}/{name}_service.wasm");
if Path::new(&file_contract).exists() && Path::new(&file_service).exists() {
return Ok((file_contract, file_service));
}
}
Err(std::io::Error::last_os_error())
}
#[cfg(with_fs)]
pub async fn build_example_application(
name: &str,
wasm_runtime: impl Into<Option<WasmRuntime>>,
) -> Result<(WasmContractModule, WasmServiceModule), anyhow::Error> {
let (contract_path, service_path) = get_example_bytecode_paths(name)?;
let wasm_runtime = wasm_runtime.into().unwrap_or_default();
let contract = WasmContractModule::from_file(&contract_path, wasm_runtime).await?;
let service = WasmServiceModule::from_file(&service_path, wasm_runtime).await?;
Ok((contract, service))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/wasmer.rs | linera-execution/src/wasm/wasmer.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code specific to the usage of the [Wasmer](https://wasmer.io/) runtime.
use std::{marker::Unpin, sync::LazyLock};
use linera_base::data_types::{Bytecode, StreamUpdate};
use linera_witty::{
wasmer::{EntrypointInstance, InstanceBuilder},
ExportTo,
};
use tokio::sync::Mutex;
use super::{
module_cache::ModuleCache,
runtime_api::{BaseRuntimeApi, ContractRuntimeApi, RuntimeApiData, ServiceRuntimeApi},
ContractEntrypoints, ServiceEntrypoints, WasmExecutionError,
};
use crate::{
wasm::{WasmContractModule, WasmServiceModule},
ContractRuntime, ExecutionError, ServiceRuntime,
};
/// An [`Engine`] instance configured to run application services.
static SERVICE_ENGINE: LazyLock<wasmer::Engine> = LazyLock::new(|| {
#[cfg(web)]
{
wasmer::Engine::default()
}
#[cfg(not(web))]
{
wasmer::sys::EngineBuilder::new(wasmer::Cranelift::new()).into()
}
});
/// A cache of compiled contract modules, with their respective [`wasmer::Engine`] instances.
static CONTRACT_CACHE: LazyLock<Mutex<ModuleCache<CachedContractModule>>> =
LazyLock::new(Mutex::default);
/// A cache of compiled service modules.
static SERVICE_CACHE: LazyLock<Mutex<ModuleCache<wasmer::Module>>> = LazyLock::new(Mutex::default);
/// Type representing a running [Wasmer](https://wasmer.io/) contract.
pub(crate) struct WasmerContractInstance<Runtime> {
/// The Wasmer instance.
instance: EntrypointInstance<RuntimeApiData<Runtime>>,
}
/// Type representing a running [Wasmer](https://wasmer.io/) service.
pub struct WasmerServiceInstance<Runtime> {
/// The Wasmer instance.
instance: EntrypointInstance<RuntimeApiData<Runtime>>,
}
impl WasmContractModule {
/// Creates a new [`WasmContractModule`] using Wasmer with the provided bytecode files.
pub async fn from_wasmer(contract_bytecode: Bytecode) -> Result<Self, WasmExecutionError> {
let mut contract_cache = CONTRACT_CACHE.lock().await;
let (engine, module) = contract_cache
.get_or_insert_with(contract_bytecode, CachedContractModule::new)
.map_err(WasmExecutionError::LoadContractModule)?
.create_execution_instance()
.map_err(WasmExecutionError::LoadContractModule)?;
Ok(WasmContractModule::Wasmer { engine, module })
}
}
impl<Runtime> WasmerContractInstance<Runtime>
where
Runtime: ContractRuntime + Clone + Unpin + 'static,
{
/// Prepares a runtime instance to call into the Wasm contract.
pub fn prepare(
contract_engine: wasmer::Engine,
contract_module: &wasmer::Module,
runtime: Runtime,
) -> Result<Self, WasmExecutionError> {
let system_api_data = RuntimeApiData::new(runtime);
let mut instance_builder = InstanceBuilder::new(contract_engine, system_api_data);
BaseRuntimeApi::export_to(&mut instance_builder)?;
ContractRuntimeApi::export_to(&mut instance_builder)?;
let instance = instance_builder.instantiate(contract_module)?;
Ok(Self { instance })
}
}
impl WasmServiceModule {
/// Creates a new [`WasmServiceModule`] using Wasmer with the provided bytecode files.
pub async fn from_wasmer(service_bytecode: Bytecode) -> Result<Self, WasmExecutionError> {
let mut service_cache = SERVICE_CACHE.lock().await;
let module = service_cache
.get_or_insert_with(service_bytecode, |bytecode| {
wasmer::Module::new(&*SERVICE_ENGINE, bytecode).map_err(anyhow::Error::from)
})
.map_err(WasmExecutionError::LoadServiceModule)?;
Ok(WasmServiceModule::Wasmer { module })
}
}
impl<Runtime> WasmerServiceInstance<Runtime>
where
Runtime: ServiceRuntime + Clone + Unpin + 'static,
{
/// Prepares a runtime instance to call into the Wasm service.
pub fn prepare(
service_module: &wasmer::Module,
runtime: Runtime,
) -> Result<Self, WasmExecutionError> {
let system_api_data = RuntimeApiData::new(runtime);
let mut instance_builder = InstanceBuilder::new(SERVICE_ENGINE.clone(), system_api_data);
BaseRuntimeApi::export_to(&mut instance_builder)?;
ServiceRuntimeApi::export_to(&mut instance_builder)?;
let instance = instance_builder.instantiate(service_module)?;
Ok(Self { instance })
}
}
impl<Runtime> crate::UserContract for WasmerContractInstance<Runtime>
where
Runtime: ContractRuntime + Unpin + 'static,
{
fn instantiate(&mut self, argument: Vec<u8>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.instantiate(argument)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn execute_operation(&mut self, operation: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
Ok(ContractEntrypoints::new(&mut self.instance)
.execute_operation(operation)
.map_err(WasmExecutionError::from)?)
}
fn execute_message(&mut self, message: Vec<u8>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.execute_message(message)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn process_streams(&mut self, updates: Vec<StreamUpdate>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.process_streams(updates)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn finalize(&mut self) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.finalize()
.map_err(WasmExecutionError::from)?;
Ok(())
}
}
impl<Runtime: 'static> crate::UserService for WasmerServiceInstance<Runtime> {
fn handle_query(&mut self, argument: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
Ok(ServiceEntrypoints::new(&mut self.instance)
.handle_query(argument)
.map_err(WasmExecutionError::from)?)
}
}
impl From<ExecutionError> for wasmer::RuntimeError {
fn from(error: ExecutionError) -> Self {
wasmer::RuntimeError::user(Box::new(error))
}
}
impl From<wasmer::RuntimeError> for ExecutionError {
fn from(error: wasmer::RuntimeError) -> Self {
error
.downcast::<ExecutionError>()
.unwrap_or_else(|unknown_error| {
ExecutionError::WasmError(WasmExecutionError::ExecuteModuleInWasmer(unknown_error))
})
}
}
/// Serialized bytes of a compiled contract bytecode.
// Cloning `Module`s is cheap.
#[derive(Clone)]
pub struct CachedContractModule(wasmer::Module);
impl CachedContractModule {
/// Creates a new [`CachedContractModule`] by compiling a `contract_bytecode`.
pub fn new(contract_bytecode: Bytecode) -> Result<Self, anyhow::Error> {
let module = wasmer::Module::new(&Self::create_compilation_engine(), contract_bytecode)?;
Ok(CachedContractModule(module))
}
/// Creates a new [`Engine`] to compile a contract bytecode.
fn create_compilation_engine() -> wasmer::Engine {
#[cfg(not(web))]
{
let mut compiler_config = wasmer_compiler_singlepass::Singlepass::default();
compiler_config.canonicalize_nans(true);
wasmer::sys::EngineBuilder::new(compiler_config).into()
}
#[cfg(web)]
wasmer::Engine::default()
}
/// Creates a [`Module`] from a compiled contract using a headless [`Engine`].
pub fn create_execution_instance(
&self,
) -> Result<(wasmer::Engine, wasmer::Module), anyhow::Error> {
#[cfg(web)]
{
Ok((wasmer::Engine::default(), self.0.clone()))
}
#[cfg(not(web))]
{
let engine = wasmer::Engine::default();
let store = wasmer::Store::new(engine.clone());
let bytes = self.0.serialize()?;
let module = unsafe { wasmer::Module::deserialize(&store, bytes) }?;
Ok((engine, module))
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/module_cache.rs | linera-execution/src/wasm/module_cache.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A cache of compiled WebAssembly modules.
//!
//! The cache is limited by the total size of cached bytecode files. Note that this is a heuristic to
//! estimate the total memory usage by the cache, since it's currently not possible to determine
//! the size of a generic `Module`.
use linera_base::data_types::Bytecode;
use lru::LruCache;
/// The default maximum size of the bytecode files stored in cache.
const DEFAULT_MAX_CACHE_SIZE: u64 = 512 /* MiB */ * 1024 /* KiB */ * 1024 /* bytes */;
/// A cache of compiled WebAssembly modules.
///
/// The cache prioritizes entries based on their [`Metadata`].
pub struct ModuleCache<Module> {
modules: LruCache<Bytecode, Module>,
total_size: u64,
max_size: u64,
}
impl<Module> Default for ModuleCache<Module> {
fn default() -> Self {
ModuleCache {
modules: LruCache::unbounded(),
total_size: 0,
max_size: DEFAULT_MAX_CACHE_SIZE,
}
}
}
impl<Module: Clone> ModuleCache<Module> {
/// Returns a `Module` for the requested `bytecode`, creating it with `module_builder` and
/// adding it to the cache if it doesn't already exist in the cache.
pub fn get_or_insert_with<E>(
&mut self,
bytecode: Bytecode,
module_builder: impl FnOnce(Bytecode) -> Result<Module, E>,
) -> Result<Module, E> {
if let Some(module) = self.get(&bytecode) {
Ok(module)
} else {
let module = module_builder(bytecode.clone())?;
self.insert(bytecode, module.clone());
Ok(module)
}
}
/// Returns a `Module` for the requested `bytecode` if it's in the cache.
pub fn get(&mut self, bytecode: &Bytecode) -> Option<Module> {
self.modules.get(bytecode).cloned()
}
/// Inserts a `bytecode` and its compiled `module` in the cache.
pub fn insert(&mut self, bytecode: Bytecode, module: Module) {
let bytecode_size = bytecode.as_ref().len() as u64;
if self.total_size + bytecode_size > self.max_size {
self.reduce_size_to(self.max_size - bytecode_size);
}
self.modules.put(bytecode, module);
}
/// Evicts entries from the cache so that the total size of cached bytecode files is less than
/// `new_size`.
fn reduce_size_to(&mut self, new_size: u64) {
while self.total_size > new_size {
let (bytecode, _module) = self
.modules
.pop_lru()
.expect("Empty cache should have a `total_size` of zero");
let bytecode_size = bytecode.as_ref().len() as u64;
self.total_size -= bytecode_size;
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/wasm/wasmtime.rs | linera-execution/src/wasm/wasmtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code specific to the usage of the [Wasmtime](https://wasmtime.dev/) runtime.
use std::sync::LazyLock;
use linera_base::data_types::{Bytecode, StreamUpdate};
use linera_witty::{wasmtime::EntrypointInstance, ExportTo};
use tokio::sync::Mutex;
use wasmtime::{Config, Engine, Linker, Module, Store};
use super::{
module_cache::ModuleCache,
runtime_api::{BaseRuntimeApi, ContractRuntimeApi, RuntimeApiData, ServiceRuntimeApi},
ContractEntrypoints, ServiceEntrypoints, WasmExecutionError,
};
use crate::{
wasm::{WasmContractModule, WasmServiceModule},
ContractRuntime, ExecutionError, ServiceRuntime,
};
/// An [`Engine`] instance configured to run application contracts.
static CONTRACT_ENGINE: LazyLock<Engine> = LazyLock::new(|| {
let mut config = Config::default();
config.cranelift_nan_canonicalization(true);
Engine::new(&config).expect("Failed to create Wasmtime `Engine` for contracts")
});
/// An [`Engine`] instance configured to run application services.
static SERVICE_ENGINE: LazyLock<Engine> = LazyLock::new(Engine::default);
/// A cache of compiled contract modules.
static CONTRACT_CACHE: LazyLock<Mutex<ModuleCache<Module>>> = LazyLock::new(Mutex::default);
/// A cache of compiled service modules.
static SERVICE_CACHE: LazyLock<Mutex<ModuleCache<Module>>> = LazyLock::new(Mutex::default);
/// Type representing a running [Wasmtime](https://wasmtime.dev/) contract.
///
/// The runtime has a lifetime so that it does not outlive the trait object used to export the
/// system API.
pub(crate) struct WasmtimeContractInstance<Runtime>
where
Runtime: ContractRuntime + 'static,
{
/// The Wasm module instance.
instance: EntrypointInstance<RuntimeApiData<Runtime>>,
}
/// Type representing a running [Wasmtime](https://wasmtime.dev/) service.
pub struct WasmtimeServiceInstance<Runtime> {
/// The Wasm module instance.
instance: EntrypointInstance<RuntimeApiData<Runtime>>,
}
impl WasmContractModule {
/// Creates a new [`WasmContractModule`] using Wasmtime with the provided bytecode files.
pub async fn from_wasmtime(contract_bytecode: Bytecode) -> Result<Self, WasmExecutionError> {
let mut contract_cache = CONTRACT_CACHE.lock().await;
let module = contract_cache
.get_or_insert_with(contract_bytecode, |bytecode| {
Module::new(&CONTRACT_ENGINE, bytecode)
})
.map_err(WasmExecutionError::LoadContractModule)?;
Ok(WasmContractModule::Wasmtime { module })
}
}
impl<Runtime> WasmtimeContractInstance<Runtime>
where
Runtime: ContractRuntime + 'static,
{
/// Prepares a runtime instance to call into the Wasm contract.
pub fn prepare(contract_module: &Module, runtime: Runtime) -> Result<Self, WasmExecutionError> {
let mut linker = Linker::new(&CONTRACT_ENGINE);
BaseRuntimeApi::export_to(&mut linker)?;
ContractRuntimeApi::export_to(&mut linker)?;
let user_data = RuntimeApiData::new(runtime);
let mut store = Store::new(&CONTRACT_ENGINE, user_data);
let instance = linker
.instantiate(&mut store, contract_module)
.map_err(WasmExecutionError::LoadContractModule)?;
Ok(Self {
instance: EntrypointInstance::new(instance, store),
})
}
}
impl WasmServiceModule {
/// Creates a new [`WasmServiceModule`] using Wasmtime with the provided bytecode files.
pub async fn from_wasmtime(service_bytecode: Bytecode) -> Result<Self, WasmExecutionError> {
let mut service_cache = SERVICE_CACHE.lock().await;
let module = service_cache
.get_or_insert_with(service_bytecode, |bytecode| {
Module::new(&SERVICE_ENGINE, bytecode)
})
.map_err(WasmExecutionError::LoadServiceModule)?;
Ok(WasmServiceModule::Wasmtime { module })
}
}
impl<Runtime> WasmtimeServiceInstance<Runtime>
where
Runtime: ServiceRuntime + 'static,
{
/// Prepares a runtime instance to call into the Wasm service.
pub fn prepare(service_module: &Module, runtime: Runtime) -> Result<Self, WasmExecutionError> {
let mut linker = Linker::new(&SERVICE_ENGINE);
BaseRuntimeApi::export_to(&mut linker)?;
ServiceRuntimeApi::export_to(&mut linker)?;
let user_data = RuntimeApiData::new(runtime);
let mut store = Store::new(&SERVICE_ENGINE, user_data);
let instance = linker
.instantiate(&mut store, service_module)
.map_err(WasmExecutionError::LoadServiceModule)?;
Ok(Self {
instance: EntrypointInstance::new(instance, store),
})
}
}
impl<Runtime> crate::UserContract for WasmtimeContractInstance<Runtime>
where
Runtime: ContractRuntime + 'static,
{
fn instantiate(&mut self, argument: Vec<u8>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.instantiate(argument)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn execute_operation(&mut self, operation: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
let result = ContractEntrypoints::new(&mut self.instance)
.execute_operation(operation)
.map_err(WasmExecutionError::from)?;
Ok(result)
}
fn execute_message(&mut self, message: Vec<u8>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.execute_message(message)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn process_streams(&mut self, updates: Vec<StreamUpdate>) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.process_streams(updates)
.map_err(WasmExecutionError::from)?;
Ok(())
}
fn finalize(&mut self) -> Result<(), ExecutionError> {
ContractEntrypoints::new(&mut self.instance)
.finalize()
.map_err(WasmExecutionError::from)?;
Ok(())
}
}
impl<Runtime> crate::UserService for WasmtimeServiceInstance<Runtime>
where
Runtime: ServiceRuntime + 'static,
{
fn handle_query(&mut self, argument: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
Ok(ServiceEntrypoints::new(&mut self.instance)
.handle_query(argument)
.map_err(WasmExecutionError::from)?)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/bin/wit_generator.rs | linera-execution/src/bin/wit_generator.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Generator of WIT files representing the interface between Linera applications and nodes.
#![cfg_attr(any(target_arch = "wasm32", not(with_wasm_runtime)), no_main)]
#![cfg(all(not(target_arch = "wasm32"), with_wasm_runtime))]
use std::{
fs::File,
io::{BufReader, BufWriter, Read, Write},
path::{Path, PathBuf},
};
use anyhow::{Context, Result};
use clap::Parser as _;
use linera_execution::{
BaseRuntimeApi, ContractEntrypoints, ContractRuntimeApi, ContractSyncRuntimeHandle,
RuntimeApiData, ServiceEntrypoints, ServiceRuntimeApi, ServiceSyncRuntimeHandle,
};
use linera_witty::wit_generation::{
FileContentGenerator, StubInstance, WitInterfaceWriter, WitWorldWriter,
};
/// Command line parameters for the WIT generator.
#[derive(Debug, clap::Parser)]
pub struct WitGeneratorOptions {
/// The base directory of where the WIT files should be placed.
#[arg(short, long, default_value = "linera-sdk/wit")]
base_directory: PathBuf,
/// Check if the existing files are correct.
#[arg(short, long)]
check: bool,
}
/// WIT file generator entrypoint.
fn main() -> Result<()> {
let options = WitGeneratorOptions::parse();
if options.check {
run_operation(options, CheckFile)?;
} else {
run_operation(options, WriteToFile)?;
}
Ok(())
}
/// Runs the main `operation` on all the WIT files.
fn run_operation(options: WitGeneratorOptions, mut operation: impl Operation) -> Result<()> {
let contract_entrypoints = WitInterfaceWriter::new::<ContractEntrypoints<StubInstance>>();
let service_entrypoints = WitInterfaceWriter::new::<ServiceEntrypoints<StubInstance>>();
let base_runtime_api = WitInterfaceWriter::new::<
BaseRuntimeApi<StubInstance<RuntimeApiData<ContractSyncRuntimeHandle>>>,
>();
let contract_runtime_api = WitInterfaceWriter::new::<
ContractRuntimeApi<StubInstance<RuntimeApiData<ContractSyncRuntimeHandle>>>,
>();
let service_runtime_api = WitInterfaceWriter::new::<
ServiceRuntimeApi<StubInstance<RuntimeApiData<ServiceSyncRuntimeHandle>>>,
>();
let contract_world = WitWorldWriter::new("linera:app", "contract")
.export::<ContractEntrypoints<StubInstance>>()
.import::<ContractRuntimeApi<StubInstance<RuntimeApiData<ContractSyncRuntimeHandle>>>>()
.import::<BaseRuntimeApi<StubInstance<RuntimeApiData<ContractSyncRuntimeHandle>>>>();
let service_world = WitWorldWriter::new("linera:app", "service")
.export::<ServiceEntrypoints<StubInstance>>()
.import::<ServiceRuntimeApi<StubInstance<RuntimeApiData<ServiceSyncRuntimeHandle>>>>()
.import::<BaseRuntimeApi<StubInstance<RuntimeApiData<ContractSyncRuntimeHandle>>>>();
operation.run_for_file(
&options.base_directory.join("contract-entrypoints.wit"),
contract_entrypoints,
)?;
operation.run_for_file(
&options.base_directory.join("service-entrypoints.wit"),
service_entrypoints,
)?;
operation.run_for_file(
&options.base_directory.join("base-runtime-api.wit"),
base_runtime_api,
)?;
operation.run_for_file(
&options.base_directory.join("contract-runtime-api.wit"),
contract_runtime_api,
)?;
operation.run_for_file(
&options.base_directory.join("service-runtime-api.wit"),
service_runtime_api,
)?;
operation.run_for_file(&options.base_directory.join("contract.wit"), contract_world)?;
operation.run_for_file(&options.base_directory.join("service.wit"), service_world)?;
Ok(())
}
/// An operation that this WIT generator binary can perform.
trait Operation {
/// Executes the operation for a file at `path`, using the WIT `contents`.
fn run_for_file(&mut self, path: &Path, generator: impl FileContentGenerator) -> Result<()>;
}
/// Writes out the WIT file.
pub struct WriteToFile;
impl Operation for WriteToFile {
fn run_for_file(&mut self, path: &Path, generator: impl FileContentGenerator) -> Result<()> {
let mut file = BufWriter::new(
File::create(path)
.with_context(|| format!("Failed to create file at {}", path.display()))?,
);
generator
.generate_file_contents(&mut file)
.with_context(|| format!("Failed to write to {}", path.display()))?;
file.flush()
.with_context(|| format!("Failed to flush to {}", path.display()))?;
Ok(())
}
}
/// Checks that a WIT file has the expected contents.
pub struct CheckFile;
struct FileComparator {
buffer: Vec<u8>,
file: BufReader<File>,
}
impl FileComparator {
fn new(file: File) -> Self {
Self {
buffer: Vec::new(),
file: BufReader::new(file),
}
}
}
impl Write for FileComparator {
fn write(&mut self, part: &[u8]) -> std::io::Result<usize> {
self.buffer.resize(part.len(), 0);
self.file.read_exact(&mut self.buffer)?;
if self.buffer != part {
return Err(std::io::Error::other("file does not match"));
}
Ok(part.len())
}
fn flush(&mut self) -> std::io::Result<()> {
self.buffer.resize(1, 0);
if self.file.read(&mut self.buffer)? != 0 {
return Err(std::io::Error::other("file has extra contents"));
}
Ok(())
}
}
impl Operation for CheckFile {
fn run_for_file(&mut self, path: &Path, generator: impl FileContentGenerator) -> Result<()> {
let mut file_comparer = FileComparator::new(
File::open(path)
.with_context(|| format!("Failed to open file at {}", path.display()))?,
);
generator
.generate_file_contents(&mut file_comparer)
.with_context(|| format!("Comparison with {} failed", path.display()))?;
file_comparer
.flush()
.with_context(|| format!("Comparison with {} failed", path.display()))?;
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/test_utils/solidity.rs | linera-execution/src/test_utils/solidity.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code for compiling solidity smart contracts for testing purposes.
use std::{
fs::File,
io::Write,
path::{Path, PathBuf},
process::{Command, Stdio},
};
use anyhow::Context;
use revm_primitives::{Address, U256};
use serde_json::Value;
use tempfile::{tempdir, TempDir};
use crate::{LINERA_SOL, LINERA_TYPES_SOL};
fn write_compilation_json(path: &Path, file_name: &str) -> anyhow::Result<()> {
let mut source = File::create(path).unwrap();
writeln!(
source,
r#"
{{
"language": "Solidity",
"sources": {{
"{file_name}": {{
"urls": ["./{file_name}"]
}}
}},
"settings": {{
"viaIR": true,
"outputSelection": {{
"*": {{
"*": ["evm.bytecode"]
}}
}}
}}
}}
"#
)?;
Ok(())
}
/// Runs `solc` on `file_name` inside `path` and extracts the deployment bytecode
/// of `contract_name` from the standard-JSON compiler output.
///
/// # Errors
///
/// Fails if `solc` cannot be spawned, exits unsuccessfully, or if the expected
/// entries are missing from its JSON output.
fn get_bytecode_path(path: &Path, file_name: &str, contract_name: &str) -> anyhow::Result<Vec<u8>> {
    let config_path = path.join("config.json");
    write_compilation_json(&config_path, file_name)?;
    let config_file = File::open(config_path)?;
    let output_path = path.join("result.json");
    let output_file = File::create(output_path.clone())?;
    // Feed the standard-JSON config on stdin and capture the output into a file.
    let status = Command::new("solc")
        .current_dir(path)
        .arg("--standard-json")
        .stdin(Stdio::from(config_file))
        .stdout(Stdio::from(output_file))
        .status()?;
    // Return a proper error instead of panicking inside a `Result` function.
    anyhow::ensure!(status.success(), "solc exited with {status}");
    let contents = std::fs::read_to_string(output_path)?;
    let json_data: serde_json::Value = serde_json::from_str(&contents)?;
    let contracts = json_data
        .get("contracts")
        .with_context(|| format!("failed to get contracts in json_data={json_data}"))?;
    // `context` does not interpolate `{file_name}`; use `with_context` + `format!`.
    let file_name_contract = contracts
        .get(file_name)
        .with_context(|| format!("failed to get {file_name}"))?;
    let test_data = file_name_contract
        .get(contract_name)
        .with_context(|| format!("failed to get contract_name={contract_name}"))?;
    let evm_data = test_data
        .get("evm")
        .with_context(|| format!("failed to get evm in test_data={test_data}"))?;
    let bytecode = evm_data
        .get("bytecode")
        .with_context(|| format!("failed to get bytecode in evm_data={evm_data}"))?;
    let object = bytecode
        .get("object")
        .with_context(|| format!("failed to get object in bytecode={bytecode}"))?;
    // The object is a JSON string; read it directly instead of stripping the quotes
    // from its `to_string()` rendering.
    let object = object
        .as_str()
        .with_context(|| format!("object is not a string in bytecode={bytecode}"))?;
    Ok(hex::decode(object)?)
}
/// Compiles `source_code` with `solc` and returns the deployment bytecode of
/// `contract_name`.
///
/// Auxiliary files (`Linera.sol`, `LineraTypes.sol`, OpenZeppelin contracts) are
/// materialized in a temporary build directory when the source references them.
pub fn get_bytecode(source_code: &str, contract_name: &str) -> anyhow::Result<Vec<u8>> {
    // Propagate tempdir failures instead of panicking inside a `Result` function.
    let dir = tempdir()?;
    let path = dir.path();
    if source_code.contains("Linera.sol") {
        // The source code seems to import Linera.sol, so we import the relevant files.
        for (file_name, literal_path) in [
            ("Linera.sol", LINERA_SOL),
            ("LineraTypes.sol", LINERA_TYPES_SOL),
        ] {
            let test_code_path = path.join(file_name);
            let mut test_code_file = File::create(&test_code_path)?;
            writeln!(test_code_file, "{}", literal_path)?;
        }
    }
    if source_code.contains("@openzeppelin") {
        let _output = Command::new("npm")
            .args(["install", "@openzeppelin/contracts"])
            .current_dir(path)
            .output()?;
        // Move the package where solc resolves `@openzeppelin/...` imports.
        // `std::fs::rename` replaces the non-portable `mv` subprocess; failures are
        // ignored, matching the previous best-effort behavior.
        let _ = std::fs::rename(
            path.join("node_modules/@openzeppelin"),
            path.join("@openzeppelin"),
        );
    }
    let file_name = "test_code.sol";
    let test_code_path = path.join(file_name);
    let mut test_code_file = File::create(&test_code_path)?;
    writeln!(test_code_file, "{}", source_code)?;
    get_bytecode_path(path, file_name, contract_name)
}
/// Loads the Solidity source at `path` and compiles its first declared contract.
///
/// The contract name is taken from the first line starting with `contract `.
pub fn load_solidity_example(path: &str) -> anyhow::Result<Vec<u8>> {
    let source_code = std::fs::read_to_string(path)?;
    // Locate the first `contract <Name>` declaration in the source.
    let mut declaration = None;
    for line in source_code.lines() {
        if let Some(rest) = line.trim_start().strip_prefix("contract ") {
            declaration = Some(rest);
            break;
        }
    }
    let declaration = declaration.ok_or_else(|| anyhow::anyhow!("Not matching"))?;
    // The name is the first whitespace-separated token after the keyword.
    let contract_name = match declaration.split_whitespace().next() {
        Some(name) => name,
        None => return Err(anyhow::anyhow!("No space found after the contract name")),
    };
    tracing::info!("load_solidity_example, contract_name={contract_name}");
    get_bytecode(&source_code, contract_name)
}
pub fn load_solidity_example_by_name(path: &str, contract_name: &str) -> anyhow::Result<Vec<u8>> {
let source_code = std::fs::read_to_string(path)?;
get_bytecode(&source_code, contract_name)
}
/// Writes `module` to an `app.json` file inside a fresh temporary directory.
///
/// Returns the path of the written file together with the [`TempDir`] guard; the
/// directory is deleted when the guard is dropped, so callers must keep it alive
/// for as long as the path is used.
pub fn temporary_write_evm_module(module: Vec<u8>) -> anyhow::Result<(PathBuf, TempDir)> {
    let dir = tempfile::tempdir()?;
    let evm_contract = dir.path().join("app.json");
    // Single write; the previous extra scope and `clone`/`to_path_buf` were redundant.
    std::fs::write(&evm_contract, &module)?;
    Ok((evm_contract, dir))
}
/// Compiles the Solidity example at `path` and stores the resulting module in a
/// temporary directory, returning the module path and the directory guard.
pub fn get_evm_contract_path(path: &str) -> anyhow::Result<(PathBuf, TempDir)> {
    temporary_write_evm_module(load_solidity_example(path)?)
}
/// Converts a JSON array of numbers into the corresponding byte vector.
///
/// Panics if `value` is not an array or if an element is not an unsigned integer;
/// each element is truncated to `u8`, matching the original behavior.
pub fn value_to_vec_u8(value: Value) -> Vec<u8> {
    value
        .as_array()
        .unwrap()
        .iter()
        .map(|entry| entry.as_u64().unwrap() as u8)
        .collect()
}
/// Reads a `u64` from a 32-byte big-endian EVM word encoded as a JSON byte array.
///
/// The value occupies the last 8 bytes of the word.
pub fn read_evm_u64_entry(value: Value) -> u64 {
    let bytes = value_to_vec_u8(value);
    let tail: [u8; 8] = bytes[24..].try_into().expect("expected a 32-byte EVM word");
    u64::from_be_bytes(tail)
}
/// Reads a [`U256`] from a big-endian EVM word encoded as a JSON byte array.
pub fn read_evm_u256_entry(value: Value) -> U256 {
    U256::from_be_slice(&value_to_vec_u8(value))
}
/// Reads an [`Address`] from a 32-byte big-endian EVM word encoded as a JSON byte
/// array; the address occupies the last 20 bytes of the word.
pub fn read_evm_address_entry(value: Value) -> Address {
    let bytes = value_to_vec_u8(value);
    Address::from_slice(&bytes[12..])
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/test_utils/mod.rs | linera-execution/src/test_utils/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod mock_application;
#[cfg(with_revm)]
pub mod solidity;
mod system_execution_state;
use std::{collections::BTreeMap, sync::Arc, thread, vec};
use linera_base::{
crypto::{AccountPublicKey, ValidatorPublicKey},
data_types::{
Amount, Blob, BlockHeight, ChainDescription, ChainOrigin, CompressedBytecode, Epoch,
InitialChainConfig, OracleResponse, Timestamp,
},
identifiers::{AccountOwner, ApplicationId, BlobId, ChainId, ModuleId},
ownership::ChainOwnership,
vm::VmRuntime,
};
use linera_views::{context::Context, views::View};
use proptest::{prelude::any, strategy::Strategy};
pub use self::{
mock_application::{ExpectedCall, MockApplication, MockApplicationInstance},
system_execution_state::SystemExecutionState,
};
use crate::{
committee::Committee, ApplicationDescription, ExecutionRuntimeContext, ExecutionStateView,
MessageContext, OperationContext, QueryContext, ServiceRuntimeEndpoint, ServiceSyncRuntime,
SystemExecutionStateView,
};
/// Builds a [`Committee`] consisting of a single test validator key pair.
pub fn dummy_committee() -> Committee {
    let validator_keys = (
        ValidatorPublicKey::test_key(0),
        AccountPublicKey::test_key(0),
    );
    Committee::make_simple(vec![validator_keys])
}
/// Builds a committee map holding only the dummy committee at epoch zero.
pub fn dummy_committees() -> BTreeMap<Epoch, Committee> {
    BTreeMap::from([(Epoch::ZERO, dummy_committee())])
}
/// Creates a dummy [`ChainDescription`] rooted at `index` with the given ownership
/// configuration and initial balance, with all epochs fixed to zero.
pub fn dummy_chain_description_with_ownership_and_balance(
    index: u32,
    ownership: ChainOwnership,
    balance: Amount,
) -> ChainDescription {
    let initial_config = InitialChainConfig {
        application_permissions: Default::default(),
        balance,
        epoch: Epoch::ZERO,
        min_active_epoch: Epoch::ZERO,
        max_active_epoch: Epoch::ZERO,
        ownership,
    };
    ChainDescription::new(ChainOrigin::Root(index), initial_config, Timestamp::default())
}
/// Creates a dummy [`ChainDescription`] for `index` owned solely by `owner`, with
/// the maximum initial balance.
pub fn dummy_chain_description_with_owner(index: u32, owner: AccountOwner) -> ChainDescription {
    let ownership = ChainOwnership::single(owner);
    dummy_chain_description_with_ownership_and_balance(index, ownership, Amount::MAX)
}
/// Creates a dummy [`ChainDescription`] for the root chain at `index`, owned by a
/// deterministic test key derived from the index.
pub fn dummy_chain_description(index: u32) -> ChainDescription {
    // Key ids are odd values in 1..=255; indices equal modulo 128 share a key.
    let chain_key = AccountPublicKey::test_key(2 * (index % 128) as u8 + 1);
    let ownership = ChainOwnership::single(chain_key.into());
    dummy_chain_description_with_ownership_and_balance(index, ownership, Amount::MAX)
}
/// Creates a dummy [`ApplicationDescription`] for use in tests.
///
/// Returns the description together with the contract and service bytecode blobs it
/// refers to. The `index` is appended to both fake bytecode payloads and used as the
/// application index, so distinct indices yield distinct blob hashes and descriptions.
pub fn create_dummy_user_application_description(
    index: u32,
) -> (ApplicationDescription, Blob, Blob) {
    // All dummy applications are created on the same dummy chain `1`.
    let chain_id = dummy_chain_description(1).id();
    // Make the placeholder bytecode unique per index so the blob hashes differ.
    let mut contract_bytes = b"contract".to_vec();
    let mut service_bytes = b"service".to_vec();
    contract_bytes.push(index as u8);
    service_bytes.push(index as u8);
    let contract_blob = Blob::new_contract_bytecode(CompressedBytecode {
        compressed_bytes: Arc::new(contract_bytes.into_boxed_slice()),
    });
    let service_blob = Blob::new_service_bytecode(CompressedBytecode {
        compressed_bytes: Arc::new(service_bytes.into_boxed_slice()),
    });
    let vm_runtime = VmRuntime::Wasm;
    (
        ApplicationDescription {
            module_id: ModuleId::new(contract_blob.id().hash, service_blob.id().hash, vm_runtime),
            creator_chain_id: chain_id,
            block_height: 0.into(),
            application_index: index,
            required_application_ids: vec![],
            parameters: vec![],
        },
        contract_blob,
        service_blob,
    )
}
/// Creates a dummy [`OperationContext`] to use in tests.
///
/// Uses the first block height and round on `chain_id`, with no authenticated owner
/// and a default timestamp.
pub fn create_dummy_operation_context(chain_id: ChainId) -> OperationContext {
    OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: None,
        timestamp: Default::default(),
    }
}
/// Creates a dummy [`MessageContext`] to use in tests.
///
/// The message appears to originate from `chain_id` itself (origin equals the
/// destination chain), at the first block height and round, and is not bouncing.
pub fn create_dummy_message_context(
    chain_id: ChainId,
    authenticated_owner: Option<AccountOwner>,
) -> MessageContext {
    MessageContext {
        chain_id,
        origin: chain_id,
        is_bouncing: false,
        authenticated_owner,
        refund_grant_to: None,
        height: BlockHeight(0),
        round: Some(0),
        timestamp: Default::default(),
    }
}
/// Creates a dummy [`QueryContext`] to use in tests.
///
/// Targets the dummy root chain `0` at the first block height, with local time set
/// to the zero timestamp.
pub fn create_dummy_query_context() -> QueryContext {
    QueryContext {
        chain_id: dummy_chain_description(0).id(),
        next_block_height: BlockHeight(0),
        local_time: Timestamp::from(0),
    }
}
/// Registration of [`MockApplication`]s to use in tests.
#[allow(async_fn_in_trait)]
pub trait RegisterMockApplication {
/// Returns the chain to use for the creation of the application.
///
/// This is included in the mocked [`ApplicationId`].
fn creator_chain_id(&self) -> ChainId;
/// Registers a new [`MockApplication`] and returns it with the [`ApplicationId`] that was
/// used for it.
async fn register_mock_application(
&mut self,
index: u32,
) -> anyhow::Result<(ApplicationId, MockApplication, [BlobId; 3])> {
let (description, contract, service) = create_dummy_user_application_description(index);
let description_blob_id = Blob::new_application_description(&description).id();
let contract_blob_id = contract.id();
let service_blob_id = service.id();
let (app_id, application) = self
.register_mock_application_with(description, contract, service)
.await?;
Ok((
app_id,
application,
[description_blob_id, contract_blob_id, service_blob_id],
))
}
/// Registers a new [`MockApplication`] associated with a [`ApplicationDescription`] and
/// its bytecode [`Blob`]s.
async fn register_mock_application_with(
&mut self,
description: ApplicationDescription,
contract: Blob,
service: Blob,
) -> anyhow::Result<(ApplicationId, MockApplication)>;
}
impl<C> RegisterMockApplication for ExecutionStateView<C>
where
C: Context + Clone + Send + Sync + 'static,
C::Extra: ExecutionRuntimeContext,
{
fn creator_chain_id(&self) -> ChainId {
self.system.creator_chain_id()
}
async fn register_mock_application_with(
&mut self,
description: ApplicationDescription,
contract: Blob,
service: Blob,
) -> anyhow::Result<(ApplicationId, MockApplication)> {
self.system
.register_mock_application_with(description, contract, service)
.await
}
}
impl<C> RegisterMockApplication for SystemExecutionStateView<C>
where
C: Context + Clone + Send + Sync + 'static,
C::Extra: ExecutionRuntimeContext,
{
fn creator_chain_id(&self) -> ChainId {
self.description.get().as_ref().expect(
"Can't register applications on a system state with no associated `ChainDescription`",
).into()
}
async fn register_mock_application_with(
&mut self,
description: ApplicationDescription,
contract: Blob,
service: Blob,
) -> anyhow::Result<(ApplicationId, MockApplication)> {
let id = From::from(&description);
let context = self.context();
let extra = context.extra();
let mock_application = MockApplication::default();
extra
.user_contracts()
.pin()
.insert(id, mock_application.clone().into());
extra
.user_services()
.pin()
.insert(id, mock_application.clone().into());
extra
.add_blobs([
contract,
service,
Blob::new_application_description(&description),
])
.await?;
Ok((id, mock_application))
}
}
pub fn create_dummy_user_application_registrations(
count: u32,
) -> anyhow::Result<Vec<(ApplicationId, ApplicationDescription, Blob, Blob)>> {
let mut ids = Vec::with_capacity(count as usize);
for index in 0..count {
let (description, contract_blob, service_blob) =
create_dummy_user_application_description(index);
let id = From::from(&description);
ids.push((id, description, contract_blob, service_blob));
}
Ok(ids)
}
impl QueryContext {
/// Spawns a thread running the [`ServiceSyncRuntime`] actor.
///
/// Returns the endpoints to communicate with the actor.
pub fn spawn_service_runtime_actor(self) -> ServiceRuntimeEndpoint {
let (execution_state_sender, incoming_execution_requests) =
futures::channel::mpsc::unbounded();
let (runtime_request_sender, runtime_request_receiver) = std::sync::mpsc::channel();
thread::spawn(move || {
ServiceSyncRuntime::new(execution_state_sender, self).run(runtime_request_receiver)
});
ServiceRuntimeEndpoint {
incoming_execution_requests,
runtime_request_sender,
}
}
}
/// Creates a [`Strategy`] for creating a [`BTreeMap`] of [`AccountOwner`]s with an initial
/// non-zero [`Amount`] of tokens.
pub fn test_accounts_strategy() -> impl Strategy<Value = BTreeMap<AccountOwner, Amount>> {
proptest::collection::btree_map(
any::<AccountOwner>(),
(1_u128..).prop_map(Amount::from_tokens),
0..5,
)
}
/// Creates a vector of [`OracleResponse`]s for the supplied [`BlobId`]s.
pub fn blob_oracle_responses<'a>(blobs: impl Iterator<Item = &'a BlobId>) -> Vec<OracleResponse> {
    // `blobs` is already an iterator, so the previous `into_iter` call was redundant.
    blobs.copied().map(OracleResponse::Blob).collect()
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/test_utils/system_execution_state.rs | linera-execution/src/test_utils/system_execution_state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet},
ops::Not,
};
use custom_debug_derive::Debug;
use linera_base::{
crypto::CryptoHash,
data_types::{Amount, ApplicationPermissions, Blob, ChainDescription, Epoch, Timestamp},
identifiers::{AccountOwner, ApplicationId, BlobId, ChainId},
ownership::ChainOwnership,
};
use linera_views::{context::MemoryContext, views::View};
use super::{dummy_chain_description, dummy_committees, MockApplication, RegisterMockApplication};
use crate::{
committee::Committee, ApplicationDescription, ExecutionRuntimeConfig, ExecutionRuntimeContext,
ExecutionStateView, TestExecutionRuntimeContext,
};
/// A system execution state, not represented as a view but as a simple struct.
#[derive(Default, Debug, PartialEq, Eq, Clone)]
pub struct SystemExecutionState {
pub description: Option<ChainDescription>,
pub epoch: Epoch,
pub admin_id: Option<ChainId>,
pub committees: BTreeMap<Epoch, Committee>,
pub ownership: ChainOwnership,
pub balance: Amount,
#[debug(skip_if = BTreeMap::is_empty)]
pub balances: BTreeMap<AccountOwner, Amount>,
pub timestamp: Timestamp,
pub used_blobs: BTreeSet<BlobId>,
#[debug(skip_if = Not::not)]
pub closed: bool,
pub application_permissions: ApplicationPermissions,
#[debug(skip_if = Vec::is_empty)]
pub extra_blobs: Vec<Blob>,
#[debug(skip_if = BTreeMap::is_empty)]
pub mock_applications: BTreeMap<ApplicationId, MockApplication>,
}
impl SystemExecutionState {
pub fn new(description: ChainDescription) -> Self {
let ownership = description.config().ownership.clone();
let balance = description.config().balance;
let epoch = description.config().epoch;
let admin_id = Some(dummy_chain_description(0).id());
SystemExecutionState {
epoch,
description: Some(description),
admin_id,
ownership,
balance,
committees: dummy_committees(),
..SystemExecutionState::default()
}
}
pub fn dummy_chain_state(index: u32) -> (Self, ChainId) {
let description = dummy_chain_description(index);
let chain_id = description.id();
(Self::new(description), chain_id)
}
pub async fn into_hash(self) -> CryptoHash {
let mut view = self.into_view().await;
view.crypto_hash_mut()
.await
.expect("hashing from memory should not fail")
}
pub async fn into_view(self) -> ExecutionStateView<MemoryContext<TestExecutionRuntimeContext>> {
let chain_id = self
.description
.as_ref()
.expect("Chain description should be set")
.into();
self.into_view_with(chain_id, ExecutionRuntimeConfig::default())
.await
}
pub async fn into_view_with(
self,
chain_id: ChainId,
execution_runtime_config: ExecutionRuntimeConfig,
) -> ExecutionStateView<MemoryContext<TestExecutionRuntimeContext>> {
// Destructure, to make sure we don't miss any fields.
let SystemExecutionState {
description,
epoch,
admin_id,
committees,
ownership,
balance,
balances,
timestamp,
used_blobs,
closed,
application_permissions,
extra_blobs,
mock_applications,
} = self;
let extra = TestExecutionRuntimeContext::new(chain_id, execution_runtime_config);
extra
.add_blobs(extra_blobs)
.await
.expect("Adding blobs to the `TestExecutionRuntimeContext` should not fail");
for (id, mock_application) in mock_applications {
extra
.user_contracts()
.pin()
.insert(id, mock_application.clone().into());
extra
.user_services()
.pin()
.insert(id, mock_application.into());
}
let context = MemoryContext::new_for_testing(extra);
let mut view = ExecutionStateView::load(context)
.await
.expect("Loading from memory should work");
view.system.description.set(description);
view.system.epoch.set(epoch);
view.system.admin_id.set(admin_id);
view.system.committees.set(committees);
view.system.ownership.set(ownership);
view.system.balance.set(balance);
for (account_owner, balance) in balances {
view.system
.balances
.insert(&account_owner, balance)
.expect("insertion of balances should not fail");
}
view.system.timestamp.set(timestamp);
for blob_id in used_blobs {
view.system
.used_blobs
.insert(&blob_id)
.expect("inserting blob IDs should not fail");
}
view.system.closed.set(closed);
view.system
.application_permissions
.set(application_permissions);
view
}
}
impl RegisterMockApplication for SystemExecutionState {
fn creator_chain_id(&self) -> ChainId {
self.description.as_ref().expect(
"Can't register applications on a system state with no associated `ChainDescription`",
).into()
}
async fn register_mock_application_with(
&mut self,
description: ApplicationDescription,
contract: Blob,
service: Blob,
) -> anyhow::Result<(ApplicationId, MockApplication)> {
let id = ApplicationId::from(&description);
let application = MockApplication::default();
self.extra_blobs.extend([
contract,
service,
Blob::new_application_description(&description),
]);
self.mock_applications.insert(id, application.clone());
Ok((id, application))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/test_utils/mock_application.rs | linera-execution/src/test_utils/mock_application.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Mocking of user applications to help with execution scenario tests.
use std::{
collections::VecDeque,
fmt::{self, Debug, Display, Formatter},
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
};
#[cfg(web)]
use js_sys::wasm_bindgen;
use linera_base::data_types::StreamUpdate;
use crate::{
ContractSyncRuntimeHandle, ExecutionError, ServiceSyncRuntimeHandle, UserContract,
UserContractModule, UserService, UserServiceModule,
};
/// A mocked implementation of a user application.
///
/// Should be configured with any expected calls, and can then be used to create a
/// [`MockApplicationInstance`] that implements [`UserContract`] and [`UserService`].
#[cfg_attr(web, wasm_bindgen::prelude::wasm_bindgen)]
#[derive(Clone, Default)]
pub struct MockApplication {
expected_calls: Arc<Mutex<VecDeque<ExpectedCall>>>,
active_instances: Arc<AtomicUsize>,
}
/// A mocked implementation of a user application instance.
///
/// Will expect certain calls previously configured through [`MockApplication`].
pub struct MockApplicationInstance<Runtime> {
expected_calls: Arc<Mutex<VecDeque<ExpectedCall>>>,
runtime: Runtime,
active_instances: Arc<AtomicUsize>,
}
impl MockApplication {
    /// Queues an expected call to the [`MockApplication`].
    ///
    /// Calls are consumed in FIFO order by the instances created from this mock.
    pub fn expect_call(&self, expected_call: ExpectedCall) {
        self.expected_calls
            .lock()
            .expect("Mutex is poisoned")
            .push_back(expected_call);
    }
    /// Creates a new [`MockApplicationInstance`], forwarding the configured expected calls.
    ///
    /// The queue of expected calls is shared (via `Arc`) between the mock and all of
    /// its instances; the active-instance counter is decremented again in the
    /// instance's `Drop` implementation.
    pub fn create_mock_instance<Runtime>(
        &self,
        runtime: Runtime,
    ) -> MockApplicationInstance<Runtime> {
        self.active_instances.fetch_add(1, Ordering::AcqRel);
        MockApplicationInstance {
            expected_calls: self.expected_calls.clone(),
            runtime,
            active_instances: self.active_instances.clone(),
        }
    }
    /// Panics if there are still expected calls left in this [`MockApplication`].
    ///
    /// NOTE(review): the panic message suggests leftover calls mean no instance was
    /// created to consume them — confirm that reading is intended.
    pub fn assert_no_more_expected_calls(&self) {
        assert!(
            self.expected_calls.lock().unwrap().is_empty(),
            "Missing call to instantiate a `MockApplicationInstance`"
        );
    }
    /// Panics if there are still expected calls in one of the [`MockApplicationInstance`]s created
    /// from this [`MockApplication`].
    pub fn assert_no_active_instances(&self) {
        assert_eq!(
            self.active_instances.load(Ordering::Acquire),
            0,
            "At least one of `MockApplicationInstance` is still waiting for expected calls"
        );
    }
}
impl Debug for MockApplication {
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
let mut struct_formatter = formatter.debug_struct("MockApplication");
match self.expected_calls.lock() {
Ok(expected_calls) => struct_formatter.field("expected_calls", &*expected_calls),
Err(_) => struct_formatter.field("expected_calls", &"[POISONED]"),
};
struct_formatter
.field(
"active_instances",
&self.active_instances.load(Ordering::Acquire),
)
.finish()
}
}
impl PartialEq for MockApplication {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.expected_calls, &other.expected_calls)
&& Arc::ptr_eq(&self.active_instances, &other.active_instances)
}
}
impl Eq for MockApplication {}
impl<Runtime> Drop for MockApplicationInstance<Runtime> {
    fn drop(&mut self) {
        // Balance the `fetch_add` in `MockApplication::create_mock_instance` so that
        // `assert_no_active_instances` can detect leaked instances.
        self.active_instances.fetch_sub(1, Ordering::AcqRel);
    }
}
type InstantiateHandler = Box<
dyn FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<(), ExecutionError> + Send + Sync,
>;
type ExecuteOperationHandler = Box<
dyn FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<Vec<u8>, ExecutionError>
+ Send
+ Sync,
>;
type ExecuteMessageHandler = Box<
dyn FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<(), ExecutionError> + Send + Sync,
>;
type ProcessStreamHandler = Box<
dyn FnOnce(&mut ContractSyncRuntimeHandle, Vec<StreamUpdate>) -> Result<(), ExecutionError>
+ Send
+ Sync,
>;
type FinalizeHandler =
Box<dyn FnOnce(&mut ContractSyncRuntimeHandle) -> Result<(), ExecutionError> + Send + Sync>;
type HandleQueryHandler = Box<
dyn FnOnce(&mut ServiceSyncRuntimeHandle, Vec<u8>) -> Result<Vec<u8>, ExecutionError>
+ Send
+ Sync,
>;
/// An expected call to a [`MockApplicationInstance`].
#[derive(custom_debug_derive::Debug)]
pub enum ExpectedCall {
/// An expected call to [`UserContract::instantiate`].
Instantiate(#[debug(skip)] InstantiateHandler),
/// An expected call to [`UserContract::execute_operation`].
ExecuteOperation(#[debug(skip)] ExecuteOperationHandler),
/// An expected call to [`UserContract::execute_message`].
ExecuteMessage(#[debug(skip)] ExecuteMessageHandler),
/// An expected call to [`UserContract::process_streams`].
ProcessStreams(#[debug(skip)] ProcessStreamHandler),
/// An expected call to [`UserContract::finalize`].
Finalize(#[debug(skip)] FinalizeHandler),
/// An expected call to [`UserService::handle_query`].
HandleQuery(#[debug(skip)] HandleQueryHandler),
}
impl Display for ExpectedCall {
    /// Writes the snake_case name of the expected entry point.
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        formatter.write_str(match self {
            ExpectedCall::Instantiate(_) => "instantiate",
            ExpectedCall::ExecuteOperation(_) => "execute_operation",
            ExpectedCall::ExecuteMessage(_) => "execute_message",
            ExpectedCall::ProcessStreams(_) => "process_streams",
            ExpectedCall::Finalize(_) => "finalize",
            ExpectedCall::HandleQuery(_) => "handle_query",
        })
    }
}
impl ExpectedCall {
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s
/// [`UserContract::instantiate`] implementation, which is handled by the provided `handler`.
pub fn instantiate(
handler: impl FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<(), ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::Instantiate(Box::new(handler))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s
/// [`UserContract::execute_operation`] implementation, which is handled by the provided
/// `handler`.
pub fn execute_operation(
handler: impl FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<Vec<u8>, ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::ExecuteOperation(Box::new(handler))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s
/// [`UserContract::execute_message`] implementation, which is handled by the provided
/// `handler`.
pub fn execute_message(
handler: impl FnOnce(&mut ContractSyncRuntimeHandle, Vec<u8>) -> Result<(), ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::ExecuteMessage(Box::new(handler))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s
/// [`UserContract::process_streams`] implementation, which is handled by the provided
/// `handler`.
pub fn process_streams(
handler: impl FnOnce(&mut ContractSyncRuntimeHandle, Vec<StreamUpdate>) -> Result<(), ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::ProcessStreams(Box::new(handler))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s [`UserContract::finalize`]
/// implementation, which is handled by the provided `handler`.
pub fn finalize(
handler: impl FnOnce(&mut ContractSyncRuntimeHandle) -> Result<(), ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::Finalize(Box::new(handler))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s [`UserContract::finalize`]
/// implementation, which is handled by the default implementation which does nothing.
pub fn default_finalize() -> Self {
Self::finalize(|_| Ok(()))
}
/// Creates an [`ExpectedCall`] to the [`MockApplicationInstance`]'s
/// [`UserService::handle_query`] implementation, which is handled by the provided `handler`.
pub fn handle_query(
handler: impl FnOnce(&mut ServiceSyncRuntimeHandle, Vec<u8>) -> Result<Vec<u8>, ExecutionError>
+ Send
+ Sync
+ 'static,
) -> Self {
ExpectedCall::HandleQuery(Box::new(handler))
}
}
impl UserContractModule for MockApplication {
fn instantiate(
&self,
runtime: ContractSyncRuntimeHandle,
) -> Result<Box<dyn UserContract + 'static>, ExecutionError> {
Ok(Box::new(self.create_mock_instance(runtime)))
}
}
impl UserServiceModule for MockApplication {
fn instantiate(
&self,
runtime: ServiceSyncRuntimeHandle,
) -> Result<Box<dyn UserService + 'static>, ExecutionError> {
Ok(Box::new(self.create_mock_instance(runtime)))
}
}
impl<Runtime> MockApplicationInstance<Runtime> {
/// Retrieves the next [`ExpectedCall`] in the queue.
fn next_expected_call(&mut self) -> Option<ExpectedCall> {
self.expected_calls
.lock()
.expect("Queue of expected calls was poisoned")
.pop_front()
}
}
impl UserContract for MockApplicationInstance<ContractSyncRuntimeHandle> {
fn instantiate(&mut self, argument: Vec<u8>) -> Result<(), ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::Instantiate(handler)) => handler(&mut self.runtime, argument),
Some(unexpected_call) => panic!(
"Expected a call to `instantiate`, got a call to `{unexpected_call}` instead."
),
None => panic!("Unexpected call to `instantiate`"),
}
}
fn execute_operation(&mut self, operation: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::ExecuteOperation(handler)) => handler(&mut self.runtime, operation),
Some(unexpected_call) => panic!(
"Expected a call to `execute_operation`, got a call to `{unexpected_call}` instead."
),
None => panic!("Unexpected call to `execute_operation`"),
}
}
fn execute_message(&mut self, message: Vec<u8>) -> Result<(), ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::ExecuteMessage(handler)) => handler(&mut self.runtime, message),
Some(unexpected_call) => panic!(
"Expected a call to `execute_message`, got a call to `{unexpected_call}` instead."
),
None => panic!("Unexpected call to `execute_message`"),
}
}
fn process_streams(&mut self, updates: Vec<StreamUpdate>) -> Result<(), ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::ProcessStreams(handler)) => handler(&mut self.runtime, updates),
Some(unexpected_call) => panic!(
"Expected a call to `process_streams`, got a call to `{unexpected_call}` instead."
),
None => panic!("Unexpected call to `process_streams`"),
}
}
fn finalize(&mut self) -> Result<(), ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::Finalize(handler)) => handler(&mut self.runtime),
Some(unexpected_call) => {
panic!("Expected a call to `finalize`, got a call to `{unexpected_call}` instead.")
}
None => panic!("Unexpected call to `finalize`"),
}
}
}
impl UserService for MockApplicationInstance<ServiceSyncRuntimeHandle> {
fn handle_query(&mut self, query: Vec<u8>) -> Result<Vec<u8>, ExecutionError> {
match self.next_expected_call() {
Some(ExpectedCall::HandleQuery(handler)) => handler(&mut self.runtime, query),
Some(unexpected_call) => panic!(
"Expected a call to `handle_query`, got a call to `{unexpected_call}` instead."
),
None => panic!("Unexpected call to `handle_query`"),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/service_runtime_apis.rs | linera-execution/tests/service_runtime_apis.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::field_reassign_with_default)]
use std::{
collections::{BTreeMap, BTreeSet},
vec,
};
use linera_base::{data_types::Amount, identifiers::AccountOwner};
use linera_execution::{
test_utils::{
create_dummy_query_context, dummy_chain_description, test_accounts_strategy, ExpectedCall,
RegisterMockApplication, SystemExecutionState,
},
BaseRuntime, Query,
};
use test_strategy::proptest;
/// Tests the contract system API to read the chain balance.
#[proptest(async = "tokio")]
async fn test_read_chain_balance_system_api(chain_balance: Amount) {
let mut view = SystemExecutionState {
balance: chain_balance,
..SystemExecutionState::new(dummy_chain_description(0))
}
.into_view()
.await;
let (application_id, application, _) = view.register_mock_application(0).await.unwrap();
application.expect_call(ExpectedCall::handle_query(move |runtime, _query| {
assert_eq!(runtime.read_chain_balance().unwrap(), chain_balance);
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_query_context();
let query = Query::User {
application_id,
bytes: vec![],
};
view.query_application(context, query, None).await.unwrap();
}
/// Tests the contract system API to read a single account balance.
#[proptest(async = "tokio")]
async fn test_read_owner_balance_system_api(
#[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
let mut view = SystemExecutionState {
balances: accounts.clone(),
..SystemExecutionState::new(dummy_chain_description(0))
}
.into_view()
.await;
let (application_id, application, _) = view.register_mock_application(0).await.unwrap();
application.expect_call(ExpectedCall::handle_query(move |runtime, _query| {
for (owner, balance) in accounts {
assert_eq!(runtime.read_owner_balance(owner).unwrap(), balance);
}
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_query_context();
let query = Query::User {
application_id,
bytes: vec![],
};
view.query_application(context, query, None).await.unwrap();
}
/// Tests if reading the balance of a missing account returns zero.
#[proptest(async = "tokio")]
async fn test_read_owner_balance_returns_zero_for_missing_accounts(missing_account: AccountOwner) {
let mut view = SystemExecutionState::new(dummy_chain_description(0))
.into_view()
.await;
let (application_id, application, _) = view.register_mock_application(0).await.unwrap();
application.expect_call(ExpectedCall::handle_query(move |runtime, _query| {
assert_eq!(
runtime.read_owner_balance(missing_account).unwrap(),
Amount::ZERO
);
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_query_context();
let query = Query::User {
application_id,
bytes: vec![],
};
view.query_application(context, query, None).await.unwrap();
}
/// Tests the contract system API to read all account balances.
#[proptest(async = "tokio")]
async fn test_read_owner_balances_system_api(
#[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
let mut view = SystemExecutionState {
balances: accounts.clone(),
..SystemExecutionState::new(dummy_chain_description(0))
}
.into_view()
.await;
let (application_id, application, _) = view.register_mock_application(0).await.unwrap();
application.expect_call(ExpectedCall::handle_query(move |runtime, _query| {
assert_eq!(
runtime
.read_owner_balances()
.unwrap()
.into_iter()
.collect::<BTreeMap<_, _>>(),
accounts,
);
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_query_context();
let query = Query::User {
application_id,
bytes: vec![],
};
view.query_application(context, query, None).await.unwrap();
}
/// Tests the contract system API to read all account owners.
#[proptest(async = "tokio")]
async fn test_read_balance_owners_system_api(
#[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
let mut view = SystemExecutionState {
balances: accounts.clone(),
..SystemExecutionState::new(dummy_chain_description(0))
}
.into_view()
.await;
let (application_id, application, _) = view.register_mock_application(0).await.unwrap();
application.expect_call(ExpectedCall::handle_query(move |runtime, _query| {
assert_eq!(
runtime
.read_balance_owners()
.unwrap()
.into_iter()
.collect::<BTreeSet<_>>(),
accounts.keys().copied().collect::<BTreeSet<_>>()
);
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_query_context();
let query = Query::User {
application_id,
bytes: vec![],
};
view.query_application(context, query, None).await.unwrap();
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/test_execution.rs | linera-execution/tests/test_execution.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::field_reassign_with_default)]
use std::{collections::BTreeMap, vec};
use assert_matches::assert_matches;
use linera_base::{
crypto::{AccountPublicKey, ValidatorPublicKey},
data_types::{
Amount, ApplicationPermissions, Blob, BlockHeight, ChainDescription, ChainOrigin, Epoch,
InitialChainConfig, Resources, SendMessageRequest, Timestamp,
},
identifiers::{Account, AccountOwner, BlobType},
ownership::ChainOwnership,
};
use linera_execution::{
committee::Committee,
test_utils::{
blob_oracle_responses, create_dummy_message_context, create_dummy_operation_context,
create_dummy_user_application_registrations, dummy_chain_description,
dummy_chain_description_with_ownership_and_balance, ExpectedCall, RegisterMockApplication,
SystemExecutionState,
},
BaseRuntime, ContractRuntime, ExecutionError, ExecutionRuntimeContext, ExecutionStateActor,
Message, Operation, OperationContext, OutgoingMessage, Query, QueryContext, QueryOutcome,
QueryResponse, ResourceController, SystemOperation, TransactionTracker,
};
use linera_views::{batch::Batch, context::Context, views::View};
use test_case::test_case;
#[tokio::test]
async fn test_missing_bytecode_for_user_application() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (app_id, app_desc, contract_blob, service_blob) =
&create_dummy_user_application_registrations(1)?[0];
let app_desc_blob = Blob::new_application_description(app_desc);
let app_desc_blob_id = app_desc_blob.id();
let contract_blob_id = contract_blob.id();
let service_blob_id = service_blob.id();
view.context()
.extra()
.add_blobs([contract_blob.clone(), service_blob.clone(), app_desc_blob])
.await?;
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying_blobs([
app_desc_blob_id,
contract_blob_id,
service_blob_id,
]);
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: *app_id,
bytes: vec![],
},
)
.await;
assert_matches!(
result,
Err(ExecutionError::ApplicationBytecodeNotFound(desc)) if &*desc == app_desc
);
Ok(())
}
#[tokio::test]
// TODO(#1484): Split this test into multiple more specialized tests.
async fn test_simple_user_operation() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
let owner = AccountOwner::from(AccountPublicKey::test_key(0));
let state_key = vec![];
let dummy_operation = vec![1];
caller_application.expect_call({
let state_key = state_key.clone();
let dummy_operation = dummy_operation.clone();
ExpectedCall::execute_operation(move |runtime, operation| {
assert_eq!(operation, dummy_operation);
// Modify our state.
let mut state = runtime
.read_value_bytes(state_key.clone())?
.unwrap_or_default();
state.extend(operation.clone());
let mut batch = Batch::new();
batch.put_key_value_bytes(state_key, state);
runtime.write_batch(batch)?;
// Call the target application to create a session
let response = runtime.try_call_application(
/* authenticated */ true,
target_id,
vec![SessionCall::StartSession as u8],
)?;
assert!(response.is_empty());
// Call the target application to end the session
let response = runtime.try_call_application(
/* authenticated */ false,
target_id,
vec![SessionCall::EndSession as u8],
)?;
assert!(response.is_empty());
Ok(vec![])
})
});
target_application.expect_call(ExpectedCall::execute_operation(
move |_runtime, argument| {
assert_eq!(&argument, &[SessionCall::StartSession as u8]);
Ok(vec![])
},
));
target_application.expect_call(ExpectedCall::execute_operation(
move |_runtime, argument| {
assert_eq!(&argument, &[SessionCall::EndSession as u8]);
Ok(vec![])
},
));
target_application.expect_call(ExpectedCall::default_finalize());
caller_application.expect_call(ExpectedCall::default_finalize());
let context = OperationContext {
authenticated_owner: Some(owner),
..create_dummy_operation_context(chain_id)
};
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: dummy_operation.clone(),
},
)
.await
.unwrap();
let txn_outcome = txn_tracker.into_outcome().unwrap();
assert!(txn_outcome.outgoing_messages.is_empty());
{
let state_key = state_key.clone();
caller_application.expect_call(ExpectedCall::handle_query(|runtime, _query| {
let state = runtime.read_value_bytes(state_key)?.unwrap_or_default();
Ok(state)
}));
}
caller_application.expect_call(ExpectedCall::handle_query(|runtime, _query| {
let state = runtime.read_value_bytes(state_key)?.unwrap_or_default();
Ok(state)
}));
let context = QueryContext {
chain_id,
next_block_height: BlockHeight(0),
local_time: Timestamp::from(0),
};
let mut service_runtime_endpoint = context.spawn_service_runtime_actor();
assert_eq!(
view.query_application(
context,
Query::User {
application_id: caller_id,
bytes: vec![]
},
Some(&mut service_runtime_endpoint),
)
.await
.unwrap(),
QueryOutcome {
response: QueryResponse::User(dummy_operation.clone()),
operations: vec![],
}
);
assert_eq!(
view.query_application(
context,
Query::User {
application_id: caller_id,
bytes: vec![]
},
Some(&mut service_runtime_endpoint),
)
.await
.unwrap(),
QueryOutcome {
response: QueryResponse::User(dummy_operation),
operations: vec![],
}
);
Ok(())
}
/// A cross-application call to start or end a session.
///
/// Here a session is a test scenario where the transaction is prevented from succeeding while
/// there in an open session.
#[repr(u8)]
enum SessionCall {
StartSession,
EndSession,
}
/// Tests a simulated session.
#[tokio::test]
async fn test_simulated_session() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(
false,
target_id,
vec![SessionCall::StartSession as u8],
)?;
runtime.try_call_application(false, target_id, vec![SessionCall::EndSession as u8])?;
Ok(vec![])
},
));
let state_key = vec![];
target_application.expect_call(ExpectedCall::execute_operation({
let state_key = state_key.clone();
move |runtime, argument| {
assert_eq!(&argument, &[SessionCall::StartSession as u8]);
let mut batch = Batch::new();
batch.put_key_value_bytes(state_key, vec![true as u8]);
runtime.write_batch(batch)?;
Ok(vec![])
}
}));
target_application.expect_call(ExpectedCall::execute_operation({
let state_key = state_key.clone();
move |runtime, argument| {
assert_eq!(&argument, &[SessionCall::EndSession as u8]);
let mut batch = Batch::new();
batch.put_key_value_bytes(state_key, vec![false as u8]);
runtime.write_batch(batch)?;
Ok(vec![])
}
}));
target_application.expect_call(ExpectedCall::finalize(|runtime| {
match runtime.read_value_bytes(state_key)? {
Some(session_is_open) if session_is_open == vec![u8::from(false)] => Ok(()),
Some(_) => Err(ExecutionError::UserError("Leaked session".to_owned())),
_ => Err(ExecutionError::UserError(
"Missing or invalid session state".to_owned(),
)),
}
}));
caller_application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await?;
let txn_outcome = txn_tracker.into_outcome().unwrap();
assert!(txn_outcome.outgoing_messages.is_empty());
Ok(())
}
/// Tests if execution fails if a simulated session isn't properly closed.
#[tokio::test]
async fn test_simulated_session_leak() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(
false,
target_id,
vec![SessionCall::StartSession as u8],
)?;
Ok(vec![])
},
));
let state_key = vec![];
target_application.expect_call(ExpectedCall::execute_operation({
let state_key = state_key.clone();
|runtime, argument| {
assert_eq!(argument, &[SessionCall::StartSession as u8]);
let mut batch = Batch::new();
batch.put_key_value_bytes(state_key, vec![true as u8]);
runtime.write_batch(batch)?;
Ok(Vec::new())
}
}));
let error_message = "Session leaked";
target_application.expect_call(ExpectedCall::finalize(|runtime| {
match runtime.read_value_bytes(state_key)? {
Some(session_is_open) if session_is_open == vec![u8::from(false)] => Ok(()),
Some(_) => Err(ExecutionError::UserError(error_message.to_owned())),
_ => Err(ExecutionError::UserError(
"Missing or invalid session state".to_owned(),
)),
}
}));
caller_application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await;
assert_matches!(result, Err(ExecutionError::UserError(message)) if message == error_message);
Ok(())
}
/// Tests if `finalize` can cause execution to fail.
#[tokio::test]
async fn test_rejecting_block_from_finalize() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (id, application, blobs) = view.register_mock_application(0).await?;
application.expect_call(ExpectedCall::execute_operation(
move |_runtime, _operation| Ok(vec![]),
));
let error_message = "Finalize aborted execution";
application.expect_call(ExpectedCall::finalize(|_runtime| {
Err(ExecutionError::UserError(error_message.to_owned()))
}));
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying_blobs(blobs);
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: id,
bytes: vec![],
},
)
.await;
assert_matches!(result, Err(ExecutionError::UserError(message)) if message == error_message);
Ok(())
}
/// Tests if `finalize` from a called application can cause execution to fail.
#[tokio::test]
async fn test_rejecting_block_from_called_applications_finalize() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (first_id, first_application, first_app_blobs) = view.register_mock_application(0).await?;
let (second_id, second_application, second_app_blobs) =
view.register_mock_application(1).await?;
let (third_id, third_application, third_app_blobs) = view.register_mock_application(2).await?;
let (fourth_id, fourth_application, fourth_app_blobs) =
view.register_mock_application(3).await?;
first_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(false, second_id, vec![])?;
Ok(vec![])
},
));
second_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _argument| {
runtime.try_call_application(false, third_id, vec![])?;
Ok(vec![])
},
));
third_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _argument| {
runtime.try_call_application(false, fourth_id, vec![])?;
Ok(vec![])
},
));
fourth_application.expect_call(ExpectedCall::execute_operation(|_runtime, _argument| {
Ok(vec![])
}));
let error_message = "Third application aborted execution";
fourth_application.expect_call(ExpectedCall::default_finalize());
third_application.expect_call(ExpectedCall::finalize(|_runtime| {
Err(ExecutionError::UserError(error_message.to_owned()))
}));
second_application.expect_call(ExpectedCall::default_finalize());
first_application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying_blobs(
first_app_blobs
.iter()
.chain(&second_app_blobs)
.chain(&third_app_blobs)
.chain(&fourth_app_blobs),
);
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: first_id,
bytes: vec![],
},
)
.await;
assert_matches!(result, Err(ExecutionError::UserError(message)) if message == error_message);
Ok(())
}
/// Tests if `finalize` can send messages.
#[tokio::test]
async fn test_sending_message_from_finalize() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (first_id, first_application, first_app_blobs) = view.register_mock_application(0).await?;
let (second_id, second_application, second_app_blobs) =
view.register_mock_application(1).await?;
let (third_id, third_application, third_app_blobs) = view.register_mock_application(2).await?;
let (fourth_id, fourth_application, fourth_app_blobs) =
view.register_mock_application(3).await?;
let destination = dummy_chain_description(1).id();
let first_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"first".to_vec(),
};
let expected_first_message = OutgoingMessage::new(
destination,
Message::User {
application_id: third_id,
bytes: b"first".to_vec(),
},
);
first_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(false, second_id, vec![])?;
Ok(vec![])
},
));
second_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _argument| {
runtime.try_call_application(false, third_id, vec![])?;
Ok(vec![])
},
));
third_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _argument| {
runtime.send_message(first_message)?;
runtime.try_call_application(false, fourth_id, vec![])?;
Ok(vec![])
},
));
fourth_application.expect_call(ExpectedCall::execute_operation(|_runtime, _argument| {
Ok(vec![])
}));
let second_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"second".to_vec(),
};
let third_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"third".to_vec(),
};
let fourth_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"fourth".to_vec(),
};
let expected_second_message = OutgoingMessage::new(
destination,
Message::User {
application_id: third_id,
bytes: b"second".to_vec(),
},
);
let expected_third_message = OutgoingMessage::new(
destination,
Message::User {
application_id: third_id,
bytes: b"third".to_vec(),
},
);
let expected_fourth_message = OutgoingMessage::new(
destination,
Message::User {
application_id: first_id,
bytes: b"fourth".to_vec(),
},
);
fourth_application.expect_call(ExpectedCall::default_finalize());
third_application.expect_call(ExpectedCall::finalize(|runtime| {
runtime.send_message(second_message)?;
runtime.send_message(third_message)?;
Ok(())
}));
second_application.expect_call(ExpectedCall::default_finalize());
first_application.expect_call(ExpectedCall::finalize(|runtime| {
runtime.send_message(fourth_message)?;
Ok(())
}));
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying_blobs(
first_app_blobs
.iter()
.chain(&second_app_blobs)
.chain(&third_app_blobs)
.chain(&fourth_app_blobs),
);
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: first_id,
bytes: vec![],
},
)
.await?;
let txn_outcome = txn_tracker.into_outcome().unwrap();
let mut expected = TransactionTracker::default();
expected.add_outgoing_messages(vec![
expected_first_message,
expected_second_message,
expected_third_message,
expected_fourth_message,
]);
assert_eq!(
txn_outcome.outgoing_messages,
expected.into_outcome().unwrap().outgoing_messages
);
Ok(())
}
/// Tests if an application can't perform cross-application calls during `finalize`.
#[tokio::test]
async fn test_cross_application_call_from_finalize() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, _target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |_runtime, _operation| Ok(vec![]),
));
caller_application.expect_call(ExpectedCall::finalize({
move |runtime| {
runtime.try_call_application(false, target_id, vec![])?;
Ok(())
}
}));
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await;
let expected_caller_id = caller_id;
let expected_callee_id = target_id;
assert_matches!(
result,
Err(ExecutionError::CrossApplicationCallInFinalize { caller_id, callee_id })
if *caller_id == expected_caller_id && *callee_id == expected_callee_id
);
Ok(())
}
/// Tests if an application can't perform cross-application calls during `finalize`, even if they
/// have already called the same application.
#[tokio::test]
async fn test_cross_application_call_from_finalize_of_called_application() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(false, target_id, vec![])?;
Ok(vec![])
},
));
target_application.expect_call(ExpectedCall::execute_operation(|_runtime, _argument| {
Ok(vec![])
}));
target_application.expect_call(ExpectedCall::finalize({
move |runtime| {
runtime.try_call_application(false, caller_id, vec![])?;
Ok(())
}
}));
caller_application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await;
let expected_caller_id = target_id;
let expected_callee_id = caller_id;
assert_matches!(
result,
Err(ExecutionError::CrossApplicationCallInFinalize { caller_id, callee_id })
if *caller_id == expected_caller_id && *callee_id == expected_callee_id
);
Ok(())
}
/// Tests if a called application can't perform cross-application calls during `finalize`.
#[tokio::test]
async fn test_calling_application_again_from_finalize() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(false, target_id, vec![])?;
Ok(vec![])
},
));
target_application.expect_call(ExpectedCall::execute_operation(|_runtime, _argument| {
Ok(vec![])
}));
target_application.expect_call(ExpectedCall::default_finalize());
caller_application.expect_call(ExpectedCall::finalize({
move |runtime| {
runtime.try_call_application(false, target_id, vec![])?;
Ok(())
}
}));
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await;
let expected_caller_id = caller_id;
let expected_callee_id = target_id;
assert_matches!(
result,
Err(ExecutionError::CrossApplicationCallInFinalize { caller_id, callee_id })
if *caller_id == expected_caller_id && *callee_id == expected_callee_id
);
Ok(())
}
/// Tests if user application errors when handling cross-application calls are handled correctly.
///
/// Errors in secondary [`UserContract::execute_operation`] executions should be handled correctly
/// without panicking.
#[tokio::test]
async fn test_cross_application_error() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(/* authenticated */ false, target_id, vec![])?;
Ok(vec![])
},
));
let error_message = "Cross-application call failed";
target_application.expect_call(ExpectedCall::execute_operation(|_runtime, _argument| {
Err(ExecutionError::UserError(error_message.to_owned()))
}));
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await;
assert_matches!(result, Err(ExecutionError::UserError(message)) if message == error_message);
Ok(())
}
/// Tests if an application is scheduled to be registered together with any messages it sends to
/// other chains.
#[tokio::test]
async fn test_simple_message() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (application_id, application, blobs) = view.register_mock_application(0).await?;
let destination = dummy_chain_description(1).id();
let dummy_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"msg".to_vec(),
};
let expected_dummy_message = OutgoingMessage::new(
destination,
Message::User {
application_id,
bytes: b"msg".to_vec(),
},
);
application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.send_message(dummy_message)?;
Ok(vec![])
},
));
application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying_blobs(blobs);
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id,
bytes: vec![],
},
)
.await?;
let txn_outcome = txn_tracker.into_outcome().unwrap();
let mut expected = TransactionTracker::default();
expected.add_outgoing_message(expected_dummy_message);
assert_eq!(
txn_outcome.outgoing_messages,
expected.into_outcome().unwrap().outgoing_messages
);
Ok(())
}
/// Tests if a message is scheduled to be sent while an application is handling a cross-application
/// call.
#[tokio::test]
async fn test_message_from_cross_application_call() -> anyhow::Result<()> {
let (state, chain_id) = SystemExecutionState::dummy_chain_state(0);
let mut view = state.into_view().await;
let (caller_id, caller_application, caller_blobs) = view.register_mock_application(0).await?;
let (target_id, target_application, target_blobs) = view.register_mock_application(1).await?;
caller_application.expect_call(ExpectedCall::execute_operation(
move |runtime, _operation| {
runtime.try_call_application(/* authenticated */ false, target_id, vec![])?;
Ok(vec![])
},
));
let destination = dummy_chain_description(1).id();
let dummy_message = SendMessageRequest {
destination,
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message: b"msg".to_vec(),
};
let expected_dummy_message = OutgoingMessage::new(
destination,
Message::User {
application_id: target_id,
bytes: b"msg".to_vec(),
},
);
target_application.expect_call(ExpectedCall::execute_operation(|runtime, _argument| {
runtime.send_message(dummy_message)?;
Ok(vec![])
}));
target_application.expect_call(ExpectedCall::default_finalize());
caller_application.expect_call(ExpectedCall::default_finalize());
let context = create_dummy_operation_context(chain_id);
let mut controller = ResourceController::default();
let mut txn_tracker =
TransactionTracker::new_replaying_blobs(caller_blobs.iter().chain(&target_blobs));
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_operation(
context,
Operation::User {
application_id: caller_id,
bytes: vec![],
},
)
.await?;
let txn_outcome = txn_tracker.into_outcome().unwrap();
let mut expected = TransactionTracker::default();
expected.add_outgoing_message(expected_dummy_message);
assert_eq!(
txn_outcome.outgoing_messages,
expected.into_outcome().unwrap().outgoing_messages
);
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/revm.rs | linera-execution/tests/revm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(with_revm)]
use std::sync::Arc;
use alloy_sol_types::{sol, SolCall, SolValue};
use linera_base::{
data_types::{Amount, Blob, BlockHeight, Timestamp},
vm::{EvmInstantiation, EvmOperation, EvmQuery},
};
use linera_execution::{
evm::revm::{EvmContractModule, EvmServiceModule},
test_utils::{
create_dummy_user_application_description, dummy_chain_description,
solidity::{load_solidity_example, read_evm_u64_entry},
SystemExecutionState,
},
ExecutionRuntimeConfig, ExecutionRuntimeContext, ExecutionStateActor, Operation,
OperationContext, Query, QueryContext, QueryResponse, ResourceControlPolicy,
ResourceController, ResourceTracker, TransactionTracker,
};
use linera_views::{context::Context as _, views::View};
fn operation_to_bytes(operation: impl alloy_sol_types::SolCall) -> Result<Vec<u8>, bcs::Error> {
let operation = EvmOperation::new(Amount::ZERO, operation.abi_encode());
operation.to_bytes()
}
/// Checks that the EVM "counter" example contract can be instantiated and that repeated
/// `increment` operations update the stored value, with each operation charged for EVM
/// fuel (one atto per fuel unit) against the chain balance.
#[tokio::test]
async fn test_fuel_for_counter_revm_application() -> anyhow::Result<()> {
    // Compile the Solidity counter example into an EVM module.
    let module = load_solidity_example("tests/fixtures/evm_example_counter.sol")?;
    // ABI definitions matching the fixture contract's constructor and entry points.
    sol! {
        struct ConstructorArgs {
            uint64 initial_value;
        }
        function increment(uint64 input);
        function get_value();
    }
    let initial_value = 10000;
    // `value` tracks the expected counter value across the increments below.
    let mut value = initial_value;
    let args = ConstructorArgs { initial_value };
    // Constructor and instantiation arguments are passed as JSON-encoded bytes.
    let constructor_argument = args.abi_encode();
    let constructor_argument = serde_json::to_string(&constructor_argument)?.into_bytes();
    let instantiation_argument = EvmInstantiation::default();
    let instantiation_argument = serde_json::to_string(&instantiation_argument)?.into_bytes();
    let state = SystemExecutionState {
        description: Some(dummy_chain_description(0)),
        ..Default::default()
    };
    let (mut app_desc, contract_blob, service_blob) = create_dummy_user_application_description(1);
    app_desc.parameters = constructor_argument;
    let chain_id = app_desc.creator_chain_id;
    let mut view = state
        .into_view_with(chain_id, ExecutionRuntimeConfig::default())
        .await;
    let app_id = From::from(&app_desc);
    // Blob IDs that each transaction below replays instead of fetching.
    let app_desc_blob_id = Blob::new_application_description(&app_desc).id();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    // Register the contract and service modules in the runtime's module caches.
    let contract = EvmContractModule::Revm {
        module: module.clone(),
    };
    {
        let context = view.context();
        let pinned = context.extra().user_contracts().pin();
        pinned.insert(app_id, contract.clone().into());
    }
    let service = EvmServiceModule::Revm { module };
    {
        let context = view.context();
        let pinned = context.extra().user_services().pin();
        pinned.insert(app_id, service.into());
    }
    // Run the contract's constructor / instantiation step.
    view.simulate_instantiation(
        contract.into(),
        Timestamp::from(2),
        app_desc,
        instantiation_argument,
        contract_blob,
        service_blob,
    )
    .await?;
    let operation_context = OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: None,
        timestamp: Default::default(),
    };
    let query_context = QueryContext {
        chain_id,
        next_block_height: BlockHeight(0),
        local_time: Timestamp::from(0),
    };
    let increments = [2_u64, 9_u64, 7_u64, 1000_u64];
    // One atto per EVM fuel unit, so fuel is paid directly from the chain balance.
    let policy = ResourceControlPolicy {
        evm_fuel_unit: Amount::from_attos(1),
        ..ResourceControlPolicy::default()
    };
    let amount = Amount::from_tokens(1);
    *view.system.balance.get_mut() = amount;
    let mut controller =
        ResourceController::new(Arc::new(policy), ResourceTracker::default(), None);
    for increment in &increments {
        let mut txn_tracker = TransactionTracker::new_replaying_blobs([
            app_desc_blob_id,
            contract_blob_id,
            service_blob_id,
        ]);
        value += increment;
        // Execute the `increment` operation.
        let operation = incrementCall { input: *increment };
        let bytes = operation_to_bytes(operation)?;
        let operation = Operation::User {
            application_id: app_id,
            bytes,
        };
        ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
            .execute_operation(operation_context, operation)
            .await?;
        // Query `get_value` through the service and check the counter advanced.
        let query = get_valueCall {};
        let query = query.abi_encode();
        let query = EvmQuery::Query(query);
        let bytes = serde_json::to_vec(&query)?;
        let query = Query::User {
            application_id: app_id,
            bytes,
        };
        let result = view.query_application(query_context, query, None).await?;
        let QueryResponse::User(result) = result.response else {
            anyhow::bail!("Wrong QueryResponse result");
        };
        let result: serde_json::Value = serde_json::from_slice(&result).unwrap();
        let result = read_evm_u64_entry(result);
        assert_eq!(result, value);
    }
    Ok(())
}
/// Checks that executing an operation fails when the configured
/// `maximum_evm_fuel_per_block` is too low for the call to complete.
#[tokio::test]
async fn test_terminate_execute_operation_by_lack_of_fuel() -> anyhow::Result<()> {
    // Compile the Solidity counter example into an EVM module.
    let module = load_solidity_example("tests/fixtures/evm_example_counter.sol")?;
    // ABI definitions matching the fixture contract's constructor and entry points.
    sol! {
        struct ConstructorArgs {
            uint64 initial_value;
        }
        function increment(uint64 input);
        function get_value();
    }
    let initial_value = 10000;
    let args = ConstructorArgs { initial_value };
    // Constructor and instantiation arguments are passed as JSON-encoded bytes.
    let constructor_argument = args.abi_encode();
    let constructor_argument = serde_json::to_string(&constructor_argument)?.into_bytes();
    let instantiation_argument = EvmInstantiation::default();
    let instantiation_argument = serde_json::to_string(&instantiation_argument)?.into_bytes();
    let state = SystemExecutionState {
        description: Some(dummy_chain_description(0)),
        ..Default::default()
    };
    let (mut app_desc, contract_blob, service_blob) = create_dummy_user_application_description(1);
    app_desc.parameters = constructor_argument;
    let chain_id = app_desc.creator_chain_id;
    let mut view = state
        .into_view_with(chain_id, ExecutionRuntimeConfig::default())
        .await;
    let app_id = From::from(&app_desc);
    let app_desc_blob_id = Blob::new_application_description(&app_desc).id();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    // Register the contract and service modules in the runtime's module caches.
    let contract = EvmContractModule::Revm {
        module: module.clone(),
    };
    {
        let context = view.context();
        let pinned = context.extra().user_contracts().pin();
        pinned.insert(app_id, contract.clone().into());
    }
    let service = EvmServiceModule::Revm { module };
    {
        let context = view.context();
        let pinned = context.extra().user_services().pin();
        pinned.insert(app_id, service.into());
    }
    view.simulate_instantiation(
        contract.into(),
        Timestamp::from(2),
        app_desc,
        instantiation_argument,
        contract_blob,
        service_blob,
    )
    .await?;
    let operation_context = OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: None,
        timestamp: Default::default(),
    };
    // Per-block fuel cap expected to be insufficient for the increment below.
    let policy = ResourceControlPolicy {
        evm_fuel_unit: Amount::from_attos(1),
        maximum_evm_fuel_per_block: 20000,
        ..ResourceControlPolicy::default()
    };
    let amount = Amount::from_tokens(1);
    *view.system.balance.get_mut() = amount;
    let mut controller =
        ResourceController::new(Arc::new(policy), ResourceTracker::default(), None);
    // Trying the increment, should fail
    let mut txn_tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    let input = 2;
    let operation = incrementCall { input };
    let bytes = operation_to_bytes(operation)?;
    let operation = Operation::User {
        application_id: app_id,
        bytes,
    };
    let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_operation(operation_context, operation)
        .await;
    // The operation must be terminated with an error rather than succeed.
    assert!(result.is_err());
    Ok(())
}
/// Checks that a service query is terminated with an error when it exhausts the fuel
/// available to service calls (the fixture's `too_long_run` is expected to exceed the
/// budget — see `evm_too_long_service.sol`).
#[tokio::test]
async fn test_terminate_query_by_lack_of_fuel() -> anyhow::Result<()> {
    let module = load_solidity_example("tests/fixtures/evm_too_long_service.sol")?;
    // ABI definitions matching the fixture contract's constructor and entry point.
    sol! {
        struct ConstructorArgs {
            uint64 initial_value;
        }
        function too_long_run();
    }
    let args = ConstructorArgs { initial_value: 0 };
    // Constructor and instantiation arguments are passed as JSON-encoded bytes.
    let constructor_argument = args.abi_encode();
    let constructor_argument = serde_json::to_string(&constructor_argument)?.into_bytes();
    let instantiation_argument = EvmInstantiation::default();
    let instantiation_argument = serde_json::to_string(&instantiation_argument)?.into_bytes();
    let state = SystemExecutionState {
        description: Some(dummy_chain_description(0)),
        ..Default::default()
    };
    let (mut app_desc, contract_blob, service_blob) = create_dummy_user_application_description(1);
    app_desc.parameters = constructor_argument;
    let chain_id = app_desc.creator_chain_id;
    let mut view = state
        .into_view_with(chain_id, ExecutionRuntimeConfig::default())
        .await;
    let app_id = From::from(&app_desc);
    // Register the contract and service modules in the runtime's module caches.
    let contract = EvmContractModule::Revm {
        module: module.clone(),
    };
    {
        let context = view.context();
        let pinned = context.extra().user_contracts().pin();
        pinned.insert(app_id, contract.clone().into());
    }
    let service = EvmServiceModule::Revm { module };
    {
        let context = view.context();
        let pinned = context.extra().user_services().pin();
        pinned.insert(app_id, service.into());
    }
    view.simulate_instantiation(
        contract.into(),
        Timestamp::from(2),
        app_desc,
        instantiation_argument,
        contract_blob,
        service_blob,
    )
    .await?;
    let query_context = QueryContext {
        chain_id,
        next_block_height: BlockHeight(0),
        local_time: Timestamp::from(0),
    };
    let amount = Amount::from_tokens(1);
    *view.system.balance.get_mut() = amount;
    // Trying to read the value, should fail
    let query = too_long_runCall {};
    let query = query.abi_encode();
    let query = EvmQuery::Query(query);
    let bytes = serde_json::to_vec(&query)?;
    let query = Query::User {
        application_id: app_id,
        bytes,
    };
    let result = view.query_application(query_context, query, None).await;
    // The runaway query must be aborted with an error.
    assert!(result.is_err());
    Ok(())
}
/// Exercises miscellaneous EVM runtime features: a reverting call surfaces as an
/// execution error, an Ethereum precompile (SHA-256) can be invoked from a query, and
/// the deployed contract observes the EVM address derived from its application ID.
#[tokio::test]
async fn test_basic_evm_features() -> anyhow::Result<()> {
    let module = load_solidity_example("tests/fixtures/evm_basic_check.sol")?;
    // ABI definitions matching the fixture contract's entry points.
    sol! {
        function failing_function();
        function test_precompile_sha256();
        function check_contract_address(address evm_address);
    }
    // The contract takes no constructor arguments; arguments are JSON-encoded bytes.
    let constructor_argument = Vec::<u8>::new();
    let constructor_argument = serde_json::to_string(&constructor_argument)?.into_bytes();
    let instantiation_argument = EvmInstantiation::default();
    let instantiation_argument = serde_json::to_string(&instantiation_argument)?.into_bytes();
    let state = SystemExecutionState {
        description: Some(dummy_chain_description(0)),
        ..Default::default()
    };
    let (mut app_desc, contract_blob, service_blob) = create_dummy_user_application_description(1);
    app_desc.parameters = constructor_argument;
    let chain_id = app_desc.creator_chain_id;
    let mut view = state
        .into_view_with(chain_id, ExecutionRuntimeConfig::default())
        .await;
    let app_id = From::from(&app_desc);
    let app_desc_blob_id = Blob::new_application_description(&app_desc).id();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    // Register the contract and service modules in the runtime's module caches.
    let contract = EvmContractModule::Revm {
        module: module.clone(),
    };
    {
        let context = view.context();
        let pinned = context.extra().user_contracts().pin();
        pinned.insert(app_id, contract.clone().into());
    }
    let service = EvmServiceModule::Revm { module };
    {
        let context = view.context();
        let pinned = context.extra().user_services().pin();
        pinned.insert(app_id, service.into());
    }
    view.simulate_instantiation(
        contract.into(),
        Timestamp::from(2),
        app_desc,
        instantiation_argument,
        contract_blob,
        service_blob,
    )
    .await?;
    let operation_context = OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: None,
        timestamp: Default::default(),
    };
    let policy = ResourceControlPolicy {
        evm_fuel_unit: Amount::from_attos(1),
        maximum_evm_fuel_per_block: 20000,
        ..ResourceControlPolicy::default()
    };
    let mut controller =
        ResourceController::new(Arc::new(policy), ResourceTracker::default(), None);
    let mut txn_tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    let query_context = QueryContext {
        chain_id,
        next_block_height: BlockHeight(0),
        local_time: Timestamp::from(0),
    };
    // Trying a failing function, should be an error
    let operation = failing_functionCall {};
    let bytes = operation_to_bytes(operation)?;
    let operation = Operation::User {
        application_id: app_id,
        bytes,
    };
    let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_operation(operation_context, operation)
        .await;
    assert!(result.is_err());
    // Trying a call to an ethereum precompile function
    let query = test_precompile_sha256Call {};
    let query = query.abi_encode();
    let query = EvmQuery::Query(query);
    let bytes = serde_json::to_vec(&query)?;
    let query = Query::User {
        application_id: app_id,
        bytes,
    };
    let result = view.query_application(query_context, query, None).await?;
    let QueryResponse::User(result) = result.response else {
        anyhow::bail!("Wrong QueryResponse result");
    };
    let result: serde_json::Value = serde_json::from_slice(&result).unwrap();
    assert_eq!(read_evm_u64_entry(result), 0);
    // Testing that the created contract has the right address
    let evm_address = app_id.evm_address();
    let query = check_contract_addressCall { evm_address };
    let query = query.abi_encode();
    let query = EvmQuery::Query(query);
    let bytes = serde_json::to_vec(&query)?;
    let query = Query::User {
        application_id: app_id,
        bytes,
    };
    let result = view.query_application(query_context, query, None).await?;
    let QueryResponse::User(result) = result.response else {
        anyhow::bail!("Wrong QueryResponse result");
    };
    let result: serde_json::Value = serde_json::from_slice(&result).unwrap();
    // 49 is presumably the fixture's success marker when the address matches —
    // confirm against `evm_basic_check.sol`.
    assert_eq!(read_evm_u64_entry(result), 49);
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/wasm.rs | linera-execution/tests/wasm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg(with_wasm_runtime)]
use std::sync::Arc;
use linera_base::data_types::{Amount, Blob, BlockHeight, Timestamp};
use linera_execution::{
test_utils::{
create_dummy_user_application_description, dummy_chain_description, SystemExecutionState,
},
ExecutionRuntimeConfig, ExecutionRuntimeContext, ExecutionStateActor, Operation,
OperationContext, Query, QueryContext, QueryOutcome, QueryResponse, ResourceControlPolicy,
ResourceController, ResourceTracker, TransactionTracker, WasmContractModule, WasmRuntime,
WasmServiceModule,
};
use linera_views::{context::Context as _, views::View};
use serde_json::json;
use test_case::test_case;
/// Test if the "counter" example application in `linera-sdk` compiled to a Wasm module can be
/// called correctly and consume the expected amount of fuel.
///
/// The counter is incremented several times, the consumed fuel is compared to the pinned
/// `expected_fuel`, the chain balance is checked to have been charged one atto per fuel
/// unit, and a GraphQL query verifies the final counter value.
///
/// To update the bytecode files, run `linera-execution/update_wasm_fixtures.sh`.
#[cfg_attr(with_wasmer, test_case(WasmRuntime::Wasmer, 71_229; "wasmer"))]
#[cfg_attr(with_wasmtime, test_case(WasmRuntime::Wasmtime, 71_229; "wasmtime"))]
#[test_log::test(tokio::test(flavor = "multi_thread"))]
async fn test_fuel_for_counter_wasm_application(
    wasm_runtime: WasmRuntime,
    expected_fuel: u64,
) -> anyhow::Result<()> {
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let state = SystemExecutionState {
        description: Some(chain_description),
        ..Default::default()
    };
    let mut view = state
        .into_view_with(chain_id, ExecutionRuntimeConfig::default())
        .await;
    let (app_desc, contract_blob, service_blob) = create_dummy_user_application_description(1);
    let app_id = From::from(&app_desc);
    let app_desc_blob_id = Blob::new_application_description(&app_desc).id();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    // Load the precompiled fixture bytecode and register both modules in the caches.
    let contract =
        WasmContractModule::from_file("tests/fixtures/counter_contract.wasm", wasm_runtime).await?;
    {
        let context = view.context();
        let pinned = context.extra().user_contracts().pin();
        pinned.insert(app_id, contract.into());
    }
    let service =
        WasmServiceModule::from_file("tests/fixtures/counter_service.wasm", wasm_runtime).await?;
    {
        let context = view.context();
        let pinned = context.extra().user_services().pin();
        pinned.insert(app_id, service.into());
    }
    // Make the backing blobs available to the execution context.
    view.context()
        .extra()
        .add_blobs([
            contract_blob,
            service_blob,
            Blob::new_application_description(&app_desc),
        ])
        .await?;
    let context = OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: None,
        timestamp: Default::default(),
    };
    let increments = [2_u64, 9, 7, 1000];
    // One atto per Wasm fuel unit, so consumed fuel maps 1:1 onto spent balance.
    let policy = ResourceControlPolicy {
        wasm_fuel_unit: Amount::from_attos(1),
        ..ResourceControlPolicy::default()
    };
    let amount = Amount::from_tokens(1);
    *view.system.balance.get_mut() = amount;
    let mut controller =
        ResourceController::new(Arc::new(policy), ResourceTracker::default(), None);
    for (index, increment) in increments.iter().enumerate() {
        // Only the first transaction replays the blob IDs; subsequent ones pass none.
        let mut txn_tracker = TransactionTracker::new_replaying_blobs(if index == 0 {
            vec![app_desc_blob_id, contract_blob_id, service_blob_id]
        } else {
            vec![]
        });
        ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
            .execute_operation(
                context,
                Operation::user_without_abi(app_id, increment).unwrap(),
            )
            .await?;
        let txn_outcome = txn_tracker.into_outcome().unwrap();
        // The counter contract sends no cross-chain messages.
        assert!(txn_outcome.outgoing_messages.is_empty());
    }
    // Total fuel must match the pinned expectation, and the chain balance must have
    // been charged exactly one atto per fuel unit.
    assert_eq!(controller.tracker.wasm_fuel, expected_fuel);
    assert_eq!(
        controller
            .with_state(&mut view.system)
            .await?
            .balance()
            .unwrap(),
        Amount::ONE
            .try_sub(Amount::from_attos(expected_fuel as u128))
            .unwrap()
    );
    // Query the service: the counter must hold the sum of all increments.
    let context = QueryContext {
        chain_id,
        next_block_height: BlockHeight(0),
        local_time: Timestamp::from(0),
    };
    let mut service_runtime_endpoint = context.spawn_service_runtime_actor();
    let expected_value = async_graphql::Response::new(
        async_graphql::Value::from_json(json!({"value" : increments.into_iter().sum::<u64>()}))
            .unwrap(),
    );
    let request = async_graphql::Request::new("query { value }");
    let outcome = view
        .query_application(
            context,
            Query::user_without_abi(app_id, &request).unwrap(),
            Some(&mut service_runtime_endpoint),
        )
        .await?;
    let QueryOutcome {
        response: QueryResponse::User(serialized_value),
        operations,
    } = outcome
    else {
        panic!("unexpected response")
    };
    assert_eq!(
        serde_json::from_slice::<async_graphql::Response>(&serialized_value).unwrap(),
        expected_value
    );
    assert!(operations.is_empty());
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/contract_runtime_apis.rs | linera-execution/tests/contract_runtime_apis.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
vec,
};
use assert_matches::assert_matches;
use linera_base::{
crypto::{AccountPublicKey, CryptoHash},
data_types::{
Amount, ApplicationDescription, ApplicationPermissions, Blob, BlockHeight, Bytecode,
CompressedBytecode, OracleResponse,
},
http,
identifiers::{Account, AccountOwner, ApplicationId, DataBlobHash, ModuleId},
ownership::ChainOwnership,
vm::VmRuntime,
};
use linera_execution::{
test_utils::{
create_dummy_message_context, create_dummy_operation_context, dummy_chain_description,
dummy_chain_description_with_ownership_and_balance, test_accounts_strategy, ExpectedCall,
RegisterMockApplication, SystemExecutionState,
},
BaseRuntime, ContractRuntime, ExecutionError, ExecutionStateActor, Message, MessageContext,
Operation, OperationContext, ResourceController, SystemExecutionStateView,
TestExecutionRuntimeContext, TransactionOutcome, TransactionTracker,
};
use linera_views::context::MemoryContext;
use test_case::{test_case, test_matrix};
use test_strategy::proptest;
/// Tests the contract system API to transfer tokens between accounts.
///
/// A mock application issues the transfer from `sender`'s account; the resulting system
/// message is then executed and the `recipient`'s balance is verified.
#[test_matrix(
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application],
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application]
)]
#[test_log::test(tokio::test)]
async fn test_transfer_system_api(
    sender: TransferTestEndpoint,
    recipient: TransferTestEndpoint,
) -> anyhow::Result<()> {
    let amount = Amount::ONE;
    // Build the sender's system state once; read the chain ID through a borrow instead
    // of moving `description` out, so the same state can become the view below. (The
    // previous code constructed the state twice to work around the partial move.)
    let state = sender.create_system_state(amount);
    let chain_id = state
        .description
        .as_ref()
        .expect("System state created by sender should have a `ChainDescription`")
        .id();
    let mut view = state.into_view().await;
    // Blobs backing the mock application; their IDs are replayed by the transaction.
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let application_description = TransferTestEndpoint::sender_application_description();
    let application_description_blob = Blob::new_application_description(&application_description);
    let app_desc_blob_id = application_description_blob.id();
    let (application_id, application) = view
        .register_mock_application_with(application_description, contract_blob, service_blob)
        .await?;
    // The mocked contract performs the transfer when the operation executes.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            runtime.transfer(
                sender.sender_account_owner(),
                Account {
                    owner: recipient.recipient_account_owner(),
                    chain_id: dummy_chain_description(0).id(),
                },
                amount,
            )?;
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());
    let context = OperationContext {
        authenticated_owner: sender.signer(),
        ..create_dummy_operation_context(chain_id)
    };
    let mut controller = ResourceController::default();
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    ExecutionStateActor::new(&mut view, &mut tracker, &mut controller)
        .execute_operation(context, operation)
        .await?;
    let TransactionOutcome {
        outgoing_messages,
        oracle_responses,
        ..
    } = tracker.into_outcome()?;
    // The transfer produces exactly one system message carrying the credit.
    assert_eq!(outgoing_messages.len(), 1);
    assert_eq!(oracle_responses.len(), 3);
    assert!(matches!(outgoing_messages[0].message, Message::System(_)));
    // Deliver the credit message and verify the recipient received the tokens.
    let mut txn_tracker = TransactionTracker::new_replaying(Vec::new());
    ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_message(
            create_dummy_message_context(chain_id, None),
            outgoing_messages[0].message.clone(),
            None,
        )
        .await?;
    recipient.verify_recipient(&view.system, amount).await?;
    Ok(())
}
/// Tests that transferring tokens fails with [`ExecutionError::UnauthenticatedTransferOwner`]
/// when the authenticated signer does not control the debited account.
#[test_matrix(
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application],
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application]
)]
#[test_log::test(tokio::test)]
async fn test_unauthorized_transfer_system_api(
    sender: TransferTestEndpoint,
    recipient: TransferTestEndpoint,
) -> anyhow::Result<()> {
    let amount = Amount::ONE;
    // Build the sender's system state once; read the chain ID through a borrow instead
    // of moving `description` out, so the same state can become the view below. (The
    // previous code constructed the state twice to work around the partial move.)
    let state = sender.create_system_state(amount);
    let chain_id = state
        .description
        .as_ref()
        .expect("System state created by sender should have a `ChainDescription`")
        .id();
    let mut view = state.into_view().await;
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let application_description = TransferTestEndpoint::sender_application_description();
    let application_description_blob = Blob::new_application_description(&application_description);
    let app_desc_blob_id = application_description_blob.id();
    let (application_id, application) = view
        .register_mock_application_with(application_description, contract_blob, service_blob)
        .await?;
    // The mocked contract attempts the transfer from an owner the signer does not control.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            runtime.transfer(
                sender.unauthorized_sender_account_owner(),
                Account {
                    owner: recipient.recipient_account_owner(),
                    chain_id: dummy_chain_description(0).id(),
                },
                amount,
            )?;
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());
    let context = OperationContext {
        authenticated_owner: sender.unauthorized_signer(),
        ..create_dummy_operation_context(chain_id)
    };
    let mut controller = ResourceController::default();
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut txn_tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    let result = ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_operation(context, operation)
        .await;
    // The runtime must reject the debit with the dedicated authentication error.
    assert_matches!(result, Err(ExecutionError::UnauthenticatedTransferOwner));
    Ok(())
}
/// Tests the contract system API to claim tokens from a remote account.
///
/// The claim is issued on the claimer chain and travels to the source chain as a
/// system message; handling it there empties the source chain and emits a credit
/// message back, which finally pays the recipient on the claimer chain.
#[test_matrix(
    [TransferTestEndpoint::User, TransferTestEndpoint::Application],
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application]
)]
#[test_log::test(tokio::test)]
async fn test_claim_system_api(
    sender: TransferTestEndpoint,
    recipient: TransferTestEndpoint,
) -> anyhow::Result<()> {
    let amount = Amount::ONE;
    // Two chains: the source holds the tokens, the claimer runs the application.
    let claimer_chain_description = dummy_chain_description(1);
    let claimer_chain_id = claimer_chain_description.id();
    let source_state = sender.create_system_state(amount);
    let claimer_state = SystemExecutionState {
        description: Some(claimer_chain_description),
        ..SystemExecutionState::default()
    };
    let source_chain_id = source_state
        .description
        .as_ref()
        .expect("System state created by sender should have a `ChainDescription`")
        .id();
    let mut source_view = source_state.into_view().await;
    let mut claimer_view = claimer_state.into_view().await;
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let application_description = TransferTestEndpoint::sender_application_description();
    let application_description_blob = Blob::new_application_description(&application_description);
    let app_desc_blob_id = application_description_blob.id();
    let (application_id, application) = claimer_view
        .register_mock_application_with(application_description, contract_blob, service_blob)
        .await?;
    // The mocked contract issues the claim from the source account to the recipient.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            runtime.claim(
                Account {
                    owner: sender.sender_account_owner(),
                    chain_id: source_chain_id,
                },
                Account {
                    owner: recipient.recipient_account_owner(),
                    chain_id: claimer_chain_id,
                },
                amount,
            )?;
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());
    let context = OperationContext {
        authenticated_owner: sender.signer(),
        chain_id: claimer_chain_id,
        ..create_dummy_operation_context(claimer_chain_id)
    };
    let mut controller = ResourceController::default();
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    // Execute the claim operation on the claimer chain.
    ExecutionStateActor::new(&mut claimer_view, &mut tracker, &mut controller)
        .execute_operation(context, operation)
        .await?;
    let TransactionOutcome {
        outgoing_messages,
        oracle_responses,
        ..
    } = tracker.into_outcome()?;
    // The claim produces exactly one system message addressed to the source chain.
    assert_eq!(outgoing_messages.len(), 1);
    assert_eq!(oracle_responses.len(), 3);
    assert!(matches!(outgoing_messages[0].message, Message::System(_)));
    // Deliver the claim message on the source chain; this debits the source account.
    let mut tracker = TransactionTracker::new_replaying(Vec::new());
    ExecutionStateActor::new(&mut source_view, &mut tracker, &mut controller)
        .execute_message(
            create_dummy_message_context(source_chain_id, None),
            outgoing_messages[0].message.clone(),
            None,
        )
        .await?;
    // All funds have left the source chain: no chain balance and no account balances.
    assert_eq!(*source_view.system.balance.get(), Amount::ZERO);
    source_view
        .system
        .balances
        .for_each_index_value(|owner, balance| {
            panic!(
                "No accounts should have tokens after the claim message has been handled, \
                but {owner} has {balance} tokens"
            );
        })
        .await?;
    let TransactionOutcome {
        outgoing_messages,
        oracle_responses,
        ..
    } = tracker.into_outcome()?;
    // Handling the claim emits a single credit message back to the claimer chain.
    assert_eq!(outgoing_messages.len(), 1);
    assert!(oracle_responses.is_empty());
    assert!(matches!(outgoing_messages[0].message, Message::System(_)));
    let mut tracker = TransactionTracker::new_replaying(Vec::new());
    let context = MessageContext {
        chain_id: claimer_chain_id,
        ..create_dummy_message_context(claimer_chain_id, None)
    };
    // Deliver the credit on the claimer chain and verify the recipient was paid.
    ExecutionStateActor::new(&mut claimer_view, &mut tracker, &mut controller)
        .execute_message(context, outgoing_messages[0].message.clone(), None)
        .await?;
    recipient
        .verify_recipient(&claimer_view.system, amount)
        .await?;
    Ok(())
}
/// Tests the contract system API to claim tokens from an unauthorized remote account.
///
/// The claim is rejected on the claimer chain itself, so no real source-chain state
/// is needed — only its chain ID.
#[test_matrix(
    [TransferTestEndpoint::User, TransferTestEndpoint::Application],
    [TransferTestEndpoint::Chain, TransferTestEndpoint::User, TransferTestEndpoint::Application]
)]
#[test_log::test(tokio::test)]
async fn test_unauthorized_claims(
    sender: TransferTestEndpoint,
    recipient: TransferTestEndpoint,
) -> anyhow::Result<()> {
    let amount = Amount::ONE;
    let claimer_chain_description = dummy_chain_description(1);
    let claimer_chain_id = claimer_chain_description.id();
    let claimer_state = SystemExecutionState {
        description: Some(claimer_chain_description),
        ..SystemExecutionState::default()
    };
    let source_chain_id = dummy_chain_description(0).id();
    let mut claimer_view = claimer_state.into_view().await;
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let application_description = TransferTestEndpoint::sender_application_description();
    let application_description_blob = Blob::new_application_description(&application_description);
    let app_desc_blob_id = application_description_blob.id();
    let (application_id, application) = claimer_view
        .register_mock_application_with(application_description, contract_blob, service_blob)
        .await?;
    // The mocked contract attempts a claim from an owner the signer does not control.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            runtime.claim(
                Account {
                    owner: sender.unauthorized_sender_account_owner(),
                    chain_id: source_chain_id,
                },
                Account {
                    owner: recipient.recipient_account_owner(),
                    chain_id: claimer_chain_id,
                },
                amount,
            )?;
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());
    let context = OperationContext {
        authenticated_owner: sender.unauthorized_signer(),
        ..create_dummy_operation_context(claimer_chain_id)
    };
    let mut controller = ResourceController::default();
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    let result = ExecutionStateActor::new(&mut claimer_view, &mut tracker, &mut controller)
        .execute_operation(context, operation)
        .await;
    // The runtime must reject the claim with the dedicated authentication error.
    assert_matches!(result, Err(ExecutionError::UnauthenticatedClaimOwner));
    Ok(())
}
/// Tests the contract system API to read the chain balance.
#[proptest(async = "tokio")]
async fn test_read_chain_balance_system_api(chain_balance: Amount) {
    // Build a chain whose shared (chain-level) balance is the arbitrary amount.
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let state = SystemExecutionState {
        balance: chain_balance,
        ..SystemExecutionState::new(chain_description)
    };
    let mut view = state.into_view().await;

    // Register a mock application together with the blobs backing it.
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let description = TransferTestEndpoint::sender_application_description();
    let app_desc_blob_id = Blob::new_application_description(&description).id();
    let (application_id, application) = view
        .register_mock_application_with(description, contract_blob, service_blob)
        .await
        .unwrap();

    // The mocked contract checks that the runtime reports the configured balance.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            assert_eq!(runtime.read_chain_balance().unwrap(), chain_balance);
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());

    // Drive the mocked contract with an empty user operation.
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut resource_controller = ResourceController::default();
    let mut tracker = TransactionTracker::new_replaying_blobs([
        app_desc_blob_id,
        contract_blob_id,
        service_blob_id,
    ]);
    ExecutionStateActor::new(&mut view, &mut tracker, &mut resource_controller)
        .execute_operation(create_dummy_operation_context(chain_id), operation)
        .await
        .unwrap();
}
/// Tests the contract system API to read a single account balance.
#[proptest(async = "tokio")]
async fn test_read_owner_balance_system_api(
    #[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
    // Start from a chain whose account balances are the arbitrary `accounts` map.
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let state = SystemExecutionState {
        balances: accounts.clone(),
        ..SystemExecutionState::new(chain_description)
    };
    let mut view = state.into_view().await;
    let (application_id, application, blobs) = view.register_mock_application(0).await.unwrap();

    // The mocked contract reads each owner's balance back and compares it to the map.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            for (owner, expected) in accounts {
                let actual = runtime.read_owner_balance(owner).unwrap();
                assert_eq!(actual, expected);
            }
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());

    // Drive the mocked contract with an empty user operation.
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut resource_controller = ResourceController::default();
    let mut tracker = TransactionTracker::new_replaying_blobs(blobs);
    ExecutionStateActor::new(&mut view, &mut tracker, &mut resource_controller)
        .execute_operation(create_dummy_operation_context(chain_id), operation)
        .await
        .unwrap();
}
/// Tests if reading the balance of a missing account returns zero.
#[proptest(async = "tokio")]
async fn test_read_owner_balance_returns_zero_for_missing_accounts(missing_account: AccountOwner) {
    // A fresh chain with no account balances at all.
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let empty_state = SystemExecutionState::new(chain_description);
    let mut view = empty_state.into_view().await;
    let (application_id, application, blobs) = view.register_mock_application(0).await.unwrap();

    // The mocked contract asserts that an unknown owner reads back as a zero balance.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            let balance = runtime.read_owner_balance(missing_account).unwrap();
            assert_eq!(balance, Amount::ZERO);
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());

    // Drive the mocked contract with an empty user operation.
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut resource_controller = ResourceController::default();
    let mut tracker = TransactionTracker::new_replaying_blobs(blobs);
    ExecutionStateActor::new(&mut view, &mut tracker, &mut resource_controller)
        .execute_operation(create_dummy_operation_context(chain_id), operation)
        .await
        .unwrap();
}
/// Tests the contract system API to read all account balances.
#[proptest(async = "tokio")]
async fn test_read_owner_balances_system_api(
    #[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
    // Start from a chain whose account balances are the arbitrary `accounts` map.
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let state = SystemExecutionState {
        balances: accounts.clone(),
        ..SystemExecutionState::new(chain_description)
    };
    let mut view = state.into_view().await;
    let (application_id, application, blobs) = view.register_mock_application(0).await.unwrap();

    // The mocked contract expects the full balance listing, in `BTreeMap` key order.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            let expected = accounts.into_iter().collect::<Vec<_>>();
            assert_eq!(runtime.read_owner_balances().unwrap(), expected);
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());

    // Drive the mocked contract with an empty user operation.
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let mut resource_controller = ResourceController::default();
    let mut tracker = TransactionTracker::new_replaying_blobs(blobs);
    ExecutionStateActor::new(&mut view, &mut tracker, &mut resource_controller)
        .execute_operation(create_dummy_operation_context(chain_id), operation)
        .await
        .unwrap();
}
/// Tests the contract system API to read all account owners.
#[proptest(async = "tokio")]
async fn test_read_balance_owners_system_api(
    #[strategy(test_accounts_strategy())] accounts: BTreeMap<AccountOwner, Amount>,
) {
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let initial_state = SystemExecutionState {
        balances: accounts.clone(),
        ..SystemExecutionState::new(chain_description)
    };
    let mut state_view = initial_state.into_view().await;
    let (application_id, mock_application, blobs) =
        state_view.register_mock_application(0).await.unwrap();

    // The contract lists the account owners; they must be exactly the keys of
    // the map the state was initialized with, in map order.
    mock_application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            let expected = accounts.keys().copied().collect::<Vec<_>>();
            assert_eq!(runtime.read_balance_owners().unwrap(), expected);
            Ok(vec![])
        },
    ));
    mock_application.expect_call(ExpectedCall::default_finalize());

    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    let context = create_dummy_operation_context(chain_id);
    let mut txn_tracker = TransactionTracker::new_replaying_blobs(blobs);
    let mut controller = ResourceController::default();
    ExecutionStateActor::new(&mut state_view, &mut txn_tracker, &mut controller)
        .execute_operation(context, operation)
        .await
        .unwrap();
}
/// A test helper representing a transfer endpoint.
#[derive(Clone, Copy, Debug)]
enum TransferTestEndpoint {
    /// The chain's own balance (represented by [`AccountOwner::CHAIN`]).
    Chain,
    /// A user account, identified by an owner derived from a test hash.
    User,
    /// An account owned by an application, identified by its application ID.
    Application,
}
impl TransferTestEndpoint {
    /// Returns the [`AccountOwner`] used to represent a sender that's a user.
    fn sender_owner() -> AccountOwner {
        AccountOwner::from(CryptoHash::test_hash("sender"))
    }
    /// Returns the [`ApplicationId`] used to represent a sender that's an application.
    fn sender_application_id() -> ApplicationId {
        ApplicationId::from(&Self::sender_application_description())
    }
    /// Returns the [`ApplicationDescription`] used to represent a sender that's an application.
    fn sender_application_description() -> ApplicationDescription {
        // The module ID is derived from the hashes of the bytecode blobs below,
        // so the description stays consistent with the blobs the tests publish.
        let contract_id = Self::sender_application_contract_blob().id().hash;
        let service_id = Self::sender_application_service_blob().id().hash;
        let vm_runtime = VmRuntime::Wasm;
        ApplicationDescription {
            module_id: ModuleId::new(contract_id, service_id, vm_runtime),
            creator_chain_id: dummy_chain_description(1000).id(),
            block_height: BlockHeight(0),
            application_index: 0,
            parameters: vec![],
            required_application_ids: vec![],
        }
    }
    /// Returns the [`Blob`] that represents the contract bytecode used when representing the
    /// sender as an application.
    fn sender_application_contract_blob() -> Blob {
        Blob::new_contract_bytecode(CompressedBytecode {
            compressed_bytes: Arc::new(b"sender contract".to_vec().into_boxed_slice()),
        })
    }
    /// Returns the [`Blob`] that represents the service bytecode used when representing the sender
    /// as an application.
    fn sender_application_service_blob() -> Blob {
        Blob::new_service_bytecode(CompressedBytecode {
            compressed_bytes: Arc::new(b"sender service".to_vec().into_boxed_slice()),
        })
    }
    /// Returns the [`AccountOwner`] used to represent a recipient that's a user.
    fn recipient_owner() -> AccountOwner {
        AccountOwner::from(CryptoHash::test_hash("recipient"))
    }
    /// Returns the [`ApplicationId`] used to represent a recipient that's an application.
    fn recipient_application_id() -> ApplicationId {
        ApplicationId::new(CryptoHash::test_hash("recipient application description"))
    }
    /// Returns a [`SystemExecutionState`] initialized with this transfer endpoint's account
    /// having `transfer_amount` tokens.
    ///
    /// The state is also configured so that authentication will succeed when this endpoint is used
    /// as a sender.
    pub fn create_system_state(&self, transfer_amount: Amount) -> SystemExecutionState {
        // Decide where the funds live (chain balance vs. per-owner balances)
        // and which owner, if any, must be a super owner for authentication.
        let (balance, balances, owner) = match self {
            TransferTestEndpoint::Chain => (transfer_amount, vec![], Some(Self::sender_owner())),
            TransferTestEndpoint::User => {
                let owner = Self::sender_owner();
                (Amount::ZERO, vec![(owner, transfer_amount)], Some(owner))
            }
            TransferTestEndpoint::Application => (
                Amount::ZERO,
                vec![(Self::sender_application_id().into(), transfer_amount)],
                None,
            ),
        };
        let ownership = ChainOwnership {
            // `owner` is an `Option`, which iterates as zero or one element.
            super_owners: BTreeSet::from_iter(owner),
            ..ChainOwnership::default()
        };
        let chain_description =
            dummy_chain_description_with_ownership_and_balance(0, ownership.clone(), balance);
        SystemExecutionState {
            description: Some(chain_description),
            ownership,
            balance,
            balances: BTreeMap::from_iter(balances),
            ..SystemExecutionState::default()
        }
    }
    /// Returns the [`AccountOwner`] to represent this transfer endpoint as a sender.
    pub fn sender_account_owner(&self) -> AccountOwner {
        match self {
            TransferTestEndpoint::Chain => AccountOwner::CHAIN,
            TransferTestEndpoint::User => Self::sender_owner(),
            TransferTestEndpoint::Application => Self::sender_application_id().into(),
        }
    }
    /// Returns the [`AccountOwner`] to represent this transfer endpoint as an unauthorized sender.
    pub fn unauthorized_sender_account_owner(&self) -> AccountOwner {
        match self {
            TransferTestEndpoint::Chain => AccountOwner::CHAIN,
            TransferTestEndpoint::User => AccountOwner::from(CryptoHash::test_hash("attacker")),
            TransferTestEndpoint::Application => Self::recipient_application_id().into(),
        }
    }
    /// Returns the [`AccountOwner`] that should be used as the authenticated owner in the transfer
    /// operation.
    pub fn signer(&self) -> Option<AccountOwner> {
        match self {
            TransferTestEndpoint::Chain | TransferTestEndpoint::User => Some(Self::sender_owner()),
            TransferTestEndpoint::Application => None,
        }
    }
    /// Returns the [`AccountOwner`] that should be used as the authenticated owner when testing an
    /// unauthorized transfer operation.
    pub fn unauthorized_signer(&self) -> Option<AccountOwner> {
        match self {
            TransferTestEndpoint::Chain | TransferTestEndpoint::User => {
                Some(Self::recipient_owner())
            }
            TransferTestEndpoint::Application => None,
        }
    }
    /// Returns the [`AccountOwner`] to represent this transfer endpoint as a recipient.
    pub fn recipient_account_owner(&self) -> AccountOwner {
        match self {
            TransferTestEndpoint::Chain => AccountOwner::CHAIN,
            TransferTestEndpoint::User => Self::recipient_owner(),
            TransferTestEndpoint::Application => Self::recipient_application_id().into(),
        }
    }
    /// Verifies that the [`SystemExecutionStateView`] has the expected `amount` in this transfer
    /// endpoint's account, and that all other accounts are empty.
    pub async fn verify_recipient(
        &self,
        system: &SystemExecutionStateView<MemoryContext<TestExecutionRuntimeContext>>,
        amount: Amount,
    ) -> anyhow::Result<()> {
        let account_owner = self.recipient_account_owner();
        // A transfer to the chain endpoint lands in the shared chain balance;
        // every other endpoint lands in its per-owner balance entry.
        let (expected_chain_balance, expected_balances) = if account_owner == AccountOwner::CHAIN {
            (amount, vec![])
        } else {
            (Amount::ZERO, vec![(account_owner, amount)])
        };
        let balances = system.balances.index_values().await?;
        assert_eq!(*system.balance.get(), expected_chain_balance);
        assert_eq!(balances, expected_balances);
        Ok(())
    }
}
/// Tests the contract system API to query an application service.
///
/// `authorized_apps` drives the `call_service_as_oracle` permission: `None`
/// leaves every application authorized, `Some(vec![()])` authorizes exactly
/// the registered application, and `Some(vec![])` authorizes none, so the
/// call must fail with [`ExecutionError::UnauthorizedApplication`].
#[test_case(None => matches Ok(_); "when all authorized")]
#[test_case(Some(vec![()]) => matches Ok(_); "when single app authorized")]
#[test_case(Some(vec![]) => matches Err(ExecutionError::UnauthorizedApplication(_)); "when unauthorized")]
#[test_log::test(tokio::test)]
async fn test_query_service(authorized_apps: Option<Vec<()>>) -> Result<(), ExecutionError> {
    let description = dummy_chain_description(0);
    let chain_id = description.id();
    let mut view = SystemExecutionState {
        ownership: ChainOwnership::default(),
        balance: Amount::ONE,
        balances: BTreeMap::new(),
        ..SystemExecutionState::new(description)
    }
    .into_view()
    .await;
    // Register the mock application together with its bytecode blobs; the
    // blob IDs are replayed below as oracle responses.
    let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
    let service_blob = TransferTestEndpoint::sender_application_service_blob();
    let contract_blob_id = contract_blob.id();
    let service_blob_id = service_blob.id();
    let application_description = TransferTestEndpoint::sender_application_description();
    let application_description_blob = Blob::new_application_description(&application_description);
    let app_desc_blob_id = application_description_blob.id();
    let (application_id, application) = view
        .register_mock_application_with(application_description, contract_blob, service_blob)
        .await
        .expect("should register mock application");
    // Map each unit in the test parameter to the registered application's ID;
    // `None` keeps the permission open to all applications.
    let call_service_as_oracle =
        authorized_apps.map(|apps| apps.into_iter().map(|()| application_id).collect());
    view.system
        .application_permissions
        .set(ApplicationPermissions {
            call_service_as_oracle,
            ..ApplicationPermissions::new_single(application_id)
        });
    // The contract queries its own service; the service handler replies with
    // an empty response.
    application.expect_call(ExpectedCall::execute_operation(
        move |runtime, _operation| {
            runtime.query_service(application_id, vec![])?;
            Ok(vec![])
        },
    ));
    application.expect_call(ExpectedCall::default_finalize());
    application.expect_call(ExpectedCall::handle_query(|_service, _query| Ok(vec![])));
    let context = create_dummy_operation_context(chain_id);
    let mut controller = ResourceController::default();
    let operation = Operation::User {
        application_id,
        bytes: vec![],
    };
    // Replay the oracle responses the execution is expected to record, in
    // order: the three blob reads, then the (empty) service response.
    let mut txn_tracker = TransactionTracker::new_replaying(vec![
        OracleResponse::Blob(app_desc_blob_id),
        OracleResponse::Blob(contract_blob_id),
        OracleResponse::Blob(service_blob_id),
        OracleResponse::Service(vec![]),
    ]);
    ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_operation(context, operation)
        .await?;
    Ok(())
}
/// Tests the contract system API to make HTTP requests.
#[test_case(None => matches Ok(_); "when all authorized")]
#[test_case(Some(vec![()]) => matches Ok(_); "when single app authorized")]
#[test_case(Some(vec![]) => matches Err(ExecutionError::UnauthorizedApplication(_)); "when unauthorized")]
#[test_log::test(tokio::test)]
async fn test_perform_http_request(authorized_apps: Option<Vec<()>>) -> Result<(), ExecutionError> {
let description = dummy_chain_description(0);
let chain_id = description.id();
let mut view = SystemExecutionState {
ownership: ChainOwnership::default(),
balance: Amount::ONE,
balances: BTreeMap::new(),
..SystemExecutionState::new(description)
}
.into_view()
.await;
let contract_blob = TransferTestEndpoint::sender_application_contract_blob();
let service_blob = TransferTestEndpoint::sender_application_service_blob();
let contract_blob_id = contract_blob.id();
let service_blob_id = service_blob.id();
let application_description = TransferTestEndpoint::sender_application_description();
let application_description_blob = Blob::new_application_description(&application_description);
let app_desc_blob_id = application_description_blob.id();
let (application_id, application) = view
.register_mock_application_with(application_description, contract_blob, service_blob)
.await
.expect("should register mock application");
let make_http_requests =
authorized_apps.map(|apps| apps.into_iter().map(|()| application_id).collect());
view.system
.application_permissions
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/test_system_execution.rs | linera-execution/tests/test_system_execution.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::field_reassign_with_default)]
use linera_base::{
crypto::AccountSecretKey,
data_types::{Amount, BlockHeight, Timestamp},
identifiers::{Account, AccountOwner},
ownership::ChainOwnership,
};
use linera_execution::{
test_utils::{
dummy_chain_description, dummy_chain_description_with_ownership_and_balance,
SystemExecutionState,
},
ExecutionStateActor, Message, MessageContext, Operation, OperationContext, Query, QueryContext,
QueryOutcome, QueryResponse, ResourceController, SystemMessage, SystemOperation, SystemQuery,
SystemResponse, TransactionTracker,
};
#[tokio::test]
async fn test_simple_system_operation() -> anyhow::Result<()> {
    // A chain whose only super owner is freshly generated, holding 4 tokens.
    let owner = AccountOwner::from(AccountSecretKey::generate().public());
    let ownership = ChainOwnership {
        super_owners: [owner].into_iter().collect(),
        ..ChainOwnership::default()
    };
    let balance = Amount::from_tokens(4);
    let description =
        dummy_chain_description_with_ownership_and_balance(0, ownership.clone(), balance);
    let chain_id = description.id();
    let mut view = SystemExecutionState {
        description: Some(description),
        balance,
        ownership,
        ..SystemExecutionState::default()
    }
    .into_view()
    .await;

    // Transfer the whole chain balance to the burn address.
    let operation = SystemOperation::Transfer {
        owner: AccountOwner::CHAIN,
        amount: Amount::from_tokens(4),
        recipient: Account::burn_address(chain_id),
    };
    let context = OperationContext {
        chain_id,
        height: BlockHeight(0),
        round: Some(0),
        authenticated_owner: Some(owner),
        timestamp: Default::default(),
    };
    let mut txn_tracker = TransactionTracker::new_replaying(Vec::new());
    let mut controller = ResourceController::default();
    ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_operation(context, Operation::system(operation))
        .await?;

    // The full amount left the chain account and no messages were produced.
    assert_eq!(view.system.balance.get(), &Amount::ZERO);
    let txn_outcome = txn_tracker.into_outcome().unwrap();
    assert!(txn_outcome.outgoing_messages.is_empty());
    Ok(())
}
#[tokio::test]
async fn test_simple_system_message() -> anyhow::Result<()> {
let mut state = SystemExecutionState::default();
let description = dummy_chain_description(0);
let chain_id = description.id();
state.description = Some(description);
let mut view = state.into_view().await;
let message = SystemMessage::Credit {
amount: Amount::from_tokens(4),
target: AccountOwner::CHAIN,
source: AccountOwner::CHAIN,
};
let context = MessageContext {
chain_id,
origin: chain_id,
is_bouncing: false,
height: BlockHeight(0),
round: Some(0),
authenticated_owner: None,
refund_grant_to: None,
timestamp: Default::default(),
};
let mut controller = ResourceController::default();
let mut txn_tracker = TransactionTracker::new_replaying(Vec::new());
ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
.execute_message(context, Message::System(message), None)
.await?;
assert_eq!(view.system.balance.get(), &Amount::from_tokens(4));
let txn_outcome = txn_tracker.into_outcome().unwrap();
assert!(txn_outcome.outgoing_messages.is_empty());
Ok(())
}
#[tokio::test]
async fn test_simple_system_query() -> anyhow::Result<()> {
let mut state = SystemExecutionState::default();
let description = dummy_chain_description(0);
let chain_id = description.id();
state.description = Some(description);
state.balance = Amount::from_tokens(4);
let mut view = state.into_view().await;
let context = QueryContext {
chain_id,
next_block_height: BlockHeight(0),
local_time: Timestamp::from(0),
};
let QueryOutcome {
response,
operations,
} = view
.query_application(context, Query::System(SystemQuery), None)
.await
.unwrap();
assert_eq!(
response,
QueryResponse::System(SystemResponse {
chain_id,
balance: Amount::from_tokens(4)
})
);
assert!(operations.is_empty());
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/tests/fee_consumption.rs | linera-execution/tests/fee_consumption.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Tests for how the runtime computes fees based on consumed resources.
use std::{collections::BTreeSet, sync::Arc, vec};
use linera_base::{
crypto::AccountPublicKey,
data_types::{Amount, BlockHeight, OracleResponse},
http,
identifiers::{Account, AccountOwner},
vm::VmRuntime,
};
use linera_execution::{
test_utils::{
blob_oracle_responses, dummy_chain_description, ExpectedCall, RegisterMockApplication,
SystemExecutionState,
},
ContractRuntime, ExecutionError, ExecutionStateActor, Message, MessageContext,
ResourceControlPolicy, ResourceController, ResourceTracker, TransactionTracker,
};
use test_case::test_case;
/// Tests if the chain balance is updated based on the fees spent for consuming resources.
///
/// Each case lists the resource spends to perform, the initial chain balance,
/// an optional owner-account balance, and an optional grant. Fees are drawn
/// from the grant if present, otherwise from the chain balance and then the
/// owner's account.
// Chain account only.
#[test_case(vec![], Amount::ZERO, None, None; "without any costs")]
#[test_case(vec![FeeSpend::Fuel(100)], Amount::from_tokens(1_000), None, None; "with only execution costs")]
#[test_case(vec![FeeSpend::Read(vec![0, 1], None)], Amount::from_tokens(1_000), None, None; "with only an empty read")]
#[test_case(
    vec![
        FeeSpend::Read(vec![0, 1], None),
        FeeSpend::Fuel(207),
    ],
    Amount::from_tokens(1_000),
    None,
    None;
    "with execution and an empty read"
)]
// Chain account and small owner account.
#[test_case(
    vec![FeeSpend::Fuel(100)],
    Amount::from_tokens(1_000),
    Some(Amount::from_tokens(1)),
    None;
    "with only execution costs and with owner account"
)]
#[test_case(
    vec![FeeSpend::Read(vec![0, 1], None)],
    Amount::from_tokens(1_000),
    Some(Amount::from_tokens(1)),
    None;
    "with only an empty read and with owner account"
)]
#[test_case(
    vec![
        FeeSpend::Read(vec![0, 1], None),
        FeeSpend::Fuel(207),
    ],
    Amount::from_tokens(1_000),
    Some(Amount::from_tokens(1)),
    None;
    "with execution and an empty read and with owner account"
)]
// Small chain account and larger owner account.
#[test_case(
    vec![FeeSpend::Fuel(100)],
    Amount::from_tokens(1),
    Some(Amount::from_tokens(1_000)),
    None;
    "with only execution costs and with larger owner account"
)]
#[test_case(
    vec![FeeSpend::Read(vec![0, 1], None)],
    Amount::from_tokens(1),
    Some(Amount::from_tokens(1_000)),
    None;
    "with only an empty read and with larger owner account"
)]
#[test_case(
    vec![
        FeeSpend::Read(vec![0, 1], None),
        FeeSpend::Fuel(207),
    ],
    Amount::from_tokens(1),
    Some(Amount::from_tokens(1_000)),
    None;
    "with execution and an empty read and with larger owner account"
)]
// Small chain account, small owner account, large grant.
#[test_case(
    vec![FeeSpend::Fuel(100)],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with only execution costs and with owner account and grant"
)]
#[test_case(
    vec![FeeSpend::Read(vec![0, 1], None)],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with only an empty read and with owner account and grant"
)]
#[test_case(
    vec![
        FeeSpend::Read(vec![0, 1], None),
        FeeSpend::Fuel(207),
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with execution and an empty read and with owner account and grant"
)]
#[test_case(
    vec![
        FeeSpend::QueryServiceOracle,
        FeeSpend::Runtime(32),
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with only a service oracle call"
)]
#[test_case(
    vec![
        FeeSpend::QueryServiceOracle,
        FeeSpend::QueryServiceOracle,
        FeeSpend::QueryServiceOracle,
        FeeSpend::Runtime(96),
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with three service oracle calls"
)]
#[test_case(
    vec![
        FeeSpend::Fuel(91),
        FeeSpend::QueryServiceOracle,
        FeeSpend::Fuel(11),
        FeeSpend::Read(vec![0, 1, 2], None),
        FeeSpend::QueryServiceOracle,
        FeeSpend::Fuel(57),
        FeeSpend::QueryServiceOracle,
        FeeSpend::Runtime(96),
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1_000)),
    None;
    "with service oracle calls, fuel consumption and a read operation"
)]
#[test_case(
    vec![FeeSpend::HttpRequest],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with one HTTP request"
)]
#[test_case(
    vec![
        FeeSpend::HttpRequest,
        FeeSpend::HttpRequest,
        FeeSpend::HttpRequest,
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with three HTTP requests"
)]
#[test_case(
    vec![
        FeeSpend::Fuel(11),
        FeeSpend::HttpRequest,
        FeeSpend::Read(vec![0, 1], None),
        FeeSpend::Fuel(23),
        FeeSpend::HttpRequest,
    ],
    Amount::from_tokens(2),
    Some(Amount::from_tokens(1)),
    Some(Amount::from_tokens(1_000));
    "with all fee spend operations"
)]
// TODO(#1601): Add more test cases
#[tokio::test]
async fn test_fee_consumption(
    spends: Vec<FeeSpend>,
    chain_balance: Amount,
    owner_balance: Option<Amount>,
    initial_grant: Option<Amount>,
) -> anyhow::Result<()> {
    let chain_description = dummy_chain_description(0);
    let chain_id = chain_description.id();
    let mut state = SystemExecutionState {
        description: Some(chain_description.clone()),
        ..SystemExecutionState::default()
    };
    let (application_id, application, blobs) = state.register_mock_application(0).await?;
    let mut view = state.into_view().await;
    let mut oracle_responses = blob_oracle_responses(blobs.iter());
    let signer = AccountOwner::from(AccountPublicKey::test_key(0));
    view.system.balance.set(chain_balance);
    if let Some(owner_balance) = owner_balance {
        view.system.balances.insert(&signer, owner_balance)?;
    }
    // Use a distinct price for every resource so a fee charged against the
    // wrong price would show up in the balance assertions below.
    let prices = ResourceControlPolicy {
        wasm_fuel_unit: Amount::from_tokens(3),
        evm_fuel_unit: Amount::from_tokens(2),
        read_operation: Amount::from_tokens(3),
        write_operation: Amount::from_tokens(5),
        byte_runtime: Amount::from_millis(1),
        byte_read: Amount::from_tokens(7),
        byte_written: Amount::from_tokens(11),
        byte_stored: Amount::from_tokens(13),
        operation: Amount::from_tokens(17),
        operation_byte: Amount::from_tokens(19),
        message: Amount::from_tokens(23),
        message_byte: Amount::from_tokens(29),
        service_as_oracle_query: Amount::from_millis(31),
        http_request: Amount::from_tokens(37),
        maximum_wasm_fuel_per_block: 4_868_145_137,
        maximum_evm_fuel_per_block: 4_868_145_137,
        maximum_block_size: 41,
        maximum_service_oracle_execution_ms: 43,
        maximum_blob_size: 47,
        maximum_published_blobs: 53,
        maximum_bytecode_size: 59,
        maximum_block_proposal_size: 61,
        maximum_bytes_read_per_block: 67,
        maximum_bytes_written_per_block: 71,
        maximum_oracle_response_bytes: 73,
        maximum_http_response_bytes: 79,
        http_request_timeout_ms: 83,
        blob_read: Amount::from_tokens(89),
        blob_published: Amount::from_tokens(97),
        blob_byte_read: Amount::from_tokens(101),
        blob_byte_published: Amount::from_tokens(103),
        http_request_allow_list: BTreeSet::new(),
    };
    // The total fee the execution is expected to charge for all spends.
    let consumed_fees = spends
        .iter()
        .map(|spend| spend.amount(&prices))
        .fold(Amount::ZERO, |sum, spent_fees| {
            sum.saturating_add(spent_fees)
        });
    let authenticated_owner = if owner_balance.is_some() {
        Some(signer)
    } else {
        None
    };
    let mut controller = ResourceController::new(
        Arc::new(prices),
        ResourceTracker::default(),
        authenticated_owner,
    );
    for spend in &spends {
        oracle_responses.extend(spend.expected_oracle_responses());
    }
    application.expect_call(ExpectedCall::execute_message(move |runtime, _operation| {
        for spend in spends {
            spend.execute(runtime)?;
        }
        Ok(())
    }));
    application.expect_call(ExpectedCall::default_finalize());
    // Any unused grant is refunded to the authenticated owner's account, when
    // there is one. (A redundant `.or(None)` was removed here.)
    let refund_grant_to = authenticated_owner.map(|owner| Account { chain_id, owner });
    let context = MessageContext {
        chain_id,
        origin: chain_id,
        is_bouncing: false,
        authenticated_owner,
        refund_grant_to,
        height: BlockHeight(0),
        round: Some(0),
        timestamp: Default::default(),
    };
    let mut grant = initial_grant.unwrap_or_default();
    let mut txn_tracker = TransactionTracker::new_replaying(oracle_responses);
    ExecutionStateActor::new(&mut view, &mut txn_tracker, &mut controller)
        .execute_message(
            context,
            Message::User {
                application_id,
                bytes: vec![],
            },
            if initial_grant.is_some() {
                Some(&mut grant)
            } else {
                None
            },
        )
        .await?;
    let txn_outcome = txn_tracker.into_outcome()?;
    assert!(txn_outcome.outgoing_messages.is_empty());
    // Without a grant, fees come out of the chain balance first and spill over
    // into the owner's account; with a grant, the chain balance is untouched
    // and fees come out of the grant first.
    match initial_grant {
        None => {
            let (expected_chain_balance, expected_owner_balance) = if chain_balance >= consumed_fees
            {
                (chain_balance.saturating_sub(consumed_fees), owner_balance)
            } else {
                let Some(owner_balance) = owner_balance else {
                    panic!("execution should have failed earlier");
                };
                (
                    Amount::ZERO,
                    Some(
                        owner_balance
                            .saturating_add(chain_balance)
                            .saturating_sub(consumed_fees),
                    ),
                )
            };
            assert_eq!(*view.system.balance.get(), expected_chain_balance);
            assert_eq!(
                view.system.balances.get(&signer).await?,
                expected_owner_balance
            );
            assert_eq!(grant, Amount::ZERO);
        }
        Some(initial_grant) => {
            let (expected_grant, expected_owner_balance) = if initial_grant >= consumed_fees {
                (initial_grant.saturating_sub(consumed_fees), owner_balance)
            } else {
                let Some(owner_balance) = owner_balance else {
                    panic!("execution should have failed earlier");
                };
                (
                    Amount::ZERO,
                    Some(
                        owner_balance
                            .saturating_add(initial_grant)
                            .saturating_sub(consumed_fees),
                    ),
                )
            };
            assert_eq!(*view.system.balance.get(), chain_balance);
            assert_eq!(
                view.system.balances.get(&signer).await?,
                expected_owner_balance
            );
            assert_eq!(grant, expected_grant);
        }
    }
    Ok(())
}
/// A runtime operation that costs some amount of fees.
pub enum FeeSpend {
    /// Consume some execution fuel.
    Fuel(u64),
    /// Reads from storage the value at the given key; `None` means the key is
    /// expected to be absent.
    Read(Vec<u8>, Option<Vec<u8>>),
    /// Queries a service as an oracle.
    QueryServiceOracle,
    /// Performs an HTTP request.
    HttpRequest,
    /// Accounts for the given number of bytes charged at the runtime byte
    /// price (`ResourceControlPolicy::byte_runtime`); see `execute`, which
    /// performs no call for this variant.
    Runtime(u32),
}
impl FeeSpend {
    /// Returns the [`OracleResponse`]s necessary for executing this runtime operation.
    pub fn expected_oracle_responses(&self) -> Vec<OracleResponse> {
        match self {
            // Fuel, storage reads and runtime bytes don't go through an oracle.
            FeeSpend::Fuel(_) | FeeSpend::Read(_, _) | FeeSpend::Runtime(_) => vec![],
            FeeSpend::QueryServiceOracle => {
                vec![OracleResponse::Service(vec![])]
            }
            FeeSpend::HttpRequest => vec![OracleResponse::Http(http::Response::ok([]))],
        }
    }
    /// The fee amount required for this runtime operation.
    pub fn amount(&self, policy: &ResourceControlPolicy) -> Amount {
        match self {
            FeeSpend::Fuel(units) => policy.wasm_fuel_unit.saturating_mul(*units as u128),
            FeeSpend::Read(_key, value) => {
                // NOTE(review): the value's bytes are charged at one base unit
                // per byte rather than at `policy.byte_read` — confirm this
                // mirrors how the runtime meters read bytes.
                let value_read_fee = value
                    .as_ref()
                    .map_or(Amount::ZERO, |value| Amount::from(value.len() as u128));
                policy.read_operation.saturating_add(value_read_fee)
            }
            FeeSpend::QueryServiceOracle => policy.service_as_oracle_query,
            FeeSpend::HttpRequest => policy.http_request,
            FeeSpend::Runtime(bytes) => policy.byte_runtime.saturating_mul(*bytes as u128),
        }
    }
    /// Executes the operation with the `runtime`.
    pub fn execute(self, runtime: &mut impl ContractRuntime) -> Result<(), ExecutionError> {
        match self {
            FeeSpend::Fuel(units) => runtime.consume_fuel(units, VmRuntime::Wasm),
            // Nothing to call here: the runtime-byte charge is presumably
            // applied by the framework itself (its cost is still counted via
            // `amount`) — TODO confirm against the resource controller.
            FeeSpend::Runtime(_bytes) => Ok(()),
            FeeSpend::Read(key, value) => {
                let promise = runtime.read_value_bytes_new(key)?;
                let response = runtime.read_value_bytes_wait(&promise)?;
                assert_eq!(response, value);
                Ok(())
            }
            FeeSpend::QueryServiceOracle => {
                let application_id = runtime.application_id()?;
                runtime.query_service(application_id, vec![])?;
                Ok(())
            }
            FeeSpend::HttpRequest => {
                runtime.perform_http_request(http::Request::get("http://dummy.url"))?;
                Ok(())
            }
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/build.rs | linera-rpc/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Build script: compiles the gRPC protocol definitions, generates a
/// self-signed TLS certificate for `localhost`, and declares `cfg` aliases.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let out_dir: std::path::PathBuf = std::env::var("OUT_DIR")?.into();

    // Compile the protobuf definitions, keeping the file descriptor set so it
    // can be embedded (e.g. for gRPC reflection).
    let no_includes: &[&str] = &[];
    tonic_prost_build::configure()
        .file_descriptor_set_path(out_dir.join("file_descriptor_set.bin"))
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile_protos(&["proto/rpc.proto"], no_includes)?;

    // Generate a self-signed certificate valid for `localhost`.
    let subject_alt_names = vec!["localhost".to_string()];
    let cert = rcgen::generate_simple_self_signed(subject_alt_names)?;

    // Write the certificate and the private key (both PEM). `Path::join` +
    // `fs::write` replace the previous push/format!/File::create/write_all
    // sequence with the same on-disk result.
    std::fs::write(
        out_dir.join("self_signed_cert.pem"),
        cert.serialize_pem()?.as_bytes(),
    )?;
    std::fs::write(
        out_dir.join("private_key.pem"),
        cert.serialize_private_key_pem().as_bytes(),
    )?;

    cfg_aliases::cfg_aliases! {
        with_testing: { any(test, feature = "test") },
        web: { all(target_arch = "wasm32", target_os = "unknown", feature = "web") },
        with_metrics: { all(not(web), feature = "metrics") },
        with_server: { all(not(web), feature = "server") },
        with_simple_network: { all(not(web), feature = "simple-network") },
    };
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/config.rs | linera-rpc/src/config.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::ffi::OsString;
use clap::Parser;
use linera_base::{crypto::ValidatorPublicKey, identifiers::ChainId};
use serde::{Deserialize, Serialize};
#[cfg(with_simple_network)]
use crate::simple;
/// Command-line options controlling the cross-chain message queue: its size,
/// retry behavior, and test-only delay/failure injection.
#[derive(Clone, Debug, Parser)]
#[cfg_attr(with_testing, derive(PartialEq))]
pub struct CrossChainConfig {
    /// Number of cross-chain messages allowed before dropping them.
    #[arg(long = "cross-chain-queue-size", default_value = "1000")]
    pub(crate) queue_size: usize,
    /// Maximum number of retries for a cross-chain message.
    #[arg(long = "cross-chain-max-retries", default_value = "10")]
    pub(crate) max_retries: u32,
    /// Delay before retrying a cross-chain message.
    #[arg(long = "cross-chain-retry-delay-ms", default_value = "2000")]
    pub(crate) retry_delay_ms: u64,
    /// Introduce a delay before sending every cross-chain message (e.g. for testing purpose).
    #[arg(long = "cross-chain-sender-delay-ms", default_value = "0")]
    pub(crate) sender_delay_ms: u64,
    /// Drop cross-chain messages randomly at the given rate (0 <= rate < 1) (meant for testing).
    #[arg(long = "cross-chain-sender-failure-rate", default_value = "0.0")]
    pub(crate) sender_failure_rate: f32,
}
impl Default for CrossChainConfig {
    /// Obtains the defaults by parsing an empty command line, so every field
    /// takes the default declared on its `#[arg]` attribute.
    fn default() -> Self {
        let empty_argv: [OsString; 1] = [OsString::new()];
        CrossChainConfig::parse_from(empty_argv)
    }
}
impl CrossChainConfig {
    /// Serializes this configuration back into the command-line flags that
    /// produce it, preserving the original flag order.
    pub fn to_args(&self) -> Vec<String> {
        let flags_and_values = [
            ("--cross-chain-queue-size", self.queue_size.to_string()),
            ("--cross-chain-max-retries", self.max_retries.to_string()),
            ("--cross-chain-retry-delay-ms", self.retry_delay_ms.to_string()),
            ("--cross-chain-sender-delay-ms", self.sender_delay_ms.to_string()),
            (
                "--cross-chain-sender-failure-rate",
                self.sender_failure_rate.to_string(),
            ),
        ];
        flags_and_values
            .into_iter()
            .flat_map(|(flag, value)| [flag.to_string(), value])
            .collect()
    }
}
/// Command-line options for the notification queue.
#[derive(Clone, Debug, Parser)]
pub struct NotificationConfig {
    /// Number of notifications allowed before blocking the main server loop
    #[arg(long = "notification-queue-size", default_value = "1000")]
    pub(crate) notification_queue_size: usize,
}
/// The index of a shard within a validator's list of shards.
pub type ShardId = usize;
/// The network configuration of a shard.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ShardConfig {
    /// The host name (e.g., an IP address).
    pub host: String,
    /// The port.
    pub port: u16,
    /// The port on which metrics are served, if any.
    pub metrics_port: Option<u16>,
}
impl ShardConfig {
    /// Returns the `host:port` address of this shard.
    pub fn address(&self) -> String {
        format!("{}:{}", self.host, self.port)
    }

    /// Returns the address of this shard with an explicit `http` scheme.
    pub fn http_address(&self) -> String {
        // Reuse `address` so the host/port formatting is defined in one place.
        format!("http://{}", self.address())
    }
}
/// The network configuration of a proxy. A proxy exposes a public port to
/// clients and a private port for communication with shards.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ProxyConfig {
    /// The hostname (e.g., an IP address).
    pub host: String,
    /// The public facing port. Receives incoming connections from clients.
    pub public_port: u16,
    /// The private port. Used for communicating with shards.
    pub private_port: u16,
    /// The port on which metrics are served.
    pub metrics_port: u16,
}
impl ProxyConfig {
pub fn internal_address(&self, protocol: &NetworkProtocol) -> String {
format!(
"{}://{}:{}",
protocol.scheme(),
self.host,
self.private_port
)
}
}
/// The network protocol.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum NetworkProtocol {
#[cfg(with_simple_network)]
Simple(simple::TransportProtocol),
Grpc(TlsConfig),
}
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum TlsConfig {
ClearText,
Tls,
}
impl NetworkProtocol {
fn scheme(&self) -> &'static str {
match self {
#[cfg(with_simple_network)]
NetworkProtocol::Simple(transport) => transport.scheme(),
NetworkProtocol::Grpc(tls) => match tls {
TlsConfig::ClearText => "http",
TlsConfig::Tls => "https",
},
}
}
}
/// The network configuration for all shards.
pub type ValidatorInternalNetworkConfig = ValidatorInternalNetworkPreConfig<NetworkProtocol>;
/// The public network configuration for a validator.
pub type ValidatorPublicNetworkConfig = ValidatorPublicNetworkPreConfig<NetworkProtocol>;
/// The network configuration for all shards.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ValidatorInternalNetworkPreConfig<P> {
/// The public key of the validator.
pub public_key: ValidatorPublicKey,
/// The network protocol to use internally.
pub protocol: P,
/// The available shards. Each chain UID is mapped to a unique shard in the vector in
/// a static way.
pub shards: Vec<ShardConfig>,
/// The server configurations for the linera-exporter.
/// They can be used as optional locations to forward notifications to destinations other than
/// the proxy, by the workers.
pub block_exporters: Vec<ExporterServiceConfig>,
/// The available proxies.
pub proxies: Vec<ProxyConfig>,
}
impl<P> ValidatorInternalNetworkPreConfig<P> {
pub fn clone_with_protocol<Q>(&self, protocol: Q) -> ValidatorInternalNetworkPreConfig<Q> {
ValidatorInternalNetworkPreConfig {
public_key: self.public_key,
protocol,
shards: self.shards.clone(),
block_exporters: self.block_exporters.clone(),
proxies: self.proxies.clone(),
}
}
}
impl ValidatorInternalNetworkConfig {
pub fn exporter_addresses(&self) -> Vec<String> {
self.block_exporters
.iter()
.map(|ExporterServiceConfig { host, port }| {
format!("{}://{}:{}", self.protocol.scheme(), host, port)
})
.collect::<Vec<_>>()
}
}
impl ValidatorPublicNetworkConfig {
pub fn http_address(&self) -> String {
format!("{}://{}:{}", self.protocol.scheme(), self.host, self.port)
}
}
/// The public network configuration for a validator.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct ValidatorPublicNetworkPreConfig<P> {
/// The network protocol to use for the validator frontend.
pub protocol: P,
/// The host name of the validator (IP or hostname).
pub host: String,
/// The port the validator listens on.
pub port: u16,
}
impl<P> ValidatorPublicNetworkPreConfig<P> {
pub fn clone_with_protocol<Q>(&self, protocol: Q) -> ValidatorPublicNetworkPreConfig<Q> {
ValidatorPublicNetworkPreConfig {
protocol,
host: self.host.clone(),
port: self.port,
}
}
}
impl<P> std::fmt::Display for ValidatorPublicNetworkPreConfig<P>
where
P: std::fmt::Display,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}:{}", self.protocol, self.host, self.port)
}
}
impl std::fmt::Display for NetworkProtocol {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
#[cfg(with_simple_network)]
NetworkProtocol::Simple(protocol) => write!(f, "{:?}", protocol),
NetworkProtocol::Grpc(tls) => match tls {
TlsConfig::ClearText => write!(f, "grpc"),
TlsConfig::Tls => write!(f, "grpcs"),
},
}
}
}
impl<P> std::str::FromStr for ValidatorPublicNetworkPreConfig<P>
where
P: std::str::FromStr,
P::Err: std::fmt::Display,
{
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts = s.split(':').collect::<Vec<_>>();
anyhow::ensure!(
parts.len() == 3,
"Expecting format `(tcp|udp|grpc|grpcs):host:port`"
);
let protocol = parts[0].parse().map_err(|s| anyhow::anyhow!("{}", s))?;
let host = parts[1].to_owned();
let port = parts[2].parse()?;
Ok(ValidatorPublicNetworkPreConfig {
protocol,
host,
port,
})
}
}
impl std::str::FromStr for NetworkProtocol {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let protocol = match s {
"grpc" => Self::Grpc(TlsConfig::ClearText),
"grpcs" => Self::Grpc(TlsConfig::Tls),
#[cfg(with_simple_network)]
s => Self::Simple(simple::TransportProtocol::from_str(s)?),
#[cfg(not(with_simple_network))]
s => return Err(format!("unsupported protocol: {s:?}")),
};
Ok(protocol)
}
}
impl<P> ValidatorInternalNetworkPreConfig<P> {
/// Static shard assignment
pub fn get_shard_id(&self, chain_id: ChainId) -> ShardId {
use std::hash::{Hash, Hasher};
let mut s = std::collections::hash_map::DefaultHasher::new();
// Use the validator public key to randomise shard assignment.
self.public_key.hash(&mut s);
chain_id.hash(&mut s);
(s.finish() as ShardId) % self.shards.len()
}
pub fn shard(&self, shard_id: ShardId) -> &ShardConfig {
&self.shards[shard_id]
}
/// Gets the [`ShardConfig`] of the shard assigned to the `chain_id`.
pub fn get_shard_for(&self, chain_id: ChainId) -> &ShardConfig {
self.shard(self.get_shard_id(chain_id))
}
}
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
/// The server configuration for the linera-exporter.
pub struct ExporterServiceConfig {
/// The host name of the server (IP or hostname).
pub host: String,
/// The port for the server to listen on.
pub port: u16,
}
impl ExporterServiceConfig {
pub fn new(host: String, port: u16) -> ExporterServiceConfig {
ExporterServiceConfig { host, port }
}
}
#[test]
fn cross_chain_config_to_args() {
let config = CrossChainConfig::default();
let args = config.to_args();
let mut cmd = vec![String::new()];
cmd.extend(args.clone());
let config2 = CrossChainConfig::parse_from(cmd);
let args2 = config2.to_args();
assert_eq!(config, config2);
assert_eq!(args, args2);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/lib.rs | linera-rpc/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides network abstractions and the data schemas for remote procedure
//! calls (RPCs) in the Linera protocol.
// `tracing::instrument` is not compatible with this nightly Clippy lint
#![allow(unknown_lints)]
pub mod config;
pub mod node_provider;
pub mod client;
mod cross_chain_message_queue;
mod message;
#[cfg(with_simple_network)]
pub mod simple;
pub mod grpc;
pub use client::Client;
pub use message::{RpcMessage, ShardInfo};
pub use node_provider::{NodeOptions, NodeProvider};
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct HandleLiteCertRequest<'a> {
pub certificate: linera_chain::types::LiteCertificate<'a>,
pub wait_for_outgoing_messages: bool,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct HandleConfirmedCertificateRequest {
pub certificate: linera_chain::types::ConfirmedBlockCertificate,
pub wait_for_outgoing_messages: bool,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct HandleValidatedCertificateRequest {
pub certificate: linera_chain::types::ValidatedBlockCertificate,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct HandleTimeoutCertificateRequest {
pub certificate: linera_chain::types::TimeoutCertificate,
}
pub const FILE_DESCRIPTOR_SET: &[u8] = tonic::include_file_descriptor_set!("file_descriptor_set");
#[cfg(not(target_arch = "wasm32"))]
pub const CERT_PEM: &str = include_str!(concat!(env!("OUT_DIR"), "/self_signed_cert.pem"));
#[cfg(not(target_arch = "wasm32"))]
pub const KEY_PEM: &str = include_str!(concat!(env!("OUT_DIR"), "/private_key.pem"));
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/cross_chain_message_queue.rs | linera-rpc/src/cross_chain_message_queue.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types and functions common to the gRPC and simple implementations.
#![cfg(with_server)]
use std::{
collections::{hash_map::Entry, HashMap},
future::Future,
time::Duration,
};
use futures::{channel::mpsc, StreamExt as _};
use linera_base::identifiers::ChainId;
#[cfg(with_metrics)]
use linera_base::time::Instant;
use linera_core::data_types::CrossChainRequest;
use rand::Rng as _;
use tracing::{trace, warn};
use crate::config::ShardId;
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{
exponential_bucket_latencies, register_histogram, register_int_gauge,
};
use prometheus::{Histogram, IntGauge};
pub static CROSS_CHAIN_MESSAGE_TASKS: LazyLock<IntGauge> = LazyLock::new(|| {
register_int_gauge(
"cross_chain_message_tasks",
"Number of concurrent cross-chain message tasks",
)
});
pub static CROSS_CHAIN_QUEUE_WAIT_TIME: LazyLock<Histogram> = LazyLock::new(|| {
register_histogram(
"cross_chain_queue_wait_time",
"Time (ms) a cross-chain message waits in queue before handle_request is called",
exponential_bucket_latencies(10_000.0),
)
});
}
#[expect(clippy::too_many_arguments)]
pub(crate) async fn forward_cross_chain_queries<F, G>(
nickname: String,
cross_chain_max_retries: u32,
cross_chain_retry_delay: Duration,
cross_chain_sender_delay: Duration,
cross_chain_sender_failure_rate: f32,
this_shard: ShardId,
mut receiver: mpsc::Receiver<(CrossChainRequest, ShardId)>,
handle_request: F,
) where
F: Fn(ShardId, CrossChainRequest) -> G + Send + Clone + 'static,
G: Future<Output = anyhow::Result<()>> + Send + 'static,
{
let mut steps = tokio::task::JoinSet::new();
let mut job_states: HashMap<QueueId, JobState> = HashMap::new();
let run_task = |task: Task| async move {
// Record how long the message waited in queue (in milliseconds)
#[cfg(with_metrics)]
{
let queue_wait_time_ms = task.queued_at.elapsed().as_secs_f64() * 1000.0;
metrics::CROSS_CHAIN_QUEUE_WAIT_TIME.observe(queue_wait_time_ms);
}
handle_request(task.shard_id, task.request).await
};
let run_action = |action, queue, state: JobState| async move {
linera_base::time::timer::sleep(cross_chain_sender_delay).await;
let to_shard = state.task.shard_id;
(
queue,
match action {
Action::Proceed { .. } => {
if let Err(error) = run_task(state.task).await {
warn!(
nickname = state.nickname,
?error,
retry = state.retries,
from_shard = this_shard,
to_shard,
"Failed to send cross-chain query",
);
Action::Retry
} else {
trace!(from_shard = this_shard, to_shard, "Sent cross-chain query",);
Action::Proceed {
id: state.id.wrapping_add(1),
}
}
}
Action::Retry => {
linera_base::time::timer::sleep(cross_chain_retry_delay * state.retries).await;
Action::Proceed { id: state.id }
}
},
)
};
loop {
#[cfg(with_metrics)]
metrics::CROSS_CHAIN_MESSAGE_TASKS.set(job_states.len() as i64);
tokio::select! {
Some(Ok((queue, action))) = steps.join_next() => {
let Entry::Occupied(mut state) = job_states.entry(queue) else {
panic!("running job without state");
};
if state.get().is_finished(&action, cross_chain_max_retries) {
state.remove();
continue;
}
if let Action::Retry = action {
state.get_mut().retries += 1
}
steps.spawn(run_action.clone()(action, queue, state.get().clone()));
}
request = receiver.next() => {
let Some((request, shard_id)) = request else { break };
if rand::thread_rng().gen::<f32>() < cross_chain_sender_failure_rate {
warn!("Dropped 1 cross-chain message intentionally.");
continue;
}
let queue = QueueId::new(&request);
let task = Task {
shard_id,
request,
#[cfg(with_metrics)]
queued_at: Instant::now(),
};
match job_states.entry(queue) {
Entry::Vacant(entry) => {
steps.spawn(run_action.clone()(
Action::Proceed { id: 0 },
queue,
entry.insert(JobState {
id: 0,
retries: 0,
nickname: nickname.clone(),
task,
}).clone(),
));
}
Entry::Occupied(mut entry) => {
entry.insert(JobState {
id: entry.get().id + 1,
retries: 0,
nickname: nickname.clone(),
task,
});
}
}
}
else => (),
}
}
}
/// An discriminant for message queues: messages with the same queue ID will be delivered
/// in order.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
struct QueueId {
sender: ChainId,
recipient: ChainId,
is_update: bool,
}
impl QueueId {
/// Returns a discriminant for the message's queue.
fn new(request: &CrossChainRequest) -> Self {
let (sender, recipient, is_update) = match request {
CrossChainRequest::UpdateRecipient {
sender, recipient, ..
} => (*sender, *recipient, true),
CrossChainRequest::ConfirmUpdatedRecipient {
sender, recipient, ..
} => (*sender, *recipient, false),
};
QueueId {
sender,
recipient,
is_update,
}
}
}
enum Action {
/// The request has been sent successfully and the next request can be sent.
Proceed { id: usize },
/// The request failed and should be retried.
Retry,
}
#[derive(Clone)]
struct Task {
/// The ID of the shard the request is sent to.
pub shard_id: ShardId,
/// The cross-chain request to be sent.
pub request: linera_core::data_types::CrossChainRequest,
/// When this task was queued.
#[cfg(with_metrics)]
pub queued_at: Instant,
}
#[derive(Clone)]
struct JobState {
/// Queued requests are assigned incremental IDs.
pub id: usize,
/// How often the current request has been retried.
pub retries: u32,
/// The nickname of this worker, i.e. the one that is sending the request.
pub nickname: String,
/// The current request to be sent.
pub task: Task,
}
impl JobState {
/// Returns whether the job is finished and should be removed.
fn is_finished(&self, action: &Action, max_retries: u32) -> bool {
match action {
// If the action is to proceed and no new messages with a higher ID are waiting.
Action::Proceed { id } => self.id < *id,
// If the action is to retry and the maximum number of retries has been reached.
Action::Retry => self.retries >= max_retries,
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/client.rs | linera-rpc/src/client.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::CryptoHash,
data_types::{BlobContent, BlockHeight, NetworkDescription},
identifiers::{BlobId, ChainId},
};
use linera_chain::{
data_types::BlockProposal,
types::{
ConfirmedBlockCertificate, LiteCertificate, TimeoutCertificate, ValidatedBlockCertificate,
},
};
use linera_core::{
data_types::{ChainInfoQuery, ChainInfoResponse},
node::{CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode},
};
use crate::grpc::GrpcClient;
#[cfg(with_simple_network)]
use crate::simple::SimpleClient;
#[derive(Clone)]
pub enum Client {
Grpc(GrpcClient),
#[cfg(with_simple_network)]
Simple(SimpleClient),
}
impl From<GrpcClient> for Client {
fn from(client: GrpcClient) -> Self {
Self::Grpc(client)
}
}
#[cfg(with_simple_network)]
impl From<SimpleClient> for Client {
fn from(client: SimpleClient) -> Self {
Self::Simple(client)
}
}
impl ValidatorNode for Client {
type NotificationStream = NotificationStream;
fn address(&self) -> String {
match self {
Client::Grpc(grpc_client) => grpc_client.address().to_string(),
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.address(),
}
}
async fn handle_block_proposal(
&self,
proposal: BlockProposal,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => grpc_client.handle_block_proposal(proposal).await,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.handle_block_proposal(proposal).await,
}
}
async fn handle_lite_certificate(
&self,
certificate: LiteCertificate<'_>,
delivery: CrossChainMessageDelivery,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => {
grpc_client
.handle_lite_certificate(certificate, delivery)
.await
}
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client
.handle_lite_certificate(certificate, delivery)
.await
}
}
}
async fn handle_timeout_certificate(
&self,
certificate: TimeoutCertificate,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => grpc_client.handle_timeout_certificate(certificate).await,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client.handle_timeout_certificate(certificate).await
}
}
}
async fn handle_confirmed_certificate(
&self,
certificate: ConfirmedBlockCertificate,
delivery: CrossChainMessageDelivery,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => {
grpc_client
.handle_confirmed_certificate(certificate, delivery)
.await
}
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client
.handle_confirmed_certificate(certificate, delivery)
.await
}
}
}
async fn handle_validated_certificate(
&self,
certificate: ValidatedBlockCertificate,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => {
grpc_client.handle_validated_certificate(certificate).await
}
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client
.handle_validated_certificate(certificate)
.await
}
}
}
async fn handle_chain_info_query(
&self,
query: ChainInfoQuery,
) -> Result<ChainInfoResponse, NodeError> {
match self {
Client::Grpc(grpc_client) => grpc_client.handle_chain_info_query(query).await,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.handle_chain_info_query(query).await,
}
}
async fn subscribe(&self, chains: Vec<ChainId>) -> Result<Self::NotificationStream, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => Box::pin(grpc_client.subscribe(chains).await?),
#[cfg(with_simple_network)]
Client::Simple(simple_client) => Box::pin(simple_client.subscribe(chains).await?),
})
}
async fn get_version_info(&self) -> Result<linera_version::VersionInfo, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.get_version_info().await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.get_version_info().await?,
})
}
async fn get_network_description(&self) -> Result<NetworkDescription, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.get_network_description().await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.get_network_description().await?,
})
}
async fn upload_blob(&self, content: BlobContent) -> Result<BlobId, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.upload_blob(content).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.upload_blob(content).await?,
})
}
async fn download_blob(&self, blob_id: BlobId) -> Result<BlobContent, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.download_blob(blob_id).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.download_blob(blob_id).await?,
})
}
async fn download_pending_blob(
&self,
chain_id: ChainId,
blob_id: BlobId,
) -> Result<BlobContent, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => {
grpc_client.download_pending_blob(chain_id, blob_id).await?
}
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client
.download_pending_blob(chain_id, blob_id)
.await?
}
})
}
async fn handle_pending_blob(
&self,
chain_id: ChainId,
blob: BlobContent,
) -> Result<ChainInfoResponse, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.handle_pending_blob(chain_id, blob).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client.handle_pending_blob(chain_id, blob).await?
}
})
}
async fn download_certificate(
&self,
hash: CryptoHash,
) -> Result<ConfirmedBlockCertificate, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.download_certificate(hash).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.download_certificate(hash).await?,
})
}
async fn download_certificates(
&self,
hashes: Vec<CryptoHash>,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.download_certificates(hashes).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.download_certificates(hashes).await?,
})
}
async fn download_certificates_by_heights(
&self,
chain_id: ChainId,
mut heights: Vec<BlockHeight>,
) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
heights.sort();
Ok(match self {
Client::Grpc(grpc_client) => {
grpc_client
.download_certificates_by_heights(chain_id, heights)
.await?
}
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client
.download_certificates_by_heights(chain_id, heights)
.await?
}
})
}
async fn blob_last_used_by(&self, blob_id: BlobId) -> Result<CryptoHash, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.blob_last_used_by(blob_id).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.blob_last_used_by(blob_id).await?,
})
}
async fn blob_last_used_by_certificate(
&self,
blob_id: BlobId,
) -> Result<ConfirmedBlockCertificate, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.blob_last_used_by_certificate(blob_id).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => {
simple_client.blob_last_used_by_certificate(blob_id).await?
}
})
}
async fn missing_blob_ids(&self, blob_ids: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.missing_blob_ids(blob_ids).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.missing_blob_ids(blob_ids).await?,
})
}
async fn get_shard_info(
&self,
chain_id: ChainId,
) -> Result<linera_core::data_types::ShardInfo, NodeError> {
Ok(match self {
Client::Grpc(grpc_client) => grpc_client.get_shard_info(chain_id).await?,
#[cfg(with_simple_network)]
Client::Simple(simple_client) => simple_client.get_shard_info(chain_id).await?,
})
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/node_provider.rs | linera-rpc/src/node_provider.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::time::Duration;
use linera_core::node::{NodeError, ValidatorNodeProvider};
#[cfg(with_simple_network)]
use crate::simple::SimpleNodeProvider;
use crate::{client::Client, grpc::GrpcNodeProvider};
/// A general node provider which delegates node provision to the underlying
/// node provider according to the `ValidatorPublicNetworkConfig`.
#[derive(Clone)]
pub struct NodeProvider {
grpc: GrpcNodeProvider,
#[cfg(with_simple_network)]
simple: SimpleNodeProvider,
}
impl NodeProvider {
pub fn new(options: NodeOptions) -> Self {
Self {
grpc: GrpcNodeProvider::new(options),
#[cfg(with_simple_network)]
simple: SimpleNodeProvider::new(options),
}
}
}
impl ValidatorNodeProvider for NodeProvider {
type Node = Client;
fn make_node(&self, address: &str) -> anyhow::Result<Self::Node, NodeError> {
let address = address.to_lowercase();
#[cfg(with_simple_network)]
if address.starts_with("tcp") || address.starts_with("udp") {
return Ok(Client::Simple(self.simple.make_node(&address)?));
}
if address.starts_with("grpc") {
return Ok(Client::Grpc(self.grpc.make_node(&address)?));
}
Err(NodeError::CannotResolveValidatorAddress { address })
}
}
#[derive(Copy, Clone, Default)]
pub struct NodeOptions {
pub send_timeout: Duration,
pub recv_timeout: Duration,
pub retry_delay: Duration,
pub max_retries: u32,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/message.rs | linera-rpc/src/message.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::CryptoHash,
data_types::{BlobContent, BlockHeight, NetworkDescription},
identifiers::{BlobId, ChainId},
};
use linera_chain::{
data_types::{BlockProposal, LiteVote},
types::{ConfirmedBlock, ConfirmedBlockCertificate},
};
use linera_core::{
data_types::{ChainInfoQuery, ChainInfoResponse, CrossChainRequest},
node::NodeError,
};
use linera_version::VersionInfo;
use serde::{Deserialize, Serialize};
use crate::{
config::ShardId, HandleConfirmedCertificateRequest, HandleLiteCertRequest,
HandleTimeoutCertificateRequest, HandleValidatedCertificateRequest,
};
/// Information about shard configuration for a specific chain.
#[derive(Clone, Serialize, Deserialize, Debug)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct ShardInfo {
/// The ID of the shard assigned to the chain.
pub shard_id: ShardId,
/// The total number of shards in the validator network.
pub total_shards: usize,
}
#[derive(Clone, Serialize, Deserialize, Debug)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum RpcMessage {
// Inbound
BlockProposal(Box<BlockProposal>),
TimeoutCertificate(Box<HandleTimeoutCertificateRequest>),
ValidatedCertificate(Box<HandleValidatedCertificateRequest>),
ConfirmedCertificate(Box<HandleConfirmedCertificateRequest>),
LiteCertificate(Box<HandleLiteCertRequest<'static>>),
ChainInfoQuery(Box<ChainInfoQuery>),
UploadBlob(Box<BlobContent>),
DownloadBlob(Box<BlobId>),
DownloadPendingBlob(Box<(ChainId, BlobId)>),
HandlePendingBlob(Box<(ChainId, BlobContent)>),
DownloadConfirmedBlock(Box<CryptoHash>),
DownloadCertificates(Vec<CryptoHash>),
DownloadCertificatesByHeights(ChainId, Vec<BlockHeight>),
BlobLastUsedBy(Box<BlobId>),
MissingBlobIds(Vec<BlobId>),
VersionInfoQuery,
NetworkDescriptionQuery,
// Outbound
Vote(Box<LiteVote>),
ChainInfoResponse(Box<ChainInfoResponse>),
Error(Box<NodeError>),
VersionInfoResponse(Box<VersionInfo>),
NetworkDescriptionResponse(Box<NetworkDescription>),
UploadBlobResponse(Box<BlobId>),
DownloadBlobResponse(Box<BlobContent>),
DownloadPendingBlobResponse(Box<BlobContent>),
DownloadConfirmedBlockResponse(Box<ConfirmedBlock>),
DownloadCertificatesResponse(Vec<ConfirmedBlockCertificate>),
DownloadCertificatesByHeightsResponse(Vec<ConfirmedBlockCertificate>),
BlobLastUsedByResponse(Box<CryptoHash>),
MissingBlobIdsResponse(Vec<BlobId>),
// Internal to a validator
CrossChainRequest(Box<CrossChainRequest>),
BlobLastUsedByCertificate(Box<BlobId>),
BlobLastUsedByCertificateResponse(Box<ConfirmedBlockCertificate>),
ShardInfoQuery(ChainId),
ShardInfoResponse(ShardInfo),
}
impl RpcMessage {
/// Obtains the [`ChainId`] of the chain targeted by this message, if there is one.
///
/// Only inbound messages have target chains.
pub fn target_chain_id(&self) -> Option<ChainId> {
use RpcMessage::*;
let chain_id = match self {
BlockProposal(proposal) => proposal.content.block.chain_id,
LiteCertificate(request) => request.certificate.value.chain_id,
TimeoutCertificate(request) => request.certificate.inner().chain_id(),
ValidatedCertificate(request) => request.certificate.inner().chain_id(),
ConfirmedCertificate(request) => request.certificate.inner().chain_id(),
ChainInfoQuery(query) => query.chain_id,
CrossChainRequest(request) => request.target_chain_id(),
DownloadPendingBlob(request) => request.0,
DownloadCertificatesByHeights(chain_id, _) => *chain_id,
HandlePendingBlob(request) => request.0,
ShardInfoQuery(chain_id) => *chain_id,
Vote(_)
| Error(_)
| ChainInfoResponse(_)
| VersionInfoQuery
| VersionInfoResponse(_)
| NetworkDescriptionQuery
| NetworkDescriptionResponse(_)
| UploadBlob(_)
| UploadBlobResponse(_)
| DownloadBlob(_)
| DownloadBlobResponse(_)
| DownloadPendingBlobResponse(_)
| DownloadConfirmedBlock(_)
| DownloadConfirmedBlockResponse(_)
| DownloadCertificatesByHeightsResponse(_)
| DownloadCertificates(_)
| BlobLastUsedBy(_)
| BlobLastUsedByResponse(_)
| BlobLastUsedByCertificate(_)
| BlobLastUsedByCertificateResponse(_)
| MissingBlobIds(_)
| MissingBlobIdsResponse(_)
| ShardInfoResponse(_)
| DownloadCertificatesResponse(_) => {
return None;
}
};
Some(chain_id)
}
/// Whether this message is "local" i.e. will be executed locally on the proxy
/// or if it'll be proxied to the server.
pub fn is_local_message(&self) -> bool {
use RpcMessage::*;
match self {
VersionInfoQuery
| NetworkDescriptionQuery
| ShardInfoQuery(_)
| UploadBlob(_)
| DownloadBlob(_)
| DownloadConfirmedBlock(_)
| BlobLastUsedBy(_)
| BlobLastUsedByCertificate(_)
| MissingBlobIds(_)
| DownloadCertificates(_)
| DownloadCertificatesByHeights(_, _) => true,
BlockProposal(_)
| LiteCertificate(_)
| TimeoutCertificate(_)
| ValidatedCertificate(_)
| ConfirmedCertificate(_)
| ChainInfoQuery(_)
| CrossChainRequest(_)
| Vote(_)
| Error(_)
| ChainInfoResponse(_)
| VersionInfoResponse(_)
| NetworkDescriptionResponse(_)
| ShardInfoResponse(_)
| UploadBlobResponse(_)
| DownloadPendingBlob(_)
| DownloadPendingBlobResponse(_)
| HandlePendingBlob(_)
| DownloadBlobResponse(_)
| DownloadConfirmedBlockResponse(_)
| BlobLastUsedByResponse(_)
| BlobLastUsedByCertificateResponse(_)
| MissingBlobIdsResponse(_)
| DownloadCertificatesResponse(_)
| DownloadCertificatesByHeightsResponse(_) => false,
}
}
}
impl TryFrom<RpcMessage> for ChainInfoResponse {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::ChainInfoResponse(response) => Ok(*response),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for VersionInfo {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::VersionInfoResponse(version_info) => Ok(*version_info),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for BlobContent {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::DownloadBlobResponse(blob)
| RpcMessage::DownloadPendingBlobResponse(blob) => Ok(*blob),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for ConfirmedBlock {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::DownloadConfirmedBlockResponse(certificate) => Ok(*certificate),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for ConfirmedBlockCertificate {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::BlobLastUsedByCertificateResponse(certificate) => Ok(*certificate),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for Vec<ConfirmedBlockCertificate> {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::DownloadCertificatesResponse(certificates) => Ok(certificates),
RpcMessage::DownloadCertificatesByHeightsResponse(certificates) => Ok(certificates),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for CryptoHash {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::BlobLastUsedByResponse(hash) => Ok(*hash),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for NetworkDescription {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::NetworkDescriptionResponse(description) => Ok(*description),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for Vec<BlobId> {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::MissingBlobIdsResponse(blob_ids) => Ok(blob_ids),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for BlobId {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::UploadBlobResponse(blob_id) => Ok(*blob_id),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl TryFrom<RpcMessage> for ShardInfo {
type Error = NodeError;
fn try_from(message: RpcMessage) -> Result<Self, Self::Error> {
match message {
RpcMessage::ShardInfoResponse(shard_info) => Ok(shard_info),
RpcMessage::Error(error) => Err(*error),
_ => Err(NodeError::UnexpectedMessage),
}
}
}
impl From<NodeError> for RpcMessage {
fn from(error: NodeError) -> Self {
RpcMessage::Error(Box::new(error))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/client.rs | linera-rpc/src/grpc/client.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{fmt, future::Future, iter};
use futures::{future, stream, StreamExt};
use linera_base::{
crypto::CryptoHash,
data_types::{BlobContent, BlockHeight, NetworkDescription},
ensure,
identifiers::{BlobId, ChainId},
time::Duration,
};
use linera_chain::{
data_types::{self},
types::{
self, Certificate, ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate,
LiteCertificate, Timeout, ValidatedBlock,
},
};
use linera_core::{
data_types::{CertificatesByHeightRequest, ChainInfoResponse},
node::{CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode},
worker::Notification,
};
use linera_version::VersionInfo;
use tonic::{Code, IntoRequest, Request, Status};
use tracing::{debug, instrument, trace, warn, Level};
use super::{
api::{self, validator_node_client::ValidatorNodeClient, SubscriptionRequest},
transport, GRPC_MAX_MESSAGE_SIZE,
};
use crate::{
grpc::api::RawCertificate, HandleConfirmedCertificateRequest, HandleLiteCertRequest,
HandleTimeoutCertificateRequest, HandleValidatedCertificateRequest,
};
/// A gRPC client for a single validator node, with a retry policy for
/// transient failures.
#[derive(Clone)]
pub struct GrpcClient {
    /// The remote node's address, used for identification and tracing fields.
    address: String,
    /// Generated tonic client, configured with the shared message-size limits.
    client: ValidatorNodeClient<transport::Channel>,
    /// Base delay between retries; the actual delay grows linearly with the
    /// attempt count.
    retry_delay: Duration,
    /// Maximum number of retries before a request is reported as failed.
    max_retries: u32,
}
impl GrpcClient {
    /// Creates a new client for `address`, communicating over `channel`.
    ///
    /// `retry_delay` and `max_retries` configure the linear-backoff retry
    /// policy used by [`Self::delegate`] and by notification subscriptions.
    pub fn new(
        address: String,
        channel: transport::Channel,
        retry_delay: Duration,
        max_retries: u32,
    ) -> Self {
        let client = ValidatorNodeClient::new(channel)
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
        Self {
            address,
            client,
            retry_delay,
            max_retries,
        }
    }
    /// Returns the address this client connects to.
    pub fn address(&self) -> &str {
        &self.address
    }
    /// Returns whether this gRPC status means the server stream should be reconnected to, or not.
    /// Logs unexpected status codes at trace level.
    fn is_retryable(status: &Status) -> bool {
        match status.code() {
            Code::DeadlineExceeded | Code::Aborted | Code::Unavailable | Code::Unknown => {
                trace!("gRPC request interrupted: {status:?}; retrying");
                true
            }
            // These codes are not anticipated here but are still treated as
            // transient and worth retrying.
            Code::Ok | Code::Cancelled | Code::ResourceExhausted => {
                trace!("Unexpected gRPC status: {status:?}; retrying");
                true
            }
            Code::NotFound => false, // This code is used if e.g. the validator is missing blobs.
            // The remaining codes indicate non-transient failures: retrying
            // would not help, so the error is surfaced to the caller.
            Code::InvalidArgument
            | Code::AlreadyExists
            | Code::PermissionDenied
            | Code::FailedPrecondition
            | Code::OutOfRange
            | Code::Unimplemented
            | Code::Internal
            | Code::DataLoss
            | Code::Unauthenticated => {
                trace!("Unexpected gRPC status: {status:?}");
                false
            }
        }
    }
    /// Sends `request` via the gRPC call `f`, retrying retryable failures up
    /// to `self.max_retries` times with a linearly increasing delay
    /// (`retry_delay * retry_count`; the first retry happens immediately).
    ///
    /// Returns the response body on success, or [`NodeError::GrpcError`] if
    /// the request could not be converted to its proto form or ultimately
    /// failed.
    async fn delegate<F, Fut, R, S>(
        &self,
        f: F,
        request: impl TryInto<R> + fmt::Debug + Clone,
        handler: &str,
    ) -> Result<S, NodeError>
    where
        F: Fn(ValidatorNodeClient<transport::Channel>, Request<R>) -> Fut,
        Fut: Future<Output = Result<tonic::Response<S>, Status>>,
        R: IntoRequest<R> + Clone,
    {
        let mut retry_count = 0;
        let request_inner = request.try_into().map_err(|_| NodeError::GrpcError {
            error: "could not convert request to proto".to_string(),
        })?;
        loop {
            match f(self.client.clone(), Request::new(request_inner.clone())).await {
                Err(s) if Self::is_retryable(&s) && retry_count < self.max_retries => {
                    let delay = self.retry_delay.saturating_mul(retry_count);
                    retry_count += 1;
                    linera_base::time::timer::sleep(delay).await;
                    continue;
                }
                Err(s) => {
                    return Err(NodeError::GrpcError {
                        error: format!("remote request [{handler}] failed with status: {s:?}"),
                    });
                }
                Ok(result) => return Ok(result.into_inner()),
            };
        }
    }
    /// Unwraps a `ChainInfoResult` protobuf into either the deserialized
    /// [`ChainInfoResponse`](linera_core::data_types::ChainInfoResponse) it
    /// carries, or the error it carries.
    fn try_into_chain_info(
        result: api::ChainInfoResult,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        let inner = result.inner.ok_or_else(|| NodeError::GrpcError {
            error: "missing body from response".to_string(),
        })?;
        match inner {
            api::chain_info_result::Inner::ChainInfoResponse(response) => {
                Ok(response.try_into().map_err(|err| NodeError::GrpcError {
                    error: format!("failed to unmarshal response: {}", err),
                })?)
            }
            // The error payload is a bincode-serialized `NodeError`.
            api::chain_info_result::Inner::Error(error) => Err(bincode::deserialize(&error)
                .map_err(|err| NodeError::GrpcError {
                    error: format!("failed to unmarshal error message: {}", err),
                })?),
        }
    }
}
/// Unwraps a `PendingBlobResult` protobuf into either the blob's content or
/// the [`NodeError`] embedded in it.
impl TryFrom<api::PendingBlobResult> for BlobContent {
    type Error = NodeError;
    fn try_from(result: api::PendingBlobResult) -> Result<Self, Self::Error> {
        let inner = result.inner.ok_or_else(|| NodeError::GrpcError {
            error: "missing body from response".to_string(),
        })?;
        match inner {
            api::pending_blob_result::Inner::Blob(blob) => {
                Ok(blob.try_into().map_err(|err| NodeError::GrpcError {
                    error: format!("failed to unmarshal response: {}", err),
                })?)
            }
            // The error payload is a bincode-serialized `NodeError`.
            api::pending_blob_result::Inner::Error(error) => Err(bincode::deserialize(&error)
                .map_err(|err| NodeError::GrpcError {
                    error: format!("failed to unmarshal error message: {}", err),
                })?),
        }
    }
}
/// Logs the request at debug level and forwards it through
/// [`GrpcClient::delegate`], invoking the gRPC method named `$handler` on the
/// generated client.
macro_rules! client_delegate {
    ($self:ident, $handler:ident, $req:ident) => {{
        debug!(
            handler = stringify!($handler),
            request = ?$req,
            "sending gRPC request"
        );
        $self
            .delegate(
                |mut client, req| async move { client.$handler(req).await },
                $req,
                stringify!($handler),
            )
            .await
    }};
}
impl ValidatorNode for GrpcClient {
type NotificationStream = NotificationStream;
fn address(&self) -> String {
self.address.clone()
}
    /// Submits a block proposal and returns the updated chain information.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn handle_block_proposal(
        &self,
        proposal: data_types::BlockProposal,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        GrpcClient::try_into_chain_info(client_delegate!(self, handle_block_proposal, proposal)?)
    }
    /// Submits a lite certificate, optionally waiting for outgoing messages.
    // NOTE(review): unlike the sibling handlers, this `instrument` has no
    // `err(level = Level::WARN)` — confirm whether that omission is deliberate.
    #[instrument(target = "grpc_client", skip_all, fields(address = self.address))]
    async fn handle_lite_certificate(
        &self,
        certificate: types::LiteCertificate<'_>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        let wait_for_outgoing_messages = delivery.wait_for_outgoing_messages();
        let request = HandleLiteCertRequest {
            certificate,
            wait_for_outgoing_messages,
        };
        GrpcClient::try_into_chain_info(client_delegate!(self, handle_lite_certificate, request)?)
    }
    /// Submits a confirmed-block certificate, optionally waiting for outgoing messages.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn handle_confirmed_certificate(
        &self,
        certificate: GenericCertificate<ConfirmedBlock>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        let wait_for_outgoing_messages: bool = delivery.wait_for_outgoing_messages();
        let request = HandleConfirmedCertificateRequest {
            certificate,
            wait_for_outgoing_messages,
        };
        GrpcClient::try_into_chain_info(client_delegate!(
            self,
            handle_confirmed_certificate,
            request
        )?)
    }
    /// Submits a validated-block certificate.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn handle_validated_certificate(
        &self,
        certificate: GenericCertificate<ValidatedBlock>,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        let request = HandleValidatedCertificateRequest { certificate };
        GrpcClient::try_into_chain_info(client_delegate!(
            self,
            handle_validated_certificate,
            request
        )?)
    }
    /// Submits a timeout certificate.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn handle_timeout_certificate(
        &self,
        certificate: GenericCertificate<Timeout>,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        let request = HandleTimeoutCertificateRequest { certificate };
        GrpcClient::try_into_chain_info(client_delegate!(
            self,
            handle_timeout_certificate,
            request
        )?)
    }
    /// Queries the validator for chain information.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn handle_chain_info_query(
        &self,
        query: linera_core::data_types::ChainInfoQuery,
    ) -> Result<linera_core::data_types::ChainInfoResponse, NodeError> {
        GrpcClient::try_into_chain_info(client_delegate!(self, handle_chain_info_query, query)?)
    }
    /// Subscribes to notifications for `chains`, returning a stream that
    /// transparently reconnects on retryable errors.
    ///
    /// The first connection attempt happens before this method returns; the
    /// returned stream then reconnects with a linearly increasing delay after
    /// retryable errors and ends after a fatal error or `max_retries`
    /// consecutive failures.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn subscribe(&self, chains: Vec<ChainId>) -> Result<Self::NotificationStream, NodeError> {
        let retry_delay = self.retry_delay;
        let max_retries = self.max_retries;
        let mut retry_count = 0;
        let subscription_request = SubscriptionRequest {
            chain_ids: chains.into_iter().map(|chain| chain.into()).collect(),
        };
        let mut client = self.client.clone();
        // Make the first connection attempt before returning from this method.
        let mut stream = Some(
            client
                .subscribe(subscription_request.clone())
                .await
                .map_err(|status| NodeError::SubscriptionFailed {
                    status: status.to_string(),
                })?
                .into_inner(),
        );
        // A stream of `Result<grpc::Notification, tonic::Status>` that keeps calling
        // `client.subscribe(request)` endlessly and without delay.
        let endlessly_retrying_notification_stream = stream::unfold((), move |()| {
            let mut client = client.clone();
            let subscription_request = subscription_request.clone();
            // The initial stream (created above) is consumed on the first
            // unfold step; later steps re-subscribe from scratch.
            let mut stream = stream.take();
            async move {
                let stream = if let Some(stream) = stream.take() {
                    future::Either::Right(stream)
                } else {
                    match client.subscribe(subscription_request.clone()).await {
                        Err(err) => future::Either::Left(stream::iter(iter::once(Err(err)))),
                        Ok(response) => future::Either::Right(response.into_inner()),
                    }
                };
                Some((stream, ()))
            }
        })
        .flatten();
        let span = tracing::info_span!("notification stream");
        // The stream of `Notification`s that inserts increasing delays after retriable errors, and
        // terminates after unexpected or fatal errors.
        let notification_stream = endlessly_retrying_notification_stream
            .map(|result| {
                Option::<Notification>::try_from(result?).map_err(|err| {
                    let message = format!("Could not deserialize notification: {}", err);
                    tonic::Status::new(Code::Internal, message)
                })
            })
            .take_while(move |result| {
                // Any successfully delivered notification resets the retry budget.
                let Err(status) = result else {
                    retry_count = 0;
                    return future::Either::Left(future::ready(true));
                };
                if !span.in_scope(|| Self::is_retryable(status)) || retry_count >= max_retries {
                    return future::Either::Left(future::ready(false));
                }
                let delay = retry_delay.saturating_mul(retry_count);
                retry_count += 1;
                future::Either::Right(async move {
                    linera_base::time::timer::sleep(delay).await;
                    true
                })
            })
            .filter_map(|result| {
                future::ready(match result {
                    Ok(notification @ Some(_)) => notification,
                    Ok(None) => None,
                    Err(err) => {
                        warn!("{}", err);
                        None
                    }
                })
            });
        Ok(Box::pin(notification_stream))
    }
    /// Fetches the validator's version information.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn get_version_info(&self) -> Result<VersionInfo, NodeError> {
        let req = ();
        Ok(client_delegate!(self, get_version_info, req)?.into())
    }
    /// Fetches the validator's view of the network description.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn get_network_description(&self) -> Result<NetworkDescription, NodeError> {
        let req = ();
        Ok(client_delegate!(self, get_network_description, req)?.try_into()?)
    }
    /// Uploads a blob and returns the ID assigned to it.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn upload_blob(&self, content: BlobContent) -> Result<BlobId, NodeError> {
        Ok(client_delegate!(self, upload_blob, content)?.try_into()?)
    }
    /// Downloads the blob with the given ID.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn download_blob(&self, blob_id: BlobId) -> Result<BlobContent, NodeError> {
        Ok(client_delegate!(self, download_blob, blob_id)?.try_into()?)
    }
    /// Downloads a blob that is pending on the given chain.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn download_pending_blob(
        &self,
        chain_id: ChainId,
        blob_id: BlobId,
    ) -> Result<BlobContent, NodeError> {
        let req = (chain_id, blob_id);
        client_delegate!(self, download_pending_blob, req)?.try_into()
    }
    /// Submits a pending blob for the given chain.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn handle_pending_blob(
        &self,
        chain_id: ChainId,
        blob: BlobContent,
    ) -> Result<ChainInfoResponse, NodeError> {
        let req = (chain_id, blob);
        GrpcClient::try_into_chain_info(client_delegate!(self, handle_pending_blob, req)?)
    }
    /// Downloads a single confirmed-block certificate by hash.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn download_certificate(
        &self,
        hash: CryptoHash,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        ConfirmedBlockCertificate::try_from(Certificate::try_from(client_delegate!(
            self,
            download_certificate,
            hash
        )?)?)
        .map_err(|_| NodeError::UnexpectedCertificateValue)
    }
    /// Downloads certificates for all `hashes`, fetching in batches until all
    /// are retrieved.
    ///
    /// The server may return fewer certificates per response than requested;
    /// this loops, requesting the remainder each time. Fails with
    /// [`NodeError::MissingCertificates`] if the server stops making progress
    /// (returns an empty batch) before all hashes are satisfied.
    #[instrument(target = "grpc_client", skip_all, err(level = Level::WARN), fields(address = self.address))]
    async fn download_certificates(
        &self,
        hashes: Vec<CryptoHash>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        let mut missing_hashes = hashes;
        let mut certs_collected = Vec::with_capacity(missing_hashes.len());
        while !missing_hashes.is_empty() {
            // Macro doesn't compile if we pass `missing_hashes.clone()` directly to `client_delegate!`.
            let missing = missing_hashes.clone();
            let mut received: Vec<ConfirmedBlockCertificate> = Vec::<Certificate>::try_from(
                client_delegate!(self, download_certificates, missing)?,
            )?
            .into_iter()
            .map(|cert| {
                ConfirmedBlockCertificate::try_from(cert)
                    .map_err(|_| NodeError::UnexpectedCertificateValue)
            })
            .collect::<Result<_, _>>()?;
            // In the case of the server not returning any certificates, we break the loop.
            if received.is_empty() {
                break;
            }
            // Honest validator should return certificates in the same order as the requested hashes.
            missing_hashes = missing_hashes[received.len()..].to_vec();
            certs_collected.append(&mut received);
        }
        ensure!(
            missing_hashes.is_empty(),
            NodeError::MissingCertificates(missing_hashes)
        );
        Ok(certs_collected)
    }
    /// Downloads confirmed-block certificates for the given `heights` of one
    /// chain, fetching in batches until the server stops returning results.
    ///
    /// NOTE(review): unlike `download_certificates`, this returns a possibly
    /// partial result without error when the server answers with an empty
    /// batch — confirm whether callers rely on that behavior.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn download_certificates_by_heights(
        &self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        let mut missing = heights;
        let mut certs_collected = vec![];
        while !missing.is_empty() {
            let request = CertificatesByHeightRequest {
                chain_id,
                heights: missing.clone(),
            };
            // Each raw certificate is a BCS-encoded lite certificate plus its
            // BCS-encoded confirmed block; the two are re-joined locally.
            let mut received: Vec<ConfirmedBlockCertificate> =
                client_delegate!(self, download_raw_certificates_by_heights, request)?
                    .certificates
                    .into_iter()
                    .map(
                        |RawCertificate {
                             lite_certificate,
                             confirmed_block,
                         }| {
                            let cert = bcs::from_bytes::<LiteCertificate>(&lite_certificate)
                                .map_err(|_| NodeError::UnexpectedCertificateValue)?;
                            let block = bcs::from_bytes::<ConfirmedBlock>(&confirmed_block)
                                .map_err(|_| NodeError::UnexpectedCertificateValue)?;
                            cert.with_value(block)
                                .ok_or(NodeError::UnexpectedCertificateValue)
                        },
                    )
                    .collect::<Result<_, _>>()?;
            if received.is_empty() {
                break;
            }
            // Honest validator should return certificates in the same order as the requested hashes.
            missing = missing[received.len()..].to_vec();
            certs_collected.append(&mut received);
        }
        Ok(certs_collected)
    }
    /// Returns the hash of the last certificate that used the given blob.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn blob_last_used_by(&self, blob_id: BlobId) -> Result<CryptoHash, NodeError> {
        Ok(client_delegate!(self, blob_last_used_by, blob_id)?.try_into()?)
    }
    /// Returns the last certificate that used the given blob.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn blob_last_used_by_certificate(
        &self,
        blob_id: BlobId,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        Ok(client_delegate!(self, blob_last_used_by_certificate, blob_id)?.try_into()?)
    }
    /// Returns which of `blob_ids` the validator does not have.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn missing_blob_ids(&self, blob_ids: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError> {
        Ok(client_delegate!(self, missing_blob_ids, blob_ids)?.try_into()?)
    }
    /// Returns the shard assignment for the given chain on this validator.
    #[instrument(target = "grpc_client", skip(self), err(level = Level::WARN), fields(address = self.address))]
    async fn get_shard_info(
        &self,
        chain_id: ChainId,
    ) -> Result<linera_core::data_types::ShardInfo, NodeError> {
        let response = client_delegate!(self, get_shard_info, chain_id)?;
        Ok(linera_core::data_types::ShardInfo {
            shard_id: response.shard_id as usize,
            total_shards: response.total_shards as usize,
        })
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/mod.rs | linera-rpc/src/grpc/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod client;
mod conversions;
mod node_provider;
pub mod pool;
#[cfg(with_server)]
mod server;
pub mod transport;
pub use client::*;
pub use conversions::*;
pub use node_provider::*;
#[cfg(with_server)]
pub use server::*;
pub mod api {
tonic::include_proto!("rpc.v1");
}
/// Errors that can occur while setting up or running gRPC clients and servers.
#[derive(thiserror::Error, Debug)]
pub enum GrpcError {
    #[error("failed to connect to address: {0}")]
    ConnectionFailed(#[from] transport::Error),
    #[error("failed to execute task to completion: {0}")]
    Join(#[from] futures::channel::oneshot::Canceled),
    #[error("failed to parse socket address: {0}")]
    SocketAddr(#[from] std::net::AddrParseError),
    #[cfg(with_server)]
    #[error(transparent)]
    Reflection(#[from] tonic_reflection::server::Error),
}
/// One mebibyte, in bytes.
const MEBIBYTE: usize = 1024 * 1024;
/// Maximum size of an encoded or decoded gRPC message, in bytes.
pub const GRPC_MAX_MESSAGE_SIZE: usize = 16 * MEBIBYTE;
/// Limit of gRPC message size up to which we will try to populate with data when estimating.
/// We leave 30% of buffer for the rest of the message and potential underestimation.
pub const GRPC_CHUNKED_MESSAGE_FILL_LIMIT: usize = GRPC_MAX_MESSAGE_SIZE * 7 / 10;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/node_provider.rs | linera-rpc/src/grpc/node_provider.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::str::FromStr as _;
use linera_base::time::Duration;
use linera_core::node::{NodeError, ValidatorNodeProvider};
use super::GrpcClient;
use crate::{
config::ValidatorPublicNetworkConfig,
grpc::{pool::GrpcConnectionPool, transport},
node_provider::NodeOptions,
};
/// A [`ValidatorNodeProvider`] that creates [`GrpcClient`]s backed by a shared
/// connection pool.
#[derive(Clone)]
pub struct GrpcNodeProvider {
    /// Pool of gRPC channels, keyed by address, reused across clients.
    pool: GrpcConnectionPool,
    /// Retry settings forwarded to every created client.
    retry_delay: Duration,
    max_retries: u32,
}
impl GrpcNodeProvider {
    /// Creates a node provider with a fresh connection pool configured from
    /// `options`.
    pub fn new(options: NodeOptions) -> Self {
        Self {
            pool: GrpcConnectionPool::new(transport::Options::from(&options)),
            retry_delay: options.retry_delay,
            max_retries: options.max_retries,
        }
    }
}
impl ValidatorNodeProvider for GrpcNodeProvider {
    type Node = GrpcClient;
    /// Builds a [`GrpcClient`] for the validator at `address`.
    ///
    /// The address is parsed as a [`ValidatorPublicNetworkConfig`]; the channel
    /// is taken from (or inserted into) the shared connection pool.
    fn make_node(&self, address: &str) -> Result<Self::Node, NodeError> {
        let network = ValidatorPublicNetworkConfig::from_str(address).map_err(|_| {
            NodeError::CannotResolveValidatorAddress {
                address: address.to_string(),
            }
        })?;
        let http_address = network.http_address();
        let channel =
            self.pool
                .channel(http_address.clone())
                .map_err(|error| NodeError::GrpcError {
                    error: format!("error creating channel: {}", error),
                })?;
        Ok(GrpcClient::new(
            http_address,
            channel,
            self.retry_delay,
            self.max_retries,
        ))
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/transport.rs | linera-rpc/src/grpc/transport.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use crate::NodeOptions;
/// Timeout configuration for creating gRPC channels.
#[derive(Clone, Debug, Default)]
pub struct Options {
    /// Maximum time allowed for establishing a connection; `None` means no limit.
    pub connect_timeout: Option<linera_base::time::Duration>,
    /// Per-request timeout; `None` means no limit.
    pub timeout: Option<linera_base::time::Duration>,
}
impl From<&'_ NodeOptions> for Options {
    /// Derives channel timeouts from a node's send/receive timeouts.
    fn from(node_options: &NodeOptions) -> Self {
        let connect_timeout = Some(node_options.send_timeout);
        let timeout = Some(node_options.recv_timeout);
        Self {
            connect_timeout,
            timeout,
        }
    }
}
// Channel construction differs per target: the web build uses a WASM-friendly
// gRPC-web client, native builds use tonic's HTTP/2 transport.
cfg_if::cfg_if! {
    if #[cfg(web)] {
        pub use tonic_web_wasm_client::{Client as Channel, Error};
        /// Creates a (lazily connecting) gRPC-web channel to `address`.
        pub fn create_channel(address: String, _options: &Options) -> Result<Channel, Error> {
            // TODO(#1817): this should respect `options`
            Ok(tonic_web_wasm_client::Client::new(address))
        }
    } else {
        pub use tonic::transport::{Channel, Error};
        /// Creates a lazily-connecting tonic channel to `address`, with TLS
        /// (webpki roots), TCP/HTTP2 keep-alive, and the timeouts from `options`.
        pub fn create_channel(
            address: String,
            options: &Options,
        ) -> Result<Channel, Error> {
            let mut endpoint = tonic::transport::Endpoint::from_shared(address)?
                .tls_config(tonic::transport::channel::ClientTlsConfig::default().with_webpki_roots())?
                .tcp_keepalive(Some(std::time::Duration::from_secs(60)))
                .http2_keep_alive_interval(std::time::Duration::from_secs(30))
                .keep_alive_timeout(std::time::Duration::from_secs(10))
                .keep_alive_while_idle(true);
            if let Some(timeout) = options.connect_timeout {
                endpoint = endpoint.connect_timeout(timeout);
            }
            if let Some(timeout) = options.timeout {
                endpoint = endpoint.timeout(timeout);
            }
            // `connect_lazy` defers the actual connection to first use.
            Ok(endpoint.connect_lazy())
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/server.rs | linera-rpc/src/grpc/server.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
net::{IpAddr, SocketAddr},
str::FromStr,
task::{Context, Poll},
};
use futures::{channel::mpsc, future::BoxFuture, FutureExt as _};
use linera_base::{
data_types::Blob,
identifiers::ChainId,
time::{Duration, Instant},
};
use linera_core::{
join_set_ext::JoinSet,
node::NodeError,
worker::{NetworkActions, Notification, Reason, WorkerState},
JoinSetExt as _, TaskHandle,
};
use linera_storage::Storage;
use tokio::sync::{broadcast::error::RecvError, oneshot};
use tokio_util::sync::CancellationToken;
use tonic::{transport::Channel, Request, Response, Status};
use tower::{builder::ServiceBuilder, Layer, Service};
use tracing::{debug, error, info, instrument, trace, warn};
use super::{
api::{
self,
notifier_service_client::NotifierServiceClient,
validator_worker_client::ValidatorWorkerClient,
validator_worker_server::{ValidatorWorker as ValidatorWorkerRpc, ValidatorWorkerServer},
BlockProposal, ChainInfoQuery, ChainInfoResult, CrossChainRequest,
HandlePendingBlobRequest, LiteCertificate, PendingBlobRequest, PendingBlobResult,
},
pool::GrpcConnectionPool,
GrpcError, GRPC_MAX_MESSAGE_SIZE,
};
use crate::{
config::{CrossChainConfig, NotificationConfig, ShardId, ValidatorInternalNetworkConfig},
cross_chain_message_queue, HandleConfirmedCertificateRequest, HandleLiteCertRequest,
HandleTimeoutCertificateRequest, HandleValidatedCertificateRequest,
};
/// Queue of cross-chain requests paired with their destination shard.
type CrossChainSender = mpsc::Sender<(linera_core::data_types::CrossChainRequest, ShardId)>;
/// Broadcast channel distributing notifications to the forwarding tasks.
type NotificationSender = tokio::sync::broadcast::Sender<Notification>;
/// Prometheus counters and histograms for the gRPC server, compiled in only
/// when metrics support is enabled.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{
        linear_bucket_interval, register_histogram_vec, register_int_counter_vec,
    };
    use prometheus::{HistogramVec, IntCounterVec};
    pub static SERVER_REQUEST_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "server_request_latency",
            "Server request latency",
            &[],
            linear_bucket_interval(1.0, 25.0, 2000.0),
        )
    });
    pub static SERVER_REQUEST_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec("server_request_count", "Server request count", &[])
    });
    pub static SERVER_REQUEST_SUCCESS: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "server_request_success",
            "Server request success",
            &["method_name"],
        )
    });
    pub static SERVER_REQUEST_ERROR: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "server_request_error",
            "Server request error",
            &["method_name"],
        )
    });
    pub static SERVER_REQUEST_LATENCY_PER_REQUEST_TYPE: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "server_request_latency_per_request_type",
                "Server request latency per request type",
                &["method_name"],
                linear_bucket_interval(1.0, 25.0, 2000.0),
            )
        });
    pub static CROSS_CHAIN_MESSAGE_CHANNEL_FULL: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "cross_chain_message_channel_full",
            "Cross-chain message channel full",
            &[],
        )
    });
}
/// The per-shard gRPC worker service: handles validator-worker RPCs and fans
/// out the resulting cross-chain requests and notifications.
#[derive(Clone)]
pub struct GrpcServer<S>
where
    S: Storage,
{
    /// The worker state this shard serves.
    state: WorkerState<S>,
    /// This server's own shard ID.
    shard_id: ShardId,
    /// Internal network topology, used to route cross-chain requests to shards.
    network: ValidatorInternalNetworkConfig,
    /// Queue feeding the cross-chain forwarding task.
    cross_chain_sender: CrossChainSender,
    /// Broadcast channel feeding the notification forwarding tasks.
    notification_sender: NotificationSender,
}
/// Handle for a spawned gRPC server task.
pub struct GrpcServerHandle {
    handle: TaskHandle<Result<(), GrpcError>>,
}
impl GrpcServerHandle {
    /// Waits for the server task to finish and returns its result.
    pub async fn join(self) -> Result<(), GrpcError> {
        self.handle.await?
    }
}
/// Tower layer that wraps services in [`GrpcPrometheusMetricsMiddlewareService`].
#[derive(Clone)]
pub struct GrpcPrometheusMetricsMiddlewareLayer;
/// Tower service that records request count and latency metrics (when metrics
/// are enabled) around an inner service.
#[derive(Clone)]
pub struct GrpcPrometheusMetricsMiddlewareService<T> {
    service: T,
}
impl<S> Layer<S> for GrpcPrometheusMetricsMiddlewareLayer {
    type Service = GrpcPrometheusMetricsMiddlewareService<S>;
    fn layer(&self, service: S) -> Self::Service {
        GrpcPrometheusMetricsMiddlewareService { service }
    }
}
impl<S, Req> Service<Req> for GrpcPrometheusMetricsMiddlewareService<S>
where
    S::Future: Send + 'static,
    S: Service<Req> + std::marker::Send,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = BoxFuture<'static, Result<S::Response, S::Error>>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }
    fn call(&mut self, request: Req) -> Self::Future {
        #[cfg(with_metrics)]
        let start = Instant::now();
        let future = self.service.call(request);
        async move {
            let response = future.await?;
            // Metrics are only recorded for successful responses; errors
            // short-circuit above via `?`.
            #[cfg(with_metrics)]
            {
                metrics::SERVER_REQUEST_LATENCY
                    .with_label_values(&[])
                    .observe(start.elapsed().as_secs_f64() * 1000.0);
                metrics::SERVER_REQUEST_COUNT.with_label_values(&[]).inc();
            }
            Ok(response)
        }
        .boxed()
    }
}
impl<S> GrpcServer<S>
where
S: Storage + Clone + Send + Sync + 'static,
{
    /// Spawns the gRPC server for one shard, together with its background
    /// tasks, onto `join_set`, and returns a handle to the serving task.
    ///
    /// Spawned tasks: one cross-chain request forwarder, one notification
    /// forwarder per proxy, and the tonic server itself (with health and
    /// reflection services). The server shuts down when `shutdown_signal` is
    /// cancelled.
    #[expect(clippy::too_many_arguments)]
    pub fn spawn(
        host: String,
        port: u16,
        state: WorkerState<S>,
        shard_id: ShardId,
        internal_network: ValidatorInternalNetworkConfig,
        cross_chain_config: CrossChainConfig,
        notification_config: NotificationConfig,
        shutdown_signal: CancellationToken,
        join_set: &mut JoinSet,
    ) -> GrpcServerHandle {
        info!(
            "spawning gRPC server on {}:{} for shard {}",
            host, port, shard_id
        );
        let (cross_chain_sender, cross_chain_receiver) =
            mpsc::channel(cross_chain_config.queue_size);
        let (notification_sender, _) =
            tokio::sync::broadcast::channel(notification_config.notification_queue_size);
        join_set.spawn_task({
            info!(
                nickname = state.nickname(),
                "spawning cross-chain queries thread on {} for shard {}", host, shard_id
            );
            Self::forward_cross_chain_queries(
                state.nickname().to_string(),
                internal_network.clone(),
                cross_chain_config.max_retries,
                Duration::from_millis(cross_chain_config.retry_delay_ms),
                Duration::from_millis(cross_chain_config.sender_delay_ms),
                cross_chain_config.sender_failure_rate,
                shard_id,
                cross_chain_receiver,
            )
        });
        // Only the first proxy's forwarding task also forwards to the block
        // exporters, so each exporter receives every notification once.
        let mut exporter_forwarded = false;
        for proxy in &internal_network.proxies {
            let receiver = notification_sender.subscribe();
            join_set.spawn_task({
                info!(
                    nickname = state.nickname(),
                    "spawning notifications thread on {} for shard {}", host, shard_id
                );
                let exporter_addresses = if exporter_forwarded {
                    vec![]
                } else {
                    exporter_forwarded = true;
                    internal_network.exporter_addresses()
                };
                Self::forward_notifications(
                    state.nickname().to_string(),
                    proxy.internal_address(&internal_network.protocol),
                    exporter_addresses,
                    receiver,
                )
            });
        }
        let (health_reporter, health_service) = tonic_health::server::health_reporter();
        let grpc_server = GrpcServer {
            state,
            shard_id,
            network: internal_network,
            cross_chain_sender,
            notification_sender,
        };
        let worker_node = ValidatorWorkerServer::new(grpc_server)
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
        let handle = join_set.spawn_task(async move {
            let server_address = SocketAddr::from((IpAddr::from_str(&host)?, port));
            let reflection_service = tonic_reflection::server::Builder::configure()
                .register_encoded_file_descriptor_set(crate::FILE_DESCRIPTOR_SET)
                .build_v1()?;
            health_reporter
                .set_serving::<ValidatorWorkerServer<Self>>()
                .await;
            tonic::transport::Server::builder()
                .layer(
                    ServiceBuilder::new()
                        .layer(GrpcPrometheusMetricsMiddlewareLayer)
                        .into_inner(),
                )
                .add_service(health_service)
                .add_service(reflection_service)
                .add_service(worker_node)
                .serve_with_shutdown(server_address, shutdown_signal.cancelled_owned())
                .await?;
            Ok(())
        });
        GrpcServerHandle { handle }
    }
    /// Continuously waits for receiver to receive a notification which is then sent to
    /// the proxy; `NewBlock` notifications are additionally forwarded to every
    /// block exporter in `exporter_addresses`.
    ///
    /// Runs until the broadcast channel is closed. Lagged receivers skip
    /// messages (with a warning) rather than terminating.
    #[instrument(skip(receiver))]
    async fn forward_notifications(
        nickname: String,
        proxy_address: String,
        exporter_addresses: Vec<String>,
        mut receiver: tokio::sync::broadcast::Receiver<Notification>,
    ) {
        let channel = tonic::transport::Channel::from_shared(proxy_address.clone())
            .expect("Proxy URI should be valid")
            .connect_lazy();
        let mut client = NotifierServiceClient::new(channel)
            .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
        let mut exporter_clients: Vec<NotifierServiceClient<Channel>> = exporter_addresses
            .iter()
            .map(|address| {
                let channel = tonic::transport::Channel::from_shared(address.clone())
                    .expect("Exporter URI should be valid")
                    .connect_lazy();
                NotifierServiceClient::new(channel)
                    .max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
                    .max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE)
            })
            .collect::<Vec<_>>();
        loop {
            let notification = match receiver.recv().await {
                Ok(notification) => notification,
                Err(RecvError::Lagged(skipped_count)) => {
                    warn!(
                        nickname,
                        skipped_count, "notification receiver lagged, messages were skipped"
                    );
                    continue;
                }
                Err(RecvError::Closed) => {
                    warn!(
                        nickname,
                        "notification channel closed, exiting forwarding loop"
                    );
                    break;
                }
            };
            let reason = &notification.reason;
            let chain_id = notification.chain_id;
            let notification: api::Notification = match notification.clone().try_into() {
                Ok(notification) => notification,
                Err(error) => {
                    warn!(%error, nickname, "could not deserialize notification");
                    continue;
                }
            };
            let request = tonic::Request::new(notification.clone());
            // Send failures are logged but do not stop the forwarding loop.
            if let Err(error) = client.notify(request).await {
                error!(
                    %error,
                    nickname,
                    ?chain_id,
                    ?reason,
                    "proxy: could not send notification",
                )
            }
            if let Reason::NewBlock { height: _, hash: _ } = reason {
                for exporter_client in &mut exporter_clients {
                    let request = tonic::Request::new(notification.clone());
                    if let Err(error) = exporter_client.notify(request).await {
                        error!(
                            %error,
                            nickname,
                            ?chain_id,
                            ?reason,
                            "block exporter: could not send notification",
                        )
                    }
                }
            }
        }
    }
fn handle_network_actions(&self, actions: NetworkActions) {
let mut cross_chain_sender = self.cross_chain_sender.clone();
let notification_sender = self.notification_sender.clone();
for request in actions.cross_chain_requests {
let shard_id = self.network.get_shard_id(request.target_chain_id());
trace!(
source_shard_id = self.shard_id,
target_shard_id = shard_id,
"Scheduling cross-chain query",
);
if let Err(error) = cross_chain_sender.try_send((request, shard_id)) {
error!(%error, "dropping cross-chain request");
#[cfg(with_metrics)]
if error.is_full() {
metrics::CROSS_CHAIN_MESSAGE_CHANNEL_FULL
.with_label_values(&[])
.inc();
}
}
}
for notification in actions.notifications {
trace!("Scheduling notification query");
if let Err(error) = notification_sender.send(notification) {
error!(%error, "dropping notification");
break;
}
}
}
#[instrument(skip_all, fields(nickname, %this_shard))]
#[expect(clippy::too_many_arguments)]
async fn forward_cross_chain_queries(
nickname: String,
network: ValidatorInternalNetworkConfig,
cross_chain_max_retries: u32,
cross_chain_retry_delay: Duration,
cross_chain_sender_delay: Duration,
cross_chain_sender_failure_rate: f32,
this_shard: ShardId,
receiver: mpsc::Receiver<(linera_core::data_types::CrossChainRequest, ShardId)>,
) {
let pool = GrpcConnectionPool::default();
let handle_request =
move |shard_id: ShardId, request: linera_core::data_types::CrossChainRequest| {
let channel_result = pool.channel(network.shard(shard_id).http_address());
async move {
let mut client = ValidatorWorkerClient::new(channel_result?)
.max_encoding_message_size(GRPC_MAX_MESSAGE_SIZE)
.max_decoding_message_size(GRPC_MAX_MESSAGE_SIZE);
client
.handle_cross_chain_request(Request::new(request.try_into()?))
.await?;
anyhow::Result::<_, anyhow::Error>::Ok(())
}
};
cross_chain_message_queue::forward_cross_chain_queries(
nickname,
cross_chain_max_retries,
cross_chain_retry_delay,
cross_chain_sender_delay,
cross_chain_sender_failure_rate,
this_shard,
receiver,
handle_request,
)
.await;
}
fn log_request_outcome_and_latency(start: Instant, success: bool, method_name: &str) {
#![cfg_attr(not(with_metrics), allow(unused_variables))]
#[cfg(with_metrics)]
{
metrics::SERVER_REQUEST_LATENCY_PER_REQUEST_TYPE
.with_label_values(&[method_name])
.observe(start.elapsed().as_secs_f64() * 1000.0);
if success {
metrics::SERVER_REQUEST_SUCCESS
.with_label_values(&[method_name])
.inc();
} else {
metrics::SERVER_REQUEST_ERROR
.with_label_values(&[method_name])
.inc();
}
}
}
fn log_error(&self, error: &linera_core::worker::WorkerError, context: &str) {
let nickname = self.state.nickname();
if error.is_local() {
error!(nickname, %error, "{}", context);
} else {
debug!(nickname, %error, "{}", context);
}
}
}
// gRPC surface of the validator worker. Every handler follows the same shape:
// start a latency timer, convert the proto request, delegate to the worker
// state, record the outcome, dispatch any resulting network actions, and
// convert the result (or the `NodeError`) back into a proto response.
#[tonic::async_trait]
impl<S> ValidatorWorkerRpc for GrpcServer<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    // Validates and processes a proposed block for a chain.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_block_proposal(
        &self,
        request: Request<BlockProposal>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let proposal = request.into_inner().try_into()?;
        trace!(?proposal, "Handling block proposal");
        Ok(Response::new(
            match self.state.clone().handle_block_proposal(proposal).await {
                Ok((info, actions)) => {
                    Self::log_request_outcome_and_latency(start, true, "handle_block_proposal");
                    self.handle_network_actions(actions);
                    info.try_into()?
                }
                Err(error) => {
                    Self::log_request_outcome_and_latency(start, false, "handle_block_proposal");
                    self.log_error(&error, "Failed to handle block proposal");
                    // Worker errors are embedded in the response, not returned
                    // as a gRPC `Status`.
                    NodeError::from(error).try_into()?
                }
            },
        ))
    }
    // Processes a lite certificate (signatures over a value hash), optionally
    // waiting until outgoing messages have been delivered.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_lite_certificate(
        &self,
        request: Request<LiteCertificate>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let HandleLiteCertRequest {
            certificate,
            wait_for_outgoing_messages,
        } = request.into_inner().try_into()?;
        trace!(?certificate, "Handling lite certificate");
        // The oneshot channel is created only if the caller asked to wait for
        // message delivery.
        let (sender, receiver) = wait_for_outgoing_messages.then(oneshot::channel).unzip();
        match Box::pin(
            self.state
                .clone()
                .handle_lite_certificate(certificate, sender),
        )
        .await
        {
            Ok((info, actions)) => {
                Self::log_request_outcome_and_latency(start, true, "handle_lite_certificate");
                self.handle_network_actions(actions);
                if let Some(receiver) = receiver {
                    if let Err(e) = receiver.await {
                        error!("Failed to wait for message delivery: {e}");
                    }
                }
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_lite_certificate");
                self.log_error(&error, "Failed to handle lite certificate");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Processes a confirmed-block certificate, optionally waiting for the
    // delivery of outgoing messages before responding.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_confirmed_certificate(
        &self,
        request: Request<api::HandleConfirmedCertificateRequest>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let HandleConfirmedCertificateRequest {
            certificate,
            wait_for_outgoing_messages,
        } = request.into_inner().try_into()?;
        trace!(?certificate, "Handling certificate");
        let (sender, receiver) = wait_for_outgoing_messages.then(oneshot::channel).unzip();
        match self
            .state
            .clone()
            .handle_confirmed_certificate(certificate, sender)
            .await
        {
            Ok((info, actions)) => {
                Self::log_request_outcome_and_latency(start, true, "handle_confirmed_certificate");
                self.handle_network_actions(actions);
                if let Some(receiver) = receiver {
                    if let Err(e) = receiver.await {
                        error!("Failed to wait for message delivery: {e}");
                    }
                }
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_confirmed_certificate");
                self.log_error(&error, "Failed to handle confirmed certificate");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Processes a validated-block certificate.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_validated_certificate(
        &self,
        request: Request<api::HandleValidatedCertificateRequest>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let HandleValidatedCertificateRequest { certificate } = request.into_inner().try_into()?;
        trace!(?certificate, "Handling certificate");
        match self
            .state
            .clone()
            .handle_validated_certificate(certificate)
            .await
        {
            Ok((info, actions)) => {
                Self::log_request_outcome_and_latency(start, true, "handle_validated_certificate");
                self.handle_network_actions(actions);
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_validated_certificate");
                self.log_error(&error, "Failed to handle validated certificate");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Processes a round-timeout certificate. Resulting actions are ignored.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_timeout_certificate(
        &self,
        request: Request<api::HandleTimeoutCertificateRequest>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let HandleTimeoutCertificateRequest { certificate } = request.into_inner().try_into()?;
        trace!(?certificate, "Handling Timeout certificate");
        match self
            .state
            .clone()
            .handle_timeout_certificate(certificate)
            .await
        {
            Ok((info, _actions)) => {
                Self::log_request_outcome_and_latency(start, true, "handle_timeout_certificate");
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_timeout_certificate");
                self.log_error(&error, "Failed to handle timeout certificate");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Answers a query about the current state of a chain.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_chain_info_query(
        &self,
        request: Request<ChainInfoQuery>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let query = request.into_inner().try_into()?;
        trace!(?query, "Handling chain info query");
        match self.state.clone().handle_chain_info_query(query).await {
            Ok((info, actions)) => {
                Self::log_request_outcome_and_latency(start, true, "handle_chain_info_query");
                self.handle_network_actions(actions);
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_chain_info_query");
                self.log_error(&error, "Failed to handle chain info query");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Returns the content of a blob that is pending for the given chain.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn download_pending_blob(
        &self,
        request: Request<PendingBlobRequest>,
    ) -> Result<Response<PendingBlobResult>, Status> {
        let start = Instant::now();
        let (chain_id, blob_id) = request.into_inner().try_into()?;
        trace!(?chain_id, ?blob_id, "Download pending blob");
        match self
            .state
            .clone()
            .download_pending_blob(chain_id, blob_id)
            .await
        {
            Ok(blob) => {
                Self::log_request_outcome_and_latency(start, true, "download_pending_blob");
                Ok(Response::new(blob.into_content().try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "download_pending_blob");
                self.log_error(&error, "Failed to download pending blob");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Receives the content of a blob that this validator was missing.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id
        )
    )]
    async fn handle_pending_blob(
        &self,
        request: Request<HandlePendingBlobRequest>,
    ) -> Result<Response<ChainInfoResult>, Status> {
        let start = Instant::now();
        let (chain_id, blob_content) = request.into_inner().try_into()?;
        // The blob ID is recomputed from the received content.
        let blob = Blob::new(blob_content);
        let blob_id = blob.id();
        trace!(?chain_id, ?blob_id, "Handle pending blob");
        match self.state.clone().handle_pending_blob(chain_id, blob).await {
            Ok(info) => {
                Self::log_request_outcome_and_latency(start, true, "handle_pending_blob");
                Ok(Response::new(info.try_into()?))
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_pending_blob");
                self.log_error(&error, "Failed to handle pending blob");
                Ok(Response::new(NodeError::from(error).try_into()?))
            }
        }
    }
    // Processes a cross-chain request from a sibling shard. Always replies with
    // an empty response; failures are only logged.
    #[instrument(
        target = "grpc_server",
        skip_all,
        err,
        fields(
            nickname = self.state.nickname(),
            chain_id = ?request.get_ref().chain_id()
        )
    )]
    async fn handle_cross_chain_request(
        &self,
        request: Request<CrossChainRequest>,
    ) -> Result<Response<()>, Status> {
        let start = Instant::now();
        let request = request.into_inner().try_into()?;
        trace!(?request, "Handling cross-chain request");
        match self.state.clone().handle_cross_chain_request(request).await {
            Ok(actions) => {
                Self::log_request_outcome_and_latency(start, true, "handle_cross_chain_request");
                self.handle_network_actions(actions)
            }
            Err(error) => {
                Self::log_request_outcome_and_latency(start, false, "handle_cross_chain_request");
                let nickname = self.state.nickname();
                error!(nickname, %error, "Failed to handle cross-chain request");
            }
        }
        Ok(Response::new(()))
    }
}
/// Types which are proxyable and expose the appropriate methods to be handled
/// by the `GrpcProxy`
pub trait GrpcProxyable {
    /// Returns the chain ID carried by the message, if it is present and can be
    /// converted; `None` otherwise.
    fn chain_id(&self) -> Option<ChainId>;
}
impl GrpcProxyable for BlockProposal {
    /// Reads the routing chain ID from the proposal envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for LiteCertificate {
    /// Reads the routing chain ID from the certificate envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for api::HandleConfirmedCertificateRequest {
    /// Reads the routing chain ID from the request envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for api::HandleTimeoutCertificateRequest {
    /// Reads the routing chain ID from the request envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for api::HandleValidatedCertificateRequest {
    /// Reads the routing chain ID from the request envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for ChainInfoQuery {
    /// Reads the routing chain ID from the query envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for PendingBlobRequest {
    /// Reads the routing chain ID from the request envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for HandlePendingBlobRequest {
    /// Reads the routing chain ID from the request envelope.
    fn chain_id(&self) -> Option<ChainId> {
        self.chain_id.clone().and_then(|id| id.try_into().ok())
    }
}
impl GrpcProxyable for CrossChainRequest {
    /// Routes either variant of the request by its *recipient* chain.
    fn chain_id(&self) -> Option<ChainId> {
        use super::api::cross_chain_request::Inner;
        let recipient = match self.inner.as_ref()? {
            Inner::UpdateRecipient(api::UpdateRecipient { recipient, .. }) => recipient,
            Inner::ConfirmUpdatedRecipient(api::ConfirmUpdatedRecipient { recipient, .. }) => {
                recipient
            }
        };
        recipient.clone()?.try_into().ok()
    }
}
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{
AccountPublicKey, AccountSignature, CryptoError, CryptoHash, ValidatorPublicKey,
ValidatorSignature,
},
data_types::{BlobContent, BlockHeight, NetworkDescription},
ensure,
identifiers::{AccountOwner, BlobId, ChainId},
};
use linera_chain::{
data_types::{BlockProposal, LiteValue, ProposalContent},
types::{
Certificate, CertificateKind, ConfirmedBlock, ConfirmedBlockCertificate, LiteCertificate,
Timeout, TimeoutCertificate, ValidatedBlock, ValidatedBlockCertificate,
},
};
use linera_core::{
data_types::{
CertificatesByHeightRequest, ChainInfoQuery, ChainInfoResponse, CrossChainRequest,
},
node::NodeError,
worker::Notification,
};
use thiserror::Error;
use tonic::{Code, Status};
use super::api::{self, PendingBlobRequest};
use crate::{
HandleConfirmedCertificateRequest, HandleLiteCertRequest, HandleTimeoutCertificateRequest,
HandleValidatedCertificateRequest,
};
// Error type for all proto <-> domain conversions in this module.
#[derive(Error, Debug)]
pub enum GrpcProtoConversionError {
    // Bincode (de)serialization of an embedded payload failed.
    #[error(transparent)]
    BincodeError(#[from] bincode::Error),
    // A required protobuf field was absent.
    #[error("Conversion failed due to missing field")]
    MissingField,
    // An ed25519 signature could not be parsed.
    #[error("Signature error: {0}")]
    SignatureError(ed25519_dalek::SignatureError),
    // A key, hash, or signature failed cryptographic parsing.
    #[error("Cryptographic error: {0}")]
    CryptoError(#[from] CryptoError),
    // The chain ID duplicated in the message envelope disagrees with the
    // chain ID inside the payload.
    #[error("Inconsistent outer/inner chain IDs")]
    InconsistentChainId,
    // The numeric certificate kind did not match any known kind.
    #[error("Unrecognized certificate type")]
    InvalidCertificateType,
}
impl From<ed25519_dalek::SignatureError> for GrpcProtoConversionError {
fn from(signature_error: ed25519_dalek::SignatureError) -> Self {
GrpcProtoConversionError::SignatureError(signature_error)
}
}
/// Extracts an optional field from a Proto type and tries to map it.
fn try_proto_convert<S, T>(field: Option<T>) -> Result<S, GrpcProtoConversionError>
where
    T: TryInto<S, Error = GrpcProtoConversionError>,
{
    match field {
        Some(value) => value.try_into(),
        None => Err(GrpcProtoConversionError::MissingField),
    }
}
impl From<GrpcProtoConversionError> for Status {
    /// Surfaces a conversion failure as an `InvalidArgument` gRPC status.
    fn from(error: GrpcProtoConversionError) -> Self {
        let message = error.to_string();
        Status::new(Code::InvalidArgument, message)
    }
}
impl From<GrpcProtoConversionError> for NodeError {
fn from(error: GrpcProtoConversionError) -> Self {
NodeError::GrpcError {
error: error.to_string(),
}
}
}
impl From<linera_version::CrateVersion> for api::CrateVersion {
    /// Copies the semver triple into its proto representation.
    fn from(version: linera_version::CrateVersion) -> Self {
        Self {
            major: version.major,
            minor: version.minor,
            patch: version.patch,
        }
    }
}
impl From<api::CrateVersion> for linera_version::CrateVersion {
    /// Copies the proto semver triple back into the domain type.
    fn from(version: api::CrateVersion) -> Self {
        Self {
            major: version.major,
            minor: version.minor,
            patch: version.patch,
        }
    }
}
impl From<linera_version::VersionInfo> for api::VersionInfo {
    /// Converts the version report into its proto representation.
    fn from(info: linera_version::VersionInfo) -> api::VersionInfo {
        api::VersionInfo {
            crate_version: Some(api::CrateVersion::from(info.crate_version.value)),
            git_commit: info.git_commit.into(),
            git_dirty: info.git_dirty,
            rpc_hash: info.rpc_hash.into(),
            graphql_hash: info.graphql_hash.into(),
            wit_hash: info.wit_hash.into(),
        }
    }
}
impl From<api::VersionInfo> for linera_version::VersionInfo {
    /// Converts a proto version report back to the domain type.
    fn from(info: api::VersionInfo) -> linera_version::VersionInfo {
        // A missing crate version decodes as 0.0.0.
        let crate_version = info.crate_version.unwrap_or(api::CrateVersion {
            major: 0,
            minor: 0,
            patch: 0,
        });
        linera_version::VersionInfo {
            crate_version: linera_version::Pretty::new(crate_version.into()),
            git_commit: info.git_commit.into(),
            git_dirty: info.git_dirty,
            rpc_hash: info.rpc_hash.into(),
            graphql_hash: info.graphql_hash.into(),
            wit_hash: info.wit_hash.into(),
        }
    }
}
impl From<NetworkDescription> for api::NetworkDescription {
    /// Converts the network description into its proto representation.
    fn from(description: NetworkDescription) -> Self {
        Self {
            name: description.name,
            genesis_config_hash: Some(description.genesis_config_hash.into()),
            genesis_timestamp: description.genesis_timestamp.micros(),
            admin_chain_id: Some(description.admin_chain_id.into()),
            genesis_committee_blob_hash: Some(description.genesis_committee_blob_hash.into()),
        }
    }
}
impl TryFrom<api::NetworkDescription> for NetworkDescription {
type Error = GrpcProtoConversionError;
fn try_from(
api::NetworkDescription {
name,
genesis_config_hash,
genesis_timestamp,
genesis_committee_blob_hash,
admin_chain_id,
}: api::NetworkDescription,
) -> Result<Self, Self::Error> {
Ok(Self {
name,
genesis_config_hash: try_proto_convert(genesis_config_hash)?,
genesis_timestamp: genesis_timestamp.into(),
admin_chain_id: try_proto_convert(admin_chain_id)?,
genesis_committee_blob_hash: try_proto_convert(genesis_committee_blob_hash)?,
})
}
}
impl TryFrom<Notification> for api::Notification {
    type Error = GrpcProtoConversionError;

    /// Serializes a worker notification; the reason is bincode-encoded.
    fn try_from(notification: Notification) -> Result<Self, Self::Error> {
        let reason = bincode::serialize(&notification.reason)?;
        let chain_id = Some(notification.chain_id.into());
        Ok(Self { chain_id, reason })
    }
}
impl TryFrom<api::Notification> for Option<Notification> {
    type Error = GrpcProtoConversionError;

    /// Decodes a proto notification; a fully empty message means "none".
    fn try_from(notification: api::Notification) -> Result<Self, Self::Error> {
        let is_empty = notification.chain_id.is_none() && notification.reason.is_empty();
        if is_empty {
            return Ok(None);
        }
        Ok(Some(Notification {
            chain_id: try_proto_convert(notification.chain_id)?,
            reason: bincode::deserialize(&notification.reason)?,
        }))
    }
}
impl TryFrom<ChainInfoResponse> for api::ChainInfoResult {
    type Error = GrpcProtoConversionError;

    /// Wraps a successful chain-info response into the result envelope.
    fn try_from(chain_info_response: ChainInfoResponse) -> Result<Self, Self::Error> {
        let inner =
            api::chain_info_result::Inner::ChainInfoResponse(chain_info_response.try_into()?);
        Ok(api::ChainInfoResult { inner: Some(inner) })
    }
}
impl TryFrom<NodeError> for api::ChainInfoResult {
type Error = GrpcProtoConversionError;
fn try_from(node_error: NodeError) -> Result<Self, Self::Error> {
let error = bincode::serialize(&node_error)?;
Ok(api::ChainInfoResult {
inner: Some(api::chain_info_result::Inner::Error(error)),
})
}
}
impl TryFrom<BlockProposal> for api::BlockProposal {
    type Error = GrpcProtoConversionError;

    /// Serializes a block proposal. The chain ID is duplicated in the envelope
    /// so it can be read without decoding `content`.
    fn try_from(block_proposal: BlockProposal) -> Result<Self, Self::Error> {
        let chain_id = block_proposal.content.block.chain_id.into();
        let content = bincode::serialize(&block_proposal.content)?;
        let owner = block_proposal.owner().try_into()?;
        let original_proposal = block_proposal
            .original_proposal
            .map(|cert| bincode::serialize(&cert))
            .transpose()?;
        Ok(Self {
            chain_id: Some(chain_id),
            content,
            owner: Some(owner),
            signature: Some(block_proposal.signature.into()),
            original_proposal,
        })
    }
}
impl TryFrom<api::BlockProposal> for BlockProposal {
    type Error = GrpcProtoConversionError;

    /// Deserializes a block proposal, checking that the envelope chain ID
    /// matches the one embedded in the content payload.
    fn try_from(block_proposal: api::BlockProposal) -> Result<Self, Self::Error> {
        let content: ProposalContent = bincode::deserialize(&block_proposal.content)?;
        ensure!(
            block_proposal.chain_id == Some(content.block.chain_id.into()),
            GrpcProtoConversionError::InconsistentChainId
        );
        let signature = try_proto_convert(block_proposal.signature)?;
        let original_proposal = block_proposal
            .original_proposal
            .map(|bytes| bincode::deserialize(&bytes))
            .transpose()?;
        Ok(Self {
            content,
            signature,
            original_proposal,
        })
    }
}
impl TryFrom<api::CrossChainRequest> for CrossChainRequest {
    type Error = GrpcProtoConversionError;

    /// Decodes either variant of a cross-chain request from the proto oneof.
    fn try_from(cross_chain_request: api::CrossChainRequest) -> Result<Self, Self::Error> {
        use api::cross_chain_request::Inner;
        let inner = cross_chain_request
            .inner
            .ok_or(GrpcProtoConversionError::MissingField)?;
        match inner {
            Inner::UpdateRecipient(api::UpdateRecipient {
                sender,
                recipient,
                bundles,
            }) => Ok(CrossChainRequest::UpdateRecipient {
                sender: try_proto_convert(sender)?,
                recipient: try_proto_convert(recipient)?,
                bundles: bincode::deserialize(&bundles)?,
            }),
            Inner::ConfirmUpdatedRecipient(api::ConfirmUpdatedRecipient {
                sender,
                recipient,
                latest_height,
            }) => Ok(CrossChainRequest::ConfirmUpdatedRecipient {
                sender: try_proto_convert(sender)?,
                recipient: try_proto_convert(recipient)?,
                latest_height: latest_height
                    .ok_or(GrpcProtoConversionError::MissingField)?
                    .into(),
            }),
        }
    }
}
impl TryFrom<CrossChainRequest> for api::CrossChainRequest {
    type Error = GrpcProtoConversionError;

    /// Encodes either variant of a cross-chain request into the proto oneof.
    fn try_from(cross_chain_request: CrossChainRequest) -> Result<Self, Self::Error> {
        use api::cross_chain_request::Inner;
        let inner = Some(match cross_chain_request {
            CrossChainRequest::UpdateRecipient {
                sender,
                recipient,
                bundles,
            } => Inner::UpdateRecipient(api::UpdateRecipient {
                sender: Some(sender.into()),
                recipient: Some(recipient.into()),
                bundles: bincode::serialize(&bundles)?,
            }),
            CrossChainRequest::ConfirmUpdatedRecipient {
                sender,
                recipient,
                latest_height,
            } => Inner::ConfirmUpdatedRecipient(api::ConfirmUpdatedRecipient {
                sender: Some(sender.into()),
                recipient: Some(recipient.into()),
                latest_height: Some(latest_height.into()),
            }),
        });
        Ok(Self { inner })
    }
}
impl TryFrom<api::LiteCertificate> for HandleLiteCertRequest<'_> {
    type Error = GrpcProtoConversionError;

    /// Decodes a lite certificate plus the wait-for-delivery flag.
    fn try_from(certificate: api::LiteCertificate) -> Result<Self, Self::Error> {
        // Map the wire-level integer back to a certificate kind.
        let kind = match certificate.kind {
            k if k == api::CertificateKind::Validated as i32 => CertificateKind::Validated,
            k if k == api::CertificateKind::Confirmed as i32 => CertificateKind::Confirmed,
            k if k == api::CertificateKind::Timeout as i32 => CertificateKind::Timeout,
            _ => return Err(GrpcProtoConversionError::InvalidCertificateType),
        };
        let value = LiteValue {
            value_hash: CryptoHash::try_from(certificate.hash.as_slice())?,
            chain_id: try_proto_convert(certificate.chain_id)?,
            kind,
        };
        let signatures = bincode::deserialize(&certificate.signatures)?;
        let round = bincode::deserialize(&certificate.round)?;
        Ok(Self {
            certificate: LiteCertificate::new(value, round, signatures),
            wait_for_outgoing_messages: certificate.wait_for_outgoing_messages,
        })
    }
}
impl TryFrom<HandleLiteCertRequest<'_>> for api::LiteCertificate {
    type Error = GrpcProtoConversionError;

    /// Encodes a lite certificate request for the wire.
    fn try_from(request: HandleLiteCertRequest) -> Result<Self, Self::Error> {
        let certificate = &request.certificate;
        Ok(Self {
            hash: certificate.value.value_hash.as_bytes().to_vec(),
            round: bincode::serialize(&certificate.round)?,
            chain_id: Some(certificate.value.chain_id.into()),
            signatures: bincode::serialize(&certificate.signatures)?,
            wait_for_outgoing_messages: request.wait_for_outgoing_messages,
            kind: certificate.value.kind as i32,
        })
    }
}
impl TryFrom<api::HandleTimeoutCertificateRequest> for HandleTimeoutCertificateRequest {
type Error = GrpcProtoConversionError;
fn try_from(cert_request: api::HandleTimeoutCertificateRequest) -> Result<Self, Self::Error> {
let certificate: TimeoutCertificate = cert_request
.certificate
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
let req_chain_id: ChainId = cert_request
.chain_id
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
ensure!(
certificate.inner().chain_id() == req_chain_id,
GrpcProtoConversionError::InconsistentChainId
);
Ok(HandleTimeoutCertificateRequest { certificate })
}
}
impl TryFrom<api::HandleValidatedCertificateRequest> for HandleValidatedCertificateRequest {
type Error = GrpcProtoConversionError;
fn try_from(cert_request: api::HandleValidatedCertificateRequest) -> Result<Self, Self::Error> {
let certificate: ValidatedBlockCertificate = cert_request
.certificate
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
let req_chain_id: ChainId = cert_request
.chain_id
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
ensure!(
certificate.inner().chain_id() == req_chain_id,
GrpcProtoConversionError::InconsistentChainId
);
Ok(HandleValidatedCertificateRequest { certificate })
}
}
impl TryFrom<api::HandleConfirmedCertificateRequest> for HandleConfirmedCertificateRequest {
type Error = GrpcProtoConversionError;
fn try_from(cert_request: api::HandleConfirmedCertificateRequest) -> Result<Self, Self::Error> {
let certificate: ConfirmedBlockCertificate = cert_request
.certificate
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
let req_chain_id: ChainId = cert_request
.chain_id
.ok_or(GrpcProtoConversionError::MissingField)?
.try_into()?;
ensure!(
certificate.inner().chain_id() == req_chain_id,
GrpcProtoConversionError::InconsistentChainId
);
Ok(HandleConfirmedCertificateRequest {
certificate,
wait_for_outgoing_messages: cert_request.wait_for_outgoing_messages,
})
}
}
impl TryFrom<HandleConfirmedCertificateRequest> for api::HandleConfirmedCertificateRequest {
    type Error = GrpcProtoConversionError;

    /// Encodes a confirmed-certificate request, duplicating the chain ID into
    /// the envelope.
    fn try_from(request: HandleConfirmedCertificateRequest) -> Result<Self, Self::Error> {
        let chain_id = request.certificate.inner().chain_id();
        Ok(Self {
            chain_id: Some(chain_id.into()),
            certificate: Some(request.certificate.try_into()?),
            wait_for_outgoing_messages: request.wait_for_outgoing_messages,
        })
    }
}
impl TryFrom<HandleValidatedCertificateRequest> for api::HandleValidatedCertificateRequest {
    type Error = GrpcProtoConversionError;

    /// Encodes a validated-certificate request, duplicating the chain ID into
    /// the envelope.
    fn try_from(request: HandleValidatedCertificateRequest) -> Result<Self, Self::Error> {
        let chain_id = request.certificate.inner().chain_id();
        Ok(Self {
            chain_id: Some(chain_id.into()),
            certificate: Some(request.certificate.try_into()?),
        })
    }
}
impl TryFrom<HandleTimeoutCertificateRequest> for api::HandleTimeoutCertificateRequest {
    type Error = GrpcProtoConversionError;

    /// Encodes a timeout-certificate request, duplicating the chain ID into
    /// the envelope.
    fn try_from(request: HandleTimeoutCertificateRequest) -> Result<Self, Self::Error> {
        let chain_id = request.certificate.inner().chain_id();
        Ok(Self {
            chain_id: Some(chain_id.into()),
            certificate: Some(request.certificate.try_into()?),
        })
    }
}
impl TryFrom<api::Certificate> for TimeoutCertificate {
type Error = GrpcProtoConversionError;
fn try_from(certificate: api::Certificate) -> Result<Self, Self::Error> {
let round = bincode::deserialize(&certificate.round)?;
let signatures = bincode::deserialize(&certificate.signatures)?;
let cert_type = certificate.kind;
if cert_type == api::CertificateKind::Timeout as i32 {
let value: Timeout = bincode::deserialize(&certificate.value)?;
Ok(TimeoutCertificate::new(value, round, signatures))
} else {
Err(GrpcProtoConversionError::InvalidCertificateType)
}
}
}
impl TryFrom<api::Certificate> for ValidatedBlockCertificate {
type Error = GrpcProtoConversionError;
fn try_from(certificate: api::Certificate) -> Result<Self, Self::Error> {
let round = bincode::deserialize(&certificate.round)?;
let signatures = bincode::deserialize(&certificate.signatures)?;
let cert_type = certificate.kind;
if cert_type == api::CertificateKind::Validated as i32 {
let value: ValidatedBlock = bincode::deserialize(&certificate.value)?;
Ok(ValidatedBlockCertificate::new(value, round, signatures))
} else {
Err(GrpcProtoConversionError::InvalidCertificateType)
}
}
}
impl TryFrom<api::Certificate> for ConfirmedBlockCertificate {
type Error = GrpcProtoConversionError;
fn try_from(certificate: api::Certificate) -> Result<Self, Self::Error> {
let round = bincode::deserialize(&certificate.round)?;
let signatures = bincode::deserialize(&certificate.signatures)?;
let cert_type = certificate.kind;
if cert_type == api::CertificateKind::Confirmed as i32 {
let value: ConfirmedBlock = bincode::deserialize(&certificate.value)?;
Ok(ConfirmedBlockCertificate::new(value, round, signatures))
} else {
Err(GrpcProtoConversionError::InvalidCertificateType)
}
}
}
impl TryFrom<TimeoutCertificate> for api::Certificate {
    type Error = GrpcProtoConversionError;

    /// Encodes a timeout certificate, tagging it with the `Timeout` kind.
    fn try_from(certificate: TimeoutCertificate) -> Result<Self, Self::Error> {
        let round = bincode::serialize(&certificate.round)?;
        let signatures = bincode::serialize(certificate.signatures())?;
        let value = bincode::serialize(certificate.value())?;
        let kind = api::CertificateKind::Timeout as i32;
        Ok(Self {
            value,
            round,
            signatures,
            kind,
        })
    }
}
impl TryFrom<ConfirmedBlockCertificate> for api::Certificate {
    type Error = GrpcProtoConversionError;

    /// Encodes a confirmed-block certificate, tagging it with the `Confirmed`
    /// kind.
    fn try_from(certificate: ConfirmedBlockCertificate) -> Result<Self, Self::Error> {
        let round = bincode::serialize(&certificate.round)?;
        let signatures = bincode::serialize(certificate.signatures())?;
        let value = bincode::serialize(certificate.value())?;
        let kind = api::CertificateKind::Confirmed as i32;
        Ok(Self {
            value,
            round,
            signatures,
            kind,
        })
    }
}
impl TryFrom<ValidatedBlockCertificate> for api::Certificate {
    type Error = GrpcProtoConversionError;

    /// Encodes a validated-block certificate, tagging it with the `Validated`
    /// kind.
    fn try_from(certificate: ValidatedBlockCertificate) -> Result<Self, Self::Error> {
        let round = bincode::serialize(&certificate.round)?;
        let signatures = bincode::serialize(certificate.signatures())?;
        let value = bincode::serialize(certificate.value())?;
        let kind = api::CertificateKind::Validated as i32;
        Ok(Self {
            value,
            round,
            signatures,
            kind,
        })
    }
}
impl TryFrom<api::ChainInfoQuery> for ChainInfoQuery {
type Error = GrpcProtoConversionError;
fn try_from(chain_info_query: api::ChainInfoQuery) -> Result<Self, Self::Error> {
let request_sent_certificate_hashes_by_heights = chain_info_query
.request_sent_certificate_hashes_by_heights
.map(|heights| bincode::deserialize(&heights))
.transpose()?
.unwrap_or_default();
let request_leader_timeout = chain_info_query
.request_leader_timeout
.map(|height_and_round| bincode::deserialize(&height_and_round))
.transpose()?;
Ok(Self {
request_committees: chain_info_query.request_committees,
request_owner_balance: try_proto_convert(chain_info_query.request_owner_balance)?,
request_pending_message_bundles: chain_info_query.request_pending_message_bundles,
chain_id: try_proto_convert(chain_info_query.chain_id)?,
request_received_log_excluding_first_n: chain_info_query
.request_received_log_excluding_first_n,
test_next_block_height: chain_info_query.test_next_block_height.map(Into::into),
request_manager_values: chain_info_query.request_manager_values,
request_leader_timeout,
request_fallback: chain_info_query.request_fallback,
request_sent_certificate_hashes_by_heights,
request_sent_certificate_hashes_in_range: None,
create_network_actions: chain_info_query.create_network_actions.unwrap_or(true),
})
}
}
impl TryFrom<ChainInfoQuery> for api::ChainInfoQuery {
type Error = GrpcProtoConversionError;
fn try_from(chain_info_query: ChainInfoQuery) -> Result<Self, Self::Error> {
let request_sent_certificate_hashes_by_heights =
bincode::serialize(&chain_info_query.request_sent_certificate_hashes_by_heights)?;
let request_owner_balance = Some(chain_info_query.request_owner_balance.try_into()?);
let request_leader_timeout = chain_info_query
.request_leader_timeout
.map(|height_and_round| bincode::serialize(&height_and_round))
.transpose()?;
Ok(Self {
chain_id: Some(chain_info_query.chain_id.into()),
request_committees: chain_info_query.request_committees,
request_owner_balance,
request_pending_message_bundles: chain_info_query.request_pending_message_bundles,
test_next_block_height: chain_info_query.test_next_block_height.map(Into::into),
request_sent_certificate_hashes_by_heights: Some(
request_sent_certificate_hashes_by_heights,
),
request_received_log_excluding_first_n: chain_info_query
.request_received_log_excluding_first_n,
request_manager_values: chain_info_query.request_manager_values,
request_leader_timeout,
request_fallback: chain_info_query.request_fallback,
create_network_actions: Some(chain_info_query.create_network_actions),
})
}
}
impl From<ChainId> for api::ChainId {
    /// Wraps the chain ID's raw hash bytes in the protobuf message.
    fn from(chain_id: ChainId) -> Self {
        let bytes = chain_id.0.as_bytes().to_vec();
        Self { bytes }
    }
}
impl TryFrom<api::ChainId> for ChainId {
    type Error = GrpcProtoConversionError;

    /// Parses a chain ID from the raw bytes carried by the protobuf message.
    fn try_from(chain_id: api::ChainId) -> Result<Self, Self::Error> {
        let parsed = ChainId::try_from(chain_id.bytes.as_slice())?;
        Ok(parsed)
    }
}
impl From<AccountPublicKey> for api::AccountPublicKey {
    /// Serializes the public key into its protobuf byte representation.
    fn from(public_key: AccountPublicKey) -> Self {
        let bytes = public_key.as_bytes();
        Self { bytes }
    }
}
impl From<ValidatorPublicKey> for api::ValidatorPublicKey {
    /// Copies the validator public key bytes into the protobuf message.
    fn from(public_key: ValidatorPublicKey) -> Self {
        let bytes = public_key.as_bytes().to_vec();
        Self { bytes }
    }
}
impl TryFrom<api::ValidatorPublicKey> for ValidatorPublicKey {
    type Error = GrpcProtoConversionError;

    /// Reconstructs a validator public key from its raw protobuf bytes.
    fn try_from(public_key: api::ValidatorPublicKey) -> Result<Self, Self::Error> {
        let key = Self::from_bytes(public_key.bytes.as_slice())?;
        Ok(key)
    }
}
impl TryFrom<api::AccountPublicKey> for AccountPublicKey {
    type Error = GrpcProtoConversionError;

    /// Reconstructs an account public key from its raw protobuf bytes.
    fn try_from(public_key: api::AccountPublicKey) -> Result<Self, Self::Error> {
        let key = Self::from_slice(public_key.bytes.as_slice())?;
        Ok(key)
    }
}
impl From<AccountSignature> for api::AccountSignature {
    /// Serializes the account signature into its protobuf byte representation.
    fn from(signature: AccountSignature) -> Self {
        let bytes = signature.to_bytes();
        Self { bytes }
    }
}
impl From<ValidatorSignature> for api::ValidatorSignature {
    /// Copies the validator signature bytes into the protobuf message.
    fn from(signature: ValidatorSignature) -> Self {
        let bytes = signature.as_bytes().to_vec();
        Self { bytes }
    }
}
impl TryFrom<api::ValidatorSignature> for ValidatorSignature {
    type Error = GrpcProtoConversionError;

    /// Reconstructs a validator signature from its raw protobuf bytes.
    ///
    /// Propagates the crypto error with `?` so the conversion goes through the
    /// same `From` impl as every sibling conversion in this module (e.g. the
    /// `ValidatorPublicKey` and `AccountSignature` ones), instead of a
    /// hand-written `map_err`.
    fn try_from(signature: api::ValidatorSignature) -> Result<Self, Self::Error> {
        Ok(Self::from_slice(signature.bytes.as_slice())?)
    }
}
impl TryFrom<api::AccountSignature> for AccountSignature {
    type Error = GrpcProtoConversionError;

    /// Reconstructs an account signature from its raw protobuf bytes.
    fn try_from(signature: api::AccountSignature) -> Result<Self, Self::Error> {
        let parsed = Self::from_slice(signature.bytes.as_slice())?;
        Ok(parsed)
    }
}
impl TryFrom<ChainInfoResponse> for api::ChainInfoResponse {
type Error = GrpcProtoConversionError;
fn try_from(chain_info_response: ChainInfoResponse) -> Result<Self, Self::Error> {
Ok(Self {
chain_info: bincode::serialize(&chain_info_response.info)?,
signature: chain_info_response.signature.map(Into::into),
})
}
}
impl TryFrom<api::ChainInfoResponse> for ChainInfoResponse {
type Error = GrpcProtoConversionError;
fn try_from(chain_info_response: api::ChainInfoResponse) -> Result<Self, Self::Error> {
let signature = chain_info_response
.signature
.map(TryInto::try_into)
.transpose()?;
let info = bincode::deserialize(chain_info_response.chain_info.as_slice())?;
Ok(Self { info, signature })
}
}
impl TryFrom<(ChainId, BlobId)> for api::PendingBlobRequest {
    type Error = GrpcProtoConversionError;

    /// Builds a request for a pending blob on the given chain.
    fn try_from((chain_id, blob_id): (ChainId, BlobId)) -> Result<Self, Self::Error> {
        let chain_id = Some(chain_id.into());
        let blob_id = Some(blob_id.try_into()?);
        Ok(Self { chain_id, blob_id })
    }
}
impl TryFrom<api::PendingBlobRequest> for (ChainId, BlobId) {
type Error = GrpcProtoConversionError;
fn try_from(request: PendingBlobRequest) -> Result<Self, Self::Error> {
Ok((
try_proto_convert(request.chain_id)?,
try_proto_convert(request.blob_id)?,
))
}
}
impl TryFrom<(ChainId, BlobContent)> for api::HandlePendingBlobRequest {
    type Error = GrpcProtoConversionError;

    /// Builds a request to handle a pending blob on the given chain.
    fn try_from((chain_id, blob_content): (ChainId, BlobContent)) -> Result<Self, Self::Error> {
        let blob = Some(blob_content.try_into()?);
        Ok(Self {
            chain_id: Some(chain_id.into()),
            blob,
        })
    }
}
impl TryFrom<api::HandlePendingBlobRequest> for (ChainId, BlobContent) {
    type Error = GrpcProtoConversionError;

    /// Extracts the chain ID and blob content from the request.
    fn try_from(request: api::HandlePendingBlobRequest) -> Result<Self, Self::Error> {
        let chain_id = try_proto_convert(request.chain_id)?;
        let blob = try_proto_convert(request.blob)?;
        Ok((chain_id, blob))
    }
}
impl TryFrom<BlobContent> for api::PendingBlobResult {
    type Error = GrpcProtoConversionError;

    /// Wraps a successfully fetched blob in the `Blob` variant of the result.
    fn try_from(blob: BlobContent) -> Result<Self, Self::Error> {
        let inner = api::pending_blob_result::Inner::Blob(blob.try_into()?);
        Ok(Self { inner: Some(inner) })
    }
}
impl TryFrom<NodeError> for api::PendingBlobResult {
type Error = GrpcProtoConversionError;
fn try_from(node_error: NodeError) -> Result<Self, Self::Error> {
let error = bincode::serialize(&node_error)?;
Ok(api::PendingBlobResult {
inner: Some(api::pending_blob_result::Inner::Error(error)),
})
}
}
impl From<BlockHeight> for api::BlockHeight {
    /// Copies the raw height counter into the protobuf message.
    fn from(block_height: BlockHeight) -> Self {
        let height = block_height.0;
        Self { height }
    }
}
impl From<api::BlockHeight> for BlockHeight {
fn from(block_height: api::BlockHeight) -> Self {
Self(block_height.height)
}
}
impl TryFrom<AccountOwner> for api::AccountOwner {
    type Error = GrpcProtoConversionError;

    /// Serializes the account owner with bincode into the protobuf message.
    fn try_from(account_owner: AccountOwner) -> Result<Self, Self::Error> {
        let bytes = bincode::serialize(&account_owner)?;
        Ok(Self { bytes })
    }
}
impl TryFrom<api::AccountOwner> for AccountOwner {
    type Error = GrpcProtoConversionError;

    /// Deserializes the bincode-encoded account owner.
    fn try_from(account_owner: api::AccountOwner) -> Result<Self, Self::Error> {
        let owner = bincode::deserialize(account_owner.bytes.as_slice())?;
        Ok(owner)
    }
}
impl TryFrom<api::BlobId> for BlobId {
    type Error = GrpcProtoConversionError;

    /// Deserializes the bincode-encoded blob ID.
    fn try_from(blob_id: api::BlobId) -> Result<Self, Self::Error> {
        let id = bincode::deserialize(&blob_id.bytes)?;
        Ok(id)
    }
}
impl TryFrom<api::BlobIds> for Vec<BlobId> {
    type Error = GrpcProtoConversionError;

    /// Deserializes each bincode-encoded blob ID, failing on the first
    /// malformed entry.
    fn try_from(blob_ids: api::BlobIds) -> Result<Self, Self::Error> {
        let mut ids = Vec::with_capacity(blob_ids.bytes.len());
        for bytes in blob_ids.bytes {
            ids.push(bincode::deserialize(bytes.as_slice())?);
        }
        Ok(ids)
    }
}
impl TryFrom<BlobId> for api::BlobId {
    type Error = GrpcProtoConversionError;

    /// Serializes the blob ID with bincode into the protobuf message.
    fn try_from(blob_id: BlobId) -> Result<Self, Self::Error> {
        let bytes = bincode::serialize(&blob_id)?;
        Ok(Self { bytes })
    }
}
impl TryFrom<Vec<BlobId>> for api::BlobIds {
    type Error = GrpcProtoConversionError;

    /// Serializes each blob ID with bincode, failing on the first error.
    fn try_from(blob_ids: Vec<BlobId>) -> Result<Self, Self::Error> {
        let mut bytes = Vec::with_capacity(blob_ids.len());
        for blob_id in &blob_ids {
            bytes.push(bincode::serialize(blob_id)?);
        }
        Ok(Self { bytes })
    }
}
impl TryFrom<api::CryptoHash> for CryptoHash {
    type Error = GrpcProtoConversionError;

    /// Parses a crypto hash from the raw bytes carried by the message.
    fn try_from(hash: api::CryptoHash) -> Result<Self, Self::Error> {
        let parsed = CryptoHash::try_from(hash.bytes.as_slice())?;
        Ok(parsed)
    }
}
impl TryFrom<BlobContent> for api::BlobContent {
    type Error = GrpcProtoConversionError;

    /// Serializes the blob content with bincode into the protobuf message.
    fn try_from(blob: BlobContent) -> Result<Self, Self::Error> {
        let bytes = bincode::serialize(&blob)?;
        Ok(Self { bytes })
    }
}
impl TryFrom<api::BlobContent> for BlobContent {
    type Error = GrpcProtoConversionError;

    /// Deserializes the bincode-encoded blob content.
    fn try_from(blob: api::BlobContent) -> Result<Self, Self::Error> {
        let content = bincode::deserialize(blob.bytes.as_slice())?;
        Ok(content)
    }
}
impl From<CryptoHash> for api::CryptoHash {
    /// Copies the hash digest bytes into the protobuf message.
    fn from(hash: CryptoHash) -> Self {
        let bytes = hash.as_bytes().to_vec();
        Self { bytes }
    }
}
impl From<Vec<CryptoHash>> for api::CertificatesBatchRequest {
    /// Converts each certificate hash into its protobuf form.
    fn from(certs: Vec<CryptoHash>) -> Self {
        let hashes = certs.into_iter().map(api::CryptoHash::from).collect();
        Self { hashes }
    }
}
impl TryFrom<Certificate> for api::Certificate {
    type Error = GrpcProtoConversionError;

    /// Serializes a certificate into its protobuf representation.
    ///
    /// The round and the signatures are common to all certificate kinds and
    /// are serialized up front; the inner value is serialized per kind, with
    /// the kind recorded as the protobuf enum discriminant.
    fn try_from(certificate: Certificate) -> Result<Self, Self::Error> {
        let round = bincode::serialize(&certificate.round())?;
        let signatures = bincode::serialize(certificate.signatures())?;
        let (kind, value) = match certificate {
            Certificate::Confirmed(inner) => (
                api::CertificateKind::Confirmed as i32,
                bincode::serialize(inner.value())?,
            ),
            Certificate::Validated(inner) => (
                api::CertificateKind::Validated as i32,
                bincode::serialize(inner.value())?,
            ),
            Certificate::Timeout(inner) => (
                api::CertificateKind::Timeout as i32,
                bincode::serialize(inner.value())?,
            ),
        };
        Ok(Self {
            value,
            round,
            signatures,
            kind,
        })
    }
}
impl TryFrom<api::Certificate> for Certificate {
type Error = GrpcProtoConversionError;
fn try_from(certificate: api::Certificate) -> Result<Self, Self::Error> {
let round = bincode::deserialize(&certificate.round)?;
let signatures = bincode::deserialize(&certificate.signatures)?;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/grpc/pool.rs | linera-rpc/src/grpc/pool.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::time::Duration;
use super::{transport, GrpcError};
/// A pool of transport channels to be used by gRPC.
#[derive(Clone, Default)]
pub struct GrpcConnectionPool {
    // Transport options applied to every channel created by this pool.
    options: transport::Options,
    // Lazily created channels, keyed by the target address string.
    channels: papaya::HashMap<String, transport::Channel>,
}
impl GrpcConnectionPool {
    /// Creates an empty pool that builds channels with the given options.
    pub fn new(options: transport::Options) -> Self {
        Self {
            options,
            channels: papaya::HashMap::default(),
        }
    }

    /// Sets (or clears) the timeout for establishing a connection.
    pub fn with_connect_timeout(mut self, connect_timeout: impl Into<Option<Duration>>) -> Self {
        self.options.connect_timeout = connect_timeout.into();
        self
    }

    /// Sets (or clears) the timeout applied to requests on the channels.
    pub fn with_timeout(mut self, timeout: impl Into<Option<Duration>>) -> Self {
        self.options.timeout = timeout.into();
        self
    }

    /// Obtains a channel for the current address. Either clones an existing one (thereby
    /// reusing the connection), or creates one if needed. New channels do not create a
    /// connection immediately.
    pub fn channel(&self, address: String) -> Result<transport::Channel, GrpcError> {
        let map = self.channels.pin();
        match map.get(&address) {
            Some(channel) => Ok(channel.clone()),
            None => {
                let channel = transport::create_channel(address.clone(), &self.options)?;
                // Another caller may have raced us; keep whichever channel won.
                Ok(map.get_or_insert(address, channel).clone())
            }
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/codec.rs | linera-rpc/src/simple/codec.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{io, mem, ops::DerefMut};
use bytes::{Buf, BufMut, BytesMut};
use linera_core::node::NodeError;
use thiserror::Error;
use tokio_util::codec::{Decoder, Encoder};
use crate::RpcMessage;
/// The size of the frame prefix that contains the payload size.
// Four bytes: payloads are therefore limited to `u32::MAX` bytes per frame.
const PREFIX_SIZE: u8 = mem::size_of::<u32>() as u8;
/// An encoder/decoder of [`RpcMessage`]s for the RPC protocol.
///
/// The frames are length-delimited by a [`u32`] prefix, and the payload is deserialized by
/// [`bincode`].
#[derive(Clone, Copy, Debug)]
pub struct Codec;
impl Encoder<RpcMessage> for Codec {
    type Error = Error;
    /// Appends one length-prefixed frame containing the bincode-serialized
    /// `message` to `buffer`, leaving any bytes already in `buffer` untouched.
    fn encode(&mut self, message: RpcMessage, buffer: &mut BytesMut) -> Result<(), Self::Error> {
        // Work in a detached tail so the frame can be built without touching
        // bytes already present in `buffer`.
        let mut frame_buffer = buffer.split_off(buffer.len());
        // Reserve the 4-byte length prefix; the real size is patched in below.
        frame_buffer.put_u32_le(0);
        let mut frame_writer = frame_buffer.writer();
        bincode::serialize_into(&mut frame_writer, &message)
            .map_err(|error| Error::Serialization(*error))?;
        let mut frame_buffer = frame_writer.into_inner();
        let frame_size = frame_buffer.len();
        let payload_size = frame_size - PREFIX_SIZE as usize;
        // Overwrite the placeholder prefix with the actual payload size,
        // rejecting payloads that do not fit in a `u32`.
        let mut start_of_frame = frame_buffer.deref_mut();
        start_of_frame.put_u32_le(
            payload_size
                .try_into()
                .map_err(|_| Error::MessageTooBig { size: payload_size })?,
        );
        // Reattach the completed frame to the caller's buffer.
        buffer.unsplit(frame_buffer);
        Ok(())
    }
}
impl Decoder for Codec {
    type Item = RpcMessage;
    type Error = Error;
    /// Attempts to extract one length-prefixed frame from `buffer`.
    ///
    /// Returns `Ok(None)` when the buffer does not yet contain a complete
    /// frame; the caller is expected to read more bytes and retry.
    fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        // Not even the 4-byte prefix has arrived yet.
        if buffer.len() < PREFIX_SIZE.into() {
            return Ok(None);
        }
        // Peek at the prefix through a separate slice so that `buffer` is not
        // consumed in case the payload is still incomplete.
        let mut start_of_buffer: &[u8] = &*buffer;
        let payload_size = start_of_buffer
            .get_u32_le()
            .try_into()
            .expect("u32 should fit in a usize");
        let frame_size = PREFIX_SIZE as usize + payload_size;
        if buffer.len() < frame_size {
            // Pre-allocate room for the rest of the frame before waiting for
            // more bytes.
            buffer.reserve(frame_size);
            return Ok(None);
        }
        // Consume the prefix and the payload, then decode the payload.
        let _prefix = buffer.split_to(PREFIX_SIZE.into());
        let payload = buffer.split_to(payload_size);
        let message =
            bincode::deserialize(&payload).map_err(|error| Error::Deserialization(*error))?;
        Ok(Some(message))
    }
}
/// Errors that can arise during transmission or reception of [`RpcMessage`]s.
#[derive(Debug, Error)]
pub enum Error {
    /// Failure in the underlying byte stream or datagram socket.
    #[error("I/O error in the underlying transport: {0}")]
    IoError(#[from] io::Error),
    /// An incoming payload was not valid bincode for an [`RpcMessage`].
    #[error("Failed to deserialize an incoming message: {0}")]
    Deserialization(#[source] bincode::ErrorKind),
    /// An outgoing message could not be serialized with bincode.
    #[error("Failed to serialize outgoing message: {0}")]
    Serialization(#[source] bincode::ErrorKind),
    /// The serialized payload exceeds what the `u32` length prefix can express.
    #[error("RpcMessage is too big to fit in a protocol frame: \
    message is {size} bytes but can't be larger than {max} bytes.",
    max = u32::MAX)]
    MessageTooBig { size: usize },
}
impl From<Error> for NodeError {
fn from(error: Error) -> NodeError {
match error {
Error::IoError(io_error) => NodeError::ClientIoError {
error: format!("{}", io_error),
},
err => {
tracing::error!("Unexpected decoding error: {err}");
NodeError::InvalidDecoding
}
}
}
}
#[cfg(test)]
mod tests {
    use bytes::{BufMut, BytesMut};
    use linera_core::data_types::ChainInfoQuery;
    use test_strategy::proptest;
    use tokio_util::codec::{Decoder, Encoder};
    use super::{Codec, RpcMessage, PREFIX_SIZE};
    /// Test decoding of a frame from a buffer.
    ///
    /// The buffer may contain leading or trailing bytes around the frame. The frame contains the
    /// size of the payload, and the payload is a serialized dummy [`RpcMessage`].
    ///
    /// The decoder should produce the exact same message as used as the test input, and it should
    /// ignore the leading and trailing bytes.
    #[proptest]
    fn decodes_frame_ignoring_leading_and_trailing_bytes(
        leading_bytes: Vec<u8>,
        message_contents: ChainInfoQuery,
        trailing_bytes: Vec<u8>,
    ) {
        let message = RpcMessage::ChainInfoQuery(Box::new(message_contents));
        let payload = bincode::serialize(&message).expect("RpcMessage is serializable");
        let mut buffer = BytesMut::with_capacity(
            leading_bytes.len() + PREFIX_SIZE as usize + payload.len() + trailing_bytes.len(),
        );
        buffer.extend_from_slice(&leading_bytes);
        // Detach the leading bytes; the decoder only sees the frame onwards.
        let start_of_buffer = buffer.split();
        buffer.put_u32_le(payload.len() as u32);
        buffer.extend_from_slice(&payload);
        buffer.extend_from_slice(&trailing_bytes);
        let result = Codec.decode(&mut buffer);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), Some(message));
        // Neither the leading nor the trailing bytes may be consumed.
        assert_eq!(&start_of_buffer, &leading_bytes);
        assert_eq!(&buffer, &trailing_bytes);
    }
    /// Test encoding a message to buffer.
    ///
    /// The buffer may already contain some leading bytes, but the cursor is set to where the frame
    /// should start.
    ///
    /// The encoder should write a prefix with the size of the serialized message, followed by the
    /// serialized message bytes. It should not touch the leading bytes nor append any trailing
    /// bytes.
    #[proptest]
    fn encodes_at_the_correct_buffer_offset(
        leading_bytes: Vec<u8>,
        message_contents: ChainInfoQuery,
    ) {
        let message = RpcMessage::ChainInfoQuery(Box::new(message_contents));
        let serialized_message =
            bincode::serialize(&message).expect("Serialization should succeed");
        let mut buffer = BytesMut::new();
        buffer.extend_from_slice(&leading_bytes);
        let frame_start = buffer.len();
        let prefix_end = frame_start + PREFIX_SIZE as usize;
        let result = Codec.encode(message, &mut buffer);
        assert!(matches!(result, Ok(())));
        // Leading bytes must be untouched.
        assert_eq!(&buffer[..frame_start], &leading_bytes);
        // The prefix must hold the payload size, little-endian.
        let prefix = u32::from_le_bytes(
            buffer[frame_start..prefix_end]
                .try_into()
                .expect("Incorrect prefix slice indices"),
        );
        assert_eq!(prefix as usize, serialized_message.len());
        assert_eq!(
            buffer.len(),
            leading_bytes.len() + PREFIX_SIZE as usize + prefix as usize
        );
        assert_eq!(&buffer[prefix_end..], &serialized_message);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/client.rs | linera-rpc/src/simple/client.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::future::Future;
use futures::{sink::SinkExt, stream::StreamExt};
use linera_base::{
crypto::CryptoHash,
data_types::{BlobContent, BlockHeight, NetworkDescription},
identifiers::{BlobId, ChainId},
time::{timer, Duration},
};
use linera_chain::{
data_types::BlockProposal,
types::{
ConfirmedBlockCertificate, LiteCertificate, TimeoutCertificate, ValidatedBlockCertificate,
},
};
use linera_core::{
data_types::{ChainInfoQuery, ChainInfoResponse},
node::{CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode},
};
use linera_version::VersionInfo;
use super::{codec, transport::TransportProtocol};
use crate::{
config::ValidatorPublicNetworkPreConfig, HandleConfirmedCertificateRequest,
HandleLiteCertRequest, HandleTimeoutCertificateRequest, HandleValidatedCertificateRequest,
RpcMessage,
};
/// A client for the "simple" (UDP/TCP) validator network protocol.
#[derive(Clone)]
pub struct SimpleClient {
    // Address and transport protocol of the validator to talk to.
    network: ValidatorPublicNetworkPreConfig<TransportProtocol>,
    // Maximum time to wait when sending a request.
    send_timeout: Duration,
    // Maximum time to wait for the reply.
    recv_timeout: Duration,
}
impl SimpleClient {
pub(crate) fn new(
network: ValidatorPublicNetworkPreConfig<TransportProtocol>,
send_timeout: Duration,
recv_timeout: Duration,
) -> Self {
Self {
network,
send_timeout,
recv_timeout,
}
}
async fn send_recv_internal(&self, message: RpcMessage) -> Result<RpcMessage, codec::Error> {
let address = format!("{}:{}", self.network.host, self.network.port);
let mut stream = self.network.protocol.connect(address).await?;
// Send message
timer::timeout(self.send_timeout, stream.send(message))
.await
.map_err(|timeout| codec::Error::IoError(timeout.into()))??;
// Wait for reply
timer::timeout(self.recv_timeout, stream.next())
.await
.map_err(|timeout| codec::Error::IoError(timeout.into()))?
.transpose()?
.ok_or_else(|| codec::Error::IoError(std::io::ErrorKind::UnexpectedEof.into()))
}
async fn query<Response>(&self, query: RpcMessage) -> Result<Response, Response::Error>
where
Response: TryFrom<RpcMessage>,
Response::Error: From<codec::Error>,
{
self.send_recv_internal(query).await?.try_into()
}
}
impl ValidatorNode for SimpleClient {
    type NotificationStream = NotificationStream;
    /// Returns the validator's address as `<protocol>://<host>:<port>`.
    fn address(&self) -> String {
        format!(
            "{}://{}:{}",
            self.network.protocol, self.network.host, self.network.port
        )
    }
    /// Initiates a new block.
    async fn handle_block_proposal(
        &self,
        proposal: BlockProposal,
    ) -> Result<ChainInfoResponse, NodeError> {
        let request = RpcMessage::BlockProposal(Box::new(proposal));
        self.query(request).await
    }
    /// Processes a lite certificate.
    async fn handle_lite_certificate(
        &self,
        certificate: LiteCertificate<'_>,
        delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        let wait_for_outgoing_messages = delivery.wait_for_outgoing_messages();
        let request = RpcMessage::LiteCertificate(Box::new(HandleLiteCertRequest {
            // The borrowed certificate is turned into an owned value so it
            // can be sent in the request.
            certificate: certificate.cloned(),
            wait_for_outgoing_messages,
        }));
        self.query(request).await
    }
    /// Processes a validated certificate.
    async fn handle_validated_certificate(
        &self,
        certificate: ValidatedBlockCertificate,
    ) -> Result<ChainInfoResponse, NodeError> {
        let request = HandleValidatedCertificateRequest { certificate };
        let request = RpcMessage::ValidatedCertificate(Box::new(request));
        self.query(request).await
    }
    /// Processes a confirmed certificate.
    async fn handle_confirmed_certificate(
        &self,
        certificate: ConfirmedBlockCertificate,
        delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        let wait_for_outgoing_messages = delivery.wait_for_outgoing_messages();
        let request = HandleConfirmedCertificateRequest {
            certificate,
            wait_for_outgoing_messages,
        };
        let request = RpcMessage::ConfirmedCertificate(Box::new(request));
        self.query(request).await
    }
    /// Processes a timeout certificate.
    async fn handle_timeout_certificate(
        &self,
        certificate: TimeoutCertificate,
    ) -> Result<ChainInfoResponse, NodeError> {
        let request = HandleTimeoutCertificateRequest { certificate };
        let request = RpcMessage::TimeoutCertificate(Box::new(request));
        self.query(request).await
    }
    /// Handles information queries for this chain.
    async fn handle_chain_info_query(
        &self,
        query: ChainInfoQuery,
    ) -> Result<ChainInfoResponse, NodeError> {
        let request = RpcMessage::ChainInfoQuery(Box::new(query));
        self.query(request).await
    }
    /// The simple protocol does not support notification streams; this always
    /// fails with a `SubscriptionError` naming the transport.
    fn subscribe(
        &self,
        _chains: Vec<ChainId>,
    ) -> impl Future<Output = Result<NotificationStream, NodeError>> + Send {
        let transport = self.network.protocol.to_string();
        async { Err(NodeError::SubscriptionError { transport }) }
    }
    /// Queries the validator's version information.
    async fn get_version_info(&self) -> Result<VersionInfo, NodeError> {
        self.query(RpcMessage::VersionInfoQuery).await
    }
    /// Queries the description of the network the validator belongs to.
    async fn get_network_description(&self) -> Result<NetworkDescription, NodeError> {
        self.query(RpcMessage::NetworkDescriptionQuery).await
    }
    /// Uploads a blob to the validator, returning its ID.
    async fn upload_blob(&self, content: BlobContent) -> Result<BlobId, NodeError> {
        self.query(RpcMessage::UploadBlob(Box::new(content))).await
    }
    /// Downloads a blob by ID.
    async fn download_blob(&self, blob_id: BlobId) -> Result<BlobContent, NodeError> {
        self.query(RpcMessage::DownloadBlob(Box::new(blob_id)))
            .await
    }
    /// Downloads a blob that is pending on the given chain.
    async fn download_pending_blob(
        &self,
        chain_id: ChainId,
        blob_id: BlobId,
    ) -> Result<BlobContent, NodeError> {
        self.query(RpcMessage::DownloadPendingBlob(Box::new((
            chain_id, blob_id,
        ))))
        .await
    }
    /// Submits a blob the given chain is waiting for.
    async fn handle_pending_blob(
        &self,
        chain_id: ChainId,
        blob: BlobContent,
    ) -> Result<ChainInfoResponse, NodeError> {
        self.query(RpcMessage::HandlePendingBlob(Box::new((chain_id, blob))))
            .await
    }
    /// Downloads a single confirmed block certificate by hash.
    async fn download_certificate(
        &self,
        hash: CryptoHash,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        Ok(self
            .download_certificates(vec![hash])
            .await?
            .into_iter()
            .next()
            .unwrap()) // UNWRAP: We know there is exactly one certificate, otherwise we would have an error.
    }
    /// Downloads confirmed block certificates by hash, failing with the set
    /// of missing hashes if the validator did not return all of them.
    async fn download_certificates(
        &self,
        hashes: Vec<CryptoHash>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        let certificates = self
            .query::<Vec<ConfirmedBlockCertificate>>(RpcMessage::DownloadCertificates(
                hashes.clone(),
            ))
            .await?;
        if certificates.len() != hashes.len() {
            // Report exactly which requested hashes were not returned.
            let missing_hashes: Vec<CryptoHash> = hashes
                .into_iter()
                .filter(|hash| !certificates.iter().any(|cert| cert.hash() == *hash))
                .collect();
            Err(NodeError::MissingCertificates(missing_hashes))
        } else {
            Ok(certificates)
        }
    }
    /// Downloads the chain's confirmed block certificates at the given
    /// heights, failing if fewer than requested are returned.
    async fn download_certificates_by_heights(
        &self,
        chain_id: ChainId,
        heights: Vec<BlockHeight>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        let expected_count = heights.len();
        let certificates: Vec<ConfirmedBlockCertificate> = self
            .query(RpcMessage::DownloadCertificatesByHeights(
                chain_id,
                heights.clone(),
            ))
            .await?;
        if certificates.len() < expected_count {
            return Err(NodeError::MissingCertificatesByHeights { chain_id, heights });
        }
        Ok(certificates)
    }
    /// Returns the hash of the last certificate that used the given blob.
    async fn blob_last_used_by(&self, blob_id: BlobId) -> Result<CryptoHash, NodeError> {
        self.query(RpcMessage::BlobLastUsedBy(Box::new(blob_id)))
            .await
    }
    /// Returns the last certificate that used the given blob.
    async fn blob_last_used_by_certificate(
        &self,
        blob_id: BlobId,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        self.query::<ConfirmedBlockCertificate>(RpcMessage::BlobLastUsedByCertificate(Box::new(
            blob_id,
        )))
        .await
    }
    /// Returns the subset of `blob_ids` the validator does not have.
    async fn missing_blob_ids(&self, blob_ids: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError> {
        self.query(RpcMessage::MissingBlobIds(blob_ids)).await
    }
    /// Queries which shard serves the given chain, converting the RPC-level
    /// shard info into the core data type.
    async fn get_shard_info(
        &self,
        chain_id: ChainId,
    ) -> Result<linera_core::data_types::ShardInfo, NodeError> {
        let rpc_shard_info: crate::message::ShardInfo =
            self.query(RpcMessage::ShardInfoQuery(chain_id)).await?;
        Ok(linera_core::data_types::ShardInfo {
            shard_id: rpc_shard_info.shard_id,
            total_shards: rpc_shard_info.total_shards,
        })
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/mod.rs | linera-rpc/src/simple/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod client;
mod codec;
mod node_provider;
#[cfg(with_server)]
mod server;
mod transport;
pub use client::*;
pub use codec::*;
pub use node_provider::*;
#[cfg(with_server)]
pub use server::*;
pub use transport::*;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/node_provider.rs | linera-rpc/src/simple/node_provider.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::str::FromStr as _;
use linera_core::node::{NodeError, ValidatorNodeProvider};
use super::SimpleClient;
use crate::{config::ValidatorPublicNetworkPreConfig, node_provider::NodeOptions};
/// A client without an address - serves as a client factory.
#[derive(Copy, Clone)]
pub struct SimpleNodeProvider(NodeOptions);
impl SimpleNodeProvider {
    /// Creates a provider that builds clients with the given timeout options.
    pub fn new(options: NodeOptions) -> Self {
        Self(options)
    }
}
impl ValidatorNodeProvider for SimpleNodeProvider {
    type Node = SimpleClient;

    /// Parses `address` into a validator network configuration and builds a
    /// client for it, using the provider's stored timeouts.
    fn make_node(&self, address: &str) -> Result<Self::Node, NodeError> {
        match ValidatorPublicNetworkPreConfig::from_str(address) {
            Ok(network) => Ok(SimpleClient::new(
                network,
                self.0.send_timeout,
                self.0.recv_timeout,
            )),
            Err(_) => Err(NodeError::CannotResolveValidatorAddress {
                address: address.to_string(),
            }),
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/transport.rs | linera-rpc/src/simple/transport.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashMap, io, mem, net::SocketAddr, pin::pin, sync::Arc};
use async_trait::async_trait;
use futures::{
future,
stream::{self, FuturesUnordered, SplitSink, SplitStream},
Sink, SinkExt, Stream, StreamExt, TryStreamExt,
};
use linera_core::{JoinSetExt as _, TaskHandle};
use serde::{Deserialize, Serialize};
use tokio::{
io::AsyncWriteExt,
net::{lookup_host, TcpListener, TcpStream, ToSocketAddrs, UdpSocket},
sync::Mutex,
task::JoinSet,
};
use tokio_util::{codec::Framed, sync::CancellationToken, udp::UdpFramed};
use tracing::{error, warn};
use crate::{
simple::{codec, codec::Codec},
RpcMessage,
};
/// Suggested buffer size
// NOTE(review): kept as a string, presumably for use as a clap default value — confirm.
pub const DEFAULT_MAX_DATAGRAM_SIZE: &str = "65507";
/// Number of tasks to spawn before attempting to reap some finished tasks to prevent memory leaks.
const REAP_TASKS_THRESHOLD: usize = 100;
/// Supported transport protocols.
#[derive(clap::ValueEnum, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum TransportProtocol {
    /// Datagram-based transport over UDP.
    Udp,
    /// Stream-based transport over TCP.
    Tcp,
}
impl std::str::FromStr for TransportProtocol {
    type Err = String;
    /// Parses via clap's `ValueEnum` machinery; the `true` flag makes the
    /// match case-insensitive.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        clap::ValueEnum::from_str(s, true)
    }
}
impl std::fmt::Display for TransportProtocol {
    /// Formats the protocol using its `Debug` name (`Udp` / `Tcp`).
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "{self:?}")
    }
}
impl TransportProtocol {
pub fn scheme(&self) -> &'static str {
match self {
TransportProtocol::Udp => "udp",
TransportProtocol::Tcp => "tcp",
}
}
}
/// A pool of (outgoing) data streams.
pub trait ConnectionPool: Send {
    /// Sends `message` to the peer at `address`, creating or reusing a
    /// connection as the implementation sees fit.
    fn send_message_to<'a>(
        &'a mut self,
        message: RpcMessage,
        address: &'a str,
    ) -> future::BoxFuture<'a, Result<(), codec::Error>>;
}
/// The handler required to create a service.
///
/// The implementation needs to implement [`Clone`] because a seed instance is used to generate
/// cloned instances, where each cloned instance handles a single request. Multiple cloned instances
/// may exist at the same time and handle separate requests concurrently.
#[async_trait]
pub trait MessageHandler: Clone {
    /// Processes one incoming message, returning the reply to send back, if any.
    async fn handle_message(&mut self, message: RpcMessage) -> Option<RpcMessage>;
}
/// A handle to a spawned server task, used to await its completion along with
/// the set of tasks it is executing.
pub struct ServerHandle {
    pub handle: TaskHandle<Result<(), std::io::Error>>,
}

impl ServerHandle {
    /// Waits for the server task to finish, converting a panicked or
    /// cancelled task into an `Interrupted` I/O error.
    pub async fn join(self) -> Result<(), std::io::Error> {
        match self.handle.await {
            Ok(result) => result,
            Err(_) => Err(std::io::Error::new(
                std::io::ErrorKind::Interrupted,
                "Server task did not finish successfully",
            )),
        }
    }
}
/// A trait alias for a protocol transport.
///
/// A transport is an active connection that can be used to send and receive
/// [`RpcMessage`]s.
pub trait Transport:
    Stream<Item = Result<RpcMessage, codec::Error>> + Sink<RpcMessage, Error = codec::Error>
{
}
// Blanket impl: anything with the right `Stream` + `Sink` bounds is a `Transport`.
impl<T> Transport for T where
    T: Stream<Item = Result<RpcMessage, codec::Error>> + Sink<RpcMessage, Error = codec::Error>
{
}
impl TransportProtocol {
    /// Creates a transport for this protocol.
    ///
    /// Resolves `address` and connects to the first resolved socket address.
    /// Resolution failures and unresolvable addresses are returned as errors
    /// instead of panicking, since they are ordinary runtime I/O failures
    /// (the original used `expect`, which aborted the process).
    pub async fn connect(
        self,
        address: impl ToSocketAddrs,
    ) -> Result<impl Transport, std::io::Error> {
        let mut addresses = lookup_host(address).await?;
        let address = addresses.next().ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "Couldn't resolve address to connect to",
            )
        })?;
        let stream: futures::future::Either<_, _> = match self {
            TransportProtocol::Udp => {
                // Bind an ephemeral local port; every outgoing frame is
                // addressed to the resolved peer and the sender address is
                // dropped on receive.
                let socket = UdpSocket::bind(&"0.0.0.0:0").await?;
                UdpFramed::new(socket, Codec)
                    .with(move |message| future::ready(Ok((message, address))))
                    .map_ok(|(message, _address)| message)
                    .left_stream()
            }
            TransportProtocol::Tcp => {
                let stream = TcpStream::connect(address).await?;
                Framed::new(stream, Codec).right_stream()
            }
        };
        Ok(stream)
    }
    /// Creates a [`ConnectionPool`] for this protocol.
    pub async fn make_outgoing_connection_pool(
        self,
    ) -> Result<Box<dyn ConnectionPool>, std::io::Error> {
        let pool: Box<dyn ConnectionPool> = match self {
            Self::Udp => Box::new(UdpConnectionPool::new().await?),
            Self::Tcp => Box::new(TcpConnectionPool::new()),
        };
        Ok(pool)
    }
    /// Runs a server for this protocol and the given message handler.
    pub fn spawn_server<S>(
        self,
        address: impl ToSocketAddrs + Send + 'static,
        state: S,
        shutdown_signal: CancellationToken,
        join_set: &mut JoinSet<()>,
    ) -> ServerHandle
    where
        S: MessageHandler + Send + 'static,
    {
        let handle = match self {
            Self::Udp => join_set.spawn_task(UdpServer::run(address, state, shutdown_signal)),
            Self::Tcp => join_set.spawn_task(TcpServer::run(address, state, shutdown_signal)),
        };
        ServerHandle { handle }
    }
}
/// An implementation of [`ConnectionPool`] based on UDP.
struct UdpConnectionPool {
    // A single UDP socket framed with the RPC codec; every outgoing message
    // is sent from this socket and addressed per call.
    transport: UdpFramed<Codec>,
}
impl UdpConnectionPool {
    /// Binds a UDP socket to an ephemeral local port for outgoing messages.
    async fn new() -> Result<Self, std::io::Error> {
        let socket = UdpSocket::bind(&"0.0.0.0:0").await?;
        let transport = UdpFramed::new(socket, Codec);
        Ok(Self { transport })
    }
}
impl ConnectionPool for UdpConnectionPool {
    /// Parses `address` into a socket address and sends `message` there over
    /// the shared socket.
    fn send_message_to<'a>(
        &'a mut self,
        message: RpcMessage,
        address: &'a str,
    ) -> future::BoxFuture<'a, Result<(), codec::Error>> {
        Box::pin(async move {
            let peer = address.parse().map_err(std::io::Error::other)?;
            self.transport.send((message, peer)).await
        })
    }
}
/// Server implementation for UDP.
pub struct UdpServer<State> {
    // Seed handler; cloned once per incoming message.
    handler: State,
    // Outgoing half of the socket, shared with the per-message handler tasks.
    udp_sink: SharedUdpSink,
    // Incoming half of the socket.
    udp_stream: SplitStream<UdpFramed<Codec>>,
    // Latest handler task per peer; a new task awaits the previous one before
    // replying, so replies to the same peer are not interleaved.
    active_handlers: HashMap<SocketAddr, TaskHandle<()>>,
    // Owns all spawned handler tasks.
    join_set: JoinSet<()>,
}
/// Type alias for the outgoing endpoint of UDP messages.
type SharedUdpSink = Arc<Mutex<SplitSink<UdpFramed<Codec>, (RpcMessage, SocketAddr)>>>;
impl<State> UdpServer<State>
where
    State: MessageHandler + Send + 'static,
{
    /// Runs the UDP server implementation.
    pub async fn run(
        address: impl ToSocketAddrs,
        state: State,
        shutdown_signal: CancellationToken,
    ) -> Result<(), std::io::Error> {
        let mut server = Self::bind(address, state).await?;
        loop {
            // `biased` so that a pending shutdown wins over further messages.
            tokio::select! { biased;
                _ = shutdown_signal.cancelled() => {
                    server.shutdown().await;
                    return Ok(());
                }
                result = server.udp_stream.next() => match result {
                    Some(Ok((message, peer))) => server.handle_message(message, peer),
                    Some(Err(error)) => server.handle_error(error).await?,
                    None => unreachable!("`UdpFramed` should never return `None`"),
                },
            }
        }
    }
    /// Creates a [`UdpServer`] bound to the provided `address`, handling messages using the
    /// provided `handler`.
    async fn bind(address: impl ToSocketAddrs, handler: State) -> Result<Self, std::io::Error> {
        let socket = UdpSocket::bind(address).await?;
        let (udp_sink, udp_stream) = UdpFramed::new(socket, Codec).split();
        Ok(UdpServer {
            handler,
            udp_sink: Arc::new(Mutex::new(udp_sink)),
            udp_stream,
            active_handlers: HashMap::new(),
            join_set: JoinSet::new(),
        })
    }
    /// Spawns a task to handle a single incoming message.
    fn handle_message(&mut self, message: RpcMessage, peer: SocketAddr) {
        // Remember the previous task for this peer so the new task can wait
        // for it before sending its own reply.
        let previous_task = self.active_handlers.remove(&peer);
        let mut state = self.handler.clone();
        let udp_sink = self.udp_sink.clone();
        let new_task = self.join_set.spawn_task(async move {
            if let Some(reply) = state.handle_message(message).await {
                // Wait for the previous reply to this peer to be sent first.
                if let Some(task) = previous_task {
                    if let Err(error) = task.await {
                        warn!("Message handler task panicked: {}", error);
                    }
                }
                let status = udp_sink.lock().await.send((reply, peer)).await;
                if let Err(error) = status {
                    error!("Failed to send query response: {}", error);
                }
            }
        });
        self.active_handlers.insert(peer, new_task);
        if self.active_handlers.len() >= REAP_TASKS_THRESHOLD {
            // Collect finished tasks to avoid leaking memory.
            self.active_handlers.retain(|_, task| task.is_running());
            self.join_set.reap_finished_tasks();
        }
    }
    /// Handles an error while receiving a message.
    async fn handle_error(&mut self, error: codec::Error) -> Result<(), std::io::Error> {
        match error {
            codec::Error::IoError(io_error) => {
                // Socket-level failures are fatal: drain in-flight handlers
                // and report the error to the caller.
                error!("I/O error in UDP server: {io_error}");
                self.shutdown().await;
                Err(io_error)
            }
            other_error => {
                // Malformed datagrams are logged and skipped.
                warn!("Received an invalid message: {other_error}");
                Ok(())
            }
        }
    }
    /// Gracefully shuts down the server, waiting for existing tasks to finish.
    async fn shutdown(&mut self) {
        let handlers = mem::take(&mut self.active_handlers);
        let mut handler_results = FuturesUnordered::from_iter(handlers.into_values());
        while let Some(result) = handler_results.next().await {
            if let Err(error) = result {
                warn!("Message handler panicked: {}", error);
            }
        }
        self.join_set.await_all_tasks().await;
    }
}
/// An implementation of [`ConnectionPool`] based on TCP.
struct TcpConnectionPool {
    /// One cached framed TCP connection per remote address.
    streams: HashMap<String, Framed<TcpStream, Codec>>,
}
impl TcpConnectionPool {
    /// Creates an empty pool with no cached connections.
    fn new() -> Self {
        let streams = HashMap::new();
        Self { streams }
    }
    /// Returns the cached stream for `address`, opening and caching a new
    /// connection first if there is none yet.
    ///
    /// # Errors
    ///
    /// Returns the underlying I/O error if connecting to `address` fails; in
    /// that case nothing is cached.
    async fn get_stream(
        &mut self,
        address: &str,
    ) -> Result<&mut Framed<TcpStream, Codec>, io::Error> {
        if !self.streams.contains_key(address) {
            match TcpStream::connect(address).await {
                Ok(stream) => {
                    self.streams
                        .insert(address.to_string(), Framed::new(stream, Codec));
                }
                Err(error) => {
                    error!("Failed to open connection to {}: {}", address, error);
                    return Err(error);
                }
            }
        }
        Ok(self
            .streams
            .get_mut(address)
            .expect("the stream was inserted above if it was missing"))
    }
}
impl ConnectionPool for TcpConnectionPool {
    /// Sends `message` over the pooled connection to `address`.
    ///
    /// On a send failure the cached connection is evicted, so the next call
    /// reconnects from scratch.
    fn send_message_to<'a>(
        &'a mut self,
        message: RpcMessage,
        address: &'a str,
    ) -> future::BoxFuture<'a, Result<(), codec::Error>> {
        Box::pin(async move {
            let stream = self.get_stream(address).await?;
            match stream.send(message).await {
                Ok(()) => Ok(()),
                Err(error) => {
                    self.streams.remove(address);
                    Err(error)
                }
            }
        })
    }
}
/// Server implementation for TCP.
pub struct TcpServer<State> {
    /// Framed connection to a single client.
    connection: Framed<TcpStream, Codec>,
    /// Application-level handler invoked for every received message.
    handler: State,
    /// Token used to request that this connection shuts down.
    shutdown_signal: CancellationToken,
}
impl<State> TcpServer<State>
where
    State: MessageHandler + Send + 'static,
{
    /// Runs the TCP server implementation.
    ///
    /// Listens for connections and spawns a task with a new [`TcpServer`] instance to serve that
    /// client.
    pub async fn run(
        address: impl ToSocketAddrs,
        handler: State,
        shutdown_signal: CancellationToken,
    ) -> Result<(), std::io::Error> {
        let listener = TcpListener::bind(address).await?;
        // Turn the listener into an endless stream of accepted sockets.
        let mut accept_stream = stream::try_unfold(listener, |listener| async move {
            let (socket, _) = listener.accept().await?;
            Ok::<_, io::Error>(Some((socket, listener)))
        });
        let mut accept_stream = pin!(accept_stream);
        // Child token: cancelling the parent also cancels every connection task.
        let connection_shutdown_signal = shutdown_signal.child_token();
        let mut join_set = JoinSet::new();
        let mut reap_countdown = REAP_TASKS_THRESHOLD;
        loop {
            // `biased`: prefer the shutdown branch over pending connections.
            tokio::select! { biased;
                _ = shutdown_signal.cancelled() => {
                    join_set.await_all_tasks().await;
                    return Ok(());
                }
                maybe_socket = accept_stream.next() => match maybe_socket {
                    Some(Ok(socket)) => {
                        let server = TcpServer::new_connection(
                            socket,
                            handler.clone(),
                            connection_shutdown_signal.clone(),
                        );
                        join_set.spawn_task(server.serve());
                        reap_countdown -= 1;
                    }
                    Some(Err(error)) => {
                        // Accept failures are fatal; let active connections finish first.
                        join_set.await_all_tasks().await;
                        return Err(error);
                    }
                    None => unreachable!(
                        "The `accept_stream` should never finish unless there's an error",
                    ),
                },
            }
            // Periodically collect finished connection tasks to bound memory use.
            if reap_countdown == 0 {
                join_set.reap_finished_tasks();
                reap_countdown = REAP_TASKS_THRESHOLD;
            }
        }
    }
    /// Creates a new [`TcpServer`] to serve a single connection established on the provided
    /// [`TcpStream`].
    fn new_connection(
        tcp_stream: TcpStream,
        handler: State,
        shutdown_signal: CancellationToken,
    ) -> Self {
        TcpServer {
            connection: Framed::new(tcp_stream, Codec),
            handler,
            shutdown_signal,
        }
    }
    /// Serves a client through a single connection.
    ///
    /// Terminates on shutdown, on a receive error, or when the client closes
    /// the connection.
    async fn serve(mut self) {
        loop {
            tokio::select! { biased;
                _ = self.shutdown_signal.cancelled() => {
                    // Close the socket explicitly so the client sees a clean shutdown.
                    let mut tcp_stream = self.connection.into_inner();
                    if let Err(error) = tcp_stream.shutdown().await {
                        let peer = tcp_stream
                            .peer_addr()
                            .map_or_else(|_| "an unknown peer".to_owned(), |address| address.to_string());
                        warn!("Failed to close connection to {peer}: {error:?}");
                    }
                    return;
                }
                result = self.connection.next() => match result {
                    Some(Ok(message)) => self.handle_message(message).await,
                    Some(Err(error)) => {
                        Self::handle_error(error);
                        return;
                    }
                    // `None`: the client closed the connection.
                    None => break,
                },
            }
        }
    }
    /// Handles a single request message from a client.
    ///
    /// Sends the handler's reply back on the same connection, if there is one.
    async fn handle_message(&mut self, message: RpcMessage) {
        if let Some(reply) = self.handler.handle_message(message).await {
            if let Err(error) = self.connection.send(reply).await {
                error!("Failed to send query response: {error}");
            }
        }
    }
    /// Handles an error received while attempting to receive from the connection.
    ///
    /// Ignores a successful connection termination, while logging an unexpected connection
    /// termination or any other error.
    fn handle_error(error: codec::Error) {
        if !matches!(
            &error,
            codec::Error::IoError(error)
                if error.kind() == io::ErrorKind::UnexpectedEof
                || error.kind() == io::ErrorKind::ConnectionReset
        ) {
            error!("Error while reading TCP stream: {error}");
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/src/simple/server.rs | linera-rpc/src/simple/server.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use async_trait::async_trait;
use futures::{channel::mpsc, lock::Mutex};
use linera_base::{data_types::Blob, time::Duration};
use linera_core::{
data_types::CrossChainRequest,
node::NodeError,
worker::{NetworkActions, WorkerError, WorkerState},
JoinSetExt as _,
};
use linera_storage::Storage;
use tokio::{sync::oneshot, task::JoinSet};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, instrument};
use super::transport::{MessageHandler, ServerHandle, TransportProtocol};
use crate::{
config::{CrossChainConfig, ShardId, ValidatorInternalNetworkPreConfig},
cross_chain_message_queue, RpcMessage,
};
/// A server for the "simple" (TCP/UDP) validator-internal network protocol.
#[derive(Clone)]
pub struct Server<S>
where
    S: Storage,
{
    /// Internal network configuration, including the transport protocol.
    network: ValidatorInternalNetworkPreConfig<TransportProtocol>,
    /// Host this shard listens on.
    host: String,
    /// Port this shard listens on.
    port: u16,
    /// The worker state that processes chain operations.
    state: WorkerState<S>,
    /// Identifier of the shard served by this instance.
    shard_id: ShardId,
    /// Settings for forwarding cross-chain requests to other shards.
    cross_chain_config: CrossChainConfig,
    // Stats
    /// Number of RPC packets processed so far.
    packets_processed: u64,
    /// Number of requests that ended in a user-visible error.
    user_errors: u64,
}
impl<S> Server<S>
where
    S: Storage,
{
    /// Creates a new [`Server`] with zeroed statistics counters.
    pub fn new(
        network: ValidatorInternalNetworkPreConfig<TransportProtocol>,
        host: String,
        port: u16,
        state: WorkerState<S>,
        shard_id: ShardId,
        cross_chain_config: CrossChainConfig,
    ) -> Self {
        Self {
            network,
            host,
            port,
            state,
            shard_id,
            cross_chain_config,
            packets_processed: 0,
            user_errors: 0,
        }
    }
    /// Returns the number of packets processed so far.
    pub fn packets_processed(&self) -> u64 {
        self.packets_processed
    }
    /// Returns the number of user errors observed so far.
    pub fn user_errors(&self) -> u64 {
        self.user_errors
    }
}
impl<S> Server<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Forwards cross-chain queries received on `receiver` to the shards that
    /// should handle them, through a shared outgoing connection pool.
    #[expect(clippy::too_many_arguments)]
    async fn forward_cross_chain_queries(
        nickname: String,
        network: ValidatorInternalNetworkPreConfig<TransportProtocol>,
        cross_chain_max_retries: u32,
        cross_chain_retry_delay: Duration,
        cross_chain_sender_delay: Duration,
        cross_chain_sender_failure_rate: f32,
        this_shard: ShardId,
        receiver: mpsc::Receiver<(CrossChainRequest, ShardId)>,
    ) {
        // One connection pool shared by all forwarded requests.
        let pool = Arc::new(Mutex::new(
            network
                .protocol
                .make_outgoing_connection_pool()
                .await
                .expect("Initialization should not fail"),
        ));
        // Resolve the target shard's address and send the request over the pool.
        let handle_request = move |shard_id, request| {
            let pool = pool.clone();
            let shard = network.shard(shard_id);
            let remote_address = format!("{}:{}", shard.host, shard.port);
            let message = RpcMessage::CrossChainRequest(Box::new(request));
            async move {
                pool.lock()
                    .await
                    .send_message_to(message.clone(), &remote_address)
                    .await?;
                anyhow::Result::<_, anyhow::Error>::Ok(())
            }
        };
        // Retry/delay/failure-injection logic lives in the shared queue helper.
        cross_chain_message_queue::forward_cross_chain_queries(
            nickname,
            cross_chain_max_retries,
            cross_chain_retry_delay,
            cross_chain_sender_delay,
            cross_chain_sender_failure_rate,
            this_shard,
            receiver,
            handle_request,
        )
        .await;
    }
    /// Spawns the server tasks onto `join_set` and returns a handle to them.
    ///
    /// This starts both the cross-chain forwarding task and the
    /// protocol-specific (TCP or UDP) listener.
    pub fn spawn(
        self,
        shutdown_signal: CancellationToken,
        join_set: &mut JoinSet<()>,
    ) -> ServerHandle {
        info!(
            "Listening to {:?} traffic on {}:{}",
            self.network.protocol, self.host, self.port
        );
        let address = (self.host.clone(), self.port);
        // Bounded queue between message handling and cross-chain forwarding.
        let (cross_chain_sender, cross_chain_receiver) =
            mpsc::channel(self.cross_chain_config.queue_size);
        join_set.spawn_task(Self::forward_cross_chain_queries(
            self.state.nickname().to_string(),
            self.network.clone(),
            self.cross_chain_config.max_retries,
            Duration::from_millis(self.cross_chain_config.retry_delay_ms),
            Duration::from_millis(self.cross_chain_config.sender_delay_ms),
            self.cross_chain_config.sender_failure_rate,
            self.shard_id,
            cross_chain_receiver,
        ));
        let protocol = self.network.protocol;
        let state = RunningServerState {
            server: self,
            cross_chain_sender,
        };
        // Launch server for the appropriate protocol.
        protocol.spawn_server(address, state, shutdown_signal, join_set)
    }
}
/// Per-connection state of a running [`Server`], implementing [`MessageHandler`].
#[derive(Clone)]
struct RunningServerState<S>
where
    S: Storage,
{
    /// The underlying server configuration, worker state and counters.
    server: Server<S>,
    /// Queue used to schedule cross-chain requests for delivery to other shards.
    cross_chain_sender: mpsc::Sender<(CrossChainRequest, ShardId)>,
}
#[async_trait]
impl<S> MessageHandler for RunningServerState<S>
where
    S: Storage + Clone + Send + Sync + 'static,
{
    /// Dispatches one incoming [`RpcMessage`] to the worker and returns the
    /// reply to send back, if any.
    ///
    /// Cross-chain requests produced as side effects of handling are scheduled
    /// via [`Self::handle_network_actions`]. Errors are converted into an
    /// error reply for the client rather than propagated.
    #[instrument(
        target = "simple_server",
        skip_all,
        fields(
            nickname = self.server.state.nickname(),
            chain_id = ?message.target_chain_id()
        )
    )]
    async fn handle_message(&mut self, message: RpcMessage) -> Option<RpcMessage> {
        let reply = match message {
            RpcMessage::BlockProposal(message) => {
                match self.server.state.handle_block_proposal(*message).await {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle block proposal");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::LiteCertificate(request) => {
                // Optionally create a channel to wait for outgoing messages to
                // be delivered before replying.
                let (sender, receiver) = request
                    .wait_for_outgoing_messages
                    .then(oneshot::channel)
                    .unzip();
                match Box::pin(
                    self.server
                        .state
                        .handle_lite_certificate(request.certificate, sender),
                )
                .await
                {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        if let Some(receiver) = receiver {
                            if let Err(e) = receiver.await {
                                error!("Failed to wait for message delivery: {e}");
                            }
                        }
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        let nickname = self.server.state.nickname();
                        // A missing value is expected during normal operation,
                        // so it is only logged at debug level.
                        if let WorkerError::MissingCertificateValue = &error {
                            debug!(nickname, %error, "Failed to handle lite certificate");
                        } else {
                            error!(nickname, %error, "Failed to handle lite certificate");
                        }
                        Err(error.into())
                    }
                }
            }
            RpcMessage::TimeoutCertificate(request) => {
                match self
                    .server
                    .state
                    .handle_timeout_certificate(request.certificate)
                    .await
                {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle timeout certificate");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::ValidatedCertificate(request) => {
                match self
                    .server
                    .state
                    .handle_validated_certificate(request.certificate)
                    .await
                {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle validated certificate");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::ConfirmedCertificate(request) => {
                // Optionally wait for outgoing messages, as for lite certificates.
                let (sender, receiver) = request
                    .wait_for_outgoing_messages
                    .then(oneshot::channel)
                    .unzip();
                match self
                    .server
                    .state
                    .handle_confirmed_certificate(request.certificate, sender)
                    .await
                {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        if let Some(receiver) = receiver {
                            if let Err(e) = receiver.await {
                                error!("Failed to wait for message delivery: {e}");
                            }
                        }
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle confirmed certificate");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::ChainInfoQuery(message) => {
                match self.server.state.handle_chain_info_query(*message).await {
                    Ok((info, actions)) => {
                        // Cross-shard requests
                        self.handle_network_actions(actions);
                        // Response
                        Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info))))
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle chain info query");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::CrossChainRequest(request) => {
                match self.server.state.handle_cross_chain_request(*request).await {
                    Ok(actions) => {
                        self.handle_network_actions(actions);
                    }
                    Err(error) => {
                        self.log_error(&error, "Failed to handle cross-chain request");
                    }
                }
                // No user to respond to.
                Ok(None)
            }
            RpcMessage::DownloadPendingBlob(request) => {
                let (chain_id, blob_id) = *request;
                match self
                    .server
                    .state
                    .download_pending_blob(chain_id, blob_id)
                    .await
                {
                    Ok(blob) => Ok(Some(RpcMessage::DownloadPendingBlobResponse(Box::new(
                        blob.into(),
                    )))),
                    Err(error) => {
                        self.log_error(&error, "Failed to handle pending blob request");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::HandlePendingBlob(request) => {
                let (chain_id, blob_content) = *request;
                match self
                    .server
                    .state
                    .handle_pending_blob(chain_id, Blob::new(blob_content))
                    .await
                {
                    Ok(info) => Ok(Some(RpcMessage::ChainInfoResponse(Box::new(info)))),
                    Err(error) => {
                        self.log_error(&error, "Failed to handle pending blob");
                        Err(error.into())
                    }
                }
            }
            RpcMessage::VersionInfoQuery => {
                Ok(Some(RpcMessage::VersionInfoResponse(Box::default())))
            }
            // All remaining variants are responses or messages this server is
            // not supposed to receive; reject them uniformly.
            RpcMessage::Vote(_)
            | RpcMessage::Error(_)
            | RpcMessage::ChainInfoResponse(_)
            | RpcMessage::VersionInfoResponse(_)
            | RpcMessage::NetworkDescriptionQuery
            | RpcMessage::NetworkDescriptionResponse(_)
            | RpcMessage::ShardInfoQuery(_)
            | RpcMessage::ShardInfoResponse(_)
            | RpcMessage::DownloadBlob(_)
            | RpcMessage::DownloadBlobResponse(_)
            | RpcMessage::DownloadPendingBlobResponse(_)
            | RpcMessage::DownloadConfirmedBlock(_)
            | RpcMessage::DownloadConfirmedBlockResponse(_)
            | RpcMessage::BlobLastUsedBy(_)
            | RpcMessage::BlobLastUsedByResponse(_)
            | RpcMessage::BlobLastUsedByCertificate(_)
            | RpcMessage::BlobLastUsedByCertificateResponse(_)
            | RpcMessage::MissingBlobIds(_)
            | RpcMessage::MissingBlobIdsResponse(_)
            | RpcMessage::DownloadCertificates(_)
            | RpcMessage::DownloadCertificatesResponse(_)
            | RpcMessage::UploadBlob(_)
            | RpcMessage::UploadBlobResponse(_)
            | RpcMessage::DownloadCertificatesByHeights(_, _)
            | RpcMessage::DownloadCertificatesByHeightsResponse(_) => {
                Err(NodeError::UnexpectedMessage)
            }
        };
        // NOTE(review): `self` is a per-connection/per-task clone, so these
        // counters appear to track packets seen by this handler instance only
        // — confirm against how the transports clone the handler.
        self.server.packets_processed += 1;
        if self.server.packets_processed % 5000 == 0 {
            debug!(
                "[{}] {}:{} (shard {}) has processed {} packets",
                self.server.state.nickname(),
                self.server.host,
                self.server.port,
                self.server.shard_id,
                self.server.packets_processed
            );
        }
        match reply {
            Ok(x) => x,
            Err(error) => {
                // TODO(#459): Make it a warning or an error again.
                debug!(
                    "[{}] User query failed: {}",
                    self.server.state.nickname(),
                    error
                );
                self.server.user_errors += 1;
                // Send the error back to the client instead of dropping it.
                Some(error.into())
            }
        }
    }
}
impl<S> RunningServerState<S>
where
    S: Storage + Send,
{
    /// Schedules the cross-chain requests in `actions` for delivery to their
    /// target shards via the bounded `cross_chain_sender` channel.
    ///
    /// If the channel cannot accept a request (e.g. it is full or closed),
    /// that request and all remaining ones are dropped with an error log.
    fn handle_network_actions(&mut self, actions: NetworkActions) {
        for request in actions.cross_chain_requests {
            // Route each request to the shard owning its target chain.
            let shard_id = self.server.network.get_shard_id(request.target_chain_id());
            debug!(
                "[{}] Scheduling cross-chain query: {} -> {}",
                self.server.state.nickname(),
                self.server.shard_id,
                shard_id
            );
            if let Err(error) = self.cross_chain_sender.try_send((request, shard_id)) {
                error!(%error, "dropping cross-chain request");
                break;
            }
        }
    }
    /// Logs a worker error: at error level if it is local to this validator,
    /// and only at debug level otherwise.
    fn log_error(&self, error: &WorkerError, context: &str) {
        let nickname = self.server.state.nickname();
        if error.is_local() {
            error!(nickname, %error, "{}", context);
        } else {
            debug!(nickname, %error, "{}", context);
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/tests/format.rs | linera-rpc/tests/format.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{AccountPublicKey, AccountSignature, CryptoHash, TestString},
data_types::{BlobContent, ChainDescription, ChainOrigin, OracleResponse, Round},
identifiers::{Account, AccountOwner, BlobType, GenericApplicationId},
ownership::ChainOwnership,
vm::VmRuntime,
};
use linera_chain::{
data_types::{MessageAction, OriginalProposal, Transaction},
manager::{ChainManagerInfo, LockingBlock},
types::{Certificate, CertificateKind, ConfirmedBlock, Timeout, ValidatedBlock},
};
use linera_core::{data_types::CrossChainRequest, node::NodeError};
use linera_execution::{
system::{AdminOperation, SystemMessage, SystemOperation},
Message, MessageKind, Operation,
};
use linera_rpc::RpcMessage;
use serde_reflection::{Registry, Result, Samples, Tracer, TracerConfig};
/// Builds the serde-reflection [`Registry`] describing the wire format of all
/// RPC-visible types, used to pin the serialization format in a snapshot test.
fn get_registry() -> Result<Registry> {
    let mut tracer = Tracer::new(
        TracerConfig::default()
            .record_samples_for_newtype_structs(true)
            .record_samples_for_tuple_structs(true),
    );
    let mut samples = Samples::new();
    // 1. Record samples for types with custom deserializers.
    {
        // Record sample values for Secp256k1PublicKey and Secp256k1Signature
        // as the ones generated by serde-reflection are not valid and will fail.
        let validator_keypair = linera_base::crypto::ValidatorKeypair::generate();
        let validator_signature = linera_base::crypto::ValidatorSignature::new(
            &TestString::new("signature".to_string()),
            &validator_keypair.secret_key,
        );
        tracer.trace_value(&mut samples, &validator_keypair.public_key)?;
        tracer.trace_value(&mut samples, &validator_signature)?;
        // We also record separate samples for EVM-compatible keys,
        // as the generated ones are not valid.
        let evm_secret_key = linera_base::crypto::EvmSecretKey::generate();
        let evm_public_key = evm_secret_key.public();
        tracer.trace_value(&mut samples, &evm_public_key)?;
        let evm_signature = linera_base::crypto::EvmSignature::new(
            CryptoHash::new(&TestString::new("signature".to_string())),
            &evm_secret_key,
        );
        tracer.trace_value(&mut samples, &evm_signature)?;
    }
    // 2. Trace the main entry point(s) + every enum separately.
    tracer.trace_type::<AccountPublicKey>(&samples)?;
    tracer.trace_type::<AccountSignature>(&samples)?;
    tracer.trace_type::<Round>(&samples)?;
    tracer.trace_type::<OracleResponse>(&samples)?;
    tracer.trace_type::<Account>(&samples)?;
    tracer.trace_type::<SystemOperation>(&samples)?;
    tracer.trace_type::<AdminOperation>(&samples)?;
    tracer.trace_type::<SystemMessage>(&samples)?;
    tracer.trace_type::<Operation>(&samples)?;
    tracer.trace_type::<Message>(&samples)?;
    tracer.trace_type::<Transaction>(&samples)?;
    tracer.trace_type::<OriginalProposal>(&samples)?;
    tracer.trace_type::<VmRuntime>(&samples)?;
    tracer.trace_type::<MessageAction>(&samples)?;
    tracer.trace_type::<MessageKind>(&samples)?;
    tracer.trace_type::<CertificateKind>(&samples)?;
    tracer.trace_type::<Certificate>(&samples)?;
    tracer.trace_type::<ConfirmedBlock>(&samples)?;
    tracer.trace_type::<ValidatedBlock>(&samples)?;
    tracer.trace_type::<Timeout>(&samples)?;
    tracer.trace_type::<ChainDescription>(&samples)?;
    tracer.trace_type::<ChainOrigin>(&samples)?;
    tracer.trace_type::<ChainOwnership>(&samples)?;
    tracer.trace_type::<GenericApplicationId>(&samples)?;
    tracer.trace_type::<LockingBlock>(&samples)?;
    tracer.trace_type::<ChainManagerInfo>(&samples)?;
    tracer.trace_type::<CrossChainRequest>(&samples)?;
    tracer.trace_type::<NodeError>(&samples)?;
    tracer.trace_type::<RpcMessage>(&samples)?;
    tracer.trace_type::<BlobType>(&samples)?;
    tracer.trace_type::<BlobContent>(&samples)?;
    tracer.trace_type::<AccountOwner>(&samples)?;
    tracer.registry()
}
/// Checks the traced serialization format against the committed YAML snapshot.
#[test]
fn test_format() {
    let registry = get_registry().unwrap();
    insta::assert_yaml_snapshot!("format.yaml", registry);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-rpc/tests/transport.rs | linera-rpc/tests/transport.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(web)]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
/// Smoke test for the gRPC transport: queries version info from a locally
/// running Linera proxy.
#[cfg_attr(web, wasm_bindgen_test::wasm_bindgen_test)]
#[cfg_attr(not(web), tokio::test(flavor = "current_thread"))]
#[ignore]
// this test currently must be run manually, as it requires a Linera proxy to be running on 127.0.0.1:9000.
async fn client() {
    use linera_base::time::Duration;
    use linera_core::node::ValidatorNode as _;
    use linera_rpc::grpc::{
        transport::{create_channel, Options},
        GrpcClient,
    };
    let retry_delay = Duration::from_millis(100);
    let max_retries = 5;
    let address = "http://127.0.0.1:9000".to_string();
    // Keep timeouts short: the proxy is expected to run locally.
    let options = Options {
        connect_timeout: Some(Duration::from_millis(100)),
        timeout: Some(Duration::from_millis(100)),
    };
    let channel = create_channel(address.clone(), &options).unwrap();
    // Any successful response proves the transport works end to end.
    let _ = GrpcClient::new(address, channel, retry_delay, max_retries)
        .get_version_info()
        .await
        .unwrap();
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/build.rs | linera-chain/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Build script: declares the `cfg` aliases used throughout this crate.
fn main() {
    cfg_aliases::cfg_aliases! {
        // Wasm target compiled with the `web` feature.
        web: { all(target_arch = "wasm32", feature = "web") },
        // Test builds or the explicit `test` feature.
        with_testing: { any(test, feature = "test") },
        // Metrics are only available off-Wasm with the `metrics` feature.
        with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
        // GraphQL support is compiled out on the web target.
        with_graphql: { not(web) },
    };
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/outbox.rs | linera-chain/src/outbox.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use allocative::Allocative;
use linera_base::data_types::{ArithmeticError, BlockHeight};
#[cfg(with_testing)]
use linera_views::context::MemoryContext;
use linera_views::{
context::Context,
queue_view::QueueView,
register_view::RegisterView,
views::{ClonableView, View},
ViewError,
};
#[cfg(test)]
#[path = "unit_tests/outbox_tests.rs"]
mod outbox_tests;
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_interval, register_histogram_vec};
    use prometheus::HistogramVec;
    /// Histogram observing the number of queued block heights in an outbox
    /// after each update.
    pub static OUTBOX_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "outbox_size",
            "Outbox size",
            &[],
            // Buckets spanning 1 to 10,000 queued heights.
            exponential_bucket_interval(1.0, 10_000.0),
        )
    });
}
/// The state of an outbox
/// * An outbox is used to send messages to another chain.
/// * Internally, this is implemented as a FIFO queue of (increasing) block heights.
///   Messages are contained in blocks, together with destination information, so currently
///   we just send the certified blocks over and let the receivers figure out what were the
///   messages for them.
/// * When marking block heights as received, messages at lower heights are also marked (i.e. dequeued).
#[cfg_attr(with_graphql, derive(async_graphql::SimpleObject))]
#[derive(Debug, ClonableView, View, Allocative)]
#[allocative(bound = "C")]
pub struct OutboxStateView<C>
where
    C: Context + 'static,
{
    /// The minimum block height accepted in the future.
    /// Heights below this value are ignored by `schedule_message`.
    pub next_height_to_schedule: RegisterView<C, BlockHeight>,
    /// Keep sending these certified blocks of ours until they are acknowledged by
    /// receivers.
    pub queue: QueueView<C, BlockHeight>,
}
impl<C> OutboxStateView<C>
where
    C: Context + Clone + 'static,
{
    /// Schedules a message at the given height if we haven't already.
    /// Returns true if a change was made.
    ///
    /// # Errors
    ///
    /// Returns an [`ArithmeticError`] if incrementing the height watermark
    /// overflows.
    pub(crate) fn schedule_message(
        &mut self,
        height: BlockHeight,
    ) -> Result<bool, ArithmeticError> {
        // Heights below the watermark were already scheduled; nothing to do.
        if height < *self.next_height_to_schedule.get() {
            return Ok(false);
        }
        self.next_height_to_schedule.set(height.try_add_one()?);
        self.queue.push_back(height);
        #[cfg(with_metrics)]
        metrics::OUTBOX_SIZE
            .with_label_values(&[])
            .observe(self.queue.count() as f64);
        Ok(true)
    }
    /// Marks all messages as received up to the given height.
    /// Returns the heights that were newly marked as received.
    pub(crate) async fn mark_messages_as_received(
        &mut self,
        height: BlockHeight,
    ) -> Result<Vec<BlockHeight>, ViewError> {
        let mut updates = Vec::new();
        // The queue holds increasing heights, so dequeue from the front until
        // we pass the acknowledged height.
        while let Some(h) = self.queue.front().await? {
            if h > height {
                break;
            }
            self.queue.delete_front();
            updates.push(h);
        }
        #[cfg(with_metrics)]
        metrics::OUTBOX_SIZE
            .with_label_values(&[])
            .observe(self.queue.count() as f64);
        Ok(updates)
    }
}
#[cfg(with_testing)]
impl OutboxStateView<MemoryContext<()>>
where
    MemoryContext<()>: Context + Clone + Send + Sync + 'static,
{
    /// Creates an empty outbox backed by a fresh in-memory context, for tests.
    pub async fn new() -> Self {
        let context = MemoryContext::new_for_testing(());
        Self::load(context)
            .await
            .expect("Loading from memory should work")
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/block_tracker.rs | linera-chain/src/block_tracker.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BTreeMap, BTreeSet};
use custom_debug_derive::Debug;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency;
use linera_base::{
data_types::{Amount, Blob, BlockHeight, Event, OracleResponse, Timestamp},
ensure,
identifiers::{AccountOwner, BlobId, ChainId, StreamId},
};
use linera_execution::{
execution_state_actor::ExecutionStateActor, ExecutionRuntimeContext, ExecutionStateView,
MessageContext, MessageKind, OperationContext, OutgoingMessage, ResourceController,
ResourceTracker, SystemExecutionStateView, TransactionOutcome, TransactionTracker,
};
use linera_views::context::Context;
use tracing::instrument;
#[cfg(with_metrics)]
use crate::chain::metrics;
use crate::{
chain::EMPTY_BLOCK_SIZE,
data_types::{
IncomingBundle, MessageAction, OperationResult, PostedMessage, ProposedBlock, Transaction,
},
ChainError, ChainExecutionContext, ExecutionResultExt,
};
/// Tracks execution of transactions within a block.
/// Captures the resource policy, produced messages, oracle responses and events.
#[derive(Debug)]
pub struct BlockExecutionTracker<'resources, 'blobs> {
    // Chain this block is being executed for.
    chain_id: ChainId,
    // Height of the block under execution.
    block_height: BlockHeight,
    // Timestamp of the block under execution.
    timestamp: Timestamp,
    // Owner that authenticated the block proposal, if any.
    authenticated_owner: Option<AccountOwner>,
    // Charges resource usage (block size, operations, ...) during execution.
    resource_controller: &'resources mut ResourceController<Option<AccountOwner>, ResourceTracker>,
    // Local wall-clock time, passed to each transaction tracker.
    local_time: Timestamp,
    // Previously recorded oracle responses to replay, one list per transaction.
    #[debug(skip_if = Option::is_none)]
    replaying_oracle_responses: Option<Vec<Vec<OracleResponse>>>,
    // Next available index for applications created in this block.
    next_application_index: u32,
    // Next available index for chains opened in this block.
    next_chain_index: u32,
    // Oracle responses recorded so far, one list per executed transaction.
    #[debug(skip_if = Vec::is_empty)]
    oracle_responses: Vec<Vec<OracleResponse>>,
    // Events recorded so far, one list per executed transaction.
    #[debug(skip_if = Vec::is_empty)]
    events: Vec<Vec<Event>>,
    // Blobs created so far, one list per executed transaction.
    #[debug(skip_if = Vec::is_empty)]
    blobs: Vec<Vec<Blob>>,
    // Outgoing messages produced so far, one list per executed transaction.
    #[debug(skip_if = Vec::is_empty)]
    messages: Vec<Vec<OutgoingMessage>>,
    // Results of the operations executed so far.
    #[debug(skip_if = Vec::is_empty)]
    operation_results: Vec<OperationResult>,
    // Index of the currently executed transaction in a block.
    transaction_index: u32,
    // Blobs published in the block.
    published_blobs: BTreeMap<BlobId, &'blobs Blob>,
    // We expect the number of outcomes to be equal to the number of transactions in the block.
    expected_outcomes_count: usize,
}
impl<'resources, 'blobs> BlockExecutionTracker<'resources, 'blobs> {
    /// Creates a new BlockExecutionTracker.
    ///
    /// Charges the size of an empty block up front; per-transaction sizes are
    /// tracked as each transaction executes.
    pub fn new(
        resource_controller: &'resources mut ResourceController<
            Option<AccountOwner>,
            ResourceTracker,
        >,
        published_blobs: BTreeMap<BlobId, &'blobs Blob>,
        local_time: Timestamp,
        replaying_oracle_responses: Option<Vec<Vec<OracleResponse>>>,
        proposal: &ProposedBlock,
    ) -> Result<Self, ChainError> {
        // Account for the constant overhead of the block itself.
        resource_controller
            .track_block_size(EMPTY_BLOCK_SIZE)
            .with_execution_context(ChainExecutionContext::Block)?;
        Ok(Self {
            chain_id: proposal.chain_id,
            block_height: proposal.height,
            timestamp: proposal.timestamp,
            authenticated_owner: proposal.authenticated_owner,
            resource_controller,
            local_time,
            replaying_oracle_responses,
            next_application_index: 0,
            next_chain_index: 0,
            oracle_responses: Vec::new(),
            events: Vec::new(),
            blobs: Vec::new(),
            messages: Vec::new(),
            operation_results: Vec::new(),
            transaction_index: 0,
            published_blobs,
            expected_outcomes_count: proposal.transactions.len(),
        })
    }
    /// Executes a transaction in the context of the block.
    ///
    /// Charges the transaction's serialized size against the block's resource
    /// budget, runs it (an incoming message bundle or an operation), and
    /// records the resulting outcome in this tracker.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id,
        block_height = %self.block_height,
    ))]
    pub async fn execute_transaction<C>(
        &mut self,
        transaction: &Transaction,
        round: Option<u32>,
        chain: &mut ExecutionStateView<C>,
    ) -> Result<(), ChainError>
    where
        C: Context + Clone + 'static,
        C::Extra: ExecutionRuntimeContext,
    {
        let chain_execution_context = self.chain_execution_context(transaction);
        let mut txn_tracker = self.new_transaction_tracker()?;
        match transaction {
            Transaction::ReceiveMessages(incoming_bundle) => {
                // Charge for the bundle's contribution to the block size.
                self.resource_controller_mut()
                    .track_block_size_of(&incoming_bundle)
                    .with_execution_context(chain_execution_context)?;
                for posted_message in incoming_bundle.messages() {
                    Box::pin(self.execute_message_in_block(
                        chain,
                        posted_message,
                        incoming_bundle,
                        round,
                        &mut txn_tracker,
                    ))
                    .await?;
                }
            }
            Transaction::ExecuteOperation(operation) => {
                self.resource_controller_mut()
                    .with_state(&mut chain.system)
                    .await?
                    .track_block_size_of(&operation)
                    .with_execution_context(chain_execution_context)?;
                #[cfg(with_metrics)]
                let _operation_latency = metrics::OPERATION_EXECUTION_LATENCY.measure_latency_us();
                let context = OperationContext {
                    chain_id: self.chain_id,
                    height: self.block_height,
                    round,
                    authenticated_owner: self.authenticated_owner,
                    timestamp: self.timestamp,
                };
                let mut actor =
                    ExecutionStateActor::new(chain, &mut txn_tracker, self.resource_controller);
                Box::pin(actor.execute_operation(context, operation.clone()))
                    .await
                    .with_execution_context(chain_execution_context)?;
                // Charge the operation itself after it executed successfully.
                self.resource_controller_mut()
                    .with_state(&mut chain.system)
                    .await?
                    .track_operation(operation)
                    .with_execution_context(chain_execution_context)?;
            }
        }
        // Record messages, events, blobs and oracle responses of this transaction.
        let txn_outcome = txn_tracker
            .into_outcome()
            .with_execution_context(chain_execution_context)?;
        self.process_txn_outcome(txn_outcome, &mut chain.system, chain_execution_context)
            .await?;
        Ok(())
    }
    /// Returns a new TransactionTracker for the current transaction.
    ///
    /// The tracker is seeded with the running indices so that artifacts of the
    /// next transaction don't overwrite those of previous ones.
    fn new_transaction_tracker(&mut self) -> Result<TransactionTracker, ChainError> {
        Ok(TransactionTracker::new(
            self.local_time,
            self.transaction_index,
            self.next_application_index,
            self.next_chain_index,
            // `None` unless we are replaying previously recorded responses.
            self.oracle_responses()?,
            &self.blobs,
        ))
    }
    /// Executes a message as part of an incoming bundle in a block.
    ///
    /// Depending on the bundle's [`MessageAction`], the message is either
    /// executed (`Accept`) or rejected (`Reject`); rejected tracked messages
    /// are bounced back to the sender, and unused grants are refunded.
    async fn execute_message_in_block<C>(
        &mut self,
        chain: &mut ExecutionStateView<C>,
        posted_message: &PostedMessage,
        incoming_bundle: &IncomingBundle,
        round: Option<u32>,
        txn_tracker: &mut TransactionTracker,
    ) -> Result<(), ChainError>
    where
        C: Context + Clone + 'static,
        C::Extra: ExecutionRuntimeContext,
    {
        #[cfg(with_metrics)]
        let _message_latency = metrics::MESSAGE_EXECUTION_LATENCY.measure_latency_us();
        let context = MessageContext {
            chain_id: self.chain_id,
            origin: incoming_bundle.origin,
            is_bouncing: posted_message.is_bouncing(),
            height: self.block_height,
            round,
            authenticated_owner: posted_message.authenticated_owner,
            refund_grant_to: posted_message.refund_grant_to,
            timestamp: self.timestamp,
        };
        // Remaining grant; execution may consume part of it, the rest is refunded.
        let mut grant = posted_message.grant;
        match incoming_bundle.action {
            MessageAction::Accept => {
                let chain_execution_context =
                    ChainExecutionContext::IncomingBundle(txn_tracker.transaction_index());
                // Once a chain is closed, accepting incoming messages is not allowed.
                ensure!(!chain.system.closed.get(), ChainError::ClosedChain);
                let mut actor =
                    ExecutionStateActor::new(chain, txn_tracker, self.resource_controller);
                Box::pin(actor.execute_message(
                    context,
                    posted_message.message.clone(),
                    // Only pass the grant through if there is anything to spend.
                    (grant > Amount::ZERO).then_some(&mut grant),
                ))
                .await
                .with_execution_context(chain_execution_context)?;
                // Refund whatever portion of the grant was not consumed.
                actor
                    .send_refund(context, grant)
                    .with_execution_context(chain_execution_context)?;
            }
            MessageAction::Reject => {
                // If rejecting a message fails, the entire block proposal should be
                // scrapped.
                ensure!(
                    !posted_message.is_protected() || *chain.system.closed.get(),
                    ChainError::CannotRejectMessage {
                        chain_id: self.chain_id,
                        origin: incoming_bundle.origin,
                        posted_message: Box::new(posted_message.clone()),
                    }
                );
                let mut actor =
                    ExecutionStateActor::new(chain, txn_tracker, self.resource_controller);
                if posted_message.is_tracked() {
                    // Bounce the message.
                    actor
                        .bounce_message(context, grant, posted_message.message.clone())
                        .with_execution_context(ChainExecutionContext::Block)?;
                } else {
                    // Nothing to do except maybe refund the grant.
                    actor
                        .send_refund(context, grant)
                        .with_execution_context(ChainExecutionContext::Block)?;
                }
            }
        }
        Ok(())
    }
/// Returns oracle responses for the current transaction.
fn oracle_responses(&self) -> Result<Option<Vec<OracleResponse>>, ChainError> {
if let Some(responses) = self.replaying_oracle_responses.as_ref() {
match responses.get(self.transaction_index as usize) {
Some(responses) => Ok(Some(responses.clone())),
None => Err(ChainError::MissingOracleResponseList),
}
} else {
Ok(None)
}
}
/// Processes the transaction outcome.
///
/// Updates block tracker with indexes for the next messages, applications, etc.
/// so that the execution of the next transaction doesn't overwrite the previous ones.
///
/// Tracks the resources used by the transaction - size of the incoming and outgoing messages, blobs, etc.
pub async fn process_txn_outcome<C>(
&mut self,
txn_outcome: TransactionOutcome,
view: &mut SystemExecutionStateView<C>,
context: ChainExecutionContext,
) -> Result<(), ChainError>
where
C: Context + Clone + 'static,
{
let mut resource_controller = self.resource_controller.with_state(view).await?;
for message_out in &txn_outcome.outgoing_messages {
if message_out.kind == MessageKind::Bouncing {
continue; // Bouncing messages are free.
}
resource_controller
.track_message(&message_out.message)
.with_execution_context(context)?;
}
resource_controller
.track_block_size_of(&(
&txn_outcome.oracle_responses,
&txn_outcome.outgoing_messages,
&txn_outcome.events,
&txn_outcome.blobs,
))
.with_execution_context(context)?;
// Account for blobs published by this transaction directly.
for blob in &txn_outcome.blobs {
resource_controller
.track_blob_published(blob)
.with_execution_context(context)?;
}
// Account for blobs published indirectly but referenced by the transaction.
for blob_id in &txn_outcome.blobs_published {
if let Some(blob) = self.published_blobs.get(blob_id) {
resource_controller
.track_blob_published(blob)
.with_execution_context(context)?;
} else {
return Err(ChainError::InternalError(format!(
"Missing published blob {blob_id}"
)));
}
}
self.resource_controller
.track_block_size_of(&(&txn_outcome.operation_result))
.with_execution_context(context)?;
self.next_application_index = txn_outcome.next_application_index;
self.next_chain_index = txn_outcome.next_chain_index;
self.oracle_responses.push(txn_outcome.oracle_responses);
self.events.push(txn_outcome.events);
self.blobs.push(txn_outcome.blobs);
self.messages.push(txn_outcome.outgoing_messages);
if matches!(context, ChainExecutionContext::Operation(_)) {
self.operation_results
.push(OperationResult(txn_outcome.operation_result));
}
self.transaction_index += 1;
Ok(())
}
/// Returns recipient chain IDs for outgoing messages in the block.
pub fn recipients(&self) -> BTreeSet<ChainId> {
self.messages
.iter()
.flatten()
.map(|msg| msg.destination)
.collect()
}
/// Returns stream IDs for events published in the block.
pub fn event_streams(&self) -> BTreeSet<StreamId> {
self.events
.iter()
.flatten()
.map(|event| event.stream_id.clone())
.collect()
}
/// Returns a mutable reference to the resource controller.
pub fn resource_controller_mut(
&mut self,
) -> &mut ResourceController<Option<AccountOwner>, ResourceTracker> {
self.resource_controller
}
/// Finalizes the execution and returns the collected results.
///
/// This method should be called after all transactions have been processed.
/// Panics if the number of lists of oracle responses, outgoing messages,
/// events, or blobs does not match the expected counts.
pub fn finalize(self) -> FinalizeExecutionResult {
// Asserts that the number of outcomes matches the expected count.
assert_eq!(self.oracle_responses.len(), self.expected_outcomes_count);
assert_eq!(self.messages.len(), self.expected_outcomes_count);
assert_eq!(self.events.len(), self.expected_outcomes_count);
assert_eq!(self.blobs.len(), self.expected_outcomes_count);
#[cfg(with_metrics)]
crate::chain::metrics::track_block_metrics(&self.resource_controller.tracker);
(
self.messages,
self.oracle_responses,
self.events,
self.blobs,
self.operation_results,
)
}
/// Returns the execution context for the current transaction.
fn chain_execution_context(&self, transaction: &Transaction) -> ChainExecutionContext {
match transaction {
Transaction::ReceiveMessages(_) => {
ChainExecutionContext::IncomingBundle(self.transaction_index)
}
Transaction::ExecuteOperation(_) => {
ChainExecutionContext::Operation(self.transaction_index)
}
}
}
}
pub(crate) type FinalizeExecutionResult = (
Vec<Vec<OutgoingMessage>>,
Vec<Vec<OracleResponse>>,
Vec<Vec<Event>>,
Vec<Vec<Blob>>,
Vec<OperationResult>,
);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/lib.rs | linera-chain/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module manages the state of a Linera chain, including cross-chain communication.
pub mod block;
mod certificate;
pub mod types {
pub use super::{block::*, certificate::*};
}
mod block_tracker;
mod chain;
pub mod data_types;
mod inbox;
pub mod manager;
mod outbox;
mod pending_blobs;
#[cfg(with_testing)]
pub mod test;
pub use chain::ChainStateView;
use data_types::{MessageBundle, PostedMessage};
use linera_base::{
bcs,
crypto::CryptoError,
data_types::{ArithmeticError, BlockHeight, Round, Timestamp},
identifiers::{ApplicationId, ChainId},
};
use linera_execution::ExecutionError;
use linera_views::ViewError;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum ChainError {
#[error("Cryptographic error: {0}")]
CryptoError(#[from] CryptoError),
#[error(transparent)]
ArithmeticError(#[from] ArithmeticError),
#[error(transparent)]
ViewError(#[from] ViewError),
#[error("Execution error: {0} during {1:?}")]
ExecutionError(Box<ExecutionError>, ChainExecutionContext),
#[error("The chain being queried is not active {0}")]
InactiveChain(ChainId),
#[error(
"Cannot vote for block proposal of chain {chain_id} because a message \
from chain {origin} at height {height} has not been received yet"
)]
MissingCrossChainUpdate {
chain_id: ChainId,
origin: ChainId,
height: BlockHeight,
},
#[error(
"Message in block proposed to {chain_id} does not match the previously received messages from \
origin {origin:?}: was {bundle:?} instead of {previous_bundle:?}"
)]
UnexpectedMessage {
chain_id: ChainId,
origin: ChainId,
bundle: Box<MessageBundle>,
previous_bundle: Box<MessageBundle>,
},
#[error(
"Message in block proposed to {chain_id} is out of order compared to previous messages \
from origin {origin:?}: {bundle:?}. Block and height should be at least: \
{next_height}, {next_index}"
)]
IncorrectMessageOrder {
chain_id: ChainId,
origin: ChainId,
bundle: Box<MessageBundle>,
next_height: BlockHeight,
next_index: u32,
},
#[error(
"Block proposed to {chain_id} is attempting to reject protected message \
{posted_message:?}"
)]
CannotRejectMessage {
chain_id: ChainId,
origin: ChainId,
posted_message: Box<PostedMessage>,
},
#[error(
"Block proposed to {chain_id} is attempting to skip a message bundle \
that cannot be skipped: {bundle:?}"
)]
CannotSkipMessage {
chain_id: ChainId,
origin: ChainId,
bundle: Box<MessageBundle>,
},
#[error(
"Incoming message bundle in block proposed to {chain_id} has timestamp \
{bundle_timestamp:}, which is later than the block timestamp {block_timestamp:}."
)]
IncorrectBundleTimestamp {
chain_id: ChainId,
bundle_timestamp: Timestamp,
block_timestamp: Timestamp,
},
#[error("The signature was not created by a valid entity")]
InvalidSigner,
#[error(
"Chain is expecting a next block at height {expected_block_height} but the given block \
is at height {found_block_height} instead"
)]
UnexpectedBlockHeight {
expected_block_height: BlockHeight,
found_block_height: BlockHeight,
},
#[error("The previous block hash of a new block should match the last block of the chain")]
UnexpectedPreviousBlockHash,
#[error("Sequence numbers above the maximal value are not usable for blocks")]
BlockHeightOverflow,
#[error(
"Block timestamp {new} must not be earlier than the parent block's timestamp {parent}"
)]
InvalidBlockTimestamp { parent: Timestamp, new: Timestamp },
#[error("Round number should be at least {0:?}")]
InsufficientRound(Round),
#[error("Round number should be greater than {0:?}")]
InsufficientRoundStrict(Round),
#[error("Round number should be {0:?}")]
WrongRound(Round),
#[error("Already voted to confirm a different block for height {0:?} at round number {1:?}")]
HasIncompatibleConfirmedVote(BlockHeight, Round),
#[error("Proposal for height {0:?} is not newer than locking block in round {1:?}")]
MustBeNewerThanLockingBlock(BlockHeight, Round),
#[error("Cannot confirm a block before its predecessors: {current_block_height:?}")]
MissingEarlierBlocks { current_block_height: BlockHeight },
#[error("Signatures in a certificate must be from different validators")]
CertificateValidatorReuse,
#[error("Signatures in a certificate must form a quorum")]
CertificateRequiresQuorum,
#[error("Internal error {0}")]
InternalError(String),
#[error("Block proposal has size {0} which is too large")]
BlockProposalTooLarge(usize),
#[error(transparent)]
BcsError(#[from] bcs::Error),
#[error("Closed chains cannot have operations, accepted messages or empty blocks")]
ClosedChain,
#[error("Empty blocks are not allowed")]
EmptyBlock,
#[error("All operations on this chain must be from one of the following applications: {0:?}")]
AuthorizedApplications(Vec<ApplicationId>),
#[error("Missing operations or messages from mandatory applications: {0:?}")]
MissingMandatoryApplications(Vec<ApplicationId>),
#[error("Executed block contains fewer oracle responses than requests")]
MissingOracleResponseList,
#[error("Not signing timeout certificate; current round does not time out")]
RoundDoesNotTimeOut,
#[error("Not signing timeout certificate; current round times out at time {0}")]
NotTimedOutYet(Timestamp),
}
impl ChainError {
/// Returns whether this error is caused by an issue in the local node.
///
/// Returns `false` whenever the error could be caused by a bad message from a peer.
pub fn is_local(&self) -> bool {
match self {
ChainError::CryptoError(_)
| ChainError::ArithmeticError(_)
| ChainError::ViewError(ViewError::NotFound(_))
| ChainError::InactiveChain(_)
| ChainError::IncorrectMessageOrder { .. }
| ChainError::CannotRejectMessage { .. }
| ChainError::CannotSkipMessage { .. }
| ChainError::IncorrectBundleTimestamp { .. }
| ChainError::InvalidSigner
| ChainError::UnexpectedBlockHeight { .. }
| ChainError::UnexpectedPreviousBlockHash
| ChainError::BlockHeightOverflow
| ChainError::InvalidBlockTimestamp { .. }
| ChainError::InsufficientRound(_)
| ChainError::InsufficientRoundStrict(_)
| ChainError::WrongRound(_)
| ChainError::HasIncompatibleConfirmedVote(..)
| ChainError::MustBeNewerThanLockingBlock(..)
| ChainError::MissingEarlierBlocks { .. }
| ChainError::CertificateValidatorReuse
| ChainError::CertificateRequiresQuorum
| ChainError::BlockProposalTooLarge(_)
| ChainError::ClosedChain
| ChainError::EmptyBlock
| ChainError::AuthorizedApplications(_)
| ChainError::MissingMandatoryApplications(_)
| ChainError::MissingOracleResponseList
| ChainError::RoundDoesNotTimeOut
| ChainError::NotTimedOutYet(_)
| ChainError::MissingCrossChainUpdate { .. } => false,
ChainError::ViewError(_)
| ChainError::UnexpectedMessage { .. }
| ChainError::InternalError(_)
| ChainError::BcsError(_) => true,
ChainError::ExecutionError(execution_error, _) => execution_error.is_local(),
}
}
}
#[derive(Copy, Clone, Debug)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum ChainExecutionContext {
Query,
DescribeApplication,
IncomingBundle(u32),
Operation(u32),
Block,
}
pub trait ExecutionResultExt<T> {
fn with_execution_context(self, context: ChainExecutionContext) -> Result<T, ChainError>;
}
impl<T, E> ExecutionResultExt<T> for Result<T, E>
where
E: Into<ExecutionError>,
{
fn with_execution_context(self, context: ChainExecutionContext) -> Result<T, ChainError> {
self.map_err(|error| ChainError::ExecutionError(Box::new(error.into()), context))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/inbox.rs | linera-chain/src/inbox.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use allocative::Allocative;
use async_graphql::SimpleObject;
use linera_base::{
data_types::{ArithmeticError, BlockHeight},
ensure,
identifiers::ChainId,
};
#[cfg(with_testing)]
use linera_views::context::MemoryContext;
use linera_views::{
context::Context,
queue_view::QueueView,
register_view::RegisterView,
views::{ClonableView, View},
ViewError,
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::{data_types::MessageBundle, ChainError};
#[cfg(test)]
#[path = "unit_tests/inbox_tests.rs"]
mod inbox_tests;
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{exponential_bucket_interval, register_histogram_vec};
use prometheus::HistogramVec;
pub static INBOX_SIZE: LazyLock<HistogramVec> = LazyLock::new(|| {
register_histogram_vec(
"inbox_size",
"Inbox size",
&[],
exponential_bucket_interval(1.0, 2_000_000.0),
)
});
pub static REMOVED_BUNDLES: LazyLock<HistogramVec> = LazyLock::new(|| {
register_histogram_vec(
"removed_bundles",
"Number of bundles removed by anticipation",
&[],
exponential_bucket_interval(1.0, 10_000.0),
)
});
}
/// The state of an inbox.
/// * An inbox is used to track bundles received and executed locally.
/// * A `MessageBundle` consists of a logical cursor `(height, index)` and some message
/// content `messages`.
/// * On the surface, an inbox looks like a FIFO queue: the main APIs are `add_bundle` and
/// `remove_bundle`.
/// * However, bundles can also be removed before they are added. When this happens,
/// the bundles removed by anticipation are tracked in a separate queue. Any bundle added
/// later will be required to match the first removed bundle and so on.
/// * The cursors of added bundles (resp. removed bundles) must be increasing over time.
/// * Reconciliation of added and removed bundles is allowed to skip some added bundles.
/// However, the opposite is not true: every removed bundle must be eventually added.
#[cfg_attr(with_graphql, derive(async_graphql::SimpleObject))]
#[derive(Allocative, Debug, ClonableView, View)]
#[allocative(bound = "C")]
pub struct InboxStateView<C>
where
C: Clone + Context,
{
/// We have already added all the messages below this height and index.
pub next_cursor_to_add: RegisterView<C, Cursor>,
/// We have already removed all the messages below this height and index.
pub next_cursor_to_remove: RegisterView<C, Cursor>,
/// These bundles have been added and are waiting to be removed.
pub added_bundles: QueueView<C, MessageBundle>,
/// These bundles have been removed by anticipation and are waiting to be added.
/// At least one of `added_bundles` and `removed_bundles` should be empty.
pub removed_bundles: QueueView<C, MessageBundle>,
}
#[derive(
Debug,
Default,
Clone,
Copy,
Hash,
Eq,
PartialEq,
Ord,
PartialOrd,
Serialize,
Deserialize,
SimpleObject,
Allocative,
)]
pub struct Cursor {
height: BlockHeight,
index: u32,
}
#[derive(Error, Debug)]
pub(crate) enum InboxError {
#[error(transparent)]
ViewError(#[from] ViewError),
#[error(transparent)]
ArithmeticError(#[from] ArithmeticError),
#[error("Cannot reconcile {bundle:?} with {previous_bundle:?}")]
UnexpectedBundle {
bundle: MessageBundle,
previous_bundle: MessageBundle,
},
#[error("{bundle:?} is out of order. Block and height should be at least: {next_cursor:?}")]
IncorrectOrder {
bundle: MessageBundle,
next_cursor: Cursor,
},
#[error(
"{bundle:?} cannot be skipped: it must be received before the next \
messages from the same origin"
)]
UnskippableBundle { bundle: MessageBundle },
}
impl From<&MessageBundle> for Cursor {
#[inline]
fn from(bundle: &MessageBundle) -> Self {
Self {
height: bundle.height,
index: bundle.transaction_index,
}
}
}
impl Cursor {
fn try_add_one(self) -> Result<Self, ArithmeticError> {
let value = Self {
height: self.height,
index: self.index.checked_add(1).ok_or(ArithmeticError::Overflow)?,
};
Ok(value)
}
}
impl From<(ChainId, ChainId, InboxError)> for ChainError {
fn from(value: (ChainId, ChainId, InboxError)) -> Self {
let (chain_id, origin, error) = value;
match error {
InboxError::ViewError(e) => ChainError::ViewError(e),
InboxError::ArithmeticError(e) => ChainError::ArithmeticError(e),
InboxError::UnexpectedBundle {
bundle,
previous_bundle,
} => ChainError::UnexpectedMessage {
chain_id,
origin,
bundle: Box::new(bundle),
previous_bundle: Box::new(previous_bundle),
},
InboxError::IncorrectOrder {
bundle,
next_cursor,
} => ChainError::IncorrectMessageOrder {
chain_id,
origin,
bundle: Box::new(bundle),
next_height: next_cursor.height,
next_index: next_cursor.index,
},
InboxError::UnskippableBundle { bundle } => ChainError::CannotSkipMessage {
chain_id,
origin,
bundle: Box::new(bundle),
},
}
}
}
impl<C> InboxStateView<C>
where
C: Context + Clone + 'static,
{
/// Converts the internal cursor for added bundles into an externally-visible block height.
/// This makes sense because the rest of the system always adds bundles one block at a time.
pub fn next_block_height_to_receive(&self) -> Result<BlockHeight, ChainError> {
let cursor = self.next_cursor_to_add.get();
if cursor.index == 0 {
Ok(cursor.height)
} else {
Ok(cursor.height.try_add_one()?)
}
}
/// Consumes a bundle from the inbox.
///
/// Returns `true` if the bundle was already known, i.e. it was present in `added_bundles`.
pub(crate) async fn remove_bundle(
&mut self,
bundle: &MessageBundle,
) -> Result<bool, InboxError> {
// Record the latest cursor.
let cursor = Cursor::from(bundle);
ensure!(
cursor >= *self.next_cursor_to_remove.get(),
InboxError::IncorrectOrder {
bundle: bundle.clone(),
next_cursor: *self.next_cursor_to_remove.get(),
}
);
// Discard added bundles with lower cursors (if any).
while let Some(previous_bundle) = self.added_bundles.front().await? {
if Cursor::from(&previous_bundle) >= cursor {
break;
}
ensure!(
previous_bundle.is_skippable(),
InboxError::UnskippableBundle {
bundle: previous_bundle
}
);
self.added_bundles.delete_front();
#[cfg(with_metrics)]
metrics::INBOX_SIZE
.with_label_values(&[])
.observe(self.added_bundles.count() as f64);
tracing::trace!("Skipping previously received bundle {:?}", previous_bundle);
}
// Reconcile the bundle with the next added bundle, or mark it as removed.
let already_known = match self.added_bundles.front().await? {
Some(previous_bundle) => {
// Rationale: If the two cursors are equal, then the bundles should match.
// Otherwise, at this point we know that `self.next_cursor_to_add >
// Cursor::from(&previous_bundle) > cursor`. Notably, `bundle` will never be
// added in the future. Therefore, we should fail instead of adding
// it to `self.removed_bundles`.
ensure!(
bundle == &previous_bundle,
InboxError::UnexpectedBundle {
previous_bundle,
bundle: bundle.clone(),
}
);
self.added_bundles.delete_front();
#[cfg(with_metrics)]
metrics::INBOX_SIZE
.with_label_values(&[])
.observe(self.added_bundles.count() as f64);
tracing::trace!("Consuming bundle {:?}", bundle);
true
}
None => {
tracing::trace!("Marking bundle as expected: {:?}", bundle);
self.removed_bundles.push_back(bundle.clone());
#[cfg(with_metrics)]
metrics::REMOVED_BUNDLES
.with_label_values(&[])
.observe(self.removed_bundles.count() as f64);
false
}
};
self.next_cursor_to_remove.set(cursor.try_add_one()?);
Ok(already_known)
}
/// Pushes a bundle to the inbox. The verifications should not fail in production unless
/// many validators are faulty.
///
/// Returns `true` if the bundle was new, `false` if it was already in `removed_bundles`.
pub(crate) async fn add_bundle(&mut self, bundle: MessageBundle) -> Result<bool, InboxError> {
// Record the latest cursor.
let cursor = Cursor::from(&bundle);
ensure!(
cursor >= *self.next_cursor_to_add.get(),
InboxError::IncorrectOrder {
bundle: bundle.clone(),
next_cursor: *self.next_cursor_to_add.get(),
}
);
// Find if the bundle was removed ahead of time.
let newly_added = match self.removed_bundles.front().await? {
Some(previous_bundle) => {
if Cursor::from(&previous_bundle) == cursor {
// We already executed this bundle by anticipation. Remove it from
// the queue.
ensure!(
bundle == previous_bundle,
InboxError::UnexpectedBundle {
previous_bundle,
bundle,
}
);
self.removed_bundles.delete_front();
#[cfg(with_metrics)]
metrics::REMOVED_BUNDLES
.with_label_values(&[])
.observe(self.removed_bundles.count() as f64);
} else {
// The receiver has already executed a later bundle from the same
// sender ahead of time so we should skip this one.
ensure!(
cursor < Cursor::from(&previous_bundle) && bundle.is_skippable(),
InboxError::UnexpectedBundle {
previous_bundle,
bundle,
}
);
}
false
}
None => {
// Otherwise, schedule the messages for execution.
self.added_bundles.push_back(bundle);
#[cfg(with_metrics)]
metrics::INBOX_SIZE
.with_label_values(&[])
.observe(self.added_bundles.count() as f64);
true
}
};
self.next_cursor_to_add.set(cursor.try_add_one()?);
Ok(newly_added)
}
}
#[cfg(with_testing)]
impl InboxStateView<MemoryContext<()>>
where
MemoryContext<()>: Context + Clone + Send + Sync + 'static,
{
pub async fn new() -> Self {
let context = MemoryContext::new_for_testing(());
Self::load(context)
.await
.expect("Loading from memory should work")
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/manager.rs | linera-chain/src/manager.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! # Chain manager
//!
//! This module contains the consensus mechanism for all microchains. Whenever a block is
//! confirmed, a new chain manager is created for the next block height. It manages the consensus
//! state until a new block is confirmed. As long as less than a third of the validators are faulty,
//! it guarantees that at most one `ConfirmedBlock` certificate will be created for this height.
//!
//! The protocol proceeds in rounds, until it reaches a round where a block gets confirmed.
//!
//! There are four kinds of rounds:
//!
//! * In `Round::Fast`, only super owners can propose blocks, and validators vote to confirm a
//! block immediately. Super owners must be careful to make only one block proposal, or else they
//! can permanently block the microchain. If there are no super owners, `Round::Fast` is skipped.
//! * In cooperative mode (`Round::MultiLeader`), all chain owners can propose blocks at any time.
//! The protocol is guaranteed to eventually confirm a block as long as no chain owner
//! continuously actively prevents progress.
//! * In leader rotation mode (`Round::SingleLeader`), chain owners take turns at proposing blocks.
//! It can make progress as long as at least one owner is honest, even if other owners try to
//! prevent it.
//! * In fallback/public mode (`Round::Validator`), validators take turns at proposing blocks.
//! It can always make progress under the standard assumption that there is a quorum of honest
//! validators.
//!
//! ## Safety, i.e. at most one block will be confirmed
//!
//! In all modes this is guaranteed as follows:
//!
//! * Validators (honest ones) never cast a vote if they have already cast any vote in a later
//! round.
//! * Validators never vote for a `ValidatedBlock` **A** in round **r** if they have voted for a
//! _different_ `ConfirmedBlock` **B** in an earlier round **s** ≤ **r**, unless there is a
//! `ValidatedBlock` certificate (with a quorum of validator signatures) for **A** in some round
//! between **s** and **r** included in the block proposal.
//! * Validators only vote for a `ConfirmedBlock` if there is a `ValidatedBlock` certificate for the
//! same block in the same round. (Or, in the `Fast` round, if there is a valid proposal.)
//!
//! This guarantees that once a quorum votes for some `ConfirmedBlock`, there can never be a
//! `ValidatedBlock` certificate (and thus also no `ConfirmedBlock` certificate) for a different
//! block in a later round. So if there are two different `ConfirmedBlock` certificates, they may
//! be from different rounds, but they are guaranteed to contain the same block.
//!
//! ## Liveness, i.e. some block will eventually be confirmed
//!
//! In `Round::Fast`, liveness depends on the super owners coordinating, and proposing at most one
//! block.
//!
//! If they propose none, and there are other owners, `Round::Fast` will eventually time out.
//!
//! In cooperative mode, if there is contention, the owners need to agree on a single owner as the
//! next proposer. That owner should then download all highest-round certificates and block
//! proposals known to the honest validators. They can then make a proposal in a round higher than
//! all previous proposals. If there is any `ValidatedBlock` certificate they must include the
//! highest one in their proposal, and propose that block. Otherwise they can propose a new block.
//! Now all honest validators are allowed to vote for that proposal, and eventually confirm it.
//!
//! If the owners fail to cooperate, any honest owner can initiate the last multi-leader round by
//! making a proposal there, then wait for it to time out, which starts the leader-based mode:
//!
//! In leader-based and fallback/public mode, an honest participant should subscribe to
//! notifications from all validators, and follow the chain. Whenever another leader's round takes
//! too long, they should request timeout votes from the validators to make the next round begin.
//! Once the honest participant becomes the round leader, they should update all validators, so
//! that they all agree on the current round. Then they download the highest `ValidatedBlock`
//! certificate known to any honest validator and include that in their block proposal, just like
//! in the cooperative case.
use std::collections::BTreeMap;
use allocative::Allocative;
use custom_debug_derive::Debug;
use futures::future::Either;
use linera_base::{
crypto::{AccountPublicKey, ValidatorSecretKey},
data_types::{Blob, BlockHeight, Epoch, Round, Timestamp},
ensure,
identifiers::{AccountOwner, BlobId, ChainId},
ownership::ChainOwnership,
};
use linera_execution::ExecutionRuntimeContext;
use linera_views::{
context::Context,
map_view::MapView,
register_view::RegisterView,
views::{ClonableView, View},
ViewError,
};
use rand_chacha::{rand_core::SeedableRng, ChaCha8Rng};
use rand_distr::{Distribution, WeightedAliasIndex};
use serde::{Deserialize, Serialize};
use crate::{
block::{Block, ConfirmedBlock, Timeout, ValidatedBlock},
data_types::{BlockProposal, LiteVote, OriginalProposal, ProposedBlock, Vote},
types::{TimeoutCertificate, ValidatedBlockCertificate},
ChainError,
};
/// The result of verifying a (valid) query.
#[derive(Eq, PartialEq)]
pub enum Outcome {
Accept,
Skip,
}
pub type ValidatedOrConfirmedVote<'a> = Either<&'a Vote<ValidatedBlock>, &'a Vote<ConfirmedBlock>>;
/// The latest block that validators may have voted to confirm: this is either the block proposal
/// from the fast round or a validated block certificate. Validators are allowed to vote for this
/// even if they have locked (i.e. voted to confirm) a different block earlier.
#[derive(Debug, Clone, Serialize, Deserialize, Allocative)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum LockingBlock {
/// A proposal in the `Fast` round.
Fast(BlockProposal),
/// A `ValidatedBlock` certificate in a round other than `Fast`.
Regular(ValidatedBlockCertificate),
}
impl LockingBlock {
/// Returns the locking block's round. To propose a different block, a `ValidatedBlock`
/// certificate from a higher round is needed.
pub fn round(&self) -> Round {
match self {
Self::Fast(_) => Round::Fast,
Self::Regular(certificate) => certificate.round,
}
}
pub fn chain_id(&self) -> ChainId {
match self {
Self::Fast(proposal) => proposal.content.block.chain_id,
Self::Regular(certificate) => certificate.value().chain_id(),
}
}
}
/// The state of the certification process for a chain's next block.
#[cfg_attr(with_graphql, derive(async_graphql::SimpleObject), graphql(complex))]
#[derive(Debug, View, ClonableView, Allocative)]
#[allocative(bound = "C")]
pub struct ChainManager<C>
where
C: Clone + Context + 'static,
{
/// The public keys, weights and types of the chain's owners.
pub ownership: RegisterView<C, ChainOwnership>,
/// The seed for the pseudo-random number generator that determines the round leaders.
pub seed: RegisterView<C, u64>,
/// The probability distribution for choosing a round leader.
#[cfg_attr(with_graphql, graphql(skip))] // Derived from ownership.
#[allocative(skip)]
pub distribution: RegisterView<C, Option<WeightedAliasIndex<u64>>>,
/// The probability distribution for choosing a fallback round leader.
#[cfg_attr(with_graphql, graphql(skip))] // Derived from validator weights.
#[allocative(skip)]
pub fallback_distribution: RegisterView<C, Option<WeightedAliasIndex<u64>>>,
/// Highest-round authenticated block that we have received, but not necessarily
/// checked yet. If there are multiple proposals in the same round, this contains only the
/// first one. This can even contain proposals that did not execute successfully, to determine
/// which round to propose in.
#[cfg_attr(with_graphql, graphql(skip))]
pub signed_proposal: RegisterView<C, Option<BlockProposal>>,
/// Highest-round authenticated block that we have received and checked. If there are multiple
/// proposals in the same round, this contains only the first one.
#[cfg_attr(with_graphql, graphql(skip))]
pub proposed: RegisterView<C, Option<BlockProposal>>,
/// These are blobs published or read by the proposed block.
pub proposed_blobs: MapView<C, BlobId, Blob>,
/// Latest validated proposal that a validator may have voted to confirm. This is either the
/// latest `ValidatedBlock` we have seen, or the proposal from the `Fast` round.
#[cfg_attr(with_graphql, graphql(skip))]
pub locking_block: RegisterView<C, Option<LockingBlock>>,
/// These are blobs published or read by the locking block.
pub locking_blobs: MapView<C, BlobId, Blob>,
/// Latest leader timeout certificate we have received.
#[cfg_attr(with_graphql, graphql(skip))]
pub timeout: RegisterView<C, Option<TimeoutCertificate>>,
/// Latest vote we cast to confirm a block.
#[cfg_attr(with_graphql, graphql(skip))]
pub confirmed_vote: RegisterView<C, Option<Vote<ConfirmedBlock>>>,
/// Latest vote we cast to validate a block.
#[cfg_attr(with_graphql, graphql(skip))]
pub validated_vote: RegisterView<C, Option<Vote<ValidatedBlock>>>,
/// Latest timeout vote we cast.
#[cfg_attr(with_graphql, graphql(skip))]
pub timeout_vote: RegisterView<C, Option<Vote<Timeout>>>,
/// Fallback vote we cast.
#[cfg_attr(with_graphql, graphql(skip))]
pub fallback_vote: RegisterView<C, Option<Vote<Timeout>>>,
/// The time after which we are ready to sign a timeout certificate for the current round.
pub round_timeout: RegisterView<C, Option<Timestamp>>,
/// The lowest round where we can still vote to validate or confirm a block. This is
/// the round to which the timeout applies.
///
/// Having a leader timeout certificate in any given round causes the next one to become
/// current. Seeing a validated block certificate or a valid proposal in any round causes that
/// round to become current, unless a higher one already is.
#[cfg_attr(with_graphql, graphql(skip))]
pub current_round: RegisterView<C, Round>,
/// The owners that take over in fallback mode.
pub fallback_owners: RegisterView<C, BTreeMap<AccountOwner, u64>>,
}
#[cfg(with_graphql)]
#[async_graphql::ComplexObject]
impl<C> ChainManager<C>
where
    C: Context + Clone + 'static,
{
    /// Returns the lowest round where we can still vote to validate or confirm a block. This is
    /// the round to which the timeout applies.
    ///
    /// Having a leader timeout certificate in any given round causes the next one to become
    /// current. Seeing a validated block certificate or a valid proposal in any round causes that
    /// round to become current, unless a higher one already is.
    #[graphql(derived(name = "current_round"))]
    async fn _current_round(&self) -> Round {
        // GraphQL-only accessor; delegates to the inherent `current_round` method so the
        // two can never disagree.
        self.current_round()
    }
}
impl<C> ChainManager<C>
where
C: Context + Clone + 'static,
{
/// Replaces `self` with a new chain manager.
pub fn reset<'a>(
&mut self,
ownership: ChainOwnership,
height: BlockHeight,
local_time: Timestamp,
fallback_owners: impl Iterator<Item = (AccountPublicKey, u64)> + 'a,
) -> Result<(), ChainError> {
let distribution = calculate_distribution(ownership.owners.iter());
let fallback_owners = fallback_owners
.map(|(pub_key, weight)| (AccountOwner::from(pub_key), weight))
.collect::<BTreeMap<_, _>>();
let fallback_distribution = calculate_distribution(fallback_owners.iter());
let current_round = ownership.first_round();
let round_duration = ownership.round_timeout(current_round);
let round_timeout = round_duration.map(|rd| local_time.saturating_add(rd));
self.clear();
self.seed.set(height.0);
self.ownership.set(ownership);
self.distribution.set(distribution);
self.fallback_distribution.set(fallback_distribution);
self.fallback_owners.set(fallback_owners);
self.current_round.set(current_round);
self.round_timeout.set(round_timeout);
Ok(())
}
    /// Returns the most recent confirmed vote we cast.
    pub fn confirmed_vote(&self) -> Option<&Vote<ConfirmedBlock>> {
        // Borrow the stored vote; callers clone if they need ownership.
        self.confirmed_vote.get().as_ref()
    }

    /// Returns the most recent validated vote we cast.
    pub fn validated_vote(&self) -> Option<&Vote<ValidatedBlock>> {
        self.validated_vote.get().as_ref()
    }

    /// Returns the most recent timeout vote we cast.
    pub fn timeout_vote(&self) -> Option<&Vote<Timeout>> {
        self.timeout_vote.get().as_ref()
    }

    /// Returns the most recent fallback vote we cast.
    pub fn fallback_vote(&self) -> Option<&Vote<Timeout>> {
        self.fallback_vote.get().as_ref()
    }

    /// Returns the lowest round where we can still vote to validate or confirm a block. This is
    /// the round to which the timeout applies.
    ///
    /// Having a leader timeout certificate in any given round causes the next one to become
    /// current. Seeing a validated block certificate or a valid proposal in any round causes that
    /// round to become current, unless a higher one already is.
    pub fn current_round(&self) -> Round {
        // `Round` is `Copy`, so we return it by value.
        *self.current_round.get()
    }
    /// Verifies that a proposed block is relevant and should be handled.
    ///
    /// Returns `Outcome::Skip` if we have already seen this exact proposal, `Outcome::Accept` if
    /// it should be processed, and an error if the proposal is obsolete (wrong round, older than
    /// the locking block) or conflicts with a vote we already cast.
    pub fn check_proposed_block(&self, proposal: &BlockProposal) -> Result<Outcome, ChainError> {
        let new_block = &proposal.content.block;
        let new_round = proposal.content.round;
        if let Some(old_proposal) = self.proposed.get() {
            if old_proposal.content == proposal.content {
                return Ok(Outcome::Skip); // We have already seen this proposal; nothing to do.
            }
        }
        // When a block is certified, incrementing its height must succeed.
        ensure!(
            new_block.height < BlockHeight::MAX,
            ChainError::BlockHeightOverflow
        );
        let current_round = self.current_round();
        match new_round {
            // The proposal from the fast round may still be relevant as a locking block, so
            // we don't compare against the current round here.
            Round::Fast => {}
            Round::MultiLeader(_) | Round::SingleLeader(0) => {
                // If the fast round has not timed out yet, only a super owner is allowed to open
                // a later round by making a proposal.
                ensure!(
                    self.is_super(&proposal.owner()) || !current_round.is_fast(),
                    ChainError::WrongRound(current_round)
                );
                // After the fast round, proposals older than the current round are obsolete.
                ensure!(
                    new_round >= current_round,
                    ChainError::InsufficientRound(new_round)
                );
            }
            Round::SingleLeader(_) | Round::Validator(_) => {
                // After the first single-leader round, only proposals from the current round are relevant.
                ensure!(
                    new_round == current_round,
                    ChainError::WrongRound(current_round)
                );
            }
        }
        // The round of our validation votes is only allowed to increase.
        if let Some(vote) = self.validated_vote() {
            ensure!(
                new_round > vote.round,
                ChainError::InsufficientRoundStrict(vote.round)
            );
        }
        // A proposal that isn't newer than the locking block is not relevant anymore.
        if let Some(locking_block) = self.locking_block.get() {
            ensure!(
                locking_block.round() < new_round,
                ChainError::MustBeNewerThanLockingBlock(new_block.height, locking_block.round())
            );
        }
        // If we have voted to confirm we cannot vote to validate a different block anymore, except
        // if there is a validated block certificate from a later round.
        if let Some(vote) = self.confirmed_vote() {
            ensure!(
                match proposal.original_proposal.as_ref() {
                    None => false,
                    Some(OriginalProposal::Regular { certificate }) =>
                        vote.round <= certificate.round,
                    Some(OriginalProposal::Fast(_)) => {
                        // A re-proposal of the fast block we confirmed is always compatible.
                        vote.round.is_fast() && vote.value().matches_proposed_block(new_block)
                    }
                },
                ChainError::HasIncompatibleConfirmedVote(new_block.height, vote.round)
            );
        }
        Ok(Outcome::Accept)
    }
/// Checks if the current round has timed out, and signs a `Timeout`. Returns `true` if the
/// chain manager's state has changed.
pub fn create_timeout_vote(
&mut self,
chain_id: ChainId,
height: BlockHeight,
round: Round,
epoch: Epoch,
key_pair: Option<&ValidatorSecretKey>,
local_time: Timestamp,
) -> Result<bool, ChainError> {
let Some(key_pair) = key_pair else {
return Ok(false); // We are not a validator.
};
ensure!(
round == self.current_round(),
ChainError::WrongRound(self.current_round())
);
let Some(round_timeout) = *self.round_timeout.get() else {
return Err(ChainError::RoundDoesNotTimeOut);
};
ensure!(
local_time >= round_timeout,
ChainError::NotTimedOutYet(round_timeout)
);
if let Some(vote) = self.timeout_vote.get() {
if vote.round == round {
return Ok(false); // We already signed this timeout.
}
}
let value = Timeout::new(chain_id, height, epoch);
self.timeout_vote
.set(Some(Vote::new(value, round, key_pair)));
Ok(true)
}
/// Signs a `Timeout` certificate to switch to fallback mode.
///
/// This must only be called after verifying that the condition for fallback mode is
/// satisfied locally.
pub fn vote_fallback(
&mut self,
chain_id: ChainId,
height: BlockHeight,
epoch: Epoch,
key_pair: Option<&ValidatorSecretKey>,
) -> bool {
let Some(key_pair) = key_pair else {
return false; // We are not a validator.
};
if self.fallback_vote.get().is_some() || self.current_round() >= Round::Validator(0) {
return false; // We already signed this or are already in fallback mode.
}
let value = Timeout::new(chain_id, height, epoch);
let last_regular_round = Round::SingleLeader(u32::MAX);
self.fallback_vote
.set(Some(Vote::new(value, last_regular_round, key_pair)));
true
}
    /// Verifies that a validated block is still relevant and should be handled.
    ///
    /// Returns `Outcome::Skip` if we already voted to confirm this exact block and round,
    /// `Outcome::Accept` otherwise, and an error if the certificate is from a round that is
    /// too old to matter.
    pub fn check_validated_block(
        &self,
        certificate: &ValidatedBlockCertificate,
    ) -> Result<Outcome, ChainError> {
        let new_block = certificate.block();
        let new_round = certificate.round;
        if let Some(Vote { value, round, .. }) = self.confirmed_vote.get() {
            if value.block() == new_block && *round == new_round {
                return Ok(Outcome::Skip); // We already voted to confirm this block.
            }
        }
        // Check if we already voted to validate in a later round.
        if let Some(Vote { round, .. }) = self.validated_vote.get() {
            ensure!(new_round >= *round, ChainError::InsufficientRound(*round))
        }
        // A certificate that isn't strictly newer than the locking block brings no new
        // information.
        if let Some(locking) = self.locking_block.get() {
            ensure!(
                new_round > locking.round(),
                ChainError::InsufficientRoundStrict(locking.round())
            );
        }
        Ok(Outcome::Accept)
    }
    /// Signs a vote to validate the proposed block.
    ///
    /// Also records the proposal, updates the locking block where appropriate, and advances the
    /// current round. Returns `None` if we hold no validator key. For fast-round proposals this
    /// returns a confirmation vote (`Either::Right`); otherwise a validation vote
    /// (`Either::Left`).
    pub fn create_vote(
        &mut self,
        proposal: BlockProposal,
        block: Block,
        key_pair: Option<&ValidatorSecretKey>,
        local_time: Timestamp,
        blobs: BTreeMap<BlobId, Blob>,
    ) -> Result<Option<ValidatedOrConfirmedVote>, ChainError> {
        let round = proposal.content.round;
        match &proposal.original_proposal {
            // If the validated block certificate is more recent, update our locking block.
            Some(OriginalProposal::Regular { certificate }) => {
                if self
                    .locking_block
                    .get()
                    .as_ref()
                    .is_none_or(|locking| locking.round() < certificate.round)
                {
                    let value = ValidatedBlock::new(block.clone());
                    if let Some(certificate) = certificate.clone().with_value(value) {
                        self.update_locking(LockingBlock::Regular(certificate), blobs.clone())?;
                    }
                }
            }
            // If this contains a proposal from the fast round, we consider that a locking block.
            // It is useful for clients synchronizing with us, so they can re-propose it.
            Some(OriginalProposal::Fast(signature)) => {
                if self.locking_block.get().is_none() {
                    // Reconstruct the original fast proposal from its signature.
                    let original_proposal = BlockProposal {
                        signature: *signature,
                        ..proposal.clone()
                    };
                    self.update_locking(LockingBlock::Fast(original_proposal), blobs.clone())?;
                }
            }
            // If this proposal itself is from the fast round, it is also a locking block: We
            // will vote to confirm it, so it is locked.
            None => {
                if round.is_fast() && self.locking_block.get().is_none() {
                    // The fast block also counts as locking.
                    self.update_locking(LockingBlock::Fast(proposal.clone()), blobs.clone())?;
                }
            }
        }
        // We record the proposed block, in case it affects the current round number.
        self.update_proposed(proposal.clone(), blobs)?;
        self.update_current_round(local_time);
        let Some(key_pair) = key_pair else {
            // Not a validator.
            return Ok(None);
        };
        // If this is a fast block, vote to confirm. Otherwise vote to validate.
        if round.is_fast() {
            self.validated_vote.set(None);
            let value = ConfirmedBlock::new(block);
            let vote = Vote::new(value, round, key_pair);
            Ok(Some(Either::Right(
                self.confirmed_vote.get_mut().insert(vote),
            )))
        } else {
            let value = ValidatedBlock::new(block);
            let vote = Vote::new(value, round, key_pair);
            Ok(Some(Either::Left(
                self.validated_vote.get_mut().insert(vote),
            )))
        }
    }
    /// Signs a vote to confirm the validated block.
    ///
    /// Always updates the locking block and the current round. A confirmation vote is only cast
    /// if we hold a validator key and the certificate's round is still the current round.
    pub fn create_final_vote(
        &mut self,
        validated: ValidatedBlockCertificate,
        key_pair: Option<&ValidatorSecretKey>,
        local_time: Timestamp,
        blobs: BTreeMap<BlobId, Blob>,
    ) -> Result<(), ViewError> {
        let round = validated.round;
        let confirmed_block = ConfirmedBlock::new(validated.inner().block().clone());
        self.update_locking(LockingBlock::Regular(validated), blobs)?;
        self.update_current_round(local_time);
        if let Some(key_pair) = key_pair {
            if self.current_round() != round {
                return Ok(()); // We never vote in a past round.
            }
            // Vote to confirm.
            let vote = Vote::new(confirmed_block, round, key_pair);
            // Ok to overwrite validation votes with confirmation votes at equal or higher round.
            self.confirmed_vote.set(Some(vote));
            self.validated_vote.set(None);
        }
        Ok(())
    }
/// Returns the requested blob if it belongs to the proposal or the locking block.
pub async fn pending_blob(&self, blob_id: &BlobId) -> Result<Option<Blob>, ViewError> {
if let Some(blob) = self.proposed_blobs.get(blob_id).await? {
return Ok(Some(blob));
}
self.locking_blobs.get(blob_id).await
}
    /// Returns the requested blobs if they belong to the proposal or the locking block.
    ///
    /// Performs two batched lookups: first against the proposed block's blobs, then against the
    /// locking block's blobs for whatever the first pass did not find. The returned vector is
    /// positionally aligned with `blob_ids`.
    pub async fn pending_blobs(&self, blob_ids: &[BlobId]) -> Result<Vec<Option<Blob>>, ViewError> {
        let mut blobs = self.proposed_blobs.multi_get(blob_ids).await?;
        // Collect the positions and IDs that the first lookup missed.
        let mut missing_indices = Vec::new();
        let mut missing_blob_ids = Vec::new();
        for (i, (blob, blob_id)) in blobs.iter().zip(blob_ids).enumerate() {
            if blob.is_none() {
                missing_indices.push(i);
                missing_blob_ids.push(blob_id);
            }
        }
        // Fill the gaps from the locking block's blobs, preserving positions.
        let second_blobs = self.locking_blobs.multi_get(missing_blob_ids).await?;
        for (blob, i) in second_blobs.into_iter().zip(missing_indices) {
            blobs[i] = blob;
        }
        Ok(blobs)
    }
    /// Updates `current_round` and `round_timeout` if necessary.
    ///
    /// This must be called after every change to `timeout`, `locking`, `proposed` or
    /// `signed_proposal`.
    ///
    /// The current round starts at `Fast` if there is a super owner, `MultiLeader(0)` if at least
    /// one multi-leader round is configured, or otherwise `SingleLeader(0)`.
    ///
    /// Single-leader rounds can only be ended by a timeout certificate for that round.
    ///
    /// The presence of any validated block certificate is also proof that a quorum of validators
    /// is already in that round, even if we have not seen the corresponding timeout.
    ///
    /// Multi-leader rounds can always be skipped, so any correctly signed block proposal in a
    /// later round ends a multi-leader round.
    /// Since we don't accept proposals that violate that rule, we can compute the current round in
    /// general by taking the maximum of all the above.
    fn update_current_round(&mut self, local_time: Timestamp) {
        let current_round = self
            .timeout
            .get()
            .iter()
            // A timeout certificate starts the next round.
            .map(|certificate| {
                self.ownership
                    .get()
                    .next_round(certificate.round)
                    // If there is no next round, saturate at the highest validator round.
                    .unwrap_or(Round::Validator(u32::MAX))
            })
            // A locking block or a proposal is proof we have accepted that we are at least in
            // this round.
            .chain(self.locking_block.get().as_ref().map(LockingBlock::round))
            .chain(
                self.proposed
                    .get()
                    .iter()
                    .chain(self.signed_proposal.get())
                    .map(|proposal| proposal.content.round),
            )
            .max()
            .unwrap_or_default()
            // Otherwise compute the first round for this chain configuration.
            .max(self.ownership.get().first_round());
        // The current round never moves backwards.
        if current_round <= self.current_round() {
            return;
        }
        // Restart the timeout clock for the new round, if this round has a timeout.
        let round_duration = self.ownership.get().round_timeout(current_round);
        self.round_timeout
            .set(round_duration.map(|rd| local_time.saturating_add(rd)));
        self.current_round.set(current_round);
    }
/// Updates the round number and timer if the timeout certificate is from a higher round than
/// any known certificate.
pub fn handle_timeout_certificate(
&mut self,
certificate: TimeoutCertificate,
local_time: Timestamp,
) {
let round = certificate.round;
if let Some(known_certificate) = self.timeout.get() {
if known_certificate.round >= round {
return;
}
}
self.timeout.set(Some(certificate));
self.update_current_round(local_time);
}
    /// Returns whether the signer is a valid owner and allowed to propose a block in the
    /// proposal's round.
    ///
    /// Super owners can always propose, except in `Validator` rounds, but it is recommended that
    /// they don't interfere with single-leader rounds. In multi-leader rounds, any owner can
    /// propose (or anyone, if `open_multi_leader_rounds`) and in other rounds there is only
    /// one leader.
    pub fn can_propose(&self, owner: &AccountOwner, round: Round) -> bool {
        let ownership = self.ownership.get();
        if ownership.super_owners.contains(owner) {
            return !round.is_validator();
        }
        match round {
            // Only super owners may use the fast round, and they were handled above.
            Round::Fast => false,
            Round::MultiLeader(_) => ownership.is_multi_leader_owner(owner),
            Round::SingleLeader(_) | Round::Validator(_) => self.round_leader(round) == Some(owner),
        }
    }
    /// Returns the leader who is allowed to propose a block in the given round, or `None` if every
    /// owner is allowed to propose. Exception: In `Round::Fast`, only super owners can propose.
    fn round_leader(&self, round: Round) -> Option<&AccountOwner> {
        let ownership = self.ownership.get();
        // Leader selection is deterministic: it depends only on the round, the seed (derived
        // from the block height in `reset`), and the weighted owner distributions.
        compute_round_leader(
            round,
            *self.seed.get(),
            ownership.first_leader.as_ref(),
            &ownership.owners,
            self.distribution.get().as_ref(),
            self.fallback_owners.get(),
            self.fallback_distribution.get().as_ref(),
        )
    }
    /// Returns whether the owner is a super owner.
    fn is_super(&self, owner: &AccountOwner) -> bool {
        self.ownership.get().super_owners.contains(owner)
    }
    /// Sets the signed proposal, if it is newer than the known one, at most from the first
    /// single-leader round. Returns whether it was updated.
    ///
    /// We don't update the signed proposal for any rounds later than `SingleLeader(0)`,
    /// because single-leader rounds cannot be skipped without a timeout certificate.
    pub fn update_signed_proposal(
        &mut self,
        proposal: &BlockProposal,
        local_time: Timestamp,
    ) -> bool {
        if proposal.content.round > Round::SingleLeader(0) {
            return false;
        }
        if let Some(old_proposal) = self.signed_proposal.get() {
            if old_proposal.content.round >= proposal.content.round {
                // Defensive repair: if the stored proposal's round somehow got ahead of the
                // current round, recompute the current round from the stored state.
                if *self.current_round.get() < old_proposal.content.round {
                    tracing::warn!(
                        chain_id = %proposal.content.block.chain_id,
                        current_round = ?self.current_round.get(),
                        proposal_round = ?old_proposal.content.round,
                        "Proposal round is greater than current round. Updating."
                    );
                    self.update_current_round(local_time);
                    return true;
                }
                return false;
            }
        }
        // A regular proposal at an equal or later round also makes this one obsolete.
        if let Some(old_proposal) = self.proposed.get() {
            if old_proposal.content.round >= proposal.content.round {
                return false;
            }
        }
        self.signed_proposal.set(Some(proposal.clone()));
        self.update_current_round(local_time);
        true
    }
    /// Sets the proposed block, if it is newer than our known latest proposal.
    ///
    /// Also replaces the set of proposed blobs, and clears a signed proposal that this proposal
    /// supersedes.
    fn update_proposed(
        &mut self,
        proposal: BlockProposal,
        blobs: BTreeMap<BlobId, Blob>,
    ) -> Result<(), ViewError> {
        if let Some(old_proposal) = self.proposed.get() {
            if old_proposal.content.round >= proposal.content.round {
                return Ok(());
            }
        }
        // A signed proposal at an equal or earlier round is superseded by this one.
        if let Some(old_proposal) = self.signed_proposal.get() {
            if old_proposal.content.round <= proposal.content.round {
                self.signed_proposal.set(None);
            }
        }
        self.proposed.set(Some(proposal));
        // The blob set always belongs to the latest proposal only.
        self.proposed_blobs.clear();
        for (blob_id, blob) in blobs {
            self.proposed_blobs.insert(&blob_id, blob)?;
        }
        Ok(())
    }
/// Sets the locking block and the associated blobs, if it is newer than the known one.
fn update_locking(
&mut self,
locking: LockingBlock,
blobs: BTreeMap<BlobId, Blob>,
) -> Result<(), ViewError> {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
sync::Arc,
};
use allocative::Allocative;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey},
data_types::{
ApplicationDescription, ApplicationPermissions, ArithmeticError, Blob, BlockHeight, Epoch,
OracleResponse, Timestamp,
},
ensure,
identifiers::{AccountOwner, ApplicationId, BlobType, ChainId, StreamId},
ownership::ChainOwnership,
};
use linera_execution::{
committee::Committee, system::EPOCH_STREAM_NAME, ExecutionRuntimeContext, ExecutionStateView,
Message, Operation, OutgoingMessage, Query, QueryContext, QueryOutcome, ResourceController,
ResourceTracker, ServiceRuntimeEndpoint, TransactionTracker,
};
use linera_views::{
context::Context,
log_view::LogView,
map_view::MapView,
reentrant_collection_view::{ReadGuardedView, ReentrantCollectionView},
register_view::RegisterView,
views::{ClonableView, RootView, View},
};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::{
block::{Block, ConfirmedBlock},
block_tracker::BlockExecutionTracker,
data_types::{
BlockExecutionOutcome, ChainAndHeight, IncomingBundle, MessageBundle, ProposedBlock,
Transaction,
},
inbox::{InboxError, InboxStateView},
manager::ChainManager,
outbox::OutboxStateView,
pending_blobs::PendingBlobsView,
ChainError, ChainExecutionContext, ExecutionError, ExecutionResultExt,
};
#[cfg(test)]
#[path = "unit_tests/chain_tests.rs"]
mod chain_tests;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency;
#[cfg(with_metrics)]
pub(crate) mod metrics {
    use std::sync::LazyLock;

    use linera_base::prometheus_util::{
        exponential_bucket_interval, exponential_bucket_latencies, register_histogram_vec,
        register_int_counter_vec,
    };
    use linera_execution::ResourceTracker;
    use prometheus::{HistogramVec, IntCounterVec};

    /// Counter of blocks executed by this node.
    pub static NUM_BLOCKS_EXECUTED: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec("num_blocks_executed", "Number of blocks executed", &[])
    });

    /// Latency of executing a whole block.
    pub static BLOCK_EXECUTION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "block_execution_latency",
            "Block execution latency",
            &[],
            exponential_bucket_interval(50.0_f64, 10_000_000.0),
        )
    });

    /// Latency of executing a single message.
    // The inner `#[cfg(with_metrics)]` that used to guard this static was removed: the whole
    // module is already compiled under that flag, so the attribute was redundant and
    // inconsistent with the sibling statics.
    pub static MESSAGE_EXECUTION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "message_execution_latency",
            "Message execution latency",
            &[],
            exponential_bucket_interval(0.1_f64, 50_000.0),
        )
    });

    /// Latency of executing a single operation.
    pub static OPERATION_EXECUTION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "operation_execution_latency",
            "Operation execution latency",
            &[],
            exponential_bucket_interval(0.1_f64, 50_000.0),
        )
    });

    /// Wasm fuel consumed per block.
    pub static WASM_FUEL_USED_PER_BLOCK: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "wasm_fuel_used_per_block",
            "Wasm fuel used per block",
            &[],
            exponential_bucket_interval(10.0, 1_000_000.0),
        )
    });

    /// EVM fuel consumed per block.
    pub static EVM_FUEL_USED_PER_BLOCK: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "evm_fuel_used_per_block",
            "EVM fuel used per block",
            &[],
            exponential_bucket_interval(10.0, 1_000_000.0),
        )
    });

    /// Number of VM read operations per block.
    pub static VM_NUM_READS_PER_BLOCK: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "vm_num_reads_per_block",
            "VM number of reads per block",
            &[],
            exponential_bucket_interval(0.1, 100.0),
        )
    });

    /// Number of bytes read by the VM per block.
    pub static VM_BYTES_READ_PER_BLOCK: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "vm_bytes_read_per_block",
            "VM number of bytes read per block",
            &[],
            exponential_bucket_interval(0.1, 10_000_000.0),
        )
    });

    /// Number of bytes written by the VM per block.
    pub static VM_BYTES_WRITTEN_PER_BLOCK: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "vm_bytes_written_per_block",
            "VM number of bytes written per block",
            &[],
            exponential_bucket_interval(0.1, 10_000_000.0),
        )
    });

    /// Latency of recomputing the execution state hash.
    pub static STATE_HASH_COMPUTATION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "state_hash_computation_latency",
            "Time to recompute the state hash",
            &[],
            exponential_bucket_latencies(500.0),
        )
    });

    /// Number of inboxes per chain.
    pub static NUM_INBOXES: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "num_inboxes",
            "Number of inboxes",
            &[],
            exponential_bucket_interval(1.0, 10_000.0),
        )
    });

    /// Number of outboxes per chain.
    pub static NUM_OUTBOXES: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "num_outboxes",
            "Number of outboxes",
            &[],
            exponential_bucket_interval(1.0, 10_000.0),
        )
    });

    /// Tracks block execution metrics in Prometheus.
    pub(crate) fn track_block_metrics(tracker: &ResourceTracker) {
        NUM_BLOCKS_EXECUTED.with_label_values(&[]).inc();
        WASM_FUEL_USED_PER_BLOCK
            .with_label_values(&[])
            .observe(tracker.wasm_fuel as f64);
        EVM_FUEL_USED_PER_BLOCK
            .with_label_values(&[])
            .observe(tracker.evm_fuel as f64);
        VM_NUM_READS_PER_BLOCK
            .with_label_values(&[])
            .observe(tracker.read_operations as f64);
        VM_BYTES_READ_PER_BLOCK
            .with_label_values(&[])
            .observe(tracker.bytes_read as f64);
        VM_BYTES_WRITTEN_PER_BLOCK
            .with_label_values(&[])
            .observe(tracker.bytes_written as f64);
    }
}
/// The BCS-serialized size of an empty [`Block`].
// NOTE(review): this magic number must stay in sync with the `Block` layout — confirm it is
// covered by a serialization test whenever `Block` changes.
pub(crate) const EMPTY_BLOCK_SIZE: usize = 94;
/// A view accessing the state of a chain.
#[cfg_attr(
    with_graphql,
    derive(async_graphql::SimpleObject),
    graphql(cache_control(no_cache))
)]
#[derive(Debug, RootView, ClonableView, Allocative)]
#[allocative(bound = "C")]
pub struct ChainStateView<C>
where
    C: Clone + Context + 'static,
{
    /// Execution state, including system and user applications.
    pub execution_state: ExecutionStateView<C>,
    /// Hash of the execution state.
    pub execution_state_hash: RegisterView<C, Option<CryptoHash>>,
    /// Block-chaining state.
    pub tip_state: RegisterView<C, ChainTipState>,
    /// Consensus state.
    pub manager: ChainManager<C>,
    /// The incomplete set of blobs for the pending validated block that is still missing blobs.
    pub pending_validated_blobs: PendingBlobsView<C>,
    /// The incomplete sets of blobs for upcoming proposals, keyed by proposal owner.
    pub pending_proposed_blobs: ReentrantCollectionView<C, AccountOwner, PendingBlobsView<C>>,
    /// Hashes of all certified blocks for this sender.
    /// This ends with `block_hash` and has length `usize::from(next_block_height)`.
    pub confirmed_log: LogView<C, CryptoHash>,
    /// Sender chain and height of all certified blocks known as a receiver (local ordering).
    pub received_log: LogView<C, ChainAndHeight>,
    /// The number of `received_log` entries we have synchronized, for each validator.
    pub received_certificate_trackers: RegisterView<C, HashMap<ValidatorPublicKey, u64>>,
    /// Mailboxes used to receive messages indexed by their origin.
    pub inboxes: ReentrantCollectionView<C, ChainId, InboxStateView<C>>,
    /// Mailboxes used to send messages, indexed by their target.
    pub outboxes: ReentrantCollectionView<C, ChainId, OutboxStateView<C>>,
    /// The indices of next events we expect to see per stream (could be ahead of the last
    /// executed block in sparse chains).
    pub next_expected_events: MapView<C, StreamId, u32>,
    /// Number of outgoing messages in flight for each block height.
    /// We use a `RegisterView` to prioritize speed for small maps.
    pub outbox_counters: RegisterView<C, BTreeMap<BlockHeight, u32>>,
    /// Outboxes with at least one pending message. This allows us to avoid loading all outboxes.
    pub nonempty_outboxes: RegisterView<C, BTreeSet<ChainId>>,
    /// Blocks that have been verified but not executed yet, and that may not be contiguous.
    pub preprocessed_blocks: MapView<C, BlockHeight, CryptoHash>,
}
/// Block-chaining state.
#[cfg_attr(with_graphql, derive(async_graphql::SimpleObject))]
#[derive(Debug, Default, Clone, Eq, PartialEq, Serialize, Deserialize, Allocative)]
pub struct ChainTipState {
    /// Hash of the latest certified block in this chain, if any.
    pub block_hash: Option<CryptoHash>,
    /// Sequence number tracking blocks.
    pub next_block_height: BlockHeight,
    /// Number of incoming message bundles (cumulative; see `update_counters`).
    pub num_incoming_bundles: u32,
    /// Number of operations (cumulative; see `update_counters`).
    pub num_operations: u32,
    /// Number of outgoing messages (cumulative; see `update_counters`).
    pub num_outgoing_messages: u32,
}
impl ChainTipState {
    /// Checks that the proposed block is suitable, i.e. at the expected height and with the
    /// expected parent.
    pub fn verify_block_chaining(&self, new_block: &ProposedBlock) -> Result<(), ChainError> {
        ensure!(
            new_block.height == self.next_block_height,
            ChainError::UnexpectedBlockHeight {
                expected_block_height: self.next_block_height,
                found_block_height: new_block.height
            }
        );
        ensure!(
            new_block.previous_block_hash == self.block_hash,
            ChainError::UnexpectedPreviousBlockHash
        );
        Ok(())
    }

    /// Returns `true` if the validated block's height is below the tip height. Returns an error if
    /// it is higher than the tip.
    pub fn already_validated_block(&self, height: BlockHeight) -> Result<bool, ChainError> {
        ensure!(
            self.next_block_height >= height,
            ChainError::MissingEarlierBlocks {
                current_block_height: self.next_block_height,
            }
        );
        Ok(self.next_block_height > height)
    }

    /// Adds the given transactions and messages to the tip's cumulative counters.
    ///
    /// Returns `ArithmeticError::Overflow` if any counter would exceed `u32::MAX`.
    /// (The old doc comment claimed this only "checks" validity; it updates `self` in place.)
    pub fn update_counters(
        &mut self,
        transactions: &[Transaction],
        messages: &[Vec<OutgoingMessage>],
    ) -> Result<(), ChainError> {
        // Count each transaction kind. `try_from` fails exactly when a count exceeds
        // `u32::MAX`, matching the per-item `checked_add` of the previous implementation.
        let num_incoming_bundles = u32::try_from(
            transactions
                .iter()
                .filter(|transaction| matches!(transaction, Transaction::ReceiveMessages(_)))
                .count(),
        )
        .map_err(|_| ArithmeticError::Overflow)?;
        let num_operations = u32::try_from(
            transactions
                .iter()
                .filter(|transaction| matches!(transaction, Transaction::ExecuteOperation(_)))
                .count(),
        )
        .map_err(|_| ArithmeticError::Overflow)?;
        self.num_incoming_bundles = self
            .num_incoming_bundles
            .checked_add(num_incoming_bundles)
            .ok_or(ArithmeticError::Overflow)?;
        self.num_operations = self
            .num_operations
            .checked_add(num_operations)
            .ok_or(ArithmeticError::Overflow)?;
        let num_outgoing_messages = u32::try_from(messages.iter().map(Vec::len).sum::<usize>())
            .map_err(|_| ArithmeticError::Overflow)?;
        self.num_outgoing_messages = self
            .num_outgoing_messages
            .checked_add(num_outgoing_messages)
            .ok_or(ArithmeticError::Overflow)?;
        Ok(())
    }
}
impl<C> ChainStateView<C>
where
C: Context + Clone + 'static,
C::Extra: ExecutionRuntimeContext,
{
    /// Returns the [`ChainId`] of the chain this [`ChainStateView`] represents.
    pub fn chain_id(&self) -> ChainId {
        // The chain ID is carried by the storage context's extra data, not stored in the view.
        self.context().extra().chain_id()
    }
    /// Runs a service query against this chain's execution state.
    ///
    /// The query context uses the chain's next block height and the caller-supplied
    /// `local_time`. Errors are wrapped with `ChainExecutionContext::Query`.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
    ))]
    pub async fn query_application(
        &mut self,
        local_time: Timestamp,
        query: Query,
        service_runtime_endpoint: Option<&mut ServiceRuntimeEndpoint>,
    ) -> Result<QueryOutcome, ChainError> {
        let context = QueryContext {
            chain_id: self.chain_id(),
            next_block_height: self.tip_state.get().next_block_height,
            local_time,
        };
        self.execution_state
            .query_application(context, query, service_runtime_endpoint)
            .await
            .with_execution_context(ChainExecutionContext::Query)
    }
    /// Looks up the description of the given application in the chain's system state.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        application_id = %application_id
    ))]
    pub async fn describe_application(
        &mut self,
        application_id: ApplicationId,
    ) -> Result<ApplicationDescription, ChainError> {
        // A throwaway transaction tracker is used: this lookup is not part of block execution.
        self.execution_state
            .system
            .describe_application(application_id, &mut TransactionTracker::default())
            .await
            .with_execution_context(ChainExecutionContext::DescribeApplication)
    }
    /// Marks the outgoing messages to `target` up to `height` as received, updating the
    /// per-height in-flight counters. Returns whether any outbox entry was actually removed.
    ///
    /// An outbox that becomes empty (and is not scheduled ahead of the executed blocks) is
    /// deleted entirely.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        target = %target,
        height = %height
    ))]
    pub async fn mark_messages_as_received(
        &mut self,
        target: &ChainId,
        height: BlockHeight,
    ) -> Result<bool, ChainError> {
        let mut outbox = self.outboxes.try_load_entry_mut(target).await?;
        let updates = outbox.mark_messages_as_received(height).await?;
        if updates.is_empty() {
            return Ok(false);
        }
        // Decrement the in-flight counter for each delivered height.
        for update in updates {
            let counter = self
                .outbox_counters
                .get_mut()
                .get_mut(&update)
                .ok_or_else(|| {
                    ChainError::InternalError("message counter should be present".into())
                })?;
            *counter = counter.checked_sub(1).ok_or(ArithmeticError::Underflow)?;
            if *counter == 0 {
                // Important for the test in `all_messages_delivered_up_to`.
                self.outbox_counters.get_mut().remove(&update);
            }
        }
        if outbox.queue.count() == 0 {
            self.nonempty_outboxes.get_mut().remove(target);
            // If the outbox is empty and not ahead of the executed blocks, remove it.
            if *outbox.next_height_to_schedule.get() <= self.tip_state.get().next_block_height {
                self.outboxes.remove_entry(target)?;
            }
        }
        #[cfg(with_metrics)]
        metrics::NUM_OUTBOXES
            .with_label_values(&[])
            .observe(self.outboxes.count().await? as f64);
        Ok(true)
    }
/// Returns true if there are no more outgoing messages in flight up to the given
/// block height.
pub fn all_messages_delivered_up_to(&self, height: BlockHeight) -> bool {
tracing::debug!(
"Messages left in {:.8}'s outbox: {:?}",
self.chain_id(),
self.outbox_counters.get()
);
if let Some((key, _)) = self.outbox_counters.get().first_key_value() {
key > &height
} else {
true
}
}
    /// Invariant for the states of active chains.
    ///
    /// Delegates to the system execution state's notion of being active.
    pub fn is_active(&self) -> bool {
        self.execution_state.system.is_active()
    }
    /// Initializes the chain if it is not active yet.
    ///
    /// On first initialization this recomputes the execution state hash and resets the
    /// consensus manager from the chain's ownership; if the chain was already initialized it
    /// returns early without changing anything.
    pub async fn initialize_if_needed(&mut self, local_time: Timestamp) -> Result<(), ChainError> {
        let chain_id = self.chain_id();
        // Initialize ourselves.
        if self
            .execution_state
            .system
            .initialize_chain(chain_id)
            .await
            .with_execution_context(ChainExecutionContext::Block)?
        {
            // The chain was already initialized.
            return Ok(());
        }
        // Recompute the state hash.
        let hash = self.execution_state.crypto_hash_mut().await?;
        self.execution_state_hash.set(Some(hash));
        let maybe_committee = self.execution_state.system.current_committee().into_iter();
        // Last, reset the consensus state based on the current ownership.
        self.manager.reset(
            self.execution_state.system.ownership.get().clone(),
            BlockHeight(0),
            local_time,
            maybe_committee.flat_map(|(_, committee)| committee.account_keys_and_weights()),
        )?;
        Ok(())
    }
pub async fn next_block_height_to_receive(
&self,
origin: &ChainId,
) -> Result<BlockHeight, ChainError> {
let inbox = self.inboxes.try_load_entry(origin).await?;
match inbox {
Some(inbox) => inbox.next_block_height_to_receive(),
None => Ok(BlockHeight::ZERO),
}
}
/// Returns the height of the highest block we have, plus one. Includes preprocessed blocks.
///
/// The "+ 1" is so that it can be used in the same places as `next_block_height`.
pub async fn next_height_to_preprocess(&self) -> Result<BlockHeight, ChainError> {
if let Some(height) = self.preprocessed_blocks.indices().await?.last() {
return Ok(height.saturating_add(BlockHeight(1)));
}
Ok(self.tip_state.get().next_block_height)
}
pub async fn last_anticipated_block_height(
&self,
origin: &ChainId,
) -> Result<Option<BlockHeight>, ChainError> {
let inbox = self.inboxes.try_load_entry(origin).await?;
match inbox {
Some(inbox) => match inbox.removed_bundles.back().await? {
Some(bundle) => Ok(Some(bundle.height)),
None => Ok(None),
},
None => Ok(None),
}
}
    /// Attempts to process a new `bundle` of messages from the given `origin`. Returns an
    /// internal error if the bundle doesn't appear to be new, based on the sender's
    /// height. The value `local_time` is specific to each validator and only used for
    /// round timeouts.
    ///
    /// If `add_to_received_log` is `true`, the certificate's chain and height are
    /// recorded in `received_log` for future validator/client synchronizations.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        origin = %origin,
        bundle_height = %bundle.height
    ))]
    pub async fn receive_message_bundle(
        &mut self,
        origin: &ChainId,
        bundle: MessageBundle,
        local_time: Timestamp,
        add_to_received_log: bool,
    ) -> Result<(), ChainError> {
        // Callers must never pass an empty bundle; this is an internal invariant.
        assert!(!bundle.messages.is_empty());
        let chain_id = self.chain_id();
        tracing::trace!(
            "Processing new messages to {chain_id:.8} from {origin} at height {}",
            bundle.height,
        );
        let chain_and_height = ChainAndHeight {
            chain_id: *origin,
            height: bundle.height,
        };
        match self.initialize_if_needed(local_time).await {
            Ok(_) => (),
            // if the only issue was that we couldn't initialize the chain because of a
            // missing chain description blob, we might still want to update the inbox
            Err(ChainError::ExecutionError(exec_err, _))
                if matches!(*exec_err, ExecutionError::BlobsNotFound(ref blobs)
                    if blobs.iter().all(|blob_id| {
                        blob_id.blob_type == BlobType::ChainDescription && blob_id.hash == chain_id.0
                    })) => {}
            // Any other initialization failure is propagated as-is.
            err => {
                return err;
            }
        }
        // Process the inbox bundle and update the inbox state.
        let mut inbox = self.inboxes.try_load_entry_mut(origin).await?;
        #[cfg(with_metrics)]
        metrics::NUM_INBOXES
            .with_label_values(&[])
            .observe(self.inboxes.count().await? as f64);
        inbox
            .add_bundle(bundle)
            .await
            .map_err(|error| match error {
                InboxError::ViewError(error) => ChainError::ViewError(error),
                error => ChainError::InternalError(format!(
                    "while processing messages in certified block: {error}"
                )),
            })?;
        // Remember the certificate for future validator/client synchronizations.
        if add_to_received_log {
            self.received_log.push(chain_and_height);
        }
        Ok(())
    }
/// Updates the `received_log` trackers.
pub fn update_received_certificate_trackers(
&mut self,
new_trackers: BTreeMap<ValidatorPublicKey, u64>,
) {
for (name, tracker) in new_trackers {
self.received_certificate_trackers
.get_mut()
.entry(name)
.and_modify(|t| {
// Because several synchronizations could happen in parallel, we need to make
// sure to never go backward.
if tracker > *t {
*t = tracker;
}
})
.or_insert(tracker);
}
}
    /// Returns the current epoch and committee of this chain, or
    /// [`ChainError::InactiveChain`] if the chain has no current committee.
    pub fn current_committee(&self) -> Result<(Epoch, &Committee), ChainError> {
        self.execution_state
            .system
            .current_committee()
            .ok_or_else(|| ChainError::InactiveChain(self.chain_id()))
    }
    /// Returns the current ownership configuration of this chain.
    pub fn ownership(&self) -> &ChainOwnership {
        self.execution_state.system.ownership.get()
    }
    /// Removes the incoming message bundles in the block from the inboxes.
    ///
    /// If `must_be_present` is `true`, an error is returned if any of the bundles have not been
    /// added to the inbox yet. So this should be `true` if the bundles are in a block _proposal_,
    /// and `false` if the block is already confirmed.
    ///
    /// Also verifies that no bundle has a timestamp later than the block `timestamp`.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
    ))]
    pub async fn remove_bundles_from_inboxes(
        &mut self,
        timestamp: Timestamp,
        must_be_present: bool,
        incoming_bundles: impl IntoIterator<Item = &IncomingBundle>,
    ) -> Result<(), ChainError> {
        let chain_id = self.chain_id();
        // Group bundles by origin so each origin's inbox is loaded only once, in batch.
        let mut bundles_by_origin: BTreeMap<_, Vec<&MessageBundle>> = Default::default();
        for IncomingBundle { bundle, origin, .. } in incoming_bundles {
            ensure!(
                bundle.timestamp <= timestamp,
                ChainError::IncorrectBundleTimestamp {
                    chain_id,
                    bundle_timestamp: bundle.timestamp,
                    block_timestamp: timestamp,
                }
            );
            let bundles = bundles_by_origin.entry(*origin).or_default();
            bundles.push(bundle);
        }
        let origins = bundles_by_origin.keys().copied().collect::<Vec<_>>();
        let inboxes = self.inboxes.try_load_entries_mut(&origins).await?;
        // `try_load_entries_mut` returns inboxes in the order of `origins`, which matches
        // the iteration order of the `BTreeMap`, so zipping pairs them up correctly.
        for ((origin, bundles), mut inbox) in bundles_by_origin.into_iter().zip(inboxes) {
            tracing::trace!(
                "Removing [{}] from {chain_id:.8}'s inbox for {origin:}",
                bundles
                    .iter()
                    .map(|bundle| bundle.height.to_string())
                    .collect::<Vec<_>>()
                    .join(", ")
            );
            for bundle in bundles {
                // Mark the message as processed in the inbox.
                let was_present = inbox
                    .remove_bundle(bundle)
                    .await
                    .map_err(|error| (chain_id, origin, error))?;
                if must_be_present {
                    ensure!(
                        was_present,
                        ChainError::MissingCrossChainUpdate {
                            chain_id,
                            origin,
                            height: bundle.height,
                        }
                    );
                }
            }
        }
        #[cfg(with_metrics)]
        metrics::NUM_INBOXES
            .with_label_values(&[])
            .observe(self.inboxes.count().await? as f64);
        Ok(())
    }
    /// Returns the chain IDs of all recipients for which a message is waiting in the outbox.
    ///
    /// `mark_messages_as_received` removes a target from this set once its outbox queue
    /// becomes empty.
    pub fn nonempty_outbox_chain_ids(&self) -> Vec<ChainId> {
        self.nonempty_outboxes.get().iter().copied().collect()
    }
/// Returns the outboxes for the given targets, or an error if any of them are missing.
pub async fn load_outboxes(
&self,
targets: &[ChainId],
) -> Result<Vec<ReadGuardedView<OutboxStateView<C>>>, ChainError> {
let vec_of_options = self.outboxes.try_load_entries(targets).await?;
let optional_vec = vec_of_options.into_iter().collect::<Option<Vec<_>>>();
optional_vec.ok_or_else(|| ChainError::InternalError("Missing outboxes".into()))
}
    /// Executes a block: first the incoming messages, then the main operation.
    /// Does not update chain state other than the execution state.
    ///
    /// `round` is the consensus round the block is proposed in, if any; `published_blobs`
    /// are the blobs published by the block (size-checked against the committee policy);
    /// `replaying_oracle_responses`, when set, replays recorded oracle responses per
    /// transaction instead of querying oracles.
    #[instrument(skip_all, fields(
        chain_id = %block.chain_id,
        block_height = %block.height
    ))]
    async fn execute_block_inner(
        chain: &mut ExecutionStateView<C>,
        confirmed_log: &LogView<C, CryptoHash>,
        block: &ProposedBlock,
        local_time: Timestamp,
        round: Option<u32>,
        published_blobs: &[Blob],
        replaying_oracle_responses: Option<Vec<Vec<OracleResponse>>>,
    ) -> Result<BlockExecutionOutcome, ChainError> {
        #[cfg(with_metrics)]
        let _execution_latency = metrics::BLOCK_EXECUTION_LATENCY.measure_latency_us();
        chain.system.timestamp.set(block.timestamp);
        // Resource limits come from the current committee's policy.
        let policy = chain
            .system
            .current_committee()
            .ok_or_else(|| ChainError::InactiveChain(block.chain_id))?
            .1
            .policy()
            .clone();
        let mut resource_controller = ResourceController::new(
            Arc::new(policy),
            ResourceTracker::default(),
            block.authenticated_owner,
        );
        for blob in published_blobs {
            let blob_id = blob.id();
            resource_controller
                .policy()
                .check_blob_size(blob.content())
                .with_execution_context(ChainExecutionContext::Block)?;
            chain.system.used_blobs.insert(&blob_id)?;
        }
        // Execute each incoming bundle as a transaction, then each operation.
        // Collect messages, events and oracle responses, each as one list per transaction.
        let mut block_execution_tracker = BlockExecutionTracker::new(
            &mut resource_controller,
            published_blobs
                .iter()
                .map(|blob| (blob.id(), blob))
                .collect(),
            local_time,
            replaying_oracle_responses,
            block,
        )?;
        for transaction in block.transaction_refs() {
            block_execution_tracker
                .execute_transaction(transaction, round, chain)
                .await?;
        }
        // For each message recipient, record this block as the latest sender and look up
        // the previous sending block's height (if any) to resolve its hash below.
        let recipients = block_execution_tracker.recipients();
        let mut recipient_heights = Vec::new();
        let mut indices = Vec::new();
        for (recipient, height) in chain
            .previous_message_blocks
            .multi_get_pairs(recipients)
            .await?
        {
            chain
                .previous_message_blocks
                .insert(&recipient, block.height)?;
            if let Some(height) = height {
                let index = usize::try_from(height.0).map_err(|_| ArithmeticError::Overflow)?;
                indices.push(index);
                recipient_heights.push((recipient, height));
            }
        }
        // The confirmed log is indexed by block height, so the heights collected above
        // directly index the hashes of the previous sending blocks.
        let hashes = confirmed_log.multi_get(indices).await?;
        let mut previous_message_blocks = BTreeMap::new();
        for (hash, (recipient, height)) in hashes.into_iter().zip(recipient_heights) {
            let hash = hash.ok_or_else(|| {
                ChainError::InternalError("missing entry in confirmed_log".into())
            })?;
            previous_message_blocks.insert(recipient, (hash, height));
        }
        // Same resolution as above, but for event streams instead of message recipients.
        let streams = block_execution_tracker.event_streams();
        let mut stream_heights = Vec::new();
        let mut indices = Vec::new();
        for (stream, height) in chain.previous_event_blocks.multi_get_pairs(streams).await? {
            chain.previous_event_blocks.insert(&stream, block.height)?;
            if let Some(height) = height {
                let index = usize::try_from(height.0).map_err(|_| ArithmeticError::Overflow)?;
                indices.push(index);
                stream_heights.push((stream, height));
            }
        }
        let hashes = confirmed_log.multi_get(indices).await?;
        let mut previous_event_blocks = BTreeMap::new();
        for (hash, (stream, height)) in hashes.into_iter().zip(stream_heights) {
            let hash = hash.ok_or_else(|| {
                ChainError::InternalError("missing entry in confirmed_log".into())
            })?;
            previous_event_blocks.insert(stream, (hash, height));
        }
        // Hash the post-execution state; this becomes the block header's `state_hash`.
        let state_hash = {
            #[cfg(with_metrics)]
            let _hash_latency = metrics::STATE_HASH_COMPUTATION_LATENCY.measure_latency();
            chain.crypto_hash_mut().await?
        };
        let (messages, oracle_responses, events, blobs, operation_results) =
            block_execution_tracker.finalize();
        Ok(BlockExecutionOutcome {
            messages,
            previous_message_blocks,
            previous_event_blocks,
            state_hash,
            oracle_responses,
            events,
            blobs,
            operation_results,
        })
    }
    /// Executes a block: first the incoming messages, then the main operation.
    /// Does not update chain state other than the execution state.
    ///
    /// Validates preconditions before delegating to `execute_block_inner`: the block
    /// must target this chain, its timestamp must not precede the chain's, it must not
    /// be empty, `published_blobs` must match the block's published blob IDs, a closed
    /// chain only accepts rejected messages, and application permissions must allow the
    /// block's contents.
    #[instrument(skip_all, fields(
        chain_id = %self.chain_id(),
        block_height = %block.height
    ))]
    pub async fn execute_block(
        &mut self,
        block: &ProposedBlock,
        local_time: Timestamp,
        round: Option<u32>,
        published_blobs: &[Blob],
        replaying_oracle_responses: Option<Vec<Vec<OracleResponse>>>,
    ) -> Result<BlockExecutionOutcome, ChainError> {
        // Internal invariant: callers route blocks to the right chain state.
        assert_eq!(
            block.chain_id,
            self.execution_state.context().extra().chain_id()
        );
        self.initialize_if_needed(local_time).await?;
        let chain_timestamp = *self.execution_state.system.timestamp.get();
        ensure!(
            chain_timestamp <= block.timestamp,
            ChainError::InvalidBlockTimestamp {
                parent: chain_timestamp,
                new: block.timestamp
            }
        );
        ensure!(!block.transactions.is_empty(), ChainError::EmptyBlock);
        // The provided blobs must be exactly the ones the block claims to publish.
        ensure!(
            block.published_blob_ids()
                == published_blobs
                    .iter()
                    .map(|blob| blob.id())
                    .collect::<BTreeSet<_>>(),
            ChainError::InternalError("published_blobs mismatch".to_string())
        );
        if *self.execution_state.system.closed.get() {
            ensure!(block.has_only_rejected_messages(), ChainError::ClosedChain);
        }
        Self::check_app_permissions(
            self.execution_state.system.application_permissions.get(),
            block,
        )?;
        Self::execute_block_inner(
            &mut self.execution_state,
            &self.confirmed_log,
            block,
            local_time,
            round,
            published_blobs,
            replaying_oracle_responses,
        )
        .await
    }
/// Applies an execution outcome to the chain, updating the outboxes, state hash and chain
/// manager. This does not touch the execution state itself, which must be updated separately.
/// Returns the set of event streams that were updated as a result of applying the block.
#[instrument(skip_all, fields(
chain_id = %self.chain_id(),
block_height = %block.inner().inner().header.height
))]
pub async fn apply_confirmed_block(
&mut self,
block: &ConfirmedBlock,
local_time: Timestamp,
) -> Result<BTreeSet<StreamId>, ChainError> {
let hash = block.inner().hash();
let block = block.inner().inner();
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/block.rs | linera-chain/src/block.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet},
fmt::Debug,
};
use allocative::Allocative;
use async_graphql::SimpleObject;
use linera_base::{
crypto::{BcsHashable, CryptoHash},
data_types::{Blob, BlockHeight, Epoch, Event, OracleResponse, Timestamp},
hashed::Hashed,
identifiers::{AccountOwner, BlobId, BlobType, ChainId, StreamId},
};
use linera_execution::{BlobState, Operation, OutgoingMessage};
use serde::{ser::SerializeStruct, Deserialize, Serialize};
use thiserror::Error;
use crate::{
data_types::{
BlockExecutionOutcome, IncomingBundle, MessageBundle, OperationResult, OutgoingMessageExt,
ProposedBlock, Transaction,
},
types::CertificateValue,
};
/// Wrapper around a `Block` that has been validated.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Allocative)]
#[serde(transparent)]
pub struct ValidatedBlock(Hashed<Block>);
impl ValidatedBlock {
    /// Creates a new `ValidatedBlock` from a `Block`.
    pub fn new(block: Block) -> Self {
        Self(Hashed::new(block))
    }
    /// Wraps an already-hashed block as a `ValidatedBlock`.
    pub fn from_hashed(block: Hashed<Block>) -> Self {
        Self(block)
    }
    /// Returns a reference to the hashed block wrapped by this `ValidatedBlock`.
    pub fn inner(&self) -> &Hashed<Block> {
        &self.0
    }
    /// Returns a reference to the [`Block`] contained in this `ValidatedBlock`.
    pub fn block(&self) -> &Block {
        self.0.inner()
    }
    /// Consumes this `ValidatedBlock`, returning the [`Block`] it contains.
    pub fn into_inner(self) -> Block {
        self.0.into_inner()
    }
    /// Returns the static label used when logging this certificate value kind.
    pub fn to_log_str(&self) -> &'static str {
        "validated_block"
    }
    /// Returns the ID of the chain this block belongs to.
    pub fn chain_id(&self) -> ChainId {
        self.0.inner().header.chain_id
    }
    /// Returns the height of this block.
    pub fn height(&self) -> BlockHeight {
        self.0.inner().header.height
    }
    /// Returns the epoch this block was created in.
    pub fn epoch(&self) -> Epoch {
        self.0.inner().header.epoch
    }
}
/// Wrapper around a `Block` that has been confirmed.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Allocative)]
#[serde(transparent)]
pub struct ConfirmedBlock(Hashed<Block>);
// GraphQL resolvers for `ConfirmedBlock`; results are never cached.
#[async_graphql::Object(cache_control(no_cache))]
impl ConfirmedBlock {
    /// The wrapped block, exposed as the `block` field.
    #[graphql(derived(name = "block"))]
    async fn _block(&self) -> Block {
        self.0.inner().clone()
    }
    /// The status of this block; always `"confirmed"` for this type.
    async fn status(&self) -> String {
        "confirmed".to_string()
    }
    /// The cryptographic hash of this block.
    async fn hash(&self) -> CryptoHash {
        self.0.hash()
    }
}
impl ConfirmedBlock {
    /// Creates a new `ConfirmedBlock` by hashing the given `Block`.
    pub fn new(block: Block) -> Self {
        Self(Hashed::new(block))
    }
    /// Wraps an already-hashed block as a `ConfirmedBlock`.
    pub fn from_hashed(block: Hashed<Block>) -> Self {
        Self(block)
    }
    /// Returns a reference to the hashed block wrapped by this `ConfirmedBlock`.
    pub fn inner(&self) -> &Hashed<Block> {
        &self.0
    }
    /// Consumes this `ConfirmedBlock`, returning the hashed block it contains.
    pub fn into_inner(self) -> Hashed<Block> {
        self.0
    }
    /// Returns a reference to the `Block` contained in this `ConfirmedBlock`.
    pub fn block(&self) -> &Block {
        self.0.inner()
    }
    /// Consumes this `ConfirmedBlock`, returning the `Block` it contains.
    pub fn into_block(self) -> Block {
        self.0.into_inner()
    }
    /// Returns the ID of the chain this block belongs to.
    pub fn chain_id(&self) -> ChainId {
        self.0.inner().header.chain_id
    }
    /// Returns the height of this block.
    pub fn height(&self) -> BlockHeight {
        self.0.inner().header.height
    }
    /// Returns the timestamp of this block.
    pub fn timestamp(&self) -> Timestamp {
        self.0.inner().header.timestamp
    }
    /// Returns the static label used when logging this certificate value kind.
    pub fn to_log_str(&self) -> &'static str {
        "confirmed_block"
    }
    /// Returns whether this block matches the proposal.
    pub fn matches_proposed_block(&self, block: &ProposedBlock) -> bool {
        self.block().matches_proposed_block(block)
    }
    /// Returns a blob state that applies to all blobs used by this block.
    ///
    /// When `is_stored_block` is `false`, `last_used_by` and `epoch` are left unset.
    pub fn to_blob_state(&self, is_stored_block: bool) -> BlobState {
        BlobState {
            last_used_by: is_stored_block.then_some(self.0.hash()),
            chain_id: self.chain_id(),
            block_height: self.height(),
            epoch: is_stored_block.then_some(self.epoch()),
        }
    }
}
/// Wrapper around a hashed [`TimeoutInner`], identifying a consensus timeout for a chain
/// at a given height and epoch.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Allocative)]
#[serde(transparent)]
pub struct Timeout(Hashed<TimeoutInner>);
/// The data identifying a timeout: which chain, block height and epoch it applies to.
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Allocative)]
#[serde(rename = "Timeout")]
pub(crate) struct TimeoutInner {
    // The chain the timeout applies to.
    chain_id: ChainId,
    // The block height the timeout applies to.
    height: BlockHeight,
    // The epoch the timeout applies to.
    epoch: Epoch,
}
impl Timeout {
    /// Creates a timeout value for the given chain, block height and epoch.
    pub fn new(chain_id: ChainId, height: BlockHeight, epoch: Epoch) -> Self {
        Self(Hashed::new(TimeoutInner {
            chain_id,
            height,
            epoch,
        }))
    }
    /// Returns the static label used when logging this certificate value kind.
    pub fn to_log_str(&self) -> &'static str {
        "timeout"
    }
    /// Returns the chain this timeout applies to.
    pub fn chain_id(&self) -> ChainId {
        self.0.inner().chain_id
    }
    /// Returns the block height this timeout applies to.
    pub fn height(&self) -> BlockHeight {
        self.0.inner().height
    }
    /// Returns the epoch this timeout applies to.
    pub fn epoch(&self) -> Epoch {
        self.0.inner().epoch
    }
    /// Returns the hashed inner value.
    pub(crate) fn inner(&self) -> &Hashed<TimeoutInner> {
        &self.0
    }
}
impl BcsHashable<'_> for Timeout {}
impl BcsHashable<'_> for TimeoutInner {}
/// Failure to convert a `Certificate` into one of the expected certificate types.
#[derive(Clone, Copy, Debug, Error)]
pub enum ConversionError {
/// Failure to convert to [`ConfirmedBlock`] certificate.
#[error("Expected a `ConfirmedBlockCertificate` value")]
ConfirmedBlock,
/// Failure to convert to [`ValidatedBlock`] certificate.
#[error("Expected a `ValidatedBlockCertificate` value")]
ValidatedBlock,
/// Failure to convert to [`Timeout`] certificate.
#[error("Expected a `TimeoutCertificate` value")]
Timeout,
}
/// Block defines the atomic unit of growth of the Linera chain.
///
/// As part of the block body, contains all the incoming messages
/// and operations to execute which define a state transition of the chain.
/// Resulting messages produced by the operations are also included in the block body,
/// together with oracle responses and events.
#[derive(Debug, PartialEq, Eq, Hash, Clone, SimpleObject, Allocative)]
pub struct Block {
/// Header of the block containing metadata of the block.
pub header: BlockHeader,
/// Body of the block containing all of the data.
pub body: BlockBody,
}
/// Serializes only the non-derivable header fields (via [`SerializedHeader`]) together
/// with the body. The `*_hash` fields of [`BlockHeader`] are omitted, since they are
/// recomputed from the body on deserialization.
impl Serialize for Block {
    fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut state = serializer.serialize_struct("Block", 2)?;
        // Copy out only the header fields that cannot be recomputed from the body.
        let header = SerializedHeader {
            chain_id: self.header.chain_id,
            epoch: self.header.epoch,
            height: self.header.height,
            timestamp: self.header.timestamp,
            state_hash: self.header.state_hash,
            previous_block_hash: self.header.previous_block_hash,
            authenticated_owner: self.header.authenticated_owner,
        };
        state.serialize_field("header", &header)?;
        state.serialize_field("body", &self.body)?;
        state.end()
    }
}
/// Deserializes a `Block` from its compact form: the non-derivable header fields plus the
/// body. All `*_hash` header fields are recomputed from the deserialized body, so a
/// deserialized block's header is always consistent with its body by construction.
impl<'de> Deserialize<'de> for Block {
    fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        #[derive(Deserialize)]
        #[serde(rename = "Block")]
        struct Inner {
            header: SerializedHeader,
            body: BlockBody,
        }
        let inner = Inner::deserialize(deserializer)?;
        // Recompute every derived hash from the body rather than trusting the input.
        let transactions_hash = hashing::hash_vec(&inner.body.transactions);
        let messages_hash = hashing::hash_vec_vec(&inner.body.messages);
        let previous_message_blocks_hash = CryptoHash::new(&PreviousMessageBlocksMap {
            inner: Cow::Borrowed(&inner.body.previous_message_blocks),
        });
        let previous_event_blocks_hash = CryptoHash::new(&PreviousEventBlocksMap {
            inner: Cow::Borrowed(&inner.body.previous_event_blocks),
        });
        let oracle_responses_hash = hashing::hash_vec_vec(&inner.body.oracle_responses);
        let events_hash = hashing::hash_vec_vec(&inner.body.events);
        let blobs_hash = hashing::hash_vec_vec(&inner.body.blobs);
        let operation_results_hash = hashing::hash_vec(&inner.body.operation_results);
        let header = BlockHeader {
            chain_id: inner.header.chain_id,
            epoch: inner.header.epoch,
            height: inner.header.height,
            timestamp: inner.header.timestamp,
            state_hash: inner.header.state_hash,
            previous_block_hash: inner.header.previous_block_hash,
            authenticated_owner: inner.header.authenticated_owner,
            transactions_hash,
            messages_hash,
            previous_message_blocks_hash,
            previous_event_blocks_hash,
            oracle_responses_hash,
            events_hash,
            blobs_hash,
            operation_results_hash,
        };
        Ok(Self {
            header,
            body: inner.body,
        })
    }
}
/// Succinct representation of a block.
/// Contains all the metadata to follow the chain of blocks or verifying
/// inclusion (event, message, oracle response, etc.) in the block's body.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
pub struct BlockHeader {
/// The chain to which this block belongs.
pub chain_id: ChainId,
/// The number identifying the current configuration.
pub epoch: Epoch,
/// The block height.
pub height: BlockHeight,
/// The timestamp when this block was created.
pub timestamp: Timestamp,
/// The hash of the chain's execution state after this block.
pub state_hash: CryptoHash,
/// Certified hash of the previous block in the chain, if any.
pub previous_block_hash: Option<CryptoHash>,
/// The user signing for the operations in the block and paying for their execution
/// fees. If set, this must be the `owner` in the block proposal. `None` means that
/// the default account of the chain is used. This value is also used as recipient of
/// potential refunds for the message grants created by the operations.
pub authenticated_owner: Option<AccountOwner>,
// Inputs to the block, chosen by the block proposer.
/// Cryptographic hash of all the transactions in the block.
pub transactions_hash: CryptoHash,
// Outcome of the block execution.
/// Cryptographic hash of all the messages in the block.
pub messages_hash: CryptoHash,
/// Cryptographic hash of the lookup table for previous sending blocks.
pub previous_message_blocks_hash: CryptoHash,
/// Cryptographic hash of the lookup table for previous blocks publishing events.
pub previous_event_blocks_hash: CryptoHash,
/// Cryptographic hash of all the oracle responses in the block.
pub oracle_responses_hash: CryptoHash,
/// Cryptographic hash of all the events in the block.
pub events_hash: CryptoHash,
/// Cryptographic hash of all the created blobs in the block.
pub blobs_hash: CryptoHash,
/// A cryptographic hash of the execution results of all operations in a block.
pub operation_results_hash: CryptoHash,
}
/// The body of a block containing all the data included in the block.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
#[graphql(complex)]
pub struct BlockBody {
/// The transactions to execute in this block. Each transaction can be either
/// incoming messages or an operation.
#[graphql(skip)]
pub transactions: Vec<Transaction>,
/// The list of outgoing messages for each transaction.
pub messages: Vec<Vec<OutgoingMessage>>,
/// The hashes and heights of previous blocks that sent messages to the same recipients.
pub previous_message_blocks: BTreeMap<ChainId, (CryptoHash, BlockHeight)>,
/// The hashes and heights of previous blocks that published events to the same channels.
pub previous_event_blocks: BTreeMap<StreamId, (CryptoHash, BlockHeight)>,
/// The record of oracle responses for each transaction.
pub oracle_responses: Vec<Vec<OracleResponse>>,
/// The list of events produced by each transaction.
pub events: Vec<Vec<Event>>,
/// The list of blobs produced by each transaction.
pub blobs: Vec<Vec<Blob>>,
/// The execution result for each operation.
pub operation_results: Vec<OperationResult>,
}
impl BlockBody {
    /// Iterates over the operations of this block body, skipping incoming bundles.
    pub fn operations(&self) -> impl Iterator<Item = &Operation> {
        self.transactions.iter().filter_map(|transaction| {
            if let Transaction::ExecuteOperation(operation) = transaction {
                Some(operation)
            } else {
                None
            }
        })
    }
    /// Iterates over the incoming message bundles of this block body, skipping operations.
    pub fn incoming_bundles(&self) -> impl Iterator<Item = &IncomingBundle> {
        self.transactions.iter().filter_map(|transaction| {
            if let Transaction::ReceiveMessages(bundle) = transaction {
                Some(bundle)
            } else {
                None
            }
        })
    }
}
#[async_graphql::ComplexObject]
impl BlockBody {
    /// Metadata about the transactions in this block.
    ///
    /// Exposed as a resolver because the raw `transactions` field is marked
    /// `#[graphql(skip)]` on the struct.
    async fn transaction_metadata(&self) -> Vec<crate::data_types::TransactionMetadata> {
        self.transactions
            .iter()
            .map(crate::data_types::TransactionMetadata::from_transaction)
            .collect()
    }
}
impl Block {
    /// Assembles a `Block` from a proposal and its execution outcome, computing every
    /// derived header hash from the corresponding body field.
    pub fn new(block: ProposedBlock, outcome: BlockExecutionOutcome) -> Self {
        let transactions_hash = hashing::hash_vec(&block.transactions);
        let messages_hash = hashing::hash_vec_vec(&outcome.messages);
        let previous_message_blocks_hash = CryptoHash::new(&PreviousMessageBlocksMap {
            inner: Cow::Borrowed(&outcome.previous_message_blocks),
        });
        let previous_event_blocks_hash = CryptoHash::new(&PreviousEventBlocksMap {
            inner: Cow::Borrowed(&outcome.previous_event_blocks),
        });
        let oracle_responses_hash = hashing::hash_vec_vec(&outcome.oracle_responses);
        let events_hash = hashing::hash_vec_vec(&outcome.events);
        let blobs_hash = hashing::hash_vec_vec(&outcome.blobs);
        let operation_results_hash = hashing::hash_vec(&outcome.operation_results);
        let header = BlockHeader {
            chain_id: block.chain_id,
            epoch: block.epoch,
            height: block.height,
            timestamp: block.timestamp,
            state_hash: outcome.state_hash,
            previous_block_hash: block.previous_block_hash,
            authenticated_owner: block.authenticated_owner,
            transactions_hash,
            messages_hash,
            previous_message_blocks_hash,
            previous_event_blocks_hash,
            oracle_responses_hash,
            events_hash,
            blobs_hash,
            operation_results_hash,
        };
        let body = BlockBody {
            transactions: block.transactions,
            messages: outcome.messages,
            previous_message_blocks: outcome.previous_message_blocks,
            previous_event_blocks: outcome.previous_event_blocks,
            oracle_responses: outcome.oracle_responses,
            events: outcome.events,
            blobs: outcome.blobs,
            operation_results: outcome.operation_results,
        };
        Self { header, body }
    }
    /// Returns the bundles of messages sent via the given medium to the specified
    /// recipient. Messages originating from different transactions of the original block
    /// are kept in separate bundles. If the medium is a channel, does not verify that the
    /// recipient is actually subscribed to that channel.
    pub fn message_bundles_for(
        &self,
        recipient: ChainId,
        certificate_hash: CryptoHash,
    ) -> impl Iterator<Item = (Epoch, MessageBundle)> + '_ {
        // `index` numbers messages globally across all transactions of the block, so
        // posted messages carry their block-wide index even when bundles are filtered.
        let mut index = 0u32;
        let block_height = self.header.height;
        let block_timestamp = self.header.timestamp;
        let block_epoch = self.header.epoch;
        (0u32..)
            .zip(self.messages())
            .filter_map(move |(transaction_index, txn_messages)| {
                let messages = (index..)
                    .zip(txn_messages)
                    .filter(|(_, message)| message.destination == recipient)
                    .map(|(idx, message)| message.clone().into_posted(idx))
                    .collect::<Vec<_>>();
                // Advance past this transaction's messages even if none matched.
                index += txn_messages.len() as u32;
                (!messages.is_empty()).then(|| {
                    let bundle = MessageBundle {
                        height: block_height,
                        timestamp: block_timestamp,
                        certificate_hash,
                        transaction_index,
                        messages,
                    };
                    (block_epoch, bundle)
                })
            })
    }
    /// Returns all the blob IDs required by this block.
    /// Either as oracle responses or as published blobs.
    pub fn required_blob_ids(&self) -> BTreeSet<BlobId> {
        let mut blob_ids = self.oracle_blob_ids();
        blob_ids.extend(self.published_blob_ids());
        blob_ids.extend(self.created_blob_ids());
        if self.header.height == BlockHeight(0) {
            // the initial block implicitly depends on the chain description blob
            blob_ids.insert(BlobId::new(
                self.header.chain_id.0,
                BlobType::ChainDescription,
            ));
        }
        blob_ids
    }
    /// Returns whether this block requires the blob with the specified ID.
    pub fn requires_or_creates_blob(&self, blob_id: &BlobId) -> bool {
        self.oracle_blob_ids().contains(blob_id)
            || self.published_blob_ids().contains(blob_id)
            || self.created_blob_ids().contains(blob_id)
            || (self.header.height == BlockHeight(0)
                && (blob_id.blob_type == BlobType::ChainDescription
                    && blob_id.hash == self.header.chain_id.0))
    }
    /// Returns all the published blob IDs in this block's transactions.
    pub fn published_blob_ids(&self) -> BTreeSet<BlobId> {
        self.body
            .operations()
            .flat_map(Operation::published_blob_ids)
            .collect()
    }
    /// Returns all the blob IDs created by the block's transactions.
    pub fn created_blob_ids(&self) -> BTreeSet<BlobId> {
        self.body
            .blobs
            .iter()
            .flatten()
            .map(|blob| blob.id())
            .collect()
    }
    /// Returns all the blobs created by the block's transactions.
    pub fn created_blobs(&self) -> BTreeMap<BlobId, Blob> {
        self.body
            .blobs
            .iter()
            .flatten()
            .map(|blob| (blob.id(), blob.clone()))
            .collect()
    }
    /// Returns set of blob IDs that were a result of an oracle call.
    pub fn oracle_blob_ids(&self) -> BTreeSet<BlobId> {
        let mut required_blob_ids = BTreeSet::new();
        for responses in &self.body.oracle_responses {
            for response in responses {
                if let OracleResponse::Blob(blob_id) = response {
                    required_blob_ids.insert(*blob_id);
                }
            }
        }
        required_blob_ids
    }
    /// Returns reference to the outgoing messages in the block.
    pub fn messages(&self) -> &Vec<Vec<OutgoingMessage>> {
        &self.body.messages
    }
    /// Returns all recipients of messages in this block.
    pub fn recipients(&self) -> BTreeSet<ChainId> {
        self.body
            .messages
            .iter()
            .flat_map(|messages| messages.iter().map(|message| message.destination))
            .collect()
    }
    /// Returns whether there are any oracle responses in this block.
    pub fn has_oracle_responses(&self) -> bool {
        self.body
            .oracle_responses
            .iter()
            .any(|responses| !responses.is_empty())
    }
    /// Returns whether this block matches the proposal.
    pub fn matches_proposed_block(&self, block: &ProposedBlock) -> bool {
        let ProposedBlock {
            chain_id,
            epoch,
            transactions,
            height,
            timestamp,
            authenticated_owner,
            previous_block_hash,
        } = block;
        *chain_id == self.header.chain_id
            && *epoch == self.header.epoch
            && *transactions == self.body.transactions
            && *height == self.header.height
            && *timestamp == self.header.timestamp
            && *authenticated_owner == self.header.authenticated_owner
            && *previous_block_hash == self.header.previous_block_hash
    }
    /// Returns whether the outcomes of the block's execution match the passed values.
    #[cfg(with_testing)]
    #[expect(clippy::too_many_arguments)]
    pub fn outcome_matches(
        &self,
        expected_messages: Vec<Vec<OutgoingMessage>>,
        expected_previous_message_blocks: BTreeMap<ChainId, (CryptoHash, BlockHeight)>,
        expected_previous_event_blocks: BTreeMap<StreamId, (CryptoHash, BlockHeight)>,
        expected_oracle_responses: Vec<Vec<OracleResponse>>,
        expected_events: Vec<Vec<Event>>,
        expected_blobs: Vec<Vec<Blob>>,
        expected_operation_results: Vec<OperationResult>,
    ) -> bool {
        let BlockBody {
            transactions: _,
            messages,
            previous_message_blocks,
            previous_event_blocks,
            oracle_responses,
            events,
            blobs,
            operation_results,
        } = &self.body;
        *messages == expected_messages
            && *previous_message_blocks == expected_previous_message_blocks
            && *previous_event_blocks == expected_previous_event_blocks
            && *oracle_responses == expected_oracle_responses
            && *events == expected_events
            && *blobs == expected_blobs
            && *operation_results == expected_operation_results
    }
    /// Splits this block back into the proposal it was built from and the execution
    /// outcome (the inverse of [`Block::new`], minus the derived hashes).
    pub fn into_proposal(self) -> (ProposedBlock, BlockExecutionOutcome) {
        let proposed_block = ProposedBlock {
            chain_id: self.header.chain_id,
            epoch: self.header.epoch,
            transactions: self.body.transactions,
            height: self.header.height,
            timestamp: self.header.timestamp,
            authenticated_owner: self.header.authenticated_owner,
            previous_block_hash: self.header.previous_block_hash,
        };
        let outcome = BlockExecutionOutcome {
            state_hash: self.header.state_hash,
            messages: self.body.messages,
            previous_message_blocks: self.body.previous_message_blocks,
            previous_event_blocks: self.body.previous_event_blocks,
            oracle_responses: self.body.oracle_responses,
            events: self.body.events,
            blobs: self.body.blobs,
            operation_results: self.body.operation_results,
        };
        (proposed_block, outcome)
    }
    /// Iterates over clones of all blobs created by this block, keyed by blob ID.
    pub fn iter_created_blobs(&self) -> impl Iterator<Item = (BlobId, Blob)> + '_ {
        self.body
            .blobs
            .iter()
            .flatten()
            .map(|blob| (blob.id(), blob.clone()))
    }
}
// Marker impl: allows computing a `CryptoHash` of a whole `Block` (see `BcsHashable`).
impl BcsHashable<'_> for Block {}
/// Serialization wrapper around the previous-message-blocks map; implements
/// [`BcsHashable`] so the map can be hashed on its own.
#[derive(Serialize, Deserialize)]
pub struct PreviousMessageBlocksMap<'a> {
    // Maps a recipient chain to a (certificate hash, block height) pair.
    // NOTE(review): presumably the latest prior block that sent it messages —
    // confirm against the code that populates this map.
    inner: Cow<'a, BTreeMap<ChainId, (CryptoHash, BlockHeight)>>,
}
impl<'de> BcsHashable<'de> for PreviousMessageBlocksMap<'de> {}
/// Serialization wrapper around the previous-event-blocks map; implements
/// [`BcsHashable`] so the map can be hashed on its own.
#[derive(Serialize, Deserialize)]
pub struct PreviousEventBlocksMap<'a> {
    // Maps a stream ID to a (certificate hash, block height) pair.
    // NOTE(review): presumably the latest prior block that emitted events to the
    // stream — confirm against the code that populates this map.
    inner: Cow<'a, BTreeMap<StreamId, (CryptoHash, BlockHeight)>>,
}
impl<'de> BcsHashable<'de> for PreviousEventBlocksMap<'de> {}
/// Serialization-only mirror of the block header; appears on the wire under the
/// name `BlockHeader` via `#[serde(rename)]`.
#[derive(Serialize, Deserialize)]
#[serde(rename = "BlockHeader")]
struct SerializedHeader {
    chain_id: ChainId,
    epoch: Epoch,
    height: BlockHeight,
    timestamp: Timestamp,
    state_hash: CryptoHash,
    previous_block_hash: Option<CryptoHash>,
    authenticated_owner: Option<AccountOwner>,
}
/// Helpers for hashing vectors of hashable items: each element is hashed, and the
/// resulting vector of hashes is hashed again.
mod hashing {
    use linera_base::crypto::{BcsHashable, CryptoHash, CryptoHashVec};

    /// Hashes each element of the slice, then hashes the vector of hashes.
    pub(super) fn hash_vec<'de, T: BcsHashable<'de>>(it: impl AsRef<[T]>) -> CryptoHash {
        let hashes: Vec<_> = it.as_ref().iter().map(CryptoHash::new).collect();
        CryptoHash::new(&CryptoHashVec(hashes))
    }

    /// Hashes each inner vector with [`hash_vec`], then hashes the vector of the
    /// resulting hashes.
    pub(super) fn hash_vec_vec<'de, T: BcsHashable<'de>>(it: impl AsRef<[Vec<T>]>) -> CryptoHash {
        let hashes: Vec<_> = it.as_ref().iter().map(hash_vec).collect();
        CryptoHash::new(&CryptoHashVec(hashes))
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/pending_blobs.rs | linera-chain/src/pending_blobs.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use allocative::Allocative;
use linera_base::{
data_types::{Blob, Round},
ensure,
identifiers::BlobId,
};
use linera_views::{
context::Context,
map_view::MapView,
register_view::RegisterView,
views::{ClonableView, View},
ViewError,
};
use crate::ChainError;
/// The pending blobs belonging to a block that can't be processed without them.
#[cfg_attr(with_graphql, derive(async_graphql::SimpleObject))]
#[derive(Debug, View, ClonableView, Allocative)]
#[allocative(bound = "C")]
pub struct PendingBlobsView<C>
where
    C: Clone + Context,
{
    /// The round in which the block is validated.
    pub round: RegisterView<C, Round>,
    /// Whether these blobs were already validated.
    ///
    /// This is only `false` for _new_ block proposals, not when re-proposing blocks from earlier
    /// rounds or when handling validated block certificates. If it is false, the pending blobs are
    /// only the ones published by the new block, not the ones that are only read.
    pub validated: RegisterView<C, bool>,
    /// The map of blobs needed to process the block.
    ///
    /// Each required blob ID maps to `Some(blob)` once the blob has been received,
    /// and to `None` while it is still missing (see `maybe_insert`).
    pub pending_blobs: MapView<C, BlobId, Option<Blob>>,
}
impl<C> PendingBlobsView<C>
where
    C: Clone + Context,
{
    /// Looks up several blobs at once; unknown or not-yet-received entries are `None`.
    pub async fn multi_get(&self, blob_ids: &[BlobId]) -> Result<Vec<Option<Blob>>, ViewError> {
        let entries = self.pending_blobs.multi_get(blob_ids).await?;
        Ok(entries.into_iter().map(Option::flatten).collect())
    }

    /// Looks up a single blob; returns `None` if it is unknown or not yet received.
    pub async fn get(&self, blob_id: &BlobId) -> Result<Option<Blob>, ViewError> {
        let entry = self.pending_blobs.get(blob_id).await?;
        Ok(entry.flatten())
    }

    /// Inserts the blob. Returns whether the blob was required by the pending block.
    pub async fn maybe_insert(&mut self, blob: &Blob) -> Result<bool, ViewError> {
        let blob_id = blob.id();
        match self.pending_blobs.get_mut(&blob_id).await? {
            // The block does not require this blob at all.
            None => Ok(false),
            Some(slot) => {
                // Only fill the slot the first time; a blob received twice is a no-op.
                if slot.is_none() {
                    *slot = Some(blob.clone());
                }
                Ok(true)
            }
        }
    }

    /// Moves the pending-blob state to `round`, clearing it if the round advanced,
    /// and merges the given blobs into the map.
    pub fn update(
        &mut self,
        round: Round,
        validated: bool,
        maybe_blobs: BTreeMap<BlobId, Option<Blob>>,
    ) -> Result<(), ChainError> {
        let current_round = *self.round.get();
        ensure!(
            current_round <= round,
            ChainError::InsufficientRound(current_round)
        );
        if current_round < round {
            self.clear();
            self.round.set(round);
            self.validated.set(validated);
        }
        for (blob_id, maybe_blob) in maybe_blobs {
            self.pending_blobs.insert(&blob_id, maybe_blob)?;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/timeout.rs | linera-chain/src/certificate/timeout.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{ValidatorPublicKey, ValidatorSignature},
data_types::Round,
};
use serde::{
ser::{Serialize, SerializeStruct, Serializer},
Deserialize, Deserializer,
};
use super::{generic::GenericCertificate, Certificate};
use crate::block::{ConversionError, Timeout};
impl TryFrom<Certificate> for GenericCertificate<Timeout> {
    type Error = ConversionError;

    /// Extracts the timeout certificate, failing on any other certificate kind.
    fn try_from(cert: Certificate) -> Result<Self, Self::Error> {
        if let Certificate::Timeout(timeout) = cert {
            Ok(timeout)
        } else {
            Err(ConversionError::Timeout)
        }
    }
}
impl From<GenericCertificate<Timeout>> for Certificate {
fn from(cert: GenericCertificate<Timeout>) -> Certificate {
Certificate::Timeout(cert)
}
}
impl Serialize for GenericCertificate<Timeout> {
    /// Serializes as a `TimeoutCertificate` struct with `value`, `round` and
    /// `signatures` fields.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // The length hint must equal the number of fields actually serialized (3).
        // It was previously 4, which disagreed with the sibling certificate impls
        // and could confuse serializers that honor the hint.
        let mut state = serializer.serialize_struct("TimeoutCertificate", 3)?;
        state.serialize_field("value", self.inner())?;
        state.serialize_field("round", &self.round)?;
        state.serialize_field("signatures", self.signatures())?;
        state.end()
    }
}
impl<'de> Deserialize<'de> for GenericCertificate<Timeout> {
    /// Deserializes a `TimeoutCertificate`, rejecting signature lists that are not
    /// strictly sorted by validator public key.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        #[serde(rename = "TimeoutCertificate")]
        struct Inner {
            value: Timeout,
            round: Round,
            signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
        }

        let Inner {
            value,
            round,
            signatures,
        } = Inner::deserialize(deserializer)?;
        if crate::data_types::is_strictly_ordered(&signatures) {
            Ok(Self::new(value, round, signatures))
        } else {
            Err(serde::de::Error::custom("Vector is not strictly sorted"))
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/validated.rs | linera-chain/src/certificate/validated.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{ValidatorPublicKey, ValidatorSignature},
data_types::Round,
};
use serde::{
ser::{Serialize, SerializeStruct, Serializer},
Deserialize, Deserializer,
};
use super::{generic::GenericCertificate, Certificate};
use crate::block::{Block, ConversionError, ValidatedBlock};
impl GenericCertificate<ValidatedBlock> {
    /// Counts all outgoing messages across the block's transactions.
    #[cfg(with_testing)]
    pub fn outgoing_message_count(&self) -> usize {
        self.block()
            .messages()
            .iter()
            .map(|messages| messages.len())
            .sum()
    }

    /// Returns reference to the [`Block`] contained in this certificate.
    pub fn block(&self) -> &Block {
        self.inner().block()
    }
}
impl TryFrom<Certificate> for GenericCertificate<ValidatedBlock> {
    type Error = ConversionError;

    /// Extracts the validated-block certificate, failing on any other kind.
    fn try_from(cert: Certificate) -> Result<Self, Self::Error> {
        if let Certificate::Validated(validated) = cert {
            Ok(validated)
        } else {
            Err(ConversionError::ValidatedBlock)
        }
    }
}
impl From<GenericCertificate<ValidatedBlock>> for Certificate {
    /// Wraps a validated-block certificate in the catch-all [`Certificate`] enum.
    fn from(cert: GenericCertificate<ValidatedBlock>) -> Certificate {
        Certificate::Validated(cert)
    }
}
impl Serialize for GenericCertificate<ValidatedBlock> {
    /// Serializes as a `ValidatedBlockCertificate` struct with `value`, `round` and
    /// `signatures` fields. Field order matters for non-self-describing formats and
    /// must match the `Deserialize` impl.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut state = serializer.serialize_struct("ValidatedBlockCertificate", 3)?;
        state.serialize_field("value", self.inner())?;
        state.serialize_field("round", &self.round)?;
        state.serialize_field("signatures", self.signatures())?;
        state.end()
    }
}
impl<'de> Deserialize<'de> for GenericCertificate<ValidatedBlock> {
    /// Deserializes a `ValidatedBlockCertificate`, rejecting signature lists that
    /// are not strictly sorted by validator public key.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        #[serde(rename = "ValidatedBlockCertificate")]
        struct Inner {
            value: ValidatedBlock,
            round: Round,
            signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
        }

        let Inner {
            value,
            round,
            signatures,
        } = Inner::deserialize(deserializer)?;
        if crate::data_types::is_strictly_ordered(&signatures) {
            Ok(Self::new(value, round, signatures))
        } else {
            Err(serde::de::Error::custom(
                "Signatures are not strictly ordered",
            ))
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/mod.rs | linera-chain/src/certificate/mod.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
mod confirmed;
mod generic;
mod lite;
mod timeout;
mod validated;
use std::collections::BTreeSet;
use allocative::Allocative;
pub use generic::GenericCertificate;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey, ValidatorSignature},
data_types::{BlockHeight, Epoch, Round},
identifiers::{BlobId, ChainId},
};
pub use lite::LiteCertificate;
use serde::{Deserialize, Serialize};
use crate::types::{ConfirmedBlock, Timeout, ValidatedBlock};
/// Certificate for a [`ValidatedBlock`] instance.
///
/// A validated block certificate means the block is valid (but not necessarily finalized yet).
/// Since only one block per round is validated,
/// there can be at most one such certificate in every round.
pub type ValidatedBlockCertificate = GenericCertificate<ValidatedBlock>;

/// Certificate for a [`ConfirmedBlock`] instance.
///
/// A confirmed block certificate means that the block is finalized:
/// It is the agreed block at that height on that chain.
pub type ConfirmedBlockCertificate = GenericCertificate<ConfirmedBlock>;

/// Certificate for a [`Timeout`] instance.
///
/// A timeout certificate means that the next consensus round has begun.
pub type TimeoutCertificate = GenericCertificate<Timeout>;
/// Enum wrapping all types of certificates that can be created.
///
/// A certificate is a statement signed by a quorum of the committee: a validated
/// block, a confirmed block, or a round timeout.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum Certificate {
    /// Certificate for [`ValidatedBlock`].
    Validated(ValidatedBlockCertificate),
    /// Certificate for [`ConfirmedBlock`].
    Confirmed(ConfirmedBlockCertificate),
    /// Certificate for [`Timeout`].
    Timeout(TimeoutCertificate),
}
impl Certificate {
    /// Returns the round in which this certificate was created.
    pub fn round(&self) -> Round {
        match self {
            Certificate::Validated(validated) => validated.round,
            Certificate::Confirmed(confirmed) => confirmed.round,
            Certificate::Timeout(timeout) => timeout.round,
        }
    }

    /// Returns the block height this certificate refers to.
    pub fn height(&self) -> BlockHeight {
        match self {
            Certificate::Validated(validated) => validated.value().block().header.height,
            Certificate::Confirmed(confirmed) => confirmed.value().block().header.height,
            Certificate::Timeout(timeout) => timeout.value().height(),
        }
    }

    /// Returns the ID of the chain this certificate belongs to.
    pub fn chain_id(&self) -> ChainId {
        match self {
            Certificate::Validated(validated) => validated.value().block().header.chain_id,
            Certificate::Confirmed(confirmed) => confirmed.value().block().header.chain_id,
            Certificate::Timeout(timeout) => timeout.value().chain_id(),
        }
    }

    /// Returns the validator signatures backing this certificate.
    pub fn signatures(&self) -> &Vec<(ValidatorPublicKey, ValidatorSignature)> {
        match self {
            Certificate::Validated(validated) => validated.signatures(),
            Certificate::Confirmed(confirmed) => confirmed.signatures(),
            Certificate::Timeout(timeout) => timeout.signatures(),
        }
    }
}
/// Tag distinguishing the kind of value a certificate covers (see
/// [`CertificateValue::KIND`]).
///
/// NOTE(review): the kind is passed into `check_signatures` (see
/// `GenericCertificate::check`), so the explicit `u8` discriminants appear to
/// affect signature validity — keep the values stable.
#[derive(Clone, Copy, Debug, Serialize, Deserialize, Hash, Eq, PartialEq, Allocative)]
#[repr(u8)]
pub enum CertificateKind {
    Timeout = 0,
    Validated = 1,
    Confirmed = 2,
}
/// Common interface of values that can be wrapped in a [`GenericCertificate`].
pub trait CertificateValue: Clone {
    /// The kind tag under which this value is certified; used in signature checks
    /// so certificates of different kinds are not interchangeable.
    const KIND: CertificateKind;
    /// The ID of the chain this value belongs to.
    fn chain_id(&self) -> ChainId;
    /// The epoch associated with this value.
    fn epoch(&self) -> Epoch;
    /// The block height this value refers to.
    fn height(&self) -> BlockHeight;
    /// The blobs that must be available in order to process this value.
    fn required_blob_ids(&self) -> BTreeSet<BlobId>;
    /// The hash identifying this value (the hash that validators sign).
    fn hash(&self) -> CryptoHash;
}
// Delegates to the inherent accessors on `Timeout`.
impl CertificateValue for Timeout {
    const KIND: CertificateKind = CertificateKind::Timeout;
    fn chain_id(&self) -> ChainId {
        self.chain_id()
    }
    fn epoch(&self) -> Epoch {
        self.epoch()
    }
    fn height(&self) -> BlockHeight {
        self.height()
    }
    fn required_blob_ids(&self) -> BTreeSet<BlobId> {
        // A timeout carries no block, so it never requires any blobs.
        BTreeSet::new()
    }
    fn hash(&self) -> CryptoHash {
        self.inner().hash()
    }
}
// Delegates to the header of the underlying block.
impl CertificateValue for ValidatedBlock {
    const KIND: CertificateKind = CertificateKind::Validated;
    fn chain_id(&self) -> ChainId {
        self.block().header.chain_id
    }
    fn epoch(&self) -> Epoch {
        self.block().header.epoch
    }
    fn height(&self) -> BlockHeight {
        self.block().header.height
    }
    fn required_blob_ids(&self) -> BTreeSet<BlobId> {
        self.block().required_blob_ids()
    }
    fn hash(&self) -> CryptoHash {
        self.inner().hash()
    }
}
// Delegates to the header of the underlying block.
impl CertificateValue for ConfirmedBlock {
    const KIND: CertificateKind = CertificateKind::Confirmed;
    fn chain_id(&self) -> ChainId {
        self.block().header.chain_id
    }
    fn epoch(&self) -> Epoch {
        self.block().header.epoch
    }
    fn height(&self) -> BlockHeight {
        self.block().header.height
    }
    fn required_blob_ids(&self) -> BTreeSet<BlobId> {
        self.block().required_blob_ids()
    }
    fn hash(&self) -> CryptoHash {
        self.inner().hash()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/confirmed.rs | linera-chain/src/certificate/confirmed.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{ValidatorPublicKey, ValidatorSignature},
data_types::{Epoch, Round},
identifiers::ChainId,
};
use serde::{ser::SerializeStruct, Deserialize, Deserializer, Serialize};
use super::{generic::GenericCertificate, Certificate};
use crate::{
block::{Block, ConfirmedBlock, ConversionError},
data_types::MessageBundle,
};
impl GenericCertificate<ConfirmedBlock> {
    /// Returns reference to the `Block` contained in this certificate.
    pub fn block(&self) -> &Block {
        self.inner().block()
    }

    /// Returns the bundles of messages sent to the specified recipient.
    /// Messages originating from different transactions of the original block
    /// are kept in separate bundles.
    pub fn message_bundles_for(
        &self,
        recipient: ChainId,
    ) -> impl Iterator<Item = (Epoch, MessageBundle)> + '_ {
        self.block().message_bundles_for(recipient, self.hash())
    }

    /// Counts all outgoing messages across the block's transactions.
    #[cfg(with_testing)]
    pub fn outgoing_message_count(&self) -> usize {
        self.block()
            .messages()
            .iter()
            .map(|messages| messages.len())
            .sum()
    }
}
impl TryFrom<Certificate> for GenericCertificate<ConfirmedBlock> {
    type Error = ConversionError;

    /// Extracts the confirmed-block certificate, failing on any other kind.
    fn try_from(cert: Certificate) -> Result<Self, Self::Error> {
        if let Certificate::Confirmed(confirmed) = cert {
            Ok(confirmed)
        } else {
            Err(ConversionError::ConfirmedBlock)
        }
    }
}
impl From<GenericCertificate<ConfirmedBlock>> for Certificate {
fn from(cert: GenericCertificate<ConfirmedBlock>) -> Certificate {
Certificate::Confirmed(cert)
}
}
impl Serialize for GenericCertificate<ConfirmedBlock> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut state = serializer.serialize_struct("ConfirmedBlockCertificate", 3)?;
state.serialize_field("value", self.inner())?;
state.serialize_field("round", &self.round)?;
state.serialize_field("signatures", self.signatures())?;
state.end()
}
}
impl<'de> Deserialize<'de> for GenericCertificate<ConfirmedBlock> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Debug, Deserialize)]
#[serde(rename = "ConfirmedBlockCertificate")]
struct Helper {
value: ConfirmedBlock,
round: Round,
signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
}
let helper = Helper::deserialize(deserializer)?;
if !crate::data_types::is_strictly_ordered(&helper.signatures) {
Err(serde::de::Error::custom("Vector is not strictly sorted"))
} else {
Ok(Self::new(helper.value, helper.round, helper.signatures))
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/lite.rs | linera-chain/src/certificate/lite.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Cow, ops::Deref};
use allocative::{Allocative, Key, Visitor};
use linera_base::{
crypto::{ValidatorPublicKey, ValidatorSignature},
data_types::Round,
};
use linera_execution::committee::Committee;
use serde::{Deserialize, Serialize};
use super::{CertificateValue, GenericCertificate};
use crate::{
data_types::{check_signatures, LiteValue, LiteVote},
ChainError,
};
/// A certified statement from the committee, without the value.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct LiteCertificate<'a> {
    /// Hash and chain ID of the certified value (used as key for storage).
    pub value: LiteValue,
    /// The round in which the value was certified.
    pub round: Round,
    /// Signatures on the value.
    ///
    /// May borrow the signatures of a full certificate (see
    /// `GenericCertificate::lite_certificate`) or own them (see [`Self::new`],
    /// [`Self::cloned`]).
    pub signatures: Cow<'a, [(ValidatorPublicKey, ValidatorSignature)]>,
}
// Memory accounting for `LiteCertificate`.
impl Allocative for LiteCertificate<'_> {
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_field(Key::new("LiteCertificate_value"), &self.value);
        visitor.visit_field(Key::new("LiteCertificate_round"), &self.round);
        // Only visit the signatures when they are owned — presumably borrowed
        // signatures are accounted for by their owner, avoiding double counting.
        // NOTE(review): confirm this is the intent.
        if matches!(self.signatures, Cow::Owned(_)) {
            for (public_key, signature) in self.signatures.deref() {
                visitor.visit_field(Key::new("ValidatorPublicKey"), public_key);
                visitor.visit_field(Key::new("ValidatorSignature"), signature);
            }
        }
    }
}
impl LiteCertificate<'_> {
    /// Creates a certificate from the given value, round and signatures, sorting the
    /// signatures by validator public key (as required by signature checks).
    pub fn new(
        value: LiteValue,
        round: Round,
        mut signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
    ) -> Self {
        signatures.sort_by_key(|&(validator_name, _)| validator_name);
        let signatures = Cow::Owned(signatures);
        Self {
            value,
            round,
            signatures,
        }
    }

    /// Creates a [`LiteCertificate`] from a list of votes with their validator public keys, without cryptographically checking the
    /// signatures. Returns `None` if the votes are empty or don't have matching values and rounds.
    pub fn try_from_votes(
        votes: impl IntoIterator<Item = (ValidatorPublicKey, LiteVote)>,
    ) -> Option<Self> {
        let mut votes = votes.into_iter();
        // The first vote fixes the value and round all other votes must match.
        let (
            public_key,
            LiteVote {
                value,
                round,
                signature,
            },
        ) = votes.next()?;
        let mut signatures = vec![(public_key, signature)];
        for (validator_key, vote) in votes {
            if vote.value.value_hash != value.value_hash || vote.round != round {
                return None;
            }
            signatures.push((validator_key, vote.signature));
        }
        Some(LiteCertificate::new(value, round, signatures))
    }

    /// Verifies the certificate.
    pub fn check(&self, committee: &Committee) -> Result<&LiteValue, ChainError> {
        check_signatures(
            self.value.value_hash,
            self.value.kind,
            self.round,
            &self.signatures,
            committee,
        )?;
        Ok(&self.value)
    }

    /// Checks whether the value matches this certificate.
    pub fn check_value<T: CertificateValue>(&self, value: &T) -> bool {
        self.value.chain_id == value.chain_id()
            && T::KIND == self.value.kind
            && self.value.value_hash == value.hash()
    }

    /// Returns the [`GenericCertificate`] with the specified value, if it matches.
    pub fn with_value<T: CertificateValue>(self, value: T) -> Option<GenericCertificate<T>> {
        // Reuse `check_value` so the matching criteria cannot drift out of sync
        // with the boolean check above.
        if !self.check_value(&value) {
            return None;
        }
        Some(GenericCertificate::new(
            value,
            self.round,
            self.signatures.into_owned(),
        ))
    }

    /// Returns a [`LiteCertificate`] that owns the list of signatures.
    pub fn cloned(&self) -> LiteCertificate<'static> {
        LiteCertificate {
            value: self.value.clone(),
            round: self.round,
            signatures: Cow::Owned(self.signatures.clone().into_owned()),
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/certificate/generic.rs | linera-chain/src/certificate/generic.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use allocative::{Allocative, Key, Visitor};
use custom_debug_derive::Debug;
use linera_base::{
crypto::{CryptoHash, ValidatorPublicKey, ValidatorSignature},
data_types::Round,
};
use linera_execution::committee::Committee;
use super::CertificateValue;
use crate::{data_types::LiteValue, ChainError};
/// Generic type representing a certificate for `value` of type `T`.
#[derive(Debug)]
pub struct GenericCertificate<T: CertificateValue> {
    value: T,
    /// The round in which the value was certified.
    pub round: Round,
    // Invariant: kept sorted by validator public key (established in `new`,
    // maintained by `add_signature`) so that binary searches work.
    signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
}
// Memory accounting for `GenericCertificate`: visits the value, the round and
// every (public key, signature) pair.
impl<T: Allocative + CertificateValue> Allocative for GenericCertificate<T> {
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        visitor.visit_field(Key::new("GenericCertificate_value"), &self.value);
        visitor.visit_field(Key::new("GenericCertificate_round"), &self.round);
        for (public_key, signature) in &self.signatures {
            visitor.visit_field(Key::new("ValidatorPublicKey"), public_key);
            visitor.visit_field(Key::new("ValidatorSignature"), signature);
        }
    }
}
impl<T: CertificateValue> GenericCertificate<T> {
    /// Creates a new certificate, sorting the signatures by validator public key so
    /// that the binary searches in `add_signature` and `is_signed_by` work.
    pub fn new(
        value: T,
        round: Round,
        mut signatures: Vec<(ValidatorPublicKey, ValidatorSignature)>,
    ) -> Self {
        signatures.sort_by_key(|&(validator_name, _)| validator_name);
        Self {
            value,
            round,
            signatures,
        }
    }
    /// Returns a reference to the value contained in this certificate.
    pub fn value(&self) -> &T {
        &self.value
    }
    /// Consumes this certificate, returning the value it contains.
    pub fn into_value(self) -> T {
        self.value
    }
    /// Returns reference to the value contained in this certificate.
    /// (Equivalent to [`Self::value`].)
    pub fn inner(&self) -> &T {
        &self.value
    }
    /// Consumes this certificate, returning the value it contains.
    /// (Equivalent to [`Self::into_value`].)
    pub fn into_inner(self) -> T {
        self.value
    }
    /// Returns the certified value's hash.
    pub fn hash(&self) -> CryptoHash {
        self.value.hash()
    }
    /// Takes the certificate apart into its value, round and signatures.
    pub fn destructure(self) -> (T, Round, Vec<(ValidatorPublicKey, ValidatorSignature)>) {
        (self.value, self.round, self.signatures)
    }
    /// Returns the signatures, sorted by validator public key.
    pub fn signatures(&self) -> &Vec<(ValidatorPublicKey, ValidatorSignature)> {
        &self.signatures
    }
    #[cfg(with_testing)]
    pub fn signatures_mut(&mut self) -> &mut Vec<(ValidatorPublicKey, ValidatorSignature)> {
        &mut self.signatures
    }
    /// Adds a signature to the certificate's list of signatures
    /// It's the responsibility of the caller to not insert duplicates
    pub fn add_signature(
        &mut self,
        signature: (ValidatorPublicKey, ValidatorSignature),
    ) -> &Vec<(ValidatorPublicKey, ValidatorSignature)> {
        // Binary search keeps the vector sorted: on `Ok` and `Err` alike the
        // returned index is a valid insertion point, so `unwrap_or_else(identity)`
        // covers both cases.
        let index = self
            .signatures
            .binary_search_by(|(name, _)| name.cmp(&signature.0))
            .unwrap_or_else(std::convert::identity);
        self.signatures.insert(index, signature);
        &self.signatures
    }
    /// Returns whether the validator is among the signatories of this certificate.
    pub fn is_signed_by(&self, validator_name: &ValidatorPublicKey) -> bool {
        self.signatures
            .binary_search_by(|(name, _)| name.cmp(validator_name))
            .is_ok()
    }
    /// Verifies the certificate.
    pub fn check(&self, committee: &Committee) -> Result<(), ChainError>
    where
        T: CertificateValue,
    {
        crate::data_types::check_signatures(
            self.hash(),
            T::KIND,
            self.round,
            &self.signatures,
            committee,
        )?;
        Ok(())
    }
    /// Returns a lightweight certificate that refers to the value only by hash and
    /// borrows this certificate's signatures.
    pub fn lite_certificate(&self) -> crate::certificate::LiteCertificate<'_>
    where
        T: CertificateValue,
    {
        crate::certificate::LiteCertificate {
            value: LiteValue::new(&self.value),
            round: self.round,
            signatures: std::borrow::Cow::Borrowed(&self.signatures),
        }
    }
}
// Manual `Clone` impl. NOTE(review): since `CertificateValue: Clone`, a
// `#[derive(Clone)]` would be equivalent; this hand-written impl keeps the
// behavior explicit.
impl<T: CertificateValue> Clone for GenericCertificate<T> {
    fn clone(&self) -> Self {
        Self {
            value: self.value.clone(),
            round: self.round,
            signatures: self.signatures.clone(),
        }
    }
}
#[cfg(with_testing)]
impl<T: CertificateValue + Eq + PartialEq> Eq for GenericCertificate<T> {}
#[cfg(with_testing)]
impl<T: CertificateValue + Eq + PartialEq> PartialEq for GenericCertificate<T> {
    /// Two certificates are equal when they certify the same value (compared by
    /// hash, not structurally), in the same round, with identical signatures.
    fn eq(&self, other: &Self) -> bool {
        self.hash() == other.hash()
            && self.round == other.round
            && self.signatures == other.signatures
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/unit_tests/outbox_tests.rs | linera-chain/src/unit_tests/outbox_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use super::*;
#[tokio::test]
async fn test_outbox() {
    let mut view = OutboxStateView::new().await;
    // Scheduling new heights returns `true`; re-scheduling an already-seen height
    // returns `false` and does not enqueue a duplicate (the count stays at 3).
    assert!(view.schedule_message(BlockHeight::ZERO).unwrap());
    assert!(view.schedule_message(BlockHeight::from(2)).unwrap());
    assert!(view.schedule_message(BlockHeight::from(4)).unwrap());
    assert!(!view.schedule_message(BlockHeight::ZERO).unwrap());
    assert_eq!(view.queue.count(), 3);
    // Acknowledging everything up to height 3 drains heights 0 and 2.
    assert_eq!(
        view.mark_messages_as_received(BlockHeight::from(3))
            .await
            .unwrap(),
        vec![BlockHeight::ZERO, BlockHeight::from(2)]
    );
    // Acknowledging the same height again is a no-op.
    assert_eq!(
        view.mark_messages_as_received(BlockHeight::from(3))
            .await
            .unwrap(),
        vec![]
    );
    assert_eq!(view.queue.count(), 1);
    // Height 4 is the last scheduled message; after it the queue is empty.
    assert_eq!(
        view.mark_messages_as_received(BlockHeight::from(4))
            .await
            .unwrap(),
        vec![BlockHeight::from(4)]
    );
    assert_eq!(view.queue.count(), 0);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/unit_tests/data_types_tests.rs | linera-chain/src/unit_tests/data_types_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::{
crypto::{AccountSecretKey, Ed25519SecretKey, Secp256k1SecretKey, ValidatorKeypair},
data_types::Amount,
};
use super::*;
use crate::{
block::{ConfirmedBlock, ValidatedBlock},
test::{make_first_block, BlockTestExt},
};
/// Returns a deterministic test chain ID derived from `index`.
fn dummy_chain_id(index: u32) -> ChainId {
    let seed = format!("chain{}", index);
    ChainId(CryptoHash::test_hash(seed))
}
#[test]
fn test_signed_values() {
    let validator1_key_pair = ValidatorKeypair::generate();
    let validator2_key_pair = ValidatorKeypair::generate();
    // A minimal executed block: one transaction with empty outcomes.
    let block = BlockExecutionOutcome {
        messages: vec![Vec::new()],
        previous_message_blocks: BTreeMap::new(),
        previous_event_blocks: BTreeMap::new(),
        state_hash: CryptoHash::test_hash("state"),
        oracle_responses: vec![Vec::new()],
        events: vec![Vec::new()],
        blobs: vec![Vec::new()],
        operation_results: vec![OperationResult::default()],
    }
    .with(make_first_block(dummy_chain_id(1)).with_simple_transfer(dummy_chain_id(2), Amount::ONE));
    let confirmed_value = ConfirmedBlock::new(block.clone());
    let confirmed_vote = LiteVote::new(
        LiteValue::new(&confirmed_value),
        Round::Fast,
        &validator1_key_pair.secret_key,
    );
    // A vote verifies against the public key matching the secret key that signed it.
    assert!(confirmed_vote.check(validator1_key_pair.public_key).is_ok());
    let validated_value = ValidatedBlock::new(block);
    let validated_vote = LiteVote::new(
        LiteValue::new(&validated_value),
        Round::Fast,
        &validator1_key_pair.secret_key,
    );
    assert_ne!(
        confirmed_vote.value, validated_vote.value,
        "Confirmed and validated votes should be different, even if for the same block"
    );
    let v = LiteVote::new(
        LiteValue::new(&confirmed_value),
        Round::Fast,
        &validator2_key_pair.secret_key,
    );
    // The vote was created with validator2's key but we'll check it with validator1's key
    assert!(v.check(validator1_key_pair.public_key).is_err());
    assert!(validated_vote.check(validator1_key_pair.public_key).is_ok());
    assert!(confirmed_vote.check(validator1_key_pair.public_key).is_ok());
    let mut v = validated_vote.clone();
    // Use signature from ConfirmedBlock to sign a ValidatedBlock.
    v.signature = confirmed_vote.signature;
    assert!(
        v.check(validator1_key_pair.public_key).is_err(),
        "Confirmed and validated votes must not be interchangeable"
    );
    // And vice versa: a ValidatedBlock signature on a ConfirmedBlock vote.
    let mut v = confirmed_vote.clone();
    v.signature = validated_vote.signature;
    assert!(
        v.check(validator1_key_pair.public_key).is_err(),
        "Confirmed and validated votes must not be interchangeable"
    );
}
#[test]
fn test_certificates() {
    let validator1_key_pair = ValidatorKeypair::generate();
    let account1_secret = AccountSecretKey::Ed25519(Ed25519SecretKey::generate());
    let validator2_key_pair = ValidatorKeypair::generate();
    let account2_secret = AccountSecretKey::Secp256k1(Secp256k1SecretKey::generate());
    // validator3 is deliberately NOT included in the committee below.
    let validator3_key_pair = ValidatorKeypair::generate();
    let committee = Committee::make_simple(vec![
        (validator1_key_pair.public_key, account1_secret.public()),
        (validator2_key_pair.public_key, account2_secret.public()),
    ]);
    let block = BlockExecutionOutcome {
        messages: vec![Vec::new()],
        previous_message_blocks: BTreeMap::new(),
        previous_event_blocks: BTreeMap::new(),
        state_hash: CryptoHash::test_hash("state"),
        oracle_responses: vec![Vec::new()],
        events: vec![Vec::new()],
        blobs: vec![Vec::new()],
        operation_results: vec![OperationResult::default()],
    }
    .with(make_first_block(dummy_chain_id(1)).with_simple_transfer(dummy_chain_id(1), Amount::ONE));
    let value = ConfirmedBlock::new(block);
    let v1 = LiteVote::new(
        LiteValue::new(&value),
        Round::Fast,
        &validator1_key_pair.secret_key,
    );
    let v2 = LiteVote::new(
        LiteValue::new(&value),
        Round::Fast,
        &validator2_key_pair.secret_key,
    );
    let v3 = LiteVote::new(
        LiteValue::new(&value),
        Round::Fast,
        &validator3_key_pair.secret_key,
    );
    let mut builder = SignatureAggregator::new(value.clone(), Round::Fast, &committee);
    // One signature out of two is not a quorum yet: no certificate is produced.
    assert!(builder
        .append(validator1_key_pair.public_key, v1.signature)
        .unwrap()
        .is_none());
    // The second signature completes the quorum and yields a certificate.
    let mut c = builder
        .append(validator2_key_pair.public_key, v2.signature)
        .unwrap()
        .unwrap();
    assert!(c.check(&committee).is_ok());
    // Dropping a signature invalidates the certificate.
    c.signatures_mut().pop();
    assert!(c.check(&committee).is_err());
    let mut builder = SignatureAggregator::new(value, Round::Fast, &committee);
    assert!(builder
        .append(validator1_key_pair.public_key, v1.signature)
        .unwrap()
        .is_none());
    // A signature from a validator outside the committee is rejected.
    assert!(builder
        .append(validator3_key_pair.public_key, v3.signature)
        .is_err());
}
#[test]
fn round_ordering() {
    // Rounds must be ordered Fast < MultiLeader(_) < SingleLeader(_) < Validator(_),
    // and by round number within each family. Checking consecutive pairs of this
    // chain covers every comparison the previous version asserted, and closes its
    // gaps (e.g. MultiLeader(0) vs. MultiLeader(1) was never compared).
    let rounds = [
        Round::Fast,
        Round::MultiLeader(0),
        Round::MultiLeader(1),
        Round::MultiLeader(2),
        Round::SingleLeader(0),
        Round::SingleLeader(1),
        Round::SingleLeader(2),
        Round::Validator(0),
        Round::Validator(1),
        Round::Validator(2),
    ];
    for pair in rounds.windows(2) {
        assert!(pair[0] < pair[1]);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/unit_tests/inbox_tests.rs | linera-chain/src/unit_tests/inbox_tests.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use assert_matches::assert_matches;
use linera_base::{crypto::CryptoHash, data_types::Timestamp, identifiers::ApplicationId};
use linera_execution::{Message, MessageKind};
use super::*;
use crate::test::MessageTestExt as _;
/// Builds a [`MessageBundle`] carrying a single simple user message, for the given
/// certificate hash, block height and transaction index.
fn make_bundle(
    certificate_hash: CryptoHash,
    height: u64,
    index: u32,
    message: impl Into<Vec<u8>>,
) -> MessageBundle {
    let user_message = Message::User {
        application_id: ApplicationId::default(),
        bytes: message.into(),
    };
    let posted = user_message.to_posted(index, MessageKind::Simple);
    MessageBundle {
        certificate_hash,
        height: BlockHeight::from(height),
        timestamp: Timestamp::default(),
        transaction_index: index,
        messages: vec![posted],
    }
}
/// Like [`make_bundle`], but marks the message as `Protected` so the bundle cannot
/// be skipped.
fn make_unskippable_bundle(
    certificate_hash: CryptoHash,
    height: u64,
    index: u32,
    message: impl Into<Vec<u8>>,
) -> MessageBundle {
    let mut bundle = make_bundle(certificate_hash, height, index, message);
    let first_message = &mut bundle.messages[0];
    first_message.kind = MessageKind::Protected;
    bundle
}
#[tokio::test]
async fn test_inbox_add_then_remove_skippable() {
    let hash = CryptoHash::test_hash("1");
    let mut view = InboxStateView::new().await;
    // Add one bundle.
    assert!(view.add_bundle(make_bundle(hash, 0, 0, [0])).await.unwrap());
    // Remove the same bundle.
    assert!(view
        .remove_bundle(&make_bundle(hash, 0, 0, [0]))
        .await
        .unwrap());
    // Fail to add an old bundle: positions only move forward.
    assert_matches!(
        view.add_bundle(make_bundle(hash, 0, 0, [0])).await,
        Err(InboxError::IncorrectOrder { .. })
    );
    // Fail to remove an old bundle.
    assert_matches!(
        view.remove_bundle(&make_bundle(hash, 0, 0, [0])).await,
        Err(InboxError::IncorrectOrder { .. })
    );
    // Add two more bundles: (height 0, index 1) and (height 1, index 0).
    assert!(view.add_bundle(make_bundle(hash, 0, 1, [1])).await.unwrap());
    assert!(view.add_bundle(make_bundle(hash, 1, 0, [2])).await.unwrap());
    // Fail to remove a bundle whose payload differs from what was added.
    assert_matches!(
        view.remove_bundle(&make_bundle(hash, 0, 1, [0])).await,
        Err(InboxError::UnexpectedBundle { .. })
    );
    // Fail to remove non-matching bundle (differing certificate hash).
    assert_matches!(
        view.remove_bundle(&make_bundle(CryptoHash::test_hash("2"), 0, 1, [1]))
            .await,
        Err(InboxError::UnexpectedBundle { .. })
    );
    // OK to skip bundles: removing (1, 0) also drops the skippable (0, 1).
    assert!(view
        .remove_bundle(&make_bundle(hash, 1, 0, [2]))
        .await
        .unwrap());
    // Inbox is empty again.
    assert_eq!(view.added_bundles.count(), 0);
    assert_eq!(view.removed_bundles.count(), 0);
}
#[tokio::test]
async fn test_inbox_remove_then_add_skippable() {
let hash = CryptoHash::test_hash("1");
let mut view = InboxStateView::new().await;
// Remove one bundle by anticipation.
assert!(!view
.remove_bundle(&make_bundle(hash, 0, 0, [0]))
.await
.unwrap());
// Add the same bundle
assert!(!view.add_bundle(make_bundle(hash, 0, 0, [0])).await.unwrap());
// Fail to remove an old bundle.
assert_matches!(
view.remove_bundle(&make_bundle(hash, 0, 0, [0])).await,
Err(InboxError::IncorrectOrder { .. })
);
// Fail to add an old bundle.
assert_matches!(
view.add_bundle(make_bundle(hash, 0, 0, [0])).await,
Err(InboxError::IncorrectOrder { .. })
);
// Remove two more bundles.
assert!(!view
.remove_bundle(&make_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(!view
.remove_bundle(&make_bundle(hash, 1, 1, [3]))
.await
.unwrap());
// Fail to add non-matching bundle.
assert_matches!(
view.add_bundle(make_bundle(hash, 0, 1, [0])).await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to add non-matching bundle (hash).
assert_matches!(
view.add_bundle(make_bundle(CryptoHash::test_hash("2"), 0, 1, [1]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// NOT OK to forget about previous consumed bundles while backfilling.
assert_matches!(
view.add_bundle(make_bundle(hash, 1, 0, [2])).await,
Err(InboxError::UnexpectedBundle { .. })
);
// OK to backfill the two consumed bundles, with one skippable bundle in the middle.
assert!(!view.add_bundle(make_bundle(hash, 0, 1, [1])).await.unwrap());
// Cannot add an unskippable bundle that was visibly skipped already.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 1, 0, [2]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
assert!(!view.add_bundle(make_bundle(hash, 1, 0, [2])).await.unwrap());
assert!(!view.add_bundle(make_bundle(hash, 1, 1, [3])).await.unwrap());
// Inbox is empty again.
assert_eq!(view.added_bundles.count(), 0);
assert_eq!(view.removed_bundles.count(), 0);
}
#[tokio::test]
async fn test_inbox_add_then_remove_unskippable() {
let hash = CryptoHash::test_hash("1");
let mut view = InboxStateView::new().await;
// Add one bundle.
assert!(view
.add_bundle(make_unskippable_bundle(hash, 0, 0, [0]))
.await
.unwrap());
// Remove the same bundle
assert!(view
.remove_bundle(&make_unskippable_bundle(hash, 0, 0, [0]))
.await
.unwrap());
// Fail to add an old bundle.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 0, 0, [0]))
.await,
Err(InboxError::IncorrectOrder { .. })
);
// Fail to remove an old bundle.
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(hash, 0, 0, [0]))
.await,
Err(InboxError::IncorrectOrder { .. })
);
// Add two more bundles.
assert!(view
.add_bundle(make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(view
.add_bundle(make_unskippable_bundle(hash, 1, 0, [2]))
.await
.unwrap());
// Fail to remove non-matching bundle.
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(hash, 0, 1, [0]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to remove non-matching bundle (hash).
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(
CryptoHash::test_hash("2"),
0,
1,
[1]
))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to skip unskippable bundle.
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(hash, 1, 0, [2])).await,
Err(InboxError::UnskippableBundle { bundle })
if bundle == make_unskippable_bundle(hash, 0, 1, [1])
);
assert!(view
.remove_bundle(&make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(view
.remove_bundle(&make_unskippable_bundle(hash, 1, 0, [2]))
.await
.unwrap());
// Inbox is empty again.
assert_eq!(view.added_bundles.count(), 0);
assert_eq!(view.removed_bundles.count(), 0);
}
#[tokio::test]
async fn test_inbox_remove_then_add_unskippable() {
let hash = CryptoHash::test_hash("1");
let mut view = InboxStateView::new().await;
// Remove one bundle by anticipation.
assert!(!view
.remove_bundle(&make_unskippable_bundle(hash, 0, 0, [0]))
.await
.unwrap());
// Add the same bundle
assert!(!view
.add_bundle(make_unskippable_bundle(hash, 0, 0, [0]))
.await
.unwrap());
// Fail to remove an old bundle.
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(hash, 0, 0, [0]))
.await,
Err(InboxError::IncorrectOrder { .. })
);
// Fail to add an old bundle.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 0, 0, [0]))
.await,
Err(InboxError::IncorrectOrder { .. })
);
// Remove two more bundles.
assert!(!view
.remove_bundle(&make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(!view
.remove_bundle(&make_unskippable_bundle(hash, 1, 1, [3]))
.await
.unwrap());
// Fail to add non-matching bundle.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 0, 1, [0]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to add non-matching bundle (hash).
assert_matches!(
view.add_bundle(make_unskippable_bundle(
CryptoHash::test_hash("2"),
0,
1,
[1]
))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// NOT OK to forget about previous consumed bundles while backfilling.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 1, 1, [3]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// OK to add the two bundles.
assert!(!view
.add_bundle(make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
// Cannot add an unskippable bundle that was visibly skipped already.
assert_matches!(
view.add_bundle(make_unskippable_bundle(hash, 1, 0, [2]))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
assert!(!view
.add_bundle(make_unskippable_bundle(hash, 1, 1, [3]))
.await
.unwrap());
// Inbox is empty again.
assert_eq!(view.added_bundles.count(), 0);
assert_eq!(view.removed_bundles.count(), 0);
}
#[tokio::test]
async fn test_inbox_add_then_remove_mixed() {
let hash = CryptoHash::test_hash("1");
let mut view = InboxStateView::new().await;
// Add two bundles.
assert!(view
.add_bundle(make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(view.add_bundle(make_bundle(hash, 1, 0, [2])).await.unwrap());
// Fail to remove non-matching bundle (skippability).
assert_matches!(
view.remove_bundle(&make_bundle(hash, 0, 1, [1])).await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to remove non-matching bundle (hash).
assert_matches!(
view.remove_bundle(&make_unskippable_bundle(
CryptoHash::test_hash("2"),
0,
1,
[1]
))
.await,
Err(InboxError::UnexpectedBundle { .. })
);
// Fail to skip unskippable bundle.
assert_matches!(
view.remove_bundle(&make_bundle(hash, 1, 0, [2])).await,
Err(InboxError::UnskippableBundle { bundle })
if bundle == make_unskippable_bundle(hash, 0, 1, [1])
);
assert!(view
.remove_bundle(&make_unskippable_bundle(hash, 0, 1, [1]))
.await
.unwrap());
assert!(view
.remove_bundle(&make_bundle(hash, 1, 0, [2]))
.await
.unwrap());
// Inbox is empty again.
assert_eq!(view.added_bundles.count(), 0);
assert_eq!(view.removed_bundles.count(), 0);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/unit_tests/chain_tests.rs | linera-chain/src/unit_tests/chain_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![allow(clippy::large_futures)]
use std::{
collections::{BTreeMap, BTreeSet},
thread,
};
use assert_matches::assert_matches;
use axum::{routing::get, Router};
use linera_base::{
crypto::{AccountPublicKey, ValidatorPublicKey},
data_types::{
Amount, ApplicationDescription, ApplicationPermissions, Blob, BlockHeight, Bytecode,
ChainDescription, ChainOrigin, Epoch, InitialChainConfig, Timestamp,
},
http,
identifiers::{Account, AccountOwner, ApplicationId, ChainId, ModuleId},
ownership::ChainOwnership,
time::{Duration, Instant},
vm::VmRuntime,
};
use linera_execution::{
committee::{Committee, ValidatorState},
test_utils::{ExpectedCall, MockApplication},
BaseRuntime, ContractRuntime, ExecutionError, ExecutionRuntimeConfig, ExecutionRuntimeContext,
Operation, ResourceControlPolicy, ServiceRuntime, SystemOperation, TestExecutionRuntimeContext,
};
use linera_views::{
context::{Context as _, MemoryContext, ViewContext},
memory::MemoryStore,
views::View,
};
use test_case::test_case;
use crate::{
block::{Block, ConfirmedBlock},
data_types::{BlockExecutionOutcome, ProposedBlock},
test::{make_child_block, make_first_block, BlockTestExt, HttpServer},
ChainError, ChainExecutionContext, ChainStateView,
};
impl ChainStateView<MemoryContext<TestExecutionRuntimeContext>> {
pub async fn new(chain_id: ChainId) -> Self {
let exec_runtime_context =
TestExecutionRuntimeContext::new(chain_id, ExecutionRuntimeConfig::default());
let context = MemoryContext::new_for_testing(exec_runtime_context);
Self::load(context)
.await
.expect("Loading from memory should work")
}
}
struct TestEnvironment {
admin_chain_description: ChainDescription,
created_descriptions: BTreeMap<ChainId, ChainDescription>,
}
impl TestEnvironment {
fn new() -> Self {
let config = InitialChainConfig {
ownership: ChainOwnership::single(AccountPublicKey::test_key(0).into()),
epoch: Epoch::ZERO,
min_active_epoch: Epoch::ZERO,
max_active_epoch: Epoch::ZERO,
balance: Amount::from_tokens(10),
application_permissions: Default::default(),
};
let origin = ChainOrigin::Root(0);
let admin_chain_description = ChainDescription::new(origin, config, Default::default());
let admin_id = admin_chain_description.id();
Self {
admin_chain_description: admin_chain_description.clone(),
created_descriptions: [(admin_id, admin_chain_description)].into_iter().collect(),
}
}
fn admin_id(&self) -> ChainId {
self.admin_chain_description.id()
}
fn description_blobs(&self) -> impl Iterator<Item = Blob> + '_ {
self.created_descriptions
.values()
.map(Blob::new_chain_description)
}
fn make_open_chain_config(&self) -> InitialChainConfig {
self.admin_chain_description.config().clone()
}
fn make_app_description(&self) -> (ApplicationDescription, Blob, Blob) {
let contract = Bytecode::new(b"contract".into());
let service = Bytecode::new(b"service".into());
self.make_app_from_bytecodes(contract, service)
}
fn make_app_from_bytecodes(
&self,
contract: Bytecode,
service: Bytecode,
) -> (ApplicationDescription, Blob, Blob) {
let contract_blob = Blob::new_contract_bytecode(contract.compress());
let service_blob = Blob::new_service_bytecode(service.compress());
let vm_runtime = VmRuntime::Wasm;
let module_id = ModuleId::new(contract_blob.id().hash, service_blob.id().hash, vm_runtime);
(
ApplicationDescription {
module_id,
creator_chain_id: self.admin_id(),
block_height: BlockHeight(2),
application_index: 0,
required_application_ids: vec![],
parameters: vec![],
},
contract_blob,
service_blob,
)
}
fn make_child_chain_description_with_config(
&mut self,
height: u64,
config: InitialChainConfig,
) -> ChainDescription {
let origin = ChainOrigin::Child {
parent: self.admin_id(),
block_height: BlockHeight(height),
chain_index: 0,
};
let description = ChainDescription::new(origin, config, Timestamp::from(0));
self.created_descriptions
.insert(description.id(), description.clone());
description
}
}
fn committee_blob(policy: ResourceControlPolicy) -> Blob {
let committee = Committee::new(
BTreeMap::from([(
ValidatorPublicKey::test_key(1),
ValidatorState {
network_address: ValidatorPublicKey::test_key(1).to_string(),
votes: 1,
account_public_key: AccountPublicKey::test_key(1),
},
)]),
policy,
);
Blob::new_committee(bcs::to_bytes(&committee).expect("serializing a committee should succeed"))
}
#[tokio::test]
async fn test_block_size_limit() -> anyhow::Result<()> {
let mut env = TestEnvironment::new();
let time = Timestamp::from(0);
// The size of the executed valid block below.
let maximum_block_size = 260;
let config = env.make_open_chain_config();
let chain_desc = env.make_child_chain_description_with_config(3, config);
let chain_id = chain_desc.id();
let owner = chain_desc
.config()
.ownership
.all_owners()
.next()
.copied()
.unwrap();
let mut chain = ChainStateView::new(chain_id).await;
let policy = ResourceControlPolicy {
maximum_block_size,
..ResourceControlPolicy::default()
};
chain
.context()
.extra()
.add_blobs([committee_blob(policy)])
.await?;
chain
.context()
.extra()
.add_blobs(env.description_blobs())
.await?;
// Initialize the chain.
chain.initialize_if_needed(time).await.unwrap();
let valid_block = make_first_block(chain_id)
.with_authenticated_owner(Some(owner))
.with_operation(SystemOperation::Transfer {
owner: AccountOwner::CHAIN,
recipient: Account::chain(env.admin_id()),
amount: Amount::ONE,
});
// Any block larger than the valid block is rejected.
let invalid_block = valid_block
.clone()
.with_operation(SystemOperation::Transfer {
owner: AccountOwner::CHAIN,
recipient: Account::chain(env.admin_id()),
amount: Amount::ONE,
});
let result = chain
.execute_block(&invalid_block, time, None, &[], None)
.await;
assert_matches!(
result,
Err(ChainError::ExecutionError(
execution_error,
ChainExecutionContext::Operation(1),
)) if matches!(*execution_error, ExecutionError::BlockTooLarge)
);
// The valid block is accepted...
let outcome = chain
.execute_block(&valid_block, time, None, &[], None)
.await
.unwrap();
let block = Block::new(valid_block, outcome);
// ...because its size is at the allowed limit.
assert_eq!(
bcs::serialized_size(&block).unwrap(),
maximum_block_size as usize
);
Ok(())
}
#[tokio::test]
async fn test_application_permissions() -> anyhow::Result<()> {
let mut env = TestEnvironment::new();
let time = Timestamp::from(0);
// Create a mock application.
let (app_description, contract_blob, service_blob) = env.make_app_description();
let application_id = ApplicationId::from(&app_description);
let application = MockApplication::default();
let (another_app, another_contract, another_service) = env.make_app_from_bytecodes(
Bytecode::new(b"contractB".into()),
Bytecode::new(b"serviceB".into()),
);
let another_app_id = ApplicationId::from(&another_app);
let config = InitialChainConfig {
application_permissions: ApplicationPermissions::new_multiple(vec![
application_id,
another_app_id,
]),
..env.make_open_chain_config()
};
let chain_desc = env.make_child_chain_description_with_config(3, config);
let chain_id = chain_desc.id();
let mut chain = ChainStateView::new(chain_id).await;
let context = chain.context();
let extra = context.extra();
{
let pinned = extra.user_contracts().pin();
pinned.insert(application_id, application.clone().into());
pinned.insert(another_app_id, application.clone().into());
}
extra
.add_blobs([committee_blob(Default::default())])
.await?;
extra.add_blobs(env.description_blobs()).await?;
extra
.add_blobs([
contract_blob,
service_blob,
Blob::new_application_description(&app_description),
])
.await?;
extra
.add_blobs([
another_contract,
another_service,
Blob::new_application_description(&another_app),
])
.await?;
// Initialize the chain, with a chain application.
chain.initialize_if_needed(time).await?;
// An operation that doesn't belong to the app isn't allowed.
let invalid_block = make_first_block(chain_id).with_simple_transfer(chain_id, Amount::ONE);
let result = chain
.execute_block(&invalid_block, time, None, &[], None)
.await;
assert_matches!(result, Err(ChainError::AuthorizedApplications(app_ids))
if app_ids == vec![application_id, another_app_id]
);
// After registering, an app operation can already be used in the first block.
application.expect_call(ExpectedCall::execute_operation(|_, _| Ok(vec![])));
application.expect_call(ExpectedCall::default_finalize());
application.expect_call(ExpectedCall::execute_operation(|_, _| Ok(vec![])));
application.expect_call(ExpectedCall::default_finalize());
let app_operation = Operation::User {
application_id,
bytes: b"foo".to_vec(),
};
let another_app_operation = Operation::User {
application_id: another_app_id,
bytes: b"bar".to_vec(),
};
let valid_block = make_first_block(chain_id)
.with_operation(app_operation.clone())
.with_operation(another_app_operation.clone());
let outcome = chain
.execute_block(&valid_block, time, None, &[], None)
.await?;
let value = ConfirmedBlock::new(outcome.with(valid_block));
chain.apply_confirmed_block(&value, time).await?;
// In the second block, other operations are still not allowed.
let invalid_block = make_child_block(&value.clone())
.with_simple_transfer(chain_id, Amount::ONE)
.with_operation(app_operation.clone());
let result = chain
.execute_block(&invalid_block, time, None, &[], None)
.await;
assert_matches!(result, Err(ChainError::AuthorizedApplications(app_ids))
if app_ids == vec![application_id, another_app_id]
);
// Also, blocks without all authorized applications operation, or incoming message, are forbidden.
let invalid_block = make_child_block(&value).with_operation(another_app_operation.clone());
let result = chain
.execute_block(&invalid_block, time, None, &[], None)
.await;
assert_matches!(result, Err(ChainError::MissingMandatoryApplications(app_ids))
if app_ids == vec![application_id]
);
// But app operations continue to work.
application.expect_call(ExpectedCall::execute_operation(|_, _| Ok(vec![])));
application.expect_call(ExpectedCall::default_finalize());
application.expect_call(ExpectedCall::execute_operation(|_, _| Ok(vec![])));
application.expect_call(ExpectedCall::default_finalize());
let valid_block = make_child_block(&value)
.with_operation(app_operation.clone())
.with_operation(another_app_operation.clone());
let outcome = chain
.execute_block(&valid_block, time, None, &[], None)
.await?;
let value = ConfirmedBlock::new(outcome.with(valid_block));
chain.apply_confirmed_block(&value, time).await?;
Ok(())
}
/// Tests if services can execute as oracles if the total execution time is less than the limit.
#[test_case(&[100]; "single service as oracle call")]
#[test_case(&[50, 50]; "two service as oracle calls")]
#[test_case(&[90, 10]; "long and short service as oracle calls")]
#[test_case(&[33, 33, 33]; "three service as oracle calls")]
#[tokio::test]
async fn test_service_as_oracles(service_oracle_execution_times_ms: &[u64]) -> anyhow::Result<()> {
let maximum_service_oracle_execution_ms = 300;
let service_oracle_call_count = service_oracle_execution_times_ms.len();
let service_oracle_execution_times = service_oracle_execution_times_ms
.iter()
.copied()
.map(Duration::from_millis);
let (application, application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_service_oracle_execution_ms,
..ResourceControlPolicy::default()
})
.await?;
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
for _ in 0..service_oracle_call_count {
runtime.query_service(application_id, vec![])?;
}
Ok(vec![])
}));
for service_oracle_execution_time in service_oracle_execution_times {
application.expect_call(ExpectedCall::handle_query(move |_, _| {
thread::sleep(service_oracle_execution_time);
Ok(vec![])
}));
}
application.expect_call(ExpectedCall::default_finalize());
chain.execute_block(&block, time, None, &[], None).await?;
Ok(())
}
/// Tests if execution fails if services executing as oracles exceed the time limit.
#[test_case(&[120]; "single service as oracle call")]
#[test_case(&[60, 60]; "two service as oracle calls")]
#[test_case(&[105, 15]; "long and short service as oracle calls")]
#[test_case(&[50, 50, 50]; "three service as oracle calls")]
#[test_case(&[60, 60, 60]; "first two service as oracle calls exceeds limit")]
#[tokio::test]
async fn test_service_as_oracle_exceeding_time_limit(
service_oracle_execution_times_ms: &[u64],
) -> anyhow::Result<()> {
let maximum_service_oracle_execution_ms = 110;
let service_oracle_call_count = service_oracle_execution_times_ms.len();
let service_oracle_execution_times = service_oracle_execution_times_ms
.iter()
.copied()
.map(Duration::from_millis);
let (application, application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_service_oracle_execution_ms,
..ResourceControlPolicy::default()
})
.await?;
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
for _ in 0..service_oracle_call_count {
runtime.query_service(application_id, vec![])?;
}
Ok(vec![])
}));
for service_oracle_execution_time in service_oracle_execution_times {
application.expect_call(ExpectedCall::handle_query(move |_, _| {
thread::sleep(service_oracle_execution_time);
Ok(vec![])
}));
}
application.expect_call(ExpectedCall::default_finalize());
let result = chain.execute_block(&block, time, None, &[], None).await;
let Err(ChainError::ExecutionError(execution_error, ChainExecutionContext::Operation(0))) =
result
else {
panic!("Expected a block execution error, got: {result:#?}");
};
assert_matches!(
*execution_error,
ExecutionError::MaximumServiceOracleExecutionTimeExceeded
);
Ok(())
}
/// Tests if execution fails early if services call `check_execution_time`.
#[test_case(&[1200]; "single service as oracle call")]
#[test_case(&[600, 600]; "two service as oracle calls")]
#[test_case(&[1050, 150]; "long and short service as oracle calls")]
#[test_case(&[500, 500, 500]; "three service as oracle calls")]
#[test_case(&[600, 600, 600]; "first two service as oracle calls exceeds limit")]
#[tokio::test]
async fn test_service_as_oracle_timeout_early_stop(
service_oracle_execution_times_ms: &[u64],
) -> anyhow::Result<()> {
let maximum_service_oracle_execution_ms = 700;
let poll_interval = Duration::from_millis(100);
let maximum_expected_execution_time =
Duration::from_millis(maximum_service_oracle_execution_ms) + 2 * poll_interval;
let service_oracle_call_count = service_oracle_execution_times_ms.len();
let service_oracle_execution_times = service_oracle_execution_times_ms
.iter()
.copied()
.map(Duration::from_millis);
let (application, application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_service_oracle_execution_ms,
..ResourceControlPolicy::default()
})
.await?;
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
for _ in 0..service_oracle_call_count {
runtime.query_service(application_id, vec![])?;
}
Ok(vec![])
}));
for service_oracle_execution_time in service_oracle_execution_times {
application.expect_call(ExpectedCall::handle_query(move |runtime, _| {
let execution_time = Instant::now();
while execution_time.elapsed() < service_oracle_execution_time {
runtime.check_execution_time()?;
thread::sleep(poll_interval);
}
Ok(vec![])
}));
}
application.expect_call(ExpectedCall::default_finalize());
let execution_start = Instant::now();
let result = chain.execute_block(&block, time, None, &[], None).await;
let execution_time = execution_start.elapsed();
let Err(ChainError::ExecutionError(execution_error, ChainExecutionContext::Operation(0))) =
result
else {
panic!("Expected a block execution error, got: {result:#?}");
};
assert_matches!(
*execution_error,
ExecutionError::MaximumServiceOracleExecutionTimeExceeded
);
assert!(execution_time <= maximum_expected_execution_time);
Ok(())
}
/// Tests service-as-oracle response size limit.
#[test_case(50, 49 => matches Ok(_); "smaller than limit")]
#[test_case(
50, 51
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::ServiceOracleResponseTooLarge);
"larger than limit"
)]
#[tokio::test]
async fn test_service_as_oracle_response_size_limit(
limit: u64,
response_size: usize,
) -> Result<BlockExecutionOutcome, ChainError> {
let (application, application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_oracle_response_bytes: limit,
..ResourceControlPolicy::default()
})
.await
.expect("Failed to set up test with mock application");
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
runtime.query_service(application_id, vec![])?;
Ok(vec![])
}));
application.expect_call(ExpectedCall::handle_query(move |_runtime, _| {
Ok(vec![0; response_size])
}));
application.expect_call(ExpectedCall::default_finalize());
chain.execute_block(&block, time, None, &[], None).await
}
/// Tests contract HTTP response size limit.
#[test_case(150, 140, 139 => matches Ok(_); "smaller than both limits")]
#[test_case(
150, 140, 141
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::HttpResponseSizeLimitExceeded { .. });
"larger than http limit"
)]
#[test_case(
140, 150, 142
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::HttpResponseSizeLimitExceeded { .. });
"larger than oracle limit"
)]
#[test_case(
140, 150, 1000
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::HttpResponseSizeLimitExceeded { .. });
"larger than both limits"
)]
#[tokio::test]
async fn test_contract_http_response_size_limit(
oracle_limit: u64,
http_limit: u64,
response_size: usize,
) -> Result<BlockExecutionOutcome, ChainError> {
let response_header_size = 84;
let response_body_size = response_size - response_header_size;
let http_server = HttpServer::start(Router::new().route(
"/",
get(move || async move { vec![b'a'; response_body_size] }),
))
.await
.expect("Failed to start test HTTP server");
let (application, _application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_oracle_response_bytes: oracle_limit,
maximum_http_response_bytes: http_limit,
http_request_allow_list: BTreeSet::from_iter([http_server.hostname()]),
..ResourceControlPolicy::default()
})
.await
.expect("Failed to set up test with mock application");
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
runtime.perform_http_request(http::Request::get(http_server.url()))?;
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
chain.execute_block(&block, time, None, &[], None).await
}
/// Tests service HTTP response size limit.
#[test_case(150, 140, 139 => matches Ok(_); "smaller than both limits")]
#[test_case(140, 150, 142 => matches Ok(_); "larger than oracle limit")]
#[test_case(
150, 140, 141
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::HttpResponseSizeLimitExceeded { .. });
"larger than http limit"
)]
#[test_case(
140, 150, 1000
=> matches Err(ChainError::ExecutionError(execution_error, _))
if matches!(*execution_error, ExecutionError::HttpResponseSizeLimitExceeded { .. });
"larger than both limits"
)]
#[tokio::test]
async fn test_service_http_response_size_limit(
oracle_limit: u64,
http_limit: u64,
response_size: usize,
) -> Result<BlockExecutionOutcome, ChainError> {
let response_header_size = 84;
let response_body_size = response_size - response_header_size;
let http_server = HttpServer::start(Router::new().route(
"/",
get(move || async move { vec![b'a'; response_body_size] }),
))
.await
.expect("Failed to start test HTTP server");
let (application, application_id, mut chain, block, time) =
prepare_test_with_dummy_mock_application(ResourceControlPolicy {
maximum_oracle_response_bytes: oracle_limit,
maximum_http_response_bytes: http_limit,
http_request_allow_list: BTreeSet::from_iter([http_server.hostname()]),
..ResourceControlPolicy::default()
})
.await
.expect("Failed to set up test with mock application");
application.expect_call(ExpectedCall::execute_operation(move |runtime, _| {
runtime.query_service(application_id, vec![])?;
Ok(vec![])
}));
application.expect_call(ExpectedCall::handle_query(move |runtime, _| {
runtime.perform_http_request(http::Request::get(http_server.url()))?;
Ok(vec![])
}));
application.expect_call(ExpectedCall::default_finalize());
chain.execute_block(&block, time, None, &[], None).await
}
/// Sets up a test with a dummy [`MockApplication`].
///
/// Creates and initializes a [`ChainStateView`] configured with the
/// `maximum_service_oracle_execution_ms` policy. Registers the dummy application on the chain, and
/// creates a block proposal with a dummy operation.
async fn prepare_test_with_dummy_mock_application(
policy: ResourceControlPolicy,
) -> anyhow::Result<(
MockApplication,
ApplicationId,
ChainStateView<ViewContext<TestExecutionRuntimeContext, MemoryStore>>,
ProposedBlock,
Timestamp,
)> {
let mut env = TestEnvironment::new();
let time = Timestamp::from(0);
let config = env.make_open_chain_config();
let committee_blob = committee_blob(policy);
let chain_desc = env.make_child_chain_description_with_config(3, config);
let chain_id = chain_desc.id();
let mut chain = ChainStateView::new(chain_id).await;
chain
.context()
.extra()
.add_blobs([committee_blob.clone()])
.await?;
chain
.context()
.extra()
.add_blobs(env.description_blobs())
.await?;
chain.initialize_if_needed(time).await?;
// Create a mock application.
let (app_description, contract_blob, service_blob) = env.make_app_description();
let application_id = ApplicationId::from(&app_description);
let application = MockApplication::default();
let context = chain.context();
let extra = context.extra();
{
let pinned = extra.user_contracts().pin();
pinned.insert(application_id, application.clone().into());
}
{
let pinned = extra.user_services().pin();
pinned.insert(application_id, application.clone().into());
}
extra
.add_blobs([
committee_blob,
contract_blob,
service_blob,
Blob::new_application_description(&app_description),
])
.await?;
let block = make_first_block(chain_id).with_operation(Operation::User {
application_id,
bytes: vec![],
});
Ok((application, application_id, chain, block, time))
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/test/http_server.rs | linera-chain/src/test/http_server.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A simple HTTP server to use for testing.
use std::{future::IntoFuture, net::Ipv4Addr};
use axum::Router;
use futures::FutureExt as _;
use tokio::{net::TcpListener, sync::oneshot};
/// A handle to a running HTTP server.
///
/// The server is gracefully shutdown when this handle is dropped.
pub struct HttpServer {
port: u16,
_shutdown_sender: oneshot::Sender<()>,
}
impl HttpServer {
    /// Spawns a task with an HTTP server serving the routes defined by the [`Router`].
    ///
    /// Returns an [`HttpServer`] handle to keep the server running in the background.
    pub async fn start(router: Router) -> anyhow::Result<Self> {
        // Create the shutdown channel first; dropping the sender later triggers shutdown.
        let (sender, receiver) = oneshot::channel();
        // Bind to an ephemeral port on the loopback interface.
        let listener = TcpListener::bind((Ipv4Addr::LOCALHOST, 0)).await?;
        let port = listener.local_addr()?.port();
        let serve = axum::serve(listener, router)
            .with_graceful_shutdown(receiver.map(|_| ()))
            .into_future();
        tokio::spawn(serve);
        Ok(HttpServer {
            port,
            _shutdown_sender: sender,
        })
    }

    /// Returns the URL string this HTTP server is listening on.
    pub fn url(&self) -> String {
        let (hostname, port) = (self.hostname(), self.port());
        format!("http://{hostname}:{port}")
    }

    /// Returns the hostname of this HTTP server.
    pub fn hostname(&self) -> String {
        String::from("localhost")
    }

    /// Returns the port this HTTP server is listening on.
    pub fn port(&self) -> u16 {
        self.port
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/test/mod.rs | linera-chain/src/test/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Test utilities
mod http_server;
use linera_base::{
crypto::{AccountPublicKey, Signer, ValidatorPublicKey},
data_types::{Amount, BlockHeight, Epoch, Round, Timestamp},
identifiers::{Account, AccountOwner, ChainId},
};
use linera_execution::{
committee::{Committee, ValidatorState},
Message, MessageKind, Operation, ResourceControlPolicy, SystemOperation,
};
pub use self::http_server::HttpServer;
use crate::{
block::ConfirmedBlock,
data_types::{
BlockProposal, IncomingBundle, PostedMessage, ProposedBlock, SignatureAggregator,
Transaction, Vote,
},
types::{CertificateValue, GenericCertificate},
};
/// Creates a new child of the given block, with the same timestamp.
pub fn make_child_block(parent: &ConfirmedBlock) -> ProposedBlock {
    let header = &parent.block().header;
    // The child extends the parent by exactly one height.
    let height = header.height.try_add_one().unwrap();
    ProposedBlock {
        epoch: header.epoch,
        chain_id: header.chain_id,
        transactions: Vec::new(),
        previous_block_hash: Some(parent.hash()),
        height,
        authenticated_owner: header.authenticated_owner,
        timestamp: header.timestamp,
    }
}
/// Creates a block at height 0 for a new chain.
pub fn make_first_block(chain_id: ChainId) -> ProposedBlock {
    ProposedBlock {
        chain_id,
        epoch: Epoch::ZERO,
        height: BlockHeight::ZERO,
        timestamp: Timestamp::default(),
        transactions: Vec::new(),
        previous_block_hash: None,
        authenticated_owner: None,
    }
}
/// A helper trait to simplify constructing blocks for tests.
///
/// All `with_*` methods consume `self` and return the updated block, so they can
/// be chained fluently.
#[allow(async_fn_in_trait)]
pub trait BlockTestExt: Sized {
    /// Returns the block with the given authenticated owner.
    fn with_authenticated_owner(self, authenticated_owner: Option<AccountOwner>) -> Self;
    /// Returns the block with the given operation appended at the end.
    fn with_operation(self, operation: impl Into<Operation>) -> Self;
    /// Returns the block with a transfer operation appended at the end.
    fn with_transfer(self, owner: AccountOwner, recipient: Account, amount: Amount) -> Self;
    /// Returns the block with a simple transfer operation appended at the end.
    fn with_simple_transfer(self, chain_id: ChainId, amount: Amount) -> Self;
    /// Returns the block with the given message appended at the end.
    fn with_incoming_bundle(self, incoming_bundle: IncomingBundle) -> Self;
    /// Returns the block with the given messages appended at the end.
    fn with_incoming_bundles(
        self,
        incoming_bundles: impl IntoIterator<Item = IncomingBundle>,
    ) -> Self;
    /// Returns the block with the specified timestamp.
    fn with_timestamp(self, timestamp: impl Into<Timestamp>) -> Self;
    /// Returns the block with the specified epoch.
    fn with_epoch(self, epoch: impl Into<Epoch>) -> Self;
    /// Returns the block with the burn operation (transfer to a special address) appended at the end.
    fn with_burn(self, amount: Amount) -> Self;
    /// Returns a block proposal in the first round in a default ownership configuration
    /// (`Round::MultiLeader(0)`) without any hashed certificate values or validated block.
    async fn into_first_proposal<S: Signer + ?Sized>(
        self,
        owner: AccountOwner,
        signer: &S,
    ) -> Result<BlockProposal, S::Error> {
        self.into_proposal_with_round(owner, signer, Round::MultiLeader(0))
            .await
    }
    /// Returns a block proposal without any hashed certificate values or validated block.
    async fn into_proposal_with_round<S: Signer + ?Sized>(
        self,
        owner: AccountOwner,
        signer: &S,
        round: Round,
    ) -> Result<BlockProposal, S::Error>;
}
impl BlockTestExt for ProposedBlock {
    fn with_authenticated_owner(mut self, authenticated_owner: Option<AccountOwner>) -> Self {
        self.authenticated_owner = authenticated_owner;
        self
    }

    fn with_operation(mut self, operation: impl Into<Operation>) -> Self {
        self.transactions
            .push(Transaction::ExecuteOperation(operation.into()));
        self
    }

    fn with_transfer(self, owner: AccountOwner, recipient: Account, amount: Amount) -> Self {
        self.with_operation(SystemOperation::Transfer {
            owner,
            recipient,
            amount,
        })
    }

    fn with_simple_transfer(self, chain_id: ChainId, amount: Amount) -> Self {
        self.with_transfer(AccountOwner::CHAIN, Account::chain(chain_id), amount)
    }

    fn with_burn(self, amount: Amount) -> Self {
        // Delegate to `with_transfer` instead of re-building `SystemOperation::Transfer`
        // by hand, so every transfer-like helper constructs the operation in one place.
        let recipient = Account::burn_address(self.chain_id);
        self.with_transfer(AccountOwner::CHAIN, recipient, amount)
    }

    fn with_incoming_bundle(mut self, incoming_bundle: IncomingBundle) -> Self {
        self.transactions
            .push(Transaction::ReceiveMessages(incoming_bundle));
        self
    }

    fn with_incoming_bundles(
        mut self,
        incoming_bundles: impl IntoIterator<Item = IncomingBundle>,
    ) -> Self {
        self.transactions.extend(
            incoming_bundles
                .into_iter()
                .map(Transaction::ReceiveMessages),
        );
        self
    }

    fn with_timestamp(mut self, timestamp: impl Into<Timestamp>) -> Self {
        self.timestamp = timestamp.into();
        self
    }

    fn with_epoch(mut self, epoch: impl Into<Epoch>) -> Self {
        self.epoch = epoch.into();
        self
    }

    async fn into_proposal_with_round<S: Signer + ?Sized>(
        self,
        owner: AccountOwner,
        signer: &S,
        round: Round,
    ) -> Result<BlockProposal, S::Error> {
        BlockProposal::new_initial(owner, round, self, signer).await
    }
}
/// A helper trait for turning a single vote into a full certificate in tests.
pub trait VoteTestExt<T: CertificateValue>: Sized {
    /// Returns a certificate for a committee consisting only of this validator.
    fn into_certificate(self, public_key: ValidatorPublicKey) -> GenericCertificate<T>;
}
impl<T: CertificateValue> VoteTestExt<T> for Vote<T> {
    fn into_certificate(self, public_key: ValidatorPublicKey) -> GenericCertificate<T> {
        // Build a committee in which this single validator holds all the voting power,
        // so its one signature is enough to reach a quorum.
        let validator_state = ValidatorState {
            network_address: String::new(),
            votes: 100,
            account_public_key: AccountPublicKey::test_key(1),
        };
        let members = std::iter::once((public_key, validator_state)).collect();
        let committee = Committee::new(members, ResourceControlPolicy::only_fuel());
        SignatureAggregator::new(self.value, self.round, &committee)
            .append(public_key, self.signature)
            .unwrap()
            .unwrap()
    }
}
/// Helper trait to simplify constructing messages for tests.
pub trait MessageTestExt: Sized {
    /// Wraps `self` into a [`PostedMessage`] with the given index and kind.
    fn to_posted(self, index: u32, kind: MessageKind) -> PostedMessage;
}
impl<T: Into<Message>> MessageTestExt for T {
    fn to_posted(self, index: u32, kind: MessageKind) -> PostedMessage {
        // No authentication, no grant, no refund target: the plain test defaults.
        let message = self.into();
        PostedMessage {
            message,
            index,
            kind,
            grant: Amount::ZERO,
            authenticated_owner: None,
            refund_grant_to: None,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/data_types/mod.rs | linera-chain/src/data_types/mod.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BTreeMap, BTreeSet, HashSet};
use allocative::Allocative;
use async_graphql::SimpleObject;
use custom_debug_derive::Debug;
use linera_base::{
bcs,
crypto::{
AccountSignature, BcsHashable, BcsSignable, CryptoError, CryptoHash, Signer,
ValidatorPublicKey, ValidatorSecretKey, ValidatorSignature,
},
data_types::{Amount, Blob, BlockHeight, Epoch, Event, OracleResponse, Round, Timestamp},
doc_scalar, ensure, hex, hex_debug,
identifiers::{Account, AccountOwner, ApplicationId, BlobId, ChainId, StreamId},
};
use linera_execution::{committee::Committee, Message, MessageKind, Operation, OutgoingMessage};
use serde::{Deserialize, Serialize};
use crate::{
block::{Block, ValidatedBlock},
types::{
CertificateKind, CertificateValue, GenericCertificate, LiteCertificate,
ValidatedBlockCertificate,
},
ChainError,
};
pub mod metadata;
pub use metadata::*;
#[cfg(test)]
#[path = "../unit_tests/data_types_tests.rs"]
mod data_types_tests;
/// A block containing operations to apply on a given chain, as well as the
/// acknowledgment of a number of incoming messages from other chains.
/// * Incoming messages must be selected in the order they were
///   produced by the sending chain, but can be skipped.
/// * When a block is proposed to a validator, all cross-chain messages must have been
///   received ahead of time in the inbox of the chain.
/// * This constraint does not apply to the execution of confirmed blocks.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
#[graphql(complex)]
pub struct ProposedBlock {
    /// The chain to which this block belongs.
    pub chain_id: ChainId,
    /// The number identifying the current configuration.
    pub epoch: Epoch,
    /// The transactions to execute in this block. Each transaction can be either
    /// incoming messages or an operation.
    #[debug(skip_if = Vec::is_empty)]
    #[graphql(skip)]
    pub transactions: Vec<Transaction>,
    /// The block height.
    pub height: BlockHeight,
    /// The timestamp when this block was created. This must be later than all messages received
    /// in this block, but no later than the current time.
    pub timestamp: Timestamp,
    /// The user signing for the operations in the block and paying for their execution
    /// fees. If set, this must be the `owner` in the block proposal. `None` means that
    /// the default account of the chain is used. This value is also used as recipient of
    /// potential refunds for the message grants created by the operations.
    #[debug(skip_if = Option::is_none)]
    pub authenticated_owner: Option<AccountOwner>,
    /// Certified hash (see `Certificate` below) of the previous block in the
    /// chain, if any. `None` for the block at height 0.
    pub previous_block_hash: Option<CryptoHash>,
}
impl ProposedBlock {
    /// Returns all the published blob IDs in this block's operations.
    pub fn published_blob_ids(&self) -> BTreeSet<BlobId> {
        let mut ids = BTreeSet::new();
        for operation in self.operations() {
            ids.extend(operation.published_blob_ids());
        }
        ids
    }

    /// Returns whether the block contains only rejected incoming messages, which
    /// makes it admissible even on closed chains.
    pub fn has_only_rejected_messages(&self) -> bool {
        self.transactions.iter().all(|txn| {
            matches!(
                txn,
                Transaction::ReceiveMessages(IncomingBundle {
                    action: MessageAction::Reject,
                    ..
                })
            )
        })
    }

    /// Returns an iterator over all incoming [`PostedMessage`]s in this block.
    pub fn incoming_messages(&self) -> impl Iterator<Item = &PostedMessage> {
        self.incoming_bundles()
            .flat_map(|bundle| bundle.bundle.messages.iter())
    }

    /// Returns the number of incoming messages.
    pub fn message_count(&self) -> usize {
        self.incoming_messages().count()
    }

    /// Returns an iterator over all transactions as references.
    pub fn transaction_refs(&self) -> impl Iterator<Item = &Transaction> {
        self.transactions.iter()
    }

    /// Returns all operations in this block.
    pub fn operations(&self) -> impl Iterator<Item = &Operation> {
        self.transactions.iter().filter_map(|txn| {
            if let Transaction::ExecuteOperation(operation) = txn {
                Some(operation)
            } else {
                None
            }
        })
    }

    /// Returns all incoming bundles in this block.
    pub fn incoming_bundles(&self) -> impl Iterator<Item = &IncomingBundle> {
        self.transactions.iter().filter_map(|txn| {
            if let Transaction::ReceiveMessages(bundle) = txn {
                Some(bundle)
            } else {
                None
            }
        })
    }

    /// Checks that the BCS-serialized size of the block does not exceed the limit.
    pub fn check_proposal_size(&self, maximum_block_proposal_size: u64) -> Result<(), ChainError> {
        let size = bcs::serialized_size(self)?;
        let limit = usize::try_from(maximum_block_proposal_size).unwrap_or(usize::MAX);
        ensure!(size <= limit, ChainError::BlockProposalTooLarge(size));
        Ok(())
    }
}
// GraphQL-only accessor: `transactions` itself is `#[graphql(skip)]`, so this
// complex field exposes a structured view of it instead.
#[async_graphql::ComplexObject]
impl ProposedBlock {
    /// Metadata about the transactions in this block.
    async fn transaction_metadata(&self) -> Vec<TransactionMetadata> {
        self.transactions
            .iter()
            .map(TransactionMetadata::from_transaction)
            .collect()
    }
}
/// A transaction in a block: incoming messages or an operation.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Allocative)]
pub enum Transaction {
    /// Receive a bundle of incoming messages.
    ReceiveMessages(IncomingBundle),
    /// Execute an operation.
    ExecuteOperation(Operation),
}
// Marker impl: a `Transaction` is hashed via its BCS serialization.
impl BcsHashable<'_> for Transaction {}
impl Transaction {
    /// Returns the incoming bundle if this transaction receives messages, `None` otherwise.
    pub fn incoming_bundle(&self) -> Option<&IncomingBundle> {
        if let Transaction::ReceiveMessages(bundle) = self {
            Some(bundle)
        } else {
            None
        }
    }
}
/// GraphQL-compatible metadata describing a single operation.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
#[graphql(name = "Operation")]
pub struct OperationMetadata {
    /// The type of operation: "System" or "User"
    pub operation_type: String,
    /// For user operations, the application ID
    pub application_id: Option<ApplicationId>,
    /// For user operations, the serialized bytes (as a hex string for GraphQL)
    pub user_bytes_hex: Option<String>,
    /// For system operations, structured representation
    pub system_operation: Option<SystemOperationMetadata>,
}
impl From<&Operation> for OperationMetadata {
    /// Builds the GraphQL-friendly view of an operation without consuming it.
    fn from(operation: &Operation) -> Self {
        let (operation_type, application_id, user_bytes_hex, system_operation) = match operation {
            Operation::System(sys_op) => (
                "System".to_string(),
                None,
                None,
                Some(SystemOperationMetadata::from(sys_op.as_ref())),
            ),
            Operation::User {
                application_id,
                bytes,
            } => (
                "User".to_string(),
                Some(*application_id),
                Some(hex::encode(bytes)),
                None,
            ),
        };
        OperationMetadata {
            operation_type,
            application_id,
            user_bytes_hex,
            system_operation,
        }
    }
}
/// GraphQL-compatible metadata about a transaction.
///
/// Exactly one of `incoming_bundle` and `operation` is populated, depending on
/// `transaction_type` (see `TransactionMetadata::from_transaction`).
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct TransactionMetadata {
    /// The type of transaction: "ReceiveMessages" or "ExecuteOperation"
    pub transaction_type: String,
    /// The incoming bundle, if this is a ReceiveMessages transaction
    pub incoming_bundle: Option<IncomingBundle>,
    /// The operation, if this is an ExecuteOperation transaction
    pub operation: Option<OperationMetadata>,
}
impl TransactionMetadata {
    /// Builds the GraphQL-friendly metadata describing `transaction`.
    pub fn from_transaction(transaction: &Transaction) -> Self {
        match transaction {
            Transaction::ReceiveMessages(bundle) => Self {
                transaction_type: "ReceiveMessages".to_string(),
                incoming_bundle: Some(bundle.clone()),
                operation: None,
            },
            Transaction::ExecuteOperation(operation) => Self {
                transaction_type: "ExecuteOperation".to_string(),
                incoming_bundle: None,
                operation: Some(OperationMetadata::from(operation)),
            },
        }
    }
}
/// A chain ID with a block height.
#[derive(
    Debug,
    Clone,
    Copy,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Serialize,
    Deserialize,
    SimpleObject,
    Allocative,
)]
pub struct ChainAndHeight {
    /// The chain.
    pub chain_id: ChainId,
    /// The height of a block on that chain.
    pub height: BlockHeight,
}
/// A bundle of cross-chain messages.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
pub struct IncomingBundle {
    /// The origin of the messages.
    pub origin: ChainId,
    /// The messages to be delivered to the inbox identified by `origin`.
    pub bundle: MessageBundle,
    /// What to do with the message.
    pub action: MessageAction,
}
impl IncomingBundle {
    /// Returns an iterator over all posted messages in this bundle.
    pub fn messages(&self) -> impl Iterator<Item = &PostedMessage> {
        self.bundle.messages.iter()
    }
}
// Marker impl: an `IncomingBundle` is hashed via its BCS serialization.
impl BcsHashable<'_> for IncomingBundle {}
/// What to do with a message picked from the inbox.
#[derive(Copy, Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub enum MessageAction {
    /// Execute the incoming message.
    Accept,
    /// Do not execute the incoming message.
    Reject,
}
/// A set of messages from a single block, for a single destination.
#[derive(Debug, Eq, PartialEq, Clone, Hash, Serialize, Deserialize, SimpleObject, Allocative)]
pub struct MessageBundle {
    /// The block height.
    pub height: BlockHeight,
    /// The block's timestamp.
    pub timestamp: Timestamp,
    /// The confirmed block certificate hash.
    pub certificate_hash: CryptoHash,
    /// The index of the transaction in the block that is sending this bundle.
    pub transaction_index: u32,
    /// The relevant messages.
    pub messages: Vec<PostedMessage>,
}
/// An earlier proposal that is being retried.
#[derive(Clone, Debug, Serialize, Deserialize, Allocative)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub enum OriginalProposal {
    /// A proposal in the fast round.
    Fast(AccountSignature),
    /// A validated block certificate from an earlier round.
    Regular {
        certificate: LiteCertificate<'static>,
    },
}
/// An authenticated proposal for a new block.
// TODO(#456): the signature of the block owner is currently lost but it would be useful
// to have it for auditing purposes.
#[derive(Clone, Debug, Serialize, Deserialize, Allocative)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct BlockProposal {
    /// The round and block being proposed, plus the execution outcome for retries.
    pub content: ProposalContent,
    /// The proposer's signature over the hash of `content`.
    pub signature: AccountSignature,
    /// The earlier proposal or validation certificate this retry is based on, if any.
    #[debug(skip_if = Option::is_none)]
    pub original_proposal: Option<OriginalProposal>,
}
/// A message together with kind, authentication and grant information.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
#[graphql(complex)]
pub struct PostedMessage {
    /// The user authentication carried by the message, if any.
    #[debug(skip_if = Option::is_none)]
    pub authenticated_owner: Option<AccountOwner>,
    /// A grant to pay for the message execution.
    #[debug(skip_if = Amount::is_zero)]
    pub grant: Amount,
    /// Where to send a refund for the unused part of the grant after execution, if any.
    #[debug(skip_if = Option::is_none)]
    pub refund_grant_to: Option<Account>,
    /// The kind of message being sent.
    pub kind: MessageKind,
    /// The index of the message in the sending block.
    pub index: u32,
    /// The message itself.
    pub message: Message,
}
/// Extension trait converting an `OutgoingMessage` into a [`PostedMessage`].
pub trait OutgoingMessageExt {
    /// Returns the posted message, i.e. the outgoing message without the destination.
    fn into_posted(self, index: u32) -> PostedMessage;
}
impl OutgoingMessageExt for OutgoingMessage {
    /// Returns the posted message, i.e. the outgoing message without the destination.
    fn into_posted(self, index: u32) -> PostedMessage {
        // Destructure exhaustively so that adding a field to `OutgoingMessage` forces
        // this conversion to be revisited; only `destination` is intentionally dropped.
        let OutgoingMessage {
            destination: _,
            authenticated_owner,
            grant,
            refund_grant_to,
            kind,
            message,
        } = self;
        PostedMessage {
            index,
            kind,
            grant,
            message,
            authenticated_owner,
            refund_grant_to,
        }
    }
}
// Extra GraphQL field on `PostedMessage` exposing a structured view of `message`.
#[async_graphql::ComplexObject]
impl PostedMessage {
    /// Structured message metadata for GraphQL.
    async fn message_metadata(&self) -> MessageMetadata {
        MessageMetadata::from(&self.message)
    }
}
/// The execution result of a single operation.
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub struct OperationResult(
    #[debug(with = "hex_debug")]
    #[serde(with = "serde_bytes")]
    pub Vec<u8>,
);
// Marker impl: an `OperationResult` is hashed via its BCS serialization.
impl BcsHashable<'_> for OperationResult {}
doc_scalar!(
    OperationResult,
    "The execution result of a single operation."
);
/// The messages and the state hash resulting from a [`ProposedBlock`]'s execution.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, SimpleObject, Allocative)]
#[cfg_attr(with_testing, derive(Default))]
pub struct BlockExecutionOutcome {
    /// The list of outgoing messages for each transaction.
    pub messages: Vec<Vec<OutgoingMessage>>,
    /// The hashes and heights of previous blocks that sent messages to the same recipients.
    pub previous_message_blocks: BTreeMap<ChainId, (CryptoHash, BlockHeight)>,
    /// The hashes and heights of previous blocks that published events to the same channels.
    pub previous_event_blocks: BTreeMap<StreamId, (CryptoHash, BlockHeight)>,
    /// The hash of the chain's execution state after this block.
    pub state_hash: CryptoHash,
    /// The record of oracle responses for each transaction.
    pub oracle_responses: Vec<Vec<OracleResponse>>,
    /// The list of events produced by each transaction.
    pub events: Vec<Vec<Event>>,
    /// The list of blobs created by each transaction.
    pub blobs: Vec<Vec<Blob>>,
    /// The execution result for each operation.
    pub operation_results: Vec<OperationResult>,
}
/// The hash and chain ID of a `CertificateValue`.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub struct LiteValue {
    /// The hash of the full certificate value.
    pub value_hash: CryptoHash,
    /// The chain the value belongs to.
    pub chain_id: ChainId,
    /// The kind of certificate the value is for.
    pub kind: CertificateKind,
}
impl LiteValue {
    /// Summarizes the given certificate value as its hash, chain ID, and kind.
    pub fn new<T: CertificateValue>(value: &T) -> Self {
        Self {
            value_hash: value.hash(),
            chain_id: value.chain_id(),
            kind: T::KIND,
        }
    }
}
// The payload that is actually signed for a vote: the value's hash, the round,
// and the certificate kind.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
struct VoteValue(CryptoHash, Round, CertificateKind);
/// A vote on a statement from a validator.
#[derive(Allocative, Clone, Debug, Serialize, Deserialize)]
#[serde(bound(deserialize = "T: Deserialize<'de>"))]
pub struct Vote<T> {
    /// The value being voted on.
    pub value: T,
    /// The round this vote was cast in.
    pub round: Round,
    /// The validator's signature over the value hash, round, and certificate kind.
    pub signature: ValidatorSignature,
}
impl<T> Vote<T> {
/// Use signing key to create a signed object.
pub fn new(value: T, round: Round, key_pair: &ValidatorSecretKey) -> Self
where
T: CertificateValue,
{
let hash_and_round = VoteValue(value.hash(), round, T::KIND);
let signature = ValidatorSignature::new(&hash_and_round, key_pair);
Self {
value,
round,
signature,
}
}
/// Returns the vote, with a `LiteValue` instead of the full value.
pub fn lite(&self) -> LiteVote
where
T: CertificateValue,
{
LiteVote {
value: LiteValue::new(&self.value),
round: self.round,
signature: self.signature,
}
}
/// Returns the value this vote is for.
pub fn value(&self) -> &T {
&self.value
}
}
/// A vote on a statement from a validator, represented as a `LiteValue`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(with_testing, derive(Eq, PartialEq))]
pub struct LiteVote {
    /// The hash, chain ID, and kind of the value being voted on.
    pub value: LiteValue,
    /// The round this vote was cast in.
    pub round: Round,
    /// The validator's signature over the value hash, round, and kind.
    pub signature: ValidatorSignature,
}
impl LiteVote {
    /// Returns the full vote, with the value, if it matches.
    #[cfg(with_testing)]
    pub fn with_value<T: CertificateValue>(self, value: T) -> Option<Vote<T>> {
        // Only re-attach the value if it hashes to the hash that was voted on.
        (self.value.value_hash == value.hash()).then(|| Vote {
            value,
            round: self.round,
            signature: self.signature,
        })
    }

    /// Returns the kind of certificate this vote refers to.
    pub fn kind(&self) -> CertificateKind {
        self.value.kind
    }
}
impl MessageBundle {
pub fn is_skippable(&self) -> bool {
self.messages.iter().all(PostedMessage::is_skippable)
}
pub fn is_protected(&self) -> bool {
self.messages.iter().any(PostedMessage::is_protected)
}
}
impl PostedMessage {
    /// Returns whether receiving this message may be skipped: simple and bouncing
    /// messages only when they carry no grant; protected and tracked ones never.
    pub fn is_skippable(&self) -> bool {
        match self.kind {
            MessageKind::Simple | MessageKind::Bouncing => self.grant == Amount::ZERO,
            MessageKind::Protected | MessageKind::Tracked => false,
        }
    }

    /// Returns whether this is a protected message.
    pub fn is_protected(&self) -> bool {
        matches!(self.kind, MessageKind::Protected)
    }

    /// Returns whether this is a tracked message.
    pub fn is_tracked(&self) -> bool {
        matches!(self.kind, MessageKind::Tracked)
    }

    /// Returns whether this is a bouncing message.
    pub fn is_bouncing(&self) -> bool {
        matches!(self.kind, MessageKind::Bouncing)
    }
}
impl BlockExecutionOutcome {
    /// Combines this outcome with the proposed block it resulted from into a [`Block`].
    pub fn with(self, block: ProposedBlock) -> Block {
        Block::new(block, self)
    }

    /// Returns the IDs of all blobs referenced by oracle responses.
    pub fn oracle_blob_ids(&self) -> HashSet<BlobId> {
        self.oracle_responses
            .iter()
            .flatten()
            .filter_map(|response| match response {
                OracleResponse::Blob(blob_id) => Some(*blob_id),
                _ => None,
            })
            .collect()
    }

    /// Returns whether any transaction recorded at least one oracle response.
    pub fn has_oracle_responses(&self) -> bool {
        !self.oracle_responses.iter().all(Vec::is_empty)
    }

    /// Iterates over the IDs of all blobs created by this block's transactions.
    pub fn iter_created_blobs_ids(&self) -> impl Iterator<Item = BlobId> + '_ {
        self.blobs
            .iter()
            .flat_map(|blobs| blobs.iter())
            .map(|blob| blob.id())
    }

    /// Collects the IDs of all blobs created by this block's transactions.
    pub fn created_blobs_ids(&self) -> HashSet<BlobId> {
        self.iter_created_blobs_ids().collect()
    }
}
/// The data a block proposer signs.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Allocative)]
pub struct ProposalContent {
    /// The proposed block.
    pub block: ProposedBlock,
    /// The consensus round in which this proposal is made.
    pub round: Round,
    /// If this is a retry from an earlier round, the execution outcome. Present if and
    /// only if the proposal carries a validation certificate (see
    /// `BlockProposal::check_invariants`).
    #[debug(skip_if = Option::is_none)]
    pub outcome: Option<BlockExecutionOutcome>,
}
impl BlockProposal {
    /// Creates and signs a proposal for `block` in the given `round`, with no earlier
    /// proposal or execution outcome attached.
    pub async fn new_initial<S: Signer + ?Sized>(
        owner: AccountOwner,
        round: Round,
        block: ProposedBlock,
        signer: &S,
    ) -> Result<Self, S::Error> {
        let content = ProposalContent {
            round,
            block,
            outcome: None,
        };
        let signature = signer.sign(&owner, &CryptoHash::new(&content)).await?;
        Ok(Self {
            content,
            signature,
            original_proposal: None,
        })
    }
    /// Re-proposes the block from an earlier fast-round proposal in the given `round`,
    /// keeping the original proposal's signature as evidence.
    pub async fn new_retry_fast<S: Signer + ?Sized>(
        owner: AccountOwner,
        round: Round,
        old_proposal: BlockProposal,
        signer: &S,
    ) -> Result<Self, S::Error> {
        let content = ProposalContent {
            round,
            block: old_proposal.content.block,
            outcome: None,
        };
        let signature = signer.sign(&owner, &CryptoHash::new(&content)).await?;
        Ok(Self {
            content,
            signature,
            original_proposal: Some(OriginalProposal::Fast(old_proposal.signature)),
        })
    }
    /// Re-proposes the block from a validated block certificate in the given `round`,
    /// attaching the lite certificate and the recorded execution outcome.
    pub async fn new_retry_regular<S: Signer>(
        owner: AccountOwner,
        round: Round,
        validated_block_certificate: ValidatedBlockCertificate,
        signer: &S,
    ) -> Result<Self, S::Error> {
        let certificate = validated_block_certificate.lite_certificate().cloned();
        let block = validated_block_certificate.into_inner().into_inner();
        let (block, outcome) = block.into_proposal();
        let content = ProposalContent {
            block,
            round,
            outcome: Some(outcome),
        };
        let signature = signer.sign(&owner, &CryptoHash::new(&content)).await?;
        Ok(Self {
            content,
            signature,
            original_proposal: Some(OriginalProposal::Regular { certificate }),
        })
    }
    /// Returns the `AccountOwner` that proposed the block.
    pub fn owner(&self) -> AccountOwner {
        match self.signature {
            AccountSignature::Ed25519 { public_key, .. } => public_key.into(),
            AccountSignature::Secp256k1 { public_key, .. } => public_key.into(),
            AccountSignature::EvmSecp256k1 { address, .. } => AccountOwner::Address20(address),
        }
    }
    /// Verifies the proposer's signature against the proposal content.
    pub fn check_signature(&self) -> Result<(), CryptoError> {
        self.signature.verify(&self.content)
    }
    /// Returns the blob IDs this proposal requires: blobs published by its operations,
    /// plus blobs recorded by oracle responses in the outcome, if present.
    pub fn required_blob_ids(&self) -> impl Iterator<Item = BlobId> + '_ {
        self.content.block.published_blob_ids().into_iter().chain(
            self.content
                .outcome
                .iter()
                .flat_map(|outcome| outcome.oracle_blob_ids()),
        )
    }
    /// Like `required_blob_ids`, but also includes the blobs created by the outcome's
    /// transactions.
    pub fn expected_blob_ids(&self) -> impl Iterator<Item = BlobId> + '_ {
        self.content.block.published_blob_ids().into_iter().chain(
            self.content.outcome.iter().flat_map(|outcome| {
                outcome
                    .oracle_blob_ids()
                    .into_iter()
                    .chain(outcome.iter_created_blobs_ids())
            }),
        )
    }
    /// Checks that the original proposal, if present, matches the new one and has a higher round.
    pub fn check_invariants(&self) -> Result<(), &'static str> {
        match (&self.original_proposal, &self.content.outcome) {
            (None, None) => {}
            (Some(OriginalProposal::Fast(_)), None) => ensure!(
                self.content.round > Round::Fast,
                "The new proposal's round must be greater than the original's"
            ),
            (None, Some(_))
            | (Some(OriginalProposal::Fast(_)), Some(_))
            | (Some(OriginalProposal::Regular { .. }), None) => {
                return Err("Must contain a validation certificate if and only if \
                    it contains the execution outcome from a previous round");
            }
            (Some(OriginalProposal::Regular { certificate }), Some(outcome)) => {
                ensure!(
                    self.content.round > certificate.round,
                    "The new proposal's round must be greater than the original's"
                );
                // Re-assemble the full validated block to check it against the
                // lite certificate's hash.
                let block = outcome.clone().with(self.content.block.clone());
                let value = ValidatedBlock::new(block);
                ensure!(
                    certificate.check_value(&value),
                    "Lite certificate must match the given block and execution outcome"
                );
            }
        }
        Ok(())
    }
}
impl LiteVote {
/// Uses the signing key to create a signed object.
pub fn new(value: LiteValue, round: Round, secret_key: &ValidatorSecretKey) -> Self {
let hash_and_round = VoteValue(value.value_hash, round, value.kind);
let signature = ValidatorSignature::new(&hash_and_round, secret_key);
Self {
value,
round,
signature,
}
}
/// Verifies the signature in the vote.
pub fn check(&self, public_key: ValidatorPublicKey) -> Result<(), ChainError> {
let hash_and_round = VoteValue(self.value.value_hash, self.round, self.value.kind);
Ok(self.signature.check(&hash_and_round, public_key)?)
}
}
/// Incrementally aggregates validator signatures over one value until a quorum is reached.
pub struct SignatureAggregator<'a, T: CertificateValue> {
    // The committee supplying voting weights and the quorum threshold.
    committee: &'a Committee,
    // Accumulated voting weight of the signatures collected so far.
    weight: u64,
    // Validators that already contributed a signature (each may sign only once).
    used_validators: HashSet<ValidatorPublicKey>,
    // The certificate being built up: value, round, and collected signatures.
    partial: GenericCertificate<T>,
}
impl<'a, T: CertificateValue> SignatureAggregator<'a, T> {
    /// Starts aggregating signatures for the given value into a certificate.
    pub fn new(value: T, round: Round, committee: &'a Committee) -> Self {
        Self {
            committee,
            weight: 0,
            used_validators: HashSet::new(),
            partial: GenericCertificate::new(value, round, Vec::new()),
        }
    }

    /// Tries to append a signature to a (partial) certificate. Returns Some(certificate) if a
    /// quorum was reached. The resulting final certificate is guaranteed to be valid in the sense
    /// of `check` below. Returns an error if the signed value cannot be aggregated.
    // NOTE: the redundant `where T: CertificateValue` bound was dropped; it is already
    // required by the `impl` block.
    pub fn append(
        &mut self,
        public_key: ValidatorPublicKey,
        signature: ValidatorSignature,
    ) -> Result<Option<GenericCertificate<T>>, ChainError> {
        let hash_and_round = VoteValue(self.partial.hash(), self.partial.round, T::KIND);
        signature.check(&hash_and_round, public_key)?;
        // Check that each validator only appears once. `HashSet::insert` returns `false`
        // when the key was already present, so a single lookup replaces the previous
        // `contains` + `insert` pair.
        ensure!(
            self.used_validators.insert(public_key),
            ChainError::CertificateValidatorReuse
        );
        // Update weight.
        let voting_rights = self.committee.weight(&public_key);
        ensure!(voting_rights > 0, ChainError::InvalidSigner);
        self.weight += voting_rights;
        // Update certificate.
        self.partial.add_signature((public_key, signature));
        if self.weight >= self.committee.quorum_threshold() {
            self.weight = 0; // Prevent from creating the certificate twice.
            Ok(Some(self.partial.clone()))
        } else {
            Ok(None)
        }
    }
}
// Checks if the array slice is strictly ordered by public key. That means that if
// the array has duplicates, this will return false, even if the array is sorted.
pub(crate) fn is_strictly_ordered(values: &[(ValidatorPublicKey, ValidatorSignature)]) -> bool {
    values
        .iter()
        .zip(values.iter().skip(1))
        .all(|(left, right)| left.0 < right.0)
}
/// Verifies certificate signatures.
///
/// Checks that no validator signed twice, that every signer belongs to `committee`,
/// that the combined voting weight reaches the quorum threshold, and finally that
/// every signature verifies against the `(value_hash, round, kind)` payload.
pub(crate) fn check_signatures(
    value_hash: CryptoHash,
    certificate_kind: CertificateKind,
    round: Round,
    signatures: &[(ValidatorPublicKey, ValidatorSignature)],
    committee: &Committee,
) -> Result<(), ChainError> {
    // Check the quorum.
    let mut weight = 0;
    let mut used_validators = HashSet::new();
    for (validator, _) in signatures {
        // Check that each validator only appears once. `HashSet::insert` returns
        // `false` for a repeated key, so one lookup replaces `contains` + `insert`.
        ensure!(
            used_validators.insert(*validator),
            ChainError::CertificateValidatorReuse
        );
        // Update weight.
        let voting_rights = committee.weight(validator);
        ensure!(voting_rights > 0, ChainError::InvalidSigner);
        weight += voting_rights;
    }
    ensure!(
        weight >= committee.quorum_threshold(),
        ChainError::CertificateRequiresQuorum
    );
    // All that is left is checking signatures!
    let hash_and_round = VoteValue(value_hash, round, certificate_kind);
    ValidatorSignature::verify_batch(&hash_and_round, signatures.iter())?;
    Ok(())
}
// Marker impls: these types are signed via their BCS serialization.
impl BcsSignable<'_> for ProposalContent {}
impl BcsSignable<'_> for VoteValue {}
doc_scalar!(
    MessageAction,
    "Whether an incoming message is accepted or rejected."
);
#[cfg(test)]
mod signing {
    use linera_base::{
        crypto::{AccountSecretKey, AccountSignature, CryptoHash, EvmSignature, TestString},
        data_types::{BlockHeight, Epoch, Round},
        identifiers::ChainId,
    };
    use crate::data_types::{BlockProposal, ProposalContent, ProposedBlock};
    // Checks that a locally produced EVM signature over a proposal is byte-identical to a
    // signature recorded externally (via MetaMask) for the same payload, and that the
    // recovered owner matches the signing key. The key, block fields, and signature below
    // form a fixed fixture: changing any of them invalidates the test.
    #[test]
    fn proposal_content_signing() {
        use std::str::FromStr;
        // Generated in MetaMask.
        let secret_key = linera_base::crypto::EvmSecretKey::from_str(
            "f77a21701522a03b01c111ad2d2cdaf2b8403b47507ee0aec3c2e52b765d7a66",
        )
        .unwrap();
        let address = secret_key.address();
        let signer: AccountSecretKey = AccountSecretKey::EvmSecp256k1(secret_key);
        let public_key = signer.public();
        let proposed_block = ProposedBlock {
            chain_id: ChainId(CryptoHash::new(&TestString::new("ChainId"))),
            epoch: Epoch(11),
            transactions: vec![],
            height: BlockHeight(11),
            timestamp: 190000000u64.into(),
            authenticated_owner: None,
            previous_block_hash: None,
        };
        let proposal = ProposalContent {
            block: proposed_block,
            round: Round::SingleLeader(11),
            outcome: None,
        };
        // personal_sign of the `proposal_hash` done via MetaMask.
        // Wrap with proper variant so that bytes match (include the enum variant tag).
        let signature = EvmSignature::from_str(
            "d69d31203f59be441fd02cdf68b2504cbcdd7215905c9b7dc3a7ccbf09afe14550\
            3c93b391810ce9edd6ee36b1e817b2d0e9dabdf4a098da8c2f670ef4198e8a1b",
        )
        .unwrap();
        let metamask_signature = AccountSignature::EvmSecp256k1 {
            signature,
            address: address.0 .0,
        };
        let signature = signer.sign(&proposal);
        assert_eq!(signature, metamask_signature);
        assert_eq!(signature.owner(), public_key.into());
        let block_proposal = BlockProposal {
            content: proposal,
            signature,
            original_proposal: None,
        };
        assert_eq!(block_proposal.owner(), public_key.into(),);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-chain/src/data_types/metadata.rs | linera-chain/src/data_types/metadata.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! GraphQL-compatible structured metadata representations for operations and messages.
use async_graphql::SimpleObject;
use linera_base::{
crypto::CryptoHash,
data_types::{Amount, ApplicationPermissions},
hex,
identifiers::{Account, AccountOwner, ApplicationId, ChainId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_execution::{system::AdminOperation, Message, SystemMessage, SystemOperation};
use serde::{Deserialize, Serialize};
/// Timeout configuration metadata for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct TimeoutConfigMetadata {
/// The duration of the fast round in milliseconds.
pub fast_round_ms: Option<String>,
/// The duration of the first single-leader and all multi-leader rounds in milliseconds.
pub base_timeout_ms: String,
/// The duration by which the timeout increases after each single-leader round in milliseconds.
pub timeout_increment_ms: String,
/// The age of an incoming tracked or protected message after which validators start
/// transitioning to fallback mode, in milliseconds.
pub fallback_duration_ms: String,
}
impl From<&TimeoutConfig> for TimeoutConfigMetadata {
fn from(config: &TimeoutConfig) -> Self {
TimeoutConfigMetadata {
fast_round_ms: config
.fast_round_duration
.map(|d| (d.as_micros() / 1000).to_string()),
base_timeout_ms: (config.base_timeout.as_micros() / 1000).to_string(),
timeout_increment_ms: (config.timeout_increment.as_micros() / 1000).to_string(),
fallback_duration_ms: (config.fallback_duration.as_micros() / 1000).to_string(),
}
}
}
/// Chain ownership metadata for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct ChainOwnershipMetadata {
/// JSON serialized `ChainOwnership` for full representation.
pub ownership_json: String,
}
impl From<&ChainOwnership> for ChainOwnershipMetadata {
fn from(ownership: &ChainOwnership) -> Self {
ChainOwnershipMetadata {
// Fallback to Debug format should never be needed, as ChainOwnership implements Serialize.
// But we include it as a safety measure for GraphQL responses to always succeed.
ownership_json: serde_json::to_string(ownership)
.unwrap_or_else(|_| format!("{:?}", ownership)),
}
}
}
/// Application permissions metadata for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct ApplicationPermissionsMetadata {
/// JSON serialized `ApplicationPermissions`.
pub permissions_json: String,
}
impl From<&ApplicationPermissions> for ApplicationPermissionsMetadata {
fn from(permissions: &ApplicationPermissions) -> Self {
ApplicationPermissionsMetadata {
// Fallback to Debug format should never be needed, as ApplicationPermissions implements Serialize.
// But we include it as a safety measure for GraphQL responses to always succeed.
permissions_json: serde_json::to_string(permissions)
.unwrap_or_else(|_| format!("{:?}", permissions)),
}
}
}
/// Structured representation of a system operation for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct SystemOperationMetadata {
/// The type of system operation
pub system_operation_type: String,
/// Transfer operation details
pub transfer: Option<TransferOperationMetadata>,
/// Claim operation details
pub claim: Option<ClaimOperationMetadata>,
/// Open chain operation details
pub open_chain: Option<OpenChainOperationMetadata>,
/// Change ownership operation details
pub change_ownership: Option<ChangeOwnershipOperationMetadata>,
/// Change application permissions operation details
pub change_application_permissions: Option<ChangeApplicationPermissionsMetadata>,
/// Admin operation details
pub admin: Option<AdminOperationMetadata>,
/// Create application operation details
pub create_application: Option<CreateApplicationOperationMetadata>,
/// Publish data blob operation details
pub publish_data_blob: Option<PublishDataBlobMetadata>,
/// Verify blob operation details
pub verify_blob: Option<VerifyBlobMetadata>,
/// Publish module operation details
pub publish_module: Option<PublishModuleMetadata>,
/// Epoch operation details (`ProcessNewEpoch`, `ProcessRemovedEpoch`)
pub epoch: Option<i32>,
/// `UpdateStreams` operation details
pub update_streams: Option<Vec<UpdateStreamMetadata>>,
}
impl SystemOperationMetadata {
/// Creates a new metadata with the given operation type and all fields set to None.
fn new(system_operation_type: &str) -> Self {
SystemOperationMetadata {
system_operation_type: system_operation_type.to_string(),
transfer: None,
claim: None,
open_chain: None,
change_ownership: None,
change_application_permissions: None,
admin: None,
create_application: None,
publish_data_blob: None,
verify_blob: None,
publish_module: None,
epoch: None,
update_streams: None,
}
}
}
/// Transfer operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct TransferOperationMetadata {
pub owner: AccountOwner,
pub recipient: Account,
pub amount: Amount,
}
/// Claim operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct ClaimOperationMetadata {
pub owner: AccountOwner,
pub target_id: ChainId,
pub recipient: Account,
pub amount: Amount,
}
/// Open chain operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct OpenChainOperationMetadata {
pub balance: Amount,
pub ownership: ChainOwnershipMetadata,
pub application_permissions: ApplicationPermissionsMetadata,
}
/// Change ownership operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct ChangeOwnershipOperationMetadata {
pub super_owners: Vec<AccountOwner>,
pub owners: Vec<OwnerWithWeight>,
pub first_leader: Option<AccountOwner>,
pub multi_leader_rounds: i32,
pub open_multi_leader_rounds: bool,
pub timeout_config: TimeoutConfigMetadata,
}
/// Owner with weight metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct OwnerWithWeight {
pub owner: AccountOwner,
pub weight: String, // Using String to represent u64 safely in GraphQL
}
/// Change application permissions operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct ChangeApplicationPermissionsMetadata {
pub permissions: ApplicationPermissionsMetadata,
}
/// Admin operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct AdminOperationMetadata {
pub admin_operation_type: String,
pub epoch: Option<i32>,
pub blob_hash: Option<CryptoHash>,
}
/// Create application operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct CreateApplicationOperationMetadata {
pub module_id: String,
pub parameters_hex: String,
pub instantiation_argument_hex: String,
pub required_application_ids: Vec<ApplicationId>,
}
/// Publish data blob operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct PublishDataBlobMetadata {
pub blob_hash: CryptoHash,
}
/// Verify blob operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct VerifyBlobMetadata {
pub blob_id: String,
}
/// Publish module operation metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct PublishModuleMetadata {
pub module_id: String,
}
/// Update stream metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct UpdateStreamMetadata {
pub chain_id: ChainId,
pub stream_id: String,
pub next_index: i32,
}
/// Structured representation of a system message for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct SystemMessageMetadata {
/// The type of system message
pub system_message_type: String,
/// Credit message details
pub credit: Option<CreditMessageMetadata>,
/// Withdraw message details
pub withdraw: Option<WithdrawMessageMetadata>,
}
/// Credit message metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct CreditMessageMetadata {
pub target: AccountOwner,
pub amount: Amount,
pub source: AccountOwner,
}
/// Withdraw message metadata.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct WithdrawMessageMetadata {
pub owner: AccountOwner,
pub amount: Amount,
pub recipient: Account,
}
/// Structured representation of a message for GraphQL.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, SimpleObject)]
pub struct MessageMetadata {
/// The type of message: "System" or "User"
pub message_type: String,
/// For user messages, the application ID
pub application_id: Option<ApplicationId>,
/// For user messages, the serialized bytes (as a hex string for GraphQL)
pub user_bytes_hex: Option<String>,
/// For system messages, structured representation
pub system_message: Option<SystemMessageMetadata>,
}
impl From<&SystemOperation> for SystemOperationMetadata {
fn from(sys_op: &SystemOperation) -> Self {
match sys_op {
SystemOperation::Transfer {
owner,
recipient,
amount,
} => SystemOperationMetadata {
transfer: Some(TransferOperationMetadata {
owner: *owner,
recipient: *recipient,
amount: *amount,
}),
..SystemOperationMetadata::new("Transfer")
},
SystemOperation::Claim {
owner,
target_id,
recipient,
amount,
} => SystemOperationMetadata {
claim: Some(ClaimOperationMetadata {
owner: *owner,
target_id: *target_id,
recipient: *recipient,
amount: *amount,
}),
..SystemOperationMetadata::new("Claim")
},
SystemOperation::OpenChain(config) => SystemOperationMetadata {
open_chain: Some(OpenChainOperationMetadata {
balance: config.balance,
ownership: ChainOwnershipMetadata::from(&config.ownership),
application_permissions: ApplicationPermissionsMetadata::from(
&config.application_permissions,
),
}),
..SystemOperationMetadata::new("OpenChain")
},
SystemOperation::CloseChain => SystemOperationMetadata::new("CloseChain"),
SystemOperation::ChangeOwnership {
super_owners,
owners,
first_leader,
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config,
} => SystemOperationMetadata {
change_ownership: Some(ChangeOwnershipOperationMetadata {
super_owners: super_owners.clone(),
owners: owners
.iter()
.map(|(owner, weight)| OwnerWithWeight {
owner: *owner,
weight: weight.to_string(),
})
.collect(),
first_leader: *first_leader,
multi_leader_rounds: *multi_leader_rounds as i32,
open_multi_leader_rounds: *open_multi_leader_rounds,
timeout_config: TimeoutConfigMetadata::from(timeout_config),
}),
..SystemOperationMetadata::new("ChangeOwnership")
},
SystemOperation::ChangeApplicationPermissions(permissions) => SystemOperationMetadata {
change_application_permissions: Some(ChangeApplicationPermissionsMetadata {
permissions: ApplicationPermissionsMetadata::from(permissions),
}),
..SystemOperationMetadata::new("ChangeApplicationPermissions")
},
SystemOperation::Admin(admin_op) => SystemOperationMetadata {
admin: Some(AdminOperationMetadata::from(admin_op)),
..SystemOperationMetadata::new("Admin")
},
SystemOperation::CreateApplication {
module_id,
parameters,
instantiation_argument,
required_application_ids,
} => SystemOperationMetadata {
create_application: Some(CreateApplicationOperationMetadata {
module_id: serde_json::to_string(module_id)
.unwrap_or_else(|_| format!("{:?}", module_id)),
parameters_hex: hex::encode(parameters),
instantiation_argument_hex: hex::encode(instantiation_argument),
required_application_ids: required_application_ids.clone(),
}),
..SystemOperationMetadata::new("CreateApplication")
},
SystemOperation::PublishDataBlob { blob_hash } => SystemOperationMetadata {
publish_data_blob: Some(PublishDataBlobMetadata {
blob_hash: *blob_hash,
}),
..SystemOperationMetadata::new("PublishDataBlob")
},
SystemOperation::VerifyBlob { blob_id } => SystemOperationMetadata {
verify_blob: Some(VerifyBlobMetadata {
blob_id: blob_id.to_string(),
}),
..SystemOperationMetadata::new("VerifyBlob")
},
SystemOperation::PublishModule { module_id } => SystemOperationMetadata {
publish_module: Some(PublishModuleMetadata {
module_id: serde_json::to_string(module_id)
.unwrap_or_else(|_| format!("{:?}", module_id)),
}),
..SystemOperationMetadata::new("PublishModule")
},
SystemOperation::ProcessNewEpoch(epoch) => SystemOperationMetadata {
epoch: Some(epoch.0 as i32),
..SystemOperationMetadata::new("ProcessNewEpoch")
},
SystemOperation::ProcessRemovedEpoch(epoch) => SystemOperationMetadata {
epoch: Some(epoch.0 as i32),
..SystemOperationMetadata::new("ProcessRemovedEpoch")
},
SystemOperation::UpdateStreams(streams) => SystemOperationMetadata {
update_streams: Some(
streams
.iter()
.map(|(chain_id, stream_id, next_index)| UpdateStreamMetadata {
chain_id: *chain_id,
stream_id: stream_id.to_string(),
next_index: *next_index as i32,
})
.collect(),
),
..SystemOperationMetadata::new("UpdateStreams")
},
}
}
}
impl From<&AdminOperation> for AdminOperationMetadata {
fn from(admin_op: &AdminOperation) -> Self {
match admin_op {
AdminOperation::PublishCommitteeBlob { blob_hash } => AdminOperationMetadata {
admin_operation_type: "PublishCommitteeBlob".to_string(),
epoch: None,
blob_hash: Some(*blob_hash),
},
AdminOperation::CreateCommittee { epoch, blob_hash } => AdminOperationMetadata {
admin_operation_type: "CreateCommittee".to_string(),
epoch: Some(epoch.0 as i32),
blob_hash: Some(*blob_hash),
},
AdminOperation::RemoveCommittee { epoch } => AdminOperationMetadata {
admin_operation_type: "RemoveCommittee".to_string(),
epoch: Some(epoch.0 as i32),
blob_hash: None,
},
}
}
}
impl From<&Message> for MessageMetadata {
fn from(message: &Message) -> Self {
match message {
Message::System(sys_msg) => MessageMetadata {
message_type: "System".to_string(),
application_id: None,
user_bytes_hex: None,
system_message: Some(SystemMessageMetadata::from(sys_msg)),
},
Message::User {
application_id,
bytes,
} => MessageMetadata {
message_type: "User".to_string(),
application_id: Some(*application_id),
user_bytes_hex: Some(hex::encode(bytes)),
system_message: None,
},
}
}
}
impl From<&SystemMessage> for SystemMessageMetadata {
fn from(sys_msg: &SystemMessage) -> Self {
match sys_msg {
SystemMessage::Credit {
target,
amount,
source,
} => SystemMessageMetadata {
system_message_type: "Credit".to_string(),
credit: Some(CreditMessageMetadata {
target: *target,
amount: *amount,
source: *source,
}),
withdraw: None,
},
SystemMessage::Withdraw {
owner,
amount,
recipient,
} => SystemMessageMetadata {
system_message_type: "Withdraw".to_string(),
credit: None,
withdraw: Some(WithdrawMessageMetadata {
owner: *owner,
amount: *amount,
recipient: *recipient,
}),
},
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/build.rs | linera-witty-macros/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() {
cfg_aliases::cfg_aliases! {
with_testing: { feature = "test" },
with_wasmer: { feature = "wasmer" },
with_wasmtime: { feature = "wasmtime" },
with_wit_export: {
any(feature = "test", feature = "wasmer", feature = "wasmtime")
},
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_type.rs | linera-witty-macros/src/wit_type.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Derivation of the `WitType` trait.
use heck::ToKebabCase;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::quote;
use syn::{Attribute, Ident, LitStr, MacroDelimiter, Meta, Variant};
use crate::util::FieldsInformation;
#[path = "unit_tests/wit_type.rs"]
mod tests;
/// Returns a [`LitStr`] with the type name to use in the WIT declaration.
///
/// The name is either obtained from a custom `#[wit_name = ""]` attribute, or as the
/// kebab-case version of the Rust type name.
pub fn discover_wit_name(attributes: &[Attribute], rust_name: &Ident) -> LitStr {
let custom_name = attributes.iter().find_map(|attribute| {
let Meta::List(meta) = &attribute.meta else {
return None;
};
let MacroDelimiter::Paren(_) = meta.delimiter else {
return None;
};
if !meta.path.is_ident("witty") {
return None;
}
let mut wit_name = None;
meta.parse_nested_meta(|witty_attribute| {
if witty_attribute.path.is_ident("name") {
if wit_name.is_some() {
abort!(
witty_attribute.path,
"Multiple attributes configuring the WIT type name"
);
}
let value = witty_attribute.value()?;
let name = value.parse::<LitStr>()?;
wit_name = Some(name.clone());
}
Ok(())
})
.unwrap_or_else(|_| {
abort!(
meta,
"Failed to parse WIT type name attribute. \
Expected `#[witty(name = \"custom-wit-type-name\")]`."
);
});
wit_name
});
custom_name
.unwrap_or_else(|| LitStr::new(&rust_name.to_string().to_kebab_case(), rust_name.span()))
}
/// Returns the body of the `WitType` implementation for the Rust `struct` with the specified
/// `fields`.
pub fn derive_for_struct<'input>(
wit_name: LitStr,
fields: impl Into<FieldsInformation<'input>>,
) -> TokenStream {
let fields = fields.into();
let fields_hlist = fields.hlist_type();
let field_wit_names = fields.wit_names();
let field_wit_type_names = fields.wit_type_names();
quote! {
const SIZE: u32 = <#fields_hlist as linera_witty::WitType>::SIZE;
type Layout = <#fields_hlist as linera_witty::WitType>::Layout;
type Dependencies = #fields_hlist;
fn wit_type_name() -> std::borrow::Cow<'static, str> {
#wit_name.into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(concat!(" record ", #wit_name, " {\n"));
#(
wit_declaration.push_str(" ");
wit_declaration.push_str(#field_wit_names);
wit_declaration.push_str(": ");
wit_declaration.push_str(&*#field_wit_type_names);
wit_declaration.push_str(",\n");
)*
wit_declaration.push_str(" }\n");
wit_declaration.into()
}
}
}
/// Returns the body of the `WitType` implementation for the Rust `enum` with the specified
/// `variants`.
pub fn derive_for_enum<'variants>(
name: &Ident,
wit_name: LitStr,
variants: impl DoubleEndedIterator<Item = &'variants Variant> + Clone,
) -> TokenStream {
let variant_count = variants.clone().count();
let variant_fields: Vec<_> = variants
.clone()
.map(|variant| FieldsInformation::from(&variant.fields))
.collect();
let variant_hlists: Vec<_> = variant_fields
.iter()
.map(FieldsInformation::hlist_type)
.collect();
let discriminant_type = if variant_count <= u8::MAX.into() {
quote! { u8 }
} else if variant_count <= u16::MAX.into() {
quote! { u16 }
} else if variant_count <= u32::MAX as usize {
quote! { u32 }
} else {
abort!(name, "Too many variants in `enum`");
};
let discriminant_size = quote! { std::mem::size_of::<#discriminant_type>() as u32 };
let variant_sizes = variant_hlists.iter().map(|variant_hlist| {
quote! {
let variant_size =
discriminant_size + padding + <#variant_hlist as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
}
});
let variant_layouts = variant_hlists
.iter()
.map(|variant_hlist| quote! { <#variant_hlist as linera_witty::WitType>::Layout })
.rev()
.reduce(|current, variant_layout| {
quote! {
<#variant_layout as linera_witty::Merge<#current>>::Output
}
});
let variant_field_types = variant_fields.iter().map(FieldsInformation::types);
let dependencies = variant_field_types.clone().flatten();
let enum_or_variant = if dependencies.clone().count() == 0 {
LitStr::new("enum", name.span())
} else {
LitStr::new("variant", name.span())
};
let variant_wit_names = variants.map(|variant| {
LitStr::new(
&variant.ident.to_string().to_kebab_case(),
variant.ident.span(),
)
});
let variant_wit_payloads = variant_field_types.map(|field_types| {
let mut field_types = field_types.peekable();
let first_field_type = field_types.next();
let has_second_field_type = field_types.peek().is_some();
match (first_field_type, has_second_field_type) {
(None, _) => quote! {},
(Some(only_field_type), false) => quote! {
wit_declaration.push('(');
wit_declaration.push_str(
&<#only_field_type as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push(')');
},
(Some(first_field_type), true) => quote! {
wit_declaration.push_str("(tuple<");
wit_declaration.push_str(
&<#first_field_type as linera_witty::WitType>::wit_type_name(),
);
#(
wit_declaration.push_str(", ");
wit_declaration.push_str(
&<#field_types as linera_witty::WitType>::wit_type_name(),
);
)*
wit_declaration.push_str(">)");
},
}
});
quote! {
const SIZE: u32 = {
let discriminant_size = #discriminant_size;
let mut size = discriminant_size;
let mut variants_alignment = <#variant_layouts as linera_witty::Layout>::ALIGNMENT;
let padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
#(#variant_sizes)*
let end_padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
size + end_padding
};
type Layout = linera_witty::HCons<#discriminant_type, #variant_layouts>;
type Dependencies = linera_witty::HList![#( #dependencies ),*];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
#wit_name.into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(
concat!(" ", #enum_or_variant, " ", #wit_name, " {\n"),
);
#(
wit_declaration.push_str(" ");
wit_declaration.push_str(#variant_wit_names);
#variant_wit_payloads
wit_declaration.push_str(",\n");
)*
wit_declaration.push_str(" }\n");
wit_declaration.into()
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_load.rs | linera-witty-macros/src/wit_load.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Derivation of the `WitLoad` trait.
#[path = "unit_tests/wit_load.rs"]
mod tests;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::quote;
use syn::{Ident, LitInt, Variant};
use crate::util::FieldsInformation;
/// Returns the body of the `WitLoad` implementation for the Rust `struct` with the specified
/// `fields`.
pub fn derive_for_struct<'input>(fields: impl Into<FieldsInformation<'input>>) -> TokenStream {
let fields = fields.into();
let fields_hlist_binding = fields.hlist_bindings();
let fields_hlist_type = fields.hlist_type();
let construction = fields.construction();
let fallback_bindings = fields.bindings_for_skipped_fields();
quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let #fields_hlist_binding =
<#fields_hlist_type as linera_witty::WitLoad>::load(memory, location)?;
#fallback_bindings
Ok(Self #construction)
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let #fields_hlist_binding =
<#fields_hlist_type as linera_witty::WitLoad>::lift_from(flat_layout, memory)?;
#fallback_bindings
Ok(Self #construction)
}
}
}
/// Returns the body of the `WitLoad` implementation for the Rust `enum` with the specified
/// `variants`.
pub fn derive_for_enum<'variants>(
name: &Ident,
variants: impl DoubleEndedIterator<Item = &'variants Variant> + Clone,
) -> TokenStream {
let variant_count = variants.clone().count();
let variant_fields = variants
.clone()
.map(|variant| FieldsInformation::from(&variant.fields))
.collect::<Vec<_>>();
let variant_types = variant_fields.iter().map(FieldsInformation::hlist_type);
let variants = variants.zip(&variant_fields).enumerate();
let discriminant_type = if variant_count <= u8::MAX.into() {
quote! { u8 }
} else if variant_count <= u16::MAX.into() {
quote! { u16 }
} else if variant_count <= u32::MAX as usize {
quote! { u32 }
} else {
abort!(name, "Too many variants in `enum`");
};
let align_to_cases = variant_types.fold(quote! {}, |location, variant_type| {
quote! {
#location.after_padding_for::<#variant_type>()
}
});
let load_variants = variants.clone().map(|(index, (variant, fields))| {
let variant_name = &variant.ident;
let index = LitInt::new(&index.to_string(), variant_name.span());
let field_bindings = fields.hlist_bindings();
let fields_type = fields.hlist_type();
let construction = fields.construction();
let fallback_bindings = fields.bindings_for_skipped_fields();
quote! {
#index => {
let #field_bindings =
<#fields_type as linera_witty::WitLoad>::load(memory, location)?;
#fallback_bindings
Ok(#name::#variant_name #construction)
}
}
});
let lift_variants = variants.map(|(index, (variant, fields))| {
let variant_name = &variant.ident;
let index = LitInt::new(&index.to_string(), variant_name.span());
let field_bindings = fields.hlist_bindings();
let fields_type = fields.hlist_type();
let construction = fields.construction();
let fallback_bindings = fields.bindings_for_skipped_fields();
quote! {
#index => {
let #field_bindings = <#fields_type as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
#fallback_bindings
Ok(#name::#variant_name #construction)
}
}
});
quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <#discriminant_type as linera_witty::WitLoad>::load(
memory,
location,
)?;
location = location.after::<#discriminant_type>() #align_to_cases;
match discriminant {
#( #load_variants )*
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
fn lift_from<Instance>(
linera_witty::hlist_pat![discriminant_flat_type, ...flat_layout]:
<Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <#discriminant_type as linera_witty::WitLoad>::lift_from(
linera_witty::hlist![discriminant_flat_type],
memory,
)?;
match discriminant {
#( #lift_variants )*
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/lib.rs | linera-witty-macros/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! # Linera Witty Macros
//!
//! This crate contains the procedural macros used by the `linera-witty` crate.
#![deny(missing_docs)]
mod util;
mod wit_export;
mod wit_import;
mod wit_interface;
mod wit_load;
mod wit_store;
mod wit_type;
use proc_macro::TokenStream;
use proc_macro2::Span;
use proc_macro_error::{abort, proc_macro_error};
use quote::{quote, ToTokens};
#[cfg(with_wit_export)]
use syn::ItemImpl;
use syn::{parse_macro_input, Data, DeriveInput, Ident, ItemTrait};
use self::util::{apply_specialization_attribute, AttributeParameters, Specializations};
/// Derives `WitType` for a Rust type.
///
/// All fields in the type must also implement `WitType`.
#[proc_macro_error]
#[proc_macro_derive(WitType, attributes(witty, witty_specialize_with))]
pub fn derive_wit_type(input: TokenStream) -> TokenStream {
let mut input = parse_macro_input!(input as DeriveInput);
let specializations = apply_specialization_attribute(&mut input);
let wit_name = wit_type::discover_wit_name(&input.attrs, &input.ident);
let body = match &input.data {
Data::Struct(struct_item) => wit_type::derive_for_struct(wit_name, &struct_item.fields),
Data::Enum(enum_item) => {
wit_type::derive_for_enum(&input.ident, wit_name, enum_item.variants.iter())
}
Data::Union(_union_item) => {
abort!(input.ident, "Can't derive `WitType` for `union`s")
}
};
derive_trait(
input,
specializations,
body,
Ident::new("WitType", Span::call_site()),
)
}
/// Derives `WitLoad` for the Rust type.
///
/// All fields in the type must also implement `WitLoad`.
#[proc_macro_error]
#[proc_macro_derive(WitLoad, attributes(witty, witty_specialize_with))]
pub fn derive_wit_load(input: TokenStream) -> TokenStream {
let mut input = parse_macro_input!(input as DeriveInput);
let specializations = apply_specialization_attribute(&mut input);
let body = match &input.data {
Data::Struct(struct_item) => wit_load::derive_for_struct(&struct_item.fields),
Data::Enum(enum_item) => wit_load::derive_for_enum(&input.ident, enum_item.variants.iter()),
Data::Union(_union_item) => {
abort!(input.ident, "Can't derive `WitLoad` for `union`s")
}
};
derive_trait(
input,
specializations,
body,
Ident::new("WitLoad", Span::call_site()),
)
}
/// Derives `WitStore` for the Rust type.
///
/// All fields in the type must also implement `WitStore`.
#[proc_macro_error]
#[proc_macro_derive(WitStore, attributes(witty, witty_specialize_with))]
pub fn derive_wit_store(input: TokenStream) -> TokenStream {
let mut input = parse_macro_input!(input as DeriveInput);
let specializations = apply_specialization_attribute(&mut input);
let body = match &input.data {
Data::Struct(struct_item) => wit_store::derive_for_struct(&struct_item.fields),
Data::Enum(enum_item) => {
wit_store::derive_for_enum(&input.ident, enum_item.variants.iter())
}
Data::Union(_union_item) => {
abort!(input.ident, "Can't derive `WitStore` for `union`s")
}
};
derive_trait(
input,
specializations,
body,
Ident::new("WitStore", Span::call_site()),
)
}
/// Derives a trait named `trait_name` with the specified `body`.
///
/// Contains the common code to extract and apply the type's generics for the trait implementation.
fn derive_trait(
input: DeriveInput,
specializations: Specializations,
body: impl ToTokens,
trait_name: Ident,
) -> TokenStream {
let (generic_parameters, type_generics, where_clause) =
specializations.split_generics_from(&input.generics);
let type_name = &input.ident;
quote! {
impl #generic_parameters #trait_name for #type_name #type_generics #where_clause {
#body
}
}
.into()
}
/// Generates a generic type from a trait.
///
/// The generic type has a type parameter for the Wasm guest instance to use, and allows calling
/// functions that the instance exports through the trait's methods.
#[proc_macro_error]
#[proc_macro_attribute]
pub fn wit_import(attribute: TokenStream, input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as ItemTrait);
let parameters = AttributeParameters::new(attribute);
wit_import::generate(input, parameters).into()
}
/// Registers an `impl` block's functions as callable host functions exported to guest Wasm
/// modules.
///
/// The code generated depends on the enabled feature flags to determine which Wasm runtimes will
/// be supported.
#[cfg(with_wit_export)]
#[proc_macro_error]
#[proc_macro_attribute]
pub fn wit_export(attribute: TokenStream, input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as ItemImpl);
let parameters = AttributeParameters::new(attribute);
wit_export::generate(&input, parameters).into()
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_store.rs | linera-witty-macros/src/wit_store.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Derivation of the `WitStore` trait.
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::quote;
use syn::{Ident, LitInt, Variant};
use crate::util::FieldsInformation;
#[path = "unit_tests/wit_store.rs"]
mod tests;
/// Returns the body of the `WitStore` implementation for the Rust `struct` with the specified
/// `fields`.
pub fn derive_for_struct<'input>(fields: impl Into<FieldsInformation<'input>>) -> TokenStream {
let fields = fields.into();
let pattern = fields.destructuring();
let fields_hlist_value = fields.hlist_value();
quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self #pattern = self;
#fields_hlist_value.store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self #pattern = self;
#fields_hlist_value.lower(memory)
}
}
}
/// Returns the body of the `WitStore` implementation for the Rust `enum` with the specified
/// `variants`.
pub fn derive_for_enum<'variants>(
name: &Ident,
variants: impl DoubleEndedIterator<Item = &'variants Variant> + Clone,
) -> TokenStream {
let variant_count = variants.clone().count();
let variant_fields = variants
.clone()
.map(|variant| FieldsInformation::from(&variant.fields))
.collect::<Vec<_>>();
let discriminant_type = if variant_count <= u8::MAX.into() {
quote! { u8 }
} else if variant_count <= u16::MAX.into() {
quote! { u16 }
} else if variant_count <= u32::MAX as usize {
quote! { u32 }
} else {
abort!(name, "Too many variants in `enum`");
};
let variants = variants
.zip(&variant_fields)
.enumerate()
.map(|(index, (variant, fields))| {
let discriminant = LitInt::new(
&format!("{index}_{discriminant_type}"),
variant.ident.span(),
);
(discriminant, (variant, fields))
});
let align_to_cases = variant_fields.iter().fold(quote! {}, |location, variant| {
let variant_type = variant.hlist_type();
quote! {
#location.after_padding_for::<#variant_type>()
}
});
let store_variants = variants.clone().map(|(discriminant, (variant, fields))| {
let variant_name = &variant.ident;
let pattern = fields.destructuring();
let fields_hlist_value = fields.hlist_value();
quote! {
#name::#variant_name #pattern => {
#discriminant.store(memory, location)?;
location = location.after::<#discriminant_type>() #align_to_cases;
#fields_hlist_value.store(memory, location)
}
}
});
let lower_variants = variants.map(|(discriminant, (variant, fields))| {
let variant_name = &variant.ident;
let pattern = fields.destructuring();
let fields_hlist_value = fields.hlist_value();
quote! {
#name::#variant_name #pattern => {
let variant_flat_layout = #fields_hlist_value.lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
#discriminant.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
}
});
quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
#( #store_variants )*
}
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
#( #lower_variants )*
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_interface.rs | linera-witty-macros/src/wit_interface.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Generation of code to generate WIT snippets for an interface.
use std::iter;
use heck::ToKebabCase;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::quote;
use syn::{FnArg, Ident, LitStr, Pat, PatIdent, PatType, ReturnType, Type};
#[cfg(with_wit_export)]
use super::wit_export;
use super::wit_import;
/// Returns the code generated for implementing `WitInterface` to generate WIT snippets for an
/// interface.
pub fn generate<'input, Functions>(
wit_package: &LitStr,
wit_name: LitStr,
functions: Functions,
) -> TokenStream
where
Functions: IntoIterator,
Functions::IntoIter: Clone,
FunctionInformation<'input>: From<Functions::Item>,
{
let functions = functions.into_iter().map(FunctionInformation::from);
let dependencies = functions
.clone()
.flat_map(|function| function.dependencies().cloned().collect::<Vec<_>>());
let wit_functions = functions.map(|function| function.wit_declaration());
quote! {
type Dependencies = linera_witty::HList![#( #dependencies ),*];
fn wit_package() -> &'static str {
#wit_package
}
fn wit_name() -> &'static str {
#wit_name
}
fn wit_functions() -> Vec<String> {
vec![
#( #wit_functions ),*
]
}
}
}
/// Information from a function relevant to deriving the [`WitInterface`] trait.
pub struct FunctionInformation<'input> {
name: &'input Ident,
parameter_names: Vec<&'input Ident>,
parameter_types: Vec<&'input Type>,
output_type: Option<Type>,
}
impl<'input> FunctionInformation<'input> {
/// Creates a new [`FunctionInformation`] instance from its signature.
pub fn new(
name: &'input Ident,
inputs: impl IntoIterator<Item = &'input FnArg>,
output: ReturnType,
) -> Self {
let (parameter_names, parameter_types) = inputs
.into_iter()
.map(|argument| {
let FnArg::Typed(PatType { pat, ty, .. }) = argument else {
abort!(argument, "`self` is not supported in imported functions");
};
let Pat::Ident(PatIdent { ident, .. }) = pat.as_ref() else {
abort!(
pat,
"Only named parameters are supported in imported functions"
);
};
(ident, ty.as_ref())
})
.unzip();
let output_type = match output {
ReturnType::Default => None,
ReturnType::Type(_arrow, return_type) => Some(*return_type),
};
FunctionInformation {
name,
parameter_names,
parameter_types,
output_type,
}
}
/// Returns the types used in the function signature.
pub fn dependencies(&self) -> impl Iterator<Item = &'_ Type> {
self.parameter_types
.clone()
.into_iter()
.chain(&self.output_type)
}
/// Returns a [`LitStr`] with the kebab-case WIT name of the function.
pub fn wit_name(&self) -> LitStr {
LitStr::new(&self.name.to_string().to_kebab_case(), self.name.span())
}
/// Returns the code to generate a [`String`] with the WIT declaration of the function.
pub fn wit_declaration(&self) -> TokenStream {
let wit_name = self.wit_name();
let parameters = self.parameter_names.iter().zip(&self.parameter_types).map(
|(parameter_name, parameter_type)| {
let parameter_wit_name = LitStr::new(
¶meter_name.to_string().to_kebab_case(),
parameter_name.span(),
);
quote! {
#parameter_wit_name.into(),
": ".into(),
<#parameter_type as linera_witty::WitType>::wit_type_name()
}
},
);
let commas = iter::repeat_n(
Some(quote! { ", ".into(), }),
self.parameter_names.len().saturating_sub(1),
)
.chain(Some(None));
let output = self
.output_type
.as_ref()
.map(|output_type| {
quote! { " -> ".into(), <#output_type as linera_witty::WitType>::wit_type_name() }
})
.into_iter();
quote! {
[
std::borrow::Cow::Borrowed(" "),
#wit_name.into(),
": func(".into(),
#( #parameters, #commas )*
")".into(),
#( #output, )*
";".into(),
]
.as_slice()
.join("")
}
}
}
impl<'input> From<&'_ wit_import::FunctionInformation<'input>> for FunctionInformation<'input> {
fn from(imported_function: &'_ wit_import::FunctionInformation<'input>) -> Self {
let signature = &imported_function.function.sig;
FunctionInformation::new(
&signature.ident,
&signature.inputs,
signature.output.clone(),
)
}
}
#[cfg(with_wit_export)]
impl<'input> From<&'_ wit_export::FunctionInformation<'input>> for FunctionInformation<'input> {
fn from(exported_function: &'_ wit_export::FunctionInformation<'input>) -> Self {
let signature = &exported_function.function.sig;
let inputs = signature
.inputs
.iter()
.skip(if exported_function.is_reentrant { 1 } else { 0 });
let mut output = signature.output.clone();
if exported_function.call_early_return.is_some() {
let ReturnType::Type(_arrow, return_type) = &mut output else {
abort!(output, "Missing `Result` in fallible function return type");
};
let Some(actual_output) = wit_export::ok_type_inside_result(&*return_type).cloned()
else {
abort!(
output,
"Missing `Ok` result type in fallible function return type"
);
};
if is_unit_type(&actual_output) {
output = ReturnType::Default;
} else {
*return_type = Box::new(actual_output);
}
}
FunctionInformation::new(&signature.ident, inputs, output)
}
}
/// Returns `true` if `the_type` is the unit `()` type.
#[cfg(with_wit_export)]
pub fn is_unit_type(the_type: &Type) -> bool {
matches!(the_type, Type::Tuple(tuple) if tuple.elems.is_empty())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_import.rs | linera-witty-macros/src/wit_import.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Generation of code to import functions from a Wasm guest module.
use std::collections::HashSet;
use heck::ToKebabCase;
use proc_macro2::{Span, TokenStream};
use proc_macro_error::abort;
use quote::{format_ident, quote, quote_spanned, ToTokens};
use syn::{spanned::Spanned, FnArg, Ident, ItemTrait, LitStr, ReturnType, TraitItem, TraitItemFn};
use super::wit_interface;
use crate::util::{AttributeParameters, TokensSetItem};
/// Returns the code generated for calling imported Wasm functions.
///
/// The generated code contains a new generic type with the `trait_definition`'s name that allows
/// calling into the functions imported from a guest Wasm instance represented by a generic
/// parameter.
pub fn generate(trait_definition: ItemTrait, parameters: AttributeParameters) -> TokenStream {
WitImportGenerator::new(&trait_definition, parameters).generate()
}
/// A helper type for generation of the importing of Wasm functions.
///
/// Code generating is done in two phases. First the necessary pieces are collected and stored in
/// this type. Then, they are used to generate the final code.
pub struct WitImportGenerator<'input> {
parameters: AttributeParameters,
trait_name: &'input Ident,
namespace: LitStr,
functions: Vec<FunctionInformation<'input>>,
}
/// Pieces of information extracted from a function's definition.
pub(crate) struct FunctionInformation<'input> {
pub(crate) function: &'input TraitItemFn,
parameter_definitions: TokenStream,
parameter_bindings: TokenStream,
return_type: TokenStream,
interface: TokenStream,
instance_constraint: TokenStream,
}
impl<'input> WitImportGenerator<'input> {
/// Collects the pieces necessary for code generation from the inputs.
fn new(trait_definition: &'input ItemTrait, parameters: AttributeParameters) -> Self {
let trait_name = &trait_definition.ident;
let namespace = parameters.namespace(trait_name);
let functions = trait_definition
.items
.iter()
.map(FunctionInformation::from)
.collect::<Vec<_>>();
WitImportGenerator {
trait_name,
parameters,
namespace,
functions,
}
}
/// Consumes the collected pieces to generate the final code.
fn generate(self) -> TokenStream {
let function_slots = self.function_slots();
let slot_initializations = self.slot_initializations();
let imported_functions = self.imported_functions();
let (instance_trait_alias_name, instance_trait_alias) = self.instance_trait_alias();
let trait_name = self.trait_name;
let wit_interface_implementation = wit_interface::generate(
self.parameters.package_name(),
self.parameters.interface_name(trait_name),
&self.functions,
);
quote! {
#[allow(clippy::type_complexity)]
pub struct #trait_name<Instance>
where
Instance: #instance_trait_alias_name,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
instance: Instance,
#( #function_slots ),*
}
impl<Instance> #trait_name<Instance>
where
Instance: #instance_trait_alias_name,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
pub fn new(instance: Instance) -> Self {
#trait_name {
instance,
#( #slot_initializations ),*
}
}
#( #imported_functions )*
}
impl<Instance> linera_witty::wit_generation::WitInterface for #trait_name<Instance>
where
Instance: #instance_trait_alias_name,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
#wit_interface_implementation
}
#instance_trait_alias
}
}
/// Returns the function slots definitions.
///
/// The function slots are `Option` types used to lazily store handles to the functions
/// obtained from a Wasm guest instance.
fn function_slots(&self) -> impl Iterator<Item = TokenStream> + '_ {
self.functions.iter().map(|function| {
let function_name = function.name();
let instance_constraint = &function.instance_constraint;
quote_spanned! { function.span() =>
#function_name: Option<<Instance as #instance_constraint>::Function>
}
})
}
/// Returns the expressions to initialize the function slots.
fn slot_initializations(&self) -> impl Iterator<Item = TokenStream> + '_ {
self.functions.iter().map(|function| {
let function_name = function.name();
quote_spanned! { function.span() =>
#function_name: None
}
})
}
/// Returns the code to import and call each function.
fn imported_functions(&self) -> impl Iterator<Item = TokenStream> + '_ {
self.functions.iter().map(|function| {
let namespace = &self.namespace;
let function_name = function.name();
let function_wit_name = function_name.to_string().to_kebab_case();
let instance = &function.instance_constraint;
let parameters = &function.parameter_definitions;
let parameter_bindings = &function.parameter_bindings;
let return_type = &function.return_type;
let interface = &function.interface;
quote_spanned! { function.span() =>
pub fn #function_name(
&mut self,
#parameters
) -> Result<#return_type, linera_witty::RuntimeError> {
let function = match &self.#function_name {
Some(function) => function,
None => {
self.#function_name = Some(<Instance as #instance>::load_function(
&mut self.instance,
&format!("{}#{}", #namespace, #function_wit_name),
)?);
self.#function_name
.as_ref()
.expect("Function loaded into slot, but the slot remains empty")
}
};
let flat_parameters = #interface::lower_parameters(
linera_witty::hlist![#parameter_bindings],
&mut self.instance.memory()?,
)?;
let flat_results = self.instance.call(function, flat_parameters)?;
#[allow(clippy::let_unit_value)]
let result = #interface::lift_results(flat_results, &self.instance.memory()?)?;
Ok(result)
}
}
})
}
/// Returns a trait alias for all the instance constraints necessary for the generated type.
fn instance_trait_alias(&self) -> (Ident, TokenStream) {
let name = format_ident!("InstanceFor{}", self.trait_name);
let constraints = self.instance_constraints();
let definition = quote! {
pub trait #name : #constraints
where
<<Self as linera_witty::Instance>::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Self>,
{}
impl<AnyInstance> #name for AnyInstance
where
AnyInstance: #constraints,
<AnyInstance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<AnyInstance>,
{}
};
(name, definition)
}
/// Returns the instance constraints necessary for the generated type.
fn instance_constraints(&self) -> TokenStream {
let constraint_set: HashSet<_> = self
.functions
.iter()
.map(|function| TokensSetItem::from(&function.instance_constraint))
.collect();
constraint_set.into_iter().fold(
quote! { linera_witty::InstanceWithMemory },
|list, item| quote! { #list + #item },
)
}
}
impl<'input> FunctionInformation<'input> {
/// Extracts the necessary information from the `function` and stores it in a new
/// [`FunctionInformation`] instance.
pub fn new(function: &'input TraitItemFn) -> Self {
let (parameter_definitions, parameter_bindings, parameter_types) =
Self::parse_parameters(function.sig.inputs.iter());
let return_type = match &function.sig.output {
ReturnType::Default => quote_spanned! { function.sig.output.span() => () },
ReturnType::Type(_, return_type) => return_type.to_token_stream(),
};
let interface = quote_spanned! { function.sig.span() =>
<(linera_witty::HList![#parameter_types], #return_type)
as linera_witty::ImportedFunctionInterface>
};
let instance_constraint = quote_spanned! { function.sig.span() =>
linera_witty::InstanceWithFunction<
#interface::GuestParameters,
#interface::GuestResults,
>
};
FunctionInformation {
function,
parameter_definitions,
parameter_bindings,
return_type,
interface,
instance_constraint,
}
}
/// Parses a function's parameters and returns the pieces constructed from the parameters.
///
/// Returns the parameter definitions (the name and type pairs), the parameter bindings (the
/// names) and the parameter types.
fn parse_parameters(
function_inputs: impl Iterator<Item = &'input FnArg>,
) -> (TokenStream, TokenStream, TokenStream) {
let parameters = function_inputs.map(|input| match input {
FnArg::Typed(parameter) => parameter,
FnArg::Receiver(receiver) => abort!(
receiver.self_token,
"Imported interfaces can not have `self` parameters"
),
});
let mut parameter_definitions = quote! {};
let mut parameter_bindings = quote! {};
let mut parameter_types = quote! {};
for parameter in parameters {
let parameter_binding = ¶meter.pat;
let parameter_type = ¶meter.ty;
parameter_definitions.extend(quote! { #parameter, });
parameter_bindings.extend(quote! { #parameter_binding, });
parameter_types.extend(quote! { #parameter_type, });
}
(parameter_definitions, parameter_bindings, parameter_types)
}
/// Returns the name of the function.
pub fn name(&self) -> &Ident {
&self.function.sig.ident
}
/// Returns the code span of the function.
pub fn span(&self) -> Span {
self.function.span()
}
}
impl<'input> From<&'input TraitItem> for FunctionInformation<'input> {
fn from(item: &'input TraitItem) -> Self {
match item {
TraitItem::Fn(function) => FunctionInformation::new(function),
TraitItem::Const(const_item) => abort!(
const_item.ident,
"Const items are not supported in imported traits"
),
TraitItem::Type(type_item) => abort!(
type_item.ident,
"Type items are not supported in imported traits"
),
TraitItem::Macro(macro_item) => abort!(
macro_item.mac.path,
"Macro items are not supported in imported traits"
),
_ => abort!(item, "Only function items are supported in imported traits"),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/util/fields.rs | linera-witty-macros/src/util/fields.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper types to process [`Fields`] from `struct`s and `enum` variants.
use std::{borrow::Cow, ops::Deref};
use heck::ToKebabCase;
use proc_macro2::TokenStream;
use quote::{format_ident, quote};
use syn::{spanned::Spanned, Field, Fields, Ident, LitStr, Meta, MetaList, Type};
/// A helper type with information about a list of [`Fields`].
pub struct FieldsInformation<'input> {
kind: FieldsKind,
fields: Vec<FieldInformation<'input>>,
}
impl<'input> FieldsInformation<'input> {
/// Returns an iterator over the [`FieldInformation`] of the non-skipped fields.
pub fn non_skipped_fields(
&self,
) -> impl Iterator<Item = &FieldInformation<'input>> + Clone + '_ {
self.fields.iter().filter(|field| !field.is_skipped())
}
/// Returns an iterator over the names of the fields.
pub fn names(&self) -> impl Iterator<Item = &Ident> + '_ {
self.fields.iter().map(FieldInformation::name)
}
/// Returns an iterator over the types of the non-skipped fields.
pub fn types(&self) -> impl Iterator<Item = &Type> + Clone + '_ {
self.non_skipped_fields().map(FieldInformation::field_type)
}
/// Returns an iterator over the WIT compatible names of the non-skipped fields.
pub fn wit_names(&self) -> impl Iterator<Item = &LitStr> + '_ {
self.non_skipped_fields().map(FieldInformation::wit_name)
}
/// Returns an iterator over the code to obtain the WIT type names of the non-skipped fields.
pub fn wit_type_names(&self) -> impl Iterator<Item = TokenStream> + '_ {
self.non_skipped_fields()
.map(FieldInformation::wit_type_name)
}
/// Returns the code with a pattern to match a heterogeneous list using the `field_names` as
/// bindings.
pub fn hlist_type(&self) -> TokenStream {
let field_types = self.types();
quote! { linera_witty::HList![#( #field_types ),*] }
}
/// Returns the code with a pattern to match a heterogeneous list using the `field_names` as
/// bindings.
///
/// This function receives `field_names` instead of a `Fields` instance because some fields might
/// not have names, so binding names must be created for them.
pub fn hlist_bindings(&self) -> TokenStream {
let field_names = self.non_skipped_fields().map(FieldInformation::name);
quote! { linera_witty::hlist_pat![#( #field_names ),*] }
}
/// Returns the code that creates a heterogeneous list with the field values.
///
/// Assumes that the bindings were obtained using [`Self::hlist_bindings`] or
/// [`Self::construction`].
pub fn hlist_value(&self) -> TokenStream {
let field_names = self.non_skipped_fields().map(FieldInformation::name);
quote! { linera_witty::hlist![#( #field_names ),*] }
}
/// Returns the code that creates bindings with default values for the skipped fields.
pub fn bindings_for_skipped_fields(&self) -> TokenStream {
self.fields
.iter()
.filter(|field| field.is_skipped())
.map(FieldInformation::name)
.map(|field_name| quote! { let #field_name = Default::default(); })
.collect()
}
/// Returns the code with the body to construct the container of the fields.
///
/// Assumes all the fields have appropriate bindings set up with the names from
/// [`Self::names`].
pub fn construction(&self) -> TokenStream {
let names = self.names();
match self.kind {
FieldsKind::Unit => quote! {},
FieldsKind::Named => quote! { { #( #names ),* } },
FieldsKind::Unnamed => quote! { ( #( #names ),* ) },
}
}
/// Returns the code with the body pattern to destructure the container of the fields.
///
/// Does not include bindings for skipped fields.
pub fn destructuring(&self) -> TokenStream {
match self.kind {
FieldsKind::Unit => quote! {},
FieldsKind::Named => {
let bindings = self.non_skipped_fields().map(FieldInformation::name);
let has_skipped_fields = self.fields.iter().any(FieldInformation::is_skipped);
let ignored_fields = has_skipped_fields.then(|| quote! { .. });
quote! { { #( #bindings, )* #ignored_fields } }
}
FieldsKind::Unnamed => {
let bindings = self.fields.iter().map(|field| {
if field.is_skipped() {
Cow::Owned(Ident::new("_", field.name.span()))
} else {
Cow::Borrowed(field.name())
}
});
quote! { ( #( #bindings ),* ) }
}
}
}
}
impl<'input> From<&'input Fields> for FieldsInformation<'input> {
fn from(fields: &'input Fields) -> Self {
FieldsInformation {
kind: fields.into(),
fields: fields
.iter()
.enumerate()
.map(FieldInformation::from)
.collect(),
}
}
}
/// A helper type with information about a [`Field`].
pub struct FieldInformation<'input> {
field: &'input Field,
name: Cow<'input, Ident>,
wit_name: LitStr,
is_skipped: bool,
}
impl FieldInformation<'_> {
/// Returns the name to use for this field.
pub fn name(&self) -> &Ident {
&self.name
}
/// Returns the type of this field.
pub fn field_type(&self) -> &Type {
&self.field.ty
}
/// Returns the string literal with the WIT compatible name.
pub fn wit_name(&self) -> &LitStr {
&self.wit_name
}
/// Returns the code to obtain the field's WIT type name.
pub fn wit_type_name(&self) -> TokenStream {
let field_type = &self.field.ty;
quote! { <#field_type as linera_witty::WitType>::wit_type_name() }
}
/// Returns `true` if this field was marked to be skipped.
pub fn is_skipped(&self) -> bool {
self.is_skipped
}
}
impl Deref for FieldInformation<'_> {
type Target = Field;
fn deref(&self) -> &Self::Target {
self.field
}
}
impl<'input> From<(usize, &'input Field)> for FieldInformation<'input> {
fn from((index, field): (usize, &'input Field)) -> Self {
let name = field
.ident
.as_ref()
.map_or_else(|| Cow::Owned(format_ident!("field{index}")), Cow::Borrowed);
let wit_name = LitStr::new(
&field
.ident
.as_ref()
.map_or_else(|| format!("inner{index}"), Ident::to_string)
.to_kebab_case(),
field.span(),
);
let is_skipped = field.attrs.iter().any(|attribute| {
matches!(
&attribute.meta,
Meta::List(MetaList { path, tokens, ..})
if path.is_ident("witty") && tokens.to_string() == "skip"
)
});
FieldInformation {
field,
name,
wit_name,
is_skipped,
}
}
}
/// The kind of a [`Fields`] list.
#[derive(Clone, Copy, Debug)]
pub enum FieldsKind {
Unit,
Named,
Unnamed,
}
impl<'input> From<&'input Fields> for FieldsKind {
fn from(fields: &'input Fields) -> Self {
match fields {
Fields::Unit => FieldsKind::Unit,
Fields::Named(_) => FieldsKind::Named,
Fields::Unnamed(_) => FieldsKind::Unnamed,
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/util/mod.rs | linera-witty-macros/src/util/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper types and functions shared between different macro implementations.
mod fields;
mod specialization;
use std::hash::{Hash, Hasher};
use heck::ToKebabCase;
use proc_macro2::{Span, TokenStream};
use proc_macro_error::abort;
use quote::ToTokens;
use syn::{
parse::{self, Parse, ParseStream},
punctuated::Punctuated,
DeriveInput, Ident, Lit, LitStr, MetaNameValue, Token,
};
#[cfg(with_wit_export)]
pub use self::specialization::Specialization;
pub use self::{fields::FieldsInformation, specialization::Specializations};
/// Changes the [`DeriveInput`] by replacing some generic type parameters with specialized types.
pub fn apply_specialization_attribute(input: &mut DeriveInput) -> Specializations {
Specializations::prepare_derive_input(input)
}
/// A type representing the parameters for an attribute procedural macro.
pub struct AttributeParameters {
metadata: Punctuated<MetaNameValue, Token![,]>,
}
impl Parse for AttributeParameters {
fn parse(input: ParseStream) -> parse::Result<Self> {
Ok(AttributeParameters {
metadata: Punctuated::parse_terminated(input)?,
})
}
}
impl AttributeParameters {
/// Parses the attribute parameters to the attribute procedural macro.
pub fn new(attribute_parameters: proc_macro::TokenStream) -> Self {
syn::parse(attribute_parameters.clone()).unwrap_or_else(|_| {
abort!(
TokenStream::from(attribute_parameters),
r#"Failed to parse attribute parameters, expected either `root = true` \
or `package = "namespace:package"`"#
)
})
}
/// Returns the string value of a parameter named `name`, if it exists.
pub fn parameter(&self, name: &str) -> Option<&'_ LitStr> {
self.metadata
.iter()
.find(|pair| pair.path.is_ident(name))
.map(|pair| {
let syn::Expr::Lit(syn::ExprLit {
lit: Lit::Str(lit_str),
..
}) = &pair.value
else {
abort!(&pair.value, "Expected a string literal");
};
lit_str
})
}
/// Returns the package name specified through the `package` attribute.
pub fn package_name(&self) -> &'_ LitStr {
self.parameter("package").unwrap_or_else(|| {
abort!(
Span::call_site(),
r#"Missing package name specifier in attribute parameters \
(package = "namespace:package")"#
)
})
}
/// Returns the interface name specified through the `interface` attribute, or inferred from
/// the `type_name`
pub fn interface_name(&self, type_name: &Ident) -> LitStr {
self.parameter("interface").cloned().unwrap_or_else(|| {
LitStr::new(&type_name.to_string().to_kebab_case(), type_name.span())
})
}
/// Returns the namespace to use to prefix function names.
///
/// This is based on the package name and the interface name. The former must be specified
/// using the `package` attribute parameter, while the latter can be specified using the
/// `interface` attribute parameter or inferred from the `type_name`.
pub fn namespace(&self, type_name: &Ident) -> LitStr {
let package = self.package_name();
let interface = self.interface_name(type_name);
LitStr::new(
&format!("{}/{}", package.value(), interface.value()),
interface.span(),
)
}
}
/// A helper type to allow comparing [`TokenStream`] instances, allowing it to be used in a
/// [`HashSet`].
pub struct TokensSetItem<'input> {
string: String,
tokens: &'input TokenStream,
}
impl<'input> From<&'input TokenStream> for TokensSetItem<'input> {
fn from(tokens: &'input TokenStream) -> Self {
TokensSetItem {
string: tokens.to_string(),
tokens,
}
}
}
impl PartialEq for TokensSetItem<'_> {
fn eq(&self, other: &Self) -> bool {
self.string.eq(&other.string)
}
}
impl Eq for TokensSetItem<'_> {}
impl Hash for TokensSetItem<'_> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.string.hash(state)
}
}
impl ToTokens for TokensSetItem<'_> {
fn to_tokens(&self, stream: &mut TokenStream) {
self.tokens.to_tokens(stream)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/util/specialization.rs | linera-witty-macros/src/util/specialization.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Specialization of types before deriving traits for them.
#[cfg(test)]
#[path = "../unit_tests/specialization.rs"]
mod tests;
use std::{
collections::HashSet,
fmt::{self, Debug, Formatter},
mem,
};
use proc_macro2::{Span, TokenStream};
use proc_macro_error::abort;
use quote::ToTokens;
use syn::{
parse::{self, Parse, ParseStream, Parser},
punctuated::Punctuated,
spanned::Spanned,
AngleBracketedGenericArguments, AssocConst, AssocType, Attribute, Constraint, Data, DataEnum,
DataStruct, DataUnion, DeriveInput, Field, Fields, GenericArgument, GenericParam, Generics,
Ident, MacroDelimiter, Meta, MetaList, Path, PathArguments, PredicateType, QSelf, ReturnType,
Token, Type, TypeArray, TypeBareFn, TypeGroup, TypeImplTrait, TypeParamBound, TypeParen,
TypePath, TypePtr, TypeReference, TypeSlice, TypeTuple, WhereClause, WherePredicate,
};
/// Collected specializations to apply before deriving a trait for a type.
#[derive(Debug)]
pub struct Specializations(Vec<Specialization>);
impl FromIterator<Specialization> for Specializations {
fn from_iter<I>(specializations: I) -> Self
where
I: IntoIterator<Item = Specialization>,
{
Specializations(specializations.into_iter().collect())
}
}
impl Specializations {
/// Changes the [`DeriveInput`] based on the specializations requested through the
/// `witty_specialize_with` attributes.
///
/// The [`DeriveInput`] is changed so that its `where` clause and field types are specialized.
/// Returns the [`Specializations`] instance created from parsing the `witty_specialize_with`
/// attributes from the [`DeriveInput`].
pub fn prepare_derive_input(input: &mut DeriveInput) -> Self {
let this: Self = Self::parse_specialization_attributes(&input.attrs).collect();
for specialization in &this.0 {
specialization.apply_to_derive_input(input);
}
this
}
/// Creates a list of [`Specialization`]s based on the `witty_specialize_with` attributes found
/// in the provided `attributes`.
fn parse_specialization_attributes(
attributes: &[Attribute],
) -> impl Iterator<Item = Specialization> {
let mut specializations = Vec::new();
let abort_with_error = |span: Span| -> ! {
abort!(
span,
"Failed to parse Witty specialization attribute. \
Expected: `#[witty_specialize_with(TypeParam = Type, ...)]`."
);
};
for attribute in attributes {
match &attribute.meta {
Meta::List(MetaList {
path,
delimiter,
tokens,
}) if path.is_ident("witty_specialize_with")
&& matches!(delimiter, MacroDelimiter::Paren(_)) =>
{
let parser = Punctuated::<Specialization, Token![,]>::parse_separated_nonempty;
specializations.push(
parser
.parse2(tokens.clone())
.unwrap_or_else(|_| abort_with_error(attribute.span()))
.into_iter(),
);
}
_ => {}
}
}
specializations.into_iter().flatten()
}
/// Specializes the types in the [`Generics`] representation.
#[cfg(with_wit_export)]
pub fn apply_to_generics(&self, generics: &mut Generics) {
for specialization in &self.0 {
specialization.apply_to_generics(generics);
}
}
/// Specializes the types in the `target_type`, either itself or its type parameters.
#[cfg(with_wit_export)]
pub fn apply_to_type(&self, target_type: &mut Type) {
for specialization in &self.0 {
specialization.change_types_in_type(target_type);
}
}
/// Retrieves the information related to generics from the provided [`Generics`] after
/// applying the specializations from this instance.
///
/// Returns the generic parameters for the `impl` block, the generic arguments for the target
/// type, and the where clause to use.
pub fn split_generics_from<'generics>(
&self,
generics: &'generics Generics,
) -> (
TokenStream,
Option<AngleBracketedGenericArguments>,
Option<&'generics WhereClause>,
) {
let (_original_generic_parameters, original_type_generics, where_clause) =
generics.split_for_impl();
let type_generics = self.specialize_type_generics(original_type_generics);
let generic_parameters = self.clean_generic_parameters(generics.clone());
(
generic_parameters.into_token_stream(),
type_generics,
where_clause,
)
}
/// Specializes the target type's generic parameters.
fn specialize_type_generics(
&self,
type_generics: impl ToTokens,
) -> Option<AngleBracketedGenericArguments> {
let mut generic_type = syn::parse_quote!(TheType #type_generics);
for specialization in &self.0 {
specialization.change_types_in_type(&mut generic_type);
}
match generic_type {
Type::Path(TypePath {
qself: None,
path:
Path {
leading_colon: None,
segments,
},
}) if segments.len() == 1 => {
let segment = segments
.into_iter()
.next()
.expect("Missing custom type's path");
assert_eq!(segment.ident, "TheType");
match segment.arguments {
PathArguments::None => None,
PathArguments::AngleBracketed(arguments) => Some(arguments),
PathArguments::Parenthesized(_) => {
unreachable!("Custom type has unexpected function type parameters")
}
}
}
_ => unreachable!("Parsed custom type literal is incorrect"),
}
}
/// Returns the generic parameters from the [`Generics`] information after applying the
/// specializations from this instance.
fn clean_generic_parameters(&self, mut generics: Generics) -> TokenStream {
let original_generic_types = mem::take(&mut generics.params);
let parameters_to_remove: HashSet<Ident> = self
.0
.iter()
.map(|specialization| specialization.type_parameter.clone())
.collect();
generics
.params
.extend(
original_generic_types
.into_iter()
.filter(|generic_type| match generic_type {
GenericParam::Lifetime(_) | GenericParam::Const(_) => true,
GenericParam::Type(type_parameter) => {
!parameters_to_remove.contains(&type_parameter.ident)
}
}),
);
let (generic_parameters, _incorrect_type_generics, _unaltered_where_clause) =
generics.split_for_impl();
generic_parameters.into_token_stream()
}
}
/// A single specialization of a generic type parameter.
pub struct Specialization {
/// The type parameter to be specialized.
type_parameter: Ident,
/// The type to use as the specialized argument.
specialized_type: Type,
}
impl Parse for Specialization {
fn parse(input: ParseStream) -> parse::Result<Self> {
let type_parameter = input.parse()?;
let _: Token![=] = input.parse()?;
let specialized_type = input.parse()?;
Ok(Specialization {
type_parameter,
specialized_type,
})
}
}
impl Specialization {
/// Creates a new specialization for the `type_parameter`, to specialize it into the
/// `specialized_type`.
#[cfg(with_wit_export)]
pub fn new(type_parameter: Ident, specialized_type: Type) -> Self {
Specialization {
type_parameter,
specialized_type,
}
}
/// Replaces a type parameter in the [`DeriveInput`] with a specialized type.
///
/// Note that the specialization is only done to the `where` clause and the type's fields. The
/// types generic parameters needs to be changed separately (see
/// [`Specializations::specialize_type_generics`].
pub fn apply_to_derive_input(&self, input: &mut DeriveInput) {
self.apply_to_generics(&mut input.generics);
self.change_types_in_fields(&mut input.data);
}
/// Replaces a type parameter in the [`Generics`] representation with a specialized type.
pub fn apply_to_generics(&self, generics: &mut Generics) {
self.remove_from_where_clause(generics.where_clause.as_mut());
self.change_types_in_where_clause(generics.where_clause.as_mut());
}
/// Removes from a [`WhereClause`] all predicates for the [`Self::type_parameter`] that this
/// specialization targets.
fn remove_from_where_clause(&self, maybe_where_clause: Option<&mut WhereClause>) {
if let Some(WhereClause { predicates, .. }) = maybe_where_clause {
let original_predicates = mem::take(predicates);
predicates.extend(
original_predicates
.into_iter()
.filter(|predicate| !self.affects_predicate(predicate)),
);
}
}
/// Returns [`true`] if this [`Specialization`] affects the `predicate`.
fn affects_predicate(&self, predicate: &WherePredicate) -> bool {
let WherePredicate::Type(PredicateType {
bounded_ty: Type::Path(type_path),
..
}) = predicate
else {
return false;
};
let mut type_path = type_path;
while let Some(inner_type) = &type_path.qself {
type_path = match &*inner_type.ty {
Type::Path(path) => path,
_ => return false,
};
}
let Some(segment) = type_path.path.segments.first() else {
return false;
};
segment.ident == self.type_parameter && matches!(segment.arguments, PathArguments::None)
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// predicates of the [`WhereClause`].
fn change_types_in_where_clause(&self, maybe_where_clause: Option<&mut WhereClause>) {
let type_predicates = maybe_where_clause
.map(|where_clause| where_clause.predicates.iter_mut())
.into_iter()
.flatten()
.filter_map(|predicate| match predicate {
WherePredicate::Type(type_predicate) => Some(type_predicate),
_ => None,
});
for predicate in type_predicates {
self.change_types_in_type(&mut predicate.bounded_ty);
self.change_types_in_bounds(predicate.bounds.iter_mut());
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// fields of the [`DeriveInput`]'s [`Data`].
fn change_types_in_fields(&self, data: &mut Data) {
let fields: Box<dyn Iterator<Item = &mut Field>> = match data {
Data::Struct(DataStruct {
fields: Fields::Named(fields),
..
})
| Data::Union(DataUnion { fields, .. }) => Box::new(fields.named.iter_mut()),
Data::Struct(DataStruct {
fields: Fields::Unnamed(fields),
..
}) => Box::new(fields.unnamed.iter_mut()),
Data::Enum(DataEnum { variants, .. }) => Box::new(
variants
.iter_mut()
.flat_map(|variant| variant.fields.iter_mut()),
),
Data::Struct(_) => Box::new(None.into_iter()),
};
for Field { ty, .. } in fields {
self.change_types_in_type(ty);
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// generic type parameter bounds.
fn change_types_in_bounds<'bound>(
&self,
bounds: impl Iterator<Item = &'bound mut TypeParamBound>,
) {
for bound in bounds {
if let TypeParamBound::Trait(trait_bound) = bound {
self.change_types_in_path(&mut trait_bound.path);
}
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// provided [`Type`].
pub fn change_types_in_type(&self, the_type: &mut Type) {
match the_type {
Type::Array(TypeArray { elem, .. })
| Type::Group(TypeGroup { elem, .. })
| Type::Paren(TypeParen { elem, .. })
| Type::Ptr(TypePtr { elem, .. })
| Type::Reference(TypeReference { elem, .. })
| Type::Slice(TypeSlice { elem, .. }) => self.change_types_in_type(elem.as_mut()),
Type::BareFn(TypeBareFn { inputs, output, .. }) => self.change_types_in_function_type(
inputs.iter_mut().map(|bare_fn_arg| &mut bare_fn_arg.ty),
output,
),
Type::ImplTrait(TypeImplTrait { bounds, .. }) => {
self.change_types_in_bounds(bounds.iter_mut())
}
Type::Path(TypePath { qself: None, path }) if path.is_ident(&self.type_parameter) => {
*the_type = self.specialized_type.clone();
}
Type::Path(TypePath { qself, path }) => {
if let Some(QSelf { ty, .. }) = qself {
self.change_types_in_type(ty);
}
self.change_types_in_path(path);
}
Type::Tuple(TypeTuple { elems, .. }) => {
for element in elems {
self.change_types_in_type(element);
}
}
_ => {}
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// [`Path`]'s type arguments.
fn change_types_in_path(&self, path: &mut Path) {
for segment in &mut path.segments {
match &mut segment.arguments {
PathArguments::None => {}
PathArguments::AngleBracketed(angle_bracketed) => {
self.change_types_in_angle_bracketed_generic_arguments(angle_bracketed)
}
PathArguments::Parenthesized(function_type) => self.change_types_in_function_type(
function_type.inputs.iter_mut(),
&mut function_type.output,
),
}
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside the
/// [`AngleBracketedGenericArguments`] of a [`PathSegment`][`syn::PathSegment`].
fn change_types_in_angle_bracketed_generic_arguments(
&self,
arguments: &mut AngleBracketedGenericArguments,
) {
for argument in &mut arguments.args {
match argument {
GenericArgument::Type(the_type) => self.change_types_in_type(the_type),
GenericArgument::AssocType(AssocType { generics, ty, .. }) => {
if let Some(arguments) = generics {
self.change_types_in_angle_bracketed_generic_arguments(arguments);
}
self.change_types_in_type(ty);
}
GenericArgument::AssocConst(AssocConst {
generics: Some(arguments),
..
}) => {
self.change_types_in_angle_bracketed_generic_arguments(arguments);
}
GenericArgument::Constraint(Constraint {
generics, bounds, ..
}) => {
if let Some(arguments) = generics {
self.change_types_in_angle_bracketed_generic_arguments(arguments);
}
self.change_types_in_bounds(bounds.iter_mut());
}
_ => {}
}
}
}
/// Replaces the [`Self::type_parameter`] with the [`Self::specialized_type`] inside a
/// function's input parameter [`Type`]s and output [`ReturnType`].
/// [`Path`]'s type arguments.
fn change_types_in_function_type<'input>(
&self,
inputs: impl Iterator<Item = &'input mut Type>,
output: &mut ReturnType,
) {
for ty in inputs {
self.change_types_in_type(ty);
}
if let ReturnType::Type(_, return_type) = output {
self.change_types_in_type(return_type.as_mut());
}
}
}
impl Debug for Specialization {
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
formatter
.debug_struct("Specialization")
.field("type_parameter", &self.type_parameter)
.field(
"specialized_type",
&self.specialized_type.to_token_stream().to_string(),
)
.finish()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/unit_tests/wit_type.rs | linera-witty-macros/src/unit_tests/wit_type.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Unit tests for the `WitType` derive macro.
#![cfg(test)]
use proc_macro2::Span;
use quote::quote;
use syn::{parse_quote, Fields, ItemEnum, ItemStruct, LitStr};
use super::{derive_for_enum, derive_for_struct, discover_wit_name};
/// Checks the generated code for the body of the implementation of `WitType` for a unit struct.
#[test]
fn zero_sized_type() {
let input = Fields::Unit;
let output = derive_for_struct(LitStr::new("zero-sized-type", Span::call_site()), &input);
let expected = quote! {
const SIZE: u32 = <linera_witty::HList![] as linera_witty::WitType>::SIZE;
type Layout = <linera_witty::HList![] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"zero-sized-type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration =
String::from(concat!(" record " , "zero-sized-type" , " {\n"));
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for a named struct.
#[test]
fn named_struct() {
let input: ItemStruct = parse_quote! {
struct Type {
first: u8,
second: CustomType,
}
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_struct(wit_name, &input.fields);
let expected = quote! {
const SIZE: u32 = <linera_witty::HList![u8, CustomType] as linera_witty::WitType>::SIZE;
type Layout = <linera_witty::HList![u8, CustomType] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![u8, CustomType];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(concat!(" record " , "type" , " {\n"));
wit_declaration.push_str(" ");
wit_declaration.push_str("first");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<u8 as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("second");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<CustomType as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for a tuple struct.
#[test]
fn tuple_struct() {
let input: ItemStruct = parse_quote! {
struct Type(String, Vec<CustomType>, i64);
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_struct(wit_name, &input.fields);
let expected = quote! {
const SIZE: u32 =
<linera_witty::HList![String, Vec<CustomType>, i64] as linera_witty::WitType>::SIZE;
type Layout =
<linera_witty::HList![String, Vec<CustomType>, i64] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![String, Vec<CustomType>, i64];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(concat!(" record " , "type" , " {\n"));
wit_declaration.push_str(" ");
wit_declaration.push_str("inner0");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<String as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("inner1");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<Vec<CustomType> as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("inner2");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<i64 as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for an enum.
#[test]
fn enum_type() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType),
Struct {
first: (),
second: String,
},
}
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_enum(&input.ident, wit_name, input.variants.iter());
let expected = quote! {
const SIZE: u32 = {
let discriminant_size = std::mem::size_of::<u8>() as u32;
let mut size = discriminant_size;
let mut variants_alignment = <
< <linera_witty::HList![] as linera_witty::WitType>::Layout as linera_witty::Merge<
< <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::Layout
as linera_witty::Merge<
<linera_witty::HList![(), String] as linera_witty::WitType>::Layout
>>::Output
>>::Output
as linera_witty::Layout>::ALIGNMENT;
let padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![(), String] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let end_padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
size + end_padding
};
type Layout = linera_witty::HCons<u8,
< <linera_witty::HList![] as linera_witty::WitType>::Layout as linera_witty::Merge<
< <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::Layout
as linera_witty::Merge<
<linera_witty::HList![(), String] as linera_witty::WitType>::Layout
>>::Output
>>::Output>;
type Dependencies = linera_witty::HList![i8, CustomType, (), String];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"enum".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(
concat!(" ", "variant", " ", "enum" , " {\n"),
);
wit_declaration.push_str(" ");
wit_declaration.push_str("empty");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("tuple");
wit_declaration.push_str("(tuple<");
wit_declaration.push_str(
&<i8 as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(", ");
wit_declaration.push_str(
&<CustomType as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(">)");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("struct");
wit_declaration.push_str("(tuple<");
wit_declaration.push_str(
&<() as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(", ");
wit_declaration.push_str(
&<String as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(">)");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for a named struct
/// with some ignored fields.
#[test]
fn named_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type {
#[witty(skip)]
ignored1: u8,
first: u8,
#[witty(skip)]
ignored2: i128,
#[witty(skip)]
ignored3: String,
second: CustomType,
#[witty(skip)]
ignored4: Vec<()>,
}
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_struct(wit_name, &input.fields);
let expected = quote! {
const SIZE: u32 = <linera_witty::HList![u8, CustomType] as linera_witty::WitType>::SIZE;
type Layout = <linera_witty::HList![u8, CustomType] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![u8, CustomType];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(concat!(" record " , "type" , " {\n"));
wit_declaration.push_str(" ");
wit_declaration.push_str("first");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<u8 as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("second");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<CustomType as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for a tuple struct
/// with some ignored fields.
#[test]
fn tuple_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type(
#[witty(skip)]
PhantomData<T>,
String,
Vec<CustomType>,
#[witty(skip)]
bool,
i64,
);
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_struct(wit_name, &input.fields);
let expected = quote! {
const SIZE: u32 =
<linera_witty::HList![String, Vec<CustomType>, i64] as linera_witty::WitType>::SIZE;
type Layout =
<linera_witty::HList![String, Vec<CustomType>, i64] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![String, Vec<CustomType>, i64];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(concat!(" record " , "type" , " {\n"));
wit_declaration.push_str(" ");
wit_declaration.push_str("inner1");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<String as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("inner2");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<Vec<CustomType> as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("inner4");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<i64 as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for an enum
/// with some ignored fields.
#[test]
fn enum_type_with_skipped_fields() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType, #[witty(skip)] u128),
Struct {
first: (),
#[witty(skip)]
ignored1: (u8, u16),
#[witty(skip)]
ignored2: String,
second: String,
#[witty(skip)]
ignored3: Option<String>,
},
}
};
let wit_name = discover_wit_name(&[], &input.ident);
let output = derive_for_enum(&input.ident, wit_name, input.variants.iter());
let expected = quote! {
const SIZE: u32 = {
let discriminant_size = std::mem::size_of::<u8>() as u32;
let mut size = discriminant_size;
let mut variants_alignment = <
< <linera_witty::HList![] as linera_witty::WitType>::Layout as linera_witty::Merge<
< <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::Layout
as linera_witty::Merge<
<linera_witty::HList![(), String] as linera_witty::WitType>::Layout
>>::Output
>>::Output
as linera_witty::Layout>::ALIGNMENT;
let padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let variant_size = discriminant_size
+ padding
+ <linera_witty::HList![(), String] as linera_witty::WitType>::SIZE;
if variant_size > size {
size = variant_size;
}
let end_padding = (-(size as i32) & (variants_alignment as i32 - 1)) as u32;
size + end_padding
};
type Layout = linera_witty::HCons<u8,
< <linera_witty::HList![] as linera_witty::WitType>::Layout as linera_witty::Merge<
< <linera_witty::HList![i8, CustomType] as linera_witty::WitType>::Layout
as linera_witty::Merge<
<linera_witty::HList![(), String] as linera_witty::WitType>::Layout
>>::Output
>>::Output>;
type Dependencies = linera_witty::HList![i8, CustomType, (), String];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"enum".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration = String::from(
concat!(" ", "variant", " ", "enum" , " {\n"),
);
wit_declaration.push_str(" ");
wit_declaration.push_str("empty");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("tuple");
wit_declaration.push_str("(tuple<");
wit_declaration.push_str(
&<i8 as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(", ");
wit_declaration.push_str(
&<CustomType as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(">)");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" ");
wit_declaration.push_str("struct");
wit_declaration.push_str("(tuple<");
wit_declaration.push_str(
&<() as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(", ");
wit_declaration.push_str(
&<String as linera_witty::WitType>::wit_type_name(),
);
wit_declaration.push_str(">)");
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for a struct
/// with a custom WIT type name.
#[test]
fn struct_with_a_custom_wit_name() {
let input: ItemStruct = parse_quote! {
#[witty(name = "renamed-type")]
struct Type(i16);
};
let wit_name = discover_wit_name(&input.attrs, &input.ident);
let output = derive_for_struct(wit_name, &input.fields);
let expected = quote! {
const SIZE: u32 = <linera_witty::HList![i16] as linera_witty::WitType>::SIZE;
type Layout = <linera_witty::HList![i16] as linera_witty::WitType>::Layout;
type Dependencies = linera_witty::HList![i16];
fn wit_type_name() -> std::borrow::Cow<'static, str> {
"renamed-type".into()
}
fn wit_type_declaration() -> std::borrow::Cow<'static, str> {
let mut wit_declaration =
String::from(concat!(" record " , "renamed-type" , " {\n"));
wit_declaration.push_str(" ");
wit_declaration.push_str("inner0");
wit_declaration.push_str(": ");
wit_declaration.push_str(&*<i16 as linera_witty::WitType>::wit_type_name());
wit_declaration.push_str(",\n");
wit_declaration.push_str(" }\n");
wit_declaration.into ()
}
};
assert_eq!(output.to_string(), expected.to_string());
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/unit_tests/wit_load.rs | linera-witty-macros/src/unit_tests/wit_load.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Unit tests for the `WitLoad` derive macro.
#![cfg(test)]
use quote::quote;
use syn::{parse_quote, Fields, ItemEnum, ItemStruct};
use super::{derive_for_enum, derive_for_struct};
/// Checks the generated code for the body of the implementation of `WitLoad` for a unit struct.
#[test]
fn zero_sized_type() {
let input = Fields::Unit;
let output = derive_for_struct(&input);
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::load(memory, location)?;
Ok(Self)
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::lift_from(flat_layout, memory)?;
Ok(Self)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitLoad` for a named struct.
#[test]
fn named_struct() {
let input: ItemStruct = parse_quote! {
struct Type {
first: u8,
second: CustomType,
}
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![u8, CustomType] as linera_witty::WitLoad>::load(
memory,
location
)?;
Ok(Self { first, second })
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![u8, CustomType] as linera_witty::WitLoad>::lift_from(
flat_layout,
memory
)?;
Ok(Self { first, second })
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitLoad` for a tuple struct.
#[test]
fn tuple_struct() {
let input: ItemStruct = parse_quote! {
struct Type(String, Vec<CustomType>, i64);
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![field0, field1, field2] = <linera_witty::HList![
String,
Vec<CustomType>,
i64
] as linera_witty::WitLoad>::load(memory, location)?;
Ok(Self(field0, field1, field2))
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![field0, field1, field2] = <linera_witty::HList![
String,
Vec<CustomType>,
i64
] as linera_witty::WitLoad>::lift_from(flat_layout, memory)?;
Ok(Self(field0, field1, field2))
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for an enum.
#[test]
fn enum_type() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType),
Struct {
first: (),
second: String,
},
}
};
let output = derive_for_enum(&input.ident, input.variants.iter());
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <u8 as linera_witty::WitLoad>::load(memory, location,)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
match discriminant {
0 => {
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::load(memory, location)?;
Ok(Enum::Empty)
}
1 => {
let linera_witty::hlist_pat![field0, field1] =
<linera_witty::HList![i8, CustomType] as linera_witty::WitLoad>::load(
memory,
location
)?;
Ok(Enum::Tuple(field0, field1))
}
2 => {
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![(), String] as linera_witty::WitLoad>::load(
memory,
location
)?;
Ok(Enum::Struct { first, second })
}
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
fn lift_from<Instance>(
linera_witty::hlist_pat![discriminant_flat_type, ...flat_layout]:
<Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <u8 as linera_witty::WitLoad>::lift_from(
linera_witty::hlist![discriminant_flat_type],
memory,
)?;
match discriminant {
0 => {
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
Ok(Enum::Empty)
}
1 => {
let linera_witty::hlist_pat![field0, field1] =
<linera_witty::HList![i8, CustomType] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
Ok(Enum::Tuple(field0, field1))
}
2 => {
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![(), String] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
Ok(Enum::Struct { first, second })
}
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitLoad` for a named struct
/// with some ignored fields.
#[test]
fn named_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type {
#[witty(skip)]
ignored1: u8,
first: u8,
#[witty(skip)]
ignored2: i128,
#[witty(skip)]
ignored3: String,
second: CustomType,
#[witty(skip)]
ignored4: Vec<()>,
}
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![u8, CustomType] as linera_witty::WitLoad>::load(
memory,
location
)?;
let ignored1 = Default::default();
let ignored2 = Default::default();
let ignored3 = Default::default();
let ignored4 = Default::default();
Ok(Self {
ignored1,
first,
ignored2,
ignored3,
second,
ignored4
})
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![u8, CustomType] as linera_witty::WitLoad>::lift_from(
flat_layout,
memory
)?;
let ignored1 = Default::default();
let ignored2 = Default::default();
let ignored3 = Default::default();
let ignored4 = Default::default();
Ok(Self {
ignored1,
first,
ignored2,
ignored3,
second,
ignored4
})
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitLoad` for a tuple struct
/// with some ignored fields.
#[test]
fn tuple_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type(
#[witty(skip)]
PhantomData<T>,
String,
Vec<CustomType>,
#[witty(skip)]
bool,
i64,
);
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![field1, field2, field4] = <linera_witty::HList![
String,
Vec<CustomType>,
i64
] as linera_witty::WitLoad>::load(memory, location)?;
let field0 = Default::default();
let field3 = Default::default();
Ok(Self(field0, field1, field2, field3, field4))
}
fn lift_from<Instance>(
flat_layout: <Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let linera_witty::hlist_pat![field1, field2, field4] = <linera_witty::HList![
String,
Vec<CustomType>,
i64
] as linera_witty::WitLoad>::lift_from(flat_layout, memory)?;
let field0 = Default::default();
let field3 = Default::default();
Ok(Self(field0, field1, field2, field3, field4))
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitType` for an enum with some
/// ignored fields.
#[test]
fn enum_type_with_skipped_fields() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType, #[witty(skip)] u128),
Struct {
first: (),
#[witty(skip)]
ignored1: (u8, u16),
#[witty(skip)]
ignored2: String,
second: String,
#[witty(skip)]
ignored3: Option<String>,
},
}
};
let output = derive_for_enum(&input.ident, input.variants.iter());
let expected = quote! {
fn load<Instance>(
memory: &linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <u8 as linera_witty::WitLoad>::load(memory, location,)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
match discriminant {
0 => {
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::load(memory, location)?;
Ok(Enum::Empty)
}
1 => {
let linera_witty::hlist_pat![field0, field1] =
<linera_witty::HList![i8, CustomType] as linera_witty::WitLoad>::load(
memory,
location
)?;
let field2 = Default::default();
Ok(Enum::Tuple(field0, field1, field2))
}
2 => {
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![(), String] as linera_witty::WitLoad>::load(
memory,
location
)?;
let ignored1 = Default::default();
let ignored2 = Default::default();
let ignored3 = Default::default();
Ok(Enum::Struct {
first,
ignored1,
ignored2,
second,
ignored3
})
}
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
fn lift_from<Instance>(
linera_witty::hlist_pat![discriminant_flat_type, ...flat_layout]:
<Self::Layout as linera_witty::Layout>::Flat,
memory: &linera_witty::Memory<'_, Instance>,
) -> Result<Self, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let discriminant = <u8 as linera_witty::WitLoad>::lift_from(
linera_witty::hlist![discriminant_flat_type],
memory,
)?;
match discriminant {
0 => {
let linera_witty::hlist_pat![] =
<linera_witty::HList![] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
Ok(Enum::Empty)
}
1 => {
let linera_witty::hlist_pat![field0, field1] =
<linera_witty::HList![i8, CustomType] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
let field2 = Default::default();
Ok(Enum::Tuple(field0, field1, field2))
}
2 => {
let linera_witty::hlist_pat![first, second] =
<linera_witty::HList![(), String] as linera_witty::WitLoad>::lift_from(
linera_witty::JoinFlatLayouts::from_joined(flat_layout),
memory,
)?;
let ignored1 = Default::default();
let ignored2 = Default::default();
let ignored3 = Default::default();
Ok(Enum::Struct {
first,
ignored1,
ignored2,
second,
ignored3
})
}
discriminant => Err(linera_witty::RuntimeError::InvalidVariant {
type_name: ::std::any::type_name::<Self>(),
discriminant: discriminant.into(),
}),
}
}
};
assert_eq!(output.to_string(), expected.to_string());
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/unit_tests/wit_store.rs | linera-witty-macros/src/unit_tests/wit_store.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Unit tests for the `WitStore` derive macro.
#![cfg(test)]
use quote::quote;
use syn::{parse_quote, Fields, ItemEnum, ItemStruct};
use super::{derive_for_enum, derive_for_struct};
/// Checks the generated code for the body of the implementation of `WitStore` for a unit struct.
#[test]
fn zero_sized_type() {
let input = Fields::Unit;
let output = derive_for_struct(&input);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self = self;
linera_witty::hlist![].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self = self;
linera_witty::hlist![].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a named struct.
#[test]
fn named_struct() {
let input: ItemStruct = parse_quote! {
struct Type {
first: u8,
second: CustomType,
}
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, } = self;
linera_witty::hlist![first, second].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, } = self;
linera_witty::hlist![first, second].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a tuple struct.
#[test]
fn tuple_struct() {
let input: ItemStruct = parse_quote! {
struct Type(String, Vec<CustomType>, i64);
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self(field0, field1, field2) = self;
linera_witty::hlist![field0, field1, field2].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self(field0, field1, field2) = self;
linera_witty::hlist![field0, field1, field2].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a enum.
#[test]
fn enum_type() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType),
Struct {
first: (),
second: String,
},
}
};
let output = derive_for_enum(&input.ident, input.variants.iter());
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
Enum::Empty => {
0_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![].store(memory, location)
}
Enum::Tuple(field0, field1) => {
1_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![field0, field1].store(memory, location)
}
Enum::Struct { first, second, } => {
2_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![first, second].store(memory, location)
}
}
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
Enum::Empty => {
let variant_flat_layout = linera_witty::hlist![].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
0_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
Enum::Tuple(field0, field1) => {
let variant_flat_layout = linera_witty::hlist![field0, field1].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
1_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
Enum::Struct { first, second, } => {
let variant_flat_layout = linera_witty::hlist![first, second].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
2_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
}
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a named struct
/// with a single ignored fields.
#[test]
fn named_struct_with_one_skipped_field() {
let input: ItemStruct = parse_quote! {
struct Type {
first: u8,
#[witty(skip)]
ignored: i128,
second: CustomType,
}
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, .. } = self;
linera_witty::hlist![first, second].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, .. } = self;
linera_witty::hlist![first, second].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a named struct
/// with some ignored fields.
#[test]
fn named_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type {
#[witty(skip)]
ignored1: u8,
first: u8,
#[witty(skip)]
ignored2: i128,
#[witty(skip)]
ignored3: String,
second: CustomType,
#[witty(skip)]
ignored4: Vec<()>,
}
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, .. } = self;
linera_witty::hlist![first, second].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self { first, second, .. } = self;
linera_witty::hlist![first, second].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a tuple struct
/// with some ignored fields.
#[test]
fn tuple_struct_with_skipped_fields() {
let input: ItemStruct = parse_quote! {
struct Type(
#[witty(skip)]
PhantomData<T>,
String,
Vec<CustomType>,
#[witty(skip)]
bool,
i64,
);
};
let output = derive_for_struct(&input.fields);
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self(_, field1, field2, _, field4) = self;
linera_witty::hlist![field1, field2, field4].store(memory, location)
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
let Self(_, field1, field2, _, field4) = self;
linera_witty::hlist![field1, field2, field4].lower(memory)
}
};
assert_eq!(output.to_string(), expected.to_string());
}
/// Checks the generated code for the body of the implementation of `WitStore` for a enum with some
/// ignored fields.
#[test]
fn enum_type_with_skipped_fields() {
let input: ItemEnum = parse_quote! {
enum Enum {
Empty,
Tuple(i8, CustomType, #[witty(skip)] u128),
Struct {
first: (),
#[witty(skip)]
ignored1: (u8, u16),
#[witty(skip)]
ignored2: String,
second: String,
#[witty(skip)]
ignored3: Option<String>,
},
}
};
let output = derive_for_enum(&input.ident, input.variants.iter());
let expected = quote! {
fn store<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
mut location: linera_witty::GuestPointer,
) -> Result<(), linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
Enum::Empty => {
0_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![].store(memory, location)
}
Enum::Tuple(field0, field1, _) => {
1_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![field0, field1].store(memory, location)
}
Enum::Struct { first, second, .. } => {
2_u8.store(memory, location)?;
location = location
.after::<u8>()
.after_padding_for::<linera_witty::HList![]>()
.after_padding_for::<linera_witty::HList![i8, CustomType]>()
.after_padding_for::<linera_witty::HList![(), String]>();
linera_witty::hlist![first, second].store(memory, location)
}
}
}
fn lower<Instance>(
&self,
memory: &mut linera_witty::Memory<'_, Instance>,
) -> Result<<Self::Layout as linera_witty::Layout>::Flat, linera_witty::RuntimeError>
where
Instance: linera_witty::InstanceWithMemory,
<Instance::Runtime as linera_witty::Runtime>::Memory:
linera_witty::RuntimeMemory<Instance>,
{
match self {
Enum::Empty => {
let variant_flat_layout = linera_witty::hlist![].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
0_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
Enum::Tuple(field0, field1, _) => {
let variant_flat_layout = linera_witty::hlist![field0, field1].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
1_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
Enum::Struct { first, second, .. } => {
let variant_flat_layout = linera_witty::hlist![first, second].lower(memory)?;
let flat_layout: <Self::Layout as linera_witty::Layout>::Flat =
linera_witty::JoinFlatLayouts::into_joined(
2_u8.lower(memory)? + variant_flat_layout,
);
Ok(flat_layout)
}
}
}
};
assert_eq!(output.to_string(), expected.to_string());
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/unit_tests/specialization.rs | linera-witty-macros/src/unit_tests/specialization.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Unit tests for the `witty_specialize_with` attribute.
use proc_macro2::Span;
use quote::{quote, ToTokens};
use syn::{parse_quote, DeriveInput, Ident};
use super::{super::apply_specialization_attribute, Specialization, Specializations};
/// Checks that the [`DeriveInput`] of a `struct` is changed.
#[test]
fn derive_input_changes() {
let mut input: DeriveInput = parse_quote! {
#[witty_specialize_with(First = u8, Second = Vec<bool>)]
#[witty_specialize_with(Third = (String, i32))]
struct Dummy<'lifetime, First, Second, Third>
where
Option<First>: From<u8>,
Box<[bool]>: From<Second>,
Third: Display,
{
list_of_first: Vec<First>,
second: Second,
third: Third,
}
};
let specializations = apply_specialization_attribute(&mut input);
let expected_specializations = vec![
Specialization {
type_parameter: Ident::new("First", Span::call_site()),
specialized_type: parse_quote!(u8),
},
Specialization {
type_parameter: Ident::new("Second", Span::call_site()),
specialized_type: parse_quote!(Vec<bool>),
},
Specialization {
type_parameter: Ident::new("Third", Span::call_site()),
specialized_type: parse_quote!((String, i32)),
},
];
assert_eq!(specializations.0, expected_specializations);
let expected_changed_input = quote! {
#[witty_specialize_with(First = u8, Second = Vec<bool>)]
#[witty_specialize_with(Third = (String, i32))]
struct Dummy<'lifetime, First, Second, Third>
where
Option<u8>: From<u8>,
Box<[bool]>: From<Vec<bool> >
{
list_of_first: Vec<u8>,
second: Vec<bool>,
third: (String, i32),
}
};
assert_eq!(
input.to_token_stream().to_string(),
expected_changed_input.to_string()
);
}
/// Checks that [`Specialization`] generates correctly specialized [`Generics`].
#[test]
fn generics_are_specialized() {
let specializations = Specializations(vec![
Specialization {
type_parameter: Ident::new("First", Span::call_site()),
specialized_type: parse_quote!(u8),
},
Specialization {
type_parameter: Ident::new("Second", Span::call_site()),
specialized_type: parse_quote!(Vec<bool>),
},
Specialization {
type_parameter: Ident::new("Third", Span::call_site()),
specialized_type: parse_quote!((String, i32)),
},
]);
let generics_source: DeriveInput = parse_quote! {
pub struct Dummy<'lifetime, First, Second, Third, Fourth>
where
Option<u8>: From<u8>,
Box<[bool]>: From<Vec<bool>>;
};
let (impl_generics, type_generics, where_clause) =
specializations.split_generics_from(&generics_source.generics);
let expected_impl_generics = quote! { <'lifetime, Fourth> };
let expected_type_generics = quote! { <'lifetime, u8, Vec<bool>, (String, i32), Fourth> };
let expected_where_clause =
quote! { where Option<u8>: From<u8>, Box<[bool]>: From<Vec<bool> > };
assert_eq!(
impl_generics.to_string(),
expected_impl_generics.to_string()
);
assert_eq!(
type_generics.to_token_stream().to_string(),
expected_type_generics.to_string()
);
assert_eq!(
where_clause.to_token_stream().to_string(),
expected_where_clause.to_string()
);
}
impl PartialEq for Specialization {
fn eq(&self, other: &Self) -> bool {
self.type_parameter == other.type_parameter
&& self.specialized_type.to_token_stream().to_string()
== other.specialized_type.to_token_stream().to_string()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_export/function_information.rs | linera-witty-macros/src/wit_export/function_information.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Extraction of information and generation of code related to a single exported host function.
use heck::ToKebabCase;
use proc_macro2::{Span, TokenStream};
use proc_macro_error::abort;
use quote::{quote, quote_spanned, ToTokens};
use syn::{
spanned::Spanned, FnArg, GenericArgument, GenericParam, Ident, ImplItem, ImplItemFn, LitStr,
PatType, Path, PathArguments, PathSegment, ReturnType, Signature, Token, Type, TypePath,
TypeReference,
};
/// Pieces of information extracted from a function's definition.
pub struct FunctionInformation<'input> {
pub(crate) function: &'input ImplItemFn,
pub(crate) is_reentrant: bool,
pub(crate) call_early_return: Option<Token![?]>,
wit_name: String,
parameter_bindings: TokenStream,
interface_type: TokenStream,
}
impl<'input> FunctionInformation<'input> {
/// Parses a function definition from an [`ImplItem`] and collects pieces of information into a
/// [`FunctionInformation`] instance.
pub fn from_item(item: &'input ImplItem, caller_type_parameter: Option<&'input Ident>) -> Self {
match item {
ImplItem::Fn(function) => FunctionInformation::new(function, caller_type_parameter),
ImplItem::Const(const_item) => abort!(
const_item.ident,
"Const items are not supported in exported types"
),
ImplItem::Type(type_item) => abort!(
type_item.ident,
"Type items are not supported in exported types"
),
ImplItem::Macro(macro_item) => abort!(
macro_item.mac.path,
"Macro items are not supported in exported types"
),
_ => abort!(item, "Only function items are supported in exported types"),
}
}
/// Parses a function definition and collects pieces of information into a
/// [`FunctionInformation`] instance.
pub fn new(function: &'input ImplItemFn, caller_type: Option<&'input Ident>) -> Self {
let wit_name = function.sig.ident.to_string().to_kebab_case();
let is_reentrant = Self::is_reentrant(&function.sig)
|| Self::uses_caller_parameter(&function.sig, caller_type);
let (parameter_bindings, parameter_types) =
Self::parse_parameters(is_reentrant, function.sig.inputs.iter());
let (results, is_fallible) = Self::parse_output(&function.sig.output);
let interface_type = quote_spanned! { function.sig.span() =>
(linera_witty::HList![#parameter_types], #results)
};
FunctionInformation {
function,
is_reentrant,
call_early_return: is_fallible.then(|| Token)),
wit_name,
parameter_bindings,
interface_type,
}
}
/// Checks if a function should be considered as a reentrant function.
///
/// A reentrant function has a generic type parameter that's used as the type of the first
/// parameter.
fn is_reentrant(signature: &Signature) -> bool {
if signature.generics.params.len() != 1 {
return false;
}
let Some(GenericParam::Type(generic_type)) = signature.generics.params.first() else {
return false;
};
Self::first_parameter_is_caller(signature, &generic_type.ident)
}
/// Checks if a function uses a `caller_type` in the first parameter.
///
/// If it does, the function is assumed to be reentrant.
fn uses_caller_parameter(signature: &Signature, caller_type: Option<&Ident>) -> bool {
if let Some(caller_type) = caller_type {
Self::first_parameter_is_caller(signature, caller_type)
} else {
false
}
}
/// Checks if the type of a function's first parameter is the `caller_type`.
fn first_parameter_is_caller(signature: &Signature, caller_type: &Ident) -> bool {
let Some(first_parameter) = signature.inputs.first() else {
return false;
};
let FnArg::Typed(PatType {
ty: first_parameter_type,
..
}) = first_parameter
else {
abort!(
first_parameter,
"`self` parameters aren't supported by Witty"
);
};
let Type::Reference(TypeReference {
mutability: Some(_),
elem: referenced_type,
..
}) = &**first_parameter_type
else {
return false;
};
let Type::Path(TypePath { path, .. }) = &**referenced_type else {
return false;
};
path.is_ident(caller_type)
}
/// Parses a function's parameters and returns the generated code with a list of bindings to the
/// parameters and a list of the parameters types.
fn parse_parameters(
is_reentrant: bool,
inputs: impl Iterator<Item = &'input FnArg> + Clone,
) -> (TokenStream, TokenStream) {
let parameters = inputs
.skip(if is_reentrant { 1 } else { 0 })
.map(|input| match input {
FnArg::Typed(parameter) => parameter,
FnArg::Receiver(receiver) => abort!(
receiver.self_token,
"Exported interfaces can not have `self` parameters"
),
});
let bindings = parameters.clone().map(|parameter| ¶meter.pat);
let types = parameters.map(|parameter| ¶meter.ty);
(quote! { #( #bindings ),* }, quote! { #( #types ),* })
}
/// Parses a function's return type, returning the type to use as the WIT result and whether
/// the function is fallible.
fn parse_output(output: &ReturnType) -> (TokenStream, bool) {
match output {
ReturnType::Default => (quote_spanned! { output.span() => () }, false),
ReturnType::Type(_, return_type) => match ok_type_inside_result(return_type) {
Some(inner_type) => (inner_type.to_token_stream(), true),
None => (return_type.to_token_stream(), false),
},
}
}
/// Generates the code to export a host function using the Wasmer runtime.
#[cfg(with_wasmer)]
pub fn generate_for_wasmer(
&self,
namespace: &LitStr,
type_name: &Ident,
caller: &Type,
) -> TokenStream {
let input_to_guest_parameters = quote! {
linera_witty::wasmer::WasmerParameters::from_wasmer(input)
};
let guest_results_to_output = quote! {
linera_witty::wasmer::WasmerResults::into_wasmer(guest_results)
};
let output_results_trait = quote! { linera_witty::wasmer::WasmerResults };
self.generate(
namespace,
type_name,
caller,
input_to_guest_parameters,
guest_results_to_output,
output_results_trait,
)
}
/// Generates the code to export a host function using the Wasmtime runtime.
#[cfg(with_wasmtime)]
pub fn generate_for_wasmtime(
&self,
namespace: &LitStr,
type_name: &Ident,
caller: &Type,
) -> TokenStream {
let input_to_guest_parameters = quote! {
linera_witty::wasmtime::WasmtimeParameters::from_wasmtime(input)
};
let guest_results_to_output = quote! {
linera_witty::wasmtime::WasmtimeResults::into_wasmtime(guest_results)
};
let output_results_trait = quote! { linera_witty::wasmtime::WasmtimeResults };
self.generate(
namespace,
type_name,
caller,
input_to_guest_parameters,
guest_results_to_output,
output_results_trait,
)
}
/// Generates the code to export a host function using a mock Wasm instance for testing.
#[cfg(with_testing)]
pub fn generate_for_mock_instance(
&self,
namespace: &LitStr,
type_name: &Ident,
caller: &Type,
) -> TokenStream {
let input_to_guest_parameters = quote! { input };
let guest_results_to_output = quote! { guest_results };
let output_results_trait = quote! { linera_witty::MockResults };
self.generate(
namespace,
type_name,
caller,
input_to_guest_parameters,
guest_results_to_output,
output_results_trait,
)
}
/// Generates the code to export using a host function.
fn generate(
&self,
namespace: &LitStr,
type_name: &Ident,
caller: &Type,
input_to_guest_parameters: TokenStream,
guest_results_to_output: TokenStream,
output_results_trait: TokenStream,
) -> TokenStream {
let wit_name = &self.wit_name;
let interface_type = &self.interface_type;
let host_parameters = &self.parameter_bindings;
let call_early_return = &self.call_early_return;
let function_name = &self.function.sig.ident;
let caller_parameter = self.is_reentrant.then(|| quote! { &mut caller, });
let output_type = quote_spanned! { self.function.sig.output.span() =>
<
<
#interface_type as linera_witty::ExportedFunctionInterface
>::GuestResults as #output_results_trait
>::Results
};
quote_spanned! { self.function.span() =>
linera_witty::ExportFunction::export(
target,
#namespace,
#wit_name,
#[allow(clippy::type_complexity)]
|mut caller: #caller, input| -> Result<#output_type, linera_witty::RuntimeError> {
type Interface = #interface_type;
let guest_parameters = #input_to_guest_parameters;
let (linera_witty::hlist_pat![#host_parameters], result_storage) =
<Interface as linera_witty::ExportedFunctionInterface>::lift_parameters(
guest_parameters,
&linera_witty::InstanceWithMemory::memory(&mut caller)?,
)?;
#[allow(clippy::let_unit_value)]
let host_results = #type_name::#function_name(
#caller_parameter
#host_parameters
) #call_early_return;
let guest_results =
<Interface as linera_witty::ExportedFunctionInterface>::lower_results(
host_results,
result_storage,
&mut linera_witty::InstanceWithMemory::memory(&mut caller)?,
)?;
#[allow(clippy::unit_arg)]
Ok(#guest_results_to_output)
}
)?;
}
}
}
/// Returns the type inside the `Ok` variant of the `maybe_result_type`.
///
/// The type is only considered if it's a [`Result`] type with `RuntimeError` as its error variant.
pub(crate) fn ok_type_inside_result(maybe_result_type: &Type) -> Option<&Type> {
let Type::Path(TypePath { qself: None, path }) = maybe_result_type else {
return None;
};
let (ok_type, error_type) = result_type_arguments(path)?;
if let Type::Path(TypePath { qself: None, path }) = error_type {
if !path.is_ident("RuntimeError") {
return None;
}
} else {
return None;
}
Some(ok_type)
}
/// Returns the generic type arguments of the [`Result`] type in `result_path`.
fn result_type_arguments(result_path: &Path) -> Option<(&Type, &Type)> {
if !type_is_result(result_path) {
return None;
}
let PathArguments::AngleBracketed(type_arguments) = &result_path.segments.last()?.arguments
else {
return None;
};
if type_arguments.args.len() != 2 {
return None;
}
let mut arguments = type_arguments.args.iter();
let GenericArgument::Type(ok_type) = arguments.next()? else {
return None;
};
let GenericArgument::Type(error_type) = arguments.next()? else {
return None;
};
Some((ok_type, error_type))
}
/// Checks if `result_path` is a [`Result`] type.
fn type_is_result(result_path: &Path) -> bool {
let segment_count = result_path.segments.len();
if segment_count == 1 {
result_path.leading_colon.is_none() && path_matches_segments(result_path, &["Result"])
} else if result_path.segments.len() == 3 {
path_matches_segments(result_path, &["std", "result", "Result"])
} else {
false
}
}
/// Checks if `path` matches the provided path `segments`.
fn path_matches_segments(path: &Path, segments: &[&str]) -> bool {
if path.segments.len() != segments.len() {
return false;
}
for (index, (segment, expected)) in path.segments.iter().zip(segments).enumerate() {
let with_type_parameters = index == segments.len() - 1;
if !is_path_segment(segment, expected, with_type_parameters) {
return false;
}
}
true
}
/// Checks if `segment` is the `expected_identifier` and if it should have generic type parameters.
fn is_path_segment(
segment: &PathSegment,
expected_identifier: &str,
with_type_parameters: bool,
) -> bool {
let arguments_are_correct = if with_type_parameters {
matches!(segment.arguments, PathArguments::AngleBracketed(_))
} else {
matches!(segment.arguments, PathArguments::None)
};
segment.ident == expected_identifier && arguments_are_correct
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_export/mod.rs | linera-witty-macros/src/wit_export/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Generation of code to export host functions to a Wasm guest instance.
// TODO(#1683): Remove feature flags by generating runtime agnostic code
#![cfg(with_wit_export)]
mod caller_type_parameter;
mod function_information;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::{quote, ToTokens};
use syn::{
parse_quote, punctuated::Punctuated, token::Paren, Generics, Ident, ItemImpl, LitStr, Type,
TypePath, TypeTuple,
};
use self::caller_type_parameter::CallerTypeParameter;
pub(crate) use self::function_information::{ok_type_inside_result, FunctionInformation};
use super::wit_interface;
use crate::util::AttributeParameters;
/// Returns the code generated for exporting host functions to guest Wasm instances.
///
/// The generated code implements the `linera_witty::ExportTo` trait for the Wasm runtimes enabled
/// through feature flags. The trait implementation exports the host functions in the input `impl`
/// block to a provided Wasm guest instance.
pub fn generate(implementation: &ItemImpl, parameters: AttributeParameters) -> TokenStream {
WitExportGenerator::new(implementation, parameters).generate()
}
/// A helper type for generation of the code to export host functions to Wasm guest instances.
///
/// Code generating is done in two phases. First the necessary pieces are collected and stored in
/// this type. Then, they are used to generate the final code.
pub struct WitExportGenerator<'input> {
parameters: AttributeParameters,
namespace: LitStr,
type_name: &'input Ident,
caller_type_parameter: Option<CallerTypeParameter<'input>>,
generics: &'input Generics,
implementation: &'input ItemImpl,
functions: Vec<FunctionInformation<'input>>,
}
impl<'input> WitExportGenerator<'input> {
/// Collects the pieces necessary for code generation from the inputs.
pub fn new(implementation: &'input ItemImpl, parameters: AttributeParameters) -> Self {
let type_name = type_name(implementation);
let namespace = parameters.namespace(type_name);
let caller_type_parameter = CallerTypeParameter::extract_from(&implementation.generics);
let caller = caller_type_parameter
.as_ref()
.map(CallerTypeParameter::caller);
let functions = implementation
.items
.iter()
.map(|item| FunctionInformation::from_item(item, caller))
.collect();
WitExportGenerator {
parameters,
namespace,
type_name,
caller_type_parameter,
generics: &implementation.generics,
implementation,
functions,
}
}
/// Consumes the collected pieces to generate the final code.
pub fn generate(mut self) -> TokenStream {
let implementation = self.implementation;
let wasmer = self.generate_for_wasmer();
let wasmtime = self.generate_for_wasmtime();
let mock_instance = self.generate_for_mock_instance();
let wit_interface = self.generate_wit_interface();
quote! {
#implementation
#wasmer
#wasmtime
#mock_instance
#wit_interface
}
}
/// Generates the code to export functions using the Wasmer runtime.
fn generate_for_wasmer(&mut self) -> TokenStream {
#[cfg(with_wasmer)]
{
let user_data_type = self.user_data_type();
let export_target = quote! { linera_witty::wasmer::InstanceBuilder<#user_data_type> };
let target_caller_type: Type = parse_quote! {
linera_witty::wasmer::FunctionEnvMut<
'_,
linera_witty::wasmer::Environment<#user_data_type>,
>
};
let exported_functions = self.functions.iter().map(|function| {
function.generate_for_wasmer(&self.namespace, self.type_name, &target_caller_type)
});
self.generate_for(export_target, &target_caller_type, exported_functions)
}
#[cfg(not(with_wasmer))]
{
TokenStream::new()
}
}
/// Generates the code to export functions using the Wasmtime runtime.
fn generate_for_wasmtime(&mut self) -> TokenStream {
#[cfg(with_wasmtime)]
{
let user_data_type = self.user_data_type();
let export_target = quote! { linera_witty::wasmtime::Linker<#user_data_type> };
let target_caller_type: Type =
parse_quote! { linera_witty::wasmtime::Caller<'_, #user_data_type> };
let exported_functions = self.functions.iter().map(|function| {
function.generate_for_wasmtime(&self.namespace, self.type_name, &target_caller_type)
});
self.generate_for(export_target, &target_caller_type, exported_functions)
}
#[cfg(not(with_wasmtime))]
{
TokenStream::new()
}
}
/// Generates the code to export functions to a mock instance for testing.
fn generate_for_mock_instance(&mut self) -> TokenStream {
#[cfg(with_testing)]
{
let user_data_type = self.user_data_type();
let export_target = quote! { linera_witty::MockInstance<#user_data_type> };
let target_caller_type: Type =
parse_quote! { linera_witty::MockInstance<#user_data_type> };
let exported_functions = self.functions.iter().map(|function| {
function.generate_for_mock_instance(
&self.namespace,
self.type_name,
&target_caller_type,
)
});
self.generate_for(export_target, &target_caller_type, exported_functions)
}
#[cfg(not(with_testing))]
{
TokenStream::new()
}
}
/// Generates the implementation of `ExportTo` for the `export_target` including the
/// `exported_functions`.
fn generate_for(
&self,
export_target: TokenStream,
target_caller_type: &Type,
exported_functions: impl Iterator<Item = TokenStream>,
) -> TokenStream {
let (impl_generics, self_type, where_clause) = self.prepare_generics(target_caller_type);
quote! {
impl #impl_generics linera_witty::ExportTo<#export_target> for #self_type
#where_clause
{
fn export_to(
target: &mut #export_target,
) -> Result<(), linera_witty::RuntimeError> {
#( #exported_functions )*
Ok(())
}
}
}
}
/// Specializes the [`CallerTypeParameter`] (if present) with the `target_caller_type`, and
/// returns the split generics part used in the implementation blocks.
fn prepare_generics(&self, target_caller_type: &Type) -> (TokenStream, Type, TokenStream) {
let mut self_type = (*self.implementation.self_ty).clone();
if let Some(caller_type_parameter) = self.caller_type_parameter {
caller_type_parameter.specialize_type(&mut self_type, target_caller_type.clone());
let (impl_generics, _type_generics, where_clause) = caller_type_parameter
.specialize_and_split_generics(self.generics.clone(), target_caller_type.clone());
(impl_generics, self_type, where_clause)
} else {
let (impl_generics, _type_generics, where_clause) = self.generics.split_for_impl();
(
impl_generics.to_token_stream(),
self_type,
where_clause.to_token_stream(),
)
}
}
/// Returns the type to use for the custom user data.
fn user_data_type(&self) -> Type {
self.caller_type_parameter
.as_ref()
.and_then(CallerTypeParameter::user_data)
.cloned()
.unwrap_or_else(|| {
// Unit type
Type::Tuple(TypeTuple {
paren_token: Paren::default(),
elems: Punctuated::new(),
})
})
}
/// Generates the implementation of `WitInterface` for the type.
fn generate_wit_interface(&self) -> TokenStream {
let self_type = &self.implementation.self_ty;
let type_name = self.type_name;
let wit_interface_implementation = wit_interface::generate(
self.parameters.package_name(),
self.parameters.interface_name(type_name),
&self.functions,
);
let (impl_generics, _type_generics, where_clause) =
self.implementation.generics.split_for_impl();
quote! {
impl #impl_generics linera_witty::wit_generation::WitInterface for #self_type
#where_clause
{
#wit_interface_implementation
}
}
}
}
/// Returns the type name of the type the `impl` block is for.
pub fn type_name(implementation: &ItemImpl) -> &Ident {
let Type::Path(TypePath {
qself: None,
path: path_name,
}) = &*implementation.self_ty
else {
abort!(
implementation.self_ty,
"`#[wit_export]` must be used on `impl` blocks",
);
};
&path_name
.segments
.last()
.unwrap_or_else(|| {
abort!(implementation.self_ty, "Missing type name identifier");
})
.ident
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-witty-macros/src/wit_export/caller_type_parameter.rs | linera-witty-macros/src/wit_export/caller_type_parameter.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Representation of a shared generic type parameter for the caller.
use std::collections::HashMap;
use proc_macro2::TokenStream;
use proc_macro_error::abort;
use quote::ToTokens;
use syn::{
punctuated::Punctuated, AngleBracketedGenericArguments, AssocType, GenericArgument, Generics,
Ident, PathArguments, PathSegment, PredicateType, Token, TraitBound, TraitBoundModifier, Type,
TypeParam, TypeParamBound, TypePath, WhereClause, WherePredicate,
};
use crate::util::{Specialization, Specializations};
/// Information on the generic type parameter to use for the caller parameter, if present.
#[derive(Clone, Copy, Debug)]
pub struct CallerTypeParameter<'input> {
caller: &'input Ident,
user_data: Option<&'input Type>,
}
impl<'input> CallerTypeParameter<'input> {
/// Parses a type's [`Generics`] to determine if a caller type parameter should be used.
pub fn extract_from(generics: &'input Generics) -> Option<Self> {
let where_bounds = Self::parse_bounds_from_where_clause(generics.where_clause.as_ref());
generics
.type_params()
.find_map(|parameter| Self::try_from_parameter(parameter, &where_bounds))
}
/// Parses the bounds present in an optional `where_clause`.
///
/// Returns a map between constrained types and its predicates.
fn parse_bounds_from_where_clause(
where_clause: Option<&'input WhereClause>,
) -> HashMap<&'input Ident, Vec<&'input TypeParamBound>> {
where_clause
.into_iter()
.flat_map(|where_clause| where_clause.predicates.iter())
.filter_map(|predicate| match predicate {
WherePredicate::Type(predicate) => Self::extract_predicate_bounds(predicate),
_ => None,
})
.collect()
}
/// Extracts the constrained type and its bounds from a predicate.
///
/// Returns [`None`] if the predicate does not apply to a type that's a single identifier
/// (which could be a generic type parameter).
fn extract_predicate_bounds(
predicate: &'input PredicateType,
) -> Option<(&'input Ident, Vec<&'input TypeParamBound>)> {
let target_identifier = Self::extract_identifier(&predicate.bounded_ty)?;
Some((target_identifier, predicate.bounds.iter().collect()))
}
/// Extracts the [`Ident`] that forms the `candidate_type`, if the [`Type`] is a single
/// identifier.
fn extract_identifier(candidate_type: &'input Type) -> Option<&'input Ident> {
let Type::Path(TypePath { qself: None, path }) = candidate_type else {
return None;
};
if path.leading_colon.is_some() || path.segments.len() != 1 {
return None;
}
let segment = path.segments.first()?;
if !matches!(&segment.arguments, PathArguments::None) {
return None;
}
Some(&segment.ident)
}
/// Attempts to create a [`CallerTypeParameter`] from a generic [`TypeParam`].
///
/// Succeeds if and only if the `where_bounds` map contains a predicate for the `parameter`.
fn try_from_parameter(
parameter: &'input TypeParam,
where_bounds: &HashMap<&'input Ident, Vec<&'input TypeParamBound>>,
) -> Option<Self> {
let caller = ¶meter.ident;
let bounds = where_bounds
.get(caller)
.into_iter()
.flatten()
.copied()
.chain(parameter.bounds.iter());
let instance_bound_path_segment = bounds
.filter_map(Self::extract_trait_bound_path)
.find_map(Self::extract_instance_bound_path_segment)?;
let user_data =
Self::extract_instance_bound_arguments(&instance_bound_path_segment.arguments)
.and_then(Self::extract_instance_bound_user_data);
Some(CallerTypeParameter { caller, user_data })
}
/// Extracts the path from a trait `bound`.
fn extract_trait_bound_path(
bound: &'input TypeParamBound,
) -> Option<impl Iterator<Item = &'input PathSegment> + Clone + 'input> {
match bound {
TypeParamBound::Trait(TraitBound {
paren_token: None,
modifier: TraitBoundModifier::None,
lifetimes: None,
path,
}) => Some(path.segments.iter()),
_ => None,
}
}
/// Extracts the [`PathSegment`] with the generic type parameters that could contain the user
/// data type.
fn extract_instance_bound_path_segment(
segments: impl Iterator<Item = &'input PathSegment> + Clone,
) -> Option<&'input PathSegment> {
Self::extract_aliased_instance_bound_path_segment(segments.clone())
.or_else(|| Self::extract_direct_instance_bound_path_segment(segments))
}
/// Extracts the [`PathSegment`] for the identifier that uses an `InstanceFor..` trait alias
/// generated by Witty.
fn extract_aliased_instance_bound_path_segment(
mut segments: impl Iterator<Item = &'input PathSegment>,
) -> Option<&'input PathSegment> {
let segment = segments.next()?;
if segment.ident.to_string().starts_with("InstanceFor") && segments.next().is_none() {
Some(segment)
} else {
None
}
}
/// Extracts the [`PathSegment`] for the identifier that uses an `Instance` trait directly.
fn extract_direct_instance_bound_path_segment(
segments: impl Iterator<Item = &'input PathSegment>,
) -> Option<&'input PathSegment> {
let mut segments = segments.peekable();
if matches!(
segments.peek(),
Some(PathSegment { ident, arguments: PathArguments::None })
if ident == "linera_witty",
) {
segments.next();
}
let segment = segments.next()?;
if segment.ident == "Instance" && segments.next().is_none() {
Some(segment)
} else {
None
}
}
/// Extracts the generic arguments from `arguments`.
fn extract_instance_bound_arguments(
arguments: &'input PathArguments,
) -> Option<&'input Punctuated<GenericArgument, Token![,]>> {
match arguments {
PathArguments::AngleBracketed(AngleBracketedGenericArguments {
colon2_token: None,
args,
..
}) => Some(args),
_ => None,
}
}
/// Extracts the custom user data [`Type`] from the caller bound's generic `arguments`.
fn extract_instance_bound_user_data(
arguments: &'input Punctuated<GenericArgument, Token![,]>,
) -> Option<&'input Type> {
if arguments.len() != 1 {
abort!(
arguments,
"Caller type parameter should have a user data type. \
E.g. `Caller: linera_witty::Instance<UserData = CustomData>`"
);
}
match arguments
.iter()
.next()
.expect("Missing argument in arguments list")
{
GenericArgument::AssocType(AssocType {
ident,
generics: None,
ty: user_data,
..
}) if ident == "UserData" => Some(user_data),
_ => abort!(
arguments,
"Caller type parameter should have a user data type. \
E.g. `Caller: linera_witty::Instance<UserData = CustomData>`"
),
}
}
/// Returns the [`Ident`]ifier of the generic type parameter used for the caller.
pub fn caller(&self) -> &'input Ident {
self.caller
}
/// Returns the type used for custom user data, if there is a caller type parameter.
pub fn user_data(&self) -> Option<&'input Type> {
self.user_data
}
/// Specializes the [`Generics`] to replace the [`CallerTypeParameter`] with the concrete
/// `caller_type`, and splits it into the parts used in implementation blocks.
pub fn specialize_and_split_generics(
&self,
mut generics: Generics,
caller_type: Type,
) -> (TokenStream, TokenStream, TokenStream) {
let specializations = self.build_specializations(caller_type);
specializations.apply_to_generics(&mut generics);
let (impl_generics, type_generics, where_clause) =
specializations.split_generics_from(&generics);
(
impl_generics,
type_generics.into_token_stream(),
where_clause.into_token_stream(),
)
}
/// Specializes the [`CallerTypeParameter`] in the `target_type` with the concrete
/// `caller_type`.
pub fn specialize_type(&self, target_type: &mut Type, caller_type: Type) {
self.build_specializations(caller_type)
.apply_to_type(target_type);
}
/// Builds the [`Specializations`] instance to replace the [`CallerTypeParameter`] with the
/// concrete `caller_type`.
fn build_specializations(&self, caller_type: Type) -> Specializations {
Specializations::from_iter(Some(Specialization::new(self.caller.clone(), caller_type)))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/lib.rs | linera-summary/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This crate provides the internal tool to summarize performance changes in PRs.
#![allow(missing_docs)]
pub mod ci_runtime_comparison;
pub mod github;
pub mod performance_summary;
pub mod summary_options;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/ci_runtime_comparison.rs | linera-summary/src/ci_runtime_comparison.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use anyhow::{ensure, Result};
use octocrab::models::workflows::{Conclusion, Job, Status};
use serde::Serialize;
use tracing::warn;
type WorkflowName = String;
#[derive(Serialize)]
pub struct WorkflowJobRuntimeComparison {
name: String,
workflow_name: String,
base_runtime: u64,
pr_runtime: u64,
runtime_difference_pct: f64,
}
impl WorkflowJobRuntimeComparison {
pub fn name(&self) -> &str {
&self.name
}
pub fn workflow_name(&self) -> &str {
&self.workflow_name
}
pub fn base_runtime(&self) -> u64 {
self.base_runtime
}
pub fn pr_runtime(&self) -> u64 {
self.pr_runtime
}
pub fn runtime_difference_pct(&self) -> f64 {
self.runtime_difference_pct
}
}
// The key is the name of the workflow, and the value is a list of per job comparisons.
#[derive(Serialize)]
pub struct CiRuntimeComparison(pub BTreeMap<WorkflowName, Vec<WorkflowJobRuntimeComparison>>);
impl CiRuntimeComparison {
fn get_runtimes(jobs: Vec<Job>) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
let mut runtimes: BTreeMap<String, BTreeMap<String, u64>> = BTreeMap::new();
for job in jobs {
ensure!(job.status == Status::Completed);
ensure!(job.conclusion.is_some());
ensure!(job.conclusion.unwrap() == Conclusion::Success);
ensure!(job.completed_at.is_some());
runtimes.entry(job.workflow_name).or_default().insert(
job.name.clone(),
job.completed_at
.unwrap()
.signed_duration_since(job.started_at)
.num_seconds()
.try_into()?,
);
}
Ok(runtimes)
}
pub fn from_jobs(base_jobs: Vec<Job>, pr_jobs: Vec<Job>) -> Result<Self> {
let base_runtimes = Self::get_runtimes(base_jobs)?;
let pr_runtimes = Self::get_runtimes(pr_jobs)?;
Ok(Self(
pr_runtimes
.into_iter()
.map(|(workflow_name, jobs)| {
(
workflow_name.clone(),
jobs.into_iter()
.filter_map(|(job_name, pr_runtime)| {
let base_jobs_runtimes = base_runtimes.get(&workflow_name);
if let Some(base_jobs_runtimes) = base_jobs_runtimes {
let base_runtime = base_jobs_runtimes.get(&job_name);
if let Some(base_runtime) = base_runtime {
return Some(WorkflowJobRuntimeComparison {
name: job_name.clone(),
workflow_name: workflow_name.clone(),
pr_runtime,
base_runtime: *base_runtime,
runtime_difference_pct:
((pr_runtime as f64) / (*base_runtime as f64)
- 1.0)
* 100.0,
});
} else {
warn!(
"No base runtime information found for job {} of workflow {}",
job_name, workflow_name
);
}
} else {
warn!(
"No base runtime information found for workflow {}",
workflow_name
);
}
None
})
.collect::<Vec<_>>(),
)
})
.collect::<BTreeMap<_, _>>(),
))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/performance_summary.rs | linera-summary/src/performance_summary.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::HashSet, time::Duration};
use anyhow::{bail, Result};
use humantime::format_duration;
use serde::Serialize;
use crate::{ci_runtime_comparison::CiRuntimeComparison, github::Github};
pub const PR_COMMENT_HEADER: &str = "## Performance Summary for commit";
#[derive(Serialize)]
pub struct PerformanceSummary {
#[serde(skip_serializing)]
github: Github,
ci_runtime_comparison: CiRuntimeComparison,
}
impl PerformanceSummary {
pub async fn init(github: Github, tracked_workflows: HashSet<String>) -> Result<Self> {
let workflows_handler = github.workflows_handler();
let workflows = github
.workflows(&workflows_handler)
.await?
.into_iter()
.filter(|workflow| tracked_workflows.contains(&workflow.name))
.collect::<Vec<_>>();
let base_jobs = Box::pin(github.latest_jobs(
github.context().base_branch(),
"push",
&workflows_handler,
&workflows,
))
.await?;
if base_jobs.is_empty() {
bail!("No base jobs found!");
}
let pr_jobs = Box::pin(github.latest_jobs(
github.context().pr_branch(),
"pull_request",
&workflows_handler,
&workflows,
))
.await?;
if pr_jobs.is_empty() {
bail!("No PR jobs found!");
}
Ok(Self {
github,
ci_runtime_comparison: CiRuntimeComparison::from_jobs(base_jobs, pr_jobs)?,
})
}
fn format_comment_body(&self) -> String {
let commit_hash = self.github.context().pr_commit_hash();
let short_commit_hash = &commit_hash[..7];
let commit_url = format!(
"https://github.com/{}/{}/commit/{}",
self.github.context().repository().owner(),
self.github.context().repository().name(),
commit_hash
);
let mut markdown_content = format!(
"{} [{}]({})\n\n",
PR_COMMENT_HEADER, short_commit_hash, commit_url
);
markdown_content.push_str("### CI Runtime Comparison\n\n");
for (workflow_name, comparisons) in &self.ci_runtime_comparison.0 {
markdown_content.push_str(&format!("#### Workflow: {}\n\n", workflow_name));
markdown_content
.push_str("| Job Name | Base Runtime | PR Runtime | Runtime Difference (%) |\n");
markdown_content.push_str("| --- | --- | --- | --- |\n");
for comparison in comparisons {
let base_runtime =
format_duration(Duration::from_secs(comparison.base_runtime())).to_string();
let pr_runtime =
format_duration(Duration::from_secs(comparison.pr_runtime())).to_string();
let runtime_difference_pct = format!("{:.2}%", comparison.runtime_difference_pct());
markdown_content.push_str(&format!(
"| {} | {} | {} | {} |\n",
comparison.name(),
base_runtime,
pr_runtime,
runtime_difference_pct
));
}
markdown_content.push('\n');
}
markdown_content
}
// Updates an existing comment or creates a new one in the PR.
pub async fn upsert_pr_comment(&self) -> Result<()> {
self.github
.upsert_pr_comment(self.format_comment_body())
.await
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/github.rs | linera-summary/src/github.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::env;
use anyhow::{anyhow, Context, Result};
use octocrab::{
models::workflows::{Job, Run, WorkFlow},
params::workflows::Filter,
workflows::WorkflowsHandler,
Octocrab,
};
use tracing::info;
use crate::performance_summary::PR_COMMENT_HEADER;
const API_REQUEST_DELAY_MS: u64 = 100;
const IGNORED_JOB_PREFIXES: &[&str] = &["lint-", "check-outdated-cli-md"];
pub struct GithubRepository {
owner: String,
name: String,
}
impl GithubRepository {
pub fn owner(&self) -> &str {
&self.owner
}
pub fn name(&self) -> &str {
&self.name
}
fn from_env(is_local: bool) -> Result<Self> {
let env_repository = env::var("GITHUB_REPOSITORY");
let repository = if is_local {
env_repository.unwrap_or_else(|_| "linera-io/linera-protocol".to_string())
} else {
env_repository.map_err(|_| {
anyhow!("GITHUB_REPOSITORY is not set! This must be run from within CI")
})?
};
let parts = repository.split('/').collect::<Vec<_>>();
assert_eq!(parts.len(), 2);
let owner = parts[0].to_string();
let name = parts[1].to_string();
Ok(Self { owner, name })
}
}
pub struct GithubContext {
repository: GithubRepository,
pr_commit_hash: String,
pr_branch: String,
base_branch: String,
pr_number: u64,
}
impl GithubContext {
    /// Returns the branch that the pull request targets.
    pub fn base_branch(&self) -> &str {
        self.base_branch.as_str()
    }

    /// Returns the pull request's head branch.
    pub fn pr_branch(&self) -> &str {
        self.pr_branch.as_str()
    }

    /// Returns the hash of the pull request's head commit.
    pub fn pr_commit_hash(&self) -> &str {
        self.pr_commit_hash.as_str()
    }

    /// Returns the repository the pull request belongs to.
    pub fn repository(&self) -> &GithubRepository {
        &self.repository
    }

    /// Reads `(commit_hash, branch_name, base_branch)` from the local git checkout.
    fn get_local_git_info() -> Result<(String, String, String)> {
        let repo = git2::Repository::open_from_env().context("Failed to open git repository")?;
        let head = repo.head()?;
        let commit_hash = head.peel_to_commit()?.id().to_string();
        if !head.is_branch() {
            anyhow::bail!("HEAD is not on a branch - it may be detached");
        }
        let branch_name = head
            .shorthand()
            .ok_or_else(|| anyhow!("Failed to get current branch name"))?
            .to_string();
        // This local mode is only used for testing, so we're just hardcoding `main` as the base branch for now.
        Ok((commit_hash, branch_name, "main".to_string()))
    }

    /// Builds the context from the local git state (`is_local == true`, which
    /// requires `pr_number`) or from the `GITHUB_*` environment variables set by CI.
    fn from_env(is_local: bool, pr_number: Option<u64>) -> Result<Self> {
        let (pr_commit_hash, pr_branch, base_branch, pr_number) = if is_local {
            let (commit_hash, branch_name, base) = Self::get_local_git_info()?;
            let number = pr_number.ok_or_else(|| anyhow!("pr_number is None"))?;
            (commit_hash, branch_name, base, number)
        } else {
            // Check `GITHUB_PR_NUMBER` first so its absence is reported before
            // the other variables, matching the established error precedence.
            let pr_string = env::var("GITHUB_PR_NUMBER").map_err(|_| {
                anyhow!("GITHUB_PR_NUMBER is not set! This must be run from within CI")
            })?;
            let commit_hash = env::var("GITHUB_PR_COMMIT_HASH").map_err(|_| {
                anyhow!("GITHUB_PR_COMMIT_HASH is not set! This must be run from within CI")
            })?;
            let branch = env::var("GITHUB_PR_BRANCH").map_err(|_| {
                anyhow!("GITHUB_PR_BRANCH is not set! This must be run from within CI")
            })?;
            let base = env::var("GITHUB_BASE_BRANCH").map_err(|_| {
                anyhow!("GITHUB_BASE_BRANCH is not set! This must be run from within CI")
            })?;
            let number = pr_string.parse().map_err(|_| {
                anyhow!("GITHUB_PR_NUMBER is not a valid number: {}", pr_string)
            })?;
            (commit_hash, branch, base, number)
        };
        Ok(Self {
            repository: GithubRepository::from_env(is_local)?,
            pr_commit_hash,
            pr_branch,
            base_branch,
            pr_number,
        })
    }
}
/// A GitHub API client together with the pull-request context it operates on.
pub struct Github {
    // Authenticated (CI) or anonymous (local) GitHub API client.
    octocrab: Octocrab,
    // Repository and pull-request information resolved from the environment.
    context: GithubContext,
    // When true, mutating API calls (PR comments) are only logged, not sent.
    is_local: bool,
}
impl Github {
    /// Creates a new GitHub client.
    ///
    /// In CI mode (`is_local == false`) a `GITHUB_TOKEN` personal token is
    /// required; in local mode the client is anonymous and mutating calls are
    /// only logged. `pr_number` must be provided in local mode.
    pub fn new(is_local: bool, pr_number: Option<u64>) -> Result<Self> {
        let octocrab_builder = Octocrab::builder();
        let octocrab =
            if is_local {
                octocrab_builder
            } else {
                octocrab_builder.personal_token(env::var("GITHUB_TOKEN").map_err(|_| {
                    anyhow!("GITHUB_TOKEN is not set! This must be run from within CI")
                })?)
            }
            .build()
            .map_err(|_| anyhow!("Creating Octocrab instance should not fail!"))?;
        Ok(Self {
            octocrab,
            context: GithubContext::from_env(is_local, pr_number)?,
            is_local,
        })
    }

    /// Returns the pull-request context this client operates on.
    pub fn context(&self) -> &GithubContext {
        &self.context
    }

    // Updates an existing comment or creates a new one in the PR.
    //
    // The existing comment is recognized by its `github-actions[bot]` author
    // and the `PR_COMMENT_HEADER` prefix. The summary is always printed to
    // stdout so CI can use it as the job summary; in local mode nothing is
    // posted to GitHub.
    pub async fn upsert_pr_comment(&self, body: String) -> Result<()> {
        let issue_handler = self.octocrab.issues(
            self.context.repository.owner.clone(),
            self.context.repository.name.clone(),
        );
        let existing_comment_id = issue_handler
            .list_comments(self.context.pr_number)
            .send()
            .await?
            .items
            .into_iter()
            .find_map(|comment| {
                if comment.user.login == "github-actions[bot]"
                    && comment
                        .body
                        .is_some_and(|body| body.starts_with(PR_COMMENT_HEADER))
                {
                    Some(comment.id)
                } else {
                    None
                }
            });
        // Always print the summary to stdout, as we'll use it to set the job summary in CI.
        info!("Printing summary to stdout...");
        println!("{}", body);
        if let Some(existing_comment_id) = existing_comment_id {
            if self.is_local {
                info!(
                    "Would have updated comment {} on PR {}, but is local",
                    existing_comment_id, self.context.pr_number
                );
            } else {
                info!(
                    "Updating existing comment {} on PR {}",
                    existing_comment_id, self.context.pr_number
                );
                issue_handler
                    .update_comment(existing_comment_id, body)
                    .await?;
            }
        } else if self.is_local {
            info!(
                "Would have commented on PR {}, but is local",
                self.context.pr_number
            );
        } else {
            info!("Commenting on PR {}", self.context.pr_number);
            issue_handler
                .create_comment(self.context.pr_number, body)
                .await?;
        }
        Ok(())
    }

    /// Returns the latest successful run of each workflow on `branch` for
    /// `event`, skipping workflows without such a run.
    async fn latest_runs(
        &self,
        branch: &str,
        event: &str,
        workflows_handler: &WorkflowsHandler<'_>,
        workflows: &[WorkFlow],
    ) -> Result<Vec<Run>> {
        let mut latest_runs = Vec::new();
        for workflow in workflows {
            // Add a delay between requests to avoid rate limiting
            tokio::time::sleep(tokio::time::Duration::from_millis(API_REQUEST_DELAY_MS)).await;
            let runs = workflows_handler
                .list_runs(workflow.id.to_string())
                .branch(branch)
                .event(event)
                .status("success")
                .per_page(1)
                .send()
                .await?
                .items;
            // `per_page(1)` means there is at most one run; take ownership of
            // it directly instead of `first().unwrap().clone()`.
            if let Some(run) = runs.into_iter().next() {
                info!(
                    "Got latest run for workflow \"{}\", on path \"{}\", for branch \"{}\" and event \"{}\", with \"success\" status",
                    workflow.name,
                    workflow.path,
                    branch,
                    event
                );
                latest_runs.push(run);
            } else {
                // Not all workflows will necessarily have runs for the given branch and event.
                info!(
                    "No runs found for workflow \"{}\", on path \"{}\", for branch \"{}\" and event \"{}\", with \"success\" status",
                    workflow.name,
                    workflow.path,
                    branch,
                    event
                );
            }
        }
        Ok(latest_runs)
    }

    /// Returns the jobs of the latest successful runs (see [`Self::latest_runs`]),
    /// excluding jobs whose names start with one of the `IGNORED_JOB_PREFIXES`.
    pub async fn latest_jobs(
        &self,
        branch: &str,
        event: &str,
        workflows_handler: &WorkflowsHandler<'_>,
        workflows: &[WorkFlow],
    ) -> Result<Vec<Job>> {
        let latest_runs = self
            .latest_runs(branch, event, workflows_handler, workflows)
            .await?;
        let mut jobs = Vec::new();
        for run in latest_runs {
            // Add a delay between requests to avoid rate limiting
            tokio::time::sleep(tokio::time::Duration::from_millis(API_REQUEST_DELAY_MS)).await;
            let run_jobs = workflows_handler
                .list_jobs(run.id)
                .filter(Filter::Latest)
                .send()
                .await?
                .items;
            info!("Got {} jobs for run {}", run_jobs.len(), run.name);
            jobs.push(run_jobs);
        }
        let jobs = jobs.into_iter().flatten().collect::<Vec<_>>();
        let jobs_len = jobs.len();
        let jobs_filtered = jobs
            .into_iter()
            .filter(|job| {
                !IGNORED_JOB_PREFIXES
                    .iter()
                    .any(|prefix| job.name.starts_with(prefix))
            }) // Filter out jobs with ignored prefixes
            .collect::<Vec<_>>();
        info!("Filtered out {} jobs", jobs_len - jobs_filtered.len());
        info!("Returning {} jobs", jobs_filtered.len());
        Ok(jobs_filtered)
    }

    /// Returns a handler for the repository's workflows API.
    pub fn workflows_handler(&self) -> WorkflowsHandler {
        self.octocrab.workflows(
            self.context.repository.owner.clone(),
            self.context.repository.name.clone(),
        )
    }

    /// Lists all workflows defined in the repository.
    pub async fn workflows(
        &self,
        workflows_handler: &WorkflowsHandler<'_>,
    ) -> Result<Vec<WorkFlow>> {
        Ok(workflows_handler.list().send().await?.items)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/main.rs | linera-summary/src/main.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::process;
use anyhow::{Context, Result};
use linera_summary::{
github::Github, performance_summary::PerformanceSummary, summary_options::SummaryOptions,
};
use tracing::{error, Instrument};
use tracing_subscriber::{
prelude::__tracing_subscriber_SubscriberExt as _, util::SubscriberInitExt as _,
};
/// Builds the performance summary for the configured workflows and upserts it
/// as a PR comment (printed instead of posted in local mode).
async fn run(options: SummaryOptions) -> Result<()> {
    let github = Github::new(options.is_local(), options.pr_number())?;
    let tracked_workflows = options.workflows();
    PerformanceSummary::init(github, tracked_workflows)
        .await?
        .upsert_pr_comment()
        .await?;
    Ok(())
}
/// Entry point: parses options, installs tracing, builds a Tokio runtime and
/// runs the summary generation, exiting with code 2 on failure.
fn main() -> anyhow::Result<()> {
    let options = SummaryOptions::init();
    tracing_subscriber::registry()
        .with(tracing_subscriber::fmt::layer())
        .with(tracing_subscriber::EnvFilter::from_default_env())
        .init();

    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .context("Failed to create Tokio runtime")?;
    let span = tracing::info_span!("linera-summary::main");
    let result = runtime.block_on(run(options).instrument(span));

    let error_code = if let Err(msg) = result {
        error!("Error: {msg:?}");
        2
    } else {
        0
    };
    process::exit(error_code);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-summary/src/summary_options.rs | linera-summary/src/summary_options.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashSet;
use linera_version::VersionInfo;
// Command-line options for the performance-summary generator.
//
// NOTE(review): `///` doc comments on clap-derived items become user-visible
// help text, so reviewer notes here deliberately use `//` comments.
#[derive(clap::Parser)]
#[command(
    name = "linera-summary",
    version = VersionInfo::default_clap_str(),
    about = "Executable for performance summary generation.",
)]
pub struct SummaryOptions {
    // Selects CI or local mode (see `Command`).
    #[command(subcommand)]
    command: Command,
    /// The list of comma separated workflow names to track.
    #[arg(long, required = true)]
    workflows: String,
}
// Subcommand choosing between CI and local execution.
//
// NOTE(review): the `///` comments below double as clap help text and are
// therefore left untouched.
#[derive(clap::Subcommand)]
enum Command {
    /// Run in CI mode.
    Ci,
    /// Run in local mode. Instead of commenting on the PR, the summary is printed to stdout.
    Local {
        /// PR number to analyze.
        #[arg(long, required = true)]
        pr: u64,
    },
}
impl SummaryOptions {
    /// Parses the options from the process arguments.
    pub fn init() -> Self {
        <SummaryOptions as clap::Parser>::parse()
    }

    /// Whether the tool runs in local mode.
    pub fn is_local(&self) -> bool {
        match self.command {
            Command::Local { .. } => true,
            Command::Ci => false,
        }
    }

    /// The PR number to analyze, if running in local mode.
    pub fn pr_number(&self) -> Option<u64> {
        if let Command::Local { pr } = self.command {
            Some(pr)
        } else {
            None
        }
    }

    /// The set of workflow names to track, parsed from the comma-separated list.
    pub fn workflows(&self) -> HashSet<String> {
        self.workflows
            .split(',')
            .map(|name| name.to_string())
            .collect()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/build.rs | linera-sdk/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() {
    // Define short cfg aliases so the rest of the crate can write readable
    // `#[cfg(with_...)]` attributes instead of repeating these compound conditions.
    cfg_aliases::cfg_aliases! {
        with_testing: { any(test, feature = "test") },
        with_wasm_runtime: { any(feature = "wasmer", feature = "wasmtime") },
        with_integration_testing: {
            all(not(target_arch = "wasm32"), with_testing, with_wasm_runtime)
        },
    };
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/ethereum.rs | linera-sdk/src/ethereum.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Support for Linera applications that interact with Ethereum or other EVM contracts.
use std::fmt::Debug;
use async_graphql::scalar;
use async_trait::async_trait;
use linera_base::http;
pub use linera_ethereum::{
client::EthereumQueries,
common::{EthereumDataType, EthereumEvent},
};
use linera_ethereum::{client::JsonRpcClient, common::EthereumServiceError};
use serde::{Deserialize, Serialize};
use crate::{
contract::wit::base_runtime_api as contract_wit, service::wit::base_runtime_api as service_wit,
};
// TODO(#3143): Unify the two types into a single `EthereumClient` type.
/// A wrapper for a URL that implements `JsonRpcClient` and uses the JSON oracle to make requests.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct ContractEthereumClient {
    /// The URL of the JSON-RPC server, without the method or parameters.
    pub url: String,
}

// Expose the client as an opaque GraphQL scalar (serialized through serde).
scalar!(ContractEthereumClient);

impl ContractEthereumClient {
    /// Creates a new [`ContractEthereumClient`] from an URL.
    pub fn new(url: String) -> Self {
        Self { url }
    }
}
#[async_trait]
impl JsonRpcClient for ContractEthereumClient {
    type Error = EthereumServiceError;

    /// Returns the JSON-RPC request ID; a constant is used here.
    async fn get_id(&self) -> u64 {
        1
    }

    /// Sends `payload` as an HTTP POST through the contract runtime's HTTP
    /// oracle and returns the raw response body.
    async fn request_inner(&self, payload: Vec<u8>) -> Result<Vec<u8>, Self::Error> {
        let request = http::Request {
            method: http::Method::Post,
            url: self.url.clone(),
            headers: vec![http::Header::new("Content-Type", b"application/json")],
            body: payload,
        };
        let response = contract_wit::perform_http_request(&request.into());
        Ok(response.body)
    }
}
/// A wrapper for a URL that implements `JsonRpcClient` and uses the JSON oracle to make requests.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct ServiceEthereumClient {
    /// The URL of the JSON-RPC server, without the method or parameters.
    pub url: String,
}

// Expose the client as an opaque GraphQL scalar (serialized through serde).
scalar!(ServiceEthereumClient);

impl ServiceEthereumClient {
    /// Creates a new [`ServiceEthereumClient`] from an URL.
    pub fn new(url: String) -> Self {
        Self { url }
    }
}
#[async_trait]
impl JsonRpcClient for ServiceEthereumClient {
    type Error = EthereumServiceError;

    /// Returns the JSON-RPC request ID; a constant is used here.
    async fn get_id(&self) -> u64 {
        1
    }

    /// Sends `payload` as an HTTP POST through the service runtime's HTTP
    /// oracle and returns the raw response body.
    async fn request_inner(&self, payload: Vec<u8>) -> Result<Vec<u8>, Self::Error> {
        let request = http::Request {
            method: http::Method::Post,
            url: self.url.clone(),
            headers: vec![http::Header::new("Content-Type", b"application/json")],
            body: payload,
        };
        let response = service_wit::perform_http_request(&request.into());
        Ok(response.body)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/lib.rs | linera-sdk/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides an SDK for developing Linera applications using Rust.
//!
//! A Linera application consists of two WebAssembly binaries: a contract and a service.
//! Both binaries have access to the same application and chain specific storage. The service only
//! has read-only access, while the contract can write to it. The storage should be used to store
//! the application state, which is persisted across blocks. The state can be a custom type that
//! uses [`linera-views`](https://docs.rs/linera-views/latest/linera_views/index.html), a framework
//! that allows lazily loading selected parts of the state. This is useful if the application's
//! state is large and doesn't need to be loaded in its entirety for every execution.
//!
//! The contract binary should create a type to implement the [`Contract`](crate::Contract) trait.
//! The type can store the [`ContractRuntime`](contract::ContractRuntime) and the state, and must
//! have its implementation exported by using the [`contract!`](crate::contract!) macro.
//!
//! The service binary should create a type to implement the [`Service`](crate::Service) trait.
//! The type can store the [`ServiceRuntime`](service::ServiceRuntime) and the state, and must have
//! its implementation exported by using the [`service!`](crate::service!) macro.
//!
//! # Examples
//!
//! The [`examples`](https://github.com/linera-io/linera-protocol/tree/main/examples)
//! directory contains some example applications.
#[macro_use]
pub mod util;
pub mod abis;
mod base;
pub mod contract;
#[cfg(feature = "ethereum")]
pub mod ethereum;
mod extensions;
pub mod graphql;
pub mod linera_base_types;
mod log;
pub mod service;
#[cfg(with_testing)]
pub mod test;
pub mod views;
use std::fmt::Debug;
pub use bcs;
pub use linera_base::{
abi,
data_types::{Resources, SendMessageRequest},
ensure, http, task_processor,
};
use linera_base::{
abi::{ContractAbi, ServiceAbi, WithContractAbi, WithServiceAbi},
data_types::StreamUpdate,
};
use serde::{de::DeserializeOwned, Serialize};
pub use serde_json;
#[doc(hidden)]
pub use self::{contract::export_contract, service::export_service};
pub use self::{
contract::ContractRuntime,
extensions::{FromBcsBytes, ToBcsBytes},
log::{ContractLogger, ServiceLogger},
service::ServiceRuntime,
views::{KeyValueStore, ViewStorageContext},
};
/// The contract interface of a Linera application.
///
/// As opposed to the [`Service`] interface of an application, contract entry points
/// are triggered by the execution of blocks in a chain. Their execution may modify
/// storage and is gas-metered.
///
/// Below we use the word "transaction" to refer to the current operation or message being
/// executed.
#[allow(async_fn_in_trait)]
pub trait Contract: WithContractAbi + ContractAbi + Sized {
    /// The type of message executed by the application.
    ///
    /// Messages are executed when a message created by the same application is received
    /// from another chain and accepted in a block.
    type Message: Serialize + DeserializeOwned + Debug;

    /// Immutable parameters specific to this application (e.g. the name of a token).
    type Parameters: Serialize + DeserializeOwned + Clone + Debug;

    /// Instantiation argument passed to a new application on the chain that created it
    /// (e.g. an initial amount of tokens minted).
    ///
    /// To share configuration data on every chain, use [`Contract::Parameters`]
    /// instead.
    type InstantiationArgument: Serialize + DeserializeOwned + Debug;

    /// Event values for streams created by this application.
    type EventValue: Serialize + DeserializeOwned + Debug;

    /// Creates an in-memory instance of the contract handler.
    async fn load(runtime: ContractRuntime<Self>) -> Self;

    /// Instantiates the application on the chain that created it.
    ///
    /// This is only called once when the application is created and only on the microchain that
    /// created the application.
    async fn instantiate(&mut self, argument: Self::InstantiationArgument);

    /// Applies an operation from the current block.
    ///
    /// Operations are created by users and added to blocks, serving as the starting point for an
    /// application's execution.
    async fn execute_operation(&mut self, operation: Self::Operation) -> Self::Response;

    /// Applies a message originating from a cross-chain message.
    ///
    /// Messages are sent across chains. These messages are created and received by
    /// the same application. Messages can be either single-sender and single-receiver, or
    /// single-sender and multiple-receivers. The former allows sending cross-chain messages to the
    /// application on some other specific chain, while the latter uses broadcast channels to
    /// send a message to multiple other chains where the application is subscribed to a
    /// sender channel on this chain.
    ///
    /// For a message to be executed, a user must mark it to be received in a block of the receiver
    /// chain.
    async fn execute_message(&mut self, message: Self::Message);

    /// Reacts to new events on streams.
    ///
    /// This is called whenever there is a new event on any stream that this application
    /// subscribes to. The default implementation ignores the updates.
    async fn process_streams(&mut self, _updates: Vec<StreamUpdate>) {}

    /// Finishes the execution of the current transaction.
    ///
    /// This is called once at the end of the transaction, to allow all applications that
    /// participated in the transaction to perform any final operations, such as persisting their
    /// state.
    ///
    /// The application may also cancel the transaction by panicking if there is unfinished
    /// work that cannot be completed.
    async fn store(self);
}
/// The service interface of a Linera application.
///
/// As opposed to the [`Contract`] interface of an application, service entry points
/// are triggered by JSON queries (typically GraphQL). Their execution cannot modify
/// storage and is not gas-metered.
#[allow(async_fn_in_trait)]
pub trait Service: WithServiceAbi + ServiceAbi + Sized {
    /// Immutable parameters specific to this application.
    type Parameters: Serialize + DeserializeOwned + Send + Sync + Clone + Debug + 'static;

    /// Creates an in-memory instance of the service handler.
    async fn new(runtime: ServiceRuntime<Self>) -> Self;

    /// Executes a read-only query on the state of this application.
    ///
    /// The `Query` and `QueryResponse` types come from the application's
    /// [`ServiceAbi`].
    async fn handle_query(&self, query: Self::Query) -> Self::QueryResponse;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/log.rs | linera-sdk/src/log.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
panic::{self, PanicHookInfo},
sync::Once,
};
use log::{LevelFilter, Log, Metadata, Record};
use crate::{
contract::wit::base_runtime_api as contract_wit, service::wit::base_runtime_api as service_wit,
};
// Logger singletons: `log::set_logger` requires a `&'static` logger.
static CONTRACT_LOGGER: ContractLogger = ContractLogger;
static SERVICE_LOGGER: ServiceLogger = ServiceLogger;
// Shared by both `install` functions, so at most one logger is ever registered
// per process.
static INSTALL_LOGGER: Once = Once::new();
/// A logger that uses the system API for contracts.
#[derive(Clone, Copy, Debug)]
pub struct ContractLogger;

impl ContractLogger {
    /// Configures [`log`] to use the log system API for contracts.
    ///
    /// Also installs a panic hook that reports panics through the logger.
    /// Repeated calls are no-ops thanks to the shared `INSTALL_LOGGER` guard.
    pub fn install() {
        INSTALL_LOGGER.call_once(|| {
            log::set_logger(&CONTRACT_LOGGER).expect("Failed to initialize contract logger");
            log::set_max_level(LevelFilter::Trace);
            panic::set_hook(Box::new(log_panic));
        });
    }
}
impl Log for ContractLogger {
    /// Every level is enabled; the host decides what to do with each record.
    fn enabled(&self, _metadata: &Metadata) -> bool {
        true
    }

    /// Formats the record and forwards it to the contract runtime's log API.
    fn log(&self, record: &Record) {
        let message = record.args().to_string();
        contract_wit::log(&message, record.level().into());
    }

    /// Records are forwarded immediately, so there is nothing to flush.
    fn flush(&self) {}
}
/// A logger that uses the system API for services.
#[derive(Clone, Copy, Debug)]
pub struct ServiceLogger;

impl ServiceLogger {
    /// Configures [`log`] to use the log system API for services.
    ///
    /// Also installs a panic hook that reports panics through the logger.
    /// Repeated calls are no-ops thanks to the shared `INSTALL_LOGGER` guard.
    pub fn install() {
        INSTALL_LOGGER.call_once(|| {
            log::set_logger(&SERVICE_LOGGER).expect("Failed to initialize service logger");
            log::set_max_level(LevelFilter::Trace);
            panic::set_hook(Box::new(log_panic));
        });
    }
}
impl Log for ServiceLogger {
    /// Every level is enabled; the host decides what to do with each record.
    fn enabled(&self, _metadata: &Metadata) -> bool {
        true
    }

    /// Formats the record and forwards it to the service runtime's log API.
    fn log(&self, record: &Record) {
        let message = record.args().to_string();
        service_wit::log(&message, record.level().into());
    }

    /// Records are forwarded immediately, so there is nothing to flush.
    fn flush(&self) {}
}
/// Logs a panic using the [`log`] API.
fn log_panic(info: &PanicHookInfo<'_>) {
log::error!("{info}");
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/extensions.rs | linera-sdk/src/extensions.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Extension traits with some common functionality.
use serde::{de::DeserializeOwned, Serialize};
/// Extension trait to deserialize a type from a vector of bytes using [`bcs`].
pub trait FromBcsBytes: Sized {
/// Deserializes itself from a vector of bytes using [`bcs`].
fn from_bcs_bytes(bytes: &[u8]) -> Result<Self, bcs::Error>;
}
impl<T> FromBcsBytes for T
where
T: DeserializeOwned,
{
fn from_bcs_bytes(bytes: &[u8]) -> Result<Self, bcs::Error> {
bcs::from_bytes(bytes)
}
}
/// Extension trait to serialize a type into a vector of bytes using [`bcs`].
pub trait ToBcsBytes {
/// Serializes itself into a vector of bytes using [`bcs`].
fn to_bcs_bytes(&self) -> Result<Vec<u8>, bcs::Error>;
}
impl<T> ToBcsBytes for T
where
T: Serialize,
{
fn to_bcs_bytes(&self) -> Result<Vec<u8>, bcs::Error> {
bcs::to_bytes(self)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/util.rs | linera-sdk/src/util.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Module with helper types and functions used by the SDK.
use std::{
future::Future,
pin::{pin, Pin},
task::{Context, Poll},
};
use futures::task;
/// Yields the current asynchronous task so that other tasks may progress if possible.
///
/// The task is rescheduled immediately, so it resumes as soon as the executor polls
/// it again.
pub fn yield_once() -> YieldOnce {
    YieldOnce::default()
}

/// A [`Future`] that is [`Poll::Pending`] exactly once, waking itself right away so
/// the executor polls it again as soon as possible.
#[derive(Default)]
pub struct YieldOnce {
    yielded: bool,
}

impl Future for YieldOnce {
    type Output = ();

    fn poll(self: Pin<&mut Self>, context: &mut Context) -> Poll<Self::Output> {
        // `YieldOnce` only holds a `bool`, so it is `Unpin` and we can work on
        // the plain mutable reference.
        let this = self.get_mut();
        if this.yielded {
            return Poll::Ready(());
        }
        this.yielded = true;
        // Request an immediate re-poll before yielding.
        context.waker().wake_by_ref();
        Poll::Pending
    }
}
/// An extension trait to block on a [`Future`] until it completes.
pub trait BlockingWait {
    /// The type returned by the [`Future`].
    type Output;

    /// Waits for the [`Future`] to complete in a blocking manner.
    ///
    /// Effectively polls the [`Future`] repeatedly until it returns [`Poll::Ready`].
    /// NOTE(review): the provided implementation busy-polls with a noop waker, so it
    /// presumably only suits futures that make progress on every poll — confirm
    /// before using it with wakers-dependent futures.
    fn blocking_wait(self) -> Self::Output;
}
impl<AnyFuture> BlockingWait for AnyFuture
where
    AnyFuture: Future,
{
    type Output = AnyFuture::Output;

    /// Busy-polls the future with a noop waker until it resolves.
    fn blocking_wait(mut self) -> Self::Output {
        let waker = task::noop_waker();
        let mut task_context = Context::from_waker(&waker);
        let mut future = pin!(self);
        // The noop waker never schedules anything, so we simply poll again
        // immediately until the future is ready.
        loop {
            if let Poll::Ready(output) = future.as_mut().poll(&mut task_context) {
                return output;
            }
        }
    }
}
/// Unit tests for the helpers defined in the `util` module.
#[cfg(test)]
mod tests {
    use std::task::{Context, Poll};

    use futures::{future::poll_fn, task::noop_waker, FutureExt as _};

    use super::{yield_once, BlockingWait};

    /// Tests the behavior of the [`YieldOnce`] future.
    ///
    /// Checks the internal state before and after the first and second polls, and ensures that
    /// only the first poll returns [`Poll::Pending`].
    #[test]
    #[expect(clippy::bool_assert_comparison)]
    fn yield_once_returns_pending_only_on_first_call() {
        let mut future = yield_once();
        let waker = noop_waker();
        let mut context = Context::from_waker(&waker);

        assert_eq!(future.yielded, false);
        assert!(future.poll_unpin(&mut context).is_pending());
        // The first poll flips the flag, so every later poll completes.
        assert_eq!(future.yielded, true);
        assert!(future.poll_unpin(&mut context).is_ready());
        assert_eq!(future.yielded, true);
    }

    /// Tests the behavior of the [`BlockingWait`] extension.
    #[test]
    fn blocking_wait_blocks_until_future_is_ready() {
        // A future that must be polled 100 times before it completes.
        let mut remaining_polls = 100;
        let future = poll_fn(|_context| {
            if remaining_polls == 0 {
                Poll::Ready(())
            } else {
                remaining_polls -= 1;
                Poll::Pending
            }
        });

        future.blocking_wait();

        // `blocking_wait` must have driven the future through all 100 polls.
        assert_eq!(remaining_polls, 0);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/linera_base_types.rs | linera-sdk/src/linera_base_types.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types reexported from [`linera_base`].
pub use linera_base::{
abi::*,
crypto::*,
data_types::*,
identifiers::*,
ownership::*,
vm::{EvmInstantiation, EvmOperation, EvmQuery, VmRuntime},
BcsHexParseError,
};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/graphql.rs | linera-sdk/src/graphql.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! GraphQL traits for generating interfaces into applications.
use std::sync::Arc;
/// Re-exports the derive macro for [`GraphQLMutationRoot`].
pub use linera_sdk_derive::GraphQLMutationRoot;
use crate::{Service, ServiceRuntime};
/// An object associated with a GraphQL mutation root. Those are typically used to build
/// an [`async_graphql::Schema`] object.
pub trait GraphQLMutationRoot<Application>
where
    Application: Service,
{
    /// The type of the mutation root.
    type MutationRoot: async_graphql::ObjectType;

    /// Returns the mutation root of the object.
    ///
    /// The runtime is passed in an [`Arc`] so its ownership can be shared.
    fn mutation_root(runtime: Arc<ServiceRuntime<Application>>) -> Self::MutationRoot;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/service/wit.rs | linera-sdk/src/service/wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Internal module with code generated by [`wit-bindgen`](https://github.com/jvff/wit-bindgen).
#![allow(missing_docs)]
// Export the service interface.
// Generates the WIT bindings for the `service` world, including the
// `export_service` macro used by application services.
wit_bindgen::generate!({
    world: "service",
    export_macro_name: "export_service",
    pub_export_macro: true,
});

// Re-export the generated host API modules under shorter paths.
pub use self::linera::app::{base_runtime_api, service_runtime_api};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/service/test_runtime.rs | linera-sdk/src/service/test_runtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Runtime types to simulate interfacing with the host executing the service.
use std::{
collections::{HashMap, VecDeque},
mem,
sync::Mutex,
};
use linera_base::{
abi::ServiceAbi,
data_types::{Amount, BlockHeight, Timestamp},
hex, http,
identifiers::{AccountOwner, ApplicationId, ChainId, DataBlobHash},
};
use serde::{de::DeserializeOwned, Serialize};
use crate::{KeyValueStore, Service, ViewStorageContext};
/// The runtime available during execution of a query.
pub struct MockServiceRuntime<Application>
where
    Application: Service,
{
    // Each `Mutex<Option<_>>` field mirrors one host API value; `None` means
    // "not mocked yet", and the corresponding getter asks the caller to use the
    // matching `set_*`/`with_*` method first.
    application_parameters: Mutex<Option<Application::Parameters>>,
    application_id: Mutex<Option<ApplicationId<Application::Abi>>>,
    application_creator_chain_id: Mutex<Option<ChainId>>,
    chain_id: Mutex<Option<ChainId>>,
    next_block_height: Mutex<Option<BlockHeight>>,
    timestamp: Mutex<Option<Timestamp>>,
    chain_balance: Mutex<Option<Amount>>,
    owner_balances: Mutex<Option<HashMap<AccountOwner, Amount>>>,
    query_application_handler: Mutex<Option<QueryApplicationHandler>>,
    // FIFO queue of (expected request, canned response) pairs for HTTP mocking.
    expected_http_requests: Mutex<VecDeque<(http::Request, http::Response)>>,
    blobs: Mutex<Option<HashMap<DataBlobHash, Vec<u8>>>>,
    // Serialized operations scheduled by the service during query handling.
    scheduled_operations: Mutex<Vec<Vec<u8>>>,
    // In-memory key-value store standing in for the host storage.
    key_value_store: KeyValueStore,
}
impl<Application> Default for MockServiceRuntime<Application>
where
    Application: Service,
{
    /// Equivalent to [`MockServiceRuntime::new`]: a runtime with nothing mocked yet.
    fn default() -> Self {
        Self::new()
    }
}
impl<Application> MockServiceRuntime<Application>
where
Application: Service,
{
/// Creates a new [`MockServiceRuntime`] instance for a service.
pub fn new() -> Self {
MockServiceRuntime {
application_parameters: Mutex::new(None),
application_id: Mutex::new(None),
application_creator_chain_id: Mutex::new(None),
chain_id: Mutex::new(None),
next_block_height: Mutex::new(None),
timestamp: Mutex::new(None),
chain_balance: Mutex::new(None),
owner_balances: Mutex::new(None),
query_application_handler: Mutex::new(None),
expected_http_requests: Mutex::new(VecDeque::new()),
blobs: Mutex::new(None),
scheduled_operations: Mutex::new(vec![]),
key_value_store: KeyValueStore::mock(),
}
}
/// Returns the key-value store to interface with storage.
pub fn key_value_store(&self) -> KeyValueStore {
self.key_value_store.clone()
}
/// Returns a storage context suitable for a root view.
pub fn root_view_storage_context(&self) -> ViewStorageContext {
ViewStorageContext::new_unchecked(self.key_value_store(), Vec::new(), ())
}
/// Configures the application parameters to return during the test.
pub fn with_application_parameters(
self,
application_parameters: Application::Parameters,
) -> Self {
*self.application_parameters.lock().unwrap() = Some(application_parameters);
self
}
/// Configures the application parameters to return during the test.
pub fn set_application_parameters(
&self,
application_parameters: Application::Parameters,
) -> &Self {
*self.application_parameters.lock().unwrap() = Some(application_parameters);
self
}
/// Returns the application parameters provided when the application was created.
pub fn application_parameters(&self) -> Application::Parameters {
Self::fetch_mocked_value(
&self.application_parameters,
"Application parameters have not been mocked, \
please call `MockServiceRuntime::set_application_parameters` first",
)
}
/// Configures the application ID to return during the test.
pub fn with_application_id(self, application_id: ApplicationId<Application::Abi>) -> Self {
*self.application_id.lock().unwrap() = Some(application_id);
self
}
/// Configures the application ID to return during the test.
pub fn set_application_id(&self, application_id: ApplicationId<Application::Abi>) -> &Self {
*self.application_id.lock().unwrap() = Some(application_id);
self
}
/// Returns the ID of the current application.
pub fn application_id(&self) -> ApplicationId<Application::Abi> {
Self::fetch_mocked_value(
&self.application_id,
"Application ID has not been mocked, \
please call `MockServiceRuntime::set_application_id` first",
)
}
/// Configures the application creator chain ID to return during the test.
pub fn with_application_creator_chain_id(self, application_creator_chain_id: ChainId) -> Self {
*self.application_creator_chain_id.lock().unwrap() = Some(application_creator_chain_id);
self
}
/// Configures the application creator chain ID to return during the test.
pub fn set_application_creator_chain_id(&self, application_creator_chain_id: ChainId) -> &Self {
*self.application_creator_chain_id.lock().unwrap() = Some(application_creator_chain_id);
self
}
/// Returns the chain ID of the current application creator.
pub fn application_creator_chain_id(&self) -> ChainId {
Self::fetch_mocked_value(
&self.application_creator_chain_id,
"Application creator chain ID has not been mocked, \
please call `MockServiceRuntime::set_application_creator_chain_id` first",
)
}
/// Configures the chain ID to return during the test.
pub fn with_chain_id(self, chain_id: ChainId) -> Self {
*self.chain_id.lock().unwrap() = Some(chain_id);
self
}
/// Configures the chain ID to return during the test.
pub fn set_chain_id(&self, chain_id: ChainId) -> &Self {
*self.chain_id.lock().unwrap() = Some(chain_id);
self
}
/// Returns the ID of the current chain.
pub fn chain_id(&self) -> ChainId {
Self::fetch_mocked_value(
&self.chain_id,
"Chain ID has not been mocked, \
please call `MockServiceRuntime::set_chain_id` first",
)
}
/// Configures the next block height to return during the test.
pub fn with_next_block_height(self, next_block_height: BlockHeight) -> Self {
*self.next_block_height.lock().unwrap() = Some(next_block_height);
self
}
/// Configures the block height to return during the test.
pub fn set_next_block_height(&self, next_block_height: BlockHeight) -> &Self {
*self.next_block_height.lock().unwrap() = Some(next_block_height);
self
}
/// Returns the height of the next block that can be added to the current chain.
pub fn next_block_height(&self) -> BlockHeight {
Self::fetch_mocked_value(
&self.next_block_height,
"Next block height has not been mocked, \
please call `MockServiceRuntime::set_next_block_height` first",
)
}
/// Configures the system time to return during the test.
pub fn with_system_time(self, timestamp: Timestamp) -> Self {
*self.timestamp.lock().unwrap() = Some(timestamp);
self
}
/// Configures the system time to return during the test.
pub fn set_system_time(&self, timestamp: Timestamp) -> &Self {
*self.timestamp.lock().unwrap() = Some(timestamp);
self
}
/// Retrieves the current system time, i.e. the timestamp of the block in which this is called.
pub fn system_time(&self) -> Timestamp {
Self::fetch_mocked_value(
&self.timestamp,
"System time has not been mocked, \
please call `MockServiceRuntime::set_system_time` first",
)
}
/// Configures the chain balance to return during the test.
pub fn with_chain_balance(self, chain_balance: Amount) -> Self {
*self.chain_balance.lock().unwrap() = Some(chain_balance);
self
}
/// Configures the chain balance to return during the test.
pub fn set_chain_balance(&self, chain_balance: Amount) -> &Self {
*self.chain_balance.lock().unwrap() = Some(chain_balance);
self
}
/// Returns the current chain balance.
pub fn chain_balance(&self) -> Amount {
Self::fetch_mocked_value(
&self.chain_balance,
"Chain balance has not been mocked, \
please call `MockServiceRuntime::set_chain_balance` first",
)
}
/// Configures the balances on the chain to use during the test.
pub fn with_owner_balances(
self,
owner_balances: impl IntoIterator<Item = (AccountOwner, Amount)>,
) -> Self {
*self.owner_balances.lock().unwrap() = Some(owner_balances.into_iter().collect());
self
}
/// Configures the balances on the chain to use during the test.
pub fn set_owner_balances(
&self,
owner_balances: impl IntoIterator<Item = (AccountOwner, Amount)>,
) -> &Self {
*self.owner_balances.lock().unwrap() = Some(owner_balances.into_iter().collect());
self
}
/// Configures the balance of one account on the chain to use during the test.
pub fn with_owner_balance(self, owner: AccountOwner, balance: Amount) -> Self {
self.set_owner_balance(owner, balance);
self
}
/// Configures the balance of one account on the chain to use during the test.
pub fn set_owner_balance(&self, owner: AccountOwner, balance: Amount) -> &Self {
self.owner_balances
.lock()
.unwrap()
.get_or_insert_with(HashMap::new)
.insert(owner, balance);
self
}
/// Returns the balance of one of the accounts on this chain.
pub fn owner_balance(&self, owner: AccountOwner) -> Amount {
self.owner_balances
.lock()
.unwrap()
.as_mut()
.and_then(|owner_balances| owner_balances.get(&owner).copied())
.unwrap_or_else(|| {
panic!(
"Balance for owner {owner} was not mocked, \
please include a balance for them with a call to \
`MockServiceRuntime::set_owner_balance`"
)
})
}
/// Returns the balances of all accounts on the chain.
pub fn owner_balances(&self) -> Vec<(AccountOwner, Amount)> {
self.owner_balances
.lock()
.unwrap()
.as_ref()
.expect(
"Owner balances have not been mocked, \
please call `MockServiceRuntime::set_owner_balances` first",
)
.iter()
.map(|(owner, amount)| (*owner, *amount))
.collect()
}
/// Returns the owners of accounts on this chain.
pub fn balance_owners(&self) -> Vec<AccountOwner> {
self.owner_balances
.lock()
.unwrap()
.as_ref()
.expect(
"Owner balances have not been mocked, \
please call `MockServiceRuntime::set_owner_balances` first",
)
.keys()
.copied()
.collect()
}
/// Schedules an operation to be included in the block being built.
///
/// The operation is specified as an opaque blob of bytes.
pub fn schedule_raw_operation(&self, operation: Vec<u8>) {
self.scheduled_operations.lock().unwrap().push(operation);
}
/// Schedules an operation to be included in the block being built.
///
/// The operation is serialized using BCS.
pub fn schedule_operation(&self, operation: &impl Serialize) {
let bytes = bcs::to_bytes(operation).expect("Failed to serialize application operation");
self.schedule_raw_operation(bytes);
}
/// Returns the list of operations scheduled since the most recent of:
///
/// - the last call to this method;
/// - the last call to [`Self::scheduled_operations`];
/// - or since the mock runtime was created.
pub fn raw_scheduled_operations(&self) -> Vec<Vec<u8>> {
mem::take(&mut self.scheduled_operations.lock().unwrap())
}
/// Returns the list of operations scheduled since the most recent of:
///
/// - the last call to this method;
/// - the last call to [`Self::raw_scheduled_operations`];
/// - or since the mock runtime was created.
///
/// All operations are deserialized using BCS into the `Operation` generic type.
pub fn scheduled_operations<Operation>(&self) -> Vec<Operation>
where
Operation: DeserializeOwned,
{
self.raw_scheduled_operations()
.into_iter()
.enumerate()
.map(|(index, bytes)| {
bcs::from_bytes(&bytes).unwrap_or_else(|error| {
panic!(
"Failed to deserialize scheduled operation #{index} (0x{}): {error}",
hex::encode(bytes)
)
})
})
.collect()
}
/// Configures the handler for application queries made during the test.
pub fn with_query_application_handler(
self,
handler: impl FnMut(ApplicationId, Vec<u8>) -> Vec<u8> + Send + 'static,
) -> Self {
*self.query_application_handler.lock().unwrap() = Some(Box::new(handler));
self
}
/// Configures the handler for application queries made during the test.
pub fn set_query_application_handler(
&self,
handler: impl FnMut(ApplicationId, Vec<u8>) -> Vec<u8> + Send + 'static,
) -> &Self {
*self.query_application_handler.lock().unwrap() = Some(Box::new(handler));
self
}
/// Queries another application.
pub fn query_application<A: ServiceAbi>(
&self,
application: ApplicationId<A>,
query: &A::Query,
) -> A::QueryResponse {
let query_bytes =
serde_json::to_vec(&query).expect("Failed to serialize query to another application");
let mut handler_guard = self.query_application_handler.lock().unwrap();
let handler = handler_guard.as_mut().expect(
"Handler for `query_application` has not been mocked, \
please call `MockServiceRuntime::set_query_application_handler` first",
);
let response_bytes = handler(application.forget_abi(), query_bytes);
serde_json::from_slice(&response_bytes)
.expect("Failed to deserialize query response from application")
}
/// Adds an expected `http_request` call, and the response it should return in the test.
pub fn add_expected_http_request(&mut self, request: http::Request, response: http::Response) {
self.expected_http_requests
.lock()
.unwrap()
.push_back((request, response));
}
/// Makes an HTTP `request` as an oracle and returns the HTTP response.
///
/// Should only be used with queries where it is very likely that all validators will receive
/// the same response, otherwise most block proposals will fail.
///
/// Cannot be used in fast blocks: A block using this call should be proposed by a regular
/// owner, not a super owner.
pub fn http_request(&self, request: http::Request) -> http::Response {
let maybe_request = self.expected_http_requests.lock().unwrap().pop_front();
let (expected_request, response) = maybe_request.expect("Unexpected HTTP request");
assert_eq!(request, expected_request);
response
}
/// Configures the `blobs` returned when fetching from hashes during the test.
pub fn with_blobs(self, blobs: impl IntoIterator<Item = (DataBlobHash, Vec<u8>)>) -> Self {
*self.blobs.lock().unwrap() = Some(blobs.into_iter().collect());
self
}
/// Configures the `blobs` returned when fetching from hashes during the test.
pub fn set_blobs(&self, blobs: impl IntoIterator<Item = (DataBlobHash, Vec<u8>)>) -> &Self {
*self.blobs.lock().unwrap() = Some(blobs.into_iter().collect());
self
}
/// Configures the `blob` returned when fetching from the given hash during the test.
pub fn with_blob(self, hash: impl Into<DataBlobHash>, blob: Vec<u8>) -> Self {
self.set_blob(hash, blob);
self
}
/// Configures the `blob` returned when fetching from the hash during the test.
pub fn set_blob(&self, hash: impl Into<DataBlobHash>, blob: Vec<u8>) -> &Self {
self.blobs
.lock()
.unwrap()
.get_or_insert_with(HashMap::new)
.insert(hash.into(), blob);
self
}
/// Fetches a blob from a given hash.
pub fn read_data_blob(&self, hash: DataBlobHash) -> Vec<u8> {
self.blobs
.lock()
.unwrap()
.as_ref()
.and_then(|blobs| blobs.get(&hash).cloned())
.unwrap_or_else(|| {
panic!(
"Blob for hash {hash:?} has not been mocked, \
please call `MockServiceRuntime::set_blob` first"
)
})
}
/// Asserts that a blob with the given hash exists in storage.
pub fn assert_blob_exists(&self, hash: DataBlobHash) {
assert!(
self.blobs
.lock()
.unwrap()
.as_ref()
.is_some_and(|blobs| blobs.contains_key(&hash)),
"Blob for hash {hash:?} has not been mocked, \
please call `MockServiceRuntime::set_blob` first"
);
}
/// Loads a mocked value from the `slot` cache or panics with a provided `message`.
fn fetch_mocked_value<T>(slot: &Mutex<Option<T>>, message: &str) -> T
where
T: Clone,
{
slot.lock().unwrap().clone().expect(message)
}
}
/// A type alias for the handler for application queries.
///
/// The handler receives the queried application's ID and the serialized query bytes,
/// and returns the serialized response bytes.
pub type QueryApplicationHandler = Box<dyn FnMut(ApplicationId, Vec<u8>) -> Vec<u8> + Send>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/service/runtime.rs | linera-sdk/src/service/runtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Runtime types to interface with the host executing the service.
use std::sync::Mutex;
use linera_base::{
abi::ServiceAbi,
data_types::{Amount, BlockHeight, Timestamp},
http,
identifiers::{AccountOwner, ApplicationId, ChainId, DataBlobHash},
};
use serde::Serialize;
use super::wit::{base_runtime_api as base_wit, service_runtime_api as service_wit};
use crate::{KeyValueStore, Service, ViewStorageContext};
/// The runtime available during execution of a query.
///
/// Each field below is a cache slot: it starts as `None` and is filled on first access by
/// `fetch_value_through_cache`, so repeated reads within one query avoid extra host calls.
pub struct ServiceRuntime<Application>
where
    Application: Service,
{
    application_parameters: Mutex<Option<Application::Parameters>>,
    application_id: Mutex<Option<ApplicationId<Application::Abi>>>,
    application_creator_chain_id: Mutex<Option<ChainId>>,
    chain_id: Mutex<Option<ChainId>>,
    next_block_height: Mutex<Option<BlockHeight>>,
    timestamp: Mutex<Option<Timestamp>>,
    chain_balance: Mutex<Option<Amount>>,
    owner_balances: Mutex<Option<Vec<(AccountOwner, Amount)>>>,
    balance_owners: Mutex<Option<Vec<AccountOwner>>>,
}
impl<Application> ServiceRuntime<Application>
where
    Application: Service,
{
    /// Creates a new [`ServiceRuntime`] instance for a service.
    ///
    /// Every cache slot starts out unset and is populated lazily on first access.
    pub(crate) fn new() -> Self {
        // Helper producing an empty cache slot of any type.
        fn empty<T>() -> Mutex<Option<T>> {
            Mutex::new(None)
        }

        ServiceRuntime {
            application_parameters: empty(),
            application_id: empty(),
            application_creator_chain_id: empty(),
            chain_id: empty(),
            next_block_height: empty(),
            timestamp: empty(),
            chain_balance: empty(),
            owner_balances: empty(),
            balance_owners: empty(),
        }
    }

    /// Returns the key-value store to interface with storage.
    pub fn key_value_store(&self) -> KeyValueStore {
        KeyValueStore::for_services()
    }

    /// Returns a storage context suitable for a root view.
    pub fn root_view_storage_context(&self) -> ViewStorageContext {
        let store = self.key_value_store();
        ViewStorageContext::new_unchecked(store, Vec::new(), ())
    }
}
impl<Application> ServiceRuntime<Application>
where
Application: Service,
{
/// Returns the application parameters provided when the application was created.
pub fn application_parameters(&self) -> Application::Parameters {
Self::fetch_value_through_cache(&self.application_parameters, || {
let bytes = base_wit::application_parameters();
serde_json::from_slice(&bytes).expect("Application parameters must be deserializable")
})
}
/// Returns the ID of the current application.
pub fn application_id(&self) -> ApplicationId<Application::Abi> {
Self::fetch_value_through_cache(&self.application_id, || {
ApplicationId::from(base_wit::get_application_id()).with_abi()
})
}
/// Returns the chain ID of the current application creator.
pub fn application_creator_chain_id(&self) -> ChainId {
Self::fetch_value_through_cache(&self.application_creator_chain_id, || {
base_wit::get_application_creator_chain_id().into()
})
}
/// Returns the ID of the current chain.
pub fn chain_id(&self) -> ChainId {
Self::fetch_value_through_cache(&self.chain_id, || base_wit::get_chain_id().into())
}
/// Returns the height of the next block that can be added to the current chain.
pub fn next_block_height(&self) -> BlockHeight {
Self::fetch_value_through_cache(&self.next_block_height, || {
base_wit::get_block_height().into()
})
}
/// Retrieves the current system time, i.e. the timestamp of the block in which this is called.
pub fn system_time(&self) -> Timestamp {
Self::fetch_value_through_cache(&self.timestamp, || {
base_wit::read_system_timestamp().into()
})
}
/// Returns the current chain balance.
pub fn chain_balance(&self) -> Amount {
Self::fetch_value_through_cache(&self.chain_balance, || {
base_wit::read_chain_balance().into()
})
}
/// Returns the balance of one of the accounts on this chain.
pub fn owner_balance(&self, owner: AccountOwner) -> Amount {
base_wit::read_owner_balance(owner.into()).into()
}
/// Returns the balances of all accounts on the chain.
pub fn owner_balances(&self) -> Vec<(AccountOwner, Amount)> {
Self::fetch_value_through_cache(&self.owner_balances, || {
base_wit::read_owner_balances()
.into_iter()
.map(|(owner, amount)| (owner.into(), amount.into()))
.collect()
})
}
/// Returns the owners of accounts on this chain.
pub fn balance_owners(&self) -> Vec<AccountOwner> {
Self::fetch_value_through_cache(&self.balance_owners, || {
base_wit::read_balance_owners()
.into_iter()
.map(AccountOwner::from)
.collect()
})
}
/// Makes an HTTP request to the given URL as an oracle and returns the answer, if any.
///
/// Should only be used with queries where it is very likely that all validators will receive
/// the same response, otherwise most block proposals will fail.
///
/// Cannot be used in fast blocks: A block using this call should be proposed by a regular
/// owner, not a super owner.
pub fn http_request(&self, request: http::Request) -> http::Response {
base_wit::perform_http_request(&request.into()).into()
}
/// Reads a data blob with the given hash from storage.
pub fn read_data_blob(&self, hash: DataBlobHash) -> Vec<u8> {
base_wit::read_data_blob(hash.into())
}
/// Asserts that a data blob with the given hash exists in storage.
pub fn assert_data_blob_exists(&self, hash: DataBlobHash) {
base_wit::assert_data_blob_exists(hash.into())
}
}
impl<Application> ServiceRuntime<Application>
where
    Application: Service,
{
    /// Schedules an operation to be included in the block being built.
    ///
    /// The operation is specified as an opaque blob of bytes.
    pub fn schedule_raw_operation(&self, operation: Vec<u8>) {
        service_wit::schedule_operation(&operation);
    }

    /// Schedules an operation to be included in the block being built.
    ///
    /// The operation is serialized using BCS.
    pub fn schedule_operation(&self, operation: &impl Serialize) {
        service_wit::schedule_operation(
            &bcs::to_bytes(operation).expect("Failed to serialize application operation"),
        );
    }

    /// Queries another application.
    ///
    /// The query is serialized as JSON, sent through the host, and the response bytes are
    /// deserialized back into the response type.
    pub fn query_application<A: ServiceAbi>(
        &self,
        application: ApplicationId<A>,
        query: &A::Query,
    ) -> A::QueryResponse {
        let query_bytes =
            serde_json::to_vec(&query).expect("Failed to serialize query to another application");
        let target = application.forget_abi().into();
        let response_bytes = service_wit::try_query_application(target, &query_bytes);
        serde_json::from_slice(&response_bytes)
            .expect("Failed to deserialize query response from application")
    }
}
impl<Application> ServiceRuntime<Application>
where
    Application: Service,
{
    /// Loads a value from the `slot` cache, or fetches it via `fetch` and caches it.
    ///
    /// The fetched value is stored in `slot` so that repeated reads avoid redundant
    /// host calls; `fetch` runs at most once per slot.
    fn fetch_value_through_cache<T>(slot: &Mutex<Option<T>>, fetch: impl FnOnce() -> T) -> T
    where
        T: Clone,
    {
        slot.lock()
            .expect("Mutex should never be poisoned because service runs in a single thread")
            // Idiomatic fill-if-empty: runs `fetch` only when the slot is `None`.
            .get_or_insert_with(fetch)
            .clone()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/service/mod.rs | linera-sdk/src/service/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types and macros useful for writing an application service.
mod conversions_to_wit;
#[cfg(not(with_testing))]
mod runtime;
#[cfg(with_testing)]
mod test_runtime;
#[doc(hidden)]
pub mod wit;
#[cfg(not(with_testing))]
pub use self::runtime::ServiceRuntime;
#[cfg(with_testing)]
pub use self::test_runtime::MockServiceRuntime;
#[doc(hidden)]
pub use self::wit::export_service;
use crate::util::BlockingWait as _;
/// Inside tests, use the [`MockServiceRuntime`] instead of the real [`ServiceRuntime`].
#[cfg(with_testing)]
pub type ServiceRuntime<Application> = MockServiceRuntime<Application>;
/// Declares an implementation of the [`Service`][`crate::Service`] trait, exporting it from the
/// Wasm module.
///
/// Generates the necessary boilerplate for implementing the service WIT interface, exporting the
/// necessary resource types and functions so that the host can call the application service.
#[macro_export]
macro_rules! service {
    ($service:ident) => {
        // Lazily-initialized singleton holding the service instance across queries.
        // NOTE(review): `unsafe { &mut SERVICE }` below creates a mutable reference to a
        // `static mut`; this assumes single-threaded Wasm execution — confirm, and note that
        // newer Rust editions lint against `static_mut_refs`.
        #[doc(hidden)]
        static mut SERVICE: Option<$service> = None;
        /// Export the service interface.
        $crate::export_service!($service with_types_in $crate::service::wit);
        /// Mark the service type to be exported.
        impl $crate::service::wit::exports::linera::app::service_entrypoints::Guest for $service {
            fn handle_query(argument: Vec<u8>) -> Vec<u8> {
                use $crate::util::BlockingWait as _;
                $crate::ServiceLogger::install();
                // Queries arrive as JSON bytes; a malformed query is a caller bug, so panic.
                let request = $crate::serde_json::from_slice(&argument)
                    .unwrap_or_else(|_| panic!("Query {argument:?} is invalid and could not be deserialized"));
                // Reuse (or lazily create) the service instance and run the query to completion.
                let response = $crate::service::run_async_entrypoint(
                    unsafe { &mut SERVICE },
                    move |service| service.handle_query(request).blocking_wait(),
                );
                $crate::serde_json::to_vec(&response)
                    .expect("Failed to serialize query response")
            }
        }
        /// Stub of a `main` entrypoint so that the binary doesn't fail to compile on targets other
        /// than WebAssembly.
        #[cfg(not(target_arch = "wasm32"))]
        fn main() {}
    };
}
/// Runs an asynchronous entrypoint in a blocking manner, by repeatedly polling the entrypoint
/// future.
pub fn run_async_entrypoint<Service, Output>(
    service: &mut Option<Service>,
    entrypoint: impl FnOnce(&mut Service) -> Output,
) -> Output
where
    Service: crate::Service,
{
    // Construct the service on first use; subsequent calls reuse the same instance.
    if service.is_none() {
        *service = Some(Service::new(ServiceRuntime::new()).blocking_wait());
    }
    let service = service
        .as_mut()
        .expect("service was initialized by the branch above");
    entrypoint(service)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/service/conversions_to_wit.rs | linera-sdk/src/service/conversions_to_wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Conversions from types declared in [`linera-sdk`] to types generated by [`wit-bindgen`].
use linera_base::{crypto::CryptoHash, identifiers::ApplicationId};
use super::wit::service_runtime_api as wit_service_api;
impl From<CryptoHash> for wit_service_api::CryptoHash {
    fn from(hash_value: CryptoHash) -> Self {
        // Split the hash into its four 64-bit limbs and bind them by name.
        let [part1, part2, part3, part4] = <[u64; 4]>::from(hash_value);
        wit_service_api::CryptoHash {
            part1,
            part2,
            part3,
            part4,
        }
    }
}
impl From<ApplicationId> for wit_service_api::ApplicationId {
    fn from(application_id: ApplicationId) -> Self {
        // Convert the description hash into the WIT representation first.
        let application_description_hash = application_id.application_description_hash.into();
        wit_service_api::ApplicationId {
            application_description_hash,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.