| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/base/conversions_from_wit.rs | linera-sdk/src/base/conversions_from_wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Conversions from types generated by [`wit-bindgen`] to types declared in [`linera-sdk`].
use linera_base::{
crypto::CryptoHash,
data_types::{Amount, BlockHeight, TimeDelta, Timestamp},
http,
identifiers::{AccountOwner, ApplicationId, ChainId, DataBlobHash},
ownership::{ChainOwnership, TimeoutConfig},
};
use crate::{
contract::wit::base_runtime_api as base_contract_api,
service::wit::base_runtime_api as base_service_api,
};
macro_rules! impl_from_wit {
($wit_base_api:ident) => {
impl From<$wit_base_api::CryptoHash> for CryptoHash {
fn from(hash_value: $wit_base_api::CryptoHash) -> Self {
CryptoHash::from([
hash_value.part1,
hash_value.part2,
hash_value.part3,
hash_value.part4,
])
}
}
impl From<$wit_base_api::DataBlobHash> for DataBlobHash {
fn from(hash_value: $wit_base_api::DataBlobHash) -> Self {
DataBlobHash(hash_value.inner0.into())
}
}
impl From<$wit_base_api::Array20> for [u8; 20] {
fn from(ethereum_address: $wit_base_api::Array20) -> Self {
let mut bytes = [0u8; 20];
bytes[0..8].copy_from_slice(ðereum_address.part1.to_be_bytes());
bytes[8..16].copy_from_slice(ðereum_address.part2.to_be_bytes());
bytes[16..20].copy_from_slice(ðereum_address.part3.to_be_bytes()[0..4]);
bytes
}
}
impl From<$wit_base_api::AccountOwner> for AccountOwner {
fn from(account_owner: $wit_base_api::AccountOwner) -> Self {
match account_owner {
$wit_base_api::AccountOwner::Reserved(value) => AccountOwner::Reserved(value),
$wit_base_api::AccountOwner::Address32(value) => {
AccountOwner::Address32(value.into())
}
$wit_base_api::AccountOwner::Address20(value) => {
AccountOwner::Address20(value.into())
}
}
}
}
impl From<$wit_base_api::Amount> for Amount {
fn from(balance: $wit_base_api::Amount) -> Self {
let (lower_half, upper_half) = balance.inner0;
let value = ((upper_half as u128) << 64) | (lower_half as u128);
Amount::from_attos(value)
}
}
impl From<$wit_base_api::BlockHeight> for BlockHeight {
fn from(block_height: $wit_base_api::BlockHeight) -> Self {
BlockHeight(block_height.inner0)
}
}
impl From<$wit_base_api::ChainId> for ChainId {
fn from(chain_id: $wit_base_api::ChainId) -> Self {
ChainId(chain_id.inner0.into())
}
}
impl From<$wit_base_api::ApplicationId> for ApplicationId {
fn from(application_id: $wit_base_api::ApplicationId) -> Self {
ApplicationId::new(application_id.application_description_hash.into())
}
}
impl From<$wit_base_api::Timestamp> for Timestamp {
fn from(timestamp: $wit_base_api::Timestamp) -> Self {
Timestamp::from(timestamp.inner0)
}
}
impl From<$wit_base_api::TimeDelta> for TimeDelta {
fn from(guest: $wit_base_api::TimeDelta) -> Self {
TimeDelta::from_micros(guest.inner0)
}
}
impl From<$wit_base_api::TimeoutConfig> for TimeoutConfig {
fn from(guest: $wit_base_api::TimeoutConfig) -> TimeoutConfig {
let $wit_base_api::TimeoutConfig {
fast_round_duration,
base_timeout,
timeout_increment,
fallback_duration,
} = guest;
TimeoutConfig {
fast_round_duration: fast_round_duration.map(TimeDelta::from),
base_timeout: base_timeout.into(),
timeout_increment: timeout_increment.into(),
fallback_duration: fallback_duration.into(),
}
}
}
impl From<$wit_base_api::ChainOwnership> for ChainOwnership {
fn from(guest: $wit_base_api::ChainOwnership) -> ChainOwnership {
let $wit_base_api::ChainOwnership {
super_owners,
owners,
first_leader,
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config,
} = guest;
ChainOwnership {
super_owners: super_owners.into_iter().map(Into::into).collect(),
owners: owners
.into_iter()
.map(|(owner, weight)| (owner.into(), weight))
.collect(),
first_leader: first_leader.map(Into::into),
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config: timeout_config.into(),
}
}
}
impl From<$wit_base_api::HttpResponse> for http::Response {
fn from(response: $wit_base_api::HttpResponse) -> http::Response {
http::Response {
status: response.status,
headers: response
.headers
.into_iter()
.map(http::Header::from)
.collect(),
body: response.body,
}
}
}
impl From<$wit_base_api::HttpHeader> for http::Header {
fn from(header: $wit_base_api::HttpHeader) -> http::Header {
http::Header::new(header.name, header.value)
}
}
};
}
impl_from_wit!(base_service_api);
impl_from_wit!(base_contract_api);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/base/mod.rs | linera-sdk/src/base/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types and macros for the base runtime.
mod conversions_from_wit;
mod conversions_to_wit;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/base/conversions_to_wit.rs | linera-sdk/src/base/conversions_to_wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Conversions from types declared in [`linera-sdk`] to types generated by [`wit-bindgen`].
use linera_base::{
crypto::CryptoHash,
data_types::{BlockHeight, Timestamp},
http,
identifiers::{AccountOwner, ApplicationId, ChainId, DataBlobHash},
};
use crate::{
contract::wit::base_runtime_api as base_contract_api,
service::wit::base_runtime_api as base_service_api,
};
macro_rules! impl_to_wit {
($wit_base_api:ident) => {
impl From<CryptoHash> for $wit_base_api::CryptoHash {
fn from(hash_value: CryptoHash) -> Self {
let parts = <[u64; 4]>::from(hash_value);
$wit_base_api::CryptoHash {
part1: parts[0],
part2: parts[1],
part3: parts[2],
part4: parts[3],
}
}
}
impl From<DataBlobHash> for $wit_base_api::DataBlobHash {
fn from(hash_value: DataBlobHash) -> Self {
$wit_base_api::DataBlobHash {
inner0: hash_value.0.into(),
}
}
}
impl From<[u8; 20]> for $wit_base_api::Array20 {
fn from(bytes: [u8; 20]) -> Self {
$wit_base_api::Array20 {
part1: u64::from_be_bytes(bytes[0..8].try_into().unwrap()),
part2: u64::from_be_bytes(bytes[8..16].try_into().unwrap()),
part3: (u32::from_be_bytes(bytes[16..20].try_into().unwrap()) as u64) << 32,
}
}
}
impl From<AccountOwner> for $wit_base_api::AccountOwner {
fn from(account_owner: AccountOwner) -> Self {
match account_owner {
AccountOwner::Reserved(value) => $wit_base_api::AccountOwner::Reserved(value),
AccountOwner::Address32(value) => {
$wit_base_api::AccountOwner::Address32(value.into())
}
AccountOwner::Address20(value) => {
$wit_base_api::AccountOwner::Address20(value.into())
}
}
}
}
impl From<BlockHeight> for $wit_base_api::BlockHeight {
fn from(block_height: BlockHeight) -> Self {
$wit_base_api::BlockHeight {
inner0: block_height.0,
}
}
}
impl From<ChainId> for $wit_base_api::ChainId {
fn from(chain_id: ChainId) -> Self {
$wit_base_api::ChainId {
inner0: chain_id.0.into(),
}
}
}
impl From<ApplicationId> for $wit_base_api::ApplicationId {
fn from(application_id: ApplicationId) -> Self {
$wit_base_api::ApplicationId {
application_description_hash: application_id
.application_description_hash
.into(),
}
}
}
impl From<Timestamp> for $wit_base_api::Timestamp {
fn from(timestamp: Timestamp) -> Self {
Self {
inner0: timestamp.micros(),
}
}
}
impl From<http::Request> for $wit_base_api::HttpRequest {
fn from(request: http::Request) -> Self {
$wit_base_api::HttpRequest {
method: request.method.into(),
url: request.url,
headers: request
.headers
.into_iter()
.map(http::Header::into)
.collect(),
body: request.body,
}
}
}
impl From<http::Method> for $wit_base_api::HttpMethod {
fn from(method: http::Method) -> Self {
match method {
http::Method::Get => $wit_base_api::HttpMethod::Get,
http::Method::Post => $wit_base_api::HttpMethod::Post,
http::Method::Put => $wit_base_api::HttpMethod::Put,
http::Method::Delete => $wit_base_api::HttpMethod::Delete,
http::Method::Head => $wit_base_api::HttpMethod::Head,
http::Method::Options => $wit_base_api::HttpMethod::Options,
http::Method::Connect => $wit_base_api::HttpMethod::Connect,
http::Method::Patch => $wit_base_api::HttpMethod::Patch,
http::Method::Trace => $wit_base_api::HttpMethod::Trace,
}
}
}
impl From<http::Header> for $wit_base_api::HttpHeader {
fn from(header: http::Header) -> Self {
$wit_base_api::HttpHeader {
name: header.name,
value: header.value,
}
}
}
impl From<log::Level> for $wit_base_api::LogLevel {
fn from(level: log::Level) -> Self {
match level {
log::Level::Trace => $wit_base_api::LogLevel::Trace,
log::Level::Debug => $wit_base_api::LogLevel::Debug,
log::Level::Info => $wit_base_api::LogLevel::Info,
log::Level::Warn => $wit_base_api::LogLevel::Warn,
log::Level::Error => $wit_base_api::LogLevel::Error,
}
}
}
};
}
impl_to_wit!(base_service_api);
impl_to_wit!(base_contract_api);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/test/mock_stubs.rs | linera-sdk/src/test/mock_stubs.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Stub functions for the API exported to unit tests.
//!
//! These can be used by mistake if running unit tests targeting the host architecture, and the
//! default compiler error of missing functions isn't very helpful. Instead, these allow
//! compilation to succeed and fails with a more helpful message *if* one of the functions is
//! called.
use linera_base::{
data_types::{Amount, Timestamp},
identifiers::{ApplicationId, ChainId},
};
use linera_views::context::MemoryContext;
use serde::Serialize;
/// A helpful error message to explain why the mock API isn't available.
///
/// Note: the `cfg` predicate matches on the *architecture* (`wasm32`), while the
/// `--target` flag takes the full target triple (`wasm32-unknown-unknown`).
const ERROR_MESSAGE: &str =
    "The mock API is only available for unit tests running inside a WebAssembly virtual machine. \
    Please check that the unit tests are executed with `linera project test` or with \
    `cargo test --target wasm32-unknown-unknown`. \
    Also ensure that the unit tests (or the module containing them) has a \
    `#[cfg(target_arch = \"wasm32\")]` attribute so that they don't get compiled \
    in for the integration tests";
/// Sets the mocked chain ID.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_chain_id(_chain_id: impl Into<Option<ChainId>>) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked application ID.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_application_id(_application_id: impl Into<Option<ApplicationId>>) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked application creator chain ID.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_application_creator_chain_id(
    _application_creator_chain_id: impl Into<Option<ChainId>>,
) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked application parameters.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_application_parameters(_application_parameters: &impl Serialize) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked chain balance.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_chain_balance(_chain_balance: impl Into<Option<Amount>>) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked system timestamp.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_system_timestamp(_system_timestamp: impl Into<Option<Timestamp>>) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Returns all messages logged so far.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn log_messages() -> Vec<(log::Level, String)> {
    unreachable!("{ERROR_MESSAGE}");
}
/// Sets the mocked application state.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_application_state(_state: impl Into<Option<Vec<u8>>>) {
    unreachable!("{ERROR_MESSAGE}");
}
/// Initializes and returns a view context for using as the mocked key-value store.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_key_value_store() -> MemoryContext<()> {
    unreachable!("{ERROR_MESSAGE}");
}
/// Mocks the `try_query_application` system API.
///
/// Host-architecture stub: always panics with `ERROR_MESSAGE`.
pub fn mock_try_query_application<E>(
    _handler: impl FnMut(ApplicationId, Vec<u8>) -> Result<Vec<u8>, E> + 'static,
) where
    E: ToString + 'static,
{
    unreachable!("{ERROR_MESSAGE}");
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/test/chain.rs | linera-sdk/src/test/chain.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A reference to a single microchain inside a [`TestValidator`].
//!
//! This allows manipulating a test microchain.
use std::{
collections::HashMap,
io,
path::{Path, PathBuf},
sync::Arc,
};
use cargo_toml::Manifest;
use futures::future;
use linera_base::{
crypto::{AccountPublicKey, AccountSecretKey},
data_types::{
Amount, ApplicationDescription, Blob, BlockHeight, Bytecode, ChainDescription,
CompressedBytecode, Epoch,
},
identifiers::{AccountOwner, ApplicationId, ChainId, ModuleId},
vm::VmRuntime,
};
use linera_chain::{types::ConfirmedBlockCertificate, ChainExecutionContext};
use linera_core::{data_types::ChainInfoQuery, worker::WorkerError};
use linera_execution::{
system::{SystemOperation, SystemQuery, SystemResponse},
ExecutionError, Operation, Query, QueryOutcome, QueryResponse,
};
use linera_storage::Storage as _;
use serde::Serialize;
use tokio::{fs, sync::Mutex};
use super::{BlockBuilder, TestValidator};
use crate::{ContractAbi, ServiceAbi};
/// A reference to a single microchain inside a [`TestValidator`].
pub struct ActiveChain {
    // Secret key used by the active owner to sign new blocks.
    key_pair: AccountSecretKey,
    // The chain description; also determines the chain's `ChainId`.
    description: ChainDescription,
    // Certificate of the latest confirmed block, shared between clones.
    tip: Arc<Mutex<Option<ConfirmedBlockCertificate>>>,
    // The validator hosting and executing this microchain.
    validator: TestValidator,
}
impl Clone for ActiveChain {
fn clone(&self) -> Self {
ActiveChain {
key_pair: self.key_pair.copy(),
description: self.description.clone(),
tip: self.tip.clone(),
validator: self.validator.clone(),
}
}
}
impl ActiveChain {
    /// Creates a new [`ActiveChain`] instance referencing a new empty microchain in the
    /// `validator`.
    ///
    /// The microchain has a single owner that uses the `key_pair` to produce blocks. The
    /// `description` is used as the identifier of the microchain.
    pub fn new(
        key_pair: AccountSecretKey,
        description: ChainDescription,
        validator: TestValidator,
    ) -> Self {
        ActiveChain {
            key_pair,
            description,
            tip: Arc::default(),
            validator,
        }
    }
    /// Returns the [`ChainId`] of this microchain.
    pub fn id(&self) -> ChainId {
        self.description.id()
    }
    /// Returns the [`AccountPublicKey`] of the active owner of this microchain.
    pub fn public_key(&self) -> AccountPublicKey {
        self.key_pair.public()
    }
    /// Returns the [`AccountSecretKey`] of the active owner of this microchain.
    pub fn key_pair(&self) -> &AccountSecretKey {
        &self.key_pair
    }
    /// Sets the [`AccountSecretKey`] to use for signing new blocks.
    pub fn set_key_pair(&mut self, key_pair: AccountSecretKey) {
        self.key_pair = key_pair
    }
    /// Returns the current [`Epoch`] the chain is in.
    pub async fn epoch(&self) -> Epoch {
        *self
            .validator
            .worker()
            .chain_state_view(self.id())
            .await
            .expect("Failed to load chain")
            .execution_state
            .system
            .epoch
            .get()
    }
    /// Reads the current shared balance available to all of the owners of this microchain.
    pub async fn chain_balance(&self) -> Amount {
        let query = Query::System(SystemQuery);
        let QueryOutcome { response, .. } = self
            .validator
            .worker()
            .query_application(self.id(), query, None)
            .await
            .expect("Failed to query chain's balance");
        let QueryResponse::System(SystemResponse { balance, .. }) = response else {
            panic!("Unexpected response from system application");
        };
        balance
    }
    /// Reads the current account balance on this microchain of an [`AccountOwner`].
    pub async fn owner_balance(&self, owner: &AccountOwner) -> Option<Amount> {
        let chain_state = self
            .validator
            .worker()
            .chain_state_view(self.id())
            .await
            .expect("Failed to read chain state");
        chain_state
            .execution_state
            .system
            .balances
            .get(owner)
            .await
            .expect("Failed to read owner balance")
    }
    /// Reads the current account balance on this microchain of all [`AccountOwner`]s.
    pub async fn owner_balances(
        &self,
        owners: impl IntoIterator<Item = AccountOwner>,
    ) -> HashMap<AccountOwner, Option<Amount>> {
        let chain_state = self
            .validator
            .worker()
            .chain_state_view(self.id())
            .await
            .expect("Failed to read chain state");
        let mut balances = HashMap::new();
        for owner in owners {
            let balance = chain_state
                .execution_state
                .system
                .balances
                .get(&owner)
                .await
                .expect("Failed to read an owner's balance");
            balances.insert(owner, balance);
        }
        balances
    }
    /// Reads a list of [`AccountOwner`]s that have a non-zero balance on this microchain.
    pub async fn accounts(&self) -> Vec<AccountOwner> {
        let chain_state = self
            .validator
            .worker()
            .chain_state_view(self.id())
            .await
            .expect("Failed to read chain state");
        chain_state
            .execution_state
            .system
            .balances
            .indices()
            .await
            .expect("Failed to list accounts on the chain")
    }
    /// Reads all the non-zero account balances on this microchain.
    pub async fn all_owner_balances(&self) -> HashMap<AccountOwner, Amount> {
        self.owner_balances(self.accounts().await)
            .await
            .into_iter()
            .map(|(owner, balance)| {
                (
                    owner,
                    balance.expect("`accounts` should only return accounts with non-zero balance"),
                )
            })
            .collect()
    }
    /// Adds a block to this microchain.
    ///
    /// The `block_builder` parameter is a closure that should use the [`BlockBuilder`] parameter
    /// to provide the block's contents.
    pub async fn add_block(
        &self,
        block_builder: impl FnOnce(&mut BlockBuilder),
    ) -> ConfirmedBlockCertificate {
        self.try_add_block(block_builder)
            .await
            .expect("Failed to execute block.")
    }
    /// Adds a block to this microchain, passing the blobs to be used during certificate handling.
    ///
    /// The `block_builder` parameter is a closure that should use the [`BlockBuilder`] parameter
    /// to provide the block's contents.
    pub async fn add_block_with_blobs(
        &self,
        block_builder: impl FnOnce(&mut BlockBuilder),
        blobs: Vec<Blob>,
    ) -> ConfirmedBlockCertificate {
        self.try_add_block_with_blobs(block_builder, blobs)
            .await
            .expect("Failed to execute block.")
    }
    /// Tries to add a block to this microchain.
    ///
    /// The `block_builder` parameter is a closure that should use the [`BlockBuilder`] parameter
    /// to provide the block's contents.
    pub async fn try_add_block(
        &self,
        block_builder: impl FnOnce(&mut BlockBuilder),
    ) -> Result<ConfirmedBlockCertificate, WorkerError> {
        self.try_add_block_with_blobs(block_builder, vec![]).await
    }
    /// Tries to add a block to this microchain, writing some `blobs` to storage if needed.
    ///
    /// The `block_builder` parameter is a closure that should use the [`BlockBuilder`] parameter
    /// to provide the block's contents.
    ///
    /// The blobs are either all written to storage, if executing the block fails due to a missing
    /// blob, or none are written to storage if executing the block succeeds without the blobs.
    async fn try_add_block_with_blobs(
        &self,
        block_builder: impl FnOnce(&mut BlockBuilder),
        blobs: Vec<Blob>,
    ) -> Result<ConfirmedBlockCertificate, WorkerError> {
        // Holding the tip lock for the whole operation also serializes concurrent
        // block additions from clones of this chain.
        let mut tip = self.tip.lock().await;
        let mut block = BlockBuilder::new(
            self.description.id(),
            self.key_pair.public().into(),
            self.epoch().await,
            tip.as_ref(),
            self.validator.clone(),
        );
        block_builder(&mut block);
        // TODO(#2066): Remove boxing once call-stack is shallower
        let certificate = Box::pin(block.try_sign(&blobs)).await?;
        // First attempt without persisting the blobs; only if execution reports
        // missing blobs are they written to storage and the certificate retried.
        let result = self
            .validator
            .worker()
            .fully_handle_certificate_with_notifications(certificate.clone(), &())
            .await;
        if let Err(WorkerError::BlobsNotFound(_)) = &result {
            self.validator.storage().maybe_write_blobs(&blobs).await?;
            self.validator
                .worker()
                .fully_handle_certificate_with_notifications(certificate.clone(), &())
                .await
                .expect("Rejected certificate");
        } else {
            result.expect("Rejected certificate");
        }
        *tip = Some(certificate.clone());
        Ok(certificate)
    }
    /// Receives all queued messages in all inboxes of this microchain.
    ///
    /// Adds a block to this microchain that receives all queued messages in the microchains
    /// inboxes.
    pub async fn handle_received_messages(&self) {
        let chain_id = self.id();
        let (information, _) = self
            .validator
            .worker()
            .handle_chain_info_query(ChainInfoQuery::new(chain_id).with_pending_message_bundles())
            .await
            .expect("Failed to query chain's pending messages");
        let messages = information.info.requested_pending_message_bundles;
        // Empty blocks are not allowed.
        // Return early if there are no messages to process and we'd end up with an empty proposal.
        if messages.is_empty() {
            return;
        }
        self.add_block(|block| {
            block.with_incoming_bundles(messages);
        })
        .await;
    }
    /// Processes all new events from streams this chain subscribes to.
    ///
    /// Adds a block to this microchain that processes the new events.
    pub async fn handle_new_events(&self) {
        let chain_id = self.id();
        let worker = self.validator.worker();
        let subscription_map = worker
            .chain_state_view(chain_id)
            .await
            .expect("Failed to query chain state view")
            .execution_state
            .system
            .event_subscriptions
            .index_values()
            .await
            .expect("Failed to query chain's event subscriptions");
        // Collect the indices of all new events.
        let futures = subscription_map
            .into_iter()
            .map(|((chain_id, stream_id), subscriptions)| {
                let worker = worker.clone();
                async move {
                    worker
                        .chain_state_view(chain_id)
                        .await
                        .expect("Failed to query chain state view")
                        .execution_state
                        .system
                        .stream_event_counts
                        .get(&stream_id)
                        .await
                        .expect("Failed to query chain's event counts")
                        // Only streams with events past what we already processed.
                        .filter(|next_index| *next_index > subscriptions.next_index)
                        .map(|next_index| (chain_id, stream_id, next_index))
                }
            });
        let updates = future::join_all(futures)
            .await
            .into_iter()
            .flatten()
            .collect::<Vec<_>>();
        assert!(!updates.is_empty(), "No new events to process");
        self.add_block(|block| {
            block.with_system_operation(SystemOperation::UpdateStreams(updates));
        })
        .await;
    }
    /// Publishes the module in the crate calling this method to this microchain.
    ///
    /// Searches the Cargo manifest for binaries that end with `contract` and `service`, builds
    /// them for WebAssembly and uses the generated binaries as the contract and service bytecode files
    /// to be published on this chain. Returns the module ID to reference the published module.
    pub async fn publish_current_module<Abi, Parameters, InstantiationArgument>(
        &self,
    ) -> ModuleId<Abi, Parameters, InstantiationArgument> {
        self.publish_bytecode_files_in(".").await
    }
    /// Publishes the bytecode files in the crate at `repository_path`.
    ///
    /// Searches the Cargo manifest for binaries that end with `contract` and `service`, builds
    /// them for WebAssembly and uses the generated binaries as the contract and service bytecode files
    /// to be published on this chain. Returns the module ID to reference the published module.
    pub async fn publish_bytecode_files_in<Abi, Parameters, InstantiationArgument>(
        &self,
        repository_path: impl AsRef<Path>,
    ) -> ModuleId<Abi, Parameters, InstantiationArgument> {
        let repository_path = fs::canonicalize(repository_path)
            .await
            .expect("Failed to obtain absolute application repository path");
        Self::build_bytecode_files_in(&repository_path);
        let (contract, service) = Self::find_compressed_bytecode_files_in(&repository_path).await;
        let contract_blob = Blob::new_contract_bytecode(contract);
        let service_blob = Blob::new_service_bytecode(service);
        let contract_blob_hash = contract_blob.id().hash;
        let service_blob_hash = service_blob.id().hash;
        let vm_runtime = VmRuntime::Wasm;
        let module_id = ModuleId::new(contract_blob_hash, service_blob_hash, vm_runtime);
        let certificate = self
            .add_block_with_blobs(
                |block| {
                    block.with_system_operation(SystemOperation::PublishModule { module_id });
                },
                vec![contract_blob, service_blob],
            )
            .await;
        let block = certificate.inner().block();
        assert_eq!(block.messages().len(), 1);
        assert_eq!(block.messages()[0].len(), 0);
        module_id.with_abi()
    }
    /// Compiles the crate in the `repository` path.
    pub fn build_bytecode_files_in(repository: &Path) {
        let output = std::process::Command::new("cargo")
            .args(["build", "--release", "--target", "wasm32-unknown-unknown"])
            .current_dir(repository)
            .output()
            .expect("Failed to build Wasm binaries");
        assert!(
            output.status.success(),
            "Failed to build bytecode binaries.\nstdout: {}\nstderr: {}",
            String::from_utf8_lossy(&output.stdout),
            String::from_utf8_lossy(&output.stderr)
        );
    }
    /// Searches the Cargo manifest of the crate calling this method for binaries to use as the
    /// contract and service bytecode files.
    ///
    /// Returns a tuple with the loaded contract and service [`Bytecode`]s,
    /// ready to be published.
    pub async fn find_bytecode_files_in(repository: &Path) -> (Bytecode, Bytecode) {
        let manifest_path = repository.join("Cargo.toml");
        let cargo_manifest =
            Manifest::from_path(manifest_path).expect("Failed to load Cargo.toml manifest");
        let binaries = cargo_manifest
            .bin
            .into_iter()
            .filter_map(|binary| binary.name)
            .filter(|name| name.ends_with("service") || name.ends_with("contract"))
            .collect::<Vec<_>>();
        assert_eq!(
            binaries.len(),
            2,
            "Could not figure out contract and service bytecode binaries.\
            Please specify them manually using `publish_module`."
        );
        let (contract_binary, service_binary) = if binaries[0].ends_with("contract") {
            (&binaries[0], &binaries[1])
        } else {
            (&binaries[1], &binaries[0])
        };
        let base_path = Self::find_output_directory_of(repository)
            .await
            .expect("Failed to look for output binaries");
        let contract_path = base_path.join(format!("{}.wasm", contract_binary));
        let service_path = base_path.join(format!("{}.wasm", service_binary));
        let contract = Bytecode::load_from_file(contract_path)
            .expect("Failed to load contract bytecode from file");
        let service = Bytecode::load_from_file(service_path)
            .expect("Failed to load service bytecode from file");
        (contract, service)
    }
    /// Returns a tuple with the loaded contract and service [`CompressedBytecode`]s,
    /// ready to be published.
    pub async fn find_compressed_bytecode_files_in(
        repository: &Path,
    ) -> (CompressedBytecode, CompressedBytecode) {
        let (contract, service) = Self::find_bytecode_files_in(repository).await;
        // Compression is CPU-bound, so it runs on the blocking thread pool.
        tokio::task::spawn_blocking(move || (contract.compress(), service.compress()))
            .await
            .expect("Failed to compress bytecode files")
    }
    /// Searches for the directory where the built WebAssembly binaries should be.
    ///
    /// Assumes that the binaries will be built and placed inside a
    /// `target/wasm32-unknown-unknown/release` sub-directory. However, since the crate with the
    /// binaries could be part of a workspace, that output sub-directory must be searched in parent
    /// directories as well.
    async fn find_output_directory_of(repository: &Path) -> Result<PathBuf, io::Error> {
        let output_sub_directory = Path::new("target/wasm32-unknown-unknown/release");
        let mut current_directory = repository;
        let mut output_path = current_directory.join(output_sub_directory);
        while !fs::try_exists(&output_path).await? {
            current_directory = current_directory.parent().unwrap_or_else(|| {
                panic!(
                    "Failed to find Wasm binary output directory in {}",
                    repository.display()
                )
            });
            output_path = current_directory.join(output_sub_directory);
        }
        Ok(output_path)
    }
    /// Returns the height of the tip of this microchain.
    pub async fn get_tip_height(&self) -> BlockHeight {
        self.tip
            .lock()
            .await
            .as_ref()
            .expect("Block was not successfully added")
            .inner()
            .block()
            .header
            .height
    }
    /// Creates an application on this microchain, using the module referenced by `module_id`.
    ///
    /// Returns the [`ApplicationId`] of the created application.
    ///
    /// If necessary, this microchain will subscribe to the microchain that published the
    /// module to use, and fetch it.
    ///
    /// The application is instantiated using the instantiation parameters, which consist of the
    /// global static `parameters`, the one time `instantiation_argument` and the
    /// `required_application_ids` of the applications that the new application will depend on.
    pub async fn create_application<Abi, Parameters, InstantiationArgument>(
        &mut self,
        module_id: ModuleId<Abi, Parameters, InstantiationArgument>,
        parameters: Parameters,
        instantiation_argument: InstantiationArgument,
        required_application_ids: Vec<ApplicationId>,
    ) -> ApplicationId<Abi>
    where
        Abi: ContractAbi,
        Parameters: Serialize,
        InstantiationArgument: Serialize,
    {
        // NOTE: the original dump garbled `&parameters` into an HTML-entity
        // artifact; restored here.
        let parameters = serde_json::to_vec(&parameters).unwrap();
        let instantiation_argument = serde_json::to_vec(&instantiation_argument).unwrap();
        let creation_certificate = self
            .add_block(|block| {
                block.with_system_operation(SystemOperation::CreateApplication {
                    module_id: module_id.forget_abi(),
                    parameters: parameters.clone(),
                    instantiation_argument,
                    required_application_ids: required_application_ids.clone(),
                });
            })
            .await;
        let block = creation_certificate.inner().block();
        assert_eq!(block.messages().len(), 1);
        let description = ApplicationDescription {
            module_id: module_id.forget_abi(),
            creator_chain_id: block.header.chain_id,
            block_height: block.header.height,
            // The block created above contains a single `CreateApplication`
            // operation, so the new application has index 0 in it.
            application_index: 0,
            parameters,
            required_application_ids,
        };
        ApplicationId::<()>::from(&description).with_abi()
    }
    /// Returns whether this chain has been closed.
    pub async fn is_closed(&self) -> bool {
        let chain = self
            .validator
            .worker()
            .chain_state_view(self.id())
            .await
            .expect("Failed to load chain");
        *chain.execution_state.system.closed.get()
    }
    /// Executes a `query` on an `application`'s state on this microchain.
    ///
    /// Returns the deserialized response from the `application`.
    pub async fn query<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: Abi::Query,
    ) -> QueryOutcome<Abi::QueryResponse>
    where
        Abi: ServiceAbi,
    {
        self.try_query(application_id, query)
            .await
            .expect("Failed to execute application service query")
    }
    /// Attempts to execute a `query` on an `application`'s state on this microchain.
    ///
    /// Returns the deserialized response from the `application`.
    pub async fn try_query<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: Abi::Query,
    ) -> Result<QueryOutcome<Abi::QueryResponse>, TryQueryError>
    where
        Abi: ServiceAbi,
    {
        let query_bytes = serde_json::to_vec(&query)?;
        let QueryOutcome {
            response,
            operations,
        } = self
            .validator
            .worker()
            .query_application(
                self.id(),
                Query::User {
                    application_id: application_id.forget_abi(),
                    bytes: query_bytes,
                },
                None,
            )
            .await?;
        let deserialized_response = match response {
            QueryResponse::User(bytes) => {
                serde_json::from_slice(&bytes).expect("Failed to deserialize query response")
            }
            QueryResponse::System(_) => {
                unreachable!("User query returned a system response")
            }
        };
        Ok(QueryOutcome {
            response: deserialized_response,
            operations,
        })
    }
    /// Executes a GraphQL `query` on an `application`'s state on this microchain.
    ///
    /// Returns the deserialized GraphQL JSON response from the `application`.
    pub async fn graphql_query<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: impl Into<async_graphql::Request>,
    ) -> QueryOutcome<serde_json::Value>
    where
        Abi: ServiceAbi<Query = async_graphql::Request, QueryResponse = async_graphql::Response>,
    {
        let query = query.into();
        let query_str = query.query.clone();
        self.try_graphql_query(application_id, query)
            .await
            .unwrap_or_else(|error| panic!("Service query {query_str:?} failed: {error}"))
    }
    /// Attempts to execute a GraphQL `query` on an `application`'s state on this microchain.
    ///
    /// Returns the deserialized GraphQL JSON response from the `application`.
    pub async fn try_graphql_query<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: impl Into<async_graphql::Request>,
    ) -> Result<QueryOutcome<serde_json::Value>, TryGraphQLQueryError>
    where
        Abi: ServiceAbi<Query = async_graphql::Request, QueryResponse = async_graphql::Response>,
    {
        let query = query.into();
        let QueryOutcome {
            response,
            operations,
        } = self.try_query(application_id, query).await?;
        if !response.errors.is_empty() {
            return Err(TryGraphQLQueryError::Service(response.errors));
        }
        let json_response = response.data.into_json()?;
        Ok(QueryOutcome {
            response: json_response,
            operations,
        })
    }
    /// Executes a GraphQL `mutation` on an `application` and proposes a block with the resulting
    /// scheduled operations.
    ///
    /// Returns the certificate of the new block.
    pub async fn graphql_mutation<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: impl Into<async_graphql::Request>,
    ) -> ConfirmedBlockCertificate
    where
        Abi: ServiceAbi<Query = async_graphql::Request, QueryResponse = async_graphql::Response>,
    {
        self.try_graphql_mutation(application_id, query)
            .await
            .expect("Failed to execute service GraphQL mutation")
    }
    /// Attempts to execute a GraphQL `mutation` on an `application` and proposes a block with the
    /// resulting scheduled operations.
    ///
    /// Returns the certificate of the new block.
    pub async fn try_graphql_mutation<Abi>(
        &self,
        application_id: ApplicationId<Abi>,
        query: impl Into<async_graphql::Request>,
    ) -> Result<ConfirmedBlockCertificate, TryGraphQLMutationError>
    where
        Abi: ServiceAbi<Query = async_graphql::Request, QueryResponse = async_graphql::Response>,
    {
        let QueryOutcome { operations, .. } = self.try_graphql_query(application_id, query).await?;
        let certificate = self
            .try_add_block(|block| {
                for operation in operations {
                    match operation {
                        Operation::User {
                            application_id,
                            bytes,
                        } => {
                            block.with_raw_operation(application_id, bytes);
                        }
                        Operation::System(system_operation) => {
                            block.with_system_operation(*system_operation);
                        }
                    }
                }
            })
            .await?;
        Ok(certificate)
    }
}
/// Failure to query an application's service on a chain.
#[derive(Debug, thiserror::Error)]
pub enum TryQueryError {
/// The query request failed to serialize to JSON.
#[error("Failed to serialize query request")]
Serialization(#[from] serde_json::Error),
/// Executing the service to handle the query failed.
#[error("Failed to execute service query")]
Execution(#[from] WorkerError),
}
/// Failure to perform a GraphQL query on an application on a chain.
#[derive(Debug, thiserror::Error)]
pub enum TryGraphQLQueryError {
/// The [`async_graphql::Request`] failed to serialize to JSON.
#[error("Failed to serialize GraphQL query request")]
RequestSerialization(#[source] serde_json::Error),
/// Execution of the service failed.
#[error("Failed to execute service query")]
Execution(#[from] WorkerError),
/// The response returned from the service was not valid JSON.
#[error("Unexpected non-JSON service query response")]
ResponseDeserialization(#[from] serde_json::Error),
/// The service reported some errors.
#[error("Service returned errors: {_0:#?}")]
Service(Vec<async_graphql::ServerError>),
}
impl From<TryQueryError> for TryGraphQLQueryError {
fn from(query_error: TryQueryError) -> Self {
match query_error {
TryQueryError::Serialization(error) => {
TryGraphQLQueryError::RequestSerialization(error)
}
TryQueryError::Execution(error) => TryGraphQLQueryError::Execution(error),
}
}
}
impl TryGraphQLQueryError {
/// Returns the inner [`ExecutionError`] in this error.
///
/// # Panics
///
/// If this is not caused by an [`ExecutionError`].
pub fn expect_execution_error(self) -> ExecutionError {
let TryGraphQLQueryError::Execution(worker_error) = self else {
panic!("Expected an `ExecutionError`. Got: {self:#?}");
};
worker_error.expect_execution_error(ChainExecutionContext::Query)
}
}
/// Failure to perform a GraphQL mutation on an application on a chain.
#[derive(Debug, thiserror::Error)]
pub enum TryGraphQLMutationError {
/// The GraphQL query for the mutation failed.
#[error(transparent)]
Query(#[from] TryGraphQLQueryError),
/// The block with the mutation's scheduled operations failed to be proposed.
#[error("Failed to propose block with operations scheduled by the GraphQL mutation")]
Proposal(#[from] WorkerError),
}
impl TryGraphQLMutationError {
/// Returns the inner [`ExecutionError`] in this [`TryGraphQLMutationError::Proposal`] error.
///
/// # Panics
///
/// If this is not caused by an [`ExecutionError`] during a block proposal.
pub fn expect_proposal_execution_error(self, transaction_index: u32) -> ExecutionError {
let TryGraphQLMutationError::Proposal(proposal_error) = self else {
panic!("Expected an `ExecutionError` during the block proposal. Got: {self:#?}");
};
proposal_error.expect_execution_error(ChainExecutionContext::Operation(transaction_index))
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/test/block.rs | linera-sdk/src/test/block.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A builder of [`Block`]s which are then signed to become [`Certificate`]s.
//!
//! Helps with the construction of blocks, adding operations and
use linera_base::{
abi::ContractAbi,
data_types::{Amount, ApplicationPermissions, Blob, Epoch, Round, Timestamp},
identifiers::{Account, AccountOwner, ApplicationId, ChainId},
ownership::TimeoutConfig,
};
use linera_chain::{
data_types::{
IncomingBundle, LiteValue, LiteVote, MessageAction, ProposedBlock, SignatureAggregator,
Transaction,
},
types::{ConfirmedBlock, ConfirmedBlockCertificate},
};
use linera_core::worker::WorkerError;
use linera_execution::{system::SystemOperation, Operation};
use super::TestValidator;
/// A helper type to build a block proposal using the builder pattern, and then signing them into
/// [`ConfirmedBlockCertificate`]s using a [`TestValidator`].
pub struct BlockBuilder {
block: ProposedBlock,
validator: TestValidator,
}
impl BlockBuilder {
/// Creates a new [`BlockBuilder`], initializing the block so that it belongs to a microchain.
///
/// Initializes the block so that it belongs to the microchain identified by `chain_id` and
/// owned by `owner`. It becomes the block after the specified `previous_block`, or the genesis
/// block if [`None`] is specified.
///
/// # Notes
///
/// This is an internal method, because the [`BlockBuilder`] instance should be built by an
/// [`ActiveChain`]. External users should only be able to add operations and messages to the
/// block.
pub(crate) fn new(
chain_id: ChainId,
owner: AccountOwner,
epoch: Epoch,
previous_block: Option<&ConfirmedBlockCertificate>,
validator: TestValidator,
) -> Self {
let previous_block_hash = previous_block.map(|certificate| certificate.hash());
let height = previous_block
.map(|certificate| {
certificate
.inner()
.height()
.try_add_one()
.expect("Block height limit reached")
})
.unwrap_or_default();
BlockBuilder {
block: ProposedBlock {
epoch,
chain_id,
transactions: vec![],
previous_block_hash,
height,
authenticated_owner: Some(owner),
timestamp: Timestamp::from(0),
},
validator,
}
}
/// Configures the timestamp of this block.
pub fn with_timestamp(&mut self, timestamp: Timestamp) -> &mut Self {
self.block.timestamp = timestamp;
self
}
/// Adds a native token transfer to this block.
pub fn with_native_token_transfer(
&mut self,
sender: AccountOwner,
recipient: Account,
amount: Amount,
) -> &mut Self {
self.with_system_operation(SystemOperation::Transfer {
owner: sender,
recipient,
amount,
})
}
/// Adds a [`SystemOperation`] to this block.
pub(crate) fn with_system_operation(&mut self, operation: SystemOperation) -> &mut Self {
self.block
.transactions
.push(Transaction::ExecuteOperation(operation.into()));
self
}
/// Adds an operation to change this chain's ownership.
pub fn with_owner_change(
&mut self,
super_owners: Vec<AccountOwner>,
owners: Vec<(AccountOwner, u64)>,
first_leader: Option<AccountOwner>,
multi_leader_rounds: u32,
open_multi_leader_rounds: bool,
timeout_config: TimeoutConfig,
) -> &mut Self {
self.with_system_operation(SystemOperation::ChangeOwnership {
super_owners,
owners,
first_leader,
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config,
})
}
/// Adds an application permissions change to this block.
pub fn with_change_application_permissions(
&mut self,
permissions: ApplicationPermissions,
) -> &mut Self {
self.with_system_operation(SystemOperation::ChangeApplicationPermissions(permissions))
}
/// Adds a user `operation` to this block.
///
/// The operation is serialized using [`bcs`] and added to the block, marked to be executed by
/// `application`.
pub fn with_operation<Abi>(
&mut self,
application_id: ApplicationId<Abi>,
operation: Abi::Operation,
) -> &mut Self
where
Abi: ContractAbi,
{
let operation = Abi::serialize_operation(&operation)
.expect("Failed to serialize `Operation` in BlockBuilder");
self.with_raw_operation(application_id.forget_abi(), operation)
}
/// Adds an already serialized user `operation` to this block.
pub fn with_raw_operation(
&mut self,
application_id: ApplicationId,
operation: impl Into<Vec<u8>>,
) -> &mut Self {
self.block
.transactions
.push(Transaction::ExecuteOperation(Operation::User {
application_id,
bytes: operation.into(),
}));
self
}
/// Receives incoming message bundles by specifying them directly.
///
/// This is an internal method that bypasses the check to see if the messages are already
/// present in the inboxes of the microchain that owns this block.
pub(crate) fn with_incoming_bundles(
&mut self,
bundles: impl IntoIterator<Item = IncomingBundle>,
) -> &mut Self {
self.block
.transactions
.extend(bundles.into_iter().map(Transaction::ReceiveMessages));
self
}
/// Receives all direct messages that were sent to this chain by the given certificate.
pub fn with_messages_from(&mut self, certificate: &ConfirmedBlockCertificate) -> &mut Self {
self.with_messages_from_by_action(certificate, MessageAction::Accept)
}
/// Receives all messages that were sent to this chain by the given certificate.
pub fn with_messages_from_by_action(
&mut self,
certificate: &ConfirmedBlockCertificate,
action: MessageAction,
) -> &mut Self {
let origin = certificate.inner().chain_id();
let bundles =
certificate
.message_bundles_for(self.block.chain_id)
.map(|(_epoch, bundle)| IncomingBundle {
origin,
bundle,
action,
});
self.with_incoming_bundles(bundles)
}
/// Tries to sign the prepared block with the [`TestValidator`]'s keys and return the
/// resulting [`Certificate`]. Returns an error if block execution fails.
pub(crate) async fn try_sign(
self,
blobs: &[Blob],
) -> Result<ConfirmedBlockCertificate, WorkerError> {
let published_blobs = self
.block
.published_blob_ids()
.into_iter()
.map(|blob_id| {
blobs
.iter()
.find(|blob| blob.id() == blob_id)
.expect("missing published blob")
.clone()
})
.collect();
let (block, _) = self
.validator
.worker()
.stage_block_execution(self.block, None, published_blobs)
.await?;
let value = ConfirmedBlock::new(block);
let vote = LiteVote::new(
LiteValue::new(&value),
Round::Fast,
self.validator.key_pair(),
);
let committee = self.validator.committee().await;
let public_key = self.validator.key_pair().public();
let mut builder = SignatureAggregator::new(value, Round::Fast, &committee);
let certificate = builder
.append(public_key, vote.signature)
.expect("Failed to sign block")
.expect("Committee has more than one test validator");
Ok(certificate)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/test/mod.rs | linera-sdk/src/test/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper types for writing integration tests for WebAssembly applications.
//!
//! Integration tests are usually written in the `tests` directory in the root of the crate's
//! directory (i.e., beside the `src` directory). Linera application integration tests should be
//! executed targeting the host architecture, instead of targeting `wasm32-unknown-unknown` like
//! done for unit tests.
#![cfg(any(with_testing, with_wasm_runtime))]
#[cfg(with_integration_testing)]
mod block;
#[cfg(with_integration_testing)]
mod chain;
mod mock_stubs;
#[cfg(with_integration_testing)]
mod validator;
#[cfg(with_integration_testing)]
pub use {
linera_chain::{
data_types::MessageAction, test::HttpServer, ChainError, ChainExecutionContext,
},
linera_core::worker::WorkerError,
linera_execution::{ExecutionError, QueryOutcome, WasmExecutionError},
};
#[cfg(with_testing)]
pub use self::mock_stubs::*;
#[cfg(with_integration_testing)]
pub use self::{
block::BlockBuilder,
chain::{ActiveChain, TryGraphQLMutationError, TryGraphQLQueryError, TryQueryError},
validator::TestValidator,
};
use crate::{Contract, ContractRuntime, Service, ServiceRuntime};
/// Creates a [`ContractRuntime`] to use in tests.
pub fn test_contract_runtime<Application: Contract>() -> ContractRuntime<Application> {
ContractRuntime::new()
}
/// Creates a [`ServiceRuntime`] to use in tests.
pub fn test_service_runtime<Application: Service>() -> ServiceRuntime<Application> {
ServiceRuntime::new()
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/test/validator.rs | linera-sdk/src/test/validator.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A minimal validator implementation suited for tests.
//!
//! The [`TestValidator`] is a minimal validator with a single shard. Micro-chains can be added to
//! it, and blocks can be added to each microchain individually.
use std::sync::Arc;
use futures::{
lock::{MappedMutexGuard, Mutex, MutexGuard},
FutureExt as _,
};
use linera_base::{
crypto::{AccountSecretKey, CryptoHash, ValidatorKeypair, ValidatorSecretKey},
data_types::{
Amount, ApplicationPermissions, Blob, BlobContent, ChainDescription, ChainOrigin, Epoch,
InitialChainConfig, NetworkDescription, Timestamp,
},
identifiers::{AccountOwner, ApplicationId, ChainId, ModuleId},
ownership::ChainOwnership,
};
use linera_core::worker::WorkerState;
use linera_execution::{
committee::Committee,
system::{AdminOperation, OpenChainConfig, SystemOperation},
ResourceControlPolicy, WasmRuntime,
};
use linera_storage::{DbStorage, Storage, TestClock};
use linera_views::memory::MemoryDatabase;
use serde::Serialize;
use super::ActiveChain;
use crate::ContractAbi;
/// A minimal validator implementation suited for tests.
///
/// ```rust
/// # use linera_sdk::test::*;
/// # use linera_base::{data_types::Amount, identifiers::ChainId};
/// # tokio_test::block_on(async {
/// let validator = TestValidator::new().await;
/// assert_eq!(
/// validator.new_chain().await.chain_balance().await,
/// Amount::from_tokens(10)
/// );
/// # });
/// ```
pub struct TestValidator {
validator_secret: ValidatorSecretKey,
account_secret: AccountSecretKey,
committee: Arc<Mutex<(Epoch, Committee)>>,
storage: DbStorage<MemoryDatabase, TestClock>,
worker: WorkerState<DbStorage<MemoryDatabase, TestClock>>,
clock: TestClock,
admin_chain_id: ChainId,
chains: Arc<papaya::HashMap<ChainId, ActiveChain>>,
}
impl Clone for TestValidator {
fn clone(&self) -> Self {
TestValidator {
admin_chain_id: self.admin_chain_id,
validator_secret: self.validator_secret.copy(),
account_secret: self.account_secret.copy(),
committee: self.committee.clone(),
storage: self.storage.clone(),
worker: self.worker.clone(),
clock: self.clock.clone(),
chains: self.chains.clone(),
}
}
}
impl TestValidator {
/// Creates a new [`TestValidator`].
pub async fn new() -> Self {
let validator_keypair = ValidatorKeypair::generate();
let account_secret = AccountSecretKey::generate();
let epoch = Epoch::ZERO;
let committee = Committee::make_simple(vec![(
validator_keypair.public_key,
account_secret.public(),
)]);
let wasm_runtime = Some(WasmRuntime::default());
let storage = DbStorage::<MemoryDatabase, _>::make_test_storage(wasm_runtime)
.now_or_never()
.expect("execution of DbStorage::new should not await anything");
let clock = storage.clock().clone();
let worker = WorkerState::new(
"Single validator node".to_string(),
Some(validator_keypair.secret_key.copy()),
storage.clone(),
5_000,
10_000,
);
// Create an admin chain.
let key_pair = AccountSecretKey::generate();
let new_chain_config = InitialChainConfig {
ownership: ChainOwnership::single(key_pair.public().into()),
min_active_epoch: epoch,
max_active_epoch: epoch,
epoch,
balance: Amount::from_tokens(1_000_000),
application_permissions: ApplicationPermissions::default(),
};
let origin = ChainOrigin::Root(0);
let description = ChainDescription::new(origin, new_chain_config, Timestamp::from(0));
let admin_chain_id = description.id();
let committee_blob = Blob::new_committee(
bcs::to_bytes(&committee).expect("serializing a committee should succeed"),
);
let network_description = NetworkDescription {
name: "Test network".to_string(),
genesis_config_hash: CryptoHash::test_hash("genesis config"),
genesis_timestamp: description.timestamp(),
genesis_committee_blob_hash: committee_blob.id().hash,
admin_chain_id,
};
storage
.write_network_description(&network_description)
.await
.unwrap();
storage
.write_blob(&committee_blob)
.await
.expect("writing a blob should succeed");
worker
.storage_client()
.create_chain(description.clone())
.await
.expect("Failed to create root admin chain");
let validator = TestValidator {
validator_secret: validator_keypair.secret_key,
account_secret,
committee: Arc::new(Mutex::new((epoch, committee))),
storage,
worker,
clock,
admin_chain_id,
chains: Arc::default(),
};
let chain = ActiveChain::new(key_pair, description.clone(), validator.clone());
validator.chains.pin().insert(description.id(), chain);
validator
}
/// Creates a new [`TestValidator`] with a single microchain with the bytecode of the crate
/// calling this method published on it.
///
/// Returns the new [`TestValidator`] and the [`ModuleId`] of the published module.
pub async fn with_current_module<Abi, Parameters, InstantiationArgument>() -> (
TestValidator,
ModuleId<Abi, Parameters, InstantiationArgument>,
) {
let validator = TestValidator::new().await;
let publisher = Box::pin(validator.new_chain()).await;
let module_id = publisher.publish_current_module().await;
(validator, module_id)
}
/// Creates a new [`TestValidator`] with the application of the crate calling this method
/// created on a chain.
///
/// The bytecode is first published on one microchain, then the application is created on
/// another microchain.
///
/// Returns the new [`TestValidator`], the [`ApplicationId`] of the created application, and
/// the chain on which it was created.
pub async fn with_current_application<Abi, Parameters, InstantiationArgument>(
parameters: Parameters,
instantiation_argument: InstantiationArgument,
) -> (TestValidator, ApplicationId<Abi>, ActiveChain)
where
Abi: ContractAbi,
Parameters: Serialize,
InstantiationArgument: Serialize,
{
let (validator, module_id) =
TestValidator::with_current_module::<Abi, Parameters, InstantiationArgument>().await;
let mut creator = validator.new_chain().await;
let application_id = creator
.create_application(module_id, parameters, instantiation_argument, vec![])
.await;
(validator, application_id, creator)
}
/// Returns this validator's storage.
pub(crate) fn storage(&self) -> &DbStorage<MemoryDatabase, TestClock> {
&self.storage
}
/// Returns the locked [`WorkerState`] of this validator.
pub(crate) fn worker(&self) -> WorkerState<DbStorage<MemoryDatabase, TestClock>> {
self.worker.clone()
}
/// Returns the [`TestClock`] of this validator.
pub fn clock(&self) -> &TestClock {
&self.clock
}
/// Returns the keys this test validator uses for signing certificates.
pub fn key_pair(&self) -> &ValidatorSecretKey {
&self.validator_secret
}
/// Returns the ID of the admin chain.
pub fn admin_chain_id(&self) -> ChainId {
self.admin_chain_id
}
/// Returns the latest committee that this test validator is part of.
///
/// The committee contains only this validator.
pub async fn committee(&self) -> MappedMutexGuard<'_, (Epoch, Committee), Committee> {
MutexGuard::map(self.committee.lock().await, |(_epoch, committee)| committee)
}
/// Updates the admin chain, creating a new epoch with an updated
/// [`ResourceControlPolicy`].
pub async fn change_resource_control_policy(
&mut self,
adjustment: impl FnOnce(&mut ResourceControlPolicy),
) {
let (epoch, committee) = {
let (ref mut epoch, ref mut committee) = &mut *self.committee.lock().await;
epoch
.try_add_assign_one()
.expect("Reached the limit of epochs");
adjustment(committee.policy_mut());
(*epoch, committee.clone())
};
let admin_chain = self.get_chain(&self.admin_chain_id);
let committee_blob = Blob::new(BlobContent::new_committee(
bcs::to_bytes(&committee).unwrap(),
));
let blob_hash = committee_blob.id().hash;
self.storage
.write_blob(&committee_blob)
.await
.expect("Should write committee blob");
admin_chain
.add_block(|block| {
block.with_system_operation(SystemOperation::Admin(
AdminOperation::CreateCommittee { epoch, blob_hash },
));
})
.await;
let pinned = self.chains.pin();
for chain in pinned.values() {
if chain.id() != self.admin_chain_id {
chain
.add_block(|block| {
block.with_system_operation(SystemOperation::ProcessNewEpoch(epoch));
})
.await;
}
}
}
/// Creates a new microchain and returns the [`ActiveChain`] that can be used to add blocks to
/// it with the given key pair.
pub async fn new_chain_with_keypair(&self, key_pair: AccountSecretKey) -> ActiveChain {
let description = self
.request_new_chain_from_admin_chain(key_pair.public().into())
.await;
let chain = ActiveChain::new(key_pair, description.clone(), self.clone());
chain.handle_received_messages().await;
self.chains.pin().insert(description.id(), chain.clone());
chain
}
/// Creates a new microchain and returns the [`ActiveChain`] that can be used to add blocks to
/// it.
pub async fn new_chain(&self) -> ActiveChain {
let key_pair = AccountSecretKey::generate();
self.new_chain_with_keypair(key_pair).await
}
/// Adds an existing [`ActiveChain`].
pub fn add_chain(&self, chain: ActiveChain) {
self.chains.pin().insert(chain.id(), chain);
}
/// Adds a block to the admin chain to create a new chain.
///
/// Returns the [`ChainDescription`] of the new chain.
async fn request_new_chain_from_admin_chain(&self, owner: AccountOwner) -> ChainDescription {
let admin_id = self.admin_chain_id;
let pinned = self.chains.pin();
let admin_chain = pinned
.get(&admin_id)
.expect("Admin chain should be created when the `TestValidator` is constructed");
let (epoch, _) = self.committee.lock().await.clone();
let open_chain_config = OpenChainConfig {
ownership: ChainOwnership::single(owner),
balance: Amount::from_tokens(10),
application_permissions: ApplicationPermissions::default(),
};
let new_chain_config = open_chain_config.init_chain_config(epoch, epoch, epoch);
let certificate = admin_chain
.add_block(|block| {
block.with_system_operation(SystemOperation::OpenChain(open_chain_config));
})
.await;
let block = certificate.inner().block();
let origin = ChainOrigin::Child {
parent: block.header.chain_id,
block_height: block.header.height,
chain_index: 0,
};
ChainDescription::new(origin, new_chain_config, Timestamp::from(0))
}
/// Returns the [`ActiveChain`] reference to the microchain identified by `chain_id`.
pub fn get_chain(&self, chain_id: &ChainId) -> ActiveChain {
self.chains
.pin()
.get(chain_id)
.expect("Chain not found")
.clone()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/abis/evm.rs | linera-sdk/src/abis/evm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An ABI for applications that implement an EVM runtime.
use linera_base::{
abi::{ContractAbi, ServiceAbi},
vm::EvmQuery,
};
/// An ABI for applications that implement an EVM runtime.
#[derive(PartialEq)]
pub struct EvmAbi;
impl ContractAbi for EvmAbi {
type Operation = Vec<u8>;
type Response = Vec<u8>;
fn deserialize_operation(operation: Vec<u8>) -> Result<Self::Operation, String> {
Ok(operation)
}
fn serialize_operation(operation: &Self::Operation) -> Result<Vec<u8>, String> {
Ok(operation.to_vec())
}
fn deserialize_response(response: Vec<u8>) -> Result<Self::Response, String> {
Ok(response)
}
fn serialize_response(response: Self::Response) -> Result<Vec<u8>, String> {
Ok(response)
}
}
impl ServiceAbi for EvmAbi {
type Query = EvmQuery;
type QueryResponse = Vec<u8>;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/abis/controller.rs | linera-sdk/src/abis/controller.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use async_graphql::{scalar, Request, Response, SimpleObject};
use linera_sdk_derive::GraphQLMutationRootInCrate;
use serde::{Deserialize, Serialize};
use crate::linera_base_types::{
AccountOwner, ApplicationId, ChainId, ContractAbi, DataBlobHash, ServiceAbi,
};
pub struct ControllerAbi;
impl ContractAbi for ControllerAbi {
type Operation = Operation;
type Response = ();
}
impl ServiceAbi for ControllerAbi {
type Query = Request;
type QueryResponse = Response;
}
/// Service are identified by the blob ID of the description.
pub type ManagedServiceId = DataBlobHash;
#[derive(Debug, Deserialize, Serialize, GraphQLMutationRootInCrate)]
pub enum Operation {
/// Worker commands
ExecuteWorkerCommand {
owner: AccountOwner,
command: WorkerCommand,
},
/// Execute a controller command
ExecuteControllerCommand {
admin: AccountOwner,
command: ControllerCommand,
},
}
/// A worker command
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum WorkerCommand {
/// Executed by workers to register themselves.
RegisterWorker { capabilities: Vec<String> },
/// Executed by workers to de-register themselves.
DeregisterWorker,
}
scalar!(WorkerCommand);
/// A controller command
#[derive(Clone, Debug, Deserialize, Serialize)]
pub enum ControllerCommand {
/// Set the admin owners.
SetAdmins { admins: Option<Vec<AccountOwner>> },
/// Remove a worker. (This should not usually happen, but some workers may be broken
/// and need to be cleaned up.)
RemoveWorker { worker_id: ChainId },
/// Update the state of a particular service to be running on the specific workers.
UpdateService {
service_id: ManagedServiceId,
workers: Vec<ChainId>,
},
/// Remove a service from the map entirely.
RemoveService { service_id: ManagedServiceId },
/// Set the states of all services at once, possibly removing some of them.
UpdateAllServices {
services: Vec<(ManagedServiceId, Vec<ChainId>)>,
},
}
scalar!(ControllerCommand);
/// The description of a service worker.
#[derive(Clone, Debug, Serialize, Deserialize, SimpleObject)]
pub struct Worker {
/// The address used by the worker.
pub owner: AccountOwner,
/// Some tags denoting the capabilities of this worker. Each capability has a value
/// that the worker will read from its local environment and pass to the applications.
pub capabilities: Vec<String>,
}
/// The description of a service managed by the controller.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ManagedService {
/// The application ID running the service (e.g. pm-engine)
pub application_id: ApplicationId,
/// The role assumed by this service within the application (e.g. engine, event,
/// market-maker).
pub name: String,
/// The chain on which the service is run. Note that this is different from the worker
/// chains which typically only run the controller application for managing the worker
/// itself.
pub chain_id: ChainId,
/// The required capabilities for a worker to be useful (e.g. some API key).
/// Concretely, the worker will read its environment variable
pub requirements: Vec<String>,
}
scalar!(ManagedService);
/// The local state of a worker.
// This is used to facilitate service queries.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct LocalWorkerState {
/// The description of this worker as we registered it.
pub local_worker: Option<Worker>,
/// The services currently running locally.
pub local_services: Vec<ManagedService>,
/// The chains currently followed locally (besides ours and the active service
/// chains).
pub local_chains: Vec<ChainId>,
}
scalar!(LocalWorkerState);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/abis/mod.rs | linera-sdk/src/abis/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Common ABIs that may have multiple implementations.
pub mod controller;
pub mod evm;
pub mod fungible;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/abis/fungible.rs | linera-sdk/src/abis/fungible.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An ABI for applications that implement a fungible token.
use std::collections::BTreeMap;
use async_graphql::{Request, Response};
use linera_base::{
abi::{ContractAbi, ServiceAbi},
data_types::Amount,
identifiers::{Account, AccountOwner},
};
use linera_sdk_derive::GraphQLMutationRootInCrate;
use serde::{Deserialize, Serialize};
/// An operation
#[derive(Debug, Deserialize, Serialize, GraphQLMutationRootInCrate)]
pub enum FungibleOperation {
/// Requests an account balance.
Balance {
/// Owner to query the balance for
owner: AccountOwner,
},
/// Requests this fungible token's ticker symbol.
TickerSymbol,
/// Approve the transfer of tokens
Approve {
/// Owner to transfer from
owner: AccountOwner,
/// The spender account
spender: AccountOwner,
/// Maximum amount to be transferred
allowance: Amount,
},
/// Transfers tokens from a (locally owned) account to a (possibly remote) account.
Transfer {
/// Owner to transfer from
owner: AccountOwner,
/// Amount to be transferred
amount: Amount,
/// Target account to transfer the amount to
target_account: Account,
},
/// Transfers tokens from a (locally owned) account to a (possibly remote) account by using the allowance.
TransferFrom {
/// Owner to transfer from
owner: AccountOwner,
/// The spender of the amount.
spender: AccountOwner,
/// Amount to be transferred
amount: Amount,
/// Target account to transfer the amount to
target_account: Account,
},
/// Same as `Transfer` but the source account may be remote. Depending on its
/// configuration, the target chain may take time or refuse to process
/// the message.
Claim {
/// Source account to claim amount from
source_account: Account,
/// Amount to be claimed
amount: Amount,
/// Target account to claim the amount into
target_account: Account,
},
}
/// An ABI for applications that implement a fungible token.
pub struct FungibleTokenAbi;
impl ContractAbi for FungibleTokenAbi {
type Operation = FungibleOperation;
type Response = FungibleResponse;
}
impl ServiceAbi for FungibleTokenAbi {
type Query = Request;
type QueryResponse = Response;
}
/// A native fungible response
#[derive(Debug, Deserialize, Serialize, Default)]
pub enum FungibleResponse {
/// OK response
#[default]
Ok,
/// Balance response
Balance(Amount),
/// Ticker symbol response
TickerSymbol(String),
}
/// The initial state to instantiate fungible with
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub struct InitialState {
/// Accounts and their respective initial balances
pub accounts: BTreeMap<AccountOwner, Amount>,
}
/// The parameters to instantiate fungible with
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Parameters {
/// Ticker symbol for the fungible
pub ticker_symbol: String,
}
impl Parameters {
/// Instantiate parameters
pub fn new(ticker_symbol: &str) -> Self {
let ticker_symbol = ticker_symbol.to_string();
Self { ticker_symbol }
}
}
/// A builder type for constructing the initial state of the application.
#[derive(Debug, Default)]
pub struct InitialStateBuilder {
/// Accounts and their respective initial balances
account_balances: BTreeMap<AccountOwner, Amount>,
}
impl InitialStateBuilder {
/// Adds an account to the initial state of the application.
pub fn with_account(mut self, account: AccountOwner, balance: impl Into<Amount>) -> Self {
self.account_balances.insert(account, balance.into());
self
}
/// Returns the serialized initial state of the application, ready to use as the
/// initialization argument.
pub fn build(&self) -> InitialState {
InitialState {
accounts: self.account_balances.clone(),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/views/mock_key_value_store.rs | linera-sdk/src/views/mock_key_value_store.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A mock system API for interfacing with the key-value store.
use std::{
collections::BTreeMap,
sync::{
atomic::{AtomicU32, Ordering},
Mutex,
},
};
use futures::FutureExt as _;
use linera_views::{
batch::Batch,
memory::MemoryStore,
store::{ReadableKeyValueStore, WritableKeyValueStore},
};
/// A mock [`KeyValueStore`] implementation using a [`MemoryStore`].
pub(super) struct MockKeyValueStore {
store: MemoryStore,
contains_key_promises: PromiseRegistry<bool>,
contains_keys_promises: PromiseRegistry<Vec<bool>>,
read_multi_promises: PromiseRegistry<Vec<Option<Vec<u8>>>>,
read_single_promises: PromiseRegistry<Option<Vec<u8>>>,
find_keys_promises: PromiseRegistry<Vec<Vec<u8>>>,
find_key_values_promises: PromiseRegistry<Vec<(Vec<u8>, Vec<u8>)>>,
}
impl Default for MockKeyValueStore {
fn default() -> Self {
MockKeyValueStore {
store: MemoryStore::new_for_testing(),
contains_key_promises: PromiseRegistry::default(),
contains_keys_promises: PromiseRegistry::default(),
read_multi_promises: PromiseRegistry::default(),
read_single_promises: PromiseRegistry::default(),
find_keys_promises: PromiseRegistry::default(),
find_key_values_promises: PromiseRegistry::default(),
}
}
}
/// Helper type to keep track of created promises by one of the functions.
#[derive(Default)]
struct PromiseRegistry<T> {
promises: Mutex<BTreeMap<u32, T>>,
id_counter: AtomicU32,
}
impl<T> PromiseRegistry<T> {
/// Creates a new promise tracking the internal `value`.
pub fn register(&self, value: T) -> u32 {
let id = self.id_counter.fetch_add(1, Ordering::AcqRel);
self.promises
.try_lock()
.expect("Unit-tests should run in a single-thread")
.insert(id, value);
id
}
/// Retrieves a tracked promise by its ID.
pub fn take(&self, id: u32) -> T {
self.promises
.try_lock()
.expect("Unit-tests should run in a single-thread")
.remove(&id)
.expect("Use of an invalid promise ID")
}
}
impl MockKeyValueStore {
/// Checks if `key` is present in the storage, returning a promise to retrieve the final
/// value.
pub(crate) fn contains_key_new(&self, key: &[u8]) -> u32 {
self.contains_key_promises.register(
self.store
.contains_key(key)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns if the key used in the respective call to [`contains_key_new`] is present.
pub(crate) fn contains_key_wait(&self, promise: u32) -> bool {
self.contains_key_promises.take(promise)
}
/// Checks if `keys` are present in the storage, returning a promise to retrieve the final
/// value.
pub(crate) fn contains_keys_new(&self, keys: &[Vec<u8>]) -> u32 {
self.contains_keys_promises.register(
self.store
.contains_keys(keys)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns if the key used in the respective call to [`contains_keys_new`] is present.
pub(crate) fn contains_keys_wait(&self, promise: u32) -> Vec<bool> {
self.contains_keys_promises.take(promise)
}
/// Reads the values addressed by `keys` from the store, returning a promise to retrieve
/// the final value.
pub(crate) fn read_multi_values_bytes_new(&self, keys: &[Vec<u8>]) -> u32 {
self.read_multi_promises.register(
self.store
.read_multi_values_bytes(keys)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns the values read from storage by the respective
/// [`read_multi_values_bytes_new`] call.
pub(crate) fn read_multi_values_bytes_wait(&self, promise: u32) -> Vec<Option<Vec<u8>>> {
self.read_multi_promises.take(promise)
}
/// Reads a value addressed by `key` from the storage, returning a promise to retrieve the
/// final value.
pub(crate) fn read_value_bytes_new(&self, key: &[u8]) -> u32 {
self.read_single_promises.register(
self.store
.read_value_bytes(key)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns the value read from storage by the respective [`read_value_bytes_new`] call.
pub(crate) fn read_value_bytes_wait(&self, promise: u32) -> Option<Vec<u8>> {
self.read_single_promises.take(promise)
}
/// Finds keys in the storage that start with `key_prefix`, returning a promise to
/// retrieve the final value.
pub(crate) fn find_keys_new(&self, key_prefix: &[u8]) -> u32 {
self.find_keys_promises.register(
self.store
.find_keys_by_prefix(key_prefix)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns the keys found in storage by the respective [`find_keys_new`] call.
pub(crate) fn find_keys_wait(&self, promise: u32) -> Vec<Vec<u8>> {
self.find_keys_promises.take(promise)
}
/// Finds key-value pairs in the storage in which the key starts with `key_prefix`,
/// returning a promise to retrieve the final value.
pub(crate) fn find_key_values_new(&self, key_prefix: &[u8]) -> u32 {
self.find_key_values_promises.register(
self.store
.find_key_values_by_prefix(key_prefix)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail"),
)
}
/// Returns the key-value pairs found in storage by the respective [`find_key_values_new`]
/// call.
pub(crate) fn find_key_values_wait(&self, promise: u32) -> Vec<(Vec<u8>, Vec<u8>)> {
self.find_key_values_promises.take(promise)
}
/// Writes a `batch` of operations to storage.
pub(crate) fn write_batch(&self, batch: Batch) {
self.store
.write_batch(batch)
.now_or_never()
.expect("Memory store should never wait for anything")
.expect("Memory store should never fail");
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/views/system_api.rs | linera-sdk/src/views/system_api.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Functions and types to interface with the system API available to application views.
#[cfg(with_testing)]
use std::sync::Arc;
use linera_base::ensure;
use linera_views::{
batch::Batch,
store::{ReadableKeyValueStore, WithError, WritableKeyValueStore},
};
use thiserror::Error;
#[cfg(with_testing)]
use super::mock_key_value_store::MockKeyValueStore;
use crate::{
contract::wit::{
base_runtime_api::{self as contract_wit},
contract_runtime_api::{self, WriteOperation},
},
service::wit::base_runtime_api as service_wit,
util::yield_once,
};
/// We need to have a maximum key size that handles all possible underlying
/// sizes. The constraint so far is DynamoDB which has a key length of 1024.
/// That key length is decreased by 4 due to the use of a value splitting.
/// Then the [`KeyValueStore`] needs to handle some base key and so we
/// reduce to 900. Depending on the size, the error can occur in `system_api`
/// or in the `KeyValueStoreView`.
const MAX_KEY_SIZE: usize = 900;
/// A type to interface with the key value storage provided to applications.
#[derive(Clone)]
pub struct KeyValueStore {
wit_api: WitInterface,
}
#[cfg_attr(with_testing, allow(dead_code))]
impl KeyValueStore {
/// Returns a [`KeyValueStore`] that uses the contract WIT interface.
pub(crate) fn for_contracts() -> Self {
KeyValueStore {
wit_api: WitInterface::Contract,
}
}
/// Returns a [`KeyValueStore`] that uses the service WIT interface.
pub(crate) fn for_services() -> Self {
KeyValueStore {
wit_api: WitInterface::Service,
}
}
/// Returns a new [`KeyValueStore`] that just keeps the storage contents in memory.
#[cfg(with_testing)]
pub fn mock() -> Self {
KeyValueStore {
wit_api: WitInterface::Mock {
store: Arc::new(MockKeyValueStore::default()),
read_only: true,
},
}
}
/// Returns a mocked [`KeyValueStore`] that shares the memory storage with this instance but
/// allows write operations.
#[cfg(with_testing)]
pub fn to_mut(&self) -> Self {
let WitInterface::Mock { store, .. } = &self.wit_api else {
panic!("Real `KeyValueStore` should not be used in unit tests");
};
KeyValueStore {
wit_api: WitInterface::Mock {
store: store.clone(),
read_only: false,
},
}
}
}
impl WithError for KeyValueStore {
type Error = KeyValueStoreError;
}
/// The error type for [`KeyValueStore`] operations.
#[derive(Error, Debug)]
pub enum KeyValueStoreError {
/// Key too long
#[error("Key too long")]
KeyTooLong,
/// BCS serialization error.
#[error(transparent)]
BcsError(#[from] bcs::Error),
}
impl linera_views::store::KeyValueStoreError for KeyValueStoreError {
const BACKEND: &'static str = "key_value_store";
}
impl ReadableKeyValueStore for KeyValueStore {
// The KeyValueStore of the system_api does not have limits
// on the size of its values.
const MAX_KEY_SIZE: usize = MAX_KEY_SIZE;
fn max_stream_queries(&self) -> usize {
1
}
fn root_key(&self) -> Result<Vec<u8>, KeyValueStoreError> {
Ok(Vec::new())
}
async fn contains_key(&self, key: &[u8]) -> Result<bool, KeyValueStoreError> {
ensure!(
key.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
let promise = self.wit_api.contains_key_new(key);
yield_once().await;
Ok(self.wit_api.contains_key_wait(promise))
}
async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, KeyValueStoreError> {
for key in keys {
ensure!(
key.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
}
let promise = self.wit_api.contains_keys_new(keys);
yield_once().await;
Ok(self.wit_api.contains_keys_wait(promise))
}
async fn read_multi_values_bytes(
&self,
keys: &[Vec<u8>],
) -> Result<Vec<Option<Vec<u8>>>, KeyValueStoreError> {
for key in keys {
ensure!(
key.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
}
let promise = self.wit_api.read_multi_values_bytes_new(keys);
yield_once().await;
Ok(self.wit_api.read_multi_values_bytes_wait(promise))
}
async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, KeyValueStoreError> {
ensure!(
key.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
let promise = self.wit_api.read_value_bytes_new(key);
yield_once().await;
Ok(self.wit_api.read_value_bytes_wait(promise))
}
async fn find_keys_by_prefix(
&self,
key_prefix: &[u8],
) -> Result<Vec<Vec<u8>>, KeyValueStoreError> {
ensure!(
key_prefix.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
let promise = self.wit_api.find_keys_new(key_prefix);
yield_once().await;
Ok(self.wit_api.find_keys_wait(promise))
}
async fn find_key_values_by_prefix(
&self,
key_prefix: &[u8],
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, KeyValueStoreError> {
ensure!(
key_prefix.len() <= Self::MAX_KEY_SIZE,
KeyValueStoreError::KeyTooLong
);
let promise = self.wit_api.find_key_values_new(key_prefix);
yield_once().await;
Ok(self.wit_api.find_key_values_wait(promise))
}
}
impl WritableKeyValueStore for KeyValueStore {
const MAX_VALUE_SIZE: usize = usize::MAX;
async fn write_batch(&self, batch: Batch) -> Result<(), KeyValueStoreError> {
self.wit_api.write_batch(batch);
Ok(())
}
async fn clear_journal(&self) -> Result<(), KeyValueStoreError> {
Ok(())
}
}
/// Which system API should be used to interface with the storage.
#[derive(Clone)]
#[cfg_attr(with_testing, allow(dead_code))]
enum WitInterface {
/// The contract system API.
Contract,
/// The service system API.
Service,
#[cfg(with_testing)]
/// A mock system API.
Mock {
store: Arc<MockKeyValueStore>,
read_only: bool,
},
}
impl WitInterface {
/// Creates a promise for testing if a key exist in the key-value store
fn contains_key_new(&self, key: &[u8]) -> u32 {
match self {
WitInterface::Contract => contract_wit::contains_key_new(key),
WitInterface::Service => service_wit::contains_key_new(key),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.contains_key_new(key),
}
}
/// Resolves a promise for testing if a key exist in the key-value store
fn contains_key_wait(&self, promise: u32) -> bool {
match self {
WitInterface::Contract => contract_wit::contains_key_wait(promise),
WitInterface::Service => service_wit::contains_key_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.contains_key_wait(promise),
}
}
/// Creates a promise for testing if multiple keys exist in the key-value store
fn contains_keys_new(&self, keys: &[Vec<u8>]) -> u32 {
match self {
WitInterface::Contract => contract_wit::contains_keys_new(keys),
WitInterface::Service => service_wit::contains_keys_new(keys),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.contains_keys_new(keys),
}
}
/// Resolves a promise for testing if multiple keys exist in the key-value store
fn contains_keys_wait(&self, promise: u32) -> Vec<bool> {
match self {
WitInterface::Contract => contract_wit::contains_keys_wait(promise),
WitInterface::Service => service_wit::contains_keys_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.contains_keys_wait(promise),
}
}
/// Creates a promise for reading multiple keys in the key-value store
fn read_multi_values_bytes_new(&self, keys: &[Vec<u8>]) -> u32 {
match self {
WitInterface::Contract => contract_wit::read_multi_values_bytes_new(keys),
WitInterface::Service => service_wit::read_multi_values_bytes_new(keys),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.read_multi_values_bytes_new(keys),
}
}
/// Resolves a promise for reading multiple keys in the key-value store
fn read_multi_values_bytes_wait(&self, promise: u32) -> Vec<Option<Vec<u8>>> {
match self {
WitInterface::Contract => contract_wit::read_multi_values_bytes_wait(promise),
WitInterface::Service => service_wit::read_multi_values_bytes_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.read_multi_values_bytes_wait(promise),
}
}
/// Creates a promise for reading a key in the key-value store
fn read_value_bytes_new(&self, key: &[u8]) -> u32 {
match self {
WitInterface::Contract => contract_wit::read_value_bytes_new(key),
WitInterface::Service => service_wit::read_value_bytes_new(key),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.read_value_bytes_new(key),
}
}
/// Resolves a promise for reading a key in the key-value store
fn read_value_bytes_wait(&self, promise: u32) -> Option<Vec<u8>> {
match self {
WitInterface::Contract => contract_wit::read_value_bytes_wait(promise),
WitInterface::Service => service_wit::read_value_bytes_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.read_value_bytes_wait(promise),
}
}
/// Creates a promise for finding keys having a specified prefix in the key-value store
fn find_keys_new(&self, key_prefix: &[u8]) -> u32 {
match self {
WitInterface::Contract => contract_wit::find_keys_new(key_prefix),
WitInterface::Service => service_wit::find_keys_new(key_prefix),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.find_keys_new(key_prefix),
}
}
/// Resolves a promise for finding keys having a specified prefix in the key-value store
fn find_keys_wait(&self, promise: u32) -> Vec<Vec<u8>> {
match self {
WitInterface::Contract => contract_wit::find_keys_wait(promise),
WitInterface::Service => service_wit::find_keys_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.find_keys_wait(promise),
}
}
/// Creates a promise for finding the key/values having a specified prefix in the key-value store
fn find_key_values_new(&self, key_prefix: &[u8]) -> u32 {
match self {
WitInterface::Contract => contract_wit::find_key_values_new(key_prefix),
WitInterface::Service => service_wit::find_key_values_new(key_prefix),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.find_key_values_new(key_prefix),
}
}
/// Resolves a promise for finding the key/values having a specified prefix in the key-value store
fn find_key_values_wait(&self, promise: u32) -> Vec<(Vec<u8>, Vec<u8>)> {
match self {
WitInterface::Contract => contract_wit::find_key_values_wait(promise),
WitInterface::Service => service_wit::find_key_values_wait(promise),
#[cfg(with_testing)]
WitInterface::Mock { store, .. } => store.find_key_values_wait(promise),
}
}
/// Calls the `write_batch` WIT function.
fn write_batch(&self, batch: Batch) {
match self {
WitInterface::Contract => {
let batch_operations = batch
.operations
.into_iter()
.map(WriteOperation::from)
.collect::<Vec<_>>();
contract_runtime_api::write_batch(&batch_operations);
}
WitInterface::Service => panic!("Attempt to modify storage from a service"),
#[cfg(with_testing)]
WitInterface::Mock {
store,
read_only: false,
} => {
store.write_batch(batch);
}
#[cfg(with_testing)]
WitInterface::Mock {
read_only: true, ..
} => {
panic!("Attempt to modify storage from a service")
}
}
}
}
/// Implementation of [`linera_views::context::Context`] to be used for data storage
/// by Linera applications.
pub type ViewStorageContext = linera_views::context::ViewContext<(), KeyValueStore>;
#[cfg(all(test, not(target_arch = "wasm32")))]
mod tests {
use super::*;
#[tokio::test]
async fn test_key_value_store_mock() -> anyhow::Result<()> {
// Create a mock key-value store for testing
let store = KeyValueStore::mock();
let mock_store = store.to_mut();
// Check if key exists
let is_key_existing = mock_store.contains_key(b"foo").await?;
assert!(!is_key_existing);
// Check if keys exist
let is_keys_existing = mock_store
.contains_keys(&[b"foo".to_vec(), b"bar".to_vec()])
.await?;
assert!(!is_keys_existing[0]);
assert!(!is_keys_existing[1]);
// Read and write values
let mut batch = Batch::new();
batch.put_key_value(b"foo".to_vec(), &32_u128)?;
batch.put_key_value(b"bar".to_vec(), &42_u128)?;
mock_store.write_batch(batch).await?;
let is_key_existing = mock_store.contains_key(b"foo").await?;
assert!(is_key_existing);
let value = mock_store.read_value(b"foo").await?;
assert_eq!(value, Some(32_u128));
let value = mock_store.read_value(b"bar").await?;
assert_eq!(value, Some(42_u128));
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/views/mod.rs | linera-sdk/src/views/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper types for using [`linera_views`] to store application state.
mod aliases;
#[cfg(with_testing)]
mod mock_key_value_store;
mod system_api;
pub use linera_views::{
self,
common::CustomSerialize,
views::{RootView, View},
ViewError,
};
pub use self::{
aliases::{
ByteCollectionView, ByteMapView, ByteSetView, CollectionView, CustomCollectionView,
CustomMapView, CustomSetView, LogView, MapView, QueueView, ReadGuardedView, RegisterView,
SetView,
},
system_api::{KeyValueStore, ViewStorageContext},
};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/views/aliases.rs | linera-sdk/src/views/aliases.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Aliases for views using the [`ViewStorageContext`].
use super::ViewStorageContext;
/// An alias to [`linera_views::collection_view::ByteCollectionView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type ByteCollectionView<V> =
linera_views::collection_view::ByteCollectionView<ViewStorageContext, V>;
/// An alias to [`linera_views::map_view::ByteMapView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type ByteMapView<V> = linera_views::map_view::ByteMapView<ViewStorageContext, V>;
/// An alias to [`linera_views::set_view::ByteSetView`] that uses the WebAssembly-specific
/// [`ViewStorageContext`].
pub type ByteSetView = linera_views::set_view::ByteSetView<ViewStorageContext>;
/// An alias to [`linera_views::collection_view::CollectionView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type CollectionView<K, V> =
linera_views::collection_view::CollectionView<ViewStorageContext, K, V>;
/// An alias to [`linera_views::collection_view::CustomCollectionView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type CustomCollectionView<K, V> =
linera_views::collection_view::CustomCollectionView<ViewStorageContext, K, V>;
/// An alias to [`linera_views::map_view::CustomMapView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type CustomMapView<K, V> = linera_views::map_view::CustomMapView<ViewStorageContext, K, V>;
/// An alias to [`linera_views::set_view::CustomSetView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type CustomSetView<W> = linera_views::set_view::CustomSetView<ViewStorageContext, W>;
/// An alias to [`linera_views::log_view::LogView`] that uses the WebAssembly-specific
/// [`ViewStorageContext`].
pub type LogView<T> = linera_views::log_view::LogView<ViewStorageContext, T>;
/// An alias to [`linera_views::map_view::MapView`] that uses the WebAssembly-specific
/// [`ViewStorageContext`].
pub type MapView<K, V> = linera_views::map_view::MapView<ViewStorageContext, K, V>;
/// An alias to [`linera_views::queue_view::QueueView`] that uses the WebAssembly-specific
/// [`ViewStorageContext`].
pub type QueueView<T> = linera_views::queue_view::QueueView<ViewStorageContext, T>;
/// An alias to [`linera_views::collection_view::ReadGuardedView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type ReadGuardedView<'a, W> = linera_views::collection_view::ReadGuardedView<'a, W>;
/// An alias to [`linera_views::register_view::RegisterView`] that uses the
/// WebAssembly-specific [`ViewStorageContext`].
pub type RegisterView<T> = linera_views::register_view::RegisterView<ViewStorageContext, T>;
/// An alias to [`linera_views::set_view::SetView`] that uses the WebAssembly-specific
/// [`ViewStorageContext`].
pub type SetView<W> = linera_views::set_view::SetView<ViewStorageContext, W>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/wit.rs | linera-sdk/src/contract/wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Internal module with code generated by [`wit-bindgen`](https://github.com/jvff/wit-bindgen).
#![allow(missing_docs)]
// Export the contract interface.
wit_bindgen::generate!({
world: "contract",
export_macro_name: "export_contract",
pub_export_macro: true,
});
pub use self::linera::app::{base_runtime_api, contract_runtime_api};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/test_runtime.rs | linera-sdk/src/contract/test_runtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Runtime types to simulate interfacing with the host executing the contract.
use std::{
collections::{BTreeMap, HashMap, VecDeque},
sync::{Arc, Mutex, MutexGuard},
};
use linera_base::{
abi::{ContractAbi, ServiceAbi},
data_types::{
Amount, ApplicationPermissions, BlockHeight, Bytecode, Resources, SendMessageRequest,
Timestamp,
},
ensure, http,
identifiers::{
Account, AccountOwner, ApplicationId, BlobId, ChainId, DataBlobHash, ModuleId, StreamName,
},
ownership::{
AccountPermissionError, ChainOwnership, ChangeApplicationPermissionsError, CloseChainError,
},
vm::VmRuntime,
};
use serde::Serialize;
use crate::{Contract, KeyValueStore, ViewStorageContext};
struct ExpectedPublishModuleCall {
contract: Bytecode,
service: Bytecode,
vm_runtime: VmRuntime,
module_id: ModuleId,
}
struct ExpectedCreateApplicationCall {
module_id: ModuleId,
parameters: Vec<u8>,
argument: Vec<u8>,
required_application_ids: Vec<ApplicationId>,
application_id: ApplicationId,
}
struct ExpectedCreateDataBlobCall {
bytes: Vec<u8>,
blob_id: BlobId,
}
/// A mock of the common runtime to interface with the host executing the contract.
pub struct MockContractRuntime<Application>
where
Application: Contract,
{
application_parameters: Option<Application::Parameters>,
application_id: Option<ApplicationId<Application::Abi>>,
application_creator_chain_id: Option<ChainId>,
chain_id: Option<ChainId>,
authenticated_owner: Option<Option<AccountOwner>>,
block_height: Option<BlockHeight>,
round: Option<u32>,
message_is_bouncing: Option<Option<bool>>,
message_origin_chain_id: Option<Option<ChainId>>,
authenticated_caller_id: Option<Option<ApplicationId>>,
timestamp: Option<Timestamp>,
chain_balance: Option<Amount>,
owner_balances: Option<HashMap<AccountOwner, Amount>>,
chain_ownership: Option<ChainOwnership>,
can_close_chain: Option<bool>,
can_change_application_permissions: Option<bool>,
call_application_handler: Option<CallApplicationHandler>,
send_message_requests: Arc<Mutex<Vec<SendMessageRequest<Application::Message>>>>,
outgoing_transfers: HashMap<Account, Amount>,
created_events: BTreeMap<StreamName, Vec<Vec<u8>>>,
events: BTreeMap<(ChainId, StreamName, u32), Vec<u8>>,
claim_requests: Vec<ClaimRequest>,
expected_service_queries: VecDeque<(ApplicationId, String, String)>,
expected_http_requests: VecDeque<(http::Request, http::Response)>,
expected_read_data_blob_requests: VecDeque<(DataBlobHash, Vec<u8>)>,
expected_assert_data_blob_exists_requests: VecDeque<(DataBlobHash, Option<()>)>,
expected_has_empty_storage_requests: VecDeque<(ApplicationId, bool)>,
expected_open_chain_calls: VecDeque<(ChainOwnership, ApplicationPermissions, Amount, ChainId)>,
expected_publish_module_calls: VecDeque<ExpectedPublishModuleCall>,
expected_create_application_calls: VecDeque<ExpectedCreateApplicationCall>,
expected_create_data_blob_calls: VecDeque<ExpectedCreateDataBlobCall>,
key_value_store: KeyValueStore,
}
impl<Application> Default for MockContractRuntime<Application>
where
Application: Contract,
{
fn default() -> Self {
MockContractRuntime::new()
}
}
impl<Application> MockContractRuntime<Application>
where
Application: Contract,
{
/// Creates a new [`MockContractRuntime`] instance for a contract.
pub fn new() -> Self {
MockContractRuntime {
application_parameters: None,
application_id: None,
application_creator_chain_id: None,
chain_id: None,
authenticated_owner: None,
block_height: None,
round: None,
message_is_bouncing: None,
message_origin_chain_id: None,
authenticated_caller_id: None,
timestamp: None,
chain_balance: None,
owner_balances: None,
chain_ownership: None,
can_close_chain: None,
can_change_application_permissions: None,
call_application_handler: None,
send_message_requests: Arc::default(),
outgoing_transfers: HashMap::new(),
created_events: BTreeMap::new(),
events: BTreeMap::new(),
claim_requests: Vec::new(),
expected_service_queries: VecDeque::new(),
expected_http_requests: VecDeque::new(),
expected_read_data_blob_requests: VecDeque::new(),
expected_assert_data_blob_exists_requests: VecDeque::new(),
expected_has_empty_storage_requests: VecDeque::new(),
expected_open_chain_calls: VecDeque::new(),
expected_publish_module_calls: VecDeque::new(),
expected_create_application_calls: VecDeque::new(),
expected_create_data_blob_calls: VecDeque::new(),
key_value_store: KeyValueStore::mock().to_mut(),
}
}
/// Returns the key-value store to interface with storage.
pub fn key_value_store(&self) -> KeyValueStore {
self.key_value_store.clone()
}
/// Returns a storage context suitable for a root view.
pub fn root_view_storage_context(&self) -> ViewStorageContext {
ViewStorageContext::new_unchecked(self.key_value_store(), Vec::new(), ())
}
/// Configures the application parameters to return during the test.
pub fn with_application_parameters(
mut self,
application_parameters: Application::Parameters,
) -> Self {
self.application_parameters = Some(application_parameters);
self
}
/// Configures the application parameters to return during the test.
pub fn set_application_parameters(
&mut self,
application_parameters: Application::Parameters,
) -> &mut Self {
self.application_parameters = Some(application_parameters);
self
}
/// Returns the application parameters provided when the application was created.
pub fn application_parameters(&mut self) -> Application::Parameters {
self.application_parameters.clone().expect(
"Application parameters have not been mocked, \
please call `MockContractRuntime::set_application_parameters` first",
)
}
/// Configures the application ID to return during the test.
pub fn with_application_id(mut self, application_id: ApplicationId<Application::Abi>) -> Self {
self.application_id = Some(application_id);
self
}
/// Configures the application ID to return during the test.
pub fn set_application_id(
&mut self,
application_id: ApplicationId<Application::Abi>,
) -> &mut Self {
self.application_id = Some(application_id);
self
}
/// Returns the ID of the current application.
pub fn application_id(&mut self) -> ApplicationId<Application::Abi> {
self.application_id.expect(
"Application ID has not been mocked, \
please call `MockContractRuntime::set_application_id` first",
)
}
/// Configures the application creator chain ID to return during the test.
pub fn with_application_creator_chain_id(mut self, chain_id: ChainId) -> Self {
self.application_creator_chain_id = Some(chain_id);
self
}
/// Configures the application creator chain ID to return during the test.
pub fn set_application_creator_chain_id(&mut self, chain_id: ChainId) -> &mut Self {
self.application_creator_chain_id = Some(chain_id);
self
}
/// Returns the chain ID of the current application creator.
pub fn application_creator_chain_id(&mut self) -> ChainId {
self.application_creator_chain_id.expect(
"Application creator chain ID has not been mocked, \
please call `MockContractRuntime::set_application_creator_chain_id` first",
)
}
/// Configures the chain ID to return during the test.
pub fn with_chain_id(mut self, chain_id: ChainId) -> Self {
self.chain_id = Some(chain_id);
self
}
/// Configures the chain ID to return during the test.
pub fn set_chain_id(&mut self, chain_id: ChainId) -> &mut Self {
self.chain_id = Some(chain_id);
self
}
/// Returns the ID of the current chain.
pub fn chain_id(&mut self) -> ChainId {
self.chain_id.expect(
"Chain ID has not been mocked, \
please call `MockContractRuntime::set_chain_id` first",
)
}
/// Configures the authenticated owner to return during the test.
pub fn with_authenticated_owner(
mut self,
authenticated_owner: impl Into<Option<AccountOwner>>,
) -> Self {
self.authenticated_owner = Some(authenticated_owner.into());
self
}
/// Configures the authenticated owner to return during the test.
pub fn set_authenticated_owner(
&mut self,
authenticated_owner: impl Into<Option<AccountOwner>>,
) -> &mut Self {
self.authenticated_owner = Some(authenticated_owner.into());
self
}
/// Returns the authenticated owner for this execution, if there is one.
pub fn authenticated_owner(&mut self) -> Option<AccountOwner> {
self.authenticated_owner.expect(
"Authenticated owner has not been mocked, \
please call `MockContractRuntime::set_authenticated_owner` first",
)
}
/// Configures the block height to return during the test.
pub fn with_block_height(mut self, block_height: BlockHeight) -> Self {
self.block_height = Some(block_height);
self
}
/// Configures the block height to return during the test.
pub fn set_block_height(&mut self, block_height: BlockHeight) -> &mut Self {
self.block_height = Some(block_height);
self
}
/// Configures the multi-leader round number to return during the test.
pub fn with_round(mut self, round: u32) -> Self {
self.round = Some(round);
self
}
/// Configures the multi-leader round number to return during the test.
pub fn set_round(&mut self, round: u32) -> &mut Self {
self.round = Some(round);
self
}
/// Returns the height of the current block that is executing.
pub fn block_height(&mut self) -> BlockHeight {
self.block_height.expect(
"Block height has not been mocked, \
please call `MockContractRuntime::set_block_height` first",
)
}
/// Configures the `message_is_bouncing` flag to return during the test.
pub fn with_message_is_bouncing(
mut self,
message_is_bouncing: impl Into<Option<bool>>,
) -> Self {
self.message_is_bouncing = Some(message_is_bouncing.into());
self
}
/// Configures the `message_is_bouncing` flag to return during the test.
pub fn set_message_is_bouncing(
&mut self,
message_is_bouncing: impl Into<Option<bool>>,
) -> &mut Self {
self.message_is_bouncing = Some(message_is_bouncing.into());
self
}
/// Returns [`true`] if the incoming message was rejected from the original destination and is
/// now bouncing back, or [`None`] if not executing an incoming message.
pub fn message_is_bouncing(&mut self) -> Option<bool> {
self.message_is_bouncing.expect(
"`message_is_bouncing` flag has not been mocked, \
please call `MockContractRuntime::set_message_is_bouncing` first",
)
}
/// Configures the `message_origin_chain_id` to return during the test.
pub fn set_message_origin_chain_id(
&mut self,
message_origin_chain_id: impl Into<Option<ChainId>>,
) -> &mut Self {
self.message_origin_chain_id = Some(message_origin_chain_id.into());
self
}
/// Returns the chain ID where the incoming message originated from, or [`None`] if not
/// executing an incoming message.
pub fn message_origin_chain_id(&mut self) -> Option<ChainId> {
self.message_origin_chain_id.expect(
"`message_origin_chain_id` has not been mocked, \
please call `MockContractRuntime::set_message_origin_chain_id` first",
)
}
/// Configures the authenticated caller ID to return during the test.
pub fn with_authenticated_caller_id(
mut self,
authenticated_caller_id: impl Into<Option<ApplicationId>>,
) -> Self {
self.authenticated_caller_id = Some(authenticated_caller_id.into());
self
}
/// Configures the authenticated caller ID to return during the test.
pub fn set_authenticated_caller_id(
&mut self,
authenticated_caller_id: impl Into<Option<ApplicationId>>,
) -> &mut Self {
self.authenticated_caller_id = Some(authenticated_caller_id.into());
self
}
/// Returns the authenticated caller ID, if the caller configured it and if the current context
/// is executing a cross-application call.
pub fn authenticated_caller_id(&mut self) -> Option<ApplicationId> {
self.authenticated_caller_id.expect(
"Authenticated caller ID has not been mocked, \
please call `MockContractRuntime::set_authenticated_caller_id` first",
)
}
/// Verifies that the current execution context authorizes operations on a given account.
pub fn check_account_permission(
&mut self,
owner: AccountOwner,
) -> Result<(), AccountPermissionError> {
ensure!(
self.authenticated_owner() == Some(owner)
|| self.authenticated_caller_id().map(AccountOwner::from) == Some(owner),
AccountPermissionError::NotPermitted(owner)
);
Ok(())
}
/// Configures the system time to return during the test.
pub fn with_system_time(mut self, timestamp: Timestamp) -> Self {
self.timestamp = Some(timestamp);
self
}
/// Configures the system time to return during the test.
pub fn set_system_time(&mut self, timestamp: Timestamp) -> &mut Self {
self.timestamp = Some(timestamp);
self
}
/// Retrieves the current system time, i.e. the timestamp of the block in which this is called.
pub fn system_time(&mut self) -> Timestamp {
self.timestamp.expect(
"System time has not been mocked, \
please call `MockContractRuntime::set_system_time` first",
)
}
/// Configures the chain balance to return during the test.
pub fn with_chain_balance(mut self, chain_balance: Amount) -> Self {
self.chain_balance = Some(chain_balance);
self
}
/// Configures the chain balance to return during the test.
pub fn set_chain_balance(&mut self, chain_balance: Amount) -> &mut Self {
self.chain_balance = Some(chain_balance);
self
}
/// Returns the current chain balance.
pub fn chain_balance(&mut self) -> Amount {
*self.chain_balance_mut()
}
/// Returns a mutable reference to the current chain balance.
fn chain_balance_mut(&mut self) -> &mut Amount {
self.chain_balance.as_mut().expect(
"Chain balance has not been mocked, \
please call `MockContractRuntime::set_chain_balance` first",
)
}
/// Configures the balances on the chain to use during the test.
pub fn with_owner_balances(
mut self,
owner_balances: impl IntoIterator<Item = (AccountOwner, Amount)>,
) -> Self {
self.owner_balances = Some(owner_balances.into_iter().collect());
self
}
/// Configures the balances on the chain to use during the test.
pub fn set_owner_balances(
&mut self,
owner_balances: impl IntoIterator<Item = (AccountOwner, Amount)>,
) -> &mut Self {
self.owner_balances = Some(owner_balances.into_iter().collect());
self
}
/// Configures the balance of one account on the chain to use during the test.
pub fn with_owner_balance(mut self, owner: AccountOwner, balance: Amount) -> Self {
self.set_owner_balance(owner, balance);
self
}
/// Configures the balance of one account on the chain to use during the test.
pub fn set_owner_balance(&mut self, owner: AccountOwner, balance: Amount) -> &mut Self {
self.owner_balances
.get_or_insert_with(HashMap::new)
.insert(owner, balance);
self
}
/// Returns the balance of one of the accounts on this chain.
pub fn owner_balance(&mut self, owner: AccountOwner) -> Amount {
*self.owner_balance_mut(owner)
}
/// Returns a mutable reference to the balance of one of the accounts on this chain.
fn owner_balance_mut(&mut self, owner: AccountOwner) -> &mut Amount {
self.owner_balances
.as_mut()
.expect(
"Owner balances have not been mocked, \
please call `MockContractRuntime::set_owner_balances` first",
)
.get_mut(&owner)
.unwrap_or_else(|| {
panic!(
"Balance for owner {owner} was not mocked, \
please include a balance for them in the call to \
`MockContractRuntime::set_owner_balances`"
)
})
}
/// Schedules a message to be sent to this application on another chain.
pub fn send_message(&mut self, destination: ChainId, message: Application::Message) {
self.prepare_message(message).send_to(destination)
}
/// Returns a `MessageBuilder` to prepare a message to be sent.
pub fn prepare_message(
&mut self,
message: Application::Message,
) -> MessageBuilder<Application::Message> {
MessageBuilder::new(message, self.send_message_requests.clone())
}
/// Returns the list of [`SendMessageRequest`]s created so far during the test.
pub fn created_send_message_requests(
&self,
) -> MutexGuard<'_, Vec<SendMessageRequest<Application::Message>>> {
self.send_message_requests
.try_lock()
.expect("Unit test should be single-threaded")
}
    /// Transfers an `amount` of native tokens from `source` owner account (or the current chain's
    /// balance) to `destination`.
    ///
    /// Panics if the source balance was not mocked or is insufficient. Transfers whose
    /// destination chain equals the mocked chain ID are credited immediately; all others are
    /// accumulated in the map returned by [`Self::outgoing_transfers`].
    pub fn transfer(&mut self, source: AccountOwner, destination: Account, amount: Amount) {
        // Debit first so an insufficient-funds panic happens before any state is recorded.
        self.debit(source, amount);
        if Some(destination.chain_id) == self.chain_id {
            // Local transfer: settle it against the mocked balances right away.
            self.credit(destination.owner, amount);
        } else {
            // Cross-chain transfer: record it so the test can assert on it later.
            let destination_entry = self.outgoing_transfers.entry(destination).or_default();
            *destination_entry = destination_entry
                .try_add(amount)
                .expect("Outgoing transfer value overflow");
        }
    }
    /// Debits an `amount` of native tokens from a `source` owner account (or the current
    /// chain's balance).
    ///
    /// Panics if the relevant balance was not mocked or would go negative.
    fn debit(&mut self, source: AccountOwner, amount: Amount) {
        // `AccountOwner::CHAIN` addresses the shared chain balance instead of an owner account.
        let source_balance = if source == AccountOwner::CHAIN {
            self.chain_balance_mut()
        } else {
            self.owner_balance_mut(source)
        };
        *source_balance = source_balance
            .try_sub(amount)
            .expect("Insufficient funds in source account");
    }
    /// Credits an `amount` of native tokens into a `destination` owner account (or the
    /// current chain's balance).
    ///
    /// Panics if the relevant balance was not mocked or would overflow.
    fn credit(&mut self, destination: AccountOwner, amount: Amount) {
        // `AccountOwner::CHAIN` addresses the shared chain balance instead of an owner account.
        let destination_balance = if destination == AccountOwner::CHAIN {
            self.chain_balance_mut()
        } else {
            self.owner_balance_mut(destination)
        };
        *destination_balance = destination_balance
            .try_add(amount)
            .expect("Account balance overflow");
    }
    /// Returns the outgoing transfers scheduled during the test so far.
    pub fn outgoing_transfers(&self) -> &HashMap<Account, Amount> {
        &self.outgoing_transfers
    }
    /// Claims an `amount` of native tokens from a `source` account to a `destination` account.
    ///
    /// Mocked balances are only adjusted for accounts on the mocked chain; balances on other
    /// chains are not tracked by this mock.
    pub fn claim(&mut self, source: Account, destination: Account, amount: Amount) {
        if Some(source.chain_id) == self.chain_id {
            self.debit(source.owner, amount);
            if Some(destination.chain_id) == self.chain_id {
                self.credit(destination.owner, amount);
            }
        }
        // Always record the request, even for remote accounts, so the test can assert
        // on it via `claim_requests`.
        self.claim_requests.push(ClaimRequest {
            source,
            amount,
            destination,
        });
    }
    /// Returns the list of claims made during the test so far.
    pub fn claim_requests(&self) -> &[ClaimRequest] {
        &self.claim_requests
    }
/// Configures the chain ownership configuration to return during the test.
pub fn with_chain_ownership(mut self, chain_ownership: ChainOwnership) -> Self {
self.chain_ownership = Some(chain_ownership);
self
}
/// Configures the chain ownership configuration to return during the test.
pub fn set_chain_ownership(&mut self, chain_ownership: ChainOwnership) -> &mut Self {
self.chain_ownership = Some(chain_ownership);
self
}
/// Retrieves the owner configuration for the current chain.
pub fn chain_ownership(&mut self) -> ChainOwnership {
self.chain_ownership.clone().expect(
"Chain ownership has not been mocked, \
please call `MockContractRuntime::set_chain_ownership` first",
)
}
/// Configures if the application being tested is allowed to close the chain its in.
pub fn with_can_close_chain(mut self, can_close_chain: bool) -> Self {
self.can_close_chain = Some(can_close_chain);
self
}
/// Configures if the application being tested is allowed to close the chain its in.
pub fn set_can_close_chain(&mut self, can_close_chain: bool) -> &mut Self {
self.can_close_chain = Some(can_close_chain);
self
}
/// Configures if the application being tested is allowed to change the application
/// permissions on the chain.
pub fn with_can_change_application_permissions(
mut self,
can_change_application_permissions: bool,
) -> Self {
self.can_change_application_permissions = Some(can_change_application_permissions);
self
}
/// Configures if the application being tested is allowed to change the application
/// permissions on the chain.
pub fn set_can_change_application_permissions(
&mut self,
can_change_application_permissions: bool,
) -> &mut Self {
self.can_change_application_permissions = Some(can_change_application_permissions);
self
}
/// Closes the current chain. Returns an error if the application doesn't have
/// permission to do so.
pub fn close_chain(&mut self) -> Result<(), CloseChainError> {
let authorized = self.can_close_chain.expect(
"Authorization to close the chain has not been mocked, \
please call `MockContractRuntime::set_can_close_chain` first",
);
if authorized {
Ok(())
} else {
Err(CloseChainError::NotPermitted)
}
}
    /// Changes the application permissions on the current chain. Returns an error if the
    /// application doesn't have permission to do so.
    ///
    /// Panics if the authorization flag or the application ID was never mocked.
    pub fn change_application_permissions(
        &mut self,
        application_permissions: ApplicationPermissions,
    ) -> Result<(), ChangeApplicationPermissionsError> {
        let authorized = self.can_change_application_permissions.expect(
            "Authorization to change the application permissions has not been mocked, \
            please call `MockContractRuntime::set_can_change_application_permissions` first",
        );
        if authorized {
            let application_id = self
                .application_id
                .expect("The application doesn't have an ID!")
                .forget_abi();
            // Re-derive the mocked authorization flags from the new permissions, so that
            // subsequent `close_chain`/`change_application_permissions` calls observe them.
            self.can_close_chain = Some(application_permissions.can_close_chain(&application_id));
            self.can_change_application_permissions =
                Some(application_permissions.can_change_application_permissions(&application_id));
            Ok(())
        } else {
            Err(ChangeApplicationPermissionsError::NotPermitted)
        }
    }
    /// Adds an expected call to `open_chain`, and the child chain ID that should be returned.
    ///
    /// Expected calls are matched in FIFO order.
    pub fn add_expected_open_chain_call(
        &mut self,
        ownership: ChainOwnership,
        application_permissions: ApplicationPermissions,
        balance: Amount,
        chain_id: ChainId,
    ) {
        self.expected_open_chain_calls.push_back((
            ownership,
            application_permissions,
            balance,
            chain_id,
        ));
    }
    /// Opens a new chain, configuring it with the provided `chain_ownership`,
    /// `application_permissions` and initial `balance` (debited from the current chain).
    ///
    /// Panics if no matching expectation was registered via
    /// [`Self::add_expected_open_chain_call`] or if the arguments differ from the expectation.
    pub fn open_chain(
        &mut self,
        ownership: ChainOwnership,
        application_permissions: ApplicationPermissions,
        balance: Amount,
    ) -> ChainId {
        let (expected_ownership, expected_permissions, expected_balance, chain_id) = self
            .expected_open_chain_calls
            .pop_front()
            .expect("Unexpected open_chain call");
        assert_eq!(ownership, expected_ownership);
        assert_eq!(application_permissions, expected_permissions);
        assert_eq!(balance, expected_balance);
        chain_id
    }
    /// Adds a new expected call to `publish_module`.
    ///
    /// Expected calls are matched in FIFO order.
    pub fn add_expected_publish_module_call(
        &mut self,
        contract: Bytecode,
        service: Bytecode,
        vm_runtime: VmRuntime,
        module_id: ModuleId,
    ) {
        self.expected_publish_module_calls
            .push_back(ExpectedPublishModuleCall {
                contract,
                service,
                vm_runtime,
                module_id,
            });
    }
    /// Adds a new expected call to `create_application`.
    ///
    /// `parameters` and `argument` are stored in their JSON-serialized form, which is what
    /// `create_application` compares against. Expected calls are matched in FIFO order.
    pub fn add_expected_create_application_call<Parameters, InstantiationArgument>(
        &mut self,
        module_id: ModuleId,
        parameters: &Parameters,
        argument: &InstantiationArgument,
        required_application_ids: Vec<ApplicationId>,
        application_id: ApplicationId,
    ) where
        Parameters: Serialize,
        InstantiationArgument: Serialize,
    {
        let parameters = serde_json::to_vec(parameters)
            .expect("Failed to serialize `Parameters` type for a cross-application call");
        let argument = serde_json::to_vec(argument).expect(
            "Failed to serialize `InstantiationArgument` type for a cross-application call",
        );
        self.expected_create_application_calls
            .push_back(ExpectedCreateApplicationCall {
                module_id,
                parameters,
                argument,
                required_application_ids,
                application_id,
            });
    }
    /// Adds a new expected call to `create_data_blob`.
    ///
    /// Expected calls are matched in FIFO order.
    pub fn add_expected_create_data_blob_call(&mut self, bytes: Vec<u8>, blob_id: BlobId) {
        self.expected_create_data_blob_calls
            .push_back(ExpectedCreateDataBlobCall { bytes, blob_id });
    }
    /// Publishes a module with the supplied contract and service bytecode, returning the
    /// mocked [`ModuleId`].
    ///
    /// Panics if no matching expectation was registered via
    /// [`Self::add_expected_publish_module_call`] or if the arguments differ from it.
    pub fn publish_module(
        &mut self,
        contract: Bytecode,
        service: Bytecode,
        vm_runtime: VmRuntime,
    ) -> ModuleId {
        let ExpectedPublishModuleCall {
            contract: expected_contract,
            service: expected_service,
            vm_runtime: expected_vm_runtime,
            module_id,
        } = self
            .expected_publish_module_calls
            .pop_front()
            .expect("Unexpected publish_module call");
        assert_eq!(contract, expected_contract);
        assert_eq!(service, expected_service);
        assert_eq!(vm_runtime, expected_vm_runtime);
        module_id
    }
    /// Creates a new on-chain application, based on the supplied module and parameters.
    ///
    /// Panics if no matching expectation was registered via
    /// [`Self::add_expected_create_application_call`] or if any argument differs from it.
    /// Arguments are compared in their JSON-serialized form.
    pub fn create_application<Abi, Parameters, InstantiationArgument>(
        &mut self,
        module_id: ModuleId,
        parameters: &Parameters,
        argument: &InstantiationArgument,
        required_application_ids: Vec<ApplicationId>,
    ) -> ApplicationId<Abi>
    where
        Abi: ContractAbi,
        Parameters: Serialize,
        InstantiationArgument: Serialize,
    {
        let ExpectedCreateApplicationCall {
            module_id: expected_module_id,
            parameters: expected_parameters,
            argument: expected_argument,
            required_application_ids: expected_required_app_ids,
            application_id,
        } = self
            .expected_create_application_calls
            .pop_front()
            .expect("Unexpected create_application call");
        let parameters = serde_json::to_vec(parameters)
            .expect("Failed to serialize `Parameters` type for a cross-application call");
        let argument = serde_json::to_vec(argument).expect(
            "Failed to serialize `InstantiationArgument` type for a cross-application call",
        );
        assert_eq!(module_id, expected_module_id);
        assert_eq!(parameters, expected_parameters);
        assert_eq!(argument, expected_argument);
        assert_eq!(required_application_ids, expected_required_app_ids);
        application_id.with_abi::<Abi>()
    }
    /// Creates a new data blob and returns its hash.
    ///
    /// Panics if no matching expectation was registered via
    /// [`Self::add_expected_create_data_blob_call`] or if the bytes differ from it.
    pub fn create_data_blob(&mut self, bytes: &[u8]) -> DataBlobHash {
        let ExpectedCreateDataBlobCall {
            bytes: expected_bytes,
            blob_id,
        } = self
            .expected_create_data_blob_calls
            .pop_front()
            .expect("Unexpected create_data_blob call");
        assert_eq!(bytes, &expected_bytes);
        DataBlobHash(blob_id.hash)
    }
    /// Configures the handler for cross-application calls made during the test.
    ///
    /// The handler receives `(authenticated, callee_application_id, operation_bytes)` and
    /// returns the serialized response bytes.
    pub fn with_call_application_handler(
        mut self,
        handler: impl FnMut(bool, ApplicationId, Vec<u8>) -> Vec<u8> + 'static,
    ) -> Self {
        self.call_application_handler = Some(Box::new(handler));
        self
    }
    /// Configures the handler for cross-application calls made during the test.
    ///
    /// The handler receives `(authenticated, callee_application_id, operation_bytes)` and
    /// returns the serialized response bytes.
    pub fn set_call_application_handler(
        &mut self,
        handler: impl FnMut(bool, ApplicationId, Vec<u8>) -> Vec<u8> + 'static,
    ) -> &mut Self {
        self.call_application_handler = Some(Box::new(handler));
        self
    }
    /// Calls another application.
    ///
    /// Serializes the operation, forwards it to the mocked handler, and deserializes the
    /// handler's response. Panics if no handler was configured.
    pub fn call_application<A: ContractAbi + Send>(
        &mut self,
        authenticated: bool,
        application: ApplicationId<A>,
        call: &A::Operation,
    ) -> A::Response {
        let call_bytes = A::serialize_operation(call)
            .expect("Failed to serialize `Operation` in test runtime cross-application call");
        let handler = self.call_application_handler.as_mut().expect(
            "Handler for `call_application` has not been mocked, \
            please call `MockContractRuntime::set_call_application_handler` first",
        );
        let response_bytes = handler(authenticated, application.forget_abi(), call_bytes);
        A::deserialize_response(response_bytes)
            .expect("Failed to deserialize `Response` in test runtime cross-application call")
    }
/// Adds a new item to an event stream. Returns the new event's index in the stream.
pub fn emit(&mut self, name: StreamName, value: &Application::EventValue) -> u32 {
let value = bcs::to_bytes(value).expect("Failed to serialize event value");
let entry = self.created_events.entry(name).or_default();
entry.push(value);
entry.len() as u32 - 1
}
/// Adds an event to a stream, so that it can be read using `read_event`.
pub fn add_event(&mut self, chain_id: ChainId, name: StreamName, index: u32, value: &[u8]) {
self.events.insert((chain_id, name, index), value.to_vec());
}
/// Reads an event from a stream. Returns the event's value.
///
/// Panics if the event doesn't exist.
pub fn read_event(
&mut self,
chain_id: ChainId,
name: StreamName,
index: u32,
) -> Application::EventValue {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/conversions_from_wit.rs | linera-sdk/src/contract/conversions_from_wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Conversions from types generated by [`wit-bindgen`] to types declared in [`linera-sdk`].
use linera_base::{
crypto::CryptoHash,
data_types::{Amount, StreamUpdate},
identifiers::{
AccountOwner, ApplicationId, ChainId, DataBlobHash, GenericApplicationId, ModuleId,
StreamId, StreamName,
},
ownership::{ChangeApplicationPermissionsError, CloseChainError},
vm::VmRuntime,
};
use super::wit::{
contract_runtime_api as wit_contract_api,
exports::linera::app::contract_entrypoints as wit_entrypoints,
};
// Reassembles a 256-bit hash from the four 64-bit limbs used by the WIT interface.
impl From<wit_contract_api::CryptoHash> for CryptoHash {
    fn from(crypto_hash: wit_contract_api::CryptoHash) -> Self {
        CryptoHash::from([
            crypto_hash.part1,
            crypto_hash.part2,
            crypto_hash.part3,
            crypto_hash.part4,
        ])
    }
}
impl From<wit_contract_api::DataBlobHash> for DataBlobHash {
    fn from(hash_value: wit_contract_api::DataBlobHash) -> Self {
        DataBlobHash(hash_value.inner0.into())
    }
}
// Reassembles a 20-byte (Ethereum-style) address from three 64-bit limbs:
// `part1` and `part2` contribute 8 big-endian bytes each, `part3` only its top 4 bytes.
impl From<wit_contract_api::Array20> for [u8; 20] {
    fn from(ethereum_address: wit_contract_api::Array20) -> Self {
        let mut bytes = [0u8; 20];
        bytes[0..8].copy_from_slice(&ethereum_address.part1.to_be_bytes());
        bytes[8..16].copy_from_slice(&ethereum_address.part2.to_be_bytes());
        bytes[16..20].copy_from_slice(&ethereum_address.part3.to_be_bytes()[0..4]);
        bytes
    }
}
// Maps each WIT `AccountOwner` variant onto its SDK counterpart one-to-one.
impl From<wit_contract_api::AccountOwner> for AccountOwner {
    fn from(account_owner: wit_contract_api::AccountOwner) -> Self {
        match account_owner {
            wit_contract_api::AccountOwner::Reserved(value) => AccountOwner::Reserved(value),
            wit_contract_api::AccountOwner::Address32(value) => {
                AccountOwner::Address32(value.into())
            }
            wit_contract_api::AccountOwner::Address20(value) => {
                AccountOwner::Address20(value.into())
            }
        }
    }
}
impl From<wit_contract_api::ModuleId> for ModuleId {
    fn from(module_id: wit_contract_api::ModuleId) -> Self {
        ModuleId::new(
            module_id.contract_blob_hash.into(),
            module_id.service_blob_hash.into(),
            module_id.vm_runtime.into(),
        )
    }
}
impl From<wit_contract_api::VmRuntime> for VmRuntime {
    fn from(vm_runtime: wit_contract_api::VmRuntime) -> Self {
        match vm_runtime {
            wit_contract_api::VmRuntime::Wasm => VmRuntime::Wasm,
            wit_contract_api::VmRuntime::Evm => VmRuntime::Evm,
        }
    }
}
impl From<wit_contract_api::ApplicationId> for ApplicationId {
    fn from(application_id: wit_contract_api::ApplicationId) -> Self {
        ApplicationId::new(application_id.application_description_hash.into())
    }
}
impl From<wit_contract_api::ChainId> for ChainId {
    fn from(chain_id: wit_contract_api::ChainId) -> Self {
        ChainId(chain_id.inner0.into())
    }
}
// The WIT interface transports an `Amount` as a `(lower, upper)` pair of `u64`s;
// recombine them into the `u128` attos value.
impl From<wit_contract_api::Amount> for Amount {
    fn from(balance: wit_contract_api::Amount) -> Self {
        let (lower_half, upper_half) = balance.inner0;
        let value = ((upper_half as u128) << 64) | (lower_half as u128);
        Amount::from_attos(value)
    }
}
impl From<wit_contract_api::CloseChainError> for CloseChainError {
    fn from(guest: wit_contract_api::CloseChainError) -> Self {
        match guest {
            wit_contract_api::CloseChainError::NotPermitted => CloseChainError::NotPermitted,
        }
    }
}
impl From<wit_contract_api::ChangeApplicationPermissionsError>
    for ChangeApplicationPermissionsError
{
    fn from(guest: wit_contract_api::ChangeApplicationPermissionsError) -> Self {
        match guest {
            wit_contract_api::ChangeApplicationPermissionsError::NotPermitted => {
                ChangeApplicationPermissionsError::NotPermitted
            }
        }
    }
}
// The same conversions again for the entrypoint-side WIT types. `wit-bindgen`
// generates structurally identical but distinct types for each interface, so the
// impls cannot be shared with the `wit_contract_api` ones above.
impl From<wit_entrypoints::CryptoHash> for CryptoHash {
    fn from(crypto_hash: wit_entrypoints::CryptoHash) -> Self {
        CryptoHash::from([
            crypto_hash.part1,
            crypto_hash.part2,
            crypto_hash.part3,
            crypto_hash.part4,
        ])
    }
}
impl From<wit_entrypoints::ApplicationId> for ApplicationId {
    fn from(application_id: wit_entrypoints::ApplicationId) -> Self {
        ApplicationId::new(application_id.application_description_hash.into())
    }
}
impl From<wit_entrypoints::GenericApplicationId> for GenericApplicationId {
    fn from(generic_application_id: wit_entrypoints::GenericApplicationId) -> Self {
        match generic_application_id {
            wit_entrypoints::GenericApplicationId::System => GenericApplicationId::System,
            wit_entrypoints::GenericApplicationId::User(application_id) => {
                GenericApplicationId::User(application_id.into())
            }
        }
    }
}
impl From<wit_entrypoints::ChainId> for ChainId {
    fn from(chain_id: wit_entrypoints::ChainId) -> Self {
        ChainId(chain_id.inner0.into())
    }
}
impl From<wit_entrypoints::StreamName> for StreamName {
    fn from(stream_name: wit_entrypoints::StreamName) -> Self {
        StreamName(stream_name.inner0)
    }
}
impl From<wit_entrypoints::StreamId> for StreamId {
    fn from(stream_id: wit_entrypoints::StreamId) -> Self {
        StreamId {
            application_id: stream_id.application_id.into(),
            stream_name: stream_id.stream_name.into(),
        }
    }
}
impl From<wit_entrypoints::StreamUpdate> for StreamUpdate {
    fn from(stream_update: wit_entrypoints::StreamUpdate) -> Self {
        StreamUpdate {
            chain_id: stream_update.chain_id.into(),
            stream_id: stream_update.stream_id.into(),
            previous_index: stream_update.previous_index,
            next_index: stream_update.next_index,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/runtime.rs | linera-sdk/src/contract/runtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Runtime types to interface with the host executing the contract.
use linera_base::{
abi::{ContractAbi, ServiceAbi},
data_types::{
Amount, ApplicationPermissions, BlockHeight, Bytecode, Resources, SendMessageRequest,
Timestamp,
},
ensure, http,
identifiers::{
Account, AccountOwner, ApplicationId, ChainId, DataBlobHash, ModuleId, StreamName,
},
ownership::{
AccountPermissionError, ChainOwnership, ChangeApplicationPermissionsError, CloseChainError,
},
vm::VmRuntime,
};
use serde::Serialize;
use super::wit::{base_runtime_api as base_wit, contract_runtime_api as contract_wit};
use crate::{Contract, KeyValueStore, ViewStorageContext};
/// The common runtime to interface with the host executing the contract.
///
/// It automatically caches read-only values received from the host.
#[derive(Debug)]
pub struct ContractRuntime<Application>
where
    Application: Contract,
{
    // Each field starts as `None` and is filled in on first use by its getter, which
    // queries the host through the WIT interface. The doubly-nested `Option`s cache a
    // host answer that is itself optional (e.g. "not executing an incoming message").
    application_parameters: Option<Application::Parameters>,
    application_id: Option<ApplicationId<Application::Abi>>,
    application_creator_chain_id: Option<ChainId>,
    chain_id: Option<ChainId>,
    block_height: Option<BlockHeight>,
    message_is_bouncing: Option<Option<bool>>,
    message_origin_chain_id: Option<Option<ChainId>>,
    timestamp: Option<Timestamp>,
}
impl<Application> ContractRuntime<Application>
where
    Application: Contract,
{
    /// Creates a new [`ContractRuntime`] instance for a contract.
    pub(crate) fn new() -> Self {
        // All caches start empty; values are fetched lazily from the host on first access.
        ContractRuntime {
            application_parameters: None,
            application_id: None,
            application_creator_chain_id: None,
            chain_id: None,
            block_height: None,
            message_is_bouncing: None,
            message_origin_chain_id: None,
            timestamp: None,
        }
    }
    /// Returns the key-value store to interface with storage.
    pub fn key_value_store(&self) -> KeyValueStore {
        KeyValueStore::for_contracts()
    }
    /// Returns a storage context suitable for a root view.
    pub fn root_view_storage_context(&self) -> ViewStorageContext {
        // Empty base key — presumably the root of the application's key space;
        // TODO(review): confirm against `ViewStorageContext::new_unchecked`.
        ViewStorageContext::new_unchecked(self.key_value_store(), Vec::new(), ())
    }
}
impl<Application> ContractRuntime<Application>
where
    Application: Contract,
{
    /// Returns the application parameters provided when the application was created.
    ///
    /// The parameters are fetched from the host and deserialized from JSON once, then
    /// cached; subsequent calls clone the cached value.
    pub fn application_parameters(&mut self) -> Application::Parameters {
        self.application_parameters
            .get_or_insert_with(|| {
                let bytes = base_wit::application_parameters();
                serde_json::from_slice(&bytes)
                    .expect("Application parameters must be deserializable")
            })
            .clone()
    }
    /// Returns the ID of the current application.
    // Cached after the first host query.
    pub fn application_id(&mut self) -> ApplicationId<Application::Abi> {
        *self
            .application_id
            .get_or_insert_with(|| ApplicationId::from(base_wit::get_application_id()).with_abi())
    }
    /// Returns the chain ID of the current application creator.
    // Cached after the first host query.
    pub fn application_creator_chain_id(&mut self) -> ChainId {
        *self
            .application_creator_chain_id
            .get_or_insert_with(|| base_wit::get_application_creator_chain_id().into())
    }
    /// Returns the ID of the current chain.
    // Cached after the first host query.
    pub fn chain_id(&mut self) -> ChainId {
        *self
            .chain_id
            .get_or_insert_with(|| base_wit::get_chain_id().into())
    }
    /// Returns the height of the current block that is executing.
    // Cached after the first host query.
    pub fn block_height(&mut self) -> BlockHeight {
        *self
            .block_height
            .get_or_insert_with(|| base_wit::get_block_height().into())
    }
    /// Retrieves the current system time, i.e. the timestamp of the block in which this is called.
    // Cached after the first host query.
    pub fn system_time(&mut self) -> Timestamp {
        *self
            .timestamp
            .get_or_insert_with(|| base_wit::read_system_timestamp().into())
    }
    /// Returns the current chain balance.
    // Not cached: queried from the host on every call.
    pub fn chain_balance(&mut self) -> Amount {
        base_wit::read_chain_balance().into()
    }
    /// Returns the balance of one of the accounts on this chain.
    // Not cached: queried from the host on every call.
    pub fn owner_balance(&mut self, owner: AccountOwner) -> Amount {
        base_wit::read_owner_balance(owner.into()).into()
    }
    /// Retrieves the owner configuration for the current chain.
    // Not cached: queried from the host on every call.
    pub fn chain_ownership(&mut self) -> ChainOwnership {
        base_wit::get_chain_ownership().into()
    }
    /// Makes an HTTP `request` as an oracle and returns the HTTP response.
    ///
    /// Should only be used with queries where it is very likely that all validators will receive
    /// the same response, otherwise most block proposals will fail.
    ///
    /// Cannot be used in fast blocks: A block using this call should be proposed by a regular
    /// owner, not a super owner.
    pub fn http_request(&mut self, request: http::Request) -> http::Response {
        base_wit::perform_http_request(&request.into()).into()
    }
    /// Panics if the current time at block validation is `>= timestamp`. Note that block
    /// validation happens at or after the block timestamp, but isn't necessarily the same.
    ///
    /// Cannot be used in fast blocks: A block using this call should be proposed by a regular
    /// owner, not a super owner.
    pub fn assert_before(&mut self, timestamp: Timestamp) {
        base_wit::assert_before(timestamp.into());
    }
    /// Reads a data blob with the given hash from storage.
    pub fn read_data_blob(&mut self, hash: DataBlobHash) -> Vec<u8> {
        base_wit::read_data_blob(hash.into())
    }
    /// Asserts that a data blob with the given hash exists in storage.
    pub fn assert_data_blob_exists(&mut self, hash: DataBlobHash) {
        base_wit::assert_data_blob_exists(hash.into())
    }
    /// Returns true if the corresponding contract uses a zero amount of storage.
    pub fn has_empty_storage(&mut self, application: ApplicationId) -> bool {
        contract_wit::has_empty_storage(application.into())
    }
}
impl<Application> ContractRuntime<Application>
where
Application: Contract,
{
    /// Returns the authenticated owner for this execution, if there is one.
    // Not cached: queried from the host on every call.
    pub fn authenticated_owner(&mut self) -> Option<AccountOwner> {
        contract_wit::authenticated_owner().map(AccountOwner::from)
    }
    /// Returns [`true`] if the incoming message was rejected from the original destination and is
    /// now bouncing back, or [`None`] if not executing an incoming message.
    // Cached after the first host query (including a `None` answer).
    pub fn message_is_bouncing(&mut self) -> Option<bool> {
        *self
            .message_is_bouncing
            .get_or_insert_with(contract_wit::message_is_bouncing)
    }
    /// Returns the chain ID where the incoming message originated from, or [`None`] if not executing
    /// an incoming message.
    // Cached after the first host query (including a `None` answer).
    pub fn message_origin_chain_id(&mut self) -> Option<ChainId> {
        *self
            .message_origin_chain_id
            .get_or_insert_with(|| contract_wit::message_origin_chain_id().map(ChainId::from))
    }
    /// Returns the authenticated caller ID, if the caller configured it and if the current context
    /// is executing a cross-application call.
    // Not cached: queried from the host on every call.
    pub fn authenticated_caller_id(&mut self) -> Option<ApplicationId> {
        contract_wit::authenticated_caller_id().map(ApplicationId::from)
    }
    /// Verifies that the current execution context authorizes operations on a given account.
    ///
    /// Permission is granted if `owner` matches either the authenticated owner or the
    /// authenticated caller application (converted to an `AccountOwner`).
    pub fn check_account_permission(
        &mut self,
        owner: AccountOwner,
    ) -> Result<(), AccountPermissionError> {
        ensure!(
            self.authenticated_owner() == Some(owner)
                || self.authenticated_caller_id().map(AccountOwner::from) == Some(owner),
            AccountPermissionError::NotPermitted(owner)
        );
        Ok(())
    }
/// Schedules a message to be sent to this application on another chain.
pub fn send_message(&mut self, destination: ChainId, message: Application::Message) {
self.prepare_message(message).send_to(destination)
}
/// Returns a `MessageBuilder` to prepare a message to be sent.
pub fn prepare_message(
&mut self,
message: Application::Message,
) -> MessageBuilder<Application::Message> {
MessageBuilder::new(message)
}
/// Transfers an `amount` of native tokens from `source` owner account (or the current chain's
/// balance) to `destination`.
pub fn transfer(&mut self, source: AccountOwner, destination: Account, amount: Amount) {
contract_wit::transfer(source.into(), destination.into(), amount.into())
}
/// Claims an `amount` of native tokens from a `source` account to a `destination` account.
pub fn claim(&mut self, source: Account, destination: Account, amount: Amount) {
contract_wit::claim(source.into(), destination.into(), amount.into())
}
/// Calls another application.
pub fn call_application<A: ContractAbi + Send>(
&mut self,
authenticated: bool,
application: ApplicationId<A>,
call: &A::Operation,
) -> A::Response {
let call_bytes = A::serialize_operation(call)
.expect("Failed to serialize `Operation` in cross-application call");
let response_bytes = contract_wit::try_call_application(
authenticated,
application.forget_abi().into(),
&call_bytes,
);
A::deserialize_response(response_bytes)
.expect("Failed to deserialize `Response` in cross-application call")
}
/// Adds a new item to an event stream. Returns the new event's index in the stream.
pub fn emit(&mut self, name: StreamName, value: &Application::EventValue) -> u32 {
contract_wit::emit(
&name.into(),
&bcs::to_bytes(value).expect("Failed to serialize event"),
)
}
/// Reads an event from a stream. Returns the event's value.
///
/// Fails the block if the event doesn't exist.
pub fn read_event(
&mut self,
chain_id: ChainId,
name: StreamName,
index: u32,
) -> Application::EventValue {
let event = contract_wit::read_event(chain_id.into(), &name.into(), index);
bcs::from_bytes(&event).expect("Failed to deserialize event")
}
/// Subscribes this application to an event stream.
pub fn subscribe_to_events(
&mut self,
chain_id: ChainId,
application_id: ApplicationId,
name: StreamName,
) {
contract_wit::subscribe_to_events(chain_id.into(), application_id.into(), &name.into())
}
/// Unsubscribes this application from an event stream.
pub fn unsubscribe_from_events(
&mut self,
chain_id: ChainId,
application_id: ApplicationId,
name: StreamName,
) {
contract_wit::unsubscribe_from_events(chain_id.into(), application_id.into(), &name.into())
}
/// Queries an application service as an oracle and returns the response.
///
/// Should only be used with queries where it is very likely that all validators will compute
/// the same result, otherwise most block proposals will fail.
///
/// Cannot be used in fast blocks: A block using this call should be proposed by a regular
/// owner, not a super owner.
pub fn query_service<A: ServiceAbi + Send>(
&mut self,
application_id: ApplicationId<A>,
query: &A::Query,
) -> A::QueryResponse {
let query = serde_json::to_vec(query).expect("Failed to serialize service query");
let response = contract_wit::query_service(application_id.forget_abi().into(), &query);
serde_json::from_slice(&response).expect("Failed to deserialize service response")
}
/// Opens a new chain, configuring it with the provided `chain_ownership`,
/// `application_permissions` and initial `balance` (debited from the current chain).
pub fn open_chain(
&mut self,
chain_ownership: ChainOwnership,
application_permissions: ApplicationPermissions,
balance: Amount,
) -> ChainId {
let chain_id = contract_wit::open_chain(
&chain_ownership.into(),
&application_permissions.into(),
balance.into(),
);
chain_id.into()
}
/// Closes the current chain. Returns an error if the application doesn't have
/// permission to do so.
pub fn close_chain(&mut self) -> Result<(), CloseChainError> {
contract_wit::close_chain().map_err(|error| error.into())
}
/// Changes the application permissions for the current chain.
pub fn change_application_permissions(
&mut self,
application_permissions: ApplicationPermissions,
) -> Result<(), ChangeApplicationPermissionsError> {
contract_wit::change_application_permissions(&application_permissions.into())
.map_err(|error| error.into())
}
/// Creates a new on-chain application, based on the supplied module and parameters.
pub fn create_application<Abi, Parameters, InstantiationArgument>(
&mut self,
module_id: ModuleId,
parameters: &Parameters,
argument: &InstantiationArgument,
required_application_ids: Vec<ApplicationId>,
) -> ApplicationId<Abi>
where
Abi: ContractAbi,
Parameters: Serialize,
InstantiationArgument: Serialize,
{
let parameters = serde_json::to_vec(parameters)
.expect("Failed to serialize `Parameters` type for a cross-application call");
let argument = serde_json::to_vec(argument).expect(
"Failed to serialize `InstantiationArgument` type for a cross-application call",
);
let converted_application_ids: Vec<_> = required_application_ids
.into_iter()
.map(From::from)
.collect();
let application_id = contract_wit::create_application(
module_id.into(),
¶meters,
&argument,
&converted_application_ids,
);
ApplicationId::from(application_id).with_abi::<Abi>()
}
/// Creates a new data blob and returns its hash.
pub fn create_data_blob(&mut self, bytes: &[u8]) -> DataBlobHash {
let hash = contract_wit::create_data_blob(bytes);
hash.into()
}
/// Publishes a module with contract and service bytecode and returns the module ID.
pub fn publish_module(
&mut self,
contract: Bytecode,
service: Bytecode,
vm_runtime: VmRuntime,
) -> ModuleId {
contract_wit::publish_module(&contract.into(), &service.into(), vm_runtime.into()).into()
}
/// Returns the multi-leader round in which this block was validated.
pub fn validation_round(&mut self) -> Option<u32> {
contract_wit::validation_round()
}
}
/// A helper type that uses the builder pattern to configure how a message is sent, and then
/// sends the message once it is dropped.
#[must_use]
pub struct MessageBuilder<Message>
where
Message: Serialize,
{
authenticated: bool,
is_tracked: bool,
grant: Resources,
message: Message,
}
impl<Message> MessageBuilder<Message>
where
Message: Serialize,
{
/// Creates a new [`MessageBuilder`] instance to send the `message` to the `destination`.
pub(crate) fn new(message: Message) -> Self {
MessageBuilder {
authenticated: false,
is_tracked: false,
grant: Resources::default(),
message,
}
}
/// Marks the message to be tracked, so that the sender receives the message back if it is
/// rejected by the receiver.
pub fn with_tracking(mut self) -> Self {
self.is_tracked = true;
self
}
/// Forwards the authenticated owner with the message.
pub fn with_authentication(mut self) -> Self {
self.authenticated = true;
self
}
/// Forwards a grant of resources so the receiver can use it to pay for receiving the message.
pub fn with_grant(mut self, grant: Resources) -> Self {
self.grant = grant;
self
}
/// Schedules this `Message` to be sent to the `destination`.
pub fn send_to(self, destination: ChainId) {
let serialized_message =
bcs::to_bytes(&self.message).expect("Failed to serialize message to be sent");
let raw_message = SendMessageRequest {
destination,
authenticated: self.authenticated,
is_tracked: self.is_tracked,
grant: self.grant,
message: serialized_message,
};
contract_wit::send_message(&raw_message.into())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/mod.rs | linera-sdk/src/contract/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types and macros useful for writing an application contract.
mod conversions_from_wit;
mod conversions_to_wit;
#[cfg(not(with_testing))]
mod runtime;
#[cfg(with_testing)]
mod test_runtime;
#[doc(hidden)]
pub mod wit;
#[cfg(not(with_testing))]
pub use self::runtime::ContractRuntime;
#[cfg(with_testing)]
pub use self::test_runtime::MockContractRuntime;
#[doc(hidden)]
pub use self::wit::export_contract;
use crate::{log::ContractLogger, util::BlockingWait};
/// Inside tests, use the [`MockContractRuntime`] instead of the real [`ContractRuntime`].
#[cfg(with_testing)]
pub type ContractRuntime<Application> = MockContractRuntime<Application>;
/// Declares an implementation of the [`Contract`][`crate::Contract`] trait, exporting it from the
/// Wasm module.
///
/// Generates the necessary boilerplate for implementing the contract WIT interface, exporting the
/// necessary resource types and functions so that the host can call the application contract.
#[macro_export]
macro_rules! contract {
($contract:ident) => {
#[doc(hidden)]
static mut CONTRACT: Option<$contract> = None;
/// Export the contract interface.
$crate::export_contract!($contract with_types_in $crate::contract::wit);
/// Mark the contract type to be exported.
impl $crate::contract::wit::exports::linera::app::contract_entrypoints::Guest
for $contract
{
fn instantiate(argument: Vec<u8>) {
use $crate::util::BlockingWait as _;
$crate::contract::run_async_entrypoint::<$contract, _, _>(
unsafe { &mut CONTRACT },
move |contract| {
let argument = $crate::serde_json::from_slice(&argument)
.unwrap_or_else(|_| panic!("Failed to deserialize instantiation argument {argument:?}"));
contract.instantiate(argument).blocking_wait()
},
)
}
fn execute_operation(operation: Vec<u8>) -> Vec<u8> {
use $crate::util::BlockingWait as _;
$crate::contract::run_async_entrypoint::<$contract, _, _>(
unsafe { &mut CONTRACT },
move |contract| {
let operation = <$contract as $crate::abi::ContractAbi>::deserialize_operation(operation)
.expect("Failed to deserialize `Operation` in execute_operation");
let response = contract.execute_operation(operation).blocking_wait();
<$contract as $crate::abi::ContractAbi>::serialize_response(response)
.expect("Failed to serialize `Response` in execute_operation")
},
)
}
fn execute_message(message: Vec<u8>) {
use $crate::util::BlockingWait as _;
$crate::contract::run_async_entrypoint::<$contract, _, _>(
unsafe { &mut CONTRACT },
move |contract| {
let message: <$contract as $crate::Contract>::Message =
$crate::bcs::from_bytes(&message)
.expect("Failed to deserialize message");
contract.execute_message(message).blocking_wait()
},
)
}
fn process_streams(updates: Vec<
$crate::contract::wit::exports::linera::app::contract_entrypoints::StreamUpdate,
>) {
use $crate::util::BlockingWait as _;
$crate::contract::run_async_entrypoint::<$contract, _, _>(
unsafe { &mut CONTRACT },
move |contract| {
let updates = updates.into_iter().map(Into::into).collect();
contract.process_streams(updates).blocking_wait()
},
)
}
fn finalize() {
use $crate::util::BlockingWait as _;
let Some(contract) = (unsafe { CONTRACT.take() }) else {
$crate::ContractLogger::install();
panic!("Calling `store` on a `Contract` instance that wasn't loaded");
};
contract.store().blocking_wait();
}
}
/// Stub of a `main` entrypoint so that the binary doesn't fail to compile on targets other
/// than WebAssembly.
#[cfg(not(target_arch = "wasm32"))]
fn main() {}
};
}
/// Runs an asynchronous entrypoint in a blocking manner, by repeatedly polling the entrypoint
/// future.
pub fn run_async_entrypoint<Contract, Output, RawOutput>(
contract: &mut Option<Contract>,
entrypoint: impl FnOnce(&mut Contract) -> Output + Send,
) -> RawOutput
where
Contract: crate::Contract,
Output: Into<RawOutput> + Send + 'static,
{
ContractLogger::install();
let contract =
contract.get_or_insert_with(|| Contract::load(ContractRuntime::new()).blocking_wait());
entrypoint(contract).into()
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/src/contract/conversions_to_wit.rs | linera-sdk/src/contract/conversions_to_wit.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Conversions from types declared in [`linera-sdk`] to types generated by [`wit-bindgen`].
use linera_base::{
crypto::CryptoHash,
data_types::{
Amount, ApplicationPermissions, Bytecode, Resources, SendMessageRequest, TimeDelta,
},
identifiers::{
Account, AccountOwner, ApplicationId, ChainId, DataBlobHash, ModuleId, StreamName,
},
ownership::{ChainOwnership, TimeoutConfig},
vm::VmRuntime,
};
use linera_views::batch::WriteOperation;
use super::wit::contract_runtime_api as wit_contract_api;
impl From<CryptoHash> for wit_contract_api::CryptoHash {
fn from(crypto_hash: CryptoHash) -> Self {
let parts = <[u64; 4]>::from(crypto_hash);
wit_contract_api::CryptoHash {
part1: parts[0],
part2: parts[1],
part3: parts[2],
part4: parts[3],
}
}
}
impl From<DataBlobHash> for wit_contract_api::DataBlobHash {
fn from(hash_value: DataBlobHash) -> Self {
wit_contract_api::DataBlobHash {
inner0: hash_value.0.into(),
}
}
}
impl From<ChainId> for wit_contract_api::CryptoHash {
fn from(chain_id: ChainId) -> Self {
chain_id.0.into()
}
}
impl From<[u8; 20]> for wit_contract_api::Array20 {
fn from(bytes: [u8; 20]) -> Self {
wit_contract_api::Array20 {
part1: u64::from_be_bytes(bytes[0..8].try_into().unwrap()),
part2: u64::from_be_bytes(bytes[8..16].try_into().unwrap()),
part3: (u32::from_be_bytes(bytes[16..20].try_into().unwrap()) as u64) << 32,
}
}
}
impl From<Amount> for wit_contract_api::Amount {
fn from(host: Amount) -> Self {
wit_contract_api::Amount {
inner0: (host.lower_half(), host.upper_half()),
}
}
}
impl From<Account> for wit_contract_api::Account {
fn from(account: Account) -> Self {
wit_contract_api::Account {
chain_id: account.chain_id.into(),
owner: account.owner.into(),
}
}
}
impl From<AccountOwner> for wit_contract_api::AccountOwner {
fn from(account_owner: AccountOwner) -> Self {
match account_owner {
AccountOwner::Reserved(value) => wit_contract_api::AccountOwner::Reserved(value),
AccountOwner::Address32(owner) => {
wit_contract_api::AccountOwner::Address32(owner.into())
}
AccountOwner::Address20(owner) => {
wit_contract_api::AccountOwner::Address20(owner.into())
}
}
}
}
impl From<ChainId> for wit_contract_api::ChainId {
fn from(chain_id: ChainId) -> Self {
wit_contract_api::ChainId {
inner0: chain_id.0.into(),
}
}
}
impl From<ModuleId> for wit_contract_api::ModuleId {
fn from(module_id: ModuleId) -> Self {
wit_contract_api::ModuleId {
contract_blob_hash: module_id.contract_blob_hash.into(),
service_blob_hash: module_id.service_blob_hash.into(),
vm_runtime: module_id.vm_runtime.into(),
}
}
}
impl From<VmRuntime> for wit_contract_api::VmRuntime {
fn from(vm_runtime: VmRuntime) -> Self {
match vm_runtime {
VmRuntime::Wasm => wit_contract_api::VmRuntime::Wasm,
VmRuntime::Evm => wit_contract_api::VmRuntime::Evm,
}
}
}
impl From<ApplicationId> for wit_contract_api::ApplicationId {
fn from(application_id: ApplicationId) -> Self {
wit_contract_api::ApplicationId {
application_description_hash: application_id.application_description_hash.into(),
}
}
}
impl From<Resources> for wit_contract_api::Resources {
fn from(resources: Resources) -> Self {
wit_contract_api::Resources {
wasm_fuel: resources.wasm_fuel,
evm_fuel: resources.evm_fuel,
read_operations: resources.read_operations,
write_operations: resources.write_operations,
bytes_runtime: resources.bytes_runtime,
bytes_to_read: resources.bytes_to_read,
bytes_to_write: resources.bytes_to_write,
blobs_to_read: resources.blobs_to_read,
blobs_to_publish: resources.blobs_to_publish,
blob_bytes_to_publish: resources.blob_bytes_to_publish,
blob_bytes_to_read: resources.blob_bytes_to_read,
messages: resources.messages,
message_size: resources.message_size,
storage_size_delta: resources.storage_size_delta,
service_as_oracle_queries: resources.service_as_oracle_queries,
http_requests: resources.http_requests,
}
}
}
impl From<SendMessageRequest<Vec<u8>>> for wit_contract_api::SendMessageRequest {
fn from(message: SendMessageRequest<Vec<u8>>) -> Self {
Self {
destination: message.destination.into(),
authenticated: message.authenticated,
is_tracked: message.is_tracked,
grant: message.grant.into(),
message: message.message,
}
}
}
impl From<StreamName> for wit_contract_api::StreamName {
fn from(name: StreamName) -> Self {
wit_contract_api::StreamName {
inner0: name.into_bytes(),
}
}
}
impl From<TimeDelta> for wit_contract_api::TimeDelta {
fn from(delta: TimeDelta) -> Self {
Self {
inner0: delta.as_micros(),
}
}
}
impl From<TimeoutConfig> for wit_contract_api::TimeoutConfig {
fn from(config: TimeoutConfig) -> Self {
let TimeoutConfig {
fast_round_duration,
base_timeout,
timeout_increment,
fallback_duration,
} = config;
Self {
fast_round_duration: fast_round_duration.map(Into::into),
base_timeout: base_timeout.into(),
timeout_increment: timeout_increment.into(),
fallback_duration: fallback_duration.into(),
}
}
}
impl From<ApplicationPermissions> for wit_contract_api::ApplicationPermissions {
fn from(permissions: ApplicationPermissions) -> Self {
let ApplicationPermissions {
execute_operations,
mandatory_applications,
close_chain,
change_application_permissions,
call_service_as_oracle,
make_http_requests,
} = permissions;
Self {
execute_operations: execute_operations
.map(|app_ids| app_ids.into_iter().map(Into::into).collect()),
mandatory_applications: mandatory_applications.into_iter().map(Into::into).collect(),
close_chain: close_chain.into_iter().map(Into::into).collect(),
change_application_permissions: change_application_permissions
.into_iter()
.map(Into::into)
.collect(),
call_service_as_oracle: call_service_as_oracle
.map(|app_ids| app_ids.into_iter().map(Into::into).collect()),
make_http_requests: make_http_requests
.map(|app_ids| app_ids.into_iter().map(Into::into).collect()),
}
}
}
impl From<ChainOwnership> for wit_contract_api::ChainOwnership {
fn from(ownership: ChainOwnership) -> Self {
let ChainOwnership {
super_owners,
owners,
first_leader,
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config,
} = ownership;
Self {
super_owners: super_owners.into_iter().map(Into::into).collect(),
owners: owners
.into_iter()
.map(|(owner, weight)| (owner.into(), weight))
.collect(),
first_leader: first_leader.map(Into::into),
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config: timeout_config.into(),
}
}
}
impl From<WriteOperation> for wit_contract_api::WriteOperation {
fn from(write_operation: WriteOperation) -> Self {
match write_operation {
WriteOperation::Delete { key } => wit_contract_api::WriteOperation::Delete(key),
WriteOperation::DeletePrefix { key_prefix } => {
wit_contract_api::WriteOperation::DeletePrefix(key_prefix)
}
WriteOperation::Put { key, value } => {
wit_contract_api::WriteOperation::Put((key, value))
}
}
}
}
impl From<Bytecode> for wit_contract_api::Bytecode {
fn from(bytecode: Bytecode) -> Self {
wit_contract_api::Bytecode {
bytes: bytecode.bytes,
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/create-and-call/src/contract.rs | linera-sdk/tests/fixtures/create-and-call/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use counter_no_graphql::{CounterNoGraphQlAbi, CounterOperation, CounterRequest};
use create_and_call::{CreateAndCallAbi, CreateAndCallOperation};
use linera_sdk::{
linera_base_types::{Bytecode, VmRuntime, WithContractAbi},
views::{RootView, View},
Contract, ContractRuntime,
};
use self::state::CreateAndCallState;
pub struct CreateAndCallContract {
state: CreateAndCallState,
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(CreateAndCallContract);
impl WithContractAbi for CreateAndCallContract {
type Abi = CreateAndCallAbi;
}
impl Contract for CreateAndCallContract {
type Message = ();
type InstantiationArgument = ();
type Parameters = ();
type EventValue = ();
async fn load(runtime: ContractRuntime<Self>) -> Self {
let state = CreateAndCallState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
CreateAndCallContract { state, runtime }
}
async fn instantiate(&mut self, _value: ()) {
// Validate that the application parameters were configured correctly.
self.runtime.application_parameters();
self.state.value.set(None);
}
async fn execute_operation(&mut self, operation: CreateAndCallOperation) -> u64 {
let CreateAndCallOperation::CreateAndCall(
contract_bytes,
service_bytes,
initialization_value,
increment_value,
) = operation;
// Step 1: Convert Vec<u8> to Bytecode and publish module with Wasm runtime
let contract_bytecode = Bytecode::new(contract_bytes);
let service_bytecode = Bytecode::new(service_bytes);
let module_id =
self.runtime
.publish_module(contract_bytecode, service_bytecode, VmRuntime::Wasm);
// Step 2: Create application with initialization value
let application_id = self
.runtime
.create_application::<CounterNoGraphQlAbi, (), u64>(
module_id,
&(),
&initialization_value,
vec![],
);
self.state.value.set(Some(application_id));
// Step 3: Call the service. It should return the value before
// the initialization of this contract and thus zero.
let counter_request = CounterRequest::Query;
let value = self.runtime.query_service(application_id, &counter_request);
assert_eq!(value, 0);
// Step 4: Call the contract with counter increment operation
let counter_operation = CounterOperation::Increment(increment_value);
self.runtime
.call_application(true, application_id, &counter_operation)
}
async fn execute_message(&mut self, _message: ()) {
panic!("Create and call application doesn't support any cross-chain messages");
}
async fn store(mut self) {
self.state.save().await.expect("Failed to save state");
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/create-and-call/src/lib.rs | linera-sdk/tests/fixtures/create-and-call/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Create and Call Example Application that does not use GraphQL */
use std::fmt::Debug;
use linera_sdk::linera_base_types::{ContractAbi, ServiceAbi};
use serde::{Deserialize, Serialize};
pub struct CreateAndCallAbi;
impl ContractAbi for CreateAndCallAbi {
type Operation = CreateAndCallOperation;
type Response = u64;
}
impl ServiceAbi for CreateAndCallAbi {
type Query = CreateAndCallRequest;
type QueryResponse = u64;
}
#[derive(Serialize, Deserialize)]
pub enum CreateAndCallRequest {
Query,
CreateAndCall(Vec<u8>, Vec<u8>, u64, u64),
}
#[derive(Serialize, Deserialize)]
pub enum CreateAndCallOperation {
CreateAndCall(Vec<u8>, Vec<u8>, u64, u64),
}
impl Debug for CreateAndCallRequest {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CreateAndCallRequest::Query => write!(f, "CreateAndCallRequest::Query"),
CreateAndCallRequest::CreateAndCall(code, calldata, initial_value, increment) => {
write!(
f,
"CreateAndCallRequest::CreateAndCall(code: <{} bytes>, calldata: <{} bytes>, initial value: {}, increment: {})",
code.len(),
calldata.len(),
initial_value,
increment
)
}
}
}
}
impl Debug for CreateAndCallOperation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CreateAndCallOperation::CreateAndCall(code, calldata, initial_value, increment) => {
write!(
f,
"CreateAndCall(code: <{} bytes>, calldata: <{} bytes>, initial_value: {}, increment: {})",
code.len(),
calldata.len(),
initial_value,
increment
)
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/create-and-call/src/state.rs | linera-sdk/tests/fixtures/create-and-call/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use counter_no_graphql::CounterNoGraphQlAbi;
use linera_sdk::{
linera_base_types::ApplicationId,
views::{linera_views, RegisterView, RootView, ViewStorageContext},
};
/// The application state.
#[derive(RootView)]
#[view(context = ViewStorageContext)]
pub struct CreateAndCallState {
pub value: RegisterView<Option<ApplicationId<CounterNoGraphQlAbi>>>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/create-and-call/src/service.rs | linera-sdk/tests/fixtures/create-and-call/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use std::sync::Arc;
use counter_no_graphql::CounterRequest;
use create_and_call::{CreateAndCallOperation, CreateAndCallRequest};
use linera_sdk::{linera_base_types::WithServiceAbi, views::View, Service, ServiceRuntime};
use self::state::CreateAndCallState;
pub struct CreateAndCallService {
state: CreateAndCallState,
runtime: Arc<ServiceRuntime<Self>>,
}
linera_sdk::service!(CreateAndCallService);
impl WithServiceAbi for CreateAndCallService {
type Abi = create_and_call::CreateAndCallAbi;
}
impl Service for CreateAndCallService {
type Parameters = ();
async fn new(runtime: ServiceRuntime<Self>) -> Self {
let state = CreateAndCallState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
CreateAndCallService {
state,
runtime: Arc::new(runtime),
}
}
async fn handle_query(&self, request: CreateAndCallRequest) -> u64 {
match request {
CreateAndCallRequest::Query => {
let application_id = self.state.value.get().expect("An application_id");
let counter_request = CounterRequest::Query;
self.runtime
.query_application(application_id, &counter_request)
}
CreateAndCallRequest::CreateAndCall(bytecode, calldata, initial_value, increment) => {
let operation = CreateAndCallOperation::CreateAndCall(
bytecode,
calldata,
initial_value,
increment,
);
self.runtime.schedule_operation(&operation);
0
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/create-and-call/tests/test_create_and_call.rs | linera-sdk/tests/fixtures/create-and-call/tests/test_create_and_call.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Integration tests for the Create and Call application.
#![cfg(not(target_arch = "wasm32"))]
use linera_sdk::test::{ActiveChain, TestValidator};
/// Test creating and calling a counter application dynamically.
///
/// This test publishes the counter-no-graphql bytecode, then uses the create-and-call
/// application to dynamically create an instance of the counter and increment it.
/// The test verifies that the counter is correctly initialized and incremented.
#[tokio::test(flavor = "multi_thread")]
async fn test_create_and_call() {
let (validator, create_call_module_id) =
TestValidator::with_current_module::<create_and_call::CreateAndCallAbi, (), ()>().await;
let mut chain = validator.new_chain().await;
// Step 1: Get the bytecode for "counter-no-graphql" by compiling it from its directory
let counter_no_graphql_path = std::path::Path::new("../../../../examples/counter-no-graphql");
let counter_no_graphql_path = std::fs::canonicalize(counter_no_graphql_path)
.expect("Failed to get absolute path to counter-no-graphql");
// Build and find the counter-no-graphql bytecode files
ActiveChain::build_bytecode_files_in(&counter_no_graphql_path);
let (counter_contract, counter_service) =
ActiveChain::find_bytecode_files_in(&counter_no_graphql_path).await;
// Extract the raw bytes from the bytecode
let counter_contract_bytes = counter_contract.bytes.to_vec();
let counter_service_bytes = counter_service.bytes.to_vec();
// Step 2: Create the "create-and-call" application
let application_id = chain
.create_application(create_call_module_id, (), (), vec![])
.await;
// Step 3: Call the CreateAndCall operation with the counter bytecode,
// initialization value of 43, and increment of 5
let initialization_value = 43;
let increment_value = 5;
let create_and_call_operation = create_and_call::CreateAndCallOperation::CreateAndCall(
counter_contract_bytes,
counter_service_bytes,
initialization_value,
increment_value,
);
chain
.add_block(|block| {
block.with_operation(application_id, create_and_call_operation);
})
.await;
// Step 4: Query the create-and-call application to get the result (should be 48 = 43 + 5)
let query_request = create_and_call::CreateAndCallRequest::Query;
let outcome = chain.query(application_id, query_request).await;
let expected_value = 48; // 43 + 5
assert_eq!(outcome.response, expected_value);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/publish-read-data-blob/src/contract.rs | linera-sdk/tests/fixtures/publish-read-data-blob/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use linera_sdk::{
linera_base_types::{DataBlobHash, WithContractAbi},
Contract, ContractRuntime,
};
use publish_read_data_blob::{Operation, PublishReadDataBlobAbi};
pub struct PublishReadDataBlobContract {
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(PublishReadDataBlobContract);
impl WithContractAbi for PublishReadDataBlobContract {
type Abi = PublishReadDataBlobAbi;
}
impl Contract for PublishReadDataBlobContract {
type Message = ();
type InstantiationArgument = ();
type Parameters = ();
type EventValue = ();
async fn load(runtime: ContractRuntime<Self>) -> Self {
PublishReadDataBlobContract { runtime }
}
async fn instantiate(&mut self, _argument: ()) {
// Validate that the application parameters were configured correctly.
self.runtime.application_parameters();
}
async fn execute_operation(&mut self, operation: Operation) {
match operation {
Operation::CreateDataBlob(data) => {
self.runtime.create_data_blob(&data);
}
Operation::ReadDataBlob(hash, expected_data) => {
let data = self.runtime.read_data_blob(hash);
assert_eq!(
data, expected_data,
"Read data does not match expected data"
);
}
Operation::CreateAndReadDataBlob(data) => {
let hash: DataBlobHash = self.runtime.create_data_blob(&data);
let data_read = self.runtime.read_data_blob(hash);
assert_eq!(data_read, data);
}
}
}
async fn execute_message(&mut self, _message: ()) {
panic!("Publish-Read Data Blob application doesn't support any cross-chain messages");
}
async fn store(self) {}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/publish-read-data-blob/src/lib.rs | linera-sdk/tests/fixtures/publish-read-data-blob/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Publish-Read Data Blob Example Application */
use linera_sdk::linera_base_types::{ContractAbi, DataBlobHash, ServiceAbi};
use serde::{Deserialize, Serialize};
pub struct PublishReadDataBlobAbi;
#[derive(Debug, Deserialize, Serialize)]
pub enum Operation {
CreateDataBlob(Vec<u8>),
ReadDataBlob(DataBlobHash, Vec<u8>),
CreateAndReadDataBlob(Vec<u8>),
}
#[derive(Debug, Deserialize, Serialize)]
pub enum ServiceQuery {
PublishDataBlob(Vec<u8>),
ReadDataBlob(DataBlobHash, Vec<u8>),
PublishAndCreateOneOperation(Vec<u8>),
PublishAndCreateTwoOperations(Vec<u8>),
}
impl ContractAbi for PublishReadDataBlobAbi {
type Operation = Operation;
type Response = ();
}
impl ServiceAbi for PublishReadDataBlobAbi {
type Query = ServiceQuery;
type QueryResponse = Vec<u8>;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/publish-read-data-blob/src/state.rs | linera-sdk/tests/fixtures/publish-read-data-blob/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_sdk::{
linera_base_types::DataBlobHash,
views::{linera_views, RegisterView, RootView, ViewStorageContext},
};
#[derive(RootView)]
#[view(context = ViewStorageContext)]
pub struct PublishReadDataBlobState {
pub hash: RegisterView<Option<DataBlobHash>>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/publish-read-data-blob/src/service.rs | linera-sdk/tests/fixtures/publish-read-data-blob/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use std::sync::Arc;
use linera_sdk::{
linera_base_types::{BlobContent, CryptoHash, DataBlobHash, WithServiceAbi},
Service, ServiceRuntime,
};
use publish_read_data_blob::{Operation, PublishReadDataBlobAbi, ServiceQuery};
pub struct PublishReadDataBlobService {
runtime: Arc<ServiceRuntime<Self>>,
}
linera_sdk::service!(PublishReadDataBlobService);
impl WithServiceAbi for PublishReadDataBlobService {
type Abi = PublishReadDataBlobAbi;
}
impl Service for PublishReadDataBlobService {
type Parameters = ();
async fn new(runtime: ServiceRuntime<Self>) -> Self {
PublishReadDataBlobService {
runtime: Arc::new(runtime),
}
}
async fn handle_query(&self, query: ServiceQuery) -> Vec<u8> {
match query {
ServiceQuery::PublishDataBlob(data) => {
self.runtime
.schedule_operation(&Operation::CreateDataBlob(data));
Vec::new()
}
ServiceQuery::ReadDataBlob(hash, expected_data) => {
self.runtime
.schedule_operation(&Operation::ReadDataBlob(hash, expected_data));
Vec::new()
}
ServiceQuery::PublishAndCreateOneOperation(data) => {
self.runtime
.schedule_operation(&Operation::CreateAndReadDataBlob(data));
Vec::new()
}
ServiceQuery::PublishAndCreateTwoOperations(data) => {
// First operation: create the blob
self.runtime
.schedule_operation(&Operation::CreateDataBlob(data.clone()));
// Compute the blob_id from the data
let content = BlobContent::new_data(data.clone());
let hash = DataBlobHash(CryptoHash::new(&content));
// Second operation: read the blob with verification
self.runtime
.schedule_operation(&Operation::ReadDataBlob(hash, data));
Vec::new()
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/track-instantiation/src/contract.rs | linera-sdk/tests/fixtures/track-instantiation/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use linera_sdk::{
linera_base_types::WithContractAbi,
views::{RootView, View},
Contract, ContractRuntime,
};
use track_instantiation::TrackInstantiationAbi;
use self::state::TrackInstantiationState;
pub struct TrackInstantiationContract {
state: TrackInstantiationState,
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(TrackInstantiationContract);
impl WithContractAbi for TrackInstantiationContract {
type Abi = TrackInstantiationAbi;
}
impl Contract for TrackInstantiationContract {
type Message = ();
type InstantiationArgument = ();
type Parameters = ();
type EventValue = ();
async fn load(runtime: ContractRuntime<Self>) -> Self {
let state = TrackInstantiationState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
TrackInstantiationContract { state, runtime }
}
async fn instantiate(&mut self, _argument: ()) {
self.runtime.application_parameters();
// Send message to creator chain about instantiation
let creator_chain = self.runtime.application_creator_chain_id();
self.runtime
.prepare_message(())
.with_authentication()
.send_to(creator_chain);
}
async fn execute_operation(&mut self, _operation: ()) {
panic!("No operation being executed");
}
async fn execute_message(&mut self, _message: ()) {
let count = self.state.stats.get_mut();
*count += 1;
}
async fn store(mut self) {
self.state.save().await.expect("Failed to save state");
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/track-instantiation/src/lib.rs | linera-sdk/tests/fixtures/track-instantiation/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Track Instantiation Load Operation Application */
use linera_sdk::linera_base_types::{ContractAbi, ServiceAbi};
use serde::{Deserialize, Serialize};
pub struct TrackInstantiationAbi;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Query {
GetCount,
}
impl ContractAbi for TrackInstantiationAbi {
type Operation = ();
type Response = ();
}
impl ServiceAbi for TrackInstantiationAbi {
type Query = Query;
type QueryResponse = u64;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/track-instantiation/src/state.rs | linera-sdk/tests/fixtures/track-instantiation/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_sdk::views::{linera_views, RegisterView, RootView, ViewStorageContext};
/// The application state.
#[derive(RootView)]
#[view(context = ViewStorageContext)]
pub struct TrackInstantiationState {
pub stats: RegisterView<u64>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/track-instantiation/src/service.rs | linera-sdk/tests/fixtures/track-instantiation/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use linera_sdk::{linera_base_types::WithServiceAbi, views::View, Service, ServiceRuntime};
use track_instantiation::{Query, TrackInstantiationAbi};
use self::state::TrackInstantiationState;
pub struct TrackInstantiationService {
state: TrackInstantiationState,
}
linera_sdk::service!(TrackInstantiationService);
impl WithServiceAbi for TrackInstantiationService {
type Abi = TrackInstantiationAbi;
}
impl Service for TrackInstantiationService {
type Parameters = ();
async fn new(runtime: ServiceRuntime<Self>) -> Self {
let state = TrackInstantiationState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
TrackInstantiationService { state }
}
async fn handle_query(&self, query: Query) -> u64 {
match query {
Query::GetCount => *self.state.stats.get(),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/track-instantiation/tests/cross_chain.rs | linera-sdk/tests/fixtures/track-instantiation/tests/cross_chain.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Integration tests for the Fungible Token application.
#![cfg(not(target_arch = "wasm32"))]
use linera_sdk::test::TestValidator;
use track_instantiation::{Query, TrackInstantiationAbi};
/// Test transferring tokens across microchains.
///
/// Creates the application on a `sender_chain`, initializing it with a single account with some
/// tokens for that chain's owner. Transfers some of those tokens to a new `receiver_chain`, and
/// checks that the balances on each microchain are correct.
#[tokio::test]
async fn test_instantiation_messages() {
let (validator, module_id) =
TestValidator::with_current_module::<TrackInstantiationAbi, (), ()>().await;
let mut sender_chain = validator.new_chain().await;
let application_id = sender_chain
.create_application(module_id, (), (), vec![])
.await;
sender_chain.handle_received_messages().await;
let query = Query::GetCount;
assert_eq!(sender_chain.query(application_id, query).await.response, 1);
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/meta-counter/src/contract.rs | linera-sdk/tests/fixtures/meta-counter/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
use linera_sdk::{
linera_base_types::{ApplicationId, StreamName, WithContractAbi},
Contract, ContractRuntime, Resources,
};
use meta_counter::{Message, MetaCounterAbi, Operation};
pub struct MetaCounterContract {
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(MetaCounterContract);
impl MetaCounterContract {
fn counter_id(&mut self) -> ApplicationId<counter::CounterAbi> {
self.runtime.application_parameters()
}
}
impl WithContractAbi for MetaCounterContract {
type Abi = MetaCounterAbi;
}
impl Contract for MetaCounterContract {
type Message = Message;
type InstantiationArgument = ();
type Parameters = ApplicationId<counter::CounterAbi>;
type EventValue = String;
async fn load(runtime: ContractRuntime<Self>) -> Self {
MetaCounterContract { runtime }
}
async fn instantiate(&mut self, _argument: ()) {
// Validate that the application parameters were configured correctly.
self.counter_id();
// Send a no-op message to ourselves. This is only for testing contracts that send messages
// on initialization. Since the value is 0 it does not change the counter value.
let this_chain = self.runtime.chain_id();
self.runtime.emit(
StreamName(b"announcements".to_vec()),
&"instantiated".to_string(),
);
self.runtime.send_message(this_chain, Message::Increment(0));
}
async fn execute_operation(&mut self, operation: Operation) {
log::trace!("operation: {:?}", operation);
let Operation {
recipient_id,
authenticated,
is_tracked,
query_service,
fuel_grant,
message,
} = operation;
let mut message = self.runtime.prepare_message(message).with_grant(Resources {
wasm_fuel: fuel_grant,
..Resources::default()
});
if authenticated {
message = message.with_authentication();
}
if is_tracked {
message = message.with_tracking();
}
if query_service {
// Make a service query: The result will be logged in the block.
let counter_id = self.counter_id();
let _ = self
.runtime
.query_service(counter_id, &"query { value }".into());
}
message.send_to(recipient_id);
}
async fn execute_message(&mut self, message: Message) {
let is_bouncing = self
.runtime
.message_is_bouncing()
.expect("Message delivery status has to be available when executing a message");
if is_bouncing {
log::trace!("receiving a bouncing message {message:?}");
return;
}
match message {
Message::Fail => {
panic!("Message failed intentionally");
}
Message::Increment(value) => {
let counter_id = self.counter_id();
log::trace!("executing {} via {:?}", value, counter_id);
let operation = counter::CounterOperation::Increment { value };
self.runtime.call_application(true, counter_id, &operation);
}
}
}
async fn store(self) {}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/meta-counter/src/lib.rs | linera-sdk/tests/fixtures/meta-counter/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Meta-Counter Example Application */
use async_graphql::{Request, Response};
use linera_sdk::linera_base_types::{ChainId, ContractAbi, ServiceAbi};
use serde::{Deserialize, Serialize};
pub struct MetaCounterAbi;
impl ContractAbi for MetaCounterAbi {
type Operation = Operation;
type Response = ();
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Operation {
pub recipient_id: ChainId,
pub authenticated: bool,
pub is_tracked: bool,
pub query_service: bool,
pub fuel_grant: u64,
pub message: Message,
}
impl Operation {
pub fn increment(recipient_id: ChainId, value: u64, query_service: bool) -> Self {
Operation {
recipient_id,
authenticated: false,
is_tracked: false,
query_service,
fuel_grant: 0,
message: Message::Increment(value),
}
}
pub fn fail(recipient_id: ChainId) -> Self {
Operation {
recipient_id,
authenticated: false,
is_tracked: false,
query_service: false,
fuel_grant: 0,
message: Message::Fail,
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub enum Message {
Increment(u64),
Fail,
}
impl ServiceAbi for MetaCounterAbi {
type Query = Request;
type QueryResponse = Response;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/meta-counter/src/service.rs | linera-sdk/tests/fixtures/meta-counter/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
use async_graphql::{Request, Response};
use linera_sdk::{
linera_base_types::{ApplicationId, WithServiceAbi},
Service, ServiceRuntime,
};
pub struct MetaCounterService {
runtime: ServiceRuntime<Self>,
}
linera_sdk::service!(MetaCounterService);
impl WithServiceAbi for MetaCounterService {
type Abi = meta_counter::MetaCounterAbi;
}
impl Service for MetaCounterService {
type Parameters = ApplicationId<counter::CounterAbi>;
async fn new(runtime: ServiceRuntime<Self>) -> Self {
MetaCounterService { runtime }
}
async fn handle_query(&self, request: Request) -> Response {
let counter_id = self.runtime.application_parameters();
self.runtime.query_application(counter_id, &request)
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/contract-call/src/contract.rs | linera-sdk/tests/fixtures/contract-call/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
use contract_call::{ContractTransferAbi, Operation, Parameters};
use linera_sdk::{
linera_base_types::{Account, ApplicationId, WithContractAbi},
Contract, ContractRuntime,
};
pub struct ContractTransferContract {
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(ContractTransferContract);
impl WithContractAbi for ContractTransferContract {
type Abi = ContractTransferAbi;
}
impl Contract for ContractTransferContract {
type Message = ();
type InstantiationArgument = ();
type Parameters = Parameters;
type EventValue = ();
async fn load(runtime: ContractRuntime<Self>) -> Self {
ContractTransferContract { runtime }
}
async fn instantiate(&mut self, _value: ()) {
// Validate that the application parameters were configured correctly.
self.runtime.application_parameters();
}
async fn execute_operation(&mut self, operation: Operation) {
match operation {
Operation::DirectTransfer {
source,
destination,
amount,
} => {
// Direct transfer using runtime.transfer
self.runtime.transfer(source, destination, amount);
}
Operation::IndirectTransfer {
source,
destination,
amount,
} => {
// Indirect transfer: create application, transfer to it, then transfer from it
let module_id = self.runtime.application_parameters().module_id;
// let typed_module_id: ModuleId<ContractTransferAbi, Parameters, ()> = module_id.with_abi();
// Create a new application instance
let parameters = self.runtime.application_parameters();
let application_id = self
.runtime
.create_application::<ContractTransferAbi, Parameters, ()>(
module_id,
¶meters,
&(),
vec![],
);
// Transfer from source to the created application
let chain_id = self.runtime.chain_id();
let app_account = Account {
chain_id,
owner: application_id.into(),
};
self.runtime.transfer(source, app_account, amount);
// Authenticated calls should be fully visible.
let operation = Operation::TestSomeAuthenticatedOwnerCaller;
self.runtime
.call_application(true, application_id, &operation);
// Non-authenticated calls should be fully non-visible
let operation = Operation::TestNoneAuthenticatedOwnerCaller;
self.runtime
.call_application(false, application_id, &operation);
// Non-authenticated calls should be fully non-visible
let operation = Operation::TestNoneAuthenticatedOwnerCaller;
self.runtime
.call_application(false, application_id, &operation);
// Authenticated calls should be fully visible.
let operation = Operation::TestSomeAuthenticatedOwnerCaller;
self.runtime
.call_application(true, application_id, &operation);
// Call the created application to transfer from itself to destination.
// Authenticated or not, the system is able to access the caller
// for making the transfer.
let operation = Operation::DirectTransfer {
source: ApplicationId::into(application_id),
destination,
amount,
};
self.runtime
.call_application(false, application_id, &operation);
}
Operation::TestNoneAuthenticatedOwnerCaller => {
assert!(self.runtime.authenticated_owner().is_none());
assert!(self.runtime.authenticated_caller_id().is_none());
}
Operation::TestSomeAuthenticatedOwnerCaller => {
assert!(self.runtime.authenticated_owner().is_some());
assert!(self.runtime.authenticated_caller_id().is_some());
}
}
}
async fn execute_message(&mut self, _message: ()) {
panic!("Contract transfer application doesn't support any cross-chain messages");
}
async fn store(self) {}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/contract-call/src/lib.rs | linera-sdk/tests/fixtures/contract-call/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_sdk::linera_base_types::{
Account, AccountOwner, Amount, ContractAbi, ModuleId, ServiceAbi,
};
use serde::{Deserialize, Serialize};
pub struct ContractTransferAbi;
impl ContractAbi for ContractTransferAbi {
type Operation = Operation;
type Response = ();
}
impl ServiceAbi for ContractTransferAbi {
type Query = Query;
type QueryResponse = ();
}
#[derive(Debug, Serialize, Deserialize)]
pub enum Operation {
DirectTransfer {
source: AccountOwner,
destination: Account,
amount: Amount,
},
IndirectTransfer {
source: AccountOwner,
destination: Account,
amount: Amount,
},
TestNoneAuthenticatedOwnerCaller,
TestSomeAuthenticatedOwnerCaller,
}
#[derive(Debug, Serialize, Deserialize)]
pub enum Query {
DirectTransfer {
source: AccountOwner,
destination: Account,
amount: Amount,
},
IndirectTransfer {
source: AccountOwner,
destination: Account,
amount: Amount,
},
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Parameters {
pub module_id: ModuleId,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/contract-call/src/service.rs | linera-sdk/tests/fixtures/contract-call/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
use std::sync::Arc;
use contract_call::{ContractTransferAbi, Operation, Parameters, Query};
use linera_sdk::{linera_base_types::WithServiceAbi, Service, ServiceRuntime};
pub struct ContractTransferService {
runtime: Arc<ServiceRuntime<Self>>,
}
linera_sdk::service!(ContractTransferService);
impl WithServiceAbi for ContractTransferService {
type Abi = ContractTransferAbi;
}
impl Service for ContractTransferService {
type Parameters = Parameters;
async fn new(runtime: ServiceRuntime<Self>) -> Self {
ContractTransferService {
runtime: Arc::new(runtime),
}
}
async fn handle_query(&self, request: Query) {
match request {
Query::DirectTransfer {
source,
destination,
amount,
} => {
let operation = Operation::DirectTransfer {
source,
destination,
amount,
};
self.runtime.schedule_operation(&operation);
}
Query::IndirectTransfer {
source,
destination,
amount,
} => {
let operation = Operation::IndirectTransfer {
source,
destination,
amount,
};
self.runtime.schedule_operation(&operation);
}
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/contract-call/tests/integration.rs | linera-sdk/tests/fixtures/contract-call/tests/integration.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Integration tests for the Contract Call application
#![cfg(not(target_arch = "wasm32"))]
use contract_call::{ContractTransferAbi, Parameters};
use linera_sdk::{
linera_base_types::{Account, AccountOwner, Amount},
test::TestValidator,
};
/// Test that mimics the end-to-end contract call test from linera_net_tests.rs
/// This test creates two accounts, publishes the contract-call module,
/// creates an application, and tests indirect transfer functionality.
#[tokio::test]
async fn test_contract_call_integration() {
use contract_call::Operation;
let (validator, module_id) =
TestValidator::with_current_module::<ContractTransferAbi, Parameters, ()>().await;
let mut chain = validator.new_chain().await;
let owner1 = AccountOwner::from(chain.public_key());
let account1 = Account {
chain_id: chain.id(),
owner: owner1,
};
let transfer_amount = Amount::from_tokens(100);
let funding_chain = validator.get_chain(&validator.admin_chain_id());
let transfer_certificate = funding_chain
.add_block(|block| {
block.with_native_token_transfer(AccountOwner::CHAIN, account1, transfer_amount);
})
.await;
chain
.add_block(|block| {
block.with_messages_from(&transfer_certificate);
})
.await;
// Generate a second owner
let second_chain = validator.new_chain().await;
let owner2 = AccountOwner::from(second_chain.public_key());
// Create accounts for both owners
let account2 = Account {
chain_id: chain.id(),
owner: owner2,
};
// Give initial balance to the first account (simulating the transfer_with_accounts)
// In the test environment, chains start with default balance
// Create parameters for the application
let parameters = Parameters {
module_id: module_id.forget_abi(),
};
// Create the application
let application_id = chain
.create_application(module_id, parameters, (), vec![])
.await;
// Test indirect transfer: transfer from owner1 to owner2 via application
let transfer_amount = Amount::from_tokens(1);
// Use Operation to trigger indirect transfer operation
let operation = Operation::IndirectTransfer {
source: owner1,
destination: account2,
amount: transfer_amount,
};
// Add a block with the operation
chain
.add_block(|block| {
block.with_operation(application_id, operation);
})
.await;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/time-expiry/src/contract.rs | linera-sdk/tests/fixtures/time-expiry/src/contract.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use linera_sdk::{
linera_base_types::WithContractAbi,
views::{RootView, View},
Contract, ContractRuntime,
};
use time_expiry::{TimeExpiryAbi, TimeExpiryOperation};
use self::state::TimeExpiryState;
pub struct TimeExpiryContract {
state: TimeExpiryState,
runtime: ContractRuntime<Self>,
}
linera_sdk::contract!(TimeExpiryContract);
impl WithContractAbi for TimeExpiryContract {
type Abi = TimeExpiryAbi;
}
impl Contract for TimeExpiryContract {
type Message = ();
type InstantiationArgument = ();
type Parameters = ();
type EventValue = ();
async fn load(runtime: ContractRuntime<Self>) -> Self {
let state = TimeExpiryState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
TimeExpiryContract { state, runtime }
}
async fn instantiate(&mut self, _argument: ()) {
self.runtime.application_parameters();
}
async fn execute_operation(&mut self, operation: TimeExpiryOperation) {
let TimeExpiryOperation::ExpireAfter(delta) = operation;
// Get the current block timestamp.
let block_timestamp = self.runtime.system_time();
// Calculate the expiry timestamp.
let expiry_timestamp = block_timestamp.saturating_add(delta);
// Assert that the validation happens before the expiry timestamp.
self.runtime.assert_before(expiry_timestamp);
}
async fn execute_message(&mut self, _message: ()) {
panic!("TimeExpiry application doesn't support any cross-chain messages");
}
async fn store(mut self) {
self.state.save().await.expect("Failed to save state");
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/time-expiry/src/lib.rs | linera-sdk/tests/fixtures/time-expiry/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*! ABI of the Time Expiry Test Application */
use linera_sdk::linera_base_types::{ContractAbi, ServiceAbi, TimeDelta};
use serde::{Deserialize, Serialize};
pub struct TimeExpiryAbi;
#[derive(Debug, Deserialize, Serialize)]
pub enum TimeExpiryOperation {
/// Expire the operation after the given time delta from block timestamp.
ExpireAfter(TimeDelta),
}
impl ContractAbi for TimeExpiryAbi {
type Operation = TimeExpiryOperation;
type Response = ();
}
impl ServiceAbi for TimeExpiryAbi {
type Query = ();
type QueryResponse = ();
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/time-expiry/src/state.rs | linera-sdk/tests/fixtures/time-expiry/src/state.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_sdk::views::{linera_views, RegisterView, RootView, ViewStorageContext};
/// The application state (empty for this test fixture, but needs at least one field).
#[derive(RootView)]
#[view(context = ViewStorageContext)]
pub struct TimeExpiryState {
/// A dummy field since RootView requires at least one field.
pub dummy: RegisterView<u64>,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-sdk/tests/fixtures/time-expiry/src/service.rs | linera-sdk/tests/fixtures/time-expiry/src/service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#![cfg_attr(target_arch = "wasm32", no_main)]
mod state;
use linera_sdk::{linera_base_types::WithServiceAbi, views::View, Service, ServiceRuntime};
use time_expiry::TimeExpiryAbi;
use self::state::TimeExpiryState;
pub struct TimeExpiryService {
#[allow(dead_code)]
state: TimeExpiryState,
}
linera_sdk::service!(TimeExpiryService);
impl WithServiceAbi for TimeExpiryService {
type Abi = TimeExpiryAbi;
}
impl Service for TimeExpiryService {
type Parameters = ();
async fn new(runtime: ServiceRuntime<Self>) -> Self {
let state = TimeExpiryState::load(runtime.root_view_storage_context())
.await
.expect("Failed to load state");
TimeExpiryService { state }
}
async fn handle_query(&self, _query: ()) {}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/build.rs | linera-views/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() {
cfg_aliases::cfg_aliases! {
web: { all(target_arch = "wasm32", feature = "web") },
with_testing: { any(test, feature = "test") },
with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
with_dynamodb: { all(not(target_arch = "wasm32"), feature = "dynamodb") },
with_indexeddb: { all(web, feature = "indexeddb") },
with_rocksdb: { all(not(target_arch = "wasm32"), feature = "rocksdb") },
with_scylladb: { all(not(target_arch = "wasm32"), feature = "scylladb") },
with_graphql: { not(web) },
};
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/random.rs | linera-views/src/random.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(target_arch = "wasm32")]
use std::sync::{Mutex, MutexGuard, OnceLock};
use rand::{rngs::SmallRng, Rng, SeedableRng};
// The following seed is chosen to have equal numbers of 1s and 0s, as advised by
// https://docs.rs/rand/latest/rand/rngs/struct.SmallRng.html
// Specifically, it's "01" × 32 in binary
const RNG_SEED: u64 = 6148914691236517205;
/// A deterministic RNG.
pub type DeterministicRng = SmallRng;
/// A RNG that is non-deterministic if the platform supports it.
#[cfg(not(target_arch = "wasm32"))]
pub type NonDeterministicRng = rand::rngs::ThreadRng;
/// A RNG that is non-deterministic if the platform supports it.
#[cfg(target_arch = "wasm32")]
pub type NonDeterministicRng = MutexGuard<'static, DeterministicRng>;
/// Returns a deterministic RNG for testing.
pub fn make_deterministic_rng() -> DeterministicRng {
SmallRng::seed_from_u64(RNG_SEED)
}
/// Returns a non-deterministic RNG where supported.
pub fn make_nondeterministic_rng() -> NonDeterministicRng {
#[cfg(target_arch = "wasm32")]
{
static RNG: OnceLock<Mutex<SmallRng>> = OnceLock::new();
RNG.get_or_init(|| Mutex::new(make_deterministic_rng()))
.lock()
.expect("failed to lock RNG mutex")
}
#[cfg(not(target_arch = "wasm32"))]
{
rand::thread_rng()
}
}
/// Get a random alphanumeric string that can be used for all tests.
pub fn generate_random_alphanumeric_string(length: usize, charset: &[u8]) -> String {
(0..length)
.map(|_| {
let random_index = make_nondeterministic_rng().gen_range(0..charset.len());
charset[random_index] as char
})
.collect()
}
/// Returns a unique namespace for testing.
pub fn generate_test_namespace() -> String {
// Define the characters that are allowed in the alphanumeric string
let charset: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyz";
let entry = generate_random_alphanumeric_string(20, charset);
let namespace = format!("table_{}", entry);
tracing::warn!("Generating namespace={}", namespace);
namespace
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/lib.rs | linera-views/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/*!
This module is used in the Linera protocol to map complex data structures onto a
key-value store. The central notion is a [`views::View`](https://docs.rs/linera-views/latest/linera_views/views/trait.View.html)
which can be loaded from storage, modified in memory, and then committed (i.e. the changes are atomically persisted in storage).
The package provides essentially two functionalities:
* An abstraction to access databases.
* Several containers named views for storing data modeled on classical ones.
See `DESIGN.md` for more details.
## The supported databases.
The databases supported are of the NoSQL variety and they are key-value stores.
We provide support for the following databases:
* `MemoryDatabase` is using the memory
* `RocksDbDatabase` is a disk-based key-value store
* `DynamoDbDatabase` is the AWS-based DynamoDB service.
* `ScyllaDbDatabase` is a cloud-based Cassandra-compatible database.
* `StorageServiceDatabase` is a gRPC-based storage that uses either memory or RocksDB. It is available in `linera-storage-service`.
The corresponding trait in the code is the [`crate::store::KeyValueDatabase`](https://docs.rs/linera-views/latest/linera_views/store/trait.KeyValueDatabase.html).
as well as [`crate::store::KeyValueStore`](https://docs.rs/linera-views/latest/linera_views/store/trait.KeyValueStore.html).
The latter trait decomposes into a [`store::ReadableKeyValueStore`](https://docs.rs/linera-views/latest/linera_views/store/trait.ReadableKeyValueStore.html)
and a [`store::WritableKeyValueStore`](https://docs.rs/linera-views/latest/linera_views/store/trait.WritableKeyValueStore.html).
A context is the combination of a client and a base key (of type `Vec<u8>`).
## Views.
A view is a container whose data lies in one of the above-mentioned databases.
When the container is modified the modification lies first in the view before
being committed to the database. In technical terms, a view implements the trait `View`.
The specific functionalities of the trait `View` are the following:
* `context` for obtaining a reference to the storage context of the view.
* `load` for loading the view from a specific context.
* `rollback` for canceling all modifications that were not committed thus far.
* `clear` for clearing the view, in other words for reverting it to its default state.
* `flush` for persisting the changes to storage.
The following views implement the `View` trait:
* `RegisterView` implements the storing of a single data.
* `LogView` implements a log, which is a list of entries that can be expanded.
* `QueueView` implements a queue, which is a list of entries that can be expanded and reduced.
* `MapView` implements a map with keys and values.
* `SetView` implements a set with keys.
* `CollectionView` implements a map whose values are views themselves.
* `ReentrantCollectionView` implements a map for which different keys can be accessed independently.
* `ViewContainer<C>` implements a `KeyValueStore` and is used internally.
The `LogView` can be seen as an analog of `VecDeque` while `MapView` is an analog of `BTreeMap`.
*/
#![deny(missing_docs)]
// These traits have `Send` variants where possible.
#![allow(async_fn_in_trait)]
/// The definition of the batches for writing in the database.
pub mod batch;
/// The `KeyValueDatabase` and `KeyValueStore` traits and related definitions.
pub mod store;
/// The `Context` trait and related definitions.
pub mod context;
/// Common definitions used for views and backends.
pub mod common;
/// Definitions for the LRU cache.
pub mod lru_prefix_cache;
mod error;
pub use error::ViewError;
/// Elementary data-structures implementing the [`views::View`] trait.
pub mod views;
/// Backend implementing the [`crate::store::KeyValueStore`] trait.
pub mod backends;
/// Support for metrics.
#[cfg(with_metrics)]
pub mod metrics;
/// GraphQL implementations.
#[cfg(with_graphql)]
mod graphql;
/// Functions for random generation
#[cfg(with_testing)]
pub mod random;
/// Helper types for tests.
#[cfg(with_testing)]
pub mod test_utils;
#[cfg(with_dynamodb)]
pub use backends::dynamo_db;
#[cfg(with_indexeddb)]
pub use backends::indexed_db;
#[cfg(with_metrics)]
pub use backends::metering;
#[cfg(with_rocksdb)]
pub use backends::rocks_db;
#[cfg(with_scylladb)]
pub use backends::scylla_db;
pub use backends::{journaling, lru_caching, memory, value_splitting};
/// Re-exports used by the derive macros of this library.
#[doc(hidden)]
#[allow(deprecated)]
pub use generic_array;
#[doc(hidden)]
pub use sha3;
pub use views::{
bucket_queue_view, collection_view, hashable_wrapper, historical_hash_wrapper,
key_value_store_view, log_view, map_view, queue_view, reentrant_collection_view, register_view,
set_view,
};
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/store.rs | linera-views/src/store.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This provides the trait definitions for the stores.
use std::{fmt::Debug, future::Future};
use serde::{de::DeserializeOwned, Serialize};
#[cfg(with_testing)]
use crate::random::generate_test_namespace;
use crate::{
batch::{Batch, SimplifiedBatch},
common::from_bytes_option,
ViewError,
};
/// The error type for the key-value stores.
pub trait KeyValueStoreError:
std::error::Error + From<bcs::Error> + Debug + Send + Sync + 'static
{
/// The name of the backend.
const BACKEND: &'static str;
}
impl<E: KeyValueStoreError> From<E> for ViewError {
fn from(error: E) -> Self {
Self::StoreError {
backend: E::BACKEND,
error: Box::new(error),
}
}
}
/// Define an associated [`KeyValueStoreError`].
pub trait WithError {
/// The error type.
type Error: KeyValueStoreError;
}
/// Asynchronous read key-value operations.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait ReadableKeyValueStore: WithError {
/// The maximal size of keys that can be stored.
const MAX_KEY_SIZE: usize;
/// Retrieve the number of stream queries.
fn max_stream_queries(&self) -> usize;
/// Gets the root key of the store.
fn root_key(&self) -> Result<Vec<u8>, Self::Error>;
/// Retrieves a `Vec<u8>` from the database using the provided `key`.
async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Tests whether a key exists in the database
async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error>;
/// Tests whether a list of keys exist in the database
async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error>;
/// Retrieves multiple `Vec<u8>` from the database using the provided `keys`.
async fn read_multi_values_bytes(
&self,
keys: &[Vec<u8>],
) -> Result<Vec<Option<Vec<u8>>>, Self::Error>;
/// Finds the `key` matching the prefix. The prefix is not included in the returned keys.
async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error>;
/// Finds the `(key,value)` pairs matching the prefix. The prefix is not included in the returned keys.
async fn find_key_values_by_prefix(
&self,
key_prefix: &[u8],
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error>;
// We can't use `async fn` here in the below implementations due to
// https://github.com/rust-lang/impl-trait-utils/issues/17, but once that bug is fixed
// we can revert them to `async fn` syntax, which is neater.
/// Reads a single `key` and deserializes the result if present.
fn read_value<V: DeserializeOwned>(
&self,
key: &[u8],
) -> impl Future<Output = Result<Option<V>, Self::Error>> {
async { Ok(from_bytes_option(&self.read_value_bytes(key).await?)?) }
}
/// Reads multiple `keys` and deserializes the results if present.
fn read_multi_values<V: DeserializeOwned + Send + Sync>(
&self,
keys: &[Vec<u8>],
) -> impl Future<Output = Result<Vec<Option<V>>, Self::Error>> {
async {
let mut values = Vec::with_capacity(keys.len());
for entry in self.read_multi_values_bytes(keys).await? {
values.push(from_bytes_option(&entry)?);
}
Ok(values)
}
}
}
/// Asynchronous write key-value operations.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait WritableKeyValueStore: WithError {
/// The maximal size of values that can be stored.
const MAX_VALUE_SIZE: usize;
/// Writes the `batch` in the database.
async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error>;
/// Clears any journal entry that may remain.
/// The journal is located at the `root_key`.
async fn clear_journal(&self) -> Result<(), Self::Error>;
}
/// Asynchronous direct write key-value operations with simplified batch.
///
/// Some backend cannot implement `WritableKeyValueStore` directly and will require
/// journaling.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait DirectWritableKeyValueStore: WithError {
/// The maximal number of items in a batch.
const MAX_BATCH_SIZE: usize;
/// The maximal number of bytes of a batch.
const MAX_BATCH_TOTAL_SIZE: usize;
/// The maximal size of values that can be stored.
const MAX_VALUE_SIZE: usize;
/// The batch type.
type Batch: SimplifiedBatch + Serialize + DeserializeOwned + Default;
/// Writes the batch to the database.
async fn write_batch(&self, batch: Self::Batch) -> Result<(), Self::Error>;
}
/// The definition of a key-value database.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait KeyValueDatabase: WithError + Sized {
/// The configuration needed to interact with a new backend.
type Config: Send + Sync;
/// The result of opening a partition.
type Store;
/// The name of this database.
fn get_name() -> String;
/// Connects to an existing namespace using the given configuration.
async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error>;
/// Opens a shared partition starting at `root_key`. It is understood that the
/// partition MAY be read and written simultaneously from other clients.
fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error>;
/// Opens an exclusive partition starting at `root_key`. It is assumed that the
/// partition WILL NOT be read and written simultaneously by other clients.
///
/// IMPORTANT: This assumption is not enforced at the moment. However, future
/// implementations may choose to return an error if another client is detected.
fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error>;
/// Obtains the list of existing namespaces.
async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error>;
/// Lists the root keys of the namespace.
/// It is possible that some root keys have no keys.
async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error>;
/// Deletes all the existing namespaces.
fn delete_all(config: &Self::Config) -> impl Future<Output = Result<(), Self::Error>> {
async {
let namespaces = Self::list_all(config).await?;
for namespace in namespaces {
Self::delete(config, &namespace).await?;
}
Ok(())
}
}
/// Tests if a given namespace exists.
async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error>;
/// Creates a namespace. Returns an error if the namespace exists.
async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error>;
/// Deletes the given namespace.
async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error>;
/// Initializes a storage if missing and provides it.
fn maybe_create_and_connect(
config: &Self::Config,
namespace: &str,
) -> impl Future<Output = Result<Self, Self::Error>> {
async {
if !Self::exists(config, namespace).await? {
Self::create(config, namespace).await?;
}
Self::connect(config, namespace).await
}
}
/// Creates a new storage. Overwrites it if this namespace already exists.
fn recreate_and_connect(
config: &Self::Config,
namespace: &str,
) -> impl Future<Output = Result<Self, Self::Error>> {
async {
if Self::exists(config, namespace).await? {
Self::delete(config, namespace).await?;
}
Self::create(config, namespace).await?;
Self::connect(config, namespace).await
}
}
}
/// A key-value store that can perform both read and direct write operations.
///
/// This trait combines the capabilities of [`ReadableKeyValueStore`] and
/// [`DirectWritableKeyValueStore`], providing a full interface for stores
/// that can handle simplified batches directly without journaling.
pub trait DirectKeyValueStore: ReadableKeyValueStore + DirectWritableKeyValueStore {}
impl<T> DirectKeyValueStore for T where T: ReadableKeyValueStore + DirectWritableKeyValueStore {}
/// A key-value store that can perform both read and write operations.
///
/// This trait combines the capabilities of [`ReadableKeyValueStore`] and
/// [`WritableKeyValueStore`], providing a full interface for stores that
/// can handle complex batches with journaling support.
pub trait KeyValueStore: ReadableKeyValueStore + WritableKeyValueStore {}
impl<T> KeyValueStore for T where T: ReadableKeyValueStore + WritableKeyValueStore {}
/// The functions needed for testing purposes
#[cfg(with_testing)]
pub trait TestKeyValueDatabase: KeyValueDatabase {
/// Obtains a test config
async fn new_test_config() -> Result<Self::Config, Self::Error>;
/// Creates a database for testing purposes
async fn connect_test_namespace() -> Result<Self, Self::Error> {
let config = Self::new_test_config().await?;
let namespace = generate_test_namespace();
Self::recreate_and_connect(&config, &namespace).await
}
/// Creates a store for testing purposes
async fn new_test_store() -> Result<Self::Store, Self::Error> {
let database = Self::connect_test_namespace().await?;
database.open_shared(&[])
}
}
/// A module containing a dummy store used for caching views.
pub mod inactive_store {
use super::*;
/// A store which does not actually store anything - used for caching views.
pub struct InactiveStore;
/// An error struct for the inactive store.
#[derive(Clone, Copy, Debug)]
pub struct InactiveStoreError;
impl std::fmt::Display for InactiveStoreError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "inactive store error")
}
}
impl From<bcs::Error> for InactiveStoreError {
fn from(_other: bcs::Error) -> Self {
Self
}
}
impl std::error::Error for InactiveStoreError {}
impl KeyValueStoreError for InactiveStoreError {
const BACKEND: &'static str = "inactive";
}
impl WithError for InactiveStore {
type Error = InactiveStoreError;
}
impl ReadableKeyValueStore for InactiveStore {
const MAX_KEY_SIZE: usize = 0;
fn max_stream_queries(&self) -> usize {
0
}
fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
async fn read_value_bytes(&self, _key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
async fn contains_key(&self, _key: &[u8]) -> Result<bool, Self::Error> {
panic!("attempt to read from an inactive store!")
}
async fn contains_keys(&self, _keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
async fn read_multi_values_bytes(
&self,
_keys: &[Vec<u8>],
) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
async fn find_keys_by_prefix(
&self,
_key_prefix: &[u8],
) -> Result<Vec<Vec<u8>>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
/// Finds the `(key,value)` pairs matching the prefix. The prefix is not included in the returned keys.
async fn find_key_values_by_prefix(
&self,
_key_prefix: &[u8],
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
panic!("attempt to read from an inactive store!")
}
}
impl WritableKeyValueStore for InactiveStore {
const MAX_VALUE_SIZE: usize = 0;
async fn write_batch(&self, _batch: Batch) -> Result<(), Self::Error> {
panic!("attempt to write to an inactive store!")
}
async fn clear_journal(&self) -> Result<(), Self::Error> {
panic!("attempt to write to an inactive store!")
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/batch.rs | linera-views/src/batch.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! A set of functionalities for building batches to be written into the database.
//! A batch can contain three kinds of operations on a key/value store:
//! * Insertion of a key with an associated value
//! * Deletion of a specific key
//! * Deletion of all keys which contain a specified prefix
//!
//! The deletion using prefixes is generally but not always faster than deleting keys
//! one by one. The only purpose of the batch is to write some transactions into the
//! database.
//!
//! Note that normal users should not have to manipulate batches. The functionality
//! is public because some other libraries require it. But the users using views should
//! not have to deal with batches.
use std::{
collections::{BTreeMap, BTreeSet, HashSet},
fmt::Debug,
iter::Peekable,
ops::Bound,
vec::IntoIter,
};
use bcs::serialized_size;
use linera_witty::{WitLoad, WitStore, WitType};
use serde::{Deserialize, Serialize};
use crate::{
common::{get_key_range_for_prefix, get_uleb128_size},
ViewError,
};
/// A write operation as requested by a view when it needs to persist staged changes.
/// There are 3 possibilities for the batch:
/// * Deletion of a specific key.
/// * Deletion of all keys matching a specific prefix.
/// * Insertion or replacement of a key with a value.
#[derive(Clone, Debug, Eq, PartialEq, WitType, WitLoad, WitStore, Serialize)]
pub enum WriteOperation {
/// Delete the given key.
Delete {
/// The key that will be deleted.
key: Vec<u8>,
},
/// Delete all the keys matching the given prefix.
DeletePrefix {
/// The prefix of the keys to be deleted.
key_prefix: Vec<u8>,
},
/// Set or replace the value of a given key.
Put {
/// The key to be inserted or replaced.
key: Vec<u8>,
/// The value to be inserted on the key.
value: Vec<u8>,
},
}
/// A batch of write operations.
#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize)]
pub struct Batch {
/// The write operations.
pub operations: Vec<WriteOperation>,
}
/// A batch of deletions and insertions that operate on disjoint keys, thus can be
/// executed in any order.
#[derive(Default, Serialize, Deserialize)]
pub struct SimpleUnorderedBatch {
/// The deletions.
pub deletions: Vec<Vec<u8>>,
/// The insertions.
pub insertions: Vec<(Vec<u8>, Vec<u8>)>,
}
/// An unordered batch of deletions and insertions, together with a set of key-prefixes to
/// delete. Key-prefix deletions must happen before the insertions and the deletions.
#[derive(Default, Serialize, Deserialize)]
pub struct UnorderedBatch {
/// The key-prefix deletions.
pub key_prefix_deletions: Vec<Vec<u8>>,
/// The batch of deletions and insertions.
pub simple_unordered_batch: SimpleUnorderedBatch,
}
impl UnorderedBatch {
/// From an `UnorderedBatch`, creates a [`SimpleUnorderedBatch`] that does not contain the
/// `key_prefix_deletions`. This requires accessing the database to eliminate them.
pub async fn expand_delete_prefixes<DB: DeletePrefixExpander>(
self,
db: &DB,
) -> Result<SimpleUnorderedBatch, DB::Error> {
let mut insert_set = HashSet::new();
for (key, _) in &self.simple_unordered_batch.insertions {
insert_set.insert(key.clone());
}
let insertions = self.simple_unordered_batch.insertions;
let mut deletions = self.simple_unordered_batch.deletions;
for key_prefix in self.key_prefix_deletions {
for short_key in &db.expand_delete_prefix(&key_prefix).await? {
let mut key = key_prefix.clone();
key.extend(short_key);
if !insert_set.contains(&key) {
deletions.push(key);
}
}
}
Ok(SimpleUnorderedBatch {
deletions,
insertions,
})
}
/// Modifies an [`UnorderedBatch`] so that the key-prefix deletions do not conflict
/// with subsequent insertions. This may require accessing the database to compute
/// lists of deleted keys.
pub async fn expand_colliding_prefix_deletions<DB: DeletePrefixExpander>(
&mut self,
db: &DB,
) -> Result<(), DB::Error> {
if self.key_prefix_deletions.is_empty() {
return Ok(());
}
let inserted_keys = self
.simple_unordered_batch
.insertions
.iter()
.map(|x| x.0.clone())
.collect::<BTreeSet<_>>();
let mut key_prefix_deletions = Vec::new();
for key_prefix in &self.key_prefix_deletions {
if inserted_keys
.range(get_key_range_for_prefix(key_prefix.clone()))
.next()
.is_some()
{
for short_key in &db.expand_delete_prefix(key_prefix).await? {
let mut key = key_prefix.clone();
key.extend(short_key);
if !inserted_keys.contains(&key) {
self.simple_unordered_batch.deletions.push(key);
}
}
} else {
key_prefix_deletions.push(key_prefix.to_vec());
}
}
self.key_prefix_deletions = key_prefix_deletions;
Ok(())
}
/// The total number of entries of the batch.
pub fn len(&self) -> usize {
self.key_prefix_deletions.len() + self.simple_unordered_batch.len()
}
/// Tests whether the batch is empty or not
pub fn is_empty(&self) -> bool {
self.key_prefix_deletions.is_empty() && self.simple_unordered_batch.is_empty()
}
}
/// Checks if `key` is matched by any prefix in `key_prefix_set`.
/// The set `key_prefix_set` must be minimal for the function to work correctly.
/// That is, there should not be any two prefixes `p1` and `p2` such that `p1 < p2` for
/// the lexicographic ordering on `Vec<u8>` entries.
/// Under this condition we have equivalence between the following two statements:
/// * There is a key prefix in `key_prefix_set` that matches `key`.
/// * The highest key prefix in `key_prefix_set` is actually matching.
fn is_prefix_matched(key_prefix_set: &BTreeSet<Vec<u8>>, key: &[u8]) -> bool {
let range = (Bound::Unbounded, Bound::Included(key.to_vec()));
let range = key_prefix_set.range(range);
if let Some(value) = range.last() {
if value.len() > key.len() {
return false;
}
return value == &key[0..value.len()];
}
false
}
impl Batch {
/// Creates an empty batch.
pub fn new() -> Self {
Self::default()
}
/// The total size of the batch
pub fn size(&self) -> usize {
self.operations
.iter()
.map(|operation| match operation {
WriteOperation::Delete { key } => key.len(),
WriteOperation::Put { key, value } => key.len() + value.len(),
WriteOperation::DeletePrefix { key_prefix } => key_prefix.len(),
})
.sum()
}
/// Whether the batch is empty or not
pub fn is_empty(&self) -> bool {
self.operations.is_empty()
}
/// Returns the number of operations in this [`Batch`].
pub fn num_operations(&self) -> usize {
self.operations.len()
}
/// Builds a batch from a builder function.
pub async fn build<F>(builder: F) -> Result<Self, ViewError>
where
F: FnOnce(&mut Batch) -> futures::future::BoxFuture<Result<(), ViewError>> + Send + Sync,
{
let mut batch = Batch::new();
builder(&mut batch).await?;
Ok(batch)
}
/// Simplifies the batch by removing operations that are overwritten by others.
///
/// A key may appear multiple times in the batch, as an insert, a delete
/// or matched by a delete prefix.
/// ```rust
/// # use linera_views::batch::Batch;
/// let mut batch = Batch::new();
/// batch.put_key_value(vec![0, 1], &(34 as u128));
/// batch.delete_key(vec![0, 1]);
/// let unordered_batch = batch.simplify();
/// assert_eq!(unordered_batch.key_prefix_deletions.len(), 0);
/// assert_eq!(unordered_batch.simple_unordered_batch.insertions.len(), 0);
/// assert_eq!(unordered_batch.simple_unordered_batch.deletions.len(), 1);
/// ```
pub fn simplify(self) -> UnorderedBatch {
let mut delete_and_insert_map = BTreeMap::new();
let mut delete_prefix_set = BTreeSet::new();
for operation in self.operations {
match operation {
WriteOperation::Delete { key } => {
// If `key` is matched by a deleted prefix, then remove any inserted
// value. Otherwise, add the key to the set of deletions.
if is_prefix_matched(&delete_prefix_set, &key) {
delete_and_insert_map.remove(&key);
} else {
delete_and_insert_map.insert(key, None);
}
}
WriteOperation::Put { key, value } => {
// Record the insertion.
delete_and_insert_map.insert(key, Some(value));
}
WriteOperation::DeletePrefix { key_prefix } => {
// Remove the previous deletions and insertions covered by `key_prefix`.
let keys = delete_and_insert_map
.range(get_key_range_for_prefix(key_prefix.clone()))
.map(|x| x.0.to_vec())
.collect::<Vec<_>>();
for key in keys {
delete_and_insert_map.remove(&key);
}
// If `key_prefix` is covered by a previous deleted prefix, then we're done.
if is_prefix_matched(&delete_prefix_set, &key_prefix) {
continue;
}
// Otherwise, find the prefixes that are covered by the new key
// prefix.
let key_prefixes = delete_prefix_set
.range(get_key_range_for_prefix(key_prefix.clone()))
.map(|x: &Vec<u8>| x.to_vec())
.collect::<Vec<_>>();
// Delete them.
for key_prefix in key_prefixes {
delete_prefix_set.remove(&key_prefix);
}
// Then, insert the new key prefix.
delete_prefix_set.insert(key_prefix);
}
}
}
let key_prefix_deletions = delete_prefix_set.into_iter().collect();
let mut deletions = Vec::new();
let mut insertions = Vec::new();
for (key, val) in delete_and_insert_map {
match val {
Some(value) => insertions.push((key, value)),
None => deletions.push(key),
}
}
let simple_unordered_batch = SimpleUnorderedBatch {
deletions,
insertions,
};
UnorderedBatch {
key_prefix_deletions,
simple_unordered_batch,
}
}
/// Checks the size of the values of the batch.
pub fn check_value_size(&self, max_value_size: usize) -> bool {
for operation in &self.operations {
if let WriteOperation::Put { key: _, value } = operation {
if value.len() > max_value_size {
return false;
}
}
}
true
}
/// Adds the insertion of a key-value pair into the batch with a serializable value.
/// ```rust
/// # use linera_views::batch::Batch;
/// let mut batch = Batch::new();
/// batch.put_key_value(vec![0, 1], &(34 as u128));
/// ```
#[inline]
pub fn put_key_value(
&mut self,
key: Vec<u8>,
value: &impl Serialize,
) -> Result<(), bcs::Error> {
let bytes = bcs::to_bytes(value)?;
self.put_key_value_bytes(key, bytes);
Ok(())
}
/// Adds the insertion of a `(key, value)` pair into the batch with `value` a vector of `u8`.
/// ```rust
/// # use linera_views::batch::Batch;
/// let mut batch = Batch::new();
/// batch.put_key_value_bytes(vec![0, 1], vec![3, 4, 5]);
/// ```
#[inline]
pub fn put_key_value_bytes(&mut self, key: Vec<u8>, value: Vec<u8>) {
self.operations.push(WriteOperation::Put { key, value });
}
/// Inserts the deletion of a `key` into the batch.
/// ```rust
/// # use linera_views::batch::Batch;
/// let mut batch = Batch::new();
/// batch.delete_key(vec![0, 1]);
/// ```
#[inline]
pub fn delete_key(&mut self, key: Vec<u8>) {
self.operations.push(WriteOperation::Delete { key });
}
/// Inserts the deletion of a `key_prefix` into the batch.
/// ```rust
/// # use linera_views::batch::Batch;
/// let mut batch = Batch::new();
/// batch.delete_key_prefix(vec![0, 1]);
/// ```
#[inline]
pub fn delete_key_prefix(&mut self, key_prefix: Vec<u8>) {
self.operations
.push(WriteOperation::DeletePrefix { key_prefix });
}
}
/// A trait to expand `DeletePrefix` operations.
///
/// Certain databases (e.g. DynamoDB) do not support the deletion by prefix.
/// Thus we need to access the databases in order to replace a `DeletePrefix`
/// by a vector of the keys to be removed.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait DeletePrefixExpander {
/// The error type that can happen when expanding the key prefix.
type Error: Debug;
/// Returns the list of keys to be appended to the list.
async fn expand_delete_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error>;
}
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
/// A notion of batch useful for certain computations (notably journaling).
pub trait SimplifiedBatch: Sized + Send + Sync {
/// The iterator type used to process values from the batch.
type Iter: BatchValueWriter<Self>;
/// Creates a simplified batch from a standard one.
async fn from_batch<S: DeletePrefixExpander>(store: S, batch: Batch) -> Result<Self, S::Error>;
/// Returns an owning iterator over the values in the batch.
fn into_iter(self) -> Self::Iter;
/// Returns the total number of entries in the batch.
fn len(&self) -> usize;
/// Returns the total number of bytes in the batch.
fn num_bytes(&self) -> usize;
/// Returns the overhead size of the batch.
fn overhead_size(&self) -> usize;
/// Adds the deletion of key to the batch.
fn add_delete(&mut self, key: Vec<u8>);
/// Adds the insertion of a key-value pair to the batch.
fn add_insert(&mut self, key: Vec<u8>, value: Vec<u8>);
/// Returns true if the batch is empty.
fn is_empty(&self) -> bool {
self.len() == 0
}
}
/// An iterator-like object that can write values one by one to a batch while updating the
/// total size of the batch.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait BatchValueWriter<Batch> {
/// Returns true if there are no more values to write.
fn is_empty(&self) -> bool;
/// Writes the next value (if any) to the batch and updates the batch size accordingly.
///
/// Returns false if there was no value to write.
fn write_next_value(
&mut self,
batch: &mut Batch,
batch_size: &mut usize,
) -> Result<bool, bcs::Error>;
/// Computes the batch size that we would obtain if we wrote the next value (if any)
/// without consuming the value.
fn next_batch_size(
&mut self,
batch: &Batch,
batch_size: usize,
) -> Result<Option<usize>, bcs::Error>;
}
/// The iterator that corresponds to a `SimpleUnorderedBatch`
pub struct SimpleUnorderedBatchIter {
delete_iter: Peekable<IntoIter<Vec<u8>>>,
insert_iter: Peekable<IntoIter<(Vec<u8>, Vec<u8>)>>,
}
impl SimplifiedBatch for SimpleUnorderedBatch {
type Iter = SimpleUnorderedBatchIter;
fn into_iter(self) -> Self::Iter {
let delete_iter = self.deletions.into_iter().peekable();
let insert_iter = self.insertions.into_iter().peekable();
Self::Iter {
delete_iter,
insert_iter,
}
}
fn len(&self) -> usize {
self.deletions.len() + self.insertions.len()
}
fn num_bytes(&self) -> usize {
let mut total_size = 0;
for (key, value) in &self.insertions {
total_size += key.len() + value.len();
}
for deletion in &self.deletions {
total_size += deletion.len();
}
total_size
}
fn overhead_size(&self) -> usize {
get_uleb128_size(self.deletions.len()) + get_uleb128_size(self.insertions.len())
}
fn add_delete(&mut self, key: Vec<u8>) {
self.deletions.push(key)
}
fn add_insert(&mut self, key: Vec<u8>, value: Vec<u8>) {
self.insertions.push((key, value))
}
async fn from_batch<S: DeletePrefixExpander>(store: S, batch: Batch) -> Result<Self, S::Error> {
let unordered_batch = batch.simplify();
unordered_batch.expand_delete_prefixes(&store).await
}
}
impl BatchValueWriter<SimpleUnorderedBatch> for SimpleUnorderedBatchIter {
    /// Returns whether both underlying iterators are exhausted.
    fn is_empty(&self) -> bool {
        self.delete_iter.len() + self.insert_iter.len() == 0
    }

    /// Moves the next entry into `batch`, updating `batch_size` with its
    /// serialized size. Returns `Ok(false)` once everything has been written.
    fn write_next_value(
        &mut self,
        batch: &mut SimpleUnorderedBatch,
        batch_size: &mut usize,
    ) -> Result<bool, bcs::Error> {
        // Deletions are drained first, then insertions.
        match self.delete_iter.next() {
            Some(key) => {
                *batch_size += serialized_size(&key)?;
                batch.deletions.push(key);
                Ok(true)
            }
            None => match self.insert_iter.next() {
                Some((key, value)) => {
                    *batch_size += serialized_size(&key)? + serialized_size(&value)?;
                    batch.insertions.push((key, value));
                    Ok(true)
                }
                None => Ok(false),
            },
        }
    }

    /// Computes the serialized size of `batch` after the next entry would be
    /// added, without consuming that entry. Returns `Ok(None)` if nothing is left.
    fn next_batch_size(
        &mut self,
        batch: &SimpleUnorderedBatch,
        batch_size: usize,
    ) -> Result<Option<usize>, bcs::Error> {
        // Mirrors the order of `write_next_value`: the next entry is a
        // deletion if any remain, otherwise an insertion.
        if let Some(key) = self.delete_iter.peek() {
            let entry_size = serialized_size(key)?;
            let overhead = get_uleb128_size(batch.deletions.len() + 1)
                + get_uleb128_size(batch.insertions.len());
            Ok(Some(batch_size + entry_size + overhead))
        } else if let Some((key, value)) = self.insert_iter.peek() {
            let entry_size = serialized_size(key)? + serialized_size(value)?;
            let overhead = get_uleb128_size(batch.deletions.len())
                + get_uleb128_size(batch.insertions.len() + 1);
            Ok(Some(batch_size + entry_size + overhead))
        } else {
            Ok(None)
        }
    }
}
/// The iterator that corresponds to an `UnorderedBatch`.
// (The original comment said `SimpleUnorderedBatch`, a copy-paste slip.)
pub struct UnorderedBatchIter {
    /// Peekable iterator over the key-prefix deletions, drained first.
    delete_prefix_iter: Peekable<IntoIter<Vec<u8>>>,
    /// Iterator over the plain deletions and insertions of the inner simple batch.
    insert_deletion_iter: SimpleUnorderedBatchIter,
}
impl SimplifiedBatch for UnorderedBatch {
type Iter = UnorderedBatchIter;
fn into_iter(self) -> Self::Iter {
let delete_prefix_iter = self.key_prefix_deletions.into_iter().peekable();
let insert_deletion_iter = self.simple_unordered_batch.into_iter();
Self::Iter {
delete_prefix_iter,
insert_deletion_iter,
}
}
fn len(&self) -> usize {
self.key_prefix_deletions.len() + self.simple_unordered_batch.len()
}
fn num_bytes(&self) -> usize {
let mut total_size = self.simple_unordered_batch.num_bytes();
for prefix_deletion in &self.key_prefix_deletions {
total_size += prefix_deletion.len();
}
total_size
}
fn overhead_size(&self) -> usize {
get_uleb128_size(self.key_prefix_deletions.len())
+ self.simple_unordered_batch.overhead_size()
}
fn add_delete(&mut self, key: Vec<u8>) {
self.simple_unordered_batch.add_delete(key)
}
fn add_insert(&mut self, key: Vec<u8>, value: Vec<u8>) {
self.simple_unordered_batch.add_insert(key, value)
}
async fn from_batch<S: DeletePrefixExpander>(store: S, batch: Batch) -> Result<Self, S::Error> {
let mut unordered_batch = batch.simplify();
unordered_batch
.expand_colliding_prefix_deletions(&store)
.await?;
Ok(unordered_batch)
}
}
impl BatchValueWriter<UnorderedBatch> for UnorderedBatchIter {
    /// Returns whether all prefix deletions and simple entries are exhausted.
    fn is_empty(&self) -> bool {
        self.delete_prefix_iter.len() == 0 && self.insert_deletion_iter.is_empty()
    }

    /// Moves the next entry into `batch`, updating `batch_size` with its
    /// serialized size. Returns `Ok(false)` once everything has been written.
    fn write_next_value(
        &mut self,
        batch: &mut UnorderedBatch,
        batch_size: &mut usize,
    ) -> Result<bool, bcs::Error> {
        // Key-prefix deletions are drained before the simple entries.
        match self.delete_prefix_iter.next() {
            Some(prefix) => {
                *batch_size += serialized_size(&prefix)?;
                batch.key_prefix_deletions.push(prefix);
                Ok(true)
            }
            None => self
                .insert_deletion_iter
                .write_next_value(&mut batch.simple_unordered_batch, batch_size),
        }
    }

    /// Computes the serialized size of `batch` after the next entry would be
    /// added, without consuming it. Returns `Ok(None)` if nothing is left.
    fn next_batch_size(
        &mut self,
        batch: &UnorderedBatch,
        batch_size: usize,
    ) -> Result<Option<usize>, bcs::Error> {
        match self.delete_prefix_iter.peek() {
            Some(prefix) => {
                let entry_size = serialized_size(prefix)?;
                let overhead = get_uleb128_size(batch.key_prefix_deletions.len() + 1)
                    + batch.simple_unordered_batch.overhead_size();
                Ok(Some(batch_size + entry_size + overhead))
            }
            None => {
                // Account for the prefix-deletion length prefix before
                // delegating to the inner iterator, which adds its own overhead.
                let adjusted_size =
                    batch_size + get_uleb128_size(batch.key_prefix_deletions.len());
                self.insert_deletion_iter
                    .next_batch_size(&batch.simple_unordered_batch, adjusted_size)
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use linera_views::{
        batch::{Batch, SimpleUnorderedBatch, UnorderedBatch},
        context::{Context, MemoryContext},
        store::WritableKeyValueStore as _,
    };
    // A later prefix deletion wipes earlier insertions under that prefix but
    // keeps insertions outside of it.
    #[test]
    fn test_simplify_batch1() {
        let mut batch = Batch::new();
        batch.put_key_value_bytes(vec![1, 2], vec![]);
        batch.put_key_value_bytes(vec![1, 3, 3], vec![33, 2]);
        batch.put_key_value_bytes(vec![1, 2, 3], vec![34, 2]);
        batch.delete_key_prefix(vec![1, 2]);
        let unordered_batch = batch.simplify();
        assert_eq!(unordered_batch.key_prefix_deletions, vec![vec![1, 2]]);
        assert!(unordered_batch.simple_unordered_batch.deletions.is_empty());
        assert_eq!(
            unordered_batch.simple_unordered_batch.insertions,
            vec![(vec![1, 3, 3], vec![33, 2])]
        );
    }
    // Individual key deletions under a deleted prefix are subsumed by it,
    // regardless of the order in which they were recorded.
    #[test]
    fn test_simplify_batch2() {
        let mut batch = Batch::new();
        batch.delete_key(vec![1, 2, 3]);
        batch.delete_key_prefix(vec![1, 2]);
        batch.delete_key(vec![1, 2, 4]);
        let unordered_batch = batch.simplify();
        assert_eq!(unordered_batch.key_prefix_deletions, vec![vec![1, 2]]);
        assert!(unordered_batch.simple_unordered_batch.deletions.is_empty());
        assert!(unordered_batch.simple_unordered_batch.insertions.is_empty());
    }
    // A longer prefix deletion is absorbed by a shorter one, and the insertion
    // under the deleted range is dropped.
    #[test]
    fn test_simplify_batch3() {
        let mut batch = Batch::new();
        batch.delete_key_prefix(vec![1, 2]);
        batch.put_key_value_bytes(vec![1, 2, 3, 4], vec![]);
        batch.delete_key_prefix(vec![1, 2, 3]);
        let unordered_batch = batch.simplify();
        assert_eq!(unordered_batch.key_prefix_deletions, vec![vec![1, 2]]);
        assert!(unordered_batch.simple_unordered_batch.deletions.is_empty());
        assert!(unordered_batch.simple_unordered_batch.insertions.is_empty());
    }
    // An insert followed by a delete of the same key under a deleted prefix
    // leaves only the prefix deletion.
    #[test]
    fn test_simplify_batch4() {
        let mut batch = Batch::new();
        batch.delete_key_prefix(vec![1, 2]);
        batch.put_key_value_bytes(vec![1, 2, 3], vec![4, 5]);
        batch.delete_key(vec![1, 2, 3]);
        let unordered_batch = batch.simplify();
        assert_eq!(unordered_batch.key_prefix_deletions, vec![vec![1, 2]]);
        assert!(unordered_batch.simple_unordered_batch.deletions.is_empty());
        assert!(unordered_batch.simple_unordered_batch.insertions.is_empty());
    }
    // `expand_delete_prefixes` replaces a prefix deletion with one deletion per
    // key currently stored under that prefix.
    #[tokio::test]
    async fn test_simplify_batch5() {
        let context = MemoryContext::new_for_testing(());
        let mut batch = Batch::new();
        batch.put_key_value_bytes(vec![1, 2, 3], vec![]);
        batch.put_key_value_bytes(vec![1, 2, 4], vec![]);
        batch.put_key_value_bytes(vec![1, 2, 5], vec![]);
        batch.put_key_value_bytes(vec![1, 3, 3], vec![]);
        context.store().write_batch(batch).await.unwrap();
        let mut batch = Batch::new();
        batch.delete_key_prefix(vec![1, 2]);
        let unordered_batch = batch.simplify();
        let simple_unordered_batch = unordered_batch
            .expand_delete_prefixes(&context)
            .await
            .unwrap();
        assert_eq!(
            simple_unordered_batch.deletions,
            vec![vec![1, 2, 3], vec![1, 2, 4], vec![1, 2, 5]]
        );
        assert!(simple_unordered_batch.insertions.is_empty());
    }
    // `expand_colliding_prefix_deletions` keeps an insertion that collides with
    // a prefix deletion and drops the now-redundant prefix deletion.
    #[tokio::test]
    async fn test_simplify_batch6() {
        let context = MemoryContext::new_for_testing(());
        let insertions = vec![(vec![1, 2, 3], vec![])];
        let simple_unordered_batch = SimpleUnorderedBatch {
            insertions: insertions.clone(),
            deletions: vec![],
        };
        let key_prefix_deletions = vec![vec![1, 2]];
        let mut unordered_batch = UnorderedBatch {
            simple_unordered_batch,
            key_prefix_deletions,
        };
        unordered_batch
            .expand_colliding_prefix_deletions(&context)
            .await
            .unwrap();
        assert!(unordered_batch.simple_unordered_batch.deletions.is_empty());
        assert_eq!(
            unordered_batch.simple_unordered_batch.insertions,
            insertions
        );
        assert!(unordered_batch.key_prefix_deletions.is_empty());
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/error.rs | linera-views/src/error.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Main error type for the crate.
///
/// Most variants wrap lower-level errors via `#[from]`, so `?` converts them
/// automatically.
#[derive(thiserror::Error, Debug)]
pub enum ViewError {
    /// BCS serialization error.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    /// Input output error.
    #[error("I/O error")]
    IoError(#[from] std::io::Error),
    /// Arithmetic error
    #[error(transparent)]
    ArithmeticError(#[from] linera_base::data_types::ArithmeticError),
    /// Failed to lock a reentrant collection entry since it is currently being accessed
    // The payload is rendered with `{0:?}`; per the message it identifies the
    // contested entry.
    #[error(
        "failed to lock a reentrant collection entry since it is currently being accessed: {0:?}"
    )]
    TryLockError(Vec<u8>),
    /// Tokio errors can happen while joining.
    #[error("panic in sub-task: {0}")]
    TokioJoinError(#[from] tokio::task::JoinError),
    /// Errors within the context can occur and are presented as `ViewError`.
    #[error("storage operation error in {backend}: {error}")]
    StoreError {
        /// The name of the backend that produced the error
        backend: &'static str,
        /// The inner error
        #[source]
        error: Box<dyn std::error::Error + Send + Sync>,
    },
    /// The key must not be too long
    #[error("the key must not be too long")]
    KeyTooLong,
    /// The entry does not exist in memory
    // FIXME(#148): This belongs to a future `linera_storage::StoreError`.
    #[error("entry does not exist in storage: {0}")]
    NotFound(String),
    /// The database is corrupt: Entries don't have the expected hash.
    #[error("inconsistent database entries")]
    InconsistentEntries,
    /// The database is corrupt: Some entries are missing
    #[error("missing database entries for the context {0}")]
    MissingEntries(String),
    /// The values are incoherent.
    #[error("post load values error")]
    PostLoadValuesError,
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/common.rs | linera-views/src/common.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This provides some common code for the linera-views.
use std::{
collections::BTreeSet,
ops::{
Bound,
Bound::{Excluded, Included, Unbounded},
},
slice::ChunksExact,
};
use allocative::Allocative;
use itertools::Either;
use serde::de::DeserializeOwned;
use crate::ViewError;
// Output size of the SHA3-256 hasher used for view hashing.
type HasherOutputSize = <sha3::Sha3_256 as sha3::digest::OutputSizeUser>::OutputSize;
/// The output type of the hasher (a byte array of `HasherOutputSize` length).
#[doc(hidden)]
#[allow(deprecated)]
pub type HasherOutput = generic_array::GenericArray<u8, HasherOutputSize>;
#[derive(Clone, Debug, Allocative)]
/// An update, for example to a view.
pub enum Update<T> {
    /// The entry is removed.
    Removed,
    /// The entry is set to the following.
    Set(T),
}
#[derive(Clone, Debug, Allocative)]
/// The set of pending deletions of a view: either a full clear of the storage,
/// a set of deleted key prefixes, or both.
pub(crate) struct DeletionSet {
    // If `true`, everything under the base key is scheduled for deletion first.
    pub(crate) delete_storage_first: bool,
    // Key prefixes scheduled for deletion; kept suffix-free by `insert_key_prefix`.
    pub(crate) deleted_prefixes: BTreeSet<Vec<u8>>,
}
impl DeletionSet {
    /// Creates an empty `DeletionSet` with no pending deletions.
    pub(crate) fn new() -> Self {
        Self {
            delete_storage_first: false,
            deleted_prefixes: BTreeSet::new(),
        }
    }
    /// Schedules everything for deletion: the full-clear flag is set, which
    /// makes the individual prefixes redundant, so they are dropped.
    pub(crate) fn clear(&mut self) {
        self.delete_storage_first = true;
        self.deleted_prefixes.clear();
    }
    /// Discards all pending deletions.
    pub(crate) fn rollback(&mut self) {
        self.delete_storage_first = false;
        self.deleted_prefixes.clear();
    }
    /// Returns whether `index` is covered by a pending deletion (either the
    /// full clear or one of the deleted prefixes).
    pub(crate) fn contains_prefix_of(&self, index: &[u8]) -> bool {
        self.delete_storage_first || contains_prefix_of(&self.deleted_prefixes, index)
    }
    /// Returns whether any deletion is pending at all.
    pub(crate) fn has_pending_changes(&self) -> bool {
        self.delete_storage_first || !self.deleted_prefixes.is_empty()
    }
    /// Registers `key_prefix` as deleted; a no-op when the whole storage is
    /// already scheduled for deletion.
    pub(crate) fn insert_key_prefix(&mut self, key_prefix: Vec<u8>) {
        if !self.delete_storage_first {
            insert_key_prefix(&mut self.deleted_prefixes, key_prefix);
        }
    }
}
/// When wanting to find the entries in a `BTreeMap` with a specific prefix,
/// one option is to iterate over all keys. Another is to select an interval
/// that represents exactly the keys having that prefix. Which fortunately
/// is possible with the way the comparison operators for vectors are built.
///
/// The statement is that `p` is a prefix of `v` if and only if `p <= v < upper_bound(p)`.
/// Returns `None` when no finite upper bound exists (the prefix is empty or all `0xFF`).
pub(crate) fn get_upper_bound_option(key_prefix: &[u8]) -> Option<Vec<u8>> {
    // Find the last byte that can be incremented; every trailing 0xFF byte is
    // dropped, then that byte is bumped by one.
    let position = key_prefix.iter().rposition(|&byte| byte < u8::MAX)?;
    let mut upper_bound = key_prefix[..=position].to_vec();
    upper_bound[position] += 1;
    Some(upper_bound)
}
/// The upper bound that can be used in ranges when accessing
/// a container. That is a vector `v` is a prefix of `p` if and only if
/// `v` belongs to the interval `(Included(p), get_upper_bound(p))`.
pub(crate) fn get_upper_bound(key_prefix: &[u8]) -> Bound<Vec<u8>> {
match get_upper_bound_option(key_prefix) {
None => Unbounded,
Some(upper_bound) => Excluded(upper_bound),
}
}
/// Computes an interval so that a vector has `key_prefix` as a prefix
/// if and only if it belongs to the range.
pub(crate) fn get_key_range_for_prefix(key_prefix: Vec<u8>) -> (Bound<Vec<u8>>, Bound<Vec<u8>>) {
    // The upper bound is computed first, before `key_prefix` is moved into the
    // lower bound, to avoid a clone.
    let upper_bound = get_upper_bound(&key_prefix);
    (Included(key_prefix), upper_bound)
}
/// Deserializes an optional vector of `u8`; `None` stays `None`.
pub fn from_bytes_option<V: DeserializeOwned>(
    key_opt: &Option<Vec<u8>>,
) -> Result<Option<V>, bcs::Error> {
    key_opt
        .as_ref()
        .map(|bytes| bcs::from_bytes(bytes))
        .transpose()
}
/// Deserializes an optional vector of `u8`, substituting `V::default()` for `None`.
pub(crate) fn from_bytes_option_or_default<V: DeserializeOwned + Default>(
    key_opt: &Option<Vec<u8>>,
) -> Result<V, bcs::Error> {
    match key_opt {
        Some(bytes) => bcs::from_bytes(bytes),
        None => Ok(V::default()),
    }
}
/// `SuffixClosedSetIterator` iterates over the entries of a container ordered
/// lexicographically.
///
/// The function call `find_lower_bound(val)` returns a `Some(x)` where `x` is the highest
/// entry such that `x <= val` for the lexicographic order. If none exists then None is
/// returned. The function calls have to be done with increasing `val`.
///
/// The function call `find_key(val)` tests whether there exists a prefix p in the
/// set of vectors such that p is a prefix of val.
pub(crate) struct SuffixClosedSetIterator<'a, I> {
    /// Number of leading bytes of each entry to strip before comparisons.
    prefix_len: usize,
    /// The last entry seen that compared `<= val`.
    previous: Option<&'a Vec<u8>>,
    /// The next candidate entry, not yet known to exceed `val`.
    current: Option<&'a Vec<u8>>,
    /// The underlying iterator over the remaining entries.
    iter: I,
}
impl<'a, I> SuffixClosedSetIterator<'a, I>
where
    I: Iterator<Item = &'a Vec<u8>>,
{
    /// Creates the iterator, priming `current` with the first entry.
    pub(crate) fn new(prefix_len: usize, mut iter: I) -> Self {
        let previous = None;
        let current = iter.next();
        Self {
            prefix_len,
            previous,
            current,
            iter,
        }
    }
    /// Returns the highest entry `x` with `x <= val` (entries are compared with
    /// their first `prefix_len` bytes stripped), or `None` if no such entry
    /// exists. Successive calls must use non-decreasing `val`, since the scan
    /// only moves forward.
    pub(crate) fn find_lower_bound(&mut self, val: &[u8]) -> Option<&'a Vec<u8>> {
        loop {
            match &self.current {
                None => {
                    // Underlying iterator is exhausted; `previous` is the answer.
                    return self.previous;
                }
                Some(x) => {
                    if &x[self.prefix_len..] > val {
                        // `current` already exceeds `val`, so `previous` is the bound.
                        return self.previous;
                    }
                }
            }
            // Advance one step: the old `current` becomes the new `previous`.
            let current = self.iter.next();
            self.previous = std::mem::replace(&mut self.current, current);
        }
    }
    /// Returns whether some entry (stripped of `prefix_len` bytes) is a prefix
    /// of `index`. Works because any prefix of `index` is lexicographically
    /// `<= index`, so it suffices to check the lower bound. The non-decreasing
    /// call order required by `find_lower_bound` applies here too.
    pub(crate) fn find_key(&mut self, index: &[u8]) -> bool {
        let lower_bound = self.find_lower_bound(index);
        match lower_bound {
            None => false,
            Some(key_prefix) => index.starts_with(&key_prefix[self.prefix_len..]),
        }
    }
}
/// Returns whether some element of `prefixes` is a prefix of `key`.
pub(crate) fn contains_prefix_of(prefixes: &BTreeSet<Vec<u8>>, key: &[u8]) -> bool {
    // `BTreeSet` iterates in sorted order, as `SuffixClosedSetIterator` requires.
    let mut suffix_closed_set = SuffixClosedSetIterator::new(0, prefixes.iter());
    suffix_closed_set.find_key(key)
}
/// Inserts `prefix` into `prefixes`, keeping the set free of redundant entries:
/// a prefix already covered by a shorter one is not added, and existing
/// prefixes covered by the new one are removed.
pub(crate) fn insert_key_prefix(prefixes: &mut BTreeSet<Vec<u8>>, prefix: Vec<u8>) {
    if contains_prefix_of(prefixes, &prefix) {
        // A shorter (or equal) prefix already covers the new one.
        return;
    }
    // Collect and remove every existing prefix that the new prefix subsumes.
    let subsumed: Vec<Vec<u8>> = prefixes
        .range(get_key_range_for_prefix(prefix.clone()))
        .cloned()
        .collect();
    for key in subsumed {
        prefixes.remove(&key);
    }
    prefixes.insert(prefix);
}
// The lower bound tracks the greatest element `<= val` as `val` increases.
#[test]
fn suffix_closed_set_test1_the_lower_bound() {
    let mut set = BTreeSet::<Vec<u8>>::new();
    set.insert(vec![4]);
    set.insert(vec![7]);
    set.insert(vec![8]);
    set.insert(vec![10]);
    set.insert(vec![24]);
    set.insert(vec![40]);
    let mut suffix_closed_set = SuffixClosedSetIterator::new(0, set.iter());
    assert_eq!(suffix_closed_set.find_lower_bound(&[3]), None);
    assert_eq!(
        suffix_closed_set.find_lower_bound(&[15]),
        Some(vec![10]).as_ref()
    );
    assert_eq!(
        suffix_closed_set.find_lower_bound(&[17]),
        Some(vec![10]).as_ref()
    );
    assert_eq!(
        suffix_closed_set.find_lower_bound(&[25]),
        Some(vec![24]).as_ref()
    );
    assert_eq!(
        suffix_closed_set.find_lower_bound(&[27]),
        Some(vec![24]).as_ref()
    );
    assert_eq!(
        suffix_closed_set.find_lower_bound(&[42]),
        Some(vec![40]).as_ref()
    );
}
// `find_key` detects exact matches and entries that are prefixes of the query.
#[test]
fn suffix_closed_set_test2_find_key() {
    let mut set = BTreeSet::<Vec<u8>>::new();
    set.insert(vec![4]);
    set.insert(vec![0, 3]);
    set.insert(vec![5]);
    let mut suffix_closed_set = SuffixClosedSetIterator::new(0, set.iter());
    assert!(!suffix_closed_set.find_key(&[0]));
    assert!(suffix_closed_set.find_key(&[0, 3]));
    assert!(suffix_closed_set.find_key(&[0, 3, 4]));
    assert!(!suffix_closed_set.find_key(&[1]));
    assert!(suffix_closed_set.find_key(&[4]));
}
// With `prefix_len == 1`, the leading byte of every stored entry is ignored.
#[test]
fn suffix_closed_set_test3_find_key_prefix_len() {
    let mut set = BTreeSet::<Vec<u8>>::new();
    set.insert(vec![0, 4]);
    set.insert(vec![0, 3]);
    set.insert(vec![0, 0, 1]);
    let mut suffix_closed_set = SuffixClosedSetIterator::new(1, set.iter());
    assert!(!suffix_closed_set.find_key(&[0]));
    assert!(suffix_closed_set.find_key(&[0, 1]));
    assert!(suffix_closed_set.find_key(&[0, 1, 4]));
    assert!(suffix_closed_set.find_key(&[3]));
    assert!(!suffix_closed_set.find_key(&[5]));
}
// Inserting a longer prefix is a no-op when a shorter one already covers it.
#[test]
fn insert_key_prefix_test1() {
    let mut set = BTreeSet::<Vec<u8>>::new();
    set.insert(vec![0, 4]);
    insert_key_prefix(&mut set, vec![0, 4, 5]);
    let keys = set.iter().cloned().collect::<Vec<_>>();
    assert_eq!(keys, vec![vec![0, 4]]);
}
/// Sometimes we need a serialization that is different from the usual one and
/// for example preserves order.
/// `{to/from}_custom_bytes` has to be coherent with the `Borrow` trait.
pub trait CustomSerialize: Sized {
    /// Serializes the value
    fn to_custom_bytes(&self) -> Result<Vec<u8>, ViewError>;
    /// Deserializes the bytes produced by [`Self::to_custom_bytes`].
    fn from_custom_bytes(short_key: &[u8]) -> Result<Self, ViewError>;
}
impl CustomSerialize for u128 {
    fn to_custom_bytes(&self) -> Result<Vec<u8>, ViewError> {
        // BCS encodes `u128` as fixed-width little-endian bytes; reversing
        // yields a big-endian encoding, whose lexicographic order matches the
        // numeric order of the values.
        let mut bytes = bcs::to_bytes(&self)?;
        bytes.reverse();
        Ok(bytes)
    }
    fn from_custom_bytes(bytes: &[u8]) -> Result<Self, ViewError> {
        // Undo the reversal before handing the bytes back to BCS.
        let mut bytes = bytes.to_vec();
        bytes.reverse();
        let value = bcs::from_bytes(&bytes)?;
        Ok(value)
    }
}
/// This computes the offset of the BCS serialization of a vector.
/// The formula that should be satisfied is
/// `serialized_size(vec![v_1, ...., v_n]) = get_uleb128_size(n)`
/// `+ serialized_size(v_1)? + .... serialized_size(v_n)?`
pub(crate) const fn get_uleb128_size(len: usize) -> usize {
    // ULEB128 stores 7 bits per output byte, so the size is the number of
    // 7-bit groups needed to represent `len` (at least one). Counting by
    // shifting avoids the overflow that the previous `power *= 128`
    // accumulator hit for `len` in the top 7-bit group of `usize`
    // (a panic in debug builds).
    let mut remaining = len >> 7;
    let mut size = 1;
    while remaining != 0 {
        remaining >>= 7;
        size += 1;
    }
    size
}
/// Extension trait for slices.
pub trait SliceExt<T> {
    /// Same as `chunks_exact` but we allow the `chunk_size` to be zero when the slice is empty.
    ///
    /// In that case the returned iterator yields the empty slice indefinitely.
    fn chunks_exact_or_repeat(
        &self,
        chunk_size: usize,
    ) -> Either<ChunksExact<'_, T>, std::iter::Repeat<&[T]>>;
}
impl<T> SliceExt<T> for [T] {
    // # Panics
    // Panics if `chunk_size == 0` and the slice is non-empty (where
    // `chunks_exact` itself would panic).
    fn chunks_exact_or_repeat(
        &self,
        chunk_size: usize,
    ) -> Either<ChunksExact<'_, T>, std::iter::Repeat<&[T]>> {
        if chunk_size > 0 {
            Either::Left(self.chunks_exact(chunk_size))
        } else if self.is_empty() {
            // An empty slice with chunk size zero yields empty chunks forever.
            Either::Right(std::iter::repeat(&[]))
        } else {
            panic!("chunk_size must be nonzero unless the slice is empty")
        }
    }
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeSet;
    use linera_views::common::CustomSerialize;
    use rand::Rng;
    // Checks on a deterministic random sample that the custom `u128`
    // serialization round-trips and preserves numeric order lexicographically.
    #[test]
    fn test_ordering_serialization() {
        let mut rng = crate::random::make_deterministic_rng();
        let n = 1000;
        // `BTreeSet` deduplicates and sorts the sample.
        let mut set = BTreeSet::new();
        for _ in 0..n {
            let val = rng.gen::<u128>();
            set.insert(val);
        }
        let mut vec = Vec::new();
        for val in set {
            vec.push(val);
        }
        for i in 1..vec.len() {
            let val1 = vec[i - 1];
            let val2 = vec[i];
            assert!(val1 < val2);
            let vec1 = val1.to_custom_bytes().unwrap();
            let vec2 = val2.to_custom_bytes().unwrap();
            // Byte order must agree with numeric order.
            assert!(vec1 < vec2);
            let val_ret1 = u128::from_custom_bytes(&vec1).unwrap();
            let val_ret2 = u128::from_custom_bytes(&vec2).unwrap();
            assert_eq!(val1, val_ret1);
            assert_eq!(val2, val_ret2);
        }
    }
}
// Spot-checks `get_upper_bound`: all-0xFF prefixes have no finite bound,
// trailing 0xFF bytes are dropped before the increment.
#[test]
fn test_upper_bound() {
    assert_eq!(get_upper_bound(&[255]), Unbounded);
    assert_eq!(get_upper_bound(&[255, 255, 255, 255]), Unbounded);
    assert_eq!(get_upper_bound(&[0, 2]), Excluded(vec![0, 3]));
    assert_eq!(get_upper_bound(&[0, 255]), Excluded(vec![1]));
    assert_eq!(get_upper_bound(&[255, 0]), Excluded(vec![255, 1]));
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/metrics.rs | linera-views/src/metrics.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::LazyLock;
// Re-export for macros.
#[doc(hidden)]
pub use linera_base::prometheus_util::{self, exponential_bucket_latencies};
use prometheus::IntCounterVec;
/// Increments the metrics counter with the given name, with the struct and base key as labels.
pub fn increment_counter(counter: &LazyLock<IntCounterVec>, struct_name: &str, base_key: &[u8]) {
    // The base key is raw bytes, so it is hex-encoded for use as a label value.
    let encoded_key = hex::encode(base_key);
    counter
        .with_label_values(&[struct_name, encoded_key.as_str()])
        .inc();
}
/// The metric tracking the latency of the loading of views.
// Registered lazily on first use; no labels.
#[doc(hidden)]
pub static LOAD_VIEW_LATENCY: LazyLock<prometheus::HistogramVec> = LazyLock::new(|| {
    prometheus_util::register_histogram_vec(
        "load_view_latency",
        "Load view latency",
        &[],
        exponential_bucket_latencies(10.0),
    )
});
/// The metric counting how often a view is read from storage.
// Labeled by view type name and hex-encoded base key (see `increment_counter`).
#[doc(hidden)]
pub static LOAD_VIEW_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
    prometheus_util::register_int_counter_vec(
        "load_view",
        "The metric counting how often a view is read from storage",
        &["type", "base_key"],
    )
});
/// The metric counting how often a view is written from storage.
// Same label scheme as `LOAD_VIEW_COUNTER`.
#[doc(hidden)]
pub static SAVE_VIEW_COUNTER: LazyLock<IntCounterVec> = LazyLock::new(|| {
    prometheus_util::register_int_counter_vec(
        "save_view",
        "The metric counting how often a view is written from storage",
        &["type", "base_key"],
    )
});
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/context.rs | linera-views/src/context.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::DeletePrefixExpander,
memory::MemoryStore,
store::{KeyValueStoreError, ReadableKeyValueStore, WithError, WritableKeyValueStore},
views::MIN_VIEW_TAG,
};
/// A wrapper over `Vec<u8>` with functions for using it as a key prefix.
///
/// All keys derived through the methods below start with `bytes`.
#[derive(Default, Debug, Clone, derive_more::From)]
pub struct BaseKey {
    /// The byte value of the key prefix.
    #[from]
    pub bytes: Vec<u8>,
}
impl BaseKey {
/// Concatenates the base key and tag.
pub fn base_tag(&self, tag: u8) -> Vec<u8> {
assert!(tag >= MIN_VIEW_TAG, "tag should be at least MIN_VIEW_TAG");
let mut key = Vec::with_capacity(self.bytes.len() + 1);
key.extend_from_slice(&self.bytes);
key.push(tag);
key
}
/// Concatenates the base key, tag and index.
pub fn base_tag_index(&self, tag: u8, index: &[u8]) -> Vec<u8> {
assert!(tag >= MIN_VIEW_TAG, "tag should be at least MIN_VIEW_TAG");
let mut key = Vec::with_capacity(self.bytes.len() + 1 + index.len());
key.extend_from_slice(&self.bytes);
key.push(tag);
key.extend_from_slice(index);
key
}
/// Concatenates the base key and index.
pub fn base_index(&self, index: &[u8]) -> Vec<u8> {
let mut key = Vec::with_capacity(self.bytes.len() + index.len());
key.extend_from_slice(&self.bytes);
key.extend_from_slice(index);
key
}
/// Obtains the `Vec<u8>` key from the key by serialization and using the base key.
pub fn derive_key<I: Serialize>(&self, index: &I) -> Result<Vec<u8>, bcs::Error> {
let mut key = self.bytes.clone();
bcs::serialize_into(&mut key, index)?;
assert!(
key.len() > self.bytes.len(),
"Empty indices are not allowed"
);
Ok(key)
}
/// Obtains the `Vec<u8>` key from the key by serialization and using the `base_key`.
pub fn derive_tag_key<I: Serialize>(&self, tag: u8, index: &I) -> Result<Vec<u8>, bcs::Error> {
assert!(tag >= MIN_VIEW_TAG, "tag should be at least MIN_VIEW_TAG");
let mut key = self.base_tag(tag);
bcs::serialize_into(&mut key, index)?;
Ok(key)
}
/// Returns this key with a number of final bytes trimmed.
fn trimmed_key(&self, n: usize) -> Result<Vec<u8>, bcs::Error> {
if self.bytes.len() < n {
return Err(bcs::Error::Custom(format!(
"attempted to trim {} bytes from key of length {}",
n,
self.bytes.len()
)));
}
Ok(self.bytes[0..self.bytes.len() - n].to_vec())
}
/// Obtains the short `Vec<u8>` key from the key by serialization.
pub fn derive_short_key<I: Serialize + ?Sized>(index: &I) -> Result<Vec<u8>, bcs::Error> {
bcs::to_bytes(index)
}
/// Deserialize `bytes` into type `Item`.
pub fn deserialize_value<Item: DeserializeOwned>(bytes: &[u8]) -> Result<Item, bcs::Error> {
bcs::from_bytes(bytes)
}
}
/// The context in which a view is operated. Typically, this includes the client to
/// connect to the database and the address of the current entry.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait Context: Clone
where
    crate::ViewError: From<Self::Error>,
{
    /// The type of the key-value store used by this context.
    type Store: ReadableKeyValueStore + WritableKeyValueStore + WithError<Error = Self::Error>;
    /// User-provided data to be carried along.
    type Extra: Clone + linera_base::util::traits::AutoTraits;
    /// The type of errors that may be returned by operations on the `Store`, a
    /// convenience alias for `<Self::Store as WithError>::Error`.
    type Error: KeyValueStoreError;
    /// Getter for the store.
    fn store(&self) -> &Self::Store;
    /// Getter for the user-provided data.
    fn extra(&self) -> &Self::Extra;
    /// Getter for the address of the base key.
    fn base_key(&self) -> &BaseKey;
    /// Mutable getter for the address of the base key.
    fn base_key_mut(&mut self) -> &mut BaseKey;
    /// Obtains a similar [`Context`] implementation with a different base key.
    fn clone_with_base_key(&self, base_key: Vec<u8>) -> Self {
        let mut context = self.clone();
        context.base_key_mut().bytes = base_key;
        context
    }
    /// Obtains a similar [`Context`] implementation with the last `n` bytes of the base
    /// key trimmed.
    ///
    /// # Panics
    /// Panics if the base key is shorter than `n` bytes (the `trimmed_key` error
    /// is unwrapped).
    fn clone_with_trimmed_key(&self, n: usize) -> Self {
        let mut context = self.clone();
        let key = context.base_key().trimmed_key(n).unwrap();
        context.base_key_mut().bytes = key;
        context
    }
}
/// A context which can't be used to read or write data, only used for caching views.
///
/// The wrapped value is the base key reported by its [`Context`] implementation.
#[derive(Debug, Default, Clone)]
pub struct InactiveContext(pub BaseKey);
impl Context for InactiveContext {
    type Store = crate::store::inactive_store::InactiveStore;
    type Extra = ();
    type Error = crate::store::inactive_store::InactiveStoreError;
    fn store(&self) -> &Self::Store {
        // `InactiveStore` is a unit struct, so a reference to the constant
        // value suffices.
        &crate::store::inactive_store::InactiveStore
    }
    fn extra(&self) -> &Self::Extra {
        &()
    }
    fn base_key(&self) -> &BaseKey {
        &self.0
    }
    fn base_key_mut(&mut self) -> &mut BaseKey {
        &mut self.0
    }
}
/// Implementation of the [`Context`] trait on top of a DB client implementing
/// [`crate::store::KeyValueStore`].
#[derive(Debug, Default, Clone)]
pub struct ViewContext<E, S> {
    /// The DB client that is shared between views.
    store: S,
    /// The base key for the context.
    base_key: BaseKey,
    /// User-defined data attached to the view.
    extra: E,
}
impl<E, S> ViewContext<E, S>
where
    S: ReadableKeyValueStore + WritableKeyValueStore,
{
    /// Creates a context suitable for a root view, using the given store. If the
    /// journal's store is non-empty, it will be cleared first, before the context is
    /// returned.
    pub async fn create_root_context(store: S, extra: E) -> Result<Self, S::Error> {
        store.clear_journal().await?;
        // The root context uses the empty base key.
        Ok(Self::new_unchecked(store, Vec::new(), extra))
    }
}
impl<E, S> ViewContext<E, S> {
    /// Creates a context for the given base key, store, and an extra argument. NOTE: this
    /// constructor doesn't check the journal of the store. In doubt, use
    /// [`ViewContext::create_root_context`] instead.
    pub fn new_unchecked(store: S, base_key: Vec<u8>, extra: E) -> Self {
        Self {
            store,
            base_key: BaseKey { bytes: base_key },
            extra,
        }
    }
}
impl<E, S> Context for ViewContext<E, S>
where
    E: Clone + linera_base::util::traits::AutoTraits,
    S: ReadableKeyValueStore + WritableKeyValueStore + Clone,
    S::Error: From<bcs::Error> + Send + Sync + std::error::Error + 'static,
{
    type Extra = E;
    type Store = S;
    type Error = S::Error;
    fn store(&self) -> &Self::Store {
        &self.store
    }
    fn extra(&self) -> &E {
        &self.extra
    }
    fn base_key(&self) -> &BaseKey {
        &self.base_key
    }
    fn base_key_mut(&mut self) -> &mut BaseKey {
        &mut self.base_key
    }
}
/// An implementation of [`crate::context::Context`] that stores all values in memory,
/// backed by a [`MemoryStore`].
pub type MemoryContext<E> = ViewContext<E, MemoryStore>;
impl<E> MemoryContext<E> {
    /// Creates a [`Context`] instance in memory for testing.
    #[cfg(with_testing)]
    pub fn new_for_testing(extra: E) -> Self {
        Self {
            store: MemoryStore::new_for_testing(),
            // Tests start from the empty base key.
            base_key: BaseKey::default(),
            extra,
        }
    }
}
impl DeletePrefixExpander for MemoryContext<()> {
    type Error = crate::memory::MemoryStoreError;
    /// Expands a prefix deletion into the keys currently stored under the prefix.
    async fn expand_delete_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        self.store().find_keys_by_prefix(key_prefix).await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/lru_prefix_cache.rs | linera-views/src/lru_prefix_cache.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! An LRU cache that supports prefix-search APIs.
use std::collections::{btree_map::Entry, hash_map::RandomState, BTreeMap, BTreeSet};
use linked_hash_map::LinkedHashMap;
use serde::{Deserialize, Serialize};
use crate::common::get_key_range_for_prefix;
/// The parametrization of the cache.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct StorageCacheConfig {
    /// The maximum size of the cache, in bytes (keys size + value sizes).
    pub max_cache_size: usize,
    /// The maximum size of a value entry size, in bytes.
    pub max_value_entry_size: usize,
    /// The maximum size of a find-keys entry size, in bytes.
    pub max_find_keys_entry_size: usize,
    /// The maximum size of a find-key-values entry size, in bytes.
    pub max_find_key_values_entry_size: usize,
    /// The maximum number of entries in the cache.
    pub max_cache_entries: usize,
    /// The maximum size of cached values.
    // NOTE(review): the distinction between `max_value_entry_size` and
    // `max_cache_value_size` (and the analogous find-keys pairs) is not visible
    // from this file — confirm intended semantics at the use sites.
    pub max_cache_value_size: usize,
    /// The maximum size of cached `find_keys_by_prefix` results.
    pub max_cache_find_keys_size: usize,
    /// The maximum size of cached `find_key_values_by_prefix` results.
    pub max_cache_find_key_values_size: usize,
}
/// Identifies one entry of the cache in the LRU queue, by category and key.
#[derive(Eq, Hash, PartialEq, Debug)]
enum CacheKey {
    /// A cached single-value entry, identified by its key.
    Value(Vec<u8>),
    /// A cached find-keys entry, identified by its key prefix.
    FindKeys(Vec<u8>),
    /// A cached find-key-values entry, identified by its key prefix.
    FindKeyValues(Vec<u8>),
}
/// The cached knowledge about a single key.
enum ValueEntry {
    /// The key is known to be absent.
    DoesNotExist,
    /// The key is known to exist, but its value is not cached.
    Exists,
    /// The key exists and its value is cached.
    Value(Vec<u8>),
}

impl ValueEntry {
    /// Returns the number of cached payload bytes held by this entry.
    fn size(&self) -> usize {
        if let ValueEntry::Value(bytes) = self {
            bytes.len()
        } else {
            0
        }
    }
}
/// The set of keys cached for one `find_keys_by_prefix` query.
struct FindKeysEntry(BTreeSet<Vec<u8>>);

impl FindKeysEntry {
    /// Returns the total number of key bytes stored in this entry.
    fn size(&self) -> usize {
        self.0.iter().map(Vec::len).sum()
    }

    /// Returns the cached keys matching `key_prefix`, with the prefix stripped.
    fn get_keys_by_prefix(&self, key_prefix: Vec<u8>) -> Vec<Vec<u8>> {
        let strip_len = key_prefix.len();
        let range = get_key_range_for_prefix(key_prefix);
        self.0
            .range(range)
            .map(|full_key| full_key[strip_len..].to_vec())
            .collect()
    }

    /// Returns whether `key` is present in the cached set.
    fn contains_key(&self, key: &[u8]) -> bool {
        self.0.contains(key)
    }

    /// Records that `key` now exists (`is_some == true`) or has been removed.
    fn update_entry(&mut self, key: &[u8], is_some: bool) {
        if is_some {
            self.0.insert(key.to_vec());
        } else {
            self.0.remove(key);
        }
    }

    /// Removes all cached keys starting with `key_prefix`.
    fn delete_prefix(&mut self, key_prefix: &[u8]) {
        let doomed: Vec<Vec<u8>> = self
            .0
            .range(get_key_range_for_prefix(key_prefix.to_vec()))
            .cloned()
            .collect();
        for key in doomed {
            self.0.remove(&key);
        }
    }
}
/// The key-value map cached for one `find_key_values_by_prefix` query.
struct FindKeyValuesEntry(BTreeMap<Vec<u8>, Vec<u8>>);

impl FindKeyValuesEntry {
    /// Returns the total number of bytes (keys plus values) stored in this entry.
    fn size(&self) -> usize {
        self.0.iter().map(|(key, value)| key.len() + value.len()).sum()
    }

    /// Returns the cached keys matching `key_prefix`, with the prefix stripped.
    fn get_keys_by_prefix(&self, key_prefix: Vec<u8>) -> Vec<Vec<u8>> {
        let strip_len = key_prefix.len();
        let range = get_key_range_for_prefix(key_prefix);
        self.0
            .range(range)
            .map(|(full_key, _)| full_key[strip_len..].to_vec())
            .collect()
    }

    /// Returns the cached `(key, value)` pairs matching `key_prefix`, with the
    /// prefix stripped from the keys.
    fn get_find_key_values(&self, key_prefix: &[u8]) -> Vec<(Vec<u8>, Vec<u8>)> {
        let strip_len = key_prefix.len();
        let range = get_key_range_for_prefix(key_prefix.to_vec());
        self.0
            .range(range)
            .map(|(full_key, value)| (full_key[strip_len..].to_vec(), value.to_vec()))
            .collect()
    }

    /// Returns whether `key` is present in the cached map.
    fn contains_key(&self, key: &[u8]) -> bool {
        self.0.contains_key(key)
    }

    /// Returns the cached value for `key`, if any.
    fn get_read_value(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }

    /// Sets the value of `key` to `new_value`, removing the entry on `None`.
    fn update_entry(&mut self, key: &[u8], new_value: Option<Vec<u8>>) {
        match new_value {
            Some(value) => {
                self.0.insert(key.to_vec(), value);
            }
            None => {
                self.0.remove(key);
            }
        }
    }

    /// Removes all cached entries whose key starts with `key_prefix`.
    fn delete_prefix(&mut self, key_prefix: &[u8]) {
        let doomed: Vec<Vec<u8>> = self
            .0
            .range(get_key_range_for_prefix(key_prefix.to_vec()))
            .map(|(key, _)| key.clone())
            .collect();
        for key in doomed {
            self.0.remove(&key);
        }
    }
}
/// Stores the data for simple `read_values` queries.
///
/// This data structure is inspired by the crate `lru-cache` but was modified to support
/// range deletions.
pub(crate) struct LruPrefixCache {
    /// Cached single-value entries, keyed by the full key.
    value_map: BTreeMap<Vec<u8>, ValueEntry>,
    /// Cached find-keys entries, keyed by the queried prefix.
    find_keys_map: BTreeMap<Vec<u8>, FindKeysEntry>,
    /// Cached find-key-values entries, keyed by the queried prefix.
    find_key_values_map: BTreeMap<Vec<u8>, FindKeyValuesEntry>,
    /// LRU queue over all entries; maps each entry to its recorded size
    /// (see `move_cache_key_on_top`).
    queue: LinkedHashMap<CacheKey, usize, RandomState>,
    /// The size and entry-count limits of the cache.
    config: StorageCacheConfig,
    /// Total recorded size of all entries, in bytes.
    total_size: usize,
    /// Total recorded size of the value entries, in bytes.
    total_value_size: usize,
    /// Total recorded size of the find-keys entries, in bytes.
    total_find_keys_size: usize,
    /// Total recorded size of the find-key-values entries, in bytes.
    total_find_key_values_size: usize,
    /// Whether we have exclusive R/W access to the keys under the root key of the store.
    has_exclusive_access: bool,
}
impl LruPrefixCache {
/// Creates an `LruPrefixCache`.
pub(crate) fn new(config: StorageCacheConfig, has_exclusive_access: bool) -> Self {
Self {
value_map: BTreeMap::new(),
find_keys_map: BTreeMap::new(),
find_key_values_map: BTreeMap::new(),
queue: LinkedHashMap::new(),
config,
total_size: 0,
total_value_size: 0,
total_find_keys_size: 0,
total_find_key_values_size: 0,
has_exclusive_access,
}
}
    /// Returns whether this cache has exclusive read/write access to the keys
    /// under the root key of the store.
    pub(crate) fn has_exclusive_access(&self) -> bool {
        self.has_exclusive_access
    }
    /// A used key needs to be put on top.
    ///
    /// Re-inserts the entry so that it becomes the most recently used one in
    /// the LRU queue; the stored size is preserved. Panics if the key is absent.
    fn move_cache_key_on_top(&mut self, cache_key: CacheKey) {
        let size = self
            .queue
            .remove(&cache_key)
            .expect("cache_key should be present");
        self.queue.insert(cache_key, size);
    }
/// Update sizes by decreasing and increasing.
fn update_sizes(&mut self, cache_key: &CacheKey, old_size: usize, new_size: usize) {
use std::cmp::Ordering;
match new_size.cmp(&old_size) {
Ordering::Greater => {
let increase_size = new_size - old_size;
self.total_size += increase_size;
match cache_key {
CacheKey::Value(_) => {
self.total_value_size += increase_size;
}
CacheKey::FindKeys(_) => {
self.total_find_keys_size += increase_size;
}
CacheKey::FindKeyValues(_) => {
self.total_find_key_values_size += increase_size;
}
}
}
Ordering::Less => {
let decrease_size = old_size - new_size;
self.total_size -= decrease_size;
match cache_key {
CacheKey::Value(_) => {
self.total_value_size -= decrease_size;
}
CacheKey::FindKeys(_) => {
self.total_find_keys_size -= decrease_size;
}
CacheKey::FindKeyValues(_) => {
self.total_find_key_values_size -= decrease_size;
}
}
}
Ordering::Equal => {
// Nothing to be done
}
}
}
    /// Increase the sizes of the keys.
    ///
    /// Adds `size` bytes to the global and per-kind counters for `cache_key`.
    fn increase_sizes(&mut self, cache_key: &CacheKey, size: usize) {
        self.update_sizes(cache_key, 0, size);
    }
    /// Decrease the sizes of the keys.
    ///
    /// Subtracts `size` bytes from the global and per-kind counters for `cache_key`.
    fn decrease_sizes(&mut self, cache_key: &CacheKey, size: usize) {
        self.update_sizes(cache_key, size, 0);
    }
/// Removes the `CacheKey` from the maps.
fn remove_cache_key_from_map(&mut self, cache_key: &CacheKey) {
match cache_key {
CacheKey::Value(key) => {
assert!(self.value_map.remove(key).is_some());
}
CacheKey::FindKeys(key) => {
assert!(self.find_keys_map.remove(key).is_some());
}
CacheKey::FindKeyValues(key) => {
assert!(self.find_key_values_map.remove(key).is_some());
}
}
}
    /// Remove an entry from the queue and update the sizes.
    ///
    /// Panics if the entry is absent from the queue. The entry in the
    /// corresponding map must be removed separately by the caller.
    fn remove_cache_key(&mut self, cache_key: &CacheKey) {
        let size = self
            .queue
            .remove(cache_key)
            .expect("cache_key should be present");
        self.decrease_sizes(cache_key, size);
    }
/// Remove an entry from the queue if it exists.
fn remove_cache_key_if_exists(&mut self, cache_key: &CacheKey) {
let size = self.queue.remove(cache_key);
if let Some(size) = size {
self.decrease_sizes(cache_key, size);
self.remove_cache_key_from_map(cache_key);
}
}
    /// Update the cache size to the new size without changing position.
    ///
    /// Overwrites the size stored in the queue and adjusts the global and
    /// per-kind counters accordingly. Panics if the key is absent.
    fn update_cache_key_sizes(&mut self, cache_key: &CacheKey, new_size: usize) {
        let size = self
            .queue
            .get_mut(cache_key)
            .expect("cache_key should be present");
        let old_size = *size;
        *size = new_size;
        self.update_sizes(cache_key, old_size, new_size);
    }
    /// Insert a cache_key into the queue and update sizes.
    ///
    /// Panics if the key was already present in the queue.
    fn insert_cache_key(&mut self, cache_key: CacheKey, size: usize) {
        self.increase_sizes(&cache_key, size);
        assert!(self.queue.insert(cache_key, size).is_none());
    }
/// If the FindKeys map contains a prefix that is a prefix of key in argument,
/// then returns it and the corresponding FindKeys. Otherwise `None`.
///
/// The algorithm of this function is tested in `test_lower_bounds`.
/// However, due to its importance we provide here a proof of correctness.
/// What makes this functionality work is that the set of keys of the `find_keys_map`
/// is prefix free.
///
/// Claim: `self.get_existing_find_keys_entry(key)` finds a prefix of key in `self.find_keys_map.keys()` iff such prefix exists.
///
/// Note that when it exists, such a prefix is unique because `self.find_keys_map.keys()` is prefix-free.
///
/// Proof: For the map `find_keys_map` in question, let us define `set` to be the `BTreeSet` of
/// the keys of the map.
/// Then define `S = { s in set | s <= key }` for the lexicographic ordering.
/// First of all the expression `self.find_keys_map.range(..=key.to_vec())` corresponds
/// to S and `self.find_keys_map.range(..=key.to_vec()).next_back()` is
/// * None if S is empty.
/// * Some(M) with M the maximum of S if S is non-empty.
///
/// First, if `self.get_existing_find_keys_entry(key) == Some(stored_key)` then given the code,
/// clearly `stored_key` is a prefix of key.
///
/// Conversely, let us assume that `self.find_keys_map.keys()` contains a certain prefix `p` of key.
/// Because in particular `p <= key`, we have `self.find_keys_map.range(..=key.to_vec()).next_back() == Some((stored_key, _))`
/// for some value `stored_key` such that `p <= stored_key <= key`.
///
/// Next, let us prove that `p` is a prefix of `stored_key`. (This will entail `stored_key == p` due to
/// the prefix-free property of `self.find_keys_map`, therefore the algorithm's answer is correct.)
///
/// By contradiction. Let `w` be the longest common prefix between `p` and `stored_key`. If `p = w p2` and
/// `stored_key = w s2` with `p2` non-empty, then `p <= stored_key` implies `s2 > p2`. But `p` is also a
/// prefix of `key` therefore `stored_key > key`, contradiction.
fn get_existing_find_keys_entry(&self, key: &[u8]) -> Option<(&Vec<u8>, &FindKeysEntry)> {
match self.find_keys_map.range(..=key.to_vec()).next_back() {
None => None,
Some((stored_key, value)) => {
if key.starts_with(stored_key) {
Some((stored_key, value))
} else {
None
}
}
}
}
/// Same as above but returns a mutable reference.
fn get_existing_keys_entry_mut(
&mut self,
key: &[u8],
) -> Option<(&Vec<u8>, &mut FindKeysEntry)> {
match self.find_keys_map.range_mut(..=key.to_vec()).next_back() {
None => None,
Some((stored_key, value)) => {
if key.starts_with(stored_key) {
Some((stored_key, value))
} else {
None
}
}
}
}
/// If the FindKeyValues map contain a prefix that is a prefix of key in argument,
/// then returns it and the corresponding FindKeyValues. Otherwise `None`.
fn get_existing_find_key_values_entry(
&self,
key: &[u8],
) -> Option<(&Vec<u8>, &FindKeyValuesEntry)> {
match self.find_key_values_map.range(..=key.to_vec()).next_back() {
None => None,
Some((stored_key, value)) => {
if key.starts_with(stored_key) {
Some((stored_key, value))
} else {
None
}
}
}
}
/// Same as above but returns a mutable reference.
fn get_existing_find_key_values_entry_mut(
&mut self,
key: &[u8],
) -> Option<(&Vec<u8>, &mut FindKeyValuesEntry)> {
match self
.find_key_values_map
.range_mut(..=key.to_vec())
.next_back()
{
None => None,
Some((stored_key, value)) => {
if key.starts_with(stored_key) {
Some((stored_key, value))
} else {
None
}
}
}
}
/// Trim value cache so that it fits within bounds.
fn trim_value_cache(&mut self) {
let mut keys = Vec::new();
let mut control_size = self.total_value_size;
let mut iter = self.queue.iter();
loop {
let value = iter.next();
let Some((cache_key, size)) = value else {
break;
};
if control_size < self.config.max_cache_value_size {
break;
}
if let CacheKey::Value(key) = cache_key {
control_size -= size;
keys.push(key.to_vec());
}
}
for key in keys {
assert!(self.value_map.remove(&key).is_some());
let cache_key = CacheKey::Value(key);
self.remove_cache_key(&cache_key);
}
}
/// Trim `find_keys_by_prefix` cache so that it fits within bounds.
fn trim_find_keys_cache(&mut self) {
let mut prefixes = Vec::new();
let mut control_size = self.total_find_keys_size;
let mut iter = self.queue.iter();
loop {
let value = iter.next();
let Some((cache_key, size)) = value else {
break;
};
if control_size < self.config.max_cache_find_keys_size {
break;
}
if let CacheKey::FindKeys(prefix) = cache_key {
control_size -= size;
prefixes.push(prefix.to_vec());
}
}
for prefix in prefixes {
assert!(self.find_keys_map.remove(&prefix).is_some());
let cache_key = CacheKey::FindKeys(prefix);
self.remove_cache_key(&cache_key);
}
}
/// Trim `find_key_values_by_prefix` cache so that it fits within bounds.
fn trim_find_key_values_cache(&mut self) {
let mut prefixes = Vec::new();
let mut control_size = self.total_find_key_values_size;
let mut iter = self.queue.iter();
loop {
let value = iter.next();
let Some((cache_key, size)) = value else {
break;
};
if control_size < self.config.max_cache_find_key_values_size {
break;
}
if let CacheKey::FindKeyValues(prefix) = cache_key {
control_size -= size;
prefixes.push(prefix.to_vec());
}
}
for prefix in prefixes {
assert!(self.find_key_values_map.remove(&prefix).is_some());
let cache_key = CacheKey::FindKeyValues(prefix);
self.remove_cache_key(&cache_key);
}
}
    /// Trim the cache so that it fits within the constraints.
    ///
    /// Evicts least-recently-used entries of any kind while either the total
    /// size or the entry count exceeds its configured bound.
    fn trim_cache(&mut self) {
        while self.total_size >= self.config.max_cache_size
            || self.queue.len() >= self.config.max_cache_entries
        {
            let Some((cache_key, size)) = self.queue.pop_front() else {
                break;
            };
            self.decrease_sizes(&cache_key, size);
            self.remove_cache_key_from_map(&cache_key);
        }
    }
    /// Inserts an entry into the cache.
    ///
    /// Oversized entries — and `DoesNotExist` entries when we lack exclusive
    /// access (another client could create the key concurrently) — are not
    /// cached; any stale cached entry for the key is dropped instead.
    fn insert_value(&mut self, key: &[u8], cache_entry: ValueEntry) {
        if self.config.max_value_entry_size == 0 {
            // If the maximum size of an entry is zero, then we do not insert
            return;
        }
        let size = key.len() + cache_entry.size();
        if (matches!(cache_entry, ValueEntry::DoesNotExist) && !self.has_exclusive_access)
            || size > self.config.max_value_entry_size
        {
            // Not cacheable: make sure no stale entry survives for this key.
            if self.value_map.remove(key).is_some() {
                let cache_key = CacheKey::Value(key.to_vec());
                self.remove_cache_key(&cache_key);
            };
            return;
        }
        match self.value_map.entry(key.to_vec()) {
            Entry::Occupied(mut entry) => {
                entry.insert(cache_entry);
                // Put it on first position for LRU with the new size
                let cache_key = CacheKey::Value(key.to_vec());
                self.remove_cache_key(&cache_key);
                self.insert_cache_key(cache_key, size);
            }
            Entry::Vacant(entry) => {
                entry.insert(cache_entry);
                let cache_key = CacheKey::Value(key.to_vec());
                self.insert_cache_key(cache_key, size);
            }
        }
        self.trim_value_cache();
        self.trim_cache();
    }
    /// Puts a key/value in the cache.
    ///
    /// With exclusive access, a cached `FindKeys` or `FindKeyValues` entry
    /// whose prefix covers `key` is updated in place so it stays consistent
    /// with the write; otherwise the value is cached directly.
    pub(crate) fn put_key_value(&mut self, key: &[u8], value: &[u8]) {
        if self.has_exclusive_access {
            let lower_bound = self.get_existing_keys_entry_mut(key);
            if let Some((lower_bound, cache_entry)) = lower_bound {
                // Record the key (relative to the covering prefix) as present.
                let reduced_key = &key[lower_bound.len()..];
                cache_entry.update_entry(reduced_key, true);
                let cache_key = CacheKey::FindKeys(lower_bound.to_vec());
                let new_size = lower_bound.len() + cache_entry.size();
                self.update_cache_key_sizes(&cache_key, new_size);
            }
            match self.get_existing_find_key_values_entry_mut(key) {
                Some((lower_bound, cache_entry)) => {
                    // The covering FindKeyValues entry now answers reads for
                    // this key, so a standalone value entry is redundant.
                    let reduced_key = &key[lower_bound.len()..];
                    cache_entry.update_entry(reduced_key, Some(value.to_vec()));
                    let cache_key = CacheKey::FindKeyValues(lower_bound.to_vec());
                    let new_size = lower_bound.len() + cache_entry.size();
                    self.update_cache_key_sizes(&cache_key, new_size);
                    let cache_key = CacheKey::Value(key.to_vec());
                    self.remove_cache_key_if_exists(&cache_key);
                }
                None => {
                    let cache_entry = ValueEntry::Value(value.to_vec());
                    self.insert_value(key, cache_entry);
                }
            }
        } else {
            let cache_entry = ValueEntry::Value(value.to_vec());
            self.insert_value(key, cache_entry);
        }
    }
    /// Deletes a key from the cache.
    ///
    /// With exclusive access, covering `FindKeys`/`FindKeyValues` entries are
    /// updated in place; only when no such entry covers `key` is the deletion
    /// recorded as a `DoesNotExist` value entry.
    pub(crate) fn delete_key(&mut self, key: &[u8]) {
        if self.has_exclusive_access {
            let lower_bound = self.get_existing_keys_entry_mut(key);
            let mut matching = false; // If matching, no need to insert in the value cache
            if let Some((lower_bound, cache_entry)) = lower_bound {
                let reduced_key = &key[lower_bound.len()..];
                cache_entry.update_entry(reduced_key, false);
                let cache_key = CacheKey::FindKeys(lower_bound.to_vec());
                let new_size = lower_bound.len() + cache_entry.size();
                self.update_cache_key_sizes(&cache_key, new_size);
                matching = true;
            }
            let lower_bound = self.get_existing_find_key_values_entry_mut(key);
            if let Some((lower_bound, cache_entry)) = lower_bound {
                let reduced_key = &key[lower_bound.len()..];
                cache_entry.update_entry(reduced_key, None);
                let cache_key = CacheKey::FindKeyValues(lower_bound.to_vec());
                let new_size = lower_bound.len() + cache_entry.size();
                self.update_cache_key_sizes(&cache_key, new_size);
                matching = true;
            }
            if !matching {
                // No covering prefix entry: cache the absence explicitly.
                let cache_entry = ValueEntry::DoesNotExist;
                self.insert_value(key, cache_entry);
            } else {
                // The covering entry already encodes the deletion; drop any
                // stale standalone value entry.
                let cache_key = CacheKey::Value(key.to_vec());
                self.remove_cache_key_if_exists(&cache_key);
            }
        } else {
            let cache_key = CacheKey::Value(key.to_vec());
            self.remove_cache_key_if_exists(&cache_key);
        }
    }
/// Inserts a read_value result into the cache.
pub(crate) fn insert_read_value(&mut self, key: &[u8], value: &Option<Vec<u8>>) {
// We do not check for the find-key-values to update. Because we would have
// matched if existing.
let cache_entry = match value {
None => ValueEntry::DoesNotExist,
Some(vec) => ValueEntry::Value(vec.to_vec()),
};
self.insert_value(key, cache_entry)
}
/// Inserts a contains_key result into the cache.
pub(crate) fn insert_contains_key(&mut self, key: &[u8], result: bool) {
// We do not check for the find-keys / find-key-values to update.
// Because we would have matched if existing.
let cache_entry = if result {
ValueEntry::Exists
} else {
ValueEntry::DoesNotExist
};
self.insert_value(key, cache_entry)
}
    /// Inserts the result of `find_keys_by_prefix` in the cache.
    ///
    /// Entries (of any kind) that the new prefix covers become redundant and
    /// are evicted before the new entry is inserted.
    pub(crate) fn insert_find_keys(&mut self, key_prefix: Vec<u8>, keys: &[Vec<u8>]) {
        if self.config.max_find_keys_entry_size == 0 {
            // zero max size, exit from the start
            return;
        }
        let size = key_prefix.len() + keys.iter().map(Vec::len).sum::<usize>();
        if size > self.config.max_find_keys_entry_size {
            // The entry is too large, we do not insert it,
            return;
        }
        let find_entry = FindKeysEntry(keys.iter().cloned().collect());
        // Clearing up the FindKeys entries that are covered by the new FindKeys.
        let keys = self
            .find_keys_map
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .map(|(x, _)| x.clone())
            .collect::<Vec<_>>();
        for key in keys {
            assert!(self.find_keys_map.remove(&key).is_some());
            let cache_key = CacheKey::FindKeys(key);
            self.remove_cache_key(&cache_key);
        }
        // Clearing up the value entries as they are covered by the new FindKeys.
        // That is the `Exists` and `DoesNotExist`. The Value entries are not covered by FindKeys.
        let keys = self
            .value_map
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .filter_map(|(key, value)| match value {
                ValueEntry::DoesNotExist => Some(key.to_vec()),
                ValueEntry::Exists => Some(key.to_vec()),
                ValueEntry::Value(_) => None,
            })
            .collect::<Vec<_>>();
        for key in keys {
            assert!(self.value_map.remove(&key).is_some());
            let cache_key = CacheKey::Value(key);
            self.remove_cache_key(&cache_key);
        }
        let cache_key = CacheKey::FindKeys(key_prefix.clone());
        // The entry has to be missing otherwise, it would have been found
        assert!(self.find_keys_map.insert(key_prefix, find_entry).is_none());
        self.insert_cache_key(cache_key, size);
        self.trim_find_keys_cache();
        self.trim_cache();
    }
    /// Inserts the result of `find_key_values_by_prefix` in the cache.
    ///
    /// All cached entries (FindKeys, FindKeyValues, and single values) covered
    /// by the new prefix become redundant and are evicted first.
    pub(crate) fn insert_find_key_values(
        &mut self,
        key_prefix: Vec<u8>,
        key_values: &[(Vec<u8>, Vec<u8>)],
    ) {
        if self.config.max_find_key_values_entry_size == 0 {
            // Zero, maximum size, exit from the start
            return;
        }
        let size = key_prefix.len()
            + key_values
                .iter()
                .map(|(k, v)| k.len() + v.len())
                .sum::<usize>();
        if size > self.config.max_find_key_values_entry_size {
            // The entry is too large, we do not insert it,
            return;
        }
        let find_entry = FindKeyValuesEntry(
            key_values
                .iter()
                .map(|(k, v)| (k.to_vec(), v.to_vec()))
                .collect(),
        );
        // Clearing up the FindKeys entries that are covered by the new FindKeyValues.
        let prefixes = self
            .find_keys_map
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .map(|(x, _)| x.clone())
            .collect::<Vec<_>>();
        for prefix in prefixes {
            assert!(self.find_keys_map.remove(&prefix).is_some());
            let cache_key = CacheKey::FindKeys(prefix);
            self.remove_cache_key(&cache_key);
        }
        // Clearing up the FindKeyValues entries that are covered by the new FindKeyValues.
        let prefixes = self
            .find_key_values_map
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .map(|(x, _)| x.clone())
            .collect::<Vec<_>>();
        for prefix in prefixes {
            assert!(self.find_key_values_map.remove(&prefix).is_some());
            let cache_key = CacheKey::FindKeyValues(prefix);
            self.remove_cache_key(&cache_key);
        }
        // Clearing up the value entries as they are covered by the new FindKeyValues.
        let keys = self
            .value_map
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .map(|(x, _)| x.clone())
            .collect::<Vec<_>>();
        for key in keys {
            assert!(self.value_map.remove(&key).is_some());
            let cache_key = CacheKey::Value(key);
            self.remove_cache_key(&cache_key);
        }
        let cache_key = CacheKey::FindKeyValues(key_prefix.clone());
        // The entry has to be missing otherwise, it would have been found
        assert!(self
            .find_key_values_map
            .insert(key_prefix, find_entry)
            .is_none());
        self.insert_cache_key(cache_key, size);
        self.trim_find_key_values_cache();
        self.trim_cache();
    }
    /// Marks cached keys that match the prefix as deleted. Importantly, this does not
    /// create new entries in the cache.
    pub(crate) fn delete_prefix(&mut self, key_prefix: &[u8]) {
        // When using delete_prefix, we do not insert `ValueEntry::DoesNotExist`
        // and instead drop the entries from the value-cache
        // This is because:
        // * In non-exclusive access, this could be added by another user.
        // * In exclusive access, we do this via the `FindKeyValues`.
        let mut keys = Vec::new();
        for (key, _) in self
            .value_map
            .range(get_key_range_for_prefix(key_prefix.to_vec()))
        {
            keys.push(key.to_vec());
        }
        for key in keys {
            assert!(self.value_map.remove(&key).is_some());
            let cache_key = CacheKey::Value(key);
            self.remove_cache_key(&cache_key);
        }
        if self.has_exclusive_access {
            // Remove the FindKeys that are covered by key_prefix.
            let mut prefixes = Vec::new();
            for (prefix, _) in self
                .find_keys_map
                .range(get_key_range_for_prefix(key_prefix.to_vec()))
            {
                prefixes.push(prefix.to_vec());
            }
            for prefix in prefixes {
                assert!(self.find_keys_map.remove(&prefix).is_some());
                let cache_key = CacheKey::FindKeys(prefix);
                self.remove_cache_key(&cache_key);
            }
            // Remove the FindKeyValues that are covered by key_prefix.
            let mut prefixes = Vec::new();
            for (prefix, _) in self
                .find_key_values_map
                .range(get_key_range_for_prefix(key_prefix.to_vec()))
            {
                prefixes.push(prefix.to_vec());
            }
            for prefix in prefixes {
                assert!(self.find_key_values_map.remove(&prefix).is_some());
                let cache_key = CacheKey::FindKeyValues(prefix);
                self.remove_cache_key(&cache_key);
            }
            // Finding a containing FindKeys. If existing update.
            let lower_bound = self.get_existing_keys_entry_mut(key_prefix);
            let result = if let Some((lower_bound, find_entry)) = lower_bound {
                // Delete the keys in the entry
                let key_prefix_red = &key_prefix[lower_bound.len()..];
                find_entry.delete_prefix(key_prefix_red);
                let new_cache_size = find_entry.size() + lower_bound.len();
                Some((new_cache_size, lower_bound.clone()))
            } else {
                None
            };
            if let Some((new_cache_size, lower_bound)) = result {
                // Update the size without changing the position.
                let cache_key = CacheKey::FindKeys(lower_bound.clone());
                self.update_cache_key_sizes(&cache_key, new_cache_size);
            }
            // Finding a containing FindKeyValues. If existing update, if not insert.
            let lower_bound = self.get_existing_find_key_values_entry_mut(key_prefix);
            let result = if let Some((lower_bound, find_entry)) = lower_bound {
                // Delete the keys (or key/values) in the entry
                let key_prefix_red = &key_prefix[lower_bound.len()..];
                find_entry.delete_prefix(key_prefix_red);
                let new_cache_size = find_entry.size() + lower_bound.len();
                Some((new_cache_size, lower_bound.clone()))
            } else {
                None
            };
            if let Some((new_cache_size, lower_bound)) = result {
                // Update the size without changing the position.
                let cache_key = CacheKey::FindKeyValues(lower_bound.clone());
                self.update_cache_key_sizes(&cache_key, new_cache_size);
            } else {
                // There is no lower bound. Therefore we can insert
                // the deleted prefix as a FindKeyValues.
                let size = key_prefix.len();
                let cache_key = CacheKey::FindKeyValues(key_prefix.to_vec());
                let find_key_values_entry = FindKeyValuesEntry(BTreeMap::new());
                self.find_key_values_map
                    .insert(key_prefix.to_vec(), find_key_values_entry);
                self.insert_cache_key(cache_key, size);
            }
        }
    }
/// Returns the cached value, or `Some(None)` if the entry does not exist in the
/// database. If `None` is returned, the entry might exist in the database but is
/// not in the cache.
pub(crate) fn query_read_value(&mut self, key: &[u8]) -> Option<Option<Vec<u8>>> {
// First, query the value map
let result = match self.value_map.get(key) {
None => None,
Some(entry) => match entry {
ValueEntry::DoesNotExist => Some(None),
ValueEntry::Exists => None,
ValueEntry::Value(vec) => Some(Some(vec.clone())),
},
};
if result.is_some() {
let cache_key = CacheKey::Value(key.to_vec());
self.move_cache_key_on_top(cache_key);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/graphql.rs | linera-views/src/graphql.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::borrow::Cow;
// TODO(#1858): come up with a better name-mangling scheme
/// Mangle a GraphQL type into something that can be interpolated into a GraphQL type name.
///
/// `!` and `[` are dropped; `]` becomes the `_Array` suffix, so e.g. `[Int!]`
/// turns into `Int_Array`.
pub(crate) fn mangle(type_name: impl AsRef<str>) -> String {
    type_name
        .as_ref()
        .chars()
        .fold(String::new(), |mut acc, c| {
            match c {
                '!' | '[' => {}
                ']' => acc.push_str("_Array"),
                other => acc.push(other),
            }
            acc
        })
}
/// Returns a 32-bit hash of the fully-qualified name of `T`, used to
/// disambiguate mangled GraphQL type names.
pub(crate) fn hash_name<T: ?Sized>() -> u32 {
    use sha3::Digest as _;
    let digest = sha3::Sha3_256::new_with_prefix(std::any::type_name::<T>()).finalize();
    let first_four: [u8; 4] = digest[..4].try_into().unwrap();
    u32::from_le_bytes(first_four)
}
/// A GraphQL-visible map item, complete with key.
// NOTE: fields intentionally carry no `///` docs — on a `SimpleObject` derive
// they would become GraphQL field descriptions and change the schema output.
#[derive(async_graphql::SimpleObject)]
#[graphql(name_type)]
pub struct Entry<
    K: async_graphql::OutputType + Send + Sync,
    V: async_graphql::OutputType + Send + Sync,
> {
    // The map key.
    pub key: K,
    // The value stored under `key`.
    pub value: V,
}
impl<K: async_graphql::OutputType, V: async_graphql::OutputType> async_graphql::TypeName
for Entry<K, V>
{
fn type_name() -> Cow<'static, str> {
format!(
"Entry_{}_{}_{:08x}",
mangle(K::type_name()),
mangle(V::type_name()),
hash_name::<(K, V)>(),
)
.into()
}
}
/// A struct to use to filter map values via GraphQL.
pub struct MapFilters<K: async_graphql::InputType> {
    /// If present, restricts the query to exactly these keys.
    pub keys: Option<Vec<K>>,
}
/// The inputs given when inspecting a map value via GraphQL.
pub struct MapInput<K: async_graphql::InputType> {
    /// Optional filters narrowing which map entries are returned.
    pub filters: Option<MapFilters<K>>,
}
impl<K: async_graphql::InputType> async_graphql::InputType for MapFilters<K> {
    type RawValueType = Self;
    /// Mangled, per-instantiation GraphQL type name (hash avoids collisions).
    fn type_name() -> Cow<'static, str> {
        Cow::Owned(format!(
            "MapFilters_{}_{:08x}",
            mangle(K::type_name()),
            hash_name::<K>(),
        ))
    }
    /// Registers this input object (single optional `keys` field) in the schema registry.
    fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
        registry.create_input_type::<Self, _>(
            async_graphql::registry::MetaTypeId::InputObject,
            |registry| async_graphql::registry::MetaType::InputObject {
                name: Self::type_name().into_owned(),
                description: None,
                input_fields: [(
                    "keys".to_owned(),
                    async_graphql::registry::MetaInputValue {
                        name: "keys".to_string(),
                        description: None,
                        ty: Option::<Vec<K>>::create_type_info(registry),
                        deprecation: Default::default(),
                        default_value: None,
                        visible: None,
                        inaccessible: false,
                        tags: Vec::new(),
                        is_secret: false,
                        directive_invocations: Vec::new(),
                    },
                )]
                .into_iter()
                .collect(),
                visible: None,
                inaccessible: false,
                tags: Vec::new(),
                rust_typename: Some(std::any::type_name::<Self>()),
                oneof: false,
                directive_invocations: Default::default(),
            },
        )
    }
    /// Parses a GraphQL object value of shape `{ keys: [...] }`.
    fn parse(value: Option<async_graphql::Value>) -> async_graphql::InputValueResult<Self> {
        let Some(async_graphql::Value::Object(obj)) = value else {
            return Err(async_graphql::InputValueError::expected_type(
                value.unwrap_or_default(),
            ));
        };
        Ok(Self {
            keys: async_graphql::InputType::parse(obj.get("keys").cloned())
                .map_err(async_graphql::InputValueError::propagate)?,
        })
    }
    /// Serializes back to a GraphQL object value.
    fn to_value(&self) -> async_graphql::Value {
        let mut map = async_graphql::indexmap::IndexMap::new();
        map.insert(async_graphql::Name::new("keys"), self.keys.to_value());
        async_graphql::Value::Object(map)
    }
    fn federation_fields() -> Option<String> {
        Some(format!(
            // for each field
            "{{ {0} }}",
            if let Some(fields) = Vec::<K>::federation_fields() {
                format!("{0} {1}", "keys", fields)
            } else {
                "keys".to_string()
            }
        ))
    }
    fn as_raw_value(&self) -> Option<&Self::RawValueType> {
        Some(self)
    }
}
impl<K: async_graphql::InputType> async_graphql::InputType for MapInput<K> {
    type RawValueType = Self;
    /// Mangled, per-instantiation GraphQL type name (hash avoids collisions).
    fn type_name() -> Cow<'static, str> {
        Cow::Owned(format!(
            "MapInput_{}_{:08x}",
            mangle(K::type_name()),
            hash_name::<K>(),
        ))
    }
    /// Registers this input object (single optional `filters` field) in the schema registry.
    fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
        registry.create_input_type::<Self, _>(
            async_graphql::registry::MetaTypeId::InputObject,
            |registry| async_graphql::registry::MetaType::InputObject {
                name: Self::type_name().into_owned(),
                description: None,
                input_fields: [(
                    "filters".to_owned(),
                    async_graphql::registry::MetaInputValue {
                        name: "filters".to_string(),
                        description: None,
                        ty: Option::<MapFilters<K>>::create_type_info(registry),
                        deprecation: Default::default(),
                        default_value: None,
                        visible: None,
                        inaccessible: false,
                        tags: Vec::new(),
                        is_secret: false,
                        directive_invocations: Vec::new(),
                    },
                )]
                .into_iter()
                .collect(),
                visible: None,
                inaccessible: false,
                tags: Vec::new(),
                rust_typename: Some(std::any::type_name::<Self>()),
                oneof: false,
                directive_invocations: Default::default(),
            },
        )
    }
    /// Parses a GraphQL object value of shape `{ filters: {...} }`.
    fn parse(value: Option<async_graphql::Value>) -> async_graphql::InputValueResult<Self> {
        let Some(async_graphql::Value::Object(obj)) = value else {
            return Err(async_graphql::InputValueError::expected_type(
                value.unwrap_or_default(),
            ));
        };
        Ok(Self {
            filters: async_graphql::InputType::parse(obj.get("filters").cloned())
                .map_err(async_graphql::InputValueError::propagate)?,
        })
    }
    /// Serializes back to a GraphQL object value.
    fn to_value(&self) -> async_graphql::Value {
        let mut map = async_graphql::indexmap::IndexMap::new();
        map.insert(async_graphql::Name::new("filters"), self.filters.to_value());
        async_graphql::Value::Object(map)
    }
    fn federation_fields() -> Option<String> {
        Some(format!(
            // for each field
            "{{ {0} }}",
            if let Some(fields) = MapFilters::<K>::federation_fields() {
                format!("{0} {1}", "filters", fields)
            } else {
                "filters".to_string()
            }
        ))
    }
    fn as_raw_value(&self) -> Option<&Self::RawValueType> {
        Some(self)
    }
}
/// Builds the GraphQL error reported when a requested key is absent from a collection.
pub(crate) fn missing_key_error(key: &impl std::fmt::Debug) -> async_graphql::Error {
    let message = format!("The key={:?} is missing in collection", key);
    async_graphql::Error {
        message,
        source: None,
        extensions: None,
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/collection_view.rs | linera-views/src/views/collection_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
borrow::Borrow,
collections::{btree_map, BTreeMap},
io::Write,
marker::PhantomData,
mem,
ops::Deref,
};
use allocative::{Allocative, Key, Visitor};
use async_lock::{RwLock, RwLockReadGuard};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{CustomSerialize, HasherOutput, SliceExt as _, Update},
context::{BaseKey, Context},
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// The runtime of hash computation for `CollectionView`, as a latency histogram.
    pub static COLLECTION_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "collection_view_hash_runtime",
            "CollectionView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}
/// A view that supports accessing a collection of views of the same kind, indexed by a
/// `Vec<u8>`, one subview at a time.
#[derive(Debug)]
pub struct ByteCollectionView<C, W> {
    /// The view context.
    context: C,
    /// Whether to clear storage before applying updates.
    delete_storage_first: bool,
    /// Entries that may have staged changes. `Update::Removed` marks deletion;
    /// `Update::Set` holds a loaded (possibly modified) subview.
    updates: RwLock<BTreeMap<Vec<u8>, Update<W>>>,
}
impl<C, W: Allocative> Allocative for ByteCollectionView<C, W> {
    /// Reports this view's memory usage; skips the updates map if the lock is
    /// currently held for writing.
    fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
        let mut inner = visitor.enter(Key::new("ByteCollectionView"), mem::size_of::<Self>());
        if let Some(guard) = self.updates.try_read() {
            guard.deref().visit(&mut inner);
        }
        inner.exit();
    }
}
/// A read-only accessor for a particular subview in a [`CollectionView`].
pub enum ReadGuardedView<'a, W> {
    /// The view is loaded in the updates
    Loaded {
        /// The guard for the updates.
        updates: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Update<W>>>,
        /// The key in question.
        short_key: Vec<u8>,
    },
    /// The view is not loaded in the updates
    NotLoaded {
        /// The guard for the updates. It is needed so that it prevents
        /// opening the view as write separately.
        _updates: RwLockReadGuard<'a, BTreeMap<Vec<u8>, Update<W>>>,
        /// The view obtained from the storage
        view: W,
    },
}
impl<W> std::ops::Deref for ReadGuardedView<'_, W> {
    type Target = W;
    /// Dereferences to the guarded subview, whether it lives in the staged
    /// updates or was freshly loaded from storage.
    fn deref(&self) -> &W {
        match self {
            ReadGuardedView::NotLoaded { view, .. } => view,
            ReadGuardedView::Loaded { updates, short_key } => {
                // A `Loaded` guard is only constructed for keys staged as `Set`.
                match updates.get(short_key).unwrap() {
                    Update::Set(view) => view,
                    Update::Removed => unreachable!(),
                }
            }
        }
    }
}
/// We need to find new base keys in order to implement `CollectionView`.
/// We do this by appending a value to the base key.
///
/// Sub-views in a collection share a common key prefix, like in other view types. However,
/// just concatenating the shared prefix with sub-view keys makes it impossible to distinguish if a
/// given key belongs to child sub-view or a grandchild sub-view (consider for example if a
/// collection is stored inside the collection).
#[repr(u8)]
enum KeyTag {
    /// Prefix for specifying an index and serves to indicate the existence of an entry in the collection.
    Index = MIN_VIEW_TAG,
    /// Prefix under which the data of the corresponding sub-view is stored.
    Subview,
}
impl<W: View> View for ByteCollectionView<W::Context, W> {
    const NUM_INIT_KEYS: usize = 0;
    type Context = W::Context;
    fn context(&self) -> Self::Context {
        self.context.clone()
    }
    /// No keys are pre-fetched: subviews are loaded lazily on access.
    fn pre_load(_context: &Self::Context) -> Result<Vec<Vec<u8>>, ViewError> {
        Ok(vec![])
    }
    fn post_load(context: Self::Context, _values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        Ok(Self {
            context,
            delete_storage_first: false,
            updates: RwLock::new(BTreeMap::new()),
        })
    }
    /// Discards all staged changes.
    fn rollback(&mut self) {
        self.delete_storage_first = false;
        self.updates.get_mut().clear();
    }
    async fn has_pending_changes(&self) -> bool {
        if self.delete_storage_first {
            return true;
        }
        let updates = self.updates.read().await;
        !updates.is_empty()
    }
    /// Writes staged changes to `batch`. Returns `true` when the view ends up
    /// entirely deleted from storage.
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        let updates = self
            .updates
            .try_read()
            .ok_or_else(|| ViewError::TryLockError(vec![]))?;
        if self.delete_storage_first {
            delete_view = true;
            // Wipe everything under this view, then re-add the surviving subviews.
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            for (index, update) in updates.iter() {
                if let Update::Set(view) = update {
                    view.pre_save(batch)?;
                    self.add_index(batch, index);
                    delete_view = false;
                }
            }
        } else {
            for (index, update) in updates.iter() {
                match update {
                    Update::Set(view) => {
                        view.pre_save(batch)?;
                        self.add_index(batch, index);
                    }
                    Update::Removed => {
                        // Drop both the index marker and the subview's data.
                        let key_subview = self.get_subview_key(index);
                        let key_index = self.get_index_key(index);
                        batch.delete_key(key_index);
                        batch.delete_key_prefix(key_subview);
                    }
                }
            }
        }
        Ok(delete_view)
    }
    /// Finalizes a save: propagates to loaded subviews and clears staged state.
    fn post_save(&mut self) {
        for (_, update) in self.updates.get_mut().iter_mut() {
            if let Update::Set(view) = update {
                view.post_save();
            }
        }
        self.delete_storage_first = false;
        self.updates.get_mut().clear();
    }
    /// Marks the whole collection for deletion on the next save.
    fn clear(&mut self) {
        self.delete_storage_first = true;
        self.updates.get_mut().clear();
    }
}
impl<W: ClonableView> ClonableView for ByteCollectionView<W::Context, W> {
fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
let cloned_updates = self
.updates
.get_mut()
.iter_mut()
.map(|(key, value)| {
let cloned_value: Result<_, ViewError> = match value {
Update::Removed => Ok(Update::Removed),
Update::Set(view) => Ok(Update::Set(view.clone_unchecked()?)),
};
cloned_value.map(|v| (key.clone(), v))
})
.collect::<Result<_, ViewError>>()?;
Ok(ByteCollectionView {
context: self.context.clone(),
delete_storage_first: self.delete_storage_first,
updates: RwLock::new(cloned_updates),
})
}
}
impl<W: View> ByteCollectionView<W::Context, W> {
    /// Returns the key that marks the existence of the entry at `index`.
    fn get_index_key(&self, index: &[u8]) -> Vec<u8> {
        self.context
            .base_key()
            .base_tag_index(KeyTag::Index as u8, index)
    }
    /// Returns the base key prefix of the sub-view stored at `index`.
    fn get_subview_key(&self, index: &[u8]) -> Vec<u8> {
        self.context
            .base_key()
            .base_tag_index(KeyTag::Subview as u8, index)
    }
    /// Stages the existence marker for `index` into `batch` (the marker's value is empty).
    fn add_index(&self, batch: &mut Batch, index: &[u8]) {
        let key = self.get_index_key(index);
        batch.put_key_value_bytes(key, vec![]);
    }
    /// Loads a subview for the data at the given index in the collection. If an entry
    /// is absent then a default entry is added to the collection. The resulting view
    /// can be modified.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// let subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// let value = subview.get();
    /// assert_eq!(*value, String::default());
    /// # })
    /// ```
    pub async fn load_entry_mut(&mut self, short_key: &[u8]) -> Result<&mut W, ViewError> {
        match self.updates.get_mut().entry(short_key.to_vec()) {
            btree_map::Entry::Occupied(entry) => {
                let entry = entry.into_mut();
                match entry {
                    Update::Set(view) => Ok(view),
                    Update::Removed => {
                        // The entry was removed in memory; resurrect it as a fresh
                        // default sub-view rather than reloading from storage.
                        let key = self
                            .context
                            .base_key()
                            .base_tag_index(KeyTag::Subview as u8, short_key);
                        let context = self.context.clone_with_base_key(key);
                        // Obtain a view and set its pending state to the default (e.g. empty) state
                        let view = W::new(context)?;
                        *entry = Update::Set(view);
                        // Re-match to borrow the freshly stored view out of the map entry.
                        let Update::Set(view) = entry else {
                            unreachable!();
                        };
                        Ok(view)
                    }
                }
            }
            btree_map::Entry::Vacant(entry) => {
                let key = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Subview as u8, short_key);
                let context = self.context.clone_with_base_key(key);
                // After `clear`, storage contents must be ignored: start from a
                // default view instead of loading.
                let view = if self.delete_storage_first {
                    W::new(context)?
                } else {
                    W::load(context).await?
                };
                let Update::Set(view) = entry.insert(Update::Set(view)) else {
                    unreachable!();
                };
                Ok(view)
            }
        }
    }
    /// Loads a subview for the data at the given index in the collection. If an entry
    /// is absent then `None` is returned. The resulting view cannot be modified.
    /// May fail if one subview is already being visited.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// {
    ///     let _subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// }
    /// {
    ///     let subview = view.try_load_entry(&[0, 1]).await.unwrap().unwrap();
    ///     let value = subview.get();
    ///     assert_eq!(*value, String::default());
    /// }
    /// assert!(view.try_load_entry(&[0, 2]).await.unwrap().is_none());
    /// # })
    /// ```
    pub async fn try_load_entry(
        &self,
        short_key: &[u8],
    ) -> Result<Option<ReadGuardedView<W>>, ViewError> {
        let updates = self.updates.read().await;
        match updates.get(short_key) {
            Some(update) => match update {
                Update::Removed => Ok(None),
                // The returned guard keeps the updates map read-locked for as long
                // as the caller holds the view.
                Update::Set(_) => Ok(Some(ReadGuardedView::Loaded {
                    updates,
                    short_key: short_key.to_vec(),
                })),
            },
            None => {
                let key_index = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Index as u8, short_key);
                if !self.delete_storage_first
                    && self.context.store().contains_key(&key_index).await?
                {
                    let key = self
                        .context
                        .base_key()
                        .base_tag_index(KeyTag::Subview as u8, short_key);
                    let context = self.context.clone_with_base_key(key);
                    let view = W::load(context).await?;
                    Ok(Some(ReadGuardedView::NotLoaded {
                        _updates: updates,
                        view,
                    }))
                } else {
                    Ok(None)
                }
            }
        }
    }
    /// Load multiple entries for reading at once.
    /// The entries in `short_keys` have to be all distinct.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// {
    ///     let _subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// }
    /// let short_keys = vec![vec![0, 1], vec![2, 3]];
    /// let subviews = view.try_load_entries(short_keys).await.unwrap();
    /// let value0 = subviews[0].as_ref().unwrap().get();
    /// assert_eq!(*value0, String::default());
    /// # })
    /// ```
    pub async fn try_load_entries(
        &self,
        short_keys: Vec<Vec<u8>>,
    ) -> Result<Vec<Option<ReadGuardedView<W>>>, ViewError> {
        let mut results = Vec::with_capacity(short_keys.len());
        let mut keys_to_check = Vec::new();
        let mut keys_to_check_metadata = Vec::new();
        let updates = self.updates.read().await;
        for (position, short_key) in short_keys.into_iter().enumerate() {
            match updates.get(&short_key) {
                Some(update) => match update {
                    Update::Removed => {
                        results.push(None);
                    }
                    Update::Set(_) => {
                        // NOTE(review): acquires a second read guard while `updates`
                        // is already held; this relies on the RwLock admitting
                        // additional readers in that situation — verify against the
                        // lock's fairness/writer-priority policy.
                        let updates = self.updates.read().await;
                        results.push(Some(ReadGuardedView::Loaded {
                            updates,
                            short_key: short_key.clone(),
                        }));
                    }
                },
                None => {
                    results.push(None); // Placeholder, may be updated later
                    if !self.delete_storage_first {
                        let key = self
                            .context
                            .base_key()
                            .base_tag_index(KeyTag::Subview as u8, &short_key);
                        let subview_context = self.context.clone_with_base_key(key);
                        let key = self
                            .context
                            .base_key()
                            .base_tag_index(KeyTag::Index as u8, &short_key);
                        keys_to_check.push(key);
                        keys_to_check_metadata.push((position, subview_context));
                    }
                }
            }
        }
        // Determine which of the unknown keys actually exist in storage.
        let found_keys = self.context.store().contains_keys(&keys_to_check).await?;
        let entries_to_load = keys_to_check_metadata
            .into_iter()
            .zip(found_keys)
            .filter_map(|(metadata, found)| found.then_some(metadata))
            .collect::<Vec<_>>();
        // Batch-load every existing sub-view's initial keys in one storage round trip.
        let mut keys_to_load = Vec::with_capacity(entries_to_load.len() * W::NUM_INIT_KEYS);
        for (_, context) in &entries_to_load {
            keys_to_load.extend(W::pre_load(context)?);
        }
        let values = self
            .context
            .store()
            .read_multi_values_bytes(&keys_to_load)
            .await?;
        for (loaded_values, (position, context)) in values
            .chunks_exact_or_repeat(W::NUM_INIT_KEYS)
            .zip(entries_to_load)
        {
            let view = W::post_load(context, loaded_values)?;
            let updates = self.updates.read().await;
            results[position] = Some(ReadGuardedView::NotLoaded {
                _updates: updates,
                view,
            });
        }
        Ok(results)
    }
    /// Loads multiple entries for reading at once with their keys.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// {
    ///     let subview = view.load_entry_mut(&vec![0, 1]).await.unwrap();
    ///     subview.set("Bonjour".into());
    /// }
    /// let short_keys = vec![vec![0, 1], vec![0, 2]];
    /// let pairs = view.try_load_entries_pairs(short_keys).await.unwrap();
    /// assert_eq!(pairs[0].0, vec![0, 1]);
    /// assert_eq!(pairs[1].0, vec![0, 2]);
    /// let value0 = pairs[0].1.as_ref().unwrap().get();
    /// assert_eq!(*value0, "Bonjour".to_string());
    /// assert!(pairs[1].1.is_none());
    /// # })
    /// ```
    pub async fn try_load_entries_pairs(
        &self,
        short_keys: Vec<Vec<u8>>,
    ) -> Result<Vec<(Vec<u8>, Option<ReadGuardedView<W>>)>, ViewError> {
        let values = self.try_load_entries(short_keys.clone()).await?;
        Ok(short_keys.into_iter().zip(values).collect())
    }
    /// Load all entries for reading at once.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// {
    ///     let _subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// }
    /// let subviews = view.try_load_all_entries().await.unwrap();
    /// assert_eq!(subviews.len(), 1);
    /// # })
    /// ```
    pub async fn try_load_all_entries(
        &self,
    ) -> Result<Vec<(Vec<u8>, ReadGuardedView<W>)>, ViewError> {
        let updates = self.updates.read().await; // Acquire the read lock to prevent writes.
        let short_keys = self.keys().await?;
        let mut results = Vec::with_capacity(short_keys.len());
        let mut keys_to_load = Vec::new();
        let mut keys_to_load_metadata = Vec::new();
        for (position, short_key) in short_keys.iter().enumerate() {
            match updates.get(short_key) {
                Some(update) => {
                    // `keys()` never reports removed entries, so the update must be `Set`.
                    let Update::Set(_) = update else {
                        unreachable!();
                    };
                    // NOTE(review): second read guard taken while one is held — same
                    // reentrancy assumption as in `try_load_entries`; verify.
                    let updates = self.updates.read().await;
                    let view = ReadGuardedView::Loaded {
                        updates,
                        short_key: short_key.clone(),
                    };
                    results.push((short_key.clone(), Some(view)));
                }
                None => {
                    // If a key is not in `updates`, then it is in storage.
                    // The key exists since otherwise it would not be in `short_keys`.
                    // Therefore we have `self.delete_storage_first = false`.
                    assert!(!self.delete_storage_first);
                    results.push((short_key.clone(), None));
                    let key = self
                        .context
                        .base_key()
                        .base_tag_index(KeyTag::Subview as u8, short_key);
                    let subview_context = self.context.clone_with_base_key(key);
                    keys_to_load.extend(W::pre_load(&subview_context)?);
                    keys_to_load_metadata.push((position, subview_context, short_key.clone()));
                }
            }
        }
        let values = self
            .context
            .store()
            .read_multi_values_bytes(&keys_to_load)
            .await?;
        for (loaded_values, (position, context, short_key)) in values
            .chunks_exact_or_repeat(W::NUM_INIT_KEYS)
            .zip(keys_to_load_metadata)
        {
            let view = W::post_load(context, loaded_values)?;
            let updates = self.updates.read().await;
            let guarded_view = ReadGuardedView::NotLoaded {
                _updates: updates,
                view,
            };
            results[position] = (short_key, Some(guarded_view));
        }
        // Every position was filled either from `updates` or from storage above.
        Ok(results
            .into_iter()
            .map(|(short_key, view)| (short_key, view.unwrap()))
            .collect::<Vec<_>>())
    }
    /// Resets an entry to the default value.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// let subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// let value = subview.get_mut();
    /// *value = String::from("Hello");
    /// view.reset_entry_to_default(&[0, 1]).unwrap();
    /// let subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// let value = subview.get_mut();
    /// assert_eq!(*value, String::default());
    /// # })
    /// ```
    pub fn reset_entry_to_default(&mut self, short_key: &[u8]) -> Result<(), ViewError> {
        let key = self
            .context
            .base_key()
            .base_tag_index(KeyTag::Subview as u8, short_key);
        let context = self.context.clone_with_base_key(key);
        // Stage a brand-new default sub-view, discarding any previous update.
        let view = W::new(context)?;
        self.updates
            .get_mut()
            .insert(short_key.to_vec(), Update::Set(view));
        Ok(())
    }
    /// Tests if the collection contains a specified key and returns a boolean.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// {
    ///     let _subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// }
    /// assert!(view.contains_key(&[0, 1]).await.unwrap());
    /// assert!(!view.contains_key(&[0, 2]).await.unwrap());
    /// # })
    /// ```
    pub async fn contains_key(&self, short_key: &[u8]) -> Result<bool, ViewError> {
        let updates = self.updates.read().await;
        Ok(match updates.get(short_key) {
            // In-memory updates take precedence over storage.
            Some(entry) => match entry {
                Update::Set(_view) => true,
                _entry @ Update::Removed => false,
            },
            None => {
                // Fall back to the existence marker in storage, unless a pending
                // `clear` invalidates everything stored.
                let key_index = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Index as u8, short_key);
                !self.delete_storage_first && self.context.store().contains_key(&key_index).await?
            }
        })
    }
    /// Marks the entry as removed. If absent then nothing is done.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// let subview = view.load_entry_mut(&[0, 1]).await.unwrap();
    /// let value = subview.get_mut();
    /// assert_eq!(*value, String::default());
    /// view.remove_entry(vec![0, 1]);
    /// let keys = view.keys().await.unwrap();
    /// assert_eq!(keys.len(), 0);
    /// # })
    /// ```
    pub fn remove_entry(&mut self, short_key: Vec<u8>) {
        if self.delete_storage_first {
            // Optimization: No need to mark `short_key` for deletion as we are going to remove all the keys at once.
            self.updates.get_mut().remove(&short_key);
        } else {
            self.updates.get_mut().insert(short_key, Update::Removed);
        }
    }
    /// Gets the extra data.
    pub fn extra(&self) -> &<W::Context as Context>::Extra {
        self.context.extra()
    }
}
impl<W: View> ByteCollectionView<W::Context, W> {
    /// Applies a function f on each index (aka key). Keys are visited in the
    /// lexicographic order. If the function returns false, then the loop
    /// ends prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// view.load_entry_mut(&[0, 1]).await.unwrap();
    /// view.load_entry_mut(&[0, 2]).await.unwrap();
    /// let mut count = 0;
    /// view.for_each_key_while(|_key| {
    ///     count += 1;
    ///     Ok(count < 1)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 1);
    /// # })
    /// ```
    pub async fn for_each_key_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<bool, ViewError> + Send,
    {
        let updates = self.updates.read().await;
        let mut updates = updates.iter();
        let mut update = updates.next();
        if !self.delete_storage_first {
            // Merge the sorted stream of stored index keys with the sorted
            // in-memory updates, visiting each key exactly once.
            let base = self.get_index_key(&[]);
            for index in self.context.store().find_keys_by_prefix(&base).await? {
                loop {
                    match update {
                        // Drain pending keys that sort before (or equal to) the
                        // stored key; a `Removed` update suppresses the stored key.
                        Some((key, value)) if key <= &index => {
                            if let Update::Set(_) = value {
                                if !f(key)? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key == &index {
                                break;
                            }
                        }
                        _ => {
                            if !f(&index)? {
                                return Ok(());
                            }
                            break;
                        }
                    }
                }
            }
        }
        // Emit the remaining pending keys that sort after every stored key.
        while let Some((key, value)) = update {
            if let Update::Set(_) = value {
                if !f(key)? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
    /// Applies a function f on each index (aka key). Keys are visited in a
    /// lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// view.load_entry_mut(&[0, 1]).await.unwrap();
    /// view.load_entry_mut(&[0, 2]).await.unwrap();
    /// let mut count = 0;
    /// view.for_each_key(|_key| {
    ///     count += 1;
    ///     Ok(())
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 2);
    /// # })
    /// ```
    pub async fn for_each_key<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<(), ViewError> + Send,
    {
        // Delegate to the early-exit variant, never requesting an early exit.
        self.for_each_key_while(|key| {
            f(key)?;
            Ok(true)
        })
        .await
    }
    /// Returns the list of keys in the collection. The order is lexicographic.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// view.load_entry_mut(&[0, 1]).await.unwrap();
    /// view.load_entry_mut(&[0, 2]).await.unwrap();
    /// let keys = view.keys().await.unwrap();
    /// assert_eq!(keys, vec![vec![0, 1], vec![0, 2]]);
    /// # })
    /// ```
    pub async fn keys(&self) -> Result<Vec<Vec<u8>>, ViewError> {
        let mut keys = Vec::new();
        self.for_each_key(|key| {
            keys.push(key.to_vec());
            Ok(())
        })
        .await?;
        Ok(keys)
    }
    /// Returns the number of entries in the collection.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::collection_view::ByteCollectionView;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view: ByteCollectionView<_, RegisterView<_, String>> =
    ///     ByteCollectionView::load(context).await.unwrap();
    /// view.load_entry_mut(&[0, 1]).await.unwrap();
    /// view.load_entry_mut(&[0, 2]).await.unwrap();
    /// assert_eq!(view.count().await.unwrap(), 2);
    /// # })
    /// ```
    pub async fn count(&self) -> Result<usize, ViewError> {
        let mut count = 0;
        self.for_each_key(|_key| {
            count += 1;
            Ok(())
        })
        .await?;
        Ok(count)
    }
}
impl<W: HashableView> HashableView for ByteCollectionView<W::Context, W> {
type Hasher = sha3::Sha3_256;
async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
#[cfg(with_metrics)]
let _hash_latency = metrics::COLLECTION_VIEW_HASH_RUNTIME.measure_latency();
let mut hasher = sha3::Sha3_256::default();
let keys = self.keys().await?;
let count = keys.len() as u32;
hasher.update_with_bcs_bytes(&count)?;
let updates = self.updates.get_mut();
for key in keys {
hasher.update_with_bytes(&key)?;
let hash = match updates.get_mut(&key) {
Some(entry) => {
let Update::Set(view) = entry else {
unreachable!();
};
view.hash_mut().await?
}
None => {
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Subview as u8, &key);
let context = self.context.clone_with_base_key(key);
let mut view = W::load(context).await?;
view.hash_mut().await?
}
};
hasher.write_all(hash.as_ref())?;
}
Ok(hasher.finalize())
}
async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
#[cfg(with_metrics)]
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/register_view.rs | linera-views/src/views/register_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{from_bytes_option_or_default, HasherOutput},
context::Context,
hashable_wrapper::WrappedHashableContainerView,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View},
ViewError,
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// The runtime of hash computation
    // Registered lazily on first access; the histogram carries no labels.
    pub static REGISTER_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "register_view_hash_runtime",
            "RegisterView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}
/// A view that supports modifying a single value of type `T`.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, T: Allocative")]
pub struct RegisterView<C, T> {
    /// Whether to clear storage before applying updates.
    delete_storage_first: bool,
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// The value persisted in storage.
    stored_value: Box<T>,
    /// Pending update not yet persisted to storage.
    /// `None` means no change is staged; `Some` shadows `stored_value` until saved.
    update: Option<Box<T>>,
}
impl<C, T, C2> ReplaceContext<C2> for RegisterView<C, T>
where
    C: Context,
    C2: Context,
    T: Default + Send + Sync + Serialize + DeserializeOwned + Clone,
{
    type Target = RegisterView<C2, T>;
    /// Rebuilds this register under the context produced by `ctx`, copying the
    /// staged and stored values as-is.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        let context = ctx(&self.context);
        RegisterView {
            context,
            delete_storage_first: self.delete_storage_first,
            stored_value: self.stored_value.clone(),
            update: self.update.clone(),
        }
    }
}
impl<C, T> View for RegisterView<C, T>
where
    C: Context,
    T: Default + Send + Sync + Serialize + DeserializeOwned,
{
    /// A register occupies exactly one key: its base key.
    const NUM_INIT_KEYS: usize = 1;
    type Context = C;
    fn context(&self) -> C {
        self.context.clone()
    }
    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        // The single key to fetch is the register's base key itself.
        let key = context.base_key().bytes.clone();
        Ok(vec![key])
    }
    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // Missing bytes deserialize to `T::default()`.
        let bytes = values.first().ok_or(ViewError::PostLoadValuesError)?;
        Ok(Self {
            delete_storage_first: false,
            context,
            stored_value: Box::new(from_bytes_option_or_default(bytes)?),
            update: None,
        })
    }
    fn rollback(&mut self) {
        // Discard both the staged value and any pending deletion.
        self.update = None;
        self.delete_storage_first = false;
    }
    async fn has_pending_changes(&self) -> bool {
        self.delete_storage_first || self.update.is_some()
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        // Returns `true` exactly when the register is deleted from storage.
        if self.delete_storage_first {
            batch.delete_key(self.context.base_key().bytes.clone());
            return Ok(true);
        }
        if let Some(value) = &self.update {
            batch.put_key_value(self.context.base_key().bytes.clone(), value)?;
        }
        Ok(false)
    }
    fn post_save(&mut self) {
        // Mirror in memory what the persisted batch did to storage.
        if self.delete_storage_first {
            self.stored_value = Box::default();
        } else if let Some(value) = self.update.take() {
            self.stored_value = value;
        }
        self.delete_storage_first = false;
        self.update = None;
    }
    fn clear(&mut self) {
        // Schedule the stored key for deletion and present the default value.
        self.update = Some(Box::default());
        self.delete_storage_first = true;
    }
}
impl<C, T> ClonableView for RegisterView<C, T>
where
    C: Context,
    T: Clone + Default + Send + Sync + Serialize + DeserializeOwned,
{
    /// Cloning a register copies all four fields; it can never fail.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let RegisterView {
            delete_storage_first,
            context,
            stored_value,
            update,
        } = self;
        Ok(RegisterView {
            delete_storage_first: *delete_storage_first,
            context: context.clone(),
            stored_value: stored_value.clone(),
            update: update.clone(),
        })
    }
}
impl<C, T> RegisterView<C, T>
where
    C: Context,
{
    /// Returns a reference to the current value of the register: the staged
    /// update if one exists, otherwise the value read from storage.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut register = RegisterView::<_, u32>::load(context).await.unwrap();
    /// let value = register.get();
    /// assert_eq!(*value, 0);
    /// # })
    /// ```
    pub fn get(&self) -> &T {
        // A staged update shadows the stored value.
        self.update.as_deref().unwrap_or(&self.stored_value)
    }
    /// Stages `value` as the new content of the register.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut register = RegisterView::load(context).await.unwrap();
    /// register.set(5);
    /// let value = register.get();
    /// assert_eq!(*value, 5);
    /// # })
    /// ```
    pub fn set(&mut self, value: T) {
        // A full overwrite supersedes any pending storage clearing.
        self.update = Some(Box::new(value));
        self.delete_storage_first = false;
    }
    /// Returns the extra data carried by the context.
    pub fn extra(&self) -> &C::Extra {
        self.context.extra()
    }
}
impl<C, T> RegisterView<C, T>
where
    C: Context,
    T: Clone + Serialize,
{
    /// Returns a mutable reference to the register's value, first staging a copy
    /// of the stored value as the pending update if none was staged yet.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::register_view::RegisterView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut register: RegisterView<_, u32> = RegisterView::load(context).await.unwrap();
    /// let value = register.get_mut();
    /// assert_eq!(*value, 0);
    /// # })
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        self.delete_storage_first = false;
        if self.update.is_none() {
            // Seed the pending update with a copy of the stored value.
            self.update = Some(self.stored_value.clone());
        }
        self.update.as_mut().expect("update was just populated")
    }
    /// Hashes the BCS serialization of the register's current value.
    fn compute_hash(&self) -> Result<<sha3::Sha3_256 as Hasher>::Output, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::REGISTER_VIEW_HASH_RUNTIME.measure_latency();
        let mut hasher = sha3::Sha3_256::default();
        hasher.update_with_bcs_bytes(self.get())?;
        Ok(hasher.finalize())
    }
}
impl<C, T> HashableView for RegisterView<C, T>
where
    C: Context,
    T: Clone + Default + Send + Sync + Serialize + DeserializeOwned,
{
    type Hasher = sha3::Sha3_256;
    /// Hashing a register never needs mutable access, so both entry points
    /// delegate to the same helper.
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        Self::compute_hash(self)
    }
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        Self::compute_hash(self)
    }
}
/// Type wrapping `RegisterView` while memoizing the hash.
/// See [`WrappedHashableContainerView`] for the memoization behavior.
pub type HashedRegisterView<C, T> =
    WrappedHashableContainerView<C, RegisterView<C, T>, HasherOutput>;
#[cfg(with_graphql)]
mod graphql {
    use std::borrow::Cow;
    use super::RegisterView;
    use crate::context::Context;
    /// GraphQL support: a `RegisterView<C, T>` is exposed exactly as its inner `T`,
    /// delegating type metadata and resolution to `T`'s own `OutputType` impl.
    impl<C, T> async_graphql::OutputType for RegisterView<C, T>
    where
        C: Context,
        T: async_graphql::OutputType + Send + Sync,
    {
        fn type_name() -> Cow<'static, str> {
            T::type_name()
        }
        fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
            T::create_type_info(registry)
        }
        async fn resolve(
            &self,
            ctx: &async_graphql::ContextSelectionSet<'_>,
            field: &async_graphql::Positioned<async_graphql::parser::types::Field>,
        ) -> async_graphql::ServerResult<async_graphql::Value> {
            // Resolve against the register's current value (staged or stored).
            self.get().resolve(ctx, field).await
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/key_value_store_view.rs | linera-views/src/views/key_value_store_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! We implement two types:
//! 1) The first type `KeyValueStoreView` implements View and the function of `KeyValueStore`.
//!
//! 2) The second type `ViewContainer` encapsulates `KeyValueStoreView` and provides the following functionalities:
//! * The `Clone` trait
//! * a `write_batch` that takes a `&self` instead of a `&mut self`
//! * a `write_batch` that writes in the context instead of writing of the view.
//!
//! Currently, that second type is only used for tests.
//!
//! Key tags to create the sub-keys of a `KeyValueStoreView` on top of the base key.
use std::{collections::BTreeMap, fmt::Debug, ops::Bound::Included, sync::Mutex};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{data_types::ArithmeticError, ensure, visit_allocative_simple};
use serde::{Deserialize, Serialize};
use crate::{
batch::{Batch, WriteOperation},
common::{
from_bytes_option, from_bytes_option_or_default, get_key_range_for_prefix, get_upper_bound,
DeletionSet, HasherOutput, SuffixClosedSetIterator, Update,
},
context::Context,
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
map_view::ByteMapView,
store::ReadableKeyValueStore,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    // All histograms below are label-free (`&[]`) and use exponential latency buckets.
    /// The latency of hash computation
    pub static KEY_VALUE_STORE_VIEW_HASH_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "key_value_store_view_hash_latency",
            "KeyValueStoreView hash latency",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
    /// The latency of get operation
    pub static KEY_VALUE_STORE_VIEW_GET_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "key_value_store_view_get_latency",
            "KeyValueStoreView get latency",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
    /// The latency of multi get
    pub static KEY_VALUE_STORE_VIEW_MULTI_GET_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_multi_get_latency",
                "KeyValueStoreView multi get latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
    /// The latency of contains key
    pub static KEY_VALUE_STORE_VIEW_CONTAINS_KEY_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_contains_key_latency",
                "KeyValueStoreView contains key latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
    /// The latency of contains keys
    pub static KEY_VALUE_STORE_VIEW_CONTAINS_KEYS_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_contains_keys_latency",
                "KeyValueStoreView contains keys latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
    /// The latency of find keys by prefix operation
    pub static KEY_VALUE_STORE_VIEW_FIND_KEYS_BY_PREFIX_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_find_keys_by_prefix_latency",
                "KeyValueStoreView find keys by prefix latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
    /// The latency of find key values by prefix operation
    pub static KEY_VALUE_STORE_VIEW_FIND_KEY_VALUES_BY_PREFIX_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_find_key_values_by_prefix_latency",
                "KeyValueStoreView find key values by prefix latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
    /// The latency of write batch operation
    pub static KEY_VALUE_STORE_VIEW_WRITE_BATCH_LATENCY: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "key_value_store_view_write_batch_latency",
                "KeyValueStoreView write batch latency",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
}
#[cfg(with_testing)]
use {
crate::store::{KeyValueStoreError, WithError, WritableKeyValueStore},
async_lock::RwLock,
std::sync::Arc,
thiserror::Error,
};
/// Key tags distinguishing the kinds of sub-keys a `KeyValueStoreView`
/// stores under its base key.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the indices of the view.
    Index = MIN_VIEW_TAG,
    /// The total stored size
    TotalSize,
    /// The prefix where the sizes are being stored
    Sizes,
    /// Prefix for the hash.
    Hash,
}
/// A pair containing the key and value size, in bytes.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, Allocative)]
pub struct SizeData {
    /// The size of the key, in bytes
    pub key: u32,
    /// The size of the value, in bytes
    pub value: u32,
}
impl SizeData {
    /// Sums both terms.
    ///
    /// Takes `&self` since nothing is mutated. Note that the addition itself
    /// is unchecked: `add_assign` keeps each term within `u32` individually,
    /// but their sum is assumed to fit as well.
    pub fn sum(&self) -> u32 {
        self.key + self.value
    }
    /// Adds a size to `self`, failing with `ArithmeticError::Overflow` if
    /// either the key or the value total would overflow a `u32`.
    pub fn add_assign(&mut self, size: SizeData) -> Result<(), ViewError> {
        self.key = self
            .key
            .checked_add(size.key)
            .ok_or(ViewError::ArithmeticError(ArithmeticError::Overflow))?;
        self.value = self
            .value
            .checked_add(size.value)
            .ok_or(ViewError::ArithmeticError(ArithmeticError::Overflow))?;
        Ok(())
    }
    /// Subtracts a size from `self`.
    ///
    /// The subtraction is unchecked (it panics in debug builds on underflow),
    /// so callers must only subtract sizes that were previously added.
    pub fn sub_assign(&mut self, size: SizeData) {
        self.key -= size.key;
        self.value -= size.value;
    }
}
/// A view that represents the functions of `KeyValueStore`.
///
/// Comment on the data set:
/// In order to work, the view needs to store the updates and deleted prefixes.
/// The updates and deleted prefixes have to be coherent. This means:
/// * If an index is deleted by one in deleted prefixes then it should not be present
///   in the updates at all.
/// * [`DeletePrefix::key_prefix`][entry1] should not dominate anyone. That is if we have `[0,2]`
///   then we should not have `[0,2,3]` since it would be dominated by the preceding.
///
/// With that we have:
/// * in order to test if an `index` is deleted by a prefix we compute the highest deleted prefix `dp`
///   such that `dp <= index`.
///   If `dp` is indeed a prefix then we conclude that `index` is deleted, otherwise not.
///   The no domination is essential here.
///
/// [entry1]: crate::batch::WriteOperation::DeletePrefix
#[derive(Debug, Allocative)]
#[allocative(bound = "C")]
pub struct KeyValueStoreView<C> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// Tracks deleted key prefixes.
    deletion_set: DeletionSet,
    /// Pending changes not yet persisted to storage, keyed by index.
    updates: BTreeMap<Vec<u8>, Update<Vec<u8>>>,
    /// The total size of keys and values persisted in storage.
    stored_total_size: SizeData,
    /// The total size of keys and values including pending changes.
    total_size: SizeData,
    /// Map of key to value size for tracking storage usage.
    sizes: ByteMapView<C, u32>,
    /// The hash persisted in storage.
    #[allocative(visit = visit_allocative_simple)]
    stored_hash: Option<HasherOutput>,
    /// Memoized hash, if any; `None` means it must be recomputed.
    #[allocative(visit = visit_allocative_simple)]
    hash: Mutex<Option<HasherOutput>>,
}
impl<C: Context, C2: Context> ReplaceContext<C2> for KeyValueStoreView<C> {
    type Target = KeyValueStoreView<C2>;
    /// Rebuilds the view under a new context produced by `ctx`, copying all
    /// staged and persisted bookkeeping over unchanged.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        // We have exclusive access (`&mut self`), so read the memoized hash
        // via `get_mut` instead of locking — consistent with `clone_unchecked`.
        let hash = *self.hash.get_mut().unwrap();
        KeyValueStoreView {
            context: ctx.clone()(&self.context),
            deletion_set: self.deletion_set.clone(),
            updates: self.updates.clone(),
            stored_total_size: self.stored_total_size,
            total_size: self.total_size,
            sizes: self.sizes.with_context(ctx.clone()).await,
            stored_hash: self.stored_hash,
            hash: Mutex::new(hash),
        }
    }
}
impl<C: Context> View for KeyValueStoreView<C> {
    // Two extra keys (hash, total size) ahead of the embedded `sizes` map.
    const NUM_INIT_KEYS: usize = 2 + ByteMapView::<C, u32>::NUM_INIT_KEYS;
    type Context = C;
    fn context(&self) -> C {
        self.context.clone()
    }
    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        // Layout: hash key, total-size key, then the `sizes` map's keys.
        let key_hash = context.base_key().base_tag(KeyTag::Hash as u8);
        let key_total_size = context.base_key().base_tag(KeyTag::TotalSize as u8);
        let mut v = vec![key_hash, key_total_size];
        let base_key = context.base_key().base_tag(KeyTag::Sizes as u8);
        let context_sizes = context.clone_with_base_key(base_key);
        v.extend(ByteMapView::<C, u32>::pre_load(&context_sizes)?);
        Ok(v)
    }
    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // `values` must follow the layout produced by `pre_load`.
        let hash = from_bytes_option(values.first().ok_or(ViewError::PostLoadValuesError)?)?;
        let total_size =
            from_bytes_option_or_default(values.get(1).ok_or(ViewError::PostLoadValuesError)?)?;
        let base_key = context.base_key().base_tag(KeyTag::Sizes as u8);
        let context_sizes = context.clone_with_base_key(base_key);
        let sizes = ByteMapView::post_load(
            context_sizes,
            values.get(2..).ok_or(ViewError::PostLoadValuesError)?,
        )?;
        Ok(Self {
            context,
            deletion_set: DeletionSet::new(),
            updates: BTreeMap::new(),
            stored_total_size: total_size,
            total_size,
            sizes,
            stored_hash: hash,
            hash: Mutex::new(hash),
        })
    }
    fn rollback(&mut self) {
        // Discard every staged change and restore the persisted state.
        self.deletion_set.rollback();
        self.updates.clear();
        self.total_size = self.stored_total_size;
        self.sizes.rollback();
        *self.hash.get_mut().unwrap() = self.stored_hash;
    }
    async fn has_pending_changes(&self) -> bool {
        if self.deletion_set.has_pending_changes() {
            return true;
        }
        if !self.updates.is_empty() {
            return true;
        }
        if self.stored_total_size != self.total_size {
            return true;
        }
        if self.sizes.has_pending_changes().await {
            return true;
        }
        // A memoized hash differing from the stored one also counts.
        let hash = self.hash.lock().unwrap();
        self.stored_hash != *hash
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        if self.deletion_set.delete_storage_first {
            // The whole view was cleared: wipe our prefix, then re-insert the
            // surviving updates. The view counts as deleted only if nothing
            // was re-inserted.
            delete_view = true;
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            for (index, update) in self.updates.iter() {
                if let Update::Set(value) = update {
                    let key = self
                        .context
                        .base_key()
                        .base_tag_index(KeyTag::Index as u8, index);
                    batch.put_key_value_bytes(key, value.clone());
                    delete_view = false;
                }
            }
        } else {
            // Stage prefix deletions first, then individual updates.
            for index in self.deletion_set.deleted_prefixes.iter() {
                let key = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Index as u8, index);
                batch.delete_key_prefix(key);
            }
            for (index, update) in self.updates.iter() {
                let key = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Index as u8, index);
                match update {
                    Update::Removed => batch.delete_key(key),
                    Update::Set(value) => batch.put_key_value_bytes(key, value.clone()),
                }
            }
        }
        self.sizes.pre_save(batch)?;
        // Persist the hash and the total size only when they changed.
        let hash = *self.hash.lock().unwrap();
        if self.stored_hash != hash {
            let key = self.context.base_key().base_tag(KeyTag::Hash as u8);
            match hash {
                None => batch.delete_key(key),
                Some(hash) => batch.put_key_value(key, &hash)?,
            }
        }
        if self.stored_total_size != self.total_size {
            let key = self.context.base_key().base_tag(KeyTag::TotalSize as u8);
            batch.put_key_value(key, &self.total_size)?;
        }
        Ok(delete_view)
    }
    fn post_save(&mut self) {
        // The staged changes are now persisted: sync the `stored_*` mirrors.
        self.deletion_set.delete_storage_first = false;
        self.deletion_set.deleted_prefixes.clear();
        self.updates.clear();
        self.sizes.post_save();
        let hash = *self.hash.lock().unwrap();
        self.stored_hash = hash;
        self.stored_total_size = self.total_size;
    }
    fn clear(&mut self) {
        self.deletion_set.clear();
        self.updates.clear();
        self.total_size = SizeData::default();
        self.sizes.clear();
        *self.hash.get_mut().unwrap() = None;
    }
}
impl<C: Context> ClonableView for KeyValueStoreView<C> {
    /// Produces an independent copy of the view, including all staged changes
    /// and the memoized hash.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        // Exclusive access lets us read the memoized hash without locking.
        let memoized_hash = *self.hash.get_mut().unwrap();
        let sizes = self.sizes.clone_unchecked()?;
        Ok(KeyValueStoreView {
            context: self.context.clone(),
            deletion_set: self.deletion_set.clone(),
            updates: self.updates.clone(),
            stored_total_size: self.stored_total_size,
            total_size: self.total_size,
            sizes,
            stored_hash: self.stored_hash,
            hash: Mutex::new(memoized_hash),
        })
    }
}
impl<C: Context> KeyValueStoreView<C> {
fn max_key_size(&self) -> usize {
let prefix_len = self.context.base_key().bytes.len();
<C::Store as ReadableKeyValueStore>::MAX_KEY_SIZE - 1 - prefix_len
}
    /// Getting the total sizes that will be used for keys and values when stored,
    /// including pending changes that have not been persisted yet.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::key_value_store_view::{KeyValueStoreView, SizeData};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view = KeyValueStoreView::load(context).await.unwrap();
    /// view.insert(vec![0, 1], vec![0, 1, 2, 3, 4]).await.unwrap();
    /// let total_size = view.total_size();
    /// assert_eq!(total_size, SizeData { key: 2, value: 5 });
    /// # })
    /// ```
    pub fn total_size(&self) -> SizeData {
        self.total_size
    }
    /// Applies the function f over all indices. If the function f returns
    /// false, then the loop ends prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::key_value_store_view::KeyValueStoreView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view = KeyValueStoreView::load(context).await.unwrap();
    /// view.insert(vec![0, 1], vec![0]).await.unwrap();
    /// view.insert(vec![0, 2], vec![0]).await.unwrap();
    /// view.insert(vec![0, 3], vec![0]).await.unwrap();
    /// let mut count = 0;
    /// view.for_each_index_while(|_key| {
    ///     count += 1;
    ///     Ok(count < 2)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 2);
    /// # })
    /// ```
    pub async fn for_each_index_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<bool, ViewError> + Send,
    {
        let key_prefix = self.context.base_key().base_tag(KeyTag::Index as u8);
        let mut updates = self.updates.iter();
        let mut update = updates.next();
        if !self.deletion_set.delete_storage_first {
            // Merge the stored keys with the pending updates in lexicographic
            // order, skipping stored keys covered by a deleted prefix.
            let mut suffix_closed_set =
                SuffixClosedSetIterator::new(0, self.deletion_set.deleted_prefixes.iter());
            for index in self
                .context
                .store()
                .find_keys_by_prefix(&key_prefix)
                .await?
            {
                loop {
                    match update {
                        // Pending updates at or before the stored key come
                        // first; an update at the same key shadows it.
                        Some((key, value)) if key <= &index => {
                            if let Update::Set(_) = value {
                                if !f(key)? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key == &index {
                                break;
                            }
                        }
                        _ => {
                            if !suffix_closed_set.find_key(&index) && !f(&index)? {
                                return Ok(());
                            }
                            break;
                        }
                    }
                }
            }
        }
        // Visit any remaining pending updates beyond the last stored key.
        while let Some((key, value)) = update {
            if let Update::Set(_) = value {
                if !f(key)? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
/// Applies the function f over all indices.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![0]).await.unwrap();
/// view.insert(vec![0, 2], vec![0]).await.unwrap();
/// view.insert(vec![0, 3], vec![0]).await.unwrap();
/// let mut count = 0;
/// view.for_each_index(|_key| {
/// count += 1;
/// Ok(())
/// })
/// .await
/// .unwrap();
/// assert_eq!(count, 3);
/// # })
/// ```
pub async fn for_each_index<F>(&self, mut f: F) -> Result<(), ViewError>
where
F: FnMut(&[u8]) -> Result<(), ViewError> + Send,
{
self.for_each_index_while(|key| {
f(key)?;
Ok(true)
})
.await
}
    /// Applies the function f over all index/value pairs.
    /// If the function f returns false then the loop ends prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::key_value_store_view::KeyValueStoreView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view = KeyValueStoreView::load(context).await.unwrap();
    /// view.insert(vec![0, 1], vec![0]).await.unwrap();
    /// view.insert(vec![0, 2], vec![0]).await.unwrap();
    /// let mut values = Vec::new();
    /// view.for_each_index_value_while(|_key, value| {
    ///     values.push(value.to_vec());
    ///     Ok(values.len() < 1)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(values, vec![vec![0]]);
    /// # })
    /// ```
    pub async fn for_each_index_value_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], &[u8]) -> Result<bool, ViewError> + Send,
    {
        let key_prefix = self.context.base_key().base_tag(KeyTag::Index as u8);
        let mut updates = self.updates.iter();
        let mut update = updates.next();
        if !self.deletion_set.delete_storage_first {
            // Merge the stored pairs with the pending updates in lexicographic
            // order, skipping stored keys covered by a deleted prefix.
            let mut suffix_closed_set =
                SuffixClosedSetIterator::new(0, self.deletion_set.deleted_prefixes.iter());
            for entry in self
                .context
                .store()
                .find_key_values_by_prefix(&key_prefix)
                .await?
            {
                let (index, index_val) = entry;
                loop {
                    match update {
                        // Pending updates at or before the stored key come
                        // first; an update at the same key shadows its value.
                        Some((key, value)) if key <= &index => {
                            if let Update::Set(value) = value {
                                if !f(key, value)? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key == &index {
                                break;
                            }
                        }
                        _ => {
                            if !suffix_closed_set.find_key(&index) && !f(&index, &index_val)? {
                                return Ok(());
                            }
                            break;
                        }
                    }
                }
            }
        }
        // Visit any remaining pending updates beyond the last stored key.
        while let Some((key, value)) = update {
            if let Update::Set(value) = value {
                if !f(key, value)? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
    /// Applies the function f over all index/value pairs.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::key_value_store_view::KeyValueStoreView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view = KeyValueStoreView::load(context).await.unwrap();
    /// view.insert(vec![0, 1], vec![0]).await.unwrap();
    /// view.insert(vec![0, 2], vec![0]).await.unwrap();
    /// let mut key_values = Vec::new();
    /// view.for_each_index_value(|key, value| {
    ///     key_values.push((key.to_vec(), value.to_vec()));
    ///     Ok(())
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(
    ///     key_values,
    ///     vec![(vec![0, 1], vec![0]), (vec![0, 2], vec![0])]
    /// );
    /// # })
    /// ```
    pub async fn for_each_index_value<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], &[u8]) -> Result<(), ViewError> + Send,
    {
        self.for_each_index_value_while(|key, value| {
            f(key, value)?;
            Ok(true)
        })
        .await
    }
/// Returns the list of indices in lexicographic order.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![0]).await.unwrap();
/// view.insert(vec![0, 2], vec![0]).await.unwrap();
/// let indices = view.indices().await.unwrap();
/// assert_eq!(indices, vec![vec![0, 1], vec![0, 2]]);
/// # })
/// ```
pub async fn indices(&self) -> Result<Vec<Vec<u8>>, ViewError> {
let mut indices = Vec::new();
self.for_each_index(|index| {
indices.push(index.to_vec());
Ok(())
})
.await?;
Ok(indices)
}
    /// Returns the list of indices and values in lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::key_value_store_view::KeyValueStoreView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut view = KeyValueStoreView::load(context).await.unwrap();
    /// view.insert(vec![0, 1], vec![0]).await.unwrap();
    /// view.insert(vec![0, 2], vec![0]).await.unwrap();
    /// let key_values = view.index_values().await.unwrap();
    /// assert_eq!(
    ///     key_values,
    ///     vec![(vec![0, 1], vec![0]), (vec![0, 2], vec![0])]
    /// );
    /// # })
    /// ```
    pub async fn index_values(&self) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ViewError> {
        let mut index_values = Vec::new();
        self.for_each_index_value(|index, value| {
            index_values.push((index.to_vec(), value.to_vec()));
            Ok(())
        })
        .await?;
        Ok(index_values)
    }
/// Returns the number of entries.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![0]).await.unwrap();
/// view.insert(vec![0, 2], vec![0]).await.unwrap();
/// let count = view.count().await.unwrap();
/// assert_eq!(count, 2);
/// # })
/// ```
pub async fn count(&self) -> Result<usize, ViewError> {
let mut count = 0;
self.for_each_index(|_index| {
count += 1;
Ok(())
})
.await?;
Ok(count)
}
/// Obtains the value at the given index, if any.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![42]).await.unwrap();
/// assert_eq!(view.get(&[0, 1]).await.unwrap(), Some(vec![42]));
/// assert_eq!(view.get(&[0, 2]).await.unwrap(), None);
/// # })
/// ```
pub async fn get(&self, index: &[u8]) -> Result<Option<Vec<u8>>, ViewError> {
#[cfg(with_metrics)]
let _latency = metrics::KEY_VALUE_STORE_VIEW_GET_LATENCY.measure_latency();
ensure!(index.len() <= self.max_key_size(), ViewError::KeyTooLong);
if let Some(update) = self.updates.get(index) {
let value = match update {
Update::Removed => None,
Update::Set(value) => Some(value.clone()),
};
return Ok(value);
}
if self.deletion_set.contains_prefix_of(index) {
return Ok(None);
}
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, index);
Ok(self.context.store().read_value_bytes(&key).await?)
}
/// Tests whether the store contains a specific index.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![42]).await.unwrap();
/// assert!(view.contains_key(&[0, 1]).await.unwrap());
/// assert!(!view.contains_key(&[0, 2]).await.unwrap());
/// # })
/// ```
pub async fn contains_key(&self, index: &[u8]) -> Result<bool, ViewError> {
#[cfg(with_metrics)]
let _latency = metrics::KEY_VALUE_STORE_VIEW_CONTAINS_KEY_LATENCY.measure_latency();
ensure!(index.len() <= self.max_key_size(), ViewError::KeyTooLong);
if let Some(update) = self.updates.get(index) {
let test = match update {
Update::Removed => false,
Update::Set(_value) => true,
};
return Ok(test);
}
if self.deletion_set.contains_prefix_of(index) {
return Ok(false);
}
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, index);
Ok(self.context.store().contains_key(&key).await?)
}
/// Tests whether the view contains a range of indices
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![42]).await.unwrap();
/// let keys = vec![vec![0, 1], vec![0, 2]];
/// let results = view.contains_keys(&keys).await.unwrap();
/// assert_eq!(results, vec![true, false]);
/// # })
/// ```
pub async fn contains_keys(&self, indices: &[Vec<u8>]) -> Result<Vec<bool>, ViewError> {
#[cfg(with_metrics)]
let _latency = metrics::KEY_VALUE_STORE_VIEW_CONTAINS_KEYS_LATENCY.measure_latency();
let mut results = Vec::with_capacity(indices.len());
let mut missed_indices = Vec::new();
let mut vector_query = Vec::new();
for (i, index) in indices.iter().enumerate() {
ensure!(index.len() <= self.max_key_size(), ViewError::KeyTooLong);
if let Some(update) = self.updates.get(index) {
let value = match update {
Update::Removed => false,
Update::Set(_) => true,
};
results.push(value);
} else {
results.push(false);
if !self.deletion_set.contains_prefix_of(index) {
missed_indices.push(i);
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, index);
vector_query.push(key);
}
}
}
let values = self.context.store().contains_keys(&vector_query).await?;
for (i, value) in missed_indices.into_iter().zip(values) {
results[i] = value;
}
Ok(results)
}
/// Obtains the values of a range of indices
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view = KeyValueStoreView::load(context).await.unwrap();
/// view.insert(vec![0, 1], vec![42]).await.unwrap();
/// assert_eq!(
/// view.multi_get(&[vec![0, 1], vec![0, 2]]).await.unwrap(),
/// vec![Some(vec![42]), None]
/// );
/// # })
/// ```
pub async fn multi_get(&self, indices: &[Vec<u8>]) -> Result<Vec<Option<Vec<u8>>>, ViewError> {
#[cfg(with_metrics)]
let _latency = metrics::KEY_VALUE_STORE_VIEW_MULTI_GET_LATENCY.measure_latency();
let mut result = Vec::with_capacity(indices.len());
let mut missed_indices = Vec::new();
let mut vector_query = Vec::new();
for (i, index) in indices.iter().enumerate() {
ensure!(index.len() <= self.max_key_size(), ViewError::KeyTooLong);
if let Some(update) = self.updates.get(index) {
let value = match update {
Update::Removed => None,
Update::Set(value) => Some(value.clone()),
};
result.push(value);
} else {
result.push(None);
if !self.deletion_set.contains_prefix_of(index) {
missed_indices.push(i);
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, index);
vector_query.push(key);
}
}
}
let values = self
.context
.store()
.read_multi_values_bytes(&vector_query)
.await?;
for (i, value) in missed_indices.into_iter().zip(values) {
result[i] = value;
}
Ok(result)
}
/// Applies the given batch of `crate::common::WriteOperation`.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::key_value_store_view::KeyValueStoreView;
/// # use linera_views::batch::Batch;
/// # use linera_views::views::View;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/historical_hash_wrapper.rs | linera-views/src/views/historical_hash_wrapper.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
marker::PhantomData,
ops::{Deref, DerefMut},
sync::Mutex,
};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::visit_allocative_simple;
use crate::{
batch::Batch,
common::from_bytes_option,
context::Context,
store::ReadableKeyValueStore as _,
views::{ClonableView, Hasher, HasherOutput, ReplaceContext, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// The runtime of the historical hash computation (see `make_hash`).
    pub static HISTORICALLY_HASHABLE_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> =
        LazyLock::new(|| {
            register_histogram_vec(
                "historically_hashable_view_hash_runtime",
                "HistoricallyHashableView hash runtime",
                &[],
                exponential_bucket_latencies(5.0),
            )
        });
}
/// Wrapper to compute the hash of the view based on its history of modifications.
///
/// Rather than hashing the current state, every saved batch of changes is
/// chained onto the previously stored hash (see `make_hash`), so two views
/// with equal contents but different histories hash differently.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, W: Allocative")]
pub struct HistoricallyHashableView<C, W> {
    /// The hash in storage.
    stored_hash: Option<HasherOutput>,
    /// The inner view.
    inner: W,
    /// Memoized hash, if any; cleared on mutable access (see `DerefMut`).
    #[allocative(visit = visit_allocative_simple)]
    hash: Mutex<Option<HasherOutput>>,
    /// Track context type.
    #[allocative(skip)]
    _phantom: PhantomData<C>,
}
/// Key tags to create the sub-keys of a `HistoricallyHashableView` on top of the base key.
///
/// NOTE(review): the numeric values appear to be part of the storage layout,
/// so reordering variants would invalidate previously stored data.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the keys of the wrapped inner view.
    Inner = MIN_VIEW_TAG,
    /// Prefix for the hash.
    Hash,
}
impl<C, W> HistoricallyHashableView<C, W> {
    /// Computes the next historical hash by chaining the previous stored hash
    /// with the BCS serialization of the batch of staged changes. An empty
    /// batch leaves the hash unchanged.
    fn make_hash(
        stored_hash: Option<HasherOutput>,
        batch: &Batch,
    ) -> Result<HasherOutput, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::HISTORICALLY_HASHABLE_VIEW_HASH_RUNTIME.measure_latency();
        let previous = stored_hash.unwrap_or_default();
        if batch.is_empty() {
            Ok(previous)
        } else {
            let mut hasher = sha3::Sha3_256::default();
            hasher.update_with_bytes(&previous)?;
            hasher.update_with_bcs_bytes(&batch)?;
            Ok(hasher.finalize())
        }
    }
}
impl<C, W, C2> ReplaceContext<C2> for HistoricallyHashableView<C, W>
where
    W: View<Context = C> + ReplaceContext<C2>,
    C: Context,
    C2: Context,
{
    type Target = HistoricallyHashableView<C2, <W as ReplaceContext<C2>>::Target>;
    /// Rebuilds the wrapper under a new context, carrying over the stored and
    /// memoized hashes unchanged.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        // Exclusive access: read the memoized hash without locking.
        let memoized = *self.hash.get_mut().unwrap();
        let inner = self.inner.with_context(ctx).await;
        HistoricallyHashableView {
            inner,
            stored_hash: self.stored_hash,
            hash: Mutex::new(memoized),
            _phantom: PhantomData,
        }
    }
}
impl<W> View for HistoricallyHashableView<W::Context, W>
where
    W: View,
{
    // One extra key for the stored hash, ahead of the inner view's keys.
    const NUM_INIT_KEYS: usize = 1 + W::NUM_INIT_KEYS;
    type Context = W::Context;
    fn context(&self) -> Self::Context {
        // The inner context has our base key plus the KeyTag::Inner byte
        self.inner.context().clone_with_trimmed_key(1)
    }
    fn pre_load(context: &Self::Context) -> Result<Vec<Vec<u8>>, ViewError> {
        // Layout: the hash key first, then the inner view's keys.
        let mut v = vec![context.base_key().base_tag(KeyTag::Hash as u8)];
        let base_key = context.base_key().base_tag(KeyTag::Inner as u8);
        let context = context.clone_with_base_key(base_key);
        v.extend(W::pre_load(&context)?);
        Ok(v)
    }
    fn post_load(context: Self::Context, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // `values` must follow the layout produced by `pre_load`.
        let hash = from_bytes_option(values.first().ok_or(ViewError::PostLoadValuesError)?)?;
        let base_key = context.base_key().base_tag(KeyTag::Inner as u8);
        let context = context.clone_with_base_key(base_key);
        let inner = W::post_load(
            context,
            values.get(1..).ok_or(ViewError::PostLoadValuesError)?,
        )?;
        Ok(Self {
            _phantom: PhantomData,
            stored_hash: hash,
            hash: Mutex::new(hash),
            inner,
        })
    }
    async fn load(context: Self::Context) -> Result<Self, ViewError> {
        let keys = Self::pre_load(&context)?;
        let values = context.store().read_multi_values_bytes(&keys).await?;
        Self::post_load(context, &values)
    }
    fn rollback(&mut self) {
        // Discard inner changes and restore the persisted hash.
        self.inner.rollback();
        *self.hash.get_mut().unwrap() = self.stored_hash;
    }
    async fn has_pending_changes(&self) -> bool {
        self.inner.has_pending_changes().await
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        // Stage the inner changes into a separate batch so that they can be
        // hashed as one unit before being appended to the caller's batch.
        let mut inner_batch = Batch::new();
        self.inner.pre_save(&mut inner_batch)?;
        let new_hash = {
            let mut maybe_hash = self.hash.lock().unwrap();
            match maybe_hash.as_mut() {
                Some(hash) => *hash,
                None => {
                    // Chain the staged changes onto the stored hash and memoize.
                    let hash = Self::make_hash(self.stored_hash, &inner_batch)?;
                    *maybe_hash = Some(hash);
                    hash
                }
            }
        };
        batch.operations.extend(inner_batch.operations);
        if self.stored_hash != Some(new_hash) {
            // The hash key is the inner base key with its last (tag) byte
            // replaced by `KeyTag::Hash`.
            let mut key = self.inner.context().base_key().bytes.clone();
            let tag = key.last_mut().unwrap();
            *tag = KeyTag::Hash as u8;
            batch.put_key_value(key, &new_hash)?;
        }
        // Never delete the stored hash, even if the inner view was cleared.
        Ok(false)
    }
    fn post_save(&mut self) {
        let new_hash = self
            .hash
            .get_mut()
            .unwrap()
            .expect("hash should be computed in pre_save");
        self.stored_hash = Some(new_hash);
        self.inner.post_save();
    }
    fn clear(&mut self) {
        // Clearing invalidates the memoized hash; the stored one is kept so
        // the history chain survives the clear (see `pre_save`).
        self.inner.clear();
        *self.hash.get_mut().unwrap() = None;
    }
}
impl<W> ClonableView for HistoricallyHashableView<W::Context, W>
where
    W: ClonableView,
{
    /// Produces an independent copy of the wrapper, including both hashes.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let memoized = *self.hash.get_mut().unwrap();
        let inner = self.inner.clone_unchecked()?;
        Ok(HistoricallyHashableView {
            inner,
            stored_hash: self.stored_hash,
            hash: Mutex::new(memoized),
            _phantom: PhantomData,
        })
    }
}
impl<W: View> HistoricallyHashableView<W::Context, W> {
    /// Obtains a hash of the history of the changes in the view.
    ///
    /// The result is memoized; computing it does not flush the pending changes.
    pub async fn historical_hash(&mut self) -> Result<HasherOutput, ViewError> {
        if let Some(hash) = *self.hash.get_mut().unwrap() {
            return Ok(hash);
        }
        // Stage the pending changes into a batch (without writing it) and
        // chain them onto the stored hash.
        let mut batch = Batch::new();
        self.inner.pre_save(&mut batch)?;
        let hash = Self::make_hash(self.stored_hash, &batch)?;
        // Memoize the hash that we just computed.
        *self.hash.get_mut().unwrap() = Some(hash);
        Ok(hash)
    }
}
impl<C, W> Deref for HistoricallyHashableView<C, W> {
    type Target = W;
    /// Immutable access to the wrapped view; leaves the memoized hash intact.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
impl<C, W> DerefMut for HistoricallyHashableView<C, W> {
    /// Mutable access to the wrapped view.
    fn deref_mut(&mut self) -> &mut W {
        // Mutable access may change the inner view, so the memoized hash is
        // dropped and will be recomputed on demand.
        let _ = self.hash.get_mut().unwrap().take();
        &mut self.inner
    }
}
#[cfg(with_graphql)]
mod graphql {
    use std::borrow::Cow;
    use super::HistoricallyHashableView;
    use crate::context::Context;
    /// GraphQL output support: the wrapper is transparent and exposes the
    /// inner view's schema and field resolution unchanged.
    impl<C, W> async_graphql::OutputType for HistoricallyHashableView<C, W>
    where
        C: Context,
        W: async_graphql::OutputType + Send + Sync,
    {
        fn type_name() -> Cow<'static, str> {
            W::type_name()
        }
        fn qualified_type_name() -> String {
            W::qualified_type_name()
        }
        fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
            W::create_type_info(registry)
        }
        async fn resolve(
            &self,
            ctx: &async_graphql::ContextSelectionSet<'_>,
            field: &async_graphql::Positioned<async_graphql::parser::types::Field>,
        ) -> async_graphql::ServerResult<async_graphql::Value> {
            // Delegate directly to the inner view.
            self.inner.resolve(ctx, field).await
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
context::MemoryContext, register_view::RegisterView, store::WritableKeyValueStore as _,
};
#[tokio::test]
async fn test_historically_hashable_view_initial_state() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Initially should have no pending changes
assert!(!view.has_pending_changes().await);
// Initial hash should be the hash of an empty batch with default stored_hash
let hash = view.historical_hash().await?;
assert_eq!(hash, HasherOutput::default());
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_hash_changes_with_modifications(
) -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Get initial hash
let hash0 = view.historical_hash().await?;
// Set a value
view.set(42);
assert!(view.has_pending_changes().await);
// Hash should change after modification
let hash1 = view.historical_hash().await?;
// Calling `historical_hash` doesn't flush changes.
assert!(view.has_pending_changes().await);
assert_ne!(hash0, hash1);
// Flush and verify hash is stored
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
assert!(!view.has_pending_changes().await);
assert_eq!(hash1, view.historical_hash().await?);
// Make another modification
view.set(84);
let hash2 = view.historical_hash().await?;
assert_ne!(hash1, hash2);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_reloaded() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Set initial value and flush
view.set(42);
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
let hash_after_flush = view.historical_hash().await?;
// Reload the view
let mut view2 =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Hash should be the same (loaded from storage)
let hash_reloaded = view2.historical_hash().await?;
assert_eq!(hash_after_flush, hash_reloaded);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_rollback() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Set and persist a value
view.set(42);
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
let hash_before = view.historical_hash().await?;
assert!(!view.has_pending_changes().await);
// Make a modification
view.set(84);
assert!(view.has_pending_changes().await);
let hash_modified = view.historical_hash().await?;
assert_ne!(hash_before, hash_modified);
// Rollback
view.rollback();
assert!(!view.has_pending_changes().await);
// Hash should return to previous value
let hash_after_rollback = view.historical_hash().await?;
assert_eq!(hash_before, hash_after_rollback);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_clear() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Set and persist a value
view.set(42);
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
assert_ne!(view.historical_hash().await?, HasherOutput::default());
// Clear the view
view.clear();
assert!(view.has_pending_changes().await);
// Flush the clear operation
let mut batch = Batch::new();
let delete_view = view.pre_save(&mut batch)?;
assert!(!delete_view);
context.store().write_batch(batch).await?;
view.post_save();
// Verify the view is not reset to default
assert_ne!(view.historical_hash().await?, HasherOutput::default());
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_clone_unchecked() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Set a value
view.set(42);
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
let original_hash = view.historical_hash().await?;
// Clone the view
let mut cloned_view = view.clone_unchecked()?;
// Verify the clone has the same hash initially
let cloned_hash = cloned_view.historical_hash().await?;
assert_eq!(original_hash, cloned_hash);
// Modify the clone
cloned_view.set(84);
let cloned_hash_after = cloned_view.historical_hash().await?;
assert_ne!(original_hash, cloned_hash_after);
// Original should be unchanged
let original_hash_after = view.historical_hash().await?;
assert_eq!(original_hash, original_hash_after);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_flush_updates_stored_hash() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Initial state - no stored hash
assert!(!view.has_pending_changes().await);
// Set a value
view.set(42);
assert!(view.has_pending_changes().await);
let hash_before_flush = view.historical_hash().await?;
// Flush - this should update stored_hash
let mut batch = Batch::new();
let delete_view = view.pre_save(&mut batch)?;
assert!(!delete_view);
context.store().write_batch(batch).await?;
view.post_save();
assert!(!view.has_pending_changes().await);
// Make another change
view.set(84);
let hash_after_second_change = view.historical_hash().await?;
// The new hash should be based on the previous stored hash
assert_ne!(hash_before_flush, hash_after_second_change);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_deref() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Test Deref - we can access inner view methods directly
view.set(42);
assert_eq!(*view.get(), 42);
// Test DerefMut
view.set(84);
assert_eq!(*view.get(), 84);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_sequential_modifications() -> Result<(), ViewError> {
async fn get_hash(values: &[u32]) -> Result<HasherOutput, ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
let mut previous_hash = view.historical_hash().await?;
for &value in values {
view.set(value);
if value % 2 == 0 {
// Immediately save after odd values.
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
}
let current_hash = view.historical_hash().await?;
assert_ne!(previous_hash, current_hash);
previous_hash = current_hash;
}
Ok(previous_hash)
}
let h1 = get_hash(&[10, 20, 30, 40, 50]).await?;
let h2 = get_hash(&[20, 30, 40, 50]).await?;
let h3 = get_hash(&[20, 21, 30, 40, 50]).await?;
assert_ne!(h1, h2);
assert_eq!(h2, h3);
Ok(())
}
#[tokio::test]
async fn test_historically_hashable_view_flush_with_no_hash_change() -> Result<(), ViewError> {
let context = MemoryContext::new_for_testing(());
let mut view =
HistoricallyHashableView::<_, RegisterView<_, u32>>::load(context.clone()).await?;
// Set and flush a value
view.set(42);
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
context.store().write_batch(batch).await?;
view.post_save();
let hash_before = view.historical_hash().await?;
// Flush again without changes - no new hash should be stored
let mut batch = Batch::new();
view.pre_save(&mut batch)?;
assert!(batch.is_empty());
context.store().write_batch(batch).await?;
view.post_save();
let hash_after = view.historical_hash().await?;
assert_eq!(hash_before, hash_after);
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/mod.rs | linera-views/src/views/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{fmt::Debug, future::Future, io::Write};
use linera_base::crypto::CryptoHash;
pub use linera_views_derive::{
ClonableView, CryptoHashRootView, CryptoHashView, HashableView, RootView, View,
};
use serde::Serialize;
use crate::{batch::Batch, common::HasherOutput, ViewError};
#[cfg(test)]
#[path = "unit_tests/views.rs"]
mod tests;
/// The `RegisterView` implements a register for a single value.
pub mod register_view;
/// The `LogView` implements a log list that can be pushed.
pub mod log_view;
/// The `BucketQueueView` implements a queue that can push on the back and delete on the front and group data in buckets.
pub mod bucket_queue_view;
/// The `QueueView` implements a queue that can push on the back and delete on the front.
pub mod queue_view;
/// The `MapView` implements a map with ordered keys.
pub mod map_view;
/// The `SetView` implements a set with ordered entries.
pub mod set_view;
/// The `CollectionView` implements a map structure whose keys are ordered and the values are views.
pub mod collection_view;
/// The `ReentrantCollectionView` implements a map structure whose keys are ordered and the values are views with concurrent access.
pub mod reentrant_collection_view;
/// The implementation of a key-value store view.
pub mod key_value_store_view;
/// Wrapping a view to memoize hashing.
pub mod hashable_wrapper;
/// Wrapping a view to compute hash based on the history of modifications to the view.
pub mod historical_hash_wrapper;
/// The minimum value for the view tags. Values in `0..MIN_VIEW_TAG` are used for other purposes.
pub const MIN_VIEW_TAG: u8 = 1;
/// A view gives exclusive access to read and write the data stored at an underlying
/// address in storage.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait View: Sized {
/// The number of keys used for the initialization
const NUM_INIT_KEYS: usize;
/// The type of context stored in this view
type Context: crate::context::Context;
/// Obtains a mutable reference to the internal context.
fn context(&self) -> Self::Context;
/// Creates the keys needed for loading the view
fn pre_load(context: &Self::Context) -> Result<Vec<Vec<u8>>, ViewError>;
/// Loads a view from the values
fn post_load(context: Self::Context, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError>;
/// Loads a view
fn load(context: Self::Context) -> impl Future<Output = Result<Self, ViewError>> {
async {
if Self::NUM_INIT_KEYS == 0 {
Self::post_load(context, &[])
} else {
use crate::{context::Context, store::ReadableKeyValueStore};
let keys = Self::pre_load(&context)?;
let values = context.store().read_multi_values_bytes(&keys).await?;
Self::post_load(context, &values)
}
}
}
/// Discards all pending changes. After that `flush` should have no effect to storage.
fn rollback(&mut self);
/// Returns [`true`] if flushing this view would result in changes to the persistent storage.
async fn has_pending_changes(&self) -> bool;
/// Clears the view. That can be seen as resetting to default. If the clear is followed
/// by a flush then all the relevant data is removed on the storage.
fn clear(&mut self);
/// Computes the batch of operations to persist changes to storage without modifying the view.
/// Crash-resistant storage implementations accumulate the desired changes in the `batch` variable.
/// The returned boolean indicates whether the operation removes the view or not.
fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError>;
/// Updates the view state after the batch has been executed in the database.
/// This should be called after `pre_save` and after the batch has been successfully written to storage.
/// This leaves the view in a clean state with no pending changes.
///
/// May panic if `pre_save` was not called right before on `self`.
fn post_save(&mut self);
/// Builds a trivial view that is already deleted
fn new(context: Self::Context) -> Result<Self, ViewError> {
let values = vec![None; Self::NUM_INIT_KEYS];
let mut view = Self::post_load(context, &values)?;
view.clear();
Ok(view)
}
}
/// A view which can have its context replaced.
pub trait ReplaceContext<C: crate::context::Context>: View {
/// The type returned after replacing the context.
type Target: View<Context = C>;
/// Returns a view with a replaced context.
async fn with_context(&mut self, ctx: impl FnOnce(&Self::Context) -> C + Clone)
-> Self::Target;
}
/// A view that supports hashing its values.
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait HashableView: View {
/// How to compute hashes.
type Hasher: Hasher;
/// Computes the hash of the values.
///
/// Implementations do not need to include a type tag. However, the usual precautions
/// to enforce collision resistance must be applied (e.g. including the length of a
/// collection of values).
async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError>;
/// Same as `hash` but guaranteed to be wait-free.
async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError>;
}
/// The requirement for the hasher type in [`HashableView`].
pub trait Hasher: Default + Write + Send + Sync + 'static {
/// The output type.
type Output: Debug + Clone + Eq + AsRef<[u8]> + 'static;
/// Finishes the hashing process and returns its output.
fn finalize(self) -> Self::Output;
/// Serializes a value with BCS and includes it in the hash.
fn update_with_bcs_bytes(&mut self, value: &impl Serialize) -> Result<(), ViewError> {
bcs::serialize_into(self, value)?;
Ok(())
}
/// Includes bytes in the hash.
fn update_with_bytes(&mut self, value: &[u8]) -> Result<(), ViewError> {
self.write_all(value)?;
Ok(())
}
}
impl Hasher for sha3::Sha3_256 {
type Output = HasherOutput;
fn finalize(self) -> Self::Output {
<sha3::Sha3_256 as sha3::Digest>::finalize(self)
}
}
/// A [`View`] whose staged modifications can be saved in storage.
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait RootView: View {
/// Saves the root view to the database context
async fn save(&mut self) -> Result<(), ViewError>;
}
/// A [`View`] that also supports crypto hash
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait CryptoHashView: HashableView {
/// Computing the hash and attributing the type to it. May require locking.
async fn crypto_hash(&self) -> Result<CryptoHash, ViewError>;
/// Same as `crypto_hash` but guaranteed to be wait-free.
async fn crypto_hash_mut(&mut self) -> Result<CryptoHash, ViewError>;
}
/// A [`RootView`] that also supports crypto hash
#[cfg_attr(not(web), trait_variant::make(Send))]
pub trait CryptoHashRootView: RootView + CryptoHashView {}
/// A view that can be shared (unsafely) by cloning it.
///
/// Note: Calling `flush` on any of the shared views will break the other views. Therefore,
/// cloning views is only safe if `flush` only ever happens after all the copies but one
/// have been dropped.
pub trait ClonableView: View {
/// Creates a clone of this view, sharing the underlying storage context but prone to
/// data races which can corrupt the view state.
fn clone_unchecked(&mut self) -> Result<Self, ViewError>;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/reentrant_collection_view.rs | linera-views/src/views/reentrant_collection_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
borrow::Borrow,
collections::{btree_map, BTreeMap},
io::Write,
marker::PhantomData,
mem,
ops::Deref,
sync::Arc,
};
use allocative::{Allocative, Key, Visitor};
use async_lock::{RwLock, RwLockReadGuardArc, RwLockWriteGuardArc};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{CustomSerialize, HasherOutput, SliceExt as _, Update},
context::{BaseKey, Context},
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
use std::sync::LazyLock;
use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
use prometheus::HistogramVec;
/// The runtime of hash computation
pub static REENTRANT_COLLECTION_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> =
LazyLock::new(|| {
register_histogram_vec(
"reentrant_collection_view_hash_runtime",
"ReentrantCollectionView hash runtime",
&[],
exponential_bucket_latencies(5.0),
)
});
}
/// A read-only accessor for a particular subview in a [`ReentrantCollectionView`].
#[derive(Debug)]
pub struct ReadGuardedView<T>(RwLockReadGuardArc<T>);
impl<T> std::ops::Deref for ReadGuardedView<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
/// A read-write accessor for a particular subview in a [`ReentrantCollectionView`].
#[derive(Debug)]
pub struct WriteGuardedView<T>(RwLockWriteGuardArc<T>);
impl<T> std::ops::Deref for WriteGuardedView<T> {
type Target = T;
fn deref(&self) -> &T {
self.0.deref()
}
}
impl<T> std::ops::DerefMut for WriteGuardedView<T> {
fn deref_mut(&mut self) -> &mut T {
self.0.deref_mut()
}
}
/// A view that supports accessing a collection of views of the same kind, indexed by `Vec<u8>`,
/// possibly several subviews at a time.
#[derive(Debug)]
pub struct ReentrantByteCollectionView<C, W> {
/// The view [`Context`].
context: C,
/// If the current persisted data will be completely erased and replaced on the next flush.
delete_storage_first: bool,
/// Entries that may have staged changes.
updates: BTreeMap<Vec<u8>, Update<Arc<RwLock<W>>>>,
}
impl<C, W: Allocative> Allocative for ReentrantByteCollectionView<C, W> {
fn visit<'a, 'b: 'a>(&self, visitor: &'a mut Visitor<'b>) {
let name = Key::new("ReentrantByteCollectionView");
let size = mem::size_of::<Self>();
let mut visitor = visitor.enter(name, size);
for (k, v) in &self.updates {
let key_name = Key::new("key");
visitor.visit_field(key_name, k);
match v {
Update::Removed => {
let key = Key::new("update_removed");
visitor.visit_field(key, &());
}
Update::Set(v) => {
if let Some(v) = v.try_read() {
let key = Key::new("update_set");
visitor.visit_field(key, v.deref());
}
}
}
}
visitor.exit();
}
}
impl<W, C2> ReplaceContext<C2> for ReentrantByteCollectionView<W::Context, W>
where
W: View + ReplaceContext<C2>,
C2: Context,
{
type Target = ReentrantByteCollectionView<C2, <W as ReplaceContext<C2>>::Target>;
async fn with_context(
&mut self,
ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
) -> Self::Target {
let mut updates: BTreeMap<_, Update<Arc<RwLock<W::Target>>>> = BTreeMap::new();
for (key, update) in &self.updates {
let new_value = match update {
Update::Removed => Update::Removed,
Update::Set(x) => Update::Set(Arc::new(RwLock::new(
x.write().await.with_context(ctx.clone()).await,
))),
};
updates.insert(key.clone(), new_value);
}
ReentrantByteCollectionView {
context: ctx(&self.context),
delete_storage_first: self.delete_storage_first,
updates,
}
}
}
/// We need to find new base keys in order to implement the collection view.
/// We do this by appending a value to the base key.
///
/// Sub-views in a collection share a common key prefix, like in other view types. However,
/// just concatenating the shared prefix with sub-view keys makes it impossible to distinguish if a
/// given key belongs to a child sub-view or a grandchild sub-view (consider for example if a
/// collection is stored inside the collection).
#[repr(u8)]
enum KeyTag {
/// Prefix for specifying an index and serves to indicate the existence of an entry in the collection.
Index = MIN_VIEW_TAG,
/// Prefix for specifying as the prefix for the sub-view.
Subview,
}
impl<W: View> View for ReentrantByteCollectionView<W::Context, W> {
const NUM_INIT_KEYS: usize = 0;
type Context = W::Context;
fn context(&self) -> Self::Context {
self.context.clone()
}
fn pre_load(_context: &Self::Context) -> Result<Vec<Vec<u8>>, ViewError> {
Ok(Vec::new())
}
fn post_load(context: Self::Context, _values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
Ok(Self {
context,
delete_storage_first: false,
updates: BTreeMap::new(),
})
}
fn rollback(&mut self) {
self.delete_storage_first = false;
self.updates.clear();
}
async fn has_pending_changes(&self) -> bool {
if self.delete_storage_first {
return true;
}
!self.updates.is_empty()
}
fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
let mut delete_view = false;
if self.delete_storage_first {
delete_view = true;
batch.delete_key_prefix(self.context.base_key().bytes.clone());
for (index, update) in &self.updates {
if let Update::Set(view) = update {
let view = view
.try_read()
.ok_or_else(|| ViewError::TryLockError(index.clone()))?;
view.pre_save(batch)?;
self.add_index(batch, index);
delete_view = false;
}
}
} else {
for (index, update) in &self.updates {
match update {
Update::Set(view) => {
let view = view
.try_read()
.ok_or_else(|| ViewError::TryLockError(index.clone()))?;
view.pre_save(batch)?;
self.add_index(batch, index);
}
Update::Removed => {
let key_subview = self.get_subview_key(index);
let key_index = self.get_index_key(index);
batch.delete_key(key_index);
batch.delete_key_prefix(key_subview);
}
}
}
}
Ok(delete_view)
}
fn post_save(&mut self) {
for (_index, update) in mem::take(&mut self.updates) {
if let Update::Set(view) = update {
let mut view = view.try_write().expect("pre_save was called before");
view.post_save();
}
}
self.delete_storage_first = false;
}
fn clear(&mut self) {
self.delete_storage_first = true;
self.updates.clear();
}
}
impl<W: ClonableView> ClonableView for ReentrantByteCollectionView<W::Context, W> {
fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
let cloned_updates = self
.updates
.iter()
.map(|(key, value)| {
let cloned_value = match value {
Update::Removed => Update::Removed,
Update::Set(view_lock) => {
let mut view = view_lock
.try_write()
.ok_or_else(|| ViewError::TryLockError(key.clone()))?;
Update::Set(Arc::new(RwLock::new(view.clone_unchecked()?)))
}
};
Ok::<_, ViewError>((key.clone(), cloned_value))
})
.collect::<Result<_, _>>()?;
Ok(ReentrantByteCollectionView {
context: self.context.clone(),
delete_storage_first: self.delete_storage_first,
updates: cloned_updates,
})
}
}
impl<C: Context, W> ReentrantByteCollectionView<C, W> {
fn get_index_key(&self, index: &[u8]) -> Vec<u8> {
self.context
.base_key()
.base_tag_index(KeyTag::Index as u8, index)
}
fn get_subview_key(&self, index: &[u8]) -> Vec<u8> {
self.context
.base_key()
.base_tag_index(KeyTag::Subview as u8, index)
}
fn add_index(&self, batch: &mut Batch, index: &[u8]) {
let key = self.get_index_key(index);
batch.put_key_value_bytes(key, vec![]);
}
}
impl<W: View> ReentrantByteCollectionView<W::Context, W> {
/// Reads the view and if missing returns the default view
async fn wrapped_view(
context: &W::Context,
delete_storage_first: bool,
short_key: &[u8],
) -> Result<Arc<RwLock<W>>, ViewError> {
let key = context
.base_key()
.base_tag_index(KeyTag::Subview as u8, short_key);
let context = context.clone_with_base_key(key);
// Obtain a view and set its pending state to the default (e.g. empty) state
let view = if delete_storage_first {
W::new(context)?
} else {
W::load(context).await?
};
Ok(Arc::new(RwLock::new(view)))
}
/// Load the view and insert it into the updates if needed.
/// If the entry is missing, then it is set to default.
async fn try_load_view_mut(&mut self, short_key: &[u8]) -> Result<Arc<RwLock<W>>, ViewError> {
use btree_map::Entry::*;
Ok(match self.updates.entry(short_key.to_owned()) {
Occupied(mut entry) => match entry.get_mut() {
Update::Set(view) => view.clone(),
entry @ Update::Removed => {
let wrapped_view = Self::wrapped_view(&self.context, true, short_key).await?;
*entry = Update::Set(wrapped_view.clone());
wrapped_view
}
},
Vacant(entry) => {
let wrapped_view =
Self::wrapped_view(&self.context, self.delete_storage_first, short_key).await?;
entry.insert(Update::Set(wrapped_view.clone()));
wrapped_view
}
})
}
/// Load the view from the update is available.
/// If missing, then the entry is loaded from storage and if
/// missing there an error is reported.
async fn try_load_view(&self, short_key: &[u8]) -> Result<Option<Arc<RwLock<W>>>, ViewError> {
Ok(if let Some(entry) = self.updates.get(short_key) {
match entry {
Update::Set(view) => Some(view.clone()),
_entry @ Update::Removed => None,
}
} else if self.delete_storage_first {
None
} else {
let key_index = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, short_key);
if self.context.store().contains_key(&key_index).await? {
let view = Self::wrapped_view(&self.context, false, short_key).await?;
Some(view)
} else {
None
}
})
}
/// Loads a subview for the data at the given index in the collection. If an entry
/// is absent then a default entry is added to the collection. The resulting view
/// can be modified.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// let subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// let value = subview.get();
/// assert_eq!(*value, String::default());
/// # })
/// ```
pub async fn try_load_entry_mut(
&mut self,
short_key: &[u8],
) -> Result<WriteGuardedView<W>, ViewError> {
Ok(WriteGuardedView(
self.try_load_view_mut(short_key)
.await?
.try_write_arc()
.ok_or_else(|| ViewError::TryLockError(short_key.to_vec()))?,
))
}
/// Loads a subview at the given index in the collection and gives read-only access to the data.
/// If an entry is absent then `None` is returned.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// {
/// let _subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// }
/// let subview = view.try_load_entry(&[0, 1]).await.unwrap().unwrap();
/// let value = subview.get();
/// assert_eq!(*value, String::default());
/// # })
/// ```
pub async fn try_load_entry(
&self,
short_key: &[u8],
) -> Result<Option<ReadGuardedView<W>>, ViewError> {
match self.try_load_view(short_key).await? {
None => Ok(None),
Some(view) => Ok(Some(ReadGuardedView(
view.try_read_arc()
.ok_or_else(|| ViewError::TryLockError(short_key.to_vec()))?,
))),
}
}
/// Returns `true` if the collection contains a value for the specified key.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// let _subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// assert!(view.contains_key(&[0, 1]).await.unwrap());
/// assert!(!view.contains_key(&[0, 2]).await.unwrap());
/// # })
/// ```
pub async fn contains_key(&self, short_key: &[u8]) -> Result<bool, ViewError> {
Ok(if let Some(entry) = self.updates.get(short_key) {
match entry {
Update::Set(_view) => true,
Update::Removed => false,
}
} else if self.delete_storage_first {
false
} else {
let key_index = self
.context
.base_key()
.base_tag_index(KeyTag::Index as u8, short_key);
self.context.store().contains_key(&key_index).await?
})
}
/// Removes an entry. If absent then nothing happens.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// let mut subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// let value = subview.get_mut();
/// assert_eq!(*value, String::default());
/// view.remove_entry(vec![0, 1]);
/// let keys = view.keys().await.unwrap();
/// assert_eq!(keys.len(), 0);
/// # })
/// ```
pub fn remove_entry(&mut self, short_key: Vec<u8>) {
if self.delete_storage_first {
// Optimization: No need to mark `short_key` for deletion as we are going to remove all the keys at once.
self.updates.remove(&short_key);
} else {
self.updates.insert(short_key, Update::Removed);
}
}
/// Marks the entry so that it is removed in the next flush.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// {
/// let mut subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// let value = subview.get_mut();
/// *value = String::from("Hello");
/// }
/// view.try_reset_entry_to_default(&[0, 1]).unwrap();
/// let mut subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// let value = subview.get_mut();
/// assert_eq!(*value, String::default());
/// # })
/// ```
pub fn try_reset_entry_to_default(&mut self, short_key: &[u8]) -> Result<(), ViewError> {
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Subview as u8, short_key);
let context = self.context.clone_with_base_key(key);
let view = W::new(context)?;
let view = Arc::new(RwLock::new(view));
let view = Update::Set(view);
self.updates.insert(short_key.to_vec(), view);
Ok(())
}
/// Gets the extra data.
pub fn extra(&self) -> &<W::Context as Context>::Extra {
self.context.extra()
}
}
impl<W: View> ReentrantByteCollectionView<W::Context, W> {
/// Loads multiple entries for writing at once.
/// The entries in `short_keys` have to be all distinct.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// {
/// let mut subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// *subview.get_mut() = "Bonjour".to_string();
/// }
/// let short_keys = vec![vec![0, 1], vec![2, 3]];
/// let subviews = view.try_load_entries_mut(short_keys).await.unwrap();
/// let value1 = subviews[0].get();
/// let value2 = subviews[1].get();
/// assert_eq!(*value1, "Bonjour".to_string());
/// assert_eq!(*value2, String::default());
/// # })
/// ```
pub async fn try_load_entries_mut(
&mut self,
short_keys: Vec<Vec<u8>>,
) -> Result<Vec<WriteGuardedView<W>>, ViewError> {
let mut short_keys_to_load = Vec::new();
let mut keys = Vec::new();
for short_key in &short_keys {
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Subview as u8, short_key);
let context = self.context.clone_with_base_key(key);
match self.updates.entry(short_key.to_vec()) {
btree_map::Entry::Occupied(mut entry) => {
if let Update::Removed = entry.get() {
let view = W::new(context)?;
let view = Arc::new(RwLock::new(view));
entry.insert(Update::Set(view));
}
}
btree_map::Entry::Vacant(entry) => {
if self.delete_storage_first {
let view = W::new(context)?;
let view = Arc::new(RwLock::new(view));
entry.insert(Update::Set(view));
} else {
keys.extend(W::pre_load(&context)?);
short_keys_to_load.push(short_key.to_vec());
}
}
}
}
let values = self.context.store().read_multi_values_bytes(&keys).await?;
for (loaded_values, short_key) in values
.chunks_exact_or_repeat(W::NUM_INIT_KEYS)
.zip(short_keys_to_load)
{
let key = self
.context
.base_key()
.base_tag_index(KeyTag::Subview as u8, &short_key);
let context = self.context.clone_with_base_key(key);
let view = W::post_load(context, loaded_values)?;
let wrapped_view = Arc::new(RwLock::new(view));
self.updates
.insert(short_key.to_vec(), Update::Set(wrapped_view));
}
short_keys
.into_iter()
.map(|short_key| {
let Some(Update::Set(view)) = self.updates.get(&short_key) else {
unreachable!()
};
Ok(WriteGuardedView(
view.clone()
.try_write_arc()
.ok_or_else(|| ViewError::TryLockError(short_key))?,
))
})
.collect()
}
/// Loads multiple entries for writing at once with their keys.
/// The entries in short_keys have to be all distinct.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
/// ReentrantByteCollectionView::load(context).await.unwrap();
/// {
/// let mut subview = view.try_load_entry_mut(&vec![0, 1]).await.unwrap();
/// *subview.get_mut() = "Bonjour".to_string();
/// }
/// let short_keys = vec![vec![0, 1], vec![2, 3]];
/// let subviews = view.try_load_entries_pairs_mut(short_keys).await.unwrap();
/// let value1 = subviews[0].1.get();
/// let value2 = subviews[1].1.get();
/// assert_eq!(*value1, "Bonjour".to_string());
/// assert_eq!(*value2, String::default());
/// # })
/// ```
pub async fn try_load_entries_pairs_mut(
&mut self,
short_keys: Vec<Vec<u8>>,
) -> Result<Vec<(Vec<u8>, WriteGuardedView<W>)>, ViewError> {
let values = self.try_load_entries_mut(short_keys.clone()).await?;
Ok(short_keys.into_iter().zip(values).collect())
}
/// Loads multiple entries for reading at once.
/// The entries in `short_keys` have to be all distinct.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
///     ReentrantByteCollectionView::load(context).await.unwrap();
/// {
///     let _subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// }
/// let short_keys = vec![vec![0, 1], vec![2, 3]];
/// let subviews = view.try_load_entries(short_keys).await.unwrap();
/// assert!(subviews[1].is_none());
/// let value0 = subviews[0].as_ref().unwrap().get();
/// assert_eq!(*value0, String::default());
/// # })
/// ```
pub async fn try_load_entries(
    &self,
    short_keys: Vec<Vec<u8>>,
) -> Result<Vec<Option<ReadGuardedView<W>>>, ViewError> {
    // One slot per requested key; `None` means the entry does not exist.
    let mut results = vec![None; short_keys.len()];
    // Keys whose presence in storage must still be checked, with the
    // bookkeeping needed to place their views back into `results`.
    let mut keys_to_check = Vec::new();
    let mut keys_to_check_metadata = Vec::new();
    for (position, short_key) in short_keys.into_iter().enumerate() {
        if let Some(update) = self.updates.get(&short_key) {
            // A staged removal leaves the slot as `None`.
            if let Update::Set(view) = update {
                results[position] = Some((short_key, view.clone()));
            }
        } else if !self.delete_storage_first {
            let key_index = self
                .context
                .base_key()
                .base_tag_index(KeyTag::Index as u8, &short_key);
            keys_to_check.push(key_index);
            keys_to_check_metadata.push((position, short_key));
        }
    }
    // Keep only the keys that actually exist in storage.
    let found_keys = self.context.store().contains_keys(&keys_to_check).await?;
    let entries_to_load = keys_to_check_metadata
        .into_iter()
        .zip(found_keys)
        .filter_map(|(metadata, found)| found.then_some(metadata))
        .map(|(position, short_key)| {
            let subview_key = self
                .context
                .base_key()
                .base_tag_index(KeyTag::Subview as u8, &short_key);
            let subview_context = self.context.clone_with_base_key(subview_key);
            // `short_key` is already owned here; the former `.to_owned()`
            // was a redundant clone of the whole key.
            (position, short_key, subview_context)
        })
        .collect::<Vec<_>>();
    if !entries_to_load.is_empty() {
        // Batch-read the initialization keys of all missing subviews at once.
        let mut keys_to_load = Vec::with_capacity(entries_to_load.len() * W::NUM_INIT_KEYS);
        for (_, _, context) in &entries_to_load {
            keys_to_load.extend(W::pre_load(context)?);
        }
        let values = self
            .context
            .store()
            .read_multi_values_bytes(&keys_to_load)
            .await?;
        for (loaded_values, (position, short_key, context)) in values
            .chunks_exact_or_repeat(W::NUM_INIT_KEYS)
            .zip(entries_to_load)
        {
            let view = W::post_load(context, loaded_values)?;
            let wrapped_view = Arc::new(RwLock::new(view));
            results[position] = Some((short_key, wrapped_view));
        }
    }
    // Take a read guard on every found view; fail if any is write-locked.
    results
        .into_iter()
        .map(|maybe_view| match maybe_view {
            Some((short_key, view)) => Ok(Some(ReadGuardedView(
                view.try_read_arc()
                    .ok_or_else(|| ViewError::TryLockError(short_key))?,
            ))),
            None => Ok(None),
        })
        .collect()
}
/// Loads multiple entries for reading at once, paired with their keys.
/// The entries in `short_keys` have to be all distinct.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
///     ReentrantByteCollectionView::load(context).await.unwrap();
/// {
///     let _subview = view.try_load_entry_mut(&vec![0, 1]).await.unwrap();
/// }
/// let short_keys = vec![vec![0, 1], vec![0, 2]];
/// let subviews = view.try_load_entries_pairs(short_keys).await.unwrap();
/// assert!(subviews[1].1.is_none());
/// let value0 = subviews[0].1.as_ref().unwrap().get();
/// assert_eq!(*value0, String::default());
/// # })
/// ```
pub async fn try_load_entries_pairs(
    &self,
    short_keys: Vec<Vec<u8>>,
) -> Result<Vec<(Vec<u8>, Option<ReadGuardedView<W>>)>, ViewError> {
    // Delegate the actual loading; results are positional, so zipping the
    // keys back in restores the association.
    let guarded_views = self.try_load_entries(short_keys.clone()).await?;
    let pairs = short_keys.into_iter().zip(guarded_views);
    Ok(pairs.collect())
}
/// Loads all the entries for reading at once.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::reentrant_collection_view::ReentrantByteCollectionView;
/// # use linera_views::register_view::RegisterView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut view: ReentrantByteCollectionView<_, RegisterView<_, String>> =
///     ReentrantByteCollectionView::load(context).await.unwrap();
/// {
///     let _subview = view.try_load_entry_mut(&[0, 1]).await.unwrap();
/// }
/// let subviews = view.try_load_all_entries().await.unwrap();
/// assert_eq!(subviews.len(), 1);
/// # })
/// ```
pub async fn try_load_all_entries(
    &self,
) -> Result<Vec<(Vec<u8>, ReadGuardedView<W>)>, ViewError> {
    let short_keys = self.keys().await?;
    let mut loaded_views = vec![None; short_keys.len()];
    // Load views that are not in updates and not deleted.
    if !self.delete_storage_first {
        let mut keys = Vec::new();
        // Keep each subview's context together with its position so the
        // subview key does not have to be re-derived (and the short key
        // cloned) when the loaded values are processed below.
        let mut contexts_and_indexes = Vec::new();
        for (index, short_key) in short_keys.iter().enumerate() {
            if !self.updates.contains_key(short_key) {
                let key = self
                    .context
                    .base_key()
                    .base_tag_index(KeyTag::Subview as u8, short_key);
                let context = self.context.clone_with_base_key(key);
                keys.extend(W::pre_load(&context)?);
                contexts_and_indexes.push((context, index));
            }
        }
        let values = self.context.store().read_multi_values_bytes(&keys).await?;
        for (loaded_values, (context, index)) in values
            .chunks_exact_or_repeat(W::NUM_INIT_KEYS)
            .zip(contexts_and_indexes)
        {
            let view = W::post_load(context, loaded_values)?;
            let wrapped_view = Arc::new(RwLock::new(view));
            loaded_views[index] = Some(wrapped_view);
        }
    }
    // Create the result from staged updates and freshly loaded views.
    short_keys
        .into_iter()
        .zip(loaded_views)
        .map(|(short_key, loaded_view)| {
            let view = if let Some(Update::Set(view)) = self.updates.get(&short_key) {
                view.clone()
            } else if let Some(view) = loaded_view {
                view
            } else {
                unreachable!("All entries should have been loaded into memory");
            };
            let guard = ReadGuardedView(
                view.try_read_arc()
                    .ok_or_else(|| ViewError::TryLockError(short_key.clone()))?,
            );
            Ok((short_key, guard))
        })
        .collect()
}
/// Loads all the entries for writing at once.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/log_view.rs | linera-views/src/views/log_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::BTreeMap,
ops::{Bound, Range, RangeBounds},
};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{from_bytes_option_or_default, HasherOutput},
context::Context,
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;

    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;

    /// The runtime of hash computation
    pub static LOG_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "log_view_hash_runtime",
            "LogView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}

/// Key tags to create the sub-keys of a `LogView` on top of the base key.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the storing of the variable `stored_count`.
    Count = MIN_VIEW_TAG,
    /// Prefix for the indices of the log.
    Index,
}

/// A view that supports logging values of type `T`.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, T: Allocative")]
pub struct LogView<C, T> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// Whether to clear storage before applying updates.
    /// Set by `clear()`; the whole base-key prefix is deleted at save time.
    delete_storage_first: bool,
    /// The number of entries persisted in storage.
    stored_count: usize,
    /// New values not yet persisted to storage.
    new_values: Vec<T>,
}
impl<C, T> View for LogView<C, T>
where
    C: Context,
    T: Send + Sync + Serialize,
{
    const NUM_INIT_KEYS: usize = 1;

    type Context = C;

    fn context(&self) -> C {
        self.context.clone()
    }

    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        // Only the persisted entry count is needed to initialize the view.
        Ok(vec![context.base_key().base_tag(KeyTag::Count as u8)])
    }

    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // A missing stored value deserializes to the default count of 0.
        let stored_count =
            from_bytes_option_or_default(values.first().ok_or(ViewError::PostLoadValuesError)?)?;
        Ok(Self {
            context,
            delete_storage_first: false,
            stored_count,
            new_values: Vec::new(),
        })
    }

    fn rollback(&mut self) {
        self.delete_storage_first = false;
        self.new_values.clear();
    }

    async fn has_pending_changes(&self) -> bool {
        if self.delete_storage_first {
            return true;
        }
        !self.new_values.is_empty()
    }

    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        if self.delete_storage_first {
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            delete_view = true;
        }
        if !self.new_values.is_empty() {
            // New values keep the view alive even if storage was cleared.
            delete_view = false;
            // BUGFIX: when the stored prefix is being cleared, the new values
            // restart at index 0. Starting from `self.stored_count` after a
            // clear would write the entries at stale indices and persist an
            // inflated count, contradicting `count()` and `post_save`.
            let mut count = if self.delete_storage_first {
                0
            } else {
                self.stored_count
            };
            for value in &self.new_values {
                let key = self
                    .context
                    .base_key()
                    .derive_tag_key(KeyTag::Index as u8, &count)?;
                batch.put_key_value(key, value)?;
                count += 1;
            }
            let key = self.context.base_key().base_tag(KeyTag::Count as u8);
            batch.put_key_value(key, &count)?;
        }
        Ok(delete_view)
    }

    fn post_save(&mut self) {
        // Mirror the effects of `pre_save` on the in-memory state.
        if self.delete_storage_first {
            self.stored_count = 0;
        }
        self.stored_count += self.new_values.len();
        self.new_values.clear();
        self.delete_storage_first = false;
    }

    fn clear(&mut self) {
        self.delete_storage_first = true;
        self.new_values.clear();
    }
}
impl<C, T> ClonableView for LogView<C, T>
where
    C: Context,
    T: Clone + Send + Sync + Serialize,
{
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        // The clone shares no buffers with the original: pending values are
        // duplicated and the flush bookkeeping is copied verbatim.
        let Self {
            context,
            delete_storage_first,
            stored_count,
            new_values,
        } = self;
        Ok(Self {
            context: context.clone(),
            delete_storage_first: *delete_storage_first,
            stored_count: *stored_count,
            new_values: new_values.clone(),
        })
    }
}
impl<C, T> LogView<C, T>
where
    C: Context,
{
    /// Pushes a value to the end of the log.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// # })
    /// ```
    pub fn push(&mut self, value: T) {
        self.new_values.push(value);
    }

    /// Reads the size of the log.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// log.push(42);
    /// assert_eq!(log.count(), 2);
    /// # })
    /// ```
    pub fn count(&self) -> usize {
        // When a clear is pending, the persisted entries no longer count.
        let persisted = if self.delete_storage_first {
            0
        } else {
            self.stored_count
        };
        persisted + self.new_values.len()
    }

    /// Obtains the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.context.extra()
    }
}
impl<C, T> LogView<C, T>
where
    C: Context,
    T: Clone + DeserializeOwned + Serialize + Send + Sync,
{
    /// Reads the logged value with the given index (including staged ones).
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// assert_eq!(log.get(0).await.unwrap(), Some(34));
    /// # })
    /// ```
    pub async fn get(&self, index: usize) -> Result<Option<T>, ViewError> {
        let value = if self.delete_storage_first {
            // A pending clear means only staged values are visible.
            self.new_values.get(index).cloned()
        } else if index < self.stored_count {
            // Index falls in the persisted range: read it from storage.
            let key = self
                .context
                .base_key()
                .derive_tag_key(KeyTag::Index as u8, &index)?;
            self.context.store().read_value(&key).await?
        } else {
            // Otherwise the index refers to a staged (not yet saved) value.
            self.new_values.get(index - self.stored_count).cloned()
        };
        Ok(value)
    }

    /// Reads several logged keys (including staged ones)
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// log.push(42);
    /// assert_eq!(
    ///     log.multi_get(vec![0, 1]).await.unwrap(),
    ///     vec![Some(34), Some(42)]
    /// );
    /// # })
    /// ```
    pub async fn multi_get(&self, indices: Vec<usize>) -> Result<Vec<Option<T>>, ViewError> {
        let mut result = Vec::new();
        if self.delete_storage_first {
            for index in indices {
                result.push(self.new_values.get(index).cloned());
            }
        } else {
            // Map each persisted index to all positions in `result` that need
            // it, so duplicate indices are read from storage only once.
            let mut index_to_positions = BTreeMap::<usize, Vec<usize>>::new();
            for (pos, index) in indices.into_iter().enumerate() {
                if index < self.stored_count {
                    index_to_positions.entry(index).or_default().push(pos);
                    // Placeholder; overwritten below once the value is read.
                    result.push(None);
                } else {
                    result.push(self.new_values.get(index - self.stored_count).cloned());
                }
            }
            let mut keys = Vec::new();
            let mut vec_positions = Vec::new();
            for (index, positions) in index_to_positions {
                let key = self
                    .context
                    .base_key()
                    .derive_tag_key(KeyTag::Index as u8, &index)?;
                keys.push(key);
                vec_positions.push(positions);
            }
            let values = self.context.store().read_multi_values(&keys).await?;
            for (positions, value) in vec_positions.into_iter().zip(values) {
                // Clone into all but the last position, then move the value.
                if let Some((&last, rest)) = positions.split_last() {
                    for &position in rest {
                        *result.get_mut(position).unwrap() = value.clone();
                    }
                    *result.get_mut(last).unwrap() = value;
                }
            }
        }
        Ok(result)
    }

    /// Reads the index-value pairs at the given positions.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// log.push(42);
    /// assert_eq!(
    ///     log.multi_get_pairs(vec![0, 1, 5]).await.unwrap(),
    ///     vec![(0, Some(34)), (1, Some(42)), (5, None)]
    /// );
    /// # })
    /// ```
    pub async fn multi_get_pairs(
        &self,
        indices: Vec<usize>,
    ) -> Result<Vec<(usize, Option<T>)>, ViewError> {
        let values = self.multi_get(indices.clone()).await?;
        Ok(indices.into_iter().zip(values).collect())
    }

    /// Reads the persisted values in `range` from storage.
    /// Every index in `range` must exist; a missing entry is an error.
    async fn read_context(&self, range: Range<usize>) -> Result<Vec<T>, ViewError> {
        let count = range.len();
        let mut keys = Vec::with_capacity(count);
        for index in range {
            let key = self
                .context
                .base_key()
                .derive_tag_key(KeyTag::Index as u8, &index)?;
            keys.push(key);
        }
        let mut values = Vec::with_capacity(count);
        for entry in self.context.store().read_multi_values(&keys).await? {
            match entry {
                None => {
                    return Err(ViewError::MissingEntries("LogView".into()));
                }
                Some(value) => values.push(value),
            }
        }
        Ok(values)
    }

    /// Reads the logged values in the given range (including staged ones).
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::log_view::LogView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut log = LogView::load(context).await.unwrap();
    /// log.push(34);
    /// log.push(42);
    /// log.push(56);
    /// assert_eq!(log.read(0..2).await.unwrap(), vec![34, 42]);
    /// # })
    /// ```
    pub async fn read<R>(&self, range: R) -> Result<Vec<T>, ViewError>
    where
        R: RangeBounds<usize>,
    {
        // Entries scheduled for deletion are not visible.
        let effective_stored_count = if self.delete_storage_first {
            0
        } else {
            self.stored_count
        };
        // Normalize the bounds into a half-open range clamped to the log size.
        let end = match range.end_bound() {
            Bound::Included(end) => *end + 1,
            Bound::Excluded(end) => *end,
            Bound::Unbounded => self.count(),
        }
        .min(self.count());
        let start = match range.start_bound() {
            Bound::Included(start) => *start,
            Bound::Excluded(start) => *start + 1,
            Bound::Unbounded => 0,
        };
        if start >= end {
            return Ok(Vec::new());
        }
        if start < effective_stored_count {
            if end <= effective_stored_count {
                // Entirely within the persisted range.
                self.read_context(start..end).await
            } else {
                // Persisted prefix followed by staged values.
                let mut values = self.read_context(start..effective_stored_count).await?;
                values.extend(
                    self.new_values[0..(end - effective_stored_count)]
                        .iter()
                        .cloned(),
                );
                Ok(values)
            }
        } else {
            // Entirely within the staged values.
            Ok(
                self.new_values[(start - effective_stored_count)..(end - effective_stored_count)]
                    .to_vec(),
            )
        }
    }
}
impl<C, T> HashableView for LogView<C, T>
where
    C: Context,
    T: Send + Sync + Clone + Serialize + DeserializeOwned,
{
    type Hasher = sha3::Sha3_256;

    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        // Hashing does not mutate the view; delegate to the shared path.
        self.hash().await
    }

    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::LOG_VIEW_HASH_RUNTIME.measure_latency();
        // The hash is computed over the BCS serialization of all elements,
        // staged values included.
        let elements = self.read(..).await?;
        let mut hasher = sha3::Sha3_256::default();
        hasher.update_with_bcs_bytes(&elements)?;
        Ok(hasher.finalize())
    }
}
/// Type wrapping `LogView` while memoizing the hash.
pub type HashedLogView<C, T> = WrappedHashableContainerView<C, LogView<C, T>, HasherOutput>;

/// Wrapper around `LogView` to compute hashes based on the history of changes.
pub type HistoricallyHashedLogView<C, T> = HistoricallyHashableView<C, LogView<C, T>>;

#[cfg(not(web))]
mod graphql {
    use std::borrow::Cow;

    use super::LogView;
    use crate::{
        context::Context,
        graphql::{hash_name, mangle},
    };

    impl<C: Send + Sync, T: async_graphql::OutputType> async_graphql::TypeName for LogView<C, T> {
        fn type_name() -> Cow<'static, str> {
            // Encode the element type (and a hash of it) into the GraphQL type
            // name so distinct instantiations do not collide.
            format!(
                "LogView_{}_{:08x}",
                mangle(T::type_name()),
                hash_name::<T>()
            )
            .into()
        }
    }

    #[async_graphql::Object(cache_control(no_cache), name_type)]
    impl<C: Context, T: async_graphql::OutputType> LogView<C, T>
    where
        T: serde::ser::Serialize + serde::de::DeserializeOwned + Clone + Send + Sync,
    {
        #[graphql(derived(name = "count"))]
        async fn count_(&self) -> Result<u32, async_graphql::Error> {
            Ok(self.count() as u32)
        }

        // Returns the elements in `start..end`, defaulting to the whole log.
        async fn entries(
            &self,
            start: Option<usize>,
            end: Option<usize>,
        ) -> async_graphql::Result<Vec<T>> {
            Ok(self
                .read(start.unwrap_or_default()..end.unwrap_or_else(|| self.count()))
                .await?)
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/queue_view.rs | linera-views/src/views/queue_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{vec_deque::IterMut, VecDeque},
ops::Range,
};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::visit_allocative_simple;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{from_bytes_option_or_default, HasherOutput},
context::Context,
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, View, ViewError, MIN_VIEW_TAG},
};
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;

    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;

    /// The runtime of hash computation
    pub static QUEUE_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "queue_view_hash_runtime",
            "QueueView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}

/// Key tags to create the sub-keys of a `QueueView` on top of the base key.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the storing of the variable `stored_indices`.
    Store = MIN_VIEW_TAG,
    /// Prefix for the indices of the queue.
    Index,
}

/// A view that supports a FIFO queue for values of type `T`.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, T: Allocative")]
pub struct QueueView<C, T> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// The range of indices for entries persisted in storage.
    #[allocative(visit = visit_allocative_simple)]
    stored_indices: Range<usize>,
    /// The number of entries to delete from the front.
    /// Deletions of persisted entries are deferred until save time.
    front_delete_count: usize,
    /// Whether to clear storage before applying updates.
    delete_storage_first: bool,
    /// New values added to the back, not yet persisted to storage.
    new_back_values: VecDeque<T>,
}
impl<C, T> View for QueueView<C, T>
where
    C: Context,
    T: Serialize + Send + Sync,
{
    const NUM_INIT_KEYS: usize = 1;

    type Context = C;

    fn context(&self) -> C {
        self.context.clone()
    }

    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        // Only the stored index range is needed to initialize the view.
        Ok(vec![context.base_key().base_tag(KeyTag::Store as u8)])
    }

    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // A missing stored value deserializes to the default (empty) range.
        let stored_indices =
            from_bytes_option_or_default(values.first().ok_or(ViewError::PostLoadValuesError)?)?;
        Ok(Self {
            context,
            stored_indices,
            front_delete_count: 0,
            delete_storage_first: false,
            new_back_values: VecDeque::new(),
        })
    }

    fn rollback(&mut self) {
        self.delete_storage_first = false;
        self.front_delete_count = 0;
        self.new_back_values.clear();
    }

    async fn has_pending_changes(&self) -> bool {
        if self.delete_storage_first {
            return true;
        }
        if self.front_delete_count > 0 {
            return true;
        }
        !self.new_back_values.is_empty()
    }

    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        if self.delete_storage_first {
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            delete_view = true;
        }
        let mut new_stored_indices = self.stored_indices.clone();
        if self.stored_count() == 0 {
            // No persisted entry survives: drop all index entries and restart
            // the stored range from zero.
            let key_prefix = self.context.base_key().base_tag(KeyTag::Index as u8);
            batch.delete_key_prefix(key_prefix);
            new_stored_indices = Range::default();
        } else if self.front_delete_count > 0 {
            // Delete exactly the first `front_delete_count` persisted entries.
            let deletion_range = self.stored_indices.clone().take(self.front_delete_count);
            new_stored_indices.start += self.front_delete_count;
            for index in deletion_range {
                let key = self
                    .context
                    .base_key()
                    .derive_tag_key(KeyTag::Index as u8, &index)?;
                batch.delete_key(key);
            }
        }
        if !self.new_back_values.is_empty() {
            // New back values keep the view alive even if storage was cleared.
            delete_view = false;
            for value in &self.new_back_values {
                let key = self
                    .context
                    .base_key()
                    .derive_tag_key(KeyTag::Index as u8, &new_stored_indices.end)?;
                batch.put_key_value(key, value)?;
                new_stored_indices.end += 1;
            }
        }
        if !self.delete_storage_first || !new_stored_indices.is_empty() {
            // Persist the updated range, unless the view is being deleted and
            // ends up empty.
            let key = self.context.base_key().base_tag(KeyTag::Store as u8);
            batch.put_key_value(key, &new_stored_indices)?;
        }
        Ok(delete_view)
    }

    fn post_save(&mut self) {
        // Mirror the effects of `pre_save` on the in-memory state.
        if self.stored_count() == 0 {
            self.stored_indices = Range::default();
        } else if self.front_delete_count > 0 {
            self.stored_indices.start += self.front_delete_count;
        }
        if !self.new_back_values.is_empty() {
            self.stored_indices.end += self.new_back_values.len();
            self.new_back_values.clear();
        }
        self.front_delete_count = 0;
        self.delete_storage_first = false;
    }

    fn clear(&mut self) {
        self.delete_storage_first = true;
        self.new_back_values.clear();
    }
}
impl<C, T> ClonableView for QueueView<C, T>
where
    C: Context,
    T: Clone + Send + Sync + Serialize,
{
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        // The clone shares no buffers with the original: pending back values
        // are duplicated and the flush bookkeeping is copied verbatim.
        let Self {
            context,
            stored_indices,
            front_delete_count,
            delete_storage_first,
            new_back_values,
        } = self;
        Ok(Self {
            context: context.clone(),
            stored_indices: stored_indices.clone(),
            front_delete_count: *front_delete_count,
            delete_storage_first: *delete_storage_first,
            new_back_values: new_back_values.clone(),
        })
    }
}
impl<C, T> QueueView<C, T> {
    /// Number of entries still considered persisted: the stored range minus
    /// pending front deletions, or zero when a full clear is scheduled.
    fn stored_count(&self) -> usize {
        if self.delete_storage_first {
            return 0;
        }
        self.stored_indices.len() - self.front_delete_count
    }
}
impl<'a, C, T> QueueView<C, T>
where
    C: Context,
    T: Send + Sync + Clone + Serialize + DeserializeOwned,
{
    /// Reads the persisted value at absolute storage index `index`, if any.
    async fn get(&self, index: usize) -> Result<Option<T>, ViewError> {
        let key = self
            .context
            .base_key()
            .derive_tag_key(KeyTag::Index as u8, &index)?;
        Ok(self.context.store().read_value(&key).await?)
    }

    /// Reads the front value, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// assert_eq!(queue.front().await.unwrap(), Some(34));
    /// # })
    /// ```
    pub async fn front(&self) -> Result<Option<T>, ViewError> {
        let stored_remainder = self.stored_count();
        let value = if stored_remainder > 0 {
            // The logical front is the first persisted entry that is not
            // scheduled for front-deletion.
            self.get(self.stored_indices.end - stored_remainder).await?
        } else {
            self.new_back_values.front().cloned()
        };
        Ok(value)
    }

    /// Reads the back value, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// assert_eq!(queue.back().await.unwrap(), Some(42));
    /// # })
    /// ```
    pub async fn back(&self) -> Result<Option<T>, ViewError> {
        // Staged values sit behind the persisted ones, so they take priority.
        Ok(match self.new_back_values.back() {
            Some(value) => Some(value.clone()),
            None if self.stored_count() > 0 => self.get(self.stored_indices.end - 1).await?,
            _ => None,
        })
    }

    /// Deletes the front value, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34 as u128);
    /// queue.delete_front();
    /// assert_eq!(queue.elements().await.unwrap(), Vec::<u128>::new());
    /// # })
    /// ```
    pub fn delete_front(&mut self) {
        if self.stored_count() > 0 {
            // Persisted entries are removed lazily at save time.
            self.front_delete_count += 1;
        } else {
            // Staged entries can be dropped immediately.
            self.new_back_values.pop_front();
        }
    }

    /// Pushes a value to the end of the queue.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// assert_eq!(queue.elements().await.unwrap(), vec![34, 37]);
    /// # })
    /// ```
    pub fn push_back(&mut self, value: T) {
        self.new_back_values.push_back(value);
    }

    /// Reads the size of the queue.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// assert_eq!(queue.count(), 1);
    /// # })
    /// ```
    pub fn count(&self) -> usize {
        self.stored_count() + self.new_back_values.len()
    }

    /// Obtains the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.context.extra()
    }

    /// Reads the persisted values in `range` from storage.
    /// Every index in `range` must exist; a missing entry is an error.
    async fn read_context(&self, range: Range<usize>) -> Result<Vec<T>, ViewError> {
        let count = range.len();
        let mut keys = Vec::with_capacity(count);
        for index in range {
            let key = self
                .context
                .base_key()
                .derive_tag_key(KeyTag::Index as u8, &index)?;
            keys.push(key)
        }
        let mut values = Vec::with_capacity(count);
        for entry in self.context.store().read_multi_values(&keys).await? {
            match entry {
                None => {
                    return Err(ViewError::MissingEntries("QueueView".into()));
                }
                Some(value) => values.push(value),
            }
        }
        Ok(values)
    }

    /// Reads the `count` next values in the queue (including staged ones).
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// assert_eq!(queue.read_front(1).await.unwrap(), vec![34]);
    /// # })
    /// ```
    pub async fn read_front(&self, mut count: usize) -> Result<Vec<T>, ViewError> {
        if count > self.count() {
            count = self.count();
        }
        if count == 0 {
            return Ok(Vec::new());
        }
        let mut values = Vec::with_capacity(count);
        if !self.delete_storage_first {
            let stored_remainder = self.stored_count();
            // First surviving persisted index (past the front deletions).
            let start = self.stored_indices.end - stored_remainder;
            if count <= stored_remainder {
                values.extend(self.read_context(start..(start + count)).await?);
            } else {
                // Persisted remainder followed by staged back values.
                values.extend(self.read_context(start..self.stored_indices.end).await?);
                values.extend(
                    self.new_back_values
                        .range(0..(count - stored_remainder))
                        .cloned(),
                );
            }
        } else {
            values.extend(self.new_back_values.range(0..count).cloned());
        }
        Ok(values)
    }

    /// Reads the `count` last values in the queue (including staged ones).
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// assert_eq!(queue.read_back(1).await.unwrap(), vec![42]);
    /// # })
    /// ```
    pub async fn read_back(&self, mut count: usize) -> Result<Vec<T>, ViewError> {
        if count > self.count() {
            count = self.count();
        }
        if count == 0 {
            return Ok(Vec::new());
        }
        let mut values = Vec::with_capacity(count);
        let new_back_len = self.new_back_values.len();
        if count <= new_back_len || self.delete_storage_first {
            // The request is fully covered by the staged back values.
            values.extend(
                self.new_back_values
                    .range((new_back_len - count)..new_back_len)
                    .cloned(),
            );
        } else {
            // Persisted suffix followed by all staged back values.
            let start = self.stored_indices.end + new_back_len - count;
            values.extend(self.read_context(start..self.stored_indices.end).await?);
            values.extend(self.new_back_values.iter().cloned());
        }
        Ok(values)
    }

    /// Reads all the elements
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// assert_eq!(queue.elements().await.unwrap(), vec![34, 37]);
    /// # })
    /// ```
    pub async fn elements(&self) -> Result<Vec<T>, ViewError> {
        let count = self.count();
        self.read_front(count).await
    }

    /// Moves all persisted entries into `new_back_values` so they can be
    /// mutated in memory; schedules the persisted copies for deletion.
    async fn load_all(&mut self) -> Result<(), ViewError> {
        if !self.delete_storage_first {
            let stored_remainder = self.stored_count();
            let start = self.stored_indices.end - stored_remainder;
            let elements = self.read_context(start..self.stored_indices.end).await?;
            let shift = self.stored_indices.end - start;
            for elt in elements {
                self.new_back_values.push_back(elt);
            }
            // The persisted entries were appended after the staged ones;
            // rotate them to the front to restore FIFO order.
            self.new_back_values.rotate_right(shift);
            // All indices are being deleted at the next flush. This is because they are deleted either:
            // * Because a self.front_delete_count forces them to be removed
            // * Or because loading them means that their value can be changed which invalidates
            //   the entries on storage
            self.delete_storage_first = true;
        }
        Ok(())
    }

    /// Gets a mutable iterator on the entries of the queue
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::queue_view::QueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = QueueView::load(context).await.unwrap();
    /// queue.push_back(34);
    /// let mut iter = queue.iter_mut().await.unwrap();
    /// let value = iter.next().unwrap();
    /// *value = 42;
    /// assert_eq!(queue.elements().await.unwrap(), vec![42]);
    /// # })
    /// ```
    pub async fn iter_mut(&'a mut self) -> Result<IterMut<'a, T>, ViewError> {
        self.load_all().await?;
        Ok(self.new_back_values.iter_mut())
    }
}
impl<C, T> HashableView for QueueView<C, T>
where
    C: Context,
    T: Send + Sync + Clone + Serialize + DeserializeOwned,
{
    type Hasher = sha3::Sha3_256;
    // Nothing is cached here, so hashing mutably just delegates to `hash`.
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        self.hash().await
    }
    /// Hashes the BCS serialization of all current elements of the queue.
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::QUEUE_VIEW_HASH_RUNTIME.measure_latency();
        let elements = self.elements().await?;
        let mut hasher = sha3::Sha3_256::default();
        hasher.update_with_bcs_bytes(&elements)?;
        Ok(hasher.finalize())
    }
}
/// Type wrapping `QueueView` while memoizing the hash of its current contents.
pub type HashedQueueView<C, T> = WrappedHashableContainerView<C, QueueView<C, T>, HasherOutput>;
/// Wrapper around `QueueView` to compute hashes based on the history of changes.
pub type HistoricallyHashedQueueView<C, T> = HistoricallyHashableView<C, QueueView<C, T>>;
#[cfg(with_graphql)]
mod graphql {
    use std::borrow::Cow;
    use super::QueueView;
    use crate::{
        context::Context,
        graphql::{hash_name, mangle},
    };
    // GraphQL type names must be unique per element type; combine the mangled
    // element type name with a hash of it to avoid collisions.
    impl<C: Send + Sync, T: async_graphql::OutputType> async_graphql::TypeName for QueueView<C, T> {
        fn type_name() -> Cow<'static, str> {
            format!(
                "QueueView_{}_{:08x}",
                mangle(T::type_name()),
                hash_name::<T>()
            )
            .into()
        }
    }
    #[async_graphql::Object(cache_control(no_cache), name_type)]
    impl<C: Context, T: async_graphql::OutputType> QueueView<C, T>
    where
        T: serde::ser::Serialize + serde::de::DeserializeOwned + Clone + Send + Sync,
    {
        /// Number of elements currently in the queue.
        #[graphql(derived(name = "count"))]
        async fn count_(&self) -> Result<u32, async_graphql::Error> {
            // NOTE(review): `as u32` silently truncates for queues longer than
            // `u32::MAX` — presumably unreachable in practice; confirm.
            Ok(self.count() as u32)
        }
        /// Reads up to `count` elements from the front (all of them by default).
        async fn entries(&self, count: Option<usize>) -> async_graphql::Result<Vec<T>> {
            Ok(self
                .read_front(count.unwrap_or_else(|| self.count()))
                .await?)
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/map_view.rs | linera-views/src/views/map_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! The `MapView` implements a map that can be modified.
//!
//! This reproduces more or less the functionalities of the `BTreeMap`.
//! There are 3 different variants:
//! * The [`ByteMapView`][class1] whose keys are the `Vec<u8>` and the values are a serializable type `V`.
//! The ordering of the entries is via the lexicographic order of the keys.
//! * The [`MapView`][class2] whose keys are a serializable type `K` and the value a serializable type `V`.
//! The ordering is via the order of the BCS serialized keys.
//! * The [`CustomMapView`][class3] whose keys are a serializable type `K` and the value a serializable type `V`.
//! The ordering is via the order of the custom serialized keys.
//!
//! [class1]: map_view::ByteMapView
//! [class2]: map_view::MapView
//! [class3]: map_view::CustomMapView
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// Histogram of the time spent computing `MapView` hashes.
    pub static MAP_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "map_view_hash_runtime",
            "MapView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}
use std::{
borrow::{Borrow, Cow},
collections::{btree_map::Entry, BTreeMap},
marker::PhantomData,
};
use allocative::Allocative;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{
from_bytes_option, get_key_range_for_prefix, CustomSerialize, DeletionSet, HasherOutput,
SuffixClosedSetIterator, Update,
},
context::{BaseKey, Context},
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View, ViewError},
};
/// A view that supports inserting and removing values indexed by `Vec<u8>`.
///
/// In-memory `updates` and `deletion_set` take precedence over the persisted
/// state until the next flush.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, V: Allocative")]
pub struct ByteMapView<C, V> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// Tracks deleted key prefixes.
    deletion_set: DeletionSet,
    /// Pending changes (insertions and removals) not yet persisted to storage.
    updates: BTreeMap<Vec<u8>, Update<V>>,
}
impl<C: Context, C2: Context, V> ReplaceContext<C2> for ByteMapView<C, V>
where
    V: Send + Sync + Serialize + Clone,
{
    type Target = ByteMapView<C2, V>;
    /// Builds a copy of this view under a new context derived from the old one;
    /// pending updates and deletions are cloned unchanged.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        ByteMapView {
            context: ctx(&self.context),
            deletion_set: self.deletion_set.clone(),
            updates: self.updates.clone(),
        }
    }
}
/// Whether we have a value or its serialization.
///
/// Lets callers avoid a deserialize-then-reserialize round trip when only the
/// bytes are ultimately needed.
enum ValueOrBytes<'a, T> {
    /// The value itself.
    Value(&'a T),
    /// The serialization.
    Bytes(Vec<u8>),
}
impl<'a, T> ValueOrBytes<'a, T>
where
    T: Clone + DeserializeOwned,
{
    /// Returns the contained value as a `Cow`, deserializing from the BCS bytes
    /// when only the serialization is held.
    fn to_value(&self) -> Result<Cow<'a, T>, ViewError> {
        Ok(match self {
            ValueOrBytes::Value(value) => Cow::Borrowed(value),
            ValueOrBytes::Bytes(bytes) => Cow::Owned(bcs::from_bytes(bytes)?),
        })
    }
}
impl<T> ValueOrBytes<'_, T>
where
    T: Serialize,
{
    /// Consumes `self` and yields the BCS serialization, serializing the value
    /// only when the bytes are not already available.
    pub fn into_bytes(self) -> Result<Vec<u8>, ViewError> {
        match self {
            ValueOrBytes::Bytes(bytes) => Ok(bytes),
            ValueOrBytes::Value(value) => Ok(bcs::to_bytes(value)?),
        }
    }
}
impl<C, V> View for ByteMapView<C, V>
where
    C: Context,
    V: Send + Sync + Serialize,
{
    // The map is loaded lazily: no keys are read up front.
    const NUM_INIT_KEYS: usize = 0;
    type Context = C;
    fn context(&self) -> C {
        self.context.clone()
    }
    fn pre_load(_context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        Ok(Vec::new())
    }
    fn post_load(context: C, _values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        Ok(Self {
            context,
            updates: BTreeMap::new(),
            deletion_set: DeletionSet::new(),
        })
    }
    fn rollback(&mut self) {
        self.updates.clear();
        self.deletion_set.rollback();
    }
    async fn has_pending_changes(&self) -> bool {
        self.deletion_set.has_pending_changes() || !self.updates.is_empty()
    }
    /// Writes the pending changes into `batch`; returns `true` when the view
    /// ends up completely empty (so the caller may drop it entirely).
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        if self.deletion_set.delete_storage_first {
            // Wipe everything under the base key, then re-insert the surviving
            // `Set` updates. Individual `Removed` entries are redundant here.
            delete_view = true;
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            for (index, update) in &self.updates {
                if let Update::Set(value) = update {
                    let key = self.context.base_key().base_index(index);
                    batch.put_key_value(key, value)?;
                    delete_view = false;
                }
            }
        } else {
            // Prefix deletions are emitted first; later `Set` updates may
            // legitimately re-populate keys inside a deleted prefix.
            for index in &self.deletion_set.deleted_prefixes {
                let key = self.context.base_key().base_index(index);
                batch.delete_key_prefix(key);
            }
            for (index, update) in &self.updates {
                let key = self.context.base_key().base_index(index);
                match update {
                    Update::Removed => batch.delete_key(key),
                    Update::Set(value) => batch.put_key_value(key, value)?,
                }
            }
        }
        Ok(delete_view)
    }
    fn post_save(&mut self) {
        self.updates.clear();
        self.deletion_set.delete_storage_first = false;
        self.deletion_set.deleted_prefixes.clear();
    }
    fn clear(&mut self) {
        self.updates.clear();
        self.deletion_set.clear();
    }
}
impl<C: Clone, V: Clone> ClonableView for ByteMapView<C, V>
where
    Self: View,
{
    /// Produces an independent copy of this view, including any pending
    /// updates and deletions.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let context = self.context.clone();
        let deletion_set = self.deletion_set.clone();
        let updates = self.updates.clone();
        Ok(ByteMapView {
            context,
            deletion_set,
            updates,
        })
    }
}
impl<C, V> ByteMapView<C, V>
where
    C: Context,
{
    /// Inserts or resets the value of a key of the map.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// assert_eq!(map.keys().await.unwrap(), vec![vec![0, 1]]);
    /// # })
    /// ```
    pub fn insert(&mut self, short_key: Vec<u8>, value: V) {
        self.updates.insert(short_key, Update::Set(value));
    }
    /// Removes a value. If absent then nothing is done.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], "Hello");
    /// map.remove(vec![0, 1]);
    /// # })
    /// ```
    pub fn remove(&mut self, short_key: Vec<u8>) {
        if self.deletion_set.contains_prefix_of(&short_key) {
            // Optimization: No need to mark `short_key` for deletion as we are going to remove a range of keys containing it.
            self.updates.remove(&short_key);
        } else {
            self.updates.insert(short_key, Update::Removed);
        }
    }
    /// Removes all values whose key starts with `key_prefix`. Keys without that
    /// prefix are unaffected.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![0, 2], String::from("Bonjour"));
    /// map.remove_by_prefix(vec![0]);
    /// assert!(map.keys().await.unwrap().is_empty());
    /// # })
    /// ```
    pub fn remove_by_prefix(&mut self, key_prefix: Vec<u8>) {
        // Drop pending updates under the prefix; the deletion set then covers
        // both those and whatever is persisted in storage.
        let key_list = self
            .updates
            .range(get_key_range_for_prefix(key_prefix.clone()))
            .map(|x| x.0.to_vec())
            .collect::<Vec<_>>();
        for key in key_list {
            self.updates.remove(&key);
        }
        self.deletion_set.insert_key_prefix(key_prefix);
    }
    /// Obtains the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.context.extra()
    }
    /// Returns `true` if the map contains a value for the specified key.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// assert!(map.contains_key(&[0, 1]).await.unwrap());
    /// assert!(!map.contains_key(&[0, 2]).await.unwrap());
    /// # })
    /// ```
    pub async fn contains_key(&self, short_key: &[u8]) -> Result<bool, ViewError> {
        // Pending updates shadow both the deletion set and storage.
        if let Some(update) = self.updates.get(short_key) {
            let test = match update {
                Update::Removed => false,
                Update::Set(_value) => true,
            };
            return Ok(test);
        }
        if self.deletion_set.contains_prefix_of(short_key) {
            return Ok(false);
        }
        let key = self.context.base_key().base_index(short_key);
        Ok(self.context.store().contains_key(&key).await?)
    }
}
impl<C, V> ByteMapView<C, V>
where
    C: Context,
    V: Clone + DeserializeOwned + 'static,
{
    /// Reads the value at the given position, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// assert_eq!(map.get(&[0, 1]).await.unwrap(), Some(String::from("Hello")));
    /// # })
    /// ```
    pub async fn get(&self, short_key: &[u8]) -> Result<Option<V>, ViewError> {
        // Pending updates shadow the deletion set, which shadows storage.
        if let Some(update) = self.updates.get(short_key) {
            let value = match update {
                Update::Removed => None,
                Update::Set(value) => Some(value.clone()),
            };
            return Ok(value);
        }
        if self.deletion_set.contains_prefix_of(short_key) {
            return Ok(None);
        }
        let key = self.context.base_key().base_index(short_key);
        Ok(self.context.store().read_value(&key).await?)
    }
    /// Reads the values at the given positions, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// let values = map.multi_get(vec![vec![0, 1], vec![0, 2]]).await.unwrap();
    /// assert_eq!(values, vec![Some(String::from("Hello")), None]);
    /// # })
    /// ```
    pub async fn multi_get(&self, short_keys: Vec<Vec<u8>>) -> Result<Vec<Option<V>>, ViewError> {
        let size = short_keys.len();
        let mut results = vec![None; size];
        // Positions answered neither by `updates` nor by the deletion set are
        // collected and resolved with a single multi-read from storage.
        let mut missed_indices = Vec::new();
        let mut vector_query = Vec::new();
        for (i, short_key) in short_keys.into_iter().enumerate() {
            if let Some(update) = self.updates.get(&short_key) {
                if let Update::Set(value) = update {
                    results[i] = Some(value.clone());
                }
            } else if !self.deletion_set.contains_prefix_of(&short_key) {
                missed_indices.push(i);
                let key = self.context.base_key().base_index(&short_key);
                vector_query.push(key);
            }
        }
        let values = self
            .context
            .store()
            .read_multi_values_bytes(&vector_query)
            .await?;
        // `vector_query` and `missed_indices` were pushed in lockstep, so the
        // returned values line up with the recorded positions.
        for (i, value) in missed_indices.into_iter().zip(values) {
            results[i] = from_bytes_option(&value)?;
        }
        Ok(results)
    }
    /// Reads the key-value pairs at the given positions, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// let pairs = map
    ///     .multi_get_pairs(vec![vec![0, 1], vec![0, 2]])
    ///     .await
    ///     .unwrap();
    /// assert_eq!(
    ///     pairs,
    ///     vec![
    ///         (vec![0, 1], Some(String::from("Hello"))),
    ///         (vec![0, 2], None)
    ///     ]
    /// );
    /// # })
    /// ```
    pub async fn multi_get_pairs(
        &self,
        short_keys: Vec<Vec<u8>>,
    ) -> Result<Vec<(Vec<u8>, Option<V>)>, ViewError> {
        let values = self.multi_get(short_keys.clone()).await?;
        Ok(short_keys.into_iter().zip(values).collect())
    }
    /// Obtains a mutable reference to a value at a given position if available.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// let value = map.get_mut(&[0, 1]).await.unwrap().unwrap();
    /// assert_eq!(*value, String::from("Hello"));
    /// *value = String::from("Hola");
    /// assert_eq!(map.get(&[0, 1]).await.unwrap(), Some(String::from("Hola")));
    /// # })
    /// ```
    pub async fn get_mut(&mut self, short_key: &[u8]) -> Result<Option<&mut V>, ViewError> {
        // A value read from storage is promoted into `updates` as `Set`, so any
        // mutation through the returned reference is tracked and flushed later.
        let update = match self.updates.entry(short_key.to_vec()) {
            Entry::Vacant(e) => {
                if self.deletion_set.contains_prefix_of(short_key) {
                    None
                } else {
                    let key = self.context.base_key().base_index(short_key);
                    let value = self.context.store().read_value(&key).await?;
                    value.map(|value| e.insert(Update::Set(value)))
                }
            }
            Entry::Occupied(e) => Some(e.into_mut()),
        };
        Ok(match update {
            Some(Update::Set(value)) => Some(value),
            _ => None,
        })
    }
}
impl<C, V> ByteMapView<C, V>
where
    C: Context,
    V: Clone + Serialize + DeserializeOwned + 'static,
{
    /// Applies the function f on each index (aka key) which has the assigned prefix.
    /// Keys are visited in the lexicographic order. The shortened key is sent to the
    /// function and if it returns false, then the loop exits
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![1, 2], String::from("Bonjour"));
    /// map.insert(vec![1, 3], String::from("Bonjour"));
    /// let prefix = vec![1];
    /// let mut count = 0;
    /// map.for_each_key_while(
    ///     |_key| {
    ///         count += 1;
    ///         Ok(count < 3)
    ///     },
    ///     prefix,
    /// )
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 2);
    /// # })
    /// ```
    pub async fn for_each_key_while<F>(&self, mut f: F, prefix: Vec<u8>) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<bool, ViewError> + Send,
    {
        // This is a lexicographic merge of two ordered streams: pending
        // `updates` and the keys found in storage, with stored keys that fall
        // under a deleted prefix filtered out.
        let prefix_len = prefix.len();
        let mut updates = self.updates.range(get_key_range_for_prefix(prefix.clone()));
        let mut update = updates.next();
        if !self.deletion_set.contains_prefix_of(&prefix) {
            let iter = self
                .deletion_set
                .deleted_prefixes
                .range(get_key_range_for_prefix(prefix.clone()));
            let mut suffix_closed_set = SuffixClosedSetIterator::new(prefix_len, iter);
            let base = self.context.base_key().base_index(&prefix);
            for index in self.context.store().find_keys_by_prefix(&base).await? {
                loop {
                    match update {
                        // Drain every pending update that sorts at or before the
                        // stored key; an update equal to the stored key shadows it.
                        Some((key, value)) if &key[prefix_len..] <= index.as_slice() => {
                            if let Update::Set(_) = value {
                                if !f(&key[prefix_len..])? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key[prefix_len..] == index {
                                break;
                            }
                        }
                        _ => {
                            // Stored key not shadowed by an update: emit it
                            // unless a deleted prefix covers it.
                            if !suffix_closed_set.find_key(&index) && !f(&index)? {
                                return Ok(());
                            }
                            break;
                        }
                    }
                }
            }
        }
        // Emit the remaining pending updates past the last stored key.
        while let Some((key, value)) = update {
            if let Update::Set(_) = value {
                if !f(&key[prefix_len..])? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
    /// Applies the function f on each index (aka key) having the specified prefix.
    /// The shortened keys are sent to the function f. Keys are visited in the
    /// lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// let mut count = 0;
    /// let prefix = Vec::new();
    /// map.for_each_key(
    ///     |_key| {
    ///         count += 1;
    ///         Ok(())
    ///     },
    ///     prefix,
    /// )
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 1);
    /// # })
    /// ```
    pub async fn for_each_key<F>(&self, mut f: F, prefix: Vec<u8>) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<(), ViewError> + Send,
    {
        self.for_each_key_while(
            |key| {
                f(key)?;
                Ok(true)
            },
            prefix,
        )
        .await
    }
    /// Returns the list of keys of the map in lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![1, 2], String::from("Bonjour"));
    /// map.insert(vec![2, 2], String::from("Hallo"));
    /// assert_eq!(
    ///     map.keys().await.unwrap(),
    ///     vec![vec![0, 1], vec![1, 2], vec![2, 2]]
    /// );
    /// # })
    /// ```
    pub async fn keys(&self) -> Result<Vec<Vec<u8>>, ViewError> {
        let mut keys = Vec::new();
        let prefix = Vec::new();
        self.for_each_key(
            |key| {
                keys.push(key.to_vec());
                Ok(())
            },
            prefix,
        )
        .await?;
        Ok(keys)
    }
    /// Returns the list of keys of the map having a specified prefix
    /// in lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![1, 2], String::from("Bonjour"));
    /// map.insert(vec![1, 3], String::from("Hallo"));
    /// assert_eq!(
    ///     map.keys_by_prefix(vec![1]).await.unwrap(),
    ///     vec![vec![1, 2], vec![1, 3]]
    /// );
    /// # })
    /// ```
    pub async fn keys_by_prefix(&self, prefix: Vec<u8>) -> Result<Vec<Vec<u8>>, ViewError> {
        let mut keys = Vec::new();
        let prefix_clone = prefix.clone();
        self.for_each_key(
            |key| {
                // The callback receives the key with the prefix stripped;
                // prepend it again so full keys are returned.
                let mut big_key = prefix.clone();
                big_key.extend(key);
                keys.push(big_key);
                Ok(())
            },
            prefix_clone,
        )
        .await?;
        Ok(keys)
    }
    /// Returns the number of keys of the map
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![1, 2], String::from("Bonjour"));
    /// map.insert(vec![2, 2], String::from("Hallo"));
    /// assert_eq!(map.count().await.unwrap(), 3);
    /// # })
    /// ```
    pub async fn count(&self) -> Result<usize, ViewError> {
        let mut count = 0;
        let prefix = Vec::new();
        self.for_each_key(
            |_key| {
                count += 1;
                Ok(())
            },
            prefix,
        )
        .await?;
        Ok(count)
    }
    /// Applies a function f on each key/value pair matching a prefix. The key is the
    /// shortened one by the prefix. The value is an enum that can be either a value
    /// or its serialization. This is needed in order to avoid a scenario where we
    /// deserialize something that was serialized. The key/value are sent to the
    /// function f. If it returns false the loop ends prematurely. Keys and values
    /// are visited in the lexicographic order.
    async fn for_each_key_value_or_bytes_while<'a, F>(
        &'a self,
        mut f: F,
        prefix: Vec<u8>,
    ) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], ValueOrBytes<'a, V>) -> Result<bool, ViewError> + Send,
    {
        // Same merge structure as `for_each_key_while`, but carrying values:
        // pending updates yield `ValueOrBytes::Value`, storage yields
        // `ValueOrBytes::Bytes` so nothing is deserialized unnecessarily.
        let prefix_len = prefix.len();
        let mut updates = self.updates.range(get_key_range_for_prefix(prefix.clone()));
        let mut update = updates.next();
        if !self.deletion_set.contains_prefix_of(&prefix) {
            let iter = self
                .deletion_set
                .deleted_prefixes
                .range(get_key_range_for_prefix(prefix.clone()));
            let mut suffix_closed_set = SuffixClosedSetIterator::new(prefix_len, iter);
            let base = self.context.base_key().base_index(&prefix);
            for (index, bytes) in self
                .context
                .store()
                .find_key_values_by_prefix(&base)
                .await?
            {
                loop {
                    match update {
                        Some((key, value)) if key[prefix_len..] <= *index => {
                            if let Update::Set(value) = value {
                                let value = ValueOrBytes::Value(value);
                                if !f(&key[prefix_len..], value)? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key[prefix_len..] == index {
                                break;
                            }
                        }
                        _ => {
                            if !suffix_closed_set.find_key(&index) {
                                let value = ValueOrBytes::Bytes(bytes);
                                if !f(&index, value)? {
                                    return Ok(());
                                }
                            }
                            break;
                        }
                    }
                }
            }
        }
        while let Some((key, value)) = update {
            if let Update::Set(value) = value {
                let value = ValueOrBytes::Value(value);
                if !f(&key[prefix_len..], value)? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
    /// Applies a function f on each index/value pair matching a prefix. Keys
    /// and values are visited in the lexicographic order. The shortened index
    /// is sent to the function f and if it returns false then the loop ends
    /// prematurely
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// map.insert(vec![1, 2], String::from("Bonjour"));
    /// map.insert(vec![1, 3], String::from("Hallo"));
    /// let mut part_keys = Vec::new();
    /// let prefix = vec![1];
    /// map.for_each_key_value_while(
    ///     |key, _value| {
    ///         part_keys.push(key.to_vec());
    ///         Ok(part_keys.len() < 2)
    ///     },
    ///     prefix,
    /// )
    /// .await
    /// .unwrap();
    /// assert_eq!(part_keys.len(), 2);
    /// # })
    /// ```
    pub async fn for_each_key_value_while<'a, F>(
        &'a self,
        mut f: F,
        prefix: Vec<u8>,
    ) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], Cow<'a, V>) -> Result<bool, ViewError> + Send,
    {
        self.for_each_key_value_or_bytes_while(
            |key, value| {
                // Deserialize lazily, only for entries actually visited.
                let value = value.to_value()?;
                f(key, value)
            },
            prefix,
        )
        .await
    }
    /// Applies a function f on each key/value pair matching a prefix. The key is the
    /// shortened one by the prefix. The value is an enum that can be either a value
    /// or its serialization. This is needed in order to avoid a scenario where we
    /// deserialize something that was serialized. The key/value are sent to the
    /// function f. Keys and values are visited in the lexicographic order.
    async fn for_each_key_value_or_bytes<'a, F>(
        &'a self,
        mut f: F,
        prefix: Vec<u8>,
    ) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], ValueOrBytes<'a, V>) -> Result<(), ViewError> + Send,
    {
        self.for_each_key_value_or_bytes_while(
            |key, value| {
                f(key, value)?;
                Ok(true)
            },
            prefix,
        )
        .await
    }
    /// Applies a function f on each key/value pair matching a prefix. The shortened
    /// key and value are sent to the function f. Keys and values are visited in the
    /// lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![0, 1], String::from("Hello"));
    /// let mut count = 0;
    /// let prefix = Vec::new();
    /// map.for_each_key_value(
    ///     |_key, _value| {
    ///         count += 1;
    ///         Ok(())
    ///     },
    ///     prefix,
    /// )
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 1);
    /// # })
    /// ```
    pub async fn for_each_key_value<'a, F>(
        &'a self,
        mut f: F,
        prefix: Vec<u8>,
    ) -> Result<(), ViewError>
    where
        F: FnMut(&[u8], Cow<'a, V>) -> Result<(), ViewError> + Send,
    {
        self.for_each_key_value_while(
            |key, value| {
                f(key, value)?;
                Ok(true)
            },
            prefix,
        )
        .await
    }
}
impl<C, V> ByteMapView<C, V>
where
    C: Context,
    V: Clone + Send + Serialize + DeserializeOwned + 'static,
{
    /// Returns, in lexicographic order, every key/value pair whose key starts
    /// with `prefix`. The returned keys include the prefix.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![1, 2], String::from("Hello"));
    /// let prefix = vec![1];
    /// assert_eq!(
    ///     map.key_values_by_prefix(prefix).await.unwrap(),
    ///     vec![(vec![1, 2], String::from("Hello"))]
    /// );
    /// # })
    /// ```
    pub async fn key_values_by_prefix(
        &self,
        prefix: Vec<u8>,
    ) -> Result<Vec<(Vec<u8>, V)>, ViewError> {
        let mut collected = Vec::new();
        self.for_each_key_value(
            |suffix, value| {
                // The callback sees keys with the prefix stripped; restore it.
                let mut full_key = prefix.clone();
                full_key.extend_from_slice(suffix);
                collected.push((full_key, value.into_owned()));
                Ok(())
            },
            prefix.clone(),
        )
        .await?;
        Ok(collected)
    }
    /// Returns every key/value pair of the map in lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::map_view::ByteMapView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut map = ByteMapView::load(context).await.unwrap();
    /// map.insert(vec![1, 2], String::from("Hello"));
    /// assert_eq!(
    ///     map.key_values().await.unwrap(),
    ///     vec![(vec![1, 2], String::from("Hello"))]
    /// );
    /// # })
    /// ```
    pub async fn key_values(&self) -> Result<Vec<(Vec<u8>, V)>, ViewError> {
        // The empty prefix matches every key.
        self.key_values_by_prefix(vec![]).await
    }
}
impl<C, V> ByteMapView<C, V>
where
C: Context,
V: Default + DeserializeOwned + 'static,
{
/// Obtains a mutable reference to a value at a given position.
/// Default value if the index is missing.
/// ```rust
/// # tokio_test::block_on(async {
/// # use linera_views::context::MemoryContext;
/// # use linera_views::map_view::ByteMapView;
/// # use linera_views::views::View;
/// # let context = MemoryContext::new_for_testing(());
/// let mut map = ByteMapView::load(context).await.unwrap();
/// map.insert(vec![0, 1], String::from("Hello"));
/// assert_eq!(map.get_mut_or_default(&[7]).await.unwrap(), "");
/// let value = map.get_mut_or_default(&[0, 1]).await.unwrap();
/// assert_eq!(*value, String::from("Hello"));
/// *value = String::from("Hola");
/// assert_eq!(map.get(&[0, 1]).await.unwrap(), Some(String::from("Hola")));
/// # })
/// ```
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/hashable_wrapper.rs | linera-views/src/views/hashable_wrapper.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
marker::PhantomData,
ops::{Deref, DerefMut},
sync::Mutex,
};
use allocative::Allocative;
use linera_base::visit_allocative_simple;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::from_bytes_option,
context::Context,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View, ViewError, MIN_VIEW_TAG},
};
/// Wrapping a view to memoize its hash.
///
/// The memoized `hash` is reset whenever the wrapped view changes and is
/// persisted under its own key (see `KeyTag::Hash`).
#[derive(Debug, Allocative)]
#[allocative(bound = "C, O, W: Allocative")]
pub struct WrappedHashableContainerView<C, W, O> {
    /// Phantom data for the context type.
    #[allocative(skip)]
    _phantom: PhantomData<C>,
    /// The hash persisted in storage.
    #[allocative(visit = visit_allocative_simple)]
    stored_hash: Option<O>,
    /// Memoized hash, if any. Guarded by a mutex so `hash(&self)` can cache.
    #[allocative(visit = visit_allocative_simple)]
    hash: Mutex<Option<O>>,
    /// The wrapped view.
    inner: W,
}
/// Key tags to create the sub-keys of a `WrappedHashableContainerView` on top of the base key.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the indices of the view.
    Inner = MIN_VIEW_TAG,
    /// Prefix for the hash (implicitly `MIN_VIEW_TAG + 1`).
    Hash,
}
impl<C, W, O, C2> ReplaceContext<C2> for WrappedHashableContainerView<C, W, O>
where
    W: HashableView<Hasher: Hasher<Output = O>, Context = C> + ReplaceContext<C2>,
    <W as ReplaceContext<C2>>::Target: HashableView<Hasher: Hasher<Output = O>>,
    O: Serialize + DeserializeOwned + Send + Sync + Copy + PartialEq,
    C: Context,
    C2: Context,
{
    type Target = WrappedHashableContainerView<C2, <W as ReplaceContext<C2>>::Target, O>;
    /// Rebuilds the wrapper under a new context, carrying over both the stored
    /// and the memoized hash and re-contextualizing the inner view.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        // Copy the memoized hash out before constructing the new mutex.
        let hash = *self.hash.lock().unwrap();
        WrappedHashableContainerView {
            _phantom: PhantomData,
            stored_hash: self.stored_hash,
            hash: Mutex::new(hash),
            inner: self.inner.with_context(ctx).await,
        }
    }
}
impl<W: HashableView, O> View for WrappedHashableContainerView<W::Context, W, O>
where
    W: HashableView<Hasher: Hasher<Output = O>>,
    O: Serialize + DeserializeOwned + Send + Sync + Copy + PartialEq,
{
    // One extra key for the persisted hash, on top of the inner view's keys.
    const NUM_INIT_KEYS: usize = 1 + W::NUM_INIT_KEYS;
    type Context = W::Context;
    fn context(&self) -> Self::Context {
        // The inner context has our base key + the KeyTag::Inner byte
        self.inner.context().clone_with_trimmed_key(1)
    }
    fn pre_load(context: &Self::Context) -> Result<Vec<Vec<u8>>, ViewError> {
        // First the hash key, then the inner view's keys under the Inner tag.
        let mut v = vec![context.base_key().base_tag(KeyTag::Hash as u8)];
        let base_key = context.base_key().base_tag(KeyTag::Inner as u8);
        let context = context.clone_with_base_key(base_key);
        v.extend(W::pre_load(&context)?);
        Ok(v)
    }
    fn post_load(context: Self::Context, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        // values[0] is the persisted hash; the rest belong to the inner view.
        let hash = from_bytes_option(values.first().ok_or(ViewError::PostLoadValuesError)?)?;
        let base_key = context.base_key().base_tag(KeyTag::Inner as u8);
        let context = context.clone_with_base_key(base_key);
        let inner = W::post_load(
            context,
            values.get(1..).ok_or(ViewError::PostLoadValuesError)?,
        )?;
        Ok(Self {
            _phantom: PhantomData,
            stored_hash: hash,
            hash: Mutex::new(hash),
            inner,
        })
    }
    fn rollback(&mut self) {
        self.inner.rollback();
        *self.hash.get_mut().unwrap() = self.stored_hash;
    }
    async fn has_pending_changes(&self) -> bool {
        if self.inner.has_pending_changes().await {
            return true;
        }
        let hash = self.hash.lock().unwrap();
        self.stored_hash != *hash
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let delete_view = self.inner.pre_save(batch)?;
        let hash = *self.hash.lock().unwrap();
        if delete_view {
            // Dropping the trailing Inner tag byte yields the wrapper's own
            // prefix, so the hash key is wiped along with the inner view.
            let mut key_prefix = self.inner.context().base_key().bytes.clone();
            key_prefix.pop();
            batch.delete_key_prefix(key_prefix);
        } else if self.stored_hash != hash {
            // Rewrite the last byte from the Inner tag to the Hash tag to
            // address the persisted-hash key.
            let mut key = self.inner.context().base_key().bytes.clone();
            let tag = key.last_mut().unwrap();
            *tag = KeyTag::Hash as u8;
            match hash {
                None => batch.delete_key(key),
                Some(hash) => batch.put_key_value(key, &hash)?,
            }
        }
        Ok(delete_view)
    }
    fn post_save(&mut self) {
        self.inner.post_save();
        let hash = *self.hash.get_mut().unwrap();
        self.stored_hash = hash;
    }
    fn clear(&mut self) {
        self.inner.clear();
        *self.hash.get_mut().unwrap() = None;
    }
}
impl<W, O> ClonableView for WrappedHashableContainerView<W::Context, W, O>
where
    W: HashableView + ClonableView,
    O: Serialize + DeserializeOwned + Send + Sync + Copy + PartialEq,
    W::Hasher: Hasher<Output = O>,
{
    // Duplicates the wrapper: the memoized hash is copied into a fresh mutex
    // and the duplication of the inner view is delegated to it.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let memoized = *self.hash.get_mut().unwrap();
        let inner = self.inner.clone_unchecked()?;
        Ok(WrappedHashableContainerView {
            _phantom: PhantomData,
            stored_hash: self.stored_hash,
            hash: Mutex::new(memoized),
            inner,
        })
    }
}
impl<W, O> HashableView for WrappedHashableContainerView<W::Context, W, O>
where
    W: HashableView,
    O: Serialize + DeserializeOwned + Send + Sync + Copy + PartialEq,
    W::Hasher: Hasher<Output = O>,
{
    type Hasher = W::Hasher;
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        // Return the memoized hash if present; otherwise compute it from the
        // inner view and cache the result.
        let hash = *self.hash.get_mut().unwrap();
        match hash {
            Some(hash) => Ok(hash),
            None => {
                let new_hash = self.inner.hash_mut().await?;
                let hash = self.hash.get_mut().unwrap();
                *hash = Some(new_hash);
                Ok(new_hash)
            }
        }
    }
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        // Copy the memoized value out first so the std (non-async) mutex
        // guard is not held across the `.await` below; the lock is re-taken
        // only to store the freshly computed hash.
        let hash = *self.hash.lock().unwrap();
        match hash {
            Some(hash) => Ok(hash),
            None => {
                let new_hash = self.inner.hash().await?;
                let mut hash = self.hash.lock().unwrap();
                *hash = Some(new_hash);
                Ok(new_hash)
            }
        }
    }
}
impl<C, W, O> Deref for WrappedHashableContainerView<C, W, O> {
    type Target = W;
    // Read-only access to the inner view; the memoized hash stays valid.
    fn deref(&self) -> &W {
        &self.inner
    }
}
impl<C, W, O> DerefMut for WrappedHashableContainerView<C, W, O> {
    // Mutable access may change the inner view's content, so the memoized
    // hash is invalidated up front; it will be recomputed on demand.
    fn deref_mut(&mut self) -> &mut W {
        *self.hash.get_mut().unwrap() = None;
        &mut self.inner
    }
}
#[cfg(with_graphql)]
mod graphql {
    use std::borrow::Cow;
    use super::WrappedHashableContainerView;
    use crate::context::Context;
    // The wrapper is fully transparent as a GraphQL output type: every
    // method delegates to the wrapped view `W`.
    impl<C, W, O> async_graphql::OutputType for WrappedHashableContainerView<C, W, O>
    where
        C: Context,
        W: async_graphql::OutputType + Send + Sync,
        O: Send + Sync,
    {
        fn type_name() -> Cow<'static, str> {
            W::type_name()
        }
        fn qualified_type_name() -> String {
            W::qualified_type_name()
        }
        fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
            W::create_type_info(registry)
        }
        async fn resolve(
            &self,
            ctx: &async_graphql::ContextSelectionSet<'_>,
            field: &async_graphql::Positioned<async_graphql::parser::types::Field>,
        ) -> async_graphql::ServerResult<async_graphql::Value> {
            // `**self` dereferences through the wrapper to the inner view.
            (**self).resolve(ctx, field).await
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/bucket_queue_view.rs | linera-views/src/views/bucket_queue_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{vec_deque::IterMut, VecDeque};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::{
batch::Batch,
common::{from_bytes_option, from_bytes_option_or_default, HasherOutput},
context::Context,
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, View, ViewError, MIN_VIEW_TAG},
};
// Prometheus metrics, only compiled when the `with_metrics` cfg is enabled.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// The runtime of hash computation
    pub static BUCKET_QUEUE_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "bucket_queue_view_hash_runtime",
            "BucketQueueView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}
/// Key tags to create the sub-keys of a [`BucketQueueView`] on top of the base key.
/// Discriminants are assigned sequentially starting from `MIN_VIEW_TAG`.
#[repr(u8)]
enum KeyTag {
    /// Key tag for the front bucket (index 0).
    Front = MIN_VIEW_TAG,
    /// Key tag for the `BucketStore`.
    Store,
    /// Key tag for the content of non-front buckets (index > 0).
    Index,
}
/// The metadata of the view in storage, persisted under `KeyTag::Store`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
struct BucketStore {
    /// The descriptions of all stored buckets. The first description is expected to start
    /// with index 0 (front bucket) and will be ignored.
    descriptions: Vec<BucketDescription>,
    /// The position of the front value in the front bucket.
    front_position: usize,
}
/// The description of a bucket in storage: its size and where it is keyed.
#[derive(Copy, Clone, Debug, Default, Serialize, Deserialize)]
struct BucketDescription {
    /// The length of the bucket (at most N).
    length: usize,
    /// The index of the bucket in storage.
    index: usize,
}
impl BucketStore {
    /// Returns the number of bucket descriptions (including the ignored
    /// front-bucket entry at position 0).
    fn len(&self) -> usize {
        self.descriptions.len()
    }
}
/// The position of a value in the stored bucket.
#[derive(Copy, Clone, Debug, Allocative)]
struct Cursor {
    /// The offset of the bucket in the vector of stored buckets.
    offset: usize,
    /// The position of the value in the stored bucket.
    position: usize,
}
/// The state of a stored bucket in memory.
#[derive(Clone, Debug, Allocative)]
enum State<T> {
    /// The bucket's values are present in memory.
    Loaded { data: Vec<T> },
    /// Only the number of values is known; the data still lives in storage.
    NotLoaded { length: usize },
}
impl<T> Bucket<T> {
    /// Returns the number of values in the bucket, whether or not its data is
    /// loaded in memory.
    fn len(&self) -> usize {
        match &self.state {
            State::Loaded { data } => data.len(),
            State::NotLoaded { length } => *length,
        }
    }
    /// Returns whether the bucket's data is present in memory.
    fn is_loaded(&self) -> bool {
        // `matches!` replaces the hand-written two-arm `match` returning bools.
        matches!(self.state, State::Loaded { .. })
    }
    /// Builds the storage metadata entry describing this bucket.
    fn to_description(&self) -> BucketDescription {
        BucketDescription {
            length: self.len(),
            index: self.index,
        }
    }
}
/// A stored bucket. The bucket under the queue's cursor is kept loaded at all
/// times; others may remain `NotLoaded` until needed.
#[derive(Clone, Debug, Allocative)]
struct Bucket<T> {
    /// The index in storage.
    index: usize,
    /// The state of the bucket.
    state: State<T>,
}
/// A view that supports a FIFO queue for values of type `T`.
/// The size `N` has to be chosen by taking into account the size of the type `T`
/// and the basic size of a block. For example a total size of 100 bytes to 10 KB
/// seems adequate.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, T: Allocative, const N: usize")]
pub struct BucketQueueView<C, T, const N: usize> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// The stored buckets. Some buckets may not be loaded. The first one is always loaded.
    stored_buckets: VecDeque<Bucket<T>>,
    /// The newly inserted back values.
    new_back_values: VecDeque<T>,
    /// The position for the stored front value in the first stored bucket.
    stored_front_position: usize,
    /// The current position of the front value if it is in the stored buckets, and `None`
    /// otherwise.
    cursor: Option<Cursor>,
    /// Whether the storage is to be deleted or not.
    delete_storage_first: bool,
}
impl<C, T, const N: usize> View for BucketQueueView<C, T, N>
where
    C: Context,
    T: Send + Sync + Clone + Serialize + DeserializeOwned,
{
    // Two keys are read at load time: the front bucket and the `BucketStore`.
    const NUM_INIT_KEYS: usize = 2;
    type Context = C;
    fn context(&self) -> C {
        self.context.clone()
    }
    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        let key1 = context.base_key().base_tag(KeyTag::Front as u8);
        let key2 = context.base_key().base_tag(KeyTag::Store as u8);
        Ok(vec![key1, key2])
    }
    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        let value1 = values.first().ok_or(ViewError::PostLoadValuesError)?;
        let value2 = values.get(1).ok_or(ViewError::PostLoadValuesError)?;
        // The front bucket (index 0) is loaded eagerly.
        let front = from_bytes_option::<Vec<T>>(value1)?;
        let mut stored_buckets = VecDeque::from(match front {
            Some(data) => {
                let bucket = Bucket {
                    index: 0,
                    state: State::Loaded { data },
                };
                vec![bucket]
            }
            None => {
                vec![]
            }
        });
        let bucket_store = from_bytes_option_or_default::<BucketStore>(value2)?;
        // Ignoring `bucket_store.descriptions[0]`.
        // TODO(#4969): Remove redundant BucketDescription in BucketQueueView.
        for i in 1..bucket_store.len() {
            let length = bucket_store.descriptions[i].length;
            let index = bucket_store.descriptions[i].index;
            // Non-front buckets are loaded lazily; only their length is known.
            stored_buckets.push_back(Bucket {
                index,
                state: State::NotLoaded { length },
            });
        }
        let cursor = if bucket_store.descriptions.is_empty() {
            None
        } else {
            Some(Cursor {
                offset: 0,
                position: bucket_store.front_position,
            })
        };
        Ok(Self {
            context,
            stored_buckets,
            stored_front_position: bucket_store.front_position,
            new_back_values: VecDeque::new(),
            cursor,
            delete_storage_first: false,
        })
    }
    fn rollback(&mut self) {
        self.delete_storage_first = false;
        // Reset the cursor to the last persisted front position and drop any
        // values pushed since the last save.
        self.cursor = if self.stored_buckets.is_empty() {
            None
        } else {
            Some(Cursor {
                offset: 0,
                position: self.stored_front_position,
            })
        };
        self.new_back_values.clear();
    }
    async fn has_pending_changes(&self) -> bool {
        if self.delete_storage_first {
            return true;
        }
        if !self.stored_buckets.is_empty() {
            // A consumed queue (cursor == None) or an advanced cursor means
            // stored entries were popped since the last save.
            let Some(cursor) = self.cursor else {
                return true;
            };
            if cursor.offset != 0 || cursor.position != self.stored_front_position {
                return true;
            }
        }
        !self.new_back_values.is_empty()
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        let mut descriptions = Vec::new();
        let mut stored_front_position = self.stored_front_position;
        if self.stored_count() == 0 {
            // No stored entry survives: wipe everything under the base key.
            let key_prefix = self.context.base_key().bytes.clone();
            batch.delete_key_prefix(key_prefix);
            delete_view = true;
            stored_front_position = 0;
        } else if let Some(cursor) = self.cursor {
            // Delete buckets that are before the cursor
            for i in 0..cursor.offset {
                let bucket = &self.stored_buckets[i];
                let index = bucket.index;
                let key = self.get_bucket_key(index)?;
                batch.delete_key(key);
            }
            stored_front_position = cursor.position;
            // Build descriptions for remaining buckets
            let first_index = self.stored_buckets[cursor.offset].index;
            let start_offset = if first_index != 0 {
                // Need to move the first remaining bucket to index 0
                let key = self.get_bucket_key(first_index)?;
                batch.delete_key(key);
                let key = self.get_bucket_key(0)?;
                let bucket = &self.stored_buckets[cursor.offset];
                let State::Loaded { data } = &bucket.state else {
                    unreachable!("The front bucket is always loaded.");
                };
                batch.put_key_value(key, data)?;
                descriptions.push(BucketDescription {
                    length: bucket.len(),
                    index: 0,
                });
                cursor.offset + 1
            } else {
                cursor.offset
            };
            for bucket in self.stored_buckets.range(start_offset..) {
                descriptions.push(bucket.to_description());
            }
        }
        if !self.new_back_values.is_empty() {
            delete_view = false;
            // Calculate the starting index for new buckets
            // If stored_count() == 0, all stored buckets are being removed, so start at 0
            // Otherwise, start after the last remaining bucket
            let mut index = if self.stored_count() == 0 {
                0
            } else if let Some(last_description) = descriptions.last() {
                last_description.index + 1
            } else {
                // This shouldn't happen if stored_count() > 0
                0
            };
            let mut start = 0;
            // Write the new values in chunks of at most N per bucket.
            while start < self.new_back_values.len() {
                let end = std::cmp::min(start + N, self.new_back_values.len());
                let value_chunk: Vec<_> = self.new_back_values.range(start..end).collect();
                let key = self.get_bucket_key(index)?;
                batch.put_key_value(key, &value_chunk)?;
                descriptions.push(BucketDescription {
                    index,
                    length: end - start,
                });
                index += 1;
                start = end;
            }
        }
        if !delete_view {
            // Persist the updated metadata under `KeyTag::Store`.
            let bucket_store = BucketStore {
                descriptions,
                front_position: stored_front_position,
            };
            let key = self.context.base_key().base_tag(KeyTag::Store as u8);
            batch.put_key_value(key, &bucket_store)?;
        }
        Ok(delete_view)
    }
    fn post_save(&mut self) {
        // Mirror in memory what `pre_save` just wrote to storage.
        if self.stored_count() == 0 {
            self.stored_buckets.clear();
            self.stored_front_position = 0;
            self.cursor = None;
        } else if let Some(cursor) = self.cursor {
            for _ in 0..cursor.offset {
                self.stored_buckets.pop_front();
            }
            self.cursor = Some(Cursor {
                offset: 0,
                position: cursor.position,
            });
            self.stored_front_position = cursor.position;
            // We need to ensure that the first index is in the front.
            self.stored_buckets[0].index = 0;
        }
        if !self.new_back_values.is_empty() {
            // The new values were persisted as buckets; register them as
            // loaded stored buckets after the existing ones.
            let mut index = match self.stored_buckets.back() {
                Some(bucket) => bucket.index + 1,
                None => 0,
            };
            let new_back_values = std::mem::take(&mut self.new_back_values);
            let new_back_values = new_back_values.into_iter().collect::<Vec<_>>();
            for value_chunk in new_back_values.chunks(N) {
                self.stored_buckets.push_back(Bucket {
                    index,
                    state: State::Loaded {
                        data: value_chunk.to_vec(),
                    },
                });
                index += 1;
            }
            if self.cursor.is_none() {
                self.cursor = Some(Cursor {
                    offset: 0,
                    position: 0,
                });
            }
        }
        self.delete_storage_first = false;
    }
    fn clear(&mut self) {
        self.delete_storage_first = true;
        self.new_back_values.clear();
        self.cursor = None;
    }
}
impl<C: Clone, T: Clone, const N: usize> ClonableView for BucketQueueView<C, T, N>
where
    Self: View,
{
    // Clones every in-memory component of the queue; both copies continue to
    // target the same storage keys via the cloned context.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let context = self.context.clone();
        let stored_buckets = self.stored_buckets.clone();
        let new_back_values = self.new_back_values.clone();
        Ok(BucketQueueView {
            context,
            stored_buckets,
            new_back_values,
            stored_front_position: self.stored_front_position,
            cursor: self.cursor,
            delete_storage_first: self.delete_storage_first,
        })
    }
}
impl<C: Context, T, const N: usize> BucketQueueView<C, T, N> {
    /// Gets the key corresponding to this bucket index.
    fn get_bucket_key(&self, index: usize) -> Result<Vec<u8>, ViewError> {
        // Index 0 is the front bucket under its dedicated tag; other buckets
        // are keyed by their index under the `Index` tag.
        Ok(if index == 0 {
            self.context.base_key().base_tag(KeyTag::Front as u8)
        } else {
            self.context
                .base_key()
                .derive_tag_key(KeyTag::Index as u8, &index)?
        })
    }
    /// Gets the number of entries in the container that are stored
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u8, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// assert_eq!(queue.stored_count(), 0);
    /// # })
    /// ```
    pub fn stored_count(&self) -> usize {
        if self.delete_storage_first {
            0
        } else {
            let Some(cursor) = self.cursor else {
                return 0;
            };
            let mut stored_count = 0;
            for offset in cursor.offset..self.stored_buckets.len() {
                stored_count += self.stored_buckets[offset].len();
            }
            // Entries before the cursor in the front bucket are already consumed.
            stored_count -= cursor.position;
            stored_count
        }
    }
    /// The total number of entries of the container
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u8, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// assert_eq!(queue.count(), 1);
    /// # })
    /// ```
    pub fn count(&self) -> usize {
        self.stored_count() + self.new_back_values.len()
    }
}
impl<C: Context, T: DeserializeOwned + Clone, const N: usize> BucketQueueView<C, T, N> {
    /// Gets a reference on the front value if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u8, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// assert_eq!(queue.front().cloned(), Some(34));
    /// # })
    /// ```
    pub fn front(&self) -> Option<&T> {
        match self.cursor {
            Some(Cursor { offset, position }) => {
                let bucket = &self.stored_buckets[offset];
                // The bucket under the cursor is always kept loaded (see
                // `post_load` and `delete_front`).
                let State::Loaded { data } = &bucket.state else {
                    unreachable!();
                };
                Some(&data[position])
            }
            // No stored entries remain: the front is the oldest new value.
            None => self.new_back_values.front(),
        }
    }
    /// Reads the front value, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u8, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(42);
    /// let front = queue.front_mut().unwrap();
    /// *front = 43;
    /// assert_eq!(queue.front().cloned(), Some(43));
    /// # })
    /// ```
    pub fn front_mut(&mut self) -> Option<&mut T> {
        match self.cursor {
            Some(Cursor { offset, position }) => {
                let bucket = self.stored_buckets.get_mut(offset).unwrap();
                let State::Loaded { data } = &mut bucket.state else {
                    unreachable!();
                };
                Some(data.get_mut(position).unwrap())
            }
            None => self.new_back_values.front_mut(),
        }
    }
    /// Deletes the front value, if any.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34 as u128);
    /// queue.delete_front().await.unwrap();
    /// assert_eq!(queue.elements().await.unwrap(), Vec::<u128>::new());
    /// # })
    /// ```
    pub async fn delete_front(&mut self) -> Result<(), ViewError> {
        match self.cursor {
            Some(cursor) => {
                // Advance the cursor, moving to the next bucket when the
                // current one is exhausted.
                let mut offset = cursor.offset;
                let mut position = cursor.position + 1;
                if self.stored_buckets[offset].len() == position {
                    offset += 1;
                    position = 0;
                }
                if offset == self.stored_buckets.len() {
                    // All stored entries are consumed.
                    self.cursor = None;
                } else {
                    self.cursor = Some(Cursor { offset, position });
                    // Eagerly load the new front bucket so that `front` and
                    // `front_mut` can rely on it being loaded.
                    let bucket = self.stored_buckets.get_mut(offset).unwrap();
                    let index = bucket.index;
                    if !bucket.is_loaded() {
                        let key = self.get_bucket_key(index)?;
                        let data = self.context.store().read_value(&key).await?;
                        let data = match data {
                            Some(value) => value,
                            None => {
                                return Err(ViewError::MissingEntries(
                                    "BucketQueueView::delete_front".into(),
                                ));
                            }
                        };
                        self.stored_buckets[offset].state = State::Loaded { data };
                    }
                }
            }
            None => {
                self.new_back_values.pop_front();
            }
        }
        Ok(())
    }
    /// Pushes a value to the end of the queue.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// assert_eq!(queue.elements().await.unwrap(), vec![34]);
    /// # })
    /// ```
    pub fn push_back(&mut self, value: T) {
        self.new_back_values.push_back(value);
    }
    /// Returns the list of elements in the queue.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// assert_eq!(queue.elements().await.unwrap(), vec![34, 37]);
    /// # })
    /// ```
    pub async fn elements(&self) -> Result<Vec<T>, ViewError> {
        let count = self.count();
        self.read_context(self.cursor, count).await
    }
    /// Returns the last element of a bucket queue view
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// assert_eq!(queue.back().await.unwrap(), Some(37));
    /// # })
    /// ```
    pub async fn back(&mut self) -> Result<Option<T>, ViewError>
    where
        T: Clone,
    {
        // Values pushed since the last save are always the most recent ones.
        if let Some(value) = self.new_back_values.back() {
            return Ok(Some(value.clone()));
        }
        // `cursor == None` means all stored entries are already consumed.
        if self.cursor.is_none() {
            return Ok(None);
        }
        let Some(bucket) = self.stored_buckets.back() else {
            return Ok(None);
        };
        // Load the last bucket on demand if needed.
        if !bucket.is_loaded() {
            let key = self.get_bucket_key(bucket.index)?;
            let data = self.context.store().read_value(&key).await?;
            let data = match data {
                Some(data) => data,
                None => {
                    return Err(ViewError::MissingEntries("BucketQueueView::back".into()));
                }
            };
            self.stored_buckets.back_mut().unwrap().state = State::Loaded { data };
        }
        let state = &self.stored_buckets.back_mut().unwrap().state;
        let State::Loaded { data } = state else {
            unreachable!();
        };
        Ok(Some(data.last().unwrap().clone()))
    }
    // Reads up to `count` elements starting from `cursor` (stored entries
    // first, then `new_back_values`). Two passes over the stored buckets:
    // the first gathers the keys of not-yet-loaded buckets in range (fetched
    // in a single multi-read); the second decodes and collects the values.
    async fn read_context(
        &self,
        cursor: Option<Cursor>,
        count: usize,
    ) -> Result<Vec<T>, ViewError> {
        if count == 0 {
            return Ok(Vec::new());
        }
        let mut elements = Vec::<T>::new();
        let mut count_remain = count;
        if let Some(cursor) = cursor {
            // First pass: collect keys of unloaded buckets within `count`.
            let mut keys = Vec::new();
            let mut position = cursor.position;
            for offset in cursor.offset..self.stored_buckets.len() {
                let bucket = &self.stored_buckets[offset];
                let size = bucket.len() - position;
                if !bucket.is_loaded() {
                    let key = self.get_bucket_key(bucket.index)?;
                    keys.push(key);
                };
                if size >= count_remain {
                    break;
                }
                count_remain -= size;
                position = 0;
            }
            let values = self.context.store().read_multi_values_bytes(&keys).await?;
            // Second pass: extract elements, deserializing fetched buckets.
            let mut value_pos = 0;
            count_remain = count;
            let mut position = cursor.position;
            for offset in cursor.offset..self.stored_buckets.len() {
                let bucket = &self.stored_buckets[offset];
                let size = bucket.len() - position;
                let data = match &bucket.state {
                    State::Loaded { data } => data,
                    State::NotLoaded { .. } => {
                        let value = match &values[value_pos] {
                            Some(value) => value,
                            None => {
                                return Err(ViewError::MissingEntries(
                                    "BucketQueueView::read_context".into(),
                                ));
                            }
                        };
                        value_pos += 1;
                        &bcs::from_bytes::<Vec<T>>(value)?
                    }
                };
                elements.extend(data[position..].iter().take(count_remain).cloned());
                if size >= count_remain {
                    return Ok(elements);
                }
                count_remain -= size;
                position = 0;
            }
        }
        // Fill the remainder (if any) from values pushed since the last save.
        let count_read = std::cmp::min(count_remain, self.new_back_values.len());
        elements.extend(self.new_back_values.range(0..count_read).cloned());
        Ok(elements)
    }
    /// Returns the first elements of a bucket queue view
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// queue.push_back(47);
    /// assert_eq!(queue.read_front(2).await.unwrap(), vec![34, 37]);
    /// # })
    /// ```
    pub async fn read_front(&self, count: usize) -> Result<Vec<T>, ViewError> {
        let count = std::cmp::min(count, self.count());
        self.read_context(self.cursor, count).await
    }
    /// Returns the last elements of a bucket queue view
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use crate::linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u128, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// queue.push_back(37);
    /// queue.push_back(47);
    /// assert_eq!(queue.read_back(2).await.unwrap(), vec![37, 47]);
    /// # })
    /// ```
    pub async fn read_back(&self, count: usize) -> Result<Vec<T>, ViewError> {
        let count = std::cmp::min(count, self.count());
        if count <= self.new_back_values.len() {
            // Everything requested lives in the in-memory tail.
            let start = self.new_back_values.len() - count;
            Ok(self
                .new_back_values
                .range(start..)
                .cloned()
                .collect::<Vec<_>>())
        } else {
            // Skip the first `increment` entries, then read the remaining
            // `count` entries from the computed cursor.
            let mut increment = self.count() - count;
            let Some(cursor) = self.cursor else {
                unreachable!();
            };
            let mut position = cursor.position;
            for offset in cursor.offset..self.stored_buckets.len() {
                let size = self.stored_buckets[offset].len() - position;
                if increment < size {
                    return self
                        .read_context(
                            Some(Cursor {
                                offset,
                                position: position + increment,
                            }),
                            count,
                        )
                        .await;
                }
                increment -= size;
                position = 0;
            }
            unreachable!();
        }
    }
    // Materializes the whole queue into `new_back_values` and marks storage
    // for deletion, so entries can then be mutated freely in memory.
    async fn load_all(&mut self) -> Result<(), ViewError> {
        if !self.delete_storage_first {
            let elements = self.elements().await?;
            self.new_back_values.clear();
            for elt in elements {
                self.new_back_values.push_back(elt);
            }
            self.cursor = None;
            self.delete_storage_first = true;
        }
        Ok(())
    }
    /// Gets a mutable iterator on the entries of the queue
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::bucket_queue_view::BucketQueueView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut queue = BucketQueueView::<_, u8, 5>::load(context).await.unwrap();
    /// queue.push_back(34);
    /// let mut iter = queue.iter_mut().await.unwrap();
    /// let value = iter.next().unwrap();
    /// *value = 42;
    /// assert_eq!(queue.elements().await.unwrap(), vec![42]);
    /// # })
    /// ```
    pub async fn iter_mut(&mut self) -> Result<IterMut<'_, T>, ViewError> {
        self.load_all().await?;
        Ok(self.new_back_values.iter_mut())
    }
}
impl<C: Context, T: Serialize + DeserializeOwned + Send + Sync + Clone, const N: usize> HashableView
    for BucketQueueView<C, T, N>
where
    Self: View,
{
    type Hasher = sha3::Sha3_256;
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        // No memoization here; both variants recompute via `hash`.
        self.hash().await
    }
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::BUCKET_QUEUE_VIEW_HASH_RUNTIME.measure_latency();
        // The hash is computed over the BCS serialization of all elements in
        // queue order.
        let elements = self.elements().await?;
        let mut hasher = sha3::Sha3_256::default();
        hasher.update_with_bcs_bytes(&elements)?;
        Ok(hasher.finalize())
    }
}
/// Type wrapping `BucketQueueView` while memoizing the hash.
pub type HashedBucketQueueView<C, T, const N: usize> =
    WrappedHashableContainerView<C, BucketQueueView<C, T, N>, HasherOutput>;
/// Wrapper around `BucketQueueView` to compute hashes based on the history of changes.
pub type HistoricallyHashedBucketQueueView<C, T, const N: usize> =
    HistoricallyHashableView<C, BucketQueueView<C, T, N>>;
#[cfg(with_graphql)]
mod graphql {
    use std::borrow::Cow;
    use super::BucketQueueView;
    use crate::{
        context::Context,
        graphql::{hash_name, mangle},
    };
    // GraphQL type names must be unique per element type, so the name embeds
    // the mangled type name of `T` plus a hash of it.
    impl<C: Send + Sync, T: async_graphql::OutputType, const N: usize> async_graphql::TypeName
        for BucketQueueView<C, T, N>
    {
        fn type_name() -> Cow<'static, str> {
            format!(
                "BucketQueueView_{}_{:08x}",
                mangle(T::type_name()),
                hash_name::<T>()
            )
            .into()
        }
    }
    #[async_graphql::Object(cache_control(no_cache), name_type)]
    impl<C: Context, T: async_graphql::OutputType, const N: usize> BucketQueueView<C, T, N>
    where
        C: Send + Sync,
        T: serde::ser::Serialize + serde::de::DeserializeOwned + Clone + Send + Sync,
    {
        #[graphql(derived(name = "count"))]
        async fn count_(&self) -> Result<u32, async_graphql::Error> {
            Ok(self.count() as u32)
        }
        // Returns up to `count` entries from the front (all entries when
        // `count` is not given).
        async fn entries(&self, count: Option<usize>) -> async_graphql::Result<Vec<T>> {
            Ok(self
                .read_front(count.unwrap_or_else(|| self.count()))
                .await?)
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/set_view.rs | linera-views/src/views/set_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Borrow, collections::BTreeMap, marker::PhantomData};
use allocative::Allocative;
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use serde::{de::DeserializeOwned, Serialize};
use crate::{
batch::Batch,
common::{CustomSerialize, HasherOutput, Update},
context::{BaseKey, Context},
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
store::ReadableKeyValueStore as _,
views::{ClonableView, HashableView, Hasher, ReplaceContext, View, ViewError},
};
// Prometheus metrics, only compiled when the `with_metrics` cfg is enabled.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// The runtime of hash computation
    pub static SET_VIEW_HASH_RUNTIME: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "set_view_hash_runtime",
            "SetView hash runtime",
            &[],
            exponential_bucket_latencies(5.0),
        )
    });
}
/// A [`View`] that supports inserting and removing values indexed by a key.
/// Membership of a key is encoded in storage as an empty value under that key.
#[derive(Debug, Allocative)]
#[allocative(bound = "C")]
pub struct ByteSetView<C> {
    /// The view context.
    #[allocative(skip)]
    context: C,
    /// Whether to clear storage before applying updates.
    delete_storage_first: bool,
    /// Pending changes (insertions/removals keyed by raw short key) not yet
    /// persisted to storage.
    updates: BTreeMap<Vec<u8>, Update<()>>,
}
impl<C: Context, C2: Context> ReplaceContext<C2> for ByteSetView<C> {
    type Target = ByteSetView<C2>;
    // Rebuilds the same pending state (updates + clear flag) over a new
    // context derived from the current one via `ctx`.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        ByteSetView {
            context: ctx(&self.context),
            delete_storage_first: self.delete_storage_first,
            updates: self.updates.clone(),
        }
    }
}
impl<C: Context> View for ByteSetView<C> {
    // Nothing is read eagerly at load time: membership is checked against
    // storage on demand.
    const NUM_INIT_KEYS: usize = 0;
    type Context = C;
    fn context(&self) -> C {
        self.context.clone()
    }
    fn pre_load(_context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        Ok(Vec::new())
    }
    fn post_load(context: C, _values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        Ok(Self {
            context,
            delete_storage_first: false,
            updates: BTreeMap::new(),
        })
    }
    fn rollback(&mut self) {
        self.delete_storage_first = false;
        self.updates.clear();
    }
    async fn has_pending_changes(&self) -> bool {
        if self.delete_storage_first {
            return true;
        }
        !self.updates.is_empty()
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        let mut delete_view = false;
        if self.delete_storage_first {
            // Clear everything, then re-insert the keys that were set after
            // the clear. Membership is stored as an empty value under the key.
            delete_view = true;
            batch.delete_key_prefix(self.context.base_key().bytes.clone());
            for (index, update) in self.updates.iter() {
                if let Update::Set(_) = update {
                    let key = self.context.base_key().base_index(index);
                    batch.put_key_value_bytes(key, Vec::new());
                    // At least one key survives, so the view is not deleted.
                    delete_view = false;
                }
            }
        } else {
            for (index, update) in self.updates.iter() {
                let key = self.context.base_key().base_index(index);
                match update {
                    Update::Removed => batch.delete_key(key),
                    Update::Set(_) => batch.put_key_value_bytes(key, Vec::new()),
                }
            }
        }
        Ok(delete_view)
    }
    fn post_save(&mut self) {
        self.delete_storage_first = false;
        self.updates.clear();
    }
    fn clear(&mut self) {
        self.delete_storage_first = true;
        self.updates.clear();
    }
}
impl<C: Context> ClonableView for ByteSetView<C> {
    // Duplicates the in-memory state; both copies keep addressing the same
    // storage keys via the cloned context.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let context = self.context.clone();
        let updates = self.updates.clone();
        Ok(ByteSetView {
            context,
            delete_storage_first: self.delete_storage_first,
            updates,
        })
    }
}
impl<C: Context> ByteSetView<C> {
    /// Inserts a value. If it is already present, this has no effect.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// assert_eq!(set.contains(&[0, 1]).await.unwrap(), true);
    /// # })
    /// ```
    pub fn insert(&mut self, short_key: Vec<u8>) {
        self.updates.insert(short_key, Update::Set(()));
    }
    /// Removes a value from the set. If absent then no effect.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.remove(vec![0, 1]);
    /// assert_eq!(set.contains(&[0, 1]).await.unwrap(), false);
    /// # })
    /// ```
    pub fn remove(&mut self, short_key: Vec<u8>) {
        if self.delete_storage_first {
            // Optimization: No need to mark `short_key` for deletion as we are going to remove all the keys at once.
            self.updates.remove(&short_key);
        } else {
            self.updates.insert(short_key, Update::Removed);
        }
    }
    /// Gets the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.context.extra()
    }
}
impl<C: Context> ByteSetView<C> {
    /// Returns true if the given index exists in the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// assert_eq!(set.contains(&[34]).await.unwrap(), false);
    /// assert_eq!(set.contains(&[0, 1]).await.unwrap(), true);
    /// # })
    /// ```
    pub async fn contains(&self, short_key: &[u8]) -> Result<bool, ViewError> {
        // Staged updates shadow whatever is in storage.
        if let Some(update) = self.updates.get(short_key) {
            let value = match update {
                Update::Removed => false,
                Update::Set(()) => true,
            };
            return Ok(value);
        }
        // A staged clear hides all stored keys.
        if self.delete_storage_first {
            return Ok(false);
        }
        // Fall back to a storage lookup.
        let key = self.context.base_key().base_index(short_key);
        Ok(self.context.store().contains_key(&key).await?)
    }
}
impl<C: Context> ByteSetView<C> {
    /// Returns the list of keys in the set. The order is lexicographic.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// set.insert(vec![0, 2]);
    /// assert_eq!(set.keys().await.unwrap(), vec![vec![0, 1], vec![0, 2]]);
    /// # })
    /// ```
    pub async fn keys(&self) -> Result<Vec<Vec<u8>>, ViewError> {
        let mut keys = Vec::new();
        self.for_each_key(|key| {
            keys.push(key.to_vec());
            Ok(())
        })
        .await?;
        Ok(keys)
    }
    /// Returns the number of entries in the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// set.insert(vec![0, 2]);
    /// assert_eq!(set.count().await.unwrap(), 2);
    /// # })
    /// ```
    pub async fn count(&self) -> Result<usize, ViewError> {
        let mut count = 0;
        self.for_each_key(|_key| {
            count += 1;
            Ok(())
        })
        .await?;
        Ok(count)
    }
    /// Applies a function f on each index (aka key). Keys are visited in a
    /// lexicographic order. If the function returns false, then the loop ends
    /// prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// set.insert(vec![0, 2]);
    /// set.insert(vec![3]);
    /// let mut count = 0;
    /// set.for_each_key_while(|_key| {
    ///     count += 1;
    ///     Ok(count < 2)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 2);
    /// # })
    /// ```
    pub async fn for_each_key_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<bool, ViewError> + Send,
    {
        let mut updates = self.updates.iter();
        let mut update = updates.next();
        if !self.delete_storage_first {
            // Merge the stored keys with the staged updates while preserving
            // lexicographic order. A staged `Removed` entry hides its stored key.
            let base = &self.context.base_key().bytes;
            for index in self.context.store().find_keys_by_prefix(base).await? {
                loop {
                    match update {
                        Some((key, value)) if key <= &index => {
                            // Emit staged insertions that sort at or before the
                            // stored key; removals are silently skipped.
                            if let Update::Set(_) = value {
                                if !f(key)? {
                                    return Ok(());
                                }
                            }
                            update = updates.next();
                            if key == &index {
                                // The staged update supersedes this stored key,
                                // so do not visit it again below.
                                break;
                            }
                        }
                        _ => {
                            // No staged update at or before `index`: emit the stored key.
                            if !f(&index)? {
                                return Ok(());
                            }
                            break;
                        }
                    }
                }
            }
        }
        // Emit any staged insertions that sort after the last stored key
        // (or all of them, if a clear is staged).
        while let Some((key, value)) = update {
            if let Update::Set(_) = value {
                if !f(key)? {
                    return Ok(());
                }
            }
            update = updates.next();
        }
        Ok(())
    }
    /// Applies a function f on each serialized index (aka key). Keys are visited in a
    /// lexicographic order.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::ByteSetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = ByteSetView::load(context).await.unwrap();
    /// set.insert(vec![0, 1]);
    /// set.insert(vec![0, 2]);
    /// set.insert(vec![3]);
    /// let mut count = 0;
    /// set.for_each_key(|_key| {
    ///     count += 1;
    ///     Ok(())
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 3);
    /// # })
    /// ```
    pub async fn for_each_key<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(&[u8]) -> Result<(), ViewError> + Send,
    {
        // Non-early-exit variant, expressed via `for_each_key_while`.
        self.for_each_key_while(|key| {
            f(key)?;
            Ok(true)
        })
        .await
    }
}
impl<C: Context> HashableView for ByteSetView<C> {
    type Hasher = sha3::Sha3_256;
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        // Hashing a set never mutates it, so this simply delegates to `hash`.
        self.hash().await
    }
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        #[cfg(with_metrics)]
        let _hash_latency = metrics::SET_VIEW_HASH_RUNTIME.measure_latency();
        // Fold every key into the hasher (in lexicographic order), then append
        // the total key count before finalizing.
        let mut hasher = sha3::Sha3_256::default();
        let mut count = 0u32;
        self.for_each_key(|key| {
            count += 1;
            hasher.update_with_bytes(key)?;
            Ok(())
        })
        .await?;
        hasher.update_with_bcs_bytes(&count)?;
        Ok(hasher.finalize())
    }
}
/// A [`View`] implementing the set functionality with the index `I` being any serializable type.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, I")]
pub struct SetView<C, I> {
    /// The underlying set storing entries with serialized keys.
    set: ByteSetView<C>,
    /// Phantom data for the key type; `I` is only used at the API surface,
    /// no value of type `I` is ever stored.
    #[allocative(skip)]
    _phantom: PhantomData<I>,
}
impl<C: Context, I: Send + Sync + Serialize, C2: Context> ReplaceContext<C2> for SetView<C, I> {
    type Target = SetView<C2, I>;
    /// Rebuilds this view over a new context derived from the current one,
    /// delegating to the underlying byte set.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        SetView {
            set: self.set.with_context(ctx).await,
            _phantom: self._phantom,
        }
    }
}
// Every `View` operation delegates directly to the wrapped `ByteSetView`;
// the index type `I` only affects the typed API, not storage layout.
impl<C: Context, I: Send + Sync + Serialize> View for SetView<C, I> {
    const NUM_INIT_KEYS: usize = ByteSetView::<C>::NUM_INIT_KEYS;
    type Context = C;
    fn context(&self) -> C {
        self.set.context()
    }
    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        ByteSetView::<C>::pre_load(context)
    }
    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        let set = ByteSetView::post_load(context, values)?;
        Ok(Self {
            set,
            _phantom: PhantomData,
        })
    }
    fn rollback(&mut self) {
        self.set.rollback()
    }
    async fn has_pending_changes(&self) -> bool {
        self.set.has_pending_changes().await
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        self.set.pre_save(batch)
    }
    fn post_save(&mut self) {
        self.set.post_save()
    }
    fn clear(&mut self) {
        self.set.clear()
    }
}
impl<C: Context, I: Send + Sync + Serialize> ClonableView for SetView<C, I> {
    /// Clones the underlying byte set; the phantom index type carries no data.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        let set = self.set.clone_unchecked()?;
        Ok(Self {
            set,
            _phantom: PhantomData,
        })
    }
}
impl<C: Context, I: Serialize> SetView<C, I> {
    /// Inserts a value. If already present then no effect.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::SetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = SetView::<_, u32>::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// assert_eq!(set.indices().await.unwrap().len(), 1);
    /// # })
    /// ```
    pub fn insert<Q>(&mut self, index: &Q) -> Result<(), ViewError>
    where
        I: Borrow<Q>,
        Q: Serialize + ?Sized,
    {
        // Serialization failures surface as `ViewError`.
        let short_key = BaseKey::derive_short_key(index)?;
        self.set.insert(short_key);
        Ok(())
    }
    /// Removes a value. If absent then nothing is done.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::SetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = SetView::<_, u32>::load(context).await.unwrap();
    /// set.remove(&(34 as u32));
    /// assert_eq!(set.indices().await.unwrap().len(), 0);
    /// # })
    /// ```
    pub fn remove<Q>(&mut self, index: &Q) -> Result<(), ViewError>
    where
        I: Borrow<Q>,
        Q: Serialize + ?Sized,
    {
        let short_key = BaseKey::derive_short_key(index)?;
        self.set.remove(short_key);
        Ok(())
    }
    /// Obtains the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.set.extra()
    }
}
impl<C: Context, I: Serialize> SetView<C, I> {
    /// Returns true if the given index exists in the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::SetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set: SetView<_, u32> = SetView::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// assert_eq!(set.contains(&(34 as u32)).await.unwrap(), true);
    /// assert_eq!(set.contains(&(45 as u32)).await.unwrap(), false);
    /// # })
    /// ```
    pub async fn contains<Q>(&self, index: &Q) -> Result<bool, ViewError>
    where
        I: Borrow<Q>,
        Q: Serialize + ?Sized,
    {
        // Serialize the index, then look it up in the byte set.
        let short_key = BaseKey::derive_short_key(index)?;
        self.set.contains(&short_key).await
    }
}
impl<C: Context, I: Serialize + DeserializeOwned + Send> SetView<C, I> {
    /// Returns the list of indices in the set. The order is determined by serialization.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::SetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set: SetView<_, u32> = SetView::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// assert_eq!(set.indices().await.unwrap(), vec![34 as u32]);
    /// # })
    /// ```
    pub async fn indices(&self) -> Result<Vec<I>, ViewError> {
        let mut indices = Vec::new();
        self.for_each_index(|index| {
            indices.push(index);
            Ok(())
        })
        .await?;
        Ok(indices)
    }
    /// Returns the number of entries in the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::{context::MemoryContext, set_view::SetView};
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set: SetView<_, u32> = SetView::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// assert_eq!(set.count().await.unwrap(), 1);
    /// # })
    /// ```
    pub async fn count(&self) -> Result<usize, ViewError> {
        self.set.count().await
    }
    /// Applies a function f on each index. Indices are visited in an order
    /// determined by the serialization. If the function returns false, then the
    /// loop ends prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::SetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = SetView::<_, u32>::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// set.insert(&(37 as u32));
    /// set.insert(&(42 as u32));
    /// let mut count = 0;
    /// set.for_each_index_while(|_key| {
    ///     count += 1;
    ///     Ok(count < 2)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 2);
    /// # })
    /// ```
    pub async fn for_each_index_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(I) -> Result<bool, ViewError> + Send,
    {
        // Deserialize each byte key back into an `I` before handing it to `f`.
        self.set
            .for_each_key_while(|key| {
                let index = BaseKey::deserialize_value(key)?;
                f(index)
            })
            .await?;
        Ok(())
    }
    /// Applies a function f on each index. Indices are visited in an order
    /// determined by the serialization.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::SetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = SetView::<_, u32>::load(context).await.unwrap();
    /// set.insert(&(34 as u32));
    /// set.insert(&(37 as u32));
    /// set.insert(&(42 as u32));
    /// let mut count = 0;
    /// set.for_each_index(|_key| {
    ///     count += 1;
    ///     Ok(())
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 3);
    /// # })
    /// ```
    pub async fn for_each_index<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(I) -> Result<(), ViewError> + Send,
    {
        self.set
            .for_each_key(|key| {
                let index = BaseKey::deserialize_value(key)?;
                f(index)
            })
            .await?;
        Ok(())
    }
}
// Hashing is delegated entirely to the byte set: the hash depends on the
// serialized keys, not on the index type `I`.
impl<C, I> HashableView for SetView<C, I>
where
    Self: View,
    ByteSetView<C>: HashableView,
{
    type Hasher = <ByteSetView<C> as HashableView>::Hasher;
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        self.set.hash_mut().await
    }
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        self.set.hash().await
    }
}
/// A [`View`] implementing the set functionality with the index `I` being a type with a custom
/// serialization format.
#[derive(Debug, Allocative)]
#[allocative(bound = "C, I")]
pub struct CustomSetView<C, I> {
    /// The underlying set storing entries with custom-serialized keys.
    set: ByteSetView<C>,
    /// Phantom data for the key type; no value of type `I` is ever stored.
    #[allocative(skip)]
    _phantom: PhantomData<I>,
}
// Every `View` operation delegates to the wrapped `ByteSetView`, mirroring
// the corresponding impl for `SetView`.
impl<C: Context, I: Send + Sync + CustomSerialize> View for CustomSetView<C, I> {
    const NUM_INIT_KEYS: usize = ByteSetView::<C>::NUM_INIT_KEYS;
    type Context = C;
    fn context(&self) -> C {
        self.set.context()
    }
    fn pre_load(context: &C) -> Result<Vec<Vec<u8>>, ViewError> {
        ByteSetView::<C>::pre_load(context)
    }
    fn post_load(context: C, values: &[Option<Vec<u8>>]) -> Result<Self, ViewError> {
        ByteSetView::post_load(context, values).map(|set| Self {
            set,
            _phantom: PhantomData,
        })
    }
    fn rollback(&mut self) {
        self.set.rollback()
    }
    async fn has_pending_changes(&self) -> bool {
        self.set.has_pending_changes().await
    }
    fn pre_save(&self, batch: &mut Batch) -> Result<bool, ViewError> {
        self.set.pre_save(batch)
    }
    fn post_save(&mut self) {
        self.set.post_save()
    }
    fn clear(&mut self) {
        self.set.clear()
    }
}
impl<C, I> ClonableView for CustomSetView<C, I>
where
    C: Context,
    I: Send + Sync + CustomSerialize,
{
    /// Clones the underlying byte set; the phantom index type carries no data.
    fn clone_unchecked(&mut self) -> Result<Self, ViewError> {
        Ok(CustomSetView {
            set: self.set.clone_unchecked()?,
            _phantom: PhantomData,
        })
    }
}
impl<C: Context, I: CustomSerialize> CustomSetView<C, I> {
    /// Inserts a value. If present then it has no effect.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// assert_eq!(set.indices().await.unwrap().len(), 1);
    /// # })
    /// ```
    pub fn insert<Q>(&mut self, index: &Q) -> Result<(), ViewError>
    where
        I: Borrow<Q>,
        Q: CustomSerialize,
    {
        // Custom serialization failures surface as `ViewError`.
        let short_key = index.to_custom_bytes()?;
        self.set.insert(short_key);
        Ok(())
    }
    /// Removes a value. If absent then nothing is done.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.remove(&(34 as u128));
    /// assert_eq!(set.indices().await.unwrap().len(), 0);
    /// # })
    /// ```
    pub fn remove<Q>(&mut self, index: &Q) -> Result<(), ViewError>
    where
        I: Borrow<Q>,
        Q: CustomSerialize,
    {
        let short_key = index.to_custom_bytes()?;
        self.set.remove(short_key);
        Ok(())
    }
    /// Obtains the extra data.
    pub fn extra(&self) -> &C::Extra {
        self.set.extra()
    }
}
impl<C, I> CustomSetView<C, I>
where
    C: Context,
    I: CustomSerialize,
{
    /// Returns true if the given index exists in the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// assert_eq!(set.contains(&(34 as u128)).await.unwrap(), true);
    /// assert_eq!(set.contains(&(37 as u128)).await.unwrap(), false);
    /// # })
    /// ```
    pub async fn contains<Q>(&self, index: &Q) -> Result<bool, ViewError>
    where
        I: Borrow<Q>,
        Q: CustomSerialize,
    {
        // Serialize with the custom format, then look up in the byte set.
        let short_key = index.to_custom_bytes()?;
        self.set.contains(&short_key).await
    }
}
impl<C, I> CustomSetView<C, I>
where
    C: Context,
    I: Sync + Send + CustomSerialize,
{
    /// Returns the list of indices in the set. The order is determined by the custom
    /// serialization.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// set.insert(&(37 as u128));
    /// assert_eq!(set.indices().await.unwrap(), vec![34 as u128, 37 as u128]);
    /// # })
    /// ```
    pub async fn indices(&self) -> Result<Vec<I>, ViewError> {
        let mut indices = Vec::new();
        self.for_each_index(|index| {
            indices.push(index);
            Ok(())
        })
        .await?;
        Ok(indices)
    }
    /// Returns the number of entries of the set.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// set.insert(&(37 as u128));
    /// assert_eq!(set.count().await.unwrap(), 2);
    /// # })
    /// ```
    pub async fn count(&self) -> Result<usize, ViewError> {
        self.set.count().await
    }
    /// Applies a function f on each index. Indices are visited in an order
    /// determined by the custom serialization. If the function returns false,
    /// then the loop ends prematurely.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// set.insert(&(37 as u128));
    /// set.insert(&(42 as u128));
    /// let mut count = 0;
    /// set.for_each_index_while(|_key| {
    ///     count += 1;
    ///     Ok(count < 5)
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 3);
    /// # })
    /// ```
    pub async fn for_each_index_while<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(I) -> Result<bool, ViewError> + Send,
    {
        // Decode each byte key with the custom format before handing it to `f`.
        self.set
            .for_each_key_while(|key| {
                let index = I::from_custom_bytes(key)?;
                f(index)
            })
            .await?;
        Ok(())
    }
    /// Applies a function f on each index. Indices are visited in an order
    /// determined by the custom serialization.
    /// ```rust
    /// # tokio_test::block_on(async {
    /// # use linera_views::context::MemoryContext;
    /// # use linera_views::set_view::CustomSetView;
    /// # use linera_views::views::View;
    /// # let context = MemoryContext::new_for_testing(());
    /// let mut set = CustomSetView::<_, u128>::load(context).await.unwrap();
    /// set.insert(&(34 as u128));
    /// set.insert(&(37 as u128));
    /// set.insert(&(42 as u128));
    /// let mut count = 0;
    /// set.for_each_index(|_key| {
    ///     count += 1;
    ///     Ok(())
    /// })
    /// .await
    /// .unwrap();
    /// assert_eq!(count, 3);
    /// # })
    /// ```
    pub async fn for_each_index<F>(&self, mut f: F) -> Result<(), ViewError>
    where
        F: FnMut(I) -> Result<(), ViewError> + Send,
    {
        self.set
            .for_each_key(|key| {
                let index = I::from_custom_bytes(key)?;
                f(index)
            })
            .await?;
        Ok(())
    }
}
impl<C: Context, I> HashableView for CustomSetView<C, I>
where
    Self: View,
{
    // NOTE(review): unlike `SetView`, the hasher is hardcoded here rather than
    // delegated via `<ByteSetView<C> as HashableView>::Hasher`; the two types
    // currently coincide (`sha3::Sha3_256`) — consider unifying for consistency.
    type Hasher = sha3::Sha3_256;
    async fn hash_mut(&mut self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        self.set.hash_mut().await
    }
    async fn hash(&self) -> Result<<Self::Hasher as Hasher>::Output, ViewError> {
        self.set.hash().await
    }
}
/// Type wrapping `ByteSetView` while memoizing the hash.
pub type HashedByteSetView<C> = WrappedHashableContainerView<C, ByteSetView<C>, HasherOutput>;
/// Wrapper around `ByteSetView` to compute hashes based on the history of changes.
pub type HistoricallyHashedByteSetView<C> = HistoricallyHashableView<C, ByteSetView<C>>;
/// Type wrapping `SetView` while memoizing the hash.
pub type HashedSetView<C, I> = WrappedHashableContainerView<C, SetView<C, I>, HasherOutput>;
/// Wrapper around `SetView` to compute hashes based on the history of changes.
pub type HistoricallyHashedSetView<C, I> = HistoricallyHashableView<C, SetView<C, I>>;
/// Type wrapping `CustomSetView` while memoizing the hash.
pub type HashedCustomSetView<C, I> =
    WrappedHashableContainerView<C, CustomSetView<C, I>, HasherOutput>;
/// Wrapper around `CustomSetView` to compute hashes based on the history of changes.
pub type HistoricallyHashedCustomSetView<C, I> = HistoricallyHashableView<C, CustomSetView<C, I>>;
#[cfg(with_graphql)]
mod graphql {
use std::borrow::Cow;
use serde::{de::DeserializeOwned, Serialize};
use super::{CustomSetView, SetView};
use crate::{
common::CustomSerialize,
context::Context,
graphql::{hash_name, mangle},
};
impl<C: Send + Sync, I: async_graphql::OutputType> async_graphql::TypeName for SetView<C, I> {
    /// Derives a unique GraphQL type name from the element type's mangled name
    /// plus a short hash, to avoid collisions between instantiations.
    fn type_name() -> Cow<'static, str> {
        format!(
            "SetView_{}_{:08x}",
            mangle(I::type_name()),
            hash_name::<I>(),
        )
        .into()
    }
}
#[async_graphql::Object(cache_control(no_cache), name_type)]
impl<C, I> SetView<C, I>
where
    C: Context,
    I: Send + Sync + Serialize + DeserializeOwned + async_graphql::OutputType,
{
    /// Returns the elements of the set, optionally truncated to the first `count`.
    async fn elements(&self, count: Option<usize>) -> Result<Vec<I>, async_graphql::Error> {
        let mut indices = self.indices().await?;
        if let Some(count) = count {
            indices.truncate(count);
        }
        Ok(indices)
    }
    /// Exposed in GraphQL as `count`; the trailing underscore avoids clashing
    /// with the inherent `count` method.
    #[graphql(derived(name = "count"))]
    async fn count_(&self) -> Result<u32, async_graphql::Error> {
        Ok(self.count().await? as u32)
    }
}
impl<C: Send + Sync, I: async_graphql::OutputType> async_graphql::TypeName for CustomSetView<C, I> {
    /// Derives a unique GraphQL type name, mirroring the `SetView` scheme.
    fn type_name() -> Cow<'static, str> {
        format!(
            "CustomSetView_{}_{:08x}",
            mangle(I::type_name()),
            hash_name::<I>(),
        )
        .into()
    }
}
#[async_graphql::Object(cache_control(no_cache), name_type)]
impl<C, I> CustomSetView<C, I>
where
C: Context,
I: Send + Sync + CustomSerialize + async_graphql::OutputType,
{
async fn elements(&self, count: Option<usize>) -> Result<Vec<I>, async_graphql::Error> {
let mut indices = self.indices().await?;
if let Some(count) = count {
indices.truncate(count);
}
Ok(indices)
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/views/unit_tests/views.rs | linera-views/src/views/unit_tests/views.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::VecDeque, fmt::Debug, marker::PhantomData};
use serde::{de::DeserializeOwned, Serialize};
use test_case::test_case;
#[cfg(with_dynamodb)]
use crate::dynamo_db::DynamoDbDatabase;
#[cfg(with_rocksdb)]
use crate::rocks_db::RocksDbDatabase;
#[cfg(with_scylladb)]
use crate::scylla_db::ScyllaDbDatabase;
#[cfg(any(with_scylladb, with_dynamodb, with_rocksdb))]
use crate::store::{KeyValueDatabase, TestKeyValueDatabase};
use crate::{
batch::Batch,
context::{Context, MemoryContext},
queue_view::QueueView,
reentrant_collection_view::ReentrantCollectionView,
register_view::{HashedRegisterView, RegisterView},
store::WritableKeyValueStore as _,
test_utils::test_views::{
TestBucketQueueView, TestCollectionView, TestLogView, TestMapView, TestQueueView,
TestRegisterView, TestSetView, TestView,
},
views::{HashableView, View},
};
#[cfg(any(with_rocksdb, with_scylladb, with_dynamodb))]
use crate::{context::ViewContext, random::generate_test_namespace};
// One driver per storage backend; each runs the same scenario matrix from
// `run_test_queue_operations_test_cases` against its own context factory.
#[tokio::test]
async fn test_queue_operations_with_memory_context() -> Result<(), anyhow::Error> {
    run_test_queue_operations_test_cases(MemoryContextFactory).await
}
#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_queue_operations_with_rocks_db_context() -> Result<(), anyhow::Error> {
    run_test_queue_operations_test_cases(RocksDbContextFactory).await
}
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_queue_operations_with_dynamo_db_context() -> Result<(), anyhow::Error> {
    run_test_queue_operations_test_cases(DynamoDbContextFactory).await
}
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_queue_operations_with_scylla_db_context() -> Result<(), anyhow::Error> {
    run_test_queue_operations_test_cases(ScyllaDbContextFactory).await
}
/// A single step in a queue test scenario.
#[derive(Clone, Copy, Debug)]
pub enum Operation {
    /// Removes the front element of the queue.
    DeleteFront,
    /// Appends the given value at the back of the queue.
    PushBack(usize),
    /// Saves the view to storage and reloads it from scratch.
    CommitAndReload,
}
/// Runs every scenario from a fixed matrix of queue operations, injecting a
/// `CommitAndReload` at each possible position to exercise persistence.
async fn run_test_queue_operations_test_cases<C>(mut contexts: C) -> Result<(), anyhow::Error>
where
    C: TestContextFactory,
{
    use self::Operation::*;
    let test_cases = [
        vec![DeleteFront],
        vec![PushBack(100)],
        vec![PushBack(200), DeleteFront],
        vec![PushBack(1), PushBack(2), PushBack(3)],
        vec![
            PushBack(1),
            PushBack(2),
            PushBack(3),
            DeleteFront,
            DeleteFront,
            DeleteFront,
        ],
        vec![
            DeleteFront,
            DeleteFront,
            DeleteFront,
            PushBack(1),
            PushBack(2),
            PushBack(3),
        ],
        vec![
            PushBack(1),
            DeleteFront,
            PushBack(2),
            DeleteFront,
            PushBack(3),
            DeleteFront,
        ],
        vec![
            PushBack(1),
            PushBack(2),
            DeleteFront,
            DeleteFront,
            PushBack(100),
        ],
        vec![
            PushBack(1),
            PushBack(2),
            DeleteFront,
            DeleteFront,
            PushBack(100),
            PushBack(3),
            DeleteFront,
        ],
    ];
    for test_case in test_cases {
        // NOTE(review): for length-1 cases (`vec![DeleteFront]`, `vec![PushBack(100)]`)
        // the range `1..test_case.len()` is empty, so those scenarios never run —
        // confirm whether that is intended.
        for commit_location in 1..test_case.len() {
            let mut tweaked_test_case = test_case.clone();
            // Inject a commit/reload mid-scenario, plus a final one at the end.
            tweaked_test_case.insert(commit_location + 1, CommitAndReload);
            tweaked_test_case.push(CommitAndReload);
            run_test_queue_operations(tweaked_test_case, contexts.new_context().await?).await?;
        }
    }
    Ok(())
}
/// Replays `operations` against both a `QueueView` and a plain `VecDeque`
/// oracle, checking that they agree after every step.
async fn run_test_queue_operations<C>(
    operations: impl IntoIterator<Item = Operation>,
    context: C,
) -> Result<(), anyhow::Error>
where
    C: Context + 'static,
{
    let mut expected_state = VecDeque::new();
    let mut queue = QueueView::load(context.clone()).await?;
    check_queue_state(&mut queue, &expected_state).await?;
    for operation in operations {
        match operation {
            Operation::PushBack(new_item) => {
                queue.push_back(new_item);
                expected_state.push_back(new_item);
            }
            Operation::DeleteFront => {
                queue.delete_front();
                // The oracle silently no-ops on an empty queue, like the view.
                expected_state.pop_front();
            }
            Operation::CommitAndReload => {
                // Persist, then rebuild the view from storage so subsequent
                // checks exercise the saved state rather than staged changes.
                save_view(&context, &mut queue).await?;
                queue = QueueView::load(context.clone()).await?;
            }
        }
        check_queue_state(&mut queue, &expected_state).await?;
    }
    Ok(())
}
/// Asserts that the queue view agrees with the `VecDeque` oracle on front,
/// back, count, and full contents read from both ends.
async fn check_queue_state<C>(
    queue: &mut QueueView<C, usize>,
    expected_state: &VecDeque<usize>,
) -> Result<(), anyhow::Error>
where
    C: Context,
{
    let count = expected_state.len();
    assert_eq!(queue.front().await?, expected_state.front().copied());
    assert_eq!(queue.back().await?, expected_state.back().copied());
    assert_eq!(queue.count(), count);
    check_contents(queue.read_front(count).await?, expected_state);
    check_contents(queue.read_back(count).await?, expected_state);
    Ok(())
}
/// Asserts that `contents` matches `expected` element-for-element, in order.
fn check_contents(contents: Vec<usize>, expected: &VecDeque<usize>) {
    let actual: VecDeque<usize> = contents.into_iter().collect();
    assert_eq!(&actual, expected);
}
/// Abstraction over storage backends so the same queue scenarios can run
/// against each of them.
trait TestContextFactory {
    type Context: Context + 'static;
    /// Returns a fresh context backed by a newly (re)created namespace/store.
    async fn new_context(&mut self) -> Result<Self::Context, anyhow::Error>;
}
/// Factory producing fresh in-memory contexts.
struct MemoryContextFactory;
impl TestContextFactory for MemoryContextFactory {
    type Context = MemoryContext<()>;
    async fn new_context(&mut self) -> Result<Self::Context, anyhow::Error> {
        Ok(MemoryContext::new_for_testing(()))
    }
}
// Each backend factory recreates a randomly-named namespace per call, so
// every scenario starts from an empty store.
#[cfg(with_rocksdb)]
struct RocksDbContextFactory;
#[cfg(with_rocksdb)]
impl TestContextFactory for RocksDbContextFactory {
    type Context = ViewContext<(), <RocksDbDatabase as KeyValueDatabase>::Store>;
    async fn new_context(&mut self) -> Result<Self::Context, anyhow::Error> {
        let config = RocksDbDatabase::new_test_config().await?;
        let namespace = generate_test_namespace();
        let database = RocksDbDatabase::recreate_and_connect(&config, &namespace).await?;
        let store = database.open_shared(&[])?;
        let context = ViewContext::create_root_context(store, ()).await?;
        Ok(context)
    }
}
#[cfg(with_dynamodb)]
struct DynamoDbContextFactory;
#[cfg(with_dynamodb)]
impl TestContextFactory for DynamoDbContextFactory {
    type Context = ViewContext<(), <DynamoDbDatabase as KeyValueDatabase>::Store>;
    async fn new_context(&mut self) -> Result<Self::Context, anyhow::Error> {
        let config = DynamoDbDatabase::new_test_config().await?;
        let namespace = generate_test_namespace();
        let database = DynamoDbDatabase::recreate_and_connect(&config, &namespace).await?;
        let store = database.open_shared(&[])?;
        Ok(ViewContext::create_root_context(store, ()).await?)
    }
}
#[cfg(with_scylladb)]
struct ScyllaDbContextFactory;
#[cfg(with_scylladb)]
impl TestContextFactory for ScyllaDbContextFactory {
    type Context = ViewContext<(), <ScyllaDbDatabase as KeyValueDatabase>::Store>;
    async fn new_context(&mut self) -> Result<Self::Context, anyhow::Error> {
        let config = ScyllaDbDatabase::new_test_config().await?;
        let namespace = generate_test_namespace();
        let database = ScyllaDbDatabase::recreate_and_connect(&config, &namespace).await?;
        let store = database.open_shared(&[])?;
        let context = ViewContext::create_root_context(store, ()).await?;
        Ok(context)
    }
}
/// Checks if a cloned view contains the staged changes from its source.
#[test_case(PhantomData::<TestCollectionView<_>>; "with CollectionView")]
#[test_case(PhantomData::<TestLogView<_>>; "with LogView")]
#[test_case(PhantomData::<TestMapView<_>>; "with MapView")]
#[test_case(PhantomData::<TestSetView<_>>; "with SetView")]
#[test_case(PhantomData::<TestQueueView<_>>; "with QueueView")]
#[test_case(PhantomData::<TestBucketQueueView<_>>; "with BucketQueueView")]
#[test_case(PhantomData::<TestRegisterView<_>>; "with RegisterView")]
#[tokio::test]
async fn test_clone_includes_staged_changes<V>(
    _view_type: PhantomData<V>,
) -> Result<(), anyhow::Error>
where
    V: TestView,
{
    let context = MemoryContext::new_for_testing(());
    let mut original = V::load(context).await?;
    // Stage (but do not save) changes, then verify the clone observes them.
    let original_state = original.stage_initial_changes().await?;
    let clone = original.clone_unchecked()?;
    let clone_state = clone.read().await?;
    assert_eq!(original_state, clone_state);
    Ok(())
}
/// Checks if new staged changes are separate between the cloned view and its source.
#[test_case(PhantomData::<TestCollectionView<_>>; "with CollectionView")]
#[test_case(PhantomData::<TestLogView<_>>; "with LogView")]
#[test_case(PhantomData::<TestMapView<_>>; "with MapView")]
#[test_case(PhantomData::<TestSetView<_>>; "with SetView")]
#[test_case(PhantomData::<TestQueueView<_>>; "with QueueView")]
#[test_case(PhantomData::<TestBucketQueueView<_>>; "with BucketQueueView")]
#[test_case(PhantomData::<TestRegisterView<_>>; "with RegisterView")]
#[tokio::test]
async fn test_original_and_clone_stage_changes_separately<V>(
    _view_type: PhantomData<V>,
) -> Result<(), anyhow::Error>
where
    V: TestView,
{
    let context = MemoryContext::new_for_testing(());
    let mut original = V::load(context).await?;
    original.stage_initial_changes().await?;
    let mut first_clone = original.clone_unchecked()?;
    let second_clone = original.clone_unchecked()?;
    // Mutate the original and the first clone divergently; the second clone
    // stays untouched. All three states must end up pairwise distinct.
    let original_state = original.stage_changes_to_be_discarded().await?;
    let first_clone_state = first_clone.stage_changes_to_be_persisted().await?;
    let second_clone_state = second_clone.read().await?;
    assert_ne!(original_state, first_clone_state);
    assert_ne!(original_state, second_clone_state);
    assert_ne!(first_clone_state, second_clone_state);
    Ok(())
}
/// Checks if the cached hash value persisted in storage is cleared when flushing a cleared
/// [`HashedRegisterView`].
///
/// Otherwise `rollback` may set the cached staged hash value to an incorrect value.
#[tokio::test]
async fn test_clearing_of_cached_stored_hash() -> anyhow::Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut view = HashedRegisterView::<_, String>::load(context.clone()).await?;
    let empty_hash = view.hash().await?;
    assert_eq!(view.hash_mut().await?, empty_hash);
    view.set("some value".to_owned());
    let populated_hash = view.hash().await?;
    assert_eq!(view.hash_mut().await?, populated_hash);
    assert_ne!(populated_hash, empty_hash);
    save_view(&context, &mut view).await?;
    assert_eq!(view.hash().await?, populated_hash);
    assert_eq!(view.hash_mut().await?, populated_hash);
    view.clear();
    assert_eq!(view.hash().await?, empty_hash);
    assert_eq!(view.hash_mut().await?, empty_hash);
    save_view(&context, &mut view).await?;
    assert_eq!(view.hash().await?, empty_hash);
    assert_eq!(view.hash_mut().await?, empty_hash);
    // After saving the cleared view, rollback must still observe the empty
    // hash — this is the regression being guarded against.
    view.rollback();
    assert_eq!(view.hash().await?, empty_hash);
    assert_eq!(view.hash_mut().await?, empty_hash);
    Ok(())
}
/// Checks if a [`ReentrantCollectionView`] doesn't have pending changes after loading its
/// entries.
#[tokio::test]
async fn test_reentrant_collection_view_has_no_pending_changes_after_try_load_entries(
) -> anyhow::Result<()> {
    let context = MemoryContext::new_for_testing(());
    let expected_entries = [(1, "first".to_owned()), (2, "second".to_owned())];
    let mut view =
        ReentrantCollectionView::<_, u8, RegisterView<_, String>>::load(context.clone()).await?;
    assert!(!view.has_pending_changes().await);

    // Populating stages changes; saving clears them again.
    populate_reentrant_collection_view(&mut view, expected_entries.clone()).await?;
    assert!(view.has_pending_changes().await);
    save_view(&context, &mut view).await?;
    assert!(!view.has_pending_changes().await);

    // Loading entries read-only must not stage anything.
    let loaded = view.try_load_entries(vec![&1, &2]).await?;
    assert_eq!(loaded.len(), 2);
    for (maybe_entry, (_, expected_value)) in loaded.iter().zip(&expected_entries) {
        let entry = maybe_entry.as_ref().expect("entry should be present");
        assert_eq!(entry.get(), expected_value);
    }
    assert!(!view.has_pending_changes().await);
    Ok(())
}
/// Checks if a [`ReentrantCollectionView`] has pending changes after adding an entry.
#[tokio::test]
async fn test_reentrant_collection_view_has_pending_changes_after_new_entry() -> anyhow::Result<()>
{
    let context = MemoryContext::new_for_testing(());
    let initial_entries = [(1, "first".to_owned()), (2, "second".to_owned())];
    let mut view =
        ReentrantCollectionView::<_, u8, RegisterView<_, String>>::load(context.clone()).await?;
    populate_reentrant_collection_view(&mut view, initial_entries.clone()).await?;
    save_view(&context, &mut view).await?;
    assert!(!view.has_pending_changes().await);

    // Loading a previously absent key creates a default entry; the new entry
    // itself has nothing staged, but the collection does.
    let new_entry = view.try_load_entry_mut(&3).await?;
    assert_eq!(new_entry.get(), "");
    assert!(!new_entry.has_pending_changes().await);
    drop(new_entry);
    assert!(view.has_pending_changes().await);
    Ok(())
}
/// Checks if acquiring a write-lock to a sub-view causes the collection to have pending changes.
#[tokio::test]
async fn test_reentrant_collection_view_has_pending_changes_after_try_load_entry_mut(
) -> anyhow::Result<()> {
    let context = MemoryContext::new_for_testing(());
    let initial_entries = [(1, "first".to_owned()), (2, "second".to_owned())];
    let mut view =
        ReentrantCollectionView::<_, u8, RegisterView<_, String>>::load(context.clone()).await?;
    populate_reentrant_collection_view(&mut view, initial_entries.clone()).await?;
    save_view(&context, &mut view).await?;
    assert!(!view.has_pending_changes().await);

    // A read-only lock leaves both the sub-view and the collection unchanged.
    let read_guard = view
        .try_load_entry(&1)
        .await?
        .expect("Missing first entry in collection");
    assert_eq!(read_guard.get(), &initial_entries[0].1);
    assert!(!read_guard.has_pending_changes().await);
    assert!(!view.has_pending_changes().await);
    drop(read_guard);

    // A write lock marks the collection as changed even before any mutation.
    let write_guard = view.try_load_entry_mut(&1).await?;
    assert_eq!(write_guard.get(), &initial_entries[0].1);
    assert!(!write_guard.has_pending_changes().await);
    assert!(view.has_pending_changes().await);
    Ok(())
}
/// Checks if acquiring multiple write-locks to sub-views causes the collection to have pending
/// changes.
#[tokio::test]
async fn test_reentrant_collection_view_has_pending_changes_after_try_load_entries_mut(
) -> anyhow::Result<()> {
    let context = MemoryContext::new_for_testing(());
    let values = [
        (1, "first".to_owned()),
        (2, "second".to_owned()),
        (3, "third".to_owned()),
        (4, "fourth".to_owned()),
    ];
    let mut view =
        ReentrantCollectionView::<_, u8, RegisterView<_, String>>::load(context.clone()).await?;
    populate_reentrant_collection_view(&mut view, values.clone()).await?;
    save_view(&context, &mut view).await?;
    assert!(!view.has_pending_changes().await);
    // Read-only bulk loading must leave everything unchanged.
    let entries = view.try_load_entries([&2, &3]).await?;
    assert_eq!(entries.len(), 2);
    assert!(entries[0].is_some());
    assert!(entries[1].is_some());
    assert_eq!(entries[0].as_ref().unwrap().get(), &values[1].1);
    assert_eq!(entries[1].as_ref().unwrap().get(), &values[2].1);
    assert!(!entries[0].as_ref().unwrap().has_pending_changes().await);
    assert!(!entries[1].as_ref().unwrap().has_pending_changes().await);
    assert!(!view.has_pending_changes().await);
    drop(entries);
    // Bulk write-locking marks the collection (but not the sub-views, which
    // were not mutated) as having pending changes.
    let entries = view.try_load_entries_mut([&2, &3]).await?;
    assert_eq!(entries.len(), 2);
    assert_eq!(entries[0].get(), &values[1].1);
    assert_eq!(entries[1].get(), &values[2].1);
    assert!(!entries[0].has_pending_changes().await);
    assert!(!entries[1].has_pending_changes().await);
    assert!(view.has_pending_changes().await);
    Ok(())
}
/// Checks if a cleared [`TestView`] has no pending changes after flushing.
#[test_case(PhantomData::<TestCollectionView<_>>; "with CollectionView")]
#[test_case(PhantomData::<TestLogView<_>>; "with LogView")]
#[test_case(PhantomData::<TestMapView<_>>; "with MapView")]
#[test_case(PhantomData::<TestSetView<_>>; "with SetView")]
#[test_case(PhantomData::<TestQueueView<_>>; "with QueueView")]
#[test_case(PhantomData::<TestBucketQueueView<_>>; "with BucketQueueView")]
#[test_case(PhantomData::<TestRegisterView<_>>; "with RegisterView")]
#[tokio::test]
async fn test_flushing_cleared_view<V: TestView>(_view_type: PhantomData<V>) -> anyhow::Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut view = V::load(context.clone()).await?;
    // A freshly loaded view has nothing staged.
    assert!(!view.has_pending_changes().await);
    // Clearing stages a change...
    view.clear();
    assert!(view.has_pending_changes().await);
    // ...which is no longer pending once the view has been flushed to storage.
    save_view(&context, &mut view).await?;
    assert!(!view.has_pending_changes().await);
    Ok(())
}
/// Saves a [`View`] into the [`MemoryContext<()>`] storage simulation.
async fn save_view<V: View>(context: &V::Context, view: &mut V) -> anyhow::Result<()> {
    // Collect the staged changes into a batch, write it, then let the view
    // acknowledge that the save completed.
    let mut staged_writes = Batch::new();
    view.pre_save(&mut staged_writes)?;
    context.store().write_batch(staged_writes).await?;
    view.post_save();
    Ok(())
}
/// Populates a [`ReentrantCollectionView`] with some `entries`.
async fn populate_reentrant_collection_view<C, Key, Value>(
    collection: &mut ReentrantCollectionView<C, Key, RegisterView<C, Value>>,
    entries: impl IntoIterator<Item = (Key, Value)>,
) -> anyhow::Result<()>
where
    C: Context,
    Key: Serialize + DeserializeOwned + Clone + Debug + Default + Send + Sync,
    Value: Serialize + DeserializeOwned + Default + Send + Sync,
{
    // Write each value through a freshly acquired write guard.
    for (key, new_contents) in entries {
        collection.try_load_entry_mut(&key).await?.set(new_contents);
    }
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/scylla_db.rs | linera-views/src/backends/scylla_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueStore`] for the ScyllaDB database.
//!
//! The current connection is done via a Session and a corresponding primary key called
//! "namespace". The maximum number of concurrent queries is controlled by
//! `max_concurrent_queries`.
use std::{
collections::{BTreeSet, HashMap},
ops::Deref,
sync::Arc,
};
use async_lock::{Semaphore, SemaphoreGuard};
use futures::{future::join_all, StreamExt as _};
use linera_base::{ensure, util::future::FutureSyncExt as _};
use scylla::{
client::{
execution_profile::{ExecutionProfile, ExecutionProfileHandle},
session::Session,
session_builder::SessionBuilder,
},
deserialize::{DeserializationError, TypeCheckError},
errors::{
DbError, ExecutionError, IntoRowsResultError, NewSessionError, NextPageError, NextRowError,
PagerExecutionError, PrepareError, RequestAttemptError, RequestError, RowsError,
},
policies::{
load_balancing::{DefaultPolicy, LoadBalancingPolicy},
retry::DefaultRetryPolicy,
},
response::PagingState,
statement::{batch::BatchType, prepared::PreparedStatement, Consistency},
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
#[cfg(with_metrics)]
use crate::metering::MeteredDatabase;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::UnorderedBatch,
common::{get_uleb128_size, get_upper_bound_option},
journaling::{JournalConsistencyError, JournalingKeyValueDatabase},
lru_caching::{LruCachingConfig, LruCachingDatabase},
store::{
DirectWritableKeyValueStore, KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore,
WithError,
},
value_splitting::{ValueSplittingDatabase, ValueSplittingError},
};
/// Fundamental constant in ScyllaDB: The maximum size of a multi keys query
/// The limit is in reality 100. But we need one entry for the root key.
const MAX_MULTI_KEYS: usize = 100 - 1;

/// The maximal size of an operation on ScyllaDB seems to be 16 MiB
/// https://www.scylladb.com/2019/03/27/best-practices-for-scylla-applications/
/// "There is a hard limit at 16 MiB, and nothing bigger than that can arrive at once
/// at the database at any particular time"
/// So, we set up the maximal size of 16 MiB - 10 KiB for the values and 10 KiB for the keys
/// We also arbitrarily decrease the size by 4000 bytes because an amount of size is
/// taken internally by the database.
const RAW_MAX_VALUE_SIZE: usize = 16 * 1024 * 1024 - 10 * 1024 - 4000;

/// The maximum size of a key, in bytes.
const MAX_KEY_SIZE: usize = 10 * 1024;

/// The maximum combined size of one key plus one value in a batch.
const MAX_BATCH_TOTAL_SIZE: usize = RAW_MAX_VALUE_SIZE + MAX_KEY_SIZE;

/// The `RAW_MAX_VALUE_SIZE` is the maximum size on the ScyllaDB storage.
/// However, the value being written can also be the serialization of a `SimpleUnorderedBatch`
/// Therefore the actual `MAX_VALUE_SIZE` is lower.
/// At the maximum the key size is 10240 bytes (see `MAX_KEY_SIZE`) and we pack just one entry.
/// So if the key has 10240 bytes this gets us the inequality
/// `1 + 1 + 1 + serialized_size(MAX_KEY_SIZE)? + serialized_size(x)? <= RAW_MAX_VALUE_SIZE`.
/// and so this simplifies to `1 + 1 + 1 + (2 + 10240) + (4 + x) <= RAW_MAX_VALUE_SIZE`
/// Note on the above formula:
/// * We write 4 because `get_uleb128_size(RAW_MAX_VALUE_SIZE) = 4)`
/// * We write `1 + 1 + 1` because the `UnorderedBatch` has three entries.
///
/// This gets us to a maximal value of 16752727.
const VISIBLE_MAX_VALUE_SIZE: usize = RAW_MAX_VALUE_SIZE
    - MAX_KEY_SIZE
    - get_uleb128_size(RAW_MAX_VALUE_SIZE)
    - get_uleb128_size(MAX_KEY_SIZE)
    - 3;

/// The maximal number of statements that are executed in a single unlogged batch.
/// This is an empirical limit: no corresponding constant could be found in the
/// ScyllaDB source code or documentation.
/// NOTE(review): an earlier version of this comment referenced the values 14000
/// and 14796, which do not match the current value of 5000 — confirm which limit
/// is intended.
const MAX_BATCH_SIZE: usize = 5000;

/// The keyspace to use for the ScyllaDB database.
const KEYSPACE: &str = "kv";
/// The client for ScyllaDB:
/// * The session allows to pass queries
/// * The namespace that is being assigned to the database
/// * The prepared queries used for implementing the features of `KeyValueStore`.
struct ScyllaDbClient {
    // The underlying driver session used to execute all statements.
    session: Session,
    // The table name (quoted inside the `KEYSPACE`) this client operates on.
    namespace: String,
    // Statements prepared once at construction time (see `new`).
    read_value: PreparedStatement,
    contains_key: PreparedStatement,
    write_batch_delete_prefix_unbounded: PreparedStatement,
    write_batch_delete_prefix_bounded: PreparedStatement,
    write_batch_deletion: PreparedStatement,
    write_batch_insertion: PreparedStatement,
    find_keys_by_prefix_unbounded: PreparedStatement,
    find_keys_by_prefix_bounded: PreparedStatement,
    find_key_values_by_prefix_unbounded: PreparedStatement,
    find_key_values_by_prefix_bounded: PreparedStatement,
    // Caches of `IN (...)` statements, keyed by the number of `?` markers,
    // prepared lazily on first use.
    multi_key_values: papaya::HashMap<usize, PreparedStatement>,
    multi_keys: papaya::HashMap<usize, PreparedStatement>,
}
impl ScyllaDbClient {
    /// Creates a client for `namespace`, preparing all fixed statements up front
    /// on the given `session`.
    async fn new(session: Session, namespace: &str) -> Result<Self, ScyllaDbStoreInternalError> {
        let namespace = namespace.to_string();
        let read_value = session
            .prepare(format!(
                "SELECT v FROM {}.\"{}\" WHERE root_key = ? AND k = ?",
                KEYSPACE, namespace
            ))
            .await?;
        let contains_key = session
            .prepare(format!(
                "SELECT root_key FROM {}.\"{}\" WHERE root_key = ? AND k = ?",
                KEYSPACE, namespace
            ))
            .await?;
        let write_batch_delete_prefix_unbounded = session
            .prepare(format!(
                "DELETE FROM {}.\"{}\" WHERE root_key = ? AND k >= ?",
                KEYSPACE, namespace
            ))
            .await?;
        let write_batch_delete_prefix_bounded = session
            .prepare(format!(
                "DELETE FROM {}.\"{}\" WHERE root_key = ? AND k >= ? AND k < ?",
                KEYSPACE, namespace
            ))
            .await?;
        let write_batch_deletion = session
            .prepare(format!(
                "DELETE FROM {}.\"{}\" WHERE root_key = ? AND k = ?",
                KEYSPACE, namespace
            ))
            .await?;
        let write_batch_insertion = session
            .prepare(format!(
                "INSERT INTO {}.\"{}\" (root_key, k, v) VALUES (?, ?, ?)",
                KEYSPACE, namespace
            ))
            .await?;
        let find_keys_by_prefix_unbounded = session
            .prepare(format!(
                "SELECT k FROM {}.\"{}\" WHERE root_key = ? AND k >= ?",
                KEYSPACE, namespace
            ))
            .await?;
        let find_keys_by_prefix_bounded = session
            .prepare(format!(
                "SELECT k FROM {}.\"{}\" WHERE root_key = ? AND k >= ? AND k < ?",
                KEYSPACE, namespace
            ))
            .await?;
        let find_key_values_by_prefix_unbounded = session
            .prepare(format!(
                "SELECT k,v FROM {}.\"{}\" WHERE root_key = ? AND k >= ?",
                KEYSPACE, namespace
            ))
            .await?;
        let find_key_values_by_prefix_bounded = session
            .prepare(format!(
                "SELECT k,v FROM {}.\"{}\" WHERE root_key = ? AND k >= ? AND k < ?",
                KEYSPACE, namespace
            ))
            .await?;
        Ok(Self {
            session,
            namespace,
            read_value,
            contains_key,
            write_batch_delete_prefix_unbounded,
            write_batch_delete_prefix_bounded,
            write_batch_deletion,
            write_batch_insertion,
            find_keys_by_prefix_unbounded,
            find_keys_by_prefix_bounded,
            find_key_values_by_prefix_unbounded,
            find_key_values_by_prefix_bounded,
            multi_key_values: papaya::HashMap::new(),
            multi_keys: papaya::HashMap::new(),
        })
    }

    /// Builds the default token-aware load-balancing policy.
    fn build_default_policy() -> Arc<dyn LoadBalancingPolicy> {
        DefaultPolicy::builder().token_aware(true).build()
    }

    /// Builds the execution profile used for all sessions: the given
    /// load-balancing `policy`, the default retry policy, and `LocalQuorum`
    /// consistency.
    fn build_default_execution_profile_handle(
        policy: Arc<dyn LoadBalancingPolicy>,
    ) -> ExecutionProfileHandle {
        let default_profile = ExecutionProfile::builder()
            .load_balancing_policy(policy)
            .retry_policy(Arc::new(DefaultRetryPolicy::new()))
            .consistency(Consistency::LocalQuorum)
            .build();
        default_profile.into_handle()
    }

    /// Opens a session to the ScyllaDB node at `uri` using the default profile.
    async fn build_default_session(uri: &str) -> Result<Session, ScyllaDbStoreInternalError> {
        // This explicitly sets a lot of default parameters for clarity and for making future changes
        // easier.
        SessionBuilder::new()
            .known_node(uri)
            .default_execution_profile_handle(Self::build_default_execution_profile_handle(
                Self::build_default_policy(),
            ))
            .build()
            .boxed_sync()
            .await
            .map_err(Into::into)
    }

    /// Returns a prepared `SELECT k,v ... k IN (...)` statement with
    /// `num_markers` placeholders, preparing and caching it on first use.
    // NOTE(review): two tasks may race and both prepare the same statement;
    // the duplicate insert is harmless (same statement text), it only costs a
    // redundant round-trip.
    async fn get_multi_key_values_statement(
        &self,
        num_markers: usize,
    ) -> Result<PreparedStatement, ScyllaDbStoreInternalError> {
        if let Some(prepared_statement) = self.multi_key_values.pin().get(&num_markers) {
            return Ok(prepared_statement.clone());
        }
        let markers = std::iter::repeat_n("?", num_markers)
            .collect::<Vec<_>>()
            .join(",");
        let prepared_statement = self
            .session
            .prepare(format!(
                "SELECT k,v FROM {}.\"{}\" WHERE root_key = ? AND k IN ({})",
                KEYSPACE, self.namespace, markers
            ))
            .await?;
        self.multi_key_values
            .pin()
            .insert(num_markers, prepared_statement.clone());
        Ok(prepared_statement)
    }

    /// Same as [`Self::get_multi_key_values_statement`] but selecting only the
    /// keys (used for `contains_keys`).
    async fn get_multi_keys_statement(
        &self,
        num_markers: usize,
    ) -> Result<PreparedStatement, ScyllaDbStoreInternalError> {
        if let Some(prepared_statement) = self.multi_keys.pin().get(&num_markers) {
            return Ok(prepared_statement.clone());
        };
        let markers = std::iter::repeat_n("?", num_markers)
            .collect::<Vec<_>>()
            .join(",");
        let prepared_statement = self
            .session
            .prepare(format!(
                "SELECT k FROM {}.\"{}\" WHERE root_key = ? AND k IN ({})",
                KEYSPACE, self.namespace, markers
            ))
            .await?;
        self.multi_keys
            .pin()
            .insert(num_markers, prepared_statement.clone());
        Ok(prepared_statement)
    }

    /// Fails with `KeyTooLong` if `key` exceeds `MAX_KEY_SIZE`.
    fn check_key_size(key: &[u8]) -> Result<(), ScyllaDbStoreInternalError> {
        ensure!(
            key.len() <= MAX_KEY_SIZE,
            ScyllaDbStoreInternalError::KeyTooLong
        );
        Ok(())
    }

    /// Fails with `ValueTooLong` if `value` exceeds `RAW_MAX_VALUE_SIZE`.
    fn check_value_size(value: &[u8]) -> Result<(), ScyllaDbStoreInternalError> {
        ensure!(
            value.len() <= RAW_MAX_VALUE_SIZE,
            ScyllaDbStoreInternalError::ValueTooLong
        );
        Ok(())
    }

    /// Fails with `BatchTooLong` if `batch` has more than `MAX_BATCH_SIZE` operations.
    fn check_batch_len(batch: &UnorderedBatch) -> Result<(), ScyllaDbStoreInternalError> {
        ensure!(
            batch.len() <= MAX_BATCH_SIZE,
            ScyllaDbStoreInternalError::BatchTooLong
        );
        Ok(())
    }

    /// Reads the value stored under `(root_key, key)`, or `None` if absent.
    async fn read_value_internal(
        &self,
        root_key: &[u8],
        key: Vec<u8>,
    ) -> Result<Option<Vec<u8>>, ScyllaDbStoreInternalError> {
        Self::check_key_size(&key)?;
        let session = &self.session;
        // Read the value of a key
        let values = (root_key.to_vec(), key);
        let (result, _) = session
            .execute_single_page(&self.read_value, &values, PagingState::start())
            .await?;
        let rows = result.into_rows_result()?;
        let mut rows = rows.rows::<(Vec<u8>,)>()?;
        Ok(match rows.next() {
            Some(row) => Some(row?.0),
            None => None,
        })
    }

    /// Maps each distinct key to the list of positions where it occurs in
    /// `keys`, checking each key's size. Deduplicating keeps the number of `?`
    /// markers equal to the number of distinct keys in the `IN` query.
    fn get_occurrences_map(
        keys: Vec<Vec<u8>>,
    ) -> Result<HashMap<Vec<u8>, Vec<usize>>, ScyllaDbStoreInternalError> {
        let mut map = HashMap::<Vec<u8>, Vec<usize>>::new();
        for (i_key, key) in keys.into_iter().enumerate() {
            Self::check_key_size(&key)?;
            map.entry(key).or_default().push(i_key);
        }
        Ok(map)
    }

    /// Reads several keys with a single `IN` query; duplicate keys share one
    /// database lookup. Keys not present in storage stay `None`.
    async fn read_multi_values_internal(
        &self,
        root_key: &[u8],
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<Option<Vec<u8>>>, ScyllaDbStoreInternalError> {
        let mut values = vec![None; keys.len()];
        let map = Self::get_occurrences_map(keys)?;
        let statement = self.get_multi_key_values_statement(map.len()).await?;
        let mut inputs = vec![root_key.to_vec()];
        inputs.extend(map.keys().cloned());
        let mut rows = Box::pin(self.session.execute_iter(statement, &inputs))
            .await?
            .rows_stream::<(Vec<u8>, Vec<u8>)>()?;
        while let Some(row) = rows.next().await {
            let (key, value) = row?;
            // `split_last` lets the final occurrence take ownership of the
            // value, so only the earlier duplicates are cloned.
            if let Some((&last, rest)) = map[&key].split_last() {
                for position in rest {
                    values[*position] = Some(value.clone());
                }
                values[last] = Some(value);
            }
        }
        Ok(values)
    }

    /// Tests existence of several keys with a single `IN` query; duplicate
    /// keys share one lookup.
    async fn contains_keys_internal(
        &self,
        root_key: &[u8],
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<bool>, ScyllaDbStoreInternalError> {
        let mut values = vec![false; keys.len()];
        let map = Self::get_occurrences_map(keys)?;
        let statement = self.get_multi_keys_statement(map.len()).await?;
        let mut inputs = vec![root_key.to_vec()];
        inputs.extend(map.keys().cloned());
        let mut rows = Box::pin(self.session.execute_iter(statement, &inputs))
            .await?
            .rows_stream::<(Vec<u8>,)>()?;
        while let Some(row) = rows.next().await {
            let (key,) = row?;
            for i_key in &map[&key] {
                values[*i_key] = true;
            }
        }
        Ok(values)
    }

    /// Tests whether `(root_key, key)` exists in storage.
    async fn contains_key_internal(
        &self,
        root_key: &[u8],
        key: Vec<u8>,
    ) -> Result<bool, ScyllaDbStoreInternalError> {
        Self::check_key_size(&key)?;
        let session = &self.session;
        // Read the value of a key
        let values = (root_key.to_vec(), key);
        let (result, _) = session
            .execute_single_page(&self.contains_key, &values, PagingState::start())
            .await?;
        let rows = result.into_rows_result()?;
        let mut rows = rows.rows::<(Vec<u8>,)>()?;
        Ok(rows.next().is_some())
    }

    /// Writes `batch` as a single unlogged ScyllaDB batch: prefix deletions
    /// become range deletions, then individual deletions, then insertions.
    async fn write_batch_internal(
        &self,
        root_key: &[u8],
        batch: UnorderedBatch,
    ) -> Result<(), ScyllaDbStoreInternalError> {
        let session = &self.session;
        let mut batch_query = scylla::statement::batch::Batch::new(BatchType::Unlogged);
        let mut batch_values = Vec::new();
        let query1 = &self.write_batch_delete_prefix_unbounded;
        let query2 = &self.write_batch_delete_prefix_bounded;
        Self::check_batch_len(&batch)?;
        for key_prefix in batch.key_prefix_deletions {
            Self::check_key_size(&key_prefix)?;
            // A prefix with no upper bound (all 0xFF) deletes `k >= prefix`;
            // otherwise delete the half-open range `[prefix, upper_bound)`.
            match get_upper_bound_option(&key_prefix) {
                None => {
                    let values = vec![root_key.to_vec(), key_prefix];
                    batch_values.push(values);
                    batch_query.append_statement(query1.clone());
                }
                Some(upper_bound) => {
                    let values = vec![root_key.to_vec(), key_prefix, upper_bound];
                    batch_values.push(values);
                    batch_query.append_statement(query2.clone());
                }
            }
        }
        let query3 = &self.write_batch_deletion;
        for key in batch.simple_unordered_batch.deletions {
            Self::check_key_size(&key)?;
            let values = vec![root_key.to_vec(), key];
            batch_values.push(values);
            batch_query.append_statement(query3.clone());
        }
        let query4 = &self.write_batch_insertion;
        for (key, value) in batch.simple_unordered_batch.insertions {
            Self::check_key_size(&key)?;
            Self::check_value_size(&value)?;
            let values = vec![root_key.to_vec(), key, value];
            batch_values.push(values);
            batch_query.append_statement(query4.clone());
        }
        session.batch(&batch_query, batch_values).await?;
        Ok(())
    }

    /// Returns all keys starting with `key_prefix`, with the prefix stripped.
    async fn find_keys_by_prefix_internal(
        &self,
        root_key: &[u8],
        key_prefix: Vec<u8>,
    ) -> Result<Vec<Vec<u8>>, ScyllaDbStoreInternalError> {
        Self::check_key_size(&key_prefix)?;
        let session = &self.session;
        // Read the value of a key
        let len = key_prefix.len();
        let query_unbounded = &self.find_keys_by_prefix_unbounded;
        let query_bounded = &self.find_keys_by_prefix_bounded;
        let rows = match get_upper_bound_option(&key_prefix) {
            None => {
                let values = (root_key.to_vec(), key_prefix.clone());
                Box::pin(session.execute_iter(query_unbounded.clone(), values)).await?
            }
            Some(upper_bound) => {
                let values = (root_key.to_vec(), key_prefix.clone(), upper_bound);
                Box::pin(session.execute_iter(query_bounded.clone(), values)).await?
            }
        };
        let mut rows = rows.rows_stream::<(Vec<u8>,)>()?;
        let mut keys = Vec::new();
        while let Some(row) = rows.next().await {
            let (key,) = row?;
            // Strip the prefix; callers expect keys relative to `key_prefix`.
            let short_key = key[len..].to_vec();
            keys.push(short_key);
        }
        Ok(keys)
    }

    /// Returns all `(key, value)` pairs whose key starts with `key_prefix`,
    /// with the prefix stripped from the returned keys.
    async fn find_key_values_by_prefix_internal(
        &self,
        root_key: &[u8],
        key_prefix: Vec<u8>,
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ScyllaDbStoreInternalError> {
        Self::check_key_size(&key_prefix)?;
        let session = &self.session;
        // Read the value of a key
        let len = key_prefix.len();
        let query_unbounded = &self.find_key_values_by_prefix_unbounded;
        let query_bounded = &self.find_key_values_by_prefix_bounded;
        let rows = match get_upper_bound_option(&key_prefix) {
            None => {
                let values = (root_key.to_vec(), key_prefix.clone());
                Box::pin(session.execute_iter(query_unbounded.clone(), values)).await?
            }
            Some(upper_bound) => {
                let values = (root_key.to_vec(), key_prefix.clone(), upper_bound);
                Box::pin(session.execute_iter(query_bounded.clone(), values)).await?
            }
        };
        let mut rows = rows.rows_stream::<(Vec<u8>, Vec<u8>)>()?;
        let mut key_values = Vec::new();
        while let Some(row) = rows.next().await {
            let (key, value) = row?;
            let short_key = key[len..].to_vec();
            key_values.push((short_key, value));
        }
        Ok(key_values)
    }
}
/// The client itself and the keeping of the count of active connections.
#[derive(Clone)]
pub struct ScyllaDbStoreInternal {
    // Shared client holding the session and prepared statements.
    store: Arc<ScyllaDbClient>,
    // Optional limiter on the number of concurrent queries.
    semaphore: Option<Arc<Semaphore>>,
    // Preferred buffer size for async streams.
    max_stream_queries: usize,
    // Partition key for this store; built by `get_big_root_key`, so it always
    // starts with a `0` byte (ScyllaDB keys must be non-empty).
    root_key: Vec<u8>,
}
/// Database-level connection to ScyllaDB for managing namespaces and partitions.
#[derive(Clone)]
pub struct ScyllaDbDatabaseInternal {
    // Shared client holding the session and prepared statements.
    store: Arc<ScyllaDbClient>,
    // Optional limiter on the number of concurrent queries.
    semaphore: Option<Arc<Semaphore>>,
    // Preferred buffer size for async streams.
    max_stream_queries: usize,
}
// The database shares the store's error type.
impl WithError for ScyllaDbDatabaseInternal {
    type Error = ScyllaDbStoreInternalError;
}
/// The error type for [`ScyllaDbStoreInternal`]
#[derive(Error, Debug)]
pub enum ScyllaDbStoreInternalError {
    /// BCS serialization error.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),

    /// The key must have at most `MAX_KEY_SIZE` bytes
    #[error("The key must have at most MAX_KEY_SIZE")]
    KeyTooLong,

    /// The value must have at most `RAW_MAX_VALUE_SIZE` bytes
    #[error("The value must have at most RAW_MAX_VALUE_SIZE")]
    ValueTooLong,

    /// A deserialization error in ScyllaDB
    #[error(transparent)]
    DeserializationError(#[from] DeserializationError),

    /// A row error in ScyllaDB
    #[error(transparent)]
    RowsError(#[from] RowsError),

    /// A type error in the accessed data in ScyllaDB
    #[error(transparent)]
    IntoRowsResultError(#[from] IntoRowsResultError),

    /// A type check error in ScyllaDB
    #[error(transparent)]
    TypeCheckError(#[from] TypeCheckError),

    /// A query error in ScyllaDB
    #[error(transparent)]
    PagerExecutionError(#[from] PagerExecutionError),

    /// An error when creating a new ScyllaDB session
    #[error(transparent)]
    ScyllaDbNewSessionError(#[from] NewSessionError),

    /// Namespace contains forbidden characters
    #[error("Namespace contains forbidden characters")]
    InvalidNamespace,

    /// The journal is not coherent
    #[error(transparent)]
    JournalConsistencyError(#[from] JournalConsistencyError),

    /// The batch is too long to be written
    #[error("The batch is too long to be written")]
    BatchTooLong,

    /// A prepare error in ScyllaDB
    #[error(transparent)]
    PrepareError(#[from] PrepareError),

    /// An execution error in ScyllaDB
    #[error(transparent)]
    ExecutionError(#[from] ExecutionError),

    /// A next row error in ScyllaDB
    #[error(transparent)]
    NextRowError(#[from] NextRowError),
}
// Identifies this backend in error reporting.
impl KeyValueStoreError for ScyllaDbStoreInternalError {
    const BACKEND: &'static str = "scylla_db";
}
// The store reports its own internal error type.
impl WithError for ScyllaDbStoreInternal {
    type Error = ScyllaDbStoreInternalError;
}
impl ReadableKeyValueStore for ScyllaDbStoreInternal {
    const MAX_KEY_SIZE: usize = MAX_KEY_SIZE;

    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }

    fn root_key(&self) -> Result<Vec<u8>, ScyllaDbStoreInternalError> {
        // Strip the extra leading byte added by `get_big_root_key`.
        Ok(self.root_key[1..].to_vec())
    }

    async fn read_value_bytes(
        &self,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, ScyllaDbStoreInternalError> {
        let store = self.store.deref();
        // `acquire` (defined elsewhere in this file) presumably takes a permit
        // from the optional semaphore to bound concurrency — confirm there.
        let _guard = self.acquire().await;
        Box::pin(store.read_value_internal(&self.root_key, key.to_vec())).await
    }

    async fn contains_key(&self, key: &[u8]) -> Result<bool, ScyllaDbStoreInternalError> {
        let store = self.store.deref();
        let _guard = self.acquire().await;
        Box::pin(store.contains_key_internal(&self.root_key, key.to_vec())).await
    }

    async fn contains_keys(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<bool>, ScyllaDbStoreInternalError> {
        if keys.is_empty() {
            return Ok(Vec::new());
        }
        let store = self.store.deref();
        let _guard = self.acquire().await;
        // ScyllaDB limits `IN` queries, so split into chunks of at most
        // `MAX_MULTI_KEYS` keys and run them concurrently.
        let handles = keys
            .chunks(MAX_MULTI_KEYS)
            .map(|keys| store.contains_keys_internal(&self.root_key, keys.to_vec()));
        let results: Vec<_> = join_all(handles)
            .await
            .into_iter()
            .collect::<Result<_, _>>()?;
        // Chunks preserve input order, so flattening restores the original order.
        Ok(results.into_iter().flatten().collect())
    }

    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, ScyllaDbStoreInternalError> {
        if keys.is_empty() {
            return Ok(Vec::new());
        }
        let store = self.store.deref();
        let _guard = self.acquire().await;
        // Same chunking scheme as `contains_keys`.
        let handles = keys
            .chunks(MAX_MULTI_KEYS)
            .map(|keys| store.read_multi_values_internal(&self.root_key, keys.to_vec()));
        let results: Vec<_> = join_all(handles)
            .await
            .into_iter()
            .collect::<Result<_, _>>()?;
        Ok(results.into_iter().flatten().collect())
    }

    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, ScyllaDbStoreInternalError> {
        let store = self.store.deref();
        let _guard = self.acquire().await;
        Box::pin(store.find_keys_by_prefix_internal(&self.root_key, key_prefix.to_vec())).await
    }

    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ScyllaDbStoreInternalError> {
        let store = self.store.deref();
        let _guard = self.acquire().await;
        Box::pin(store.find_key_values_by_prefix_internal(&self.root_key, key_prefix.to_vec()))
            .await
    }
}
impl DirectWritableKeyValueStore for ScyllaDbStoreInternal {
    const MAX_BATCH_SIZE: usize = MAX_BATCH_SIZE;
    const MAX_BATCH_TOTAL_SIZE: usize = MAX_BATCH_TOTAL_SIZE;
    const MAX_VALUE_SIZE: usize = VISIBLE_MAX_VALUE_SIZE;

    // ScyllaDB cannot take a `crate::batch::Batch` directly. Indeed, if a delete is
    // followed by a write, then the delete takes priority. See the sentence "The first
    // tie-breaking rule when two cells have the same write timestamp is that dead cells
    // win over live cells" from
    // https://github.com/scylladb/scylladb/blob/master/docs/dev/timestamp-conflict-resolution.md
    type Batch = UnorderedBatch;

    /// Writes `batch` atomically under this store's root key.
    async fn write_batch(&self, batch: Self::Batch) -> Result<(), ScyllaDbStoreInternalError> {
        let store = self.store.deref();
        let _guard = self.acquire().await;
        store.write_batch_internal(&self.root_key, batch).await
    }
}
// ScyllaDB requires that the keys are non-empty: prepend a `0` byte so even an
// empty `root_key` maps to a valid, one-byte partition key.
fn get_big_root_key(root_key: &[u8]) -> Vec<u8> {
    let mut big_key = Vec::with_capacity(root_key.len() + 1);
    big_key.push(0);
    big_key.extend_from_slice(root_key);
    big_key
}
/// The type for building a new ScyllaDB Key Value Store
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ScyllaDbStoreInternalConfig {
    /// The URL to which the requests have to be sent
    pub uri: String,
    /// Maximum number of concurrent database queries allowed for this client.
    /// `None` means unlimited (no semaphore is created).
    pub max_concurrent_queries: Option<usize>,
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
    /// The replication factor.
    pub replication_factor: u32,
}
impl KeyValueDatabase for ScyllaDbDatabaseInternal {
type Config = ScyllaDbStoreInternalConfig;
type Store = ScyllaDbStoreInternal;
/// Returns the human-readable name of this database backend.
fn get_name() -> String {
    "scylladb internal".to_string()
}
/// Connects to the ScyllaDB node in `config` and prepares all statements for
/// `namespace`. Fails with `InvalidNamespace` if the namespace is malformed.
async fn connect(
    config: &Self::Config,
    namespace: &str,
) -> Result<Self, ScyllaDbStoreInternalError> {
    Self::check_namespace(namespace)?;
    let session = ScyllaDbClient::build_default_session(&config.uri).await?;
    let store = ScyllaDbClient::new(session, namespace).await?;
    let store = Arc::new(store);
    // Only create a semaphore when a concurrency limit was configured.
    let semaphore = config
        .max_concurrent_queries
        .map(|n| Arc::new(Semaphore::new(n)));
    let max_stream_queries = config.max_stream_queries;
    Ok(Self {
        store,
        semaphore,
        max_stream_queries,
    })
}
/// Opens a store scoped to `root_key`. All stores share the same client and
/// semaphore; only the (prefixed) root key differs.
fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, ScyllaDbStoreInternalError> {
    Ok(ScyllaDbStoreInternal {
        store: self.store.clone(),
        semaphore: self.semaphore.clone(),
        max_stream_queries: self.max_stream_queries,
        root_key: get_big_root_key(root_key),
    })
}
/// Opens a store scoped to `root_key`. This delegates to `open_shared`:
/// ScyllaDB provides no locking here, so exclusivity is not actually enforced.
fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, ScyllaDbStoreInternalError> {
    self.open_shared(root_key)
}
/// Lists all namespaces (tables) in the keyspace. Returns an empty list when
/// the keyspace itself does not exist yet.
async fn list_all(config: &Self::Config) -> Result<Vec<String>, ScyllaDbStoreInternalError> {
    let session = ScyllaDbClient::build_default_session(&config.uri).await?;
    let statement = session
        .prepare(format!("DESCRIBE KEYSPACE {}", KEYSPACE))
        .await?;
    let result = Box::pin(session.execute_iter(statement, &[])).await;
    // The exact error message ScyllaDB produces for a missing keyspace.
    let miss_msg = format!("'{}' not found in keyspaces", KEYSPACE);
    let result = match result {
        Ok(result) => result,
        Err(error) => {
            // A missing keyspace is not an error: it means there are no
            // namespaces. Match the specific `DbError::Invalid` + message.
            let invalid_or_keyspace_not_found = match &error {
                PagerExecutionError::NextPageError(NextPageError::RequestFailure(
                    RequestError::LastAttemptError(RequestAttemptError::DbError(db_error, msg)),
                )) => *db_error == DbError::Invalid && msg.as_str() == miss_msg,
                _ => false,
            };
            if invalid_or_keyspace_not_found {
                return Ok(Vec::new());
            } else {
                return Err(ScyllaDbStoreInternalError::PagerExecutionError(error));
            }
        }
    };
    let mut namespaces = Vec::new();
    // DESCRIBE rows are `(keyspace, object_kind, name, create_statement)`;
    // only "table" objects are namespaces.
    let mut rows_stream = result.rows_stream::<(String, String, String, String)>()?;
    while let Some(row) = rows_stream.next().await {
        let (_, object_kind, name, _) = row?;
        if object_kind == "table" {
            namespaces.push(name);
        }
    }
    Ok(namespaces)
}
/// Lists the distinct root keys present in this namespace, with the internal
/// leading `0` byte (added by `get_big_root_key`) stripped.
/// NOTE(review): this uses `ALLOW FILTERING`, which scans the whole table —
/// acceptable for an admin operation, but not for hot paths.
async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, ScyllaDbStoreInternalError> {
    let statement = self
        .store
        .session
        .prepare(format!(
            "SELECT root_key FROM {}.\"{}\" ALLOW FILTERING",
            KEYSPACE, self.store.namespace
        ))
        .await?;
    // Execute the query
    let rows = Box::pin(self.store.session.execute_iter(statement, &[])).await?;
    let mut rows = rows.rows_stream::<(Vec<u8>,)>()?;
    // A `BTreeSet` both deduplicates (one row per key) and sorts the result.
    let mut root_keys = BTreeSet::new();
    while let Some(row) = rows.next().await {
        let (root_key,) = row?;
        let root_key = root_key[1..].to_vec();
        root_keys.insert(root_key);
    }
    Ok(root_keys.into_iter().collect::<Vec<_>>())
}
/// Deletes all stored data by dropping the keyspace.
/// NOTE(review): this drops the entire `KEYSPACE`, i.e. every namespace in it,
/// not just one namespace — confirm this is the intended scope.
async fn delete_all(store_config: &Self::Config) -> Result<(), ScyllaDbStoreInternalError> {
    let session = ScyllaDbClient::build_default_session(&store_config.uri).await?;
    let statement = session
        .prepare(format!("DROP KEYSPACE IF EXISTS {}", KEYSPACE))
        .await?;
    session
        .execute_single_page(&statement, &[], PagingState::start())
        .await?;
    Ok(())
}
/// Checks whether `namespace` exists by attempting to prepare a trivial query
/// against its table and classifying the possible failure modes.
async fn exists(
    config: &Self::Config,
    namespace: &str,
) -> Result<bool, ScyllaDbStoreInternalError> {
    Self::check_namespace(namespace)?;
    let session = ScyllaDbClient::build_default_session(&config.uri).await?;
    // We check the way the test can fail. It can fail in different ways.
    let result = session
        .prepare(format!(
            "SELECT root_key FROM {}.\"{}\" LIMIT 1 ALLOW FILTERING",
            KEYSPACE, namespace
        ))
        .await;
    // The missing table translates into a very specific error that we matched
    let miss_msg1 = format!("unconfigured table {}", namespace);
    let miss_msg1 = miss_msg1.as_str();
    let miss_msg2 = "Undefined name root_key in selection clause";
    let miss_msg3 = format!("Keyspace {} does not exist", KEYSPACE);
    let Err(error) = result else {
        // If OK, then the table exists
        return Ok(true);
    };
    // Any of the three messages (missing table, missing column, missing
    // keyspace) means "does not exist"; everything else is a real error.
    let missing_table = match &error {
        PrepareError::AllAttemptsFailed {
            first_attempt: RequestAttemptError::DbError(db_error, msg),
        } => {
            if *db_error != DbError::Invalid {
                false
            } else {
                msg.as_str() == miss_msg1
                    || msg.as_str() == miss_msg2
                    || msg.as_str() == miss_msg3
            }
        }
        _ => false,
    };
    if missing_table {
        Ok(false)
    } else {
        Err(ScyllaDbStoreInternalError::PrepareError(error))
    }
}
async fn create(
config: &Self::Config,
namespace: &str,
) -> Result<(), ScyllaDbStoreInternalError> {
Self::check_namespace(namespace)?;
let session = ScyllaDbClient::build_default_session(&config.uri).await?;
// Create a keyspace if it doesn't exist
let statement = session
.prepare(format!(
"CREATE KEYSPACE IF NOT EXISTS {} WITH REPLICATION = {{ \
'class' : 'NetworkTopologyStrategy', \
'replication_factor' : {} \
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/journaling.rs | linera-views/src/backends/journaling.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Turns a `DirectKeyValueStore` into a `KeyValueStore` by adding journaling.
//!
//! Journaling aims to allow writing arbitrarily large batches of data in an atomic way.
//! This is useful for database backends that limit the number of keys and/or the size of
//! the data that can be written atomically (i.e. in the same database transaction).
//!
//! Journaling requires to set aside a range of keys to hold a possible "header" and an
//! array of unwritten entries called "blocks".
//!
//! When a new batch to be written exceeds the capacity of the underlying storage, the
//! "slow path" is taken: the batch of operations is first written into blocks, then the
//! journal header is (atomically) updated to make the batch of updates persistent.
//!
//! Before any new read or write operation, if a journal is present, it must first be
//! cleared. This is done by processing every block of the journal successively. Every
//! time the data in a block are written, the journal header is updated in the same
//! transaction to mark the block as processed.
use serde::{Deserialize, Serialize};
use static_assertions as sa;
use thiserror::Error;
use crate::{
batch::{Batch, BatchValueWriter, DeletePrefixExpander, SimplifiedBatch},
store::{
DirectKeyValueStore, KeyValueDatabase, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
views::MIN_VIEW_TAG,
};
/// A journaling key-value database.
///
/// Wraps an inner database so that every store it opens is a
/// [`JournalingKeyValueStore`], i.e. supports arbitrarily large atomic batches.
#[derive(Clone)]
pub struct JournalingKeyValueDatabase<D> {
    /// The inner, journal-unaware database.
    database: D,
}
/// A journaling key-value store.
///
/// Batches that fit the backend's limits are written directly (fast path);
/// larger ones are first persisted as journal blocks and then replayed.
#[derive(Clone)]
pub struct JournalingKeyValueStore<S> {
    /// The inner store.
    store: S,
    /// Whether we have exclusive R/W access to the keys under root key.
    /// The journal (slow path) may only be used with exclusive access.
    has_exclusive_access: bool,
}
/// Data type indicating that the database is not consistent, or that the
/// journal cannot be used safely.
#[derive(Error, Debug)]
#[allow(missing_docs)]
pub enum JournalConsistencyError {
    #[error("The journal block could not be retrieved, it could be missing or corrupted.")]
    FailureToRetrieveJournalBlock,
    #[error("Refusing to use the journal without exclusive database access to the root object.")]
    JournalRequiresExclusiveAccess,
}
/// The key-prefix tag reserved for journal bookkeeping.
const JOURNAL_TAG: u8 = 0;
// To prevent collisions, the tag value 0 is reserved for journals.
// The tags used by views must be greater or equal than `MIN_VIEW_TAG`.
sa::const_assert!(JOURNAL_TAG < MIN_VIEW_TAG);
/// Sub-tags distinguishing the kinds of keys within the journal key space.
#[repr(u8)]
enum KeyTag {
    /// Prefix for the storing of the header of the journal.
    Journal = 1,
    /// Prefix for the block entries (position-indexed batches of operations).
    Entry,
}
/// Builds the storage key `[JOURNAL_TAG, tag, bcs(pos)]` addressing either the
/// journal header (`KeyTag::Journal`) or a block entry (`KeyTag::Entry`).
fn get_journaling_key(tag: u8, pos: u32) -> Result<Vec<u8>, bcs::Error> {
    let mut key = Vec::with_capacity(2 + std::mem::size_of::<u32>());
    key.push(JOURNAL_TAG);
    key.push(tag);
    bcs::serialize_into(&mut key, &pos)?;
    Ok(key)
}
/// The header that contains the current state of the journal.
#[derive(Serialize, Deserialize, Debug, Default)]
struct JournalHeader {
    /// Number of journal blocks that remain to be replayed.
    block_count: u32,
}
impl<S> DeletePrefixExpander for &JournalingKeyValueStore<S>
where
    S: DirectKeyValueStore,
{
    type Error = S::Error;
    /// Expands a prefix deletion into the explicit list of keys currently
    /// stored under `key_prefix`, by querying the inner store.
    async fn expand_delete_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        self.store.find_keys_by_prefix(key_prefix).await
    }
}
impl<D> WithError for JournalingKeyValueDatabase<D>
where
    D: WithError,
{
    // Journaling introduces no new error type; reuse the inner one.
    type Error = D::Error;
}
impl<S> WithError for JournalingKeyValueStore<S>
where
    S: WithError,
{
    // Journaling introduces no new error type; reuse the inner one.
    type Error = S::Error;
}
impl<S> ReadableKeyValueStore for JournalingKeyValueStore<S>
where
    S: ReadableKeyValueStore,
    S::Error: From<JournalConsistencyError>,
{
    /// The size constants do not change: journaling only affects writes.
    const MAX_KEY_SIZE: usize = S::MAX_KEY_SIZE;
    /// All read operations are plain delegations to the inner store.
    fn max_stream_queries(&self) -> usize {
        self.store.max_stream_queries()
    }
    fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
        self.store.root_key()
    }
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        self.store.read_value_bytes(key).await
    }
    async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error> {
        self.store.contains_key(key).await
    }
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
        self.store.contains_keys(keys).await
    }
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
        self.store.read_multi_values_bytes(keys).await
    }
    async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        self.store.find_keys_by_prefix(key_prefix).await
    }
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
        self.store.find_key_values_by_prefix(key_prefix).await
    }
}
impl<D> KeyValueDatabase for JournalingKeyValueDatabase<D>
where
    D: KeyValueDatabase,
{
    type Config = D::Config;
    type Store = JournalingKeyValueStore<D::Store>;
    fn get_name() -> String {
        format!("journaling {}", D::get_name())
    }
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error> {
        let database = D::connect(config, namespace).await?;
        Ok(Self { database })
    }
    /// Shared stores never get exclusive access and thus cannot journal.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let store = self.database.open_shared(root_key)?;
        Ok(JournalingKeyValueStore {
            store,
            has_exclusive_access: false,
        })
    }
    /// Exclusive stores may use the journal for oversized batches.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let store = self.database.open_exclusive(root_key)?;
        Ok(JournalingKeyValueStore {
            store,
            has_exclusive_access: true,
        })
    }
    // The namespace-management operations below delegate to the inner database.
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error> {
        D::list_all(config).await
    }
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error> {
        self.database.list_root_keys().await
    }
    async fn delete_all(config: &Self::Config) -> Result<(), Self::Error> {
        D::delete_all(config).await
    }
    async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error> {
        D::exists(config, namespace).await
    }
    async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        D::create(config, namespace).await
    }
    async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        D::delete(config, namespace).await
    }
}
impl<S> WritableKeyValueStore for JournalingKeyValueStore<S>
where
    S: DirectKeyValueStore,
    S::Error: From<JournalConsistencyError>,
{
    /// The size constants do not change.
    const MAX_VALUE_SIZE: usize = S::MAX_VALUE_SIZE;
    /// Writes the batch directly when it fits the backend limits; otherwise
    /// spills it to the journal and immediately replays it. The slow path
    /// requires exclusive access to the root object.
    async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error> {
        let batch = S::Batch::from_batch(self, batch).await?;
        if Self::is_fastpath_feasible(&batch) {
            self.store.write_batch(batch).await
        } else {
            if !self.has_exclusive_access {
                return Err(JournalConsistencyError::JournalRequiresExclusiveAccess.into());
            }
            let header = self.write_journal(batch).await?;
            self.coherently_resolve_journal(header).await
        }
    }
    /// Replays and clears any journal left over from a previous, interrupted
    /// write, making the journaled key range consistent again.
    async fn clear_journal(&self) -> Result<(), Self::Error> {
        let key = get_journaling_key(KeyTag::Journal as u8, 0)?;
        let value = self.read_value::<JournalHeader>(&key).await?;
        if let Some(header) = value {
            self.coherently_resolve_journal(header).await?;
        }
        Ok(())
    }
}
impl<S> JournalingKeyValueStore<S>
where
    S: DirectKeyValueStore,
    S::Error: From<JournalConsistencyError>,
{
    /// Resolves the pending operations that were previously stored in the database
    /// journal.
    ///
    /// For each block processed, we atomically update the journal header as well. When
    /// the last block is processed, this atomically clears the journal and makes the store
    /// finally available again (for the range of keys managed by the journal).
    ///
    /// This function respects the constraints of the underlying key-value store `S` if
    /// the following conditions are met:
    ///
    /// (1) each block contains at most `S::MAX_BATCH_SIZE - 2` operations;
    ///
    /// (2) the total size of the all operations in a block doesn't exceed:
    /// `S::MAX_BATCH_TOTAL_SIZE - sizeof(block_key) - sizeof(header_key) - sizeof(bcs_header)`
    ///
    /// (3) every operation in a block satisfies the constraints on individual database
    /// operations represented by `S::MAX_KEY_SIZE` and `S::MAX_VALUE_SIZE`.
    ///
    /// (4) `block_key` and `header_key` don't exceed `S::MAX_KEY_SIZE` and `bcs_header`
    /// doesn't exceed `S::MAX_VALUE_SIZE`.
    async fn coherently_resolve_journal(&self, mut header: JournalHeader) -> Result<(), S::Error> {
        let header_key = get_journaling_key(KeyTag::Journal as u8, 0)?;
        // Blocks are replayed in reverse order of their index; crashing midway
        // leaves a smaller, still-valid journal to resume from.
        while header.block_count > 0 {
            let block_key = get_journaling_key(KeyTag::Entry as u8, header.block_count - 1)?;
            // Read the batch of updates (aka. "block") previously saved in the journal.
            let mut batch = self
                .store
                .read_value::<S::Batch>(&block_key)
                .await?
                .ok_or(JournalConsistencyError::FailureToRetrieveJournalBlock)?;
            // Execute the block and delete it from the journal atomically.
            batch.add_delete(block_key);
            header.block_count -= 1;
            if header.block_count > 0 {
                let value = bcs::to_bytes(&header)?;
                batch.add_insert(header_key.clone(), value);
            } else {
                // Last block: deleting the header clears the journal entirely.
                batch.add_delete(header_key.clone());
            }
            self.store.write_batch(batch).await?;
        }
        Ok(())
    }
    /// Writes the content of `batch` to the journal as a succession of blocks that can be
    /// interpreted later by `coherently_resolve_journal`.
    ///
    /// Starting with a batch of operations that is typically too large to be executed in
    /// one go (see `is_fastpath_feasible()` below), the goal of this function is to split
    /// the batch into smaller blocks so that `coherently_resolve_journal` respects the
    /// constraints of the underlying key-value store (see analysis above).
    ///
    /// For efficiency reasons, we write as many blocks as possible in each "transaction"
    /// batch, using one write-operation per block. Then we also update the journal header
    /// with the final number of blocks.
    ///
    /// As a result, the constraints of the underlying database are respected if the
    /// following conditions are met while a "transaction" batch is being built:
    ///
    /// (1) The number of blocks per transaction doesn't exceed `S::MAX_BATCH_SIZE`.
    /// But it is perfectly possible to have `S::MAX_BATCH_SIZE = usize::MAX`.
    ///
    /// (2) The total size of BCS-serialized blocks together with their corresponding keys
    /// does not exceed `S::MAX_BATCH_TOTAL_SIZE`.
    ///
    /// (3) The size of each BCS-serialized block doesn't exceed `S::MAX_VALUE_SIZE`.
    ///
    /// (4) When processing a journal block, we have to do two other operations.
    /// (a) removing the existing block. The cost is `key_len`.
    /// (b) updating or removing the journal. The cost is `key_len + header_value_len`
    /// or `key_len`. An upper bound is thus
    /// `journal_len_upper_bound = key_len + header_value_len`.
    /// Thus the following has to be taken as upper bound on the block size:
    /// `S::MAX_BATCH_TOTAL_SIZE - key_len - journal_len_upper_bound`.
    ///
    /// NOTE:
    /// * Since a block must contain at least one operation and M bytes of the
    /// serialization overhead (typically M is 2 or 3 bytes of vector sizes), condition (3)
    /// requires that each operation in the original batch satisfies:
    /// `sizeof(key) + sizeof(value) + M <= S::MAX_VALUE_SIZE`
    ///
    /// * Similarly, a transaction must contain at least one block so it is desirable that
    /// the maximum size of a block insertion `1 + sizeof(block_key) + S::MAX_VALUE_SIZE`
    /// plus M bytes of overhead doesn't exceed the threshold of condition (2).
    async fn write_journal(&self, batch: S::Batch) -> Result<JournalHeader, S::Error> {
        let header_key = get_journaling_key(KeyTag::Journal as u8, 0)?;
        let key_len = header_key.len();
        let header_value_len = bcs::serialized_size(&JournalHeader::default())?;
        let journal_len_upper_bound = key_len + header_value_len;
        // Each block in a transaction comes with a key.
        let max_transaction_size = S::MAX_BATCH_TOTAL_SIZE;
        let max_block_size = std::cmp::min(
            S::MAX_VALUE_SIZE,
            S::MAX_BATCH_TOTAL_SIZE - key_len - journal_len_upper_bound,
        );
        let mut iter = batch.into_iter();
        let mut block_batch = S::Batch::default();
        let mut block_size = 0;
        let mut block_count = 0;
        let mut transaction_batch = S::Batch::default();
        let mut transaction_size = 0;
        while iter.write_next_value(&mut block_batch, &mut block_size)? {
            // Decide whether the current block, and possibly the current
            // transaction, must be flushed before taking the next operation.
            let (block_flush, transaction_flush) = {
                if iter.is_empty() || transaction_batch.len() == S::MAX_BATCH_SIZE - 1 {
                    (true, true)
                } else {
                    let next_block_size = iter
                        .next_batch_size(&block_batch, block_size)?
                        .expect("iter is not empty");
                    let next_transaction_size = transaction_size + next_block_size + key_len;
                    let transaction_flush = next_transaction_size > max_transaction_size;
                    let block_flush = transaction_flush
                        || block_batch.len() == S::MAX_BATCH_SIZE - 2
                        || next_block_size > max_block_size;
                    (block_flush, transaction_flush)
                }
            };
            if block_flush {
                block_size += block_batch.overhead_size();
                let value = bcs::to_bytes(&block_batch)?;
                block_batch = S::Batch::default();
                // Sanity check: the running size accounting must match the
                // actual serialized size.
                assert_eq!(value.len(), block_size);
                let key = get_journaling_key(KeyTag::Entry as u8, block_count)?;
                transaction_batch.add_insert(key, value);
                block_count += 1;
                transaction_size += block_size + key_len;
                block_size = 0;
            }
            if transaction_flush {
                let batch = std::mem::take(&mut transaction_batch);
                self.store.write_batch(batch).await?;
                transaction_size = 0;
            }
        }
        // Writing the header last makes the whole journal visible atomically.
        let header = JournalHeader { block_count };
        if block_count > 0 {
            let value = bcs::to_bytes(&header)?;
            let mut batch = S::Batch::default();
            batch.add_insert(header_key, value);
            self.store.write_batch(batch).await?;
        }
        Ok(header)
    }
    /// Whether `batch` can be written directly within the backend's limits.
    fn is_fastpath_feasible(batch: &S::Batch) -> bool {
        batch.len() <= S::MAX_BATCH_SIZE && batch.num_bytes() <= S::MAX_BATCH_TOTAL_SIZE
    }
}
impl<S> JournalingKeyValueStore<S> {
    /// Creates a new journaling store.
    ///
    /// The returned store does not have exclusive access, so it can only use
    /// the fast path for writes.
    pub fn new(store: S) -> Self {
        Self {
            store,
            has_exclusive_access: false,
        }
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/rocks_db.rs | linera-views/src/backends/rocks_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueStore`] for the RocksDB database.
use std::{
ffi::OsString,
fmt::Display,
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use linera_base::ensure;
use rocksdb::{BlockBasedOptions, Cache, DBCompactionStyle, SliceTransform};
use serde::{Deserialize, Serialize};
use sysinfo::{CpuRefreshKind, MemoryRefreshKind, RefreshKind, System};
use tempfile::TempDir;
use thiserror::Error;
#[cfg(with_metrics)]
use crate::metering::MeteredDatabase;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::{Batch, WriteOperation},
common::get_upper_bound_option,
lru_caching::{LruCachingConfig, LruCachingDatabase},
store::{
KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
value_splitting::{ValueSplittingDatabase, ValueSplittingError},
};
/// The prefixes being used in the system.
/// `ROOT_KEY_DOMAIN` is the first byte of every data key.
static ROOT_KEY_DOMAIN: [u8; 1] = [0];
/// First byte of the bookkeeping keys that record which root keys were written.
static STORED_ROOT_KEYS_PREFIX: u8 = 1;
/// The number of streams for the test
#[cfg(with_testing)]
const TEST_ROCKS_DB_MAX_STREAM_QUERIES: usize = 10;
// The maximum size of values in RocksDB is 3 GiB
// For offset reasons we decrease by 400
const MAX_VALUE_SIZE: usize = 3 * 1024 * 1024 * 1024 - 400;
// The maximum size of keys in RocksDB is 8 MiB
// For offset reasons we decrease by 400
const MAX_KEY_SIZE: usize = 8 * 1024 * 1024 - 400;
// Size of each in-memory write buffer handed to RocksDB.
const WRITE_BUFFER_SIZE: usize = 256 * 1024 * 1024; // 256 MiB
// Maximum number of in-memory write buffers.
const MAX_WRITE_BUFFER_NUMBER: i32 = 6;
// Passed to `Cache::new_hyper_clock_cache` as the estimated entry charge.
const HYPER_CLOCK_CACHE_BLOCK_SIZE: usize = 8 * 1024; // 8 KiB
/// The RocksDB client that we use.
type DB = rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>;
/// The choice of the spawning mode.
/// `SpawnBlocking` always works and is the safest.
/// `BlockInPlace` can only be used in a multi-threaded environment.
/// One way to select that is to select `BlockInPlace` when
/// `tokio::runtime::Handle::current().metrics().num_workers() > 1`.
/// `BlockInPlace` is documented in <https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html>
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum RocksDbSpawnMode {
    /// This uses the `spawn_blocking` function of Tokio.
    SpawnBlocking,
    /// This uses the `block_in_place` function of Tokio.
    BlockInPlace,
}
impl RocksDbSpawnMode {
    /// Picks the spawning mode that fits the current Tokio runtime:
    /// `BlockInPlace` when there is more than one worker thread, otherwise
    /// the always-safe `SpawnBlocking`.
    pub fn get_spawn_mode_from_runtime() -> Self {
        let workers = tokio::runtime::Handle::current().metrics().num_workers();
        match workers {
            0 | 1 => RocksDbSpawnMode::SpawnBlocking,
            _ => RocksDbSpawnMode::BlockInPlace,
        }
    }
    /// Runs the blocking closure `f` on `input` according to the selected
    /// policy, surfacing any task-join failure as a store error.
    #[inline]
    async fn spawn<F, I, O>(&self, f: F, input: I) -> Result<O, RocksDbStoreInternalError>
    where
        F: FnOnce(I) -> Result<O, RocksDbStoreInternalError> + Send + 'static,
        I: Send + 'static,
        O: Send + 'static,
    {
        match self {
            RocksDbSpawnMode::BlockInPlace => tokio::task::block_in_place(move || f(input)),
            RocksDbSpawnMode::SpawnBlocking => {
                // `?` unwraps the join result; the closure's own `Result` is
                // returned as-is.
                tokio::task::spawn_blocking(move || f(input)).await?
            }
        }
    }
}
impl Display for RocksDbSpawnMode {
    /// Prints the snake_case name of the mode.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            RocksDbSpawnMode::SpawnBlocking => "spawn_blocking",
            RocksDbSpawnMode::BlockInPlace => "block_in_place",
        };
        f.write_str(name)
    }
}
fn check_key_size(key: &[u8]) -> Result<(), RocksDbStoreInternalError> {
ensure!(
key.len() <= MAX_KEY_SIZE,
RocksDbStoreInternalError::KeyTooLong
);
Ok(())
}
// Shared, clonable handle to the RocksDB instance together with the key
// prefix under which all of this store's data lives.
#[derive(Clone)]
struct RocksDbStoreExecutor {
    /// The database handle, shared between clones.
    db: Arc<DB>,
    /// Prefix prepended to every user key before it reaches the database.
    start_key: Vec<u8>,
}
impl RocksDbStoreExecutor {
    /// Tests existence of each key, using `key_may_exist` to skip keys that
    /// are definitely absent before doing the actual lookups.
    fn contains_keys_internal(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<bool>, RocksDbStoreInternalError> {
        let size = keys.len();
        let mut results = vec![false; size];
        let mut indices = Vec::new();
        let mut keys_red = Vec::new();
        for (i, key) in keys.into_iter().enumerate() {
            check_key_size(&key)?;
            let mut full_key = self.start_key.to_vec();
            full_key.extend(key);
            // This relies on `key_may_exist` having no false negatives:
            // a `false` here is treated as a definite miss.
            if self.db.key_may_exist(&full_key) {
                indices.push(i);
                keys_red.push(full_key);
            }
        }
        let values_red = self.db.multi_get(keys_red);
        for (index, value) in indices.into_iter().zip(values_red) {
            results[index] = value?.is_some();
        }
        Ok(results)
    }
    /// Reads the values for all `keys` (prefixed with `start_key`) in one
    /// `multi_get`; absent keys yield `None`.
    fn read_multi_values_bytes_internal(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<Option<Vec<u8>>>, RocksDbStoreInternalError> {
        for key in &keys {
            check_key_size(key)?;
        }
        let full_keys = keys
            .into_iter()
            .map(|key| {
                let mut full_key = self.start_key.to_vec();
                full_key.extend(key);
                full_key
            })
            .collect::<Vec<_>>();
        let entries = self.db.multi_get(&full_keys);
        Ok(entries.into_iter().collect::<Result<_, _>>()?)
    }
    /// Returns a raw iterator positioned at the first key >= `prefix`, bounded
    /// above so iteration never leaves the prefix range.
    fn get_find_prefix_iterator(&self, prefix: &[u8]) -> rocksdb::DBRawIteratorWithThreadMode<DB> {
        // Configure ReadOptions optimized for SSDs and iterator performance
        let mut read_opts = rocksdb::ReadOptions::default();
        // Enable async I/O for better concurrency
        read_opts.set_async_io(true);
        // Set precise upper bound to minimize key traversal
        let upper_bound = get_upper_bound_option(prefix);
        if let Some(upper_bound) = upper_bound {
            read_opts.set_iterate_upper_bound(upper_bound);
        }
        let mut iter = self.db.raw_iterator_opt(read_opts);
        iter.seek(prefix);
        iter
    }
    /// Lists the keys under `key_prefix`, with `start_key` and the prefix
    /// stripped from each result.
    fn find_keys_by_prefix_internal(
        &self,
        key_prefix: Vec<u8>,
    ) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
        check_key_size(&key_prefix)?;
        let mut prefix = self.start_key.clone();
        prefix.extend(key_prefix);
        let len = prefix.len();
        let mut iter = self.get_find_prefix_iterator(&prefix);
        let mut keys = Vec::new();
        while let Some(key) = iter.key() {
            keys.push(key[len..].to_vec());
            iter.next();
        }
        Ok(keys)
    }
    /// Lists the `(key, value)` pairs under `key_prefix`, with the prefix
    /// stripped from each key.
    #[expect(clippy::type_complexity)]
    fn find_key_values_by_prefix_internal(
        &self,
        key_prefix: Vec<u8>,
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, RocksDbStoreInternalError> {
        check_key_size(&key_prefix)?;
        let mut prefix = self.start_key.clone();
        prefix.extend(key_prefix);
        let len = prefix.len();
        let mut iter = self.get_find_prefix_iterator(&prefix);
        let mut key_values = Vec::new();
        while let Some((key, value)) = iter.item() {
            let key_value = (key[len..].to_vec(), value.to_vec());
            key_values.push(key_value);
            iter.next();
        }
        Ok(key_values)
    }
    /// Translates a `Batch` into a single atomic RocksDB write; when
    /// `write_root_key` is set, also records this store's root key under
    /// `STORED_ROOT_KEYS_PREFIX`.
    fn write_batch_internal(
        &self,
        batch: Batch,
        write_root_key: bool,
    ) -> Result<(), RocksDbStoreInternalError> {
        let mut inner_batch = rocksdb::WriteBatchWithTransaction::default();
        for operation in batch.operations {
            match operation {
                WriteOperation::Delete { key } => {
                    check_key_size(&key)?;
                    let mut full_key = self.start_key.to_vec();
                    full_key.extend(key);
                    inner_batch.delete(&full_key)
                }
                WriteOperation::Put { key, value } => {
                    check_key_size(&key)?;
                    let mut full_key = self.start_key.to_vec();
                    full_key.extend(key);
                    inner_batch.put(&full_key, value)
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    check_key_size(&key_prefix)?;
                    let mut full_key1 = self.start_key.to_vec();
                    full_key1.extend(&key_prefix);
                    // A prefix deletion becomes a range deletion up to the
                    // prefix's exclusive upper bound.
                    let full_key2 =
                        get_upper_bound_option(&full_key1).expect("the first entry cannot be 255");
                    inner_batch.delete_range(&full_key1, &full_key2);
                }
            }
        }
        if write_root_key {
            // The bookkeeping key is the start key with its domain byte
            // replaced by `STORED_ROOT_KEYS_PREFIX`.
            let mut full_key = self.start_key.to_vec();
            full_key[0] = STORED_ROOT_KEYS_PREFIX;
            inner_batch.put(&full_key, vec![]);
        }
        self.db.write(inner_batch)?;
        Ok(())
    }
}
/// The inner client
#[derive(Clone)]
pub struct RocksDbStoreInternal {
    /// Executor holding the database handle and this store's key prefix.
    executor: RocksDbStoreExecutor,
    /// Keeps the storage path (and its guard) alive for the store's lifetime.
    _path_with_guard: PathWithGuard,
    /// Preferred buffer size for async streams.
    max_stream_queries: usize,
    /// How blocking RocksDB calls are scheduled on the async runtime.
    spawn_mode: RocksDbSpawnMode,
    /// Whether this store already recorded its root key (set on first write).
    root_key_written: Arc<AtomicBool>,
}
/// Database-level connection to RocksDB for managing namespaces and partitions.
#[derive(Clone)]
pub struct RocksDbDatabaseInternal {
    /// Executor rooted at the default root-key domain.
    executor: RocksDbStoreExecutor,
    /// Keeps the storage path (and its guard) alive.
    _path_with_guard: PathWithGuard,
    /// Preferred buffer size for async streams.
    max_stream_queries: usize,
    /// How blocking RocksDB calls are scheduled on the async runtime.
    spawn_mode: RocksDbSpawnMode,
}
impl WithError for RocksDbDatabaseInternal {
    // Database-level operations report the same error type as the stores.
    type Error = RocksDbStoreInternalError;
}
/// The initial configuration of the system
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RocksDbStoreInternalConfig {
    /// The path to the storage containing the namespaces
    /// (one subdirectory per namespace).
    pub path_with_guard: PathWithGuard,
    /// The chosen spawn mode
    pub spawn_mode: RocksDbSpawnMode,
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
}
impl RocksDbDatabaseInternal {
    /// Validates that `namespace` contains only ASCII alphanumerics and `_`.
    fn check_namespace(namespace: &str) -> Result<(), RocksDbStoreInternalError> {
        let is_valid = namespace
            .chars()
            .all(|c| c.is_ascii_alphanumeric() || c == '_');
        if is_valid {
            Ok(())
        } else {
            Err(RocksDbStoreInternalError::InvalidNamespace)
        }
    }

    /// Opens (or creates) the RocksDB instance backing `namespace` and wraps
    /// it as a database-level handle rooted at the default root-key domain.
    fn build(
        config: &RocksDbStoreInternalConfig,
        namespace: &str,
    ) -> Result<RocksDbDatabaseInternal, RocksDbStoreInternalError> {
        // Build a store first, then reuse its executor and settings.
        let store = RocksDbStoreInternal::build(config, namespace, ROOT_KEY_DOMAIN.to_vec())?;
        Ok(RocksDbDatabaseInternal {
            executor: store.executor,
            _path_with_guard: store._path_with_guard,
            max_stream_queries: store.max_stream_queries,
            spawn_mode: store.spawn_mode,
        })
    }
}
impl RocksDbStoreInternal {
    /// Opens (creating it if needed) the RocksDB database for `namespace`
    /// under the configured path, tuned for the local CPU/RAM, and returns a
    /// store whose keys all live under `start_key`.
    fn build(
        config: &RocksDbStoreInternalConfig,
        namespace: &str,
        start_key: Vec<u8>,
    ) -> Result<RocksDbStoreInternal, RocksDbStoreInternalError> {
        RocksDbDatabaseInternal::check_namespace(namespace)?;
        // The namespace is a subdirectory of the configured storage path.
        let mut path_buf = config.path_with_guard.path_buf.clone();
        let mut path_with_guard = config.path_with_guard.clone();
        path_buf.push(namespace);
        path_with_guard.path_buf = path_buf.clone();
        let max_stream_queries = config.max_stream_queries;
        let spawn_mode = config.spawn_mode;
        if !std::path::Path::exists(&path_buf) {
            std::fs::create_dir(path_buf.clone())?;
        }
        // Probe CPU count and RAM to size RocksDB's parallelism and cache.
        let sys = System::new_with_specifics(
            RefreshKind::nothing()
                .with_cpu(CpuRefreshKind::everything())
                .with_memory(MemoryRefreshKind::nothing().with_ram()),
        );
        let num_cpus = sys.cpus().len() as i32;
        let total_ram = sys.total_memory() as usize;
        let mut options = rocksdb::Options::default();
        options.create_if_missing(true);
        options.create_missing_column_families(true);
        // Flush in-memory buffer to disk more often
        options.set_write_buffer_size(WRITE_BUFFER_SIZE);
        options.set_max_write_buffer_number(MAX_WRITE_BUFFER_NUMBER);
        options.set_compression_type(rocksdb::DBCompressionType::Lz4);
        options.set_level_zero_slowdown_writes_trigger(8);
        options.set_level_zero_stop_writes_trigger(12);
        options.set_level_zero_file_num_compaction_trigger(2);
        // We deliberately give RocksDB one background thread *per* CPU so that
        // flush + (N-1) compactions can hammer the NVMe at full bandwidth while
        // still leaving enough CPU time for the foreground application threads.
        options.increase_parallelism(num_cpus);
        options.set_max_background_jobs(num_cpus);
        options.set_max_subcompactions(num_cpus as u32);
        options.set_level_compaction_dynamic_level_bytes(true);
        options.set_compaction_style(DBCompactionStyle::Level);
        options.set_target_file_size_base(2 * WRITE_BUFFER_SIZE as u64);
        let mut block_options = BlockBasedOptions::default();
        block_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
        block_options.set_cache_index_and_filter_blocks(true);
        // Allocate 1/4 of total RAM for RocksDB block cache, which is a reasonable balance:
        // - Large enough to significantly improve read performance by caching frequently accessed blocks
        // - Small enough to leave memory for other system components
        // - Follows common practice for database caching in server environments
        // - Prevents excessive memory pressure that could lead to swapping or OOM conditions
        block_options.set_block_cache(&Cache::new_hyper_clock_cache(
            total_ram / 4,
            HYPER_CLOCK_CACHE_BLOCK_SIZE,
        ));
        // Configure bloom filters for prefix iteration optimization
        block_options.set_bloom_filter(10.0, false);
        block_options.set_whole_key_filtering(false);
        // 32KB blocks instead of default 4KB - reduces iterator seeks
        block_options.set_block_size(32 * 1024);
        // Use latest format for better compression and performance
        block_options.set_format_version(5);
        options.set_block_based_table_factory(&block_options);
        // Configure prefix extraction for bloom filter optimization
        // Use 8 bytes: ROOT_KEY_DOMAIN (1 byte) + BCS variant (1-2 bytes) + identifier start (4-5 bytes)
        let prefix_extractor = SliceTransform::create_fixed_prefix(8);
        options.set_prefix_extractor(prefix_extractor);
        // 12.5% of memtable size for bloom filter
        options.set_memtable_prefix_bloom_ratio(0.125);
        // Skip bloom filter for memtable when key exists
        options.set_optimize_filters_for_hits(true);
        // Use memory-mapped files for faster reads
        options.set_allow_mmap_reads(true);
        // Don't use random access pattern since we do prefix scans
        options.set_advise_random_on_open(false);
        let db = DB::open(&options, path_buf)?;
        let executor = RocksDbStoreExecutor {
            db: Arc::new(db),
            start_key,
        };
        Ok(RocksDbStoreInternal {
            executor,
            _path_with_guard: path_with_guard,
            max_stream_queries,
            spawn_mode,
            root_key_written: Arc::new(AtomicBool::new(false)),
        })
    }
}
impl WithError for RocksDbStoreInternal {
    // All store operations surface the internal RocksDB error type.
    type Error = RocksDbStoreInternalError;
}
impl ReadableKeyValueStore for RocksDbStoreInternal {
    const MAX_KEY_SIZE: usize = MAX_KEY_SIZE;
    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }
    /// Recovers the root key by BCS-decoding the portion of `start_key` that
    /// follows the root-key domain byte.
    fn root_key(&self) -> Result<Vec<u8>, RocksDbStoreInternalError> {
        assert!(self.executor.start_key.starts_with(&ROOT_KEY_DOMAIN));
        let root_key = bcs::from_bytes(&self.executor.start_key[ROOT_KEY_DOMAIN.len()..])?;
        Ok(root_key)
    }
    // Each read prefixes the key with `start_key` and runs the blocking
    // RocksDB call via the configured spawn mode.
    async fn read_value_bytes(
        &self,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, RocksDbStoreInternalError> {
        check_key_size(key)?;
        let db = self.executor.db.clone();
        let mut full_key = self.executor.start_key.to_vec();
        full_key.extend(key);
        self.spawn_mode
            .spawn(move |x| Ok(db.get(&x)?), full_key)
            .await
    }
    async fn contains_key(&self, key: &[u8]) -> Result<bool, RocksDbStoreInternalError> {
        check_key_size(key)?;
        let db = self.executor.db.clone();
        let mut full_key = self.executor.start_key.to_vec();
        full_key.extend(key);
        self.spawn_mode
            .spawn(
                move |x| {
                    // `key_may_exist` filters definite misses cheaply; only a
                    // possible hit needs the full `get`.
                    if !db.key_may_exist(&x) {
                        return Ok(false);
                    }
                    Ok(db.get(&x)?.is_some())
                },
                full_key,
            )
            .await
    }
    async fn contains_keys(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<bool>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(move |x| executor.contains_keys_internal(x), keys.to_vec())
            .await
    }
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(
                move |x| executor.read_multi_values_bytes_internal(x),
                keys.to_vec(),
            )
            .await
    }
    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        let key_prefix = key_prefix.to_vec();
        self.spawn_mode
            .spawn(
                move |x| executor.find_keys_by_prefix_internal(x),
                key_prefix,
            )
            .await
    }
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        let key_prefix = key_prefix.to_vec();
        self.spawn_mode
            .spawn(
                move |x| executor.find_key_values_by_prefix_internal(x),
                key_prefix,
            )
            .await
    }
}
impl WritableKeyValueStore for RocksDbStoreInternal {
    const MAX_VALUE_SIZE: usize = MAX_VALUE_SIZE;
    /// Applies `batch` atomically. The first write of this store also records
    /// its root key; the `fetch_or` ensures this happens exactly once.
    async fn write_batch(&self, batch: Batch) -> Result<(), RocksDbStoreInternalError> {
        let write_root_key = !self.root_key_written.fetch_or(true, Ordering::SeqCst);
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(
                move |x| executor.write_batch_internal(x, write_root_key),
                batch,
            )
            .await
    }
    /// No journal is used by this store, so there is nothing to clear.
    async fn clear_journal(&self) -> Result<(), RocksDbStoreInternalError> {
        Ok(())
    }
}
impl KeyValueDatabase for RocksDbDatabaseInternal {
type Config = RocksDbStoreInternalConfig;
type Store = RocksDbStoreInternal;
fn get_name() -> String {
"rocksdb internal".to_string()
}
    /// Connects to (creating it if needed) the RocksDB database backing
    /// `namespace`.
    async fn connect(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<Self, RocksDbStoreInternalError> {
        Self::build(config, namespace)
    }
fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, RocksDbStoreInternalError> {
let mut start_key = ROOT_KEY_DOMAIN.to_vec();
start_key.extend(bcs::to_bytes(root_key)?);
let mut executor = self.executor.clone();
executor.start_key = start_key;
Ok(RocksDbStoreInternal {
executor,
_path_with_guard: self._path_with_guard.clone(),
max_stream_queries: self.max_stream_queries,
spawn_mode: self.spawn_mode,
root_key_written: Arc::new(AtomicBool::new(false)),
})
}
    /// This backend provides no distinct exclusive mode: identical to
    /// `open_shared`.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, RocksDbStoreInternalError> {
        self.open_shared(root_key)
    }
async fn list_all(config: &Self::Config) -> Result<Vec<String>, RocksDbStoreInternalError> {
let entries = std::fs::read_dir(config.path_with_guard.path_buf.clone())?;
let mut namespaces = Vec::new();
for entry in entries {
let entry = entry?;
if !entry.file_type()?.is_dir() {
return Err(RocksDbStoreInternalError::NonDirectoryNamespace);
}
let namespace = match entry.file_name().into_string() {
Err(error) => {
return Err(RocksDbStoreInternalError::IntoStringError(error));
}
Ok(namespace) => namespace,
};
namespaces.push(namespace);
}
Ok(namespaces)
}
async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
let mut store = self.open_shared(&[])?;
store.executor.start_key = vec![STORED_ROOT_KEYS_PREFIX];
let bcs_root_keys = store.find_keys_by_prefix(&[]).await?;
let mut root_keys = Vec::new();
for bcs_root_key in bcs_root_keys {
let root_key = bcs::from_bytes::<Vec<u8>>(&bcs_root_key)?;
root_keys.push(root_key);
}
Ok(root_keys)
}
async fn delete_all(config: &Self::Config) -> Result<(), RocksDbStoreInternalError> {
let namespaces = Self::list_all(config).await?;
for namespace in namespaces {
let mut path_buf = config.path_with_guard.path_buf.clone();
path_buf.push(&namespace);
std::fs::remove_dir_all(path_buf.as_path())?;
}
Ok(())
}
async fn exists(
config: &Self::Config,
namespace: &str,
) -> Result<bool, RocksDbStoreInternalError> {
Self::check_namespace(namespace)?;
let mut path_buf = config.path_with_guard.path_buf.clone();
path_buf.push(namespace);
let test = std::path::Path::exists(&path_buf);
Ok(test)
}
async fn create(
config: &Self::Config,
namespace: &str,
) -> Result<(), RocksDbStoreInternalError> {
Self::check_namespace(namespace)?;
let mut path_buf = config.path_with_guard.path_buf.clone();
path_buf.push(namespace);
if std::path::Path::exists(&path_buf) {
return Err(RocksDbStoreInternalError::StoreAlreadyExists);
}
std::fs::create_dir_all(path_buf)?;
Ok(())
}
async fn delete(
config: &Self::Config,
namespace: &str,
) -> Result<(), RocksDbStoreInternalError> {
Self::check_namespace(namespace)?;
let mut path_buf = config.path_with_guard.path_buf.clone();
path_buf.push(namespace);
let path = path_buf.as_path();
std::fs::remove_dir_all(path)?;
Ok(())
}
}
#[cfg(with_testing)]
impl TestKeyValueDatabase for RocksDbDatabaseInternal {
    /// Builds a configuration suitable for tests: a guarded temporary
    /// directory, the spawn mode inferred from the current runtime, and the
    /// test limit on stream queries.
    async fn new_test_config() -> Result<RocksDbStoreInternalConfig, RocksDbStoreInternalError> {
        Ok(RocksDbStoreInternalConfig {
            path_with_guard: PathWithGuard::new_testing(),
            spawn_mode: RocksDbSpawnMode::get_spawn_mode_from_runtime(),
            max_stream_queries: TEST_ROCKS_DB_MAX_STREAM_QUERIES,
        })
    }
}
/// The error type for [`RocksDbStoreInternal`]
#[derive(Error, Debug)]
pub enum RocksDbStoreInternalError {
    /// Store already exists
    #[error("Store already exists")]
    StoreAlreadyExists,
    /// Tokio join error in RocksDB.
    #[error("tokio join error: {0}")]
    TokioJoinError(#[from] tokio::task::JoinError),
    /// RocksDB error.
    #[error("RocksDB error: {0}")]
    RocksDb(#[from] rocksdb::Error),
    /// The database contains a file which is not a directory
    #[error("Namespaces should be directories")]
    NonDirectoryNamespace,
    /// Error converting `OsString` to `String`
    #[error("error in the conversion from OsString: {0:?}")]
    IntoStringError(OsString),
    /// The key must have at most 8 MiB
    #[error("The key must have at most 8 MiB")]
    KeyTooLong,
    /// Namespace contains forbidden characters
    #[error("Namespace contains forbidden characters")]
    InvalidNamespace,
    /// Filesystem error
    #[error("Filesystem error: {0}")]
    FsError(#[from] std::io::Error),
    /// BCS serialization error.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
}
/// A path and the guard for the temporary directory if needed
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PathWithGuard {
    /// The path to the data
    pub path_buf: PathBuf,
    /// The guard for the directory if one is needed.
    /// `#[serde(skip)]` means the guard is dropped on serialization and absent
    /// after deserialization; only the path itself round-trips.
    #[serde(skip)]
    _dir: Option<Arc<TempDir>>,
}
impl PathWithGuard {
    /// Wraps an existing path; no temporary-directory guard is attached.
    pub fn new(path_buf: PathBuf) -> Self {
        Self {
            _dir: None,
            path_buf,
        }
    }
    /// Returns the test path for RocksDB without common config.
    ///
    /// The temporary directory is kept alive by storing its guard next to the
    /// path, so the directory is removed only when the last clone is dropped.
    #[cfg(with_testing)]
    fn new_testing() -> PathWithGuard {
        let dir = TempDir::new().unwrap();
        PathWithGuard {
            path_buf: dir.path().to_path_buf(),
            _dir: Some(Arc::new(dir)),
        }
    }
}
impl PartialEq for PathWithGuard {
    /// Equality considers only the path; the temporary-directory guard is
    /// deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.path_buf.eq(&other.path_buf)
    }
}
impl Eq for PathWithGuard {}
impl KeyValueStoreError for RocksDbStoreInternalError {
    // Backend tag used to label errors coming from this store.
    const BACKEND: &'static str = "rocks_db";
}
/// The composed error type for the `RocksDbStore`
pub type RocksDbStoreError = ValueSplittingError<RocksDbStoreInternalError>;
/// The composed config type for the `RocksDbStore`
pub type RocksDbStoreConfig = LruCachingConfig<RocksDbStoreInternalConfig>;
/// The `RocksDbDatabase` composed type with metrics
// Metering wraps every layer so each one reports its own timings.
#[cfg(with_metrics)]
pub type RocksDbDatabase = MeteredDatabase<
    LruCachingDatabase<
        MeteredDatabase<ValueSplittingDatabase<MeteredDatabase<RocksDbDatabaseInternal>>>,
    >,
>;
/// The `RocksDbDatabase` composed type
#[cfg(not(with_metrics))]
pub type RocksDbDatabase = LruCachingDatabase<ValueSplittingDatabase<RocksDbDatabaseInternal>>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/value_splitting.rs | linera-views/src/backends/value_splitting.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Adds support for large values to a given store by splitting them between several keys.
use linera_base::ensure;
use thiserror::Error;
use crate::{
batch::{Batch, WriteOperation},
store::{
KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
};
#[cfg(with_testing)]
use crate::{
memory::{MemoryStore, MemoryStoreError},
store::TestKeyValueDatabase,
};
/// A key-value database with no size limit for values.
///
/// It wraps a key-value store, potentially _with_ a size limit, and automatically
/// splits up large values into smaller ones. A single logical key-value pair is
/// stored as multiple smaller key-value pairs in the wrapped store.
/// See the `README.md` for additional details.
#[derive(Clone)]
pub struct ValueSplittingDatabase<D> {
    /// The underlying database.
    database: D,
}
/// A key-value store with no size limit for values.
#[derive(Clone)]
pub struct ValueSplittingStore<S> {
    /// The underlying store.
    store: S,
}
/// The composed error type built from the inner error type.
#[derive(Error, Debug)]
pub enum ValueSplittingError<E> {
    /// inner store error
    #[error(transparent)]
    InnerStoreError(#[from] E),
    /// The key is of length less than 4, so we cannot extract the 4-byte index suffix
    #[error("the key is of length less than 4, so we cannot extract the first byte")]
    TooShortKey,
    /// Value segment is missing from the database
    #[error("value segment is missing from the database")]
    MissingSegment,
    /// No count of size `u32` is available in the value
    #[error("no count of size u32 is available in the value")]
    NoCountAvailable,
}
impl<E: KeyValueStoreError> From<bcs::Error> for ValueSplittingError<E> {
fn from(error: bcs::Error) -> Self {
let error = E::from(error);
ValueSplittingError::InnerStoreError(error)
}
}
impl<E: KeyValueStoreError + 'static> KeyValueStoreError for ValueSplittingError<E> {
    // Backend tag used to label errors coming from this layer.
    const BACKEND: &'static str = "value splitting";
}
// Generic parameter names follow the file convention: `D` for the wrapped
// database, `S` for the wrapped store.
impl<D> WithError for ValueSplittingDatabase<D>
where
    D: WithError,
    D::Error: 'static,
{
    type Error = ValueSplittingError<D::Error>;
}
impl<S> WithError for ValueSplittingStore<S>
where
    S: WithError,
    S::Error: 'static,
{
    type Error = ValueSplittingError<S::Error>;
}
// Physical layout: a logical pair `(key, value)` is stored as segments.
// Segment `i` lives under the physical key `key ++ i` (4-byte big-endian
// index). Segment 0's value starts with the 4-byte big-endian segment count,
// followed by the first chunk of the logical value; segments 1..count hold
// the remaining chunks.
impl<S> ReadableKeyValueStore for ValueSplittingStore<S>
where
    S: ReadableKeyValueStore,
    S::Error: 'static,
{
    // Four bytes of every physical key are reserved for the segment index.
    const MAX_KEY_SIZE: usize = S::MAX_KEY_SIZE - 4;
    fn max_stream_queries(&self) -> usize {
        self.store.max_stream_queries()
    }
    fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
        Ok(self.store.root_key()?)
    }
    /// Reads segment 0, then fetches and concatenates the remaining segments.
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        // `key ++ [0, 0, 0, 0]` is the segment-0 key.
        let mut big_key = key.to_vec();
        big_key.extend(&[0, 0, 0, 0]);
        let value = self.store.read_value_bytes(&big_key).await?;
        let Some(value) = value else {
            return Ok(None);
        };
        let count = Self::read_count_from_value(&value)?;
        // Drop the 4-byte count prefix; the rest is the first chunk.
        let mut big_value = value[4..].to_vec();
        if count == 1 {
            return Ok(Some(big_value));
        }
        let mut big_keys = Vec::new();
        for i in 1..count {
            let big_key_segment = Self::get_segment_key(key, i)?;
            big_keys.push(big_key_segment);
        }
        let segments = self.store.read_multi_values_bytes(&big_keys).await?;
        for segment in segments {
            match segment {
                None => {
                    // Segment 0 announced `count` segments; all must exist.
                    return Err(ValueSplittingError::MissingSegment);
                }
                Some(segment) => {
                    big_value.extend(segment);
                }
            }
        }
        Ok(Some(big_value))
    }
    /// A logical key exists iff its segment-0 key exists.
    async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error> {
        let mut big_key = key.to_vec();
        big_key.extend(&[0, 0, 0, 0]);
        Ok(self.store.contains_key(&big_key).await?)
    }
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
        let big_keys = keys
            .iter()
            .map(|key| {
                let mut big_key = key.clone();
                big_key.extend(&[0, 0, 0, 0]);
                big_key
            })
            .collect::<Vec<_>>();
        Ok(self.store.contains_keys(&big_keys).await?)
    }
    /// Batched variant of `read_value_bytes`: reads all segment-0 values first,
    /// then fetches the extra segments of all keys in a single multi-read.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
        let mut big_keys = Vec::new();
        for key in keys {
            let mut big_key = key.clone();
            big_key.extend(&[0, 0, 0, 0]);
            big_keys.push(big_key);
        }
        let values = self.store.read_multi_values_bytes(&big_keys).await?;
        let mut big_values = Vec::<Option<Vec<u8>>>::new();
        // `keys_add` collects the extra-segment keys of all entries, in order;
        // `n_blocks[idx]` remembers how many segments entry `idx` has (0 = absent).
        let mut keys_add = Vec::new();
        let mut n_blocks = Vec::new();
        for (key, value) in keys.iter().zip(values) {
            match value {
                None => {
                    n_blocks.push(0);
                    big_values.push(None);
                }
                Some(value) => {
                    let count = Self::read_count_from_value(&value)?;
                    let big_value = value[4..].to_vec();
                    for i in 1..count {
                        let big_key_segment = Self::get_segment_key(key, i)?;
                        keys_add.push(big_key_segment);
                    }
                    n_blocks.push(count);
                    big_values.push(Some(big_value));
                }
            }
        }
        if !keys_add.is_empty() {
            let mut segments = self
                .store
                .read_multi_values_bytes(&keys_add)
                .await?
                .into_iter();
            // The segments iterator yields in the same order the keys were
            // pushed, so each entry consumes exactly `count - 1` items.
            for (idx, count) in n_blocks.iter().enumerate() {
                if count > &1 {
                    let value = big_values.get_mut(idx).unwrap();
                    if let Some(ref mut value) = value {
                        for _ in 1..*count {
                            let segment = segments.next().unwrap().unwrap();
                            value.extend(segment);
                        }
                    }
                }
            }
        }
        Ok(big_values)
    }
    /// Lists logical keys: only segment-0 physical keys (index suffix 0) are
    /// reported, with the suffix stripped.
    async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        let mut keys = Vec::new();
        for big_key in self.store.find_keys_by_prefix(key_prefix).await? {
            let len = big_key.len();
            if Self::read_index_from_key(&big_key)? == 0 {
                let key = big_key[0..len - 4].to_vec();
                keys.push(key);
            }
        }
        Ok(keys)
    }
    /// Reassembles logical key/value pairs from a prefix scan.
    ///
    /// Relies on the underlying scan being lexicographically ordered: the
    /// big-endian index suffix makes segments 1..count follow their segment 0
    /// immediately, so they can be consumed sequentially from the iterator.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
        let small_key_values = self.store.find_key_values_by_prefix(key_prefix).await?;
        let mut small_kv_iterator = small_key_values.into_iter();
        let mut key_values = Vec::new();
        while let Some((mut big_key, value)) = small_kv_iterator.next() {
            if Self::read_index_from_key(&big_key)? != 0 {
                continue; // Leftover segment from an earlier value.
            }
            big_key.truncate(big_key.len() - 4);
            let key = big_key;
            let count = Self::read_count_from_value(&value)?;
            let mut big_value = value[4..].to_vec();
            for idx in 1..count {
                let (big_key, value) = small_kv_iterator
                    .next()
                    .ok_or(ValueSplittingError::MissingSegment)?;
                // The next entry must be segment `idx` of the same logical key.
                ensure!(
                    Self::read_index_from_key(&big_key)? == idx
                        && big_key.starts_with(&key)
                        && big_key.len() == key.len() + 4,
                    ValueSplittingError::MissingSegment
                );
                big_value.extend(value);
            }
            key_values.push((key, big_value));
        }
        Ok(key_values)
    }
}
impl<K> WritableKeyValueStore for ValueSplittingStore<K>
where
    K: WritableKeyValueStore,
    K::Error: 'static,
{
    // Splitting removes the size limit for logical values.
    const MAX_VALUE_SIZE: usize = usize::MAX;
    /// Rewrites the batch so every value fits the inner store's size limit.
    ///
    /// Note: deleting a key only deletes segment 0; stale extra segments may
    /// remain in the inner store (see the tests below) but are ignored by the
    /// read path because reads are driven by segment 0's count.
    async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error> {
        let mut batch_new = Batch::new();
        for operation in batch.operations {
            match operation {
                WriteOperation::Delete { key } => {
                    let mut big_key = key.to_vec();
                    big_key.extend(&[0, 0, 0, 0]);
                    batch_new.delete_key(big_key);
                }
                WriteOperation::Put { key, mut value } => {
                    let big_key = Self::get_segment_key(&key, 0)?;
                    let mut count: u32 = 1;
                    // Segment 0 carries a 4-byte count prefix, so it can hold
                    // at most MAX_VALUE_SIZE - 4 bytes of payload.
                    let value_ext = if value.len() <= K::MAX_VALUE_SIZE - 4 {
                        Self::get_initial_count_first_chunk(count, &value)?
                    } else {
                        let remainder = value.split_off(K::MAX_VALUE_SIZE - 4);
                        for value_chunk in remainder.chunks(K::MAX_VALUE_SIZE) {
                            let big_key_segment = Self::get_segment_key(&key, count)?;
                            batch_new.put_key_value_bytes(big_key_segment, value_chunk.to_vec());
                            count += 1;
                        }
                        Self::get_initial_count_first_chunk(count, &value)?
                    };
                    batch_new.put_key_value_bytes(big_key, value_ext);
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    // A logical prefix is also a physical prefix, so this
                    // removes all segments of all matching keys.
                    batch_new.delete_key_prefix(key_prefix);
                }
            }
        }
        Ok(self.store.write_batch(batch_new).await?)
    }
    async fn clear_journal(&self) -> Result<(), Self::Error> {
        Ok(self.store.clear_journal().await?)
    }
}
impl<D> KeyValueDatabase for ValueSplittingDatabase<D>
where
D: KeyValueDatabase,
D::Error: 'static,
{
type Config = D::Config;
type Store = ValueSplittingStore<D::Store>;
fn get_name() -> String {
format!("value splitting {}", D::get_name())
}
async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error> {
let database = D::connect(config, namespace).await?;
Ok(Self { database })
}
fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
let store = self.database.open_shared(root_key)?;
Ok(ValueSplittingStore { store })
}
fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
let store = self.database.open_exclusive(root_key)?;
Ok(ValueSplittingStore { store })
}
async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error> {
Ok(D::list_all(config).await?)
}
async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error> {
Ok(self.database.list_root_keys().await?)
}
async fn delete_all(config: &Self::Config) -> Result<(), Self::Error> {
Ok(D::delete_all(config).await?)
}
async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error> {
Ok(D::exists(config, namespace).await?)
}
async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
Ok(D::create(config, namespace).await?)
}
async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
Ok(D::delete(config, namespace).await?)
}
}
#[cfg(with_testing)]
impl<D> TestKeyValueDatabase for ValueSplittingDatabase<D>
where
D: TestKeyValueDatabase,
D::Error: 'static,
{
async fn new_test_config() -> Result<D::Config, Self::Error> {
Ok(D::new_test_config().await?)
}
}
impl<D> ValueSplittingStore<D>
where
    D: WithError,
{
    /// Creates a new store that deals with big values from one that does not.
    pub fn new(store: D) -> Self {
        ValueSplittingStore { store }
    }

    /// Returns the physical key of segment `index` of the logical key `key`:
    /// the key followed by the index as 4 big-endian bytes. The big-endian
    /// encoding keeps the segments of a value ordered by index in a
    /// lexicographic prefix scan.
    fn get_segment_key(key: &[u8], index: u32) -> Result<Vec<u8>, ValueSplittingError<D::Error>> {
        // `u32::to_be_bytes` yields the same 4 bytes as serializing with BCS
        // (fixed-size little-endian) and reversing, without the fallible and
        // allocating round trip through the serializer.
        let mut big_key_segment = key.to_vec();
        big_key_segment.extend(index.to_be_bytes());
        Ok(big_key_segment)
    }

    /// Builds the stored form of segment 0: the total segment `count` as 4
    /// big-endian bytes followed by the first chunk of the logical value.
    fn get_initial_count_first_chunk(
        count: u32,
        first_chunk: &[u8],
    ) -> Result<Vec<u8>, ValueSplittingError<D::Error>> {
        let mut value_ext = Vec::with_capacity(4 + first_chunk.len());
        value_ext.extend(count.to_be_bytes());
        value_ext.extend_from_slice(first_chunk);
        Ok(value_ext)
    }

    /// Reads the segment count from the 4-byte big-endian prefix of a stored
    /// segment-0 value, or fails with `NoCountAvailable` if the value is too
    /// short.
    fn read_count_from_value(value: &[u8]) -> Result<u32, ValueSplittingError<D::Error>> {
        if value.len() < 4 {
            return Err(ValueSplittingError::NoCountAvailable);
        }
        let bytes: [u8; 4] = value[0..4].try_into().expect("slice of length 4");
        Ok(u32::from_be_bytes(bytes))
    }

    /// Reads the segment index from the 4-byte big-endian suffix of a physical
    /// key, or fails with `TooShortKey` if the key is too short.
    fn read_index_from_key(key: &[u8]) -> Result<u32, ValueSplittingError<D::Error>> {
        let len = key.len();
        if len < 4 {
            return Err(ValueSplittingError::TooShortKey);
        }
        let bytes: [u8; 4] = key[len - 4..].try_into().expect("slice of length 4");
        Ok(u32::from_be_bytes(bytes))
    }
}
/// A memory store for which the values are limited to 100 bytes and can be used for tests.
#[derive(Clone)]
#[cfg(with_testing)]
pub struct LimitedTestMemoryStore {
    // The wrapped in-memory store; the size limit is enforced in `write_batch`.
    inner: MemoryStore,
}
#[cfg(with_testing)]
impl Default for LimitedTestMemoryStore {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(with_testing)]
impl WithError for LimitedTestMemoryStore {
    type Error = MemoryStoreError;
}
// All read operations delegate unchanged to the inner memory store.
#[cfg(with_testing)]
impl ReadableKeyValueStore for LimitedTestMemoryStore {
    const MAX_KEY_SIZE: usize = usize::MAX;
    fn max_stream_queries(&self) -> usize {
        self.inner.max_stream_queries()
    }
    fn root_key(&self) -> Result<Vec<u8>, MemoryStoreError> {
        self.inner.root_key()
    }
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, MemoryStoreError> {
        self.inner.read_value_bytes(key).await
    }
    async fn contains_key(&self, key: &[u8]) -> Result<bool, MemoryStoreError> {
        self.inner.contains_key(key).await
    }
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, MemoryStoreError> {
        self.inner.contains_keys(keys).await
    }
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, MemoryStoreError> {
        self.inner.read_multi_values_bytes(keys).await
    }
    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, MemoryStoreError> {
        self.inner.find_keys_by_prefix(key_prefix).await
    }
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, MemoryStoreError> {
        self.inner.find_key_values_by_prefix(key_prefix).await
    }
}
#[cfg(with_testing)]
impl WritableKeyValueStore for LimitedTestMemoryStore {
    // We set up the MAX_VALUE_SIZE to the artificially low value of 100
    // purely for testing purposes.
    const MAX_VALUE_SIZE: usize = 100;
    /// Writes the batch after asserting (test-only) that no value exceeds the
    /// artificial size limit.
    async fn write_batch(&self, batch: Batch) -> Result<(), MemoryStoreError> {
        assert!(
            batch.check_value_size(Self::MAX_VALUE_SIZE),
            "The batch size is not adequate for this test"
        );
        self.inner.write_batch(batch).await
    }
    async fn clear_journal(&self) -> Result<(), MemoryStoreError> {
        self.inner.clear_journal().await
    }
}
#[cfg(with_testing)]
impl LimitedTestMemoryStore {
    /// Creates a `LimitedTestMemoryStore`
    pub fn new() -> Self {
        LimitedTestMemoryStore {
            inner: MemoryStore::new_for_testing(),
        }
    }
}
/// Provides a `LimitedTestMemoryStore<()>` that can be used for tests.
#[cfg(with_testing)]
pub fn create_value_splitting_memory_store() -> ValueSplittingStore<LimitedTestMemoryStore> {
    let limited_store = LimitedTestMemoryStore::new();
    ValueSplittingStore::new(limited_store)
}
#[cfg(test)]
mod tests {
    // NOTE(review): importing the crate under its own name from inside the
    // crate relies on a self-alias (`extern crate self as linera_views`) —
    // confirm it is declared in lib.rs.
    use linera_views::{
        batch::Batch,
        store::{ReadableKeyValueStore, WritableKeyValueStore},
        value_splitting::{LimitedTestMemoryStore, ValueSplittingStore},
    };
    use rand::Rng;
    // The key splitting means that when a key is overwritten
    // some previous segments may still be present.
    #[tokio::test]
    #[expect(clippy::assertions_on_constants)]
    async fn test_value_splitting1_testing_leftovers() {
        let store = LimitedTestMemoryStore::new();
        const MAX_LEN: usize = LimitedTestMemoryStore::MAX_VALUE_SIZE;
        assert!(MAX_LEN > 10);
        let big_store = ValueSplittingStore::new(store.clone());
        let key = vec![0, 0];
        // Write a key with a long value
        let mut batch = Batch::new();
        let value = Vec::from([0; MAX_LEN + 1]);
        batch.put_key_value_bytes(key.clone(), value.clone());
        big_store.write_batch(batch).await.unwrap();
        let value_read = big_store.read_value_bytes(&key).await.unwrap();
        assert_eq!(value_read, Some(value));
        // Write a key with a smaller value
        let mut batch = Batch::new();
        let value = Vec::from([0, 1]);
        batch.put_key_value_bytes(key.clone(), value.clone());
        big_store.write_batch(batch).await.unwrap();
        let value_read = big_store.read_value_bytes(&key).await.unwrap();
        assert_eq!(value_read, Some(value));
        // Two segments are present even though only one is used
        // (physical keys are the logical key plus a 4-byte big-endian index).
        let keys = store.find_keys_by_prefix(&[0]).await.unwrap();
        assert_eq!(keys, vec![vec![0, 0, 0, 0, 0], vec![0, 0, 0, 0, 1]]);
    }
    #[tokio::test]
    async fn test_value_splitting2_testing_splitting() {
        let store = LimitedTestMemoryStore::new();
        const MAX_LEN: usize = LimitedTestMemoryStore::MAX_VALUE_SIZE;
        let big_store = ValueSplittingStore::new(store.clone());
        let key = vec![0, 0];
        // Writing a big value
        let mut batch = Batch::new();
        let mut value = Vec::new();
        let mut rng = crate::random::make_deterministic_rng();
        // 2 * MAX_LEN - 4 bytes fill segment 0 (MAX_LEN - 4 payload bytes
        // after the count prefix) plus one full extra segment.
        for _ in 0..2 * MAX_LEN - 4 {
            value.push(rng.gen::<u8>());
        }
        batch.put_key_value_bytes(key.clone(), value.clone());
        big_store.write_batch(batch).await.unwrap();
        let value_read = big_store.read_value_bytes(&key).await.unwrap();
        assert_eq!(value_read, Some(value.clone()));
        // Reading the segments and checking
        let mut value_concat = Vec::<u8>::new();
        for index in 0..2 {
            let mut segment_key = key.clone();
            // BCS-serialized then reversed equals the 4-byte big-endian index.
            let mut bytes = bcs::to_bytes(&index).unwrap();
            bytes.reverse();
            segment_key.extend(bytes);
            let value_read = store.read_value_bytes(&segment_key).await.unwrap();
            let Some(value_read) = value_read else {
                unreachable!()
            };
            if index == 0 {
                // Segment 0 starts with the 4-byte segment count.
                value_concat.extend(&value_read[4..]);
            } else {
                value_concat.extend(&value_read);
            }
        }
        assert_eq!(value, value_concat);
    }
    #[tokio::test]
    async fn test_value_splitting3_write_and_delete() {
        let store = LimitedTestMemoryStore::new();
        const MAX_LEN: usize = LimitedTestMemoryStore::MAX_VALUE_SIZE;
        let big_store = ValueSplittingStore::new(store.clone());
        let key = vec![0, 0];
        // writing a big key
        let mut batch = Batch::new();
        let mut value = Vec::new();
        let mut rng = crate::random::make_deterministic_rng();
        for _ in 0..3 * MAX_LEN - 4 {
            value.push(rng.gen::<u8>());
        }
        batch.put_key_value_bytes(key.clone(), value.clone());
        big_store.write_batch(batch).await.unwrap();
        // deleting it
        let mut batch = Batch::new();
        batch.delete_key(key.clone());
        big_store.write_batch(batch).await.unwrap();
        // reading everything (there are leftover keys)
        let key_values = big_store.find_key_values_by_prefix(&[0]).await.unwrap();
        assert_eq!(key_values.len(), 0);
        // Two segments remain
        // (deletion only removes segment 0; stale segments 1 and 2 survive).
        let keys = store.find_keys_by_prefix(&[0]).await.unwrap();
        assert_eq!(keys, vec![vec![0, 0, 0, 0, 1], vec![0, 0, 0, 0, 2]]);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/memory.rs | linera-views/src/backends/memory.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueDatabase`] in memory.
use std::{
collections::BTreeMap,
sync::{Arc, LazyLock, Mutex, RwLock},
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::{Batch, WriteOperation},
common::get_key_range_for_prefix,
store::{
KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
};
/// The initial configuration of the system
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MemoryStoreConfig {
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
    /// Whether a namespace should be immediately cleaned up from memory when the
    /// connection object is dropped.
    pub kill_on_drop: bool,
}
/// The number of streams for the test
#[cfg(with_testing)]
const TEST_MEMORY_MAX_STREAM_QUERIES: usize = 10;
/// The values in a partition.
type MemoryStoreMap = BTreeMap<Vec<u8>, Vec<u8>>;
/// The container for the `MemoryStoreMap`s by namespace and then root key
#[derive(Default)]
struct MemoryDatabases {
    // namespace -> (root key -> shared partition map)
    databases: BTreeMap<String, BTreeMap<Vec<u8>, Arc<RwLock<MemoryStoreMap>>>>,
}
/// A connection to a namespace of key-values in memory.
#[derive(Clone, Debug)]
pub struct MemoryDatabase {
    /// The current namespace.
    namespace: String,
    /// The maximum number of queries used for a stream.
    max_stream_queries: usize,
    /// Whether to remove the namespace on drop.
    kill_on_drop: bool,
}
impl MemoryDatabases {
    /// Opens (creating it on first use) the partition of `namespace` keyed by
    /// `root_key`, returning a store handle that shares the underlying map.
    fn sync_open(
        &mut self,
        namespace: &str,
        max_stream_queries: usize,
        root_key: &[u8],
    ) -> Result<MemoryStore, MemoryStoreError> {
        let stores = self
            .databases
            .get_mut(namespace)
            .ok_or(MemoryStoreError::NamespaceNotFound)?;
        let map = stores
            .entry(root_key.to_vec())
            .or_insert_with(|| Arc::new(RwLock::new(MemoryStoreMap::new())))
            .clone();
        Ok(MemoryStore {
            map,
            root_key: root_key.to_vec(),
            max_stream_queries,
        })
    }
    /// Lists every registered namespace.
    fn sync_list_all(&self) -> Vec<String> {
        self.databases.keys().cloned().collect()
    }
    /// Lists the root keys of `namespace`; an unknown namespace yields an
    /// empty list.
    fn sync_list_root_keys(&self, namespace: &str) -> Vec<Vec<u8>> {
        self.databases
            .get(namespace)
            .map(|stores| stores.keys().cloned().collect())
            .unwrap_or_default()
    }
    /// Tests whether `namespace` is registered.
    fn sync_exists(&self, namespace: &str) -> bool {
        self.databases.contains_key(namespace)
    }
    /// Registers `namespace` with no partitions, replacing any existing entry.
    fn sync_create(&mut self, namespace: &str) {
        self.databases
            .insert(namespace.to_string(), BTreeMap::new());
    }
    /// Removes `namespace` together with all of its partitions.
    fn sync_delete(&mut self, namespace: &str) {
        self.databases.remove(namespace);
    }
}
/// The global table of namespaces.
// Process-wide registry: all `MemoryDatabase` connections in the same process
// see the same namespaces.
static MEMORY_DATABASES: LazyLock<Mutex<MemoryDatabases>> =
    LazyLock::new(|| Mutex::new(MemoryDatabases::default()));
/// A virtual DB client where data are persisted in memory.
#[derive(Clone)]
pub struct MemoryStore {
    /// The map used for storing the data.
    // Shared with every other store opened on the same namespace/root key.
    map: Arc<RwLock<MemoryStoreMap>>,
    /// The root key.
    root_key: Vec<u8>,
    /// The maximum number of queries used for a stream.
    max_stream_queries: usize,
}
impl WithError for MemoryDatabase {
    type Error = MemoryStoreError;
}
impl WithError for MemoryStore {
    type Error = MemoryStoreError;
}
impl ReadableKeyValueStore for MemoryStore {
    const MAX_KEY_SIZE: usize = usize::MAX;
    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }
    fn root_key(&self) -> Result<Vec<u8>, MemoryStoreError> {
        Ok(self.root_key.clone())
    }
    /// Returns a copy of the value stored under `key`, if any.
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        Ok(guard.get(key).cloned())
    }
    /// Tests whether `key` is present.
    async fn contains_key(&self, key: &[u8]) -> Result<bool, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        Ok(guard.contains_key(key))
    }
    /// Tests the presence of each key, preserving order.
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        Ok(keys.iter().map(|key| guard.contains_key(key)).collect())
    }
    /// Reads each key, preserving order; absent keys yield `None`.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        Ok(keys.iter().map(|key| guard.get(key).cloned()).collect())
    }
    /// Returns the keys starting with `key_prefix`, with the prefix stripped.
    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        let prefix_len = key_prefix.len();
        Ok(guard
            .range(get_key_range_for_prefix(key_prefix.to_vec()))
            .map(|(key, _)| key[prefix_len..].to_vec())
            .collect())
    }
    /// Returns the key/value pairs whose key starts with `key_prefix`, with
    /// the prefix stripped from the keys.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, MemoryStoreError> {
        let guard = self
            .map
            .read()
            .expect("MemoryStore lock should not be poisoned");
        let prefix_len = key_prefix.len();
        Ok(guard
            .range(get_key_range_for_prefix(key_prefix.to_vec()))
            .map(|(key, value)| (key[prefix_len..].to_vec(), value.clone()))
            .collect())
    }
}
impl WritableKeyValueStore for MemoryStore {
    const MAX_VALUE_SIZE: usize = usize::MAX;
    /// Applies the batch operations in order under a single write lock.
    async fn write_batch(&self, batch: Batch) -> Result<(), MemoryStoreError> {
        let mut guard = self
            .map
            .write()
            .expect("MemoryStore lock should not be poisoned");
        for operation in batch.operations {
            match operation {
                WriteOperation::Put { key, value } => {
                    guard.insert(key, value);
                }
                WriteOperation::Delete { key } => {
                    guard.remove(&key);
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    // Collect the matching keys first: they cannot be removed
                    // while the range iterator still borrows the map.
                    let doomed_keys = guard
                        .range(get_key_range_for_prefix(key_prefix))
                        .map(|(key, _)| key.clone())
                        .collect::<Vec<_>>();
                    for key in doomed_keys {
                        guard.remove(&key);
                    }
                }
            }
        }
        Ok(())
    }
    /// No-op: the memory store keeps no journal.
    async fn clear_journal(&self) -> Result<(), MemoryStoreError> {
        Ok(())
    }
}
impl MemoryStore {
    /// Creates a `MemoryStore` that doesn't belong to any registered namespace.
    #[cfg(with_testing)]
    pub fn new_for_testing() -> Self {
        MemoryStore {
            map: Arc::new(RwLock::new(MemoryStoreMap::new())),
            root_key: Vec::new(),
            max_stream_queries: TEST_MEMORY_MAX_STREAM_QUERIES,
        }
    }
}
impl Drop for MemoryDatabase {
    /// When `kill_on_drop` is set, dropping the connection removes the whole
    /// namespace from the global table.
    fn drop(&mut self) {
        if !self.kill_on_drop {
            return;
        }
        MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned")
            .databases
            .remove(&self.namespace);
    }
}
// All administration goes through the process-global `MEMORY_DATABASES`
// mutex; the connection object itself only carries the namespace and config.
impl KeyValueDatabase for MemoryDatabase {
    type Config = MemoryStoreConfig;
    type Store = MemoryStore;
    fn get_name() -> String {
        "memory".to_string()
    }
    /// Connects to an existing namespace; fails if it was never created.
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, MemoryStoreError> {
        let databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        if !databases.sync_exists(namespace) {
            return Err(MemoryStoreError::NamespaceNotFound);
        };
        Ok(MemoryDatabase {
            namespace: namespace.to_string(),
            max_stream_queries: config.max_stream_queries,
            kill_on_drop: config.kill_on_drop,
        })
    }
    /// Opens the partition for `root_key`, creating it on first use.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, MemoryStoreError> {
        let mut databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        databases.sync_open(&self.namespace, self.max_stream_queries, root_key)
    }
    /// Same as `open_shared`: this backend makes no exclusivity distinction.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, MemoryStoreError> {
        self.open_shared(root_key)
    }
    async fn list_all(_config: &Self::Config) -> Result<Vec<String>, MemoryStoreError> {
        let databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        Ok(databases.sync_list_all())
    }
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, MemoryStoreError> {
        let databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        Ok(databases.sync_list_root_keys(&self.namespace))
    }
    async fn exists(_config: &Self::Config, namespace: &str) -> Result<bool, MemoryStoreError> {
        let databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        Ok(databases.sync_exists(namespace))
    }
    /// Registers a new namespace; fails if it already exists.
    async fn create(_config: &Self::Config, namespace: &str) -> Result<(), MemoryStoreError> {
        let mut databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        if databases.sync_exists(namespace) {
            return Err(MemoryStoreError::StoreAlreadyExists);
        }
        databases.sync_create(namespace);
        Ok(())
    }
    /// Removes the namespace; deleting an unknown namespace is not an error.
    async fn delete(_config: &Self::Config, namespace: &str) -> Result<(), MemoryStoreError> {
        let mut databases = MEMORY_DATABASES
            .lock()
            .expect("MEMORY_DATABASES lock should not be poisoned");
        databases.sync_delete(namespace);
        Ok(())
    }
}
#[cfg(with_testing)]
impl TestKeyValueDatabase for MemoryDatabase {
    /// Builds the memory configuration used by tests: the test stream-query
    /// limit, without cleanup-on-drop.
    async fn new_test_config() -> Result<MemoryStoreConfig, MemoryStoreError> {
        let config = MemoryStoreConfig {
            max_stream_queries: TEST_MEMORY_MAX_STREAM_QUERIES,
            kill_on_drop: false,
        };
        Ok(config)
    }
}
/// The error type for [`MemoryStore`].
#[derive(Error, Debug)]
pub enum MemoryStoreError {
    /// Store already exists during a create operation
    #[error("Store already exists during a create operation")]
    StoreAlreadyExists,
    /// Serialization error with BCS.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    /// The namespace does not exist
    #[error("The namespace does not exist")]
    NamespaceNotFound,
}
impl KeyValueStoreError for MemoryStoreError {
    // Backend tag used to label errors coming from this store.
    const BACKEND: &'static str = "memory";
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/mod.rs | linera-views/src/backends/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
pub mod journaling;
#[cfg(with_metrics)]
pub mod metering;
pub mod value_splitting;
pub mod memory;
pub mod lru_caching;
pub mod dual;
#[cfg(with_scylladb)]
pub mod scylla_db;
#[cfg(with_rocksdb)]
pub mod rocks_db;
#[cfg(with_dynamodb)]
pub mod dynamo_db;
#[cfg(with_indexeddb)]
pub mod indexed_db;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/lru_caching.rs | linera-views/src/backends/lru_caching.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Add LRU (least recently used) caching to a given store.
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
#[cfg(with_testing)]
use crate::memory::MemoryDatabase;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::{Batch, WriteOperation},
lru_prefix_cache::{LruPrefixCache, StorageCacheConfig},
store::{KeyValueDatabase, ReadableKeyValueStore, WithError, WritableKeyValueStore},
};
/// Prometheus counters recording cache hits and misses for each cached read
/// operation of the LRU caching store. Each counter is lazily registered on
/// first use.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::register_int_counter_vec;
    use prometheus::IntCounterVec;
    /// The total number of cache read value misses.
    pub static READ_VALUE_CACHE_MISS_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "num_read_value_cache_miss",
            "Number of read value cache misses",
            &[],
        )
    });
    /// The total number of read value cache hits.
    pub static READ_VALUE_CACHE_HIT_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "num_read_value_cache_hits",
            "Number of read value cache hits",
            &[],
        )
    });
    /// The total number of contains key cache misses.
    pub static CONTAINS_KEY_CACHE_MISS_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "num_contains_key_cache_miss",
            "Number of contains key cache misses",
            &[],
        )
    });
    /// The total number of contains key cache hits.
    pub static CONTAINS_KEY_CACHE_HIT_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "num_contains_key_cache_hit",
            "Number of contains key cache hits",
            &[],
        )
    });
    /// The total number of find_keys_by_prefix cache misses.
    pub static FIND_KEYS_BY_PREFIX_CACHE_MISS_COUNT: LazyLock<IntCounterVec> =
        LazyLock::new(|| {
            register_int_counter_vec(
                "num_find_keys_by_prefix_cache_miss",
                "Number of find keys by prefix cache misses",
                &[],
            )
        });
    /// The total number of find_keys_by_prefix cache hits.
    pub static FIND_KEYS_BY_PREFIX_CACHE_HIT_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "num_find_keys_by_prefix_cache_hit",
            "Number of find keys by prefix cache hits",
            &[],
        )
    });
    /// The total number of find_key_values_by_prefix cache misses.
    pub static FIND_KEY_VALUES_BY_PREFIX_CACHE_MISS_COUNT: LazyLock<IntCounterVec> =
        LazyLock::new(|| {
            register_int_counter_vec(
                "num_find_key_values_by_prefix_cache_miss",
                "Number of find key values by prefix cache misses",
                &[],
            )
        });
    /// The total number of find_key_values_by_prefix cache hits.
    pub static FIND_KEY_VALUES_BY_PREFIX_CACHE_HIT_COUNT: LazyLock<IntCounterVec> =
        LazyLock::new(|| {
            register_int_counter_vec(
                "num_find_key_values_by_prefix_cache_hit",
                "Number of find key values by prefix cache hits",
                &[],
            )
        });
}
/// The maximum number of entries in the cache.
/// If the number of entries in the cache is too large then the underlying maps
/// become the limiting factor.
pub const DEFAULT_STORAGE_CACHE_CONFIG: StorageCacheConfig = StorageCacheConfig {
    // NOTE(review): the size limits below are presumably in bytes — confirm
    // against `StorageCacheConfig`'s own documentation.
    max_cache_size: 10000000,
    max_value_entry_size: 1000000,
    max_find_keys_entry_size: 1000000,
    max_find_key_values_entry_size: 1000000,
    // Setting this to 0 disables caching entirely (see `LruCachingStore::new`).
    max_cache_entries: 1000,
    max_cache_value_size: 10000000,
    max_cache_find_keys_size: 10000000,
    max_cache_find_key_values_size: 10000000,
};
/// A key-value database with added LRU caching.
#[derive(Clone)]
pub struct LruCachingDatabase<D> {
    /// The inner store that is called by the LRU cache one.
    database: D,
    /// The cache configuration, applied to every store opened from this
    /// database via `open_shared` / `open_exclusive`.
    config: StorageCacheConfig,
}
/// A key-value store with added LRU caching.
#[derive(Clone)]
pub struct LruCachingStore<S> {
    /// The inner store that is called by the LRU cache one.
    store: S,
    /// The LRU cache of values; `None` when caching is disabled
    /// (`max_cache_entries == 0` in the configuration).
    cache: Option<Arc<Mutex<LruPrefixCache>>>,
}
// The caching layer introduces no failure modes of its own: errors surface
// unchanged from the wrapped database.
impl<D> WithError for LruCachingDatabase<D>
where
    D: WithError,
{
    type Error = D::Error;
}
// The caching layer introduces no failure modes of its own: errors surface
// unchanged from the wrapped store.
impl<S> WithError for LruCachingStore<S>
where
    S: WithError,
{
    type Error = S::Error;
}
impl<K> ReadableKeyValueStore for LruCachingStore<K>
where
    K: ReadableKeyValueStore,
{
    // The LRU cache does not change the underlying store's size limits.
    const MAX_KEY_SIZE: usize = K::MAX_KEY_SIZE;
    fn max_stream_queries(&self) -> usize {
        self.store.max_stream_queries()
    }
    fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
        self.store.root_key()
    }
    /// Reads a value, serving it from the cache when possible; a miss is
    /// fetched from the underlying store and then inserted into the cache.
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        let Some(cache) = &self.cache else {
            return self.store.read_value_bytes(key).await;
        };
        // First inquiring in the read_value_bytes LRU
        {
            let mut cache = cache.lock().unwrap();
            if let Some(value) = cache.query_read_value(key) {
                #[cfg(with_metrics)]
                metrics::READ_VALUE_CACHE_HIT_COUNT
                    .with_label_values(&[])
                    .inc();
                return Ok(value);
            }
        }
        // Cache miss. The lock is released (scope above) before awaiting
        // on the underlying store, then re-taken to record the result.
        #[cfg(with_metrics)]
        metrics::READ_VALUE_CACHE_MISS_COUNT
            .with_label_values(&[])
            .inc();
        let value = self.store.read_value_bytes(key).await?;
        let mut cache = cache.lock().unwrap();
        cache.insert_read_value(key, &value);
        Ok(value)
    }
    /// Tests for key presence, caching the answer on a miss.
    async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error> {
        let Some(cache) = &self.cache else {
            return self.store.contains_key(key).await;
        };
        {
            let mut cache = cache.lock().unwrap();
            if let Some(value) = cache.query_contains_key(key) {
                #[cfg(with_metrics)]
                metrics::CONTAINS_KEY_CACHE_HIT_COUNT
                    .with_label_values(&[])
                    .inc();
                return Ok(value);
            }
        }
        #[cfg(with_metrics)]
        metrics::CONTAINS_KEY_CACHE_MISS_COUNT
            .with_label_values(&[])
            .inc();
        let result = self.store.contains_key(key).await?;
        let mut cache = cache.lock().unwrap();
        cache.insert_contains_key(key, result);
        Ok(result)
    }
    /// Batched `contains_key`: answers as many keys as possible from the
    /// cache and queries the store only for the remaining ones.
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
        let Some(cache) = &self.cache else {
            return self.store.contains_keys(keys).await;
        };
        let size = keys.len();
        let mut results = vec![false; size];
        // `indices` records which positions of `results` still need an answer
        // from the store; `key_requests` holds the corresponding keys.
        let mut indices = Vec::new();
        let mut key_requests = Vec::new();
        {
            let mut cache = cache.lock().unwrap();
            for i in 0..size {
                if let Some(value) = cache.query_contains_key(&keys[i]) {
                    #[cfg(with_metrics)]
                    metrics::CONTAINS_KEY_CACHE_HIT_COUNT
                        .with_label_values(&[])
                        .inc();
                    results[i] = value;
                } else {
                    #[cfg(with_metrics)]
                    metrics::CONTAINS_KEY_CACHE_MISS_COUNT
                        .with_label_values(&[])
                        .inc();
                    indices.push(i);
                    key_requests.push(keys[i].clone());
                }
            }
        }
        if !key_requests.is_empty() {
            let key_results = self.store.contains_keys(&key_requests).await?;
            let mut cache = cache.lock().unwrap();
            for ((index, result), key) in indices.into_iter().zip(key_results).zip(key_requests) {
                results[index] = result;
                cache.insert_contains_key(&key, result);
            }
        }
        Ok(results)
    }
    /// Batched read: fills cache hits immediately and fetches the misses
    /// from the store in a single call, caching their values afterwards.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
        let Some(cache) = &self.cache else {
            return self.store.read_multi_values_bytes(keys).await;
        };
        let mut result = Vec::with_capacity(keys.len());
        let mut cache_miss_indices = Vec::new();
        let mut miss_keys = Vec::new();
        {
            let mut cache = cache.lock().unwrap();
            for (i, key) in keys.iter().enumerate() {
                if let Some(value) = cache.query_read_value(key) {
                    #[cfg(with_metrics)]
                    metrics::READ_VALUE_CACHE_HIT_COUNT
                        .with_label_values(&[])
                        .inc();
                    result.push(value);
                } else {
                    #[cfg(with_metrics)]
                    metrics::READ_VALUE_CACHE_MISS_COUNT
                        .with_label_values(&[])
                        .inc();
                    // Placeholder; overwritten below once the store answers.
                    result.push(None);
                    cache_miss_indices.push(i);
                    miss_keys.push(key.clone());
                }
            }
        }
        if !miss_keys.is_empty() {
            let values = self.store.read_multi_values_bytes(&miss_keys).await?;
            let mut cache = cache.lock().unwrap();
            for (i, (key, value)) in cache_miss_indices
                .into_iter()
                .zip(miss_keys.into_iter().zip(values))
            {
                cache.insert_read_value(&key, &value);
                result[i] = value;
            }
        }
        Ok(result)
    }
    /// Finds keys by prefix. Prefix queries are only served from (and written
    /// to) the cache when the cache has exclusive access; see
    /// `get_exclusive_cache`.
    async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        let Some(cache) = self.get_exclusive_cache() else {
            return self.store.find_keys_by_prefix(key_prefix).await;
        };
        {
            let mut cache = cache.lock().unwrap();
            if let Some(value) = cache.query_find_keys(key_prefix) {
                #[cfg(with_metrics)]
                metrics::FIND_KEYS_BY_PREFIX_CACHE_HIT_COUNT
                    .with_label_values(&[])
                    .inc();
                return Ok(value);
            }
        }
        #[cfg(with_metrics)]
        metrics::FIND_KEYS_BY_PREFIX_CACHE_MISS_COUNT
            .with_label_values(&[])
            .inc();
        let keys = self.store.find_keys_by_prefix(key_prefix).await?;
        let mut cache = cache.lock().unwrap();
        cache.insert_find_keys(key_prefix.to_vec(), &keys);
        Ok(keys)
    }
    /// Finds key/value pairs by prefix; same exclusive-access caching policy
    /// as `find_keys_by_prefix`.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
        let Some(cache) = self.get_exclusive_cache() else {
            return self.store.find_key_values_by_prefix(key_prefix).await;
        };
        {
            let mut cache = cache.lock().unwrap();
            if let Some(value) = cache.query_find_key_values(key_prefix) {
                #[cfg(with_metrics)]
                metrics::FIND_KEY_VALUES_BY_PREFIX_CACHE_HIT_COUNT
                    .with_label_values(&[])
                    .inc();
                return Ok(value);
            }
        }
        #[cfg(with_metrics)]
        metrics::FIND_KEY_VALUES_BY_PREFIX_CACHE_MISS_COUNT
            .with_label_values(&[])
            .inc();
        let key_values = self.store.find_key_values_by_prefix(key_prefix).await?;
        let mut cache = cache.lock().unwrap();
        cache.insert_find_key_values(key_prefix.to_vec(), &key_values);
        Ok(key_values)
    }
}
impl<K> WritableKeyValueStore for LruCachingStore<K>
where
    K: WritableKeyValueStore,
{
    // The LRU cache does not change the underlying store's size limits.
    const MAX_VALUE_SIZE: usize = K::MAX_VALUE_SIZE;
    /// Writes the batch to the underlying store, then replays its operations
    /// on the cache so cached entries stay consistent with the store.
    async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error> {
        let Some(cache) = &self.cache else {
            // Caching is disabled: forward the batch without cloning it.
            // (The clone is only needed when the operations must be replayed
            // on the cache after the store consumed the batch.)
            return self.store.write_batch(batch).await;
        };
        // The store is updated first; if it fails, the cache is left untouched.
        self.store.write_batch(batch.clone()).await?;
        let mut cache = cache.lock().unwrap();
        for operation in &batch.operations {
            match operation {
                WriteOperation::Put { key, value } => {
                    cache.put_key_value(key, value);
                }
                WriteOperation::Delete { key } => {
                    cache.delete_key(key);
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    cache.delete_prefix(key_prefix);
                }
            }
        }
        Ok(())
    }
    /// Clears the journal of the underlying store; the cache is unaffected.
    async fn clear_journal(&self) -> Result<(), Self::Error> {
        self.store.clear_journal().await
    }
}
/// The configuration type for the `LruCachingStore`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LruCachingConfig<C> {
    /// The inner configuration of the `LruCachingStore`.
    pub inner_config: C,
    /// The cache size parameters being used (see `StorageCacheConfig`).
    pub storage_cache_config: StorageCacheConfig,
}
impl<D> KeyValueDatabase for LruCachingDatabase<D>
where
    D: KeyValueDatabase,
{
    type Config = LruCachingConfig<D::Config>;
    type Store = LruCachingStore<D::Store>;
    fn get_name() -> String {
        format!("lru caching {}", D::get_name())
    }
    /// Connects to the inner database and remembers the cache configuration
    /// so it can be applied to every opened store.
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error> {
        let database = D::connect(&config.inner_config, namespace).await?;
        Ok(LruCachingDatabase {
            database,
            config: config.storage_cache_config.clone(),
        })
    }
    /// Opens a shared partition; the resulting store's cache will not serve
    /// prefix queries (no exclusive access).
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let store = self.database.open_shared(root_key)?;
        let store = LruCachingStore::new(
            store,
            self.config.clone(),
            /* has_exclusive_access */ false,
        );
        Ok(store)
    }
    /// Opens an exclusive partition; the cache is marked as having exclusive
    /// access, enabling prefix-query caching.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let store = self.database.open_exclusive(root_key)?;
        let store = LruCachingStore::new(
            store,
            self.config.clone(),
            /* has_exclusive_access */ true,
        );
        Ok(store)
    }
    // The admin operations below delegate directly to the inner database.
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error> {
        D::list_all(&config.inner_config).await
    }
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error> {
        self.database.list_root_keys().await
    }
    async fn delete_all(config: &Self::Config) -> Result<(), Self::Error> {
        D::delete_all(&config.inner_config).await
    }
    async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error> {
        D::exists(&config.inner_config, namespace).await
    }
    async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        D::create(&config.inner_config, namespace).await
    }
    async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        D::delete(&config.inner_config, namespace).await
    }
}
impl<S> LruCachingStore<S> {
    /// Wraps `store` with an LRU cache. A `max_cache_entries` of zero
    /// disables caching entirely (no cache is allocated).
    fn new(store: S, config: StorageCacheConfig, has_exclusive_access: bool) -> Self {
        let cache = if config.max_cache_entries == 0 {
            None
        } else {
            let cache = LruPrefixCache::new(config, has_exclusive_access);
            Some(Arc::new(Mutex::new(cache)))
        };
        Self { store, cache }
    }
    /// Returns the cache only when it was created with exclusive access;
    /// otherwise (shared access or caching disabled) returns `None`.
    fn get_exclusive_cache(&self) -> Option<&Arc<Mutex<LruPrefixCache>>> {
        let cache = self.cache.as_ref()?;
        if cache.lock().unwrap().has_exclusive_access() {
            Some(cache)
        } else {
            None
        }
    }
}
/// An in-memory database wrapped with LRU caching, used in tests.
#[cfg(with_testing)]
pub type LruCachingMemoryDatabase = LruCachingDatabase<MemoryDatabase>;
#[cfg(with_testing)]
impl<D> TestKeyValueDatabase for LruCachingDatabase<D>
where
    D: TestKeyValueDatabase,
{
    /// Builds a test configuration: the inner database's test config paired
    /// with the default storage-cache parameters.
    async fn new_test_config() -> Result<LruCachingConfig<D::Config>, D::Error> {
        Ok(LruCachingConfig {
            inner_config: D::new_test_config().await?,
            storage_cache_config: DEFAULT_STORAGE_CACHE_CONFIG,
        })
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/dual.rs | linera-views/src/backends/dual.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueStore`] by combining two existing stores.
use serde::{Deserialize, Serialize};
use thiserror::Error;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::Batch,
store::{
KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
};
/// A dual database, routing each partition to one of two underlying databases
/// according to a static root-key assignment `A`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DualDatabase<D1, D2, A> {
    /// The first database.
    pub first_database: D1,
    /// The second database.
    pub second_database: D2,
    /// Marker for the static root key assignment.
    _marker: std::marker::PhantomData<A>,
}
/// The initial configuration of the system: one configuration per
/// underlying database.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DualStoreConfig<C1, C2> {
    /// The first config.
    pub first_config: C1,
    /// The second config.
    pub second_config: C2,
}
/// The store in use for a given root key.
#[derive(Clone, Copy, Debug)]
pub enum StoreInUse {
    /// The first store.
    First,
    /// The second store.
    Second,
}
/// The trait for a (static) root key assignment.
// On non-web targets, `trait_variant` adds `Send + Sync` bounds to the trait.
#[cfg_attr(not(web), trait_variant::make(Send + Sync))]
pub trait DualStoreRootKeyAssignment {
    /// Obtains the store assigned to this root key.
    fn assigned_store(root_key: &[u8]) -> Result<StoreInUse, bcs::Error>;
}
/// A partition opened in one of the two databases; each partition lives
/// entirely in either the first or the second store.
#[derive(Clone)]
pub enum DualStore<S1, S2> {
    /// The first store.
    First(S1),
    /// The second store.
    Second(S2),
}
// Errors from either underlying database are wrapped into `DualStoreError`.
impl<D1, D2, A> WithError for DualDatabase<D1, D2, A>
where
    D1: WithError,
    D2: WithError,
{
    type Error = DualStoreError<D1::Error, D2::Error>;
}
// Errors from either underlying store are wrapped into `DualStoreError`.
impl<S1, S2> WithError for DualStore<S1, S2>
where
    S1: WithError,
    S2: WithError,
{
    type Error = DualStoreError<S1::Error, S2::Error>;
}
impl<S1, S2> ReadableKeyValueStore for DualStore<S1, S2>
where
    S1: ReadableKeyValueStore,
    S2: ReadableKeyValueStore,
{
    // TODO(#2524): consider changing MAX_KEY_SIZE into a function.
    // A key must be acceptable to whichever store it is routed to, so the
    // limit is the smaller of the two.
    const MAX_KEY_SIZE: usize = if S1::MAX_KEY_SIZE < S2::MAX_KEY_SIZE {
        S1::MAX_KEY_SIZE
    } else {
        S2::MAX_KEY_SIZE
    };
    fn max_stream_queries(&self) -> usize {
        match self {
            Self::First(store) => store.max_stream_queries(),
            Self::Second(store) => store.max_stream_queries(),
        }
    }
    fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
        match self {
            Self::First(store) => store.root_key().map_err(DualStoreError::First),
            Self::Second(store) => store.root_key().map_err(DualStoreError::Second),
        }
    }
    /// Delegates to the store this partition lives in, tagging errors by side.
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        match self {
            Self::First(store) => store
                .read_value_bytes(key)
                .await
                .map_err(DualStoreError::First),
            Self::Second(store) => store
                .read_value_bytes(key)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error> {
        match self {
            Self::First(store) => store.contains_key(key).await.map_err(DualStoreError::First),
            Self::Second(store) => store
                .contains_key(key)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
        match self {
            Self::First(store) => store
                .contains_keys(keys)
                .await
                .map_err(DualStoreError::First),
            Self::Second(store) => store
                .contains_keys(keys)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
        match self {
            Self::First(store) => store
                .read_multi_values_bytes(keys)
                .await
                .map_err(DualStoreError::First),
            Self::Second(store) => store
                .read_multi_values_bytes(keys)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        match self {
            Self::First(store) => store
                .find_keys_by_prefix(key_prefix)
                .await
                .map_err(DualStoreError::First),
            Self::Second(store) => store
                .find_keys_by_prefix(key_prefix)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
        match self {
            Self::First(store) => store
                .find_key_values_by_prefix(key_prefix)
                .await
                .map_err(DualStoreError::First),
            Self::Second(store) => store
                .find_key_values_by_prefix(key_prefix)
                .await
                .map_err(DualStoreError::Second),
        }
    }
}
impl<S1, S2> WritableKeyValueStore for DualStore<S1, S2>
where
    S1: WritableKeyValueStore,
    S2: WritableKeyValueStore,
{
    // NOTE(review): unlike `MAX_KEY_SIZE` above, this does not take the
    // minimum of the two stores' limits — confirm this is intentional.
    const MAX_VALUE_SIZE: usize = usize::MAX;
    /// Writes the batch to the store this partition lives in.
    async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error> {
        match self {
            Self::First(store) => store.write_batch(batch).await.map_err(DualStoreError::First),
            Self::Second(store) => store
                .write_batch(batch)
                .await
                .map_err(DualStoreError::Second),
        }
    }
    /// Clears the journal of the store this partition lives in.
    async fn clear_journal(&self) -> Result<(), Self::Error> {
        match self {
            Self::First(store) => store.clear_journal().await.map_err(DualStoreError::First),
            Self::Second(store) => store
                .clear_journal()
                .await
                .map_err(DualStoreError::Second),
        }
    }
}
impl<D1, D2, A> KeyValueDatabase for DualDatabase<D1, D2, A>
where
    D1: KeyValueDatabase,
    D2: KeyValueDatabase,
    A: DualStoreRootKeyAssignment,
{
    type Config = DualStoreConfig<D1::Config, D2::Config>;
    type Store = DualStore<D1::Store, D2::Store>;
    fn get_name() -> String {
        format!("dual {} and {}", D1::get_name(), D2::get_name())
    }
    /// Connects to both underlying databases; both connections must succeed.
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error> {
        let first_database = D1::connect(&config.first_config, namespace)
            .await
            .map_err(DualStoreError::First)?;
        let second_database = D2::connect(&config.second_config, namespace)
            .await
            .map_err(DualStoreError::Second)?;
        let database = Self {
            first_database,
            second_database,
            _marker: std::marker::PhantomData,
        };
        Ok(database)
    }
    /// Opens a shared partition in whichever database the assignment `A`
    /// routes this root key to.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        match A::assigned_store(root_key)? {
            StoreInUse::First => {
                let store = self
                    .first_database
                    .open_shared(root_key)
                    .map_err(DualStoreError::First)?;
                Ok(DualStore::First(store))
            }
            StoreInUse::Second => {
                let store = self
                    .second_database
                    .open_shared(root_key)
                    .map_err(DualStoreError::Second)?;
                Ok(DualStore::Second(store))
            }
        }
    }
    /// Opens an exclusive partition in whichever database the assignment `A`
    /// routes this root key to.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        match A::assigned_store(root_key)? {
            StoreInUse::First => {
                let store = self
                    .first_database
                    .open_exclusive(root_key)
                    .map_err(DualStoreError::First)?;
                Ok(DualStore::First(store))
            }
            StoreInUse::Second => {
                let store = self
                    .second_database
                    .open_exclusive(root_key)
                    .map_err(DualStoreError::Second)?;
                Ok(DualStore::Second(store))
            }
        }
    }
    /// Lists namespaces that exist in both databases. Namespaces present only
    /// in the first database are skipped with a warning.
    // NOTE(review): namespaces existing only in the second database are never
    // enumerated at all — confirm this asymmetry is intended.
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error> {
        let namespaces1 = D1::list_all(&config.first_config)
            .await
            .map_err(DualStoreError::First)?;
        let mut namespaces = Vec::new();
        for namespace in namespaces1 {
            if D2::exists(&config.second_config, &namespace)
                .await
                .map_err(DualStoreError::Second)?
            {
                namespaces.push(namespace);
            } else {
                tracing::warn!("Namespace {} only exists in the first store", namespace);
            }
        }
        Ok(namespaces)
    }
    /// Concatenates the root keys of both databases (first, then second).
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error> {
        let mut root_keys = self
            .first_database
            .list_root_keys()
            .await
            .map_err(DualStoreError::First)?;
        root_keys.extend(
            self.second_database
                .list_root_keys()
                .await
                .map_err(DualStoreError::Second)?,
        );
        Ok(root_keys)
    }
    /// A namespace "exists" only if it exists in both databases.
    async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error> {
        Ok(D1::exists(&config.first_config, namespace)
            .await
            .map_err(DualStoreError::First)?
            && D2::exists(&config.second_config, namespace)
                .await
                .map_err(DualStoreError::Second)?)
    }
    /// Creates the namespace in whichever databases lack it; errors only when
    /// it already exists in both. This makes `create` repair a half-created
    /// namespace.
    async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        let exists1 = D1::exists(&config.first_config, namespace)
            .await
            .map_err(DualStoreError::First)?;
        let exists2 = D2::exists(&config.second_config, namespace)
            .await
            .map_err(DualStoreError::Second)?;
        if exists1 && exists2 {
            return Err(DualStoreError::StoreAlreadyExists);
        }
        if !exists1 {
            D1::create(&config.first_config, namespace)
                .await
                .map_err(DualStoreError::First)?;
        }
        if !exists2 {
            D2::create(&config.second_config, namespace)
                .await
                .map_err(DualStoreError::Second)?;
        }
        Ok(())
    }
    /// Deletes the namespace from both databases (first, then second).
    async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        D1::delete(&config.first_config, namespace)
            .await
            .map_err(DualStoreError::First)?;
        D2::delete(&config.second_config, namespace)
            .await
            .map_err(DualStoreError::Second)?;
        Ok(())
    }
}
#[cfg(with_testing)]
impl<D1, D2, A> TestKeyValueDatabase for DualDatabase<D1, D2, A>
where
    D1: TestKeyValueDatabase,
    D2: TestKeyValueDatabase,
    A: DualStoreRootKeyAssignment,
{
    /// Combines the test configurations of the two inner databases.
    async fn new_test_config() -> Result<Self::Config, Self::Error> {
        let first_config = D1::new_test_config().await.map_err(DualStoreError::First)?;
        let second_config = D2::new_test_config().await.map_err(DualStoreError::Second)?;
        Ok(DualStoreConfig {
            first_config,
            second_config,
        })
    }
}
/// The error type for [`DualStore`]: wraps the error of whichever underlying
/// store failed, plus admin-level failures of the dual database itself.
#[derive(Error, Debug)]
pub enum DualStoreError<E1, E2> {
    /// Store already exists during a create operation
    #[error("Store already exists during a create operation")]
    StoreAlreadyExists,
    /// Serialization error with BCS.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    /// First store.
    #[error("Error in first store: {0}")]
    First(E1),
    /// Second store.
    #[error("Error in second store: {0}")]
    Second(E2),
}
impl<E1, E2> KeyValueStoreError for DualStoreError<E1, E2>
where
    E1: KeyValueStoreError,
    E2: KeyValueStoreError,
{
    /// Identifier of this backend.
    const BACKEND: &'static str = "dual_store";
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/metering.rs | linera-views/src/backends/metering.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Adds metrics to a key-value store.
use std::{
collections::{btree_map::Entry, BTreeMap},
sync::{Arc, LazyLock, Mutex},
};
use convert_case::{Case, Casing};
use linera_base::prometheus_util::{
register_histogram_vec, register_int_counter_vec, MeasureLatency as _,
};
use prometheus::{HistogramVec, IntCounterVec};
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::Batch,
store::{KeyValueDatabase, ReadableKeyValueStore, WithError, WritableKeyValueStore},
};
#[derive(Clone)]
/// The implementation of the `KeyValueStoreMetrics` for the `KeyValueStore`.
pub struct KeyValueStoreMetrics {
    // Latency histograms, one per store-level operation.
    read_value_bytes_latency: HistogramVec,
    contains_key_latency: HistogramVec,
    contains_keys_latency: HistogramVec,
    read_multi_values_bytes_latency: HistogramVec,
    find_keys_by_prefix_latency: HistogramVec,
    find_key_values_by_prefix_latency: HistogramVec,
    write_batch_latency: HistogramVec,
    clear_journal_latency: HistogramVec,
    // Latency histograms for database-level (admin) operations.
    connect_latency: HistogramVec,
    open_shared_latency: HistogramVec,
    open_exclusive_latency: HistogramVec,
    list_all_latency: HistogramVec,
    list_root_keys_latency: HistogramVec,
    delete_all_latency: HistogramVec,
    exists_latency: HistogramVec,
    create_latency: HistogramVec,
    delete_latency: HistogramVec,
    // Size/count histograms and outcome counters per operation.
    read_value_none_cases: IntCounterVec,
    read_value_key_size: HistogramVec,
    read_value_value_size: HistogramVec,
    read_multi_values_num_entries: HistogramVec,
    read_multi_values_key_sizes: HistogramVec,
    contains_keys_num_entries: HistogramVec,
    contains_keys_key_sizes: HistogramVec,
    contains_key_key_size: HistogramVec,
    find_keys_by_prefix_prefix_size: HistogramVec,
    find_keys_by_prefix_num_keys: HistogramVec,
    find_keys_by_prefix_keys_size: HistogramVec,
    find_key_values_by_prefix_prefix_size: HistogramVec,
    find_key_values_by_prefix_num_keys: HistogramVec,
    find_key_values_by_prefix_key_values_size: HistogramVec,
    write_batch_size: HistogramVec,
    list_all_sizes: HistogramVec,
    exists_true_cases: IntCounterVec,
}
/// Registry mapping a store name to its shared metrics instance.
#[derive(Default)]
struct StoreMetrics {
    stores: BTreeMap<String, Arc<KeyValueStoreMetrics>>,
}
/// Global registry of per-store metric counters, keyed by store name.
/// (Used by every metered store, not only RocksDB.)
static STORE_COUNTERS: LazyLock<Mutex<StoreMetrics>> =
    LazyLock::new(|| Mutex::new(StoreMetrics::default()));
/// Returns the metrics counters registered under `name`, creating and
/// registering them on first use.
fn get_counter(name: &str) -> Arc<KeyValueStoreMetrics> {
    let mut store_metrics = STORE_COUNTERS.lock().unwrap();
    store_metrics
        .stores
        .entry(name.to_string())
        .or_insert_with(|| Arc::new(KeyValueStoreMetrics::new(name.to_string())))
        .clone()
}
impl KeyValueStoreMetrics {
/// Creation of a named Metered counter.
pub fn new(name: String) -> Self {
// name can be "rocks db". Then var_name = "rocks_db" and title_name = "RocksDb"
let var_name = name.replace(' ', "_");
let title_name = name.to_case(Case::Snake);
let entry1 = format!("{}_read_value_bytes_latency", var_name);
let entry2 = format!("{} read value bytes latency", title_name);
let read_value_bytes_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_contains_key_latency", var_name);
let entry2 = format!("{} contains key latency", title_name);
let contains_key_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_contains_keys_latency", var_name);
let entry2 = format!("{} contains keys latency", title_name);
let contains_keys_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_read_multi_value_bytes_latency", var_name);
let entry2 = format!("{} read multi value bytes latency", title_name);
let read_multi_values_bytes_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_keys_by_prefix_latency", var_name);
let entry2 = format!("{} find keys by prefix latency", title_name);
let find_keys_by_prefix_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_key_values_by_prefix_latency", var_name);
let entry2 = format!("{} find key values by prefix latency", title_name);
let find_key_values_by_prefix_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_write_batch_latency", var_name);
let entry2 = format!("{} write batch latency", title_name);
let write_batch_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_clear_journal_latency", var_name);
let entry2 = format!("{} clear journal latency", title_name);
let clear_journal_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_connect_latency", var_name);
let entry2 = format!("{} connect latency", title_name);
let connect_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_open_shared_latency", var_name);
let entry2 = format!("{} open shared partition", title_name);
let open_shared_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_open_exclusive_latency", var_name);
let entry2 = format!("{} open exclusive partition", title_name);
let open_exclusive_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_list_all_latency", var_name);
let entry2 = format!("{} list all latency", title_name);
let list_all_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_list_root_keys_latency", var_name);
let entry2 = format!("{} list root keys latency", title_name);
let list_root_keys_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_delete_all_latency", var_name);
let entry2 = format!("{} delete all latency", title_name);
let delete_all_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_exists_latency", var_name);
let entry2 = format!("{} exists latency", title_name);
let exists_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_create_latency", var_name);
let entry2 = format!("{} create latency", title_name);
let create_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_delete_latency", var_name);
let entry2 = format!("{} delete latency", title_name);
let delete_latency = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_read_value_none_cases", var_name);
let entry2 = format!("{} read value none cases", title_name);
let read_value_none_cases = register_int_counter_vec(&entry1, &entry2, &[]);
let entry1 = format!("{}_read_value_key_size", var_name);
let entry2 = format!("{} read value key size", title_name);
let read_value_key_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_read_value_value_size", var_name);
let entry2 = format!("{} read value value size", title_name);
let read_value_value_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_read_multi_values_num_entries", var_name);
let entry2 = format!("{} read multi values num entries", title_name);
let read_multi_values_num_entries = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_read_multi_values_key_sizes", var_name);
let entry2 = format!("{} read multi values key sizes", title_name);
let read_multi_values_key_sizes = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_contains_keys_num_entries", var_name);
let entry2 = format!("{} contains keys num entries", title_name);
let contains_keys_num_entries = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_contains_keys_key_sizes", var_name);
let entry2 = format!("{} contains keys key sizes", title_name);
let contains_keys_key_sizes = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_contains_key_key_size", var_name);
let entry2 = format!("{} contains key key size", title_name);
let contains_key_key_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_keys_by_prefix_prefix_size", var_name);
let entry2 = format!("{} find keys by prefix prefix size", title_name);
let find_keys_by_prefix_prefix_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_keys_by_prefix_num_keys", var_name);
let entry2 = format!("{} find keys by prefix num keys", title_name);
let find_keys_by_prefix_num_keys = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_keys_by_prefix_keys_size", var_name);
let entry2 = format!("{} find keys by prefix keys size", title_name);
let find_keys_by_prefix_keys_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_key_values_by_prefix_prefix_size", var_name);
let entry2 = format!("{} find key values by prefix prefix size", title_name);
let find_key_values_by_prefix_prefix_size =
register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_key_values_by_prefix_num_keys", var_name);
let entry2 = format!("{} find key values by prefix num keys", title_name);
let find_key_values_by_prefix_num_keys =
register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_find_key_values_by_prefix_key_values_size", var_name);
let entry2 = format!("{} find key values by prefix key values size", title_name);
let find_key_values_by_prefix_key_values_size =
register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_write_batch_size", var_name);
let entry2 = format!("{} write batch size", title_name);
let write_batch_size = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_list_all_sizes", var_name);
let entry2 = format!("{} list all sizes", title_name);
let list_all_sizes = register_histogram_vec(&entry1, &entry2, &[], None);
let entry1 = format!("{}_exists_true_cases", var_name);
let entry2 = format!("{} exists true cases", title_name);
let exists_true_cases = register_int_counter_vec(&entry1, &entry2, &[]);
KeyValueStoreMetrics {
read_value_bytes_latency,
contains_key_latency,
contains_keys_latency,
read_multi_values_bytes_latency,
find_keys_by_prefix_latency,
find_key_values_by_prefix_latency,
write_batch_latency,
clear_journal_latency,
connect_latency,
open_shared_latency,
open_exclusive_latency,
list_all_latency,
list_root_keys_latency,
delete_all_latency,
exists_latency,
create_latency,
delete_latency,
read_value_none_cases,
read_value_key_size,
read_value_value_size,
read_multi_values_num_entries,
read_multi_values_key_sizes,
contains_keys_num_entries,
contains_keys_key_sizes,
contains_key_key_size,
find_keys_by_prefix_prefix_size,
find_keys_by_prefix_num_keys,
find_keys_by_prefix_keys_size,
find_key_values_by_prefix_prefix_size,
find_key_values_by_prefix_num_keys,
find_key_values_by_prefix_key_values_size,
write_batch_size,
list_all_sizes,
exists_true_cases,
}
}
}
/// A metered database that keeps track of every operation.
///
/// Wraps a database handle and records per-operation metrics (latencies,
/// sizes, counters) in the shared [`KeyValueStoreMetrics`].
#[derive(Clone)]
pub struct MeteredDatabase<D> {
    /// The metrics being computed; shared with every store opened from this database.
    counter: Arc<KeyValueStoreMetrics>,
    /// The underlying database.
    database: D,
}
/// A metered store that keeps track of every operation.
///
/// Wraps a store handle and records per-operation metrics (latencies,
/// sizes, counters) in the shared [`KeyValueStoreMetrics`].
#[derive(Clone)]
pub struct MeteredStore<S> {
    /// The metrics being computed; shared with the parent database.
    counter: Arc<KeyValueStoreMetrics>,
    /// The underlying store.
    store: S,
}
/// The metered wrapper surfaces the wrapped database's error type unchanged.
impl<D> WithError for MeteredDatabase<D>
where
    D: WithError,
{
    type Error = D::Error;
}
/// The metered wrapper surfaces the wrapped store's error type unchanged.
impl<S> WithError for MeteredStore<S>
where
    S: WithError,
{
    type Error = S::Error;
}
impl<S> ReadableKeyValueStore for MeteredStore<S>
where
    S: ReadableKeyValueStore,
{
    const MAX_KEY_SIZE: usize = S::MAX_KEY_SIZE;

    fn max_stream_queries(&self) -> usize {
        self.store.max_stream_queries()
    }

    fn root_key(&self) -> Result<Vec<u8>, Self::Error> {
        self.store.root_key()
    }

    /// Reads a value, timing the call and recording the key size plus,
    /// depending on the outcome, the value size (hit) or the "none" counter
    /// (miss).
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.read_value_bytes_latency.measure_latency();
        metrics
            .read_value_key_size
            .with_label_values(&[])
            .observe(key.len() as f64);
        let maybe_value = self.store.read_value_bytes(key).await?;
        if let Some(value) = &maybe_value {
            metrics
                .read_value_value_size
                .with_label_values(&[])
                .observe(value.len() as f64);
        } else {
            metrics.read_value_none_cases.with_label_values(&[]).inc();
        }
        Ok(maybe_value)
    }

    /// Tests key presence, timing the call and recording the key size.
    async fn contains_key(&self, key: &[u8]) -> Result<bool, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.contains_key_latency.measure_latency();
        metrics
            .contains_key_key_size
            .with_label_values(&[])
            .observe(key.len() as f64);
        self.store.contains_key(key).await
    }

    /// Tests presence of several keys, recording how many keys were queried
    /// and their combined byte size.
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.contains_keys_latency.measure_latency();
        metrics
            .contains_keys_num_entries
            .with_label_values(&[])
            .observe(keys.len() as f64);
        let total_key_size: usize = keys.iter().map(Vec::len).sum();
        metrics
            .contains_keys_key_sizes
            .with_label_values(&[])
            .observe(total_key_size as f64);
        self.store.contains_keys(keys).await
    }

    /// Reads several values, recording how many keys were queried and their
    /// combined byte size.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.read_multi_values_bytes_latency.measure_latency();
        metrics
            .read_multi_values_num_entries
            .with_label_values(&[])
            .observe(keys.len() as f64);
        let total_key_size: usize = keys.iter().map(Vec::len).sum();
        metrics
            .read_multi_values_key_sizes
            .with_label_values(&[])
            .observe(total_key_size as f64);
        self.store.read_multi_values_bytes(keys).await
    }

    /// Lists keys under a prefix, recording the prefix size before the call
    /// and the number/combined size of the returned keys afterwards.
    async fn find_keys_by_prefix(&self, key_prefix: &[u8]) -> Result<Vec<Vec<u8>>, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.find_keys_by_prefix_latency.measure_latency();
        metrics
            .find_keys_by_prefix_prefix_size
            .with_label_values(&[])
            .observe(key_prefix.len() as f64);
        let keys = self.store.find_keys_by_prefix(key_prefix).await?;
        let total_key_size: usize = keys.iter().map(Vec::len).sum();
        metrics
            .find_keys_by_prefix_num_keys
            .with_label_values(&[])
            .observe(keys.len() as f64);
        metrics
            .find_keys_by_prefix_keys_size
            .with_label_values(&[])
            .observe(total_key_size as f64);
        Ok(keys)
    }

    /// Lists key/value pairs under a prefix, recording the prefix size
    /// before the call and the number/combined size of the results
    /// afterwards.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.find_key_values_by_prefix_latency.measure_latency();
        metrics
            .find_key_values_by_prefix_prefix_size
            .with_label_values(&[])
            .observe(key_prefix.len() as f64);
        let key_values = self.store.find_key_values_by_prefix(key_prefix).await?;
        let total_size: usize = key_values
            .iter()
            .map(|(key, value)| key.len() + value.len())
            .sum();
        metrics
            .find_key_values_by_prefix_num_keys
            .with_label_values(&[])
            .observe(key_values.len() as f64);
        metrics
            .find_key_values_by_prefix_key_values_size
            .with_label_values(&[])
            .observe(total_size as f64);
        Ok(key_values)
    }
}
impl<S> WritableKeyValueStore for MeteredStore<S>
where
    S: WritableKeyValueStore,
{
    const MAX_VALUE_SIZE: usize = S::MAX_VALUE_SIZE;

    /// Writes a batch, timing the call and recording the batch size.
    async fn write_batch(&self, batch: Batch) -> Result<(), Self::Error> {
        let metrics = &self.counter;
        let _timer = metrics.write_batch_latency.measure_latency();
        metrics
            .write_batch_size
            .with_label_values(&[])
            .observe(batch.size() as f64);
        self.store.write_batch(batch).await
    }

    /// Clears the journal, timing the call.
    async fn clear_journal(&self) -> Result<(), Self::Error> {
        let _timer = self.counter.clear_journal_latency.measure_latency();
        self.store.clear_journal().await
    }
}
impl<D> KeyValueDatabase for MeteredDatabase<D>
where
    D: KeyValueDatabase,
{
    type Config = D::Config;
    type Store = MeteredStore<D::Store>;

    fn get_name() -> String {
        D::get_name()
    }

    /// Connects to the underlying database while measuring the connection
    /// latency.
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, Self::Error> {
        let counter = get_counter(&D::get_name());
        // Scope the latency guard so `counter` can be moved into `Self`
        // afterwards; this removes the second `get_counter` registry lookup
        // the previous code needed to work around the guard's borrow.
        let database = {
            let _latency = counter.connect_latency.measure_latency();
            D::connect(config, namespace).await?
        };
        Ok(Self { counter, database })
    }

    /// Opens a shared store, measuring the open latency and sharing the
    /// metrics handle with the new store.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let _latency = self.counter.open_shared_latency.measure_latency();
        let store = self.database.open_shared(root_key)?;
        let counter = self.counter.clone();
        Ok(MeteredStore { counter, store })
    }

    /// Opens an exclusive store, measuring the open latency and sharing the
    /// metrics handle with the new store.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, Self::Error> {
        let _latency = self.counter.open_exclusive_latency.measure_latency();
        let store = self.database.open_exclusive(root_key)?;
        let counter = self.counter.clone();
        Ok(MeteredStore { counter, store })
    }

    /// Lists all namespaces, measuring latency and recording how many were
    /// found.
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, Self::Error> {
        let counter = get_counter(&D::get_name());
        let _latency = counter.list_all_latency.measure_latency();
        let namespaces = D::list_all(config).await?;
        // The latency guard only borrows the counter immutably, so the same
        // handle can record the result size; no second lookup is required.
        counter
            .list_all_sizes
            .with_label_values(&[])
            .observe(namespaces.len() as f64);
        Ok(namespaces)
    }

    /// Lists the root keys, measuring latency.
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, Self::Error> {
        let _latency = self.counter.list_root_keys_latency.measure_latency();
        self.database.list_root_keys().await
    }

    /// Deletes all namespaces, measuring latency.
    async fn delete_all(config: &Self::Config) -> Result<(), Self::Error> {
        let counter = get_counter(&D::get_name());
        let _latency = counter.delete_all_latency.measure_latency();
        D::delete_all(config).await
    }

    /// Tests for the existence of a namespace, measuring latency and
    /// counting positive answers.
    async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, Self::Error> {
        let counter = get_counter(&D::get_name());
        let _latency = counter.exists_latency.measure_latency();
        let result = D::exists(config, namespace).await?;
        if result {
            counter.exists_true_cases.with_label_values(&[]).inc();
        }
        Ok(result)
    }

    /// Creates a namespace, measuring latency.
    async fn create(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        let counter = get_counter(&D::get_name());
        let _latency = counter.create_latency.measure_latency();
        D::create(config, namespace).await
    }

    /// Deletes a namespace, measuring latency.
    async fn delete(config: &Self::Config, namespace: &str) -> Result<(), Self::Error> {
        let counter = get_counter(&D::get_name());
        let _latency = counter.delete_latency.measure_latency();
        D::delete(config, namespace).await
    }
}
#[cfg(with_testing)]
impl<D> TestKeyValueDatabase for MeteredDatabase<D>
where
    D: TestKeyValueDatabase,
{
    /// Forwards test-configuration creation to the wrapped database type.
    async fn new_test_config() -> Result<D::Config, Self::Error> {
        D::new_test_config().await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/dynamo_db.rs | linera-views/src/backends/dynamo_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueStore`] for the DynamoDB database.
use std::{
collections::HashMap,
env,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use async_lock::{Semaphore, SemaphoreGuard};
use aws_sdk_dynamodb::{
error::SdkError,
operation::{
batch_get_item::BatchGetItemError,
create_table::CreateTableError,
delete_table::DeleteTableError,
get_item::GetItemError,
list_tables::ListTablesError,
query::{QueryError, QueryOutput},
transact_write_items::TransactWriteItemsError,
},
primitives::Blob,
types::{
AttributeDefinition, AttributeValue, Delete, KeySchemaElement, KeyType, KeysAndAttributes,
ProvisionedThroughput, Put, ScalarAttributeType, TransactWriteItem,
},
Client,
};
use aws_smithy_types::error::operation::BuildError;
use futures::future::join_all;
use linera_base::{ensure, util::future::FutureSyncExt as _};
use serde::{Deserialize, Serialize};
use thiserror::Error;
#[cfg(with_metrics)]
use crate::metering::MeteredDatabase;
#[cfg(with_testing)]
use crate::store::TestKeyValueDatabase;
use crate::{
batch::SimpleUnorderedBatch,
common::get_uleb128_size,
journaling::{JournalConsistencyError, JournalingKeyValueDatabase},
lru_caching::{LruCachingConfig, LruCachingDatabase},
store::{
DirectWritableKeyValueStore, KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore,
WithError,
},
value_splitting::{ValueSplittingDatabase, ValueSplittingError},
};
/// Name of the environment variable holding the address of a DynamoDB local instance.
const DYNAMODB_LOCAL_ENDPOINT: &str = "DYNAMODB_LOCAL_ENDPOINT";
/// Gets the AWS configuration from the environment
async fn get_base_config() -> Result<aws_sdk_dynamodb::Config, DynamoDbStoreInternalError> {
let base_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest())
.boxed_sync()
.await;
Ok((&base_config).into())
}
/// Returns the DynamoDB local endpoint address from the environment, if set.
fn get_endpoint_address() -> Option<String> {
    env::var(DYNAMODB_LOCAL_ENDPOINT).ok()
}
/// Gets the DynamoDB local config.
///
/// # Panics
///
/// Panics if the `DYNAMODB_LOCAL_ENDPOINT` environment variable is not set:
/// this function is only reached when `use_dynamodb_local` is configured,
/// which requires the endpoint address to be exported.
async fn get_dynamodb_local_config() -> Result<aws_sdk_dynamodb::Config, DynamoDbStoreInternalError>
{
    let base_config = aws_config::load_defaults(aws_config::BehaviorVersion::latest())
        .boxed_sync()
        .await;
    // `expect` states the invariant instead of a bare `unwrap`.
    let endpoint_address = get_endpoint_address()
        .expect("DYNAMODB_LOCAL_ENDPOINT must be set to use DynamoDB local");
    let config = aws_sdk_dynamodb::config::Builder::from(&base_config)
        .endpoint_url(endpoint_address)
        .build();
    Ok(config)
}
/// DynamoDB forbids the iteration over the partition keys.
/// Therefore we use a special partition key named `[1]` for storing
/// the root keys. For normal root keys, we simply put a `[0]` in
/// front, therefore no intersection is possible.
const PARTITION_KEY_ROOT_KEY: &[u8] = &[1];
/// The attribute name of the partition key.
const PARTITION_ATTRIBUTE: &str = "item_partition";
/// A root key being used for testing the existence of tables.
const EMPTY_ROOT_KEY: &[u8] = &[0];
/// A key being used for testing the existence of tables.
const DB_KEY: &[u8] = &[0];
/// The attribute name of the primary key (used as a sort key).
const KEY_ATTRIBUTE: &str = "item_key";
/// The attribute name of the table value blob.
const VALUE_ATTRIBUTE: &str = "item_value";
/// The attribute for obtaining the primary key (used as a sort key) with the stored value.
const KEY_VALUE_ATTRIBUTE: &str = "item_key, item_value";
/// TODO(#1084): The scheme below with the `MAX_VALUE_SIZE` has to be checked
/// This is the maximum size of a raw value in DynamoDB.
const RAW_MAX_VALUE_SIZE: usize = 409600;
/// Fundamental constants in DynamoDB: The maximum size of a value is 400 KB.
/// See <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ServiceQuotas.html>
/// However, the value being written can also be the serialization of a `SimpleUnorderedBatch`,
/// therefore the actual `MAX_VALUE_SIZE` might be lower.
/// The maximum key size is 1024 bytes (see below) and we pack just one entry.
/// So if the key has 1024 bytes this gets us the inequality
/// `1 + 1 + serialized_size(1024)? + serialized_size(x)? <= 400*1024`
/// and so this simplifies to `1 + 1 + (2 + 1024) + (3 + x) <= 400 * 1024`
/// Note on the following formula:
/// * We write 3 because `get_uleb128_size(400*1024) == 3`
/// * We write `1 + 1` because the `SimpleUnorderedBatch` has two entries
///
/// This gets us a maximal value of 408569.
const VISIBLE_MAX_VALUE_SIZE: usize = RAW_MAX_VALUE_SIZE
    - MAX_KEY_SIZE
    - get_uleb128_size(RAW_MAX_VALUE_SIZE)
    - get_uleb128_size(MAX_KEY_SIZE)
    - 1
    - 1;
/// Fundamental constant in DynamoDB: The maximum size of a key is 1024 bytes.
/// We decrease by 1 because we prepend a `[1]` byte to every stored key.
/// See <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html>
const MAX_KEY_SIZE: usize = 1023;
/// Fundamental constant in DynamoDB: The maximum total size of a `TransactWriteItems`
/// request is 4 MB.
/// See <https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html>
/// We're taking a conservative value because the mode of computation is unclear.
const MAX_TRANSACT_WRITE_ITEM_TOTAL_SIZE: usize = 4000000;
/// The DynamoDB database is potentially handling an infinite number of connections.
/// However, for testing or some other purpose we really need to decrease the number of
/// connections.
#[cfg(with_testing)]
const TEST_DYNAMO_DB_MAX_CONCURRENT_QUERIES: usize = 10;
/// The number of entries in a stream of the tests can be controlled by this parameter for tests.
#[cfg(with_testing)]
const TEST_DYNAMO_DB_MAX_STREAM_QUERIES: usize = 10;
/// Fundamental constant in DynamoDB: a `TransactWriteItems` request can
/// contain at most 100 items.
/// See <https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html>
const MAX_TRANSACT_WRITE_ITEM_SIZE: usize = 100;
/// Maximum number of entries that can be obtained in a [`BatchGetItem`] operation.
/// The two constraints are at most 100 operations and at most 16 MB in total.
/// Since the maximum size of a value is 400 KB, this gets us 40 as an upper limit.
const MAX_BATCH_GET_ITEM_SIZE: usize = 40;
/// Builds the key attributes for a table item.
///
/// The key is composed of two binary attributes. The first one is the
/// partition key (`start_key`), which groups all items of one store in the
/// same partition so that range queries work. The second one is the actual
/// item key, stored with a leading `[1]` byte.
fn build_key(start_key: &[u8], key: Vec<u8>) -> HashMap<String, AttributeValue> {
    let mut prefixed_key = Vec::with_capacity(key.len() + 1);
    prefixed_key.push(1);
    prefixed_key.extend(key);
    let mut item = HashMap::new();
    item.insert(
        PARTITION_ATTRIBUTE.to_owned(),
        AttributeValue::B(Blob::new(start_key.to_vec())),
    );
    item.insert(
        KEY_ATTRIBUTE.to_owned(),
        AttributeValue::B(Blob::new(prefixed_key)),
    );
    item
}
/// Builds the full item (partition key, prefixed item key, and value blob)
/// for storing a table entry.
fn build_key_value(
    start_key: &[u8],
    key: Vec<u8>,
    value: Vec<u8>,
) -> HashMap<String, AttributeValue> {
    let mut prefixed_key = Vec::with_capacity(key.len() + 1);
    prefixed_key.push(1);
    prefixed_key.extend(key);
    let mut item = HashMap::new();
    item.insert(
        PARTITION_ATTRIBUTE.to_owned(),
        AttributeValue::B(Blob::new(start_key.to_vec())),
    );
    item.insert(
        KEY_ATTRIBUTE.to_owned(),
        AttributeValue::B(Blob::new(prefixed_key)),
    );
    item.insert(
        VALUE_ATTRIBUTE.to_owned(),
        AttributeValue::B(Blob::new(value)),
    );
    item
}
/// Checks that a key is of the correct size
fn check_key_size(key: &[u8]) -> Result<(), DynamoDbStoreInternalError> {
ensure!(
key.len() <= MAX_KEY_SIZE,
DynamoDbStoreInternalError::KeyTooLong
);
Ok(())
}
/// Extracts the key attribute from an item, stripping the leading `[1]`
/// byte and the first `prefix_len` bytes of the stored key.
fn extract_key(
    prefix_len: usize,
    attributes: &HashMap<String, AttributeValue>,
) -> Result<&[u8], DynamoDbStoreInternalError> {
    let attribute = attributes
        .get(KEY_ATTRIBUTE)
        .ok_or(DynamoDbStoreInternalError::MissingKey)?;
    if let AttributeValue::B(blob) = attribute {
        Ok(&blob.as_ref()[1 + prefix_len..])
    } else {
        Err(DynamoDbStoreInternalError::wrong_key_type(attribute))
    }
}
/// Extracts the value attribute from an item, as a borrowed byte slice.
fn extract_value(
    attributes: &HashMap<String, AttributeValue>,
) -> Result<&[u8], DynamoDbStoreInternalError> {
    // According to the official AWS DynamoDB documentation:
    // "Binary must have a length greater than zero if the attribute is used as a key attribute for a table or index"
    let attribute = attributes
        .get(VALUE_ATTRIBUTE)
        .ok_or(DynamoDbStoreInternalError::MissingValue)?;
    if let AttributeValue::B(blob) = attribute {
        Ok(blob.as_ref())
    } else {
        Err(DynamoDbStoreInternalError::wrong_value_type(attribute))
    }
}
/// Extracts the value attribute from an item, taking ownership of the bytes.
fn extract_value_owned(
    attributes: &mut HashMap<String, AttributeValue>,
) -> Result<Vec<u8>, DynamoDbStoreInternalError> {
    match attributes.remove(VALUE_ATTRIBUTE) {
        Some(AttributeValue::B(blob)) => Ok(blob.into_inner()),
        Some(value) => Err(DynamoDbStoreInternalError::wrong_value_type(&value)),
        None => Err(DynamoDbStoreInternalError::MissingValue),
    }
}
/// Extracts both the (unprefixed) key and the value attributes from an item.
fn extract_key_value(
    prefix_len: usize,
    attributes: &HashMap<String, AttributeValue>,
) -> Result<(&[u8], &[u8]), DynamoDbStoreInternalError> {
    Ok((
        extract_key(prefix_len, attributes)?,
        extract_value(attributes)?,
    ))
}
/// Accumulates the write operations of a batch into a list of
/// `TransactWriteItem`s, all scoped to a single partition key.
struct TransactionBuilder {
    /// The partition key under which all queued operations apply.
    start_key: Vec<u8>,
    /// The queued transactional write items.
    transactions: Vec<TransactWriteItem>,
}
impl TransactionBuilder {
    /// Creates an empty builder operating under the given partition key.
    fn new(start_key: &[u8]) -> Self {
        Self {
            start_key: start_key.to_owned(),
            transactions: Vec::new(),
        }
    }

    /// Queues a delete operation for `key`.
    fn insert_delete_request(
        &mut self,
        key: Vec<u8>,
        store: &DynamoDbStoreInternal,
    ) -> Result<(), DynamoDbStoreInternalError> {
        self.transactions
            .push(store.build_delete_transaction(&self.start_key, key)?);
        Ok(())
    }

    /// Queues a put operation storing `value` under `key`.
    fn insert_put_request(
        &mut self,
        key: Vec<u8>,
        value: Vec<u8>,
        store: &DynamoDbStoreInternal,
    ) -> Result<(), DynamoDbStoreInternalError> {
        self.transactions
            .push(store.build_put_transaction(&self.start_key, key, value)?);
        Ok(())
    }
}
/// A DynamoDB client.
#[derive(Clone, Debug)]
pub struct DynamoDbStoreInternal {
    // The AWS DynamoDB client.
    client: Client,
    // The table name backing this store.
    namespace: String,
    // Optional cap on concurrent queries; `None` means unlimited.
    semaphore: Option<Arc<Semaphore>>,
    // Preferred buffer size for async streams.
    max_stream_queries: usize,
    // Partition key under which all keys of this store live.
    start_key: Vec<u8>,
    // NOTE(review): presumably tracks whether the root key has been recorded
    // in the special root-key partition — its usage is outside this excerpt.
    root_key_written: Arc<AtomicBool>,
}
/// Database-level connection to DynamoDB for managing namespaces and partitions.
#[derive(Clone)]
pub struct DynamoDbDatabaseInternal {
    // The AWS DynamoDB client.
    client: Client,
    // The table name this database connects to.
    namespace: String,
    // Optional cap on concurrent queries; `None` means unlimited.
    semaphore: Option<Arc<Semaphore>>,
    // Preferred buffer size for async streams.
    max_stream_queries: usize,
}
/// Associates the common DynamoDB error type with the database handle.
impl WithError for DynamoDbDatabaseInternal {
    type Error = DynamoDbStoreInternalError;
}
/// The initial configuration of the system.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DynamoDbStoreInternalConfig {
    /// Whether to use DynamoDB local or not.
    pub use_dynamodb_local: bool,
    /// Maximum number of concurrent database queries allowed for this client
    /// (`None` means no limit is enforced).
    pub max_concurrent_queries: Option<usize>,
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
}
impl DynamoDbStoreInternalConfig {
    /// Builds a DynamoDB client for this configuration, targeting either the
    /// local instance or the default AWS endpoint.
    async fn client(&self) -> Result<Client, DynamoDbStoreInternalError> {
        if self.use_dynamodb_local {
            let config = get_dynamodb_local_config().await?;
            return Ok(Client::from_conf(config));
        }
        Ok(Client::from_conf(get_base_config().await?))
    }
}
impl KeyValueDatabase for DynamoDbDatabaseInternal {
    type Config = DynamoDbStoreInternalConfig;
    type Store = DynamoDbStoreInternal;
    /// Returns the human-readable name of this backend.
    fn get_name() -> String {
        "dynamodb internal".to_string()
    }
    /// Validates the namespace and builds the client connection.
    async fn connect(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<Self, DynamoDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let client = config.client().await?;
        // A semaphore is only installed when a concurrency cap is configured.
        let semaphore = config
            .max_concurrent_queries
            .map(|n| Arc::new(Semaphore::new(n)));
        let max_stream_queries = config.max_stream_queries;
        let namespace = namespace.to_string();
        let store = Self {
            client,
            namespace,
            semaphore,
            max_stream_queries,
        };
        Ok(store)
    }
    /// Opens a store view rooted at `root_key`. The `[0]` prefix keeps it
    /// disjoint from the special `[1]` partition used for root keys.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, DynamoDbStoreInternalError> {
        let mut start_key = EMPTY_ROOT_KEY.to_vec();
        start_key.extend(root_key);
        Ok(self.open_internal(start_key))
    }
    // NOTE(review): exclusivity is not enforced here — this simply delegates
    // to `open_shared`; confirm that this is the intended semantics.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, DynamoDbStoreInternalError> {
        self.open_shared(root_key)
    }
    /// Lists all namespaces (DynamoDB tables), following pagination until
    /// `last_evaluated_table_name` is exhausted.
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, DynamoDbStoreInternalError> {
        let client = config.client().await?;
        let mut namespaces = Vec::new();
        let mut start_table = None;
        loop {
            let response = client
                .list_tables()
                .set_exclusive_start_table_name(start_table)
                .send()
                .boxed_sync()
                .await?;
            if let Some(namespaces_blk) = response.table_names {
                namespaces.extend(namespaces_blk);
            }
            if response.last_evaluated_table_name.is_none() {
                break;
            } else {
                start_table = response.last_evaluated_table_name;
            }
        }
        Ok(namespaces)
    }
    /// Lists the root keys by querying the special `[1]` partition where
    /// root keys are recorded.
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, DynamoDbStoreInternalError> {
        let store = self.open_internal(PARTITION_KEY_ROOT_KEY.to_vec());
        store.find_keys_by_prefix(EMPTY_ROOT_KEY).await
    }
    /// Deletes every table returned by `list_all`.
    async fn delete_all(config: &Self::Config) -> Result<(), DynamoDbStoreInternalError> {
        let client = config.client().await?;
        let tables = Self::list_all(config).await?;
        for table in tables {
            client
                .delete_table()
                .table_name(&table)
                .send()
                .boxed_sync()
                .await?;
        }
        Ok(())
    }
    /// Tests whether the table `namespace` exists by issuing a probe read.
    async fn exists(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<bool, DynamoDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let client = config.client().await?;
        let key_db = build_key(EMPTY_ROOT_KEY, DB_KEY.to_vec());
        let response = client
            .get_item()
            .table_name(namespace)
            .set_key(Some(key_db))
            .send()
            .boxed_sync()
            .await;
        // Any successful read means the table exists.
        let Err(error) = response else {
            return Ok(true);
        };
        // NOTE(review): this relies on an exact match of the AWS error
        // message text; a wording change on the service side would turn a
        // "table missing" answer into an error — confirm this is acceptable.
        let test = match &error {
            SdkError::ServiceError(error) => match error.err() {
                GetItemError::ResourceNotFoundException(error) => {
                    error.message
                        == Some("Cannot do operations on a non-existent table".to_string())
                }
                _ => false,
            },
            _ => false,
        };
        if test {
            Ok(false)
        } else {
            Err(error.into())
        }
    }
    /// Creates the table `namespace` with the binary partition/sort-key
    /// schema used by this store.
    async fn create(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<(), DynamoDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let client = config.client().await?;
        client
            .create_table()
            .table_name(namespace)
            .attribute_definitions(
                AttributeDefinition::builder()
                    .attribute_name(PARTITION_ATTRIBUTE)
                    .attribute_type(ScalarAttributeType::B)
                    .build()?,
            )
            .attribute_definitions(
                AttributeDefinition::builder()
                    .attribute_name(KEY_ATTRIBUTE)
                    .attribute_type(ScalarAttributeType::B)
                    .build()?,
            )
            .key_schema(
                KeySchemaElement::builder()
                    .attribute_name(PARTITION_ATTRIBUTE)
                    .key_type(KeyType::Hash)
                    .build()?,
            )
            .key_schema(
                KeySchemaElement::builder()
                    .attribute_name(KEY_ATTRIBUTE)
                    .key_type(KeyType::Range)
                    .build()?,
            )
            .provisioned_throughput(
                ProvisionedThroughput::builder()
                    .read_capacity_units(10)
                    .write_capacity_units(10)
                    .build()?,
            )
            .send()
            .boxed_sync()
            .await?;
        Ok(())
    }
    /// Drops the table `namespace`.
    async fn delete(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<(), DynamoDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let client = config.client().await?;
        client
            .delete_table()
            .table_name(namespace)
            .send()
            .boxed_sync()
            .await?;
        Ok(())
    }
}
impl DynamoDbDatabaseInternal {
    /// Validates a namespace against the DynamoDB table [naming
    /// rules](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.NamingRules):
    /// 3 to 255 characters drawn from ASCII alphanumerics, `.`, `-` and `_`.
    fn check_namespace(namespace: &str) -> Result<(), InvalidNamespace> {
        match namespace.len() {
            0..=2 => return Err(InvalidNamespace::TooShort),
            3..=255 => (),
            _ => return Err(InvalidNamespace::TooLong),
        }
        let is_allowed = |character: char| {
            character.is_ascii_alphanumeric() || matches!(character, '.' | '-' | '_')
        };
        if namespace.chars().all(is_allowed) {
            Ok(())
        } else {
            Err(InvalidNamespace::InvalidCharacter)
        }
    }

    /// Creates a store view of this database rooted at `start_key`.
    fn open_internal(&self, start_key: Vec<u8>) -> DynamoDbStoreInternal {
        DynamoDbStoreInternal {
            client: self.client.clone(),
            namespace: self.namespace.clone(),
            semaphore: self.semaphore.clone(),
            max_stream_queries: self.max_stream_queries,
            start_key,
            root_key_written: Arc::new(AtomicBool::new(false)),
        }
    }
}
impl DynamoDbStoreInternal {
    /// Builds a `TransactWriteItem` deleting `key` under the partition
    /// `start_key`, after checking the key size limit.
    fn build_delete_transaction(
        &self,
        start_key: &[u8],
        key: Vec<u8>,
    ) -> Result<TransactWriteItem, DynamoDbStoreInternalError> {
        check_key_size(&key)?;
        let request = Delete::builder()
            .table_name(&self.namespace)
            .set_key(Some(build_key(start_key, key)))
            .build()?;
        Ok(TransactWriteItem::builder().delete(request).build())
    }
    /// Builds a `TransactWriteItem` storing `value` under `key` in the
    /// partition `start_key`, after checking the key and value size limits.
    fn build_put_transaction(
        &self,
        start_key: &[u8],
        key: Vec<u8>,
        value: Vec<u8>,
    ) -> Result<TransactWriteItem, DynamoDbStoreInternalError> {
        check_key_size(&key)?;
        ensure!(
            value.len() <= RAW_MAX_VALUE_SIZE,
            DynamoDbStoreInternalError::ValueLengthTooLarge
        );
        let request = Put::builder()
            .table_name(&self.namespace)
            .set_item(Some(build_key_value(start_key, key, value)))
            .build()?;
        Ok(TransactWriteItem::builder().put(request).build())
    }
    /// Obtains the semaphore lock on the database if needed.
    async fn acquire(&self) -> Option<SemaphoreGuard<'_>> {
        match &self.semaphore {
            None => None,
            Some(count) => Some(count.acquire().await),
        }
    }
    /// Runs one page of a `Query` returning the requested attribute(s) for
    /// all stored keys starting with `key_prefix` in partition `start_key`.
    async fn get_query_output(
        &self,
        attribute_str: &str,
        start_key: &[u8],
        key_prefix: &[u8],
        start_key_map: Option<HashMap<String, AttributeValue>>,
    ) -> Result<QueryOutput, DynamoDbStoreInternalError> {
        let _guard = self.acquire().await;
        let start_key = start_key.to_vec();
        // Stored keys carry a leading `[1]` byte, so the prefix must too.
        let mut prefixed_key_prefix = vec![1];
        prefixed_key_prefix.extend(key_prefix);
        let response = self
            .client
            .query()
            .table_name(&self.namespace)
            .projection_expression(attribute_str)
            .key_condition_expression(format!(
                "{PARTITION_ATTRIBUTE} = :partition and begins_with({KEY_ATTRIBUTE}, :prefix)"
            ))
            .expression_attribute_values(":partition", AttributeValue::B(Blob::new(start_key)))
            .expression_attribute_values(
                ":prefix",
                AttributeValue::B(Blob::new(prefixed_key_prefix)),
            )
            .set_exclusive_start_key(start_key_map)
            .send()
            .boxed_sync()
            .await?;
        Ok(response)
    }
    /// Reads the value stored under the full item key `key_db`, if any.
    async fn read_value_bytes_general(
        &self,
        key_db: HashMap<String, AttributeValue>,
    ) -> Result<Option<Vec<u8>>, DynamoDbStoreInternalError> {
        let _guard = self.acquire().await;
        let response = self
            .client
            .get_item()
            .table_name(&self.namespace)
            .set_key(Some(key_db))
            .send()
            .boxed_sync()
            .await?;
        match response.item {
            Some(mut item) => {
                let value = extract_value_owned(&mut item)?;
                Ok(Some(value))
            }
            None => Ok(None),
        }
    }
    /// Tests presence of the full item key `key_db` without fetching the
    /// value (only the partition attribute is projected).
    async fn contains_key_general(
        &self,
        key_db: HashMap<String, AttributeValue>,
    ) -> Result<bool, DynamoDbStoreInternalError> {
        let _guard = self.acquire().await;
        let response = self
            .client
            .get_item()
            .table_name(&self.namespace)
            .set_key(Some(key_db))
            .projection_expression(PARTITION_ATTRIBUTE)
            .send()
            .boxed_sync()
            .await?;
        Ok(response.item.is_some())
    }
    /// Collects all pages of a prefix query into a `QueryResponses`,
    /// following `last_evaluated_key` pagination.
    async fn get_list_responses(
        &self,
        attribute: &str,
        start_key: &[u8],
        key_prefix: &[u8],
    ) -> Result<QueryResponses, DynamoDbStoreInternalError> {
        check_key_size(key_prefix)?;
        let mut responses = Vec::new();
        let mut start_key_map = None;
        loop {
            let response = self
                .get_query_output(attribute, start_key, key_prefix, start_key_map)
                .await?;
            let last_evaluated = response.last_evaluated_key.clone();
            responses.push(response);
            match last_evaluated {
                None => {
                    break;
                }
                Some(value) => {
                    start_key_map = Some(value);
                }
            }
        }
        Ok(QueryResponses {
            prefix_len: key_prefix.len(),
            responses,
        })
    }
    /// Reads up to one `BatchGetItem`'s worth of keys, preserving input
    /// order (duplicates included) and retrying unprocessed keys.
    ///
    /// NOTE(review): `request_keys` retains duplicates when `keys` contains
    /// them, and DynamoDB rejects duplicate keys in a `BatchGetItem`
    /// request — confirm that callers deduplicate beforehand.
    async fn read_batch_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, DynamoDbStoreInternalError> {
        // Early return for empty keys
        if keys.is_empty() {
            return Ok(Vec::new());
        }
        let mut results = vec![None; keys.len()];
        // Build the request keys
        let mut request_keys = Vec::new();
        // Maps each distinct key to every position it occupies in `keys`.
        let mut key_to_index = HashMap::<Vec<u8>, Vec<usize>>::new();
        for (i, key) in keys.iter().enumerate() {
            check_key_size(key)?;
            let key_attrs = build_key(&self.start_key, key.clone());
            key_to_index.entry(key.clone()).or_default().push(i);
            request_keys.push(key_attrs);
        }
        let keys_and_attributes = KeysAndAttributes::builder()
            .set_keys(Some(request_keys))
            .build()?;
        let mut request_items = HashMap::new();
        request_items.insert(self.namespace.clone(), keys_and_attributes);
        // Execute batch get item request with retry for unprocessed keys
        let mut remaining_request_items = Some(request_items);
        while let Some(request_items) = remaining_request_items {
            // Skip if the request items are empty
            if request_items.is_empty() {
                break;
            }
            let _guard = self.acquire().await;
            let response = self
                .client
                .batch_get_item()
                .set_request_items(Some(request_items))
                .send()
                .boxed_sync()
                .await?;
            // Process returned items
            if let Some(mut responses) = response.responses {
                if let Some(items) = responses.remove(&self.namespace) {
                    for mut item in items {
                        // Extract key to find the original index
                        let key_attr = item
                            .get(KEY_ATTRIBUTE)
                            .ok_or(DynamoDbStoreInternalError::MissingKey)?;
                        if let AttributeValue::B(blob) = key_attr {
                            let prefixed_key = blob.as_ref();
                            let key = &prefixed_key[1..]; // Remove the [1] prefix
                            if let Some(indices) = key_to_index.get(key) {
                                // Clone the value for all duplicate positions
                                // except the last, which takes ownership.
                                if let Some((&last, rest)) = indices.split_last() {
                                    let value = extract_value_owned(&mut item)?;
                                    for index in rest {
                                        results[*index] = Some(value.clone());
                                    }
                                    results[last] = Some(value);
                                }
                            }
                        }
                    }
                }
            }
            // Handle unprocessed keys
            remaining_request_items = response.unprocessed_keys;
        }
        Ok(results)
    }
}
/// The accumulated pages of a paginated DynamoDB prefix query, together with
/// the length of the queried key prefix (needed to strip it from result keys).
struct QueryResponses {
    // Length of the key prefix used in the query.
    prefix_len: usize,
    // One `QueryOutput` per result page.
    responses: Vec<QueryOutput>,
}
impl QueryResponses {
    /// Iterates over the keys of all paginated responses, with the common
    /// prefix stripped from each key.
    fn keys(&self) -> impl Iterator<Item = Result<&[u8], DynamoDbStoreInternalError>> {
        self.responses
            .iter()
            .flat_map(|response| response.items.as_deref().unwrap_or_default())
            .map(|item| extract_key(self.prefix_len, item))
    }

    /// Iterates over the `(key, value)` pairs of all paginated responses,
    /// with the common prefix stripped from each key.
    fn key_values(
        &self,
    ) -> impl Iterator<Item = Result<(&[u8], &[u8]), DynamoDbStoreInternalError>> {
        self.responses
            .iter()
            .flat_map(|response| response.items.as_deref().unwrap_or_default())
            .map(|item| extract_key_value(self.prefix_len, item))
    }
}
/// Associates the DynamoDB-specific error type with the store.
impl WithError for DynamoDbStoreInternal {
    type Error = DynamoDbStoreInternalError;
}
impl ReadableKeyValueStore for DynamoDbStoreInternal {
    const MAX_KEY_SIZE: usize = MAX_KEY_SIZE;

    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }

    /// Recovers the root key by stripping the `EMPTY_ROOT_KEY` prefix from
    /// the start key.
    fn root_key(&self) -> Result<Vec<u8>, DynamoDbStoreInternalError> {
        assert!(self.start_key.starts_with(EMPTY_ROOT_KEY));
        let root_key = &self.start_key[EMPTY_ROOT_KEY.len()..];
        Ok(root_key.to_vec())
    }

    async fn read_value_bytes(
        &self,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, DynamoDbStoreInternalError> {
        check_key_size(key)?;
        let db_key = build_key(&self.start_key, key.to_vec());
        self.read_value_bytes_general(db_key).await
    }

    async fn contains_key(&self, key: &[u8]) -> Result<bool, DynamoDbStoreInternalError> {
        check_key_size(key)?;
        let db_key = build_key(&self.start_key, key.to_vec());
        self.contains_key_general(db_key).await
    }

    /// Checks all keys concurrently.
    async fn contains_keys(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<bool>, DynamoDbStoreInternalError> {
        let mut futures = Vec::with_capacity(keys.len());
        for key in keys {
            check_key_size(key)?;
            futures.push(self.contains_key_general(build_key(&self.start_key, key.clone())));
        }
        join_all(futures).await.into_iter().collect()
    }

    /// Reads all values by issuing concurrent batched requests of at most
    /// `MAX_BATCH_GET_ITEM_SIZE` keys each, preserving the input order.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, DynamoDbStoreInternalError> {
        if keys.is_empty() {
            return Ok(Vec::new());
        }
        let batch_futures = keys
            .chunks(MAX_BATCH_GET_ITEM_SIZE)
            .map(|key_batch| self.read_batch_values_bytes(key_batch));
        let batches: Vec<Vec<_>> = join_all(batch_futures)
            .await
            .into_iter()
            .collect::<Result<_, _>>()?;
        Ok(batches.into_iter().flatten().collect())
    }

    /// Lists the key suffixes (with `key_prefix` stripped) of all keys under
    /// `key_prefix`.
    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, DynamoDbStoreInternalError> {
        let responses = self
            .get_list_responses(KEY_ATTRIBUTE, &self.start_key, key_prefix)
            .await?;
        let mut keys = Vec::new();
        for key in responses.keys() {
            keys.push(key?.to_vec());
        }
        Ok(keys)
    }

    /// Lists the `(key suffix, value)` pairs of all keys under `key_prefix`.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, DynamoDbStoreInternalError> {
        let responses = self
            .get_list_responses(KEY_VALUE_ATTRIBUTE, &self.start_key, key_prefix)
            .await?;
        let mut key_values = Vec::new();
        for entry in responses.key_values() {
            let (key, value) = entry?;
            key_values.push((key.to_vec(), value.to_vec()));
        }
        Ok(key_values)
    }
}
impl DirectWritableKeyValueStore for DynamoDbStoreInternal {
    const MAX_BATCH_SIZE: usize = MAX_TRANSACT_WRITE_ITEM_SIZE;
    const MAX_BATCH_TOTAL_SIZE: usize = MAX_TRANSACT_WRITE_ITEM_TOTAL_SIZE;
    const MAX_VALUE_SIZE: usize = VISIBLE_MAX_VALUE_SIZE;
    // DynamoDB does not support the `DeletePrefix` operation.
    type Batch = SimpleUnorderedBatch;
    /// Writes the deletions and insertions of `batch` via `transact_write_items`,
    /// first recording the start key in the root-key partition on the very
    /// first write through this store instance.
    async fn write_batch(&self, batch: Self::Batch) -> Result<(), DynamoDbStoreInternalError> {
        // `fetch_or` flips the flag atomically, so the root key is recorded at
        // most once even with concurrent writers on this instance.
        if !self.root_key_written.fetch_or(true, Ordering::SeqCst) {
            let mut builder = TransactionBuilder::new(PARTITION_KEY_ROOT_KEY);
            builder.insert_put_request(self.start_key.clone(), vec![], self)?;
            self.client
                .transact_write_items()
                .set_transact_items(Some(builder.transactions))
                .send()
                .boxed_sync()
                .await?;
        }
        let mut builder = TransactionBuilder::new(&self.start_key);
        // Deletions are queued before insertions in the transaction.
        for key in batch.deletions {
            builder.insert_delete_request(key, self)?;
        }
        for (key, value) in batch.insertions {
            builder.insert_put_request(key, value, self)?;
        }
        if !builder.transactions.is_empty() {
            // Guard presumably throttles concurrent requests — see `acquire`.
            let _guard = self.acquire().await;
            self.client
                .transact_write_items()
                .set_transact_items(Some(builder.transactions))
                .send()
                .boxed_sync()
                .await?;
        }
        Ok(())
    }
}
/// Error when validating a namespace.
#[derive(Debug, Error)]
pub enum InvalidNamespace {
    /// The namespace should be at least 3 characters.
    #[error("Namespace must have at least 3 characters")]
    TooShort,
    /// The namespace should be at most 63 characters.
    #[error("Namespace must be at most 63 characters")]
    TooLong,
    /// The allowed characters are lowercase letters, numbers, periods and hyphens.
    #[error("Namespace must only contain lowercase letters, numbers, periods and hyphens")]
    InvalidCharacter,
}
/// Errors that occur when using [`DynamoDbStoreInternal`].
#[derive(Debug, Error)]
pub enum DynamoDbStoreInternalError {
/// An error occurred while getting the item.
#[error(transparent)]
Get(#[from] Box<SdkError<GetItemError>>),
/// An error occurred while batch getting items.
#[error(transparent)]
BatchGet(#[from] Box<SdkError<BatchGetItemError>>),
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/backends/indexed_db.rs | linera-views/src/backends/indexed_db.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Implements [`crate::store::KeyValueStore`] for the IndexedDB Web database.
use std::rc::Rc;
use futures::future;
use indexed_db_futures::{js_sys, prelude::*, web_sys};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::{
batch::{Batch, WriteOperation},
common::get_upper_bound_option,
store::{
KeyValueDatabase, KeyValueStoreError, ReadableKeyValueStore, WithError,
WritableKeyValueStore,
},
};
/// The initial configuration of the IndexedDB store.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IndexedDbStoreConfig {
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
}
/// The prefixes being used in the system.
// Domain byte under which all user data is stored (first byte of every start key).
static ROOT_KEY_DOMAIN: [u8; 1] = [0];
// Domain byte under which the set of used root keys is recorded by `write_batch`.
static STORED_ROOT_KEYS_PREFIX: [u8; 1] = [1];
/// The number of streams for the test
pub const TEST_INDEX_DB_MAX_STREAM_QUERIES: usize = 10;
// Name of the single IndexedDB database shared by all namespaces.
const DATABASE_NAME: &str = "linera";
/// A browser implementation of a key-value store using the [IndexedDB
/// API](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API#:~:text=IndexedDB%20is%20a%20low%2Dlevel,larger%20amounts%20of%20structured%20data.).
pub struct IndexedDbDatabase {
    /// The database used for storing the data.
    pub database: Rc<IdbDatabase>,
    /// The object store name used for storing the data.
    pub object_store_name: String,
    /// The maximum number of concurrent stream queries.
    pub max_stream_queries: usize,
}
/// A logical partition of [`IndexedDbDatabase`], all of whose keys share the
/// common `start_key` prefix.
pub struct IndexedDbStore {
    /// The database used for storing the data.
    pub database: Rc<IdbDatabase>,
    /// The object store name used for storing the data.
    pub object_store_name: String,
    /// The maximum number of concurrent stream queries.
    pub max_stream_queries: usize,
    /// The prefix prepended to every key of this partition: the root-key
    /// domain byte followed by the BCS-serialized root key.
    start_key: Vec<u8>,
}
impl IndexedDbStore {
    /// Opens a transaction on the configured object store and hands the store
    /// to the given closure.
    fn with_object_store<R>(
        &self,
        f: impl FnOnce(IdbObjectStore) -> R,
    ) -> Result<R, IndexedDbStoreError> {
        let transaction = self.database.transaction_on_one(&self.object_store_name)?;
        let object_store = transaction.object_store(&self.object_store_name)?;
        Ok(f(object_store))
    }

    /// Prepends this partition's start key to `key`.
    fn full_key(&self, key: &[u8]) -> Vec<u8> {
        let mut prefixed = Vec::with_capacity(self.start_key.len() + key.len());
        prefixed.extend_from_slice(&self.start_key);
        prefixed.extend_from_slice(key);
        prefixed
    }
}
impl IndexedDbDatabase {
    /// Creates a store view of this database rooted at `start_key`.
    fn open_internal(&self, start_key: Vec<u8>) -> IndexedDbStore {
        IndexedDbStore {
            database: self.database.clone(),
            object_store_name: self.object_store_name.clone(),
            max_stream_queries: self.max_stream_queries,
            start_key,
        }
    }
}
/// Converts a key prefix into an `IdbKeyRange` covering the keys that start
/// with that prefix. When no strict upper bound exists for the prefix, the
/// range is unbounded above.
fn prefix_to_range(prefix: &[u8]) -> Result<web_sys::IdbKeyRange, wasm_bindgen::JsValue> {
    let lower: wasm_bindgen::JsValue = js_sys::Uint8Array::from(prefix).into();
    match get_upper_bound_option(prefix) {
        Some(upper) => {
            let upper: wasm_bindgen::JsValue = js_sys::Uint8Array::from(upper.as_slice()).into();
            // Lower bound inclusive (`false`), upper bound exclusive (`true`).
            web_sys::IdbKeyRange::bound_with_lower_open_and_upper_open(&lower, &upper, false, true)
        }
        None => web_sys::IdbKeyRange::lower_bound(&lower),
    }
}
/// Associates the IndexedDB error type with the store.
impl WithError for IndexedDbStore {
    type Error = IndexedDbStoreError;
}
/// Associates the IndexedDB error type with the database.
impl WithError for IndexedDbDatabase {
    type Error = IndexedDbStoreError;
}
impl ReadableKeyValueStore for IndexedDbStore {
    // No key-size limit is enforced by this store.
    const MAX_KEY_SIZE: usize = usize::MAX;
    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }
    /// Recovers the root key by stripping the domain byte from `start_key` and
    /// BCS-deserializing the remainder (the inverse of `open_shared`).
    fn root_key(&self) -> Result<Vec<u8>, IndexedDbStoreError> {
        assert!(self.start_key.starts_with(&ROOT_KEY_DOMAIN));
        let root_key = bcs::from_bytes(&self.start_key[ROOT_KEY_DOMAIN.len()..])?;
        Ok(root_key)
    }
    /// Reads the value stored under the full (prefixed) key, if any.
    async fn read_value_bytes(&self, key: &[u8]) -> Result<Option<Vec<u8>>, IndexedDbStoreError> {
        let key = self.full_key(key);
        let key = js_sys::Uint8Array::from(key.as_slice());
        let value = self.with_object_store(|o| o.get(&key))??.await?;
        Ok(value.map(|v| js_sys::Uint8Array::new(&v).to_vec()))
    }
    async fn contains_key(&self, key: &[u8]) -> Result<bool, IndexedDbStoreError> {
        let key = self.full_key(key);
        let key = js_sys::Uint8Array::from(key.as_slice());
        let count = self.with_object_store(|o| o.count_with_key(&key))??.await?;
        // Keys are unique in the object store, so the count is 0 or 1.
        assert!(count < 2);
        Ok(count == 1)
    }
    /// Checks all keys concurrently, failing fast on the first error.
    async fn contains_keys(&self, keys: &[Vec<u8>]) -> Result<Vec<bool>, IndexedDbStoreError> {
        future::try_join_all(
            keys.iter()
                .map(|key| async move { self.contains_key(key).await }),
        )
        .await
    }
    /// Reads all values concurrently, preserving the input order.
    async fn read_multi_values_bytes(
        &self,
        keys: &[Vec<u8>],
    ) -> Result<Vec<Option<Vec<u8>>>, IndexedDbStoreError> {
        future::try_join_all(
            keys.iter()
                .map(|key| async move { self.read_value_bytes(key).await }),
        )
        .await
    }
    /// Returns the suffixes (with the full prefix stripped) of all keys
    /// starting with `key_prefix`.
    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, IndexedDbStoreError> {
        let key_prefix = self.full_key(key_prefix);
        let range = prefix_to_range(&key_prefix)?;
        Ok(self
            .with_object_store(|o| o.get_all_keys_with_key(&range))??
            .await?
            .into_iter()
            .map(|key| {
                // Drop the prefix bytes from the returned key.
                let key = js_sys::Uint8Array::new(&key);
                key.subarray(key_prefix.len() as u32, key.length()).to_vec()
            })
            .collect())
    }
    /// Returns `(key suffix, value)` pairs for all keys starting with
    /// `key_prefix`, iterating the range with a cursor.
    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, IndexedDbStoreError> {
        let mut key_values = vec![];
        let key_prefix = self.full_key(key_prefix);
        let range = prefix_to_range(&key_prefix)?;
        let transaction = self.database.transaction_on_one(&self.object_store_name)?;
        let object_store = transaction.object_store(&self.object_store_name)?;
        // An absent cursor means the range is empty.
        let Some(cursor) = object_store.open_cursor_with_range_owned(range)?.await? else {
            return Ok(key_values);
        };
        loop {
            let Some(key) = cursor.primary_key() else {
                break;
            };
            let key = js_sys::Uint8Array::new(&key);
            key_values.push((
                key.subarray(key_prefix.len() as u32, key.length()).to_vec(),
                js_sys::Uint8Array::new(&cursor.value()).to_vec(),
            ));
            // `continue_cursor` returns `false` once the range is exhausted.
            if !cursor.continue_cursor()?.await? {
                break;
            }
        }
        Ok(key_values)
    }
}
impl WritableKeyValueStore for IndexedDbStore {
    // No value-size limit is enforced by this store.
    const MAX_VALUE_SIZE: usize = usize::MAX;
    /// Applies all operations of `batch` in order within a single read-write
    /// transaction, then records this partition's root key so it shows up in
    /// `list_root_keys`.
    async fn write_batch(&self, batch: Batch) -> Result<(), IndexedDbStoreError> {
        let transaction = self
            .database
            .transaction_on_one_with_mode(&self.object_store_name, IdbTransactionMode::Readwrite)?;
        let object_store = transaction.object_store(&self.object_store_name)?;
        for ent in batch.operations {
            match ent {
                WriteOperation::Put { key, value } => {
                    let key = self.full_key(&key);
                    object_store
                        .put_key_val_owned(
                            js_sys::Uint8Array::from(&key[..]),
                            &js_sys::Uint8Array::from(&value[..]),
                        )?
                        .await?;
                }
                WriteOperation::Delete { key } => {
                    let key = self.full_key(&key);
                    object_store
                        .delete_owned(js_sys::Uint8Array::from(&key[..]))?
                        .await?;
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    // Deleting the whole range removes every key with this prefix.
                    let key_prefix = self.full_key(&key_prefix);
                    object_store
                        .delete_owned(prefix_to_range(&key_prefix[..])?)?
                        .await?;
                }
            }
        }
        // Mark this root key as used: same key as `start_key` but with the
        // first byte switched to the stored-root-keys domain (assumes
        // `start_key` is non-empty, which `open_shared` guarantees).
        let mut key = self.start_key.clone();
        key[0] = STORED_ROOT_KEYS_PREFIX[0];
        object_store
            .put_key_val_owned(
                js_sys::Uint8Array::from(&key[..]),
                &js_sys::Uint8Array::default(),
            )?
            .await?;
        Ok(())
    }
    /// No-op: this store writes directly and keeps no journal.
    async fn clear_journal(&self) -> Result<(), IndexedDbStoreError> {
        Ok(())
    }
}
impl KeyValueDatabase for IndexedDbDatabase {
    type Config = IndexedDbStoreConfig;
    type Store = IndexedDbStore;
    fn get_name() -> String {
        "indexed db".to_string()
    }
    /// Opens the shared `linera` database and ensures an object store named
    /// `namespace` exists, bumping the database version to create it if
    /// missing (IndexedDB only allows creating object stores during a
    /// version-change upgrade).
    async fn connect(config: &Self::Config, namespace: &str) -> Result<Self, IndexedDbStoreError> {
        let namespace = namespace.to_string();
        let object_store_name = namespace.clone();
        let mut database = IdbDatabase::open(DATABASE_NAME)?.await?;
        if !database.object_store_names().any(|n| n == namespace) {
            // Reopen with a higher version so the upgrade callback can create
            // the missing object store.
            let version = database.version();
            database.close();
            let mut db_req = IdbDatabase::open_f64(DATABASE_NAME, version + 1.0)?;
            db_req.set_on_upgrade_needed(Some(move |event: &IdbVersionChangeEvent| {
                event.db().create_object_store(&namespace)?;
                Ok(())
            }));
            database = db_req.await?;
        }
        let database = Rc::new(database);
        Ok(Self {
            database,
            object_store_name,
            max_stream_queries: config.max_stream_queries,
        })
    }
    /// Partitions the database under `root_key`: the start key is the
    /// root-key domain byte followed by the BCS serialization of `root_key`.
    fn open_shared(&self, root_key: &[u8]) -> Result<Self::Store, IndexedDbStoreError> {
        let mut start_key = ROOT_KEY_DOMAIN.to_vec();
        start_key.extend(bcs::to_bytes(&root_key)?);
        Ok(self.open_internal(start_key))
    }
    // No exclusivity is enforced here; this behaves exactly like `open_shared`.
    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self::Store, IndexedDbStoreError> {
        self.open_shared(root_key)
    }
    async fn list_all(config: &Self::Config) -> Result<Vec<String>, IndexedDbStoreError> {
        Ok(Self::connect(config, "")
            .await?
            .database
            .object_store_names()
            .collect())
    }
    /// Lists the keys recorded under the stored-root-keys domain by
    /// `write_batch`.
    async fn list_root_keys(&self) -> Result<Vec<Vec<u8>>, IndexedDbStoreError> {
        let start_key = STORED_ROOT_KEYS_PREFIX.to_vec();
        let store = self.open_internal(start_key);
        store.find_keys_by_prefix(&[]).await
    }
    async fn exists(config: &Self::Config, namespace: &str) -> Result<bool, IndexedDbStoreError> {
        Ok(Self::connect(config, "")
            .await?
            .database
            .object_store_names()
            .any(|x| x == namespace))
    }
    // NOTE(review): unlike `connect`, this calls `create_object_store` outside
    // a version-change upgrade — confirm this succeeds at runtime.
    async fn create(config: &Self::Config, namespace: &str) -> Result<(), IndexedDbStoreError> {
        Self::connect(config, "")
            .await?
            .database
            .create_object_store(namespace)?;
        Ok(())
    }
    // NOTE(review): same caveat as `create` — deleting an object store
    // normally requires a version-change transaction.
    async fn delete(config: &Self::Config, namespace: &str) -> Result<(), IndexedDbStoreError> {
        Ok(Self::connect(config, "")
            .await?
            .database
            .delete_object_store(namespace)?)
    }
}
#[cfg(with_testing)]
mod testing {
    use super::*;
    use crate::random::generate_test_namespace;

    /// Creates a test IndexedDB store with the given number of stream queries,
    /// using a freshly generated namespace and an empty root key.
    pub async fn create_indexed_db_store_stream_queries(
        max_stream_queries: usize,
    ) -> IndexedDbStore {
        let config = IndexedDbStoreConfig { max_stream_queries };
        let namespace = generate_test_namespace();
        let database = IndexedDbDatabase::connect(&config, &namespace)
            .await
            .unwrap();
        database.open_shared(&[]).unwrap()
    }

    /// Creates a test IndexedDB store with the default number of stream queries.
    // The redundant `#[cfg(with_testing)]` previously on this function was
    // removed: the whole module is already gated behind that `cfg`.
    pub async fn create_indexed_db_test_store() -> IndexedDbStore {
        create_indexed_db_store_stream_queries(TEST_INDEX_DB_MAX_STREAM_QUERIES).await
    }
}
#[cfg(with_testing)]
pub use testing::*;
/// The error type for [`IndexedDbStore`].
#[derive(Error, Debug)]
pub enum IndexedDbStoreError {
    /// Serialization error with BCS.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    /// A DOM exception occurred in the IndexedDB operations.
    #[error("DOM exception: {0:?}")]
    Dom(gloo_utils::errors::JsError),
    /// JavaScript threw an exception whilst handling IndexedDB operations.
    #[error("JavaScript exception: {0:?}")]
    Js(gloo_utils::errors::JsError),
}
impl From<web_sys::DomException> for IndexedDbStoreError {
    /// Wraps a DOM exception raised by an IndexedDB call.
    fn from(dom_exception: web_sys::DomException) -> Self {
        let value = wasm_bindgen::JsValue::from(dom_exception);
        Self::Dom(value.try_into().unwrap())
    }
}
impl From<wasm_bindgen::JsValue> for IndexedDbStoreError {
fn from(js_value: wasm_bindgen::JsValue) -> Self {
Self::Js(js_value.try_into().unwrap())
}
}
/// Identifies this backend in generic key-value-store errors.
impl KeyValueStoreError for IndexedDbStoreError {
    const BACKEND: &'static str = "indexed_db";
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/test_utils/mod.rs | linera-views/src/test_utils/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
pub mod test_views;
/// Functions for computing the performance of stores.
#[cfg(not(target_arch = "wasm32"))]
pub mod performance;
use std::collections::{BTreeMap, BTreeSet, HashSet};
use rand::{seq::SliceRandom, Rng};
use crate::{
batch::{
Batch, WriteOperation,
WriteOperation::{Delete, Put},
},
random::{generate_test_namespace, make_deterministic_rng, make_nondeterministic_rng},
store::{
KeyValueDatabase, KeyValueStore, ReadableKeyValueStore, TestKeyValueDatabase,
WritableKeyValueStore,
},
};
/// The inclusive upper limit for each random byte in "small" test keys,
/// chosen small so that generated keys collide frequently.
pub const SMALL_BYTE_UPPER_LIMIT: u8 = 3;
/// Returns a random key prefix used for tests: a zero byte followed by the
/// BCS serialization of a random `usize`.
pub fn get_random_key_prefix() -> Vec<u8> {
    let value: usize = make_nondeterministic_rng().gen();
    let mut key_prefix = vec![0];
    bcs::serialize_into(&mut key_prefix, &value).unwrap();
    key_prefix
}
/// Extends `key_prefix` by `n` random bytes, each drawn uniformly from
/// `0..=byte_upper_limit`.
fn get_random_byte_vector_with_byte_upper_limit<R: Rng>(
    rng: &mut R,
    key_prefix: &[u8],
    n: usize,
    byte_upper_limit: u8,
) -> Vec<u8> {
    let mut bytes = key_prefix.to_vec();
    bytes.extend((0..n).map(|_| rng.gen_range(0..=byte_upper_limit)));
    bytes
}
/// Extends `key_prefix` by `n` random bytes drawn from the small range
/// `0..=SMALL_BYTE_UPPER_LIMIT`, so that key collisions are likely.
fn get_small_key_space<R: Rng>(rng: &mut R, key_prefix: &[u8], n: usize) -> Vec<u8> {
    get_random_byte_vector_with_byte_upper_limit(rng, key_prefix, n, SMALL_BYTE_UPPER_LIMIT)
}
/// Takes a random number generator, a `key_prefix` and extends it by n random bytes.
pub fn get_random_byte_vector<R: Rng>(rng: &mut R, key_prefix: &[u8], n: usize) -> Vec<u8> {
    // Bytes are drawn from the full `u8` range.
    get_random_byte_vector_with_byte_upper_limit(rng, key_prefix, n, u8::MAX)
}
/// Builds a random `k`-element subset of `{0, ..., n-1}`.
pub fn get_random_kset<R: Rng>(rng: &mut R, n: usize, k: usize) -> Vec<usize> {
    // Shuffle the full range and keep the first `k` entries. Truncating in
    // place avoids the extra allocation of slicing and re-collecting.
    let mut values: Vec<usize> = (0..n).collect();
    values.shuffle(rng);
    values.truncate(k);
    values
}
/// Takes a random number generator, a `key_prefix` and generates
/// pairs `(key, value)` with key obtained by appending `len_key` random bytes to `key_prefix`
/// and value obtained by creating a vector with `len_value` random bytes.
/// We return n such `(key, value)` pairs which are all distinct.
pub fn get_random_key_values_prefix<R: Rng>(
    rng: &mut R,
    key_prefix: Vec<u8>,
    len_key: usize,
    len_value: usize,
    num_entries: usize,
    key_byte_upper_limit: u8,
) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut key_value_pairs = Vec::with_capacity(num_entries);
    let mut unique_keys = HashSet::new();
    for _ in 0..num_entries {
        // Re-draw until the key has not been seen before.
        let key = loop {
            let key = get_random_byte_vector_with_byte_upper_limit(
                rng,
                &key_prefix,
                len_key,
                key_byte_upper_limit,
            );
            // `insert` returns `true` for a fresh key, replacing the previous
            // `contains` + `insert` double lookup.
            if unique_keys.insert(key.clone()) {
                break key;
            }
        };
        let value = get_random_byte_vector(rng, &[], len_value);
        key_value_pairs.push((key, value));
    }
    key_value_pairs
}
/// Takes a random number generator `rng`, a number n and returns n random `(key, value)`
/// which are all distinct with key and value being of length 8.
pub fn get_random_key_values<R: Rng>(rng: &mut R, num_entries: usize) -> Vec<(Vec<u8>, Vec<u8>)> {
    // Empty key prefix; keys span the full byte range.
    get_random_key_values_prefix(rng, Vec::new(), 8, 8, num_entries, u8::MAX)
}
// A list of key-value pairs to insert, together with the number `k` of them
// that will later be turned into deletions.
type VectorPutDelete = (Vec<(Vec<u8>, Vec<u8>)>, usize);
/// A bunch of puts and some deletes.
pub fn get_random_key_value_operations<R: Rng>(
    rng: &mut R,
    num_entries: usize,
    k: usize,
) -> VectorPutDelete {
    (get_random_key_values(rng, num_entries), k)
}
/// A random reordering of the puts and deletes.
/// For something like `MapView` it should get us the same result whatever way we are calling.
pub fn span_random_reordering_put_delete<R: Rng>(
    rng: &mut R,
    info_op: VectorPutDelete,
) -> Vec<WriteOperation> {
    let n = info_op.0.len();
    let k = info_op.1;
    // Random permutation of the insertion order.
    let mut indices: Vec<usize> = (0..n).collect();
    indices.shuffle(rng);
    // Inverse permutation: `indices_rev[j]` is the position at which pair `j`
    // is inserted.
    let mut indices_rev = vec![0; n];
    for (i, &idx) in indices.iter().enumerate() {
        indices_rev[idx] = i;
    }
    // For the first `k` pairs, schedule a deletion at a random position at or
    // after the pair's insertion, so every delete follows its put.
    let mut pos_remove_vector = vec![Vec::new(); n];
    for (i, pos) in indices_rev.iter().enumerate().take(k) {
        let idx = rng.gen_range(*pos..n);
        pos_remove_vector[idx].push(i);
    }
    let mut operations = Vec::new();
    for i in 0..n {
        let pos = indices[i];
        let pair = info_op.0[pos].clone();
        operations.push(Put {
            key: pair.0,
            value: pair.1,
        });
        // Iterate by reference instead of cloning the whole index list.
        for &pos_remove in &pos_remove_vector[i] {
            let key = info_op.0[pos_remove].0.clone();
            operations.push(Delete { key });
        }
    }
    operations
}
/// This test starts with a collection of key/values being inserted into the code
/// which is then followed by a number of reading tests. The functionalities being
/// tested are all the reading functionalities:
/// * `read_value_bytes`
/// * `read_multi_values_bytes`
/// * `find_keys_by_prefix` / `find_key_values_by_prefix`
/// * The ordering of keys returned by `find_keys_by_prefix` and `find_key_values_by_prefix`
pub async fn run_reads<S: KeyValueStore>(store: S, key_values: Vec<(Vec<u8>, Vec<u8>)>) {
    // We need a nontrivial key_prefix because dynamo requires a non-trivial prefix
    let mut batch = Batch::new();
    let mut keys = Vec::new();
    let mut set_keys = HashSet::new();
    for (key, value) in &key_values {
        keys.push(&key[..]);
        set_keys.insert(&key[..]);
        batch.put_key_value_bytes(key.clone(), value.clone());
    }
    store.write_batch(batch).await.unwrap();
    // Exercise every prefix of every inserted key (including the empty prefix).
    for key_prefix in keys
        .iter()
        .flat_map(|key| (0..=key.len()).map(|u| &key[..u]))
    {
        // Getting the find_keys_by_prefix / find_key_values_by_prefix
        let len_prefix = key_prefix.len();
        let keys_request = store.find_keys_by_prefix(key_prefix).await.unwrap();
        let mut set_key_value1 = HashSet::new();
        let mut keys_request_deriv = Vec::new();
        let key_values_by_prefix = store.find_key_values_by_prefix(key_prefix).await.unwrap();
        for (key, value) in key_values_by_prefix {
            keys_request_deriv.push(key.clone());
            set_key_value1.insert((key, value));
        }
        // Check find_keys / find_key_values return the same keys
        assert_eq!(keys_request, keys_request_deriv);
        // Check key ordering (strictly increasing)
        for i in 1..keys_request.len() {
            assert!(keys_request[i - 1] < keys_request[i]);
        }
        // Check the obtained values against the expected suffix/value pairs
        let mut set_key_value2 = HashSet::new();
        for (key, value) in &key_values {
            if key.starts_with(key_prefix) {
                set_key_value2.insert((key[len_prefix..].to_vec(), value[..].to_vec()));
            }
        }
        assert_eq!(set_key_value1, set_key_value2);
    }
    // Now checking the read_multi_values_bytes
    let mut rng = make_deterministic_rng();
    for _ in 0..3 {
        let mut keys = Vec::new();
        let mut values = Vec::new();
        for (key, value) in &key_values {
            if rng.gen() {
                // Put a key that is already present
                keys.push(key.clone());
                values.push(Some(value.clone()));
            } else {
                // Put a missing key
                let len = key.len();
                let pos = rng.gen_range(0..len);
                let byte = *key.get(pos).unwrap();
                // Perturb one byte to build a key that should be absent.
                let new_byte: u8 = if byte < 255 { byte + 1 } else { byte - 1 };
                let mut new_key = key.clone();
                *new_key.get_mut(pos).unwrap() = new_byte;
                if !set_keys.contains(&*new_key) {
                    keys.push(new_key);
                    values.push(None);
                }
            }
        }
        let mut test_exists = Vec::new();
        let mut values_single_read = Vec::new();
        for key in &keys {
            test_exists.push(store.contains_key(key).await.unwrap());
            values_single_read.push(store.read_value_bytes(key).await.unwrap());
        }
        let test_exists_direct = store.contains_keys(&keys).await.unwrap();
        let values_read = store.read_multi_values_bytes(&keys).await.unwrap();
        // Single reads, batched reads and existence checks must all agree.
        assert_eq!(values, values_read);
        assert_eq!(values, values_single_read);
        let values_read_stat = values_read.iter().map(|x| x.is_some()).collect::<Vec<_>>();
        assert_eq!(values_read_stat, test_exists);
        assert_eq!(values_read_stat, test_exists_direct);
    }
}
/// Generates a list of random key-values with no duplicates, using a
/// deterministic RNG and the full byte range for keys.
pub fn get_random_key_values_with_sizes(
    num_entries: usize,
    len_key: usize,
    len_value: usize,
) -> Vec<(Vec<u8>, Vec<u8>)> {
    get_random_key_values_prefix(
        &mut make_deterministic_rng(),
        vec![0],
        len_key,
        len_value,
        num_entries,
        u8::MAX,
    )
}
/// Generates random key-values with no duplicates whose key bytes come from
/// a small range, so that keys share prefixes.
fn get_random_key_values_with_small_keys(
    num_entries: usize,
    len_key: usize,
    len_value: usize,
) -> Vec<(Vec<u8>, Vec<u8>)> {
    get_random_key_values_prefix(
        &mut make_deterministic_rng(),
        vec![0],
        len_key,
        len_value,
        num_entries,
        SMALL_BYTE_UPPER_LIMIT,
    )
}
/// Adds a prefix to every key of a list of key-values; values are unchanged.
pub fn add_prefix(prefix: &[u8], key_values: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut prefixed_pairs = Vec::with_capacity(key_values.len());
    for (key, value) in key_values {
        let mut big_key = prefix.to_vec();
        big_key.extend(key);
        prefixed_pairs.push((big_key, value));
    }
    prefixed_pairs
}
/// We build a number of scenarios for testing the reads.
// The scenarios mix entry counts, key lengths and value lengths; the
// "small keys" variants produce keys with many shared prefixes.
pub fn get_random_test_scenarios() -> Vec<Vec<(Vec<u8>, Vec<u8>)>> {
    vec![
        get_random_key_values_with_sizes(7, 8, 3),
        get_random_key_values_with_sizes(150, 8, 3),
        get_random_key_values_with_sizes(30, 8, 10),
        get_random_key_values_with_small_keys(30, 4, 10),
        get_random_key_values_with_small_keys(30, 4, 100),
    ]
}
/// Generates a fully random batch of `Put`, `Delete` and `DeletePrefix`
/// operations over a small key space, so that collisions and overlapping
/// prefixes occur. The RNG draw sequence matches the original implementation.
fn generate_random_batch<R: Rng>(rng: &mut R, key_prefix: &[u8], batch_size: usize) -> Batch {
    let mut batch = Batch::new();
    for _ in 0..batch_size {
        match rng.gen_range(0..8) {
            // Insertion (probability 6/8).
            0..=5 => {
                let key = get_small_key_space(rng, key_prefix, 4);
                let len_value = rng.gen_range(0..10); // Could need to be split
                let value = get_random_byte_vector(rng, &[], len_value);
                // No clones needed: the key and value are moved into the batch.
                batch.put_key_value_bytes(key, value);
            }
            // Deletion (probability 1/8): key might be missing, no matter, it
            // has to work.
            6 => {
                let key = get_small_key_space(rng, key_prefix, 4);
                batch.delete_key(key);
            }
            // Prefix deletion (probability 1/8).
            _ => {
                let len = rng.gen_range(1..4); // We want a non-trivial range
                let delete_key_prefix = get_small_key_space(rng, key_prefix, len);
                batch.delete_key_prefix(delete_key_prefix);
            }
        }
    }
    batch
}
/// Concatenates a key prefix and a key suffix into a full key.
fn get_key(key_prefix: &[u8], key_suffix: Vec<u8>) -> Vec<u8> {
    let mut full_key = Vec::with_capacity(key_prefix.len() + key_suffix.len());
    full_key.extend_from_slice(key_prefix);
    full_key.extend(key_suffix);
    full_key
}
/// Builds one of a few hand-crafted batches exercising tricky operation orders.
fn generate_specific_batch(key_prefix: &[u8], option: usize) -> Batch {
    let mut batch = Batch::new();
    match option {
        0 => {
            // A put immediately cancelled by a delete of the same key.
            let key = get_key(key_prefix, vec![34]);
            batch.put_key_value_bytes(key.clone(), vec![]);
            batch.delete_key(key);
        }
        1 => {
            // Several puts, one deleted individually, then a prefix delete
            // covering the keys under `[12]`.
            let key1 = get_key(key_prefix, vec![12, 34]);
            let key2 = get_key(key_prefix, vec![12, 33]);
            let key3 = get_key(key_prefix, vec![13]);
            batch.put_key_value_bytes(key1.clone(), vec![]);
            batch.put_key_value_bytes(key2, vec![]);
            batch.put_key_value_bytes(key3, vec![]);
            batch.delete_key(key1);
            batch.delete_key_prefix(get_key(key_prefix, vec![12]));
        }
        // Any other option yields an empty batch.
        _ => {}
    }
    batch
}
/// Applies the operations of `batch`, in order, to the in-memory model
/// `kv_state`, mirroring the semantics of a store's `write_batch`.
fn update_state_from_batch(kv_state: &mut BTreeMap<Vec<u8>, Vec<u8>>, batch: &Batch) {
    for operation in &batch.operations {
        match operation {
            WriteOperation::Put { key, value } => {
                kv_state.insert(key.to_vec(), value.to_vec());
            }
            WriteOperation::Delete { key } => {
                kv_state.remove(key);
            }
            WriteOperation::DeletePrefix { key_prefix } => {
                // Remove every key starting with the prefix.
                kv_state.retain(|key, _| !key.starts_with(key_prefix));
            }
        }
    }
}
/// Computes the key-value state obtained by applying `batch` to an empty state.
fn realize_batch(batch: &Batch) -> BTreeMap<Vec<u8>, Vec<u8>> {
    let mut kv_state = BTreeMap::new();
    update_state_from_batch(&mut kv_state, batch);
    kv_state
}
async fn read_keys_prefix<C: KeyValueStore>(
key_value_store: &C,
key_prefix: &[u8],
) -> BTreeSet<Vec<u8>> {
let mut keys = BTreeSet::new();
for key_suffix in key_value_store
.find_keys_by_prefix(key_prefix)
.await
.unwrap()
{
let mut key = key_prefix.to_vec();
key.extend(key_suffix);
keys.insert(key);
}
keys
}
/// Reads all `(full key, value)` pairs under `key_prefix`, re-attaching the
/// prefix to each returned key suffix.
async fn read_key_values_prefix<C: KeyValueStore>(
    key_value_store: &C,
    key_prefix: &[u8],
) -> BTreeMap<Vec<u8>, Vec<u8>> {
    let mut key_values = BTreeMap::new();
    for (key_suffix, value) in key_value_store
        .find_key_values_by_prefix(key_prefix)
        .await
        .unwrap()
    {
        let mut key = key_prefix.to_vec();
        key.extend(key_suffix);
        // `value` is already an owned `Vec<u8>`; the previous `value.to_vec()`
        // made a redundant copy.
        key_values.insert(key, value);
    }
    key_values
}
/// Writes and then reads data under a prefix, and verifies the result.
pub async fn run_test_batch_from_blank<C: KeyValueStore>(
    key_value_store: &C,
    key_prefix: Vec<u8>,
    batch: Batch,
) {
    // Expected state: the batch applied to an empty store.
    let kv_state = realize_batch(&batch);
    key_value_store.write_batch(batch).await.unwrap();
    // Checking the consistency
    let key_values = read_key_values_prefix(key_value_store, &key_prefix).await;
    assert_eq!(key_values, kv_state);
}
/// Run many operations on batches always starting from a blank state.
pub async fn run_writes_from_blank<C: KeyValueStore>(key_value_store: &C) {
    let mut rng = make_deterministic_rng();
    let n_oper = 10;
    let batch_size = 500;
    // key space has size 4^4 = 256 so we necessarily encounter collisions
    // because the number of generated keys is about batch_size * n_oper = 800 > 256.
    for _ in 0..n_oper {
        let key_prefix = get_random_key_prefix();
        let batch = generate_random_batch(&mut rng, &key_prefix, batch_size);
        run_test_batch_from_blank(key_value_store, key_prefix, batch).await;
    }
    // Also run the hand-crafted batches exercising put/delete orderings.
    for option in 0..2 {
        let key_prefix = get_random_key_prefix();
        let batch = generate_specific_batch(&key_prefix, option);
        run_test_batch_from_blank(key_value_store, key_prefix, batch).await;
    }
}
/// Reading many keys at a time could trigger an error. This needs to be tested.
// Writes `n_entries` values of `value_size` bytes each, then reads them all
// back in a single `read_multi_values_bytes` call.
pub async fn big_read_multi_values<D>(config: D::Config, value_size: usize, n_entries: usize)
where
    D: KeyValueDatabase,
    D::Store: KeyValueStore,
{
    let mut rng = make_deterministic_rng();
    let namespace = generate_test_namespace();
    let store = D::connect(&config, &namespace).await.unwrap();
    let store = store.open_exclusive(&[]).unwrap();
    let key_prefix = vec![42, 54];
    let mut batch = Batch::new();
    let mut keys = Vec::new();
    let mut values = Vec::new();
    for i in 0..n_entries {
        let mut key = key_prefix.clone();
        bcs::serialize_into(&mut key, &i).unwrap();
        let value = get_random_byte_vector(&mut rng, &[], value_size);
        batch.put_key_value_bytes(key.clone(), value.clone());
        keys.push(key);
        values.push(Some(value));
    }
    store.write_batch(batch).await.unwrap();
    // We reconnect so that the read is not using the cache.
    let store = D::connect(&config, &namespace).await.unwrap();
    let store = store.open_exclusive(&[]).unwrap();
    let values_read = store.read_multi_values_bytes(&keys).await.unwrap();
    assert_eq!(values, values_read);
}
/// That test is especially challenging for ScyllaDB.
/// In its default settings, Scylla has a limitation to 10000 tombstones.
/// A tombstone is an indication that the data has been deleted. That
/// is thus a trie data structure for checking whether a requested key
/// is deleted or not.
///
/// In this test we insert 200000 keys into the database.
/// Then we select half of them at random and delete them. By the random
/// selection, Scylla is forced to introduce around 100000 tombstones
/// which triggers the crash with the default settings.
pub async fn tombstone_triggering_test<C: KeyValueStore>(key_value_store: C) {
    use linera_base::time::Instant;
    let t1 = Instant::now();
    let mut rng = make_deterministic_rng();
    let value_size = 100;
    let n_entry = 200000;
    // Putting the keys
    let mut batch_insert = Batch::new();
    let key_prefix = vec![0];
    let mut batch_delete = Batch::new();
    // Tracks the keys/values expected to survive the deletions.
    let mut remaining_key_values = BTreeMap::new();
    let mut remaining_keys = BTreeSet::new();
    for i in 0..n_entry {
        let mut key = key_prefix.clone();
        bcs::serialize_into(&mut key, &i).unwrap();
        let value = get_random_byte_vector(&mut rng, &[], value_size);
        batch_insert.put_key_value_bytes(key.clone(), value.clone());
        // Each key is deleted with probability 1/2.
        let to_delete = rng.gen::<bool>();
        if to_delete {
            batch_delete.delete_key(key);
        } else {
            remaining_keys.insert(key.clone());
            remaining_key_values.insert(key, value);
        }
    }
    tracing::info!("Set up in {} ms", t1.elapsed().as_millis());
    let t1 = Instant::now();
    run_test_batch_from_blank(&key_value_store, key_prefix.clone(), batch_insert).await;
    tracing::info!("run_test_batch in {} ms", t1.elapsed().as_millis());
    // Deleting them all
    let t1 = Instant::now();
    key_value_store.write_batch(batch_delete).await.unwrap();
    tracing::info!("batch_delete in {} ms", t1.elapsed().as_millis());
    // Read back several times: repeated scans over the tombstones are what
    // stress the store.
    for iter in 0..5 {
        // Reading everything and seeing that it is now cleaned.
        let t1 = Instant::now();
        let key_values = read_key_values_prefix(&key_value_store, &key_prefix).await;
        assert_eq!(key_values, remaining_key_values);
        tracing::info!(
            "iter={} read_key_values_prefix in {} ms",
            iter,
            t1.elapsed().as_millis()
        );
        let t1 = Instant::now();
        let keys = read_keys_prefix(&key_value_store, &key_prefix).await;
        assert_eq!(keys, remaining_keys);
        tracing::info!(
            "iter={} read_keys_prefix after {} ms",
            iter,
            t1.elapsed().as_millis()
        );
    }
}
/// DynamoDB has limits at 1 MB (for pagination), 4 MB (for write)
/// Let us go right past them at 20 MB of data with writing and then
/// reading it. And 20 MB is not huge by any means. All `KeyValueStore`
/// must handle that.
///
/// The size of the values varies, as each size has its own issues.
pub async fn run_big_write_read<C: KeyValueStore>(
    key_value_store: C,
    target_size: usize,
    value_sizes: Vec<usize>,
) {
    let mut rng = make_deterministic_rng();
    for (index, value_size) in value_sizes.into_iter().enumerate() {
        // Enough entries of this value size to reach the requested total.
        let num_entries: usize = target_size / value_size;
        // A distinct prefix per value size keeps the runs independent.
        let key_prefix = vec![0, index as u8];
        let mut batch = Batch::new();
        for entry_index in 0..num_entries {
            let mut key = key_prefix.clone();
            bcs::serialize_into(&mut key, &entry_index).unwrap();
            let value = get_random_byte_vector(&mut rng, &[], value_size);
            batch.put_key_value_bytes(key, value);
        }
        run_test_batch_from_blank(&key_value_store, key_prefix, batch).await;
    }
}
type StateBatch = (Vec<(Vec<u8>, Vec<u8>)>, Batch);

/// Writes an initial set of key-values, checks that it reads back correctly,
/// then applies `batch` and checks that the store matches the expected state
/// computed from an in-memory model.
async fn run_test_batch_from_state<C: KeyValueStore>(
    key_value_store: &C,
    key_prefix: Vec<u8>,
    state_and_batch: StateBatch,
) {
    let (initial_entries, batch) = state_and_batch;
    let mut expected_state = BTreeMap::new();
    let mut initial_batch = Batch::new();
    for (key, value) in initial_entries {
        expected_state.insert(key.clone(), value.clone());
        initial_batch.put_key_value_bytes(key, value);
    }
    key_value_store.write_batch(initial_batch).await.unwrap();
    assert_eq!(
        read_key_values_prefix(key_value_store, &key_prefix).await,
        expected_state
    );
    // Apply the batch both to the store and to the in-memory model, then
    // verify that they still agree.
    update_state_from_batch(&mut expected_state, &batch);
    key_value_store.write_batch(batch).await.unwrap();
    assert_eq!(
        read_key_values_prefix(key_value_store, &key_prefix).await,
        expected_state
    );
}
/// Builds a hand-picked initial state together with a batch exercising one
/// specific tricky combination of batch operations, selected by `option`
/// (valid values are 0..=7).
fn generate_specific_state_batch(key_prefix: &[u8], option: usize) -> StateBatch {
    let mut key_values = Vec::new();
    let mut batch = Batch::new();
    if option == 0 {
        // A DeletePrefix followed by an insertion that matches the DeletePrefix
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        let key3 = get_key(key_prefix, vec![1, 4, 5]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.delete_key_prefix(key2);
        batch.put_key_value_bytes(key3, vec![23]);
    }
    if option == 1 {
        // Just a DeletePrefix
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.delete_key_prefix(key2);
    }
    if option == 2 {
        // A Put followed by a DeletePrefix that matches the Put
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        let key3 = get_key(key_prefix, vec![1, 4, 5]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.put_key_value_bytes(key3, vec![23]);
        batch.delete_key_prefix(key2);
    }
    if option == 3 {
        // A Put followed by a Delete on the same value
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        let key3 = get_key(key_prefix, vec![1, 4, 5]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.put_key_value_bytes(key3.clone(), vec![23]);
        batch.delete_key(key3);
    }
    if option == 4 {
        // A Delete Key followed by a Put on the same key
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        let key3 = get_key(key_prefix, vec![1, 4, 5]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.delete_key(key3.clone());
        batch.put_key_value_bytes(key3, vec![23]);
    }
    if option == 5 {
        // A Delete on one key followed by a Put on a different key
        let key1 = get_key(key_prefix, vec![1, 3]);
        let key2 = get_key(key_prefix, vec![1, 4]);
        let key3 = get_key(key_prefix, vec![1, 4, 5]);
        let key4 = get_key(key_prefix, vec![1, 5]);
        key_values.push((key1.clone(), vec![34]));
        key_values.push((key2.clone(), vec![45]));
        batch.delete_key(key3.clone());
        batch.put_key_value_bytes(key4, vec![23]);
    }
    if option == 6 {
        // A DeletePrefix with an empty suffix, erasing everything under the prefix
        let key1 = get_key(key_prefix, vec![0]);
        let key2 = get_key(key_prefix, vec![]);
        key_values.push((key1, vec![33]));
        batch.delete_key_prefix(key2);
    }
    if option == 7 {
        // A DeletePrefix followed by a Put of an empty value under that prefix
        let key1 = get_key(key_prefix, vec![255, 255]);
        let key2 = get_key(key_prefix, vec![255, 255, 1]);
        key_values.push((key2.clone(), vec![]));
        batch.delete_key_prefix(key1);
        batch.put_key_value_bytes(key2, vec![]);
    }
    (key_values, batch)
}
/// Runs each deterministic batch scenario from `generate_specific_state_batch`
/// and checks its correctness against the store.
pub async fn run_writes_from_state<C: KeyValueStore>(key_value_store: &C) {
    for option in 0..8 {
        // Options 6 and 7 probe behavior near the end of the key space, so
        // they use a fixed prefix of high bytes.
        let key_prefix = match option {
            6 | 7 => vec![255, 255, 255],
            _ => get_random_key_prefix(),
        };
        let state_batch = generate_specific_state_batch(&key_prefix, option);
        run_test_batch_from_state(key_value_store, key_prefix, state_batch).await;
    }
}
/// Lists all namespaces of the database and keeps only those whose name
/// starts with `prefix`.
async fn namespaces_with_prefix<D: KeyValueDatabase>(
    config: &D::Config,
    prefix: &str,
) -> BTreeSet<String> {
    let mut matching = BTreeSet::new();
    for namespace in D::list_all(config).await.expect("namespaces") {
        if namespace.starts_with(prefix) {
            matching.insert(namespace);
        }
    }
    matching
}
/// Exercises the namespace functionalities of the `KeyValueDatabase`.
/// This tests everything except the `delete_all` which would
/// interact with other namespaces.
pub async fn namespace_admin_test<D: TestKeyValueDatabase>() {
    let config = D::new_test_config().await.expect("config");
    {
        let namespace = generate_test_namespace();
        D::create(&config, &namespace)
            .await
            .expect("first creation of a namespace");
        // Creating a namespace two times should return an error
        assert!(D::create(&config, &namespace).await.is_err());
    }
    // A fresh prefix: no namespace with that prefix should exist yet.
    let prefix = generate_test_namespace();
    let namespaces = namespaces_with_prefix::<D>(&config, &prefix).await;
    assert_eq!(namespaces.len(), 0);
    let mut rng = make_deterministic_rng();
    let size = 9;
    // Creating the initial list of namespaces
    let mut working_namespaces = BTreeSet::new();
    for i in 0..size {
        let namespace = format!("{}_{}", prefix, i);
        assert!(!D::exists(&config, &namespace).await.expect("test"));
        working_namespaces.insert(namespace);
    }
    // Creating the namespaces
    for namespace in &working_namespaces {
        D::create(&config, namespace)
            .await
            .expect("creation of a namespace");
        assert!(D::exists(&config, namespace).await.expect("test"));
    }
    // Connecting to all of them at once
    {
        let mut connections = Vec::new();
        for namespace in &working_namespaces {
            let connection = D::connect(&config, namespace)
                .await
                .expect("a connection to the namespace");
            connections.push(connection);
        }
    }
    // Listing all of them
    let namespaces = namespaces_with_prefix::<D>(&config, &prefix).await;
    assert_eq!(namespaces, working_namespaces);
    // Selecting at random some for deletion
    let mut kept_namespaces = BTreeSet::new();
    for namespace in working_namespaces {
        let delete = rng.gen::<bool>();
        if delete {
            D::delete(&config, &namespace)
                .await
                .expect("A successful deletion");
            assert!(!D::exists(&config, &namespace).await.expect("test"));
        } else {
            kept_namespaces.insert(namespace);
        }
    }
    // The kept namespaces must still exist and be exactly what is listed.
    for namespace in &kept_namespaces {
        assert!(D::exists(&config, namespace).await.expect("test"));
    }
    let namespaces = namespaces_with_prefix::<D>(&config, &prefix).await;
    assert_eq!(namespaces, kept_namespaces);
    // Final cleanup of the remaining namespaces.
    for namespace in kept_namespaces {
        D::delete(&config, &namespace)
            .await
            .expect("A successful deletion");
    }
}
/// Tests listing the root keys.
pub async fn root_key_admin_test<D>()
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
{
    let config = D::new_test_config().await.expect("config");
    let namespace = generate_test_namespace();
    let mut root_keys = Vec::new();
    let mut keys = BTreeSet::new();
    D::create(&config, &namespace).await.expect("creation");
    let prefix = vec![0];
    {
        let size = 3;
        let mut rng = make_deterministic_rng();
        let database = D::connect(&config, &namespace).await.expect("store");
        // Writes two keys under the empty root key via a shared store.
        let shared_store = database.open_shared(&[]).expect("shared store");
        root_keys.push(vec![]);
        let mut batch = Batch::new();
        for _ in 0..2 {
            let key = get_random_byte_vector(&mut rng, &prefix, 4);
            batch.put_key_value_bytes(key.clone(), vec![]);
            keys.insert((vec![], key));
        }
        shared_store.write_batch(batch).await.expect("write batch");
        // Writes between 0 and `size - 1` keys under each of 20 random root
        // keys, opened as exclusive stores.
        for _ in 0..20 {
            let root_key = get_random_byte_vector(&mut rng, &[], 4);
            let exclusive_store = database.open_exclusive(&root_key).expect("exclusive store");
            assert_eq!(exclusive_store.root_key().unwrap(), root_key);
            root_keys.push(root_key.clone());
            let size_select = rng.gen_range(0..size);
            let mut batch = Batch::new();
            for _ in 0..size_select {
                let key = get_random_byte_vector(&mut rng, &prefix, 4);
                batch.put_key_value_bytes(key.clone(), vec![]);
                keys.insert((root_key.clone(), key));
            }
            exclusive_store
                .write_batch(batch)
                .await
                .expect("write batch");
        }
    }
    // Every root key reported by the database must be one of those created
    // above. (The converse is not asserted: root keys with no written keys
    // may or may not be listed.)
    let read_root_keys = {
        let database = D::connect(&config, &namespace).await.expect("store");
        database.list_root_keys().await.expect("read_root_keys")
    };
    let set_root_keys = root_keys.iter().cloned().collect::<HashSet<_>>();
    for read_root_key in &read_root_keys {
        assert!(set_root_keys.contains(read_root_key));
    }
    // Re-reads every key under each listed root key and checks the
    // (root_key, key) pairs match exactly what was written.
    let mut read_keys = BTreeSet::new();
    for root_key in read_root_keys {
        let store = D::connect(&config, &namespace)
            .await
            .expect("database")
            .open_exclusive(&root_key)
            .expect("store");
        let keys = store.find_keys_by_prefix(&prefix).await.expect("keys");
        for key in keys {
            let mut big_key = prefix.clone();
            let key = key.to_vec();
            big_key.extend(key);
            read_keys.insert((root_key.clone(), big_key));
        }
    }
    assert_eq!(keys, read_keys);
    // Checking prefix freeness of the (root_key, key). This is a
    // common problem that needs to be tested: a key written under root key
    // [2, 3, 4, 5] must not be visible as a key starting with [2] under the
    // empty root key.
    let database = D::connect_test_namespace().await.expect("database");
    let store1 = database.open_shared(&[2, 3, 4, 5]).expect("store1");
    let mut batch = Batch::new();
    batch.put_key_value_bytes(vec![6, 7], vec![123, 135]);
    store1.write_batch(batch).await.expect("write_batch");
    let store2 = database.open_shared(&[]).expect("store2");
    let key_values = store2
        .find_key_values_by_prefix(&[2])
        .await
        .expect("key_values");
    assert_eq!(key_values.len(), 0);
}
/// A store can be in exclusive access where it stores the absence of values
/// or in shared access where only values are stored and (key, value) once
/// written are never modified nor erased.
///
/// In case of no exclusive access the following scenario is checked
/// * Store 1 deletes a key and does not mark it as missing in its cache.
/// * Store 2 writes the key
/// * Store 1 reads the key, but since it is not in the cache it can read
/// it correctly.
///
/// In case of exclusive access. We have the following scenario:
/// * Store 1 deletes a key and mark it as missing in its cache.
/// * Store 2 writes the key (it should not be doing it)
/// * Store 1 reads the key, see it as missing.
pub async fn exclusive_access_admin_test<D>(exclusive_access: bool)
where
D: TestKeyValueDatabase,
D::Store: KeyValueStore,
{
let config = D::new_test_config().await.expect("config");
let namespace = generate_test_namespace();
D::create(&config, &namespace).await.expect("creation");
let key = vec![42];
let namespace = D::connect(&config, &namespace).await.expect("store");
let store1 = if exclusive_access {
namespace.open_exclusive(&[]).expect("store1")
} else {
namespace.open_shared(&[]).expect("store1")
};
let mut batch1 = Batch::new();
batch1.delete_key(key.clone());
store1.write_batch(batch1).await.expect("write batch1");
let store2 = if exclusive_access {
namespace.open_exclusive(&[]).expect("store2")
} else {
namespace.open_shared(&[]).expect("store2")
};
let mut batch2 = Batch::new();
batch2.put_key_value_bytes(key.clone(), vec![]);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/test_utils/performance.rs | linera-views/src/test_utils/performance.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::time::{Duration, Instant};
use crate::{
batch::Batch,
store::{
KeyValueStore, ReadableKeyValueStore as _, TestKeyValueDatabase, WritableKeyValueStore as _,
},
test_utils::{add_prefix, get_random_key_values_with_small_keys},
};
// We generate about 200 keys of length 10 with a value of length 10000
// The keys are of the form 0,x_1, ..., x_n with 0 <= x_i < 4 and n=10.
/// A value to use for the keys
const PREFIX: &[u8] = &[0];
/// The key prefix used when searching for keys
const PREFIX_SEARCH: &[u8] = &[0, 0];
/// The number of keys
const NUM_ENTRIES: usize = 200;
/// The number of inserted keys
const NUM_INSERT: usize = 70;
/// The length of the keys
const LEN_KEY: usize = 10;
/// The length of the values
const LEN_VALUE: usize = 10000;
/// Removes every entry under `PREFIX` so the next benchmark iteration starts
/// from an empty store.
async fn clear_store<S: KeyValueStore>(store: &S) {
    let mut cleanup = Batch::new();
    cleanup.delete_key_prefix(PREFIX.to_vec());
    store.write_batch(cleanup).await.unwrap();
}
/// Benchmarks the `contains_key` operation.
///
/// Returns the total time spent in the `contains_key` calls themselves,
/// accumulated over `iterations` rounds. `f` consumes each result so the
/// calls cannot be optimized away.
pub async fn contains_key<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(bool) -> bool,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let entries = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // Only the first NUM_INSERT entries are written, so a fraction of the
        // queried keys are absent from the store.
        let mut batch = Batch::new();
        for (key, value) in &entries[..NUM_INSERT] {
            batch.put_key_value_bytes(key.clone(), value.clone());
        }
        store.write_batch(batch).await.unwrap();
        // Only the query loop is timed; setup and cleanup are excluded.
        let start = Instant::now();
        for (key, _) in &entries {
            f(store.contains_key(key).await.unwrap());
        }
        total_time += start.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `contains_keys` operation.
///
/// `f` consumes the result vector so the call cannot be optimized away.
pub async fn contains_keys<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(Vec<bool>) -> Vec<bool>,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let key_values = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // Only the first NUM_INSERT entries are written, so part of the
        // queried keys are absent from the store.
        let mut batch = Batch::new();
        for key_value in &key_values[..NUM_INSERT] {
            batch.put_key_value_bytes(key_value.0.clone(), key_value.1.clone());
        }
        store.write_batch(batch).await.unwrap();
        let keys = key_values
            .into_iter()
            .map(|(key, _)| key)
            .collect::<Vec<_>>();
        // Only the single `contains_keys` call is timed.
        let measurement = Instant::now();
        f(store.contains_keys(&keys).await.unwrap());
        total_time += measurement.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `find_keys_by_prefix` operation.
///
/// Only the search call itself is timed; writing the entries and clearing
/// the store are excluded from the measurement. `f` consumes the result so
/// the call cannot be optimized away.
pub async fn find_keys_by_prefix<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(Vec<Vec<u8>>) -> Vec<Vec<u8>>,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let entries = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // All generated entries are written before searching.
        let mut batch = Batch::new();
        for (key, value) in &entries {
            batch.put_key_value_bytes(key.clone(), value.clone());
        }
        store.write_batch(batch).await.unwrap();
        let start = Instant::now();
        f(store.find_keys_by_prefix(PREFIX_SEARCH).await.unwrap());
        total_time += start.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `find_key_values_by_prefix` operation.
///
/// `f` consumes the result so the call cannot be optimized away.
pub async fn find_key_values_by_prefix<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Vec<u8>)>,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let key_values = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // All generated entries are written before searching.
        let mut batch = Batch::new();
        for key_value in &key_values {
            batch.put_key_value_bytes(key_value.0.clone(), key_value.1.clone());
        }
        store.write_batch(batch).await.unwrap();
        // Only the single search call is timed.
        let measurement = Instant::now();
        f(store
            .find_key_values_by_prefix(PREFIX_SEARCH)
            .await
            .unwrap());
        total_time += measurement.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `read_value_bytes` operation.
///
/// `f` consumes each result so the calls cannot be optimized away.
pub async fn read_value_bytes<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(Option<Vec<u8>>) -> Option<Vec<u8>>,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let key_values = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // All generated entries are written, so every read hits an existing key.
        let mut batch = Batch::new();
        for key_value in &key_values {
            batch.put_key_value_bytes(key_value.0.clone(), key_value.1.clone());
        }
        store.write_batch(batch).await.unwrap();
        // Only the read loop is timed; setup and cleanup are excluded.
        let measurement = Instant::now();
        for (key, _) in &key_values {
            f(store.read_value_bytes(key).await.unwrap());
        }
        total_time += measurement.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `read_multi_values_bytes` operation.
///
/// `f` consumes the result vector so the call cannot be optimized away.
pub async fn read_multi_values_bytes<D, F>(iterations: u64, f: F) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
    F: Fn(Vec<Option<Vec<u8>>>) -> Vec<Option<Vec<u8>>>,
{
    let namespace = D::connect_test_namespace().await.unwrap();
    let store = namespace.open_shared(&[]).unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let key_values = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        // All generated entries are written, so every read hits an existing key.
        let mut batch = Batch::new();
        for key_value in &key_values {
            batch.put_key_value_bytes(key_value.0.clone(), key_value.1.clone());
        }
        store.write_batch(batch).await.unwrap();
        let keys = key_values
            .into_iter()
            .map(|(key, _)| key)
            .collect::<Vec<_>>();
        // Only the single multi-read call is timed.
        let measurement = Instant::now();
        f(store.read_multi_values_bytes(&keys).await.unwrap());
        total_time += measurement.elapsed();
        clear_store(&store).await;
    }
    total_time
}
/// Benchmarks the `write_batch` operation.
///
/// Only the write itself is timed; building the batch and clearing the store
/// are excluded from the measurement.
pub async fn write_batch<D>(iterations: u64) -> Duration
where
    D: TestKeyValueDatabase,
    D::Store: KeyValueStore,
{
    let store = D::new_test_store().await.unwrap();
    let mut total_time = Duration::ZERO;
    for _ in 0..iterations {
        let entries = add_prefix(
            PREFIX,
            get_random_key_values_with_small_keys(NUM_ENTRIES, LEN_KEY, LEN_VALUE),
        );
        let mut batch = Batch::new();
        for (key, value) in &entries {
            batch.put_key_value_bytes(key.clone(), value.clone());
        }
        let start = Instant::now();
        store.write_batch(batch).await.unwrap();
        total_time += start.elapsed();
        clear_store(&store).await;
    }
    total_time
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/src/test_utils/test_views.rs | linera-views/src/test_utils/test_views.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Some [`View`][`crate::views::View`]s that are easy to use with test cases.
use std::{
collections::{HashMap, HashSet},
fmt::Debug,
};
use allocative::Allocative;
use crate::{
self as linera_views,
bucket_queue_view::BucketQueueView,
collection_view::CollectionView,
context::MemoryContext,
log_view::LogView,
map_view::MapView,
queue_view::QueueView,
register_view::RegisterView,
set_view::SetView,
views::{ClonableView, RootView},
ViewError,
};
/// A [`View`][`crate::views::View`] to be used in test cases.
///
/// Each implementor wraps a single view type and knows how to stage an
/// initial state, stage further changes, and read back its state as a plain
/// Rust value, so tests can compare persisted versus discarded changes.
pub trait TestView: RootView<Context = MemoryContext<()>> + ClonableView {
    /// Representation of the view's state.
    type State: Debug + Eq + Send;

    /// Performs some initial changes to the view, staging them, and returning a representation of
    /// the view's state.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError>;

    /// Stages some changes to the view that won't be persisted during the test.
    ///
    /// Assumes that the current view state is the initially staged changes. Returns the updated
    /// state.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError>;

    /// Stages some changes to the view that will be persisted during the test.
    ///
    /// Assumes that the current view state is the initially staged changes. Returns the updated
    /// state.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError>;

    /// Reads the view's current state.
    async fn read(&self) -> Result<Self::State, ViewError>;
}
/// Wrapper to test with a [`RegisterView`].
#[derive(RootView, ClonableView)]
pub struct TestRegisterView<C> {
    byte: RegisterView<C, u8>,
}

impl TestView for TestRegisterView<MemoryContext<()>> {
    type State = u8;

    /// Sets the register to an arbitrary initial value and returns it.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        let initial = 82;
        self.byte.set(initial);
        Ok(initial)
    }

    /// Overwrites the register with a value the test is expected to discard.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let discarded = 209;
        self.byte.set(discarded);
        Ok(discarded)
    }

    /// Overwrites the register with a value the test is expected to persist.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let persisted = 15;
        self.byte.set(persisted);
        Ok(persisted)
    }

    /// Returns the register's current value.
    async fn read(&self) -> Result<Self::State, ViewError> {
        Ok(*self.byte.get())
    }
}
/// The values pushed initially by both the log and queue test views.
const INITIAL_LOG_QUEUE_VIEW_CHANGES: &[u16] = &[1, 2, 3, 4, 5];

/// Wrapper to test with a [`LogView`].
#[derive(RootView, ClonableView)]
pub struct TestLogView<C> {
    log: LogView<C, u16>,
}

impl TestView for TestLogView<MemoryContext<()>> {
    type State = Vec<u16>;

    /// Pushes the initial values into the log.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for value in INITIAL_LOG_QUEUE_VIEW_CHANGES {
            self.log.push(*value);
        }
        Ok(INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec())
    }

    /// Appends values the test is expected to roll back.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let new_values = [10_000, 20_000, 30_000];
        for value in new_values {
            self.log.push(value);
        }
        Ok(INITIAL_LOG_QUEUE_VIEW_CHANGES
            .iter()
            .copied()
            .chain(new_values)
            .collect())
    }

    /// Appends values the test is expected to persist.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let new_values = [201, 1, 50_050];
        for value in new_values {
            self.log.push(value);
        }
        Ok(INITIAL_LOG_QUEUE_VIEW_CHANGES
            .iter()
            .copied()
            .chain(new_values)
            .collect())
    }

    /// Reads the full log contents.
    async fn read(&self) -> Result<Self::State, ViewError> {
        self.log.read(..).await
    }
}
/// The initial `(key, value)` entries used by both the map and collection test views.
const INITIAL_MAP_COLLECTION_VIEW_CHANGES: &[(i32, &str)] = &[
    (0, "zero"),
    (-1, "minus one"),
    (2, "two"),
    (-3, "minus three"),
    (4, "four"),
    (-5, "minus five"),
];

/// Wrapper to test with a [`MapView`].
#[derive(RootView, ClonableView)]
pub struct TestMapView<C> {
    map: MapView<C, i32, String>,
}

impl TestView for TestMapView<MemoryContext<()>> {
    type State = HashMap<i32, String>;

    /// Inserts the initial entries into the map.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for (key, value) in INITIAL_MAP_COLLECTION_VIEW_CHANGES {
            self.map.insert(key, value.to_string())?;
        }
        Ok(INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .map(|(key, value)| (*key, value.to_string()))
            .collect::<HashMap<_, _>>())
    }

    /// Inserts two new entries and removes two initial ones; these changes
    /// are expected to be rolled back by the test.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let new_entries = [(-1_000_000, "foo"), (2_000_000, "bar")]
            .into_iter()
            .map(|(key, value)| (key, value.to_owned()));
        let entries_to_remove = [0, -3];
        for (key, value) in new_entries.clone() {
            self.map.insert(&key, value)?;
        }
        for key in entries_to_remove {
            self.map.remove(&key)?;
        }
        // Expected state: the initial entries minus the removed keys, plus
        // the new entries.
        let new_state = INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .filter(|(key, _)| !entries_to_remove.contains(key))
            .map(|(key, value)| (*key, value.to_string()))
            .chain(new_entries)
            .collect();
        Ok(new_state)
    }

    /// Same shape as `stage_changes_to_be_discarded` with different entries;
    /// these changes are expected to be persisted.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let new_entries = [(1_234, "first new entry"), (-2_101_010, "second_new_entry")]
            .into_iter()
            .map(|(key, value)| (key, value.to_owned()));
        let entries_to_remove = [-1, 2, 4];
        for (key, value) in new_entries.clone() {
            self.map.insert(&key, value)?;
        }
        for key in entries_to_remove {
            self.map.remove(&key)?;
        }
        let new_state = INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .filter(|(key, _)| !entries_to_remove.contains(key))
            .map(|(key, value)| (*key, value.to_string()))
            .chain(new_entries)
            .collect();
        Ok(new_state)
    }

    /// Collects every (index, value) pair of the map into a `HashMap`.
    async fn read(&self) -> Result<Self::State, ViewError> {
        let mut state = HashMap::new();
        self.map
            .for_each_index_value(|key, value| {
                let value = value.into_owned();
                state.insert(key, value);
                Ok(())
            })
            .await?;
        Ok(state)
    }
}
/// Wrapper to test with a [`SetView`].
#[derive(RootView, ClonableView)]
pub struct TestSetView<C> {
    set: SetView<C, i32>,
}

/// The initial members of the set test view.
const INITIAL_SET_VIEW_CHANGES: &[i32] = &[0, -1, 2, -3, 4, -5];

impl TestView for TestSetView<MemoryContext<()>> {
    type State = HashSet<i32>;

    /// Inserts the initial members into the set.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for key in INITIAL_SET_VIEW_CHANGES {
            self.set.insert(key)?;
        }
        Ok(INITIAL_SET_VIEW_CHANGES.iter().copied().collect())
    }

    /// Inserts two new members and removes two initial ones; these changes
    /// are expected to be rolled back by the test.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let mut state = INITIAL_SET_VIEW_CHANGES
            .iter()
            .copied()
            .collect::<HashSet<_>>();
        let new_entries = [-1_000_000, 2_000_000];
        let entries_to_remove = [0, -3];
        for key in new_entries {
            self.set.insert(&key)?;
            state.insert(key);
        }
        for key in entries_to_remove {
            self.set.remove(&key)?;
            state.remove(&key);
        }
        Ok(state)
    }

    /// Same shape as `stage_changes_to_be_discarded` with different members;
    /// these changes are expected to be persisted.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let mut state = INITIAL_SET_VIEW_CHANGES
            .iter()
            .copied()
            .collect::<HashSet<_>>();
        let new_entries = [1_234, -2_101_010];
        let entries_to_remove = [-1, 2, 4];
        for key in new_entries {
            self.set.insert(&key)?;
            state.insert(key);
        }
        for key in entries_to_remove {
            self.set.remove(&key)?;
            state.remove(&key);
        }
        Ok(state)
    }

    /// Reads all members of the set.
    async fn read(&self) -> Result<Self::State, ViewError> {
        let indices = self.set.indices().await?;
        Ok(indices.into_iter().collect())
    }
}
/// Wrapper to test with a [`CollectionView`].
#[derive(RootView, ClonableView)]
pub struct TestCollectionView<C> {
    collection: CollectionView<C, i32, RegisterView<C, String>>,
}

impl TestView for TestCollectionView<MemoryContext<()>> {
    type State = HashMap<i32, String>;

    /// Creates one register entry per initial (key, value) pair.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for (key, value) in INITIAL_MAP_COLLECTION_VIEW_CHANGES {
            self.collection
                .load_entry_mut(key)
                .await?
                .set(value.to_string());
        }
        Ok(INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .map(|(key, value)| (*key, value.to_string()))
            .collect::<HashMap<_, _>>())
    }

    /// Adds two entries and removes two initial ones; these changes are
    /// expected to be rolled back by the test.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let new_entries = [(-1_000_000, "foo"), (2_000_000, "bar")]
            .into_iter()
            .map(|(key, value)| (key, value.to_owned()));
        let entries_to_remove = [0, -3];
        for (key, value) in new_entries.clone() {
            self.collection.load_entry_mut(&key).await?.set(value);
        }
        for key in entries_to_remove {
            self.collection.remove_entry(&key)?;
        }
        // Expected state: the initial entries minus the removed keys, plus
        // the new entries.
        let new_state = INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .filter(|(key, _)| !entries_to_remove.contains(key))
            .map(|(key, value)| (*key, value.to_string()))
            .chain(new_entries)
            .collect();
        Ok(new_state)
    }

    /// Same shape as `stage_changes_to_be_discarded` with different entries;
    /// these changes are expected to be persisted.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let new_entries = [(1_234, "first new entry"), (-2_101_010, "second_new_entry")]
            .into_iter()
            .map(|(key, value)| (key, value.to_owned()));
        let entries_to_remove = [-1, 2, 4];
        for (key, value) in new_entries.clone() {
            self.collection.load_entry_mut(&key).await?.set(value);
        }
        for key in entries_to_remove {
            self.collection.remove_entry(&key)?;
        }
        let new_state = INITIAL_MAP_COLLECTION_VIEW_CHANGES
            .iter()
            .filter(|(key, _)| !entries_to_remove.contains(key))
            .map(|(key, value)| (*key, value.to_string()))
            .chain(new_entries)
            .collect();
        Ok(new_state)
    }

    /// Reads every entry of the collection into a `HashMap`.
    async fn read(&self) -> Result<Self::State, ViewError> {
        let indices = self.collection.indices().await?;
        let mut state = HashMap::with_capacity(indices.len());
        for index in indices {
            if let Some(value) = self.collection.try_load_entry(&index).await? {
                state.insert(index, value.get().clone());
            }
        }
        Ok(state)
    }
}
/// Wrapper to test with a [`QueueView`].
#[derive(RootView, ClonableView)]
pub struct TestQueueView<C> {
    queue: QueueView<C, u16>,
}

impl TestView for TestQueueView<MemoryContext<()>> {
    type State = Vec<u16>;

    /// Pushes the initial values onto the queue.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for value in INITIAL_LOG_QUEUE_VIEW_CHANGES {
            self.queue.push_back(*value);
        }
        Ok(INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec())
    }

    /// Pushes three values and pops two from the front; these changes are
    /// expected to be rolled back by the test.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let mut initial_state = INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec();
        let new_values = [10_000, 20_000, 30_000];
        for value in new_values {
            self.queue.push_back(value);
            // Mirror the push in the in-memory model.
            initial_state.push(value);
        }
        self.queue.delete_front();
        initial_state.remove(0);
        self.queue.delete_front();
        initial_state.remove(0);
        Ok(initial_state)
    }

    /// Pushes four values and pops one from the front; these changes are
    /// expected to be persisted.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let mut initial_state = INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec();
        let new_values = [201, 1, 50_050, 203];
        for value in new_values {
            self.queue.push_back(value);
            initial_state.push(value);
        }
        self.queue.delete_front();
        initial_state.remove(0);
        Ok(initial_state)
    }

    /// Reads all elements currently in the queue, front to back.
    async fn read(&self) -> Result<Self::State, ViewError> {
        self.queue.elements().await
    }
}
/// Wrapper to test with a [`BucketQueueView`].
#[derive(RootView, ClonableView, Allocative)]
pub struct TestBucketQueueView<C> {
    // Bucket size of 2 forces elements to span several buckets.
    queue: BucketQueueView<C, u16, 2>,
}

impl TestView for TestBucketQueueView<MemoryContext<()>> {
    type State = Vec<u16>;

    /// Pushes the initial values onto the queue.
    async fn stage_initial_changes(&mut self) -> Result<Self::State, ViewError> {
        for value in INITIAL_LOG_QUEUE_VIEW_CHANGES {
            self.queue.push_back(*value);
        }
        Ok(INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec())
    }

    /// Pushes three values and pops two from the front; these changes are
    /// expected to be rolled back by the test.
    async fn stage_changes_to_be_discarded(&mut self) -> Result<Self::State, ViewError> {
        let mut initial_state = INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec();
        let new_values = [10_000, 20_000, 30_000];
        for value in new_values {
            self.queue.push_back(value);
            // Mirror the push in the in-memory model.
            initial_state.push(value);
        }
        self.queue.delete_front().await?;
        initial_state.remove(0);
        self.queue.delete_front().await?;
        initial_state.remove(0);
        Ok(initial_state)
    }

    /// Pushes four values and pops one from the front; these changes are
    /// expected to be persisted.
    async fn stage_changes_to_be_persisted(&mut self) -> Result<Self::State, ViewError> {
        let mut initial_state = INITIAL_LOG_QUEUE_VIEW_CHANGES.to_vec();
        let new_values = [201, 1, 50_050, 203];
        for value in new_values {
            self.queue.push_back(value);
            initial_state.push(value);
        }
        self.queue.delete_front().await?;
        initial_state.remove(0);
        Ok(initial_state)
    }

    /// Reads all elements currently in the queue, front to back.
    async fn read(&self) -> Result<Self::State, ViewError> {
        self.queue.elements().await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/tests/views_tests.rs | linera-views/tests/views_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeSet;
use anyhow::Result;
#[cfg(with_dynamodb)]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(with_rocksdb)]
use linera_views::rocks_db::RocksDbDatabase;
#[cfg(with_scylladb)]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{
batch::{
Batch, WriteOperation,
WriteOperation::{Delete, DeletePrefix, Put},
},
collection_view::HashedCollectionView,
context::{Context, MemoryContext, ViewContext},
key_value_store_view::{KeyValueStoreView, ViewContainer},
log_view::HashedLogView,
lru_caching::LruCachingMemoryDatabase,
map_view::{ByteMapView, HashedMapView},
memory::MemoryDatabase,
queue_view::HashedQueueView,
random::make_deterministic_rng,
reentrant_collection_view::HashedReentrantCollectionView,
register_view::HashedRegisterView,
set_view::HashedSetView,
store::{KeyValueDatabase, TestKeyValueDatabase as _, WritableKeyValueStore as _},
test_utils::{
get_random_byte_vector, get_random_key_value_operations, get_random_key_values,
span_random_reordering_put_delete,
},
views::{CryptoHashRootView, HashableView, Hasher, RootView, View},
ViewError,
};
use rand::{Rng, RngCore};
/// Aggregate test state combining one instance of every hashed view type.
/// NOTE(review): field order matters — the derived root hash visits fields in
/// declaration order, so do not reorder.
#[derive(CryptoHashRootView)]
pub struct StateView<C> {
    pub x1: HashedRegisterView<C, u64>,
    pub x2: HashedRegisterView<C, u32>,
    pub log: HashedLogView<C, u32>,
    pub map: HashedMapView<C, String, usize>,
    pub set: HashedSetView<C, usize>,
    pub queue: HashedQueueView<C, u64>,
    pub collection: HashedCollectionView<C, String, HashedLogView<C, u32>>,
    // Collection nested two levels deep (collection of collections of registers).
    pub collection2: HashedCollectionView<
        C,
        String,
        HashedCollectionView<C, String, HashedRegisterView<C, u32>>,
    >,
    pub collection3: HashedCollectionView<C, String, HashedQueueView<C, u64>>,
    // Reentrant collection variant (entry access guarded at runtime).
    pub collection4: HashedReentrantCollectionView<C, String, HashedQueueView<C, u64>>,
    pub key_value_store: KeyValueStoreView<C>,
}
/// Abstraction over the storage backends these tests run against.
#[expect(async_fn_in_trait)]
pub trait StateStorage {
    /// View context carrying the chain id (`usize`) as extra data.
    type Context: Context<Extra = usize> + 'static;
    /// Connects to a fresh test namespace of the backend.
    async fn new() -> Self;
    /// Loads the state view of chain `id`.
    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError>;
}
/// Plain in-memory storage backend for the tests.
pub struct MemoryTestStorage {
    database: MemoryDatabase,
    accessed_chains: BTreeSet<usize>,
}

impl StateStorage for MemoryTestStorage {
    type Context = MemoryContext<usize>;

    async fn new() -> Self {
        MemoryTestStorage {
            database: MemoryDatabase::connect_test_namespace().await.unwrap(),
            accessed_chains: BTreeSet::new(),
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        // Record every chain touched so tests can assert on it afterwards.
        self.accessed_chains.insert(id);
        let root_key = bcs::to_bytes(&id)?;
        let store = self.database.open_exclusive(&root_key)?;
        StateView::load(ViewContext::create_root_context(store, id).await?).await
    }
}
/// Storage backend layered on top of a `KeyValueStoreView` held in memory.
pub struct KeyValueStoreTestStorage {
    accessed_chains: BTreeSet<usize>,
    store: ViewContainer<MemoryContext<()>>,
}

impl StateStorage for KeyValueStoreTestStorage {
    type Context = ViewContext<usize, ViewContainer<MemoryContext<()>>>;

    async fn new() -> Self {
        let store = ViewContainer::new(MemoryContext::new_for_testing(()))
            .await
            .unwrap();
        KeyValueStoreTestStorage {
            accessed_chains: BTreeSet::new(),
            store,
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        self.accessed_chains.insert(id);
        // Scope the view under the serialized chain id.
        let base_key = bcs::to_bytes(&id)?;
        let context = Self::Context::new_unchecked(self.store.clone(), base_key, id);
        StateView::load(context).await
    }
}
/// Memory storage wrapped in an LRU caching layer.
pub struct LruMemoryStorage {
    database: LruCachingMemoryDatabase,
    accessed_chains: BTreeSet<usize>,
}

impl StateStorage for LruMemoryStorage {
    type Context = ViewContext<usize, <LruCachingMemoryDatabase as KeyValueDatabase>::Store>;

    async fn new() -> Self {
        LruMemoryStorage {
            database: LruCachingMemoryDatabase::connect_test_namespace()
                .await
                .unwrap(),
            accessed_chains: BTreeSet::new(),
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        self.accessed_chains.insert(id);
        let store = self.database.open_exclusive(&bcs::to_bytes(&id)?)?;
        StateView::load(ViewContext::create_root_context(store, id).await?).await
    }
}
/// RocksDB-backed storage for the tests.
#[cfg(with_rocksdb)]
pub struct RocksDbTestStorage {
    database: RocksDbDatabase,
    accessed_chains: BTreeSet<usize>,
}

#[cfg(with_rocksdb)]
impl StateStorage for RocksDbTestStorage {
    type Context = ViewContext<usize, <RocksDbDatabase as KeyValueDatabase>::Store>;

    async fn new() -> Self {
        RocksDbTestStorage {
            database: RocksDbDatabase::connect_test_namespace().await.unwrap(),
            accessed_chains: BTreeSet::new(),
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        self.accessed_chains.insert(id);
        // Root each chain's state under its serialized id.
        let root_key = bcs::to_bytes(&id)?;
        let store = self.database.open_exclusive(&root_key)?;
        StateView::load(ViewContext::create_root_context(store, id).await?).await
    }
}
/// ScyllaDB-backed storage for the tests.
#[cfg(with_scylladb)]
pub struct ScyllaDbTestStorage {
    database: ScyllaDbDatabase,
    accessed_chains: BTreeSet<usize>,
}

#[cfg(with_scylladb)]
impl StateStorage for ScyllaDbTestStorage {
    type Context = ViewContext<usize, <ScyllaDbDatabase as KeyValueDatabase>::Store>;

    async fn new() -> Self {
        ScyllaDbTestStorage {
            database: ScyllaDbDatabase::connect_test_namespace().await.unwrap(),
            accessed_chains: BTreeSet::new(),
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        self.accessed_chains.insert(id);
        // Root each chain's state under its serialized id.
        let root_key = bcs::to_bytes(&id)?;
        let store = self.database.open_exclusive(&root_key)?;
        StateView::load(ViewContext::create_root_context(store, id).await?).await
    }
}
/// DynamoDB-backed storage for the tests.
#[cfg(with_dynamodb)]
pub struct DynamoDbTestStorage {
    database: DynamoDbDatabase,
    accessed_chains: BTreeSet<usize>,
}

#[cfg(with_dynamodb)]
impl StateStorage for DynamoDbTestStorage {
    type Context = ViewContext<usize, <DynamoDbDatabase as KeyValueDatabase>::Store>;

    async fn new() -> Self {
        DynamoDbTestStorage {
            database: DynamoDbDatabase::connect_test_namespace().await.unwrap(),
            accessed_chains: BTreeSet::new(),
        }
    }

    async fn load(&mut self, id: usize) -> Result<StateView<Self::Context>, ViewError> {
        self.accessed_chains.insert(id);
        // Root each chain's state under its serialized id.
        let root_key = bcs::to_bytes(&id)?;
        let store = self.database.open_exclusive(&root_key)?;
        StateView::load(ViewContext::create_root_context(store, id).await?).await
    }
}
/// Flags selecting which views of [`StateView`] a test run exercises.
#[derive(Debug)]
pub struct TestConfig {
    with_x1: bool,
    with_x2: bool,
    // Whether to save intermediate state mid-test.
    with_flush: bool,
    with_map: bool,
    with_set: bool,
    with_queue: bool,
    with_log: bool,
    with_collection: bool,
}
impl Default for TestConfig {
    // The default configuration enables every view and flushing.
    fn default() -> Self {
        Self {
            with_x1: true,
            with_x2: true,
            with_flush: true,
            with_map: true,
            with_set: true,
            with_queue: true,
            with_log: true,
            with_collection: true,
        }
    }
}
impl TestConfig {
    /// Returns a spread of configurations exercising different view subsets.
    fn samples() -> Vec<TestConfig> {
        // Small constructor to keep each sample compact.
        fn config(
            with_x1: bool,
            with_x2: bool,
            with_flush: bool,
            with_map: bool,
            with_set: bool,
            with_queue: bool,
            with_log: bool,
            with_collection: bool,
        ) -> TestConfig {
            TestConfig {
                with_x1,
                with_x2,
                with_flush,
                with_map,
                with_set,
                with_queue,
                with_log,
                with_collection,
            }
        }
        vec![
            // Nothing enabled.
            config(false, false, false, false, false, false, false, false),
            // Only the two registers.
            config(true, true, false, false, false, false, false, false),
            // Flushing plus queue and log.
            config(false, false, true, false, false, true, true, false),
            // Flushing plus map, set and collection.
            config(false, false, true, true, true, false, false, true),
            // Everything enabled.
            TestConfig::default(),
        ]
    }
}
/// Runs the full battery of view operations selected by `config` against
/// `store`, returning the state hash after the staged changes so that
/// different backends can be checked for agreement.
#[cfg(test)]
async fn test_store<S>(
    store: &mut S,
    config: &TestConfig,
) -> Result<<sha3::Sha3_256 as Hasher>::Output>
where
    S: StateStorage,
{
    // Boxed because this future is large — presumably to keep stack usage low.
    Box::pin(async move {
        // Hash of the pristine, empty state.
        let default_hash = {
            let view = store.load(1).await?;
            view.hash().await?
        };
        // Pass 1: stage changes and drop the view WITHOUT saving — nothing
        // must be persisted.
        {
            let mut view = store.load(1).await?;
            if config.with_x1 {
                assert_eq!(view.x1.extra(), &1);
            }
            let hash = view.hash().await?;
            assert_eq!(hash, default_hash);
            if config.with_x1 {
                assert_eq!(view.x1.get(), &0);
                view.x1.set(1);
            }
            // A rollback must restore the original hash.
            view.rollback();
            assert_eq!(view.hash().await?, hash);
            if config.with_x2 {
                view.x2.set(2);
            }
            if config.with_x2 {
                assert_ne!(view.hash().await?, hash);
            }
            if config.with_log {
                view.log.push(4);
            }
            if config.with_queue {
                view.queue.push_back(8);
                assert_eq!(view.queue.front().await?, Some(8));
                view.queue.push_back(7);
                view.queue.delete_front();
            }
            if config.with_map {
                view.map.insert("Hello", 5)?;
                assert_eq!(view.map.indices().await?, vec!["Hello".to_string()]);
                let mut count = 0;
                view.map
                    .for_each_index(|_index| {
                        count += 1;
                        Ok(())
                    })
                    .await?;
                assert_eq!(count, 1);
            }
            if config.with_set {
                view.set.insert(&42)?;
                assert_eq!(view.set.indices().await?, vec![42]);
                let mut count = 0;
                view.set
                    .for_each_index(|_index| {
                        count += 1;
                        Ok(())
                    })
                    .await?;
                assert_eq!(count, 1);
            }
            // Reads reflect the staged (unsaved) changes.
            if config.with_x1 {
                assert_eq!(view.x1.get(), &0);
            }
            if config.with_x2 {
                assert_eq!(view.x2.get(), &2);
            }
            if config.with_log {
                assert_eq!(view.log.read(0..10).await?, vec![4]);
            }
            if config.with_queue {
                assert_eq!(view.queue.read_front(10).await?, vec![7]);
            }
            if config.with_map {
                assert_eq!(view.map.get("Hello").await?, Some(5));
            }
            if config.with_set {
                assert!(view.set.contains(&42).await?);
            }
            if config.with_collection {
                {
                    let subview = view.collection.load_entry_mut("hola").await?;
                    subview.push(17);
                    subview.push(18);
                    assert_eq!(view.collection.indices().await?, vec!["hola".to_string()]);
                    let mut count = 0;
                    view.collection
                        .for_each_index(|_index| {
                            count += 1;
                            Ok(())
                        })
                        .await?;
                    assert_eq!(count, 1);
                }
                let subview = view.collection.try_load_entry("hola").await?.unwrap();
                assert_eq!(subview.read(0..10).await?, vec![17, 18]);
            }
        };
        // Pass 2: state must still be empty (pass 1 never saved); stage the
        // changes again and persist them this time.
        let staged_hash = {
            let mut view = store.load(1).await?;
            assert_eq!(view.hash().await?, default_hash);
            if config.with_x1 {
                assert_eq!(view.x1.get(), &0);
            }
            if config.with_x2 {
                assert_eq!(view.x2.get(), &0);
            }
            if config.with_log {
                assert_eq!(view.log.read(0..10).await?, Vec::<u32>::new());
            }
            if config.with_queue {
                assert_eq!(view.queue.read_front(10).await?, Vec::<u64>::new());
            }
            if config.with_map {
                assert_eq!(view.map.get("Hello").await?, None);
            }
            if config.with_set {
                assert!(!view.set.contains(&42).await?);
            }
            if config.with_collection {
                let subview = view.collection.load_entry_mut("hola").await?;
                assert_eq!(subview.read(0..10).await?, Vec::<u32>::new());
                let subview = view.collection2.load_entry_mut("ciao").await?;
                let subsubview = subview.load_entry_mut("!").await?;
                subsubview.set(3);
                assert_eq!(subsubview.get(), &3);
            }
            if config.with_x1 {
                view.x1.set(1);
            }
            if config.with_log {
                view.log.push(4);
            }
            if config.with_queue {
                view.queue.push_back(7);
            }
            if config.with_map {
                view.map.insert("Hello", 5)?;
                view.map.insert("Hi", 2)?;
                view.map.remove("Hi")?;
            }
            if config.with_set {
                view.set.insert(&42)?;
                view.set.insert(&59)?;
                view.set.remove(&59)?;
            }
            if config.with_collection {
                let subview = view.collection.load_entry_mut("hola").await?;
                subview.push(17);
                subview.push(18);
            }
            if config.with_flush {
                view.save().await?;
            }
            // Hashing must be idempotent and unaffected by saving.
            let hash1 = view.hash().await?;
            let hash2 = view.hash().await?;
            view.save().await?;
            let hash3 = view.hash().await?;
            assert_eq!(hash1, hash2);
            assert_eq!(hash1, hash3);
            hash1
        };
        // Pass 3: reload and verify the persisted state, then mutate further.
        {
            let mut view = store.load(1).await?;
            let stored_hash = view.hash().await?;
            assert_eq!(staged_hash, stored_hash);
            if config.with_x1 {
                assert_eq!(view.x1.get(), &1);
            }
            if config.with_x2 {
                assert_eq!(view.x2.get(), &0);
            }
            if config.with_log {
                assert_eq!(view.log.read(0..10).await?, vec![4]);
            }
            if config.with_queue {
                view.queue.push_back(8);
                assert_eq!(view.queue.read_front(10).await?, vec![7, 8]);
                assert_eq!(view.queue.read_front(1).await?, vec![7]);
                assert_eq!(view.queue.read_back(10).await?, vec![7, 8]);
                assert_eq!(view.queue.read_back(1).await?, vec![8]);
                assert_eq!(view.queue.front().await?, Some(7));
                assert_eq!(view.queue.back().await?, Some(8));
                assert_eq!(view.queue.count(), 2);
                view.queue.delete_front();
                assert_eq!(view.queue.front().await?, Some(8));
                view.queue.delete_front();
                assert_eq!(view.queue.front().await?, None);
                assert_eq!(view.queue.count(), 0);
                view.queue.push_back(13);
            }
            if config.with_map {
                assert_eq!(view.map.get("Hello").await?, Some(5));
                assert_eq!(view.map.get("Hi").await?, None);
            }
            if config.with_set {
                assert!(view.set.contains(&42).await?);
                assert!(!view.set.contains(&59).await?);
            }
            if config.with_collection {
                // Various range reads on the persisted log entry.
                let subview = view.collection.try_load_entry("hola").await?.unwrap();
                assert_eq!(subview.read(0..10).await?, vec![17, 18]);
                assert_eq!(subview.read(..).await?, vec![17, 18]);
                assert_eq!(subview.read(1..).await?, vec![18]);
                assert_eq!(subview.read(..=0).await?, vec![17]);
            }
            if config.with_flush {
                view.save().await?;
            }
            if config.with_collection {
                let subview = view.collection2.load_entry_mut("ciao").await?;
                let subsubview = subview.try_load_entry("!").await?.unwrap();
                subview.try_load_entry("!").await?.unwrap();
                assert_eq!(subsubview.get(), &3);
                assert_eq!(view.collection.indices().await?, vec!["hola".to_string()]);
                view.collection.remove_entry("hola")?;
            }
            // Only when everything is enabled is a hash change guaranteed here.
            if config.with_x1
                && config.with_x2
                && config.with_map
                && config.with_set
                && config.with_queue
                && config.with_log
                && config.with_collection
            {
                assert_ne!(view.hash().await?, stored_hash);
            }
            view.save().await?;
        }
        // Pass 4: reentrant collection — a second mutable load of the same
        // entry must fail while the first guard is alive.
        {
            let mut view = store.load(1).await?;
            if config.with_collection {
                {
                    let mut subview = view.collection4.try_load_entry_mut("hola").await?;
                    assert_eq!(subview.read_front(10).await?, Vec::<u64>::new());
                    assert!(view.collection4.try_load_entry_mut("hola").await.is_err());
                    if config.with_queue {
                        subview.push_back(13);
                        assert_eq!(subview.front().await?, Some(13));
                        subview.delete_front();
                        assert_eq!(subview.front().await?, None);
                        assert_eq!(subview.count(), 0);
                    }
                }
                {
                    let subview = view.collection4.try_load_entry("hola").await?.unwrap();
                    assert_eq!(subview.count(), 0);
                    // Read-only loads may coexist.
                    assert!(view.collection4.try_load_entry("hola").await.is_ok());
                }
            }
        }
        // `get_mut_or_default` semantics, including rollback of an in-place edit.
        if config.with_map {
            {
                let mut view = store.load(1).await?;
                let value = view.map.get_mut_or_default("Geia").await?;
                assert_eq!(*value, 0);
                *value = 42;
                let value = view.map.get_mut_or_default("Geia").await?;
                assert_eq!(*value, 42);
                view.save().await?;
            }
            {
                let view = store.load(1).await?;
                assert_eq!(view.map.get("Geia").await?, Some(42));
            }
            {
                let mut view = store.load(1).await?;
                let value = view.map.get_mut_or_default("Geia").await?;
                assert_eq!(*value, 42);
                *value = 43;
                view.rollback();
                let value = view.map.get_mut_or_default("Geia").await?;
                assert_eq!(*value, 42);
            }
        }
        // `get_mut` on an entry staged in the same session.
        if config.with_map {
            {
                let mut view = store.load(1).await?;
                view.map.insert("Konnichiwa", 5)?;
                let value = view.map.get_mut("Konnichiwa").await?.unwrap();
                *value = 6;
                view.save().await?;
            }
            {
                let view = store.load(1).await?;
                assert_eq!(view.map.get("Konnichiwa").await?, Some(6));
            }
        }
        // Final pass: verify leftovers, then clear and persist the wipe.
        {
            let mut view = store.load(1).await?;
            if config.with_collection {
                let subview = view.collection.load_entry_mut("hola").await?;
                assert_eq!(subview.read(0..10).await?, Vec::<u32>::new());
            }
            if config.with_queue {
                assert_eq!(view.queue.front().await?, Some(13));
                view.queue.delete_front();
                assert_eq!(view.queue.front().await?, None);
                assert_eq!(view.queue.count(), 0);
            }
            view.clear();
            view.save().await?;
        }
        Ok(staged_hash)
    })
    .await
}
/// Root view wrapping a single byte-keyed map, used by `test_byte_map_view`.
#[derive(CryptoHashRootView)]
pub struct ByteMapStateView<C> {
    pub map: ByteMapView<C, u8>,
}
/// Checks `ByteMapView` prefix removal, both before and after persisting.
#[tokio::test]
async fn test_byte_map_view() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    // Persist two entries under different prefixes.
    {
        let mut view = ByteMapStateView::load(context.clone()).await?;
        view.map.insert(vec![0, 1], 5);
        view.map.insert(vec![2, 3], 23);
        view.save().await?;
    }
    // Remove one prefix; the other entry must survive.
    {
        let mut view = ByteMapStateView::load(context.clone()).await?;
        view.map.remove_by_prefix(vec![0]);
        assert_eq!(*view.map.get_mut_or_default(&[0, 1]).await?, 0);
        assert_eq!(view.map.get_mut(&[2, 3]).await?, Some(&mut 23));
        view.save().await?;
    }
    // Remove the remaining prefix; the entry disappears.
    {
        let mut view = ByteMapStateView::load(context.clone()).await?;
        view.map.remove_by_prefix(vec![2]);
        assert_eq!(view.map.get_mut(&[2, 3]).await?, None);
    }
    Ok(())
}
/// Runs `test_store` for one config on the LRU-caching memory backend.
#[cfg(test)]
async fn test_views_in_lru_memory_param(config: &TestConfig) -> Result<()> {
    tracing::warn!("Testing config {:?} with lru memory", config);
    let mut storage = LruMemoryStorage::new().await;
    test_store(&mut storage, config).await?;
    // Only chain 1 should ever have been touched.
    assert_eq!(storage.accessed_chains.len(), 1);
    Ok(())
}

#[tokio::test]
async fn test_views_in_lru_memory() -> Result<()> {
    for config in &TestConfig::samples() {
        test_views_in_lru_memory_param(config).await?;
    }
    Ok(())
}
/// Runs `test_store` for one config on the plain memory backend.
#[cfg(test)]
async fn test_views_in_memory_param(config: &TestConfig) -> Result<()> {
    tracing::warn!("Testing config {:?} with memory", config);
    let mut storage = MemoryTestStorage::new().await;
    test_store(&mut storage, config).await?;
    // Only chain 1 should ever have been touched.
    assert_eq!(storage.accessed_chains.len(), 1);
    Ok(())
}

#[tokio::test]
async fn test_views_in_memory() -> Result<()> {
    for config in &TestConfig::samples() {
        test_views_in_memory_param(config).await?;
    }
    Ok(())
}
/// Runs `test_store` for one config on the key-value-store-view backend.
#[cfg(test)]
async fn test_views_in_key_value_store_view_memory_param(config: &TestConfig) -> Result<()> {
    tracing::warn!(
        "Testing config {:?} with key_value_store_view on memory",
        config
    );
    let mut storage = KeyValueStoreTestStorage::new().await;
    test_store(&mut storage, config).await?;
    Ok(())
}

#[tokio::test]
async fn test_views_in_key_value_store_view_memory() -> Result<()> {
    for config in &TestConfig::samples() {
        test_views_in_key_value_store_view_memory_param(config).await?;
    }
    Ok(())
}
/// Runs `test_store` on RocksDB and checks the hash agrees with memory.
#[cfg(with_rocksdb)]
#[cfg(test)]
async fn test_views_in_rocks_db_param(config: &TestConfig) -> Result<()> {
    tracing::warn!("Testing config {:?} with rocks_db", config);
    let mut rocks_storage = RocksDbTestStorage::new().await;
    let rocks_hash = test_store(&mut rocks_storage, config).await?;
    assert_eq!(rocks_storage.accessed_chains.len(), 1);
    // The same scenario on the memory backend must yield the same hash.
    let mut memory_storage = MemoryTestStorage::new().await;
    assert_eq!(rocks_hash, test_store(&mut memory_storage, config).await?);
    Ok(())
}

#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_views_in_rocks_db() -> Result<()> {
    for config in &TestConfig::samples() {
        test_views_in_rocks_db_param(config).await?;
    }
    Ok(())
}
/// Runs `test_store` on ScyllaDB and checks the hash agrees with memory.
#[cfg(with_scylladb)]
#[cfg(test)]
async fn test_views_in_scylla_db_param(config: &TestConfig) -> Result<()> {
    tracing::warn!("Testing config {:?} with scylla_db", config);
    let mut scylla_storage = ScyllaDbTestStorage::new().await;
    let scylla_hash = test_store(&mut scylla_storage, config).await?;
    assert_eq!(scylla_storage.accessed_chains.len(), 1);
    // The same scenario on the memory backend must yield the same hash.
    let mut memory_storage = MemoryTestStorage::new().await;
    assert_eq!(scylla_hash, test_store(&mut memory_storage, config).await?);
    Ok(())
}

#[cfg(with_scylladb)]
#[tokio::test]
async fn test_views_in_scylla_db() -> Result<()> {
    for config in &TestConfig::samples() {
        test_views_in_scylla_db_param(config).await?;
    }
    Ok(())
}
/// Runs `test_store` on DynamoDB (default config only) and checks the hash
/// agrees with the memory backend.
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_views_in_dynamo_db() -> Result<()> {
    let config = TestConfig::default();
    let mut dynamo_storage = DynamoDbTestStorage::new().await;
    let dynamo_hash = test_store(&mut dynamo_storage, &config).await?;
    assert_eq!(dynamo_storage.accessed_chains.len(), 1);
    let mut memory_storage = MemoryTestStorage::new().await;
    assert_eq!(dynamo_hash, test_store(&mut memory_storage, &config).await?);
    Ok(())
}
/// Checks that `rollback` undoes staged changes — including whole-view
/// clears — before a subsequent `save`.
#[cfg(with_rocksdb)]
#[cfg(test)]
async fn test_store_rollback_kernel<S>(store: &mut S) -> Result<()>
where
    S: StateStorage,
{
    Box::pin(async move {
        // Persist an initial state.
        {
            let mut view = store.load(1).await?;
            view.queue.push_back(8);
            view.map.insert("Hello", 5)?;
            view.collection.load_entry_mut("hola").await?.push(17);
            view.save().await?;
        }
        // Stage new changes, roll them back, then save: nothing may change.
        {
            let mut view = store.load(1).await?;
            view.queue.push_back(7);
            view.map.insert("Hello", 4)?;
            view.collection.load_entry_mut("DobryDen").await?.push(16);
            view.rollback();
            view.save().await?;
        }
        // Same with clearing everything before the rollback.
        {
            let mut view = store.load(1).await?;
            view.queue.clear();
            view.map.clear();
            view.collection.clear();
            view.rollback();
            view.save().await?;
        }
        // The initial state must still be intact.
        {
            let view = store.load(1).await?;
            assert_eq!(view.queue.front().await?, Some(8));
            assert_eq!(view.map.get("Hello").await?, Some(5));
            assert_eq!(view.collection.indices().await?, vec!["hola".to_string()]);
        }
        Ok(())
    })
    .await
}
/// Exercises the rollback kernel on both the memory and RocksDB backends.
#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_store_rollback() -> Result<()> {
    test_store_rollback_kernel(&mut MemoryTestStorage::new().await).await?;
    test_store_rollback_kernel(&mut RocksDbTestStorage::new().await).await?;
    Ok(())
}
/// Checks that removing a committed collection entry actually deletes it.
#[tokio::test]
async fn test_collection_removal() -> Result<()> {
    type EntryType = HashedRegisterView<MemoryContext<()>, u8>;
    type CollectionViewType = HashedCollectionView<MemoryContext<()>, u8, EntryType>;
    let context = MemoryContext::new_for_testing(());

    // Commit a dummy entry into the collection.
    let mut collection = CollectionViewType::load(context.clone()).await?;
    collection.load_entry_mut(&1).await?.set(1);
    let mut batch = Batch::new();
    collection.pre_save(&mut batch)?;
    collection.context().store().write_batch(batch).await?;
    collection.post_save();

    // Reload and commit the removal of the entry.
    let mut collection = CollectionViewType::load(context.clone()).await?;
    collection.remove_entry(&1)?;
    let mut batch = Batch::new();
    collection.pre_save(&mut batch)?;
    collection.context().store().write_batch(batch).await?;
    collection.post_save();

    // The index must be gone.
    let collection = CollectionViewType::load(context.clone()).await?;
    assert!(!collection.indices().await?.contains(&1));
    Ok(())
}
/// Removes a committed collection entry, optionally re-creating it with a new
/// value (`first_condition`) and optionally rolling everything back before
/// committing (`second_condition`), then checks the resulting state.
async fn test_removal_api_first_second_condition(
    first_condition: bool,
    second_condition: bool,
) -> Result<()> {
    type EntryType = HashedRegisterView<MemoryContext<()>, u8>;
    type CollectionViewType = HashedCollectionView<MemoryContext<()>, u8, EntryType>;
    let context = MemoryContext::new_for_testing(());

    // Commit an entry `1` holding the value `100`.
    let mut collection: CollectionViewType = HashedCollectionView::load(context.clone()).await?;
    collection.load_entry_mut(&1).await?.set(100);
    let mut batch = Batch::new();
    collection.pre_save(&mut batch)?;
    collection.context().store().write_batch(batch).await?;
    collection.post_save();

    // Reload and remove the entry, without committing yet.
    let mut collection: CollectionViewType = HashedCollectionView::load(context.clone()).await?;
    collection.remove_entry(&1)?;
    if first_condition {
        // Re-create the entry with a different value.
        collection.load_entry_mut(&1).await?.set(200);
    }
    if second_condition {
        // Rolling back leaves entry `1` at its committed value `100`.
        collection.rollback();
    }
    // Commit whatever is staged at this point.
    let mut batch = Batch::new();
    collection.pre_save(&mut batch)?;
    collection.context().store().write_batch(batch).await?;
    collection.post_save();

    // Check the outcome against the expected value.
    let mut collection: CollectionViewType = HashedCollectionView::load(context.clone()).await?;
    let expected = if second_condition {
        Some(100)
    } else if first_condition {
        Some(200)
    } else {
        None
    };
    if let Some(expected) = expected {
        assert_eq!(collection.load_entry_mut(&1).await?.get(), &expected);
    } else {
        assert!(!collection.indices().await?.contains(&1));
    }
    Ok(())
}
/// Runs the removal scenario for all four combinations of the two conditions.
#[tokio::test]
async fn test_removal_api() -> Result<()> {
    for (first_condition, second_condition) in
        [(false, false), (false, true), (true, false), (true, true)]
    {
        test_removal_api_first_second_condition(first_condition, second_condition).await?;
    }
    Ok(())
}
/// Inserts the given key/values into the map, key-value store and collection
/// views in order, occasionally saving, and returns the final state hash.
#[cfg(test)]
async fn compute_hash_unordered_put_view<S>(
    rng: &mut impl RngCore,
    store: &mut S,
    key_value_vector: Vec<(Vec<u8>, Vec<u8>)>,
) -> Result<<sha3::Sha3_256 as Hasher>::Output>
where
    S: StateStorage,
{
    let mut view = store.load(1).await?;
    for (key, value) in key_value_vector {
        let key_str = format!("{:?}", &key);
        let value_usize = (*value.first().unwrap()) as usize;
        view.map.insert(&key_str, value_usize)?;
        view.key_value_store.insert(key, value).await?;
        {
            let subview = view.collection.load_entry_mut(&key_str).await?;
            subview.push(value_usize as u32);
        }
        // Save with probability 1/20 to interleave persistence points.
        if rng.gen_range(0..20) == 0 {
            view.save().await?;
        }
    }
    Ok(view.hash().await?)
}
#[cfg(test)]
async fn compute_hash_unordered_putdelete_view<S>(
rng: &mut impl RngCore,
store: &mut S,
operations: Vec<WriteOperation>,
) -> Result<<sha3::Sha3_256 as Hasher>::Output>
where
S: StateStorage,
{
let mut view = store.load(1).await?;
for operation in operations {
match operation {
Put { key, value } => {
let key_str = format!("{:?}", &key);
let first_value = *value.first().unwrap();
let first_value_usize = first_value as usize;
let first_value_u64 = first_value as u64;
let mut tmp = *view.x1.get();
tmp += first_value_u64;
view.x1.set(tmp);
view.map.insert(&key_str, first_value_usize)?;
view.key_value_store.insert(key, value).await?;
{
let subview = view.collection.load_entry_mut(&key_str).await?;
subview.push(first_value as u32);
}
}
Delete { key } => {
let key_str = format!("{:?}", &key);
view.map.remove(&key_str)?;
view.key_value_store.remove(key).await?;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/tests/store_tests.rs | linera-views/tests/store_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_views::{
batch::Batch,
context::{Context as _, MemoryContext},
key_value_store_view::ViewContainer,
memory::MemoryDatabase,
random::make_deterministic_rng,
store::{ReadableKeyValueStore as _, TestKeyValueDatabase as _, WritableKeyValueStore as _},
test_utils::{
big_read_multi_values, get_random_test_scenarios, run_big_write_read, run_reads,
run_writes_from_blank, run_writes_from_state,
},
value_splitting::create_value_splitting_memory_store,
};
#[cfg(web)]
use wasm_bindgen_test::wasm_bindgen_test;
#[cfg(web)]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[cfg(any(with_dynamodb, with_scylladb))]
use linera_views::test_utils::access_admin_test;
/// Large `read_multi_values` stress test on memory (ignored by default).
#[ignore]
#[tokio::test]
async fn test_read_multi_values_memory() {
    let config = MemoryDatabase::new_test_config().await.unwrap();
    big_read_multi_values::<MemoryDatabase>(config, 2200000, 1000).await;
}

/// Same stress test against DynamoDB.
#[ignore]
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_read_multi_values_dynamo_db() {
    use linera_views::dynamo_db::DynamoDbDatabase;
    big_read_multi_values::<DynamoDbDatabase>(
        DynamoDbDatabase::new_test_config().await.unwrap(),
        22000000,
        1000,
    )
    .await;
}

/// Same stress test against ScyllaDB.
#[ignore]
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_read_multi_values_scylla_db() {
    use linera_views::scylla_db::ScyllaDbDatabase;
    big_read_multi_values::<ScyllaDbDatabase>(
        ScyllaDbDatabase::new_test_config().await.unwrap(),
        22200000,
        200,
    )
    .await;
}
/// Random read scenarios against the value-splitting memory store.
#[tokio::test]
async fn test_reads_test_memory() {
    for scenario in get_random_test_scenarios() {
        run_reads(create_value_splitting_memory_store(), scenario).await;
    }
}

/// Random read scenarios against the plain memory store.
#[tokio::test]
async fn test_reads_memory() {
    for scenario in get_random_test_scenarios() {
        run_reads(MemoryDatabase::new_test_store().await.unwrap(), scenario).await;
    }
}

/// Random read scenarios against RocksDB.
#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_reads_rocks_db() {
    for scenario in get_random_test_scenarios() {
        let kv_store = linera_views::rocks_db::RocksDbDatabase::new_test_store()
            .await
            .unwrap();
        run_reads(kv_store, scenario).await;
    }
}
/// Random read scenarios against DynamoDB via an exclusive store.
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_reads_dynamo_db() {
    use linera_views::store::KeyValueDatabase as _;
    for scenario in get_random_test_scenarios() {
        let database = linera_views::dynamo_db::DynamoDbDatabase::connect_test_namespace()
            .await
            .unwrap();
        // Run against the exclusive store rooted at the empty key.
        run_reads(database.open_exclusive(&[]).unwrap(), scenario).await;
    }
}

/// Random read scenarios against ScyllaDB via an exclusive store.
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_reads_scylla_db() {
    use linera_views::store::KeyValueDatabase as _;
    for scenario in get_random_test_scenarios() {
        let database = linera_views::scylla_db::ScyllaDbDatabase::connect_test_namespace()
            .await
            .unwrap();
        // Run against the exclusive store rooted at the empty key.
        run_reads(database.open_exclusive(&[]).unwrap(), scenario).await;
    }
}

/// Random read scenarios against ScyllaDB without an exclusive root key.
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_reads_scylla_db_no_root_key() {
    for scenario in get_random_test_scenarios() {
        let kv_store = linera_views::scylla_db::ScyllaDbDatabase::new_test_store()
            .await
            .unwrap();
        run_reads(kv_store, scenario).await;
    }
}

/// Random read scenarios against IndexedDB (browser only).
#[cfg(with_indexeddb)]
#[wasm_bindgen_test]
async fn test_reads_indexed_db() {
    for scenario in get_random_test_scenarios() {
        run_reads(
            linera_views::indexed_db::create_indexed_db_test_store().await,
            scenario,
        )
        .await;
    }
}
/// Random read scenarios against the key-value-store view wrapper.
#[tokio::test]
async fn test_reads_key_value_store_view_memory() {
    for scenario in get_random_test_scenarios() {
        let container = ViewContainer::new(MemoryContext::new_for_testing(()))
            .await
            .unwrap();
        run_reads(container, scenario).await;
    }
}

/// Regression scenario with keys that are prefixes of one another.
#[tokio::test]
async fn test_specific_reads_memory() {
    let store = MemoryDatabase::new_test_store().await.unwrap();
    run_reads(
        store,
        vec![
            (vec![0, 1, 255], Vec::new()),
            (vec![0, 1, 255, 37], Vec::new()),
            (vec![0, 2], Vec::new()),
            (vec![0, 2, 0], Vec::new()),
        ],
    )
    .await;
}
/// Write scenarios starting from an empty store, per backend.
#[tokio::test]
async fn test_test_memory_writes_from_blank() {
    run_writes_from_blank(&create_value_splitting_memory_store()).await;
}

#[tokio::test]
async fn test_memory_writes_from_blank() {
    run_writes_from_blank(&MemoryDatabase::new_test_store().await.unwrap()).await;
}

#[tokio::test]
async fn test_key_value_store_view_memory_writes_from_blank() {
    let container = ViewContainer::new(MemoryContext::new_for_testing(()))
        .await
        .unwrap();
    run_writes_from_blank(&container).await;
}

#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_rocks_db_writes_from_blank() {
    let kv_store = linera_views::rocks_db::RocksDbDatabase::new_test_store()
        .await
        .unwrap();
    run_writes_from_blank(&kv_store).await;
}

#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_dynamo_db_writes_from_blank() {
    let kv_store = linera_views::dynamo_db::DynamoDbDatabase::new_test_store()
        .await
        .unwrap();
    run_writes_from_blank(&kv_store).await;
}

#[cfg(with_scylladb)]
#[tokio::test]
async fn test_scylla_db_writes_from_blank() {
    let kv_store = linera_views::scylla_db::ScyllaDbDatabase::new_test_store()
        .await
        .unwrap();
    run_writes_from_blank(&kv_store).await;
}

#[cfg(with_indexeddb)]
#[wasm_bindgen_test]
async fn test_indexed_db_writes_from_blank() {
    run_writes_from_blank(&linera_views::indexed_db::create_indexed_db_test_store().await).await;
}
/// Round-trips string values of increasing size through a memory store.
#[tokio::test]
async fn test_big_value_read_write() {
    use rand::{distributions::Alphanumeric, Rng};
    let context = MemoryContext::new_for_testing(());
    for count in [50, 1024] {
        // Deterministic random payload of `count` alphanumeric characters.
        let expected = make_deterministic_rng()
            .sample_iter(&Alphanumeric)
            .take(count)
            .map(char::from)
            .collect::<String>();
        let key = vec![43, 23, 56];
        let mut batch = Batch::new();
        batch.put_key_value(key.clone(), &expected).unwrap();
        context.store().write_batch(batch).await.unwrap();
        let actual = context
            .store()
            .read_value::<String>(&key)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(actual, expected);
    }
}
/// Tombstone-triggering scenario on ScyllaDB.
#[cfg(with_scylladb)]
#[tokio::test]
async fn scylla_db_tombstone_triggering_test() {
    use linera_views::store::KeyValueDatabase as _;
    let database = linera_views::scylla_db::ScyllaDbDatabase::connect_test_namespace()
        .await
        .unwrap();
    linera_views::test_utils::tombstone_triggering_test(database.open_exclusive(&[]).unwrap())
        .await;
}

/// Tombstone-triggering scenario on RocksDB.
#[cfg(with_rocksdb)]
#[tokio::test]
async fn rocks_db_tombstone_triggering_test() {
    let kv_store = linera_views::rocks_db::RocksDbDatabase::new_test_store()
        .await
        .unwrap();
    linera_views::test_utils::tombstone_triggering_test(kv_store).await;
}
/// Large write/read round-trips (~20 MB total, mixed value sizes), per backend.
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_scylla_db_big_write_read() {
    use linera_views::store::KeyValueDatabase as _;
    let database = linera_views::scylla_db::ScyllaDbDatabase::connect_test_namespace()
        .await
        .unwrap();
    let store = database.open_exclusive(&[]).unwrap();
    run_big_write_read(store, 20000000, vec![100, 1000, 200000, 5000000]).await;
}

#[tokio::test]
async fn test_memory_big_write_read() {
    let store = MemoryDatabase::new_test_store().await.unwrap();
    run_big_write_read(store, 20000000, vec![100, 1000, 200000, 5000000]).await;
}

#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_rocks_db_big_write_read() {
    let store = linera_views::rocks_db::RocksDbDatabase::new_test_store()
        .await
        .unwrap();
    run_big_write_read(store, 20000000, vec![100, 1000, 200000, 5000000]).await;
}

#[cfg(with_indexeddb)]
#[wasm_bindgen_test]
async fn test_indexed_db_big_write_read() {
    let store = linera_views::indexed_db::create_indexed_db_test_store().await;
    run_big_write_read(store, 20000000, vec![100, 1000, 200000, 5000000]).await;
}

#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_dynamo_db_big_write_read() {
    use linera_views::store::KeyValueDatabase as _;
    let database = linera_views::dynamo_db::DynamoDbDatabase::connect_test_namespace()
        .await
        .unwrap();
    let store = database.open_exclusive(&[]).unwrap();
    run_big_write_read(store, 20000000, vec![100, 1000, 200000, 5000000]).await;
}
#[tokio::test]
async fn test_memory_writes_from_state() {
    // Replay state-derived writes against the in-memory store.
    let memory_store = MemoryDatabase::new_test_store().await.unwrap();
    run_writes_from_state(&memory_store).await;
}
#[cfg(with_rocksdb)]
#[tokio::test]
async fn test_rocks_db_writes_from_state() {
    // Replay state-derived writes against RocksDB.
    let rocks_store = linera_views::rocks_db::RocksDbDatabase::new_test_store().await.unwrap();
    run_writes_from_state(&rocks_store).await;
}
#[cfg(with_indexeddb)]
#[wasm_bindgen_test]
async fn test_indexed_db_writes_from_state() {
    // Replay state-derived writes against IndexedDB (runs under wasm).
    let indexed_store = linera_views::indexed_db::create_indexed_db_test_store().await;
    run_writes_from_state(&indexed_store).await;
}
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_dynamo_db_writes_from_state() {
    // Replay state-derived writes against DynamoDB.
    let dynamo_store = linera_views::dynamo_db::DynamoDbDatabase::new_test_store()
        .await
        .unwrap();
    run_writes_from_state(&dynamo_store).await;
}
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_scylla_db_writes_from_state() {
    // Replay state-derived writes against ScyllaDB.
    let scylla_store = linera_views::scylla_db::ScyllaDbDatabase::new_test_store()
        .await
        .unwrap();
    run_writes_from_state(&scylla_store).await;
}
#[cfg(with_scylladb)]
#[tokio::test]
async fn test_scylladb_access() {
    // Shared admin-access test instantiated for the ScyllaDB backend.
    access_admin_test::<linera_views::scylla_db::ScyllaDbDatabase>().await
}
#[cfg(with_dynamodb)]
#[tokio::test]
async fn test_dynamodb_access() {
    // Shared admin-access test instantiated for the DynamoDB backend.
    access_admin_test::<linera_views::dynamo_db::DynamoDbDatabase>().await
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/tests/hashable_tests.rs | linera-views/tests/hashable_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use linera_base::crypto::CryptoHash;
use linera_views::{
common::HasherOutput,
context::MemoryContext,
hashable_wrapper::WrappedHashableContainerView,
historical_hash_wrapper::HistoricallyHashableView,
register_view::{HashedRegisterView, RegisterView},
views::{HashableView, RootView, View},
};
use linera_views_derive::CryptoHashRootView;
/// Root view pairing a plain register with the same register type wrapped in a
/// hashable container, so that their hashes can be compared in the tests below.
#[derive(CryptoHashRootView)]
struct TestType<C> {
    pub inner: RegisterView<C, String>,
    pub wrap: WrappedHashableContainerView<C, RegisterView<C, String>, HasherOutput>,
}
// TODO(#560): Implement the same for CryptoHash
#[tokio::test]
async fn check_hashable_container_hash() -> Result<()> {
    // A register and the same register wrapped in a hashable container
    // must produce identical hashes when both are freshly loaded (empty).
    let context = MemoryContext::new_for_testing(());
    let test = TestType::load(context).await?;
    let inner_hash = test.inner.hash().await?;
    let wrapped_hash = test.wrap.hash().await?;
    assert_eq!(inner_hash, wrapped_hash);
    Ok(())
}
#[tokio::test]
async fn check_hashable_hash() -> Result<()> {
    // The hash must change when the register value changes, and revert to the
    // original hash when the view is cleared back to the default value.
    let context = MemoryContext::new_for_testing(());
    let mut view = HashedRegisterView::<_, u32>::load(context).await?;
    let default_hash = view.hash().await?;
    *view.get_mut() = 32;
    let updated_hash = view.hash().await?;
    assert_ne!(default_hash, updated_hash);
    view.clear();
    assert_eq!(default_hash, view.hash().await?);
    Ok(())
}
/// Inner view holding two registers and an optional hash; wrapped in a
/// `HistoricallyHashableView` by `TestType2`.
#[derive(View)]
struct TestInnerType<C> {
    pub field1: RegisterView<C, u32>,
    pub field2: RegisterView<C, u32>,
    pub field3: RegisterView<C, Option<CryptoHash>>,
}
/// Root view combining a plain register with a historically-hashed inner view,
/// used to check that the hash wrapper does not clobber the inner fields.
#[derive(RootView)]
struct TestType2<C> {
    pub field1: RegisterView<C, u32>,
    pub field2: HistoricallyHashableView<C, TestInnerType<C>>,
}
#[tokio::test]
async fn check_hashable_not_overwriting_field() -> Result<()> {
    // Regression test for #4983: the historical-hash wrapper used to share a
    // base key with its inner view, so saving clobbered `field2.field3`.
    let context = MemoryContext::new_for_testing(());
    let expected_hash = CryptoHash::from([0u8; 32]);
    {
        // Store some data through the view and persist it.
        let mut test = TestType2::load(context.clone()).await?;
        test.field2.field1.set(1);
        test.field2.field2.set(2);
        test.field2.field3.set(Some(expected_hash));
        test.save().await?;
    }
    // Reload and confirm the stored hash survived the save.
    let test = TestType2::load(context).await?;
    assert_eq!(test.field2.field3.get(), &Some(expected_hash));
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/tests/random_container_tests.rs | linera-views/tests/random_container_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::{BTreeMap, BTreeSet};
use anyhow::Result;
use linera_views::{
bucket_queue_view::HashedBucketQueueView,
collection_view::{CollectionView, HashedCollectionView},
context::{Context, MemoryContext},
key_value_store_view::{KeyValueStoreView, SizeData},
map_view::{HashedByteMapView, MapView},
queue_view::HashedQueueView,
random::make_deterministic_rng,
reentrant_collection_view::{HashedReentrantCollectionView, ReentrantCollectionView},
register_view::RegisterView,
views::{CryptoHashRootView, CryptoHashView, HashableView as _, RootView, View},
};
use rand::{distributions::Uniform, Rng, RngCore};
/// Root view with a single hashed collection of `u8`-indexed registers,
/// the subject of the randomized collection test below.
#[derive(CryptoHashRootView)]
struct CollectionStateView<C> {
    pub v: HashedCollectionView<C, u8, RegisterView<C, u32>>,
}
impl<C> CollectionStateView<C>
where
    C: Context,
{
    /// Reads the whole collection into an ordinary map, panicking on any
    /// storage error or missing entry.
    async fn key_values(&self) -> BTreeMap<u8, u32> {
        let mut result = BTreeMap::new();
        for index in self.v.indices().await.unwrap() {
            let entry = self.v.try_load_entry(&index).await.unwrap().unwrap();
            result.insert(index, *entry.get());
        }
        result
    }
}
/// Randomized consistency test for `HashedCollectionView`.
///
/// Repeatedly applies random batches of mutations (remove, write, load with
/// default, reset, clear, rollback) to the view while mirroring each change in
/// a plain `BTreeMap`, then checks that the view's entries, membership queries
/// and crypto hash all agree with the reference map.
#[tokio::test]
async fn classic_collection_view_check() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut rng = make_deterministic_rng();
    // Reference model of the *persisted* state; only updated on save.
    let mut map = BTreeMap::<u8, u32>::new();
    // Number of load/mutate/save rounds, and the exclusive upper bound of keys.
    let n = 20;
    let nmax: u8 = 25;
    for _ in 0..n {
        let mut view = CollectionStateView::load(context.clone()).await?;
        let hash = view.crypto_hash_mut().await?;
        let save = rng.gen::<bool>();
        // `new_map` mirrors the pending (unsaved) state of the view.
        let count_oper = rng.gen_range(0..25);
        let mut new_map = map.clone();
        for _ in 0..count_oper {
            let choice = rng.gen_range(0..6);
            if choice == 0 {
                // deleting random stuff
                let pos = rng.gen_range(0..nmax);
                view.v.remove_entry(&pos)?;
                new_map.remove(&pos);
            }
            if choice == 1 {
                // changing some random entries
                let n_ins = rng.gen_range(0..5);
                for _i in 0..n_ins {
                    let pos = rng.gen_range(0..nmax);
                    let value = rng.gen::<u32>();
                    let subview = view.v.load_entry_mut(&pos).await?;
                    *subview.get_mut() = value;
                    new_map.insert(pos, value);
                }
            }
            if choice == 2 {
                // The load_entry actually changes the entries to default if missing
                let n_load = rng.gen_range(0..5);
                for _i in 0..n_load {
                    let pos = rng.gen_range(0..nmax);
                    let _subview = view.v.load_entry_mut(&pos).await?;
                    new_map.entry(pos).or_insert(0);
                }
            }
            if choice == 3 {
                // Resetting entries overwrites them with the default value (0).
                let n_reset = rng.gen_range(0..5);
                for _i in 0..n_reset {
                    let pos = rng.gen_range(0..nmax);
                    view.v.reset_entry_to_default(&pos)?;
                    new_map.insert(pos, 0);
                }
            }
            if choice == 4 {
                // Doing the clearing
                view.clear();
                new_map.clear();
            }
            if choice == 5 {
                // Doing the rollback: back to the last persisted state.
                view.rollback();
                assert!(!view.has_pending_changes().await);
                new_map = map.clone();
            }
            // Checking the hash: equal contents must give equal hashes;
            // different contents must not (a collision would be a bug or
            // extremely unlikely).
            let new_hash = view.crypto_hash_mut().await?;
            if map == new_map {
                assert_eq!(new_hash, hash);
            } else {
                assert_ne!(new_hash, hash);
            }
            // Checking the behavior of "try_load_entry"
            for _ in 0..10 {
                let pos = rng.gen::<u8>();
                let test_view = view.v.try_load_entry(&pos).await?.is_some();
                let test_map = new_map.contains_key(&pos);
                assert_eq!(test_view, test_map);
            }
            // Checking the keys
            let key_values = view.key_values().await;
            assert_eq!(key_values, new_map);
        }
        if save {
            // Unsaved differences from the stored state must be reported as pending.
            if map != new_map {
                assert!(view.has_pending_changes().await);
            }
            map = new_map.clone();
            view.save().await?;
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
/// Root view wrapping a raw `KeyValueStoreView`, the subject of the
/// randomized key/value test below.
#[derive(CryptoHashRootView)]
pub struct KeyValueStateView<C> {
    pub store: KeyValueStoreView<C>,
}
/// Drops every entry of `map` whose key starts with `key_prefix`.
///
/// An empty prefix therefore clears the whole map.
fn remove_by_prefix<V>(map: &mut BTreeMap<Vec<u8>, V>, key_prefix: Vec<u8>) {
    let prefix = key_prefix;
    map.retain(|stored_key, _| !stored_key.starts_with(&prefix));
}
/// Sums the key bytes and value bytes of a list of key/value pairs, returning
/// the totals as a `SizeData`.
///
/// Takes a slice rather than `&Vec` so any contiguous list of pairs works;
/// existing `&Vec` call sites keep working via deref coercion. The totals are
/// narrowed to `u32` as required by `SizeData`; test data is assumed to stay
/// well below `u32::MAX` bytes in total.
fn total_size(vec: &[(Vec<u8>, Vec<u8>)]) -> SizeData {
    let (total_key_size, total_value_size) = vec.iter().fold(
        (0usize, 0usize),
        |(key_acc, value_acc), (key, value)| (key_acc + key.len(), value_acc + value.len()),
    );
    SizeData {
        key: total_key_size as u32,
        value: total_value_size as u32,
    }
}
/// Randomized consistency test for `KeyValueStoreView`.
///
/// Mirrors random insertions, removals (single keys and whole prefixes),
/// clears and rollbacks in a reference `BTreeMap`, and after every batch
/// checks the view's key/value listing, total size, `get` and `multi_get`
/// against the reference.
#[tokio::test]
async fn key_value_store_view_mutability() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut rng = make_deterministic_rng();
    // Reference model of the *persisted* state; only updated on save.
    let mut state_map = BTreeMap::new();
    let n = 40;
    // Every key ever inserted; used to probe `get`/`multi_get` below.
    let mut all_keys = BTreeSet::new();
    for _ in 0..n {
        let mut view = KeyValueStateView::load(context.clone()).await?;
        let save = rng.gen::<bool>();
        let read_state = view.store.index_values().await?;
        let state_vec = state_map.clone().into_iter().collect::<Vec<_>>();
        assert!(read_state.iter().map(|kv| (&kv.0, &kv.1)).eq(&state_map));
        assert_eq!(total_size(&state_vec), view.store.total_size());
        let count_oper = rng.gen_range(0..15);
        // `new_state_map` mirrors the pending (unsaved) state of the view.
        let mut new_state_map = state_map.clone();
        let mut new_state_vec = state_vec.clone();
        for _ in 0..count_oper {
            let choice = rng.gen_range(0..5);
            let entry_count = view.store.count().await?;
            if choice == 0 {
                // inserting random stuff
                let n_ins = rng.gen_range(0..10);
                for _ in 0..n_ins {
                    let len = rng.gen_range(1..6);
                    // Short keys over a tiny alphabet so collisions and
                    // prefix removals happen often.
                    let key = (&mut rng)
                        .sample_iter(Uniform::from(0..4))
                        .take(len)
                        .collect::<Vec<_>>();
                    all_keys.insert(key.clone());
                    let value = Vec::new();
                    view.store.insert(key.clone(), value.clone()).await?;
                    new_state_map.insert(key, value);
                    new_state_vec = new_state_map.clone().into_iter().collect();
                    let new_key_values = view.store.index_values().await?;
                    assert_eq!(new_state_vec, new_key_values);
                    assert_eq!(total_size(&new_state_vec), view.store.total_size());
                }
            }
            if choice == 1 && entry_count > 0 {
                // deleting some entries
                // NOTE(review): positions are drawn against the entry count
                // from before this batch, so the same key may be removed
                // twice; both the view and the reference get the same calls
                // either way.
                let n_remove = rng.gen_range(0..entry_count);
                for _ in 0..n_remove {
                    let pos = rng.gen_range(0..entry_count);
                    let (key, _) = new_state_vec[pos].clone();
                    new_state_map.remove(&key);
                    view.store.remove(key).await?;
                }
            }
            if choice == 2 && entry_count > 0 {
                // deleting a prefix
                let val = rng.gen_range(0..5) as u8;
                let key_prefix = vec![val];
                view.store.remove_by_prefix(key_prefix.clone()).await?;
                remove_by_prefix(&mut new_state_map, key_prefix);
            }
            if choice == 3 {
                // Doing the clearing
                view.clear();
                new_state_map.clear();
            }
            if choice == 4 {
                // Doing the rollback: back to the last persisted state.
                view.rollback();
                assert!(!view.has_pending_changes().await);
                new_state_map = state_map.clone();
            }
            // After each operation, the view must match the reference exactly.
            new_state_vec = new_state_map.clone().into_iter().collect();
            let new_key_values = view.store.index_values().await?;
            assert_eq!(new_state_vec, new_key_values);
            assert_eq!(total_size(&new_state_vec), view.store.total_size());
            // `get` and `multi_get` must agree with the reference for every
            // key that was ever inserted (whether currently present or not).
            let all_keys_vec = all_keys.clone().into_iter().collect::<Vec<_>>();
            let tests_multi_get = view.store.multi_get(&all_keys_vec).await?;
            for (i, key) in all_keys.clone().into_iter().enumerate() {
                let test_map = new_state_map.contains_key(&key);
                let test_view = view.store.get(&key).await?.is_some();
                let test_multi_get = tests_multi_get[i].is_some();
                assert_eq!(test_map, test_view);
                assert_eq!(test_map, test_multi_get);
            }
        }
        if save {
            // Unsaved differences must be reported as pending changes.
            if state_map != new_state_map {
                assert!(view.has_pending_changes().await);
            }
            state_map = new_state_map.clone();
            view.save().await?;
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
/// Root view wrapping a hashed byte map, the subject of the randomized
/// map-view test below.
#[derive(CryptoHashRootView)]
pub struct ByteMapStateView<C> {
    pub map: HashedByteMapView<C, u8>,
}
/// One randomized round of consistency checking for `HashedByteMapView`.
///
/// Mirrors random insertions, removals, prefix removals, clears, rollbacks and
/// in-place mutations in a reference `BTreeMap`, checking after every
/// operation that key/value listings, prefix queries, membership tests,
/// `multi_get` and the crypto hash all agree with the reference.
async fn run_map_view_mutability<R: RngCore + Clone>(rng: &mut R) -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    // Reference model of the *persisted* state; only updated on save.
    let mut state_map = BTreeMap::new();
    // Every key ever inserted; used to probe the read APIs below.
    let mut all_keys = BTreeSet::new();
    let n = 10;
    for _ in 0..n {
        let mut view = ByteMapStateView::load(context.clone()).await?;
        let save = rng.gen::<bool>();
        let read_state = view.map.key_values().await?;
        let read_hash = view.crypto_hash_mut().await?;
        let state_vec = state_map.clone().into_iter().collect::<Vec<_>>();
        assert_eq!(state_vec, read_state);
        // `new_state_map` mirrors the pending (unsaved) state of the view.
        let count_oper = rng.gen_range(0..25);
        let mut new_state_map = state_map.clone();
        let mut new_state_vec = state_vec.clone();
        for _ in 0..count_oper {
            let choice = rng.gen_range(0..7);
            let count = view.map.count().await?;
            if choice == 0 {
                // inserting random stuff
                let n_ins = rng.gen_range(0..10);
                for _ in 0..n_ins {
                    let len = rng.gen_range(1..6);
                    // Short keys over a tiny alphabet so collisions and
                    // prefix removals happen often.
                    let key = rng
                        .clone()
                        .sample_iter(Uniform::from(0..4))
                        .take(len)
                        .collect::<Vec<_>>();
                    all_keys.insert(key.clone());
                    let value = rng.gen::<u8>();
                    view.map.insert(key.clone(), value);
                    new_state_map.insert(key, value);
                }
            }
            if choice == 1 && count > 0 {
                // deleting some entries
                let n_remove = rng.gen_range(0..count);
                for _ in 0..n_remove {
                    let pos = rng.gen_range(0..count);
                    let vec = new_state_vec[pos].clone();
                    view.map.remove(vec.0.clone());
                    new_state_map.remove(&vec.0);
                }
            }
            if choice == 2 && count > 0 {
                // deleting a prefix
                let val = rng.gen_range(0..5) as u8;
                let key_prefix = vec![val];
                view.map.remove_by_prefix(key_prefix.clone());
                remove_by_prefix(&mut new_state_map, key_prefix);
            }
            if choice == 3 {
                // Doing the clearing
                view.clear();
                new_state_map.clear();
            }
            if choice == 4 {
                // Doing the rollback: back to the last persisted state.
                view.rollback();
                assert!(!view.has_pending_changes().await);
                new_state_map = state_map.clone();
            }
            if choice == 5 && count > 0 {
                // Mutating an existing entry in place through `get_mut`.
                let pos = rng.gen_range(0..count);
                let vec = new_state_vec[pos].clone();
                let key = vec.0;
                let result = view.map.get_mut(&key).await?.unwrap();
                let new_value = rng.gen::<u8>();
                *result = new_value;
                new_state_map.insert(key, new_value);
            }
            if choice == 6 && count > 0 {
                // Mutating through `get_mut_or_default`, sometimes on an
                // existing key, sometimes on a fresh random one.
                let choice = rng.gen_range(0..count);
                let key = match choice {
                    0 => {
                        // Scenario 1 of using existing key
                        let pos = rng.gen_range(0..count);
                        let vec = new_state_vec[pos].clone();
                        vec.0
                    }
                    _ => {
                        let len = rng.gen_range(1..6);
                        rng.clone()
                            .sample_iter(Uniform::from(0..4))
                            .take(len)
                            .collect::<Vec<_>>()
                    }
                };
                let test_view = view.map.contains_key(&key).await?;
                let test_map = new_state_map.contains_key(&key);
                assert_eq!(test_view, test_map);
                let result = view.map.get_mut_or_default(&key).await?;
                let new_value = rng.gen::<u8>();
                *result = new_value;
                new_state_map.insert(key, new_value);
            }
            // After each operation, the view must match the reference exactly.
            new_state_vec = new_state_map.clone().into_iter().collect();
            let new_hash = view.crypto_hash_mut().await?;
            if state_vec == new_state_vec {
                assert_eq!(new_hash, read_hash);
            } else {
                // Hash equality is a bug or a hash collision (unlikely)
                assert_ne!(new_hash, read_hash);
            }
            let new_key_values = view.map.key_values().await?;
            assert_eq!(new_state_vec, new_key_values);
            // Prefix queries must agree with a filtered reference listing.
            for u in 0..4 {
                let part_state_vec = new_state_vec
                    .iter()
                    .filter(|&x| x.0[0] == u)
                    .cloned()
                    .collect::<Vec<_>>();
                let part_key_values = view.map.key_values_by_prefix(vec![u]).await?;
                assert_eq!(part_state_vec, part_key_values);
            }
            // `get`, `contains_key` and `multi_get` must agree with the
            // reference for every key that was ever inserted.
            let keys_vec = all_keys.iter().cloned().collect::<Vec<_>>();
            let values = view.map.multi_get(keys_vec.clone()).await?;
            for i in 0..keys_vec.len() {
                let key = &keys_vec[i];
                let test_map = new_state_map.contains_key(key);
                let test_view1 = view.map.get(key).await?.is_some();
                let test_view2 = view.map.contains_key(key).await?;
                assert_eq!(test_map, test_view1);
                assert_eq!(test_map, test_view2);
                assert_eq!(test_map, values[i].is_some());
            }
        }
        if save {
            // Unsaved differences must be reported as pending changes.
            if state_map != new_state_map {
                assert!(view.has_pending_changes().await);
            }
            state_map = new_state_map.clone();
            view.save().await?;
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
#[tokio::test]
async fn map_view_mutability() -> Result<()> {
    // Repeat the randomized scenario several times, sharing one
    // deterministic RNG so the whole run is reproducible.
    let mut rng = make_deterministic_rng();
    let mut remaining_rounds = 5;
    while remaining_rounds > 0 {
        run_map_view_mutability(&mut rng).await?;
        remaining_rounds -= 1;
    }
    Ok(())
}
/// Root view wrapping a hashed bucket queue of bytes (the const parameter `5`
/// presumably sets the bucket size — confirm against the view's definition).
#[derive(CryptoHashRootView)]
pub struct BucketQueueStateView<C> {
    pub queue: HashedBucketQueueView<C, u8, 5>,
}
/// Randomized consistency test for `HashedBucketQueueView`.
///
/// Mirrors random pushes, front deletions, in-place mutations, clears and
/// rollbacks in a reference `Vec`, checking after every operation that the
/// elements, front/back accessors, `read_front`/`read_back` and the crypto
/// hash all agree with the reference.
#[tokio::test]
async fn bucket_queue_view_mutability_check() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut rng = make_deterministic_rng();
    // Reference model of the *persisted* state; only updated on save.
    let mut vector = Vec::new();
    let n = 200;
    for _ in 0..n {
        let mut view = BucketQueueStateView::load(context.clone()).await?;
        let hash = view.crypto_hash_mut().await?;
        let save = rng.gen::<bool>();
        let elements = view.queue.elements().await?;
        assert_eq!(elements, vector);
        // `new_vector` mirrors the pending (unsaved) state of the view.
        let count_oper = rng.gen_range(0..25);
        let mut new_vector = vector.clone();
        for _ in 0..count_oper {
            // `choice == 5` falls through all branches and performs no mutation.
            let choice = rng.gen_range(0..6);
            let count = view.queue.count();
            if choice == 0 {
                // inserting random stuff
                let n_ins = rng.gen_range(0..100);
                for _ in 0..n_ins {
                    let val = rng.gen::<u8>();
                    view.queue.push_back(val);
                    new_vector.push(val);
                }
            }
            if choice == 1 {
                // deleting some entries
                let n_remove = rng.gen_range(0..=count);
                for _ in 0..n_remove {
                    view.queue.delete_front().await?;
                    // slow but we do not care for tests.
                    new_vector.remove(0);
                }
            }
            if choice == 2 && count > 0 {
                // changing some random entries
                let pos = rng.gen_range(0..count);
                let val = rng.gen::<u8>();
                let mut iter = view.queue.iter_mut().await?;
                // Advance to the chosen position. (The previous version wrapped
                // this loop in redundant parentheses, tripping `unused_parens`.)
                for _ in 0..pos {
                    iter.next();
                }
                if let Some(value) = iter.next() {
                    *value = val;
                }
                if let Some(value) = new_vector.get_mut(pos) {
                    *value = val;
                }
            }
            if choice == 3 {
                // Doing the clearing
                view.clear();
                new_vector.clear();
            }
            if choice == 4 {
                // Doing the rollback: back to the last persisted state.
                view.rollback();
                assert!(!view.has_pending_changes().await);
                new_vector.clone_from(&vector);
            }
            // After each operation, the view must match the reference exactly.
            let new_elements = view.queue.elements().await?;
            let new_hash = view.crypto_hash_mut().await?;
            if elements == new_elements {
                assert_eq!(new_hash, hash);
            } else {
                // If equal it is a bug or a hash collision (unlikely)
                assert_ne!(new_hash, hash);
            }
            assert_eq!(new_elements, new_vector);
            let front1 = view.queue.front();
            let front2 = new_vector.first();
            assert_eq!(front1, front2);
            let back1 = view.queue.back().await?;
            let back2 = new_vector.last().copied();
            assert_eq!(back1, back2);
            // Random-length reads from both ends must match slices of the reference.
            for _ in 0..3 {
                let count = rng.gen_range(0..=new_vector.len());
                let vec1 = view.queue.read_front(count).await?;
                let vec2 = new_vector[..count].to_vec();
                assert_eq!(vec1, vec2);
                let vec1 = view.queue.read_back(count).await?;
                let start = new_vector.len() - count;
                let vec2 = new_vector[start..].to_vec();
                assert_eq!(vec1, vec2);
            }
        }
        if save {
            // Unsaved differences must be reported as pending changes.
            if vector != new_vector {
                assert!(view.has_pending_changes().await);
            }
            vector.clone_from(&new_vector);
            view.save().await?;
            let new_elements = view.queue.elements().await?;
            assert_eq!(new_elements, new_vector);
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
/// Root view holding the same nested-map structure twice — once behind a
/// `CollectionView` and once behind a `ReentrantCollectionView` — so the two
/// implementations can be mutated identically and compared.
#[derive(CryptoHashRootView)]
pub struct NestedCollectionMapView<C> {
    pub map1: CollectionView<C, String, MapView<C, String, u64>>,
    pub map2: ReentrantCollectionView<C, String, MapView<C, String, u64>>,
}
impl<C: Context> NestedCollectionMapView<C> {
    /// Reads both collections into a single `BTreeMap` of maps, asserting
    /// along the way that `map1` and `map2` hold exactly the same indices and
    /// key/value pairs, whether loaded entry-by-entry or in bulk via
    /// `try_load_all_entries`.
    async fn read_maps_nested_collection_map_view(
        &self,
    ) -> Result<BTreeMap<String, BTreeMap<String, u64>>> {
        let indices1 = self.map1.indices().await?;
        let indices2 = self.map2.indices().await?;
        assert_eq!(indices1, indices2, "Different set of indices");
        let subviews1 = self.map1.try_load_entries(&indices1).await?;
        let subviews2 = self.map2.try_load_entries(&indices1).await?;
        let mut state_map = BTreeMap::new();
        for ((subview1, subview2), index) in subviews1.into_iter().zip(subviews2).zip(indices1) {
            let key_values1 = subview1.unwrap().index_values().await?;
            let key_values2 = subview2.unwrap().index_values().await?;
            assert_eq!(key_values1, key_values2, "key-values should be equal");
            let key_values = key_values1.into_iter().collect::<BTreeMap<String, u64>>();
            state_map.insert(index, key_values);
        }
        // Cross-check the bulk-loading API against the indices read above.
        let key_subviews1 = self.map1.try_load_all_entries().await?;
        let key_subviews2 = self.map2.try_load_all_entries().await?;
        for ((key_subview1, key_subview2), index) in
            key_subviews1.into_iter().zip(key_subviews2).zip(indices2)
        {
            let (index1, subview1) = key_subview1;
            let (index2, subview2) = key_subview2;
            assert_eq!(index1, index, "index1 should be coherent");
            // Message fixed: it was copy-pasted as "index1" while checking `index2`.
            assert_eq!(index2, index, "index2 should be coherent");
            let key_values1 = subview1.index_values().await?;
            let key_values2 = subview2.index_values().await?;
            assert_eq!(key_values1, key_values2, "key-values should be equal");
        }
        Ok(state_map)
    }
}
/// Randomized consistency test comparing a `CollectionView` and a
/// `ReentrantCollectionView` of nested `MapView`s.
///
/// Applies identical random mutations to both collections and to a nested
/// `BTreeMap` reference, then checks that both views agree with the
/// reference, with each other, and that their hashes coincide.
#[tokio::test]
async fn nested_collection_map_view_check() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut rng = make_deterministic_rng();
    // Reference model of the *persisted* state; only updated on save.
    let mut state_map: BTreeMap<String, BTreeMap<String, u64>> = BTreeMap::new();
    let n = 20;
    for _ in 0..n {
        let mut view = NestedCollectionMapView::load(context.clone()).await?;
        let hash = view.crypto_hash_mut().await?;
        let save = rng.gen::<bool>();
        let count_oper = rng.gen_range(0..25);
        // `new_state_map` mirrors the pending (unsaved) state of both views.
        let mut new_state_map = state_map.clone();
        for _ in 0..count_oper {
            let keys: Vec<String> = new_state_map.keys().cloned().collect::<Vec<_>>();
            let count = new_state_map.len();
            let choice = rng.gen_range(0..5);
            if choice >= 2 {
                // Insertion branch (choices 2..=4). Note that for `choice == 2`
                // the removal branch further below also runs afterwards.
                let key1 = rng.gen_range::<u8, _>(0..10);
                let key1 = format!("key1_{key1}");
                let key2 = rng.gen_range::<u8, _>(0..10);
                let key2 = format!("key2_{key2}");
                let value = rng.gen_range::<u64, _>(0..100);
                // insert into maps.
                let subview1 = view.map1.load_entry_mut(&key1).await?;
                subview1.insert(&key2, value)?;
                let mut subview2 = view.map2.try_load_entry_mut(&key1).await?;
                subview2.insert(&key2, value)?;
                // insert into control
                let mut map = new_state_map.get(&key1).cloned().unwrap_or_default();
                map.insert(key2, value);
                new_state_map.insert(key1, map);
            }
            if choice == 1 && count > 0 {
                // Remove a whole entry (one inner map) from both collections.
                let pos = rng.gen_range(0..count) as usize;
                let key = keys[pos].clone();
                view.map1.remove_entry(&key)?;
                view.map2.remove_entry(&key)?;
                new_state_map.remove(&key);
            }
            if choice == 2 && count > 0 {
                // Remove a single key from one randomly chosen inner map.
                let pos = rng.gen_range(0..count);
                let key1 = keys[pos].clone();
                let submap = new_state_map.get_mut(&key1).unwrap();
                let count = submap.len();
                if count > 0 {
                    let subkeys = submap
                        .iter()
                        .map(|(key, _)| key.clone())
                        .collect::<Vec<_>>();
                    let pos = rng.gen_range(0..count);
                    let key2 = subkeys[pos].clone();
                    submap.remove(&key2);
                    // Removing some entries from the view
                    let subview1 = view.map1.load_entry_mut(&key1).await?;
                    subview1.remove(&key2)?;
                    let mut subview2 = view.map2.try_load_entry_mut(&key1).await?;
                    subview2.remove(&key2)?;
                }
            }
            // After each operation, both views must match the reference.
            let state_view = view.read_maps_nested_collection_map_view().await?;
            assert_eq!(
                state_view, new_state_map,
                "state_view should match new_state_map"
            );
            let new_hash = view.crypto_hash_mut().await?;
            if state_map == new_state_map {
                assert_eq!(new_hash, hash);
            } else {
                // If equal it is a bug or a hash collision (unlikely)
                assert_ne!(new_hash, hash);
            }
            let hash1 = view.map1.hash().await?;
            let hash2 = view.map2.hash().await?;
            assert_eq!(
                hash1, hash2,
                "hash for CollectionView / ReentrantCollectionView should match"
            );
        }
        if save {
            // Unsaved differences must be reported as pending changes.
            if state_map != new_state_map {
                assert!(view.has_pending_changes().await);
            }
            state_map.clone_from(&new_state_map);
            view.save().await?;
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
/// Root view wrapping a hashed queue of bytes, the subject of the randomized
/// queue test below.
#[derive(CryptoHashRootView)]
pub struct QueueStateView<C> {
    pub queue: HashedQueueView<C, u8>,
}
/// Randomized consistency test for `HashedQueueView`.
///
/// Mirrors random pushes, front deletions, in-place mutations, clears and
/// rollbacks in a reference `Vec`, checking after every operation that the
/// elements, the front accessor and the crypto hash agree with the reference.
#[tokio::test]
async fn queue_view_mutability_check() -> Result<()> {
    let context = MemoryContext::new_for_testing(());
    let mut rng = make_deterministic_rng();
    // Reference model of the *persisted* state; only updated on save.
    let mut vector = Vec::new();
    let n = 20;
    for _ in 0..n {
        let mut view = QueueStateView::load(context.clone()).await?;
        let hash = view.crypto_hash_mut().await?;
        let save = rng.gen::<bool>();
        let elements = view.queue.elements().await?;
        assert_eq!(elements, vector);
        // `new_vector` mirrors the pending (unsaved) state of the view.
        let count_oper = rng.gen_range(0..25);
        let mut new_vector = vector.clone();
        for _ in 0..count_oper {
            let choice = rng.gen_range(0..5);
            let count = view.queue.count();
            if choice == 0 {
                // inserting random stuff
                let n_ins = rng.gen_range(0..10);
                for _ in 0..n_ins {
                    let val = rng.gen::<u8>();
                    view.queue.push_back(val);
                    new_vector.push(val);
                }
            }
            if choice == 1 {
                // deleting some entries
                let n_remove = rng.gen_range(0..=count);
                for _ in 0..n_remove {
                    view.queue.delete_front();
                    // slow but we do not care for tests.
                    new_vector.remove(0);
                }
            }
            if choice == 2 && count > 0 {
                // changing some random entries
                let pos = rng.gen_range(0..count);
                let val = rng.gen::<u8>();
                let mut iter = view.queue.iter_mut().await?;
                // Advance to the chosen position. (The previous version wrapped
                // this loop in redundant parentheses, tripping `unused_parens`.)
                for _ in 0..pos {
                    iter.next();
                }
                if let Some(value) = iter.next() {
                    *value = val;
                }
                if let Some(value) = new_vector.get_mut(pos) {
                    *value = val;
                }
            }
            if choice == 3 {
                // Doing the clearing
                view.clear();
                new_vector.clear();
            }
            if choice == 4 {
                // Doing the rollback: back to the last persisted state.
                view.rollback();
                assert!(!view.has_pending_changes().await);
                new_vector.clone_from(&vector);
            }
            // After each operation, the view must match the reference exactly.
            let front1 = view.queue.front().await?;
            let front2 = new_vector.first().copied();
            assert_eq!(front1, front2);
            let new_elements = view.queue.elements().await?;
            let new_hash = view.crypto_hash_mut().await?;
            if elements == new_elements {
                assert_eq!(new_hash, hash);
            } else {
                // If equal it is a bug or a hash collision (unlikely)
                assert_ne!(new_hash, hash);
            }
            assert_eq!(new_elements, new_vector);
        }
        if save {
            // Unsaved differences must be reported as pending changes.
            if vector != new_vector {
                assert!(view.has_pending_changes().await);
            }
            vector.clone_from(&new_vector);
            view.save().await?;
            assert!(!view.has_pending_changes().await);
        }
    }
    Ok(())
}
/// Root view with a hashed reentrant collection of `u8`-indexed registers,
/// the subject of the randomized reentrant-collection test below.
#[derive(CryptoHashRootView)]
struct ReentrantCollectionStateView<C> {
    pub v: HashedReentrantCollectionView<C, u8, RegisterView<C, u32>>,
}
impl<C> ReentrantCollectionStateView<C>
where
    C: Context,
{
    /// Reads the whole reentrant collection into an ordinary map,
    /// panicking if an index listed by `indices` has no entry.
    async fn key_values(&self) -> Result<BTreeMap<u8, u32>> {
        let mut result = BTreeMap::new();
        for index in self.v.indices().await? {
            let entry = self.v.try_load_entry(&index).await?.unwrap();
            result.insert(index, *entry.get());
        }
        Ok(result)
    }
}
#[tokio::test]
async fn reentrant_collection_view_check() -> Result<()> {
let context = MemoryContext::new_for_testing(());
let mut rng = make_deterministic_rng();
let mut map = BTreeMap::<u8, u32>::new();
let n = 20;
let nmax: u8 = 25;
for _ in 0..n {
let mut view = ReentrantCollectionStateView::load(context.clone()).await?;
let hash = view.crypto_hash_mut().await?;
let key_values = view.key_values().await?;
assert_eq!(key_values, map);
//
let save = rng.gen::<bool>();
let count_oper = rng.gen_range(0..25);
let mut new_map = map.clone();
for _i_op in 0..count_oper {
let choice = rng.gen_range(0..8);
if choice == 0 {
// Deleting some random stuff
let pos = rng.gen_range(0..nmax);
view.v.remove_entry(&pos)?;
new_map.remove(&pos);
}
if choice == 1 {
// Getting an array of reference
let mut indices = Vec::new();
let mut set_indices = BTreeSet::new();
let mut values = Vec::new();
let n_ins = rng.gen_range(0..5);
for _i in 0..n_ins {
let pos = rng.gen_range(0..nmax);
indices.push(pos);
set_indices.insert(pos);
let value = rng.gen::<u32>();
values.push(value);
}
// Only if all indices are distinct can the query be acceptable
if set_indices.len() == n_ins {
let mut subviews = view.v.try_load_entries_mut(&indices).await?;
for i in 0..n_ins {
let index = indices[i];
let value = values[i];
*subviews[i].get_mut() = value;
new_map.insert(index, value);
}
}
}
if choice == 2 {
// Changing some random entries
let n_ins = rng.gen_range(0..5);
for _i in 0..n_ins {
let pos = rng.gen_range(0..nmax);
let value = rng.gen::<u32>();
let mut subview = view.v.try_load_entry_mut(&pos).await?;
*subview.get_mut() = value;
new_map.insert(pos, value);
}
}
if choice == 3 {
// Loading some random entries and setting to 0 if missing
let n_ins = rng.gen_range(0..5);
for _i in 0..n_ins {
let pos = rng.gen_range(0..nmax);
let test_view = view.v.contains_key(&pos).await?;
let test_map = new_map.contains_key(&pos);
assert_eq!(test_view, test_map);
let _subview = view.v.try_load_entry_mut(&pos).await?;
new_map.entry(pos).or_insert(0);
}
}
if choice == 4 {
// Loading some random entries and checking correctness
let n_ins = rng.gen_range(0..5);
for _i_ins in 0..n_ins {
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/tests/admin_tests.rs | linera-views/tests/admin_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::marker::PhantomData;
#[cfg(with_dynamodb)]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(with_rocksdb)]
use linera_views::rocks_db::RocksDbDatabase;
#[cfg(with_scylladb)]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{
memory::MemoryDatabase,
store::{KeyValueStore, TestKeyValueDatabase},
test_utils::{namespace_admin_test, root_key_admin_test},
};
use test_case::test_case;
/// Runs the shared namespace-administration test suite against every
/// configured backend (memory always; RocksDB/DynamoDB/ScyllaDB behind
/// their respective feature flags).
#[test_case(PhantomData::<MemoryDatabase>; "MemoryDatabase")]
#[cfg_attr(with_rocksdb, test_case(PhantomData::<RocksDbDatabase>; "RocksDbDatabase"))]
#[cfg_attr(with_dynamodb, test_case(PhantomData::<DynamoDbDatabase>; "DynamoDbDatabase"))]
#[cfg_attr(with_scylladb, test_case(PhantomData::<ScyllaDbDatabase>; "ScyllaDbDatabase"))]
#[tokio::test]
async fn namespace_admin_test_cases<K: TestKeyValueDatabase>(_view_type: PhantomData<K>)
where
    K::Store: KeyValueStore,
{
    // `PhantomData` only selects the database type; the shared helper does the work.
    namespace_admin_test::<K>().await;
}
/// Runs the shared root-key administration test suite against every
/// configured backend (memory always; RocksDB/DynamoDB/ScyllaDB behind
/// their respective feature flags).
#[test_case(PhantomData::<MemoryDatabase>; "MemoryDatabase")]
#[cfg_attr(with_rocksdb, test_case(PhantomData::<RocksDbDatabase>; "RocksDbDatabase"))]
#[cfg_attr(with_dynamodb, test_case(PhantomData::<DynamoDbDatabase>; "DynamoDbDatabase"))]
#[cfg_attr(with_scylladb, test_case(PhantomData::<ScyllaDbDatabase>; "ScyllaDbDatabase"))]
#[tokio::test]
async fn root_key_admin_test_cases<K: TestKeyValueDatabase>(_view_type: PhantomData<K>)
where
    K::Store: KeyValueStore,
{
    // `PhantomData` only selects the database type; the shared helper does the work.
    root_key_admin_test::<K>().await;
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/benches/reentrant_collection_view.rs | linera-views/benches/reentrant_collection_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use linera_base::time::{Duration, Instant};
use linera_views::{
batch::Batch,
context::{Context, MemoryContext},
reentrant_collection_view::ReentrantCollectionView,
register_view::RegisterView,
store::WritableKeyValueStore as _,
views::View,
};
use serde::{Deserialize, Serialize};
use tokio::runtime::Runtime;
/// Benchmarks the [`ReentrantCollectionView::try_load_all_entries`] against the manual
/// pattern, when the collection has all of its entries staged in memory.
fn bench_load_all_entries_already_in_memory(criterion: &mut Criterion) {
    criterion.bench_function(
        "load_all_entries_already_in_memory_with_method",
        |bencher| {
            bencher
                .to_async(Runtime::new().expect("Failed to create Tokio runtime"))
                .iter_custom(|iterations| async move {
                    let mut total_time = Duration::ZERO;
                    for _ in 0..iterations {
                        // Setup is excluded from the measurement: the clock
                        // starts only after the view has been populated.
                        let view = create_populated_reentrant_collection_view().await;
                        let measurement = Instant::now();
                        let entries = view
                            .try_load_all_entries()
                            .await
                            .expect("Failed to load entries from `ReentrantCollectionView`");
                        // `black_box` keeps the optimizer from discarding the results.
                        for (index, entry) in entries {
                            black_box(index);
                            black_box(entry);
                        }
                        total_time += measurement.elapsed();
                    }
                    total_time
                })
        },
    );
    criterion.bench_function("load_all_entries_already_in_memory_manually", |bencher| {
        bencher
            .to_async(Runtime::new().expect("Failed to create Tokio runtime"))
            .iter_custom(|iterations| async move {
                let mut total_time = Duration::ZERO;
                for _ in 0..iterations {
                    let view = create_populated_reentrant_collection_view().await;
                    let measurement = Instant::now();
                    // Manual equivalent: list the indices, then bulk-load them.
                    let indices = view
                        .indices()
                        .await
                        .expect("Failed to load all indices from `ReentrantCollectionView`");
                    let entries = view
                        .try_load_entries(&indices)
                        .await
                        .expect("Failed to load entries from `ReentrantCollectionView`");
                    for (index, entry) in indices.into_iter().zip(entries) {
                        if let Some(entry) = entry {
                            black_box(index);
                            black_box(entry);
                        }
                    }
                    total_time += measurement.elapsed();
                }
                total_time
            })
    });
}
/// Benchmarks the [`ReentrantCollectionView::try_load_all_entries`] against the manual
/// pattern, when the collection has none of its entries staged in memory.
fn bench_load_all_entries_from_storage(criterion: &mut Criterion) {
    criterion.bench_function("load_all_entries_from_storage_with_method", |bencher| {
        bencher
            .to_async(Runtime::new().expect("Failed to create Tokio runtime"))
            .iter_custom(|iterations| async move {
                let mut total_time = Duration::ZERO;
                for _ in 0..iterations {
                    // Setup (populating and persisting the view) is excluded
                    // from the measurement; the clock starts afterwards.
                    let view = create_and_store_populated_reentrant_collection_view().await;
                    let measurement = Instant::now();
                    let entries = view
                        .try_load_all_entries()
                        .await
                        .expect("Failed to load entries from `ReentrantCollectionView`");
                    // `black_box` keeps the optimizer from discarding the results.
                    for (index, entry) in entries {
                        black_box(index);
                        black_box(entry);
                    }
                    total_time += measurement.elapsed();
                }
                total_time
            })
    });
    criterion.bench_function("load_all_entries_from_storage_manually", |bencher| {
        bencher
            .to_async(Runtime::new().expect("Failed to create Tokio runtime"))
            .iter_custom(|iterations| async move {
                let mut total_time = Duration::ZERO;
                for _ in 0..iterations {
                    let view = create_and_store_populated_reentrant_collection_view().await;
                    let measurement = Instant::now();
                    // Manual equivalent: list the indices, then bulk-load them.
                    let indices = view
                        .indices()
                        .await
                        .expect("Failed to load all indices from `ReentrantCollectionView`");
                    let entries = view
                        .try_load_entries(&indices)
                        .await
                        .expect("Failed to load entries from `ReentrantCollectionView`");
                    for (index, entry) in indices.into_iter().zip(entries) {
                        if let Some(entry) = entry {
                            black_box(index);
                            black_box(entry);
                        }
                    }
                    total_time += measurement.elapsed();
                }
                total_time
            })
    });
}
/// A helper type that simulates an index type that has a non-trivial cost to
/// serialize/deserialize.
#[derive(Clone, Debug, Deserialize, Serialize)]
enum ComplexIndex {
UselessVariant,
NestedVariant(Box<ComplexIndex>),
Leaf(String),
}
/// Creates a populated [`ReentrantCollectionView`] with its contents still staged in memory.
async fn create_populated_reentrant_collection_view(
) -> ReentrantCollectionView<MemoryContext<()>, ComplexIndex, RegisterView<MemoryContext<()>, String>>
{
let context = MemoryContext::new_for_testing(());
let mut view: ReentrantCollectionView<_, ComplexIndex, RegisterView<_, String>> =
ReentrantCollectionView::load(context)
.await
.expect("Failed to create `ReentrantCollectionView`");
let greek_alphabet = [
("alpha", "α"),
("beta", "β"),
("gamma", "γ"),
("delta", "δ"),
("epsilon", "ε"),
("zeta", "ζ"),
("eta", "η"),
("theta", "θ"),
("iota", "ι"),
("kappa", "κ"),
("lambda", "λ"),
("mu", "μ"),
("nu", "ν"),
("xi", "ξ"),
("omicron", "ο"),
("pi", "π"),
("rho", "ρ"),
("sigma", "σ"),
("tau", "τ"),
("upsilon", "υ"),
("phi", "φ"),
("chi", "χ"),
("psi", "ψ"),
("omega", "ω"),
];
for (name, letter) in greek_alphabet {
let index = ComplexIndex::NestedVariant(Box::new(ComplexIndex::Leaf(name.to_owned())));
view.try_load_entry_mut(&index)
.await
.expect("Failed to create entry in `ReentrantCollectionView`")
.set(letter.to_owned());
}
view
}
/// Creates a populated [`ReentrantCollectionView`] with its contents completely flushed to
/// the storage.
async fn create_and_store_populated_reentrant_collection_view(
) -> ReentrantCollectionView<MemoryContext<()>, ComplexIndex, RegisterView<MemoryContext<()>, String>>
{
let mut view = create_populated_reentrant_collection_view().await;
let context = view.context().clone();
let mut batch = Batch::new();
view.pre_save(&mut batch)
.expect("Failed to flush populated `ReentrantCollectionView`'s contents");
context
.store()
.write_batch(batch)
.await
.expect("Failed to store populated `ReentrantCollectionView`'s contents");
view.post_save();
ReentrantCollectionView::load(context)
.await
.expect("Failed to create second `ReentrantCollectionView`")
}
criterion_group!(
benches,
bench_load_all_entries_already_in_memory,
bench_load_all_entries_from_storage
);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/benches/queue_view.rs | linera-views/benches/queue_view.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use linera_base::time::{Duration, Instant};
#[cfg(with_dynamodb)]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(with_rocksdb)]
use linera_views::rocks_db::RocksDbDatabase;
#[cfg(with_scylladb)]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{
bucket_queue_view::BucketQueueView,
context::ViewContext,
memory::MemoryDatabase,
queue_view::QueueView,
random::{make_deterministic_rng, DeterministicRng},
store::{ReadableKeyValueStore, TestKeyValueDatabase, WritableKeyValueStore},
views::{CryptoHashRootView, RootView, View},
};
use rand::Rng;
use tokio::runtime::Runtime;
/// The number of operations
const N_OPERATIONS: usize = 1000;
enum Operations {
Save,
DeleteFront,
PushBack(u8),
}
fn generate_test_case(n_operation: usize, rng: &mut DeterministicRng) -> Vec<Operations> {
let mut operations = Vec::new();
let mut total_length = 0;
for _ in 0..n_operation {
let choice = rng.gen_range(0..10);
if choice == 0 {
operations.push(Operations::Save);
} else if choice < 3 && total_length > 0 {
operations.push(Operations::DeleteFront);
total_length -= 1;
} else {
let val = rng.gen::<u8>();
operations.push(Operations::PushBack(val));
total_length += 1;
}
}
operations
}
#[derive(CryptoHashRootView)]
pub struct QueueStateView<C> {
pub queue: QueueView<C, u8>,
}
pub async fn performance_queue_view<D: TestKeyValueDatabase + Clone + 'static>(
iterations: u64,
) -> Duration
where
D::Store: ReadableKeyValueStore + WritableKeyValueStore + Clone + 'static,
{
let database = D::connect_test_namespace().await.unwrap();
let store = database.open_shared(&[]).unwrap();
let context = ViewContext::<(), D::Store>::create_root_context(store, ())
.await
.unwrap();
let mut total_time = Duration::ZERO;
let mut rng = make_deterministic_rng();
for _ in 0..iterations {
let operations = generate_test_case(N_OPERATIONS, &mut rng);
let mut view = QueueStateView::load(context.clone()).await.unwrap();
let measurement = Instant::now();
for operation in operations {
match operation {
Operations::Save => {
view.save().await.unwrap();
}
Operations::DeleteFront => {
view.queue.delete_front();
}
Operations::PushBack(val) => {
view.queue.push_back(val);
}
}
black_box(view.queue.front().await.unwrap());
}
view.clear();
view.save().await.unwrap();
total_time += measurement.elapsed();
}
total_time
}
fn bench_queue_view(criterion: &mut Criterion) {
criterion.bench_function("memory_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_queue_view::<MemoryDatabase>(iterations).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("rocksdb_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_queue_view::<RocksDbDatabase>(iterations).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("dynamodb_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_queue_view::<DynamoDbDatabase>(iterations).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("scylladb_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_queue_view::<ScyllaDbDatabase>(iterations).await
})
});
}
#[derive(CryptoHashRootView)]
pub struct BucketQueueStateView<C> {
pub queue: BucketQueueView<C, u8, 100>,
}
pub async fn performance_bucket_queue_view<D: TestKeyValueDatabase + Clone + 'static>(
iterations: u64,
) -> Duration
where
D::Store: ReadableKeyValueStore + WritableKeyValueStore + Clone + 'static,
{
let database = D::connect_test_namespace().await.unwrap();
let store = database.open_shared(&[]).unwrap();
let context = ViewContext::<(), D::Store>::create_root_context(store, ())
.await
.unwrap();
let mut total_time = Duration::ZERO;
let mut rng = make_deterministic_rng();
for _ in 0..iterations {
let operations = generate_test_case(N_OPERATIONS, &mut rng);
let mut view = BucketQueueStateView::load(context.clone()).await.unwrap();
//
let measurement = Instant::now();
for operation in operations {
match operation {
Operations::Save => {
view.save().await.unwrap();
}
Operations::DeleteFront => {
view.queue.delete_front().await.unwrap();
}
Operations::PushBack(val) => {
view.queue.push_back(val);
}
}
black_box(view.queue.front());
}
view.clear();
view.save().await.unwrap();
total_time += measurement.elapsed();
}
total_time
}
fn bench_bucket_queue_view(criterion: &mut Criterion) {
criterion.bench_function("memory_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_bucket_queue_view::<MemoryDatabase>(iterations).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("rocksdb_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_bucket_queue_view::<RocksDbDatabase>(iterations).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("dynamodb_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_bucket_queue_view::<DynamoDbDatabase>(iterations).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("scylladb_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance_bucket_queue_view::<ScyllaDbDatabase>(iterations).await
})
});
}
/// Benchmark that specifically tests the overhead of saving a queue after adding one element,
/// when the queue already contains many elements. This highlights the metadata overhead
/// of `BucketQueueView` vs `QueueView`.
const INITIAL_QUEUE_SIZE: usize = 10000;
const INCREMENTAL_ADDS: usize = 100;
pub async fn incremental_save_queue_view<D: TestKeyValueDatabase + Clone + 'static>(
iterations: u64,
) -> Duration
where
D::Store: ReadableKeyValueStore + WritableKeyValueStore + Clone + 'static,
{
let mut total_time = Duration::ZERO;
for _ in 0..iterations {
let database = D::connect_test_namespace().await.unwrap();
let store = database.open_shared(&[]).unwrap();
let context = ViewContext::<(), D::Store>::create_root_context(store, ())
.await
.unwrap();
// First, populate the queue with INITIAL_QUEUE_SIZE elements.
let mut view = QueueStateView::load(context.clone()).await.unwrap();
for i in 0..INITIAL_QUEUE_SIZE {
view.queue.push_back((i % 256) as u8);
}
view.save().await.unwrap();
// Now measure adding INCREMENTAL_ADDS elements one by one, saving after each.
let measurement = Instant::now();
for i in 0..INCREMENTAL_ADDS {
view.queue.push_back((i % 256) as u8);
view.save().await.unwrap();
}
total_time += measurement.elapsed();
view.clear();
view.save().await.unwrap();
}
total_time
}
pub async fn incremental_save_bucket_queue_view<D: TestKeyValueDatabase + Clone + 'static>(
iterations: u64,
) -> Duration
where
D::Store: ReadableKeyValueStore + WritableKeyValueStore + Clone + 'static,
{
let mut total_time = Duration::ZERO;
for _ in 0..iterations {
let database = D::connect_test_namespace().await.unwrap();
let store = database.open_shared(&[]).unwrap();
let context = ViewContext::<(), D::Store>::create_root_context(store, ())
.await
.unwrap();
// First, populate the queue with INITIAL_QUEUE_SIZE elements.
let mut view = BucketQueueStateView::load(context.clone()).await.unwrap();
for i in 0..INITIAL_QUEUE_SIZE {
view.queue.push_back((i % 256) as u8);
}
view.save().await.unwrap();
// Now measure adding INCREMENTAL_ADDS elements one by one, saving after each.
let measurement = Instant::now();
for i in 0..INCREMENTAL_ADDS {
view.queue.push_back((i % 256) as u8);
view.save().await.unwrap();
}
total_time += measurement.elapsed();
view.clear();
view.save().await.unwrap();
}
total_time
}
fn bench_incremental_save(criterion: &mut Criterion) {
criterion.bench_function("memory_incremental_save_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
incremental_save_queue_view::<MemoryDatabase>(iterations).await
})
});
criterion.bench_function("memory_incremental_save_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
incremental_save_bucket_queue_view::<MemoryDatabase>(iterations).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("rocksdb_incremental_save_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
incremental_save_queue_view::<RocksDbDatabase>(iterations).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("rocksdb_incremental_save_bucket_queue_view", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
incremental_save_bucket_queue_view::<RocksDbDatabase>(iterations).await
})
});
}
criterion_group!(
benches,
bench_queue_view,
bench_bucket_queue_view,
bench_incremental_save
);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views/benches/stores.rs | linera-views/benches/stores.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use criterion::{black_box, criterion_group, criterion_main, Criterion};
#[cfg(with_dynamodb)]
use linera_views::dynamo_db::DynamoDbDatabase;
#[cfg(with_rocksdb)]
use linera_views::rocks_db::RocksDbDatabase;
#[cfg(with_scylladb)]
use linera_views::scylla_db::ScyllaDbDatabase;
use linera_views::{memory::MemoryDatabase, test_utils::performance};
use tokio::runtime::Runtime;
fn bench_contains_key(criterion: &mut Criterion) {
criterion.bench_function("store_memory_contains_key", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_key::<MemoryDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_contains_key", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_key::<RocksDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_contains_key", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_key::<DynamoDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_contains_key", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_key::<ScyllaDbDatabase, _>(iterations, black_box).await
})
});
}
fn bench_contains_keys(criterion: &mut Criterion) {
criterion.bench_function("store_memory_contains_keys", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_keys::<MemoryDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_contains_keys", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_keys::<RocksDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_contains_keys", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_keys::<DynamoDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_contains_keys", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::contains_keys::<ScyllaDbDatabase, _>(iterations, black_box).await
})
});
}
fn bench_find_keys_by_prefix(criterion: &mut Criterion) {
criterion.bench_function("store_memory_find_keys_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_keys_by_prefix::<MemoryDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_find_keys_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_keys_by_prefix::<RocksDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_find_keys_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_keys_by_prefix::<DynamoDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_find_keys_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_keys_by_prefix::<ScyllaDbDatabase, _>(iterations, black_box).await
})
});
}
fn bench_find_key_values_by_prefix(criterion: &mut Criterion) {
criterion.bench_function("store_memory_find_key_values_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_key_values_by_prefix::<MemoryDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_find_key_values_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_key_values_by_prefix::<RocksDbDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_find_key_values_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_key_values_by_prefix::<DynamoDbDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_find_key_values_by_prefix", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::find_key_values_by_prefix::<ScyllaDbDatabase, _>(iterations, black_box)
.await
})
});
}
fn bench_read_value_bytes(criterion: &mut Criterion) {
criterion.bench_function("store_memory_read_value_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_value_bytes::<MemoryDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_read_value_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_value_bytes::<RocksDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_read_value_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_value_bytes::<DynamoDbDatabase, _>(iterations, black_box).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_read_value_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_value_bytes::<ScyllaDbDatabase, _>(iterations, black_box).await
})
});
}
fn bench_read_multi_values_bytes(criterion: &mut Criterion) {
criterion.bench_function("store_memory_read_multi_values_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_multi_values_bytes::<MemoryDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_read_multi_values_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_multi_values_bytes::<RocksDbDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_read_multi_values_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_multi_values_bytes::<DynamoDbDatabase, _>(iterations, black_box)
.await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_read_multi_values_bytes", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::read_multi_values_bytes::<ScyllaDbDatabase, _>(iterations, black_box)
.await
})
});
}
fn bench_write_batch(criterion: &mut Criterion) {
criterion.bench_function("store_memory_write_batch", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::write_batch::<MemoryDatabase>(iterations).await
})
});
#[cfg(with_rocksdb)]
criterion.bench_function("store_rocksdb_write_batch", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::write_batch::<RocksDbDatabase>(iterations).await
})
});
#[cfg(with_dynamodb)]
criterion.bench_function("store_dynamodb_write_batch", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::write_batch::<DynamoDbDatabase>(iterations).await
})
});
#[cfg(with_scylladb)]
criterion.bench_function("store_scylladb_write_batch", |bencher| {
bencher
.to_async(Runtime::new().expect("Failed to create Tokio runtime"))
.iter_custom(|iterations| async move {
performance::write_batch::<ScyllaDbDatabase>(iterations).await
})
});
}
criterion_group!(
benches,
bench_contains_key,
bench_contains_keys,
bench_find_keys_by_prefix,
bench_find_key_values_by_prefix,
bench_read_value_bytes,
bench_read_multi_values_bytes,
bench_write_batch
);
criterion_main!(benches);
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/build.rs | linera-service/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
fn main() -> Result<(), Box<dyn std::error::Error>> {
cfg_aliases::cfg_aliases! {
with_revm: { feature = "revm" },
with_testing: { any(test, feature = "test") },
with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
};
tonic_prost_build::compile_protos("src/exporter/proto/indexer.proto")?;
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/config.rs | linera-service/src/config.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{fmt, net::SocketAddr};
use linera_rpc::config::{ExporterServiceConfig, TlsConfig};
use serde::{
de::{Error, MapAccess, Visitor},
Deserialize, Deserializer, Serialize, Serializer,
};
/// The configuration file for the linera-exporter.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct BlockExporterConfig {
/// Identity for the block exporter state.
pub id: u32,
/// The server configuration for the linera-exporter.
pub service_config: ExporterServiceConfig,
/// The configuration file for the export destinations.
#[serde(default)]
pub destination_config: DestinationConfig,
/// The configuration file to impose various limits
/// on the resources used by the linera-exporter.
#[serde(default)]
pub limits: LimitsConfig,
/// The address to expose the `/metrics` endpoint on.
pub metrics_port: u16,
}
impl BlockExporterConfig {
/// Returns the address to expose the `/metrics` endpoint on.
pub fn metrics_address(&self) -> SocketAddr {
SocketAddr::from(([0, 0, 0, 0], self.metrics_port))
}
}
/// Configuration file for the exports.
#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)]
pub struct DestinationConfig {
/// The destination URIs to export to.
pub destinations: Vec<Destination>,
/// Export blocks to the current committee.
#[serde(default)]
pub committee_destination: bool,
}
// Each destination has an ID and a configuration.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)]
pub struct DestinationId {
address: String,
kind: DestinationKind,
}
impl DestinationId {
/// Creates a new destination ID from the address and kind.
pub fn new(address: String, kind: DestinationKind) -> Self {
Self { address, kind }
}
pub fn validator(address: String) -> Self {
Self {
address,
kind: DestinationKind::Validator,
}
}
/// Returns the address of the destination.
pub fn address(&self) -> &str {
&self.address
}
/// Returns the kind of the destination.
pub fn kind(&self) -> DestinationKind {
self.kind
}
}
/// The uri to provide export services to.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Destination {
Indexer {
/// The gRPC network protocol.
tls: TlsConfig,
/// The host name of the target destination (IP or hostname).
endpoint: String,
/// The port number of the target destination.
port: u16,
},
Validator {
/// The host name of the target destination (IP or hostname).
endpoint: String,
/// The port number of the target destination.
port: u16,
},
Logging {
/// The host name of the target destination (IP or hostname).
file_name: String,
},
}
/// The description for the gRPC based destination.
/// Discriminates the export mode and the client to use.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Copy, Hash)]
pub enum DestinationKind {
/// The indexer description.
Indexer,
/// The validator description.
Validator,
/// The logging target.
Logging,
}
impl Serialize for Destination {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
use serde::ser::SerializeMap;
match self {
Destination::Indexer {
tls,
endpoint,
port,
} => {
let mut map = serializer.serialize_map(Some(4))?;
map.serialize_entry("kind", "Indexer")?;
map.serialize_entry("tls", tls)?;
map.serialize_entry("endpoint", endpoint)?;
map.serialize_entry("port", port)?;
map.end()
}
Destination::Validator { endpoint, port } => {
let mut map = serializer.serialize_map(Some(3))?;
map.serialize_entry("kind", "Validator")?;
map.serialize_entry("endpoint", endpoint)?;
map.serialize_entry("port", port)?;
map.end()
}
Destination::Logging { file_name } => {
let mut map = serializer.serialize_map(Some(2))?;
map.serialize_entry("kind", "Logging")?;
map.serialize_entry("file_name", file_name)?;
map.end()
}
}
}
}
struct DestinationVisitor;
impl<'de> Visitor<'de> for DestinationVisitor {
type Value = Destination;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a map with a 'kind' field")
}
fn visit_map<V>(self, mut map: V) -> Result<Destination, V::Error>
where
V: MapAccess<'de>,
{
let mut kind: Option<String> = None;
let mut tls: Option<TlsConfig> = None;
let mut endpoint: Option<String> = None;
let mut port: Option<u16> = None;
let mut file_name: Option<String> = None;
while let Some(key) = map.next_key::<String>()? {
match key.as_str() {
"kind" => {
if kind.is_some() {
return Err(V::Error::duplicate_field("kind"));
}
kind = Some(map.next_value()?);
}
"tls" => {
if tls.is_some() {
return Err(V::Error::duplicate_field("tls"));
}
tls = Some(map.next_value()?);
}
"endpoint" => {
if endpoint.is_some() {
return Err(V::Error::duplicate_field("endpoint"));
}
endpoint = Some(map.next_value()?);
}
"port" => {
if port.is_some() {
return Err(V::Error::duplicate_field("port"));
}
port = Some(map.next_value()?);
}
"file_name" => {
if file_name.is_some() {
return Err(V::Error::duplicate_field("file_name"));
}
file_name = Some(map.next_value()?);
}
_ => {
// Ignore unknown fields
let _: serde::de::IgnoredAny = map.next_value()?;
}
}
}
let kind = kind.ok_or_else(|| V::Error::missing_field("kind"))?;
match kind.as_str() {
"Indexer" => {
let tls = tls.ok_or_else(|| V::Error::missing_field("tls"))?;
let endpoint = endpoint.ok_or_else(|| V::Error::missing_field("endpoint"))?;
let port = port.ok_or_else(|| V::Error::missing_field("port"))?;
Ok(Destination::Indexer {
tls,
endpoint,
port,
})
}
"Validator" => {
let endpoint = endpoint.ok_or_else(|| V::Error::missing_field("endpoint"))?;
let port = port.ok_or_else(|| V::Error::missing_field("port"))?;
Ok(Destination::Validator { endpoint, port })
}
"Logging" => {
let file_name = file_name.ok_or_else(|| V::Error::missing_field("file_name"))?;
Ok(Destination::Logging { file_name })
}
_ => Err(V::Error::unknown_variant(
&kind,
&["Indexer", "Validator", "Logging"],
)),
}
}
}
impl<'de> Deserialize<'de> for Destination {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_map(DestinationVisitor)
}
}
/// The configuration file to impose various limits
/// on the resources used by the linera-exporter.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
pub struct LimitsConfig {
/// Time period in milliseconds between periodic persistence
/// to the shared storage.
pub persistence_period_ms: u32,
/// Maximum size of the work queue i.e. maximum number
/// of blocks queued up for exports per destination.
pub work_queue_size: u16,
/// Maximum weight of the blob cache in megabytes.
pub blob_cache_weight_mb: u16,
/// Estimated number of elements for the blob cache.
pub blob_cache_items_capacity: u16,
/// Maximum weight of the block cache in megabytes.
pub block_cache_weight_mb: u16,
/// Estimated number of elements for the block cache.
pub block_cache_items_capacity: u16,
/// Maximum weight in megabytes for the combined
/// cache, consisting of small miscellaneous items.
pub auxiliary_cache_size_mb: u16,
}
impl Default for LimitsConfig {
fn default() -> Self {
Self {
persistence_period_ms: 299 * 1000,
work_queue_size: 256,
blob_cache_weight_mb: 1024,
blob_cache_items_capacity: 8192,
block_cache_weight_mb: 1024,
block_cache_items_capacity: 8192,
auxiliary_cache_size_mb: 1024,
}
}
}
impl Destination {
pub fn address(&self) -> String {
match &self {
Destination::Indexer {
tls,
endpoint,
port,
} => {
let tls = match tls {
TlsConfig::ClearText => "http",
TlsConfig::Tls => "https",
};
format!("{}://{}:{}", tls, endpoint, port)
}
Destination::Validator { endpoint, port } => {
format!("{}:{}:{}", "grpc", endpoint, port)
}
Destination::Logging { file_name } => file_name.to_string(),
}
}
pub fn id(&self) -> DestinationId {
let kind = match self {
Destination::Indexer { .. } => DestinationKind::Indexer,
Destination::Validator { .. } => DestinationKind::Validator,
Destination::Logging { .. } => DestinationKind::Logging,
};
DestinationId {
address: self.address(),
kind,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks that each `Destination` variant deserializes from TOML, with
    /// the variant selected by the `kind` field and the remaining keys
    /// filling the variant's fields.
    #[test]
    fn parse_from_str() {
        // An indexer destination carries an explicit TLS setting.
        let input = r#"
            tls = "ClearText"
            endpoint = "127.0.0.1"
            port = 8080
            kind = "Indexer"
            "#
        .to_string();
        let destination: Destination = toml::from_str(&input).unwrap();
        assert_eq!(
            destination,
            Destination::Indexer {
                tls: TlsConfig::ClearText,
                endpoint: "127.0.0.1".to_owned(),
                port: 8080,
            }
        );
        // A validator destination has no TLS field.
        let input = r#"
            endpoint = "127.0.0.1"
            port = 8080
            kind = "Validator"
            "#
        .to_string();
        let destination: Destination = toml::from_str(&input).unwrap();
        assert_eq!(
            destination,
            Destination::Validator {
                endpoint: "127.0.0.1".to_owned(),
                port: 8080,
            }
        );
        // A logging destination only needs a file name.
        let input = r#"
            file_name = "export.log"
            kind = "Logging"
            "#
        .to_string();
        let destination: Destination = toml::from_str(&input).unwrap();
        assert_eq!(
            destination,
            Destination::Logging {
                file_name: "export.log".to_owned(),
            }
        );
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/task_processor.rs | linera-service/src/task_processor.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Task processor for executing off-chain operators on behalf of on-chain applications.
//!
//! The task processor watches specified applications for requests to execute off-chain tasks,
//! runs external operator binaries, and submits the results back to the chain.
use std::{
cmp::Reverse,
collections::{BTreeMap, BTreeSet, BinaryHeap},
path::PathBuf,
sync::Arc,
};
use async_graphql::InputType as _;
use futures::{stream::StreamExt, FutureExt};
use linera_base::{
data_types::{TimeDelta, Timestamp},
identifiers::{ApplicationId, ChainId},
task_processor::{ProcessorActions, TaskOutcome},
};
use linera_core::{client::ChainClient, node::NotificationStream, worker::Reason};
use serde_json::json;
use tokio::{io::AsyncWriteExt, process::Command, select, sync::mpsc};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info};
use crate::controller::Update;
/// A map from operator names to their binary paths.
pub type OperatorMap = Arc<BTreeMap<String, PathBuf>>;

/// Parses an operator mapping of the form `name=path`, or just `name`.
///
/// When no `=` is present, the binary path defaults to the operator name
/// itself. Splitting happens at the first `=`, so the path itself may contain
/// further `=` characters. This never fails; the `Result` signature is what
/// the CLI value-parser interface expects.
pub fn parse_operator(s: &str) -> Result<(String, PathBuf), String> {
    match s.split_once('=') {
        Some((name, path)) => Ok((name.to_owned(), PathBuf::from(path))),
        None => Ok((s.to_owned(), PathBuf::from(s))),
    }
}
/// A scheduled wake-up: `Reverse` turns the max-heap `BinaryHeap` into a
/// min-heap ordered by timestamp. The optional ID names the application that
/// requested the callback; `None` marks a plain retry wake-up.
type Deadline = Reverse<(Timestamp, Option<ApplicationId>)>;

/// A task processor that watches applications and executes off-chain operators.
pub struct TaskProcessor<Env: linera_core::Environment> {
    /// The chain whose notifications are watched.
    chain_id: ChainId,
    /// The applications currently being watched on that chain.
    application_ids: Vec<ApplicationId>,
    /// Per application, the time at which we last honored a callback request.
    last_requested_callbacks: BTreeMap<ApplicationId, Timestamp>,
    /// Client used to query applications and execute resulting operations.
    chain_client: ChainClient<Env>,
    /// Stops the main loop when cancelled.
    cancellation_token: CancellationToken,
    /// Stream of chain notifications (e.g. new blocks).
    notifications: NotificationStream,
    /// Cloned into each spawned operator task to report its outcome.
    outcome_sender: mpsc::UnboundedSender<(ApplicationId, TaskOutcome)>,
    /// Receiving end of the outcome channel, drained by the main loop.
    outcome_receiver: mpsc::UnboundedReceiver<(ApplicationId, TaskOutcome)>,
    /// Channel delivering configuration updates (new application lists).
    update_receiver: mpsc::UnboundedReceiver<Update>,
    /// Min-heap (via `Reverse`) of pending wake-up deadlines.
    deadlines: BinaryHeap<Deadline>,
    /// Maps operator names to the binaries implementing them.
    operators: OperatorMap,
}
impl<Env: linera_core::Environment> TaskProcessor<Env> {
    /// Creates a new task processor.
    ///
    /// Subscribes to the chain's notification stream up front and panics if
    /// that subscription cannot be created. When `update_receiver` is `None`,
    /// a channel whose sender is dropped immediately is substituted, so no
    /// updates ever arrive.
    pub fn new(
        chain_id: ChainId,
        application_ids: Vec<ApplicationId>,
        chain_client: ChainClient<Env>,
        cancellation_token: CancellationToken,
        operators: OperatorMap,
        update_receiver: Option<mpsc::UnboundedReceiver<Update>>,
    ) -> Self {
        let notifications = chain_client.subscribe().expect("client subscription");
        let (outcome_sender, outcome_receiver) = mpsc::unbounded_channel();
        // `.1` keeps only the receiver; the dropped sender makes `recv()`
        // return `None` forever, disabling the corresponding `select!` arm.
        let update_receiver = update_receiver.unwrap_or_else(|| mpsc::unbounded_channel().1);
        Self {
            chain_id,
            application_ids,
            last_requested_callbacks: BTreeMap::new(),
            chain_client,
            cancellation_token,
            outcome_sender,
            outcome_receiver,
            notifications,
            deadlines: BinaryHeap::new(),
            operators,
            update_receiver,
        }
    }

    /// Runs the task processor until the cancellation token is triggered.
    ///
    /// Event loop over four sources: chain notifications (a new block
    /// re-queries every watched application), the earliest scheduled
    /// deadline, completed operator tasks, and configuration updates.
    pub async fn run(mut self) {
        info!("Watching for notifications for chain {}", self.chain_id);
        // Query all applications once up front, before waiting for events.
        self.process_actions(self.application_ids.clone()).await;
        loop {
            select! {
                Some(notification) = self.notifications.next() => {
                    if let Reason::NewBlock { .. } = notification.reason {
                        debug!(%self.chain_id, "Processing notification");
                        self.process_actions(self.application_ids.clone()).await;
                    }
                }
                _ = tokio::time::sleep(Self::duration_until_next_deadline(&self.deadlines)) => {
                    debug!("Processing event");
                    let application_ids = self.process_events();
                    self.process_actions(application_ids).await;
                }
                Some((application_id, outcome)) = self.outcome_receiver.recv() => {
                    if let Err(e) = self.submit_task_outcome(application_id, &outcome).await {
                        error!("Error while processing task outcome {outcome:?}: {e}");
                    }
                }
                Some(update) = self.update_receiver.recv() => {
                    self.apply_update(update).await;
                }
                _ = self.cancellation_token.cancelled().fuse() => {
                    break;
                }
            }
        }
        debug!("Notification stream ended.");
    }

    /// Returns how long to sleep until the earliest deadline, or effectively
    /// forever (`Duration::MAX`) when none is scheduled.
    fn duration_until_next_deadline(deadlines: &BinaryHeap<Deadline>) -> tokio::time::Duration {
        deadlines
            .peek()
            .map_or(tokio::time::Duration::MAX, |Reverse((x, _))| {
                x.delta_since(Timestamp::now()).as_duration()
            })
    }

    /// Applies a configuration update: replaces the watched application list,
    /// drops callback bookkeeping for removed applications, and immediately
    /// processes actions for newly added ones.
    async fn apply_update(&mut self, update: Update) {
        info!(
            "Applying update for chain {}: {:?}",
            self.chain_id, update.application_ids
        );
        let new_app_set: BTreeSet<_> = update.application_ids.iter().cloned().collect();
        let old_app_set: BTreeSet<_> = self.application_ids.iter().cloned().collect();
        // Retain only last_requested_callbacks for applications that are still active
        self.last_requested_callbacks
            .retain(|app_id, _| new_app_set.contains(app_id));
        // Update the application_ids
        self.application_ids = update.application_ids;
        // Process actions for newly added applications
        let new_apps: Vec<_> = self
            .application_ids
            .iter()
            .filter(|app_id| !old_app_set.contains(app_id))
            .cloned()
            .collect();
        if !new_apps.is_empty() {
            self.process_actions(new_apps).await;
        }
    }

    /// Pops every deadline that is due and returns the application IDs that
    /// requested them.
    ///
    /// The top entry is popped unconditionally: this runs only after the
    /// sleep until the earliest deadline has elapsed. Entries whose
    /// application ID is `None` contribute nothing to the result.
    fn process_events(&mut self) -> Vec<ApplicationId> {
        let now = Timestamp::now();
        let mut application_ids = Vec::new();
        while let Some(deadline) = self.deadlines.pop() {
            if let Reverse((_, Some(id))) = deadline {
                application_ids.push(id);
            }
            // Stop once the next deadline (if any) still lies in the future.
            let Some(Reverse((ts, _))) = self.deadlines.peek() else {
                break;
            };
            if *ts > now {
                break;
            }
        }
        application_ids
    }

    /// Queries each given application for its next actions, schedules any
    /// requested callback, and spawns one task per requested operator run.
    async fn process_actions(&mut self, application_ids: Vec<ApplicationId>) {
        for application_id in application_ids {
            debug!("Processing actions for {application_id}");
            let now = Timestamp::now();
            let last_requested_callback =
                self.last_requested_callbacks.get(&application_id).cloned();
            let actions = match self
                .query_actions(application_id, last_requested_callback, now)
                .await
            {
                Ok(actions) => actions,
                Err(error) => {
                    error!("Error reading application actions: {error}");
                    // Retry in at most 1 minute.
                    // NOTE(review): this deadline carries no application ID,
                    // so when it fires `process_events` yields an empty list
                    // and nothing is re-queried — confirm this wake-up-only
                    // behavior is intended.
                    self.deadlines.push(Reverse((
                        now.saturating_add(TimeDelta::from_secs(60)),
                        None,
                    )));
                    continue;
                }
            };
            if let Some(timestamp) = actions.request_callback {
                // Remember when we honored the request, then schedule it.
                self.last_requested_callbacks.insert(application_id, now);
                self.deadlines
                    .push(Reverse((timestamp, Some(application_id))));
            }
            for task in actions.execute_tasks {
                let sender = self.outcome_sender.clone();
                let operators = self.operators.clone();
                // Run the external operator without blocking the main loop.
                tokio::spawn(async move {
                    if let Err(e) = Self::execute_task(
                        application_id,
                        task.operator,
                        task.input,
                        sender,
                        operators,
                    )
                    .await
                    {
                        error!("Error executing task for {application_id}: {e}");
                    }
                });
            }
        }
    }

    /// Runs the named operator binary, feeding `input` on stdin and reporting
    /// its stdout as the task outcome through `sender`.
    ///
    /// Fails if the operator is unknown, the process cannot be spawned, or it
    /// exits with a non-zero status.
    async fn execute_task(
        application_id: ApplicationId,
        operator: String,
        input: String,
        sender: mpsc::UnboundedSender<(ApplicationId, TaskOutcome)>,
        operators: OperatorMap,
    ) -> Result<(), anyhow::Error> {
        let binary_path = operators
            .get(&operator)
            .ok_or_else(|| anyhow::anyhow!("unsupported operator: {}", operator))?;
        debug!("Executing task {operator} ({binary_path:?}) for {application_id}");
        let mut child = Command::new(binary_path)
            .stdin(std::process::Stdio::piped())
            .stdout(std::process::Stdio::piped())
            .spawn()?;
        let mut stdin = child.stdin.take().expect("stdin should be configured");
        stdin.write_all(input.as_bytes()).await?;
        // Close stdin so the child sees EOF and can finish reading its input.
        drop(stdin);
        let output = child.wait_with_output().await?;
        anyhow::ensure!(
            output.status.success(),
            "operator {} exited with status: {}",
            operator,
            output.status
        );
        let outcome = TaskOutcome {
            operator,
            output: String::from_utf8_lossy(&output.stdout).into(),
        };
        debug!("Done executing task for {application_id}");
        sender.send((application_id, outcome))?;
        Ok(())
    }

    /// Queries the application's GraphQL service for its `nextActions`,
    /// passing the time of the last honored callback request and `now`.
    async fn query_actions(
        &mut self,
        application_id: ApplicationId,
        last_requested_callback: Option<Timestamp>,
        now: Timestamp,
    ) -> Result<ProcessorActions, anyhow::Error> {
        let query = format!(
            "query {{ nextActions(lastRequestedCallback: {}, now: {}) }}",
            last_requested_callback.to_value(),
            now.to_value(),
        );
        let bytes = serde_json::to_vec(&json!({"query": query}))?;
        let query = linera_execution::Query::User {
            application_id,
            bytes,
        };
        let linera_execution::QueryOutcome {
            response,
            operations: _,
        } = self.chain_client.query_application(query, None).await?;
        let linera_execution::QueryResponse::User(response) = response else {
            anyhow::bail!("cannot get a system response for a user query");
        };
        let mut response: serde_json::Value = serde_json::from_slice(&response)?;
        // `take` moves the subtree out of the JSON document, avoiding a clone.
        let actions: ProcessorActions =
            serde_json::from_value(response["data"]["nextActions"].take())?;
        Ok(actions)
    }

    /// Reports a finished task's outcome back to the application and executes
    /// whatever operations the application schedules in response.
    async fn submit_task_outcome(
        &mut self,
        application_id: ApplicationId,
        task_outcome: &TaskOutcome,
    ) -> Result<(), anyhow::Error> {
        info!("Submitting task outcome for {application_id}: {task_outcome:?}");
        let query = format!(
            "query {{ processTaskOutcome(outcome: {{ operator: {}, output: {} }}) }}",
            task_outcome.operator.to_value(),
            task_outcome.output.to_value(),
        );
        let bytes = serde_json::to_vec(&json!({"query": query}))?;
        let query = linera_execution::Query::User {
            application_id,
            bytes,
        };
        let linera_execution::QueryOutcome {
            response: _,
            operations,
        } = self.chain_client.query_application(query, None).await?;
        if !operations.is_empty() {
            // Errors here are logged, not propagated: the outcome was already
            // delivered to the application's service.
            if let Err(e) = self
                .chain_client
                .execute_operations(operations, vec![])
                .await
            {
                // TODO: handle leader timeouts.
                error!("Failed to execute on-chain operations for {application_id}: {e}");
            }
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/lib.rs | linera-service/src/lib.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module provides the executables needed to operate a Linera service, including a placeholder wallet acting as a GraphQL service for user interfaces.
pub mod cli;
pub mod cli_wrappers;
pub mod config;
pub mod controller;
pub mod node_service;
pub mod project;
pub mod storage;
pub mod task_processor;
pub mod tracing;
pub mod util;
pub mod wallet;
pub use wallet::Wallet;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/benchmark.rs | linera-service/src/benchmark.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{collections::BTreeMap, path::Path, time::Duration};
use anyhow::{bail, Context as _, Result};
use clap::Parser as _;
use futures::future::{join_all, try_join_all};
use linera_base::{
async_graphql::InputType,
data_types::Amount,
identifiers::{Account, AccountOwner, ApplicationId, ChainId},
time::Instant,
vm::VmRuntime,
};
use linera_sdk::abis::fungible::{FungibleTokenAbi, InitialState, Parameters};
use linera_service::cli_wrappers::{
local_net::{PathProvider, ProcessInbox},
ApplicationWrapper, ClientWrapper, Faucet, Network, OnClientDrop,
};
use port_selector::random_free_tcp_port;
use rand::{Rng as _, SeedableRng};
use serde_json::Value;
use tracing::info;
// Command-line arguments for the benchmark binary. Plain `//` comments are
// used here deliberately: `///` doc comments on clap variants and fields
// become user-visible `--help` text, which must stay unchanged.
#[derive(clap::Parser)]
#[command(
    name = "linera-benchmark",
    version = linera_version::VersionInfo::default_clap_str(),
    about = "Run benchmarks against a Linera network",
)]
enum Args {
    // Fungible-token transfer benchmark: `wallets` wallets each make
    // `transactions` transfers, with the recipient pattern controlled by
    // `seed` and `uniform`.
    Fungible {
        /// The number of wallets in the test.
        #[arg(long = "wallets", default_value = "4")]
        wallets: usize,
        /// The number of transactions being made per wallet.
        #[arg(long = "transactions", default_value = "4")]
        transactions: usize,
        /// The faucet (which implicitly defines the network)
        #[arg(long = "faucet")]
        faucet: String,
        /// The seed for the PRNG determining the pattern of transactions.
        #[arg(long = "seed", default_value = "0")]
        seed: u64,
        #[arg(long = "uniform")]
        /// If set, each chain receives the exact same number of transfers.
        uniform: bool,
    },
}
#[tokio::main]
async fn main() -> Result<()> {
    linera_service::tracing::init("benchmark");
    // `Args` has a single variant, so this destructuring is irrefutable.
    let Args::Fungible {
        wallets,
        transactions,
        faucet,
        seed,
        uniform,
    } = Args::parse();
    benchmark_with_fungible(wallets, transactions, Faucet::new(faucet), seed, uniform).await
}
/// Runs the fungible-token benchmark: sets up `num_wallets` wallets, deploys
/// one fungible application per wallet, has every wallet perform
/// `num_transactions` transfers, measures TPS, and finally verifies that all
/// balances converge to the expected values.
async fn benchmark_with_fungible(
    num_wallets: usize,
    num_transactions: usize,
    faucet: Faucet,
    seed: u64,
    uniform: bool,
) -> Result<()> {
    info!("Creating the clients and initializing the wallets");
    let path_provider = PathProvider::create_temporary_directory().unwrap();
    // The publisher uses ID `num_wallets`, which cannot collide with the
    // per-wallet client IDs 0..num_wallets created below.
    let publisher = ClientWrapper::new(
        path_provider,
        Network::Grpc,
        None,
        num_wallets,
        OnClientDrop::CloseChains,
    );
    publisher.wallet_init(Some(&faucet)).await?;
    publisher.request_chain(&faucet, true).await?;
    let clients = (0..num_wallets)
        .map(|n| {
            let path_provider = PathProvider::create_temporary_directory().unwrap();
            Ok(ClientWrapper::new(
                path_provider,
                Network::Grpc,
                None,
                n,
                OnClientDrop::CloseChains,
            ))
        })
        .collect::<Result<Vec<_>, anyhow::Error>>()?;
    // Initialize all wallets concurrently; each requests its own chain.
    try_join_all(clients.iter().map(|client| async {
        client.wallet_init(Some(&faucet)).await?;
        client.request_chain(&faucet, true).await
    }))
    .await?;
    info!("Synchronizing balances (sanity check)");
    try_join_all(clients.iter().map(|user| async move {
        let chain = user.default_chain().context("missing default chain")?;
        user.sync(chain).await?;
        let balance = user.query_balance(Account::chain(chain)).await?;
        info!("User {:?} has {}", user.get_owner(), balance);
        Ok::<_, anyhow::Error>(())
    }))
    .await?;
    info!("Starting the node services and subscribing to the publisher chain.");
    let mut services = Vec::new();
    for client in &clients {
        let free_port = random_free_tcp_port().context("no free TCP port")?;
        let node_service = client
            .run_node_service(free_port, ProcessInbox::Automatic)
            .await?;
        services.push(node_service);
    }
    info!("Building the fungible application module.");
    let path = Path::new("examples/fungible").canonicalize().context(
        "`linera-benchmark` is meant to run from the root of the `linera-protocol` repository",
    )?;
    let (contract, service) = publisher.build_application(&path, "fungible", true).await?;
    info!("Publishing the fungible application module.");
    let module_id = publisher
        .publish_module::<FungibleTokenAbi, Parameters, InitialState>(
            contract,
            service,
            VmRuntime::Wasm,
            None,
        )
        .await?;
    /// Everything a wallet needs in order to act as one benchmark participant.
    struct BenchmarkContext {
        application_id: ApplicationId<FungibleTokenAbi>,
        owner: AccountOwner,
        default_chain: ChainId,
    }
    info!("Creating the fungible applications");
    // One application per wallet; each funds its owner with exactly
    // `num_transactions` tokens, so transfers of one token each exhaust it.
    let apps = try_join_all(clients.iter().zip(services).enumerate().map(
        |(i, (client, node_service))| async move {
            let owner = client.get_owner().context("missing owner")?;
            let default_chain = client.default_chain().context("missing default chain")?;
            let initial_state = InitialState {
                accounts: BTreeMap::from([(owner, Amount::from_tokens(num_transactions as u128))]),
            };
            // Distinct ticker per app; `leak` provides the &'static str the
            // parameters type requires.
            let parameters = Parameters::new(format!("FUN{}", i).leak());
            let application_id = node_service
                .create_application(&default_chain, &module_id, &parameters, &initial_state, &[])
                .await?;
            let context = BenchmarkContext {
                application_id,
                owner,
                default_chain,
            };
            let app = FungibleApp(
                node_service.make_application(&context.default_chain, &context.application_id)?,
            );
            Ok::<_, anyhow::Error>((app, context, node_service))
        },
    ))
    .await?;
    info!("Creating the transaction futures");
    // expected_balances[receiver][sender] counts how many units of the
    // sender's token the receiver should end up holding.
    let mut expected_balances = vec![vec![Amount::ZERO; apps.len()]; apps.len()];
    let mut rng = rand::rngs::SmallRng::seed_from_u64(seed);
    let transaction_futures = (0..num_transactions).flat_map(|transaction_i| {
        apps.iter()
            .enumerate()
            .map(|(sender_i, (sender_app, sender_context, _))| {
                // Either rotate deterministically through all receivers, or
                // pick one pseudo-randomly, depending on `uniform`.
                let receiver_i = if uniform {
                    (transaction_i + sender_i + 1) % apps.len()
                } else {
                    rng.gen_range(0..apps.len())
                };
                let (_, receiver_context, _) = &apps[receiver_i];
                expected_balances[receiver_i][sender_i]
                    .try_add_assign(Amount::ONE)
                    .unwrap();
                sender_app.transfer(
                    sender_context.owner,
                    Amount::ONE,
                    Account {
                        chain_id: receiver_context.default_chain,
                        owner: receiver_context.owner,
                    },
                )
            })
            .collect::<Vec<_>>()
    });
    info!("Making {} transactions", num_wallets * num_transactions);
    let timer = Instant::now();
    let results = join_all(transaction_futures).await;
    let successes = results.into_iter().filter(Result::is_ok).count();
    let tps: f64 = successes as f64 / timer.elapsed().as_secs_f64();
    let failures = num_wallets * num_transactions - successes;
    info!("Successes: {:?}", successes);
    info!("Failures: {:?}", failures);
    info!("TPS: {:.2}", tps);
    println!(
        "{{\
        \"successes\": {successes},
        \"failures\": {failures},
        \"tps\": {tps}
    }}"
    );
    // Verify that every (receiver chain, sender token) pair converges to the
    // expected balance, polling with linearly increasing back-off.
    try_join_all(apps.iter().zip(expected_balances).map(
        |((_, context, node_service), expected_balances)| {
            try_join_all(apps.iter().zip(expected_balances).map(
                |((_, sender_context, _), expected_balance)| async move {
                    if expected_balance == Amount::ZERO {
                        return Ok(()); // No transfers: The app won't be registered on this chain.
                    }
                    node_service.process_inbox(&context.default_chain).await?;
                    let app = FungibleApp(node_service.make_application(
                        &context.default_chain,
                        &sender_context.application_id,
                    )?);
                    // Retries for up to 0+1+2+3+4 = 10 seconds in total.
                    for i in 0.. {
                        linera_base::time::timer::sleep(Duration::from_secs(i)).await;
                        let actual_balance = app.get_amount(&context.owner).await;
                        if actual_balance == expected_balance {
                            break;
                        }
                        if i == 4 {
                            bail!(
                                "Expected balance: {}, actual balance: {}",
                                expected_balance,
                                actual_balance
                            );
                        }
                    }
                    assert_eq!(app.get_amount(&context.owner).await, expected_balance);
                    Ok(())
                },
            ))
        },
    ))
    .await?;
    Ok(())
}
/// Thin typed wrapper around the GraphQL handle of a deployed fungible app.
struct FungibleApp(ApplicationWrapper<FungibleTokenAbi>);
impl FungibleApp {
    /// Queries the token balance of `account_owner`; a missing entry in the
    /// response defaults to zero.
    async fn get_amount(&self, account_owner: &AccountOwner) -> Amount {
        let query = format!(
            "accounts {{ entry(key: {}) {{ value }} }}",
            account_owner.to_value()
        );
        let body = self.0.query(&query).await.unwrap();
        let value = body["accounts"]["entry"]["value"].clone();
        serde_json::from_value(value).unwrap_or_default()
    }

    /// Sends `amount_transfer` tokens from `account_owner` to `destination`.
    async fn transfer(
        &self,
        account_owner: AccountOwner,
        amount_transfer: Amount,
        destination: Account,
    ) -> Result<Value> {
        let mutation = format!(
            "transfer(owner: {}, amount: \"{}\", targetAccount: {})",
            account_owner.to_value(),
            amount_transfer,
            destination.to_value(),
        );
        self.0.mutate(mutation).await
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/node_service.rs | linera-service/src/node_service.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Cow, future::IntoFuture, iter, net::SocketAddr, num::NonZeroU16, sync::Arc};
use async_graphql::{
futures_util::Stream, resolver_utils::ContainerType, Error, MergedObject, OutputType,
ScalarType, Schema, SimpleObject, Subscription,
};
use async_graphql_axum::{GraphQLRequest, GraphQLResponse, GraphQLSubscription};
use axum::{extract::Path, http::StatusCode, response, response::IntoResponse, Extension, Router};
use futures::{lock::Mutex, Future, FutureExt as _, TryStreamExt as _};
use linera_base::{
crypto::{CryptoError, CryptoHash},
data_types::{
Amount, ApplicationDescription, ApplicationPermissions, Bytecode, Epoch, TimeDelta,
},
identifiers::{
Account, AccountOwner, ApplicationId, ChainId, IndexAndEvent, ModuleId, StreamId,
},
ownership::{ChainOwnership, TimeoutConfig},
vm::VmRuntime,
BcsHexParseError,
};
use linera_chain::{
types::{ConfirmedBlock, GenericCertificate},
ChainStateView,
};
use linera_client::chain_listener::{
ChainListener, ChainListenerConfig, ClientContext, ListenerCommand,
};
use linera_core::{
client::chain_client::{self, ChainClient},
data_types::ClientOutcome,
wallet::Wallet as _,
worker::Notification,
};
use linera_execution::{
committee::Committee, system::AdminOperation, Operation, Query, QueryOutcome, QueryResponse,
SystemOperation,
};
#[cfg(with_metrics)]
use linera_metrics::monitoring_server;
use linera_sdk::linera_base_types::BlobContent;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::sync::{mpsc::UnboundedReceiver, OwnedRwLockReadGuard};
use tokio_util::sync::CancellationToken;
use tower_http::cors::CorsLayer;
use tracing::{debug, error, info, instrument, trace};
use crate::util;
// The chains known to the wallet: all tracked chain IDs plus the optional
// default chain. Plain `//` comments on purpose: `///` doc comments would be
// exported as GraphQL schema descriptions by `SimpleObject`.
#[derive(SimpleObject, Serialize, Deserialize, Clone)]
pub struct Chains {
    // Every chain ID tracked by the wallet.
    pub list: Vec<ChainId>,
    // The wallet's default chain, if one is configured.
    pub default: Option<ChainId>,
}
/// Our root GraphQL query type.
pub struct QueryRoot<C> {
    // Shared client context; the mutex serializes access to it.
    context: Arc<Mutex<C>>,
    // Port this service was started on — presumably reported by queries
    // defined outside this chunk; confirm against the resolvers.
    port: NonZeroU16,
    // The wallet's default chain, if any.
    default_chain: Option<ChainId>,
}
/// Our root GraphQL subscription type.
pub struct SubscriptionRoot<C> {
    // Shared client context, used to build chain clients for notification streams.
    context: Arc<Mutex<C>>,
}
/// Our root GraphQL mutation type.
pub struct MutationRoot<C> {
    // Shared client context, used to build chain clients and update the wallet.
    context: Arc<Mutex<C>>,
}
/// Errors surfaced by the node service's HTTP/GraphQL handlers.
#[derive(Debug, thiserror::Error)]
enum NodeServiceError {
    /// A chain-client operation failed.
    #[error(transparent)]
    ChainClient(#[from] chain_client::Error),
    /// A BCS-hex payload could not be parsed.
    #[error(transparent)]
    BcsHex(#[from] BcsHexParseError),
    /// JSON (de)serialization failed.
    #[error(transparent)]
    Json(#[from] serde_json::Error),
    /// The supplied chain ID could not be parsed.
    #[error("malformed chain ID: {0}")]
    InvalidChainId(CryptoError),
    /// Any other client-library error.
    #[error(transparent)]
    Client(#[from] linera_client::Error),
}
impl IntoResponse for NodeServiceError {
    /// Maps parse-style client mistakes to 400 and everything else to 500,
    /// with a JSON `{"error": …}` body carrying the display message.
    fn into_response(self) -> response::Response {
        let status = match &self {
            NodeServiceError::InvalidChainId(_) | NodeServiceError::BcsHex(_) => {
                StatusCode::BAD_REQUEST
            }
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        };
        let body = json!({"error": self.to_string()}).to_string();
        (status, body).into_response()
    }
}
#[Subscription]
impl<C> SubscriptionRoot<C>
where
    C: ClientContext + 'static,
{
    /// Subscribes to notifications from the specified chain.
    async fn notifications(
        &self,
        chain_id: ChainId,
    ) -> Result<impl Stream<Item = Notification>, Error> {
        // Hold the context lock only while constructing the chain client;
        // the returned stream itself does not keep the lock.
        let client = self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?;
        Ok(client.subscribe()?)
    }
}
impl<C> MutationRoot<C>
where
    C: ClientContext,
{
    /// Executes a system operation on `chain_id`, retrying across round
    /// timeouts, and returns the hash of the certificate that committed it.
    async fn execute_system_operation(
        &self,
        system_operation: SystemOperation,
        chain_id: ChainId,
    ) -> Result<CryptoHash, Error> {
        let certificate = self
            .apply_client_command(&chain_id, move |client| {
                // Cloned because the closure may be invoked again on retry.
                let operation = Operation::system(system_operation.clone());
                async move {
                    let result = client
                        .execute_operation(operation)
                        .await
                        .map_err(Error::from);
                    (result, client)
                }
            })
            .await?;
        Ok(certificate.hash())
    }

    /// Applies the given function to the chain client.
    /// Updates the wallet regardless of the outcome. As long as the function returns a round
    /// timeout, it will wait and retry.
    async fn apply_client_command<F, Fut, T>(
        &self,
        chain_id: &ChainId,
        mut f: F,
    ) -> Result<T, Error>
    where
        F: FnMut(ChainClient<C::Environment>) -> Fut,
        Fut: Future<Output = (Result<ClientOutcome<T>, Error>, ChainClient<C::Environment>)>,
    {
        loop {
            // A fresh client is built on every attempt, so each retry sees
            // the latest wallet state.
            let client = self
                .context
                .lock()
                .await
                .make_chain_client(*chain_id)
                .await?;
            // Subscribe before running the command so no round-change
            // notification can be missed while it executes.
            let mut stream = client.subscribe()?;
            let (result, client) = f(client).await;
            // Persist wallet changes even when the command itself failed.
            self.context.lock().await.update_wallet(&client).await?;
            let timeout = match result? {
                ClientOutcome::Committed(t) => return Ok(t),
                ClientOutcome::WaitForTimeout(timeout) => timeout,
            };
            drop(client);
            util::wait_for_next_round(&mut stream, timeout).await;
        }
    }
}
#[async_graphql::Object(cache_control(no_cache))]
impl<C> MutationRoot<C>
where
C: ClientContext + 'static,
{
/// Processes the inbox and returns the lists of certificate hashes that were created, if any.
async fn process_inbox(
&self,
#[graphql(desc = "The chain whose inbox is being processed.")] chain_id: ChainId,
) -> Result<Vec<CryptoHash>, Error> {
let mut hashes = Vec::new();
loop {
let client = self
.context
.lock()
.await
.make_chain_client(chain_id)
.await?;
let result = client.process_inbox().await;
self.context.lock().await.update_wallet(&client).await?;
let (certificates, maybe_timeout) = result?;
hashes.extend(certificates.into_iter().map(|cert| cert.hash()));
match maybe_timeout {
None => return Ok(hashes),
Some(timestamp) => {
let mut stream = client.subscribe()?;
drop(client);
util::wait_for_next_round(&mut stream, timestamp).await;
}
}
}
}
/// Synchronizes the chain with the validators. Returns the chain's length.
///
/// This is only used for testing, to make sure that a client is up to date.
// TODO(#4718): Remove this mutation.
async fn sync(
&self,
#[graphql(desc = "The chain being synchronized.")] chain_id: ChainId,
) -> Result<u64, Error> {
let client = self
.context
.lock()
.await
.make_chain_client(chain_id)
.await?;
let info = client.synchronize_from_validators().await?;
self.context.lock().await.update_wallet(&client).await?;
Ok(info.next_block_height.0)
}
/// Retries the pending block that was unsuccessfully proposed earlier.
async fn retry_pending_block(
&self,
#[graphql(desc = "The chain on whose block is being retried.")] chain_id: ChainId,
) -> Result<Option<CryptoHash>, Error> {
let client = self
.context
.lock()
.await
.make_chain_client(chain_id)
.await?;
let outcome = client.process_pending_block().await?;
self.context.lock().await.update_wallet(&client).await?;
match outcome {
ClientOutcome::Committed(Some(certificate)) => Ok(Some(certificate.hash())),
ClientOutcome::Committed(None) => Ok(None),
ClientOutcome::WaitForTimeout(timeout) => Err(Error::from(format!(
"Please try again at {}",
timeout.timestamp
))),
}
}
/// Transfers `amount` units of value from the given owner's account to the recipient.
/// If no owner is given, try to take the units out of the chain account.
async fn transfer(
&self,
#[graphql(desc = "The chain which native tokens are being transferred from.")]
chain_id: ChainId,
#[graphql(desc = "The account being debited on the chain.")] owner: AccountOwner,
#[graphql(desc = "The recipient of the transfer.")] recipient: Account,
#[graphql(desc = "The amount being transferred.")] amount: Amount,
) -> Result<CryptoHash, Error> {
self.apply_client_command(&chain_id, move |client| async move {
let result = client
.transfer(owner, amount, recipient)
.await
.map_err(Error::from)
.map(|outcome| outcome.map(|certificate| certificate.hash()));
(result, client)
})
.await
}
/// Claims `amount` units of value from the given owner's account in the remote
/// `target` chain. Depending on its configuration, the `target` chain may refuse to
/// process the message.
async fn claim(
&self,
#[graphql(desc = "The chain for whom owner is one of the owner.")] chain_id: ChainId,
#[graphql(desc = "The owner of chain targetId being debited.")] owner: AccountOwner,
#[graphql(desc = "The chain whose owner is being debited.")] target_id: ChainId,
#[graphql(desc = "The recipient of the transfer.")] recipient: Account,
#[graphql(desc = "The amount being transferred.")] amount: Amount,
) -> Result<CryptoHash, Error> {
self.apply_client_command(&chain_id, move |client| async move {
let result = client
.claim(owner, target_id, recipient, amount)
.await
.map_err(Error::from)
.map(|outcome| outcome.map(|certificate| certificate.hash()));
(result, client)
})
.await
}
/// Test if a data blob is readable from a transaction in the current chain.
// TODO(#2490): Consider removing or renaming this.
async fn read_data_blob(
&self,
chain_id: ChainId,
hash: CryptoHash,
) -> Result<CryptoHash, Error> {
self.apply_client_command(&chain_id, move |client| async move {
let result = client
.read_data_blob(hash)
.await
.map_err(Error::from)
.map(|outcome| outcome.map(|certificate| certificate.hash()));
(result, client)
})
.await
}
/// Creates a new single-owner chain.
async fn open_chain(
&self,
#[graphql(desc = "The chain paying for the creation of the new chain.")] chain_id: ChainId,
#[graphql(desc = "The owner of the new chain.")] owner: AccountOwner,
#[graphql(desc = "The balance of the chain being created. Zero if `None`.")]
balance: Option<Amount>,
) -> Result<ChainId, Error> {
let ownership = ChainOwnership::single(owner);
let balance = balance.unwrap_or(Amount::ZERO);
let description = self
.apply_client_command(&chain_id, move |client| {
let ownership = ownership.clone();
async move {
let result = client
.open_chain(ownership, ApplicationPermissions::default(), balance)
.await
.map_err(Error::from)
.map(|outcome| outcome.map(|(chain_id, _)| chain_id));
(result, client)
}
})
.await?;
Ok(description.id())
}
/// Creates a new multi-owner chain.
#[expect(clippy::too_many_arguments)]
async fn open_multi_owner_chain(
&self,
#[graphql(desc = "The chain paying for the creation of the new chain.")] chain_id: ChainId,
#[graphql(desc = "Permissions for applications on the new chain")]
application_permissions: Option<ApplicationPermissions>,
#[graphql(desc = "The owners of the chain")] owners: Vec<AccountOwner>,
#[graphql(desc = "The weights of the owners")] weights: Option<Vec<u64>>,
#[graphql(desc = "The number of multi-leader rounds")] multi_leader_rounds: Option<u32>,
#[graphql(desc = "The balance of the chain. Zero if `None`")] balance: Option<Amount>,
#[graphql(desc = "The duration of the fast round, in milliseconds; default: no timeout")]
fast_round_ms: Option<u64>,
#[graphql(
desc = "The duration of the first single-leader and all multi-leader rounds",
default = 10_000
)]
base_timeout_ms: u64,
#[graphql(
desc = "The number of milliseconds by which the timeout increases after each \
single-leader round",
default = 1_000
)]
timeout_increment_ms: u64,
#[graphql(
desc = "The age of an incoming tracked or protected message after which the \
validators start transitioning the chain to fallback mode, in milliseconds.",
default = 86_400_000
)]
fallback_duration_ms: u64,
) -> Result<ChainId, Error> {
let owners = if let Some(weights) = weights {
if weights.len() != owners.len() {
return Err(Error::new(format!(
"There are {} owners but {} weights.",
owners.len(),
weights.len()
)));
}
owners.into_iter().zip(weights).collect::<Vec<_>>()
} else {
owners
.into_iter()
.zip(iter::repeat(100))
.collect::<Vec<_>>()
};
let multi_leader_rounds = multi_leader_rounds.unwrap_or(u32::MAX);
let timeout_config = TimeoutConfig {
fast_round_duration: fast_round_ms.map(TimeDelta::from_millis),
base_timeout: TimeDelta::from_millis(base_timeout_ms),
timeout_increment: TimeDelta::from_millis(timeout_increment_ms),
fallback_duration: TimeDelta::from_millis(fallback_duration_ms),
};
let ownership = ChainOwnership::multiple(owners, multi_leader_rounds, timeout_config);
let balance = balance.unwrap_or(Amount::ZERO);
let description = self
.apply_client_command(&chain_id, move |client| {
let ownership = ownership.clone();
let application_permissions = application_permissions.clone().unwrap_or_default();
async move {
let result = client
.open_chain(ownership, application_permissions, balance)
.await
.map_err(Error::from)
.map(|outcome| outcome.map(|(chain_id, _)| chain_id));
(result, client)
}
})
.await?;
Ok(description.id())
}
/// Closes the chain. Returns the new block hash if successful or `None` if it was already closed.
async fn close_chain(
&self,
#[graphql(desc = "The chain being closed.")] chain_id: ChainId,
) -> Result<Option<CryptoHash>, Error> {
let maybe_cert = self
.apply_client_command(&chain_id, |client| async move {
let result = client.close_chain().await.map_err(Error::from);
(result, client)
})
.await?;
Ok(maybe_cert.as_ref().map(GenericCertificate::hash))
}
/// Changes the chain to a single-owner chain
async fn change_owner(
    &self,
    #[graphql(desc = "The chain whose ownership changes")] chain_id: ChainId,
    #[graphql(desc = "The new single owner of the chain")] new_owner: AccountOwner,
) -> Result<CryptoHash, Error> {
    // The new owner becomes the sole super owner; the regular owner list is
    // emptied and the default timeouts are restored.
    let ownership_change = SystemOperation::ChangeOwnership {
        super_owners: vec![new_owner],
        owners: Vec::new(),
        first_leader: None,
        multi_leader_rounds: 2,
        open_multi_leader_rounds: false,
        timeout_config: TimeoutConfig::default(),
    };
    self.execute_system_operation(ownership_change, chain_id).await
}
/// Changes the ownership of the chain
#[expect(clippy::too_many_arguments)]
async fn change_multiple_owners(
&self,
#[graphql(desc = "The chain whose ownership changes")] chain_id: ChainId,
#[graphql(desc = "The new list of owners of the chain")] new_owners: Vec<AccountOwner>,
#[graphql(desc = "The new list of weights of the owners")] new_weights: Vec<u64>,
#[graphql(desc = "The multi-leader round of the chain")] multi_leader_rounds: u32,
#[graphql(
desc = "Whether multi-leader rounds are unrestricted, that is not limited to chain owners."
)]
open_multi_leader_rounds: bool,
#[graphql(desc = "The leader of the first single-leader round. \
If not set, this is random like other rounds.")]
first_leader: Option<AccountOwner>,
#[graphql(desc = "The duration of the fast round, in milliseconds; default: no timeout")]
fast_round_ms: Option<u64>,
#[graphql(
desc = "The duration of the first single-leader and all multi-leader rounds",
default = 10_000
)]
base_timeout_ms: u64,
#[graphql(
desc = "The number of milliseconds by which the timeout increases after each \
single-leader round",
default = 1_000
)]
timeout_increment_ms: u64,
#[graphql(
desc = "The age of an incoming tracked or protected message after which the \
validators start transitioning the chain to fallback mode, in milliseconds.",
default = 86_400_000
)]
fallback_duration_ms: u64,
) -> Result<CryptoHash, Error> {
let operation = SystemOperation::ChangeOwnership {
super_owners: Vec::new(),
owners: new_owners.into_iter().zip(new_weights).collect(),
first_leader,
multi_leader_rounds,
open_multi_leader_rounds,
timeout_config: TimeoutConfig {
fast_round_duration: fast_round_ms.map(TimeDelta::from_millis),
base_timeout: TimeDelta::from_millis(base_timeout_ms),
timeout_increment: TimeDelta::from_millis(timeout_increment_ms),
fallback_duration: TimeDelta::from_millis(fallback_duration_ms),
},
};
self.execute_system_operation(operation, chain_id).await
}
/// Changes the application permissions configuration on this chain.
#[expect(clippy::too_many_arguments)]
async fn change_application_permissions(
    &self,
    #[graphql(desc = "The chain whose permissions are being changed")] chain_id: ChainId,
    #[graphql(desc = "These applications are allowed to close the current chain.")]
    close_chain: Vec<ApplicationId>,
    #[graphql(
        desc = "If this is `None`, all system operations and application operations are allowed.
        If it is `Some`, only operations from the specified applications are allowed,
        and no system operations."
    )]
    execute_operations: Option<Vec<ApplicationId>>,
    #[graphql(
        desc = "At least one operation or incoming message from each of these applications must occur in every block."
    )]
    mandatory_applications: Vec<ApplicationId>,
    #[graphql(desc = "These applications are allowed to change the application permissions.")]
    change_application_permissions: Vec<ApplicationId>,
    #[graphql(
        desc = "These applications are allowed to perform calls to services as oracles."
    )]
    call_service_as_oracle: Option<Vec<ApplicationId>>,
    #[graphql(desc = "These applications are allowed to perform HTTP requests.")]
    make_http_requests: Vec<ApplicationId>,
) -> Result<CryptoHash, Error> {
    // Bundle the new permission set into a system operation and submit it to
    // the chain; the resulting block's hash is returned.
    let operation = SystemOperation::ChangeApplicationPermissions(ApplicationPermissions {
        execute_operations,
        mandatory_applications,
        close_chain,
        change_application_permissions,
        call_service_as_oracle,
        make_http_requests,
    });
    self.execute_system_operation(operation, chain_id).await
}
/// (admin chain only) Registers a new committee. This will notify the subscribers of
/// the admin chain so that they can migrate to the new epoch (by accepting the
/// notification as an "incoming message" in a next block).
async fn create_committee(
    &self,
    chain_id: ChainId,
    committee: Committee,
) -> Result<CryptoHash, Error> {
    // The command closure may be retried, hence the clone per invocation.
    let certificate = self
        .apply_client_command(&chain_id, move |client| {
            let committee = committee.clone();
            async move {
                let outcome = client
                    .stage_new_committee(committee)
                    .await
                    .map_err(Error::from);
                (outcome, client)
            }
        })
        .await?;
    Ok(certificate.hash())
}
/// (admin chain only) Removes a committee. Once this message is accepted by a chain,
/// blocks from the retired epoch will not be accepted until they are followed (hence
/// re-certified) by a block certified by a recent committee.
async fn remove_committee(&self, chain_id: ChainId, epoch: Epoch) -> Result<CryptoHash, Error> {
    // Submit the removal as an admin system operation on the given chain.
    self.execute_system_operation(
        SystemOperation::Admin(AdminOperation::RemoveCommittee { epoch }),
        chain_id,
    )
    .await
}
/// Publishes a new application module.
async fn publish_module(
    &self,
    #[graphql(desc = "The chain publishing the module")] chain_id: ChainId,
    #[graphql(desc = "The bytecode of the contract code")] contract: Bytecode,
    #[graphql(desc = "The bytecode of the service code (only relevant for WebAssembly)")]
    service: Bytecode,
    #[graphql(desc = "The virtual machine being used (either Wasm or Evm)")]
    vm_runtime: VmRuntime,
) -> Result<ModuleId, Error> {
    // The command closure may run more than once, so the bytecodes are cloned
    // on each invocation.
    self.apply_client_command(&chain_id, move |client| {
        let contract_code = contract.clone();
        let service_code = service.clone();
        async move {
            let outcome = client
                .publish_module(contract_code, service_code, vm_runtime)
                .await
                .map_err(Error::from)
                // Keep only the module ID from the publication outcome.
                .map(|outcome| outcome.map(|(module_id, _)| module_id));
            (outcome, client)
        }
    })
    .await
}
/// Publishes a new data blob.
async fn publish_data_blob(
&self,
#[graphql(desc = "The chain paying for the blob publication")] chain_id: ChainId,
#[graphql(desc = "The content of the data blob being created")] bytes: Vec<u8>,
) -> Result<CryptoHash, Error> {
self.apply_client_command(&chain_id, |client| {
let bytes = bytes.clone();
async move {
let result = client.publish_data_blob(bytes).await.map_err(Error::from);
(result, client)
}
})
.await
.map(|_| CryptoHash::new(&BlobContent::new_data(bytes)))
}
/// Creates a new application.
async fn create_application(
    &self,
    #[graphql(desc = "The chain paying for the creation of the application")] chain_id: ChainId,
    #[graphql(desc = "The module ID of the application being created")] module_id: ModuleId,
    #[graphql(desc = "The JSON serialization of the parameters of the application")]
    parameters: String,
    #[graphql(
        desc = "The JSON serialization of the instantiation argument of the application"
    )]
    instantiation_argument: String,
    #[graphql(desc = "The dependencies of the application being created")]
    required_application_ids: Vec<ApplicationId>,
) -> Result<ApplicationId, Error> {
    // The closure may be retried, so fresh byte copies of the JSON strings and
    // dependency list are made per invocation.
    self.apply_client_command(&chain_id, move |client| {
        let params_bytes = parameters.clone().into_bytes();
        let init_bytes = instantiation_argument.clone().into_bytes();
        let dependencies = required_application_ids.clone();
        async move {
            let outcome = client
                .create_application_untyped(module_id, params_bytes, init_bytes, dependencies)
                .await
                .map_err(Error::from)
                // Keep only the new application's ID from the outcome.
                .map(|outcome| outcome.map(|(application_id, _)| application_id));
            (outcome, client)
        }
    })
    .await
}
}
// Read-only GraphQL endpoints of the node service.
// NOTE: new comments here use `//` on purpose — `///` doc comments would be
// picked up by async-graphql as schema descriptions and change the exported SDL.
#[async_graphql::Object(cache_control(no_cache))]
impl<C> QueryRoot<C>
where
    C: ClientContext + 'static,
{
    // Returns the full (extended) chain state view for `chain_id`.
    async fn chain(
        &self,
        chain_id: ChainId,
    ) -> Result<
        ChainStateExtendedView<<C::Environment as linera_core::Environment>::StorageContext>,
        Error,
    > {
        let client = self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?;
        let view = client.chain_state_view().await?;
        Ok(ChainStateExtendedView::new(view))
    }

    // Lists the applications on `chain_id`, each with a link to its GraphQL
    // endpoint on this service (built from `self.port`).
    async fn applications(&self, chain_id: ChainId) -> Result<Vec<ApplicationOverview>, Error> {
        let client = self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?;
        let applications = client
            .chain_state_view()
            .await?
            .execution_state
            .list_applications()
            .await?;
        let overviews = applications
            .into_iter()
            .map(|(id, description)| ApplicationOverview::new(id, description, self.port, chain_id))
            .collect();
        Ok(overviews)
    }

    // Returns the chain IDs known to the wallet, plus the configured default chain.
    async fn chains(&self) -> Result<Chains, Error> {
        Ok(Chains {
            list: self
                .context
                .lock()
                .await
                .wallet()
                .chain_ids()
                .try_collect()
                .await?,
            default: self.default_chain,
        })
    }

    // Returns the confirmed block with the given hash, or the latest block of
    // `chain_id` when `hash` is `None`. `None` is returned for an empty chain.
    async fn block(
        &self,
        hash: Option<CryptoHash>,
        chain_id: ChainId,
    ) -> Result<Option<ConfirmedBlock>, Error> {
        let client = self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?;
        let hash = match hash {
            Some(hash) => Some(hash),
            // Fall back to the hash of the chain's tip, if any.
            None => client.chain_info().await?.block_hash,
        };
        if let Some(hash) = hash {
            let block = client.read_confirmed_block(hash).await?;
            Ok(Some(block))
        } else {
            Ok(None)
        }
    }

    // Returns the events of `stream_id` on `chain_id` starting at `start_index`.
    async fn events_from_index(
        &self,
        chain_id: ChainId,
        stream_id: StreamId,
        start_index: u32,
    ) -> Result<Vec<IndexAndEvent>, Error> {
        Ok(self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?
            .events_from_index(stream_id, start_index)
            .await?)
    }

    // Returns up to `limit` confirmed blocks (default 10), walking backwards
    // from `from` — or from the chain tip when `from` is `None`.
    async fn blocks(
        &self,
        from: Option<CryptoHash>,
        chain_id: ChainId,
        limit: Option<u32>,
    ) -> Result<Vec<ConfirmedBlock>, Error> {
        let client = self
            .context
            .lock()
            .await
            .make_chain_client(chain_id)
            .await?;
        let limit = limit.unwrap_or(10);
        let from = match from {
            Some(from) => Some(from),
            None => client.chain_info().await?.block_hash,
        };
        let Some(from) = from else {
            // Empty chain: nothing to return.
            return Ok(vec![]);
        };
        let mut hash = Some(from);
        let mut values = Vec::new();
        for _ in 0..limit {
            let Some(next_hash) = hash else {
                // Reached the first block of the chain before hitting `limit`.
                break;
            };
            let value = client.read_confirmed_block(next_hash).await?;
            hash = value.block().header.previous_block_hash;
            values.push(value);
        }
        Ok(values)
    }

    /// Returns the version information on this node service.
    async fn version(&self) -> linera_version::VersionInfo {
        linera_version::VersionInfo::default()
    }
}
// What follows is a hack to add a chain_id field to `ChainStateView` based on
// https://async-graphql.github.io/async-graphql/en/merging_objects.html

// Exposes the chain ID as an extra GraphQL field to be merged into the view.
struct ChainStateViewExtension(ChainId);

#[async_graphql::Object(cache_control(no_cache))]
impl ChainStateViewExtension {
    async fn chain_id(&self) -> ChainId {
        self.0
    }
}

// Merges the chain-ID extension with the read-only chain state view into a
// single GraphQL object.
#[derive(MergedObject)]
struct ChainStateExtendedView<C>(ChainStateViewExtension, ReadOnlyChainStateView<C>)
where
    C: linera_views::context::Context + Clone + Send + Sync + 'static,
    C::Extra: linera_execution::ExecutionRuntimeContext;

/// A wrapper type that allows proxying GraphQL queries to a [`ChainStateView`] that's behind an
/// [`OwnedRwLockReadGuard`].
pub struct ReadOnlyChainStateView<C>(OwnedRwLockReadGuard<ChainStateView<C>>)
where
    C: linera_views::context::Context + Clone + Send + Sync + 'static;
impl<C> ContainerType for ReadOnlyChainStateView<C>
where
    C: linera_views::context::Context + Clone + Send + Sync + 'static,
{
    // Forwards field resolution to the guarded `ChainStateView`, making the
    // wrapper transparent to GraphQL queries.
    async fn resolve_field(
        &self,
        context: &async_graphql::Context<'_>,
    ) -> async_graphql::ServerResult<Option<async_graphql::Value>> {
        self.0.resolve_field(context).await
    }
}
impl<C> OutputType for ReadOnlyChainStateView<C>
where
    C: linera_views::context::Context + Clone + Send + Sync + 'static,
{
    // The wrapper reuses the inner view's GraphQL type name and registry
    // entry, so it is indistinguishable from `ChainStateView` in the schema.
    fn type_name() -> Cow<'static, str> {
        ChainStateView::<C>::type_name()
    }

    fn create_type_info(registry: &mut async_graphql::registry::Registry) -> String {
        ChainStateView::<C>::create_type_info(registry)
    }

    // Resolution is delegated to the guarded view as well.
    async fn resolve(
        &self,
        context: &async_graphql::ContextSelectionSet<'_>,
        field: &async_graphql::Positioned<async_graphql::parser::types::Field>,
    ) -> async_graphql::ServerResult<async_graphql::Value> {
        self.0.resolve(context, field).await
    }
}
impl<C> ChainStateExtendedView<C>
where
    C: linera_views::context::Context + Clone + Send + Sync + 'static,
    C::Extra: linera_execution::ExecutionRuntimeContext,
{
    // Builds the merged view. The chain ID is read before the guard is moved
    // into the read-only wrapper.
    fn new(view: OwnedRwLockReadGuard<ChainStateView<C>>) -> Self {
        let chain_id = view.chain_id();
        Self(
            ChainStateViewExtension(chain_id),
            ReadOnlyChainStateView(view),
        )
    }
}
// Summary of an application deployed on a chain, as returned by the
// `applications` query.
#[derive(SimpleObject)]
pub struct ApplicationOverview {
    id: ApplicationId,
    description: ApplicationDescription,
    // Link to this application's GraphQL endpoint on the local node service.
    link: String,
}
impl ApplicationOverview {
    // Builds an overview entry, including the link to the application's
    // GraphQL endpoint on this node service.
    fn new(
        id: ApplicationId,
        description: ApplicationDescription,
        port: NonZeroU16,
        chain_id: ChainId,
    ) -> Self {
        let link = format!(
            "http://localhost:{}/chains/{}/applications/{}",
            port.get(),
            chain_id,
            id
        );
        Self {
            id,
            description,
            link,
        }
    }
}
/// The `NodeService` is a server that exposes a web-server to the client.
/// The node service is primarily used to explore the state of a chain in GraphQL.
pub struct NodeService<C>
where
    C: ClientContext + 'static,
{
    // Configuration for the background chain listener.
    config: ChainListenerConfig,
    // Port the GraphQL web server listens on; also used to build application links.
    port: NonZeroU16,
    #[cfg(with_metrics)]
    metrics_port: NonZeroU16,
    // Chain reported as the default by the `chains` query, if any.
    default_chain: Option<ChainId>,
    // Shared client context; wrapped in `Arc<Mutex<_>>` so clones of the
    // service operate on the same context.
    context: Arc<Mutex<C>>,
}
// Manual `Clone` impl: `#[derive(Clone)]` would add a `C: Clone` bound, but
// only the `Arc` handle to the context needs to be duplicated, not `C` itself.
impl<C> Clone for NodeService<C>
where
    C: ClientContext + 'static,
{
    fn clone(&self) -> Self {
        Self {
            config: self.config.clone(),
            port: self.port,
            #[cfg(with_metrics)]
            metrics_port: self.metrics_port,
            default_chain: self.default_chain,
            // Share, don't copy, the client context.
            context: Arc::clone(&self.context),
        }
    }
}
impl<C> NodeService<C>
where
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-service/src/schema_export.rs | linera-service/src/schema_export.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::sync::Arc;
use futures::lock::Mutex;
use linera_base::{
crypto::CryptoHash,
data_types::{BlobContent, BlockHeight, Epoch, NetworkDescription, Timestamp},
identifiers::{AccountOwner, BlobId, ChainId},
};
use linera_chain::{
data_types::BlockProposal,
types::{
ConfirmedBlock, ConfirmedBlockCertificate, GenericCertificate, LiteCertificate, Timeout,
ValidatedBlock,
},
};
use linera_client::{
chain_listener::{ChainListenerConfig, ClientContext},
Error,
};
use linera_core::{
client::ChainClient,
data_types::{ChainInfoQuery, ChainInfoResponse},
node::{
CrossChainMessageDelivery, NodeError, NotificationStream, ValidatorNode,
ValidatorNodeProvider,
},
};
use linera_execution::committee::Committee;
use linera_sdk::linera_base_types::ValidatorPublicKey;
use linera_service::node_service::NodeService;
use linera_storage::DbStorage;
use linera_version::VersionInfo;
use linera_views::memory::MemoryDatabase;
// A validator-node stub whose every RPC fails with `UnexpectedMessage`.
// It exists only so a `NodeService` can be instantiated to export the GraphQL
// schema; none of these methods are expected to be called.
#[derive(Clone)]
struct DummyValidatorNode;

impl ValidatorNode for DummyValidatorNode {
    type NotificationStream = NotificationStream;

    // Placeholder address; never dialed.
    fn address(&self) -> String {
        "dummy".to_string()
    }

    async fn handle_block_proposal(
        &self,
        _: BlockProposal,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_lite_certificate(
        &self,
        _: LiteCertificate<'_>,
        _delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_timeout_certificate(
        &self,
        _: GenericCertificate<Timeout>,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_confirmed_certificate(
        &self,
        _: GenericCertificate<ConfirmedBlock>,
        _delivery: CrossChainMessageDelivery,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_validated_certificate(
        &self,
        _: GenericCertificate<ValidatedBlock>,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_chain_info_query(
        &self,
        _: ChainInfoQuery,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn download_pending_blob(&self, _: ChainId, _: BlobId) -> Result<BlobContent, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn handle_pending_blob(
        &self,
        _: ChainId,
        _: BlobContent,
    ) -> Result<ChainInfoResponse, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn subscribe(&self, _: Vec<ChainId>) -> Result<NotificationStream, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn get_version_info(&self) -> Result<VersionInfo, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn get_network_description(&self) -> Result<NetworkDescription, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn upload_blob(&self, _: BlobContent) -> Result<BlobId, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn download_blob(&self, _: BlobId) -> Result<BlobContent, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn download_certificate(
        &self,
        _: CryptoHash,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn download_certificates(
        &self,
        _: Vec<CryptoHash>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn download_certificates_by_heights(
        &self,
        _: ChainId,
        _: Vec<BlockHeight>,
    ) -> Result<Vec<ConfirmedBlockCertificate>, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn blob_last_used_by(&self, _: BlobId) -> Result<CryptoHash, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn blob_last_used_by_certificate(
        &self,
        _blob_id: BlobId,
    ) -> Result<ConfirmedBlockCertificate, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn missing_blob_ids(&self, _: Vec<BlobId>) -> Result<Vec<BlobId>, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    async fn get_shard_info(
        &self,
        _: ChainId,
    ) -> Result<linera_core::data_types::ShardInfo, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }
}
// A provider stub matching `DummyValidatorNode`: it never constructs a node.
struct DummyValidatorNodeProvider;

impl ValidatorNodeProvider for DummyValidatorNodeProvider {
    type Node = DummyValidatorNode;

    fn make_node(&self, _address: &str) -> Result<Self::Node, NodeError> {
        Err(NodeError::UnexpectedMessage)
    }

    fn make_nodes(
        &self,
        _committee: &Committee,
    ) -> Result<impl Iterator<Item = (ValidatorPublicKey, Self::Node)> + '_, NodeError> {
        // The turbofish pins the `Ok` iterator type, which is never produced.
        Err::<std::iter::Empty<_>, _>(NodeError::UnexpectedMessage)
    }
}
// Command-line interface: no options besides the generated `--help`/`--version`.
#[derive(clap::Parser)]
#[command(
    name = "linera-schema-export",
    about = "Export the GraphQL schema for the core data in a Linera chain",
    version = linera_version::VersionInfo::default_clap_str(),
)]
struct Options {}
// A client-context stub: only its associated `Environment` types matter for
// deriving the schema; the accessors are never called during export.
struct DummyContext;

impl ClientContext for DummyContext {
    type Environment = linera_core::environment::Impl<
        DbStorage<MemoryDatabase>,
        DummyValidatorNodeProvider,
        linera_base::crypto::InMemorySigner,
        linera_core::wallet::Memory,
    >;

    // Unreachable during schema export, hence `unimplemented!`.
    fn wallet(&self) -> &linera_core::wallet::Memory {
        unimplemented!()
    }

    fn storage(&self) -> &DbStorage<MemoryDatabase> {
        unimplemented!()
    }

    fn client(&self) -> &Arc<linera_core::client::Client<Self::Environment>> {
        unimplemented!()
    }

    // No timing instrumentation for the dummy context.
    fn timing_sender(
        &self,
    ) -> Option<tokio::sync::mpsc::UnboundedSender<(u64, linera_core::client::TimingType)>> {
        None
    }

    // Wallet updates are no-ops for the dummy context.
    async fn update_wallet_for_new_chain(
        &mut self,
        _: ChainId,
        _: Option<AccountOwner>,
        _: Timestamp,
        _: Epoch,
    ) -> Result<(), Error> {
        Ok(())
    }

    async fn update_wallet(&mut self, _: &ChainClient<Self::Environment>) -> Result<(), Error> {
        Ok(())
    }
}
#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Parsing exists only to handle `--help`/`--version`; there are no options.
    let _options = <Options as clap::Parser>::parse();
    // Build a service around the dummy context purely to derive its schema.
    let context = Arc::new(Mutex::new(DummyContext));
    let service = NodeService::new(
        ChainListenerConfig::default(),
        std::num::NonZeroU16::new(8080).unwrap(),
        #[cfg(with_metrics)]
        std::num::NonZeroU16::new(8081).unwrap(),
        None,
        context,
    );
    // Emit the GraphQL SDL on stdout.
    print!("{}", service.schema().sdl());
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.