repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/db.rs | crates/anvil/src/eth/backend/db.rs | //! Helper types for working with [revm](foundry_evm::revm)
use std::{
collections::BTreeMap,
fmt::{self, Debug},
path::Path,
};
use alloy_consensus::{BlockBody, Header};
use alloy_primitives::{Address, B256, Bytes, U256, keccak256, map::HashMap};
use alloy_rpc_types::BlockId;
use anvil_core::eth::{
block::Block,
transaction::{MaybeImpersonatedTransaction, TransactionInfo},
};
use foundry_common::errors::FsPathError;
use foundry_evm::backend::{
BlockchainDb, DatabaseError, DatabaseResult, MemDb, RevertStateSnapshotAction, StateSnapshot,
};
use foundry_primitives::{FoundryReceiptEnvelope, FoundryTxEnvelope};
use revm::{
Database, DatabaseCommit,
bytecode::Bytecode,
context::BlockEnv,
context_interface::block::BlobExcessGasAndPrice,
database::{CacheDB, DatabaseRef, DbAccount},
primitives::{KECCAK_EMPTY, eip4844::BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE},
state::AccountInfo,
};
use serde::{
Deserialize, Deserializer, Serialize,
de::{Error as DeError, MapAccess, Visitor},
};
use serde_json::Value;
use crate::mem::storage::MinedTransaction;
/// Helper trait to get access to the full state data of the database.
pub trait MaybeFullDatabase: DatabaseRef<Error = DatabaseError> + Debug {
    /// Returns the in-memory account map if this database keeps the full state
    /// locally, otherwise `None` (the default).
    fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
        None
    }

    /// Clear the state and move it into a new `StateSnapshot`.
    fn clear_into_state_snapshot(&mut self) -> StateSnapshot;

    /// Read the state snapshot.
    ///
    /// This clones all the states and returns a new `StateSnapshot`.
    fn read_as_state_snapshot(&self) -> StateSnapshot;

    /// Clears the entire database
    fn clear(&mut self);

    /// Reverses `clear_into_snapshot` by initializing the db's state with the state snapshot.
    fn init_from_state_snapshot(&mut self, state_snapshot: StateSnapshot);
}
/// Blanket impl for shared references.
///
/// Only the read-only accessor is meaningful here; the mutating methods are
/// never invoked through a `&T` (hence `unreachable!` / no-op bodies).
impl<'a, T: 'a + MaybeFullDatabase + ?Sized> MaybeFullDatabase for &'a T
where
    &'a T: DatabaseRef<Error = DatabaseError>,
{
    fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
        T::maybe_as_full_db(self)
    }

    fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
        unreachable!("never called for DatabaseRef")
    }

    fn read_as_state_snapshot(&self) -> StateSnapshot {
        unreachable!("never called for DatabaseRef")
    }

    // No-op: a shared reference cannot mutate the underlying database.
    fn clear(&mut self) {}

    fn init_from_state_snapshot(&mut self, _state_snapshot: StateSnapshot) {}
}
/// Helper trait to reset the DB if it's forked
pub trait MaybeForkedDatabase {
    /// Resets the fork to the given url/block; `Err` for non-forked databases.
    fn maybe_reset(&mut self, _url: Option<String>, block_number: BlockId) -> Result<(), String>;

    /// Flushes the fork's disk cache, if any.
    fn maybe_flush_cache(&self) -> Result<(), String>;

    /// Returns the underlying [`BlockchainDb`] if this database is forked.
    fn maybe_inner(&self) -> Result<&BlockchainDb, String>;
}
/// This bundles all required revm traits
pub trait Db:
    DatabaseRef<Error = DatabaseError>
    + Database<Error = DatabaseError>
    + DatabaseCommit
    + MaybeFullDatabase
    + MaybeForkedDatabase
    + fmt::Debug
    + Send
    + Sync
{
    /// Inserts an account
    fn insert_account(&mut self, address: Address, account: AccountInfo);

    /// Sets the nonce of the given address
    fn set_nonce(&mut self, address: Address, nonce: u64) -> DatabaseResult<()> {
        let mut info = self.basic(address)?.unwrap_or_default();
        info.nonce = nonce;
        self.insert_account(address, info);
        Ok(())
    }

    /// Sets the balance of the given address
    fn set_balance(&mut self, address: Address, balance: U256) -> DatabaseResult<()> {
        let mut info = self.basic(address)?.unwrap_or_default();
        info.balance = balance;
        self.insert_account(address, info);
        Ok(())
    }

    /// Sets the code of the given address.
    ///
    /// Empty code maps to [`KECCAK_EMPTY`] so the account is treated as code-less.
    fn set_code(&mut self, address: Address, code: Bytes) -> DatabaseResult<()> {
        let mut info = self.basic(address)?.unwrap_or_default();
        // `keccak256` already yields a `B256`; no slice round-trip is needed.
        info.code_hash =
            if code.as_ref().is_empty() { KECCAK_EMPTY } else { keccak256(code.as_ref()) };
        // `code` is already an `alloy_primitives::Bytes`; pass it through directly.
        info.code = Some(Bytecode::new_raw(code));
        self.insert_account(address, info);
        Ok(())
    }

    /// Sets the storage value at the given slot for the address
    fn set_storage_at(&mut self, address: Address, slot: B256, val: B256) -> DatabaseResult<()>;

    /// inserts a blockhash for the given number
    fn insert_block_hash(&mut self, number: U256, hash: B256);

    /// Write all chain data to serialized bytes buffer
    fn dump_state(
        &self,
        at: BlockEnv,
        best_number: u64,
        blocks: Vec<SerializableBlock>,
        transactions: Vec<SerializableTransaction>,
        historical_states: Option<SerializableHistoricalStates>,
    ) -> DatabaseResult<Option<SerializableState>>;

    /// Deserialize and add all chain data to the backend storage
    fn load_state(&mut self, state: SerializableState) -> DatabaseResult<bool> {
        for (addr, account) in state.accounts.into_iter() {
            let old_account_nonce = DatabaseRef::basic_ref(self, addr)
                .ok()
                .and_then(|acc| acc.map(|acc| acc.nonce))
                .unwrap_or_default();
            // use max nonce in case account is imported multiple times with different
            // nonces to prevent collisions
            let nonce = std::cmp::max(old_account_nonce, account.nonce);

            self.insert_account(
                addr,
                AccountInfo {
                    balance: account.balance,
                    code_hash: KECCAK_EMPTY, // will be set automatically
                    code: if account.code.is_empty() {
                        None
                    } else {
                        // The serialized bytes are already `Bytes`; no re-wrap needed.
                        Some(Bytecode::new_raw(account.code))
                    },
                    nonce,
                },
            );

            for (k, v) in account.storage.into_iter() {
                self.set_storage_at(addr, k, v)?;
            }
        }
        Ok(true)
    }

    /// Creates a new state snapshot.
    fn snapshot_state(&mut self) -> U256;

    /// Reverts a state snapshot.
    ///
    /// Returns `true` if the state snapshot was reverted.
    fn revert_state(&mut self, state_snapshot: U256, action: RevertStateSnapshotAction) -> bool;

    /// Returns the state root if possible to compute
    fn maybe_state_root(&self) -> Option<B256> {
        None
    }

    /// Returns the current, standalone state of the Db
    fn current_state(&self) -> StateDb;
}
/// Convenience impl only used to use any `Db` on the fly as the db layer for revm's CacheDB
/// This is useful to create blocks without actually writing to the `Db`, but rather in the cache of
/// the `CacheDB` see also
/// [Backend::pending_block()](crate::eth::backend::mem::Backend::pending_block())
impl<T: DatabaseRef<Error = DatabaseError> + Send + Sync + Clone + fmt::Debug> Db for CacheDB<T> {
    fn insert_account(&mut self, address: Address, account: AccountInfo) {
        self.insert_account_info(address, account)
    }

    fn set_storage_at(&mut self, address: Address, slot: B256, val: B256) -> DatabaseResult<()> {
        self.insert_account_storage(address, slot.into(), val.into())
    }

    fn insert_block_hash(&mut self, number: U256, hash: B256) {
        self.cache.block_hashes.insert(number, hash);
    }

    // State dumps are not supported for this throwaway cache layer; always `None`.
    fn dump_state(
        &self,
        _at: BlockEnv,
        _best_number: u64,
        _blocks: Vec<SerializableBlock>,
        _transaction: Vec<SerializableTransaction>,
        _historical_states: Option<SerializableHistoricalStates>,
    ) -> DatabaseResult<Option<SerializableState>> {
        Ok(None)
    }

    // Snapshots are likewise unsupported: a zero id / `false` signal "nothing happened".
    fn snapshot_state(&mut self) -> U256 {
        U256::ZERO
    }

    fn revert_state(&mut self, _state_snapshot: U256, _action: RevertStateSnapshotAction) -> bool {
        false
    }

    // Returns a fresh empty state rather than the cache contents.
    fn current_state(&self) -> StateDb {
        StateDb::new(MemDb::default())
    }
}
impl<T: DatabaseRef<Error = DatabaseError> + Debug> MaybeFullDatabase for CacheDB<T> {
    fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
        Some(&self.cache.accounts)
    }

    fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
        // Drain the account map, leaving an empty one behind.
        let db_accounts = std::mem::take(&mut self.cache.accounts);
        let mut accounts = HashMap::default();
        let mut account_storage = HashMap::default();

        for (addr, mut acc) in db_accounts {
            account_storage.insert(addr, std::mem::take(&mut acc.storage));
            let mut info = acc.info;
            // Re-attach the bytecode (stored separately, keyed by code hash) to the info.
            info.code = self.cache.contracts.remove(&info.code_hash);
            accounts.insert(addr, info);
        }
        let block_hashes = std::mem::take(&mut self.cache.block_hashes);
        StateSnapshot { accounts, storage: account_storage, block_hashes }
    }

    fn read_as_state_snapshot(&self) -> StateSnapshot {
        // Same shape as `clear_into_state_snapshot`, but cloning instead of draining.
        let mut accounts = HashMap::default();
        let mut account_storage = HashMap::default();

        for (addr, acc) in &self.cache.accounts {
            account_storage.insert(*addr, acc.storage.clone());
            let mut info = acc.info.clone();
            info.code = self.cache.contracts.get(&info.code_hash).cloned();
            accounts.insert(*addr, info);
        }

        let block_hashes = self.cache.block_hashes.clone();
        StateSnapshot { accounts, storage: account_storage, block_hashes }
    }

    fn clear(&mut self) {
        // The drained snapshot is intentionally discarded.
        self.clear_into_state_snapshot();
    }

    fn init_from_state_snapshot(&mut self, state_snapshot: StateSnapshot) {
        let StateSnapshot { accounts, mut storage, block_hashes } = state_snapshot;

        for (addr, mut acc) in accounts {
            // Bytecode is kept in the shared `contracts` map, keyed by code hash.
            if let Some(code) = acc.code.take() {
                self.cache.contracts.insert(acc.code_hash, code);
            }
            self.cache.accounts.insert(
                addr,
                DbAccount {
                    info: acc,
                    storage: storage.remove(&addr).unwrap_or_default(),
                    ..Default::default()
                },
            );
        }
        self.cache.block_hashes = block_hashes;
    }
}
/// A plain `CacheDB` is never fork-backed, so every fork-related operation is rejected.
impl<T: DatabaseRef<Error = DatabaseError>> MaybeForkedDatabase for CacheDB<T> {
    fn maybe_reset(&mut self, _url: Option<String>, _block_number: BlockId) -> Result<(), String> {
        Err(String::from("not supported"))
    }

    fn maybe_flush_cache(&self) -> Result<(), String> {
        Err(String::from("not supported"))
    }

    fn maybe_inner(&self) -> Result<&BlockchainDb, String> {
        Err(String::from("not supported"))
    }
}
/// Represents a state at certain point
#[derive(Debug)]
pub struct StateDb(pub(crate) Box<dyn MaybeFullDatabase + Send + Sync>);

impl StateDb {
    /// Wraps any full-state-capable database behind a trait object.
    pub fn new(db: impl MaybeFullDatabase + Send + Sync + 'static) -> Self {
        Self(Box::new(db))
    }

    /// Clones the current state into a `StateSnapshot`.
    pub fn serialize_state(&mut self) -> StateSnapshot {
        // Using read_as_snapshot makes sure we don't clear the historical state from the current
        // instance.
        self.read_as_state_snapshot()
    }
}
/// Pure delegation to the boxed inner database.
impl DatabaseRef for StateDb {
    type Error = DatabaseError;

    fn basic_ref(&self, address: Address) -> DatabaseResult<Option<AccountInfo>> {
        self.0.basic_ref(address)
    }

    fn code_by_hash_ref(&self, code_hash: B256) -> DatabaseResult<Bytecode> {
        self.0.code_by_hash_ref(code_hash)
    }

    fn storage_ref(&self, address: Address, index: U256) -> DatabaseResult<U256> {
        self.0.storage_ref(address, index)
    }

    fn block_hash_ref(&self, number: u64) -> DatabaseResult<B256> {
        self.0.block_hash_ref(number)
    }
}
/// Pure delegation to the boxed inner database.
impl MaybeFullDatabase for StateDb {
    fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
        self.0.maybe_as_full_db()
    }

    fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
        self.0.clear_into_state_snapshot()
    }

    fn read_as_state_snapshot(&self) -> StateSnapshot {
        self.0.read_as_state_snapshot()
    }

    fn clear(&mut self) {
        self.0.clear()
    }

    fn init_from_state_snapshot(&mut self, state_snapshot: StateSnapshot) {
        self.0.init_from_state_snapshot(state_snapshot)
    }
}
/// Legacy block environment from before v1.3.
///
/// Every field is optional so partially-written legacy dumps still deserialize;
/// missing values fall back to defaults in the `TryFrom<LegacyBlockEnv>` impl.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "snake_case")]
pub struct LegacyBlockEnv {
    pub number: Option<StringOrU64>,
    // Older dumps used the field name `coinbase` for the fee recipient.
    #[serde(alias = "coinbase")]
    pub beneficiary: Option<Address>,
    pub timestamp: Option<StringOrU64>,
    pub gas_limit: Option<StringOrU64>,
    pub basefee: Option<StringOrU64>,
    pub difficulty: Option<StringOrU64>,
    pub prevrandao: Option<B256>,
    pub blob_excess_gas_and_price: Option<LegacyBlobExcessGasAndPrice>,
}
/// Legacy blob excess gas and price structure from before v1.3.
#[derive(Debug, Deserialize)]
pub struct LegacyBlobExcessGasAndPrice {
    // EIP-4844 excess blob gas carried over from the parent block.
    pub excess_blob_gas: u64,
    // Blob gas price derived from the excess blob gas.
    pub blob_gasprice: u64,
}
/// Legacy string or u64 type from before v1.3.
///
/// Untagged: serde tries `Hex` (a JSON string, expected `0x`-prefixed) first,
/// then `Dec` (a JSON number).
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub enum StringOrU64 {
    Hex(String),
    Dec(u64),
}
impl StringOrU64 {
    /// Parses the value as a `u64`.
    ///
    /// Hex strings must carry a `0x` prefix; anything else yields `None`.
    pub fn to_u64(&self) -> Option<u64> {
        match self {
            Self::Hex(raw) => {
                let digits = raw.strip_prefix("0x")?;
                u64::from_str_radix(digits, 16).ok()
            }
            Self::Dec(num) => Some(*num),
        }
    }

    /// Parses the value as a `U256`, with the same `0x`-prefix rule as [`Self::to_u64`].
    pub fn to_u256(&self) -> Option<U256> {
        match self {
            Self::Hex(raw) => {
                let digits = raw.strip_prefix("0x")?;
                U256::from_str_radix(digits, 16).ok()
            }
            Self::Dec(num) => Some(U256::from(*num)),
        }
    }
}
/// Converts a `LegacyBlockEnv` to a `BlockEnv`, handling the conversion of legacy fields.
///
/// Missing legacy fields are replaced with fixed defaults (zero for most, `1` for the
/// timestamp, `u64::MAX` for the gas limit) so the result is always constructible.
impl TryFrom<LegacyBlockEnv> for BlockEnv {
    type Error = &'static str;

    fn try_from(legacy: LegacyBlockEnv) -> Result<Self, Self::Error> {
        Ok(Self {
            number: legacy.number.and_then(|v| v.to_u256()).unwrap_or(U256::ZERO),
            beneficiary: legacy.beneficiary.unwrap_or(Address::ZERO),
            timestamp: legacy.timestamp.and_then(|v| v.to_u256()).unwrap_or(U256::ONE),
            gas_limit: legacy.gas_limit.and_then(|v| v.to_u64()).unwrap_or(u64::MAX),
            basefee: legacy.basefee.and_then(|v| v.to_u64()).unwrap_or(0),
            difficulty: legacy.difficulty.and_then(|v| v.to_u256()).unwrap_or(U256::ZERO),
            prevrandao: legacy.prevrandao.or(Some(B256::ZERO)),
            // Default to a zero excess-gas entry with the Prague update fraction when absent.
            blob_excess_gas_and_price: legacy
                .blob_excess_gas_and_price
                .map(|v| BlobExcessGasAndPrice::new(v.excess_blob_gas, v.blob_gasprice))
                .or_else(|| {
                    Some(BlobExcessGasAndPrice::new(0, BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE))
                }),
        })
    }
}
/// Custom deserializer for `BlockEnv` that handles both v1.2 and v1.3+ formats.
fn deserialize_block_env_compat<'de, D>(deserializer: D) -> Result<Option<BlockEnv>, D::Error>
where
D: Deserializer<'de>,
{
let value: Option<Value> = Option::deserialize(deserializer)?;
let Some(value) = value else {
return Ok(None);
};
if let Ok(env) = BlockEnv::deserialize(&value) {
return Ok(Some(env));
}
let legacy: LegacyBlockEnv = serde_json::from_value(value).map_err(|e| {
D::Error::custom(format!("Legacy deserialization of `BlockEnv` failed: {e}"))
})?;
Ok(Some(BlockEnv::try_from(legacy).map_err(D::Error::custom)?))
}
/// Custom deserializer for `best_block_number` that handles both v1.2 and v1.3+ formats.
fn deserialize_best_block_number_compat<'de, D>(deserializer: D) -> Result<Option<u64>, D::Error>
where
    D: Deserializer<'de>,
{
    let Some(raw) = Option::<Value>::deserialize(deserializer)? else {
        return Ok(None);
    };
    // Accept a plain JSON number, a `0x`-prefixed hex string, or a decimal string.
    // Anything unparseable degrades to `None` rather than erroring.
    let parsed = match raw {
        Value::Number(num) => num.as_u64(),
        Value::String(text) => match text.strip_prefix("0x") {
            Some(hex) => u64::from_str_radix(hex, 16).ok(),
            None => text.parse().ok(),
        },
        _ => None,
    };
    Ok(parsed)
}
/// Full serialized chain state as produced by `anvil_dumpState` / loaded by `anvil_loadState`.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct SerializableState {
    /// The block number of the state
    ///
    /// Note: This is an Option for backwards compatibility: <https://github.com/foundry-rs/foundry/issues/5460>
    #[serde(deserialize_with = "deserialize_block_env_compat")]
    pub block: Option<BlockEnv>,
    pub accounts: BTreeMap<Address, SerializableAccountRecord>,
    /// The best block number of the state, can be different from block number (Arbitrum chain).
    #[serde(deserialize_with = "deserialize_best_block_number_compat")]
    pub best_block_number: Option<u64>,
    #[serde(default)]
    pub blocks: Vec<SerializableBlock>,
    #[serde(default)]
    pub transactions: Vec<SerializableTransaction>,
    /// Historical states of accounts and storage at particular block hashes.
    ///
    /// Note: This is an Option for backwards compatibility.
    #[serde(default)]
    pub historical_states: Option<SerializableHistoricalStates>,
}
impl SerializableState {
    /// Loads the `Genesis` object from the given json file path
    ///
    /// If `path` is a directory, `state.json` inside it is read instead.
    pub fn load(path: impl AsRef<Path>) -> Result<Self, FsPathError> {
        let path = path.as_ref();
        if path.is_dir() {
            foundry_common::fs::read_json_file(&path.join("state.json"))
        } else {
            foundry_common::fs::read_json_file(path)
        }
    }

    /// This is used as the clap `value_parser` implementation
    #[cfg(feature = "cmd")]
    pub(crate) fn parse(path: &str) -> Result<Self, String> {
        Self::load(path).map_err(|err| err.to_string())
    }
}
/// Serialized form of a single account: balance, nonce, code, and storage.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SerializableAccountRecord {
    pub nonce: u64,
    pub balance: U256,
    // Raw (unanalyzed) bytecode; empty bytes mean a code-less account.
    pub code: Bytes,
    // Custom deserializer accepts numeric (`U256`-style) keys/values from older dumps.
    #[serde(deserialize_with = "deserialize_btree")]
    pub storage: BTreeMap<B256, B256>,
}
/// Deserializes a storage map whose keys/values may be any hex/decimal quantity
/// accepted by `U256`, normalizing each entry to a fixed-width `B256`.
fn deserialize_btree<'de, D>(deserializer: D) -> Result<BTreeMap<B256, B256>, D::Error>
where
    D: Deserializer<'de>,
{
    struct BTreeVisitor;

    impl<'de> Visitor<'de> for BTreeVisitor {
        type Value = BTreeMap<B256, B256>;

        fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
            formatter.write_str("a mapping of hex encoded storage slots to hex encoded state data")
        }

        fn visit_map<M>(self, mut mapping: M) -> Result<BTreeMap<B256, B256>, M::Error>
        where
            M: MapAccess<'de>,
        {
            let mut btree = BTreeMap::new();
            // Parse entries as U256 (tolerant of short hex) and widen to B256.
            while let Some((key, value)) = mapping.next_entry::<U256, U256>()? {
                btree.insert(B256::from(key), B256::from(value));
            }
            Ok(btree)
        }
    }

    deserializer.deserialize_map(BTreeVisitor)
}
/// Defines a backwards-compatible enum for transactions.
/// This is essential for maintaining compatibility with state dumps
/// created before the changes introduced in PR #8411.
///
/// The enum can represent either a `TypedTransaction` or a `MaybeImpersonatedTransaction`,
/// depending on the data being deserialized. This flexibility ensures that older state
/// dumps can still be loaded correctly, even after the changes in #8411.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum SerializableTransactionType {
    TypedTransaction(FoundryTxEnvelope),
    MaybeImpersonatedTransaction(MaybeImpersonatedTransaction),
}
/// Serialized form of a mined block: header, transactions, and ommers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SerializableBlock {
    pub header: Header,
    pub transactions: Vec<SerializableTransactionType>,
    pub ommers: Vec<Header>,
}
impl From<Block> for SerializableBlock {
    /// Converts a mined [`Block`] into its serializable representation.
    fn from(block: Block) -> Self {
        let transactions = block.body.transactions.into_iter().map(Into::into).collect();
        let ommers = block.body.ommers.into_iter().collect();
        Self { header: block.header, transactions, ommers }
    }
}
impl From<SerializableBlock> for Block {
fn from(block: SerializableBlock) -> Self {
let transactions = block.transactions.into_iter().map(Into::into).collect();
let ommers = block.ommers;
let body = BlockBody { transactions, ommers, withdrawals: None };
Self::new(block.header, body)
}
}
impl From<MaybeImpersonatedTransaction> for SerializableTransactionType {
    /// New dumps always use the `MaybeImpersonatedTransaction` variant.
    fn from(transaction: MaybeImpersonatedTransaction) -> Self {
        Self::MaybeImpersonatedTransaction(transaction)
    }
}
impl From<SerializableTransactionType> for MaybeImpersonatedTransaction {
    /// Unwraps either legacy or current representation back into a
    /// `MaybeImpersonatedTransaction`.
    fn from(transaction: SerializableTransactionType) -> Self {
        match transaction {
            SerializableTransactionType::TypedTransaction(tx) => Self::new(tx),
            SerializableTransactionType::MaybeImpersonatedTransaction(tx) => tx,
        }
    }
}
/// Serialized form of a mined transaction together with its receipt and inclusion point.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SerializableTransaction {
    pub info: TransactionInfo,
    pub receipt: FoundryReceiptEnvelope,
    pub block_hash: B256,
    pub block_number: u64,
}
impl From<MinedTransaction> for SerializableTransaction {
    /// Field-for-field conversion from the in-memory mined transaction.
    fn from(transaction: MinedTransaction) -> Self {
        Self {
            info: transaction.info,
            receipt: transaction.receipt,
            block_hash: transaction.block_hash,
            block_number: transaction.block_number,
        }
    }
}
impl From<SerializableTransaction> for MinedTransaction {
    /// Field-for-field conversion back into the in-memory mined transaction.
    fn from(transaction: SerializableTransaction) -> Self {
        Self {
            info: transaction.info,
            receipt: transaction.receipt,
            block_hash: transaction.block_hash,
            block_number: transaction.block_number,
        }
    }
}
/// Historical per-block state snapshots, keyed by block hash.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct SerializableHistoricalStates(Vec<(B256, StateSnapshot)>);

impl SerializableHistoricalStates {
    /// Wraps the given `(block hash, snapshot)` pairs.
    pub const fn new(states: Vec<(B256, StateSnapshot)>) -> Self {
        Self(states)
    }
}
/// Consuming iteration over the `(block hash, snapshot)` pairs.
impl IntoIterator for SerializableHistoricalStates {
    type Item = (B256, StateSnapshot);
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        self.0.into_iter()
    }
}
#[cfg(test)]
mod test {
    use super::*;

    /// Ensures an RPC-wire-format JSON block deserializes into `SerializableBlock`.
    #[test]
    fn test_deser_block() {
        let block = r#"{
"header": {
"parentHash": "0xceb0fe420d6f14a8eeec4319515b89acbb0bb4861cad9983d529ab4b1e4af929",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0xe1423fd180478ab4fd05a7103277d64496b15eb914ecafe71eeec871b552efd1",
"transactionsRoot": "0x2b5598ef261e5f88e4303bb2b3986b3d5c0ebf4cd9977daebccae82a6469b988",
"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x0",
"number": "0x2",
"gasLimit": "0x1c9c380",
"gasUsed": "0x5208",
"timestamp": "0x66cdc823",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x342a1c58",
"blobGasUsed": "0x0",
"excessBlobGas": "0x0",
"extraData": "0x"
},
"transactions": [
{
"type": "0x2",
"chainId": "0x7a69",
"nonce": "0x0",
"gas": "0x5209",
"maxFeePerGas": "0x77359401",
"maxPriorityFeePerGas": "0x1",
"to": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
"value": "0x0",
"accessList": [],
"input": "0x",
"r": "0x85c2794a580da137e24ccc823b45ae5cea99371ae23ee13860fcc6935f8305b0",
"s": "0x41de7fa4121dab284af4453d30928241208bafa90cdb701fe9bc7054759fe3cd",
"yParity": "0x0",
"hash": "0x8c9b68e8947ace33028dba167354fde369ed7bbe34911b772d09b3c64b861515"
}
],
"ommers": []
}
"#;
        let _block: SerializableBlock = serde_json::from_str(block).unwrap();
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/genesis.rs | crates/anvil/src/eth/backend/genesis.rs | //! Genesis settings
use crate::eth::backend::db::Db;
use alloy_genesis::{Genesis, GenesisAccount};
use alloy_primitives::{Address, U256};
use foundry_evm::backend::DatabaseResult;
use revm::{bytecode::Bytecode, primitives::KECCAK_EMPTY, state::AccountInfo};
use tokio::sync::RwLockWriteGuard;
/// Genesis settings
#[derive(Clone, Debug, Default)]
pub struct GenesisConfig {
    /// The initial number for the genesis block
    pub number: u64,
    /// The initial timestamp for the genesis block
    pub timestamp: u64,
    /// Balance for genesis accounts
    pub balance: U256,
    /// All accounts that should be initialised at genesis
    pub accounts: Vec<Address>,
    /// The `genesis.json` if provided
    pub genesis_init: Option<Genesis>,
}
impl GenesisConfig {
    /// Returns fresh `AccountInfo`s for the configured `accounts`
    ///
    /// Each account starts with the configured `balance`, nonce 0, and no code.
    pub fn account_infos(&self) -> impl Iterator<Item = (Address, AccountInfo)> + '_ {
        self.accounts.iter().copied().map(|address| {
            let info = AccountInfo {
                balance: self.balance,
                code_hash: KECCAK_EMPTY,
                // we set this to empty so `Database::code_by_hash` doesn't get called
                code: Some(Default::default()),
                nonce: 0,
            };
            (address, info)
        })
    }

    /// If an initial `genesis.json` was provided, this applies the account alloc to the db
    pub fn apply_genesis_json_alloc(
        &self,
        mut db: RwLockWriteGuard<'_, Box<dyn Db>>,
    ) -> DatabaseResult<()> {
        if let Some(ref genesis) = self.genesis_init {
            for (addr, mut acc) in genesis.alloc.clone() {
                // Split storage off so the remaining fields can be converted independently.
                let storage = std::mem::take(&mut acc.storage);
                // insert all accounts
                db.insert_account(addr, self.genesis_to_account_info(&acc));
                // insert all storage values
                for (k, v) in &storage.unwrap_or_default() {
                    db.set_storage_at(addr, *k, *v)?;
                }
            }
        }
        Ok(())
    }

    /// Converts a [`GenesisAccount`] to an [`AccountInfo`]
    fn genesis_to_account_info(&self, acc: &GenesisAccount) -> AccountInfo {
        let GenesisAccount { code, balance, nonce, .. } = acc.clone();
        let code = code.map(Bytecode::new_raw);
        AccountInfo {
            balance,
            nonce: nonce.unwrap_or_default(),
            // Hash the code if present; otherwise use the empty-code hash.
            code_hash: code.as_ref().map(|code| code.hash_slow()).unwrap_or(KECCAK_EMPTY),
            code,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/time.rs | crates/anvil/src/eth/backend/time.rs | //! Manages the block time
use crate::eth::error::BlockchainError;
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use std::{sync::Arc, time::Duration};
/// Returns the `Utc` datetime for the given seconds since unix epoch
///
/// Panics if `secs` (cast to `i64`) is outside chrono's representable range.
pub fn utc_from_secs(secs: u64) -> DateTime<Utc> {
    DateTime::from_timestamp(secs as i64, 0).unwrap()
}
/// Manages block time
#[derive(Clone, Debug)]
pub struct TimeManager {
    /// tracks the overall applied timestamp offset
    ///
    /// `i128` so the offset can be negative (a start timestamp in the past).
    offset: Arc<RwLock<i128>>,
    /// The timestamp of the last block header
    last_timestamp: Arc<RwLock<u64>>,
    /// Contains the next timestamp to use
    /// if this is set then the next time `[TimeManager::current_timestamp()]` is called this value
    /// will be taken and returned. After which the `offset` will be updated accordingly
    next_exact_timestamp: Arc<RwLock<Option<u64>>>,
    /// The interval to use when determining the next block's timestamp
    interval: Arc<RwLock<Option<u64>>>,
}
impl TimeManager {
    /// Creates a manager whose clock starts at `start_timestamp`.
    pub fn new(start_timestamp: u64) -> Self {
        let time_manager = Self {
            last_timestamp: Default::default(),
            offset: Default::default(),
            next_exact_timestamp: Default::default(),
            interval: Default::default(),
        };
        time_manager.reset(start_timestamp);
        time_manager
    }

    /// Resets the current time manager to the given timestamp, resetting the offsets and
    /// next block timestamp option
    pub fn reset(&self, start_timestamp: u64) {
        let current = duration_since_unix_epoch().as_secs() as i128;
        *self.last_timestamp.write() = start_timestamp;
        // Offset is start minus wall clock, so it may be negative.
        *self.offset.write() = (start_timestamp as i128) - current;
        self.next_exact_timestamp.write().take();
    }

    /// Returns the currently applied timestamp offset in seconds.
    pub fn offset(&self) -> i128 {
        *self.offset.read()
    }

    /// Adds the given `offset` to the already tracked offset and returns the result
    fn add_offset(&self, offset: i128) -> i128 {
        let mut current = self.offset.write();
        let next = current.saturating_add(offset);
        trace!(target: "time", "adding timestamp offset={}, total={}", offset, next);
        *current = next;
        next
    }

    /// Jumps forward in time by the given seconds
    ///
    /// This will apply a permanent offset to the natural UNIX Epoch timestamp
    pub fn increase_time(&self, seconds: u64) -> i128 {
        self.add_offset(seconds as i128)
    }

    /// Sets the exact timestamp to use in the next block
    ///
    /// Fails if it's strictly before the last timestamp (an equal timestamp is accepted).
    pub fn set_next_block_timestamp(&self, timestamp: u64) -> Result<(), BlockchainError> {
        trace!(target: "time", "override next timestamp {}", timestamp);
        if timestamp < *self.last_timestamp.read() {
            return Err(BlockchainError::TimestampError(format!(
                "{timestamp} is lower than previous block's timestamp"
            )));
        }
        self.next_exact_timestamp.write().replace(timestamp);
        Ok(())
    }

    /// Sets an interval to use when computing the next timestamp
    ///
    /// If an interval already exists, this will update the interval, otherwise a new interval will
    /// be set starting with the current timestamp.
    pub fn set_block_timestamp_interval(&self, interval: u64) {
        trace!(target: "time", "set interval {}", interval);
        self.interval.write().replace(interval);
    }

    /// Removes the interval if it exists
    pub fn remove_block_timestamp_interval(&self) -> bool {
        if self.interval.write().take().is_some() {
            trace!(target: "time", "removed interval");
            true
        } else {
            false
        }
    }

    /// Computes the next timestamp without updating internals
    ///
    /// Returns the timestamp and, when an exact timestamp override was consumed,
    /// the new offset the caller should store.
    fn compute_next_timestamp(&self) -> (u64, Option<i128>) {
        let current = duration_since_unix_epoch().as_secs() as i128;
        let last_timestamp = *self.last_timestamp.read();

        // Priority: explicit override > fixed interval > wall clock + offset.
        let (mut next_timestamp, update_offset) =
            if let Some(next) = *self.next_exact_timestamp.read() {
                (next, true)
            } else if let Some(interval) = *self.interval.read() {
                (last_timestamp.saturating_add(interval), false)
            } else {
                // NOTE(review): `as u64` cast assumes `current + offset` is non-negative —
                // a sufficiently negative offset would wrap; presumably prevented upstream.
                (current.saturating_add(self.offset()) as u64, false)
            };

        // Ensures that the timestamp is always increasing
        if next_timestamp < last_timestamp {
            next_timestamp = last_timestamp + 1;
        }

        let next_offset = update_offset.then_some((next_timestamp as i128) - current);
        (next_timestamp, next_offset)
    }

    /// Returns the current timestamp and updates the underlying offset and interval accordingly
    pub fn next_timestamp(&self) -> u64 {
        let (next_timestamp, next_offset) = self.compute_next_timestamp();

        // Make sure we reset the `next_exact_timestamp`
        self.next_exact_timestamp.write().take();

        if let Some(next_offset) = next_offset {
            *self.offset.write() = next_offset;
        }
        *self.last_timestamp.write() = next_timestamp;

        next_timestamp
    }

    /// Returns the current timestamp for a call that does _not_ update the value
    pub fn current_call_timestamp(&self) -> u64 {
        let (next_timestamp, _) = self.compute_next_timestamp();
        next_timestamp
    }
}
/// Returns the current duration since unix epoch.
///
/// Panics if the system clock reports a time before the unix epoch.
pub fn duration_since_unix_epoch() -> Duration {
    use std::time::SystemTime;
    let now = SystemTime::now();
    match now.duration_since(SystemTime::UNIX_EPOCH) {
        Ok(elapsed) => elapsed,
        Err(err) => panic!("Current time {now:?} is invalid: {err:?}"),
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/info.rs | crates/anvil/src/eth/backend/info.rs | //! Handler that can get current storage related data
use crate::mem::Backend;
use alloy_network::AnyRpcBlock;
use alloy_primitives::B256;
use anvil_core::eth::block::Block;
use foundry_primitives::FoundryReceiptEnvelope;
use std::{fmt, sync::Arc};
/// A type that can fetch data related to the ethereum storage.
///
/// This is simply a wrapper type for the [`Backend`] but exposes a limited set of functions to
/// fetch ethereum storage related data
// TODO(mattsee): once we have multiple Backend types, this should be turned into a trait
#[derive(Clone)]
pub struct StorageInfo {
    // Shared handle; cloning `StorageInfo` clones only the `Arc`.
    backend: Arc<Backend>,
}
impl StorageInfo {
    /// Wraps the given backend handle.
    pub(crate) fn new(backend: Arc<Backend>) -> Self {
        Self { backend }
    }

    /// Returns the receipts of the current block
    pub fn current_receipts(&self) -> Option<Vec<FoundryReceiptEnvelope>> {
        self.backend.mined_receipts(self.backend.best_hash())
    }

    /// Returns the current block
    pub fn current_block(&self) -> Option<Block> {
        self.backend.get_block(self.backend.best_number())
    }

    /// Returns the receipts of the block with the given hash
    pub fn receipts(&self, hash: B256) -> Option<Vec<FoundryReceiptEnvelope>> {
        self.backend.mined_receipts(hash)
    }

    /// Returns the block with the given hash
    pub fn block(&self, hash: B256) -> Option<Block> {
        self.backend.get_block_by_hash(hash)
    }

    /// Returns the block with the given hash in the format of the ethereum API
    pub fn eth_block(&self, hash: B256) -> Option<AnyRpcBlock> {
        let block = self.block(hash)?;
        Some(self.backend.convert_block(block))
    }
}
// Manual impl: `Backend` is not `Debug`, so only the struct name is printed.
impl fmt::Debug for StorageInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("StorageInfo").finish_non_exhaustive()
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/env.rs | crates/anvil/src/eth/backend/env.rs | use alloy_evm::EvmEnv;
use foundry_evm::{EnvMut, core::AsEnvMut};
use foundry_evm_networks::NetworkConfigs;
use op_revm::OpTransaction;
use revm::context::TxEnv;
/// Helper container type for [`EvmEnv`] and [`OpTransaction<TxEnv>`].
#[derive(Clone, Debug, Default)]
pub struct Env {
    pub evm_env: EvmEnv,
    pub tx: OpTransaction<TxEnv>,
    pub networks: NetworkConfigs,
}
/// Constructors for [`Env`].
impl Env {
    /// Bundles the given environment, transaction, and network configs.
    pub fn new(evm_env: EvmEnv, tx: OpTransaction<TxEnv>, networks: NetworkConfigs) -> Self {
        Self { evm_env, tx, networks }
    }
}
impl AsEnvMut for Env {
    /// Exposes mutable views of the block, cfg, and base transaction env.
    fn as_env_mut(&mut self) -> EnvMut<'_> {
        EnvMut {
            block: &mut self.evm_env.block_env,
            cfg: &mut self.evm_env.cfg_env,
            tx: &mut self.tx.base,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/cheats.rs | crates/anvil/src/eth/backend/cheats.rs | //! Support for "cheat codes" / bypass functions
use alloy_evm::precompiles::{Precompile, PrecompileInput};
use alloy_primitives::{
Address, Bytes,
map::{AddressHashSet, foldhash::HashMap},
};
use parking_lot::RwLock;
use revm::precompile::{
PrecompileError, PrecompileId, PrecompileOutput, PrecompileResult, secp256k1::ec_recover_run,
utilities::right_pad,
};
use std::{borrow::Cow, sync::Arc};
/// Identifier of the cheat-aware ecrecover precompile, returned by
/// [`CheatEcrecover::precompile_id`].
static PRECOMPILE_ID_CHEAT_ECRECOVER: PrecompileId =
    PrecompileId::Custom(Cow::Borrowed("cheat_ecrecover"));
/// Manages user modifications that may affect the node's behavior
///
/// Contains the state of executed, non-eth standard cheat code RPC
#[derive(Clone, Debug, Default)]
pub struct CheatsManager {
    /// Shared, thread-safe cheat state; cloning the manager shares the same state.
    state: Arc<RwLock<CheatsState>>,
}
impl CheatsManager {
    /// Marks `addr` as explicitly impersonated.
    ///
    /// Returns `true` if the account is already impersonated.
    pub fn impersonate(&self, addr: Address) -> bool {
        trace!(target: "cheats", %addr, "start impersonating");
        // Explicit impersonations are recorded so they can be returned from `eth_accounts`;
        // `is_impersonated()` alone would not consult this list when auto impersonation is on.
        let newly_added = self.state.write().impersonated_accounts.insert(addr);
        !newly_added
    }

    /// Removes the account from the impersonated set.
    pub fn stop_impersonating(&self, addr: &Address) {
        trace!(target: "cheats", %addr, "stop impersonating");
        let mut state = self.state.write();
        state.impersonated_accounts.remove(addr);
    }

    /// Returns true if the `addr` is currently impersonated.
    pub fn is_impersonated(&self, addr: Address) -> bool {
        // With auto impersonation enabled, every account counts as impersonated.
        self.auto_impersonate_accounts() || self.state.read().impersonated_accounts.contains(&addr)
    }

    /// Returns true if auto impersonation is enabled.
    pub fn auto_impersonate_accounts(&self) -> bool {
        self.state.read().auto_impersonate_accounts
    }

    /// Sets the auto impersonation flag; when enabled, `is_impersonated` always returns true.
    pub fn set_auto_impersonate_account(&self, enabled: bool) {
        trace!(target: "cheats", "Auto impersonation set to {:?}", enabled);
        let mut state = self.state.write();
        state.auto_impersonate_accounts = enabled;
    }

    /// Returns a copy of all accounts that are currently being impersonated.
    pub fn impersonated_accounts(&self) -> AddressHashSet {
        self.state.read().impersonated_accounts.clone()
    }

    /// Registers an override so that `ecrecover(signature)` returns `addr`.
    pub fn add_recover_override(&self, sig: Bytes, addr: Address) {
        let mut state = self.state.write();
        state.signature_overrides.insert(sig, addr);
    }

    /// If an override exists for `sig`, returns the address; otherwise `None`.
    pub fn get_recover_override(&self, sig: &Bytes) -> Option<Address> {
        let state = self.state.read();
        state.signature_overrides.get(sig).copied()
    }

    /// Returns true if any ecrecover overrides have been registered.
    pub fn has_recover_overrides(&self) -> bool {
        let state = self.state.read();
        !state.signature_overrides.is_empty()
    }
}
/// Container type for all the state variables
#[derive(Clone, Debug, Default)]
pub struct CheatsState {
    /// All accounts that are currently impersonated
    pub impersonated_accounts: AddressHashSet,
    /// If set to true will make the `is_impersonated` function always return true
    pub auto_impersonate_accounts: bool,
    /// Overrides for ecrecover: 65-byte signature (r || s || v) => Address
    pub signature_overrides: HashMap<Bytes, Address>,
}
impl CheatEcrecover {
pub fn new(cheats: Arc<CheatsManager>) -> Self {
Self { cheats }
}
}
impl Precompile for CheatEcrecover {
    /// Runs `ecrecover`, first consulting the cheat manager's signature overrides.
    fn call(&self, input: PrecompileInput<'_>) -> PrecompileResult {
        // Fast path: no overrides registered, defer to the canonical implementation.
        if !self.cheats.has_recover_overrides() {
            return ec_recover_run(input.data, input.gas);
        }
        // Fixed base cost charged by the ecrecover precompile.
        const ECRECOVER_BASE: u64 = 3_000;
        if input.gas < ECRECOVER_BASE {
            return Err(PrecompileError::OutOfGas);
        }
        // Input layout: hash (0..32) | v (32..64, significant byte at 63) | r||s (64..128).
        let padded = right_pad::<128>(input.data);
        let v = padded[63];
        // Reassemble the 65-byte signature as r || s || v to match the override key format.
        let mut sig_bytes = [0u8; 65];
        sig_bytes[..64].copy_from_slice(&padded[64..128]);
        sig_bytes[64] = v;
        let sig_bytes_wrapped = Bytes::copy_from_slice(&sig_bytes);
        if let Some(addr) = self.cheats.get_recover_override(&sig_bytes_wrapped) {
            // Return the overridden address left-padded to 32 bytes, like the real precompile.
            let mut out = [0u8; 32];
            out[12..].copy_from_slice(addr.as_slice());
            return Ok(PrecompileOutput::new(ECRECOVER_BASE, Bytes::copy_from_slice(&out)));
        }
        // No override matched this signature: fall back to the canonical implementation.
        ec_recover_run(input.data, input.gas)
    }

    fn precompile_id(&self) -> &PrecompileId {
        &PRECOMPILE_ID_CHEAT_ECRECOVER
    }

    // Not pure: the result depends on mutable cheat state.
    fn is_pure(&self) -> bool {
        false
    }
}
/// A custom ecrecover precompile that supports cheat-based signature overrides.
#[derive(Clone, Debug)]
pub struct CheatEcrecover {
    /// Shared cheat state consulted for signature overrides.
    cheats: Arc<CheatsManager>,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn impersonate_returns_false_then_true() {
        let manager = CheatsManager::default();
        let account = Address::from([1u8; 20]);
        // First explicit impersonation: the account was not previously impersonated.
        assert!(!manager.impersonate(account));
        // Second call reports that the account is already impersonated.
        assert!(manager.impersonate(account));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/executor.rs | crates/anvil/src/eth/backend/executor.rs | use crate::{
PrecompileFactory,
eth::{
backend::{
cheats::{CheatEcrecover, CheatsManager},
db::Db,
env::Env,
mem::op_haltreason_to_instruction_result,
validate::TransactionValidator,
},
error::InvalidTransactionError,
pool::transactions::PoolTransaction,
},
mem::inspector::AnvilInspector,
};
use alloy_consensus::{
Header, Receipt, ReceiptWithBloom, Transaction, constants::EMPTY_WITHDRAWALS,
proofs::calculate_receipt_root, transaction::Either,
};
use alloy_eips::{
eip7685::EMPTY_REQUESTS_HASH,
eip7702::{RecoveredAuthority, RecoveredAuthorization},
eip7840::BlobParams,
};
use alloy_evm::{
EthEvmFactory, Evm, EvmEnv, EvmFactory, FromRecoveredTx,
eth::EthEvmContext,
precompiles::{DynPrecompile, Precompile, PrecompilesMap},
};
use alloy_op_evm::OpEvmFactory;
use alloy_primitives::{B256, Bloom, BloomInput, Log};
use anvil_core::eth::{
block::{BlockInfo, create_block},
transaction::{PendingTransaction, TransactionInfo},
};
use foundry_evm::{
backend::DatabaseError,
core::{either_evm::EitherEvm, precompiles::EC_RECOVER},
traces::{CallTraceDecoder, CallTraceNode},
};
use foundry_evm_networks::NetworkConfigs;
use foundry_primitives::{FoundryReceiptEnvelope, FoundryTxEnvelope};
use op_revm::{OpContext, OpTransaction};
use revm::{
Database, Inspector,
context::{Block as RevmBlock, Cfg, TxEnv},
context_interface::result::{EVMError, ExecutionResult, Output},
interpreter::InstructionResult,
primitives::hardfork::SpecId,
};
use std::{fmt::Debug, sync::Arc};
/// Represents an executed transaction (transacted on the DB)
#[derive(Debug)]
pub struct ExecutedTransaction {
    /// The pooled transaction that was executed.
    transaction: Arc<PoolTransaction>,
    /// Exit status of the EVM execution.
    exit_reason: InstructionResult,
    /// Call/create output, if execution produced one.
    out: Option<Output>,
    /// Gas consumed by this transaction.
    gas_used: u64,
    /// Logs emitted during execution.
    logs: Vec<Log>,
    /// Recorded call traces.
    traces: Vec<CallTraceNode>,
    /// The sender's account nonce read before execution.
    nonce: u64,
}
// == impl ExecutedTransaction ==

impl ExecutedTransaction {
    /// Creates the receipt for the transaction
    ///
    /// Also bumps `cumulative_gas_used` by this transaction's gas usage.
    fn create_receipt(&self, cumulative_gas_used: &mut u64) -> FoundryReceiptEnvelope {
        let logs = self.logs.clone();
        *cumulative_gas_used = cumulative_gas_used.saturating_add(self.gas_used);
        // successful return see [Return]
        // Variants ordered at or before `SelfDestruct` are treated as success.
        let status_code = u8::from(self.exit_reason as u8 <= InstructionResult::SelfDestruct as u8);
        let receipt_with_bloom: ReceiptWithBloom = Receipt {
            status: (status_code == 1).into(),
            cumulative_gas_used: *cumulative_gas_used,
            logs,
        }
        .into();
        // Wrap the receipt in the envelope matching the transaction type.
        match self.transaction.pending_transaction.transaction.as_ref() {
            FoundryTxEnvelope::Legacy(_) => FoundryReceiptEnvelope::Legacy(receipt_with_bloom),
            FoundryTxEnvelope::Eip2930(_) => FoundryReceiptEnvelope::Eip2930(receipt_with_bloom),
            FoundryTxEnvelope::Eip1559(_) => FoundryReceiptEnvelope::Eip1559(receipt_with_bloom),
            FoundryTxEnvelope::Eip4844(_) => FoundryReceiptEnvelope::Eip4844(receipt_with_bloom),
            FoundryTxEnvelope::Eip7702(_) => FoundryReceiptEnvelope::Eip7702(receipt_with_bloom),
            FoundryTxEnvelope::Deposit(_tx) => {
                FoundryReceiptEnvelope::Deposit(op_alloy_consensus::OpDepositReceiptWithBloom {
                    receipt: op_alloy_consensus::OpDepositReceipt {
                        inner: receipt_with_bloom.receipt,
                        deposit_nonce: Some(0),
                        deposit_receipt_version: Some(1),
                    },
                    logs_bloom: receipt_with_bloom.logs_bloom,
                })
            }
            // TODO(onbjerg): we should impl support for Tempo transactions
            FoundryTxEnvelope::Tempo(_) => todo!(),
        }
    }
}
/// Represents the outcome of mining a new block.
#[derive(Clone, Debug)]
pub struct ExecutedTransactions {
    /// The block created after executing the `included` transactions
    pub block: BlockInfo,
    /// All transactions included in the block
    pub included: Vec<Arc<PoolTransaction>>,
    /// All transactions that were invalid at the point of their execution and were not included in
    /// the block
    pub invalid: Vec<Arc<PoolTransaction>>,
}
/// An executor for a series of transactions
pub struct TransactionExecutor<'a, Db: ?Sized, V: TransactionValidator> {
    /// where to insert the transactions
    pub db: &'a mut Db,
    /// type used to validate before inclusion
    pub validator: &'a V,
    /// all pending transactions
    pub pending: std::vec::IntoIter<Arc<PoolTransaction>>,
    /// The EVM environment (cfg + block) used for all executed transactions.
    pub evm_env: EvmEnv,
    /// Hash of the parent block the new block builds on.
    pub parent_hash: B256,
    /// Cumulative gas used by all executed transactions
    pub gas_used: u64,
    /// Cumulative blob gas used by all executed transactions
    pub blob_gas_used: u64,
    /// Whether to record opcode-level step traces in the inspector.
    pub enable_steps_tracing: bool,
    /// Enabled network configurations (e.g. optimism).
    pub networks: NetworkConfigs,
    /// Whether to collect and print logs emitted during execution.
    pub print_logs: bool,
    /// Whether to print decoded call traces after execution.
    pub print_traces: bool,
    /// Recorder used for decoding traces, used together with print_traces
    pub call_trace_decoder: Arc<CallTraceDecoder>,
    /// Precompiles to inject to the EVM.
    pub precompile_factory: Option<Arc<dyn PrecompileFactory>>,
    /// Blob gas parameters used to enforce the per-block blob gas limit.
    pub blob_params: BlobParams,
    /// Cheat state, used e.g. for ecrecover signature overrides.
    pub cheats: CheatsManager,
}
impl<DB: Db + ?Sized, V: TransactionValidator> TransactionExecutor<'_, DB, V> {
    /// Executes all transactions and puts them in a new block with the provided `timestamp`
    pub fn execute(mut self) -> ExecutedTransactions {
        // Accumulators for everything that ends up in the mined block.
        let mut transactions = Vec::new();
        let mut transaction_infos = Vec::new();
        let mut receipts = Vec::new();
        let mut bloom = Bloom::default();
        let mut cumulative_gas_used = 0u64;
        let mut invalid = Vec::new();
        let mut included = Vec::new();
        // Snapshot block env fields before the iterator below mutably borrows `self`.
        let gas_limit = self.evm_env.block_env().gas_limit;
        let parent_hash = self.parent_hash;
        let block_number = self.evm_env.block_env().number;
        let difficulty = self.evm_env.block_env().difficulty;
        let mix_hash = self.evm_env.block_env().prevrandao;
        let beneficiary = self.evm_env.block_env().beneficiary;
        let timestamp = self.evm_env.block_env().timestamp;
        // The base fee only exists post-London.
        let base_fee = if self.evm_env.cfg_env().spec.is_enabled_in(SpecId::LONDON) {
            Some(self.evm_env.block_env().basefee)
        } else {
            None
        };
        // Hardfork gates for the optional header fields below.
        let is_shanghai = self.evm_env.cfg_env().spec >= SpecId::SHANGHAI;
        let is_cancun = self.evm_env.cfg_env().spec >= SpecId::CANCUN;
        let is_prague = self.evm_env.cfg_env().spec >= SpecId::PRAGUE;
        let excess_blob_gas =
            if is_cancun { self.evm_env.block_env().blob_excess_gas() } else { None };
        let mut cumulative_blob_gas_used = if is_cancun { Some(0u64) } else { None };
        // Execute all pending transactions; the `Iterator` impl on `&mut Self` drives the EVM.
        for tx in self.into_iter() {
            let tx = match tx {
                TransactionExecutionOutcome::Executed(tx) => {
                    included.push(tx.transaction.clone());
                    tx
                }
                TransactionExecutionOutcome::BlockGasExhausted(tx) => {
                    trace!(target: "backend", tx_gas_limit = %tx.pending_transaction.transaction.gas_limit(), ?tx, "block gas limit exhausting, skipping transaction");
                    continue;
                }
                TransactionExecutionOutcome::BlobGasExhausted(tx) => {
                    trace!(target: "backend", blob_gas = %tx.pending_transaction.transaction.blob_gas_used().unwrap_or_default(), ?tx, "block blob gas limit exhausting, skipping transaction");
                    continue;
                }
                TransactionExecutionOutcome::TransactionGasExhausted(tx) => {
                    trace!(target: "backend", tx_gas_limit = %tx.pending_transaction.transaction.gas_limit(), ?tx, "transaction gas limit exhausting, skipping transaction");
                    continue;
                }
                TransactionExecutionOutcome::Invalid(tx, _) => {
                    trace!(target: "backend", ?tx, "skipping invalid transaction");
                    invalid.push(tx);
                    continue;
                }
                TransactionExecutionOutcome::DatabaseError(_, err) => {
                    // Note: this is only possible in forking mode, if for example a rpc request
                    // failed
                    trace!(target: "backend", ?err, "Failed to execute transaction due to database error");
                    continue;
                }
            };
            if is_cancun {
                let tx_blob_gas =
                    tx.transaction.pending_transaction.transaction.blob_gas_used().unwrap_or(0);
                cumulative_blob_gas_used =
                    Some(cumulative_blob_gas_used.unwrap_or(0u64).saturating_add(tx_blob_gas));
            }
            let receipt = tx.create_receipt(&mut cumulative_gas_used);
            // Partial move: `nonce`/`gas_used` remain accessible afterwards since they are Copy.
            let ExecutedTransaction { transaction, logs, out, traces, exit_reason: exit, .. } = tx;
            build_logs_bloom(&logs, &mut bloom);
            // A create output carries the newly deployed contract address.
            let contract_address = out.as_ref().and_then(|out| {
                if let Output::Create(_, contract_address) = out {
                    trace!(target: "backend", "New contract deployed: at {:?}", contract_address);
                    *contract_address
                } else {
                    None
                }
            });
            let transaction_index = transaction_infos.len() as u64;
            let info = TransactionInfo {
                transaction_hash: transaction.hash(),
                transaction_index,
                from: *transaction.pending_transaction.sender(),
                to: transaction.pending_transaction.transaction.to(),
                contract_address,
                traces,
                exit,
                out: out.map(Output::into_data),
                nonce: tx.nonce,
                gas_used: tx.gas_used,
            };
            transaction_infos.push(info);
            receipts.push(receipt);
            transactions.push(transaction.pending_transaction.transaction.clone());
        }
        let receipts_root = calculate_receipt_root(&receipts);
        let header = Header {
            parent_hash,
            ommers_hash: Default::default(),
            beneficiary,
            state_root: self.db.maybe_state_root().unwrap_or_default(),
            transactions_root: Default::default(), // Will be computed by create_block
            receipts_root,
            logs_bloom: bloom,
            difficulty,
            number: block_number.saturating_to(),
            gas_limit,
            gas_used: cumulative_gas_used,
            timestamp: timestamp.saturating_to(),
            extra_data: Default::default(),
            mix_hash: mix_hash.unwrap_or_default(),
            nonce: Default::default(),
            base_fee_per_gas: base_fee,
            parent_beacon_block_root: is_cancun.then_some(Default::default()),
            blob_gas_used: cumulative_blob_gas_used,
            excess_blob_gas,
            withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
            requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
        };
        let block = create_block(header, transactions);
        let block = BlockInfo { block, transactions: transaction_infos, receipts };
        ExecutedTransactions { block, included, invalid }
    }
    /// Builds the [`Env`] used to execute the given pending transaction.
    fn env_for(&self, tx: &PendingTransaction) -> Env {
        let mut tx_env: OpTransaction<TxEnv> =
            FromRecoveredTx::from_recovered_tx(tx.transaction.as_ref(), *tx.sender());
        if let FoundryTxEnvelope::Eip7702(tx_7702) = tx.transaction.as_ref()
            && self.cheats.has_recover_overrides()
        {
            // Override invalid recovered authorizations with signature overrides from cheat manager
            let cheated_auths = tx_7702
                .tx()
                .authorization_list
                .iter()
                .zip(tx_env.base.authorization_list)
                .map(|(signed_auth, either_auth)| {
                    either_auth.right_and_then(|recovered_auth| {
                        if recovered_auth.authority().is_none()
                            && let Ok(signature) = signed_auth.signature()
                            && let Some(override_addr) =
                                self.cheats.get_recover_override(&signature.as_bytes().into())
                        {
                            Either::Right(RecoveredAuthorization::new_unchecked(
                                recovered_auth.into_parts().0,
                                RecoveredAuthority::Valid(override_addr),
                            ))
                        } else {
                            Either::Right(recovered_auth)
                        }
                    })
                })
                .collect();
            tx_env.base.authorization_list = cheated_auths;
        }
        if self.networks.is_optimism() {
            // NOTE(review): OP execution is given the raw enveloped tx bytes here — presumably
            // needed for L1 fee computation; confirm against op-revm docs.
            tx_env.enveloped_tx = Some(alloy_rlp::encode(tx.transaction.as_ref()).into());
        }
        Env::new(self.evm_env.clone(), tx_env, self.networks)
    }
}
/// Represents the result of a single transaction execution attempt
#[derive(Debug)]
pub enum TransactionExecutionOutcome {
    /// Transaction successfully executed
    Executed(ExecutedTransaction),
    /// Invalid transaction not executed
    Invalid(Arc<PoolTransaction>, InvalidTransactionError),
    /// Execution skipped because it could exceed the block gas limit
    BlockGasExhausted(Arc<PoolTransaction>),
    /// Execution skipped because it exceeded the blob gas limit
    BlobGasExhausted(Arc<PoolTransaction>),
    /// Execution skipped because it exceeded the transaction gas limit
    TransactionGasExhausted(Arc<PoolTransaction>),
    /// When an error occurred during execution
    DatabaseError(Arc<PoolTransaction>, DatabaseError),
}
impl<DB: Db + ?Sized, V: TransactionValidator> Iterator for &mut TransactionExecutor<'_, DB, V> {
    type Item = TransactionExecutionOutcome;

    /// Executes the next pending transaction against the database and returns the outcome.
    fn next(&mut self) -> Option<Self::Item> {
        let transaction = self.pending.next()?;
        let sender = *transaction.pending_transaction.sender();
        // Load the sender's account; a missing account defaults to an empty one.
        let account = match self.db.basic(sender).map(|acc| acc.unwrap_or_default()) {
            Ok(account) => account,
            Err(err) => return Some(TransactionExecutionOutcome::DatabaseError(transaction, err)),
        };
        let env = self.env_for(&transaction.pending_transaction);
        // check that we comply with the block's gas limit, if not disabled
        let max_block_gas = self.gas_used.saturating_add(env.tx.base.gas_limit);
        if !env.evm_env.cfg_env.disable_block_gas_limit
            && max_block_gas > env.evm_env.block_env.gas_limit
        {
            return Some(TransactionExecutionOutcome::BlockGasExhausted(transaction));
        }
        // check that we comply with the transaction's gas limit as imposed by Osaka (EIP-7825)
        // NOTE(review): this check only runs when no explicit `tx_gas_limit_cap` is configured;
        // confirm an explicitly configured cap is enforced elsewhere.
        if env.evm_env.cfg_env.tx_gas_limit_cap.is_none()
            && transaction.pending_transaction.transaction.gas_limit()
                > env.evm_env.cfg_env().tx_gas_limit_cap()
        {
            return Some(TransactionExecutionOutcome::TransactionGasExhausted(transaction));
        }
        // check that we comply with the block's blob gas limit
        let max_blob_gas = self.blob_gas_used.saturating_add(
            transaction.pending_transaction.transaction.blob_gas_used().unwrap_or(0),
        );
        if max_blob_gas > self.blob_params.max_blob_gas_per_block() {
            return Some(TransactionExecutionOutcome::BlobGasExhausted(transaction));
        }
        // validate before executing
        if let Err(err) = self.validator.validate_pool_transaction_for(
            &transaction.pending_transaction,
            &account,
            &env,
        ) {
            warn!(target: "backend", "Skipping invalid tx execution [{:?}] {}", transaction.hash(), err);
            return Some(TransactionExecutionOutcome::Invalid(transaction, err));
        }
        let nonce = account.nonce;
        // Configure the inspector according to the executor's tracing/printing flags.
        let mut inspector = AnvilInspector::default().with_tracing();
        if self.enable_steps_tracing {
            inspector = inspector.with_steps_tracing();
        }
        if self.print_logs {
            inspector = inspector.with_log_collector();
        }
        if self.print_traces {
            inspector = inspector.with_trace_printer();
        }
        let exec_result = {
            let mut evm = new_evm_with_inspector(&mut *self.db, &env, &mut inspector);
            // Inject network-specific and user-supplied precompiles.
            self.networks.inject_precompiles(evm.precompiles_mut());
            if let Some(factory) = &self.precompile_factory {
                evm.precompiles_mut().extend_precompiles(factory.precompiles());
            }
            let cheats = Arc::new(self.cheats.clone());
            if cheats.has_recover_overrides() {
                // Replace the canonical ecrecover precompile with the cheat-aware variant.
                let cheat_ecrecover = CheatEcrecover::new(Arc::clone(&cheats));
                evm.precompiles_mut().apply_precompile(&EC_RECOVER, move |_| {
                    Some(DynPrecompile::new_stateful(
                        cheat_ecrecover.precompile_id().clone(),
                        move |input| cheat_ecrecover.call(input),
                    ))
                });
            }
            trace!(target: "backend", "[{:?}] executing", transaction.hash());
            // transact and commit the transaction
            match evm.transact_commit(env.tx) {
                Ok(exec_result) => exec_result,
                Err(err) => {
                    warn!(target: "backend", "[{:?}] failed to execute: {:?}", transaction.hash(), err);
                    match err {
                        EVMError::Database(err) => {
                            return Some(TransactionExecutionOutcome::DatabaseError(
                                transaction,
                                err,
                            ));
                        }
                        EVMError::Transaction(err) => {
                            return Some(TransactionExecutionOutcome::Invalid(
                                transaction,
                                err.into(),
                            ));
                        }
                        // This will correspond to prevrandao not set, and it should never happen.
                        // If it does, it's a bug.
                        e => panic!("failed to execute transaction: {e}"),
                    }
                }
            }
        };
        if self.print_traces {
            inspector.print_traces(self.call_trace_decoder.clone());
        }
        inspector.print_logs();
        // Normalize the EVM result into (exit, gas, output, logs).
        let (exit_reason, gas_used, out, logs) = match exec_result {
            ExecutionResult::Success { reason, gas_used, logs, output, .. } => {
                (reason.into(), gas_used, Some(output), Some(logs))
            }
            ExecutionResult::Revert { gas_used, output } => {
                (InstructionResult::Revert, gas_used, Some(Output::Call(output)), None)
            }
            ExecutionResult::Halt { reason, gas_used } => {
                (op_haltreason_to_instruction_result(reason), gas_used, None, None)
            }
        };
        if exit_reason == InstructionResult::OutOfGas {
            // this currently useful for debugging estimations
            warn!(target: "backend", "[{:?}] executed with out of gas", transaction.hash())
        }
        trace!(target: "backend", ?exit_reason, ?gas_used, "[{:?}] executed with out={:?}", transaction.hash(), out);
        // Track the total gas used for total gas per block checks
        self.gas_used = self.gas_used.saturating_add(gas_used);
        // Track the total blob gas used for total blob gas per blob checks
        if let Some(blob_gas) = transaction.pending_transaction.transaction.blob_gas_used() {
            self.blob_gas_used = self.blob_gas_used.saturating_add(blob_gas);
        }
        trace!(target: "backend::executor", "transacted [{:?}], result: {:?} gas {}", transaction.hash(), exit_reason, gas_used);
        let tx = ExecutedTransaction {
            transaction,
            exit_reason,
            out,
            gas_used,
            logs: logs.unwrap_or_default(),
            traces: inspector.tracer.map(|t| t.into_traces().into_nodes()).unwrap_or_default(),
            nonce,
        };
        Some(TransactionExecutionOutcome::Executed(tx))
    }
}
/// Accrues every log's address and topics into the given bloom filter.
fn build_logs_bloom(logs: &[Log], bloom: &mut Bloom) {
    for log in logs {
        bloom.accrue(BloomInput::Raw(log.address.as_slice()));
        log.topics()
            .iter()
            .for_each(|topic| bloom.accrue(BloomInput::Raw(topic.as_slice())));
    }
}
/// Creates a new EVM (mainnet or optimism) with the given database, env and inspector.
pub fn new_evm_with_inspector<DB, I>(
    db: DB,
    env: &Env,
    inspector: I,
) -> EitherEvm<DB, I, PrecompilesMap>
where
    DB: Database<Error = DatabaseError> + Debug,
    I: Inspector<EthEvmContext<DB>> + Inspector<OpContext<DB>>,
{
    if env.networks.is_optimism() {
        // Re-wrap the cfg with the OP spec; the block env is reused as-is.
        let evm_env = EvmEnv::new(
            env.evm_env.cfg_env.clone().with_spec(op_revm::OpSpecId::ISTHMUS),
            env.evm_env.block_env.clone(),
        );
        EitherEvm::Op(OpEvmFactory::default().create_evm_with_inspector(db, evm_env, inspector))
    } else {
        EitherEvm::Eth(EthEvmFactory::default().create_evm_with_inspector(
            db,
            env.evm_env.clone(),
            inspector,
        ))
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mod.rs | crates/anvil/src/eth/backend/mod.rs | //! blockchain Backend
/// [revm](foundry_evm::revm) related types
pub mod db;
/// In-memory Backend
pub mod mem;

pub mod cheats;
pub mod env;
pub mod executor;
pub mod fork;
pub mod genesis;
pub mod info;
pub mod notifications;
pub mod time;
pub mod validate;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/fork.rs | crates/anvil/src/eth/backend/fork.rs | //! Support for forking off another client
use crate::eth::{backend::db::Db, error::BlockchainError, pool::transactions::PoolTransaction};
use alloy_consensus::TrieAccount;
use alloy_eips::eip2930::AccessListResult;
use alloy_network::{AnyRpcBlock, AnyRpcTransaction, BlockResponse, TransactionResponse};
use alloy_primitives::{
Address, B256, Bytes, StorageValue, U256,
map::{FbHashMap, HashMap},
};
use alloy_provider::{
Provider,
ext::{DebugApi, TraceApi},
};
use alloy_rpc_types::{
BlockId, BlockNumberOrTag as BlockNumber, BlockTransactions, EIP1186AccountProofResponse,
FeeHistory, Filter, Log,
request::TransactionRequest,
simulate::{SimulatePayload, SimulatedBlock},
trace::{
geth::{GethDebugTracingOptions, GethTrace},
parity::LocalizedTransactionTrace as Trace,
},
};
use alloy_serde::WithOtherFields;
use alloy_transport::TransportError;
use foundry_common::provider::{ProviderBuilder, RetryProvider};
use foundry_primitives::FoundryTxReceipt;
use parking_lot::{
RawRwLock, RwLock,
lock_api::{RwLockReadGuard, RwLockWriteGuard},
};
use revm::context_interface::block::BlobExcessGasAndPrice;
use std::{sync::Arc, time::Duration};
use tokio::sync::RwLock as AsyncRwLock;
/// Represents a fork of a remote client
///
/// This type contains a subset of the [`EthApi`](crate::eth::EthApi) functions but will exclusively
/// fetch the requested data from the remote client, if it wasn't already fetched.
#[derive(Clone, Debug)]
pub struct ClientFork {
    /// Contains the cached data
    pub storage: Arc<RwLock<ForkedStorage>>,
    /// contains the info how the fork is configured
    // Wrapping this in a lock, ensures we can update this on the fly via additional custom RPC
    // endpoints
    pub config: Arc<RwLock<ClientForkConfig>>,
    /// This also holds a handle to the underlying database
    pub database: Arc<AsyncRwLock<Box<dyn Db>>>,
}
impl ClientFork {
/// Creates a new instance of the fork
pub fn new(config: ClientForkConfig, database: Arc<AsyncRwLock<Box<dyn Db>>>) -> Self {
Self { storage: Default::default(), config: Arc::new(RwLock::new(config)), database }
}
    /// Reset the fork to a fresh forked state, and optionally update the fork config
    pub async fn reset(
        &self,
        url: Option<String>,
        block_number: impl Into<BlockId>,
    ) -> Result<(), BlockchainError> {
        let block_number = block_number.into();
        {
            // Reset the underlying database first (scoped so the write lock is released early).
            self.database
                .write()
                .await
                .maybe_reset(url.clone(), block_number)
                .map_err(BlockchainError::Internal)?;
        }
        if let Some(url) = url {
            self.config.write().update_url(url)?;
            // Keep an explicitly configured chain id; otherwise refetch it from the new endpoint.
            let override_chain_id = self.config.read().override_chain_id;
            let chain_id = if let Some(chain_id) = override_chain_id {
                chain_id
            } else {
                self.provider().get_chain_id().await?
            };
            self.config.write().chain_id = chain_id;
        }
        // Refresh the configured fork point from the (possibly new) remote.
        let provider = self.provider();
        let block =
            provider.get_block(block_number).await?.ok_or(BlockchainError::BlockNotFound)?;
        let block_hash = block.header.hash;
        let timestamp = block.header.timestamp;
        let base_fee = block.header.base_fee_per_gas;
        let total_difficulty = block.header.total_difficulty.unwrap_or_default();
        let number = block.header.number;
        self.config.write().update_block(
            number,
            block_hash,
            timestamp,
            base_fee.map(|g| g as u128),
            total_difficulty,
        );
        // All previously cached responses refer to the old fork point and must be dropped.
        self.clear_cached_storage();
        self.database.write().await.insert_block_hash(U256::from(number), block_hash);
        Ok(())
    }
/// Removes all data cached from previous responses
pub fn clear_cached_storage(&self) {
self.storage.write().clear()
}
/// Returns true whether the block predates the fork
pub fn predates_fork(&self, block: u64) -> bool {
block < self.block_number()
}
/// Returns true whether the block predates the fork _or_ is the same block as the fork
pub fn predates_fork_inclusive(&self, block: u64) -> bool {
block <= self.block_number()
}
    /// Returns the timestamp of the block the fork is currently pinned to.
    pub fn timestamp(&self) -> u64 {
        self.config.read().timestamp
    }
    /// Returns the block number the fork is currently pinned to.
    pub fn block_number(&self) -> u64 {
        self.config.read().block_number
    }
    /// Returns the transaction hash we forked off of, if any.
    // NOTE(review): presumably only set when forking at a specific transaction — see
    // `ClientForkConfig`.
    pub fn transaction_hash(&self) -> Option<B256> {
        self.config.read().transaction_hash
    }
    /// Returns the total difficulty of the configured fork block.
    pub fn total_difficulty(&self) -> U256 {
        self.config.read().total_difficulty
    }
    /// Returns the base fee of the configured fork block, if any.
    pub fn base_fee(&self) -> Option<u128> {
        self.config.read().base_fee
    }
    /// Returns the hash of the configured fork block.
    pub fn block_hash(&self) -> B256 {
        self.config.read().block_hash
    }
    /// Returns the RPC endpoint URL the fork fetches data from.
    pub fn eth_rpc_url(&self) -> String {
        self.config.read().eth_rpc_url.clone()
    }
    /// Returns the configured chain id of the fork.
    pub fn chain_id(&self) -> u64 {
        self.config.read().chain_id
    }
    /// Returns a handle to the remote provider used for RPC requests.
    fn provider(&self) -> Arc<RetryProvider> {
        self.config.read().provider.clone()
    }
    /// Acquires a read guard on the response cache.
    fn storage_read(&self) -> RwLockReadGuard<'_, RawRwLock, ForkedStorage> {
        self.storage.read()
    }
    /// Acquires a write guard on the response cache.
    fn storage_write(&self) -> RwLockWriteGuard<'_, RawRwLock, ForkedStorage> {
        self.storage.write()
    }
    /// Returns the fee history `eth_feeHistory`
    ///
    /// Forwarded directly to the remote provider; the result is not cached.
    pub async fn fee_history(
        &self,
        block_count: u64,
        newest_block: BlockNumber,
        reward_percentiles: &[f64],
    ) -> Result<FeeHistory, TransportError> {
        self.provider().get_fee_history(block_count, newest_block, reward_percentiles).await
    }
    /// Sends `eth_getProof`
    ///
    /// A missing `block_number` defaults to the provider's default block id.
    pub async fn get_proof(
        &self,
        address: Address,
        keys: Vec<B256>,
        block_number: Option<BlockId>,
    ) -> Result<EIP1186AccountProofResponse, TransportError> {
        self.provider().get_proof(address, keys).block_id(block_number.unwrap_or_default()).await
    }
/// Sends `eth_call`
pub async fn call(
&self,
request: &WithOtherFields<TransactionRequest>,
block: Option<BlockNumber>,
) -> Result<Bytes, TransportError> {
let block = block.unwrap_or(BlockNumber::Latest);
let res = self.provider().call(request.clone()).block(block.into()).await?;
Ok(res)
}
/// Sends `eth_simulateV1`
pub async fn simulate_v1(
&self,
request: &SimulatePayload,
block: Option<BlockNumber>,
) -> Result<Vec<SimulatedBlock<AnyRpcBlock>>, TransportError> {
let mut simulate_call = self.provider().simulate(request);
if let Some(n) = block {
simulate_call = simulate_call.number(n.as_number().unwrap());
}
let res = simulate_call.await?;
Ok(res)
}
    /// Sends `eth_estimateGas`
    ///
    /// The provider returns a `u64`; it is widened to `u128` for the caller.
    pub async fn estimate_gas(
        &self,
        request: &WithOtherFields<TransactionRequest>,
        block: Option<BlockNumber>,
    ) -> Result<u128, TransportError> {
        let block = block.unwrap_or_default();
        let res = self.provider().estimate_gas(request.clone()).block(block.into()).await?;
        Ok(res as u128)
    }
    /// Sends `eth_createAccessList`
    ///
    /// A missing `block` defaults to the provider's default block id.
    pub async fn create_access_list(
        &self,
        request: &WithOtherFields<TransactionRequest>,
        block: Option<BlockNumber>,
    ) -> Result<AccessListResult, TransportError> {
        self.provider().create_access_list(request).block_id(block.unwrap_or_default().into()).await
    }
    /// Sends `eth_getStorageAt` for the given account slot at an optional block.
    pub async fn storage_at(
        &self,
        address: Address,
        index: U256,
        number: Option<BlockNumber>,
    ) -> Result<StorageValue, TransportError> {
        self.provider()
            .get_storage_at(address, index)
            .block_id(number.unwrap_or_default().into())
            .await
    }
pub async fn logs(&self, filter: &Filter) -> Result<Vec<Log>, TransportError> {
if let Some(logs) = self.storage_read().logs.get(filter).cloned() {
return Ok(logs);
}
let logs = self.provider().get_logs(filter).await?;
let mut storage = self.storage_write();
storage.logs.insert(filter.clone(), logs.clone());
Ok(logs)
}
    /// Sends `eth_getCode`, caching the response per (address, block number).
    pub async fn get_code(
        &self,
        address: Address,
        blocknumber: u64,
    ) -> Result<Bytes, TransportError> {
        trace!(target: "backend::fork", "get_code={:?}", address);
        // Serve from the cache if this exact (address, block) pair was fetched before.
        if let Some(code) = self.storage_read().code_at.get(&(address, blocknumber)).cloned() {
            return Ok(code);
        }

        let block_id = BlockId::number(blocknumber);

        let code = self.provider().get_code_at(address).block_id(block_id).await?;
        let mut storage = self.storage_write();
        storage.code_at.insert((address, blocknumber), code.clone());

        Ok(code)
    }
    /// Sends `eth_getBalance` for the account at the given block (uncached).
    pub async fn get_balance(
        &self,
        address: Address,
        blocknumber: u64,
    ) -> Result<U256, TransportError> {
        trace!(target: "backend::fork", "get_balance={:?}", address);
        self.provider().get_balance(address).block_id(blocknumber.into()).await
    }
    /// Sends `eth_getTransactionCount` for the account at the given block (uncached).
    pub async fn get_nonce(&self, address: Address, block: u64) -> Result<u64, TransportError> {
        trace!(target: "backend::fork", "get_nonce={:?}", address);
        self.provider().get_transaction_count(address).block_id(block.into()).await
    }
    /// Fetches the trie account of `address` at the given block (uncached).
    pub async fn get_account(
        &self,
        address: Address,
        blocknumber: u64,
    ) -> Result<TrieAccount, TransportError> {
        trace!(target: "backend::fork", "get_account={:?}", address);
        self.provider().get_account(address).block_id(blocknumber.into()).await
    }
    /// Returns the transaction at `index` of the block with the given number, if any.
    pub async fn transaction_by_block_number_and_index(
        &self,
        number: u64,
        index: usize,
    ) -> Result<Option<AnyRpcTransaction>, TransportError> {
        if let Some(block) = self.block_by_number(number).await? {
            match block.transactions() {
                // Full transactions can be returned directly.
                BlockTransactions::Full(txs) => {
                    if let Some(tx) = txs.get(index) {
                        return Ok(Some(tx.clone()));
                    }
                }
                // Hash-only blocks require a follow-up lookup by hash.
                BlockTransactions::Hashes(hashes) => {
                    if let Some(tx_hash) = hashes.get(index) {
                        return self.transaction_by_hash(*tx_hash).await;
                    }
                }
                // TODO(evalir): Is it possible to reach this case? Should we support it
                BlockTransactions::Uncle => panic!("Uncles not supported"),
            }
        }
        Ok(None)
    }
pub async fn transaction_by_block_hash_and_index(
&self,
hash: B256,
index: usize,
) -> Result<Option<AnyRpcTransaction>, TransportError> {
if let Some(block) = self.block_by_hash(hash).await? {
match block.transactions() {
BlockTransactions::Full(txs) => {
if let Some(tx) = txs.get(index) {
return Ok(Some(tx.clone()));
}
}
BlockTransactions::Hashes(hashes) => {
if let Some(tx_hash) = hashes.get(index) {
return self.transaction_by_hash(*tx_hash).await;
}
}
// TODO(evalir): Is it possible to reach this case? Should we support it
BlockTransactions::Uncle => panic!("Uncles not supported"),
}
}
Ok(None)
}
pub async fn transaction_by_hash(
&self,
hash: B256,
) -> Result<Option<AnyRpcTransaction>, TransportError> {
trace!(target: "backend::fork", "transaction_by_hash={:?}", hash);
if let tx @ Some(_) = self.storage_read().transactions.get(&hash).cloned() {
return Ok(tx);
}
let tx = self.provider().get_transaction_by_hash(hash).await?;
if let Some(tx) = tx.clone() {
let mut storage = self.storage_write();
storage.transactions.insert(hash, tx);
}
Ok(tx)
}
pub async fn trace_transaction(&self, hash: B256) -> Result<Vec<Trace>, TransportError> {
if let Some(traces) = self.storage_read().transaction_traces.get(&hash).cloned() {
return Ok(traces);
}
let traces = self.provider().trace_transaction(hash).await?.into_iter().collect::<Vec<_>>();
let mut storage = self.storage_write();
storage.transaction_traces.insert(hash, traces.clone());
Ok(traces)
}
pub async fn debug_trace_transaction(
&self,
hash: B256,
opts: GethDebugTracingOptions,
) -> Result<GethTrace, TransportError> {
if let Some(traces) = self.storage_read().geth_transaction_traces.get(&hash).cloned() {
return Ok(traces);
}
let trace = self.provider().debug_trace_transaction(hash, opts).await?;
let mut storage = self.storage_write();
storage.geth_transaction_traces.insert(hash, trace.clone());
Ok(trace)
}
pub async fn debug_code_by_hash(
&self,
code_hash: B256,
block_id: Option<BlockId>,
) -> Result<Option<Bytes>, TransportError> {
self.provider().debug_code_by_hash(code_hash, block_id).await
}
pub async fn trace_block(&self, number: u64) -> Result<Vec<Trace>, TransportError> {
if let Some(traces) = self.storage_read().block_traces.get(&number).cloned() {
return Ok(traces);
}
let traces =
self.provider().trace_block(number.into()).await?.into_iter().collect::<Vec<_>>();
let mut storage = self.storage_write();
storage.block_traces.insert(number, traces.clone());
Ok(traces)
}
pub async fn transaction_receipt(
&self,
hash: B256,
) -> Result<Option<FoundryTxReceipt>, BlockchainError> {
if let Some(receipt) = self.storage_read().transaction_receipts.get(&hash).cloned() {
return Ok(Some(receipt));
}
if let Some(receipt) = self.provider().get_transaction_receipt(hash).await? {
let receipt = FoundryTxReceipt::try_from(receipt)
.map_err(|_| BlockchainError::FailedToDecodeReceipt)?;
let mut storage = self.storage_write();
storage.transaction_receipts.insert(hash, receipt.clone());
return Ok(Some(receipt));
}
Ok(None)
}
pub async fn block_receipts(
&self,
number: u64,
) -> Result<Option<Vec<FoundryTxReceipt>>, BlockchainError> {
if let receipts @ Some(_) = self.storage_read().block_receipts.get(&number).cloned() {
return Ok(receipts);
}
// TODO Needs to be removed.
// Since alloy doesn't indicate in the result whether the block exists,
// this is being temporarily implemented in anvil.
if self.predates_fork_inclusive(number) {
let receipts = self.provider().get_block_receipts(BlockId::from(number)).await?;
let receipts = receipts
.map(|r| {
r.into_iter()
.map(|r| {
FoundryTxReceipt::try_from(r)
.map_err(|_| BlockchainError::FailedToDecodeReceipt)
})
.collect::<Result<Vec<_>, _>>()
})
.transpose()?;
if let Some(receipts) = receipts.clone() {
let mut storage = self.storage_write();
storage.block_receipts.insert(number, receipts);
}
return Ok(receipts);
}
Ok(None)
}
pub async fn block_by_hash(&self, hash: B256) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(mut block) = self.storage_read().blocks.get(&hash).cloned() {
block.transactions.convert_to_hashes();
return Ok(Some(block));
}
Ok(self.fetch_full_block(hash).await?.map(|mut b| {
b.transactions.convert_to_hashes();
b
}))
}
pub async fn block_by_hash_full(
&self,
hash: B256,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(block) = self.storage_read().blocks.get(&hash).cloned() {
return Ok(Some(self.convert_to_full_block(block)));
}
self.fetch_full_block(hash).await
}
pub async fn block_by_number(
&self,
block_number: u64,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(mut block) = self
.storage_read()
.hashes
.get(&block_number)
.and_then(|hash| self.storage_read().blocks.get(hash).cloned())
{
block.transactions.convert_to_hashes();
return Ok(Some(block));
}
let mut block = self.fetch_full_block(block_number).await?;
if let Some(block) = &mut block {
block.transactions.convert_to_hashes();
}
Ok(block)
}
pub async fn block_by_number_full(
&self,
block_number: u64,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(block) = self
.storage_read()
.hashes
.get(&block_number)
.copied()
.and_then(|hash| self.storage_read().blocks.get(&hash).cloned())
{
return Ok(Some(self.convert_to_full_block(block)));
}
self.fetch_full_block(block_number).await
}
async fn fetch_full_block(
&self,
block_id: impl Into<BlockId>,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(block) = self.provider().get_block(block_id.into()).full().await? {
let hash = block.header.hash;
let block_number = block.header.number;
let mut storage = self.storage_write();
// also insert all transactions
let block_txs = match block.transactions() {
BlockTransactions::Full(txs) => txs.to_owned(),
_ => vec![],
};
storage.transactions.extend(block_txs.iter().map(|tx| (tx.tx_hash(), tx.clone())));
storage.hashes.insert(block_number, hash);
storage.blocks.insert(hash, block.clone());
return Ok(Some(block));
}
Ok(None)
}
pub async fn uncle_by_block_hash_and_index(
&self,
hash: B256,
index: usize,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(block) = self.block_by_hash(hash).await? {
return self.uncles_by_block_and_index(block, index).await;
}
Ok(None)
}
pub async fn uncle_by_block_number_and_index(
&self,
number: u64,
index: usize,
) -> Result<Option<AnyRpcBlock>, TransportError> {
if let Some(block) = self.block_by_number(number).await? {
return self.uncles_by_block_and_index(block, index).await;
}
Ok(None)
}
async fn uncles_by_block_and_index(
&self,
block: AnyRpcBlock,
index: usize,
) -> Result<Option<AnyRpcBlock>, TransportError> {
let block_hash = block.header.hash;
let block_number = block.header.number;
if let Some(uncles) = self.storage_read().uncles.get(&block_hash) {
return Ok(uncles.get(index).cloned());
}
let mut uncles = Vec::with_capacity(block.uncles.len());
for (uncle_idx, _) in block.uncles.iter().enumerate() {
let uncle =
match self.provider().get_uncle(block_number.into(), uncle_idx as u64).await? {
Some(u) => u,
None => return Ok(None),
};
uncles.push(uncle);
}
self.storage_write().uncles.insert(block_hash, uncles.clone());
Ok(uncles.get(index).cloned())
}
/// Converts a block of hashes into a full block
fn convert_to_full_block(&self, mut block: AnyRpcBlock) -> AnyRpcBlock {
let storage = self.storage.read();
let block_txs_len = match block.transactions {
BlockTransactions::Full(ref txs) => txs.len(),
BlockTransactions::Hashes(ref hashes) => hashes.len(),
// TODO: Should this be supported at all?
BlockTransactions::Uncle => 0,
};
let mut transactions = Vec::with_capacity(block_txs_len);
for tx in block.transactions.hashes() {
if let Some(tx) = storage.transactions.get(&tx).cloned() {
transactions.push(tx);
}
}
// TODO: fix once blocks have generic transactions
block.inner.transactions = BlockTransactions::Full(transactions);
block
}
}
/// Contains all fork metadata
#[derive(Clone, Debug)]
pub struct ClientForkConfig {
pub eth_rpc_url: String,
/// The block number of the forked block
pub block_number: u64,
/// The hash of the forked block
pub block_hash: B256,
/// The transaction hash we forked off of, if any.
pub transaction_hash: Option<B256>,
// TODO make provider agnostic
pub provider: Arc<RetryProvider>,
pub chain_id: u64,
pub override_chain_id: Option<u64>,
/// The timestamp for the forked block
pub timestamp: u64,
/// The basefee of the forked block
pub base_fee: Option<u128>,
/// Blob gas used of the forked block
pub blob_gas_used: Option<u128>,
/// Blob excess gas and price of the forked block
pub blob_excess_gas_and_price: Option<BlobExcessGasAndPrice>,
/// request timeout
pub timeout: Duration,
/// request retries for spurious networks
pub retries: u32,
/// request retries for spurious networks
pub backoff: Duration,
/// available CUPS
pub compute_units_per_second: u64,
/// total difficulty of the chain until this block
pub total_difficulty: U256,
/// Transactions to force include in the forked chain
pub force_transactions: Option<Vec<PoolTransaction>>,
}
impl ClientForkConfig {
/// Updates the provider URL
///
/// # Errors
///
/// This will fail if no new provider could be established (erroneous URL)
fn update_url(&mut self, url: String) -> Result<(), BlockchainError> {
// let interval = self.provider.get_interval();
self.provider = Arc::new(
ProviderBuilder::new(url.as_str())
.timeout(self.timeout)
// .timeout_retry(self.retries)
.max_retry(self.retries)
.initial_backoff(self.backoff.as_millis() as u64)
.compute_units_per_second(self.compute_units_per_second)
.build()
.map_err(|_| BlockchainError::InvalidUrl(url.clone()))?, // .interval(interval),
);
trace!(target: "fork", "Updated rpc url {}", url);
self.eth_rpc_url = url;
Ok(())
}
/// Updates the block forked off `(block number, block hash, timestamp)`
pub fn update_block(
&mut self,
block_number: u64,
block_hash: B256,
timestamp: u64,
base_fee: Option<u128>,
total_difficulty: U256,
) {
self.block_number = block_number;
self.block_hash = block_hash;
self.timestamp = timestamp;
self.base_fee = base_fee;
self.total_difficulty = total_difficulty;
trace!(target: "fork", "Updated block number={} hash={:?}", block_number, block_hash);
}
}
/// Contains cached state fetched to serve EthApi requests
///
/// This is used as a cache so repeated requests to the same data are not sent to the remote client
#[derive(Clone, Debug, Default)]
pub struct ForkedStorage {
pub uncles: FbHashMap<32, Vec<AnyRpcBlock>>,
pub blocks: FbHashMap<32, AnyRpcBlock>,
pub hashes: HashMap<u64, B256>,
pub transactions: FbHashMap<32, AnyRpcTransaction>,
pub transaction_receipts: FbHashMap<32, FoundryTxReceipt>,
pub transaction_traces: FbHashMap<32, Vec<Trace>>,
pub logs: HashMap<Filter, Vec<Log>>,
pub geth_transaction_traces: FbHashMap<32, GethTrace>,
pub block_traces: HashMap<u64, Vec<Trace>>,
pub block_receipts: HashMap<u64, Vec<FoundryTxReceipt>>,
pub code_at: HashMap<(Address, u64), Bytes>,
}
impl ForkedStorage {
/// Clears all data
pub fn clear(&mut self) {
// simply replace with a completely new, empty instance
*self = Self::default()
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/validate.rs | crates/anvil/src/eth/backend/validate.rs | //! Support for validating transactions at certain stages
use crate::eth::{
backend::env::Env,
error::{BlockchainError, InvalidTransactionError},
};
use anvil_core::eth::transaction::PendingTransaction;
use revm::state::AccountInfo;
/// A trait for validating transactions
#[async_trait::async_trait]
pub trait TransactionValidator {
/// Validates the transaction's validity when it comes to nonce, payment
///
/// This is intended to be checked before the transaction makes it into the pool and whether it
/// should rather be outright rejected if the sender has insufficient funds.
async fn validate_pool_transaction(
&self,
tx: &PendingTransaction,
) -> Result<(), BlockchainError>;
/// Validates the transaction against a specific account before entering the pool
fn validate_pool_transaction_for(
&self,
tx: &PendingTransaction,
account: &AccountInfo,
env: &Env,
) -> Result<(), InvalidTransactionError>;
/// Validates the transaction against a specific account
///
/// This should succeed if the transaction is ready to be executed
fn validate_for(
&self,
tx: &PendingTransaction,
account: &AccountInfo,
env: &Env,
) -> Result<(), InvalidTransactionError>;
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/notifications.rs | crates/anvil/src/eth/backend/notifications.rs | //! Notifications emitted from the backed
use alloy_consensus::Header;
use alloy_primitives::B256;
use futures::channel::mpsc::UnboundedReceiver;
use std::sync::Arc;
/// A notification that's emitted when a new block was imported
#[derive(Clone, Debug)]
pub struct NewBlockNotification {
/// Hash of the imported block
pub hash: B256,
/// block header
pub header: Arc<Header>,
}
/// Type alias for a receiver that receives [NewBlockNotification]
pub type NewBlockNotifications = UnboundedReceiver<NewBlockNotification>;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/fork_db.rs | crates/anvil/src/eth/backend/mem/fork_db.rs | use crate::eth::backend::db::{
Db, MaybeForkedDatabase, MaybeFullDatabase, SerializableAccountRecord, SerializableBlock,
SerializableHistoricalStates, SerializableState, SerializableTransaction, StateDb,
};
use alloy_primitives::{Address, B256, U256, map::HashMap};
use alloy_rpc_types::BlockId;
use foundry_evm::{
backend::{BlockchainDb, DatabaseResult, RevertStateSnapshotAction, StateSnapshot},
fork::database::ForkDbStateSnapshot,
};
use revm::{
context::BlockEnv,
database::{Database, DbAccount},
state::AccountInfo,
};
pub use foundry_evm::fork::database::ForkedDatabase;
impl Db for ForkedDatabase {
fn insert_account(&mut self, address: Address, account: AccountInfo) {
self.database_mut().insert_account(address, account)
}
fn set_storage_at(&mut self, address: Address, slot: B256, val: B256) -> DatabaseResult<()> {
// this ensures the account is loaded first
let _ = Database::basic(self, address)?;
self.database_mut().set_storage_at(address, slot, val)
}
fn insert_block_hash(&mut self, number: U256, hash: B256) {
self.inner().block_hashes().write().insert(number, hash);
}
fn dump_state(
&self,
at: BlockEnv,
best_number: u64,
blocks: Vec<SerializableBlock>,
transactions: Vec<SerializableTransaction>,
historical_states: Option<SerializableHistoricalStates>,
) -> DatabaseResult<Option<SerializableState>> {
let mut db = self.database().clone();
let accounts = self
.database()
.cache
.accounts
.clone()
.into_iter()
.map(|(k, v)| -> DatabaseResult<_> {
let code = if let Some(code) = v.info.code {
code
} else {
db.code_by_hash(v.info.code_hash)?
};
Ok((
k,
SerializableAccountRecord {
nonce: v.info.nonce,
balance: v.info.balance,
code: code.original_bytes(),
storage: v.storage.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
},
))
})
.collect::<Result<_, _>>()?;
Ok(Some(SerializableState {
block: Some(at),
accounts,
best_block_number: Some(best_number),
blocks,
transactions,
historical_states,
}))
}
fn snapshot_state(&mut self) -> U256 {
self.insert_state_snapshot()
}
fn revert_state(&mut self, id: U256, action: RevertStateSnapshotAction) -> bool {
self.revert_state_snapshot(id, action)
}
fn current_state(&self) -> StateDb {
StateDb::new(self.create_state_snapshot())
}
}
impl MaybeFullDatabase for ForkedDatabase {
fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
Some(&self.database().cache.accounts)
}
fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
let db = self.inner().db();
let accounts = std::mem::take(&mut *db.accounts.write());
let storage = std::mem::take(&mut *db.storage.write());
let block_hashes = std::mem::take(&mut *db.block_hashes.write());
StateSnapshot { accounts, storage, block_hashes }
}
fn read_as_state_snapshot(&self) -> StateSnapshot {
let db = self.inner().db();
let accounts = db.accounts.read().clone();
let storage = db.storage.read().clone();
let block_hashes = db.block_hashes.read().clone();
StateSnapshot { accounts, storage, block_hashes }
}
fn clear(&mut self) {
self.flush_cache();
self.clear_into_state_snapshot();
}
fn init_from_state_snapshot(&mut self, state_snapshot: StateSnapshot) {
let db = self.inner().db();
let StateSnapshot { accounts, storage, block_hashes } = state_snapshot;
*db.accounts.write() = accounts;
*db.storage.write() = storage;
*db.block_hashes.write() = block_hashes;
}
}
impl MaybeFullDatabase for ForkDbStateSnapshot {
fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
Some(&self.local.cache.accounts)
}
fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
let mut state_snapshot = std::mem::take(&mut self.state_snapshot);
let local_state_snapshot = self.local.clear_into_state_snapshot();
state_snapshot.accounts.extend(local_state_snapshot.accounts);
state_snapshot.storage.extend(local_state_snapshot.storage);
state_snapshot.block_hashes.extend(local_state_snapshot.block_hashes);
state_snapshot
}
fn read_as_state_snapshot(&self) -> StateSnapshot {
let mut state_snapshot = self.state_snapshot.clone();
let local_state_snapshot = self.local.read_as_state_snapshot();
state_snapshot.accounts.extend(local_state_snapshot.accounts);
state_snapshot.storage.extend(local_state_snapshot.storage);
state_snapshot.block_hashes.extend(local_state_snapshot.block_hashes);
state_snapshot
}
fn clear(&mut self) {
std::mem::take(&mut self.state_snapshot);
self.local.clear()
}
fn init_from_state_snapshot(&mut self, state_snapshot: StateSnapshot) {
self.state_snapshot = state_snapshot;
}
}
impl MaybeForkedDatabase for ForkedDatabase {
fn maybe_reset(&mut self, url: Option<String>, block_number: BlockId) -> Result<(), String> {
self.reset(url, block_number)
}
fn maybe_flush_cache(&self) -> Result<(), String> {
self.flush_cache();
Ok(())
}
fn maybe_inner(&self) -> Result<&BlockchainDb, String> {
Ok(self.inner())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/state.rs | crates/anvil/src/eth/backend/mem/state.rs | //! Support for generating the state root for memdb storage
use alloy_primitives::{Address, B256, U256, keccak256, map::HashMap};
use alloy_rlp::Encodable;
use alloy_trie::{HashBuilder, Nibbles};
use revm::{database::DbAccount, state::AccountInfo};
pub fn build_root(values: impl IntoIterator<Item = (Nibbles, Vec<u8>)>) -> B256 {
let mut builder = HashBuilder::default();
for (key, value) in values {
builder.add_leaf(key, value.as_ref());
}
builder.root()
}
/// Builds state root from the given accounts
pub fn state_root(accounts: &HashMap<Address, DbAccount>) -> B256 {
build_root(trie_accounts(accounts))
}
/// Builds storage root from the given storage
pub fn storage_root(storage: &HashMap<U256, U256>) -> B256 {
build_root(trie_storage(storage))
}
/// Builds iterator over stored key-value pairs ready for storage trie root calculation.
pub fn trie_storage(storage: &HashMap<U256, U256>) -> Vec<(Nibbles, Vec<u8>)> {
let mut storage = storage
.iter()
.map(|(key, value)| {
let data = alloy_rlp::encode(value);
(Nibbles::unpack(keccak256(key.to_be_bytes::<32>())), data)
})
.collect::<Vec<_>>();
storage.sort_by(|(key1, _), (key2, _)| key1.cmp(key2));
storage
}
/// Builds iterator over stored key-value pairs ready for account trie root calculation.
pub fn trie_accounts(accounts: &HashMap<Address, DbAccount>) -> Vec<(Nibbles, Vec<u8>)> {
let mut accounts = accounts
.iter()
.map(|(address, account)| {
let data = trie_account_rlp(&account.info, &account.storage);
(Nibbles::unpack(keccak256(*address)), data)
})
.collect::<Vec<_>>();
accounts.sort_by(|(key1, _), (key2, _)| key1.cmp(key2));
accounts
}
/// Returns the RLP for this account.
pub fn trie_account_rlp(info: &AccountInfo, storage: &HashMap<U256, U256>) -> Vec<u8> {
let mut out: Vec<u8> = Vec::new();
let list: [&dyn Encodable; 4] =
[&info.nonce, &info.balance, &storage_root(storage), &info.code_hash];
alloy_rlp::encode_list::<_, dyn Encodable>(&list, &mut out);
out
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/storage.rs | crates/anvil/src/eth/backend/mem/storage.rs | //! In-memory blockchain storage
use crate::eth::{
backend::{
db::{
MaybeFullDatabase, SerializableBlock, SerializableHistoricalStates,
SerializableTransaction, StateDb,
},
env::Env,
mem::cache::DiskStateCache,
},
pool::transactions::PoolTransaction,
};
use alloy_consensus::{Header, constants::EMPTY_WITHDRAWALS};
use alloy_eips::eip7685::EMPTY_REQUESTS_HASH;
use alloy_primitives::{
B256, Bytes, U256,
map::{B256HashMap, HashMap},
};
use alloy_rpc_types::{
BlockId, BlockNumberOrTag, TransactionInfo as RethTransactionInfo,
trace::{
otterscan::{InternalOperation, OperationType},
parity::LocalizedTransactionTrace,
},
};
use anvil_core::eth::{
block::{Block, create_block},
transaction::{MaybeImpersonatedTransaction, TransactionInfo},
};
use foundry_evm::{
backend::MemDb,
traces::{CallKind, ParityTraceBuilder, TracingInspectorConfig},
};
use foundry_primitives::{FoundryReceiptEnvelope, FoundryTxReceipt};
use parking_lot::RwLock;
use revm::{context::Block as RevmBlock, primitives::hardfork::SpecId};
use std::{collections::VecDeque, fmt, path::PathBuf, sync::Arc, time::Duration};
// use yansi::Paint;
// === various limits in number of blocks ===
pub const DEFAULT_HISTORY_LIMIT: usize = 500;
const MIN_HISTORY_LIMIT: usize = 10;
// 1hr of up-time at lowest 1s interval
const MAX_ON_DISK_HISTORY_LIMIT: usize = 3_600;
/// Represents the complete state of single block
pub struct InMemoryBlockStates {
/// The states at a certain block
states: B256HashMap<StateDb>,
/// states which data is moved to disk
on_disk_states: B256HashMap<StateDb>,
/// How many states to store at most
in_memory_limit: usize,
/// minimum amount of states we keep in memory
min_in_memory_limit: usize,
/// maximum amount of states we keep on disk
///
/// Limiting the states will prevent disk blow up, especially in interval mining mode
max_on_disk_limit: usize,
/// the oldest states written to disk
oldest_on_disk: VecDeque<B256>,
/// all states present, used to enforce `in_memory_limit`
present: VecDeque<B256>,
/// Stores old states on disk
disk_cache: DiskStateCache,
}
impl InMemoryBlockStates {
/// Creates a new instance with limited slots
pub fn new(in_memory_limit: usize, on_disk_limit: usize) -> Self {
Self {
states: Default::default(),
on_disk_states: Default::default(),
in_memory_limit,
min_in_memory_limit: in_memory_limit.min(MIN_HISTORY_LIMIT),
max_on_disk_limit: on_disk_limit,
oldest_on_disk: Default::default(),
present: Default::default(),
disk_cache: Default::default(),
}
}
/// Configures no disk caching
pub fn memory_only(mut self) -> Self {
self.max_on_disk_limit = 0;
self
}
/// Configures the path on disk where the states will cached.
pub fn disk_path(mut self, path: PathBuf) -> Self {
self.disk_cache = self.disk_cache.with_path(path);
self
}
/// This modifies the `limit` what to keep stored in memory.
///
/// This will ensure the new limit adjusts based on the block time.
/// The lowest blocktime is 1s which should increase the limit slightly
pub fn update_interval_mine_block_time(&mut self, block_time: Duration) {
let block_time = block_time.as_secs();
// for block times lower than 2s we increase the mem limit since we're mining _small_ blocks
// very fast
// this will gradually be decreased once the max limit was reached
if block_time <= 2 {
self.in_memory_limit = DEFAULT_HISTORY_LIMIT * 3;
self.enforce_limits();
}
}
/// Returns true if only memory caching is supported.
fn is_memory_only(&self) -> bool {
self.max_on_disk_limit == 0
}
/// Inserts a new (hash -> state) pair
///
/// When the configured limit for the number of states that can be stored in memory is reached,
/// the oldest state is removed.
///
/// Since we keep a snapshot of the entire state as history, the size of the state will increase
/// with the transactions processed. To counter this, we gradually decrease the cache limit with
/// the number of states/blocks until we reached the `min_limit`.
///
/// When a state that was previously written to disk is requested, it is simply read from disk.
pub fn insert(&mut self, hash: B256, state: StateDb) {
if !self.is_memory_only() && self.present.len() >= self.in_memory_limit {
// once we hit the max limit we gradually decrease it
self.in_memory_limit =
self.in_memory_limit.saturating_sub(1).max(self.min_in_memory_limit);
}
self.enforce_limits();
self.states.insert(hash, state);
self.present.push_back(hash);
}
/// Enforces configured limits
fn enforce_limits(&mut self) {
// enforce memory limits
while self.present.len() >= self.in_memory_limit {
// evict the oldest block
if let Some((hash, mut state)) = self
.present
.pop_front()
.and_then(|hash| self.states.remove(&hash).map(|state| (hash, state)))
{
// only write to disk if supported
if !self.is_memory_only() {
let state_snapshot = state.0.clear_into_state_snapshot();
self.disk_cache.write(hash, state_snapshot);
self.on_disk_states.insert(hash, state);
self.oldest_on_disk.push_back(hash);
}
}
}
// enforce on disk limit and purge the oldest state cached on disk
while !self.is_memory_only() && self.oldest_on_disk.len() >= self.max_on_disk_limit {
// evict the oldest block
if let Some(hash) = self.oldest_on_disk.pop_front() {
self.on_disk_states.remove(&hash);
self.disk_cache.remove(hash);
}
}
}
/// Returns the in-memory state for the given `hash` if present
pub fn get_state(&self, hash: &B256) -> Option<&StateDb> {
self.states.get(hash)
}
/// Returns on-disk state for the given `hash` if present
pub fn get_on_disk_state(&mut self, hash: &B256) -> Option<&StateDb> {
if let Some(state) = self.on_disk_states.get_mut(hash)
&& let Some(cached) = self.disk_cache.read(*hash)
{
state.init_from_state_snapshot(cached);
return Some(state);
}
None
}
/// Sets the maximum number of stats we keep in memory
pub fn set_cache_limit(&mut self, limit: usize) {
self.in_memory_limit = limit;
}
/// Clears all entries
pub fn clear(&mut self) {
self.states.clear();
self.on_disk_states.clear();
self.present.clear();
for on_disk in std::mem::take(&mut self.oldest_on_disk) {
self.disk_cache.remove(on_disk)
}
}
/// Serialize all states to a list of serializable historical states
pub fn serialized_states(&mut self) -> SerializableHistoricalStates {
// Get in-memory states
let mut states = self
.states
.iter_mut()
.map(|(hash, state)| (*hash, state.serialize_state()))
.collect::<Vec<_>>();
// Get on-disk state snapshots
self.on_disk_states.iter().for_each(|(hash, _)| {
if let Some(state_snapshot) = self.disk_cache.read(*hash) {
states.push((*hash, state_snapshot));
}
});
SerializableHistoricalStates::new(states)
}
/// Load states from serialized data
pub fn load_states(&mut self, states: SerializableHistoricalStates) {
for (hash, state_snapshot) in states {
let mut state_db = StateDb::new(MemDb::default());
state_db.init_from_state_snapshot(state_snapshot);
self.insert(hash, state_db);
}
}
}
impl fmt::Debug for InMemoryBlockStates {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("InMemoryBlockStates")
.field("in_memory_limit", &self.in_memory_limit)
.field("min_in_memory_limit", &self.min_in_memory_limit)
.field("max_on_disk_limit", &self.max_on_disk_limit)
.field("oldest_on_disk", &self.oldest_on_disk)
.field("present", &self.present)
.finish_non_exhaustive()
}
}
impl Default for InMemoryBlockStates {
fn default() -> Self {
// enough in memory to store `DEFAULT_HISTORY_LIMIT` blocks in memory
Self::new(DEFAULT_HISTORY_LIMIT, MAX_ON_DISK_HISTORY_LIMIT)
}
}
/// Stores the blockchain data (blocks, transactions)
#[derive(Clone, Debug)]
pub struct BlockchainStorage {
/// all stored blocks (block hash -> block)
pub blocks: B256HashMap<Block>,
/// mapping from block number -> block hash
pub hashes: HashMap<u64, B256>,
/// The current best hash
pub best_hash: B256,
/// The current best block number
pub best_number: u64,
/// genesis hash of the chain
pub genesis_hash: B256,
/// Mapping from the transaction hash to a tuple containing the transaction as well as the
/// transaction receipt
pub transactions: B256HashMap<MinedTransaction>,
/// The total difficulty of the chain until this block
pub total_difficulty: U256,
}
impl BlockchainStorage {
/// Creates a new storage with a genesis block
pub fn new(
env: &Env,
spec_id: SpecId,
base_fee: Option<u64>,
timestamp: u64,
genesis_number: u64,
) -> Self {
let is_shanghai = spec_id >= SpecId::SHANGHAI;
let is_cancun = spec_id >= SpecId::CANCUN;
let is_prague = spec_id >= SpecId::PRAGUE;
// create a dummy genesis block
let header = Header {
timestamp,
base_fee_per_gas: base_fee,
gas_limit: env.evm_env.block_env.gas_limit,
beneficiary: env.evm_env.block_env.beneficiary,
difficulty: env.evm_env.block_env.difficulty,
blob_gas_used: env.evm_env.block_env.blob_excess_gas_and_price.as_ref().map(|_| 0),
excess_blob_gas: env.evm_env.block_env.blob_excess_gas(),
number: genesis_number,
parent_beacon_block_root: is_cancun.then_some(Default::default()),
withdrawals_root: is_shanghai.then_some(EMPTY_WITHDRAWALS),
requests_hash: is_prague.then_some(EMPTY_REQUESTS_HASH),
..Default::default()
};
let block = create_block(header, Vec::<MaybeImpersonatedTransaction>::new());
let genesis_hash = block.header.hash_slow();
let best_hash = genesis_hash;
let best_number = genesis_number;
let mut blocks = B256HashMap::default();
blocks.insert(genesis_hash, block);
let mut hashes = HashMap::default();
hashes.insert(best_number, genesis_hash);
Self {
blocks,
hashes,
best_hash,
best_number,
genesis_hash,
transactions: Default::default(),
total_difficulty: Default::default(),
}
}
pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
let mut hashes = HashMap::default();
hashes.insert(block_number, block_hash);
Self {
blocks: B256HashMap::default(),
hashes,
best_hash: block_hash,
best_number: block_number,
genesis_hash: Default::default(),
transactions: Default::default(),
total_difficulty,
}
}
/// Unwind the chain state back to the given block in storage.
///
/// The block identified by `block_number` and `block_hash` is __non-inclusive__, i.e. it will
/// remain in the state.
pub fn unwind_to(&mut self, block_number: u64, block_hash: B256) -> Vec<Block> {
let mut removed = vec![];
let best_num: u64 = self.best_number;
for i in (block_number + 1)..=best_num {
if let Some(hash) = self.hashes.get(&i).copied() {
// First remove the block's transactions while the mappings still exist
self.remove_block_transactions_by_number(i);
// Now remove the block from storage (may already be empty of txs) and drop mapping
if let Some(block) = self.blocks.remove(&hash) {
removed.push(block);
}
self.hashes.remove(&i);
}
}
self.best_hash = block_hash;
self.best_number = block_number;
removed
}
pub fn empty() -> Self {
Self {
blocks: Default::default(),
hashes: Default::default(),
best_hash: Default::default(),
best_number: Default::default(),
genesis_hash: Default::default(),
transactions: Default::default(),
total_difficulty: Default::default(),
}
}
/// Removes all stored transactions for the given block number
pub fn remove_block_transactions_by_number(&mut self, num: u64) {
if let Some(hash) = self.hashes.get(&num).copied() {
self.remove_block_transactions(hash);
}
}
/// Removes all stored transactions for the given block hash
pub fn remove_block_transactions(&mut self, block_hash: B256) {
if let Some(block) = self.blocks.get_mut(&block_hash) {
for tx in &block.body.transactions {
self.transactions.remove(&tx.hash());
}
block.body.transactions.clear();
}
}
}
impl BlockchainStorage {
/// Returns the hash for [BlockNumberOrTag]
pub fn hash(&self, number: BlockNumberOrTag) -> Option<B256> {
let slots_in_an_epoch = 32;
match number {
BlockNumberOrTag::Latest => Some(self.best_hash),
BlockNumberOrTag::Earliest => Some(self.genesis_hash),
BlockNumberOrTag::Pending => None,
BlockNumberOrTag::Number(num) => self.hashes.get(&num).copied(),
BlockNumberOrTag::Safe => {
if self.best_number > (slots_in_an_epoch) {
self.hashes.get(&(self.best_number - (slots_in_an_epoch))).copied()
} else {
Some(self.genesis_hash) // treat the genesis block as safe "by definition"
}
}
BlockNumberOrTag::Finalized => {
if self.best_number > (slots_in_an_epoch * 2) {
self.hashes.get(&(self.best_number - (slots_in_an_epoch * 2))).copied()
} else {
Some(self.genesis_hash)
}
}
}
}
pub fn serialized_blocks(&self) -> Vec<SerializableBlock> {
self.blocks.values().map(|block| block.clone().into()).collect()
}
pub fn serialized_transactions(&self) -> Vec<SerializableTransaction> {
self.transactions.values().map(|tx: &MinedTransaction| tx.clone().into()).collect()
}
/// Deserialize and add all blocks data to the backend storage
pub fn load_blocks(&mut self, serializable_blocks: Vec<SerializableBlock>) {
for serializable_block in &serializable_blocks {
let block: Block = serializable_block.clone().into();
let block_hash = block.header.hash_slow();
let block_number = block.header.number;
self.blocks.insert(block_hash, block);
self.hashes.insert(block_number, block_hash);
}
}
/// Deserialize and add all blocks data to the backend storage
pub fn load_transactions(&mut self, serializable_transactions: Vec<SerializableTransaction>) {
for serializable_transaction in &serializable_transactions {
let transaction: MinedTransaction = serializable_transaction.clone().into();
self.transactions.insert(transaction.info.transaction_hash, transaction);
}
}
}
/// A simple in-memory blockchain
#[derive(Clone, Debug)]
pub struct Blockchain {
/// underlying storage that supports concurrent reads
pub storage: Arc<RwLock<BlockchainStorage>>,
}
impl Blockchain {
/// Creates a new storage with a genesis block
pub fn new(
env: &Env,
spec_id: SpecId,
base_fee: Option<u64>,
timestamp: u64,
genesis_number: u64,
) -> Self {
Self {
storage: Arc::new(RwLock::new(BlockchainStorage::new(
env,
spec_id,
base_fee,
timestamp,
genesis_number,
))),
}
}
pub fn forked(block_number: u64, block_hash: B256, total_difficulty: U256) -> Self {
Self {
storage: Arc::new(RwLock::new(BlockchainStorage::forked(
block_number,
block_hash,
total_difficulty,
))),
}
}
/// returns the header hash of given block
pub fn hash(&self, id: BlockId) -> Option<B256> {
match id {
BlockId::Hash(h) => Some(h.block_hash),
BlockId::Number(num) => self.storage.read().hash(num),
}
}
pub fn get_block_by_hash(&self, hash: &B256) -> Option<Block> {
self.storage.read().blocks.get(hash).cloned()
}
pub fn get_transaction_by_hash(&self, hash: &B256) -> Option<MinedTransaction> {
self.storage.read().transactions.get(hash).cloned()
}
/// Returns the total number of blocks
pub fn blocks_count(&self) -> usize {
self.storage.read().blocks.len()
}
}
/// Represents the outcome of mining a new block
#[derive(Clone, Debug)]
pub struct MinedBlockOutcome {
/// The block that was mined
pub block_number: u64,
/// All transactions included in the block
pub included: Vec<Arc<PoolTransaction>>,
/// All transactions that were attempted to be included but were invalid at the time of
/// execution
pub invalid: Vec<Arc<PoolTransaction>>,
}
/// Container type for a mined transaction
#[derive(Clone, Debug)]
pub struct MinedTransaction {
pub info: TransactionInfo,
pub receipt: FoundryReceiptEnvelope,
pub block_hash: B256,
pub block_number: u64,
}
impl MinedTransaction {
/// Returns the traces of the transaction for `trace_transaction`
pub fn parity_traces(&self) -> Vec<LocalizedTransactionTrace> {
ParityTraceBuilder::new(
self.info.traces.clone(),
None,
TracingInspectorConfig::default_parity(),
)
.into_localized_transaction_traces(RethTransactionInfo {
hash: Some(self.info.transaction_hash),
index: Some(self.info.transaction_index),
block_hash: Some(self.block_hash),
block_number: Some(self.block_number),
base_fee: None,
})
}
pub fn ots_internal_operations(&self) -> Vec<InternalOperation> {
self.info
.traces
.iter()
.filter_map(|node| {
let r#type = match node.trace.kind {
_ if node.is_selfdestruct() => OperationType::OpSelfDestruct,
CallKind::Call if !node.trace.value.is_zero() => OperationType::OpTransfer,
CallKind::Create => OperationType::OpCreate,
CallKind::Create2 => OperationType::OpCreate2,
_ => return None,
};
let mut from = node.trace.caller;
let mut to = node.trace.address;
let mut value = node.trace.value;
if node.is_selfdestruct() {
from = node.trace.address;
to = node.trace.selfdestruct_refund_target.unwrap_or_default();
value = node.trace.selfdestruct_transferred_value.unwrap_or_default();
}
Some(InternalOperation { r#type, from, to, value })
})
.collect()
}
}
/// Intermediary Anvil representation of a receipt
#[derive(Clone, Debug)]
pub struct MinedTransactionReceipt {
/// The actual json rpc receipt object
pub inner: FoundryTxReceipt,
/// Output data for the transaction
pub out: Option<Bytes>,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::eth::backend::db::Db;
use alloy_primitives::{Address, hex};
use alloy_rlp::Decodable;
use foundry_primitives::FoundryTxEnvelope;
use revm::{database::DatabaseRef, state::AccountInfo};
#[test]
fn test_interval_update() {
let mut storage = InMemoryBlockStates::default();
storage.update_interval_mine_block_time(Duration::from_secs(1));
assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT * 3);
}
#[test]
fn test_init_state_limits() {
let mut storage = InMemoryBlockStates::default();
assert_eq!(storage.in_memory_limit, DEFAULT_HISTORY_LIMIT);
assert_eq!(storage.min_in_memory_limit, MIN_HISTORY_LIMIT);
assert_eq!(storage.max_on_disk_limit, MAX_ON_DISK_HISTORY_LIMIT);
storage = storage.memory_only();
assert!(storage.is_memory_only());
storage = InMemoryBlockStates::new(1, 0);
assert!(storage.is_memory_only());
assert_eq!(storage.in_memory_limit, 1);
assert_eq!(storage.min_in_memory_limit, 1);
assert_eq!(storage.max_on_disk_limit, 0);
storage = InMemoryBlockStates::new(1, 2);
assert!(!storage.is_memory_only());
assert_eq!(storage.in_memory_limit, 1);
assert_eq!(storage.min_in_memory_limit, 1);
assert_eq!(storage.max_on_disk_limit, 2);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_read_write_cached_state() {
let mut storage = InMemoryBlockStates::new(1, MAX_ON_DISK_HISTORY_LIMIT);
let one = B256::from(U256::from(1));
let two = B256::from(U256::from(2));
let mut state = MemDb::default();
let addr = Address::random();
let info = AccountInfo::from_balance(U256::from(1337));
state.insert_account(addr, info);
storage.insert(one, StateDb::new(state));
storage.insert(two, StateDb::new(MemDb::default()));
// wait for files to be flushed
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
assert_eq!(storage.on_disk_states.len(), 1);
assert!(storage.on_disk_states.contains_key(&one));
let loaded = storage.get_on_disk_state(&one).unwrap();
let acc = loaded.basic_ref(addr).unwrap().unwrap();
assert_eq!(acc.balance, U256::from(1337u64));
}
#[tokio::test(flavor = "multi_thread")]
async fn can_decrease_state_cache_size() {
let limit = 15;
let mut storage = InMemoryBlockStates::new(limit, MAX_ON_DISK_HISTORY_LIMIT);
let num_states = 30;
for idx in 0..num_states {
let mut state = MemDb::default();
let hash = B256::from(U256::from(idx));
let addr = Address::from_word(hash);
let balance = (idx * 2) as u64;
let info = AccountInfo::from_balance(U256::from(balance));
state.insert_account(addr, info);
storage.insert(hash, StateDb::new(state));
}
// wait for files to be flushed
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
let on_disk_states_len = num_states - storage.min_in_memory_limit;
assert_eq!(storage.on_disk_states.len(), on_disk_states_len);
assert_eq!(storage.present.len(), storage.min_in_memory_limit);
for idx in 0..num_states {
let hash = B256::from(U256::from(idx));
let addr = Address::from_word(hash);
let loaded = if idx < on_disk_states_len {
storage.get_on_disk_state(&hash).unwrap()
} else {
storage.get_state(&hash).unwrap()
};
let acc = loaded.basic_ref(addr).unwrap().unwrap();
let balance = (idx * 2) as u64;
assert_eq!(acc.balance, U256::from(balance));
}
}
// verifies that blocks and transactions in BlockchainStorage remain the same when dumped and
// reloaded
#[test]
fn test_storage_dump_reload_cycle() {
let mut dump_storage = BlockchainStorage::empty();
let header = Header { gas_limit: 123456, ..Default::default() };
let bytes_first = &mut &hex::decode("f86b02843b9aca00830186a094d3e8763675e4c425df46cc3b5c0f6cbdac39604687038d7ea4c68000802ba00eb96ca19e8a77102767a41fc85a36afd5c61ccb09911cec5d3e86e193d9c5aea03a456401896b1b6055311536bf00a718568c744d8c1f9df59879e8350220ca18").unwrap()[..];
let tx: MaybeImpersonatedTransaction =
FoundryTxEnvelope::decode(&mut &bytes_first[..]).unwrap().into();
let block = create_block(header.clone(), vec![tx.clone()]);
let block_hash = block.header.hash_slow();
dump_storage.blocks.insert(block_hash, block);
let serialized_blocks = dump_storage.serialized_blocks();
let serialized_transactions = dump_storage.serialized_transactions();
let mut load_storage = BlockchainStorage::empty();
load_storage.load_blocks(serialized_blocks);
load_storage.load_transactions(serialized_transactions);
let loaded_block = load_storage.blocks.get(&block_hash).unwrap();
assert_eq!(loaded_block.header.gas_limit, { header.gas_limit });
let loaded_tx = loaded_block.body.transactions.first().unwrap();
assert_eq!(loaded_tx, &tx);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/mod.rs | crates/anvil/src/eth/backend/mem/mod.rs | //! In-memory blockchain backend.
use self::state::trie_storage;
use super::executor::new_evm_with_inspector;
use crate::{
ForkChoice, NodeConfig, PrecompileFactory,
config::PruneStateHistoryConfig,
eth::{
backend::{
cheats::{CheatEcrecover, CheatsManager},
db::{Db, MaybeFullDatabase, SerializableState, StateDb},
env::Env,
executor::{ExecutedTransactions, TransactionExecutor},
fork::ClientFork,
genesis::GenesisConfig,
mem::{
state::{storage_root, trie_accounts},
storage::MinedTransactionReceipt,
},
notifications::{NewBlockNotification, NewBlockNotifications},
time::{TimeManager, utc_from_secs},
validate::TransactionValidator,
},
error::{BlockchainError, ErrDetail, InvalidTransactionError},
fees::{FeeDetails, FeeManager, MIN_SUGGESTED_PRIORITY_FEE},
macros::node_info,
pool::transactions::PoolTransaction,
sign::build_typed_transaction,
},
mem::{
inspector::AnvilInspector,
storage::{BlockchainStorage, InMemoryBlockStates, MinedBlockOutcome},
},
};
use alloy_chains::NamedChain;
use alloy_consensus::{
Blob, BlockHeader, EnvKzgSettings, Header, Signed, Transaction as TransactionTrait,
TrieAccount, TxEnvelope, Typed2718,
proofs::{calculate_receipt_root, calculate_transaction_root},
transaction::Recovered,
};
use alloy_eip5792::{Capabilities, DelegationCapability};
use alloy_eips::{
BlockNumHash, Encodable2718,
eip4844::{BlobTransactionSidecar, kzg_to_versioned_hash},
eip7840::BlobParams,
eip7910::SystemContract,
};
use alloy_evm::{
Database, Evm, FromRecoveredTx,
eth::EthEvmContext,
overrides::{OverrideBlockHashes, apply_state_overrides},
precompiles::{DynPrecompile, Precompile, PrecompilesMap},
};
use alloy_network::{
AnyHeader, AnyRpcBlock, AnyRpcHeader, AnyRpcTransaction, AnyTxEnvelope, AnyTxType,
EthereumWallet, ReceiptResponse, TransactionBuilder, UnknownTxEnvelope,
UnknownTypedTransaction,
};
use alloy_primitives::{
Address, B256, Bytes, TxHash, TxKind, U64, U256, address, hex, keccak256, logs_bloom,
map::HashMap,
};
use alloy_rpc_types::{
AccessList, Block as AlloyBlock, BlockId, BlockNumberOrTag as BlockNumber, BlockTransactions,
EIP1186AccountProofResponse as AccountProof, EIP1186StorageProof as StorageProof, Filter,
Header as AlloyHeader, Index, Log, Transaction, TransactionReceipt,
anvil::Forking,
request::TransactionRequest,
serde_helpers::JsonStorageKey,
simulate::{SimBlock, SimCallResult, SimulatePayload, SimulatedBlock},
state::EvmOverrides,
trace::{
filter::TraceFilter,
geth::{
FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerType,
GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, NoopFrame,
},
parity::LocalizedTransactionTrace,
},
};
use alloy_serde::{OtherFields, WithOtherFields};
use alloy_signer::Signature;
use alloy_signer_local::PrivateKeySigner;
use alloy_trie::{HashBuilder, Nibbles, proof::ProofRetainer};
use anvil_core::eth::{
block::{Block, BlockInfo},
transaction::{MaybeImpersonatedTransaction, PendingTransaction, TransactionInfo},
wallet::WalletCapabilities,
};
use anvil_rpc::error::RpcError;
use chrono::Datelike;
use eyre::{Context, Result};
use flate2::{Compression, read::GzDecoder, write::GzEncoder};
use foundry_evm::{
backend::{DatabaseError, DatabaseResult, RevertStateSnapshotAction},
constants::DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE,
core::{either_evm::EitherEvm, precompiles::EC_RECOVER},
decode::RevertDecoder,
inspectors::AccessListInspector,
traces::{
CallTraceDecoder, FourByteInspector, GethTraceBuilder, TracingInspector,
TracingInspectorConfig,
},
utils::{get_blob_base_fee_update_fraction, get_blob_base_fee_update_fraction_by_spec_id},
};
use foundry_primitives::{
FoundryReceiptEnvelope, FoundryTransactionRequest, FoundryTxEnvelope, FoundryTxReceipt,
get_deposit_tx_parts,
};
use futures::channel::mpsc::{UnboundedSender, unbounded};
use op_alloy_consensus::DEPOSIT_TX_TYPE_ID;
use op_revm::{OpContext, OpHaltReason, OpTransaction};
use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard};
use revm::{
DatabaseCommit, Inspector,
context::{Block as RevmBlock, BlockEnv, Cfg, TxEnv},
context_interface::{
block::BlobExcessGasAndPrice,
result::{ExecutionResult, Output, ResultAndState},
},
database::{CacheDB, DbAccount, WrapDatabaseRef},
interpreter::InstructionResult,
precompile::{PrecompileSpecId, Precompiles},
primitives::{KECCAK_EMPTY, hardfork::SpecId},
state::AccountInfo,
};
use std::{
collections::BTreeMap,
fmt::Debug,
io::{Read, Write},
ops::{Mul, Not},
path::PathBuf,
sync::Arc,
time::Duration,
};
use storage::{Blockchain, DEFAULT_HISTORY_LIMIT, MinedTransaction};
use tokio::sync::RwLock as AsyncRwLock;
pub mod cache;
pub mod fork_db;
pub mod in_memory_db;
pub mod inspector;
pub mod state;
pub mod storage;
/// Helper trait that combines revm::DatabaseRef with Debug.
/// This is needed because alloy-evm requires Debug on Database implementations.
/// With trait upcasting now stable, we can now upcast from this trait to revm::DatabaseRef.
pub trait DatabaseRef: revm::DatabaseRef<Error = DatabaseError> + Debug {}
impl<T> DatabaseRef for T where T: revm::DatabaseRef<Error = DatabaseError> + Debug {}
impl DatabaseRef for dyn crate::eth::backend::db::Db {}
// Gas per transaction not creating a contract.
pub const MIN_TRANSACTION_GAS: u128 = 21000;
// Gas per transaction creating a contract.
pub const MIN_CREATE_GAS: u128 = 53000;
// Executor
pub const EXECUTOR: Address = address!("0x6634F723546eCc92277e8a2F93d4f248bf1189ea");
pub const EXECUTOR_PK: &str = "0x502d47e1421cb9abef497096728e69f07543232b93ef24de4998e18b5fd9ba0f";
// Experimental ERC20
pub const EXP_ERC20_CONTRACT: Address = address!("0x238c8CD93ee9F8c7Edf395548eF60c0d2e46665E");
// Runtime code of the experimental ERC20 contract
pub const EXP_ERC20_RUNTIME_CODE: &[u8] = &hex!(
"60806040526004361015610010575b005b5f3560e01c806306fdde03146106f7578063095ea7b31461068c57806318160ddd1461066757806323b872dd146105a15780632bb7c5951461050e578063313ce567146104f35780633644e5151461045557806340c10f191461043057806370a08231146103fe5780637ecebe00146103cc57806395d89b4114610366578063a9059cbb146102ea578063ad0c8fdd146102ad578063d505accf146100fb5763dd62ed3e0361000e57346100f75760403660031901126100f7576100d261075c565b6100da610772565b602052637f5e9f20600c525f5260206034600c2054604051908152f35b5f80fd5b346100f75760e03660031901126100f75761011461075c565b61011c610772565b6084359160643560443560ff851685036100f757610138610788565b60208101906e04578706572696d656e74455243323608c1b8252519020908242116102a0576040519360018060a01b03169460018060a01b03169565383775081901600e52855f5260c06020600c20958654957f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f8252602082019586528660408301967fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc688528b6060850198468a528c608087019330855260a08820602e527f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c9885252528688525260a082015220604e526042602c205f5260ff1660205260a43560405260c43560605260208060805f60015afa93853d5103610293577f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92594602094019055856303faf4f960a51b176040526034602c2055a3005b63ddafbaef5f526004601cfd5b631a15a3cc5f526004601cfd5b5f3660031901126100f7576103e834023481046103e814341517156102d65761000e90336107ac565b634e487b7160e01b5f52601160045260245ffd5b346100f75760403660031901126100f75761030361075c565b602435906387a211a2600c52335f526020600c2080548084116103595783900390555f526020600c20818154019055602052600c5160601c335f51602061080d5f395f51905f52602080a3602060405160018152f35b63f4d678b85f526004601cfd5b346100f7575f3660031901126100f757604051604081019080821067ffffffffffffffff8311176103b8576103b491604052600381526204558560ec1b602082015260405191829182610732565b0390f35b634e487b7160e01b5f52604160045260245ffd5b346100f75760203660031901126100f7576103e561075c565b633
8377508600c525f52602080600c2054604051908152f35b346100f75760203660031901126100f75761041761075c565b6387a211a2600c525f52602080600c2054604051908152f35b346100f75760403660031901126100f75761000e61044c61075c565b602435906107ac565b346100f7575f3660031901126100f757602060a0610471610788565b828101906e04578706572696d656e74455243323608c1b8252519020604051907f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f8252838201527fc89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc6604082015246606082015230608082015220604051908152f35b346100f7575f3660031901126100f757602060405160128152f35b346100f75760203660031901126100f7576004356387a211a2600c52335f526020600c2090815490818111610359575f80806103e88487839688039055806805345cdf77eb68f44c54036805345cdf77eb68f44c5580835282335f51602061080d5f395f51905f52602083a304818115610598575b3390f11561058d57005b6040513d5f823e3d90fd5b506108fc610583565b346100f75760603660031901126100f7576105ba61075c565b6105c2610772565b604435908260601b33602052637f5e9f208117600c526034600c20908154918219610643575b506387a211a2915017600c526020600c2080548084116103595783900390555f526020600c20818154019055602052600c5160601c9060018060a01b03165f51602061080d5f395f51905f52602080a3602060405160018152f35b82851161065a57846387a211a293039055856105e8565b6313be252b5f526004601cfd5b346100f7575f3660031901126100f75760206805345cdf77eb68f44c54604051908152f35b346100f75760403660031901126100f7576106a561075c565b60243590602052637f5e9f20600c52335f52806034600c20555f52602c5160601c337f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92560205fa3602060405160018152f35b346100f7575f3660031901126100f7576103b4610712610788565b6e04578706572696d656e74455243323608c1b6020820152604051918291825b602060409281835280519182918282860152018484015e5f828201840152601f01601f1916010190565b600435906001600160a01b03821682036100f757565b602435906001600160a01b03821682036100f757565b604051906040820182811067ffffffffffffffff8211176103b857604052600f8252565b6805345cdf77eb68f44c548281019081106107ff576805345cdf77eb68f44c556387a
211a2600c525f526020600c20818154019055602052600c5160601c5f5f51602061080d5f395f51905f52602080a3565b63e5cfe9575f526004601cfdfeddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3efa2646970667358221220fbe302881d9891005ba1448ba48547cc1cb17dea1a5c4011dfcb035de325bb1d64736f6c634300081b0033"
);
pub type State = foundry_evm::utils::StateChangeset;
/// A block request, which includes the Pool Transactions if it's Pending
#[derive(Debug)]
pub enum BlockRequest {
Pending(Vec<Arc<PoolTransaction>>),
Number(u64),
}
impl BlockRequest {
pub fn block_number(&self) -> BlockNumber {
match *self {
Self::Pending(_) => BlockNumber::Pending,
Self::Number(n) => BlockNumber::Number(n),
}
}
}
/// Gives access to the [revm::Database]
#[derive(Clone, Debug)]
pub struct Backend {
/// Access to [`revm::Database`] abstraction.
///
/// This will be used in combination with [`alloy_evm::Evm`] and is responsible for feeding
/// data to the evm during its execution.
///
/// At time of writing, there are two different types of `Db`:
/// - [`MemDb`](crate::mem::in_memory_db::MemDb): everything is stored in memory
/// - [`ForkDb`](crate::mem::fork_db::ForkedDatabase): forks off a remote client, missing
/// data is retrieved via RPC-calls
///
/// In order to commit changes to the [`revm::Database`], the [`alloy_evm::Evm`] requires
/// mutable access, which requires a write-lock from this `db`. In forking mode, the time
/// during which the write-lock is active depends on whether the `ForkDb` can provide all
/// requested data from memory or whether it has to retrieve it via RPC calls first. This
/// means that it potentially blocks for some time, even taking into account the rate
/// limits of RPC endpoints. Therefore the `Db` is guarded by a `tokio::sync::RwLock` here
/// so calls that need to read from it, while it's currently written to, don't block. E.g.
/// a new block is currently mined and a new [`Self::set_storage_at()`] request is being
/// executed.
db: Arc<AsyncRwLock<Box<dyn Db>>>,
/// stores all block related data in memory.
blockchain: Blockchain,
/// Historic states of previous blocks.
states: Arc<RwLock<InMemoryBlockStates>>,
/// Env data of the chain
env: Arc<RwLock<Env>>,
/// This is set if this is currently forked off another client.
fork: Arc<RwLock<Option<ClientFork>>>,
/// Provides time related info, like timestamp.
time: TimeManager,
/// Contains state of custom overrides.
cheats: CheatsManager,
/// Contains fee data.
fees: FeeManager,
/// Initialised genesis.
genesis: GenesisConfig,
/// Listeners for new blocks that get notified when a new block was imported.
new_block_listeners: Arc<Mutex<Vec<UnboundedSender<NewBlockNotification>>>>,
/// Keeps track of active state snapshots at a specific block.
active_state_snapshots: Arc<Mutex<HashMap<U256, (u64, B256)>>>,
enable_steps_tracing: bool,
print_logs: bool,
print_traces: bool,
/// Recorder used for decoding traces, used together with print_traces
call_trace_decoder: Arc<CallTraceDecoder>,
/// How to keep history state
prune_state_history_config: PruneStateHistoryConfig,
/// max number of blocks with transactions in memory
transaction_block_keeper: Option<usize>,
node_config: Arc<AsyncRwLock<NodeConfig>>,
/// Slots in an epoch
slots_in_an_epoch: u64,
/// Precompiles to inject to the EVM.
precompile_factory: Option<Arc<dyn PrecompileFactory>>,
/// Prevent race conditions during mining
mining: Arc<tokio::sync::Mutex<()>>,
// === wallet === //
capabilities: Arc<RwLock<WalletCapabilities>>,
executor_wallet: Arc<RwLock<Option<EthereumWallet>>>,
/// Disable pool balance checks
disable_pool_balance_checks: bool,
}
impl Backend {
/// Initialises the balance of the given accounts
#[expect(clippy::too_many_arguments)]
pub async fn with_genesis(
db: Arc<AsyncRwLock<Box<dyn Db>>>,
env: Arc<RwLock<Env>>,
genesis: GenesisConfig,
fees: FeeManager,
fork: Arc<RwLock<Option<ClientFork>>>,
enable_steps_tracing: bool,
print_logs: bool,
print_traces: bool,
call_trace_decoder: Arc<CallTraceDecoder>,
prune_state_history_config: PruneStateHistoryConfig,
max_persisted_states: Option<usize>,
transaction_block_keeper: Option<usize>,
automine_block_time: Option<Duration>,
cache_path: Option<PathBuf>,
node_config: Arc<AsyncRwLock<NodeConfig>>,
) -> Result<Self> {
// if this is a fork then adjust the blockchain storage
let blockchain = if let Some(fork) = fork.read().as_ref() {
trace!(target: "backend", "using forked blockchain at {}", fork.block_number());
Blockchain::forked(fork.block_number(), fork.block_hash(), fork.total_difficulty())
} else {
let env = env.read();
Blockchain::new(
&env,
env.evm_env.cfg_env.spec,
fees.is_eip1559().then(|| fees.base_fee()),
genesis.timestamp,
genesis.number,
)
};
// Sync EVM block.number with genesis for non-fork mode.
// Fork mode syncs in setup_fork_db_config() instead.
if fork.read().is_none() {
let mut write_env = env.write();
write_env.evm_env.block_env.number = U256::from(genesis.number);
}
let start_timestamp = if let Some(fork) = fork.read().as_ref() {
fork.timestamp()
} else {
genesis.timestamp
};
let mut states = if prune_state_history_config.is_config_enabled() {
// if prune state history is enabled, configure the state cache only for memory
prune_state_history_config
.max_memory_history
.map(|limit| InMemoryBlockStates::new(limit, 0))
.unwrap_or_default()
.memory_only()
} else if max_persisted_states.is_some() {
max_persisted_states
.map(|limit| InMemoryBlockStates::new(DEFAULT_HISTORY_LIMIT, limit))
.unwrap_or_default()
} else {
Default::default()
};
if let Some(cache_path) = cache_path {
states = states.disk_path(cache_path);
}
let (slots_in_an_epoch, precompile_factory, disable_pool_balance_checks) = {
let cfg = node_config.read().await;
(cfg.slots_in_an_epoch, cfg.precompile_factory.clone(), cfg.disable_pool_balance_checks)
};
let backend = Self {
db,
blockchain,
states: Arc::new(RwLock::new(states)),
env,
fork,
time: TimeManager::new(start_timestamp),
cheats: Default::default(),
new_block_listeners: Default::default(),
fees,
genesis,
active_state_snapshots: Arc::new(Mutex::new(Default::default())),
enable_steps_tracing,
print_logs,
print_traces,
call_trace_decoder,
prune_state_history_config,
transaction_block_keeper,
node_config,
slots_in_an_epoch,
precompile_factory,
mining: Arc::new(tokio::sync::Mutex::new(())),
capabilities: Arc::new(RwLock::new(WalletCapabilities(Default::default()))),
executor_wallet: Arc::new(RwLock::new(None)),
disable_pool_balance_checks,
};
if let Some(interval_block_time) = automine_block_time {
backend.update_interval_mine_block_time(interval_block_time);
}
// Note: this can only fail in forking mode, in which case we can't recover
backend.apply_genesis().await.wrap_err("failed to create genesis")?;
Ok(backend)
}
/// Writes the CREATE2 deployer code directly to the database at the address provided.
pub async fn set_create2_deployer(&self, address: Address) -> DatabaseResult<()> {
self.set_code(address, Bytes::from_static(DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE)).await?;
Ok(())
}
/// Get the capabilities of the wallet.
///
/// Currently the only capability is delegation.
///
/// See `anvil_core::eth::wallet::Capabilities` for construction helpers.
pub(crate) fn get_capabilities(&self) -> WalletCapabilities {
self.capabilities.read().clone()
}
/// Updates memory limits that should be more strict when auto-mine is enabled
pub(crate) fn update_interval_mine_block_time(&self, block_time: Duration) {
self.states.write().update_interval_mine_block_time(block_time)
}
/// Adds an address to the wallet's delegation capability.
pub(crate) fn add_capability(&self, address: Address) {
let chain_id = self.env.read().evm_env.cfg_env.chain_id;
let mut capabilities = self.capabilities.write();
let mut capability = capabilities
.get(chain_id)
.cloned()
.unwrap_or(Capabilities { delegation: DelegationCapability { addresses: vec![] } });
capability.delegation.addresses.push(address);
capabilities.0.insert(chain_id, capability);
}
pub(crate) fn set_executor(&self, executor_pk: String) -> Result<Address, BlockchainError> {
let signer: PrivateKeySigner =
executor_pk.parse().map_err(|_| RpcError::invalid_params("Invalid private key"))?;
let executor = signer.address();
let wallet = EthereumWallet::new(signer);
*self.executor_wallet.write() = Some(wallet);
Ok(executor)
}
/// Applies the configured genesis settings
///
/// This will fund, create the genesis accounts
async fn apply_genesis(&self) -> Result<(), DatabaseError> {
trace!(target: "backend", "setting genesis balances");
if self.fork.read().is_some() {
// fetch all account first
let mut genesis_accounts_futures = Vec::with_capacity(self.genesis.accounts.len());
for address in self.genesis.accounts.iter().copied() {
let db = Arc::clone(&self.db);
// The forking Database backend can handle concurrent requests, we can fetch all dev
// accounts concurrently by spawning the job to a new task
genesis_accounts_futures.push(tokio::task::spawn(async move {
let db = db.read().await;
let info = db.basic_ref(address)?.unwrap_or_default();
Ok::<_, DatabaseError>((address, info))
}));
}
let genesis_accounts = futures::future::join_all(genesis_accounts_futures).await;
let mut db = self.db.write().await;
for res in genesis_accounts {
let (address, mut info) = res.unwrap()?;
info.balance = self.genesis.balance;
db.insert_account(address, info.clone());
}
} else {
let mut db = self.db.write().await;
for (account, info) in self.genesis.account_infos() {
db.insert_account(account, info);
}
// insert the new genesis hash to the database so it's available for the next block in
// the evm
db.insert_block_hash(U256::from(self.best_number()), self.best_hash());
}
let db = self.db.write().await;
// apply the genesis.json alloc
self.genesis.apply_genesis_json_alloc(db)?;
trace!(target: "backend", "set genesis balances");
Ok(())
}
/// Sets the account to impersonate
///
/// Returns `true` if the account is already impersonated
pub fn impersonate(&self, addr: Address) -> bool {
if self.cheats.impersonated_accounts().contains(&addr) {
return true;
}
// Ensure EIP-3607 is disabled
let mut env = self.env.write();
env.evm_env.cfg_env.disable_eip3607 = true;
self.cheats.impersonate(addr)
}
/// Removes the account that from the impersonated set
///
/// If the impersonated `addr` is a contract then we also reset the code here
pub fn stop_impersonating(&self, addr: Address) {
self.cheats.stop_impersonating(&addr);
}
/// If set to true will make every account impersonated
pub fn auto_impersonate_account(&self, enabled: bool) {
self.cheats.set_auto_impersonate_account(enabled);
}
/// Returns the configured fork, if any
pub fn get_fork(&self) -> Option<ClientFork> {
self.fork.read().clone()
}
/// Returns the database
pub fn get_db(&self) -> &Arc<AsyncRwLock<Box<dyn Db>>> {
&self.db
}
/// Returns the `AccountInfo` from the database
pub async fn get_account(&self, address: Address) -> DatabaseResult<AccountInfo> {
Ok(self.db.read().await.basic_ref(address)?.unwrap_or_default())
}
/// Whether we're forked off some remote client
pub fn is_fork(&self) -> bool {
self.fork.read().is_some()
}
/// Resets the fork to a fresh state.
///
/// If the backend is not currently forked, `forking` must carry a
/// `json_rpc_url` that is used to initialize forking first; otherwise an
/// invalid-params error is returned.
///
/// # Errors
///
/// Returns an error if forking is disabled and no RPC URL was provided, if
/// the fork block cannot be fetched, or if reconfiguring the fork fails.
pub async fn reset_fork(&self, forking: Forking) -> Result<(), BlockchainError> {
    if !self.is_fork() {
        // fix: clone only the URL instead of cloning the entire `Forking` value
        if let Some(eth_rpc_url) = forking.json_rpc_url.clone() {
            let mut env = self.env.read().clone();
            let (db, config) = {
                let mut node_config = self.node_config.write().await;
                // we want to force the correct base fee for the next block during
                // `setup_fork_db_config`
                node_config.base_fee.take();
                node_config.setup_fork_db_config(eth_rpc_url, &mut env, &self.fees).await?
            };
            *self.db.write().await = Box::new(db);
            let fork = ClientFork::new(config, Arc::clone(&self.db));
            *self.env.write() = env;
            *self.fork.write() = Some(fork);
        } else {
            return Err(RpcError::invalid_params(
                "Forking not enabled and RPC URL not provided to start forking",
            )
            .into());
        }
    }
    if let Some(fork) = self.get_fork() {
        let block_number =
            forking.block_number.map(BlockNumber::from).unwrap_or(BlockNumber::Latest);
        // reset the fork entirely and reapply the genesis config
        fork.reset(forking.json_rpc_url.clone(), block_number).await?;
        let fork_block_number = fork.block_number();
        let fork_block = fork
            .block_by_number(fork_block_number)
            .await?
            .ok_or(BlockchainError::BlockNotFound)?;
        // update all settings related to the forked block
        {
            if let Some(fork_url) = forking.json_rpc_url {
                self.reset_block_number(fork_url, fork_block_number).await?;
            } else {
                // If rpc url is unspecified, then update the fork with the new block number and
                // existing rpc url, this updates the cache path
                {
                    let maybe_fork_url = { self.node_config.read().await.eth_rpc_url.clone() };
                    if let Some(fork_url) = maybe_fork_url {
                        self.reset_block_number(fork_url, fork_block_number).await?;
                    }
                }
                let gas_limit = self.node_config.read().await.fork_gas_limit(&fork_block);
                let mut env = self.env.write();
                env.evm_env.cfg_env.chain_id = fork.chain_id();
                env.evm_env.block_env = BlockEnv {
                    number: U256::from(fork_block_number),
                    timestamp: U256::from(fork_block.header.timestamp),
                    gas_limit,
                    difficulty: fork_block.header.difficulty,
                    prevrandao: Some(fork_block.header.mix_hash.unwrap_or_default()),
                    // Keep previous `beneficiary` and `basefee` value
                    beneficiary: env.evm_env.block_env.beneficiary,
                    basefee: env.evm_env.block_env.basefee,
                    ..env.evm_env.block_env.clone()
                };
                // this is the base fee of the current block, but we need the base fee of
                // the next block
                let next_block_base_fee = self.fees.get_next_block_base_fee_per_gas(
                    fork_block.header.gas_used,
                    gas_limit,
                    fork_block.header.base_fee_per_gas.unwrap_or_default(),
                );
                self.fees.set_base_fee(next_block_base_fee);
            }
            // reset the time to the timestamp of the forked block
            self.time.reset(fork_block.header.timestamp);
            // also reset the total difficulty
            self.blockchain.storage.write().total_difficulty = fork.total_difficulty();
        }
        // reset storage
        *self.blockchain.storage.write() = BlockchainStorage::forked(
            fork.block_number(),
            fork.block_hash(),
            fork.total_difficulty(),
        );
        self.states.write().clear();
        self.db.write().await.clear();
        self.apply_genesis().await?;
        trace!(target: "backend", "reset fork");
        Ok(())
    } else {
        Err(RpcError::invalid_params("Forking not enabled").into())
    }
}
/// Resets the backend to a fresh in-memory state, clearing all existing data.
///
/// Drops any configured fork, rewinds the environment and storage to genesis,
/// clears the database and historical states, and reapplies the genesis
/// allocation. Fees are reset to their initial values.
pub async fn reset_to_in_mem(&self) -> Result<(), BlockchainError> {
    // Clear the fork if any exists
    *self.fork.write() = None;
    // Get environment and genesis config
    // NOTE: this clone is taken *before* the env reset below and is the env
    // handed to `BlockchainStorage::new` — intentional? confirm.
    let env = self.env.read().clone();
    let genesis_timestamp = self.genesis.timestamp;
    let genesis_number = self.genesis.number;
    let spec_id = self.spec_id();
    // Reset environment to genesis state
    {
        let mut env = self.env.write();
        env.evm_env.block_env.number = U256::from(genesis_number);
        env.evm_env.block_env.timestamp = U256::from(genesis_timestamp);
        // Reset other block env fields to their defaults
        env.evm_env.block_env.basefee = self.fees.base_fee();
        env.evm_env.block_env.prevrandao = Some(B256::ZERO);
    }
    // Clear all storage and reinitialize with genesis
    let base_fee = if self.fees.is_eip1559() { Some(self.fees.base_fee()) } else { None };
    *self.blockchain.storage.write() =
        BlockchainStorage::new(&env, spec_id, base_fee, genesis_timestamp, genesis_number);
    self.states.write().clear();
    // Clear the database
    self.db.write().await.clear();
    // Reset time manager
    self.time.reset(genesis_timestamp);
    // Reset fees to initial state
    if self.fees.is_eip1559() {
        self.fees.set_base_fee(crate::eth::fees::INITIAL_BASE_FEE);
    }
    self.fees.set_gas_price(crate::eth::fees::INITIAL_GAS_PRICE);
    // Reapply genesis configuration
    self.apply_genesis().await?;
    trace!(target: "backend", "reset to fresh in-memory state");
    Ok(())
}
/// Re-initializes the fork database at `fork_block_number` using `fork_url`.
///
/// Pins the node's fork choice to the given block, rebuilds the forked
/// database/config, and swaps in the new db, fork handle, and environment.
async fn reset_block_number(
    &self,
    fork_url: String,
    fork_block_number: u64,
) -> Result<(), BlockchainError> {
    let mut node_config = self.node_config.write().await;
    // `ForkChoice::Block` takes an i128; u64 always fits, so the cast is lossless
    node_config.fork_choice = Some(ForkChoice::Block(fork_block_number as i128));
    let mut env = self.env.read().clone();
    let (forked_db, client_fork_config) =
        node_config.setup_fork_db_config(fork_url, &mut env, &self.fees).await?;
    *self.db.write().await = Box::new(forked_db);
    let fork = ClientFork::new(client_fork_config, Arc::clone(&self.db));
    *self.fork.write() = Some(fork);
    *self.env.write() = env;
    Ok(())
}
/// Returns the `TimeManager` responsible for block timestamps.
pub fn time(&self) -> &TimeManager {
    &self.time
}
/// Returns the `CheatsManager` responsible for executing cheatcodes
/// (e.g. impersonation state).
pub fn cheats(&self) -> &CheatsManager {
    &self.cheats
}
/// Whether to skip blob validation.
///
/// Blob validation is skipped when auto-impersonation is on, or when the
/// given `impersonator` address is in the impersonated set.
pub fn skip_blob_validation(&self, impersonator: Option<Address>) -> bool {
    if self.cheats().auto_impersonate_accounts() {
        return true;
    }
    match impersonator {
        Some(addr) => self.cheats().impersonated_accounts().contains(&addr),
        None => false,
    }
}
/// Returns the `FeeManager` that manages fees/pricing (base fee, gas price).
pub fn fees(&self) -> &FeeManager {
    &self.fees
}
/// The env data of the blockchain, behind a shared `RwLock` handle.
pub fn env(&self) -> &Arc<RwLock<Env>> {
    &self.env
}
/// Returns the current best (head) block hash of the chain.
pub fn best_hash(&self) -> B256 {
    self.blockchain.storage.read().best_hash
}
/// Returns the current best (head) block number of the chain.
pub fn best_number(&self) -> u64 {
    self.blockchain.storage.read().best_number
}
/// Sets the block number in the EVM environment.
///
/// Note: this only updates `block_env.number`; blockchain storage is untouched.
pub fn set_block_number(&self, number: u64) {
    let mut env = self.env.write();
    env.evm_env.block_env.number = U256::from(number);
}
/// Returns the client coinbase (block beneficiary) address.
pub fn coinbase(&self) -> Address {
    self.env.read().evm_env.block_env.beneficiary
}
/// Returns the chain id of the node.
// fix(doc): previous comment was a copy-paste of `coinbase`'s doc.
pub fn chain_id(&self) -> U256 {
    U256::from(self.env.read().evm_env.cfg_env.chain_id)
}
/// Sets the chain id in the EVM configuration.
pub fn set_chain_id(&self, chain_id: u64) {
    self.env.write().evm_env.cfg_env.chain_id = chain_id;
}
/// Returns the genesis timestamp, used as the genesis time for the Beacon API.
pub fn genesis_time(&self) -> u64 {
    self.genesis.timestamp
}
/// Returns balance of the given account.
pub async fn current_balance(&self, address: Address) -> DatabaseResult<U256> {
Ok(self.get_account(address).await?.balance)
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/in_memory_db.rs | crates/anvil/src/eth/backend/mem/in_memory_db.rs | //! The in memory DB
use crate::{
eth::backend::db::{
Db, MaybeForkedDatabase, MaybeFullDatabase, SerializableAccountRecord, SerializableBlock,
SerializableHistoricalStates, SerializableState, SerializableTransaction, StateDb,
},
mem::state::state_root,
};
use alloy_primitives::{Address, B256, U256, map::HashMap};
use alloy_rpc_types::BlockId;
use foundry_evm::backend::{BlockchainDb, DatabaseResult, StateSnapshot};
use revm::{
context::BlockEnv,
database::{DatabaseRef, DbAccount},
state::AccountInfo,
};
// reexport for convenience
pub use foundry_evm::backend::MemDb;
use foundry_evm::backend::RevertStateSnapshotAction;
impl Db for MemDb {
    /// Inserts (or overwrites) the account info for `address`.
    fn insert_account(&mut self, address: Address, account: AccountInfo) {
        self.inner.insert_account_info(address, account)
    }
    /// Sets a single storage slot for `address`.
    fn set_storage_at(&mut self, address: Address, slot: B256, val: B256) -> DatabaseResult<()> {
        self.inner.insert_account_storage(address, slot.into(), val.into())
    }
    /// Records a block number → hash mapping in the cache.
    fn insert_block_hash(&mut self, number: U256, hash: B256) {
        self.inner.cache.block_hashes.insert(number, hash);
    }
    /// Serializes the full in-memory state (accounts + storage) together with
    /// the provided block context, blocks, transactions and historical states.
    ///
    /// For accounts whose cached info does not carry the code, the bytecode is
    /// resolved via `code_by_hash_ref`.
    fn dump_state(
        &self,
        at: BlockEnv,
        best_number: u64,
        blocks: Vec<SerializableBlock>,
        transactions: Vec<SerializableTransaction>,
        historical_states: Option<SerializableHistoricalStates>,
    ) -> DatabaseResult<Option<SerializableState>> {
        let accounts = self
            .inner
            .cache
            .accounts
            .clone()
            .into_iter()
            .map(|(k, v)| -> DatabaseResult<_> {
                let code = if let Some(code) = v.info.code {
                    code
                } else {
                    // code not cached inline — look it up by hash
                    self.inner.code_by_hash_ref(v.info.code_hash)?
                };
                Ok((
                    k,
                    SerializableAccountRecord {
                        nonce: v.info.nonce,
                        balance: v.info.balance,
                        code: code.original_bytes(),
                        storage: v.storage.into_iter().map(|(k, v)| (k.into(), v.into())).collect(),
                    },
                ))
            })
            .collect::<Result<_, _>>()?;
        Ok(Some(SerializableState {
            block: Some(at),
            accounts,
            best_block_number: Some(best_number),
            blocks,
            transactions,
            historical_states,
        }))
    }
    /// Creates a new snapshot by cloning the entire inner db and returns its id.
    fn snapshot_state(&mut self) -> U256 {
        let id = self.state_snapshots.insert(self.inner.clone());
        trace!(target: "backend::memdb", "Created new state snapshot {}", id);
        id
    }
    /// Reverts to the snapshot `id`, if it exists.
    ///
    /// If `action` is "keep", the snapshot is re-inserted at the same id so it
    /// can be reverted to again; otherwise it is consumed.
    fn revert_state(&mut self, id: U256, action: RevertStateSnapshotAction) -> bool {
        if let Some(state_snapshot) = self.state_snapshots.remove(id) {
            if action.is_keep() {
                self.state_snapshots.insert_at(state_snapshot.clone(), id);
            }
            self.inner = state_snapshot;
            trace!(target: "backend::memdb", "Reverted state snapshot {}", id);
            true
        } else {
            warn!(target: "backend::memdb", "No state snapshot to revert for {}", id);
            false
        }
    }
    /// Computes the state root over the cached accounts.
    fn maybe_state_root(&self) -> Option<B256> {
        Some(state_root(&self.inner.cache.accounts))
    }
    /// Returns a `StateDb` over a clone of the current inner db
    /// (snapshots are not carried over).
    fn current_state(&self) -> StateDb {
        StateDb::new(Self { inner: self.inner.clone(), ..Default::default() })
    }
}
impl MaybeFullDatabase for MemDb {
    /// `MemDb` always has the full account map available.
    fn maybe_as_full_db(&self) -> Option<&HashMap<Address, DbAccount>> {
        Some(&self.inner.cache.accounts)
    }
    /// Empties the db and returns its contents as a `StateSnapshot`.
    fn clear_into_state_snapshot(&mut self) -> StateSnapshot {
        self.inner.clear_into_state_snapshot()
    }
    /// Clones the current state into a `StateSnapshot` without clearing.
    fn read_as_state_snapshot(&self) -> StateSnapshot {
        self.inner.read_as_state_snapshot()
    }
    /// Removes all state from the db.
    fn clear(&mut self) {
        self.inner.clear();
    }
    /// Replaces the db contents with the given snapshot.
    fn init_from_state_snapshot(&mut self, snapshot: StateSnapshot) {
        self.inner.init_from_state_snapshot(snapshot)
    }
}
// A plain in-memory db has no remote fork backing it, so all fork-related
// operations are unsupported.
impl MaybeForkedDatabase for MemDb {
    fn maybe_reset(&mut self, _url: Option<String>, _block_number: BlockId) -> Result<(), String> {
        Err("not supported".to_string())
    }
    fn maybe_flush_cache(&self) -> Result<(), String> {
        Err("not supported".to_string())
    }
    fn maybe_inner(&self) -> Result<&BlockchainDb, String> {
        Err("not supported".to_string())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::{Bytes, address};
    use revm::{bytecode::Bytecode, primitives::KECCAK_EMPTY};
    use std::collections::BTreeMap;
    // verifies that all substantial aspects of a loaded account remain the same after an account
    // is dumped and reloaded
    #[test]
    fn test_dump_reload_cycle() {
        let test_addr: Address = address!("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266");
        let mut dump_db = MemDb::default();
        let contract_code = Bytecode::new_raw(Bytes::from("fake contract code"));
        dump_db.insert_account(
            test_addr,
            AccountInfo {
                balance: U256::from(123456),
                code_hash: KECCAK_EMPTY,
                code: Some(contract_code.clone()),
                nonce: 1234,
            },
        );
        dump_db
            .set_storage_at(test_addr, U256::from(1234567).into(), U256::from(1).into())
            .unwrap();
        // blocks dumping/loading tested in storage.rs
        let state = dump_db
            .dump_state(Default::default(), 0, Vec::new(), Vec::new(), Default::default())
            .unwrap()
            .unwrap();
        // round-trip: load the dumped state into a fresh db and compare fields
        let mut load_db = MemDb::default();
        load_db.load_state(state).unwrap();
        let loaded_account = load_db.basic_ref(test_addr).unwrap().unwrap();
        assert_eq!(loaded_account.balance, U256::from(123456));
        assert_eq!(load_db.code_by_hash_ref(loaded_account.code_hash).unwrap(), contract_code);
        assert_eq!(loaded_account.nonce, 1234);
        assert_eq!(load_db.storage_ref(test_addr, U256::from(1234567)).unwrap(), U256::from(1));
    }
    // verifies that multiple accounts can be loaded at a time, and storage is merged within those
    // accounts as well.
    #[test]
    fn test_load_state_merge() {
        let test_addr: Address = address!("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266");
        let test_addr2: Address = address!("0x70997970c51812dc3a010c7d01b50e0d17dc79c8");
        let contract_code = Bytecode::new_raw(Bytes::from("fake contract code"));
        let mut db = MemDb::default();
        db.insert_account(
            test_addr,
            AccountInfo {
                balance: U256::from(123456),
                code_hash: KECCAK_EMPTY,
                code: Some(contract_code.clone()),
                nonce: 1234,
            },
        );
        db.set_storage_at(test_addr, U256::from(1234567).into(), U256::from(1).into()).unwrap();
        db.set_storage_at(test_addr, U256::from(1234568).into(), U256::from(2).into()).unwrap();
        // build a state that overlaps the existing account and adds a new one
        let mut new_state = SerializableState::default();
        new_state.accounts.insert(
            test_addr2,
            SerializableAccountRecord {
                balance: Default::default(),
                code: Default::default(),
                nonce: 1,
                storage: Default::default(),
            },
        );
        let mut new_storage = BTreeMap::default();
        new_storage.insert(U256::from(1234568).into(), U256::from(5).into());
        new_state.accounts.insert(
            test_addr,
            SerializableAccountRecord {
                balance: U256::from(100100),
                code: contract_code.bytes()[..contract_code.len()].to_vec().into(),
                nonce: 100,
                storage: new_storage,
            },
        );
        db.load_state(new_state).unwrap();
        let loaded_account = db.basic_ref(test_addr).unwrap().unwrap();
        let loaded_account2 = db.basic_ref(test_addr2).unwrap().unwrap();
        assert_eq!(loaded_account2.nonce, 1);
        // balance is overwritten by the loaded state, nonce keeps the higher
        // existing value, and storage slots are merged per-slot
        assert_eq!(loaded_account.balance, U256::from(100100));
        assert_eq!(db.code_by_hash_ref(loaded_account.code_hash).unwrap(), contract_code);
        assert_eq!(loaded_account.nonce, 1234);
        assert_eq!(db.storage_ref(test_addr, U256::from(1234567)).unwrap(), U256::from(1));
        assert_eq!(db.storage_ref(test_addr, U256::from(1234568)).unwrap(), U256::from(5));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/inspector.rs | crates/anvil/src/eth/backend/mem/inspector.rs | //! Anvil specific [`revm::Inspector`] implementation
use crate::eth::macros::node_info;
use alloy_primitives::{Address, Log, U256};
use foundry_evm::{
call_inspectors,
decode::decode_console_logs,
inspectors::{LogCollector, TracingInspector},
traces::{
CallTraceDecoder, SparsedTraceArena, TracingInspectorConfig, render_trace_arena_inner,
},
};
use revm::{
Inspector,
context::ContextTr,
inspector::JournalExt,
interpreter::{
CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter,
interpreter::EthInterpreter,
},
};
use revm_inspectors::transfer::TransferInspector;
use std::sync::Arc;
/// The [`revm::Inspector`] used when transacting in the evm.
///
/// All sub-inspectors are optional and disabled by default; use the
/// `with_*` builder methods to enable them.
#[derive(Clone, Debug, Default)]
pub struct AnvilInspector {
    /// Collects all traces
    pub tracer: Option<TracingInspector>,
    /// Collects all `console.sol` logs
    pub log_collector: Option<LogCollector>,
    /// Collects all internal ETH transfers as ERC20 transfer events.
    pub transfer: Option<TransferInspector>,
}
impl AnvilInspector {
    /// Called after inspecting the evm.
    ///
    /// This will log all collected `console.sol` logs.
    pub fn print_logs(&self) {
        if let Some(collector) = &self.log_collector {
            print_logs(&collector.logs);
        }
    }
    /// Consumes the type and prints the traces.
    pub fn into_print_traces(mut self, decoder: Arc<CallTraceDecoder>) {
        if let Some(a) = self.tracer.take() {
            print_traces(a, decoder);
        }
    }
    /// Called after inspecting the evm.
    ///
    /// This will log all traces (clones the tracer; see `into_print_traces`
    /// for the consuming variant).
    pub fn print_traces(&self, decoder: Arc<CallTraceDecoder>) {
        if let Some(a) = self.tracer.clone() {
            print_traces(a, decoder);
        }
    }
    /// Configures the `Tracer` [`revm::Inspector`] (all options, steps disabled).
    pub fn with_tracing(mut self) -> Self {
        self.tracer = Some(TracingInspector::new(TracingInspectorConfig::all().set_steps(false)));
        self
    }
    /// Configures the `TracingInspector` [`revm::Inspector`] with a custom config.
    pub fn with_tracing_config(mut self, config: TracingInspectorConfig) -> Self {
        self.tracer = Some(TracingInspector::new(config));
        self
    }
    /// Enables steps recording for `Tracer`.
    pub fn with_steps_tracing(mut self) -> Self {
        self.tracer = Some(TracingInspector::new(TracingInspectorConfig::all().with_state_diffs()));
        self
    }
    /// Configures the `Tracer` [`revm::Inspector`] with a log collector.
    pub fn with_log_collector(mut self) -> Self {
        self.log_collector = Some(Default::default());
        self
    }
    /// Configures the `Tracer` [`revm::Inspector`] with a transfer event collector.
    pub fn with_transfers(mut self) -> Self {
        self.transfer = Some(TransferInspector::new(false).with_logs(true));
        self
    }
    /// Configures the `Tracer` [`revm::Inspector`] with a trace printer.
    ///
    /// NOTE(review): this installs the same config as `with_steps_tracing` —
    /// confirm whether that duplication is intentional.
    pub fn with_trace_printer(mut self) -> Self {
        self.tracer = Some(TracingInspector::new(TracingInspectorConfig::all().with_state_diffs()));
        self
    }
}
/// Prints the traces for the inspector.
///
/// Caution: This blocks on call trace decoding.
///
/// # Panics
///
/// If called outside a tokio runtime (both `block_in_place` and
/// `Handle::current` require one).
fn print_traces(tracer: TracingInspector, decoder: Arc<CallTraceDecoder>) {
    // `block_in_place` moves blocking work off the async scheduler so the
    // nested `block_on` does not stall other tasks on this worker thread.
    let arena = tokio::task::block_in_place(move || {
        tokio::runtime::Handle::current().block_on(async move {
            let mut arena = tracer.into_traces();
            decoder.populate_traces(arena.nodes_mut()).await;
            arena
        })
    });
    // no nodes are ignored; render the full arena
    let traces = SparsedTraceArena { arena, ignored: Default::default() };
    let trace = render_trace_arena_inner(&traces, false, true);
    node_info!(Traces = %format!("\n{}", trace));
}
// Fan-out implementation: each hook forwards to whichever sub-inspectors are
// enabled via `call_inspectors!`. Hooks that only the tracer cares about
// (steps, call_end/create_end) skip the other collectors.
impl<CTX> Inspector<CTX, EthInterpreter> for AnvilInspector
where
    CTX: ContextTr<Journal: JournalExt>,
{
    fn initialize_interp(&mut self, interp: &mut Interpreter, ecx: &mut CTX) {
        call_inspectors!([&mut self.tracer], |inspector| {
            inspector.initialize_interp(interp, ecx);
        });
    }
    fn step(&mut self, interp: &mut Interpreter, ecx: &mut CTX) {
        call_inspectors!([&mut self.tracer], |inspector| {
            inspector.step(interp, ecx);
        });
    }
    fn step_end(&mut self, interp: &mut Interpreter, ecx: &mut CTX) {
        call_inspectors!([&mut self.tracer], |inspector| {
            inspector.step_end(interp, ecx);
        });
    }
    // the clone is only redundant when a single inspector is enabled
    #[allow(clippy::redundant_clone)]
    fn log(&mut self, ecx: &mut CTX, log: Log) {
        call_inspectors!([&mut self.tracer, &mut self.log_collector], |inspector| {
            inspector.log(ecx, log.clone());
        });
    }
    #[allow(clippy::redundant_clone)]
    fn log_full(&mut self, interp: &mut Interpreter, ecx: &mut CTX, log: Log) {
        call_inspectors!([&mut self.tracer, &mut self.log_collector], |inspector| {
            inspector.log_full(interp, ecx, log.clone());
        });
    }
    // `#[ret]` short-circuits: the first inspector returning `Some` overrides
    // the call outcome; otherwise fall through to `None` (execute normally).
    fn call(&mut self, ecx: &mut CTX, inputs: &mut CallInputs) -> Option<CallOutcome> {
        call_inspectors!(
            #[ret]
            [&mut self.tracer, &mut self.log_collector, &mut self.transfer],
            |inspector| inspector.call(ecx, inputs).map(Some),
        );
        None
    }
    fn call_end(&mut self, ecx: &mut CTX, inputs: &CallInputs, outcome: &mut CallOutcome) {
        if let Some(tracer) = &mut self.tracer {
            tracer.call_end(ecx, inputs, outcome);
        }
    }
    fn create(&mut self, ecx: &mut CTX, inputs: &mut CreateInputs) -> Option<CreateOutcome> {
        call_inspectors!(
            #[ret]
            [&mut self.tracer, &mut self.transfer],
            |inspector| inspector.create(ecx, inputs).map(Some),
        );
        None
    }
    fn create_end(&mut self, ecx: &mut CTX, inputs: &CreateInputs, outcome: &mut CreateOutcome) {
        if let Some(tracer) = &mut self.tracer {
            tracer.create_end(ecx, inputs, outcome);
        }
    }
    fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) {
        call_inspectors!([&mut self.tracer, &mut self.transfer], |inspector| {
            Inspector::<CTX, EthInterpreter>::selfdestruct(inspector, contract, target, value)
        });
    }
}
/// Emits every decoded `console.sol` log via the EVM console log target.
pub fn print_logs(logs: &[Log]) {
    decode_console_logs(logs).into_iter().for_each(|message| {
        tracing::info!(target: crate::logging::EVM_CONSOLE_LOG_TARGET, "{}", message);
    });
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/eth/backend/mem/cache.rs | crates/anvil/src/eth/backend/mem/cache.rs | use crate::config::anvil_tmp_dir;
use alloy_primitives::B256;
use foundry_evm::backend::StateSnapshot;
use std::{
io,
path::{Path, PathBuf},
};
use tempfile::TempDir;
/// On disk state cache
///
/// A basic tempdir which stores states on disk. The tempdir is created
/// lazily on first access (see `with_cache_file`).
pub struct DiskStateCache {
    /// The path where to create the tempdir in
    pub(crate) temp_path: Option<PathBuf>,
    /// Holds the temp dir object; `None` until first use.
    pub(crate) temp_dir: Option<TempDir>,
}
impl DiskStateCache {
/// Specify the path where to create the tempdir in
pub fn with_path(self, temp_path: PathBuf) -> Self {
Self { temp_path: Some(temp_path), temp_dir: None }
}
/// Returns the cache file for the given hash
fn with_cache_file<F, R>(&mut self, hash: B256, f: F) -> Option<R>
where
F: FnOnce(PathBuf) -> R,
{
if self.temp_dir.is_none() {
let tmp_dir = self
.temp_path
.as_ref()
.map(|p| -> io::Result<TempDir> {
std::fs::create_dir_all(p)?;
build_tmp_dir(Some(p))
})
.unwrap_or_else(|| build_tmp_dir(None));
match tmp_dir {
Ok(temp_dir) => {
trace!(target: "backend", path=?temp_dir.path(), "created disk state cache dir");
self.temp_dir = Some(temp_dir);
}
Err(err) => {
error!(target: "backend", %err, "failed to create disk state cache dir");
}
}
}
if let Some(temp_dir) = &self.temp_dir {
let path = temp_dir.path().join(format!("{hash:?}.json"));
Some(f(path))
} else {
None
}
}
/// Stores the snapshot for the given hash
///
/// Note: this writes the state on a new spawned task
///
/// Caution: this requires a running tokio Runtime.
pub fn write(&mut self, hash: B256, state: StateSnapshot) {
self.with_cache_file(hash, |file| {
tokio::task::spawn_blocking(move || {
match foundry_common::fs::write_json_file(&file, &state) {
Ok(_) => {
trace!(target: "backend", ?hash, "wrote state json file");
}
Err(err) => {
error!(target: "backend", %err, ?hash, "Failed to load state snapshot");
}
};
});
});
}
/// Loads the snapshot file for the given hash
///
/// Returns None if it doesn't exist or deserialization failed
pub fn read(&mut self, hash: B256) -> Option<StateSnapshot> {
self.with_cache_file(hash, |file| {
match foundry_common::fs::read_json_file::<StateSnapshot>(&file) {
Ok(state) => {
trace!(target: "backend", ?hash,"loaded cached state");
Some(state)
}
Err(err) => {
error!(target: "backend", %err, ?hash, "Failed to load state snapshot");
None
}
}
})
.flatten()
}
/// Removes the cache file for the given hash, if it exists
pub fn remove(&mut self, hash: B256) {
self.with_cache_file(hash, |file| {
foundry_common::fs::remove_file(file).map_err(|err| {
error!(target: "backend", %err, %hash, "Failed to remove state snapshot");
})
});
}
}
impl Default for DiskStateCache {
    /// Uses the anvil-wide temp dir location; the tempdir itself is created lazily.
    fn default() -> Self {
        Self { temp_path: anvil_tmp_dir(), temp_dir: None }
    }
}
/// Returns the temporary dir for the cached state.
///
/// This creates a timestamp-prefixed temp dir, e.g. `anvil-state-06-11-2022-12-50`,
/// inside `p` when given, otherwise in the system temp location.
fn build_tmp_dir(p: Option<&Path>) -> io::Result<TempDir> {
    let prefix = chrono::offset::Utc::now().format("anvil-state-%d-%m-%Y-%H-%M").to_string();
    let mut builder = tempfile::Builder::new();
    builder.prefix(&prefix);
    match p {
        Some(dir) => builder.tempdir_in(dir),
        None => builder.tempdir(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;
    // verifies that the cache dir is created with the expected prefix, both
    // inside a provided parent dir and in the default temp location
    #[test]
    fn can_build_temp_dir() {
        let dir = tempdir().unwrap();
        let p = dir.path();
        let cache_dir = build_tmp_dir(Some(p)).unwrap();
        assert!(
            cache_dir.path().file_name().unwrap().to_str().unwrap().starts_with("anvil-state-")
        );
        let cache_dir = build_tmp_dir(None).unwrap();
        assert!(
            cache_dir.path().file_name().unwrap().to_str().unwrap().starts_with("anvil-state-")
        );
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/rpc_handlers.rs | crates/anvil/src/server/rpc_handlers.rs | //! Contains RPC handlers
use crate::{
EthApi,
eth::error::to_rpc_result,
pubsub::{EthSubscription, LogsSubscription},
};
use alloy_rpc_types::{
FilteredParams,
pubsub::{Params, SubscriptionKind},
};
use anvil_core::eth::{EthPubSub, EthRequest, EthRpcCall, subscription::SubscriptionId};
use anvil_rpc::{error::RpcError, response::ResponseResult};
use anvil_server::{PubSubContext, PubSubRpcHandler, RpcHandler};
/// A `RpcHandler` that expects `EthRequest` rpc calls via http
#[derive(Clone)]
pub struct HttpEthRpcHandler {
    /// Access to the node's `EthApi`, used to execute incoming requests
    api: EthApi,
}
impl HttpEthRpcHandler {
    /// Creates a new instance of the handler using the given `EthApi`
    pub fn new(api: EthApi) -> Self {
        Self { api }
    }
}
#[async_trait::async_trait]
impl RpcHandler for HttpEthRpcHandler {
    type Request = EthRequest;
    /// Delegates each decoded request straight to the node's `EthApi`.
    async fn on_request(&self, request: Self::Request) -> ResponseResult {
        self.api.execute(request).await
    }
}
/// A `RpcHandler` that expects `EthRequest` rpc calls and `EthPubSub` via pubsub connection
#[derive(Clone)]
pub struct PubSubEthRpcHandler {
    /// Access to the node's `EthApi`, used to execute requests and create subscriptions
    api: EthApi,
}
impl PubSubEthRpcHandler {
    /// Creates a new instance of the handler using the given `EthApi`
    pub fn new(api: EthApi) -> Self {
        Self { api }
    }
    /// Invoked for an ethereum pubsub rpc call.
    ///
    /// Handles `eth_subscribe` (logs, newHeads, newPendingTransactions) and
    /// `eth_unsubscribe`; syncing subscriptions are not implemented.
    async fn on_pub_sub(&self, pubsub: EthPubSub, cx: PubSubContext<Self>) -> ResponseResult {
        // fresh id for a potential new subscription; unused (shadowed) in the
        // unsubscribe arm
        let id = SubscriptionId::random_hex();
        trace!(target: "rpc::ws", "received pubsub request {:?}", pubsub);
        match pubsub {
            EthPubSub::EthUnSubscribe(id) => {
                trace!(target: "rpc::ws", "canceling subscription {:?}", id);
                // report whether an active subscription was actually removed
                let canceled = cx.remove_subscription(&id).is_some();
                ResponseResult::Success(canceled.into())
            }
            EthPubSub::EthSubscribe(kind, raw_params) => {
                // a filter is only meaningful for logs subscriptions
                let filter = match &*raw_params {
                    Params::None => None,
                    Params::Logs(filter) => Some(filter.clone()),
                    Params::Bool(_) => None,
                };
                let params = FilteredParams::new(filter.map(|b| *b));
                let subscription = match kind {
                    SubscriptionKind::Logs => {
                        // a bare boolean param is invalid for logs
                        if raw_params.is_bool() {
                            return ResponseResult::Error(RpcError::invalid_params(
                                "Expected params for logs subscription",
                            ));
                        }
                        trace!(target: "rpc::ws", "received logs subscription {:?}", params);
                        let blocks = self.api.new_block_notifications();
                        let storage = self.api.storage_info();
                        EthSubscription::Logs(Box::new(LogsSubscription {
                            blocks,
                            storage,
                            filter: params,
                            queued: Default::default(),
                            id: id.clone(),
                        }))
                    }
                    SubscriptionKind::NewHeads => {
                        trace!(target: "rpc::ws", "received header subscription");
                        let blocks = self.api.new_block_notifications();
                        let storage = self.api.storage_info();
                        EthSubscription::Header(blocks, storage, id.clone())
                    }
                    SubscriptionKind::NewPendingTransactions => {
                        trace!(target: "rpc::ws", "received pending transactions subscription");
                        // optional boolean: `true` streams full tx bodies,
                        // `false`/absent streams only hashes
                        match *raw_params {
                            Params::Bool(true) => EthSubscription::FullPendingTransactions(
                                self.api.full_pending_transactions(),
                                id.clone(),
                            ),
                            Params::Bool(false) | Params::None => {
                                EthSubscription::PendingTransactions(
                                    self.api.new_ready_transactions(),
                                    id.clone(),
                                )
                            }
                            _ => {
                                return ResponseResult::Error(RpcError::invalid_params(
                                    "Expected boolean parameter for newPendingTransactions",
                                ));
                            }
                        }
                    }
                    SubscriptionKind::Syncing => {
                        return RpcError::internal_error_with("Not implemented").into();
                    }
                };
                cx.add_subscription(id.clone(), subscription);
                trace!(target: "rpc::ws", "created new subscription: {:?}", id);
                // respond with the subscription id
                to_rpc_result(id)
            }
        }
    }
}
#[async_trait::async_trait]
impl PubSubRpcHandler for PubSubEthRpcHandler {
    type Request = EthRpcCall;
    type SubscriptionId = SubscriptionId;
    type Subscription = EthSubscription;
    /// Routes plain requests to `EthApi` and pubsub calls to `on_pub_sub`.
    async fn on_request(&self, request: Self::Request, cx: PubSubContext<Self>) -> ResponseResult {
        trace!(target: "rpc", "received pubsub request {:?}", request);
        match request {
            EthRpcCall::Request(request) => self.api.execute(*request).await,
            EthRpcCall::PubSub(pubsub) => self.on_pub_sub(pubsub, cx).await,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/mod.rs | crates/anvil/src/server/mod.rs | //! This module provides the infrastructure to launch an Ethereum JSON-RPC server
//! (via HTTP, WebSocket, and IPC) and Beacon Node REST API.
use crate::{EthApi, IpcTask};
use anvil_server::{ServerConfig, ipc::IpcEndpoint};
use axum::Router;
use futures::StreamExt;
use rpc_handlers::{HttpEthRpcHandler, PubSubEthRpcHandler};
use std::{io, net::SocketAddr, pin::pin};
use tokio::net::TcpListener;
mod beacon;
mod rpc_handlers;
/// Configures a server that handles [`EthApi`] related JSON-RPC calls via HTTP and WS.
///
/// The returned future creates a new server, binding it to the given address, which returns another
/// future that runs it. Binding happens eagerly (errors surface here); serving
/// starts only when the inner future is awaited.
pub async fn serve(
    addr: SocketAddr,
    api: EthApi,
    config: ServerConfig,
) -> io::Result<impl Future<Output = io::Result<()>>> {
    let tcp_listener = TcpListener::bind(addr).await?;
    Ok(serve_on(tcp_listener, api, config))
}
/// Serves [`EthApi`] related JSON-RPC calls via HTTP and WS on an already-bound listener.
///
/// Runs until the server shuts down or an I/O error occurs.
pub async fn serve_on(
    tcp_listener: TcpListener,
    api: EthApi,
    config: ServerConfig,
) -> io::Result<()> {
    axum::serve(tcp_listener, router(api, config).into_make_service()).await
}
/// Builds the combined [`axum::Router`]: JSON-RPC over HTTP/WS plus the
/// Beacon REST API routes.
pub fn router(api: EthApi, config: ServerConfig) -> Router {
    // Beacon REST API router
    let beacon_router = beacon::router(api.clone());
    // JSON-RPC handlers for plain HTTP and for WS pubsub connections
    let http_handler = HttpEthRpcHandler::new(api.clone());
    let ws_handler = PubSubEthRpcHandler::new(api);
    // Merge the JSON-RPC router with the beacon routes
    anvil_server::http_ws_router(config, http_handler, ws_handler).merge(beacon_router)
}
/// Launches an ipc server at the given path in a new task
///
/// # Panics
///
/// Panics if setting up the IPC connection was unsuccessful
/// (see [`try_spawn_ipc`] for the fallible variant).
#[track_caller]
pub fn spawn_ipc(api: EthApi, path: String) -> IpcTask {
    try_spawn_ipc(api, path).expect("failed to establish ipc connection")
}
/// Launches an ipc server at the given path in a new task.
///
/// Returns the spawned accept-loop task; each incoming connection is spawned
/// as its own task and runs detached.
pub fn try_spawn_ipc(api: EthApi, path: String) -> io::Result<IpcTask> {
    let handler = PubSubEthRpcHandler::new(api);
    let ipc = IpcEndpoint::new(handler, path);
    // setting up the endpoint can fail (e.g. path in use) — surface it here
    let incoming = ipc.incoming()?;
    let task = tokio::task::spawn(async move {
        let mut incoming = pin!(incoming);
        while let Some(stream) = incoming.next().await {
            trace!(target: "ipc", "new ipc connection");
            // handle each connection concurrently; the handle is intentionally dropped
            tokio::task::spawn(stream);
        }
    });
    Ok(task)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/beacon/handlers.rs | crates/anvil/src/server/beacon/handlers.rs | use super::{error::BeaconError, utils::must_be_ssz};
use crate::eth::EthApi;
use alloy_eips::BlockId;
use alloy_primitives::{B256, aliases::B32};
use alloy_rpc_types_beacon::{
genesis::{GenesisData, GenesisResponse},
sidecar::GetBlobsResponse,
};
use axum::{
Json,
extract::{Path, Query, State},
http::HeaderMap,
response::{IntoResponse, Response},
};
use ssz::Encode;
use std::{collections::HashMap, str::FromStr as _};
/// Handles incoming Beacon API requests for blob sidecars
///
/// This endpoint is deprecated and always responds with a deprecation error.
/// Use `GET /eth/v1/beacon/blobs/{block_id}` instead.
///
/// GET /eth/v1/beacon/blob_sidecars/{block_id}
pub async fn handle_get_blob_sidecars(
    State(_api): State<EthApi>,
    Path(_block_id): Path<String>,
    Query(_params): Query<HashMap<String, String>>,
) -> Response {
    BeaconError::deprecated_endpoint_with_hint("Use `GET /eth/v1/beacon/blobs/{block_id}` instead.")
        .into_response()
}
/// Handles incoming Beacon API requests for blobs
///
/// Responds with SSZ when the `Accept` header prefers `application/octet-stream`,
/// otherwise JSON.
///
/// GET /eth/v1/beacon/blobs/{block_id}
pub async fn handle_get_blobs(
    headers: HeaderMap,
    State(api): State<EthApi>,
    Path(block_id): Path<String>,
    Query(versioned_hashes): Query<HashMap<String, String>>,
) -> Response {
    // Parse block_id from path parameter
    let Ok(block_id) = BlockId::from_str(&block_id) else {
        return BeaconError::invalid_block_id(block_id).into_response();
    };
    // Parse `versioned_hashes` from the query string as a comma-separated list
    // (?versioned_hashes=0x..,0x..); entries that fail to parse are silently
    // dropped. NOTE(review): repeated query params (?versioned_hashes=..&
    // versioned_hashes=..) are not supported by HashMap-based extraction —
    // confirm against the Beacon API spec.
    let versioned_hashes: Vec<B256> = versioned_hashes
        .get("versioned_hashes")
        .map(|s| s.split(',').filter_map(|hash| B256::from_str(hash.trim()).ok()).collect())
        .unwrap_or_default();
    // Get the blob sidecars using existing EthApi logic
    match api.anvil_get_blobs_by_block_id(block_id, versioned_hashes) {
        Ok(Some(blobs)) => {
            if must_be_ssz(&headers) {
                blobs.as_ssz_bytes().into_response()
            } else {
                Json(GetBlobsResponse {
                    execution_optimistic: false,
                    finalized: false,
                    data: blobs,
                })
                .into_response()
            }
        }
        Ok(None) => BeaconError::block_not_found().into_response(),
        Err(_) => BeaconError::internal_error().into_response(),
    }
}
/// Handles incoming Beacon API requests for genesis details
///
/// Only returns the `genesis_time`, other fields are set to zero.
///
/// GET /eth/v1/beacon/genesis
pub async fn handle_get_genesis(State(api): State<EthApi>) -> Response {
match api.anvil_get_genesis_time() {
Ok(genesis_time) => Json(GenesisResponse {
data: GenesisData {
genesis_time,
genesis_validators_root: B256::ZERO,
genesis_fork_version: B32::ZERO,
},
})
.into_response(),
Err(_) => BeaconError::internal_error().into_response(),
}
}
#[cfg(test)]
mod tests {
use super::*;
use axum::http::HeaderValue;
fn header_map_with_accept(accept: &str) -> HeaderMap {
let mut headers = HeaderMap::new();
headers.insert(axum::http::header::ACCEPT, HeaderValue::from_str(accept).unwrap());
headers
}
#[test]
fn test_must_be_ssz() {
let test_cases = vec![
(None, false, "no Accept header"),
(Some("application/json"), false, "JSON only"),
(Some("application/octet-stream"), true, "octet-stream only"),
(Some("application/octet-stream;q=1.0,application/json;q=0.9"), true, "SSZ preferred"),
(
Some("application/json;q=1.0,application/octet-stream;q=0.9"),
false,
"JSON preferred",
),
(Some("application/octet-stream;q=0.5,application/json;q=0.5"), false, "equal quality"),
(
Some("text/html;q=0.9, application/octet-stream;q=1.0, application/json;q=0.8"),
true,
"multiple types",
),
(
Some("application/octet-stream ; q=1.0 , application/json ; q=0.9"),
true,
"whitespace handling",
),
(Some("application/octet-stream, application/json;q=0.9"), true, "default quality"),
];
for (accept_header, expected, description) in test_cases {
let headers = match accept_header {
None => HeaderMap::new(),
Some(header) => header_map_with_accept(header),
};
assert_eq!(
must_be_ssz(&headers),
expected,
"Test case '{}' failed: expected {}, got {}",
description,
expected,
!expected
);
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/beacon/error.rs | crates/anvil/src/server/beacon/error.rs | //! Beacon API error types
use axum::{
Json,
http::StatusCode,
response::{IntoResponse, Response},
};
use serde::{Deserialize, Serialize};
use std::{
borrow::Cow,
fmt::{self, Display},
};
/// Represents a Beacon API error response
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct BeaconError {
/// HTTP status code
#[serde(skip)]
pub status_code: u16,
/// Error code
pub code: BeaconErrorCode,
/// Error message
pub message: Cow<'static, str>,
}
impl BeaconError {
/// Creates a new beacon error with the given code
pub fn new(code: BeaconErrorCode, message: impl Into<Cow<'static, str>>) -> Self {
let status_code = code.status_code();
Self { status_code, code, message: message.into() }
}
/// Helper function to create a 400 Bad Request error for invalid block ID
pub fn invalid_block_id(block_id: impl Display) -> Self {
Self::new(BeaconErrorCode::BadRequest, format!("Invalid block ID: {block_id}"))
}
/// Helper function to create a 404 Not Found error for block not found
pub fn block_not_found() -> Self {
Self::new(BeaconErrorCode::NotFound, "Block not found")
}
/// Helper function to create a 500 Internal Server Error
pub fn internal_error() -> Self {
Self::new(BeaconErrorCode::InternalError, "Internal server error")
}
/// Helper function to create a 410 Gone error for deprecated endpoints
pub fn deprecated_endpoint_with_hint(hint: impl Display) -> Self {
Self::new(BeaconErrorCode::Gone, format!("This endpoint is deprecated. {hint}"))
}
/// Converts to an Axum response
pub fn into_response(self) -> Response {
let status =
StatusCode::from_u16(self.status_code).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
(
status,
Json(serde_json::json!({
"code": self.code as u16,
"message": self.message,
})),
)
.into_response()
}
}
impl fmt::Display for BeaconError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}: {}", self.code.as_str(), self.message)
}
}
impl std::error::Error for BeaconError {}
impl IntoResponse for BeaconError {
fn into_response(self) -> Response {
Self::into_response(self)
}
}
/// Beacon API error codes following the beacon chain specification
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[repr(u16)]
pub enum BeaconErrorCode {
BadRequest = 400,
NotFound = 404,
Gone = 410,
InternalError = 500,
}
impl BeaconErrorCode {
/// Returns the HTTP status code for this error
pub const fn status_code(&self) -> u16 {
*self as u16
}
/// Returns a string representation of the error code
pub const fn as_str(&self) -> &'static str {
match self {
Self::BadRequest => "Bad Request",
Self::NotFound => "Not Found",
Self::Gone => "Gone",
Self::InternalError => "Internal Server Error",
}
}
}
impl fmt::Display for BeaconErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_beacon_error_codes() {
assert_eq!(BeaconErrorCode::BadRequest.status_code(), 400);
assert_eq!(BeaconErrorCode::NotFound.status_code(), 404);
assert_eq!(BeaconErrorCode::InternalError.status_code(), 500);
}
#[test]
fn test_beacon_error_display() {
let err = BeaconError::invalid_block_id("current");
assert_eq!(err.to_string(), "Bad Request: Invalid block ID: current");
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/beacon/utils.rs | crates/anvil/src/server/beacon/utils.rs | use hyper::HeaderMap;
/// Helper function to determine if the Accept header indicates a preference for SSZ (octet-stream)
/// over JSON.
pub fn must_be_ssz(headers: &HeaderMap) -> bool {
headers
.get(axum::http::header::ACCEPT)
.and_then(|v| v.to_str().ok())
.map(|accept_str| {
let mut octet_stream_q = 0.0;
let mut json_q = 0.0;
// Parse each media type in the Accept header
for media_type in accept_str.split(',') {
let media_type = media_type.trim();
let quality = media_type
.split(';')
.find_map(|param| {
let param = param.trim();
if let Some(q) = param.strip_prefix("q=") {
q.parse::<f32>().ok()
} else {
None
}
})
.unwrap_or(1.0); // Default quality factor is 1.0
if media_type.starts_with("application/octet-stream") {
octet_stream_q = quality;
} else if media_type.starts_with("application/json") {
json_q = quality;
}
}
// Prefer octet-stream if it has higher quality factor
octet_stream_q > json_q
})
.unwrap_or(false)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/src/server/beacon/mod.rs | crates/anvil/src/server/beacon/mod.rs | //! Beacon Node REST API implementation for Anvil.
use axum::{Router, routing::get};
use crate::eth::EthApi;
mod error;
mod handlers;
mod utils;
/// Configures an [`axum::Router`] that handles Beacon REST API calls.
pub fn router(api: EthApi) -> Router {
Router::new()
.route("/eth/v1/beacon/blob_sidecars/{block_id}", get(handlers::handle_get_blob_sidecars))
.route("/eth/v1/beacon/blobs/{block_id}", get(handlers::handle_get_blobs))
.route("/eth/v1/beacon/genesis", get(handlers::handle_get_genesis))
.with_state(api)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/genesis.rs | crates/anvil/tests/it/genesis.rs | //! genesis.json tests
use crate::fork::fork_config;
use alloy_genesis::Genesis;
use alloy_primitives::{Address, U256};
use alloy_provider::Provider;
use anvil::{NodeConfig, spawn};
use std::str::FromStr;
const GENESIS: &str = r#"{
"config": {
"chainId": 19763,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"ethash": {}
},
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000",
"gasLimit": "0x80000000",
"difficulty": "0x20000",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {
"71562b71999873db5b286df957af199ec94617f7": {
"balance": "0xffffffffffffffffffffffffff"
}
},
"number": 73,
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
"#;
#[tokio::test(flavor = "multi_thread")]
async fn can_apply_genesis() {
let genesis: Genesis = serde_json::from_str(GENESIS).unwrap();
let (_api, handle) = spawn(NodeConfig::test().with_genesis(Some(genesis))).await;
let provider = handle.http_provider();
assert_eq!(provider.get_chain_id().await.unwrap(), 19763u64);
let addr: Address = Address::from_str("71562b71999873db5b286df957af199ec94617f7").unwrap();
let balance = provider.get_balance(addr).await.unwrap();
let expected: U256 = U256::from_str_radix("ffffffffffffffffffffffffff", 16).unwrap();
assert_eq!(balance, expected);
let block_number = provider.get_block_number().await.unwrap();
assert_eq!(block_number, 73u64);
}
// <https://github.com/foundry-rs/foundry/issues/10059>
// <https://github.com/foundry-rs/foundry/issues/10238>
#[tokio::test(flavor = "multi_thread")]
async fn chain_id_precedence() {
// Order: --chain-id > fork-chain-id > Genesis > default.
// --chain-id > Genesis.
let genesis: Genesis = serde_json::from_str(GENESIS).unwrap();
let (_api, handle) =
spawn(NodeConfig::test().with_genesis(Some(genesis.clone())).with_chain_id(Some(300u64)))
.await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 300u64);
// fork > Genesis.
let (_api, handle) = spawn(fork_config().with_genesis(Some(genesis.clone()))).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 1);
// --chain-id > fork.
let (_api, handle) = spawn(fork_config().with_chain_id(Some(300u64))).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 300u64);
// fork
let (_api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 1);
// Genesis
let (_api, handle) = spawn(NodeConfig::test().with_genesis(Some(genesis))).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 19763u64);
// default
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 31337);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/proof.rs | crates/anvil/tests/it/proof.rs | //! tests for `eth_getProof`
use alloy_primitives::{Address, B256, Bytes, U256, address, fixed_bytes};
use anvil::{NodeConfig, eth::EthApi, spawn};
use std::{collections::BTreeMap, str::FromStr};
async fn verify_account_proof(
api: &EthApi,
address: Address,
proof: impl IntoIterator<Item = &str>,
) {
let expected_proof =
proof.into_iter().map(Bytes::from_str).collect::<Result<Vec<_>, _>>().unwrap();
let proof = api.get_proof(address, Vec::new(), None).await.unwrap();
assert_eq!(proof.account_proof, expected_proof);
}
async fn verify_storage_proof(
api: &EthApi,
address: Address,
slot: B256,
proof: impl IntoIterator<Item = &str>,
) {
let expected_proof =
proof.into_iter().map(Bytes::from_str).collect::<Result<Vec<_>, _>>().unwrap();
let proof = api.get_proof(address, vec![slot], None).await.unwrap();
assert_eq!(proof.storage_proof[0].proof, expected_proof);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_account_proof() {
let (api, _handle) = spawn(NodeConfig::empty_state()).await;
api.anvil_set_balance(
address!("0x2031f89b3ea8014eb51a78c316e42af3e0d7695f"),
U256::from(45000000000000000000_u128),
)
.await
.unwrap();
api.anvil_set_balance(address!("0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2"), U256::from(1))
.await
.unwrap();
api.anvil_set_balance(
address!("0x62b0dd4aab2b1a0a04e279e2b828791a10755528"),
U256::from(1100000000000000000_u128),
)
.await
.unwrap();
api.anvil_set_balance(
address!("0x1ed9b1dd266b607ee278726d324b855a093394a6"),
U256::from(120000000000000000_u128),
)
.await
.unwrap();
verify_account_proof(&api, address!("0x2031f89b3ea8014eb51a78c316e42af3e0d7695f"), [
"0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
"0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
"0xf8719f31355ec1c8f7e26bb3ccbcb0b75d870d15846c0b98e5cc452db46c37faea40b84ff84d80890270801d946c940000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
]).await;
verify_account_proof(&api, address!("0x33f0fc440b8477fcfbe9d0bf8649e7dea9baedb2"), [
"0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
"0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
"0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54",
"0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080",
"0xf8679e207781e762f3577784bab7491fcc43e291ce5a356b9bc517ac52eed3a37ab846f8448001a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
]).await;
verify_account_proof(&api, address!("0x62b0dd4aab2b1a0a04e279e2b828791a10755528"), [
"0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
"0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
"0xf8709f3936599f93b769acf90c7178fd2ddcac1b5b4bc9949ee5a04b7e0823c2446eb84ef84c80880f43fc2c04ee0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
]).await;
verify_account_proof(&api, address!("0x1ed9b1dd266b607ee278726d324b855a093394a6"), [
"0xe48200a7a040f916999be583c572cc4dd369ec53b0a99f7de95f13880cf203d98f935ed1b3",
"0xf87180a04fb9bab4bb88c062f32452b7c94c8f64d07b5851d44a39f1e32ba4b1829fdbfb8080808080a0b61eeb2eb82808b73c4ad14140a2836689f4ab8445d69dd40554eaf1fce34bc080808080808080a0dea230ff2026e65de419288183a340125b04b8405cc61627b3b4137e2260a1e880",
"0xe48200d3a0ef957210bca5b9b402d614eb8408c88cfbf4913eb6ab83ca233c8b8f0e626b54",
"0xf851808080a02743a5addaf4cf9b8c0c073e1eaa555deaaf8c41cb2b41958e88624fa45c2d908080808080a0bfbf6937911dfb88113fecdaa6bde822e4e99dae62489fcf61a91cb2f36793d680808080808080",
"0xf86f9e207a32b8ab5eb4b043c65b1f00c93f517bc8883c5cd31baf8e8a279475e3b84ef84c808801aa535d3d0c0000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
]).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn test_storage_proof() {
let target = address!("0x1ed9b1dd266b607ee278726d324b855a093394a6");
let (api, _handle) = spawn(NodeConfig::empty_state()).await;
let storage: BTreeMap<U256, B256> =
serde_json::from_str(include_str!("../../test-data/storage_sample.json")).unwrap();
for (key, value) in storage {
api.anvil_set_storage_at(target, key, value).await.unwrap();
}
verify_storage_proof(&api, target, fixed_bytes!("0000000000000000000000000000000000000000000000000000000000000022"), [
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080",
"0xf85180a0776aa456ba9c5008e03b82b841a9cf2fc1e8578cfacd5c9015804eae315f17fb80808080808080808080808080a072e3e284d47badbb0a5ca1421e1179d3ea90cc10785b26b74fb8a81f0f9e841880",
"0xf843a020035b26e3e9eee00e0d72fd1ee8ddca6894550dca6916ea2ac6baa90d11e510a1a0f5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b"
]).await;
verify_storage_proof(&api, target, fixed_bytes!("0000000000000000000000000000000000000000000000000000000000000023"), [
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080",
"0xf8518080808080a0d546c4ca227a267d29796643032422374624ed109b3d94848c5dc06baceaee76808080808080a027c48e210ccc6e01686be2d4a199d35f0e1e8df624a8d3a17c163be8861acd6680808080",
"0xf843a0207b2b5166478fd4318d2acc6cc2c704584312bdd8781b32d5d06abda57f4230a1a0db56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71"
]).await;
verify_storage_proof(&api, target, fixed_bytes!("0000000000000000000000000000000000000000000000000000000000000024"), [
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080",
"0xf85180808080a030263404acfee103d0b1019053ff3240fce433c69b709831673285fa5887ce4c80808080808080a0f8f1fbb1f7b482d9860480feebb83ff54a8b6ec1ead61cc7d2f25d7c01659f9c80808080",
"0xf843a020d332d19b93bcabe3cce7ca0c18a052f57e5fd03b4758a09f30f5ddc4b22ec4a1a0c78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c",
]).await;
verify_storage_proof(&api, target, fixed_bytes!("0000000000000000000000000000000000000000000000000000000000000100"), [
"0xf9019180a0aafd5b14a6edacd149e110ba6776a654f2dbffca340902be933d011113f2750380a0a502c93b1918c4c6534d4593ae03a5a23fa10ebc30ffb7080b297bff2446e42da02eb2bf45fd443bd1df8b6f9c09726a4c6252a0f7896a131a081e39a7f644b38980a0a9cf7f673a0bce76fd40332afe8601542910b48dea44e93933a3e5e930da5d19a0ddf79db0a36d0c8134ba143bcb541cd4795a9a2bae8aca0ba24b8d8963c2a77da0b973ec0f48f710bf79f63688485755cbe87f9d4c68326bb83c26af620802a80ea0f0855349af6bf84afc8bca2eda31c8ef8c5139be1929eeb3da4ba6b68a818cb0a0c271e189aeeb1db5d59d7fe87d7d6327bbe7cfa389619016459196497de3ccdea0e7503ba5799e77aa31bbe1310c312ca17b2c5bcc8fa38f266675e8f154c2516ba09278b846696d37213ab9d20a5eb42b03db3173ce490a2ef3b2f3b3600579fc63a0e9041059114f9c910adeca12dbba1fef79b2e2c8899f2d7213cd22dfe4310561a047c59da56bb2bf348c9dd2a2e8f5538a92b904b661cfe54a4298b85868bbe4858080",
"0xf891a090bacef44b189ddffdc5f22edc70fe298c58e5e523e6e1dfdf7dbc6d657f7d1b80a026eed68746028bc369eb456b7d3ee475aa16f34e5eaa0c98fdedb9c59ebc53b0808080a09ce86197173e14e0633db84ce8eea32c5454eebe954779255644b45b717e8841808080a0328c7afb2c58ef3f8c4117a8ebd336f1a61d24591067ed9c5aae94796cac987d808080808080",
]).await;
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_random_account_proofs() {
let (api, _handle) = spawn(NodeConfig::test()).await;
for acc in std::iter::repeat_with(Address::random).take(10) {
let _ = api
.get_proof(acc, Vec::new(), None)
.await
.unwrap_or_else(|_| panic!("Failed to get proof for {acc:?}"));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/revert.rs | crates/anvil/tests/it/revert.rs | use crate::abi::VendingMachine;
use alloy_network::TransactionBuilder;
use alloy_primitives::{U256, bytes};
use alloy_provider::Provider;
use alloy_rpc_types::TransactionRequest;
use alloy_serde::WithOtherFields;
use alloy_sol_types::sol;
use anvil::{NodeConfig, spawn};
#[tokio::test(flavor = "multi_thread")]
async fn test_deploy_reverting() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let sender = handle.dev_accounts().next().unwrap();
let code = bytes!("5f5ffd"); // PUSH0 PUSH0 REVERT
let tx = TransactionRequest::default().from(sender).with_deploy_code(code);
let tx = WithOtherFields::new(tx);
// Calling/estimating gas fails early.
let err = provider.call(tx.clone()).await.unwrap_err();
let s = err.to_string();
assert!(s.contains("execution reverted"), "{s:?}");
// Sending the transaction is successful but reverts on chain.
let tx = provider.send_transaction(tx).await.unwrap();
let receipt = tx.get_receipt().await.unwrap();
assert!(!receipt.inner.inner.status());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_revert_messages() {
sol!(
#[sol(rpc, bytecode = "608080604052346025575f80546001600160a01b031916600117905560b69081602a8239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c635b9fdc30146023575f80fd5b34607c575f366003190112607c575f546001600160a01b03163303604c576020604051607b8152f35b62461bcd60e51b815260206004820152600b60248201526a08585d5d1a1bdc9a5e995960aa1b6044820152606490fd5b5f80fdfea2646970667358221220f593e5ccd46935f623185de62a72d9f1492d8d15075a111b0fa4d7e16acf4a7064736f6c63430008190033")]
contract Contract {
address private owner;
constructor() {
owner = address(1);
}
modifier onlyOwner() {
require(msg.sender == owner, "!authorized");
_;
}
#[derive(Debug)]
function getSecret() public onlyOwner view returns(uint256 secret) {
return 123;
}
}
);
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let contract = Contract::deploy(&provider).await.unwrap();
let err = contract.getSecret().call().await.unwrap_err();
let s = err.to_string();
assert!(s.contains("!authorized"), "{s:?}");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_solc_revert_example() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let sender = handle.dev_accounts().next().unwrap();
let provider = handle.http_provider();
let contract = VendingMachine::deploy(&provider).await.unwrap();
let err =
contract.buy(U256::from(100)).value(U256::from(1)).from(sender).call().await.unwrap_err();
let s = err.to_string();
assert!(s.contains("Not enough Ether provided."), "{s:?}");
}
// <https://github.com/foundry-rs/foundry/issues/1871>
#[tokio::test(flavor = "multi_thread")]
async fn test_another_revert_message() {
sol!(
#[sol(rpc, bytecode = "6080806040523460135760d7908160188239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c9081633fb5c1cb14604d5750638381f58a14602f575f80fd5b346049575f36600319011260495760205f54604051908152f35b5f80fd5b346049576020366003190112604957600435908115606a57505f55005b62461bcd60e51b81526020600482015260126024820152712932bb32b93a29ba3934b733a337b7a130b960711b6044820152606490fdfea2646970667358221220314bf8261cc467619137c071584f8d3bd8d9d97bf2846c138c0567040cf9828a64736f6c63430008190033")]
contract Contract {
uint256 public number;
#[derive(Debug)]
function setNumber(uint256 num) public {
require(num != 0, "RevertStringFooBar");
number = num;
}
}
);
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let contract = Contract::deploy(&provider).await.unwrap();
let err = contract.setNumber(U256::from(0)).call().await.unwrap_err();
let s = err.to_string();
assert!(s.contains("RevertStringFooBar"), "{s:?}");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_solc_revert_custom_errors() {
sol!(
#[sol(rpc, bytecode = "608080604052346013576081908160188239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c63e57207e6146023575f80fd5b346047575f3660031901126047576373ea2a7f60e01b815260016004820152602490fd5b5f80fdfea26469706673582212202a8d69545801394af36c56ca229b52ae0b22d7b8f938b107dca8ebbf655464f764736f6c63430008190033")]
contract Contract {
error AddressRevert(address);
#[derive(Debug)]
function revertAddress() public {
revert AddressRevert(address(1));
}
}
);
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let contract = Contract::deploy(&provider).await.unwrap();
let err = contract.revertAddress().call().await.unwrap_err();
let s = err.to_string();
assert!(s.contains("execution reverted"), "{s:?}");
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/abi.rs | crates/anvil/tests/it/abi.rs | //! commonly used sol generated types
use alloy_sol_types::sol;
sol!(
#[sol(rpc)]
Greeter,
"test-data/greeter.json"
);
sol!(
#[derive(Debug)]
#[sol(rpc)]
SimpleStorage,
"test-data/SimpleStorage.json"
);
sol!(
#[sol(rpc)]
Multicall,
"test-data/multicall.json"
);
sol!(
#[sol(rpc)]
contract BUSD {
function balanceOf(address) external view returns (uint256);
}
);
sol!(
#[sol(rpc)]
interface ERC721 {
function balanceOf(address owner) public view virtual returns (uint256);
function ownerOf(uint256 tokenId) public view virtual returns (address);
function name() public view virtual returns (string memory);
function symbol() public view virtual returns (string memory);
function tokenURI(uint256 tokenId) public view virtual returns (string memory);
function getApproved(uint256 tokenId) public view virtual returns (address);
function setApprovalForAll(address operator, bool approved) public virtual;
function isApprovedForAll(address owner, address operator) public view virtual returns (bool);
function transferFrom(address from, address to, uint256 tokenId) public virtual;
function safeTransferFrom(address from, address to, uint256 tokenId) public;
function safeTransferFrom(address from, address to, uint256 tokenId, bytes memory data) public virtual;
function _mint(address to, uint256 tokenId) internal;
function _safeMint(address to, uint256 tokenId, bytes memory data) internal virtual;
function _burn(uint256 tokenId) internal;
function _transfer(address from, address to, uint256 tokenId) internal;
function _approve(address to, uint256 tokenId, address auth) internal;
}
);
// https://docs.soliditylang.org/en/latest/control-structures.html#revert
sol!(
// SPDX-License-Identifier: GPL-3.0
pragma solidity ^0.8.4;
#[sol(rpc, bytecode = "6080806040523460155761011e908161001a8239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c9081633ccfd60b146094575063d96a094a14602f575f80fd5b6020366003190112609057671bc16d674ec80000340460043511604e57005b60405162461bcd60e51b815260206004820152601a6024820152792737ba1032b737bab3b41022ba3432b910383937bb34b232b21760311b6044820152606490fd5b5f80fd5b346090575f3660031901126090575f546001600160a01b0316330360da575f8080804781811560d2575b3390f11560c757005b6040513d5f823e3d90fd5b506108fc60be565b6282b42960e81b8152600490fdfea2646970667358221220c143fcbf0da5cee61ae3fcc385d9f7c4d6a7fb2ea42530d70d6049478db0b8a964736f6c63430008190033")]
contract VendingMachine {
address owner;
error Unauthorized();
#[derive(Debug)]
function buy(uint amount) public payable {
if (amount > msg.value / 2 ether)
revert("Not enough Ether provided.");
// Alternative way to do it:
require(
amount <= msg.value / 2 ether,
"Not enough Ether provided."
);
// Perform the purchase.
}
function withdraw() public {
if (msg.sender != owner)
revert Unauthorized();
payable(msg.sender).transfer(address(this).balance);
}
}
);
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/sign.rs | crates/anvil/tests/it/sign.rs | use crate::utils::http_provider_with_signer;
use alloy_dyn_abi::TypedData;
use alloy_network::EthereumWallet;
use alloy_primitives::{Address, U256};
use alloy_provider::Provider;
use alloy_rpc_types::TransactionRequest;
use alloy_serde::WithOtherFields;
use alloy_signer::Signer;
use anvil::{NodeConfig, spawn};
// Signs the canonical EIP-712 "Mail" example payload via `eth_signTypedData_v4`
// using an account the test node can sign for, and pins the exact signature bytes
// so any change to typed-data hashing is caught.
#[tokio::test(flavor = "multi_thread")]
async fn can_sign_typed_data() {
let (api, _handle) = spawn(NodeConfig::test()).await;
// EIP-712 payload: domain + `Mail { from: Person, to: Person, contents: string }`.
let json = serde_json::json!(
{
"types": {
"EIP712Domain": [
{
"name": "name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "chainId",
"type": "uint256"
},
{
"name": "verifyingContract",
"type": "address"
}
],
"Person": [
{
"name": "name",
"type": "string"
},
{
"name": "wallet",
"type": "address"
}
],
"Mail": [
{
"name": "from",
"type": "Person"
},
{
"name": "to",
"type": "Person"
},
{
"name": "contents",
"type": "string"
}
]
},
"primaryType": "Mail",
"domain": {
"name": "Ether Mail",
"version": "1",
"chainId": 1,
"verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
},
"message": {
"from": {
"name": "Cow",
"wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
},
"to": {
"name": "Bob",
"wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
},
"contents": "Hello, Bob!"
}
});
let typed_data: TypedData = serde_json::from_value(json).unwrap();
// Equivalent raw RPC call for manual reproduction:
// `curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","method": "eth_signTypedData_v4", "params": ["0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", {"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":1,"verifyingContract":"0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","wallet":"0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"},"contents":"Hello, Bob!"}}],"id":67}' http://localhost:8545`
let signature = api
.sign_typed_data_v4(
"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266".parse().unwrap(),
&typed_data,
)
.await
.unwrap();
// Deterministic ECDSA output for this key/payload; must never change.
assert_eq!(
signature,
"0x6ea8bb309a3401225701f3565e32519f94a0ea91a5910ce9229fe488e773584c0390416a2190d9560219dab757ecca2029e63fa9d1c2aebf676cc25b9f03126a1b".to_string()
);
}
// <https://github.com/foundry-rs/foundry/issues/2458>
// Signs a real-world OpenSea/Seaport `OrderComponents` typed-data payload
// (nested struct arrays, string-encoded numerics) and pins the signature,
// guarding against regressions in typed-data coercion of complex payloads.
#[tokio::test(flavor = "multi_thread")]
async fn can_sign_typed_data_os() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let json = serde_json::json!(
{
"types": {
"EIP712Domain": [
{
"name": "name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "chainId",
"type": "uint256"
},
{
"name": "verifyingContract",
"type": "address"
}
],
"OrderComponents": [
{
"name": "offerer",
"type": "address"
},
{
"name": "zone",
"type": "address"
},
{
"name": "offer",
"type": "OfferItem[]"
},
{
"name": "consideration",
"type": "ConsiderationItem[]"
},
{
"name": "orderType",
"type": "uint8"
},
{
"name": "startTime",
"type": "uint256"
},
{
"name": "endTime",
"type": "uint256"
},
{
"name": "zoneHash",
"type": "bytes32"
},
{
"name": "salt",
"type": "uint256"
},
{
"name": "conduitKey",
"type": "bytes32"
},
{
"name": "counter",
"type": "uint256"
}
],
"OfferItem": [
{
"name": "itemType",
"type": "uint8"
},
{
"name": "token",
"type": "address"
},
{
"name": "identifierOrCriteria",
"type": "uint256"
},
{
"name": "startAmount",
"type": "uint256"
},
{
"name": "endAmount",
"type": "uint256"
}
],
"ConsiderationItem": [
{
"name": "itemType",
"type": "uint8"
},
{
"name": "token",
"type": "address"
},
{
"name": "identifierOrCriteria",
"type": "uint256"
},
{
"name": "startAmount",
"type": "uint256"
},
{
"name": "endAmount",
"type": "uint256"
},
{
"name": "recipient",
"type": "address"
}
]
},
"primaryType": "OrderComponents",
"domain": {
"name": "Seaport",
"version": "1.1",
"chainId": "1",
"verifyingContract": "0x00000000006c3852cbEf3e08E8dF289169EdE581"
},
"message": {
"offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
"offer": [
{
"itemType": "3",
"token": "0xA604060890923Ff400e8c6f5290461A83AEDACec",
"identifierOrCriteria": "110194434039389003190498847789203126033799499726478230611233094448886344768909",
"startAmount": "1",
"endAmount": "1"
}
],
"consideration": [
{
"itemType": "0",
"token": "0x0000000000000000000000000000000000000000",
"identifierOrCriteria": "0",
"startAmount": "487500000000000000",
"endAmount": "487500000000000000",
"recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
},
{
"itemType": "0",
"token": "0x0000000000000000000000000000000000000000",
"identifierOrCriteria": "0",
"startAmount": "12500000000000000",
"endAmount": "12500000000000000",
"recipient": "0x8De9C5A032463C561423387a9648c5C7BCC5BC90"
}
],
"startTime": "1658645591",
"endTime": "1659250386",
"orderType": "3",
"zone": "0x004C00500000aD104D7DBd00e3ae0A5C00560C00",
"zoneHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"salt": "16178208897136618",
"conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000",
"totalOriginalConsiderationItems": "2",
"counter": "0"
}
}
);
let typed_data: TypedData = serde_json::from_value(json).unwrap();
// Equivalent raw RPC call for manual reproduction:
// `curl -X POST http://localhost:8545 -d '{"jsonrpc": "2.0", "method": "eth_signTypedData_v4", "params": ["0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", {"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"OrderComponents":[{"name":"offerer","type":"address"},{"name":"zone","type":"address"},{"name":"offer","type":"OfferItem[]"},{"name":"consideration","type":"ConsiderationItem[]"},{"name":"orderType","type":"uint8"},{"name":"startTime","type":"uint256"},{"name":"endTime","type":"uint256"},{"name":"zoneHash","type":"bytes32"},{"name":"salt","type":"uint256"},{"name":"conduitKey","type":"bytes32"},{"name":"counter","type":"uint256"}],"OfferItem":[{"name":"itemType","type":"uint8"},{"name":"token","type":"address"},{"name":"identifierOrCriteria","type":"uint256"},{"name":"startAmount","type":"uint256"},{"name":"endAmount","type":"uint256"}],"ConsiderationItem":[{"name":"itemType","type":"uint8"},{"name":"token","type":"address"},{"name":"identifierOrCriteria","type":"uint256"},{"name":"startAmount","type":"uint256"},{"name":"endAmount","type":"uint256"},{"name":"recipient","type":"address"}]},"primaryType":"OrderComponents","domain":{"name":"Seaport","version":"1.1","chainId":"1","verifyingContract":"0x00000000006c3852cbEf3e08E8dF289169EdE581"},"message":{"offerer":"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266","offer":[{"itemType":"3","token":"0xA604060890923Ff400e8c6f5290461A83AEDACec","identifierOrCriteria":"110194434039389003190498847789203126033799499726478230611233094448886344768909","startAmount":"1","endAmount":"1"}],"consideration":[{"itemType":"0","token":"0x0000000000000000000000000000000000000000","identifierOrCriteria":"0","startAmount":"487500000000000000","endAmount":"487500000000000000","recipient":"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"},{"itemType":"0","token":"0x0000000000000000000000000000000000000000","identifierOrCriteria":"0","startAmount":"12500000000000000","endAmount":"12500000000000000","recipient":"0x8De9C5A032463C561423387a9648c5C7BCC5BC90"}],"startTime":"1658645591","endTime":"1659250386","orderType":"3","zone":"0x004C00500000aD104D7DBd00e3ae0A5C00560C00","zoneHash":"0x0000000000000000000000000000000000000000000000000000000000000000","salt":"16178208897136618","conduitKey":"0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000","totalOriginalConsiderationItems":"2","counter":"0"}}], "id": "1"}' -H "Content-Type: application/json"`
let signature = api
.sign_typed_data_v4(
"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266".parse().unwrap(),
&typed_data,
)
.await
.unwrap();
// Deterministic ECDSA output for this key/payload; must never change.
assert_eq!(
signature,
"0xedb0fa55ac67e3ca52b6bd6ee3576b193731adc2aff42151f67826932fa9f6191261ebdecc2c650204ff7625752b033293fb67ef5cfca78e16de359200040b761b".to_string()
);
}
// Verifies `eth_signTransaction` produces the exact RLP-encoded EIP-1559 payload
// for a fully-specified transfer between the first two default dev accounts.
#[tokio::test(flavor = "multi_thread")]
async fn can_sign_transaction() {
    let (api, handle) = spawn(NodeConfig::test()).await;

    // Sender and recipient are the first two default dev accounts.
    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let sender = wallets[0].address();
    let recipient = wallets[1].address();

    // Fully specify the EIP-1559 fields; `from` tells the node which account signs.
    let request = TransactionRequest::default()
        .from(sender)
        .to(recipient)
        .value(U256::from(1001u64))
        .nonce(10)
        .max_fee_per_gas(100)
        .max_priority_fee_per_gas(101);

    // Sign via the eth_signTransaction API and pin the deterministic encoding.
    let signed = api.sign_transaction(WithOtherFields::new(request)).await.unwrap();
    assert_eq!(
        signed,
        "0x02f866827a690a65648252089470997970c51812dc3a010c7d01b50e0d17dc79c88203e980c001a0e4de88aefcf87ccb04466e60de66a83192e46aa26177d5ea35efbfd43fd0ecdca00e3148e0e8e0b9a6f9b329efd6e30c4a461920f3a27497be3dbefaba996601da"
    );
}
// A signer pinned to a different chain id (mainnet) than the node's must be
// rejected with a chain-id mismatch error before submission.
#[tokio::test(flavor = "multi_thread")]
async fn rejects_different_chain_id() {
    let (_api, handle) = spawn(NodeConfig::test()).await;

    // Pin the dev wallet to chain id 1 while the node runs the test chain.
    let wallet = handle.dev_wallets().next().unwrap().with_chain_id(Some(1));
    let provider = http_provider_with_signer(&handle.http_endpoint(), EthereumWallet::from(wallet));

    let request = WithOtherFields::new(
        TransactionRequest::default().to(Address::random()).value(U256::from(100)),
    );

    let err = provider.send_transaction(request).await.unwrap_err();
    assert!(err.to_string().contains("does not match the signer's"), "{}", err.to_string());
}
// A signer pinned to a chain id (99) the node does not use must be rejected
// with a chain-id mismatch error before submission.
#[tokio::test(flavor = "multi_thread")]
async fn rejects_invalid_chain_id() {
    let (_api, handle) = spawn(NodeConfig::test()).await;
    let wallet = handle.dev_wallets().next().unwrap();
    // Pin the signer to chain id 99 while the node runs the test chain.
    let wallet = wallet.with_chain_id(Some(99u64));
    let provider = http_provider_with_signer(&handle.http_endpoint(), EthereumWallet::from(wallet));
    let tx = TransactionRequest::default().to(Address::random()).value(U256::from(100u64));
    let tx = WithOtherFields::new(tx);
    let res = provider.send_transaction(tx).await;
    // Previously the error was discarded (`let _err = …`), so this test passed for
    // ANY failure. Assert the specific chain-id mismatch message, consistent with
    // `rejects_different_chain_id`, which exercises the same rejection path.
    let err = res.unwrap_err();
    assert!(err.to_string().contains("does not match the signer's"), "{}", err.to_string());
}
// <https://github.com/foundry-rs/foundry/issues/3409>
// Same Seaport `OrderComponents` payload as `can_sign_typed_data_os`, but with a
// string-encoded non-mainnet chain id ("137"), guarding the regression where
// string chain ids in the EIP-712 domain failed to parse/sign correctly.
#[tokio::test(flavor = "multi_thread")]
async fn can_sign_typed_seaport_data() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let json = serde_json::json!(
{
"types": {
"EIP712Domain": [
{
"name": "name",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "chainId",
"type": "uint256"
},
{
"name": "verifyingContract",
"type": "address"
}
],
"OrderComponents": [
{
"name": "offerer",
"type": "address"
},
{
"name": "zone",
"type": "address"
},
{
"name": "offer",
"type": "OfferItem[]"
},
{
"name": "consideration",
"type": "ConsiderationItem[]"
},
{
"name": "orderType",
"type": "uint8"
},
{
"name": "startTime",
"type": "uint256"
},
{
"name": "endTime",
"type": "uint256"
},
{
"name": "zoneHash",
"type": "bytes32"
},
{
"name": "salt",
"type": "uint256"
},
{
"name": "conduitKey",
"type": "bytes32"
},
{
"name": "counter",
"type": "uint256"
}
],
"OfferItem": [
{
"name": "itemType",
"type": "uint8"
},
{
"name": "token",
"type": "address"
},
{
"name": "identifierOrCriteria",
"type": "uint256"
},
{
"name": "startAmount",
"type": "uint256"
},
{
"name": "endAmount",
"type": "uint256"
}
],
"ConsiderationItem": [
{
"name": "itemType",
"type": "uint8"
},
{
"name": "token",
"type": "address"
},
{
"name": "identifierOrCriteria",
"type": "uint256"
},
{
"name": "startAmount",
"type": "uint256"
},
{
"name": "endAmount",
"type": "uint256"
},
{
"name": "recipient",
"type": "address"
}
]
},
"primaryType": "OrderComponents",
"domain": {
"name": "Seaport",
"version": "1.1",
"chainId": "137",
"verifyingContract": "0x00000000006c3852cbEf3e08E8dF289169EdE581"
},
"message": {
"offerer": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
"offer": [
{
"itemType": "3",
"token": "0xA604060890923Ff400e8c6f5290461A83AEDACec",
"identifierOrCriteria": "110194434039389003190498847789203126033799499726478230611233094448886344768909",
"startAmount": "1",
"endAmount": "1"
}
],
"consideration": [
{
"itemType": "0",
"token": "0x0000000000000000000000000000000000000000",
"identifierOrCriteria": "0",
"startAmount": "487500000000000000",
"endAmount": "487500000000000000",
"recipient": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
},
{
"itemType": "0",
"token": "0x0000000000000000000000000000000000000000",
"identifierOrCriteria": "0",
"startAmount": "12500000000000000",
"endAmount": "12500000000000000",
"recipient": "0x8De9C5A032463C561423387a9648c5C7BCC5BC90"
}
],
"startTime": "1658645591",
"endTime": "1659250386",
"orderType": "3",
"zone": "0x004C00500000aD104D7DBd00e3ae0A5C00560C00",
"zoneHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"salt": "16178208897136618",
"conduitKey": "0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000",
"totalOriginalConsiderationItems": "2",
"counter": "0"
}
}
);
let typed_data: TypedData = serde_json::from_value(json).unwrap();
// Equivalent raw RPC call for manual reproduction:
// `curl -X POST http://localhost:8545 -d '{"jsonrpc": "2.0", "method": "eth_signTypedData_v4", "params": ["0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", "{\"types\":{\"EIP712Domain\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"version\",\"type\":\"string\"},{\"name\":\"chainId\",\"type\":\"uint256\"},{\"name\":\"verifyingContract\",\"type\":\"address\"}],\"OrderComponents\":[{\"name\":\"offerer\",\"type\":\"address\"},{\"name\":\"zone\",\"type\":\"address\"},{\"name\":\"offer\",\"type\":\"OfferItem[]\"},{\"name\":\"consideration\",\"type\":\"ConsiderationItem[]\"},{\"name\":\"orderType\",\"type\":\"uint8\"},{\"name\":\"startTime\",\"type\":\"uint256\"},{\"name\":\"endTime\",\"type\":\"uint256\"},{\"name\":\"zoneHash\",\"type\":\"bytes32\"},{\"name\":\"salt\",\"type\":\"uint256\"},{\"name\":\"conduitKey\",\"type\":\"bytes32\"},{\"name\":\"counter\",\"type\":\"uint256\"}],\"OfferItem\":[{\"name\":\"itemType\",\"type\":\"uint8\"},{\"name\":\"token\",\"type\":\"address\"},{\"name\":\"identifierOrCriteria\",\"type\":\"uint256\"},{\"name\":\"startAmount\",\"type\":\"uint256\"},{\"name\":\"endAmount\",\"type\":\"uint256\"}],\"ConsiderationItem\":[{\"name\":\"itemType\",\"type\":\"uint8\"},{\"name\":\"token\",\"type\":\"address\"},{\"name\":\"identifierOrCriteria\",\"type\":\"uint256\"},{\"name\":\"startAmount\",\"type\":\"uint256\"},{\"name\":\"endAmount\",\"type\":\"uint256\"},{\"name\":\"recipient\",\"type\":\"address\"}]},\"primaryType\":\"OrderComponents\",\"domain\":{\"name\":\"Seaport\",\"version\":\"1.1\",\"chainId\":\"137\",\"verifyingContract\":\"0x00000000006c3852cbEf3e08E8dF289169EdE581\"},\"message\":{\"offerer\":\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\",\"offer\":[{\"itemType\":\"3\",\"token\":\"0xA604060890923Ff400e8c6f5290461A83AEDACec\",\"identifierOrCriteria\":\"110194434039389003190498847789203126033799499726478230611233094448886344768909\",\"startAmount\":\"1\",\"endAmount\":\"1\"}],\"consideration\":[{\"itemType\":\"0\",\"token\":\"0x0000000000000000000000000000000000000000\",\"identifierOrCriteria\":\"0\",\"startAmount\":\"487500000000000000\",\"endAmount\":\"487500000000000000\",\"recipient\":\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\"},{\"itemType\":\"0\",\"token\":\"0x0000000000000000000000000000000000000000\",\"identifierOrCriteria\":\"0\",\"startAmount\":\"12500000000000000\",\"endAmount\":\"12500000000000000\",\"recipient\":\"0x8De9C5A032463C561423387a9648c5C7BCC5BC90\"}],\"startTime\":\"1658645591\",\"endTime\":\"1659250386\",\"orderType\":\"3\",\"zone\":\"0x004C00500000aD104D7DBd00e3ae0A5C00560C00\",\"zoneHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"salt\":\"16178208897136618\",\"conduitKey\":\"0x0000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f0000\",\"totalOriginalConsiderationItems\":\"2\",\"counter\":\"0\"}}"], "id": "1"}' -H "Content-Type: application/json"`
let signature = api
.sign_typed_data_v4(
"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266".parse().unwrap(),
&typed_data,
)
.await
.unwrap();
// Deterministic ECDSA output for this key/payload; must never change.
assert_eq!(
signature,
"0xed9afe7f377155ee3a42b25b696d79b55d441aeac7790b97a51b54ad0569b9665ea30bf8e8df12d6ee801c4dcb85ecfb8b23a6f7ae166d5af9acac9befb905451c".to_string()
);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/state.rs | crates/anvil/tests/it/state.rs | //! general eth api tests
use crate::abi::Greeter;
use alloy_network::{ReceiptResponse, TransactionBuilder};
use alloy_primitives::{Bytes, U256, Uint, address, b256, utils::Unit};
use alloy_provider::Provider;
use alloy_rpc_types::{BlockId, TransactionRequest};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, eth::backend::db::SerializableState, spawn};
use foundry_test_utils::rpc::next_http_archive_rpc_url;
use revm::{
context_interface::block::BlobExcessGasAndPrice,
primitives::eip4844::BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE,
};
use serde_json::json;
use std::str::FromStr;
// Dumps the chain state to a JSON file and reloads it into a fresh node,
// verifying the restored block height and RPC consistency.
#[tokio::test(flavor = "multi_thread")]
async fn can_load_state() {
    let tmp = tempfile::tempdir().unwrap();
    let state_file = tmp.path().join("state.json");

    // Mine a couple of blocks, then dump the state to disk.
    let (api, _handle) = spawn(NodeConfig::test()).await;
    api.mine_one().await;
    api.mine_one().await;
    let expected_num = api.block_number().unwrap();
    let state = api.serialized_state(false).await.unwrap();
    foundry_common::fs::write_json_file(&state_file, &state).unwrap();

    // Boot a fresh node from the dumped state.
    let (api, _handle) = spawn(NodeConfig::test().with_init_state_path(state_file)).await;
    let restored_num = api.block_number().unwrap();

    // Ref: https://github.com/foundry-rs/foundry/issues/9017
    // eth_blockNumber and eth_getBlockByNumber must agree after loading state.
    let latest_header_num = api
        .block_by_number(alloy_eips::BlockNumberOrTag::Latest)
        .await
        .unwrap()
        .unwrap()
        .header
        .number;
    assert_eq!(expected_num, restored_num);
    assert_eq!(expected_num, U256::from(latest_header_num));
}
// Boots a node from a checked-in legacy-format state dump and verifies the
// restored block height, guarding backward compatibility of the loader.
#[tokio::test(flavor = "multi_thread")]
async fn can_load_existing_state_legacy() {
    let (api, _handle) =
        spawn(NodeConfig::test().with_init_state_path("test-data/state-dump-legacy.json")).await;
    assert_eq!(api.block_number().unwrap(), Uint::from(2));
}
// Boots a node from a larger legacy-format state dump (stress fixture) and
// verifies the restored block height.
#[tokio::test(flavor = "multi_thread")]
async fn can_load_existing_state_legacy_stress() {
    let (api, _handle) =
        spawn(NodeConfig::test().with_init_state_path("test-data/state-dump-legacy-stress.json"))
            .await;
    assert_eq!(api.block_number().unwrap(), Uint::from(5));
}
// Boots a node from a checked-in current-format state dump and verifies the
// restored block height.
#[tokio::test(flavor = "multi_thread")]
async fn can_load_existing_state() {
    let (api, _handle) =
        spawn(NodeConfig::test().with_init_state_path("test-data/state-dump.json")).await;
    assert_eq!(api.block_number().unwrap(), Uint::from(2));
}
// Serializing the state (with history) must not clear the node's historical
// states: a query against an earlier block must still work after the dump.
#[tokio::test(flavor = "multi_thread")]
async fn test_make_sure_historical_state_is_not_cleared_on_dump() {
    let tmp = tempfile::tempdir().unwrap();
    let state_file = tmp.path().join("state.json");

    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // Deploy the Greeter, mutate it, then mine one extra block.
    let greeter = Greeter::deploy(&provider, "Hello".to_string()).await.unwrap();
    let address = greeter.address();
    let _tx = greeter
        .setGreeting("World!".to_string())
        .send()
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    api.mine_one().await;

    // Dump the state including historical states.
    let ser_state = api.serialized_state(true).await.unwrap();
    foundry_common::fs::write_json_file(&state_file, &ser_state).unwrap();

    assert_eq!(api.block_number().unwrap(), Uint::from(3));

    // The contract code must still be retrievable at an earlier block on the
    // same (still running) node after the dump.
    let code = provider.get_code_at(*address).block_id(BlockId::number(2)).await.unwrap();
    assert_ne!(code, Bytes::new());
}
// Historical states must survive a dump/load round-trip: calls pinned to
// pre- and post-mutation blocks must return the correct historical values
// on the restored node.
#[tokio::test(flavor = "multi_thread")]
async fn can_preserve_historical_states_between_dump_and_load() {
    let tmp = tempfile::tempdir().unwrap();
    let state_file = tmp.path().join("state.json");

    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // Deploy the Greeter, remember the deploy block, then change the greeting.
    let greeter = Greeter::deploy(&provider, "Hello".to_string()).await.unwrap();
    let address = greeter.address();
    let deploy_blk_num = provider.get_block_number().await.unwrap();

    let receipt = greeter
        .setGreeting("World!".to_string())
        .send()
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    let change_greeting_blk_num = receipt.block_number.unwrap();

    api.mine_one().await;

    // Dump with historical states and boot a fresh node from the file.
    let ser_state = api.serialized_state(true).await.unwrap();
    foundry_common::fs::write_json_file(&state_file, &ser_state).unwrap();

    let (api, handle) = spawn(NodeConfig::test().with_init_state_path(state_file)).await;
    assert_eq!(api.block_number().unwrap(), Uint::from(3));

    // Historical calls at both block heights must resolve to the correct values.
    let provider = handle.http_provider();
    let greeter = Greeter::new(*address, provider);

    let greeting_at_init =
        greeter.greet().block(BlockId::number(deploy_blk_num)).call().await.unwrap();
    assert_eq!(greeting_at_init, "Hello");

    let greeting_after_change =
        greeter.greet().block(BlockId::number(change_greeting_blk_num)).call().await.unwrap();
    assert_eq!(greeting_after_change, "World!");
}
// <https://github.com/foundry-rs/foundry/issues/9053>
// Dumps state from a forked node, reloads it on a node forked at a LATER block,
// and verifies: (1) the restored height follows the new fork point rather than
// the dump's height, (2) account changes from the dump (nonce/balance) are
// preserved, and (3) the restored state remains usable for further txs.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_load_state() {
let (api, handle) = spawn(
NodeConfig::test()
.with_eth_rpc_url(Some(next_http_archive_rpc_url()))
.with_fork_block_number(Some(21070682u64)),
)
.await;
let bob = address!("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266");
let alice = address!("0x9276449EaC5b4f7Bc17cFC6700f7BeeB86F9bCd0");
let provider = handle.http_provider();
let init_nonce_bob = provider.get_transaction_count(bob).await.unwrap();
let init_balance_alice = provider.get_balance(alice).await.unwrap();
// Send 1 ether from bob to alice so the dump contains a local state change.
let value = Unit::ETHER.wei().saturating_mul(U256::from(1)); // 1 ether
let tx = TransactionRequest::default().with_to(alice).with_value(value).with_from(bob);
let tx = WithOtherFields::new(tx);
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
assert!(receipt.status());
let serialized_state = api.serialized_state(false).await.unwrap();
let state_dump_block = api.block_number().unwrap();
let (api, handle) = spawn(
NodeConfig::test()
.with_eth_rpc_url(Some(next_http_archive_rpc_url()))
.with_fork_block_number(Some(21070686u64)) // Forked chain has moved forward
.with_init_state(Some(serialized_state)),
)
.await;
// Ensure the initial block number is the fork_block_number and not the state_dump_block
let block_number = api.block_number().unwrap();
assert_eq!(block_number, U256::from(21070686u64));
assert_ne!(block_number, state_dump_block);
let provider = handle.http_provider();
let restart_nonce_bob = provider.get_transaction_count(bob).await.unwrap();
let restart_balance_alice = provider.get_balance(alice).await.unwrap();
// The pre-restart transfer must be reflected in the reloaded state.
assert_eq!(init_nonce_bob + 1, restart_nonce_bob);
assert_eq!(init_balance_alice + value, restart_balance_alice);
// Send another tx to check if the state is preserved
let tx = TransactionRequest::default().with_to(alice).with_value(value).with_from(bob);
let tx = WithOtherFields::new(tx);
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
assert!(receipt.status());
let nonce_bob = provider.get_transaction_count(bob).await.unwrap();
let balance_alice = provider.get_balance(alice).await.unwrap();
// A further transfer with an explicitly supplied nonce must also apply cleanly.
let tx = TransactionRequest::default()
.with_to(alice)
.with_value(value)
.with_from(bob)
.with_nonce(nonce_bob);
let tx = WithOtherFields::new(tx);
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
assert!(receipt.status());
let latest_nonce_bob = provider.get_transaction_count(bob).await.unwrap();
let latest_balance_alice = provider.get_balance(alice).await.unwrap();
assert_eq!(nonce_bob + 1, latest_nonce_bob);
assert_eq!(balance_alice + value, latest_balance_alice);
}
// <https://github.com/foundry-rs/foundry/issues/9539>
// When the dumped state's best block is GREATER than the fork block of the node
// loading it, the restored node must resume at the dumped height.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_load_state_with_greater_state_block() {
    // Fork, then mine one local block so the state height exceeds the fork block.
    let (api, _handle) = spawn(
        NodeConfig::test()
            .with_eth_rpc_url(Some(next_http_archive_rpc_url()))
            .with_fork_block_number(Some(21070682u64)),
    )
    .await;

    api.mine_one().await;
    let block_number = api.block_number().unwrap();

    let serialized_state = api.serialized_state(false).await.unwrap();
    assert_eq!(serialized_state.best_block_number, Some(block_number.to::<u64>()));

    // Reload the dump on a node forked at the same (older) block.
    let (api, _handle) = spawn(
        NodeConfig::test()
            .with_eth_rpc_url(Some(next_http_archive_rpc_url()))
            .with_fork_block_number(Some(21070682u64))
            .with_init_state(Some(serialized_state)),
    )
    .await;

    assert_eq!(api.block_number().unwrap(), block_number);
}
// <https://github.com/foundry-rs/foundry/issues/10488>
// After a dump/load round-trip, the restored node must report the same base fee
// as the node that produced the dump.
#[tokio::test(flavor = "multi_thread")]
async fn computes_next_base_fee_after_loading_state() {
    let tmp = tempfile::tempdir().unwrap();
    let state_file = tmp.path().join("state.json");

    let (api, handle) = spawn(NodeConfig::test()).await;
    let bob = address!("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266");
    let alice = address!("0x9276449EaC5b4f7Bc17cFC6700f7BeeB86F9bCd0");
    let provider = handle.http_provider();

    let base_fee_empty_chain = api.backend.fees().base_fee();

    // Send one transfer so the base fee moves away from its empty-chain value.
    let value = Unit::ETHER.wei().saturating_mul(U256::from(1)); // 1 ether
    let tx = WithOtherFields::new(
        TransactionRequest::default().with_to(alice).with_value(value).with_from(bob),
    );
    let _receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();

    let base_fee_after_one_tx = api.backend.fees().base_fee();
    // The test is meaningless if the base fee did not change.
    assert_ne!(base_fee_empty_chain, base_fee_after_one_tx);

    // Dump, reload into a fresh node, and compare base fees.
    let ser_state = api.serialized_state(true).await.unwrap();
    foundry_common::fs::write_json_file(&state_file, &ser_state).unwrap();

    let (api, _handle) = spawn(NodeConfig::test().with_init_state_path(state_file)).await;
    assert_eq!(api.backend.fees().base_fee(), base_fee_after_one_tx);
}
// <https://github.com/foundry-rs/foundry/issues/11176>
// `SerializableState` must accept both the pre-1.2 block format (hex-string
// numbers, `coinbase` field) and the current format (decimal numbers,
// `beneficiary` field), mapping legacy `coinbase` to `beneficiary`.
#[tokio::test(flavor = "multi_thread")]
async fn test_backward_compatibility_deserialization_v1_2() {
// Legacy format: hex-string quantities and the old `coinbase` key.
let old_format = r#"{
"block": {
"number": "0x5",
"coinbase": "0x1234567890123456789012345678901234567890",
"timestamp": "0x688c83b5",
"gas_limit": "0x1c9c380",
"basefee": "0x3b9aca00",
"difficulty": "0x0",
"prevrandao": "0xecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5",
"blob_excess_gas_and_price": {
"excess_blob_gas": 0,
"blob_gasprice": 1
}
},
"accounts": {},
"best_block_number": "0x5",
"blocks": [],
"transactions": []
}"#;
let state: SerializableState = serde_json::from_str(old_format).unwrap();
assert!(state.block.is_some());
let block_env = state.block.unwrap();
assert_eq!(block_env.number, U256::from(5));
// Verify coinbase was converted to beneficiary
assert_eq!(block_env.beneficiary, address!("0x1234567890123456789012345678901234567890"));
// New format with beneficiary and numeric values
let new_format = r#"{
"block": {
"number": 6,
"beneficiary": "0x1234567890123456789012345678901234567891",
"timestamp": 1751619509,
"gas_limit": 30000000,
"basefee": 1000000000,
"difficulty": "0x0",
"prevrandao": "0xecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5",
"blob_excess_gas_and_price": {
"excess_blob_gas": 0,
"blob_gasprice": 1
}
},
"accounts": {},
"best_block_number": 6,
"blocks": [],
"transactions": []
}"#;
let state: SerializableState = serde_json::from_str(new_format).unwrap();
assert!(state.block.is_some());
let block_env = state.block.unwrap();
assert_eq!(block_env.number, U256::from(6));
assert_eq!(block_env.beneficiary, address!("0x1234567890123456789012345678901234567891"));
}
// <https://github.com/foundry-rs/foundry/issues/11176>
// A block env mixing legacy (hex-string + `coinbase`) and current (decimal)
// encodings field-by-field must still deserialize, with every field landing on
// its expected value.
#[tokio::test(flavor = "multi_thread")]
async fn test_backward_compatibility_mixed_formats_deserialization_v1_2() {
let mixed_format = json!({
"block": {
"number": "0x3",
"coinbase": "0x1111111111111111111111111111111111111111",
"timestamp": 1751619509,
"gas_limit": "0x1c9c380",
"basefee": 1000000000,
"difficulty": "0x0",
"prevrandao": "0xecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5",
"blob_excess_gas_and_price": {
"excess_blob_gas": 0,
"blob_gasprice": 1
}
},
"accounts": {},
"best_block_number": 3,
"blocks": [],
"transactions": []
});
let state: SerializableState = serde_json::from_str(&mixed_format.to_string()).unwrap();
let block_env = state.block.unwrap();
// Each field must decode correctly regardless of which encoding it used.
assert_eq!(block_env.number, U256::from(3));
assert_eq!(block_env.beneficiary, address!("0x1111111111111111111111111111111111111111"));
assert_eq!(block_env.timestamp, U256::from(1751619509));
assert_eq!(block_env.gas_limit, 0x1c9c380);
assert_eq!(block_env.basefee, 1_000_000_000);
assert_eq!(block_env.difficulty, U256::ZERO);
assert_eq!(
block_env.prevrandao.unwrap(),
b256!("ecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5")
);
let blob = block_env.blob_excess_gas_and_price.unwrap();
assert_eq!(blob.excess_blob_gas, 0);
assert_eq!(blob.blob_gasprice, 1);
assert_eq!(state.best_block_number, Some(3));
}
// <https://github.com/foundry-rs/foundry/issues/11176>
// Legacy dumps may omit optional fields; deserialization must backfill them:
// a missing `blob_excess_gas_and_price` gets a default (asserted below), and
// missing `blocks`/`transactions` arrays default to empty.
#[tokio::test(flavor = "multi_thread")]
async fn test_backward_compatibility_optional_fields_deserialization_v1_2() {
let partial_old_format = json!({
"block": {
"number": "0x1",
"coinbase": "0x0000000000000000000000000000000000000000",
"timestamp": "0x688c83b5",
"gas_limit": "0x1c9c380",
"basefee": "0x3b9aca00",
"difficulty": "0x0",
"prevrandao": "0xecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5"
// Missing blob_excess_gas_and_price - backfilled with the Prague-fraction
// default rather than None (see the assertion below)
},
"accounts": {},
"best_block_number": "0x1"
// Missing blocks and transactions arrays - should default to empty
});
let state: SerializableState = serde_json::from_str(&partial_old_format.to_string()).unwrap();
let block_env = state.block.unwrap();
assert_eq!(block_env.number, U256::from(1));
assert_eq!(block_env.beneficiary, address!("0x0000000000000000000000000000000000000000"));
assert_eq!(block_env.timestamp, U256::from(0x688c83b5));
assert_eq!(block_env.gas_limit, 0x1c9c380);
assert_eq!(block_env.basefee, 0x3b9aca00);
assert_eq!(block_env.difficulty, U256::ZERO);
assert_eq!(
block_env.prevrandao.unwrap(),
b256!("ecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5")
);
assert_eq!(
block_env.blob_excess_gas_and_price,
Some(BlobExcessGasAndPrice::new(0, BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE))
);
assert_eq!(state.best_block_number, Some(1));
assert!(state.blocks.is_empty());
assert!(state.transactions.is_empty());
}
// <https://github.com/foundry-rs/foundry/issues/11176>
#[tokio::test(flavor = "multi_thread")]
async fn test_backward_compatibility_state_dump_deserialization_v1_2() {
let tmp = tempfile::tempdir().unwrap();
let old_state_file = tmp.path().join("old_state.json");
// A simple state dump with a single block containing one transaction of a Counter contract
// deployment.
let old_state_json = json!({
"block": {
"number": "0x1",
"coinbase": "0x0000000000000000000000000000000000000001",
"timestamp": "0x688c83b5",
"gas_limit": "0x1c9c380",
"basefee": "0x3b9aca00",
"difficulty": "0x0",
"prevrandao": "0xecc5f0af8ff6b65c14bfdac55ba9db870d89482eb2b87200c6d7e7cd3a3a5ad5",
"blob_excess_gas_and_price": {
"excess_blob_gas": 0,
"blob_gasprice": 1
}
},
"accounts": {
"0x0000000000000000000000000000000000000000": {
"nonce": 0,
"balance": "0x26481",
"code": "0x",
"storage": {}
},
"0x14dc79964da2c08b23698b3d3cc7ca32193d9955": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x15d34aaf54267db7d7c367839aaf71a00a2c6a65": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x23618e81e3f5cdf7f54c3d65f7fbc0abf5b21e8f": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x3c44cdddb6a900fa2b585dd299e03d12fa4293bc": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x4e59b44847b379578588920ca78fbf26c0b4956c": {
"nonce": 0,
"balance": "0x0",
"code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
"storage": {}
},
"0x5fbdb2315678afecb367f032d93f642f64180aa3": {
"nonce": 1,
"balance": "0x0",
"code": "0x608060405234801561000f575f5ffd5b506004361061003f575f3560e01c80633fb5c1cb146100435780638381f58a1461005f578063d09de08a1461007d575b5f5ffd5b61005d600480360381019061005891906100e4565b610087565b005b610067610090565b604051610074919061011e565b60405180910390f35b610085610095565b005b805f8190555050565b5f5481565b5f5f8154809291906100a690610164565b9190505550565b5f5ffd5b5f819050919050565b6100c3816100b1565b81146100cd575f5ffd5b50565b5f813590506100de816100ba565b92915050565b5f602082840312156100f9576100f86100ad565b5b5f610106848285016100d0565b91505092915050565b610118816100b1565b82525050565b5f6020820190506101315f83018461010f565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61016e826100b1565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036101a05761019f610137565b5b60018201905091905056fea264697066735822122040b6a3cd3ec8f890002f39a8719ebee029ba9bac3d7fa9d581d4712cfe9ffec264736f6c634300081e0033",
"storage": {}
},
"0x70997970c51812dc3a010c7d01b50e0d17dc79c8": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x90f79bf6eb2c4f870365e785982e1f101e93b906": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x976ea74026e726554db657fa54763abd0c3a0aa9": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0xa0ee7a142d267c1f36714e4a8f75612f20a79720": {
"nonce": 0,
"balance": "0x21e19e0c9bab2400000",
"code": "0x",
"storage": {}
},
"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": {
"nonce": 1,
"balance": "0x21e19e03b1e9e55d17f",
"code": "0x",
"storage": {}
}
},
"best_block_number": "0x1",
"blocks": [
{
"header": {
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x0",
"number": "0x0",
"gasLimit": "0x1c9c380",
"gasUsed": "0x0",
"timestamp": "0x688c83b0",
"extraData": "0x",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x3b9aca00",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"blobGasUsed": "0x0",
"excessBlobGas": "0x0",
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"transactions": [],
"ommers": []
},
{
"header": {
"parentHash": "0x25097583380d90c4ac42b454ed7d2f59450ed3a16fdcf7f7bd93295aa126a901",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x6e005b459ac9acefa5f47fd2d7ff8ca81a91794fdc5f7fbc3e2faeeaefe5d516",
"transactionsRoot": "0x59f0457ec18e2181c186f49d9ac911b33b5f4f55db5c494022147346bcfc9837",
"receiptsRoot": "0x88ac48b910f796aab7407814203b3a15a04a812f387e92efeccc92a2ecf809da",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x0",
"number": "0x1",
"gasLimit": "0x1c9c380",
"gasUsed": "0x26481",
"timestamp": "0x688c83b5",
"extraData": "0x",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x3b9aca00",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"blobGasUsed": "0x0",
"excessBlobGas": "0x0",
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"transactions": [
{
"transaction": {
"type": "0x2",
"chainId": "0x7a69",
"nonce": "0x0",
"gas": "0x31c41",
"maxFeePerGas": "0x77359401",
"maxPriorityFeePerGas": "0x1",
"to": null,
"value": "0x0",
"accessList": [],
"input": "0x6080604052348015600e575f5ffd5b506101e18061001c5f395ff3fe608060405234801561000f575f5ffd5b506004361061003f575f3560e01c80633fb5c1cb146100435780638381f58a1461005f578063d09de08a1461007d575b5f5ffd5b61005d600480360381019061005891906100e4565b610087565b005b610067610090565b604051610074919061011e565b60405180910390f35b610085610095565b005b805f8190555050565b5f5481565b5f5f8154809291906100a690610164565b9190505550565b5f5ffd5b5f819050919050565b6100c3816100b1565b81146100cd575f5ffd5b50565b5f813590506100de816100ba565b92915050565b5f602082840312156100f9576100f86100ad565b5b5f610106848285016100d0565b91505092915050565b610118816100b1565b82525050565b5f6020820190506101315f83018461010f565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61016e826100b1565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036101a05761019f610137565b5b60018201905091905056fea264697066735822122040b6a3cd3ec8f890002f39a8719ebee029ba9bac3d7fa9d581d4712cfe9ffec264736f6c634300081e0033",
"r": "0xa7398e28ca9a56b423cab87aeb3612378bac9c5684aaf778a78943f2637fd731",
"s": "0x583511da658f564253c8c0f9ee1820ef370f23556be504b304ac1292f869d9a0",
"yParity": "0x0",
"v": "0x0",
"hash": "0x9e4846328caa09cbe8086d11b7e115adf70390e79ff203d8e5f37785c2a890be"
},
"impersonated_sender": null
}
],
"ommers": []
}
],
"transactions": [
{
"info": {
"transaction_hash": "0x9e4846328caa09cbe8086d11b7e115adf70390e79ff203d8e5f37785c2a890be",
"transaction_index": 0,
"from": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
"to": null,
"contract_address": "0x5fbdb2315678afecb367f032d93f642f64180aa3",
"traces": [
{
"parent": null,
"children": [],
"idx": 0,
"trace": {
"depth": 0,
"success": true,
"caller": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
"address": "0x5fbdb2315678afecb367f032d93f642f64180aa3",
"maybe_precompile": false,
"selfdestruct_address": null,
"selfdestruct_refund_target": null,
"selfdestruct_transferred_value": null,
"kind": "CREATE",
"value": "0x0",
"data": "0x6080604052348015600e575f5ffd5b506101e18061001c5f395ff3fe608060405234801561000f575f5ffd5b506004361061003f575f3560e01c80633fb5c1cb146100435780638381f58a1461005f578063d09de08a1461007d575b5f5ffd5b61005d600480360381019061005891906100e4565b610087565b005b610067610090565b604051610074919061011e565b60405180910390f35b610085610095565b005b805f8190555050565b5f5481565b5f5f8154809291906100a690610164565b9190505550565b5f5ffd5b5f819050919050565b6100c3816100b1565b81146100cd575f5ffd5b50565b5f813590506100de816100ba565b92915050565b5f602082840312156100f9576100f86100ad565b5b5f610106848285016100d0565b91505092915050565b610118816100b1565b82525050565b5f6020820190506101315f83018461010f565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61016e826100b1565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036101a05761019f610137565b5b60018201905091905056fea264697066735822122040b6a3cd3ec8f890002f39a8719ebee029ba9bac3d7fa9d581d4712cfe9ffec264736f6c634300081e0033",
"output": "0x608060405234801561000f575f5ffd5b506004361061003f575f3560e01c80633fb5c1cb146100435780638381f58a1461005f578063d09de08a1461007d575b5f5ffd5b61005d600480360381019061005891906100e4565b610087565b005b610067610090565b604051610074919061011e565b60405180910390f35b610085610095565b005b805f8190555050565b5f5481565b5f5f8154809291906100a690610164565b9190505550565b5f5ffd5b5f819050919050565b6100c3816100b1565b81146100cd575f5ffd5b50565b5f813590506100de816100ba565b92915050565b5f602082840312156100f9576100f86100ad565b5b5f610106848285016100d0565b91505092915050565b610118816100b1565b82525050565b5f6020820190506101315f83018461010f565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61016e826100b1565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036101a05761019f610137565b5b60018201905091905056fea264697066735822122040b6a3cd3ec8f890002f39a8719ebee029ba9bac3d7fa9d581d4712cfe9ffec264736f6c634300081e0033",
"gas_used": 96345,
"gas_limit": 143385,
"status": "Return",
"steps": [],
"decoded": null
},
"logs": [],
"ordering": []
}
],
"exit": "Return",
"out": "0x608060405234801561000f575f5ffd5b506004361061003f575f3560e01c80633fb5c1cb146100435780638381f58a1461005f578063d09de08a1461007d575b5f5ffd5b61005d600480360381019061005891906100e4565b610087565b005b610067610090565b604051610074919061011e565b60405180910390f35b610085610095565b005b805f8190555050565b5f5481565b5f5f8154809291906100a690610164565b9190505550565b5f5ffd5b5f819050919050565b6100c3816100b1565b81146100cd575f5ffd5b50565b5f813590506100de816100ba565b92915050565b5f602082840312156100f9576100f86100ad565b5b5f610106848285016100d0565b91505092915050565b610118816100b1565b82525050565b5f6020820190506101315f83018461010f565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61016e826100b1565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036101a05761019f610137565b5b60018201905091905056fea264697066735822122040b6a3cd3ec8f890002f39a8719ebee029ba9bac3d7fa9d581d4712cfe9ffec264736f6c634300081e0033",
"nonce": 0,
"gas_used": 156801
},
"receipt": {
"type": "0x2",
"status": "0x1",
"cumulativeGasUsed": "0x26481",
"logs": [],
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
},
"block_hash": "0x313ea0d32d662434a55a20d7c58544e6baaea421b6eccf4b68392dec2a76d771",
"block_number": 1
}
],
"historical_states": null
});
// Write the old state to file.
foundry_common::fs::write_json_file(&old_state_file, &old_state_json).unwrap();
// Test deserializing the old state dump directly.
let deserialized_state: SerializableState = serde_json::from_value(old_state_json).unwrap();
// Verify the old state was loaded correctly with `coinbase` to `beneficiary` conversion.
let block_env = deserialized_state.block.unwrap();
assert_eq!(block_env.number, U256::from(1));
assert_eq!(block_env.beneficiary, address!("0000000000000000000000000000000000000001"));
assert_eq!(block_env.gas_limit, 0x1c9c380);
assert_eq!(block_env.basefee, 0x3b9aca00);
// Verify best_block_number hex string parsing.
assert_eq!(deserialized_state.best_block_number, Some(1));
// Verify account data was preserved.
assert_eq!(deserialized_state.accounts.len(), 13);
// Test specific accounts from the old dump.
let deployer_addr = "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266".parse().unwrap();
let deployer_account = deserialized_state.accounts.get(&deployer_addr).unwrap();
assert_eq!(deployer_account.nonce, 1);
assert_eq!(deployer_account.balance, U256::from_str("0x21e19e03b1e9e55d17f").unwrap());
// Test contract account.
let contract_addr = "0x5fbdb2315678afecb367f032d93f642f64180aa3".parse().unwrap();
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/pubsub.rs | crates/anvil/tests/it/pubsub.rs | //! tests for subscriptions
use crate::utils::{connect_pubsub, connect_pubsub_with_wallet};
use alloy_network::{EthereumWallet, TransactionBuilder};
use alloy_primitives::{Address, U256};
use alloy_provider::Provider;
use alloy_pubsub::Subscription;
use alloy_rpc_types::{Block as AlloyBlock, Filter, TransactionRequest};
use alloy_serde::WithOtherFields;
use alloy_sol_types::sol;
use anvil::{NodeConfig, spawn};
use futures::StreamExt;
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_new_heads() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = connect_pubsub(&handle.ws_endpoint()).await;
let blocks = provider.subscribe_blocks().await.unwrap();
// mine a block every 1 seconds
api.anvil_set_interval_mining(1).unwrap();
let blocks = blocks.into_stream().take(3).collect::<Vec<_>>().await;
let block_numbers = blocks.into_iter().map(|b| b.number).collect::<Vec<_>>();
assert_eq!(block_numbers, vec![1, 2, 3]);
}
sol!(
#[sol(rpc)]
EmitLogs,
"test-data/emit_logs.json"
);
// FIXME: Use .legacy() in tx when implemented in alloy
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_logs_legacy() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let provider = connect_pubsub(&handle.ws_endpoint()).await;
let msg = "First Message".to_string();
let contract_addr = EmitLogs::deploy_builder(provider.clone(), msg.clone())
.from(wallet.address())
.deploy()
.await
.unwrap();
let contract = EmitLogs::new(contract_addr, provider.clone());
let val = contract.getValue().call().await.unwrap();
assert_eq!(val, msg);
// subscribe to events from the contract
let filter = Filter::new().address(contract.address().to_owned());
let logs_sub = provider.subscribe_logs(&filter).await.unwrap();
// send a tx triggering an event
// FIXME: Use .legacy() in tx
let receipt = contract
.setValue("Next Message".to_string())
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let mut logs_sub = logs_sub.into_stream();
// get the emitted event
let log = logs_sub.next().await.unwrap();
// ensure the log in the receipt is the same as received via subscription stream
assert_eq!(receipt.inner.logs()[0], log);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_logs() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let provider = connect_pubsub(&handle.ws_endpoint()).await;
let msg = "First Message".to_string();
let contract_addr = EmitLogs::deploy_builder(provider.clone(), msg.clone())
.from(wallet.address())
.deploy()
.await
.unwrap();
let contract = EmitLogs::new(contract_addr, provider.clone());
let val = contract.getValue().call().await.unwrap();
assert_eq!(val, msg);
// subscribe to events from the contract
let filter = Filter::new().address(contract.address().to_owned());
let logs_sub = provider.subscribe_logs(&filter).await.unwrap();
// send a tx triggering an event
let receipt = contract
.setValue("Next Message".to_string())
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let mut logs_sub = logs_sub.into_stream();
// get the emitted event
let log = logs_sub.next().await.unwrap();
// ensure the log in the receipt is the same as received via subscription stream
assert_eq!(receipt.inner.logs()[0], log);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_logs_impersonated() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let provider =
connect_pubsub_with_wallet(&handle.ws_endpoint(), EthereumWallet::from(wallet.clone()))
.await;
// impersonate account
let impersonate = Address::random();
let funding = U256::from(1e18 as u64);
api.anvil_set_balance(impersonate, funding).await.unwrap();
api.anvil_impersonate_account(impersonate).await.unwrap();
let msg = "First Message".to_string();
let contract = EmitLogs::deploy(provider.clone(), msg.clone()).await.unwrap();
let _val = contract.getValue().call().await.unwrap();
// subscribe to events from the impersonated account
let filter = Filter::new().address(contract.address().to_owned());
let logs_sub = provider.subscribe_logs(&filter).await.unwrap();
// send a tx triggering an event
let data = contract.setValue("Next Message".to_string());
let data = data.calldata().clone();
let tx =
TransactionRequest::default().from(impersonate).to(*contract.address()).with_input(data);
let tx = WithOtherFields::new(tx);
let provider = handle.http_provider();
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let mut logs_sub = logs_sub.into_stream();
// get the emitted event
let log = logs_sub.next().await.unwrap();
// ensure the log in the receipt is the same as received via subscription stream
assert_eq!(receipt.inner.inner.logs()[0], log);
}
// FIXME: Use legacy() in tx when implemented in alloy
#[tokio::test(flavor = "multi_thread")]
async fn test_filters_legacy() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let provider =
connect_pubsub_with_wallet(&handle.ws_endpoint(), EthereumWallet::from(wallet.clone()))
.await;
let from = wallet.address();
let msg = "First Message".to_string();
// FIXME: Use legacy() in tx when implemented in alloy
let contract = EmitLogs::deploy(provider.clone(), msg.clone()).await.unwrap();
let stream = contract.ValueChanged_filter().subscribe().await.unwrap();
// send a tx triggering an event
// FIXME: Use legacy() in tx when implemented in alloy
let _receipt = contract
.setValue("Next Message".to_string())
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let mut log = stream.into_stream();
// get the emitted event
let (value_changed, _log) = log.next().await.unwrap().unwrap();
assert_eq!(value_changed.author, from);
assert_eq!(value_changed.oldValue, "First Message".to_string());
assert_eq!(value_changed.newValue, "Next Message".to_string());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_filters() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let provider =
connect_pubsub_with_wallet(&handle.ws_endpoint(), EthereumWallet::from(wallet.clone()))
.await;
let from = wallet.address();
let msg = "First Message".to_string();
let contract = EmitLogs::deploy(provider.clone(), msg.clone()).await.unwrap();
let stream = contract.ValueChanged_filter().subscribe().await.unwrap();
// send a tx triggering an event
let _receipt = contract
.setValue("Next Message".to_string())
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let mut log = stream.into_stream();
// get the emitted event
let (value_changed, _log) = log.next().await.unwrap().unwrap();
assert_eq!(value_changed.author, from);
assert_eq!(value_changed.oldValue, "First Message".to_string());
assert_eq!(value_changed.newValue, "Next Message".to_string());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_subscriptions() {
let (_api, handle) =
spawn(NodeConfig::test().with_blocktime(Some(std::time::Duration::from_secs(1)))).await;
let provider = connect_pubsub(&handle.ws_endpoint()).await;
let sub_id = provider.raw_request("eth_subscribe".into(), ["newHeads"]).await.unwrap();
let stream: Subscription<AlloyBlock> = provider.get_subscription(sub_id).await.unwrap();
let blocks = stream
.into_stream()
.take(3)
.collect::<Vec<_>>()
.await
.into_iter()
.map(|b| b.header.number)
.collect::<Vec<_>>();
assert_eq!(blocks, vec![1, 2, 3])
}
#[expect(clippy::disallowed_macros)]
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_new_heads_fast() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = connect_pubsub(&handle.ws_endpoint()).await;
let blocks = provider.subscribe_blocks().await.unwrap();
let mut blocks = blocks.into_stream();
let num = 1000u64;
let mut block_numbers = Vec::new();
for _ in 0..num {
api.mine_one().await;
let block_number = blocks.next().await.unwrap().number;
block_numbers.push(block_number);
}
println!("Collected {} blocks", block_numbers.len());
let numbers = (1..=num).collect::<Vec<_>>();
assert_eq!(block_numbers, numbers);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/api.rs | crates/anvil/tests/it/api.rs | //! general eth api tests
use crate::{
abi::{Multicall, SimpleStorage, VendingMachine},
utils::{connect_pubsub_with_wallet, http_provider, http_provider_with_signer},
};
use alloy_consensus::{SidecarBuilder, SignableTransaction, SimpleCoder, Transaction, TxEip1559};
use alloy_network::{
EthereumWallet, ReceiptResponse, TransactionBuilder, TransactionBuilder4844, TxSignerSync,
};
use alloy_primitives::{
Address, B256, ChainId, U256, b256, bytes,
map::{AddressHashMap, B256HashMap, HashMap},
};
use alloy_provider::Provider;
use alloy_rpc_types::{
BlockId, BlockNumberOrTag, BlockTransactions, request::TransactionRequest,
state::AccountOverride,
};
use alloy_serde::WithOtherFields;
use alloy_sol_types::SolCall;
use anvil::{CHAIN_ID, EthereumHardfork, NodeConfig, eth::api::CLIENT_VERSION, spawn};
use foundry_test_utils::rpc;
use futures::join;
use std::time::Duration;
#[tokio::test(flavor = "multi_thread")]
async fn can_get_block_number() {
let (api, handle) = spawn(NodeConfig::test()).await;
let block_num = api.block_number().unwrap();
assert_eq!(block_num, U256::from(0));
let provider = handle.http_provider();
let num = provider.get_block_number().await.unwrap();
assert_eq!(num, block_num.to::<u64>());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_dev_get_balance() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let genesis_balance = handle.genesis_balance();
for acc in handle.genesis_accounts() {
let balance = provider.get_balance(acc).await.unwrap();
assert_eq!(balance, genesis_balance);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_price() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let _ = provider.get_gas_price().await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_accounts() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let _ = provider.get_accounts().await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_client_version() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let version = provider.get_client_version().await.unwrap();
assert_eq!(CLIENT_VERSION, version);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_chain_id() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, CHAIN_ID);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_modify_chain_id() {
let (_api, handle) =
spawn(NodeConfig::test().with_chain_id(Some(ChainId::from(777_u64)))).await;
let provider = handle.http_provider();
let chain_id = provider.get_chain_id().await.unwrap();
assert_eq!(chain_id, 777);
let chain_id = provider.get_net_version().await.unwrap();
assert_eq!(chain_id, 777);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_network_id() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let chain_id = api.network_id().unwrap().unwrap();
assert_eq!(chain_id, CHAIN_ID.to_string());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_block_by_number() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let to = accounts[1].address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let val = handle.genesis_balance().checked_div(U256::from(2)).unwrap();
// send a dummy transaction
let tx = TransactionRequest::default().with_from(from).with_to(to).with_value(val);
let tx = WithOtherFields::new(tx);
provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
let block = provider.get_block(BlockId::number(1)).full().await.unwrap().unwrap();
assert_eq!(block.transactions.len(), 1);
let block = provider.get_block(BlockId::hash(block.header.hash)).full().await.unwrap().unwrap();
assert_eq!(block.transactions.len(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_pending_block() {
let (api, handle) = spawn(NodeConfig::test()).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let to = accounts[1].address();
let provider = connect_pubsub_with_wallet(&handle.http_endpoint(), signer).await;
let block = provider.get_block(BlockId::pending()).await.unwrap().unwrap();
assert_eq!(block.header.number, 1);
let num = provider.get_block_number().await.unwrap();
assert_eq!(num, 0);
api.anvil_set_auto_mine(false).await.unwrap();
let tx = TransactionRequest::default().with_from(from).with_to(to).with_value(U256::from(100));
let pending = provider.send_transaction(tx.clone()).await.unwrap().register().await.unwrap();
let num = provider.get_block_number().await.unwrap();
assert_eq!(num, 0);
let block = provider.get_block(BlockId::pending()).await.unwrap().unwrap();
assert_eq!(block.header.number, 1);
assert_eq!(block.transactions.len(), 1);
assert_eq!(block.transactions, BlockTransactions::Hashes(vec![*pending.tx_hash()]));
let block = provider.get_block(BlockId::pending()).full().await.unwrap().unwrap();
assert_eq!(block.header.number, 1);
assert_eq!(block.transactions.len(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_estimate_gas_with_undersized_max_fee_per_gas() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
api.anvil_set_auto_mine(true).await.unwrap();
let init_value = "toto".to_string();
let simple_storage_contract =
SimpleStorage::deploy(&provider, init_value.clone()).await.unwrap();
let undersized_max_fee_per_gas = 1;
let latest_block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
let latest_block_base_fee_per_gas = latest_block.header.base_fee_per_gas.unwrap();
assert!(undersized_max_fee_per_gas < latest_block_base_fee_per_gas);
let estimated_gas = simple_storage_contract
.setValue("new_value".to_string())
.max_fee_per_gas(undersized_max_fee_per_gas.into())
.from(wallet.address())
.estimate_gas()
.await
.unwrap();
assert!(estimated_gas > 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_call_on_pending_block() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let sender = wallet.address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let num = provider.get_block_number().await.unwrap();
assert_eq!(num, 0);
api.anvil_set_auto_mine(false).await.unwrap();
let _contract_pending = Multicall::deploy_builder(&provider)
.from(wallet.address())
.send()
.await
.unwrap()
.register()
.await
.unwrap();
let contract_address = sender.create(0);
let contract = Multicall::new(contract_address, &provider);
let num = provider.get_block_number().await.unwrap();
assert_eq!(num, 0);
// Ensure that we can get the block_number from the pending contract
let Multicall::aggregateReturn { blockNumber: ret_block_number, .. } =
contract.aggregate(vec![]).block(BlockId::pending()).call().await.unwrap();
assert_eq!(ret_block_number, U256::from(1));
let accounts: Vec<Address> = handle.dev_wallets().map(|w| w.address()).collect();
for i in 1..10 {
api.anvil_set_coinbase(accounts[i % accounts.len()]).await.unwrap();
api.evm_set_block_gas_limit(U256::from(30_000_000 + i)).unwrap();
api.anvil_mine(Some(U256::from(1)), None).await.unwrap();
tokio::time::sleep(Duration::from_millis(100)).await;
}
// Ensure that the right header values are set when calling a past block
for anvil_block_number in 1..(api.block_number().unwrap().to::<usize>() + 1) {
let block_number = BlockNumberOrTag::Number(anvil_block_number as u64);
let block = api.block_by_number(block_number).await.unwrap().unwrap();
let ret_timestamp = contract
.getCurrentBlockTimestamp()
.block(BlockId::number(anvil_block_number as u64))
.call()
.await
.unwrap();
assert_eq!(block.header.timestamp, ret_timestamp.to::<u64>());
let ret_gas_limit = contract
.getCurrentBlockGasLimit()
.block(BlockId::number(anvil_block_number as u64))
.call()
.await
.unwrap();
assert_eq!(block.header.gas_limit, ret_gas_limit.to::<u64>());
let ret_coinbase = contract
.getCurrentBlockCoinbase()
.block(BlockId::number(anvil_block_number as u64))
.call()
.await
.unwrap();
assert_eq!(block.header.beneficiary, ret_coinbase);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn can_call_with_undersized_max_fee_per_gas() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
api.anvil_set_auto_mine(true).await.unwrap();
let init_value = "toto".to_string();
let simple_storage_contract =
SimpleStorage::deploy(&provider, init_value.clone()).await.unwrap();
let latest_block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
let latest_block_base_fee_per_gas = latest_block.header.base_fee_per_gas.unwrap();
let undersized_max_fee_per_gas = 1;
assert!(undersized_max_fee_per_gas < latest_block_base_fee_per_gas);
let last_sender = simple_storage_contract
.lastSender()
.max_fee_per_gas(undersized_max_fee_per_gas.into())
.from(wallet.address())
.call()
.await
.unwrap();
assert_eq!(last_sender, Address::ZERO);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_call_with_state_override() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let account = wallet.address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
api.anvil_set_auto_mine(true).await.unwrap();
let multicall_contract = Multicall::deploy(&provider).await.unwrap();
let init_value = "toto".to_string();
let simple_storage_contract =
SimpleStorage::deploy(&provider, init_value.clone()).await.unwrap();
// Test the `balance` account override
let balance = U256::from(42u64);
let mut overrides = AddressHashMap::default();
overrides.insert(account, AccountOverride { balance: Some(balance), ..Default::default() });
let result = multicall_contract.getEthBalance(account).state(overrides).call().await.unwrap();
assert_eq!(result, balance);
// Test the `state_diff` account override
let mut state_diff = B256HashMap::default();
state_diff.insert(B256::ZERO, account.into_word());
let mut overrides = AddressHashMap::default();
overrides.insert(
*simple_storage_contract.address(),
AccountOverride {
// The `lastSender` is in the first storage slot
state_diff: Some(state_diff),
..Default::default()
},
);
let last_sender =
simple_storage_contract.lastSender().state(HashMap::default()).call().await.unwrap();
// No `sender` set without override
assert_eq!(last_sender, Address::ZERO);
let last_sender =
simple_storage_contract.lastSender().state(overrides.clone()).call().await.unwrap();
// `sender` *is* set with override
assert_eq!(last_sender, account);
let value = simple_storage_contract.getValue().state(overrides).call().await.unwrap();
// `value` *is not* changed with state-diff
assert_eq!(value, init_value);
// Test the `state` account override
let mut state = B256HashMap::default();
state.insert(B256::ZERO, account.into_word());
let mut overrides = AddressHashMap::default();
overrides.insert(
*simple_storage_contract.address(),
AccountOverride {
// The `lastSender` is in the first storage slot
state: Some(state),
..Default::default()
},
);
let last_sender =
simple_storage_contract.lastSender().state(overrides.clone()).call().await.unwrap();
// `sender` *is* set with override
assert_eq!(last_sender, account);
let value = simple_storage_contract.getValue().state(overrides).call().await.unwrap();
// `value` *is* changed with state
assert_eq!(value, "");
}
#[tokio::test(flavor = "multi_thread")]
async fn can_mine_while_mining() {
let (api, _) = spawn(NodeConfig::test()).await;
let total_blocks = 200;
let block_number =
api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap().header.number;
assert_eq!(block_number, 0);
let block = api.block_by_number(BlockNumberOrTag::Number(block_number)).await.unwrap().unwrap();
assert_eq!(block.header.number, 0);
let result = join!(
api.anvil_mine(Some(U256::from(total_blocks / 2)), None),
api.anvil_mine(Some(U256::from(total_blocks / 2)), None)
);
result.0.unwrap();
result.1.unwrap();
tokio::time::sleep(Duration::from_millis(100)).await;
let block_number =
api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap().header.number;
assert_eq!(block_number, total_blocks);
let block = api.block_by_number(BlockNumberOrTag::Number(block_number)).await.unwrap().unwrap();
assert_eq!(block.header.number, total_blocks);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_send_raw_tx_sync() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let provider = http_provider(&handle.http_endpoint());
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let from = wallets[0].address();
let mut tx = TxEip1559 {
max_fee_per_gas: eip1559_est.max_fee_per_gas,
max_priority_fee_per_gas: eip1559_est.max_priority_fee_per_gas,
gas_limit: 100000,
chain_id: 31337,
to: alloy_primitives::TxKind::Call(from),
input: bytes!("11112222"),
..Default::default()
};
let signature = wallets[1].sign_transaction_sync(&mut tx).unwrap();
let tx = tx.into_signed(signature);
let mut encoded = Vec::new();
tx.eip2718_encode(&mut encoded);
let receipt = api.send_raw_transaction_sync(encoded.into()).await.unwrap();
assert_eq!(receipt.from(), wallets[1].address());
assert_eq!(receipt.to(), tx.to());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_send_tx_sync() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let logger_bytecode = bytes!("66365f5f37365fa05f5260076019f3");
let from = wallets[0].address();
let tx = TransactionRequest::default()
.with_from(from)
.into_create()
.with_nonce(0)
.with_input(logger_bytecode);
let receipt = api.send_transaction_sync(WithOtherFields::new(tx)).await.unwrap();
assert_eq!(receipt.from(), wallets[0].address());
}
#[tokio::test(flavor = "multi_thread")]
#[ignore = "no debug_"]
async fn can_get_code_by_hash() {
let (api, _) =
spawn(NodeConfig::test().with_eth_rpc_url(Some(rpc::next_http_archive_rpc_url()))).await;
// The code hash for DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE
let code_hash = b256!("2fa86add0aed31f33a762c9d88e807c475bd51d0f52bd0955754b2608f7e4989");
let code = api.debug_code_by_hash(code_hash, None).await.unwrap();
assert_eq!(&code.unwrap(), foundry_evm::constants::DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_fills_chain_id() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let tx_req = TransactionRequest::default()
.with_from(from)
.with_to(Address::random())
.with_gas_limit(21_000);
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Should fill with the chain id from provider
assert!(filled.tx.chain_id().is_some());
assert_eq!(filled.tx.chain_id().unwrap(), CHAIN_ID);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_fills_nonce() {
let (api, handle) = spawn(NodeConfig::test()).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let to = accounts[1].address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
// Send a transaction to increment nonce
let tx = TransactionRequest::default().with_from(from).with_to(to).with_value(U256::from(100));
let tx = WithOtherFields::new(tx);
provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
// Now the account should have nonce 1
let tx_req = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_value(U256::from(1000))
.with_gas_limit(21_000);
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
assert_eq!(filled.tx.nonce(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_preserves_provided_fields() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let provided_nonce = 100u64;
let provided_gas_limit = 50_000u64;
let tx_req = TransactionRequest::default()
.with_from(from)
.with_to(Address::random())
.with_value(U256::from(1000))
.with_nonce(provided_nonce)
.with_gas_limit(provided_gas_limit);
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Should preserve the provided nonce and gas limit
assert_eq!(filled.tx.nonce(), provided_nonce);
assert_eq!(filled.tx.gas_limit(), provided_gas_limit);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_fills_all_missing_fields() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
// Create a simple transfer transaction with minimal fields
let tx_req = TransactionRequest::default().with_from(from).with_to(Address::random());
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Should fill all required fields and be EIP-1559
assert!(filled.tx.is_eip1559());
assert!(filled.tx.gas_limit() > 0);
assert!(filled.tx.max_fee_per_gas() > 0);
assert!(filled.tx.max_priority_fee_per_gas().is_some());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_eip4844_blob_fee() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
let (api, handle) = spawn(node_config).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let mut builder = SidecarBuilder::<SimpleCoder>::new();
builder.ingest(b"dummy blob");
let sidecar = builder.build().unwrap();
// EIP-4844 blob transaction with sidecar but no blob fee
let mut tx_req = TransactionRequest::default().with_from(from).with_to(Address::random());
tx_req.sidecar = Some(sidecar.into());
tx_req.transaction_type = Some(3); // EIP-4844
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Blob transaction should have max_fee_per_blob_gas filled
assert!(
filled.tx.max_fee_per_blob_gas().is_some(),
"max_fee_per_blob_gas should be filled for blob tx"
);
assert!(filled.tx.blob_versioned_hashes().is_some(), "blob_versioned_hashes should be present");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_eip4844_preserves_blob_fee() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
let (api, handle) = spawn(node_config).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let provided_blob_fee = 5_000_000u128;
let mut builder = SidecarBuilder::<SimpleCoder>::new();
builder.ingest(b"dummy blob");
let sidecar = builder.build().unwrap();
// EIP-4844 blob transaction with blob fee already set
let mut tx_req = TransactionRequest::default()
.with_from(from)
.with_to(Address::random())
.with_max_fee_per_blob_gas(provided_blob_fee);
tx_req.sidecar = Some(sidecar.into());
tx_req.transaction_type = Some(3); // EIP-4844
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Should preserve the provided blob fee
assert_eq!(
filled.tx.max_fee_per_blob_gas(),
Some(provided_blob_fee),
"should preserve provided max_fee_per_blob_gas"
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_non_blob_tx_no_blob_fee() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
// EIP-1559 transaction without blob fields
let mut tx_req = TransactionRequest::default().with_from(from).with_to(Address::random());
tx_req.transaction_type = Some(2); // EIP-1559
let filled = api.fill_transaction(WithOtherFields::new(tx_req)).await.unwrap();
// Non-blob transaction should NOT have blob fee filled
assert!(
filled.tx.max_fee_per_blob_gas().is_none(),
"max_fee_per_blob_gas should not be set for non-blob tx"
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fill_transaction_reverts_on_gas_estimation_failure() {
let (api, handle) = spawn(NodeConfig::test()).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
// Deploy VendingMachine contract
let contract = VendingMachine::deploy(&provider).await.unwrap();
let contract_address = *contract.address();
// Call buy function with insufficient ether
let tx_req = TransactionRequest::default()
.with_from(from)
.with_to(contract_address)
.with_input(VendingMachine::buyCall { amount: U256::from(10) }.abi_encode());
// fill_transaction should fail because gas estimation fails due to revert
let result = api.fill_transaction(WithOtherFields::new(tx_req)).await;
assert!(result.is_err(), "fill_transaction should return an error when gas estimation fails");
let error_message = result.unwrap_err().to_string();
assert!(
error_message.contains("execution reverted"),
"Error should indicate a revert, got: {error_message}"
);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/optimism.rs | crates/anvil/tests/it/optimism.rs | //! Tests for OP chain support.
use crate::utils::{http_provider, http_provider_with_signer};
use alloy_eips::eip2718::Encodable2718;
use alloy_network::{EthereumWallet, TransactionBuilder};
use alloy_primitives::{Address, Bloom, TxHash, TxKind, U256, b256};
use alloy_provider::Provider;
use alloy_rpc_types::{BlockId, TransactionRequest};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, eth::fees::INITIAL_BASE_FEE, spawn};
use foundry_evm_networks::NetworkConfigs;
use op_alloy_consensus::TxDeposit;
use op_alloy_rpc_types::OpTransactionFields;
use serde_json::{Value, json};
#[tokio::test(flavor = "multi_thread")]
async fn test_deposits_not_supported_if_optimism_disabled() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let accounts: Vec<_> = handle.dev_wallets().collect();
let from = accounts[0].address();
let to = accounts[1].address();
let tx = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_value(U256::from(1234))
.with_gas_limit(21000);
let op_fields = OpTransactionFields {
source_hash: Some(b256!(
"0x0000000000000000000000000000000000000000000000000000000000000000"
)),
mint: Some(0),
is_system_tx: Some(true),
deposit_receipt_version: None,
};
let other = serde_json::to_value(op_fields).unwrap().try_into().unwrap();
let tx = WithOtherFields { inner: tx, other };
let err = provider.send_transaction(tx).await.unwrap_err();
let s = err.to_string();
assert!(s.contains("op-stack deposit tx received but is not supported"), "{s:?}");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_send_value_deposit_transaction() {
// enable the Optimism flag
let (api, handle) =
spawn(NodeConfig::test().with_networks(NetworkConfigs::with_optimism())).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let to = accounts[1].address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let send_value = U256::from(1234);
let before_balance_to = provider.get_balance(to).await.unwrap();
let op_fields = OpTransactionFields {
source_hash: Some(b256!(
"0x0000000000000000000000000000000000000000000000000000000000000000"
)),
mint: Some(0),
is_system_tx: Some(true),
deposit_receipt_version: None,
};
let other = serde_json::to_value(op_fields).unwrap().try_into().unwrap();
let tx = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_value(send_value)
.with_gas_limit(21000);
let tx: WithOtherFields<TransactionRequest> = WithOtherFields { inner: tx, other };
let pending = provider.send_transaction(tx).await.unwrap().register().await.unwrap();
// mine block
api.evm_mine(None).await.unwrap();
let receipt =
provider.get_transaction_receipt(pending.tx_hash().to_owned()).await.unwrap().unwrap();
assert_eq!(receipt.from, from);
assert_eq!(receipt.to, Some(to));
// the recipient should have received the value
let after_balance_to = provider.get_balance(to).await.unwrap();
assert_eq!(after_balance_to, before_balance_to + send_value);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_send_value_raw_deposit_transaction() {
// enable the Optimism flag
let (api, handle) =
spawn(NodeConfig::test().with_networks(NetworkConfigs::with_optimism())).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
let from = accounts[0].address();
let to = accounts[1].address();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer.clone());
let send_value = U256::from(1234);
let before_balance_to = provider.get_balance(to).await.unwrap();
let tx = TransactionRequest::default()
.with_chain_id(31337)
.with_nonce(0)
.with_from(from)
.with_to(to)
.with_value(send_value)
.with_gas_limit(21_000)
.with_max_fee_per_gas(20_000_000_000)
.with_max_priority_fee_per_gas(1_000_000_000);
let op_fields = OpTransactionFields {
source_hash: Some(b256!(
"0x0000000000000000000000000000000000000000000000000000000000000000"
)),
mint: Some(0),
is_system_tx: Some(true),
deposit_receipt_version: None,
};
let other = serde_json::to_value(op_fields).unwrap().try_into().unwrap();
let tx = WithOtherFields { inner: tx, other };
let tx_envelope = tx.build(&signer).await.unwrap();
let mut tx_buffer = Vec::with_capacity(tx_envelope.encode_2718_len());
tx_envelope.encode_2718(&mut tx_buffer);
let tx_encoded = tx_buffer.as_slice();
let pending =
provider.send_raw_transaction(tx_encoded).await.unwrap().register().await.unwrap();
// mine block
api.evm_mine(None).await.unwrap();
let receipt =
provider.get_transaction_receipt(pending.tx_hash().to_owned()).await.unwrap().unwrap();
assert_eq!(receipt.from, from);
assert_eq!(receipt.to, Some(to));
// the recipient should have received the value
let after_balance_to = provider.get_balance(to).await.unwrap();
assert_eq!(after_balance_to, before_balance_to + send_value);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_deposit_transaction_hash_matches_sepolia() {
// enable the Optimism flag
let (_api, handle) =
spawn(NodeConfig::test().with_networks(NetworkConfigs::with_optimism())).await;
let accounts: Vec<_> = handle.dev_wallets().collect();
let signer: EthereumWallet = accounts[0].clone().into();
// https://sepolia-optimism.etherscan.io/tx/0xbf8b5f08c43e4b860715cd64fc0849bbce0d0ea20a76b269e7bc8886d112fca7
let tx_hash: TxHash = "0xbf8b5f08c43e4b860715cd64fc0849bbce0d0ea20a76b269e7bc8886d112fca7"
.parse::<TxHash>()
.unwrap();
// https://sepolia-optimism.etherscan.io/getRawTx?tx=0xbf8b5f08c43e4b860715cd64fc0849bbce0d0ea20a76b269e7bc8886d112fca7
let raw_deposit_tx = alloy_primitives::hex::decode(
"7ef861a0dfd7ae78bf3c414cfaa77f13c0205c82eb9365e217b2daa3448c3156b69b27ac94778f2146f48179643473b82931c4cd7b8f153efd94778f2146f48179643473b82931c4cd7b8f153efd872386f26fc10000872386f26fc10000830186a08080",
)
.unwrap();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer.clone());
let receipt = provider
.send_raw_transaction(raw_deposit_tx.as_slice())
.await
.unwrap()
.get_receipt()
.await
.unwrap();
assert_eq!(receipt.transaction_hash, tx_hash);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_deposit_tx_checks_sufficient_funds_after_applying_deposited_value() {
// enable the Optimism flag
let (_api, handle) =
spawn(NodeConfig::test().with_networks(NetworkConfigs::with_optimism())).await;
let provider = http_provider(&handle.http_endpoint());
let sender = Address::random();
let recipient = Address::random();
let send_value = 1_000_000_000_u128;
let sender_prev_balance = provider.get_balance(sender).await.unwrap();
assert_eq!(sender_prev_balance, U256::from(0));
let recipient_prev_balance = provider.get_balance(recipient).await.unwrap();
assert_eq!(recipient_prev_balance, U256::from(0));
let deposit_tx = TxDeposit {
source_hash: b256!("0x0000000000000000000000000000000000000000000000000000000000000000"),
from: sender,
to: TxKind::Call(recipient),
mint: send_value,
value: U256::from(send_value),
gas_limit: 21_000,
is_system_transaction: false,
input: Vec::new().into(),
};
let mut tx_buffer = Vec::new();
deposit_tx.encode_2718(&mut tx_buffer);
provider.send_raw_transaction(&tx_buffer).await.unwrap().get_receipt().await.unwrap();
let sender_new_balance = provider.get_balance(sender).await.unwrap();
// sender should've sent the entire deposited value to recipient
assert_eq!(sender_new_balance, U256::from(0));
let recipient_new_balance = provider.get_balance(recipient).await.unwrap();
// recipient should've received the entire deposited value
assert_eq!(recipient_new_balance, U256::from(send_value));
}
#[test]
fn preserves_op_fields_in_convert_to_anvil_receipt() {
let receipt_json = json!({
"status": "0x1",
"cumulativeGasUsed": "0x74e483",
"logs": [],
"logsBloom": Bloom::default(),
"type": "0x2",
"transactionHash": "0x91181b0dca3b29aa136eeb2f536be5ce7b0aebc949be1c44b5509093c516097d",
"transactionIndex": "0x10",
"blockHash": "0x54bafb12e8cea9bb355fbf03a4ac49e42a2a1a80fa6cf4364b342e2de6432b5d",
"blockNumber": "0x7b1ab93",
"gasUsed": "0xc222",
"effectiveGasPrice": "0x18961",
"from": "0x2d815240a61731c75fa01b2793e1d3ed09f289d0",
"to": "0x4200000000000000000000000000000000000000",
"contractAddress": Value::Null,
"l1BaseFeeScalar": "0x146b",
"l1BlobBaseFee": "0x6a83078",
"l1BlobBaseFeeScalar": "0xf79c5",
"l1Fee": "0x51a9af7fd3",
"l1GasPrice": "0x972fe4acc",
"l1GasUsed": "0x640",
});
let receipt: alloy_network::AnyTransactionReceipt =
serde_json::from_value(receipt_json).expect("valid receipt json");
let converted =
foundry_primitives::FoundryTxReceipt::try_from(receipt).expect("conversion should succeed");
let converted_json = serde_json::to_value(&converted).expect("serialize to json");
for (key, expected) in [
("l1Fee", "0x51a9af7fd3"),
("l1GasPrice", "0x972fe4acc"),
("l1GasUsed", "0x640"),
("l1BaseFeeScalar", "0x146b"),
("l1BlobBaseFee", "0x6a83078"),
("l1BlobBaseFeeScalar", "0xf79c5"),
] {
let got = converted_json.get(key).and_then(Value::as_str);
assert_eq!(got, Some(expected), "field `{key}` mismatch");
}
}
const GAS_TRANSFER: u64 = 21_000;
/// Test that Optimism uses Canyon base fee params instead of Ethereum params.
///
/// Optimism Canyon uses different EIP-1559 parameters:
/// - elasticity_multiplier: 6 (vs Ethereum's 2)
/// - base_fee_max_change_denominator: 250 (vs Ethereum's 8)
///
/// This means with a full block:
/// - Ethereum: base_fee increases by base_fee * 1 / 8 = 12.5%
/// - Optimism: base_fee increases by base_fee * 5 / 250 = 2%
#[tokio::test(flavor = "multi_thread")]
async fn test_optimism_base_fee_params() {
// Spawn an Optimism node with a gas limit equal to one transfer (full block scenario)
let (_api, handle) = spawn(
NodeConfig::test()
.with_networks(NetworkConfigs::with_optimism())
.with_base_fee(Some(INITIAL_BASE_FEE))
.with_gas_limit(Some(GAS_TRANSFER)),
)
.await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let tx = TransactionRequest::default().to(Address::random()).with_value(U256::from(1337));
let tx = WithOtherFields::new(tx);
// Send first transaction to fill the block
provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
let base_fee = provider
.get_block(BlockId::latest())
.await
.unwrap()
.unwrap()
.header
.base_fee_per_gas
.unwrap();
// Send second transaction to fill the next block
provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
let next_base_fee = provider
.get_block(BlockId::latest())
.await
.unwrap()
.unwrap()
.header
.base_fee_per_gas
.unwrap();
assert!(next_base_fee > base_fee, "base fee should increase with full block");
// Optimism Canyon formula: base_fee * (elasticity - 1) / denominator = base_fee * 5 / 250
// = INITIAL_BASE_FEE * 5 / 250 = 1_000_000_000 * 5 / 250 = 20_000_000
//
// Note: Ethereum would be INITIAL_BASE_FEE + 125_000_000 (12.5% increase)
let expected_op_increase = INITIAL_BASE_FEE * 5 / 250; // 2% increase = 20_000_000
assert_eq!(
next_base_fee,
INITIAL_BASE_FEE + expected_op_increase,
"Optimism should use Canyon base fee params (2% max increase), not Ethereum's (12.5%)"
);
// Explicitly verify it's NOT using Ethereum params (which would give 12.5% increase)
let ethereum_increase = INITIAL_BASE_FEE / 8; // 125_000_000
assert_ne!(
next_base_fee,
INITIAL_BASE_FEE + ethereum_increase,
"Should not be using Ethereum base fee params"
);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/traces.rs | crates/anvil/tests/it/traces.rs | use std::collections::HashMap;
use crate::{
abi::{Multicall, SimpleStorage},
fork::fork_config,
utils::http_provider_with_signer,
};
use alloy_eips::BlockId;
use alloy_network::{EthereumWallet, TransactionBuilder};
use alloy_primitives::{
Address, Bytes, U256,
hex::{self, FromHex},
};
use alloy_provider::{
Provider,
ext::{DebugApi, TraceApi},
};
use alloy_rpc_types::{
TransactionRequest,
state::StateOverride,
trace::{
filter::{TraceFilter, TraceFilterMode},
geth::{
AccountState, CallConfig, GethDebugBuiltInTracerType, GethDebugTracerType,
GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, PreStateConfig,
PreStateFrame,
},
parity::{Action, LocalizedTransactionTrace},
},
};
use alloy_serde::WithOtherFields;
use alloy_sol_types::sol;
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
#[tokio::test(flavor = "multi_thread")]
async fn test_get_transfer_parity_traces() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
// specify the `from` field so that the client knows which account to use
let tx = TransactionRequest::default().to(to).value(amount).from(from);
let tx = WithOtherFields::new(tx);
// broadcast it via the eth_sendTransaction API
let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let traces = provider.trace_transaction(tx.transaction_hash).await.unwrap();
assert!(!traces.is_empty());
match traces[0].trace.action {
Action::Call(ref call) => {
assert_eq!(call.from, from);
assert_eq!(call.to, to);
assert_eq!(call.value, amount);
}
_ => unreachable!("unexpected action"),
}
let num = provider.get_block_number().await.unwrap();
let block_traces = provider.trace_block(num.into()).await.unwrap();
assert!(!block_traces.is_empty());
assert_eq!(traces, block_traces);
}
sol!(
#[sol(rpc, bytecode = "0x6080604052348015600f57600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060a48061005e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806375fc8e3c14602d575b600080fd5b60336035565b005b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16fffea26469706673582212205006867290df97c54f2df1cb94fc081197ab670e2adf5353071d2ecce1d694b864736f6c634300080d0033")]
contract SuicideContract {
address payable private owner;
constructor() public {
owner = payable(msg.sender);
}
function goodbye() public {
selfdestruct(owner);
}
}
);
#[tokio::test(flavor = "multi_thread")]
async fn test_parity_suicide_trace() {
let (_api, handle) =
spawn(NodeConfig::test().with_hardfork(Some(EthereumHardfork::Shanghai.into()))).await;
let provider = handle.ws_provider();
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let owner = wallets[0].address();
let destructor = wallets[1].address();
let contract_addr =
SuicideContract::deploy_builder(provider.clone()).from(owner).deploy().await.unwrap();
let contract = SuicideContract::new(contract_addr, provider.clone());
let call = contract.goodbye().from(destructor);
let call = call.send().await.unwrap();
let tx = call.get_receipt().await.unwrap();
let traces = handle.http_provider().trace_transaction(tx.transaction_hash).await.unwrap();
assert!(!traces.is_empty());
assert!(traces[1].trace.action.is_selfdestruct());
}
sol!(
#[sol(rpc, bytecode = "0x6080604052348015600f57600080fd5b50336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555060a48061005e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806375fc8e3c14602d575b600080fd5b60336035565b005b60008054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16fffea26469706673582212205006867290df97c54f2df1cb94fc081197ab670e2adf5353071d2ecce1d694b864736f6c634300080d0033")]
contract DebugTraceContract {
address payable private owner;
constructor() public {
owner = payable(msg.sender);
}
function goodbye() public {
selfdestruct(owner);
}
}
);
#[tokio::test(flavor = "multi_thread")]
async fn test_transfer_debug_trace_call() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let deployer: EthereumWallet = wallets[0].clone().into();
let provider = http_provider_with_signer(&handle.http_endpoint(), deployer);
let contract_addr = DebugTraceContract::deploy_builder(provider.clone())
.from(wallets[0].clone().address())
.deploy()
.await
.unwrap();
let caller: EthereumWallet = wallets[1].clone().into();
let caller_provider = http_provider_with_signer(&handle.http_endpoint(), caller);
let contract = DebugTraceContract::new(contract_addr, caller_provider);
let call = contract.goodbye().from(wallets[1].address());
let calldata = call.calldata().to_owned();
let tx = TransactionRequest::default()
.from(wallets[1].address())
.to(*contract.address())
.with_input(calldata);
let traces = handle
.http_provider()
.debug_trace_call(
WithOtherFields::new(tx),
BlockId::latest(),
GethDebugTracingCallOptions::default(),
)
.await
.unwrap();
match traces {
GethTrace::Default(default_frame) => {
assert!(!default_frame.failed);
}
_ => {
unreachable!()
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_tracer_debug_trace_call() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let deployer: EthereumWallet = wallets[0].clone().into();
let provider = http_provider_with_signer(&handle.http_endpoint(), deployer);
let multicall_contract = Multicall::deploy(&provider).await.unwrap();
let simple_storage_contract =
SimpleStorage::deploy(&provider, "init value".to_string()).await.unwrap();
let set_value = simple_storage_contract.setValue("bar".to_string());
let set_value_calldata = set_value.calldata();
let internal_call_tx_builder = multicall_contract.aggregate(vec![Multicall::Call {
target: *simple_storage_contract.address(),
callData: set_value_calldata.to_owned(),
}]);
let internal_call_tx_calldata = internal_call_tx_builder.calldata().to_owned();
// calling SimpleStorage contract through Multicall should result in an internal call
let internal_call_tx = TransactionRequest::default()
.from(wallets[1].address())
.to(*multicall_contract.address())
.with_input(internal_call_tx_calldata);
let internal_call_tx_traces = handle
.http_provider()
.debug_trace_call(
WithOtherFields::new(internal_call_tx.clone()),
BlockId::latest(),
GethDebugTracingCallOptions::default().with_tracing_options(
GethDebugTracingOptions::default()
.with_tracer(GethDebugTracerType::from(GethDebugBuiltInTracerType::CallTracer))
.with_call_config(CallConfig::default().with_log()),
),
)
.await
.unwrap();
match internal_call_tx_traces {
GethTrace::CallTracer(call_frame) => {
assert!(call_frame.calls.len() == 1);
assert!(
call_frame.calls.first().unwrap().to.unwrap() == *simple_storage_contract.address()
);
assert!(call_frame.calls.first().unwrap().logs.len() == 1);
}
_ => {
unreachable!()
}
}
// only_top_call option - should not return any internal calls
let internal_call_only_top_call_tx_traces = handle
.http_provider()
.debug_trace_call(
WithOtherFields::new(internal_call_tx.clone()),
BlockId::latest(),
GethDebugTracingCallOptions::default().with_tracing_options(
GethDebugTracingOptions::default()
.with_tracer(GethDebugTracerType::from(GethDebugBuiltInTracerType::CallTracer))
.with_call_config(CallConfig::default().with_log().only_top_call()),
),
)
.await
.unwrap();
match internal_call_only_top_call_tx_traces {
GethTrace::CallTracer(call_frame) => {
assert!(call_frame.calls.is_empty());
}
_ => {
unreachable!()
}
}
// directly calling the SimpleStorage contract should not result in any internal calls
let direct_call_tx = TransactionRequest::default()
.from(wallets[1].address())
.to(*simple_storage_contract.address())
.with_input(set_value_calldata.to_owned());
let direct_call_tx_traces = handle
.http_provider()
.debug_trace_call(
WithOtherFields::new(direct_call_tx),
BlockId::latest(),
GethDebugTracingCallOptions::default().with_tracing_options(
GethDebugTracingOptions::default()
.with_tracer(GethDebugTracerType::from(GethDebugBuiltInTracerType::CallTracer))
.with_call_config(CallConfig::default().with_log()),
),
)
.await
.unwrap();
match direct_call_tx_traces {
GethTrace::CallTracer(call_frame) => {
assert!(call_frame.calls.is_empty());
assert!(call_frame.to.unwrap() == *simple_storage_contract.address());
assert!(call_frame.logs.len() == 1);
}
_ => {
unreachable!()
}
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_debug_trace_call_state_override() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let tx = TransactionRequest::default()
.from(wallets[1].address())
.to("0x1234567890123456789012345678901234567890".parse().unwrap());
let override_json = r#"{
"0x1234567890123456789012345678901234567890": {
"balance": "0x01",
"code": "0x30315f5260205ff3"
}
}"#;
let state_override: StateOverride = serde_json::from_str(override_json).unwrap();
let tx_traces = handle
.http_provider()
.debug_trace_call(
WithOtherFields::new(tx.clone()),
BlockId::latest(),
GethDebugTracingCallOptions::default()
.with_tracing_options(GethDebugTracingOptions::default())
.with_state_overrides(state_override),
)
.await
.unwrap();
match tx_traces {
GethTrace::Default(trace_res) => {
assert_eq!(
trace_res.return_value,
Bytes::from_hex("0000000000000000000000000000000000000000000000000000000000000001")
.unwrap()
);
}
_ => {
unreachable!()
}
}
}
// <https://github.com/foundry-rs/foundry/issues/2656>
// Replays a historical mainnet transaction on a fresh fork and checks that the
// trace tree returned by `trace_transaction` agrees with a reference trace
// captured from a live node: same `traceAddress` paths, subtrace counts, and
// call endpoints for every frame.
#[tokio::test(flavor = "multi_thread")]
async fn test_trace_address_fork() {
// Fork one block before the original transaction's block so it can be replayed.
let (api, handle) = spawn(fork_config().with_fork_block_number(Some(15291050u64))).await;
let provider = handle.http_provider();
// Calldata, sender, and recipient of the original mainnet transaction.
let input = hex::decode("43bcfab60000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000e0bd811c8769a824b00000000000000000000000000000000000000000000000e0ae9925047d8440b60000000000000000000000002e4777139254ff76db957e284b186a4507ff8c67").unwrap();
let from: Address = "0x2e4777139254ff76db957e284b186a4507ff8c67".parse().unwrap();
let to: Address = "0xe2f2a5c287993345a840db3b0845fbc70f5935a5".parse().unwrap();
let tx = TransactionRequest::default()
.to(to)
.from(from)
.with_input::<Bytes>(input.into())
.with_gas_limit(300_000);
let tx = WithOtherFields::new(tx);
// Impersonate the historical sender so the tx can be submitted without its key.
api.anvil_impersonate_account(from).await.unwrap();
let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let traces = provider.trace_transaction(tx.transaction_hash).await.unwrap();
assert!(!traces.is_empty());
// The root frame must be a call from the impersonated sender to the target.
match traces[0].trace.action {
Action::Call(ref call) => {
assert_eq!(call.from, from);
assert_eq!(call.to, to);
}
_ => unreachable!("unexpected action"),
}
// Reference traces of the original transaction, as served by a mainnet node.
let json = serde_json::json!([
{
"action": {
"callType": "call",
"from": "0x2e4777139254ff76db957e284b186a4507ff8c67",
"gas": "0x262b3",
"input": "0x43bcfab60000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000e0bd811c8769a824b00000000000000000000000000000000000000000000000e0ae9925047d8440b60000000000000000000000002e4777139254ff76db957e284b186a4507ff8c67",
"to": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x2131b",
"output": "0x0000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d3"
},
"subtraces": 1,
"traceAddress": [],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "delegatecall",
"from": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"gas": "0x23d88",
"input": "0x43bcfab60000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000e0bd811c8769a824b00000000000000000000000000000000000000000000000e0ae9925047d8440b60000000000000000000000002e4777139254ff76db957e284b186a4507ff8c67",
"to": "0x15b2838cd28cc353afbe59385db3f366d8945aee",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x1f6e1",
"output": "0x0000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d3"
},
"subtraces": 2,
"traceAddress": [0],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "staticcall",
"from": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"gas": "0x192ed",
"input": "0x50494dc000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000e0b1ff65617f654b2f00000000000000000000000000000000000000000000000000000000000061a800000000000000000000000000000000000000000000000000b1a2bc2ec5000000000000000000000000000000000000000000000000000006f05b59d3b2000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000005f5e1000000000000000000000000000000000000000000000414ec22973db48fd3a3370000000000000000000000000000000000000000000000056bc75e2d6310000000000000000000000000000000000000000000000000000000000a7314a9ba5c0000000000000000000000000000000000000000000000000000000005f5e100000000000000000000000000000000000000000000095f783edc5a5dabcb4ba70000000000000000000000000000000000000000000000056bc75e2d6310000000000000000000000000000000000000000000000000000000000a3f42df4dab",
"to": "0xca480d596e6717c95a62a4dc1bd4fbd7b7e7d705",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x661a",
"output": "0x0000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d3"
},
"subtraces": 0,
"traceAddress": [0, 0],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "delegatecall",
"from": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"gas": "0xd2dc",
"input": "0x4e331a540000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d30000000000000000000000006b175474e89094c44da98b954eedeac495271d0f000000000000000000000000a2a3cae63476891ab2d640d9a5a800755ee79d6e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000005f5e100000000000000000000000000000000000000000000095f783edc5a5dabcb4ba70000000000000000000000002e4777139254ff76db957e284b186a4507ff8c6700000000000000000000000000000000000000000000f7be2b91f8a2e2df496e",
"to": "0x1e91f826fa8aa4fa4d3f595898af3a64dd188848",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x7617",
"output": "0x"
},
"subtraces": 2,
"traceAddress": [0, 1],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "staticcall",
"from": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"gas": "0xbf50",
"input": "0x70a08231000000000000000000000000a2a3cae63476891ab2d640d9a5a800755ee79d6e",
"to": "0x6b175474e89094c44da98b954eedeac495271d0f",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0xa2a",
"output": "0x0000000000000000000000000000000000000000000020fe99f8898600d94750"
},
"subtraces": 0,
"traceAddress": [0, 1, 0],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "call",
"from": "0xe2f2a5c287993345a840db3b0845fbc70f5935a5",
"gas": "0xa92a",
"input": "0xa4e285950000000000000000000000002e4777139254ff76db957e284b186a4507ff8c670000000000000000000000006b175474e89094c44da98b954eedeac495271d0f0000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d3",
"to": "0xa2a3cae63476891ab2d640d9a5a800755ee79d6e",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x4ed3",
"output": "0x"
},
"subtraces": 1,
"traceAddress": [0, 1, 1],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
},
{
"action": {
"callType": "call",
"from": "0xa2a3cae63476891ab2d640d9a5a800755ee79d6e",
"gas": "0x8c90",
"input": "0xa9059cbb0000000000000000000000002e4777139254ff76db957e284b186a4507ff8c670000000000000000000000000000000000000000000000e0e82ca52ec6e6a4d3",
"to": "0x6b175474e89094c44da98b954eedeac495271d0f",
"value": "0x0"
},
"blockHash": "0xa47c8f1d8c284cb614e9c8e10d260b33eae16b1957a83141191bc335838d7e29",
"blockNumber": 15291051,
"result": {
"gasUsed": "0x2b42",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001"
},
"subtraces": 0,
"traceAddress": [0, 1, 1, 0],
"transactionHash": "0x3255cce7312e9c4470e1a1883be13718e971f6faafb96199b8bd75e5b7c39e3a",
"transactionPosition": 19,
"type": "call"
}
]);
let expected_traces: Vec<LocalizedTransactionTrace> = serde_json::from_value(json).unwrap();
// test matching traceAddress
traces.into_iter().zip(expected_traces).for_each(|(a, b)| {
assert_eq!(a.trace.trace_address, b.trace.trace_address);
assert_eq!(a.trace.subtraces, b.trace.subtraces);
match (a.trace.action, b.trace.action) {
(Action::Call(a), Action::Call(b)) => {
assert_eq!(a.from, b.from);
assert_eq!(a.to, b.to);
}
_ => unreachable!("unexpected action"),
}
})
}
// <https://github.com/foundry-rs/foundry/issues/2705>
// <https://etherscan.io/tx/0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845>
#[tokio::test(flavor = "multi_thread")]
async fn test_trace_address_fork2() {
let (api, handle) = spawn(fork_config().with_fork_block_number(Some(15314401u64))).await;
let provider = handle.http_provider();
let input = hex::decode("30000003000000000000000000000000adda1059a6c6c102b0fa562b9bb2cb9a0de5b1f4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a300000004fffffffffffffffffffffffffffffffffffffffffffff679dc91ecfe150fb980c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f4d2888d29d722226fafa5d9b24f9164c092421e000bb8000000000000004319b52bf08b65295d49117e790000000000000000000000000000000000000000000000008b6d9e8818d6141f000000000000000000000000000000000000000000000000000000086a23af210000000000000000000000000000000000000000000000000000000000").unwrap();
let from: Address = "0xa009fa1ac416ec02f6f902a3a4a584b092ae6123".parse().unwrap();
let to: Address = "0x99999999d116ffa7d76590de2f427d8e15aeb0b8".parse().unwrap();
let tx = TransactionRequest::default()
.to(to)
.from(from)
.with_input::<Bytes>(input.into())
.with_gas_limit(350_000);
let tx = WithOtherFields::new(tx);
api.anvil_impersonate_account(from).await.unwrap();
let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let status = tx.inner.inner.inner.receipt.status.coerce_status();
assert!(status);
let traces = provider.trace_transaction(tx.transaction_hash).await.unwrap();
assert!(!traces.is_empty());
match traces[0].trace.action {
Action::Call(ref call) => {
assert_eq!(call.from, from);
assert_eq!(call.to, to);
}
_ => unreachable!("unexpected action"),
}
let json = serde_json::json!([
{
"action": {
"from": "0xa009fa1ac416ec02f6f902a3a4a584b092ae6123",
"callType": "call",
"gas": "0x4fabc",
"input": "0x30000003000000000000000000000000adda1059a6c6c102b0fa562b9bb2cb9a0de5b1f4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a300000004fffffffffffffffffffffffffffffffffffffffffffff679dc91ecfe150fb980c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f4d2888d29d722226fafa5d9b24f9164c092421e000bb8000000000000004319b52bf08b65295d49117e790000000000000000000000000000000000000000000000008b6d9e8818d6141f000000000000000000000000000000000000000000000000000000086a23af210000000000000000000000000000000000000000000000000000000000",
"to": "0x99999999d116ffa7d76590de2f427d8e15aeb0b8",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0x1d51b",
"output": "0x"
},
"subtraces": 1,
"traceAddress": [],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x99999999d116ffa7d76590de2f427d8e15aeb0b8",
"callType": "delegatecall",
"gas": "0x4d594",
"input": "0x00000004fffffffffffffffffffffffffffffffffffffffffffff679dc91ecfe150fb980c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f4d2888d29d722226fafa5d9b24f9164c092421e000bb8000000000000004319b52bf08b65295d49117e790000000000000000000000000000000000000000000000008b6d9e8818d6141f000000000000000000000000000000000000000000000000000000086a23af21",
"to": "0xadda1059a6c6c102b0fa562b9bb2cb9a0de5b1f4",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0x1c35f",
"output": "0x"
},
"subtraces": 3,
"traceAddress": [0],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x99999999d116ffa7d76590de2f427d8e15aeb0b8",
"callType": "call",
"gas": "0x4b6d6",
"input": "0x16b2da82000000000000000000000000000000000000000000000000000000086a23af21",
"to": "0xd1663cfb8ceaf22039ebb98914a8c98264643710",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0xd6d",
"output": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"subtraces": 0,
"traceAddress": [0, 0],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x99999999d116ffa7d76590de2f427d8e15aeb0b8",
"callType": "staticcall",
"gas": "0x49c35",
"input": "0x3850c7bd",
"to": "0x4b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0xa88",
"output": "0x000000000000000000000000000000000000004319b52bf08b65295d49117e7900000000000000000000000000000000000000000000000000000000000148a0000000000000000000000000000000000000000000000000000000000000010e000000000000000000000000000000000000000000000000000000000000012c000000000000000000000000000000000000000000000000000000000000012c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"
},
"subtraces": 0,
"traceAddress": [0, 1],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x99999999d116ffa7d76590de2f427d8e15aeb0b8",
"callType": "call",
"gas": "0x48d01",
"input": "0x128acb0800000000000000000000000099999999d116ffa7d76590de2f427d8e15aeb0b80000000000000000000000000000000000000000000000000000000000000001fffffffffffffffffffffffffffffffffffffffffffff679dc91ecfe150fb98000000000000000000000000000000000000000000000000000000001000276a400000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002bc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2f4d2888d29d722226fafa5d9b24f9164c092421e000bb8000000000000000000000000000000000000000000",
"to": "0x4b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0x18c20",
"output": "0x0000000000000000000000000000000000000000000000008b5116525f9edc3efffffffffffffffffffffffffffffffffffffffffffff679dc91ecfe150fb980"
},
"subtraces": 4,
"traceAddress": [0, 2],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x4b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"callType": "call",
"gas": "0x3802a",
"input": "0xa9059cbb00000000000000000000000099999999d116ffa7d76590de2f427d8e15aeb0b8000000000000000000000000000000000000000000000986236e1301eaf04680",
"to": "0xf4d2888d29d722226fafa5d9b24f9164c092421e",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0x31b6",
"output": "0x0000000000000000000000000000000000000000000000000000000000000001"
},
"subtraces": 0,
"traceAddress": [0, 2, 0],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x4b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"callType": "staticcall",
"gas": "0x34237",
"input": "0x70a082310000000000000000000000004b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"to": "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
"value": "0x0"
},
"blockHash": "0xf689ba7749648b8c5c8f5eedd73001033f0aed7ea50b7c81048ad1533b8d3d73",
"blockNumber": 15314402,
"result": {
"gasUsed": "0x9e6",
"output": "0x000000000000000000000000000000000000000000000091cda6c1ce33e53b89"
},
"subtraces": 0,
"traceAddress": [0, 2, 1],
"transactionHash": "0x2d951c5c95d374263ca99ad9c20c9797fc714330a8037429a3aa4c83d456f845",
"transactionPosition": 289,
"type": "call"
},
{
"action": {
"from": "0x4b5ab61593a2401b1075b90c04cbcdd3f87ce011",
"callType": "call",
"gas": "0x3357e",
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/wsapi.rs | crates/anvil/tests/it/wsapi.rs | //! general eth api tests with websocket provider
use alloy_primitives::U256;
use alloy_provider::Provider;
use anvil::{NodeConfig, spawn};
// A freshly spawned node starts at genesis: the in-process API and a websocket
// provider must both report block height 0.
#[tokio::test(flavor = "multi_thread")]
async fn can_get_block_number_ws() {
    let (api, handle) = spawn(NodeConfig::test()).await;

    // Height straight from the in-process API handle.
    let expected = api.block_number().unwrap();
    assert_eq!(expected, U256::ZERO);

    // The same height must be observable over the websocket transport.
    let via_ws = handle.ws_provider().get_block_number().await.unwrap();
    assert_eq!(via_ws, expected.to::<u64>());
}
// Every pre-funded dev account must report the configured genesis balance when
// queried over a websocket connection.
#[tokio::test(flavor = "multi_thread")]
async fn can_dev_get_balance_ws() {
    let (_api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.ws_provider();

    let expected = handle.genesis_balance();
    for account in handle.genesis_accounts() {
        assert_eq!(provider.get_balance(account).await.unwrap(), expected);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/eip4844.rs | crates/anvil/tests/it/eip4844.rs | use crate::utils::{http_provider, http_provider_with_signer};
use alloy_consensus::{SidecarBuilder, SimpleCoder, Transaction};
use alloy_eips::{
Typed2718,
eip4844::{BLOB_TX_MIN_BLOB_GASPRICE, DATA_GAS_PER_BLOB, MAX_DATA_GAS_PER_BLOCK_DENCUN},
};
use alloy_network::{EthereumWallet, ReceiptResponse, TransactionBuilder, TransactionBuilder4844};
use alloy_primitives::{Address, U256, b256};
use alloy_provider::{Provider, ProviderBuilder};
use alloy_rpc_types::{BlockId, TransactionRequest};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
use foundry_test_utils::rpc;
// Sends a minimal EIP-4844 blob transaction on a Cancun-enabled node and checks
// the receipt accounts for exactly one blob's worth of gas at the 1-wei floor.
#[tokio::test(flavor = "multi_thread")]
async fn can_send_eip4844_transaction() {
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;

    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let (from, to) = (wallets[0].address(), wallets[1].address());

    let provider = http_provider(&handle.http_endpoint());
    let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
    let gas_price = provider.get_gas_price().await.unwrap();

    // A short payload fits in a single blob.
    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(b"Hello World").build().unwrap();

    let request = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_nonce(0)
        .with_max_fee_per_blob_gas(gas_price + 1)
        .with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
        .with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
        .with_blob_sidecar(sidecar)
        .value(U256::from(5));
    let mut tx = WithOtherFields::new(request);
    tx.populate_blob_hashes();

    let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    // One blob => 131072 (DATA_GAS_PER_BLOB) of blob gas.
    assert_eq!(receipt.blob_gas_used, Some(131072));
    assert_eq!(receipt.blob_gas_price, Some(0x1)); // 1 wei
}
// Sends a blob transaction on a forked mainnet node and verifies the sidecar is
// persisted and retrievable via `anvil_getBlobByTransactionHash` afterwards.
#[tokio::test(flavor = "multi_thread")]
async fn can_send_eip4844_transaction_fork() {
    let node_config = NodeConfig::test()
        .with_eth_rpc_url(Some(rpc::next_http_archive_rpc_url()))
        .with_fork_block_number(Some(23432306u64))
        .with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (api, handle) = spawn(node_config).await;
    let provider = handle.http_provider();

    let accounts = provider.get_accounts().await.unwrap();
    let (alice, bob) = (accounts[0], accounts[1]);

    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(b"Blobs are fun!").build().unwrap();
    let tx = TransactionRequest::default()
        .with_from(alice)
        .with_to(bob)
        .with_blob_sidecar(sidecar);

    let receipt =
        provider.send_transaction(tx.into()).await.unwrap().get_receipt().await.unwrap();
    // The node must have stored the blob for this transaction hash.
    let _blobs = api.anvil_get_blob_by_tx_hash(receipt.transaction_hash).unwrap().unwrap();
}
// Submits a blob transaction through a plain `ProviderBuilder` connection (so
// it flows via `eth_sendTransaction`) on a forked node, then verifies the blob
// can be fetched back by transaction hash.
#[tokio::test(flavor = "multi_thread")]
async fn can_send_eip4844_transaction_eth_send_transaction() {
    let node_config = NodeConfig::test()
        .with_eth_rpc_url(Some(rpc::next_http_archive_rpc_url()))
        .with_fork_block_number(Some(23552208u64))
        .with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (api, handle) = spawn(node_config).await;

    let provider = ProviderBuilder::new().connect(handle.http_endpoint().as_str()).await.unwrap();
    let accounts = provider.get_accounts().await.unwrap();
    let (alice, bob) = (accounts[0], accounts[1]);

    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(b"Blobs are fun!").build().unwrap();
    let tx = TransactionRequest::default()
        .with_from(alice)
        .with_to(bob)
        .with_blob_sidecar(sidecar);

    let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    // The node must have stored the blob for this transaction hash.
    let _blobs = api.anvil_get_blob_by_tx_hash(receipt.transaction_hash).unwrap().unwrap();
}
// A single transaction may carry multiple blobs: a payload sized at five blobs'
// worth of data is accepted, and the receipt reports the Dencun per-block blob
// gas maximum consumed at the 1-wei floor price.
#[tokio::test(flavor = "multi_thread")]
async fn can_send_multiple_blobs_in_one_tx() {
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;

    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let (from, to) = (wallets[0].address(), wallets[1].address());

    let provider = http_provider(&handle.http_endpoint());
    let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
    let gas_price = provider.get_gas_price().await.unwrap();

    // 131072 is DATA_GAS_PER_BLOB and also BYTE_PER_BLOB
    let large_data = vec![1u8; DATA_GAS_PER_BLOB as usize * 5];
    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(&large_data).build().unwrap();

    let request = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_nonce(0)
        .with_max_fee_per_blob_gas(gas_price + 1)
        .with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
        .with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
        .with_blob_sidecar(sidecar);
    let mut tx = WithOtherFields::new(request);
    tx.populate_blob_hashes();

    let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    assert_eq!(receipt.blob_gas_used, Some(MAX_DATA_GAS_PER_BLOCK_DENCUN));
    assert_eq!(receipt.blob_gas_price, Some(0x1)); // 1 wei
}
// A payload that needs more blobs than a single transaction may carry must be
// rejected at submission time with a "too many blobs" error.
#[tokio::test(flavor = "multi_thread")]
async fn cannot_exceed_six_blobs() {
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;

    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let (from, to) = (wallets[0].address(), wallets[1].address());

    let provider = http_provider(&handle.http_endpoint());
    let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
    let gas_price = provider.get_gas_price().await.unwrap();

    // 131072 is DATA_GAS_PER_BLOB and also BYTE_PER_BLOB
    let large_data = vec![1u8; DATA_GAS_PER_BLOB as usize * 6];
    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(&large_data).build().unwrap();

    let request = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_nonce(0)
        .with_max_fee_per_blob_gas(gas_price + 1)
        .with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
        .with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
        .with_blob_sidecar(sidecar);
    let mut tx = WithOtherFields::new(request);
    tx.populate_blob_hashes();

    let err = provider.send_transaction(tx).await.unwrap_err();
    assert!(err.to_string().contains("too many blobs"));
}
// Two blob txs whose combined blob count exceeds the per-block maximum must be
// spread over two consecutive blocks when mined manually.
#[tokio::test(flavor = "multi_thread")]
async fn can_mine_blobs_when_exceeds_max_blobs() {
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (api, handle) = spawn(node_config).await;
    // Disable auto-mining so both txs are pooled before any block is built.
    api.anvil_set_auto_mine(false).await.unwrap();

    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let (from, to) = (wallets[0].address(), wallets[1].address());

    let provider = http_provider(&handle.http_endpoint());
    let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
    let gas_price = provider.get_gas_price().await.unwrap();

    // First tx: payload spanning three blobs' worth of data.
    let payload_a = vec![1u8; DATA_GAS_PER_BLOB as usize * 3];
    let builder_a: SidecarBuilder<SimpleCoder> = SidecarBuilder::from_slice(&payload_a);
    let num_blobs_first = builder_a.clone().take().len() as u64;
    let sidecar_a = builder_a.build().unwrap();

    let request = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_nonce(0)
        .with_max_fee_per_blob_gas(gas_price + 1)
        .with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
        .with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
        .with_blob_sidecar(sidecar_a);
    let mut tx = WithOtherFields::new(request);
    tx.populate_blob_hashes();
    let first_tx = provider.send_transaction(tx.clone()).await.unwrap();

    // Second tx: two more blobs, pushing the pool past the per-block limit.
    let payload_b = vec![1u8; DATA_GAS_PER_BLOB as usize * 2];
    let builder_b: SidecarBuilder<SimpleCoder> = SidecarBuilder::from_slice(&payload_b);
    let num_blobs_second = builder_b.clone().take().len() as u64;
    tx.set_blob_sidecar(builder_b.build().unwrap());
    tx.set_nonce(1);
    tx.populate_blob_hashes();
    let second_tx = provider.send_transaction(tx).await.unwrap();

    // Each manual mine can only fit one of the two transactions.
    api.mine_one().await;
    let first_receipt = first_tx.get_receipt().await.unwrap();
    api.mine_one().await;
    let second_receipt = second_tx.get_receipt().await.unwrap();

    let (first_block, second_block) = tokio::join!(
        provider.get_block_by_number(first_receipt.block_number.unwrap().into()),
        provider.get_block_by_number(second_receipt.block_number.unwrap().into())
    );
    assert_eq!(
        first_block.unwrap().unwrap().header.blob_gas_used,
        Some(DATA_GAS_PER_BLOB * num_blobs_first)
    );
    assert_eq!(
        second_block.unwrap().unwrap().header.blob_gas_used,
        Some(DATA_GAS_PER_BLOB * num_blobs_second)
    );
    // Mined in two different blocks
    assert_eq!(first_receipt.block_number.unwrap() + 1, second_receipt.block_number.unwrap());
}
// On a Cancun-enabled chain the genesis block must already expose the EIP-4844
// header fields, both zeroed.
#[tokio::test(flavor = "multi_thread")]
async fn can_check_blob_fields_on_genesis() {
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;
    let provider = http_provider(&handle.http_endpoint());

    let genesis = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    assert_eq!(genesis.header.blob_gas_used, Some(0));
    assert_eq!(genesis.header.excess_blob_gas, Some(0));
}
#[expect(clippy::disallowed_macros)]
#[tokio::test(flavor = "multi_thread")]
async fn can_correctly_estimate_blob_gas_with_recommended_fillers() {
    // The provider's recommended fillers must populate valid blob-gas fields for
    // a 4844 request that only specifies a recipient and a sidecar.
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;
    let provider = http_provider(&handle.http_endpoint());

    let accounts = provider.get_accounts().await.unwrap();
    let (alice, bob) = (accounts[0], accounts[1]);

    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(b"Blobs are fun!").build().unwrap();
    let tx =
        WithOtherFields::new(TransactionRequest::default().with_to(bob).with_blob_sidecar(sidecar));

    // Send the transaction and wait for the broadcast.
    let pending_tx = provider.send_transaction(tx).await.unwrap();
    println!("Pending transaction... {}", pending_tx.tx_hash());

    // Wait for the transaction to be included and get the receipt.
    let receipt = pending_tx.get_receipt().await.unwrap();

    // Grab the processed transaction.
    let tx = provider.get_transaction_by_hash(receipt.transaction_hash).await.unwrap().unwrap();
    println!(
        "Transaction included in block {}",
        receipt.block_number.expect("Failed to get block number")
    );

    // The filled blob-gas price must be at least the protocol minimum.
    assert!(tx.max_fee_per_blob_gas().unwrap() >= BLOB_TX_MIN_BLOB_GASPRICE);
    assert_eq!(receipt.from, alice);
    assert_eq!(receipt.to, Some(bob));
    assert_eq!(
        receipt.blob_gas_used.expect("Expected to be EIP-4844 transaction"),
        DATA_GAS_PER_BLOB
    );
}
#[expect(clippy::disallowed_macros)]
#[tokio::test(flavor = "multi_thread")]
async fn can_correctly_estimate_blob_gas_with_recommended_fillers_with_signer() {
    // Recommended fillers must also produce valid blob-gas fields when the
    // provider signs locally with a dev wallet instead of delegating to the node.
    let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
    let (_api, handle) = spawn(node_config).await;

    let signer = handle.dev_wallets().next().unwrap();
    let wallet: EthereumWallet = signer.clone().into();
    let provider = http_provider_with_signer(&handle.http_endpoint(), wallet);

    let accounts = provider.get_accounts().await.unwrap();
    let (alice, bob) = (accounts[0], accounts[1]);

    let sidecar = SidecarBuilder::<SimpleCoder>::from_slice(b"Blobs are fun!").build().unwrap();
    let tx =
        WithOtherFields::new(TransactionRequest::default().with_to(bob).with_blob_sidecar(sidecar));

    // Send the transaction and wait for the broadcast.
    let pending_tx = provider.send_transaction(tx).await.unwrap();
    println!("Pending transaction... {}", pending_tx.tx_hash());

    // Wait for the transaction to be included and get the receipt.
    let receipt = pending_tx.get_receipt().await.unwrap();

    // Grab the processed transaction.
    let tx = provider.get_transaction_by_hash(receipt.transaction_hash).await.unwrap().unwrap();
    println!(
        "Transaction included in block {}",
        receipt.block_number.expect("Failed to get block number")
    );

    assert!(tx.max_fee_per_blob_gas().unwrap() >= BLOB_TX_MIN_BLOB_GASPRICE);
    assert_eq!(receipt.from, alice);
    assert_eq!(receipt.to, Some(bob));
    assert_eq!(
        receipt.blob_gas_used.expect("Expected to be EIP-4844 transaction"),
        DATA_GAS_PER_BLOB
    );
}
// <https://github.com/foundry-rs/foundry/issues/9924>
// With auto-impersonation enabled, a type-3 transaction carrying only blob
// versioned hashes (no sidecar) must still be accepted and mined.
#[tokio::test]
async fn can_bypass_sidecar_requirement() {
    crate::init_tracing();
    let node_config = NodeConfig::test()
        .with_hardfork(Some(EthereumHardfork::Cancun.into()))
        .with_auto_impersonate(true);
    let (api, handle) = spawn(node_config).await;
    let provider = http_provider(&handle.http_endpoint());

    let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
    let gas_price = provider.get_gas_price().await.unwrap();

    // Random accounts; the sender is funded explicitly below.
    let sender = Address::random();
    let recipient = Address::random();
    api.anvil_set_balance(sender, U256::from(60262144030131080_u128)).await.unwrap();

    // A blob (type-3) request with a versioned hash but no sidecar attached.
    let tx = TransactionRequest {
        transaction_type: Some(3),
        from: Some(sender),
        to: Some(alloy_primitives::TxKind::Call(recipient)),
        nonce: Some(0),
        value: Some(U256::from(0)),
        max_fee_per_blob_gas: Some(gas_price + 1),
        max_fee_per_gas: Some(eip1559_est.max_fee_per_gas),
        max_priority_fee_per_gas: Some(eip1559_est.max_priority_fee_per_gas),
        blob_versioned_hashes: Some(vec![b256!(
            "0x01d5446006b21888d0267829344ab8624fdf1b425445a8ae1ca831bf1b8fbcd4"
        )]),
        sidecar: None,
        ..Default::default()
    };

    let receipt = provider
        .send_transaction(WithOtherFields::new(tx))
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    assert!(receipt.status());

    // The mined transaction must still be typed as an EIP-4844 (type 3) tx.
    let mined = provider.get_transaction_by_hash(receipt.transaction_hash).await.unwrap().unwrap();
    assert_eq!(mined.inner.ty(), 3);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_blobs_by_versioned_hash() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let from = wallets[0].address();
let to = wallets[1].address();
let provider = http_provider(&handle.http_endpoint());
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
let sidecar: SidecarBuilder<SimpleCoder> = SidecarBuilder::from_slice(b"Hello World");
let sidecar = sidecar.build().unwrap();
let tx = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_nonce(0)
.with_max_fee_per_blob_gas(gas_price + 1)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_blob_sidecar(sidecar.clone())
.value(U256::from(5));
let mut tx = WithOtherFields::new(tx);
tx.populate_blob_hashes();
let _receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let hash = sidecar.versioned_hash_for_blob(0).unwrap();
// api.anvil_set_auto_mine(true).await.unwrap();
let blob = api.anvil_get_blob_by_versioned_hash(hash).unwrap().unwrap();
assert_eq!(blob, sidecar.blobs[0]);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_blobs_by_tx_hash() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let from = wallets[0].address();
let to = wallets[1].address();
let provider = http_provider(&handle.http_endpoint());
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
let sidecar: SidecarBuilder<SimpleCoder> = SidecarBuilder::from_slice(b"Hello World");
let sidecar = sidecar.build().unwrap();
let tx = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_nonce(0)
.with_max_fee_per_blob_gas(gas_price + 1)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_blob_sidecar(sidecar.clone())
.value(U256::from(5));
let mut tx = WithOtherFields::new(tx);
tx.populate_blob_hashes();
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let hash = receipt.transaction_hash;
api.anvil_set_auto_mine(true).await.unwrap();
let blobs = api.anvil_get_blob_by_tx_hash(hash).unwrap().unwrap();
assert_eq!(blobs, sidecar.blobs);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/eip7702.rs | crates/anvil/tests/it/eip7702.rs | use crate::utils::http_provider;
use alloy_consensus::{SignableTransaction, transaction::TxEip7702};
use alloy_network::{ReceiptResponse, TransactionBuilder, TxSignerSync};
use alloy_primitives::{U256, bytes};
use alloy_provider::{PendingTransactionConfig, Provider};
use alloy_rpc_types::{Authorization, TransactionRequest};
use alloy_serde::WithOtherFields;
use alloy_signer::{Signature, SignerSync};
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
#[tokio::test(flavor = "multi_thread")]
async fn can_send_eip7702_tx() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (_api, handle) = spawn(node_config).await;
let provider = http_provider(&handle.http_endpoint());
let wallets = handle.dev_wallets().collect::<Vec<_>>();
// deploy simple contract forwarding calldata to LOG0
// PUSH7(CALLDATASIZE PUSH0 PUSH0 CALLDATACOPY CALLDATASIZE PUSH0 LOG0) PUSH0 MSTORE PUSH1(7)
// PUSH1(25) RETURN
let logger_bytecode = bytes!("66365f5f37365fa05f5260076019f3");
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let from = wallets[0].address();
let tx = TransactionRequest::default()
.with_from(from)
.into_create()
.with_nonce(0)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_input(logger_bytecode);
let receipt = provider
.send_transaction(WithOtherFields::new(tx))
.await
.unwrap()
.get_receipt()
.await
.unwrap();
assert!(receipt.status());
let contract = receipt.contract_address.unwrap();
let authorization = Authorization {
chain_id: U256::from(31337u64),
address: contract,
nonce: provider.get_transaction_count(from).await.unwrap(),
};
let signature = wallets[0].sign_hash_sync(&authorization.signature_hash()).unwrap();
let authorization = authorization.into_signed(signature);
let log_data = bytes!("11112222");
let mut tx = TxEip7702 {
max_fee_per_gas: eip1559_est.max_fee_per_gas,
max_priority_fee_per_gas: eip1559_est.max_priority_fee_per_gas,
gas_limit: 100000,
chain_id: 31337,
to: from,
input: bytes!("11112222"),
authorization_list: vec![authorization],
..Default::default()
};
let signature = wallets[1].sign_transaction_sync(&mut tx).unwrap();
let tx = tx.into_signed(signature);
let mut encoded = Vec::new();
tx.eip2718_encode(&mut encoded);
let receipt =
provider.send_raw_transaction(&encoded).await.unwrap().get_receipt().await.unwrap();
let log = &receipt.inner.inner.logs()[0];
// assert that log was from EOA which signed authorization
assert_eq!(log.address(), from);
assert_eq!(log.topics().len(), 0);
assert_eq!(log.data().data, log_data);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_send_eip7702_request() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let provider = http_provider(&handle.http_endpoint());
let wallets = handle.dev_wallets().collect::<Vec<_>>();
// deploy simple contract forwarding calldata to LOG0
// PUSH7(CALLDATASIZE PUSH0 PUSH0 CALLDATACOPY CALLDATASIZE PUSH0 LOG0) PUSH0 MSTORE PUSH1(7)
// PUSH1(25) RETURN
let logger_bytecode = bytes!("66365f5f37365fa05f5260076019f3");
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let from = wallets[0].address();
let tx = TransactionRequest::default()
.with_from(from)
.into_create()
.with_nonce(0)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_input(logger_bytecode);
let receipt = provider
.send_transaction(WithOtherFields::new(tx))
.await
.unwrap()
.get_receipt()
.await
.unwrap();
assert!(receipt.status());
let contract = receipt.contract_address.unwrap();
let authorization = Authorization {
chain_id: U256::from(31337u64),
address: contract,
nonce: provider.get_transaction_count(from).await.unwrap(),
};
let signature = wallets[0].sign_hash_sync(&authorization.signature_hash()).unwrap();
let authorization = authorization.into_signed(signature);
let log_data = bytes!("11112222");
let tx = TxEip7702 {
max_fee_per_gas: eip1559_est.max_fee_per_gas,
max_priority_fee_per_gas: eip1559_est.max_priority_fee_per_gas,
gas_limit: 100000,
chain_id: 31337,
to: from,
input: bytes!("11112222"),
authorization_list: vec![authorization],
..Default::default()
};
let sender = wallets[1].address();
let request = TransactionRequest::from_transaction(tx).with_from(sender);
api.anvil_impersonate_account(sender).await.unwrap();
let txhash = api.send_transaction(WithOtherFields::new(request)).await.unwrap();
let txhash = provider
.watch_pending_transaction(PendingTransactionConfig::new(txhash))
.await
.unwrap()
.await
.unwrap();
let receipt = provider.get_transaction_receipt(txhash).await.unwrap().unwrap();
let log = &receipt.inner.inner.logs()[0];
// assert that log was from EOA which signed authorization
assert_eq!(log.address(), from);
assert_eq!(log.topics().len(), 0);
assert_eq!(log.data().data, log_data);
}
#[tokio::test(flavor = "multi_thread")]
async fn eip7702_authorization_bypass() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Prague.into()));
let (api, handle) = spawn(node_config).await;
let provider = http_provider(&handle.http_endpoint());
let wallets = handle.dev_wallets().collect::<Vec<_>>();
// deploy simple contract forwarding calldata to LOG0
// PUSH7(CALLDATASIZE PUSH0 PUSH0 CALLDATACOPY CALLDATASIZE PUSH0 LOG0) PUSH0 MSTORE PUSH1(7)
// PUSH1(25) RETURN
let logger_bytecode = bytes!("66365f5f37365fa05f5260076019f3");
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let from = wallets[0].address();
let tx = TransactionRequest::default()
.with_from(from)
.into_create()
.with_nonce(0)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_input(logger_bytecode);
let receipt = provider
.send_transaction(WithOtherFields::new(tx))
.await
.unwrap()
.get_receipt()
.await
.unwrap();
assert!(receipt.status());
let contract = receipt.contract_address.unwrap();
let authorization = Authorization {
chain_id: U256::from(31337u64),
address: contract,
nonce: provider.get_transaction_count(from).await.unwrap(),
};
let fake_auth_sig = Signature::new(U256::ZERO, U256::ZERO, true);
api.anvil_impersonate_signature(fake_auth_sig.as_bytes().into(), from).await.unwrap();
let authorization = authorization.into_signed(fake_auth_sig);
let log_data = bytes!("11112222");
let mut tx = TxEip7702 {
max_fee_per_gas: eip1559_est.max_fee_per_gas,
max_priority_fee_per_gas: eip1559_est.max_priority_fee_per_gas,
gas_limit: 100000,
chain_id: 31337,
to: from,
input: bytes!("11112222"),
authorization_list: vec![authorization],
..Default::default()
};
let signature = wallets[1].sign_transaction_sync(&mut tx).unwrap();
let tx = tx.into_signed(signature);
let mut encoded = Vec::new();
tx.eip2718_encode(&mut encoded);
let receipt =
provider.send_raw_transaction(&encoded).await.unwrap().get_receipt().await.unwrap();
let log = &receipt.inner.inner.logs()[0];
// assert that log was from EOA which signed authorization
assert_eq!(log.address(), from);
assert_eq!(log.topics().len(), 0);
assert_eq!(log.data().data, log_data);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/beacon_api.rs | crates/anvil/tests/it/beacon_api.rs | use crate::utils::http_provider;
use alloy_consensus::{Blob, SidecarBuilder, SimpleCoder, Transaction};
use alloy_network::{TransactionBuilder, TransactionBuilder4844};
use alloy_primitives::{B256, FixedBytes, U256, b256};
use alloy_provider::Provider;
use alloy_rpc_types::TransactionRequest;
use alloy_rpc_types_beacon::{genesis::GenesisResponse, sidecar::GetBlobsResponse};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
use ssz::Decode;
#[tokio::test(flavor = "multi_thread")]
async fn test_beacon_api_get_blob_sidecars() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
let (_api, handle) = spawn(node_config).await;
// Test Beacon API endpoint using HTTP client
let client = reqwest::Client::new();
let url = format!("{}/eth/v1/beacon/blob_sidecars/latest", handle.http_endpoint());
// This endpoint is deprecated, so we expect a 410 Gone response
let response = client.get(&url).send().await.unwrap();
assert_eq!(
response.text().await.unwrap(),
r#"{"code":410,"message":"This endpoint is deprecated. Use `GET /eth/v1/beacon/blobs/{block_id}` instead."}"#,
"Expected deprecation message for blob_sidecars endpoint"
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_beacon_api_get_blobs() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
let (api, handle) = spawn(node_config).await;
// Disable auto-mining so we can include multiple transactions in the same block
api.anvil_set_auto_mine(false).await.unwrap();
let wallets = handle.dev_wallets().collect::<Vec<_>>();
let from = wallets[0].address();
let to = wallets[1].address();
let provider = http_provider(&handle.http_endpoint());
let eip1559_est = provider.estimate_eip1559_fees().await.unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
// Create multiple blob transactions to be included in the same block
let blob_data =
[b"Hello Beacon API - Blob 1", b"Hello Beacon API - Blob 2", b"Hello Beacon API - Blob 3"];
let mut pending_txs = Vec::new();
// Send all transactions without waiting for receipts
for (i, data) in blob_data.iter().enumerate() {
let sidecar: SidecarBuilder<SimpleCoder> = SidecarBuilder::from_slice(data.as_slice());
let sidecar = sidecar.build().unwrap();
let tx = TransactionRequest::default()
.with_from(from)
.with_to(to)
.with_nonce(i as u64)
.with_max_fee_per_blob_gas(gas_price + 1)
.with_max_fee_per_gas(eip1559_est.max_fee_per_gas)
.with_max_priority_fee_per_gas(eip1559_est.max_priority_fee_per_gas)
.with_blob_sidecar(sidecar)
.value(U256::from(100));
let mut tx = WithOtherFields::new(tx);
tx.populate_blob_hashes();
let pending = provider.send_transaction(tx).await.unwrap();
pending_txs.push(pending);
}
// Mine a block to include all transactions
api.evm_mine(None).await.unwrap();
// Get receipts for all transactions
let mut receipts = Vec::new();
for pending in pending_txs {
let receipt = pending.get_receipt().await.unwrap();
receipts.push(receipt);
}
// Verify all transactions were included in the same block
let block_number = receipts[0].block_number.unwrap();
for (i, receipt) in receipts.iter().enumerate() {
assert_eq!(
receipt.block_number.unwrap(),
block_number,
"Transaction {i} was not included in block {block_number}"
);
}
// Extract the actual versioned hashes from the mined transactions
let mut actual_versioned_hashes = Vec::new();
for receipt in &receipts {
let tx = provider.get_transaction_by_hash(receipt.transaction_hash).await.unwrap().unwrap();
if let Some(blob_versioned_hashes) = tx.blob_versioned_hashes() {
actual_versioned_hashes.extend(blob_versioned_hashes.iter().copied());
}
}
// Test Beacon API endpoint using HTTP client
let client = reqwest::Client::new();
let url = format!("{}/eth/v1/beacon/blobs/{}", handle.http_endpoint(), block_number);
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
assert_eq!(
response.headers().get("content-type").and_then(|h| h.to_str().ok()),
Some("application/json"),
"Expected application/json content-type header"
);
let blobs_response: GetBlobsResponse = response.json().await.unwrap();
// Verify response structure
assert!(!blobs_response.execution_optimistic);
assert!(!blobs_response.finalized);
// Verify we have blob data from all transactions
assert_eq!(blobs_response.data.len(), 3, "Expected 3 blobs from 3 transactions");
// Test response with SSZ encoding
let url = format!("{}/eth/v1/beacon/blobs/{}", handle.http_endpoint(), block_number);
let response = client
.get(&url)
.header(axum::http::header::ACCEPT, "application/octet-stream")
.send()
.await
.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
assert_eq!(
response.headers().get("content-type").and_then(|h| h.to_str().ok()),
Some("application/octet-stream"),
"Expected application/octet-stream content-type header"
);
let body_bytes = response.bytes().await.unwrap();
// Decode the SSZ-encoded blobs in a spawned thread with larger stack to handle recursion
let decoded_blobs = std::thread::Builder::new()
.stack_size(8 * 1024 * 1024) // 8MB stack for SSZ decoding of large blobs
.spawn(move || Vec::<Blob>::from_ssz_bytes(&body_bytes))
.expect("Failed to spawn decode thread")
.join()
.expect("Decode thread panicked")
.expect("Failed to decode SSZ-encoded blobs");
// Verify we got exactly 3 blobs
assert_eq!(
decoded_blobs.len(),
3,
"Expected 3 blobs from SSZ-encoded response, got {}",
decoded_blobs.len()
);
// Verify the decoded blobs match the JSON response blobs
for (i, (decoded, json)) in decoded_blobs.iter().zip(blobs_response.data.iter()).enumerate() {
assert_eq!(decoded, json, "Blob {i} mismatch between SSZ and JSON responses");
}
// Test filtering with versioned_hashes query parameter - single hash
let url = format!(
"{}/eth/v1/beacon/blobs/{}?versioned_hashes={}",
handle.http_endpoint(),
block_number,
actual_versioned_hashes[1]
);
let response = client.get(&url).send().await.unwrap();
let status = response.status();
if status != reqwest::StatusCode::OK {
let error_body = response.text().await.unwrap();
panic!("Expected OK status, got {status}: {error_body}");
}
let blobs_response: GetBlobsResponse = response.json().await.unwrap();
assert_eq!(
blobs_response.data.len(),
1,
"Expected 1 blob when filtering by single versioned_hash"
);
// Test filtering with versioned_hashes query parameter - multiple versioned_hashes
// (comma-separated)
let url = format!(
"{}/eth/v1/beacon/blobs/{}?versioned_hashes={},{}",
handle.http_endpoint(),
block_number,
actual_versioned_hashes[0],
actual_versioned_hashes[2]
);
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
let blobs_response: GetBlobsResponse = response.json().await.unwrap();
assert_eq!(
blobs_response.data.len(),
2,
"Expected 2 blobs when filtering by two versioned_hashes"
);
// Test filtering with non-existent versioned_hash
let non_existent_hash =
b256!("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef");
let url = format!(
"{}/eth/v1/beacon/blobs/{}?versioned_hashes={}",
handle.http_endpoint(),
block_number,
non_existent_hash
);
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
let blobs_response: GetBlobsResponse = response.json().await.unwrap();
assert_eq!(
blobs_response.data.len(),
0,
"Expected 0 blobs when filtering by non-existent versioned_hash"
);
// Test with special block identifiers
let test_ids = vec!["latest", "finalized", "safe", "earliest"];
for block_id in test_ids {
let url = format!("{}/eth/v1/beacon/blobs/{}", handle.http_endpoint(), block_id);
assert_eq!(client.get(&url).send().await.unwrap().status(), reqwest::StatusCode::OK);
}
let url = format!("{}/eth/v1/beacon/blobs/pending", handle.http_endpoint());
assert_eq!(client.get(&url).send().await.unwrap().status(), reqwest::StatusCode::NOT_FOUND);
// Test with hex block number
let url = format!("{}/eth/v1/beacon/blobs/0x{block_number:x}", handle.http_endpoint());
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
// Test with non-existent block
let url = format!("{}/eth/v1/beacon/blobs/999999", handle.http_endpoint());
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::NOT_FOUND);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_beacon_api_get_genesis() {
let node_config = NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()));
let (_api, handle) = spawn(node_config).await;
// Test Beacon API genesis endpoint using HTTP client
let client = reqwest::Client::new();
let url = format!("{}/eth/v1/beacon/genesis", handle.http_endpoint());
let response = client.get(&url).send().await.unwrap();
assert_eq!(response.status(), reqwest::StatusCode::OK);
let genesis_response: GenesisResponse = response.json().await.unwrap();
assert!(genesis_response.data.genesis_time > 0);
assert_eq!(genesis_response.data.genesis_validators_root, B256::ZERO);
assert_eq!(
genesis_response.data.genesis_fork_version,
FixedBytes::from([0x00, 0x00, 0x00, 0x00])
);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/utils.rs | crates/anvil/tests/it/utils.rs | use alloy_network::{Ethereum, EthereumWallet};
use alloy_provider::{
Identity, RootProvider,
fillers::{ChainIdFiller, FillProvider, GasFiller, JoinFill, NonceFiller, WalletFiller},
};
use foundry_common::provider::{
ProviderBuilder, RetryProvider, RetryProviderWithSigner, get_http_provider,
};
pub fn http_provider(http_endpoint: &str) -> RetryProvider {
get_http_provider(http_endpoint)
}
pub fn http_provider_with_signer(
http_endpoint: &str,
signer: EthereumWallet,
) -> RetryProviderWithSigner {
ProviderBuilder::new(http_endpoint)
.build_with_wallet(signer)
.expect("failed to build Alloy HTTP provider with signer")
}
pub fn ws_provider_with_signer(
ws_endpoint: &str,
signer: EthereumWallet,
) -> RetryProviderWithSigner {
ProviderBuilder::new(ws_endpoint)
.build_with_wallet(signer)
.expect("failed to build Alloy WS provider with signer")
}
/// Currently required to get around <https://github.com/alloy-rs/alloy/issues/296>
pub async fn connect_pubsub(conn_str: &str) -> RootProvider {
alloy_provider::ProviderBuilder::default().connect(conn_str).await.unwrap()
}
type PubsubSigner = FillProvider<
JoinFill<
JoinFill<
Identity,
JoinFill<
GasFiller,
JoinFill<
alloy_provider::fillers::BlobGasFiller,
JoinFill<NonceFiller, ChainIdFiller>,
>,
>,
>,
WalletFiller<EthereumWallet>,
>,
RootProvider,
Ethereum,
>;
pub async fn connect_pubsub_with_wallet(conn_str: &str, wallet: EthereumWallet) -> PubsubSigner {
alloy_provider::ProviderBuilder::new().wallet(wallet).connect(conn_str).await.unwrap()
}
pub async fn ipc_provider_with_wallet(
ipc_endpoint: &str,
wallet: EthereumWallet,
) -> RetryProviderWithSigner {
ProviderBuilder::new(ipc_endpoint)
.build_with_wallet(wallet)
.expect("failed to build Alloy IPC provider with signer")
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/simulate.rs | crates/anvil/tests/it/simulate.rs | //! general eth api tests
use alloy_primitives::{TxKind, U256, address};
use alloy_rpc_types::{
BlockOverrides,
request::TransactionRequest,
simulate::{SimBlock, SimulatePayload},
state::{AccountOverride, StateOverridesBuilder},
};
use anvil::{NodeConfig, spawn};
use foundry_test_utils::rpc;
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_simulate_v1() {
crate::init_tracing();
let (api, _) =
spawn(NodeConfig::test().with_eth_rpc_url(Some(rpc::next_http_archive_rpc_url()))).await;
let block_overrides =
Some(BlockOverrides { base_fee: Some(U256::from(9)), ..Default::default() });
let account_override =
AccountOverride { balance: Some(U256::from(999999999999u64)), ..Default::default() };
let state_overrides = Some(
StateOverridesBuilder::with_capacity(1)
.append(address!("0xc000000000000000000000000000000000000001"), account_override)
.build(),
);
let tx_request = TransactionRequest {
from: Some(address!("0xc000000000000000000000000000000000000001")),
to: Some(TxKind::from(address!("0xc000000000000000000000000000000000000001"))),
value: Some(U256::from(1)),
..Default::default()
};
let payload = SimulatePayload {
block_state_calls: vec![SimBlock {
block_overrides,
state_overrides,
calls: vec![tx_request],
}],
trace_transfers: true,
validation: false,
return_full_transactions: true,
};
let _res = api.simulate_v1(payload, None).await.unwrap();
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/anvil_api.rs | crates/anvil/tests/it/anvil_api.rs | //! tests for custom anvil endpoints
use crate::{
abi::{self, BUSD, Greeter, Multicall},
fork::fork_config,
utils::http_provider_with_signer,
};
use alloy_consensus::{SignableTransaction, TxEip1559};
use alloy_network::{EthereumWallet, TransactionBuilder, TxSignerSync};
use alloy_primitives::{Address, Bytes, TxKind, U256, address, fixed_bytes};
use alloy_provider::{Provider, ext::TxPoolApi};
use alloy_rpc_types::{
BlockId, BlockNumberOrTag, TransactionRequest,
anvil::{
ForkedNetwork, Forking, Metadata, MineOptions, NodeEnvironment, NodeForkConfig, NodeInfo,
},
};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, eth::api::CLIENT_VERSION, spawn};
use anvil_core::{
eth::EthRequest,
types::{ReorgOptions, TransactionData},
};
use foundry_evm::hardfork::EthereumHardfork;
use revm::primitives::hardfork::SpecId;
use std::{
str::FromStr,
time::{Duration, SystemTime},
};
#[tokio::test(flavor = "multi_thread")]
async fn can_set_gas_price() {
let (api, handle) =
spawn(NodeConfig::test().with_hardfork(Some(EthereumHardfork::Berlin.into()))).await;
let provider = handle.http_provider();
let gas_price = U256::from(1337);
api.anvil_set_min_gas_price(gas_price).await.unwrap();
assert_eq!(gas_price.to::<u128>(), provider.get_gas_price().await.unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_set_block_gas_limit() {
let (api, _) =
spawn(NodeConfig::test().with_hardfork(Some(EthereumHardfork::Berlin.into()))).await;
let block_gas_limit = U256::from(1337);
assert!(api.evm_set_block_gas_limit(block_gas_limit).unwrap());
// Mine a new block, and check the new block gas limit
api.mine_one().await;
let latest_block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
assert_eq!(block_gas_limit.to::<u64>(), latest_block.header.gas_limit);
}
// Ref <https://github.com/foundry-rs/foundry/issues/2341>
#[tokio::test(flavor = "multi_thread")]
async fn can_set_storage() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let s = r#"{"jsonrpc": "2.0", "method": "hardhat_setStorageAt", "id": 1, "params": ["0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56", "0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49", "0x0000000000000000000000000000000000000000000000000000000000003039"]}"#;
let req = serde_json::from_str::<EthRequest>(s).unwrap();
let (addr, slot, val) = match req.clone() {
EthRequest::SetStorageAt(addr, slot, val) => (addr, slot, val),
_ => unreachable!(),
};
api.execute(req).await;
let storage_value = api.storage_at(addr, slot, None).await.unwrap();
assert_eq!(val, storage_value);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_impersonate_account() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let impersonate = Address::random();
let to = Address::random();
let val = U256::from(1337);
let funding = U256::from(1e18 as u64);
// fund the impersonated account
api.anvil_set_balance(impersonate, funding).await.unwrap();
let balance = api.balance(impersonate, None).await.unwrap();
assert_eq!(balance, funding);
let tx = TransactionRequest::default().with_from(impersonate).with_to(to).with_value(val);
let tx = WithOtherFields::new(tx);
let res = provider.send_transaction(tx.clone()).await;
res.unwrap_err();
api.anvil_impersonate_account(impersonate).await.unwrap();
assert!(api.accounts().unwrap().contains(&impersonate));
let res = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
assert_eq!(res.from, impersonate);
let nonce = provider.get_transaction_count(impersonate).await.unwrap();
assert_eq!(nonce, 1);
let balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance, val);
api.anvil_stop_impersonating_account(impersonate).await.unwrap();
let res = provider.send_transaction(tx).await;
res.unwrap_err();
}
#[tokio::test(flavor = "multi_thread")]
async fn can_auto_impersonate_account() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let impersonate = Address::random();
let to = Address::random();
let val = U256::from(1337);
let funding = U256::from(1e18 as u64);
// fund the impersonated account
api.anvil_set_balance(impersonate, funding).await.unwrap();
let balance = api.balance(impersonate, None).await.unwrap();
assert_eq!(balance, funding);
let tx = TransactionRequest::default().with_from(impersonate).with_to(to).with_value(val);
let tx = WithOtherFields::new(tx);
let res = provider.send_transaction(tx.clone()).await;
res.unwrap_err();
api.anvil_auto_impersonate_account(true).await.unwrap();
let res = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
assert_eq!(res.from, impersonate);
let nonce = provider.get_transaction_count(impersonate).await.unwrap();
assert_eq!(nonce, 1);
let balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance, val);
api.anvil_auto_impersonate_account(false).await.unwrap();
let res = provider.send_transaction(tx).await;
res.unwrap_err();
// explicitly impersonated accounts get returned by `eth_accounts`
api.anvil_impersonate_account(impersonate).await.unwrap();
assert!(api.accounts().unwrap().contains(&impersonate));
}
#[tokio::test(flavor = "multi_thread")]
async fn can_impersonate_contract() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let greeter_contract = Greeter::deploy(&provider, "Hello World!".to_string()).await.unwrap();
let impersonate = greeter_contract.address().to_owned();
let to = Address::random();
let val = U256::from(1337);
// // fund the impersonated account
api.anvil_set_balance(impersonate, U256::from(1e18 as u64)).await.unwrap();
let tx = TransactionRequest::default().with_from(impersonate).to(to).with_value(val);
let tx = WithOtherFields::new(tx);
let res = provider.send_transaction(tx.clone()).await;
res.unwrap_err();
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
api.anvil_impersonate_account(impersonate).await.unwrap();
let res = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
assert_eq!(res.from, impersonate);
let balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance, val);
api.anvil_stop_impersonating_account(impersonate).await.unwrap();
let res = provider.send_transaction(tx).await;
res.unwrap_err();
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
}
// Impersonating a contract account (a deployed Gnosis Safe on a fork) must not
// wipe its code, and the code must still be present after impersonation stops.
#[tokio::test(flavor = "multi_thread")]
async fn can_impersonate_gnosis_safe() {
    let (api, handle) = spawn(fork_config()).await;
    let provider = handle.http_provider();
    // <https://help.safe.global/en/articles/40824-i-don-t-remember-my-safe-address-where-can-i-find-it>
    let safe = address!("0xA063Cb7CFd8E57c30c788A0572CBbf2129ae56B6");
    // The Safe is a contract on the forked chain, so it must have code.
    let code = provider.get_code_at(safe).await.unwrap();
    assert!(!code.is_empty());
    api.anvil_impersonate_account(safe).await.unwrap();
    // Impersonation must not remove the account's code.
    let code = provider.get_code_at(safe).await.unwrap();
    assert!(!code.is_empty());
    let balance = U256::from(1e18 as u64);
    // fund the impersonated account
    api.anvil_set_balance(safe, balance).await.unwrap();
    let on_chain_balance = provider.get_balance(safe).await.unwrap();
    assert_eq!(on_chain_balance, balance);
    api.anvil_stop_impersonating_account(safe).await.unwrap();
    let code = provider.get_code_at(safe).await.unwrap();
    // code is added back after stop impersonating
    assert!(!code.is_empty());
}
// Two accounts can be impersonated simultaneously; receipts and nonces are
// tracked independently per impersonated sender.
#[tokio::test(flavor = "multi_thread")]
async fn can_impersonate_multiple_accounts() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let impersonate0 = Address::random();
    let impersonate1 = Address::random();
    let to = Address::random();
    let val = U256::from(1337);
    let funding = U256::from(1e18 as u64);
    // fund the impersonated accounts
    api.anvil_set_balance(impersonate0, funding).await.unwrap();
    api.anvil_set_balance(impersonate1, funding).await.unwrap();
    let tx = TransactionRequest::default().with_from(impersonate0).to(to).with_value(val);
    let tx = WithOtherFields::new(tx);
    api.anvil_impersonate_account(impersonate0).await.unwrap();
    api.anvil_impersonate_account(impersonate1).await.unwrap();
    // Send from the first impersonated sender; verify receipt and nonce.
    let res0 = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    assert_eq!(res0.from, impersonate0);
    let nonce = provider.get_transaction_count(impersonate0).await.unwrap();
    assert_eq!(nonce, 1);
    let receipt = provider.get_transaction_receipt(res0.transaction_hash).await.unwrap().unwrap();
    assert_eq!(res0.inner, receipt.inner);
    // Re-use the same request but switch the sender to the second impersonated account.
    let res1 = provider
        .send_transaction(tx.with_from(impersonate1))
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    assert_eq!(res1.from, impersonate1);
    let nonce = provider.get_transaction_count(impersonate1).await.unwrap();
    assert_eq!(nonce, 1);
    let receipt = provider.get_transaction_receipt(res1.transaction_hash).await.unwrap().unwrap();
    assert_eq!(res1.inner, receipt.inner);
    // Two distinct transactions must have been mined.
    assert_ne!(res0.inner, res1.inner);
}
// Manually mining via `evm_mine` advances the chain height by exactly one
// block per call.
#[tokio::test(flavor = "multi_thread")]
async fn can_mine_manually() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let start_num = provider.get_block_number().await.unwrap();
    // Plain range loop instead of the former `repeat_n((), 10).enumerate()`,
    // which only existed to manufacture an index.
    for idx in 0..10u64 {
        api.evm_mine(None).await.unwrap();
        let num = provider.get_block_number().await.unwrap();
        assert_eq!(num, start_num + idx + 1);
    }
}
// `evm_setNextBlockTimestamp` pins the timestamp of exactly the next block;
// later blocks just continue monotonically.
#[tokio::test(flavor = "multi_thread")]
async fn test_set_next_timestamp() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
    let next_timestamp = now + Duration::from_secs(60);
    // mock timestamp
    api.evm_set_next_block_timestamp(next_timestamp.as_secs()).unwrap();
    api.evm_mine(None).await.unwrap();
    // The freshly mined block carries exactly the mocked timestamp.
    let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert_eq!(block.header.number, 1);
    assert_eq!(block.header.timestamp, next_timestamp.as_secs());
    api.evm_mine(None).await.unwrap();
    // The override applies only once; subsequent timestamps are non-decreasing.
    let next = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert_eq!(next.header.number, 2);
    assert!(next.header.timestamp >= block.header.timestamp);
}
// `evm_setTime` moves the node clock; blocks mined afterwards carry (at least)
// the mocked time and keep advancing monotonically.
#[tokio::test(flavor = "multi_thread")]
async fn test_evm_set_time() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    // Jump the node clock two minutes into the future.
    let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
    let target_secs = (now + Duration::from_secs(120)).as_secs();
    api.evm_set_time(target_secs).unwrap();
    // The first mined block must be at or after the mocked time.
    api.evm_mine(None).await.unwrap();
    let first_block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert!(first_block.header.timestamp >= target_secs);
    // A second block never goes backwards in time.
    api.evm_mine(None).await.unwrap();
    let second_block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert!(second_block.header.timestamp >= first_block.header.timestamp);
}
// `evm_setTime` may also rewind the clock; the next block is then stamped
// between the mocked past time and the real wall clock.
#[tokio::test(flavor = "multi_thread")]
async fn test_evm_set_time_in_past() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap();
    // Rewind the node clock two minutes into the past.
    let past_secs = (now - Duration::from_secs(120)).as_secs();
    api.evm_set_time(past_secs).unwrap();
    // mine a block
    api.evm_mine(None).await.unwrap();
    let mined = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert!(mined.header.timestamp >= past_secs);
    assert!(mined.header.timestamp < now.as_secs());
}
// `evm_setBlockTimestampInterval` forces a fixed timestamp delta between
// consecutive blocks until removed via `evm_removeBlockTimestampInterval`.
#[tokio::test(flavor = "multi_thread")]
async fn test_timestamp_interval() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    api.evm_mine(None).await.unwrap();
    let interval = 10;
    for _ in 0..5 {
        let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
        // mock timestamp
        api.evm_set_block_timestamp_interval(interval).unwrap();
        api.evm_mine(None).await.unwrap();
        // Each new block is exactly `interval` seconds after its parent.
        let new_block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
        assert_eq!(new_block.header.timestamp, block.header.timestamp + interval);
    }
    let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    let next_timestamp = block.header.timestamp + 50;
    // An explicit next-block timestamp takes precedence over the interval once.
    api.evm_set_next_block_timestamp(next_timestamp).unwrap();
    api.evm_mine(None).await.unwrap();
    let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert_eq!(block.header.timestamp, next_timestamp);
    api.evm_mine(None).await.unwrap();
    let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    // interval also works after setting the next timestamp manually
    assert_eq!(block.header.timestamp, next_timestamp + interval);
    assert!(api.evm_remove_block_timestamp_interval().unwrap());
    api.evm_mine(None).await.unwrap();
    let new_block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    // offset is applied correctly after resetting the interval
    assert!(new_block.header.timestamp > block.header.timestamp);
    api.evm_mine(None).await.unwrap();
    let another_block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    // check interval is disabled
    assert!(another_block.header.timestamp - new_block.header.timestamp < interval);
}
// <https://github.com/foundry-rs/foundry/issues/2341>
// Storage written via `anvil_setStorageAt` on a BSC fork must be readable both
// through `eth_getStorageAt` and through a contract call reading the same slot.
#[tokio::test(flavor = "multi_thread")]
async fn test_can_set_storage_bsc_fork() {
    let (api, handle) =
        spawn(NodeConfig::test().with_eth_rpc_url(Some("https://bsc-dataseed.binance.org/"))).await;
    let provider = handle.http_provider();
    let busd_addr = address!("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56");
    // Slot presumably holding the zero address's BUSD balance (the balanceOf
    // assertion below confirms the mapping); 0x3039 == 12345.
    let idx = U256::from_str("0xa6eef7e35abe7026729641147f7915573c7e97b47efa546f5f6e3230263bcb49")
        .unwrap();
    let value = fixed_bytes!("0000000000000000000000000000000000000000000000000000000000003039");
    api.anvil_set_storage_at(busd_addr, idx, value).await.unwrap();
    let storage = api.storage_at(busd_addr, idx, None).await.unwrap();
    assert_eq!(storage, value);
    let busd_contract = BUSD::new(busd_addr, &provider);
    let balance = busd_contract
        .balanceOf(address!("0x0000000000000000000000000000000000000000"))
        .call()
        .await
        .unwrap();
    assert_eq!(balance, U256::from(12345u64));
}
// `anvil_nodeInfo` reports the node's configuration and current head state.
#[tokio::test(flavor = "multi_thread")]
async fn can_get_node_info() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let node_info = api.anvil_node_info().await.unwrap();
    let provider = handle.http_provider();
    let block_number = provider.get_block_number().await.unwrap();
    let block = provider.get_block(BlockId::from(block_number)).await.unwrap().unwrap();
    // Default hardfork of a test node; this must track the default spec.
    let hard_fork: &str = SpecId::OSAKA.into();
    let expected_node_info = NodeInfo {
        current_block_number: 0_u64,
        current_block_timestamp: 1,
        current_block_hash: block.header.hash,
        hard_fork: hard_fork.to_string(),
        transaction_order: "fees".to_owned(),
        environment: NodeEnvironment {
            // Defaults: 0x3b9aca00 = 1 gwei base fee, 0x7a69 = 31337 chain id,
            // 0x1c9c380 = 30M gas limit, 0x77359400 = 2 gwei gas price.
            base_fee: U256::from_str("0x3b9aca00").unwrap().to(),
            chain_id: 0x7a69,
            gas_limit: U256::from_str("0x1c9c380").unwrap().to(),
            gas_price: U256::from_str("0x77359400").unwrap().to(),
        },
        fork_config: NodeForkConfig {
            // No fork configured on a plain test node.
            fork_url: None,
            fork_block_number: None,
            fork_retry_backoff: None,
        },
    };
    assert_eq!(node_info, expected_node_info);
}
// `anvil_metadata` mirrors the current head and instance data; a non-forked
// node reports `forked_network: None`.
#[tokio::test(flavor = "multi_thread")]
async fn can_get_metadata() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let metadata = api.anvil_metadata().await.unwrap();
    let provider = handle.http_provider();
    let block_number = provider.get_block_number().await.unwrap();
    let chain_id = provider.get_chain_id().await.unwrap();
    let block = provider.get_block(BlockId::from(block_number)).await.unwrap().unwrap();
    let expected_metadata = Metadata {
        latest_block_hash: block.header.hash,
        latest_block_number: block_number,
        chain_id,
        client_version: CLIENT_VERSION.to_string(),
        instance_id: api.instance_id(),
        forked_network: None,
        snapshots: Default::default(),
    };
    assert_eq!(metadata, expected_metadata);
}
// On a forked node `anvil_metadata` additionally populates `forked_network`
// with the upstream chain id and fork point.
#[tokio::test(flavor = "multi_thread")]
async fn can_get_metadata_on_fork() {
    let (api, handle) =
        spawn(NodeConfig::test().with_eth_rpc_url(Some("https://bsc-dataseed.binance.org/"))).await;
    let provider = handle.http_provider();
    let metadata = api.anvil_metadata().await.unwrap();
    let block_number = provider.get_block_number().await.unwrap();
    let chain_id = provider.get_chain_id().await.unwrap();
    let block = provider.get_block(BlockId::from(block_number)).await.unwrap().unwrap();
    let expected_metadata = Metadata {
        latest_block_hash: block.header.hash,
        latest_block_number: block_number,
        chain_id,
        client_version: CLIENT_VERSION.to_string(),
        instance_id: api.instance_id(),
        // Fork details mirror the forked chain's head.
        forked_network: Some(ForkedNetwork {
            chain_id,
            fork_block_number: block_number,
            fork_block_hash: block.header.hash,
        }),
        snapshots: Default::default(),
    };
    assert_eq!(metadata, expected_metadata);
}
// `anvil_reset` spins up a fresh instance, so the reported instance id changes.
#[tokio::test(flavor = "multi_thread")]
async fn metadata_changes_on_reset() {
    let (api, _) =
        spawn(NodeConfig::test().with_eth_rpc_url(Some("https://bsc-dataseed.binance.org/"))).await;
    // Capture the instance id before resetting the fork.
    let id_before = api.anvil_metadata().await.unwrap().instance_id;
    api.anvil_reset(Some(Forking { json_rpc_url: None, block_number: None })).await.unwrap();
    // After the reset a different instance id must be reported.
    let id_after = api.anvil_metadata().await.unwrap().instance_id;
    assert_ne!(id_before, id_after);
}
// Receipts keep the effective gas price of the block they were mined in, even
// after additional blocks change the base fee.
#[tokio::test(flavor = "multi_thread")]
async fn test_get_transaction_receipt() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    // set the base fee
    let new_base_fee = U256::from(1000);
    api.anvil_set_next_block_base_fee_per_gas(new_base_fee).await.unwrap();
    // send a EIP-1559 transaction
    let to = Address::random();
    let val = U256::from(1337);
    let tx = TransactionRequest::default().with_to(to).with_value(val);
    let tx = WithOtherFields::new(tx);
    let receipt = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    // the block should have the new base fee
    let block = provider.get_block(BlockId::default()).await.unwrap().unwrap();
    assert_eq!(block.header.base_fee_per_gas.unwrap(), new_base_fee.to::<u64>());
    // mine blocks
    api.evm_mine(None).await.unwrap();
    // the transaction receipt should have the original effective gas price
    let new_receipt = provider.get_transaction_receipt(receipt.transaction_hash).await.unwrap();
    assert_eq!(receipt.effective_gas_price, new_receipt.unwrap().effective_gas_price);
}
// The chain id can be overridden at runtime via `anvil_setChainId`.
#[tokio::test(flavor = "multi_thread")]
async fn test_set_chain_id() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    // A plain test node starts with the default dev chain id.
    assert_eq!(provider.get_chain_id().await.unwrap(), 31337);
    // Switch to a custom chain id and verify it is reported back.
    let new_id = 1234;
    api.anvil_set_chain_id(new_id).await.unwrap();
    assert_eq!(provider.get_chain_id().await.unwrap(), new_id);
}
// <https://github.com/foundry-rs/foundry/issues/6096>
// Reverting a snapshot on a fork must restore the exact previous head block
// and still allow mining to continue with sane timestamps.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_revert_next_block_timestamp() {
    let (api, _handle) = spawn(fork_config()).await;
    // Mine a new block, and check the new block gas limit
    api.mine_one().await;
    let latest_block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
    let state_snapshot = api.evm_snapshot().await.unwrap();
    api.mine_one().await;
    api.evm_revert(state_snapshot).await.unwrap();
    // The head is identical to the block captured before the snapshot.
    let block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
    assert_eq!(block, latest_block);
    api.mine_one().await;
    // Mining after the revert keeps timestamps non-decreasing.
    let block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
    assert!(block.header.timestamp >= latest_block.header.timestamp);
}
// test that after a snapshot revert, the env block is reset
// to its correct value (block number, etc.)
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_revert_call_latest_block_timestamp() {
    let (api, handle) = spawn(fork_config()).await;
    let provider = handle.http_provider();
    // Mine a new block, and check the new block gas limit
    api.mine_one().await;
    let latest_block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
    let state_snapshot = api.evm_snapshot().await.unwrap();
    api.mine_one().await;
    api.evm_revert(state_snapshot).await.unwrap();
    // Use the Multicall contract on the forked chain to read the EVM's view of
    // the current block; every field must match the restored head block.
    let multicall_contract =
        Multicall::new(address!("0xeefba1e63905ef1d7acba5a8513c70307c1ce441"), &provider);
    let timestamp = multicall_contract.getCurrentBlockTimestamp().call().await.unwrap();
    assert_eq!(timestamp, U256::from(latest_block.header.timestamp));
    let difficulty = multicall_contract.getCurrentBlockDifficulty().call().await.unwrap();
    assert_eq!(difficulty, U256::from(latest_block.header.difficulty));
    let gaslimit = multicall_contract.getCurrentBlockGasLimit().call().await.unwrap();
    assert_eq!(gaslimit, U256::from(latest_block.header.gas_limit));
    let coinbase = multicall_contract.getCurrentBlockCoinbase().call().await.unwrap();
    assert_eq!(coinbase, latest_block.header.beneficiary);
}
// `anvil_removePoolTransactions` drops all pending pool transactions from a
// given sender.
#[tokio::test(flavor = "multi_thread")]
async fn can_remove_pool_transactions() {
    let (api, handle) =
        spawn(NodeConfig::test().with_blocktime(Some(Duration::from_secs(5)))).await;
    let wallet = handle.dev_wallets().next().unwrap();
    let signer: EthereumWallet = wallet.clone().into();
    let from = wallet.address();
    let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
    let to = Address::random();
    let val = U256::from(1337);
    // Build the request directly from the dev wallet. The previous code set a
    // throwaway random `sender` as `from` and immediately overrode it with
    // `.with_from(from)` at send time, which only obscured intent.
    let tx = TransactionRequest::default().with_from(from).with_to(to).with_value(val);
    let tx = WithOtherFields::new(tx);
    // With a 5s blocktime the tx stays pending long enough to inspect the pool.
    provider.send_transaction(tx).await.unwrap().register().await.unwrap();
    let initial_txs = provider.txpool_inspect().await.unwrap();
    assert_eq!(initial_txs.pending.len(), 1);
    // Dropping this sender's pool transactions empties the pending set.
    api.anvil_remove_pool_transactions(wallet.address()).await.unwrap();
    let final_txs = provider.txpool_inspect().await.unwrap();
    assert_eq!(final_txs.pending.len(), 0);
}
// End-to-end coverage of `anvil_reorg`: replacing blocks with new transactions
// (JSON and raw), reverting contract code/storage, and invalid-option errors.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "flaky"]
async fn test_reorg() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let accounts = handle.dev_wallets().collect::<Vec<_>>();
    // Test calls
    // Populate chain
    for i in 0..10 {
        let tx = TransactionRequest::default()
            .to(accounts[0].address())
            .value(U256::from(i))
            .from(accounts[1].address());
        let tx = WithOtherFields::new(tx);
        api.send_transaction(tx).await.unwrap();
        let tx = TransactionRequest::default()
            .to(accounts[1].address())
            .value(U256::from(i))
            .from(accounts[2].address());
        let tx = WithOtherFields::new(tx);
        api.send_transaction(tx).await.unwrap();
    }
    // Define transactions
    // 5 txs for each of the first 3 reorged blocks (pair index = block offset).
    let mut txs = vec![];
    for i in 0..3 {
        let from = accounts[i].address();
        let to = accounts[i + 1].address();
        for j in 0..5 {
            let tx = TransactionRequest::default().from(from).to(to).value(U256::from(j));
            txs.push((TransactionData::JSON(tx), i as u64));
        }
    }
    let prev_height = provider.get_block_number().await.unwrap();
    api.anvil_reorg(ReorgOptions { depth: 7, tx_block_pairs: txs }).await.unwrap();
    // A reorg replaces blocks; chain height is unchanged.
    let reorged_height = provider.get_block_number().await.unwrap();
    assert_eq!(reorged_height, prev_height);
    // The first 3 reorged blocks should have 5 transactions each
    for num in 14..17 {
        let block = provider.get_block_by_number(num.into()).full().await.unwrap();
        let block = block.unwrap();
        assert_eq!(block.transactions.len(), 5);
    }
    // Verify that historic blocks are still accessible
    for num in (0..14).rev() {
        let _ = provider.get_block_by_number(num.into()).full().await.unwrap();
    }
    // Send a few more transaction to verify the chain can still progress
    for i in 0..3 {
        let tx = TransactionRequest::default()
            .to(accounts[0].address())
            .value(U256::from(i))
            .from(accounts[1].address());
        let tx = WithOtherFields::new(tx);
        api.send_transaction(tx).await.unwrap();
    }
    // Test reverting code
    let greeter = abi::Greeter::deploy(provider.clone(), "Reorg".to_string()).await.unwrap();
    api.anvil_reorg(ReorgOptions { depth: 5, tx_block_pairs: vec![] }).await.unwrap();
    // A reorg deeper than the deployment removes the contract's code again.
    let code = api.get_code(*greeter.address(), Some(BlockId::latest())).await.unwrap();
    assert_eq!(code, Bytes::default());
    // Test reverting contract storage
    let storage =
        abi::SimpleStorage::deploy(provider.clone(), "initial value".to_string()).await.unwrap();
    api.evm_mine(Some(MineOptions::Options { timestamp: None, blocks: Some(5) })).await.unwrap();
    let _ = storage
        .setValue("ReorgMe".to_string())
        .from(accounts[0].address())
        .send()
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    api.anvil_reorg(ReorgOptions { depth: 3, tx_block_pairs: vec![] }).await.unwrap();
    // The write landed inside the reorged range, so the original value returns.
    let value = storage.getValue().call().await.unwrap();
    assert_eq!("initial value".to_string(), value);
    api.mine_one().await;
    api.mine_one().await;
    // Test raw transaction data
    let mut tx = TxEip1559 {
        chain_id: api.chain_id(),
        to: TxKind::Call(accounts[1].address()),
        value: U256::from(100),
        max_priority_fee_per_gas: 1000000000000,
        max_fee_per_gas: 10000000000000,
        gas_limit: 21000,
        ..Default::default()
    };
    let signature = accounts[5].sign_transaction_sync(&mut tx).unwrap();
    let tx = tx.into_signed(signature);
    let mut encoded = vec![];
    tx.eip2718_encode(&mut encoded);
    let pre_bal = provider.get_balance(accounts[5].address()).await.unwrap();
    api.anvil_reorg(ReorgOptions {
        depth: 1,
        tx_block_pairs: vec![(TransactionData::Raw(encoded.into()), 0)],
    })
    .await
    .unwrap();
    // The injected raw tx spends from account 5, so its balance must change.
    let post_bal = provider.get_balance(accounts[5].address()).await.unwrap();
    assert_ne!(pre_bal, post_bal);
    // Test reorg depth exceeding current height
    let res = api.anvil_reorg(ReorgOptions { depth: 100, tx_block_pairs: vec![] }).await;
    assert!(res.is_err());
    // Test reorg tx pairs exceeds chain length
    let res = api
        .anvil_reorg(ReorgOptions {
            depth: 1,
            tx_block_pairs: vec![(TransactionData::JSON(TransactionRequest::default()), 10)],
        })
        .await;
    assert!(res.is_err());
}
// `anvil_rollback` removes blocks from the head: `None` rolls back one block,
// `Some(depth)` rolls back `depth` blocks, restoring earlier heads exactly.
#[tokio::test(flavor = "multi_thread")]
async fn test_rollback() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    // Mine 5 blocks
    for _ in 0..5 {
        api.mine_one().await;
    }
    // Get block 4 for later comparison
    let block4 = provider.get_block(4.into()).await.unwrap().unwrap();
    // Rollback with None should rollback 1 block
    api.anvil_rollback(None).await.unwrap();
    // Assert we're at block 4 and the block contents are kept the same
    let head = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    assert_eq!(head, block4);
    // Get block 1 for comparison
    let block1 = provider.get_block(1.into()).await.unwrap().unwrap();
    // Rollback to block 1
    let depth = 3; // from block 4 to block 1
    api.anvil_rollback(Some(depth)).await.unwrap();
    // Assert we're at block 1 and the block contents are kept the same
    let head = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    assert_eq!(head, block1);
}
// Block retrieval works when the node is configured with a non-default
// (Arbitrum) chain id.
#[tokio::test(flavor = "multi_thread")]
async fn test_arb_get_block() {
    let (api, _handle) = spawn(NodeConfig::test().with_chain_id(Some(421611u64))).await;
    // Advance the chain by two blocks.
    for _ in 0..2 {
        api.mine_one().await;
    }
    assert_eq!(api.block_number().unwrap().to::<u64>(), 2);
    // Historic lookup by number still resolves.
    let block = api.block_by_number(1.into()).await.unwrap().unwrap();
    assert_eq!(block.header.number, 1);
}
// Set next_block_timestamp same as previous block
// api.evm_set_next_block_timestamp(0).unwrap();
#[tokio::test(flavor = "multi_thread")]
async fn test_mine_blk_with_prev_timestamp() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let init_blk = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    let init_number = init_blk.header.number;
    let init_timestamp = init_blk.header.timestamp;
    // mock timestamp: pin the next block to the previous block's timestamp
    api.evm_set_next_block_timestamp(init_timestamp).unwrap();
    api.mine_one().await;
    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    let next_blk_num = block.header.number;
    let next_blk_timestamp = block.header.timestamp;
    assert_eq!(next_blk_num, init_number + 1);
    assert_eq!(next_blk_timestamp, init_timestamp);
    // Sleep for 1 second so wall-clock time observably advances
    tokio::time::sleep(Duration::from_secs(1)).await;
    // Subsequent block should have a greater timestamp than previous block.
    api.mine_one().await;
    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    let third_blk_num = block.header.number;
    let third_blk_timestamp = block.header.timestamp;
    assert_eq!(third_blk_num, init_number + 2);
    // Strict `>` already implies inequality; the former `assert_ne!` was redundant.
    assert!(third_blk_timestamp > next_blk_timestamp);
}
// `evm_increaseTime(0)` means the next block reuses the previous block's
// timestamp while the height still advances.
#[tokio::test(flavor = "multi_thread")]
async fn test_increase_time_by_zero() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let before = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    // Bump the clock by zero seconds; the call's return value is irrelevant here.
    let _ = api.evm_increase_time(U256::ZERO).await;
    api.mine_one().await;
    let mined = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    // Height advances by one, timestamp stays identical.
    assert_eq!(mined.header.number, before.header.number + 1);
    assert_eq!(mined.header.timestamp, before.header.timestamp);
}
// evm_mine(MineOptions::Timestamp(prev_block_timestamp))
#[tokio::test(flavor = "multi_thread")]
async fn evm_mine_blk_with_same_timestamp() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let init_blk = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
let init_number = init_blk.header.number;
let init_timestamp = init_blk.header.timestamp;
api.evm_mine(Some(MineOptions::Timestamp(Some(init_timestamp)))).await.unwrap();
let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
let next_blk_num = block.header.number;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/gas.rs | crates/anvil/tests/it/gas.rs | //! Gas related tests
use crate::utils::http_provider_with_signer;
use alloy_network::{EthereumWallet, TransactionBuilder};
use alloy_primitives::{Address, U64, U256, uint};
use alloy_provider::Provider;
use alloy_rpc_types::{BlockId, TransactionRequest};
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, eth::fees::INITIAL_BASE_FEE, spawn};
const GAS_TRANSFER: u64 = 21_000;
#[tokio::test(flavor = "multi_thread")]
async fn test_gas_limit_applied_from_config() {
let (api, _handle) = spawn(NodeConfig::test().with_gas_limit(Some(10_000_000))).await;
assert_eq!(api.gas_limit(), uint!(10_000_000_U256));
}
// Disabling the block gas limit makes the node report `u64::MAX`.
#[tokio::test(flavor = "multi_thread")]
async fn test_gas_limit_disabled_from_config() {
    let (api, _handle) = spawn(NodeConfig::test().disable_block_gas_limit(true)).await;
    // see https://github.com/foundry-rs/foundry/pull/8933
    let reported = api.gas_limit();
    assert_eq!(reported, U256::from(U64::MAX));
}
// A transfer that fills the whole block (gas limit == cost of one transfer)
// must raise the next base fee by the maximum step (125_000_000 wei here,
// i.e. 12.5% of a 1 gwei base fee).
#[tokio::test(flavor = "multi_thread")]
async fn test_basefee_full_block() {
    let (_api, handle) = spawn(
        NodeConfig::test().with_base_fee(Some(INITIAL_BASE_FEE)).with_gas_limit(Some(GAS_TRANSFER)),
    )
    .await;
    let wallet = handle.dev_wallets().next().unwrap();
    let signer: EthereumWallet = wallet.clone().into();
    let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
    let tx = TransactionRequest::default().to(Address::random()).with_value(U256::from(1337));
    let tx = WithOtherFields::new(tx);
    provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let base_fee = provider
        .get_block(BlockId::latest())
        .await
        .unwrap()
        .unwrap()
        .header
        .base_fee_per_gas
        .unwrap();
    // Mine a second full block and compare base fees.
    provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let next_base_fee = provider
        .get_block(BlockId::latest())
        .await
        .unwrap()
        .unwrap()
        .header
        .base_fee_per_gas
        .unwrap();
    assert!(next_base_fee > base_fee);
    // max increase, full block
    assert_eq!(next_base_fee, INITIAL_BASE_FEE + 125_000_000);
}
// Transactions filling exactly half the block keep the base fee unchanged:
// EIP-1559 adjusts relative to the 50% gas target.
#[tokio::test(flavor = "multi_thread")]
async fn test_basefee_half_block() {
    let (_api, handle) = spawn(
        NodeConfig::test()
            .with_base_fee(Some(INITIAL_BASE_FEE))
            .with_gas_limit(Some(GAS_TRANSFER * 2)),
    )
    .await;
    let wallet = handle.dev_wallets().next().unwrap();
    let signer: EthereumWallet = wallet.clone().into();
    let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
    // Two plain transfers, each mined into its own half-full block.
    let tx = TransactionRequest::default().to(Address::random()).with_value(U256::from(1337));
    let tx = WithOtherFields::new(tx);
    provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let tx = TransactionRequest::default().to(Address::random()).with_value(U256::from(1337));
    let tx = WithOtherFields::new(tx);
    provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let next_base_fee = provider
        .get_block(BlockId::latest())
        .await
        .unwrap()
        .unwrap()
        .header
        .base_fee_per_gas
        .unwrap();
    // unchanged, half block (needless braces around the constant removed)
    assert_eq!(next_base_fee, INITIAL_BASE_FEE);
}
// An empty block sits below the gas target, so the base fee must decrease.
#[tokio::test(flavor = "multi_thread")]
async fn test_basefee_empty_block() {
    let (api, handle) = spawn(NodeConfig::test().with_base_fee(Some(INITIAL_BASE_FEE))).await;
    let wallet = handle.dev_wallets().next().unwrap();
    let signer: EthereumWallet = wallet.clone().into();
    let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
    // One transfer establishes a baseline base fee.
    let tx = TransactionRequest::default().with_to(Address::random()).with_value(U256::from(1337));
    let tx = WithOtherFields::new(tx);
    provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let base_fee = provider
        .get_block(BlockId::latest())
        .await
        .unwrap()
        .unwrap()
        .header
        .base_fee_per_gas
        .unwrap();
    // mine empty block
    api.mine_one().await;
    let next_base_fee = provider
        .get_block(BlockId::latest())
        .await
        .unwrap()
        .unwrap()
        .header
        .base_fee_per_gas
        .unwrap();
    // empty block, decreased base fee
    assert!(next_base_fee < base_fee);
}
// Transactions priced below the configured base fee are rejected; pricing
// exactly at the base fee is accepted and mined.
#[tokio::test(flavor = "multi_thread")]
async fn test_respect_base_fee() {
    let base_fee = 50u128;
    let (_api, handle) = spawn(NodeConfig::test().with_base_fee(Some(base_fee as u64))).await;
    let provider = handle.http_provider();
    // Template transfer; the gas price is set per case below.
    let mut priced_ok = WithOtherFields::new(
        TransactionRequest::default().with_to(Address::random()).with_value(U256::from(100)),
    );
    // Underpriced by one wei: must be rejected with the base-fee error.
    let mut underpriced = priced_ok.clone();
    underpriced.set_gas_price(base_fee - 1);
    let err = provider.send_transaction(underpriced).await.unwrap_err();
    assert!(err.to_string().contains("max fee per gas less than block base fee"));
    // Priced exactly at the base fee: accepted and mined.
    priced_ok.set_gas_price(base_fee);
    provider.send_transaction(priced_ok.clone()).await.unwrap().get_receipt().await.unwrap();
}
// A transaction whose priority fee exceeds its fee cap is invalid per
// EIP-1559 and must be rejected.
#[tokio::test(flavor = "multi_thread")]
async fn test_tip_above_fee_cap() {
    let base_fee = 50u128;
    let (_api, handle) = spawn(NodeConfig::test().with_base_fee(Some(base_fee as u64))).await;
    let provider = handle.http_provider();
    // Deliberately set tip = cap + 1.
    let request = TransactionRequest::default()
        .with_to(Address::random())
        .with_value(U256::from(100))
        .max_fee_per_gas(base_fee)
        .max_priority_fee_per_gas(base_fee + 1);
    let request = WithOtherFields::new(request);
    let err = provider.send_transaction(request).await.unwrap_err();
    assert!(
        err.to_string().contains("max priority fee per gas higher than max fee per gas")
    );
}
// `eth_feeHistory`'s predicted next base fee must match the base fee the next
// mined block actually uses.
#[tokio::test(flavor = "multi_thread")]
async fn test_can_use_fee_history() {
    let base_fee = 50u128;
    let (_api, handle) = spawn(NodeConfig::test().with_base_fee(Some(base_fee as u64))).await;
    let provider = handle.http_provider();
    for _ in 0..10 {
        let fee_history = provider.get_fee_history(1, Default::default(), &[]).await.unwrap();
        // The last entry predicts the base fee of the *next* block.
        let next_base_fee = *fee_history.base_fee_per_gas.last().unwrap();
        let tx = TransactionRequest::default()
            .with_to(Address::random())
            .with_value(U256::from(100))
            .with_gas_price(next_base_fee);
        let tx = WithOtherFields::new(tx);
        let receipt =
            provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
        assert!(receipt.inner.inner.is_success());
        let fee_history_after = provider.get_fee_history(1, Default::default(), &[]).await.unwrap();
        // The first entry now refers to the block that was just mined.
        let latest_fee_history_fee = *fee_history_after.base_fee_per_gas.first().unwrap() as u64;
        let latest_block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
        assert_eq!(latest_block.header.base_fee_per_gas.unwrap(), latest_fee_history_fee);
        assert_eq!(latest_fee_history_fee, next_base_fee as u64);
    }
}
// Gas estimation must treat "no input" and "explicitly empty input" identically
// (a plain 21k transfer) and charge extra only when calldata is present.
#[tokio::test(flavor = "multi_thread")]
async fn test_estimate_gas_empty_data() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let accounts = handle.dev_accounts().collect::<Vec<_>>();
    let from = accounts[0];
    let to = accounts[1];
    // Transfer with no input field at all.
    let tx_without_data =
        TransactionRequest::default().with_from(from).with_to(to).with_value(U256::from(1));
    let gas_without_data = api
        .estimate_gas(WithOtherFields::new(tx_without_data), None, Default::default())
        .await
        .unwrap();
    // Transfer with an explicitly empty input.
    let tx_with_empty_data = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_value(U256::from(1))
        .with_input(vec![]);
    let gas_with_empty_data = api
        .estimate_gas(WithOtherFields::new(tx_with_empty_data), None, Default::default())
        .await
        .unwrap();
    // Transfer with two bytes of calldata.
    let tx_with_data = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_value(U256::from(1))
        .with_input(vec![0x12, 0x34]);
    let gas_with_data = api
        .estimate_gas(WithOtherFields::new(tx_with_data), None, Default::default())
        .await
        .unwrap();
    // Both no-input variants cost exactly a transfer.
    assert_eq!(gas_without_data, U256::from(GAS_TRANSFER));
    assert_eq!(gas_with_empty_data, U256::from(GAS_TRANSFER));
    // Non-empty calldata costs more than a plain transfer.
    assert!(gas_with_data > U256::from(GAS_TRANSFER));
    assert_eq!(gas_without_data, gas_with_empty_data);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/main.rs | crates/anvil/tests/it/main.rs | mod abi;
// Integration-test modules for the anvil node; one module per feature area.
mod anvil;
mod anvil_api;
mod api;
mod beacon_api;
mod eip4844;
mod eip7702;
mod fork;
mod gas;
mod genesis;
mod ipc;
mod logs;
mod optimism;
mod otterscan;
mod proof;
mod pubsub;
mod revert;
mod sign;
mod simulate;
mod state;
mod traces;
mod transaction;
mod txpool;
pub mod utils;
mod wsapi;
// Re-export tracing initialization for use by individual tests.
pub use foundry_test_utils::init_tracing;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/anvil.rs | crates/anvil/tests/it/anvil.rs | //! tests for anvil specific logic
use alloy_consensus::EMPTY_ROOT_HASH;
use alloy_eips::BlockNumberOrTag;
use alloy_network::{ReceiptResponse, TransactionBuilder};
use alloy_primitives::{Address, B256, U256, bytes, hex};
use alloy_provider::Provider;
use alloy_rpc_types::TransactionRequest;
use alloy_sol_types::SolCall;
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
// Auto-mining and interval mining can be toggled at runtime; an interval of 0
// disables interval mining entirely.
#[tokio::test(flavor = "multi_thread")]
async fn test_can_change_mining_mode() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    // A fresh test node auto-mines and has no interval configured.
    assert!(api.anvil_get_auto_mine().unwrap());
    assert!(api.anvil_get_interval_mining().unwrap().is_none());
    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, 0);
    api.anvil_set_interval_mining(1).unwrap();
    // Enabling interval mining turns auto-mining off.
    assert!(!api.anvil_get_auto_mine().unwrap());
    assert!(matches!(api.anvil_get_interval_mining().unwrap(), Some(1)));
    // changing the mining mode will instantly mine a new block
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;
    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, 0);
    tokio::time::sleep(std::time::Duration::from_millis(700)).await;
    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, 1);
    // assert that no block is mined when the interval is set to 0
    api.anvil_set_interval_mining(0).unwrap();
    assert!(!api.anvil_get_auto_mine().unwrap());
    assert!(api.anvil_get_interval_mining().unwrap().is_none());
    tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_default_dev_keys() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let dev_accounts = handle.dev_accounts().collect::<Vec<_>>();
let accounts = provider.get_accounts().await.unwrap();
assert_eq!(dev_accounts, accounts);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_set_empty_code() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let addr = Address::random();
api.anvil_set_code(addr, Vec::new().into()).await.unwrap();
let code = api.get_code(addr, None).await.unwrap();
assert!(code.as_ref().is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_can_set_genesis_timestamp() {
let genesis_timestamp = 1000u64;
let (_api, handle) =
spawn(NodeConfig::test().with_genesis_timestamp(genesis_timestamp.into())).await;
let provider = handle.http_provider();
assert_eq!(
genesis_timestamp,
provider.get_block(0.into()).await.unwrap().unwrap().header.timestamp
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_can_use_default_genesis_timestamp() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
assert_ne!(0u64, provider.get_block(0.into()).await.unwrap().unwrap().header.timestamp);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_can_handle_large_timestamp() {
let (api, _handle) = spawn(NodeConfig::test()).await;
let num = 317071597274;
api.evm_set_next_block_timestamp(num).unwrap();
api.mine_one().await;
let block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
assert_eq!(block.header.timestamp, num);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_shanghai_fields() {
let (api, _handle) =
spawn(NodeConfig::test().with_hardfork(Some(EthereumHardfork::Shanghai.into()))).await;
api.mine_one().await;
let block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
assert_eq!(block.header.withdrawals_root, Some(EMPTY_ROOT_HASH));
assert_eq!(block.withdrawals, Some(Default::default()));
assert!(block.header.blob_gas_used.is_none());
assert!(block.header.excess_blob_gas.is_none());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_cancun_fields() {
let (api, _handle) =
spawn(NodeConfig::test().with_hardfork(Some(EthereumHardfork::Cancun.into()))).await;
api.mine_one().await;
let block = api.block_by_number(BlockNumberOrTag::Latest).await.unwrap().unwrap();
assert_eq!(block.header.withdrawals_root, Some(EMPTY_ROOT_HASH));
assert_eq!(block.withdrawals, Some(Default::default()));
assert!(block.header.blob_gas_used.is_some());
assert!(block.header.excess_blob_gas.is_some());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_can_set_genesis_block_number() {
let (_api, handle) = spawn(NodeConfig::test().with_genesis_block_number(Some(1337u64))).await;
let provider = handle.http_provider();
let block_number = provider.get_block_number().await.unwrap();
assert_eq!(block_number, 1337u64);
assert_eq!(1337, provider.get_block(1337.into()).await.unwrap().unwrap().header.number);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_can_use_default_genesis_block_number() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
assert_eq!(0, provider.get_block(0.into()).await.unwrap().unwrap().header.number);
}
/// Verify that genesis block number affects both RPC and EVM execution layer.
#[tokio::test(flavor = "multi_thread")]
async fn test_number_opcode_reflects_genesis_block_number() {
let genesis_number: u64 = 4242;
let (api, handle) =
spawn(NodeConfig::test().with_genesis_block_number(Some(genesis_number))).await;
let provider = handle.http_provider();
// RPC layer should return configured genesis number
let bn = provider.get_block_number().await.unwrap();
assert_eq!(bn, genesis_number);
// Deploy bytecode that returns block.number
// 0x43 (NUMBER) 0x5f (PUSH0) 0x52 (MSTORE) 0x60 0x20 (PUSH1 0x20) 0x5f (PUSH0) 0xf3 (RETURN)
let target = Address::random();
api.anvil_set_code(target, bytes!("435f5260205ff3")).await.unwrap();
// EVM execution should reflect genesis number (+ 1 for pending block)
let tx = alloy_rpc_types::TransactionRequest::default().with_to(target);
let out = provider.call(tx.into()).await.unwrap();
let returned = U256::from_be_slice(out.as_ref());
assert_eq!(returned, U256::from(genesis_number + 1));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_anvil_recover_signature() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
alloy_sol_types::sol! {
#[sol(rpc)]
contract TestRecover {
function testRecover(bytes32 hash, uint8 v, bytes32 r, bytes32 s, address expected) external pure {
address recovered = ecrecover(hash, v, r, s);
require(recovered == expected, "ecrecover failed: address mismatch");
}
}
}
let bytecode = hex::decode(
"0x60808060405234601557610125908161001a8239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c63bff0b743146023575f80fd5b3460eb5760a036600319011260eb5760243560ff811680910360eb576084356001600160a01b038116929083900360eb5760805f916020936004358252848201526044356040820152606435606082015282805260015afa1560e0575f516001600160a01b031603609057005b60405162461bcd60e51b815260206004820152602260248201527f65637265636f766572206661696c65643a2061646472657373206d69736d61746044820152610c6d60f31b6064820152608490fd5b6040513d5f823e3d90fd5b5f80fdfea264697066735822122006368b42bca31c97f2c409a1cc5186dc899d4255ecc28db7bbb0ad285dc82ae464736f6c634300081c0033",
).unwrap();
let tx = TransactionRequest::default().with_deploy_code(bytecode);
let receipt = provider.send_transaction(tx.into()).await.unwrap().get_receipt().await.unwrap();
let contract_address = receipt.contract_address().unwrap();
let contract = TestRecover::new(contract_address, &provider);
let sig = alloy_primitives::hex::decode("11".repeat(65)).unwrap();
let r = B256::from_slice(&sig[0..32]);
let s = B256::from_slice(&sig[32..64]);
let v = sig[64];
let fake_hash = B256::random();
let expected = alloy_primitives::address!("0x1234567890123456789012345678901234567890");
api.anvil_impersonate_signature(sig.clone().into(), expected).await.unwrap();
let result = contract.testRecover(fake_hash, v, r, s, expected).call().await;
assert!(result.is_ok(), "ecrecover failed: {:?}", result.err());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fake_signature_transaction() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
alloy_sol_types::sol! {
#[sol(rpc)]
contract TestRecover {
function testRecover(bytes32 hash, uint8 v, bytes32 r, bytes32 s, address expected) external pure {
address recovered = ecrecover(hash, v, r, s);
require(recovered == expected, "ecrecover failed: address mismatch");
}
}
}
let bytecode = hex::decode(
"0x60808060405234601557610125908161001a8239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c63bff0b743146023575f80fd5b3460eb5760a036600319011260eb5760243560ff811680910360eb576084356001600160a01b038116929083900360eb5760805f916020936004358252848201526044356040820152606435606082015282805260015afa1560e0575f516001600160a01b031603609057005b60405162461bcd60e51b815260206004820152602260248201527f65637265636f766572206661696c65643a2061646472657373206d69736d61746044820152610c6d60f31b6064820152608490fd5b6040513d5f823e3d90fd5b5f80fdfea264697066735822122006368b42bca31c97f2c409a1cc5186dc899d4255ecc28db7bbb0ad285dc82ae464736f6c634300081c0033",
).unwrap();
let tx = TransactionRequest::default().with_deploy_code(bytecode);
let _receipt = provider.send_transaction(tx.into()).await.unwrap().get_receipt().await.unwrap();
let sig = alloy_primitives::hex::decode("11".repeat(65)).unwrap();
let r = B256::from_slice(&sig[0..32]);
let s = B256::from_slice(&sig[32..64]);
let v = sig[64];
let fake_hash = B256::random();
let expected = alloy_primitives::address!("0x1234567890123456789012345678901234567890");
api.anvil_impersonate_signature(sig.clone().into(), expected).await.unwrap();
let calldata = TestRecover::testRecoverCall { hash: fake_hash, v, r, s, expected }.abi_encode();
let tx = TransactionRequest::default().with_input(calldata);
let pending = provider.send_transaction(tx.into()).await.unwrap();
let result = pending.get_receipt().await;
assert!(result.is_ok(), "ecrecover failed: {:?}", result.err());
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/txpool.rs | crates/anvil/tests/it/txpool.rs | //! txpool related tests
use alloy_network::{ReceiptResponse, TransactionBuilder};
use alloy_primitives::U256;
use alloy_provider::{Provider, ext::TxPoolApi};
use alloy_rpc_types::TransactionRequest;
use alloy_serde::WithOtherFields;
use anvil::{NodeConfig, spawn};
#[tokio::test(flavor = "multi_thread")]
async fn geth_txpool() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
api.anvil_set_auto_mine(false).await.unwrap();
let account = provider.get_accounts().await.unwrap().remove(0);
let value = U256::from(42);
let gas_price = 221435145689u128;
let tx = TransactionRequest::default()
.with_to(account)
.with_from(account)
.with_value(value)
.with_gas_price(gas_price);
let tx = WithOtherFields::new(tx);
// send a few transactions
for _ in 0..10 {
let _ = provider.send_transaction(tx.clone()).await.unwrap();
}
// we gave a 20s block time, should be plenty for us to get the txpool's content
let status = provider.txpool_status().await.unwrap();
assert_eq!(status.pending, 10);
assert_eq!(status.queued, 0);
let inspect = provider.txpool_inspect().await.unwrap();
assert!(inspect.queued.is_empty());
let summary = inspect.pending.get(&account).unwrap();
for i in 0..10 {
let tx_summary = summary.get(&i.to_string()).unwrap();
assert_eq!(tx_summary.gas_price, gas_price);
assert_eq!(tx_summary.value, value);
assert_eq!(tx_summary.gas, 21000);
assert_eq!(tx_summary.to.unwrap(), account);
}
let content = provider.txpool_content().await.unwrap();
assert!(content.queued.is_empty());
let content = content.pending.get(&account).unwrap();
for nonce in 0..10 {
assert!(content.contains_key(&nonce.to_string()));
}
}
// Cf. https://github.com/foundry-rs/foundry/issues/11239
#[tokio::test(flavor = "multi_thread")]
async fn accepts_spend_after_funding_when_pool_checks_disabled() {
// Spawn with pool balance checks disabled
let (api, handle) = spawn(NodeConfig::test().with_disable_pool_balance_checks(true)).await;
let provider = handle.http_provider();
// Work with pending pool (no automine)
api.anvil_set_auto_mine(false).await.unwrap();
// Funder is a dev account controlled by the node
let funder = provider.get_accounts().await.unwrap().remove(0);
// Recipient/spender is a random address with zero balance that we'll impersonate
let spender = alloy_primitives::Address::random();
api.anvil_set_balance(spender, U256::from(0u64)).await.unwrap();
api.anvil_impersonate_account(spender).await.unwrap();
// Ensure tx1 (funding) has higher gas price so it's mined before tx2 within the same block
let gas_price_fund = 2_000_000_000_000u128; // 2_000 gwei
let gas_price_spend = 1_000_000_000u128; // 1 gwei
let fund_value = U256::from(1_000_000_000_000_000_000u128); // 1 ether
// tx1: fund spender from funder
let tx1 = TransactionRequest::default()
.with_from(funder)
.with_to(spender)
.with_value(fund_value)
.with_gas_price(gas_price_fund);
let tx1 = WithOtherFields::new(tx1);
// tx2: spender attempts to send value greater than their pre-funding balance (0),
// which would normally be rejected by pool balance checks, but should be accepted when disabled
let spend_value = fund_value - U256::from(21_000u64) * U256::from(gas_price_spend);
let tx2 = TransactionRequest::default()
.with_from(spender)
.with_to(funder)
.with_value(spend_value)
.with_gas_price(gas_price_spend);
let tx2 = WithOtherFields::new(tx2);
// Publish both transactions (funding first, then spend-before-funding-is-mined)
let sent1 = provider.send_transaction(tx1).await.unwrap();
let sent2 = provider.send_transaction(tx2).await.unwrap();
// Both should be accepted into the pool (pending)
let status = provider.txpool_status().await.unwrap();
assert_eq!(status.pending, 2);
assert_eq!(status.queued, 0);
// Mine a block and ensure both succeed
api.evm_mine(None).await.unwrap();
let receipt1 = sent1.get_receipt().await.unwrap();
let receipt2 = sent2.get_receipt().await.unwrap();
assert!(receipt1.status());
assert!(receipt2.status());
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/logs.rs | crates/anvil/tests/it/logs.rs | //! log/event related tests
use crate::{
abi::SimpleStorage::{self},
utils::{http_provider_with_signer, ws_provider_with_signer},
};
use alloy_network::EthereumWallet;
use alloy_primitives::{B256, map::B256HashSet};
use alloy_provider::Provider;
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use anvil::{NodeConfig, spawn};
use futures::StreamExt;
#[tokio::test(flavor = "multi_thread")]
async fn get_past_events() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let account = wallet.address();
let signer: EthereumWallet = wallet.into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let contract =
SimpleStorage::deploy(provider.clone(), "initial value".to_string()).await.unwrap();
let _ = contract
.setValue("hi".to_string())
.from(account)
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let simple_storage_address = *contract.address();
let filter = Filter::new()
.address(simple_storage_address)
.topic1(B256::from(account.into_word()))
.from_block(BlockNumberOrTag::from(0));
let logs = provider
.get_logs(&filter)
.await
.unwrap()
.into_iter()
.map(|log| log.log_decode::<SimpleStorage::ValueChanged>().unwrap())
.collect::<Vec<_>>();
// 2 events, 1 in constructor, 1 in call
assert_eq!(logs[0].inner.newValue, "initial value");
assert_eq!(logs[1].inner.newValue, "hi");
assert_eq!(logs.len(), 2);
// and we can fetch the events at a block hash
// let hash = provider.get_block(1).await.unwrap().unwrap().hash.unwrap();
let hash =
provider.get_block_by_number(BlockNumberOrTag::from(1)).await.unwrap().unwrap().header.hash;
let filter = Filter::new()
.address(simple_storage_address)
.topic1(B256::from(account.into_word()))
.at_block_hash(hash);
let logs = provider
.get_logs(&filter)
.await
.unwrap()
.into_iter()
.map(|log| log.log_decode::<SimpleStorage::ValueChanged>().unwrap())
.collect::<Vec<_>>();
assert_eq!(logs[0].inner.newValue, "initial value");
assert_eq!(logs.len(), 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn get_all_events() {
let (api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let account = wallet.address();
let signer: EthereumWallet = wallet.into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let contract =
SimpleStorage::deploy(provider.clone(), "initial value".to_string()).await.unwrap();
api.anvil_set_auto_mine(false).await.unwrap();
let pre_logs =
provider.get_logs(&Filter::new().from_block(BlockNumberOrTag::Earliest)).await.unwrap();
assert_eq!(pre_logs.len(), 1);
let pre_logs =
provider.get_logs(&Filter::new().from_block(BlockNumberOrTag::Number(0))).await.unwrap();
assert_eq!(pre_logs.len(), 1);
// spread logs across several blocks
let num_tx = 10;
let tx = contract.setValue("hi".to_string()).from(account);
for _ in 0..num_tx {
let tx = tx.send().await.unwrap();
api.mine_one().await;
tx.get_receipt().await.unwrap();
}
let logs =
provider.get_logs(&Filter::new().from_block(BlockNumberOrTag::Earliest)).await.unwrap();
let num_logs = num_tx + pre_logs.len();
assert_eq!(logs.len(), num_logs);
// test that logs returned from get_logs and get_transaction_receipt have
// the same log_index, block_number, and transaction_hash
let mut tasks = vec![];
let mut seen_tx_hashes = B256HashSet::default();
for log in &logs {
if seen_tx_hashes.contains(&log.transaction_hash.unwrap()) {
continue;
}
tasks.push(provider.get_transaction_receipt(log.transaction_hash.unwrap()));
seen_tx_hashes.insert(log.transaction_hash.unwrap());
}
let receipt_logs = futures::future::join_all(tasks)
.await
.into_iter()
.collect::<Result<Vec<_>, _>>()
.unwrap()
.into_iter()
.flat_map(|receipt| receipt.unwrap().inner.inner.inner.receipt.logs)
.collect::<Vec<_>>();
assert_eq!(receipt_logs.len(), logs.len());
for (receipt_log, log) in receipt_logs.iter().zip(logs.iter()) {
assert_eq!(receipt_log.transaction_hash, log.transaction_hash);
assert_eq!(receipt_log.block_number, log.block_number);
assert_eq!(receipt_log.log_index, log.log_index);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn watch_events() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let account = wallet.address();
let signer: EthereumWallet = wallet.into();
let provider = http_provider_with_signer(&handle.http_endpoint(), signer.clone());
let contract1 =
SimpleStorage::deploy(provider.clone(), "initial value".to_string()).await.unwrap();
// Spawn the event listener.
let event1 = contract1.event_filter::<SimpleStorage::ValueChanged>();
let mut stream1 = event1.watch().await.unwrap().into_stream();
// Also set up a subscription for the same thing.
let ws = ws_provider_with_signer(&handle.ws_endpoint(), signer.clone());
let contract2 = SimpleStorage::new(*contract1.address(), ws);
let event2 = contract2.event_filter::<SimpleStorage::ValueChanged>();
let mut stream2 = event2.watch().await.unwrap().into_stream();
let num_tx = 3;
let starting_block_number = provider.get_block_number().await.unwrap();
for i in 0..num_tx {
contract1
.setValue(i.to_string())
.from(account)
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let log = stream1.next().await.unwrap().unwrap();
let log2 = stream2.next().await.unwrap().unwrap();
assert_eq!(log.0.newValue, log2.0.newValue);
assert_eq!(log.0.newValue, i.to_string());
assert_eq!(log.1.block_number.unwrap(), starting_block_number + i + 1);
let hash = provider
.get_block_by_number(BlockNumberOrTag::from(starting_block_number + i + 1))
.await
.unwrap()
.unwrap()
.header
.hash;
assert_eq!(log.1.block_hash.unwrap(), hash);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/fork.rs | crates/anvil/tests/it/fork.rs | //! various fork related test
use crate::{
abi::{ERC721, Greeter},
utils::{http_provider, http_provider_with_signer},
};
use alloy_chains::NamedChain;
use alloy_eips::{
eip7840::BlobParams,
eip7910::{EthConfig, SystemContract},
};
use alloy_network::{EthereumWallet, ReceiptResponse, TransactionBuilder, TransactionResponse};
use alloy_primitives::{Address, Bytes, TxHash, TxKind, U64, U256, address, b256, bytes, uint};
use alloy_provider::Provider;
use alloy_rpc_types::{
AccountInfo, BlockId, BlockNumberOrTag,
anvil::Forking,
request::{TransactionInput, TransactionRequest},
state::EvmOverrides,
};
use alloy_serde::WithOtherFields;
use alloy_signer_local::PrivateKeySigner;
use anvil::{EthereumHardfork, NodeConfig, NodeHandle, PrecompileFactory, eth::EthApi, spawn};
use foundry_common::provider::get_http_provider;
use foundry_config::Config;
use foundry_evm_networks::NetworkConfigs;
use foundry_test_utils::rpc::{self, next_http_rpc_endpoint, next_rpc_endpoint};
use futures::StreamExt;
use std::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
thread::sleep,
time::Duration,
};
const BLOCK_NUMBER: u64 = 14_608_400u64;
const DEAD_BALANCE_AT_BLOCK_NUMBER: u128 = 12_556_069_338_441_120_059_867u128;
const BLOCK_TIMESTAMP: u64 = 1_650_274_250u64;
/// Represents an anvil fork of an anvil node
#[expect(unused)]
pub struct LocalFork {
origin_api: EthApi,
origin_handle: NodeHandle,
fork_api: EthApi,
fork_handle: NodeHandle,
}
#[expect(dead_code)]
impl LocalFork {
/// Spawns two nodes with the test config
pub async fn new() -> Self {
Self::setup(NodeConfig::test(), NodeConfig::test()).await
}
/// Spawns two nodes where one is a fork of the other
pub async fn setup(origin: NodeConfig, fork: NodeConfig) -> Self {
let (origin_api, origin_handle) = spawn(origin).await;
let (fork_api, fork_handle) =
spawn(fork.with_eth_rpc_url(Some(origin_handle.http_endpoint()))).await;
Self { origin_api, origin_handle, fork_api, fork_handle }
}
}
pub fn fork_config() -> NodeConfig {
NodeConfig::test()
.with_eth_rpc_url(Some(rpc::next_http_archive_rpc_url()))
.with_fork_block_number(Some(BLOCK_NUMBER))
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_gas_limit_applied_from_config() {
let (api, _handle) = spawn(fork_config().with_gas_limit(Some(10_000_000))).await;
assert_eq!(api.gas_limit(), uint!(10_000_000_U256));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_gas_limit_disabled_from_config() {
let (api, handle) = spawn(fork_config().disable_block_gas_limit(true)).await;
// see https://github.com/foundry-rs/foundry/pull/8933
assert_eq!(api.gas_limit(), U256::from(U64::MAX));
// try to mine a couple blocks
let provider = handle.http_provider();
let tx = TransactionRequest::default()
.to(Address::random())
.value(U256::from(1337u64))
.from(handle.dev_wallets().next().unwrap().address());
let tx = WithOtherFields::new(tx);
let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let tx = TransactionRequest::default()
.to(Address::random())
.value(U256::from(1337u64))
.from(handle.dev_wallets().next().unwrap().address());
let tx = WithOtherFields::new(tx);
let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_spawn_fork() {
let (api, _handle) = spawn(fork_config()).await;
assert!(api.is_fork());
let head = api.block_number().unwrap();
assert_eq!(head, U256::from(BLOCK_NUMBER))
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_get_balance() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
for _ in 0..10 {
let addr = Address::random();
let balance = api.balance(addr, None).await.unwrap();
let provider_balance = provider.get_balance(addr).await.unwrap();
assert_eq!(balance, provider_balance)
}
}
// <https://github.com/foundry-rs/foundry/issues/4082>
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_get_balance_after_mine() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let info = api.anvil_node_info().await.unwrap();
let number = info.fork_config.fork_block_number.unwrap();
assert_eq!(number, BLOCK_NUMBER);
let address = Address::random();
let _balance = provider.get_balance(address).await.unwrap();
api.evm_mine(None).await.unwrap();
let _balance = provider.get_balance(address).await.unwrap();
}
// <https://github.com/foundry-rs/foundry/issues/4082>
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_get_code_after_mine() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let info = api.anvil_node_info().await.unwrap();
let number = info.fork_config.fork_block_number.unwrap();
assert_eq!(number, BLOCK_NUMBER);
let address = Address::random();
let _code = provider.get_code_at(address).block_id(BlockId::number(1)).await.unwrap();
api.evm_mine(None).await.unwrap();
let _code = provider.get_code_at(address).block_id(BlockId::number(1)).await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_get_code() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
for _ in 0..10 {
let addr = Address::random();
let code = api.get_code(addr, None).await.unwrap();
let provider_code = provider.get_code_at(addr).await.unwrap();
assert_eq!(code, provider_code)
}
let addresses: Vec<Address> = vec![
"0x6b175474e89094c44da98b954eedeac495271d0f".parse().unwrap(),
"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48".parse().unwrap(),
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2".parse().unwrap(),
"0x1F98431c8aD98523631AE4a59f267346ea31F984".parse().unwrap(),
"0x68b3465833fb72A70ecDF485E0e4C7bD8665Fc45".parse().unwrap(),
];
for address in addresses {
let prev_code = api
.get_code(address, Some(BlockNumberOrTag::Number(BLOCK_NUMBER - 10).into()))
.await
.unwrap();
let code = api.get_code(address, None).await.unwrap();
let provider_code = provider.get_code_at(address).await.unwrap();
assert_eq!(code, prev_code);
assert_eq!(code, provider_code);
assert!(!code.as_ref().is_empty());
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_get_nonce() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
for _ in 0..10 {
let addr = Address::random();
let api_nonce = api.transaction_count(addr, None).await.unwrap().to::<u64>();
let provider_nonce = provider.get_transaction_count(addr).await.unwrap();
assert_eq!(api_nonce, provider_nonce);
}
let addr = Config::DEFAULT_SENDER;
let api_nonce = api.transaction_count(addr, None).await.unwrap().to::<u64>();
let provider_nonce = provider.get_transaction_count(addr).await.unwrap();
assert_eq!(api_nonce, provider_nonce);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_optimism_with_transaction_hash() {
use std::str::FromStr;
// Fork to a block with a specific transaction
let fork_tx_hash =
TxHash::from_str("fcb864b5a50f0f0b111dbbf9e9167b2cb6179dfd6270e1ad53aac6049c0ec038")
.unwrap();
let (api, _handle) = spawn(
NodeConfig::test()
.with_eth_rpc_url(Some(rpc::next_rpc_endpoint(NamedChain::Optimism)))
.with_fork_transaction_hash(Some(fork_tx_hash)),
)
.await;
// Make sure the fork starts from previous block
let block_number = api.block_number().unwrap().to::<u64>();
assert_eq!(block_number, 125777954 - 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_eth_fee_history() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let count = 10u64;
let _history =
api.fee_history(U256::from(count), BlockNumberOrTag::Latest, vec![]).await.unwrap();
let _provider_history =
provider.get_fee_history(count, BlockNumberOrTag::Latest, &[]).await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_reset() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let accounts: Vec<_> = handle.dev_wallets().collect();
let from = accounts[0].address();
let to = accounts[1].address();
let block_number = provider.get_block_number().await.unwrap();
let balance_before = provider.get_balance(to).await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
let initial_nonce = provider.get_transaction_count(from).await.unwrap();
let tx = TransactionRequest::default().to(to).value(amount).from(from);
let tx = WithOtherFields::new(tx);
let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
assert_eq!(tx.transaction_index, Some(0));
let nonce = provider.get_transaction_count(from).await.unwrap();
assert_eq!(nonce, initial_nonce + 1);
let to_balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance_before.saturating_add(amount), to_balance);
api.anvil_reset(Some(Forking { json_rpc_url: None, block_number: Some(block_number) }))
.await
.unwrap();
// reset block number
assert_eq!(block_number, provider.get_block_number().await.unwrap());
let nonce = provider.get_transaction_count(from).await.unwrap();
assert_eq!(nonce, initial_nonce);
let balance = provider.get_balance(from).await.unwrap();
assert_eq!(balance, handle.genesis_balance());
let balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance, handle.genesis_balance());
// reset to latest
api.anvil_reset(Some(Forking::default())).await.unwrap();
let new_block_num = provider.get_block_number().await.unwrap();
assert!(new_block_num > block_number);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_reset_setup() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let dead_addr: Address = "000000000000000000000000000000000000dEaD".parse().unwrap();
let block_number = provider.get_block_number().await.unwrap();
assert_eq!(block_number, 0);
let local_balance = provider.get_balance(dead_addr).await.unwrap();
assert_eq!(local_balance, U256::ZERO);
api.anvil_reset(Some(Forking {
json_rpc_url: Some(rpc::next_http_archive_rpc_url()),
block_number: Some(BLOCK_NUMBER),
}))
.await
.unwrap();
let block_number = provider.get_block_number().await.unwrap();
assert_eq!(block_number, BLOCK_NUMBER);
let remote_balance = provider.get_balance(dead_addr).await.unwrap();
assert_eq!(remote_balance, U256::from(DEAD_BALANCE_AT_BLOCK_NUMBER));
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_state_snapshotting() {
let (api, handle) = spawn(fork_config()).await;
let provider = handle.http_provider();
let state_snapshot = api.evm_snapshot().await.unwrap();
let accounts: Vec<_> = handle.dev_wallets().collect();
let from = accounts[0].address();
let to = accounts[1].address();
let block_number = provider.get_block_number().await.unwrap();
let initial_nonce = provider.get_transaction_count(from).await.unwrap();
let balance_before = provider.get_balance(to).await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
let provider = handle.http_provider();
let tx = TransactionRequest::default().to(to).value(amount).from(from);
let tx = WithOtherFields::new(tx);
let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
let provider = handle.http_provider();
let nonce = provider.get_transaction_count(from).await.unwrap();
assert_eq!(nonce, initial_nonce + 1);
let to_balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance_before.saturating_add(amount), to_balance);
assert!(api.evm_revert(state_snapshot).await.unwrap());
let nonce = provider.get_transaction_count(from).await.unwrap();
assert_eq!(nonce, initial_nonce);
let balance = provider.get_balance(from).await.unwrap();
assert_eq!(balance, handle.genesis_balance());
let balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance, handle.genesis_balance());
assert_eq!(block_number, provider.get_block_number().await.unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_state_snapshotting_repeated() {
    // Takes a state snapshot on a fork, sends a tx, takes a second snapshot,
    // reverts to the first, and checks that the consumed snapshot id cannot
    // be reverted to a second time.
    let (api, handle) = spawn(fork_config()).await;
    let provider = handle.http_provider();

    // Snapshot the pristine forked state before any local txs.
    let state_snapshot = api.evm_snapshot().await.unwrap();

    let accounts: Vec<_> = handle.dev_wallets().collect();
    let from = accounts[0].address();
    let to = accounts[1].address();
    let block_number = provider.get_block_number().await.unwrap();
    let initial_nonce = provider.get_transaction_count(from).await.unwrap();
    let balance_before = provider.get_balance(to).await.unwrap();
    let amount = handle.genesis_balance().checked_div(U256::from(92u64)).unwrap();

    let tx = TransactionRequest::default().to(to).value(amount).from(from);
    let tx = WithOtherFields::new(tx);
    let tx_provider = handle.http_provider();
    let _ = tx_provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();

    // The transfer must be observable before reverting.
    let nonce = provider.get_transaction_count(from).await.unwrap();
    assert_eq!(nonce, initial_nonce + 1);
    let to_balance = provider.get_balance(to).await.unwrap();
    assert_eq!(balance_before.saturating_add(amount), to_balance);

    let _second_state_snapshot = api.evm_snapshot().await.unwrap();

    // Reverting to the first snapshot undoes the transfer and the mined block.
    assert!(api.evm_revert(state_snapshot).await.unwrap());

    let nonce = provider.get_transaction_count(from).await.unwrap();
    assert_eq!(nonce, initial_nonce);
    let balance = provider.get_balance(from).await.unwrap();
    assert_eq!(balance, handle.genesis_balance());
    let balance = provider.get_balance(to).await.unwrap();
    assert_eq!(balance, handle.genesis_balance());
    assert_eq!(block_number, provider.get_block_number().await.unwrap());

    // invalidated
    // TODO enable after <https://github.com/foundry-rs/foundry/pull/6366>
    // assert!(!api.evm_revert(second_snapshot).await.unwrap());

    // nothing is reverted, snapshot gone
    assert!(!api.evm_revert(state_snapshot).await.unwrap());
}
// <https://github.com/foundry-rs/foundry/issues/6463>
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_state_snapshotting_blocks() {
    // Verifies that `evm_revert` also rolls back the block number on a fork,
    // and that a snapshot id is consumed by the first successful revert.
    let (api, handle) = spawn(fork_config()).await;
    let provider = handle.http_provider();

    // Snapshot the pristine forked state.
    let state_snapshot = api.evm_snapshot().await.unwrap();

    let accounts: Vec<_> = handle.dev_wallets().collect();
    let from = accounts[0].address();
    let to = accounts[1].address();
    let block_number = provider.get_block_number().await.unwrap();
    let initial_nonce = provider.get_transaction_count(from).await.unwrap();
    let balance_before = provider.get_balance(to).await.unwrap();
    let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();

    // send the transaction
    let tx = TransactionRequest::default().to(to).value(amount).from(from);
    let tx = WithOtherFields::new(tx);
    let _ = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();

    // Mining the tx advances the chain by exactly one block.
    let block_number_after = provider.get_block_number().await.unwrap();
    assert_eq!(block_number_after, block_number + 1);
    let nonce = provider.get_transaction_count(from).await.unwrap();
    assert_eq!(nonce, initial_nonce + 1);
    let to_balance = provider.get_balance(to).await.unwrap();
    assert_eq!(balance_before.saturating_add(amount), to_balance);

    // Revert: nonce and block number must return to their snapshot values.
    assert!(api.evm_revert(state_snapshot).await.unwrap());
    assert_eq!(initial_nonce, provider.get_transaction_count(from).await.unwrap());
    let block_number_after = provider.get_block_number().await.unwrap();
    assert_eq!(block_number_after, block_number);

    // repeat transaction
    let _ = provider.send_transaction(tx.clone()).await.unwrap().get_receipt().await.unwrap();
    let nonce = provider.get_transaction_count(from).await.unwrap();
    assert_eq!(nonce, initial_nonce + 1);

    // revert again: nothing to revert since state snapshot gone
    assert!(!api.evm_revert(state_snapshot).await.unwrap());
    let nonce = provider.get_transaction_count(from).await.unwrap();
    assert_eq!(nonce, initial_nonce + 1);
    let block_number_after = provider.get_block_number().await.unwrap();
    assert_eq!(block_number_after, block_number + 1);
}
/// Tests that the remote state and local state are kept separate:
/// local changes don't make it into the read-only database that holds the
/// remote state, which is flushed to a cache file.
#[tokio::test(flavor = "multi_thread")]
async fn test_separate_states() {
    let (api, handle) = spawn(fork_config().with_fork_block_number(Some(14723772u64))).await;
    let provider = handle.http_provider();

    let addr: Address = "000000000000000000000000000000000000dEaD".parse().unwrap();

    // Balance as recorded on the remote chain at the pinned fork block.
    let remote_balance = provider.get_balance(addr).await.unwrap();
    assert_eq!(remote_balance, U256::from(12556104082473169733500u128));

    // Overwrite the balance locally; RPC reads now reflect the local value.
    api.anvil_set_balance(addr, U256::from(1337u64)).await.unwrap();
    let balance = provider.get_balance(addr).await.unwrap();
    assert_eq!(balance, U256::from(1337u64));

    // The fork's backing database must still hold the untouched remote value.
    let fork = api.get_fork().unwrap();
    let fork_db = fork.database.read().await;
    let acc = fork_db
        .maybe_inner()
        .expect("could not get fork db inner")
        .db()
        .accounts
        .read()
        .get(&addr)
        .cloned()
        .unwrap();
    assert_eq!(acc.balance, remote_balance);
}
/// Deploying the same contract twice on a fork must succeed, and both
/// instances must be callable afterwards.
#[tokio::test(flavor = "multi_thread")]
async fn can_deploy_greeter_on_fork() {
    let (_api, handle) = spawn(fork_config().with_fork_block_number(Some(14723772u64))).await;
    let wallet = handle.dev_wallets().next().unwrap();
    let signer: EthereumWallet = wallet.into();
    let provider = http_provider_with_signer(&handle.http_endpoint(), signer);

    // Deploy twice to make sure repeated deployments on the fork behave.
    for _ in 0..2 {
        let greeter = Greeter::deploy(&provider, "Hello World!".to_string()).await.unwrap();
        let greeting = greeter.greet().call().await.unwrap();
        assert_eq!("Hello World!", greeting);
    }
}
#[tokio::test(flavor = "multi_thread")]
async fn can_reset_properly() {
    // `anvil_reset` on a fork of a local origin node must discard all local
    // changes (nonce, balances, txs) and restore the origin's state.

    // Spawn the origin node and bump an account nonce on it.
    let (origin_api, origin_handle) = spawn(NodeConfig::test()).await;
    let account = origin_handle.dev_accounts().next().unwrap();
    let origin_provider = origin_handle.http_provider();
    let origin_nonce = 1u64;
    origin_api.anvil_set_nonce(account, U256::from(origin_nonce)).await.unwrap();
    assert_eq!(origin_nonce, origin_provider.get_transaction_count(account).await.unwrap());

    // Fork the origin node; the fork must observe the origin nonce.
    let (fork_api, fork_handle) =
        spawn(NodeConfig::test().with_eth_rpc_url(Some(origin_handle.http_endpoint()))).await;
    let fork_provider = fork_handle.http_provider();
    let fork_tx_provider = http_provider(&fork_handle.http_endpoint());
    assert_eq!(origin_nonce, fork_provider.get_transaction_count(account).await.unwrap());

    // Send a tx on the fork only.
    let to = Address::random();
    let to_balance = fork_provider.get_balance(to).await.unwrap();
    let tx = TransactionRequest::default().from(account).to(to).value(U256::from(1337u64));
    let tx = WithOtherFields::new(tx);
    let tx = fork_tx_provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();

    // nonce incremented by 1
    assert_eq!(origin_nonce + 1, fork_provider.get_transaction_count(account).await.unwrap());

    // resetting to origin state
    fork_api.anvil_reset(Some(Forking::default())).await.unwrap();

    // nonce reset to origin
    assert_eq!(origin_nonce, fork_provider.get_transaction_count(account).await.unwrap());
    // balance is reset
    assert_eq!(to_balance, fork_provider.get_balance(to).await.unwrap());
    // tx does not exist anymore
    assert!(fork_tx_provider.get_transaction_by_hash(tx.transaction_hash).await.unwrap().is_none())
}
// Ref: <https://github.com/foundry-rs/foundry/issues/8684>
#[tokio::test(flavor = "multi_thread")]
async fn can_reset_fork_to_new_fork() {
    // `anvil_reset` must be able to repoint a running fork at a completely
    // different chain (mainnet -> Optimism) and serve the new chain's state.
    let eth_rpc_url = next_rpc_endpoint(NamedChain::Mainnet);
    let (api, handle) = spawn(NodeConfig::test().with_eth_rpc_url(Some(eth_rpc_url))).await;
    let provider = handle.http_provider();

    let op = address!("0xC0d3c0d3c0D3c0D3C0d3C0D3C0D3c0d3c0d30007"); // L2CrossDomainMessenger - Dead on mainnet.

    // Calling the predeploy on mainnet returns empty output: no code there.
    let tx = TransactionRequest::default().with_to(op).with_input("0x54fd4d50");
    let tx = WithOtherFields::new(tx);
    let mainnet_call_output = provider.call(tx).await.unwrap();
    assert_eq!(mainnet_call_output, Bytes::new()); // 0x

    // Re-fork the same node onto Optimism at a pinned block.
    let optimism = next_rpc_endpoint(NamedChain::Optimism);
    api.anvil_reset(Some(Forking {
        json_rpc_url: Some(optimism.to_string()),
        block_number: Some(124659890),
    }))
    .await
    .unwrap();

    // On Optimism the predeploy address carries code.
    let code = provider.get_code_at(op).await.unwrap();
    assert_ne!(code, Bytes::new());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_timestamp() {
    // Verifies block timestamps on a fork: the forked block keeps its original
    // timestamp, newly mined blocks advance by at most wall-clock time, and
    // `evm_setNextBlockTimestamp` pins the next block's timestamp exactly.
    let start = std::time::Instant::now();

    let (api, handle) = spawn(fork_config()).await;
    let provider = handle.http_provider();

    // The pinned fork block must carry its original timestamp.
    let block = provider.get_block(BlockId::Number(BLOCK_NUMBER.into())).await.unwrap().unwrap();
    assert_eq!(block.header.timestamp, BLOCK_TIMESTAMP);

    let accounts: Vec<_> = handle.dev_wallets().collect();
    let from = accounts[0].address();

    let tx =
        TransactionRequest::default().to(Address::random()).value(U256::from(1337u64)).from(from);
    let tx = WithOtherFields::new(tx);
    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    let status = tx.inner.inner.inner.receipt.status.coerce_status();
    assert!(status);

    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    // `+ 1` pads out the sub-second truncation of `as_secs()`.
    let elapsed = start.elapsed().as_secs() + 1;
    // ensure the diff between the new mined block and the original block is within the elapsed time
    let diff = block.header.timestamp - BLOCK_TIMESTAMP;
    assert!(diff <= elapsed, "diff={diff}, elapsed={elapsed}");

    let start = std::time::Instant::now();
    // reset to check timestamp works after resetting
    api.anvil_reset(Some(Forking { json_rpc_url: None, block_number: Some(BLOCK_NUMBER) }))
        .await
        .unwrap();
    let block = provider.get_block(BlockId::Number(BLOCK_NUMBER.into())).await.unwrap().unwrap();
    assert_eq!(block.header.timestamp, BLOCK_TIMESTAMP);

    let tx =
        TransactionRequest::default().to(Address::random()).value(U256::from(1337u64)).from(from);
    let tx = WithOtherFields::new(tx);
    let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap(); // FIXME: Awaits endlessly here.

    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    let elapsed = start.elapsed().as_secs() + 1;
    let diff = block.header.timestamp - BLOCK_TIMESTAMP;
    assert!(diff <= elapsed);

    // ensure that after setting a timestamp manually, then next block time is correct
    let start = std::time::Instant::now();
    api.anvil_reset(Some(Forking { json_rpc_url: None, block_number: Some(BLOCK_NUMBER) }))
        .await
        .unwrap();
    api.evm_set_next_block_timestamp(BLOCK_TIMESTAMP + 1).unwrap();
    let tx =
        TransactionRequest::default().to(Address::random()).value(U256::from(1337u64)).from(from);
    let tx = WithOtherFields::new(tx);
    let _tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();

    // The manually set timestamp must be used verbatim for the next block.
    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    assert_eq!(block.header.timestamp, BLOCK_TIMESTAMP + 1);

    // Subsequent blocks continue from the manual timestamp in real time.
    let tx =
        TransactionRequest::default().to(Address::random()).value(U256::from(1337u64)).from(from);
    let tx = WithOtherFields::new(tx);
    let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
    let elapsed = start.elapsed().as_secs() + 1;
    let diff = block.header.timestamp - (BLOCK_TIMESTAMP + 1);
    assert!(diff <= elapsed);
}
/// Setting empty code on a forked contract address must clear its bytecode.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_set_empty_code() {
    let (api, _handle) = spawn(fork_config()).await;
    // An address that has code at the forked block.
    let addr = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984".parse().unwrap();

    let code_before = api.get_code(addr, None).await.unwrap();
    assert!(!code_before.as_ref().is_empty());

    // Wipe the code and verify the account is now code-less.
    api.anvil_set_code(addr, Vec::new().into()).await.unwrap();
    let code_after = api.get_code(addr, None).await.unwrap();
    assert!(code_after.as_ref().is_empty());
}
/// Ensures a tx can be sent on a blocktime-mining fork from a freshly
/// generated account that is funded and impersonated via the anvil API.
///
/// (Removed a stale commented-out `SignerMiddleware` line left over from the
/// ethers-rs provider setup.)
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_can_send_tx() {
    let (api, handle) =
        spawn(fork_config().with_blocktime(Some(std::time::Duration::from_millis(800)))).await;

    let wallet = PrivateKeySigner::random();
    let signer = wallet.address();
    let provider = handle.http_provider();

    // Fund and impersonate the random account so `eth_sendTransaction` works
    // without a local signer. (Added until WalletFiller for alloy-provider is fixed.)
    api.anvil_set_balance(signer, U256::MAX).await.unwrap();
    api.anvil_impersonate_account(signer).await.unwrap();
    let balance = provider.get_balance(signer).await.unwrap();
    assert_eq!(balance, U256::MAX);

    let addr = Address::random();
    let val = U256::from(1337u64);
    let tx = TransactionRequest::default().to(addr).value(val).from(signer);
    let tx = WithOtherFields::new(tx);

    // broadcast it via the eth_sendTransaction API
    let _ = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();

    // The recipient must have received exactly the transferred value.
    let balance = provider.get_balance(addr).await.unwrap();
    assert_eq!(balance, val);
}
// <https://github.com/foundry-rs/foundry/issues/1920>
/// Forks mainnet at a block where a known Nouns NFT exists, then:
/// 1. sends `setApprovalForAll` from an impersonated third party, and
/// 2. transfers the NFT from its impersonated real owner to a fresh wallet.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_nft_set_approve_all() {
    let (api, handle) = spawn(
        fork_config()
            .with_fork_block_number(Some(14812197u64))
            .with_blocktime(Some(Duration::from_secs(5)))
            .with_chain_id(1u64.into()),
    )
    .await;

    // create and fund a random wallet
    let wallet = PrivateKeySigner::random();
    let signer = wallet.address();
    api.anvil_set_balance(signer, U256::from(1000e18)).await.unwrap();

    let provider = handle.http_provider();

    // pick a random nft <https://opensea.io/assets/ethereum/0x9c8ff314c9bc7f6e59a9d9225fb22946427edc03/154>
    let nouns_addr: Address = "0x9c8ff314c9bc7f6e59a9d9225fb22946427edc03".parse().unwrap();
    let owner: Address = "0x052564eb0fd8b340803df55def89c25c432f43f4".parse().unwrap();
    let token_id: U256 = U256::from(154u64);

    let nouns = ERC721::new(nouns_addr, provider.clone());

    let real_owner = nouns.ownerOf(token_id).call().await.unwrap();
    assert_eq!(real_owner, owner);

    // setApprovalForAll from the impersonated `owner`.
    let approval = nouns.setApprovalForAll(nouns_addr, true);
    let tx = TransactionRequest::default()
        .from(owner)
        .to(nouns_addr)
        .with_input(approval.calldata().to_owned());
    let tx = WithOtherFields::new(tx);
    api.anvil_impersonate_account(owner).await.unwrap();
    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    let status = tx.inner.inner.inner.receipt.status.coerce_status();
    assert!(status);

    // transfer: impersonate real owner and transfer nft
    api.anvil_impersonate_account(real_owner).await.unwrap();
    // Fund the owner for gas. NOTE: this previously used
    // `U256::from(10000e18 as u64)`, but `10000e18 as u64` saturates at
    // `u64::MAX` (~18.4 ETH in wei) instead of the intended 10000 ETH;
    // convert from f64 directly, like the funding of `signer` above.
    api.anvil_set_balance(real_owner, U256::from(10000e18)).await.unwrap();

    let call = nouns.transferFrom(real_owner, signer, token_id);
    let tx = TransactionRequest::default()
        .from(real_owner)
        .to(nouns_addr)
        .with_input(call.calldata().to_owned());
    let tx = WithOtherFields::new(tx);
    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    let status = tx.inner.inner.inner.receipt.status.coerce_status();
    assert!(status);

    // The new owner must be our random wallet.
    let real_owner = nouns.ownerOf(token_id).call().await.unwrap();
    assert_eq!(real_owner, wallet.address());
}
// <https://github.com/foundry-rs/foundry/issues/2261>
/// A forked node spawned with a custom chain id must report that id via
/// `eth_chainId`, the txn chain id, and the node config.
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_with_custom_chain_id() {
    const CHAIN_ID: u64 = 3145;

    // spawn a forked node with some random chainId
    let (api, handle) = spawn(
        fork_config()
            .with_fork_block_number(Some(14812197u64))
            .with_blocktime(Some(Duration::from_secs(5)))
            .with_chain_id(CHAIN_ID.into()),
    )
    .await;

    // All three sources must agree on the configured chain id.
    assert_eq!(api.eth_chain_id().unwrap().unwrap().to::<u64>(), CHAIN_ID);
    assert_eq!(api.chain_id(), CHAIN_ID);
    assert_eq!(handle.config().chain_id, Some(CHAIN_ID));
}
// <https://github.com/foundry-rs/foundry/issues/1920>
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_can_send_opensea_tx() {
    // Replays a real OpenSea/Seaport fulfillment tx on a fork by impersonating
    // the original sender; the tx must execute successfully.
    let (api, handle) = spawn(
        fork_config()
            .with_fork_block_number(Some(14983338u64))
            .with_blocktime(Some(Duration::from_millis(5000))),
    )
    .await;

    let sender: Address = "0x8fdbae54b6d9f3fc2c649e3dd4602961967fd42f".parse().unwrap();

    // transfer: impersonate real sender
    api.anvil_impersonate_account(sender).await.unwrap();

    let provider = handle.http_provider();
    // Calldata captured verbatim from the original mainnet transaction.
    let input: Bytes = "0xfb0f3ee1000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003ff2e795f5000000000000000000000000000023f28ae3e9756ba982a6290f9081b6a84900b758000000000000000000000000004c00500000ad104d7dbd00e3ae0a5c00560c0000000000000000000000000003235b597a78eabcb08ffcb4d97411073211dbcb0000000000000000000000000000000000000000000000000000000000000e72000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000062ad47c20000000000000000000000000000000000000000000000000000000062d43104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000df44e65d2a2cf40000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f00000000007b02230091a7ed01230072f7006a004d60a8d4e71d599b8104250f00000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000001c6bf526340000000000000000000000000008de9c5a032463c561423387a9648c5c7bcc5bc900000000000000000000000000000000000000000000000000005543df729c0000000000000000000000000006eb234847a9e3a546539aac57a071c01dc3f398600000000000000000000000000000000000000000000000000000000000000416d39b5352353a22cf2d44faa696c2089b03137a13b5acfee0366306f2678fede043bc8c7e422f6f13a3453295a4a063dac7ee6216ab7bade299690afc77397a51c00000000000000000000000000000000000000000000000000000000000000".parse().unwrap();
    let to: Address = "0x00000000006c3852cbef3e08e8df289169ede581".parse().unwrap();

    // Gas price/limit mirror the original tx's values.
    let tx = TransactionRequest::default()
        .from(sender)
        .to(to)
        .value(U256::from(20000000000000000u64))
        .with_input(input)
        .with_gas_price(22180711707u128)
        .with_gas_limit(150_000);
    let tx = WithOtherFields::new(tx);

    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    let status = tx.inner.inner.inner.receipt.status.coerce_status();
    assert!(status);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_fork_base_fee() {
let (api, handle) = spawn(fork_config()).await;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/otterscan.rs | crates/anvil/tests/it/otterscan.rs | //! Tests for otterscan endpoints.
use crate::abi::Multicall;
use alloy_network::TransactionResponse;
use alloy_primitives::{Address, Bytes, U256, address};
use alloy_provider::Provider;
use alloy_rpc_types::{
BlockNumberOrTag, TransactionRequest,
trace::otterscan::{InternalOperation, OperationType, TraceEntry},
};
use alloy_serde::WithOtherFields;
use alloy_sol_types::{SolCall, SolError, SolValue, sol};
use anvil::{NodeConfig, spawn};
use foundry_evm::hardfork::EthereumHardfork;
use std::collections::VecDeque;
/// `erigon_getHeaderByNumber` must return headers for both genesis and a
/// freshly mined block, each carrying its own block number.
#[tokio::test(flavor = "multi_thread")]
async fn erigon_get_header_by_number() {
    let (api, _handle) = spawn(NodeConfig::test()).await;
    api.mine_one().await;

    // Genesis (0) and the mined block (1) must both resolve.
    for num in 0..=1u64 {
        let header = api.erigon_get_header_by_number(num.into()).await.unwrap().unwrap();
        assert_eq!(header.header.number, num);
    }
}
/// The Otterscan API level reported by the node must be the implemented
/// spec version (8).
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_api_level() {
    let (api, _handle) = spawn(NodeConfig::test()).await;
    let level = api.ots_get_api_level().await.unwrap();
    assert_eq!(level, 8);
}
/// A contract deployment must surface as a single `OpCreate` internal
/// operation from the deployer account to the new contract address.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_internal_operations_contract_deploy() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let sender = handle.dev_accounts().next().unwrap();

    let receipt =
        Multicall::deploy_builder(&provider).send().await.unwrap().get_receipt().await.unwrap();

    let ops = api.ots_get_internal_operations(receipt.transaction_hash).await.unwrap();
    let expected = InternalOperation {
        r#type: OperationType::OpCreate,
        from: sender,
        to: receipt.contract_address.unwrap(),
        value: U256::from(0),
    };
    assert_eq!(ops, [expected]);
}
/// A plain value transfer must surface as a single `OpTransfer` internal
/// operation with the exact sender, recipient, and amount.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_internal_operations_contract_transfer() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    let accounts: Vec<_> = handle.dev_wallets().collect();
    let from = accounts[0].address();
    let to = accounts[1].address();
    let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();

    let req = WithOtherFields::new(TransactionRequest::default().to(to).value(amount).from(from));
    let receipt = provider.send_transaction(req).await.unwrap().get_receipt().await.unwrap();

    let ops = api.ots_get_internal_operations(receipt.transaction_hash).await.unwrap();
    assert_eq!(
        ops,
        [InternalOperation { r#type: OperationType::OpTransfer, from, to, value: amount }],
    );
}
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_internal_operations_contract_create2() {
    // A CREATE2 deployment routed through the deterministic deployer at
    // 0x4e59...956C must be reported as an `OpCreate2` internal operation
    // originating from the deployer contract.

    // Minimal contract that forwards `salt ++ creationCode` to the deployer.
    sol!(
        #[sol(rpc, bytecode = "60808060405234601557610147908161001a8239f35b5f80fdfe6080600436101561000e575f80fd5b5f3560e01c636cd5c39b14610021575f80fd5b346100d0575f3660031901126100d0575f602082810191825282526001600160401b03916040810191838311828410176100d4578261008960405f959486958252606081019486865281518091608084015e81018660808201520360208101845201826100ee565b519082734e59b44847b379578588920ca78fbf26c0b4956c5af1903d156100e8573d9081116100d4576040516100c991601f01601f1916602001906100ee565b156100d057005b5f80fd5b634e487b7160e01b5f52604160045260245ffd5b506100c9565b601f909101601f19168101906001600160401b038211908210176100d45760405256fea2646970667358221220f76968e121fc002b537029df51a2aecca0793282491baf84b872ffbfbfb1c9d764736f6c63430008190033")]
        contract Contract {
            address constant CREATE2_DEPLOYER = 0x4e59b44847b379578588920cA78FbF26c0B4956C;
            function deployContract() public {
                uint256 salt = 0;
                uint256 code = 0;
                bytes memory creationCode = abi.encodePacked(code);
                (bool success,) = address(CREATE2_DEPLOYER).call(abi.encodePacked(salt, creationCode));
                require(success);
            }
        }
    );

    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    let contract = Contract::deploy(&provider).await.unwrap();
    let receipt = contract.deployContract().send().await.unwrap().get_receipt().await.unwrap();

    let res = api.ots_get_internal_operations(receipt.transaction_hash).await.unwrap();
    // `from` is the CREATE2 deployer; `to` is the deterministic child address.
    assert_eq!(
        res,
        [InternalOperation {
            r#type: OperationType::OpCreate2,
            from: address!("0x4e59b44847b379578588920cA78FbF26c0B4956C"),
            to: address!("0x347bcdad821abc09b8c275881b368de36476b62c"),
            value: U256::from(0),
        }],
    );
}
/// Runs the shared selfdestruct check against the London hardfork.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_internal_operations_contract_selfdestruct_london() {
    ots_get_internal_operations_contract_selfdestruct(EthereumHardfork::London).await;
}
/// Runs the shared selfdestruct check against the Cancun hardfork.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_internal_operations_contract_selfdestruct_cancun() {
    ots_get_internal_operations_contract_selfdestruct(EthereumHardfork::Cancun).await;
}
/// Shared body for the selfdestruct tests: deploys a contract funded with
/// `value`, calls `goodbye()` (which selfdestructs to a fixed beneficiary),
/// and expects a single `OpSelfDestruct` internal operation carrying the
/// contract balance to that beneficiary.
async fn ots_get_internal_operations_contract_selfdestruct(hardfork: EthereumHardfork) {
    sol!(
        #[sol(rpc, bytecode = "608080604052607f908160108239f3fe6004361015600c57600080fd5b6000803560e01c6375fc8e3c14602157600080fd5b346046578060031936011260465773dcdd539da22bffaa499dbea4d37d086dde196e75ff5b80fdfea264697066735822122080a9ad005cc408b2d4e30ca11216d8e310700fbcdf58a629d6edbb91531f9c6164736f6c63430008190033")]
        contract Contract {
            constructor() payable {}
            function goodbye() public {
                selfdestruct(payable(0xDcDD539DA22bfFAa499dBEa4d37d086Dde196E75));
            }
        }
    );

    let (api, handle) = spawn(NodeConfig::test().with_hardfork(Some(hardfork.into()))).await;
    let provider = handle.http_provider();
    let sender = handle.dev_accounts().next().unwrap();

    // Fund the contract at deployment so the selfdestruct sweeps a nonzero value.
    let value = U256::from(69);
    let contract_address =
        Contract::deploy_builder(&provider).from(sender).value(value).deploy().await.unwrap();
    let contract = Contract::new(contract_address, &provider);

    let receipt = contract.goodbye().send().await.unwrap().get_receipt().await.unwrap();

    // Beneficiary hard-coded in the contract's `goodbye()` above.
    let expected_to = address!("0xDcDD539DA22bfFAa499dBEa4d37d086Dde196E75");
    let expected_value = value;

    let res = api.ots_get_internal_operations(receipt.transaction_hash).await.unwrap();
    assert_eq!(
        res,
        [InternalOperation {
            r#type: OperationType::OpSelfDestruct,
            from: contract_address,
            to: expected_to,
            value: expected_value,
        }],
    );
}
/// `ots_hasCode` must flip from false to true at exactly the block in which
/// the contract is deployed.
#[tokio::test(flavor = "multi_thread")]
async fn ots_has_code() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let sender = handle.dev_accounts().next().unwrap();
    api.mine_one().await;

    // Address the sender's first CREATE (nonce 0) will land on.
    let contract_address = sender.create(0);

    // no code in the address before deploying
    assert!(!api.ots_has_code(contract_address, BlockNumberOrTag::Number(1)).await.unwrap());

    let receipt = Multicall::deploy_builder(&provider)
        .send()
        .await
        .unwrap()
        .get_receipt()
        .await
        .unwrap();
    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, receipt.block_number.unwrap());

    // code is detected after deploying
    assert!(api.ots_has_code(contract_address, BlockNumberOrTag::Number(num)).await.unwrap());
    // code is not detected for the previous block
    assert!(!api.ots_has_code(contract_address, BlockNumberOrTag::Number(num - 1)).await.unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_call_ots_trace_transaction() {
    // Exercises `ots_traceTransaction` with a contract whose entry point
    // fans out into a STATICCALL, a CALL (which itself makes a value CALL and
    // a DELEGATECALL), and checks the full flattened trace: type, depth,
    // from/to, value, input selector, and output of every frame.
    sol!(
        #[sol(rpc, bytecode = "608080604052346026575f80546001600160a01b0319163317905561025e908161002b8239f35b5f80fdfe6080604081815260049081361015610015575f80fd5b5f925f3560e01c9081636a6758fe1461019a5750806396385e3914610123578063a1325397146101115763c04062261461004d575f80fd5b5f3660031901126100d5578051633533ac7f60e11b81526020818481305afa80156100cb576100d9575b50303b156100d55780516396385e3960e01b8152915f83828183305af180156100cb576100a2578380f35b919250906001600160401b0383116100b8575052005b604190634e487b7160e01b5f525260245ffd5b82513d5f823e3d90fd5b5f80fd5b6020813d602011610109575b816100f2602093836101b3565b810103126100d55751801515036100d5575f610077565b3d91506100e5565b346100d5575f3660031901126100d557005b5090346100d5575f3660031901126100d5575f805481908190819047906001600160a01b03165af1506101546101ea565b50815163a132539760e01b6020820190815282825292909182820191906001600160401b038311848410176100b8575f8086868686525190305af4506101986101ea565b005b346100d5575f3660031901126100d55780600160209252f35b601f909101601f19168101906001600160401b038211908210176101d657604052565b634e487b7160e01b5f52604160045260245ffd5b3d15610223573d906001600160401b0382116101d65760405191610218601f8201601f1916602001846101b3565b82523d5f602084013e565b60609056fea264697066735822122099817ea378044f1f6434272aeb1f3f01a734645e599e69b4caf2ba7a4fb65f9d64736f6c63430008190033")]
        contract Contract {
            address private owner;
            constructor() {
                owner = msg.sender;
            }
            function run() payable public {
                this.do_staticcall();
                this.do_call();
            }
            function do_staticcall() external view returns (bool) {
                return true;
            }
            function do_call() external {
                owner.call{value: address(this).balance}("");
                address(this).delegatecall(abi.encodeWithSignature("do_delegatecall()"));
            }
            function do_delegatecall() external {}
        }
    );

    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let wallets = handle.dev_wallets().collect::<Vec<_>>();
    let sender = wallets[0].address();

    let contract_address = Contract::deploy_builder(&provider).from(sender).deploy().await.unwrap();
    let contract = Contract::new(contract_address, &provider);

    // `run()` is called with value so the inner CALL sweeps a balance.
    let receipt =
        contract.run().value(U256::from(1337)).send().await.unwrap().get_receipt().await.unwrap();

    let res = api.ots_trace_transaction(receipt.transaction_hash).await.unwrap();

    // Expected frames in execution order (depth-first).
    let expected = vec![
        // Outer tx: sender -> contract, carries the 1337 wei.
        TraceEntry {
            r#type: "CALL".to_string(),
            depth: 0,
            from: sender,
            to: contract_address,
            value: Some(U256::from(1337)),
            input: Contract::runCall::SELECTOR.into(),
            output: Bytes::new(),
        },
        // `this.do_staticcall()` returns ABI-encoded `true`.
        TraceEntry {
            r#type: "STATICCALL".to_string(),
            depth: 1,
            from: contract_address,
            to: contract_address,
            value: Some(U256::ZERO),
            input: Contract::do_staticcallCall::SELECTOR.into(),
            output: true.abi_encode().into(),
        },
        // `this.do_call()`.
        TraceEntry {
            r#type: "CALL".to_string(),
            depth: 1,
            from: contract_address,
            to: contract_address,
            value: Some(U256::ZERO),
            input: Contract::do_callCall::SELECTOR.into(),
            output: Bytes::new(),
        },
        // Inner value transfer back to the owner (the sender).
        TraceEntry {
            r#type: "CALL".to_string(),
            depth: 2,
            from: contract_address,
            to: sender,
            value: Some(U256::from(1337)),
            input: Bytes::new(),
            output: Bytes::new(),
        },
        // Self-delegatecall into `do_delegatecall()`.
        TraceEntry {
            r#type: "DELEGATECALL".to_string(),
            depth: 2,
            from: contract_address,
            to: contract_address,
            value: Some(U256::ZERO),
            input: Contract::do_delegatecallCall::SELECTOR.into(),
            output: Bytes::new(),
        },
    ];
    assert_eq!(res, expected);
}
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_transaction_error() {
    // `ots_getTransactionError` must return the ABI-encoded custom error
    // payload of a reverted transaction.
    sol!(
        #[sol(rpc, bytecode = "6080806040523460135760a3908160188239f35b5f80fdfe60808060405260043610156011575f80fd5b5f3560e01c63f67f4650146023575f80fd5b346069575f3660031901126069576346b7545f60e11b81526020600482015260126024820152712932bb32b93a29ba3934b733a337b7a130b960711b6044820152606490fd5b5f80fdfea264697066735822122069222918090d4d3ddc6a9c8b6ef282464076c71f923a0e8618ed25489b87f12b64736f6c63430008190033")]
        contract Contract {
            error CustomError(string msg);
            function trigger_revert() public {
                revert CustomError("RevertStringFooBar");
            }
        }
    );

    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    let contract = Contract::deploy(&provider).await.unwrap();
    // The call reverts; we still obtain a receipt for the mined (failed) tx.
    let receipt = contract.trigger_revert().send().await.unwrap().get_receipt().await.unwrap();

    let err = api.ots_get_transaction_error(receipt.transaction_hash).await.unwrap();
    // Expect the exact ABI encoding of `CustomError("RevertStringFooBar")`.
    let expected = Contract::CustomError { msg: String::from("RevertStringFooBar") }.abi_encode();
    assert_eq!(err, Bytes::from(expected));
}
/// A successful transaction must yield an empty revert payload from
/// `ots_getTransactionError`.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_transaction_error_no_error() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // Send a successful transaction
    let req = WithOtherFields::new(
        TransactionRequest::default().to(Address::random()).value(U256::from(100)),
    );
    let receipt = provider.send_transaction(req).await.unwrap().get_receipt().await.unwrap();

    let res = api.ots_get_transaction_error(receipt.transaction_hash).await.unwrap();
    assert!(res.is_empty(), "{res}");
}
/// `ots_getBlockDetails` must report the tx count of the requested block.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_block_details() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // Mine one block containing a single transfer.
    let req = WithOtherFields::new(
        TransactionRequest::default().to(Address::random()).value(U256::from(100)),
    );
    provider.send_transaction(req).await.unwrap().get_receipt().await.unwrap();

    let details = api.ots_get_block_details(1.into()).await.unwrap();
    assert_eq!(details.block.transaction_count, 1);
}
/// `ots_getBlockDetailsByHash` must resolve a block by its hash and report
/// its tx count.
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_block_details_by_hash() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // Mine one block containing a single transfer and grab its hash.
    let req = WithOtherFields::new(
        TransactionRequest::default().to(Address::random()).value(U256::from(100)),
    );
    let receipt = provider.send_transaction(req).await.unwrap().get_receipt().await.unwrap();
    let block_hash = receipt.block_hash.unwrap();

    let details = api.ots_get_block_details_by_hash(block_hash).await.unwrap();
    assert_eq!(details.block.transaction_count, 1);
}
/// `ots_getBlockTransactions` must paginate a 10-tx block correctly: every
/// page holds at most `page_size` receipts, receipts line up with the block's
/// tx hashes in order, and all txs are covered exactly once.
///
/// (Removed a duplicated `len <= page_size` assertion and switched the
/// transaction-count check from `assert!(a == b)` to `assert_eq!`.)
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_block_transactions() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();

    // disable automine so all 10 txs land in a single block
    api.anvil_set_auto_mine(false).await.unwrap();

    let mut hashes = VecDeque::new();
    for i in 0..10 {
        let tx =
            TransactionRequest::default().to(Address::random()).value(U256::from(100)).nonce(i);
        let tx = WithOtherFields::new(tx);
        let pending_receipt =
            provider.send_transaction(tx).await.unwrap().register().await.unwrap();
        hashes.push_back(*pending_receipt.tx_hash());
    }
    api.mine_one().await;

    // 10 txs at 3 per page -> pages of 3, 3, 3, 1.
    let page_size = 3;
    for page in 0..4 {
        let result = api.ots_get_block_transactions(1, page, page_size).await.unwrap();
        assert!(result.receipts.len() <= page_size);
        assert_eq!(result.fullblock.transaction_count, result.receipts.len());

        // Receipts must appear in block order and match the block's tx hashes.
        result.receipts.iter().enumerate().for_each(|(i, receipt)| {
            let expected = hashes.pop_front();
            assert_eq!(expected, Some(receipt.receipt.transaction_hash));
            assert_eq!(expected, result.fullblock.block.transactions.hashes().nth(i));
        });
    }

    // Every sent tx must have been consumed by exactly one page.
    assert!(hashes.is_empty());
}
/// `ots_searchTransactionsBefore` must walk the sender's history backwards,
/// `page_size` txs per page, flagging the first and last pages correctly.
#[tokio::test(flavor = "multi_thread")]
async fn ots_search_transactions_before() {
    let (api, handle) = spawn(NodeConfig::test()).await;
    let provider = handle.http_provider();
    let sender = handle.dev_accounts().next().unwrap();

    // Send 7 txs; `hashes` keeps them oldest-first.
    let mut hashes = vec![];
    for nonce in 0..7 {
        let req = WithOtherFields::new(
            TransactionRequest::default()
                .to(Address::random())
                .value(U256::from(100))
                .nonce(nonce),
        );
        let receipt = provider.send_transaction(req).await.unwrap().get_receipt().await.unwrap();
        hashes.push(receipt.transaction_hash);
    }

    // 7 txs at 2 per page -> 4 pages, newest first (block 0 = "latest").
    let page_size = 2;
    let mut block = 0;
    for page in 0..4 {
        let result = api.ots_search_transactions_before(sender, block, page_size).await.unwrap();
        assert_eq!(result.first_page, page == 0);
        assert_eq!(result.last_page, page == 3);

        // Pages arrive newest-first, so match them against `hashes` popped
        // from the back (newest remaining).
        for tx in &result.txs {
            assert_eq!(hashes.pop(), Some(tx.tx_hash()));
        }
        // Continue the search below the last (oldest) block on this page.
        block = result.txs.last().unwrap().block_number.unwrap();
    }
    assert!(hashes.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn ots_search_transactions_after() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let sender = handle.dev_accounts().next().unwrap();
let mut hashes = VecDeque::new();
for i in 0..7 {
let tx =
TransactionRequest::default().to(Address::random()).value(U256::from(100)).nonce(i);
let tx = WithOtherFields::new(tx);
let receipt = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
hashes.push_front(receipt.transaction_hash);
}
let page_size = 2;
let mut block = 0;
for i in 0..4 {
let result = api.ots_search_transactions_after(sender, block, page_size).await.unwrap();
assert_eq!(result.first_page, i == 3);
assert_eq!(result.last_page, i == 0);
// check each individual hash
result.txs.iter().rev().for_each(|tx| {
assert_eq!(hashes.pop_back(), Some(tx.tx_hash()));
});
block = result.txs.first().unwrap().block_number.unwrap();
}
assert!(hashes.is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_transaction_by_sender_and_nonce() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let sender = handle.dev_accounts().next().unwrap();
let tx1 = WithOtherFields::new(
TransactionRequest::default()
.from(sender)
.to(Address::random())
.value(U256::from(100))
.nonce(0),
);
let tx2 = WithOtherFields::new(
TransactionRequest::default()
.from(sender)
.to(Address::random())
.value(U256::from(100))
.nonce(1),
);
let receipt1 = provider.send_transaction(tx1).await.unwrap().get_receipt().await.unwrap();
let receipt2 = provider.send_transaction(tx2).await.unwrap().get_receipt().await.unwrap();
let result1 =
api.ots_get_transaction_by_sender_and_nonce(sender, U256::from(0)).await.unwrap().unwrap();
let result2 =
api.ots_get_transaction_by_sender_and_nonce(sender, U256::from(1)).await.unwrap().unwrap();
assert_eq!(result1, receipt1.transaction_hash);
assert_eq!(result2, receipt2.transaction_hash);
}
#[tokio::test(flavor = "multi_thread")]
async fn ots_get_contract_creator() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let sender = handle.dev_accounts().next().unwrap();
let receipt =
Multicall::deploy_builder(&provider).send().await.unwrap().get_receipt().await.unwrap();
let contract_address = receipt.contract_address.unwrap();
let creator = api.ots_get_contract_creator(contract_address).await.unwrap().unwrap();
assert_eq!(creator.creator, sender);
assert_eq!(creator.hash, receipt.transaction_hash);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/transaction.rs | crates/anvil/tests/it/transaction.rs | use crate::{
abi::{Greeter, Multicall, SimpleStorage},
utils::{connect_pubsub, http_provider_with_signer},
};
use alloy_consensus::Transaction;
use alloy_network::{EthereumWallet, TransactionBuilder, TransactionResponse};
use alloy_primitives::{Address, Bytes, FixedBytes, U256, address, hex, map::B256HashSet};
use alloy_provider::{Provider, WsConnect};
use alloy_rpc_types::{
AccessList, AccessListItem, BlockId, BlockNumberOrTag, BlockOverrides, BlockTransactions,
TransactionRequest,
state::{AccountOverride, EvmOverrides, StateOverride, StateOverridesBuilder},
};
use alloy_serde::WithOtherFields;
use alloy_sol_types::SolValue;
use anvil::{NodeConfig, spawn};
use eyre::Ok;
use foundry_evm::hardfork::EthereumHardfork;
use futures::{FutureExt, StreamExt, future::join_all};
use revm::primitives::eip7825::TX_GAS_LIMIT_CAP;
use std::{str::FromStr, time::Duration};
use tokio::time::timeout;
#[tokio::test(flavor = "multi_thread")]
async fn can_transfer_eth() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let nonce = provider.get_transaction_count(from).await.unwrap();
assert!(nonce == 0);
let balance_before = provider.get_balance(to).await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
// craft the tx
// specify the `from` field so that the client knows which account to use
let tx = TransactionRequest::default().to(to).value(amount).from(from);
let tx = WithOtherFields::new(tx);
// broadcast it via the eth_sendTransaction API
let tx = provider.send_transaction(tx).await.unwrap();
let tx = tx.get_receipt().await.unwrap();
assert_eq!(tx.block_number, Some(1));
assert_eq!(tx.transaction_index, Some(0));
let nonce = provider.get_transaction_count(from).await.unwrap();
assert_eq!(nonce, 1);
let to_balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance_before.saturating_add(amount), to_balance);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_order_transactions() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
// disable automine
api.anvil_set_auto_mine(false).await.unwrap();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
// craft the tx with lower price
let mut tx = TransactionRequest::default().to(to).from(from).value(amount);
tx.set_gas_price(gas_price);
let tx = WithOtherFields::new(tx);
let tx_lower = provider.send_transaction(tx).await.unwrap();
// craft the tx with higher price
let mut tx = TransactionRequest::default().to(from).from(to).value(amount);
tx.set_gas_price(gas_price + 1);
let tx = WithOtherFields::new(tx);
let tx_higher = provider.send_transaction(tx).await.unwrap();
// manually mine the block with the transactions
api.mine_one().await;
let higher_price = tx_higher.get_receipt().await.unwrap().transaction_hash;
let lower_price = tx_lower.get_receipt().await.unwrap().transaction_hash;
// get the block, await receipts
let block = provider.get_block(BlockId::latest()).await.unwrap().unwrap();
assert_eq!(block.transactions, BlockTransactions::Hashes(vec![higher_price, lower_price]))
}
#[tokio::test(flavor = "multi_thread")]
async fn can_respect_nonces() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let nonce = provider.get_transaction_count(from).await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(3u64)).unwrap();
let tx = TransactionRequest::default().to(to).value(amount).from(from).nonce(nonce + 1);
let tx = WithOtherFields::new(tx);
// send the transaction with higher nonce than on chain
let higher_pending_tx = provider.send_transaction(tx).await.unwrap();
// ensure the listener for ready transactions times out
let mut listener = api.new_ready_transactions();
let res = timeout(Duration::from_millis(1500), listener.next()).await;
res.unwrap_err();
let tx = TransactionRequest::default().to(to).value(amount).from(from).nonce(nonce);
let tx = WithOtherFields::new(tx);
// send with the actual nonce which is mined immediately
let tx = provider.send_transaction(tx).await.unwrap();
let tx = tx.get_receipt().await.unwrap();
// this will unblock the currently pending tx
let higher_tx = higher_pending_tx.get_receipt().await.unwrap(); // Awaits endlessly here due to alloy/#389
let block = provider.get_block(1.into()).await.unwrap().unwrap();
assert_eq!(2, block.transactions.len());
assert_eq!(
BlockTransactions::Hashes(vec![tx.transaction_hash, higher_tx.transaction_hash]),
block.transactions
);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_replace_transaction() {
let (api, handle) = spawn(NodeConfig::test()).await;
// disable auto mining
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let nonce = provider.get_transaction_count(from).await.unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(3u64)).unwrap();
let tx = TransactionRequest::default().to(to).value(amount).from(from).nonce(nonce);
let mut tx = WithOtherFields::new(tx);
tx.set_gas_price(gas_price);
// send transaction with lower gas price
let _lower_priced_pending_tx = provider.send_transaction(tx.clone()).await.unwrap();
tx.set_gas_price(gas_price + 1);
// send the same transaction with higher gas price
let higher_priced_pending_tx = provider.send_transaction(tx).await.unwrap();
let higher_tx_hash = *higher_priced_pending_tx.tx_hash();
// mine exactly one block
api.mine_one().await;
let block = provider.get_block(1.into()).await.unwrap().unwrap();
assert_eq!(block.transactions.len(), 1);
assert_eq!(BlockTransactions::Hashes(vec![higher_tx_hash]), block.transactions);
// FIXME: Unable to get receipt despite hotfix in https://github.com/alloy-rs/alloy/pull/614
// lower priced transaction was replaced
// let _lower_priced_receipt = lower_priced_pending_tx.get_receipt().await.unwrap();
// let higher_priced_receipt = higher_priced_pending_tx.get_receipt().await.unwrap();
// assert_eq!(1, block.transactions.len());
// assert_eq!(
// BlockTransactions::Hashes(vec![higher_priced_receipt.transaction_hash]),
// block.transactions
// );
}
#[tokio::test(flavor = "multi_thread")]
async fn can_reject_too_high_gas_limits() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let gas_limit = api.gas_limit().to::<u64>();
let amount = handle.genesis_balance().checked_div(U256::from(3u64)).unwrap();
let tx =
TransactionRequest::default().to(to).value(amount).from(from).with_gas_limit(gas_limit);
let mut tx = WithOtherFields::new(tx);
// send transaction with the exact gas limit
let pending = provider.send_transaction(tx.clone()).await.unwrap();
let pending_receipt = pending.get_receipt().await;
assert!(pending_receipt.is_ok());
tx.set_gas_limit(gas_limit + 1);
// send transaction with higher gas limit
let pending = provider.send_transaction(tx.clone()).await;
assert!(pending.is_err());
let err = pending.unwrap_err();
assert!(err.to_string().contains("gas too high"));
api.anvil_set_balance(from, U256::MAX).await.unwrap();
tx.set_gas_limit(gas_limit);
let pending = provider.send_transaction(tx).await;
let _ = pending.unwrap();
}
// <https://github.com/foundry-rs/foundry/issues/8094>
#[tokio::test(flavor = "multi_thread")]
async fn can_mine_large_gas_limit() {
let (_, handle) = spawn(NodeConfig::test().disable_block_gas_limit(true)).await;
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let gas_limit = anvil::DEFAULT_GAS_LIMIT;
let amount = handle.genesis_balance().checked_div(U256::from(3u64)).unwrap();
let tx =
TransactionRequest::default().to(to).value(amount).from(from).with_gas_limit(gas_limit);
// send transaction with higher gas limit
let pending = provider.send_transaction(WithOtherFields::new(tx)).await.unwrap();
let _resp = pending.get_receipt().await.unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn can_reject_underpriced_replacement() {
let (api, handle) = spawn(NodeConfig::test()).await;
// disable auto mining
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let nonce = provider.get_transaction_count(from).await.unwrap();
let gas_price = provider.get_gas_price().await.unwrap();
let amount = handle.genesis_balance().checked_div(U256::from(3u64)).unwrap();
let tx = TransactionRequest::default().to(to).value(amount).from(from).nonce(nonce);
let mut tx = WithOtherFields::new(tx);
tx.set_gas_price(gas_price + 1);
// send transaction with higher gas price
let higher_priced_pending_tx = provider.send_transaction(tx.clone()).await.unwrap();
tx.set_gas_price(gas_price);
// send the same transaction with lower gas price
let lower_priced_pending_tx = provider.send_transaction(tx).await;
let replacement_err = lower_priced_pending_tx.unwrap_err();
assert!(replacement_err.to_string().contains("replacement transaction underpriced"));
// mine exactly one block
api.mine_one().await;
let higher_priced_receipt = higher_priced_pending_tx.get_receipt().await.unwrap();
// ensure that only the higher priced tx was mined
let block = provider.get_block(1.into()).await.unwrap().unwrap();
assert_eq!(1, block.transactions.len());
assert_eq!(
BlockTransactions::Hashes(vec![higher_priced_receipt.transaction_hash]),
block.transactions
);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_deploy_greeter_http() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let wallet = handle.dev_wallets().next().unwrap();
let signer: EthereumWallet = wallet.clone().into();
let alloy_provider = http_provider_with_signer(&handle.http_endpoint(), signer);
let alloy_greeter_addr =
Greeter::deploy_builder(alloy_provider.clone(), "Hello World!".to_string())
// .legacy() unimplemented! in alloy
.deploy()
.await
.unwrap();
let alloy_greeter = Greeter::new(alloy_greeter_addr, alloy_provider);
let greeting = alloy_greeter.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_deploy_and_mine_manually() {
let (api, handle) = spawn(NodeConfig::test()).await;
// can mine in auto-mine mode
api.evm_mine(None).await.unwrap();
// disable auto mine
api.anvil_set_auto_mine(false).await.unwrap();
// can mine in manual mode
api.evm_mine(None).await.unwrap();
let provider = handle.http_provider();
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let greeter_builder =
Greeter::deploy_builder(provider.clone(), "Hello World!".to_string()).from(from);
let greeter_calldata = greeter_builder.calldata();
let tx = TransactionRequest::default().from(from).with_input(greeter_calldata.to_owned());
let tx = WithOtherFields::new(tx);
let tx = provider.send_transaction(tx).await.unwrap();
// mine block with tx manually
api.evm_mine(None).await.unwrap();
let receipt = tx.get_receipt().await.unwrap();
let address = receipt.contract_address.unwrap();
let greeter_contract = Greeter::new(address, provider);
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
let set_greeting = greeter_contract.setGreeting("Another Message".to_string());
let tx = set_greeting.send().await.unwrap();
// mine block manually
api.evm_mine(None).await.unwrap();
let _tx = tx.get_receipt().await.unwrap();
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Another Message", greeting);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_mine_automatically() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
// disable auto mine
api.anvil_set_auto_mine(false).await.unwrap();
let wallet = handle.dev_wallets().next().unwrap();
let greeter_builder = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string())
.from(wallet.address());
let greeter_calldata = greeter_builder.calldata();
let tx = TransactionRequest::default()
.from(wallet.address())
.with_input(greeter_calldata.to_owned());
let tx = WithOtherFields::new(tx);
let sent_tx = provider.send_transaction(tx).await.unwrap();
// re-enable auto mine
api.anvil_set_auto_mine(true).await.unwrap();
let receipt = sent_tx.get_receipt().await.unwrap();
assert_eq!(receipt.block_number, Some(1));
}
#[tokio::test(flavor = "multi_thread")]
async fn can_call_greeter_historic() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let wallet = handle.dev_wallets().next().unwrap();
let greeter_addr = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string())
.from(wallet.address())
.deploy()
.await
.unwrap();
let greeter_contract = Greeter::new(greeter_addr, provider.clone());
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
let block_number = provider.get_block_number().await.unwrap();
let _receipt = greeter_contract
.setGreeting("Another Message".to_string())
.send()
.await
.unwrap()
.get_receipt()
.await
.unwrap();
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Another Message", greeting);
// min
api.mine_one().await;
// returns previous state
let greeting =
greeter_contract.greet().block(BlockId::Number(block_number.into())).call().await.unwrap();
assert_eq!("Hello World!", greeting);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_deploy_greeter_ws() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let wallet = handle.dev_wallets().next().unwrap();
let greeter_addr = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string())
.from(wallet.address())
// .legacy() unimplemented! in alloy
.deploy()
.await
.unwrap();
let greeter_contract = Greeter::new(greeter_addr, provider.clone());
let greeting = greeter_contract.greet().call().await.unwrap();
assert_eq!("Hello World!", greeting);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_deploy_get_code() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let wallet = handle.dev_wallets().next().unwrap();
let greeter_addr = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string())
.from(wallet.address())
.deploy()
.await
.unwrap();
let code = provider.get_code_at(greeter_addr).await.unwrap();
assert!(!code.as_ref().is_empty());
}
#[tokio::test(flavor = "multi_thread")]
async fn get_blocktimestamp_works() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let contract = Multicall::deploy(provider.clone()).await.unwrap();
let timestamp = contract.getCurrentBlockTimestamp().call().await.unwrap();
assert!(timestamp > U256::from(1));
let latest_block =
api.block_by_number(alloy_rpc_types::BlockNumberOrTag::Latest).await.unwrap().unwrap();
let timestamp = contract.getCurrentBlockTimestamp().call().await.unwrap();
assert_eq!(timestamp.to::<u64>(), latest_block.header.timestamp);
// repeat call same result
let timestamp = contract.getCurrentBlockTimestamp().call().await.unwrap();
assert_eq!(timestamp.to::<u64>(), latest_block.header.timestamp);
// mock timestamp
let next_timestamp = timestamp.to::<u64>() + 1337;
api.evm_set_next_block_timestamp(next_timestamp).unwrap();
let timestamp =
contract.getCurrentBlockTimestamp().block(BlockId::pending()).call().await.unwrap();
assert_eq!(timestamp, U256::from(next_timestamp));
// repeat call same result
let timestamp =
contract.getCurrentBlockTimestamp().block(BlockId::pending()).call().await.unwrap();
assert_eq!(timestamp, U256::from(next_timestamp));
}
#[tokio::test(flavor = "multi_thread")]
async fn call_past_state() {
let (api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let wallet = handle.dev_wallets().next().unwrap();
let contract_addr =
SimpleStorage::deploy_builder(provider.clone(), "initial value".to_string())
.from(wallet.address())
.deploy()
.await
.unwrap();
let contract = SimpleStorage::new(contract_addr, provider.clone());
let deployed_block = provider.get_block_number().await.unwrap();
let value = contract.getValue().call().await.unwrap();
assert_eq!(value, "initial value");
let gas_price = api.gas_price();
let set_tx = contract.setValue("hi".to_string()).gas_price(gas_price + 1);
let _receipt = set_tx.send().await.unwrap().get_receipt().await.unwrap();
// assert new value
let value = contract.getValue().call().await.unwrap();
assert_eq!(value, "hi");
// assert previous value
let value =
contract.getValue().block(BlockId::Number(deployed_block.into())).call().await.unwrap();
assert_eq!(value, "initial value");
let hash = provider.get_block(BlockId::Number(1.into())).await.unwrap().unwrap().header.hash;
let value = contract.getValue().block(BlockId::Hash(hash.into())).call().await.unwrap();
assert_eq!(value, "initial value");
}
#[tokio::test(flavor = "multi_thread")]
async fn can_handle_multiple_concurrent_transfers_with_same_nonce() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let nonce = provider.get_transaction_count(from).await.unwrap();
// explicitly set the nonce
let tx = TransactionRequest::default()
.to(to)
.value(U256::from(100))
.from(from)
.nonce(nonce)
.with_gas_limit(21000);
let tx = WithOtherFields::new(tx);
let mut tasks = Vec::new();
for _ in 0..10 {
let tx = tx.clone();
let provider = provider.clone();
let task = tokio::task::spawn(async move {
provider.send_transaction(tx).await.unwrap().get_receipt().await
});
tasks.push(task);
}
// only one succeeded
let successful_tx =
join_all(tasks).await.into_iter().filter(|res| res.as_ref().is_ok()).count();
assert_eq!(successful_tx, 1);
assert_eq!(provider.get_transaction_count(from).await.unwrap(), 1u64);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_handle_multiple_concurrent_deploys_with_same_nonce() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let nonce = provider.get_transaction_count(from).await.unwrap();
let mut tasks = Vec::new();
let greeter = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string());
let greeter_calldata = greeter.calldata();
let tx = TransactionRequest::default()
.from(from)
.with_input(greeter_calldata.to_owned())
.nonce(nonce)
.with_gas_limit(300_000);
let tx = WithOtherFields::new(tx);
for _ in 0..10 {
let provider = provider.clone();
let tx = tx.clone();
let task = tokio::task::spawn(async move {
Ok(provider.send_transaction(tx).await?.get_receipt().await.unwrap())
});
tasks.push(task);
}
// only one succeeded
let successful_tx =
join_all(tasks).await.into_iter().filter(|res| res.as_ref().unwrap().is_ok()).count();
assert_eq!(successful_tx, 1);
assert_eq!(provider.get_transaction_count(from).await.unwrap(), 1u64);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_handle_multiple_concurrent_transactions_with_same_nonce() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.ws_provider();
let wallet = handle.dev_wallets().next().unwrap();
let from = wallet.address();
let greeter_contract =
Greeter::deploy(provider.clone(), "Hello World!".to_string()).await.unwrap();
let nonce = provider.get_transaction_count(from).await.unwrap();
let mut tasks = Vec::new();
let deploy = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string());
let deploy_calldata = deploy.calldata();
let deploy_tx = TransactionRequest::default()
.from(from)
.with_input(deploy_calldata.to_owned())
.nonce(nonce)
.with_gas_limit(300_000);
let deploy_tx = WithOtherFields::new(deploy_tx);
let set_greeting = greeter_contract.setGreeting("Hello".to_string());
let set_greeting_calldata = set_greeting.calldata();
let set_greeting_tx = TransactionRequest::default()
.from(from)
.with_input(set_greeting_calldata.to_owned())
.nonce(nonce)
.with_gas_limit(300_000);
let set_greeting_tx = WithOtherFields::new(set_greeting_tx);
for idx in 0..10 {
let provider = provider.clone();
let task = if idx % 2 == 0 {
let tx = deploy_tx.clone();
tokio::task::spawn(async move {
Ok(provider.send_transaction(tx).await?.get_receipt().await.unwrap())
})
} else {
let tx = set_greeting_tx.clone();
tokio::task::spawn(async move {
Ok(provider.send_transaction(tx).await?.get_receipt().await.unwrap())
})
};
tasks.push(task);
}
// only one succeeded
let successful_tx =
join_all(tasks).await.into_iter().filter(|res| res.as_ref().unwrap().is_ok()).count();
assert_eq!(successful_tx, 1);
assert_eq!(provider.get_transaction_count(from).await.unwrap(), nonce + 1);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_pending_transaction() {
let (api, handle) = spawn(NodeConfig::test()).await;
// disable auto mining so we can check if we can return pending tx from the mempool
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let from = handle.dev_wallets().next().unwrap().address();
let tx = TransactionRequest::default().from(from).value(U256::from(1337)).to(Address::random());
let tx = WithOtherFields::new(tx);
let tx = provider.send_transaction(tx).await.unwrap();
let pending = provider.get_transaction_by_hash(*tx.tx_hash()).await;
assert!(pending.is_ok());
api.mine_one().await;
let mined = provider.get_transaction_by_hash(*tx.tx_hash()).await.unwrap().unwrap();
assert_eq!(mined.tx_hash(), pending.unwrap().unwrap().tx_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_listen_full_pending_transaction() {
let (api, handle) = spawn(NodeConfig::test()).await;
// Disable auto-mining so transactions remain pending
api.anvil_set_auto_mine(false).await.unwrap();
let provider = alloy_provider::ProviderBuilder::new()
.connect_ws(WsConnect::new(handle.ws_endpoint()))
.await
.unwrap();
// Subscribe to full pending transactions
let sub = provider.subscribe_full_pending_transactions().await;
tokio::time::sleep(Duration::from_millis(1000)).await;
let mut stream = sub.expect("Failed to subscribe to pending tx").into_stream().take(5);
let from = handle.dev_wallets().next().unwrap().address();
let tx = TransactionRequest::default().from(from).value(U256::from(1337)).to(Address::random());
let tx = provider.send_transaction(tx).await.unwrap();
// Wait for the subscription to yield a transaction
let received = stream.next().await.expect("Failed to receive pending tx");
assert_eq!(received.tx_hash(), *tx.tx_hash());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_raw_transaction() {
let (api, handle) = spawn(NodeConfig::test()).await;
// first test the pending tx, disable auto mine
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let from = handle.dev_wallets().next().unwrap().address();
let tx = TransactionRequest::default().from(from).value(U256::from(1488)).to(Address::random());
let tx = WithOtherFields::new(tx);
let tx = provider.send_transaction(tx).await.unwrap();
let res1 = api.raw_transaction(*tx.tx_hash()).await;
assert!(res1.is_ok());
api.mine_one().await;
let res2 = api.raw_transaction(*tx.tx_hash()).await;
assert_eq!(res1.unwrap(), res2.unwrap());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_first_nonce_is_zero() {
let (api, handle) = spawn(NodeConfig::test()).await;
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let from = handle.dev_wallets().next().unwrap().address();
let nonce = provider.get_transaction_count(from).block_id(BlockId::pending()).await.unwrap();
assert_eq!(nonce, 0);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_handle_different_sender_nonce_calculation() {
let (api, handle) = spawn(NodeConfig::test()).await;
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from_first = accounts[0].address();
let from_second = accounts[1].address();
let tx_count = 10u64;
// send a bunch of tx to the mempool and check nonce is returned correctly
for idx in 1..=tx_count {
let tx_from_first = TransactionRequest::default()
.from(from_first)
.value(U256::from(1337u64))
.to(Address::random());
let tx_from_first = WithOtherFields::new(tx_from_first);
let _tx = provider.send_transaction(tx_from_first).await.unwrap();
let nonce_from_first =
provider.get_transaction_count(from_first).block_id(BlockId::pending()).await.unwrap();
assert_eq!(nonce_from_first, idx);
let tx_from_second = TransactionRequest::default()
.from(from_second)
.value(U256::from(1337u64))
.to(Address::random());
let tx_from_second = WithOtherFields::new(tx_from_second);
let _tx = provider.send_transaction(tx_from_second).await.unwrap();
let nonce_from_second =
provider.get_transaction_count(from_second).block_id(BlockId::pending()).await.unwrap();
assert_eq!(nonce_from_second, idx);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn includes_pending_tx_for_transaction_count() {
let (api, handle) = spawn(NodeConfig::test()).await;
api.anvil_set_auto_mine(false).await.unwrap();
let provider = handle.http_provider();
let from = handle.dev_wallets().next().unwrap().address();
let tx_count = 10u64;
// send a bunch of tx to the mempool and check nonce is returned correctly
for idx in 1..=tx_count {
let tx =
TransactionRequest::default().from(from).value(U256::from(1337)).to(Address::random());
let tx = WithOtherFields::new(tx);
let _tx = provider.send_transaction(tx).await.unwrap();
let nonce =
provider.get_transaction_count(from).block_id(BlockId::pending()).await.unwrap();
assert_eq!(nonce, idx);
}
api.mine_one().await;
let nonce = provider.get_transaction_count(from).block_id(BlockId::pending()).await.unwrap();
assert_eq!(nonce, tx_count);
}
#[tokio::test(flavor = "multi_thread")]
async fn can_get_historic_info() {
let (_api, handle) = spawn(NodeConfig::test()).await;
let provider = handle.http_provider();
let accounts = handle.dev_wallets().collect::<Vec<_>>();
let from = accounts[0].address();
let to = accounts[1].address();
let amount = handle.genesis_balance().checked_div(U256::from(2u64)).unwrap();
let tx = TransactionRequest::default().to(to).value(amount).from(from);
let tx = WithOtherFields::new(tx);
let tx = provider.send_transaction(tx).await.unwrap();
let _ = tx.get_receipt().await.unwrap();
let nonce_pre =
provider.get_transaction_count(from).block_id(BlockId::number(0)).await.unwrap();
let nonce_post = provider.get_transaction_count(from).await.unwrap();
assert!(nonce_pre < nonce_post);
let balance_pre = provider.get_balance(from).block_id(BlockId::number(0)).await.unwrap();
let balance_post = provider.get_balance(from).await.unwrap();
assert!(balance_post < balance_pre);
let to_balance = provider.get_balance(to).await.unwrap();
assert_eq!(balance_pre.saturating_add(amount), to_balance);
}
// <https://github.com/eth-brownie/brownie/issues/1549>
// Receipts must expose `to` for plain transfers and `contract_address`
// (with `to == None`) for deployments.
#[tokio::test(flavor = "multi_thread")]
async fn test_tx_receipt() {
    let (_api, handle) = spawn(NodeConfig::test()).await;
    let wallet = handle.dev_wallets().next().unwrap();
    let provider = handle.http_provider();

    // Plain value transfer: the receipt carries the recipient address.
    let tx = TransactionRequest::default().to(Address::random()).value(U256::from(1337));
    let tx = WithOtherFields::new(tx);
    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    assert!(tx.to.is_some());

    // Contract deployment: submit the Greeter creation bytecode with no `to`.
    let greeter_deploy = Greeter::deploy_builder(provider.clone(), "Hello World!".to_string());
    let greeter_calldata = greeter_deploy.calldata();

    let tx = TransactionRequest::default()
        .from(wallet.address())
        .with_input(greeter_calldata.to_owned());
    let tx = WithOtherFields::new(tx);

    let tx = provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap();
    // `to` field is none if it's a contract creation transaction: https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt
    assert!(tx.to.is_none());
    assert!(tx.contract_address.is_some());
}
#[tokio::test(flavor = "multi_thread")]
async fn can_stream_pending_transactions() {
let (_api, handle) =
spawn(NodeConfig::test().with_blocktime(Some(Duration::from_secs(2)))).await;
let num_txs = 5;
let provider = handle.http_provider();
let ws_provider = connect_pubsub(&handle.ws_endpoint()).await;
let accounts = provider.get_accounts().await.unwrap();
let tx =
TransactionRequest::default().from(accounts[0]).to(accounts[0]).value(U256::from(1e18));
let mut sending = futures::future::join_all(
std::iter::repeat_n(tx.clone(), num_txs)
.enumerate()
.map(|(nonce, tx)| tx.nonce(nonce as u64))
.map(|tx| async {
let tx = WithOtherFields::new(tx);
provider.send_transaction(tx).await.unwrap().get_receipt().await.unwrap()
}),
)
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/tests/it/ipc.rs | crates/anvil/tests/it/ipc.rs | //! IPC tests
use crate::{init_tracing, utils::connect_pubsub};
use alloy_primitives::U256;
use alloy_provider::Provider;
use anvil::{NodeConfig, spawn};
use futures::StreamExt;
use tempfile::TempDir;
/// Builds a test [`NodeConfig`] with an IPC endpoint enabled.
///
/// On unix the endpoint is a socket file inside a fresh temp dir (returned so
/// it outlives the test); on Windows it is a randomly named pipe (no dir).
fn ipc_config() -> (Option<TempDir>, NodeConfig) {
    let (dir, path) = if cfg!(unix) {
        let tmp = tempfile::tempdir().unwrap();
        let sock = tmp.path().join("anvil.ipc").to_string_lossy().into_owned();
        (Some(tmp), sock)
    } else {
        (None, format!(r"\\.\pipe\anvil_test_{}.ipc", rand::random::<u64>()))
    };
    (dir, NodeConfig::test().with_ipc(Some(Some(path))))
}
// The IPC transport must report the same chain height as the in-process API.
#[tokio::test(flavor = "multi_thread")]
async fn can_get_block_number_ipc() {
    init_tracing();

    let (_dir, config) = ipc_config();
    let (api, handle) = spawn(config).await;

    // A fresh node starts at genesis (block 0).
    let block_num = api.block_number().unwrap();
    assert_eq!(block_num, U256::ZERO);

    let provider = handle.ipc_provider().unwrap();

    let num = provider.get_block_number().await.unwrap();
    assert_eq!(num, block_num.to::<u64>());
}
// A newHeads subscription over IPC should deliver consecutive block headers.
#[tokio::test(flavor = "multi_thread")]
async fn test_sub_new_heads_ipc() {
    init_tracing();

    let (_dir, config) = ipc_config();
    let (api, handle) = spawn(config).await;
    let provider = connect_pubsub(handle.ipc_path().unwrap().as_str()).await;

    // mine a block every 1 seconds
    api.anvil_set_interval_mining(1).unwrap();

    // Collect the first three mined headers from the subscription stream.
    let blocks = provider.subscribe_blocks().await.unwrap().into_stream();
    let blocks = blocks.take(3).collect::<Vec<_>>().await;
    let block_numbers = blocks.into_iter().map(|b| b.number).collect::<Vec<_>>();

    assert_eq!(block_numbers, vec![1, 2, 3]);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/rpc/src/lib.rs | crates/anvil/rpc/src/lib.rs | //! # anvil-rpc
//!
//! JSON-RPC types.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
/// JSON-RPC request bindings
pub mod request;
/// JSON-RPC response bindings
pub mod response;
/// JSON-RPC error bindings
pub mod error;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/rpc/src/response.rs | crates/anvil/rpc/src/response.rs | use crate::{
error::RpcError,
request::{Id, Version},
};
use serde::{Deserialize, Serialize};
/// Response of a _single_ rpc call
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct RpcResponse {
    /// JSON RPC version, always `"2.0"`
    jsonrpc: Version,
    /// Id of the request this answers; `None` for id-less errors (e.g. parse
    /// failures) and omitted from the serialized output in that case.
    #[serde(skip_serializing_if = "Option::is_none")]
    id: Option<Id>,
    /// Payload, flattened into the object as either a `result` or `error` key.
    #[serde(flatten)]
    result: ResponseResult,
}
impl From<RpcError> for RpcResponse {
fn from(e: RpcError) -> Self {
Self { jsonrpc: Version::V2, id: None, result: ResponseResult::Error(e) }
}
}
impl RpcResponse {
    /// Creates a response answering the request `id` with the given payload.
    pub fn new(id: Id, content: impl Into<ResponseResult>) -> Self {
        let result = content.into();
        Self { jsonrpc: Version::V2, id: Some(id), result }
    }

    /// Shorthand for an "invalid request" error response echoing `id`.
    pub fn invalid_request(id: Id) -> Self {
        Self::new(id, RpcError::invalid_request())
    }
}
/// Represents the result of a call either success or error
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub enum ResponseResult {
    /// Successful payload, serialized under the `result` key.
    #[serde(rename = "result")]
    Success(serde_json::Value),
    /// Failure payload, serialized under the `error` key.
    #[serde(rename = "error")]
    Error(RpcError),
}
impl ResponseResult {
    /// Serializes `content` into a success payload.
    ///
    /// Panics if `content` cannot be serialized to a JSON value.
    pub fn success<S>(content: S) -> Self
    where
        S: Serialize + 'static,
    {
        let value = serde_json::to_value(&content).unwrap();
        Self::Success(value)
    }

    /// Wraps an [`RpcError`] into an error payload.
    pub fn error(error: RpcError) -> Self {
        Self::Error(error)
    }
}
impl From<RpcError> for ResponseResult {
    fn from(err: RpcError) -> Self {
        // Delegates to the `error` constructor.
        Self::error(err)
    }
}
/// Synchronous response: either one response object or a batch,
/// mirroring the single/batch request shapes.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(untagged)]
pub enum Response {
    /// single json rpc response
    Single(RpcResponse),
    /// batch of several responses
    Batch(Vec<RpcResponse>),
}
impl Response {
/// Creates new [`Response`] with the given [`RpcError`].
pub fn error(error: RpcError) -> Self {
RpcResponse::new(Id::Null, ResponseResult::Error(error)).into()
}
}
impl From<RpcError> for Response {
    fn from(err: RpcError) -> Self {
        // An error with no known request id becomes a single `Id::Null` response.
        Self::error(err)
    }
}

impl From<RpcResponse> for Response {
    fn from(resp: RpcResponse) -> Self {
        Self::Single(resp)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/rpc/src/error.rs | crates/anvil/rpc/src/error.rs | //! JSON-RPC error bindings
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::{borrow::Cow, fmt};
/// Represents a JSON-RPC error
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct RpcError {
    /// machine-readable error code
    pub code: ErrorCode,
    /// error message
    pub message: Cow<'static, str>,
    /// optional extra payload; omitted from JSON when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<serde_json::Value>,
}
impl RpcError {
    /// New [`RpcError`] with the given [`ErrorCode`] and its canonical message.
    pub const fn new(code: ErrorCode) -> Self {
        Self { message: Cow::Borrowed(code.message()), code, data: None }
    }

    /// Builds an error carrying `code` together with a custom message.
    fn with_message(code: ErrorCode, message: impl Into<String>) -> Self {
        Self { code, message: Cow::Owned(message.into()), data: None }
    }

    /// Creates a new `ParseError` error.
    pub const fn parse_error() -> Self {
        Self::new(ErrorCode::ParseError)
    }

    /// Creates a new `MethodNotFound` error.
    pub const fn method_not_found() -> Self {
        Self::new(ErrorCode::MethodNotFound)
    }

    /// Creates a new `InvalidRequest` error.
    pub const fn invalid_request() -> Self {
        Self::new(ErrorCode::InvalidRequest)
    }

    /// Creates a new `InternalError` error.
    pub const fn internal_error() -> Self {
        Self::new(ErrorCode::InternalError)
    }

    /// Creates a new `InvalidParams` error.
    pub fn invalid_params<M>(message: M) -> Self
    where
        M: Into<String>,
    {
        Self::with_message(ErrorCode::InvalidParams, message)
    }

    /// Creates a new `InternalError` error with a message.
    pub fn internal_error_with<M>(message: M) -> Self
    where
        M: Into<String>,
    {
        Self::with_message(ErrorCode::InternalError, message)
    }

    /// Creates a new RPC error for when a transaction was rejected.
    pub fn transaction_rejected<M>(message: M) -> Self
    where
        M: Into<String>,
    {
        Self::with_message(ErrorCode::TransactionRejected, message)
    }
}
impl fmt::Display for RpcError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as "<canonical code message>: <specific message>".
        write!(f, "{}: {}", self.code.message(), self.message)
    }
}
/// List of JSON-RPC error codes
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ErrorCode {
    /// Server received invalid JSON; server-side error while parsing (-32700).
    ParseError,
    /// The JSON sent is not a valid request object (-32600).
    InvalidRequest,
    /// The method does not exist or is not available (-32601).
    MethodNotFound,
    /// Invalid method parameter(s) (-32602).
    InvalidParams,
    /// Internal JSON-RPC error (-32603).
    InternalError,
    /// Failed to send transaction (-32003), See also <https://github.com/MetaMask/rpc-errors/blob/main/src/error-constants.ts>
    TransactionRejected,
    /// Custom geth error code (3), <https://github.com/vapory-legacy/wiki/blob/master/JSON-RPC-Error-Codes-Improvement-Proposal.md>
    ExecutionError,
    /// Used for server specific errors.
    ServerError(i64),
}
impl ErrorCode {
    /// Returns the numeric JSON-RPC error code as `i64`.
    pub fn code(&self) -> i64 {
        match *self {
            Self::ParseError => -32700,
            Self::InvalidRequest => -32600,
            Self::MethodNotFound => -32601,
            Self::InvalidParams => -32602,
            Self::InternalError => -32603,
            Self::TransactionRejected => -32003,
            Self::ExecutionError => 3,
            Self::ServerError(c) => c,
        }
    }

    /// Returns the canonical human-readable message for the error.
    pub const fn message(&self) -> &'static str {
        // Arms kept in the same order as `code` for easy cross-checking.
        match *self {
            Self::ParseError => "Parse error",
            Self::InvalidRequest => "Invalid request",
            Self::MethodNotFound => "Method not found",
            Self::InvalidParams => "Invalid params",
            Self::InternalError => "Internal error",
            Self::TransactionRejected => "Transaction rejected",
            Self::ExecutionError => "Execution error",
            Self::ServerError(_) => "Server error",
        }
    }
}
impl Serialize for ErrorCode {
    /// Serializes the code as its numeric JSON-RPC value.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_i64(self.code())
    }
}

impl<'a> Deserialize<'a> for ErrorCode {
    /// Deserializes any integer; unknown codes map to `ServerError`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'a>,
    {
        let code = i64::deserialize(deserializer)?;
        Ok(Self::from(code))
    }
}
impl From<i64> for ErrorCode {
    fn from(code: i64) -> Self {
        // Inverse of `ErrorCode::code`; anything unrecognized is a server error.
        match code {
            -32700 => Self::ParseError,
            -32600 => Self::InvalidRequest,
            -32601 => Self::MethodNotFound,
            -32602 => Self::InvalidParams,
            -32603 => Self::InternalError,
            -32003 => Self::TransactionRejected,
            3 => Self::ExecutionError,
            _ => Self::ServerError(code),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/rpc/src/request.rs | crates/anvil/rpc/src/request.rs | use serde::{Deserialize, Serialize};
use std::fmt;
/// A JSON-RPC request object, a method call
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct RpcMethodCall {
    /// The version of the protocol
    pub jsonrpc: Version,
    /// The name of the method to execute
    pub method: String,
    /// An array or object containing the parameters to be passed to the function.
    /// Defaults to [`RequestParams::None`] when the field is absent.
    #[serde(default = "no_params")]
    pub params: RequestParams,
    /// The identifier for this request issued by the client,
    /// An [Id] must be a String, null or a number.
    /// If missing it's considered a notification in [Version::V2]
    pub id: Id,
}
impl RpcMethodCall {
    /// Returns a clone of the request's id.
    pub fn id(&self) -> Id {
        self.id.clone()
    }
}
/// Represents a JSON-RPC request which is considered a notification (missing [Id] optional
/// [Version])
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct RpcNotification {
    /// Protocol version; notifications may omit it.
    pub jsonrpc: Option<Version>,
    /// The name of the method to invoke.
    pub method: String,
    /// Parameters; defaults to [`RequestParams::None`] when absent.
    #[serde(default = "no_params")]
    pub params: RequestParams,
}
/// Representation of a single JSON-RPC call
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum RpcCall {
    /// the RPC method to invoke
    MethodCall(RpcMethodCall),
    /// A notification (no [Id] provided)
    Notification(RpcNotification),
    /// Invalid call; untagged deserialization falls through to this variant
    /// when the payload matches neither of the above.
    Invalid {
        /// id or [Id::Null]
        #[serde(default = "null_id")]
        id: Id,
    },
}
/// Represents a JSON-RPC request: either one call or a batch of calls.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
#[serde(untagged)]
pub enum Request {
    /// single json rpc request [RpcCall]
    Single(RpcCall),
    /// batch of several requests
    Batch(Vec<RpcCall>),
}
/// Request parameters
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged, deny_unknown_fields)]
pub enum RequestParams {
    /// no parameters provided (JSON `null`)
    None,
    /// An array of JSON values
    Array(Vec<serde_json::Value>),
    /// a map of JSON values
    Object(serde_json::Map<String, serde_json::Value>),
}
impl From<RequestParams> for serde_json::Value {
    /// Converts the params into the JSON value they represent (`None` -> `null`).
    fn from(params: RequestParams) -> Self {
        match params {
            RequestParams::None => Self::Null,
            RequestParams::Array(values) => Self::Array(values),
            RequestParams::Object(map) => Self::Object(map),
        }
    }
}
/// serde `default` helper: an absent `params` field becomes [`RequestParams::None`].
fn no_params() -> RequestParams {
    RequestParams::None
}
/// Represents the version of the RPC protocol
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum Version {
    /// JSON-RPC 2.0, serialized as the string `"2.0"`.
    #[serde(rename = "2.0")]
    V2,
}
/// A request identifier: a string, an integer, or `null`.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Id {
    /// string id
    String(String),
    /// numeric id
    Number(i64),
    /// explicit `null` id
    Null,
}
impl fmt::Display for Id {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Delegate so caller-supplied formatting flags (width etc.) still apply.
            Self::String(s) => s.fmt(f),
            Self::Number(n) => n.fmt(f),
            Self::Null => f.write_str("null"),
        }
    }
}
/// serde `default` helper: a missing `id` on an invalid call becomes [`Id::Null`].
fn null_id() -> Id {
    Id::Null
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_serialize_batch() {
let batch = Request::Batch(vec![
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![
serde_json::Value::from(999),
serde_json::Value::from(1337),
]),
id: Id::Number(1),
}),
RpcCall::Notification(RpcNotification {
jsonrpc: Some(Version::V2),
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![serde_json::Value::from(999)]),
}),
]);
let obj = serde_json::to_string(&batch).unwrap();
assert_eq!(
obj,
r#"[{"jsonrpc":"2.0","method":"eth_method","params":[999,1337],"id":1},{"jsonrpc":"2.0","method":"eth_method","params":[999]}]"#
);
}
#[test]
fn can_deserialize_batch() {
let s = r#"[{}, {"jsonrpc": "2.0", "method": "eth_call", "params": [1337,420], "id": 1},{"jsonrpc": "2.0", "method": "notify", "params": [999]}]"#;
let obj: Request = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
Request::Batch(vec![
RpcCall::Invalid { id: Id::Null },
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_call".to_owned(),
params: RequestParams::Array(vec![
serde_json::Value::from(1337),
serde_json::Value::from(420)
]),
id: Id::Number(1)
}),
RpcCall::Notification(RpcNotification {
jsonrpc: Some(Version::V2),
method: "notify".to_owned(),
params: RequestParams::Array(vec![serde_json::Value::from(999)])
})
])
)
}
#[test]
fn can_serialize_method() {
let m = RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![
serde_json::Value::from(999),
serde_json::Value::from(1337),
]),
id: Id::Number(1),
};
let obj = serde_json::to_string(&m).unwrap();
assert_eq!(obj, r#"{"jsonrpc":"2.0","method":"eth_method","params":[999,1337],"id":1}"#);
}
#[test]
fn can_serialize_call_notification() {
let n = RpcCall::Notification(RpcNotification {
jsonrpc: Some(Version::V2),
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![serde_json::Value::from(999)]),
});
let obj = serde_json::to_string(&n).unwrap();
assert_eq!(obj, r#"{"jsonrpc":"2.0","method":"eth_method","params":[999]}"#);
}
#[test]
fn can_serialize_notification() {
let n = RpcNotification {
jsonrpc: Some(Version::V2),
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![
serde_json::Value::from(999),
serde_json::Value::from(1337),
]),
};
let obj = serde_json::to_string(&n).unwrap();
assert_eq!(obj, r#"{"jsonrpc":"2.0","method":"eth_method","params":[999,1337]}"#);
}
#[test]
fn can_deserialize_notification() {
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": [999,1337]}"#;
let obj: RpcNotification = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcNotification {
jsonrpc: Some(Version::V2),
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![
serde_json::Value::from(999),
serde_json::Value::from(1337)
])
}
);
let s = r#"{"jsonrpc": "2.0", "method": "foobar"}"#;
let obj: RpcNotification = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcNotification {
jsonrpc: Some(Version::V2),
method: "foobar".to_owned(),
params: RequestParams::None,
}
);
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": [999,1337], "id": 1}"#;
let obj: Result<RpcNotification, _> = serde_json::from_str(s);
assert!(obj.is_err());
}
#[test]
fn can_deserialize_call() {
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": [999]}"#;
let obj: RpcCall = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcCall::Notification(RpcNotification {
jsonrpc: Some(Version::V2),
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![serde_json::Value::from(999)])
})
);
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": [999], "id": 1}"#;
let obj: RpcCall = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![serde_json::Value::from(999)]),
id: Id::Number(1)
})
);
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": [], "id": 1}"#;
let obj: RpcCall = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::Array(vec![]),
id: Id::Number(1)
})
);
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "params": null, "id": 1}"#;
let obj: RpcCall = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::None,
id: Id::Number(1)
})
);
let s = r#"{"jsonrpc": "2.0", "method": "eth_method", "id": 1}"#;
let obj: RpcCall = serde_json::from_str(s).unwrap();
assert_eq!(
obj,
RpcCall::MethodCall(RpcMethodCall {
jsonrpc: Version::V2,
method: "eth_method".to_owned(),
params: RequestParams::None,
id: Id::Number(1)
})
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/bin/main.rs | crates/anvil/bin/main.rs | //! The `anvil` CLI: a fast local Ethereum development node, akin to Hardhat Network, Tenderly.
use anvil::args::run;
#[global_allocator]
static ALLOC: foundry_cli::utils::Allocator = foundry_cli::utils::new_allocator();

/// Entry point: runs the anvil CLI and exits with status 1 on error.
fn main() {
    if let Err(err) = run() {
        // Best-effort error print; ignore failures writing to the shell.
        let _ = foundry_common::sh_err!("{err:?}");
        std::process::exit(1);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/lib.rs | crates/anvil/core/src/lib.rs | //! # anvil-core
//!
//! Core Ethereum types for Anvil.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
/// Various Ethereum types
pub mod eth;
/// Additional useful types
pub mod types;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/types.rs | crates/anvil/core/src/types.rs | use alloy_primitives::Bytes;
use alloy_rpc_types::TransactionRequest;
use serde::Deserialize;
/// Represents the options used in `anvil_reorg`
#[derive(Debug, Clone, Deserialize)]
pub struct ReorgOptions {
    /// The depth of the reorg
    pub depth: u64,
    /// List of transaction requests and blocks pairs to be mined into the new chain
    pub tx_block_pairs: Vec<(TransactionData, u64)>,
}
/// A transaction to replay during a reorg: either a structured request or raw bytes.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
#[expect(clippy::large_enum_variant)]
pub enum TransactionData {
    /// A structured transaction request (JSON object).
    JSON(TransactionRequest),
    /// An already-encoded raw transaction.
    Raw(Bytes),
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/block.rs | crates/anvil/core/src/eth/block.rs | use super::transaction::TransactionInfo;
use alloy_consensus::{
BlockBody, EMPTY_OMMER_ROOT_HASH, Header, proofs::calculate_transaction_root,
};
use foundry_primitives::FoundryReceiptEnvelope;
// Type alias to optionally support impersonated transactions
type Transaction = crate::eth::transaction::MaybeImpersonatedTransaction;

/// Type alias for Ethereum Block with Anvil's transaction type
pub type Block = alloy_consensus::Block<Transaction>;

/// Container type that gathers all block data
#[derive(Clone, Debug)]
pub struct BlockInfo {
    /// The assembled block (header + body).
    pub block: Block,
    /// Extra info for the block's transactions.
    pub transactions: Vec<TransactionInfo>,
    /// Receipts produced for the block.
    pub receipts: Vec<FoundryReceiptEnvelope>,
}
/// Helper function to create a new block with Header and Anvil transactions
///
/// The header's `transactions_root` is recomputed from the given transactions
/// and `ommers_hash` is set to the empty-ommers hash (ommers are always empty,
/// withdrawals absent).
///
/// Note: if the `impersonate-tx` feature is enabled this will also accept
/// `MaybeImpersonatedTransaction`.
pub fn create_block<T>(mut header: Header, transactions: impl IntoIterator<Item = T>) -> Block
where
    T: Into<Transaction>,
{
    let txs: Vec<Transaction> = transactions.into_iter().map(Into::into).collect();
    header.transactions_root = calculate_transaction_root(&txs);
    header.ommers_hash = EMPTY_OMMER_ROOT_HASH;
    Block::new(header, BlockBody { transactions: txs, ommers: Vec::new(), withdrawals: None })
}
#[cfg(test)]
mod tests {
use alloy_primitives::{
Address, B64, B256, Bloom, U256, b256,
hex::{self, FromHex},
};
use alloy_rlp::Decodable;
use super::*;
use std::str::FromStr;
#[test]
fn header_rlp_roundtrip() {
let mut header = Header {
parent_hash: Default::default(),
ommers_hash: Default::default(),
beneficiary: Default::default(),
state_root: Default::default(),
transactions_root: Default::default(),
receipts_root: Default::default(),
logs_bloom: Default::default(),
difficulty: Default::default(),
number: 124u64,
gas_limit: Default::default(),
gas_used: 1337u64,
timestamp: 0,
extra_data: Default::default(),
mix_hash: Default::default(),
nonce: B64::with_last_byte(99),
withdrawals_root: Default::default(),
blob_gas_used: Default::default(),
excess_blob_gas: Default::default(),
parent_beacon_block_root: Default::default(),
base_fee_per_gas: None,
requests_hash: None,
};
let encoded = alloy_rlp::encode(&header);
let decoded: Header = Header::decode(&mut encoded.as_ref()).unwrap();
assert_eq!(header, decoded);
header.base_fee_per_gas = Some(12345u64);
let encoded = alloy_rlp::encode(&header);
let decoded: Header = Header::decode(&mut encoded.as_ref()).unwrap();
assert_eq!(header, decoded);
}
#[test]
fn test_encode_block_header() {
use alloy_rlp::Encodable;
let expected = hex::decode("f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000").unwrap();
let mut data = vec![];
let header = Header {
parent_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
ommers_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
beneficiary: Address::from_str("0000000000000000000000000000000000000000").unwrap(),
state_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
transactions_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
receipts_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
logs_bloom: Bloom::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(),
difficulty: U256::from(2222),
number: 0xd05u64,
gas_limit: 0x115cu64,
gas_used: 0x15b3u64,
timestamp: 0x1a0au64,
extra_data: hex::decode("7788").unwrap().into(),
mix_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
nonce: B64::ZERO,
base_fee_per_gas: None,
requests_hash: None,
};
header.encode(&mut data);
assert_eq!(hex::encode(&data), hex::encode(expected));
assert_eq!(header.length(), data.len());
}
#[test]
// Test vector from: https://eips.ethereum.org/EIPS/eip-2481
fn test_decode_block_header() {
let data = hex::decode("f901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000").unwrap();
let expected = Header {
parent_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
ommers_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
beneficiary: Address::from_str("0000000000000000000000000000000000000000").unwrap(),
state_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
transactions_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
receipts_root: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(),
difficulty: U256::from(2222),
number: 0xd05u64,
gas_limit: 0x115cu64,
gas_used: 0x15b3u64,
timestamp: 0x1a0au64,
extra_data: hex::decode("7788").unwrap().into(),
mix_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
nonce: B64::ZERO,
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
base_fee_per_gas: None,
requests_hash: None,
};
let header = Header::decode(&mut data.as_slice()).unwrap();
assert_eq!(header, expected);
}
#[test]
// Test vector from: https://github.com/ethereum/tests/blob/f47bbef4da376a49c8fc3166f09ab8a6d182f765/BlockchainTests/ValidBlocks/bcEIP1559/baseFee.json#L15-L36
fn test_eip1559_block_header_hash() {
let expected_hash =
b256!("0x6a251c7c3c5dca7b42407a3752ff48f3bbca1fab7f9868371d9918daf1988d1f");
let header = Header {
parent_hash: B256::from_str("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a").unwrap(),
ommers_hash: B256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
beneficiary: Address::from_str("ba5e000000000000000000000000000000000000").unwrap(),
state_root: B256::from_str("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7").unwrap(),
transactions_root: B256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(),
receipts_root: B256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(),
logs_bloom: Bloom::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(),
difficulty: U256::from(0x020000),
number: 1u64,
gas_limit: U256::from(0x016345785d8a0000u128).to::<u64>(),
gas_used: U256::from(0x015534).to::<u64>(),
timestamp: 0x079e,
extra_data: hex::decode("42").unwrap().into(),
mix_hash: B256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
nonce: B64::ZERO,
base_fee_per_gas: Some(875),
withdrawals_root: None,
blob_gas_used: None,
excess_blob_gas: None,
parent_beacon_block_root: None,
requests_hash: None,
};
assert_eq!(header.hash_slow(), expected_hash);
}
#[test]
// Test vector from network
fn block_network_roundtrip() {
use alloy_rlp::Encodable;
let data = hex::decode("f9034df90348a0fbdbd8d2d0ac5f14bd5fa90e547fe6f1d15019c724f8e7b60972d381cd5d9cf8a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794c9577e7945db22e38fc060909f2278c7746b0f9ba05017cfa3b0247e35197215ae8d610265ffebc8edca8ea66d6567eb0adecda867a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018355bb7b871fffffffffffff808462bd0e1ab9014bf90148a00000000000000000000000000000000000000000000000000000000000000000f85494319fa8f1bc4e53410e92d10d918659b16540e60a945a573efb304d04c1224cd012313e827eca5dce5d94a9c831c5a268031176ebf5f3de5051e8cba0dbfe94c9577e7945db22e38fc060909f2278c7746b0f9b808400000000f8c9b841a6946f2d16f68338cbcbd8b117374ab421128ce422467088456bceba9d70c34106128e6d4564659cf6776c08a4186063c0a05f7cffd695c10cf26a6f301b67f800b8412b782100c18c35102dc0a37ece1a152544f04ad7dc1868d18a9570f744ace60870f822f53d35e89a2ea9709ccbf1f4a25ee5003944faa845d02dde0a41d5704601b841d53caebd6c8a82456e85c2806a9e08381f959a31fb94a77e58f00e38ad97b2e0355b8519ab2122662cbe022f2a4ef7ff16adc0b2d5dcd123181ec79705116db300a063746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365880000000000000000c0c0").unwrap();
let block = Block::decode(&mut data.as_slice()).unwrap();
// encode and check that it matches the original data
let mut encoded = Vec::new();
block.encode(&mut encoded);
assert_eq!(data, encoded);
// check that length of encoding is the same as the output of `length`
assert_eq!(block.length(), encoded.len());
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/subscription.rs | crates/anvil/core/src/eth/subscription.rs | //! Subscription types
use alloy_primitives::hex;
use rand::{Rng, distr::Alphanumeric, rng};
use std::fmt;
/// Unique subscription id
#[derive(Clone, PartialEq, Eq, Hash, serde::Deserialize, serde::Serialize)]
#[serde(untagged)]
pub enum SubscriptionId {
/// numerical sub id
Number(u64),
/// string sub id, a hash for example
String(String),
}
impl SubscriptionId {
/// Generates a new random hex identifier
pub fn random_hex() -> Self {
Self::String(hex_id())
}
}
impl fmt::Display for SubscriptionId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Number(num) => num.fmt(f),
Self::String(s) => s.fmt(f),
}
}
}
impl fmt::Debug for SubscriptionId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Number(num) => num.fmt(f),
Self::String(s) => s.fmt(f),
}
}
}
/// Provides random hex identifier with a certain length
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct HexIdProvider {
len: usize,
}
impl HexIdProvider {
/// Generates a random hex encoded Id
pub fn generate(&self) -> String {
let id: String =
(&mut rng()).sample_iter(Alphanumeric).map(char::from).take(self.len).collect();
let out = hex::encode(id);
format!("0x{out}")
}
}
impl Default for HexIdProvider {
fn default() -> Self {
Self { len: 16 }
}
}
/// Returns a new random hex identifier
pub fn hex_id() -> String {
HexIdProvider::default().generate()
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/mod.rs | crates/anvil/core/src/eth/mod.rs | use crate::{eth::subscription::SubscriptionId, types::ReorgOptions};
use alloy_primitives::{Address, B64, B256, Bytes, TxHash, U256};
use alloy_rpc_types::{
BlockId, BlockNumberOrTag as BlockNumber, BlockOverrides, Filter, Index,
anvil::{Forking, MineOptions},
pubsub::{Params as SubscriptionParams, SubscriptionKind},
request::TransactionRequest,
simulate::SimulatePayload,
state::StateOverride,
trace::{
filter::TraceFilter,
geth::{GethDebugTracingCallOptions, GethDebugTracingOptions},
},
};
use alloy_serde::WithOtherFields;
use foundry_common::serde_helpers::{
deserialize_number, deserialize_number_opt, deserialize_number_seq,
};
pub mod block;
pub mod subscription;
pub mod transaction;
pub mod wallet;
pub mod serde_helpers;
use self::serde_helpers::*;
/// Wrapper type that ensures the type is named `params`
#[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)]
pub struct Params<T: Default> {
#[serde(default)]
pub params: T,
}
/// Represents ethereum JSON-RPC API
#[derive(Clone, Debug, serde::Deserialize)]
#[serde(tag = "method", content = "params")]
#[allow(clippy::large_enum_variant)]
pub enum EthRequest {
#[serde(rename = "web3_clientVersion", with = "empty_params")]
Web3ClientVersion(()),
#[serde(rename = "web3_sha3", with = "sequence")]
Web3Sha3(Bytes),
/// Returns the current Ethereum protocol version.
#[serde(rename = "eth_protocolVersion", with = "empty_params")]
EthProtocolVersion(()),
#[serde(rename = "eth_chainId", with = "empty_params")]
EthChainId(()),
#[serde(rename = "eth_networkId", alias = "net_version", with = "empty_params")]
EthNetworkId(()),
#[serde(rename = "net_listening", with = "empty_params")]
NetListening(()),
/// Returns the number of hashes per second with which the node is mining.
#[serde(rename = "eth_hashrate", with = "empty_params")]
EthHashrate(()),
#[serde(rename = "eth_gasPrice", with = "empty_params")]
EthGasPrice(()),
#[serde(rename = "eth_maxPriorityFeePerGas", with = "empty_params")]
EthMaxPriorityFeePerGas(()),
#[serde(rename = "eth_blobBaseFee", with = "empty_params")]
EthBlobBaseFee(()),
#[serde(rename = "eth_accounts", alias = "eth_requestAccounts", with = "empty_params")]
EthAccounts(()),
#[serde(rename = "eth_blockNumber", with = "empty_params")]
EthBlockNumber(()),
/// Returns the client coinbase address.
#[serde(rename = "eth_coinbase", with = "empty_params")]
EthCoinbase(()),
#[serde(rename = "eth_getBalance")]
EthGetBalance(Address, Option<BlockId>),
#[serde(rename = "eth_getAccount")]
EthGetAccount(Address, Option<BlockId>),
#[serde(rename = "eth_getAccountInfo")]
EthGetAccountInfo(Address, Option<BlockId>),
#[serde(rename = "eth_getStorageAt")]
EthGetStorageAt(Address, U256, Option<BlockId>),
#[serde(rename = "eth_getBlockByHash")]
EthGetBlockByHash(B256, bool),
#[serde(rename = "eth_getBlockByNumber")]
EthGetBlockByNumber(
#[serde(deserialize_with = "lenient_block_number::lenient_block_number")] BlockNumber,
bool,
),
#[serde(rename = "eth_getTransactionCount")]
EthGetTransactionCount(Address, Option<BlockId>),
#[serde(rename = "eth_getBlockTransactionCountByHash", with = "sequence")]
EthGetTransactionCountByHash(B256),
#[serde(
rename = "eth_getBlockTransactionCountByNumber",
deserialize_with = "lenient_block_number::lenient_block_number_seq"
)]
EthGetTransactionCountByNumber(BlockNumber),
#[serde(rename = "eth_getUncleCountByBlockHash", with = "sequence")]
EthGetUnclesCountByHash(B256),
#[serde(
rename = "eth_getUncleCountByBlockNumber",
deserialize_with = "lenient_block_number::lenient_block_number_seq"
)]
EthGetUnclesCountByNumber(BlockNumber),
#[serde(rename = "eth_getCode")]
EthGetCodeAt(Address, Option<BlockId>),
/// Returns the account and storage values of the specified account including the Merkle-proof.
/// This call can be used to verify that the data you are pulling from is not tampered with.
#[serde(rename = "eth_getProof")]
EthGetProof(Address, Vec<B256>, Option<BlockId>),
/// The sign method calculates an Ethereum specific signature with:
#[serde(rename = "eth_sign")]
EthSign(Address, Bytes),
/// The sign method calculates an Ethereum specific signature, equivalent to eth_sign:
/// <https://docs.metamask.io/wallet/reference/personal_sign/>
#[serde(rename = "personal_sign")]
PersonalSign(Bytes, Address),
#[serde(rename = "eth_signTransaction", with = "sequence")]
EthSignTransaction(Box<WithOtherFields<TransactionRequest>>),
/// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md).
#[serde(rename = "eth_signTypedData")]
EthSignTypedData(Address, serde_json::Value),
/// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md).
#[serde(rename = "eth_signTypedData_v3")]
EthSignTypedDataV3(Address, serde_json::Value),
/// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md), and includes full support of arrays and recursive data structures.
#[serde(rename = "eth_signTypedData_v4")]
EthSignTypedDataV4(Address, alloy_dyn_abi::TypedData),
#[serde(rename = "eth_sendTransaction", with = "sequence")]
EthSendTransaction(Box<WithOtherFields<TransactionRequest>>),
#[serde(rename = "eth_sendTransactionSync", with = "sequence")]
EthSendTransactionSync(Box<WithOtherFields<TransactionRequest>>),
#[serde(rename = "eth_sendRawTransaction", with = "sequence")]
EthSendRawTransaction(Bytes),
#[serde(rename = "eth_sendRawTransactionSync", with = "sequence")]
EthSendRawTransactionSync(Bytes),
#[serde(rename = "eth_call")]
EthCall(
WithOtherFields<TransactionRequest>,
#[serde(default)] Option<BlockId>,
#[serde(default)] Option<StateOverride>,
#[serde(default)] Option<Box<BlockOverrides>>,
),
#[serde(rename = "eth_simulateV1")]
EthSimulateV1(SimulatePayload, #[serde(default)] Option<BlockId>),
#[serde(rename = "eth_createAccessList")]
EthCreateAccessList(WithOtherFields<TransactionRequest>, #[serde(default)] Option<BlockId>),
#[serde(rename = "eth_estimateGas")]
EthEstimateGas(
WithOtherFields<TransactionRequest>,
#[serde(default)] Option<BlockId>,
#[serde(default)] Option<StateOverride>,
#[serde(default)] Option<Box<BlockOverrides>>,
),
#[serde(rename = "eth_fillTransaction", with = "sequence")]
EthFillTransaction(WithOtherFields<TransactionRequest>),
#[serde(rename = "eth_getTransactionByHash", with = "sequence")]
EthGetTransactionByHash(TxHash),
/// Returns the blob for a given blob versioned hash.
#[serde(rename = "anvil_getBlobByHash", with = "sequence")]
GetBlobByHash(B256),
/// Returns the blobs for a given transaction hash.
#[serde(rename = "anvil_getBlobsByTransactionHash", with = "sequence")]
GetBlobByTransactionHash(TxHash),
/// Returns the blobs for a given transaction hash.
#[serde(rename = "anvil_getBlobSidecarsByBlockId", with = "sequence")]
GetBlobSidecarsByBlockId(BlockId),
/// Returns the genesis time for the chain
#[serde(rename = "anvil_getGenesisTime", with = "empty_params")]
GetGenesisTime(()),
#[serde(rename = "eth_getTransactionByBlockHashAndIndex")]
EthGetTransactionByBlockHashAndIndex(B256, Index),
#[serde(rename = "eth_getTransactionByBlockNumberAndIndex")]
EthGetTransactionByBlockNumberAndIndex(BlockNumber, Index),
#[serde(rename = "eth_getRawTransactionByHash", with = "sequence")]
EthGetRawTransactionByHash(TxHash),
#[serde(rename = "eth_getRawTransactionByBlockHashAndIndex")]
EthGetRawTransactionByBlockHashAndIndex(B256, Index),
#[serde(rename = "eth_getRawTransactionByBlockNumberAndIndex")]
EthGetRawTransactionByBlockNumberAndIndex(BlockNumber, Index),
#[serde(rename = "eth_getTransactionReceipt", with = "sequence")]
EthGetTransactionReceipt(B256),
#[serde(rename = "eth_getBlockReceipts", with = "sequence")]
EthGetBlockReceipts(BlockId),
#[serde(rename = "eth_getUncleByBlockHashAndIndex")]
EthGetUncleByBlockHashAndIndex(B256, Index),
#[serde(rename = "eth_getUncleByBlockNumberAndIndex")]
EthGetUncleByBlockNumberAndIndex(
#[serde(deserialize_with = "lenient_block_number::lenient_block_number")] BlockNumber,
Index,
),
#[serde(rename = "eth_getLogs", with = "sequence")]
EthGetLogs(Filter),
/// Creates a filter object, based on filter options, to notify when the state changes (logs).
#[serde(rename = "eth_newFilter", with = "sequence")]
EthNewFilter(Filter),
/// Polling method for a filter, which returns an array of logs which occurred since last poll.
#[serde(rename = "eth_getFilterChanges", with = "sequence")]
EthGetFilterChanges(String),
/// Creates a filter in the node, to notify when a new block arrives.
/// To check if the state has changed, call `eth_getFilterChanges`.
#[serde(rename = "eth_newBlockFilter", with = "empty_params")]
EthNewBlockFilter(()),
/// Creates a filter in the node, to notify when new pending transactions arrive.
/// To check if the state has changed, call `eth_getFilterChanges`.
#[serde(rename = "eth_newPendingTransactionFilter", with = "empty_params")]
EthNewPendingTransactionFilter(()),
/// Returns an array of all logs matching filter with given id.
#[serde(rename = "eth_getFilterLogs", with = "sequence")]
EthGetFilterLogs(String),
/// Removes the filter, returns true if the filter was installed
#[serde(rename = "eth_uninstallFilter", with = "sequence")]
EthUninstallFilter(String),
#[serde(rename = "eth_getWork", with = "empty_params")]
EthGetWork(()),
#[serde(rename = "eth_submitWork")]
EthSubmitWork(B64, B256, B256),
#[serde(rename = "eth_submitHashrate")]
EthSubmitHashRate(U256, B256),
#[serde(rename = "eth_feeHistory")]
EthFeeHistory(
#[serde(deserialize_with = "deserialize_number")] U256,
BlockNumber,
#[serde(default)] Vec<f64>,
),
#[serde(rename = "eth_syncing", with = "empty_params")]
EthSyncing(()),
#[serde(rename = "eth_config", with = "empty_params")]
EthConfig(()),
/// geth's `debug_getRawTransaction` endpoint
#[serde(rename = "debug_getRawTransaction", with = "sequence")]
DebugGetRawTransaction(TxHash),
/// geth's `debug_traceTransaction` endpoint
#[serde(rename = "debug_traceTransaction")]
DebugTraceTransaction(B256, #[serde(default)] GethDebugTracingOptions),
/// geth's `debug_traceCall` endpoint
#[serde(rename = "debug_traceCall")]
DebugTraceCall(
WithOtherFields<TransactionRequest>,
#[serde(default)] Option<BlockId>,
#[serde(default)] GethDebugTracingCallOptions,
),
/// reth's `debug_codeByHash` endpoint
#[serde(rename = "debug_codeByHash")]
DebugCodeByHash(B256, #[serde(default)] Option<BlockId>),
/// reth's `debug_dbGet` endpoint
#[serde(rename = "debug_dbGet")]
DebugDbGet(String),
/// Trace transaction endpoint for parity's `trace_transaction`
#[serde(rename = "trace_transaction", with = "sequence")]
TraceTransaction(B256),
/// Trace transaction endpoint for parity's `trace_block`
#[serde(
rename = "trace_block",
deserialize_with = "lenient_block_number::lenient_block_number_seq"
)]
TraceBlock(BlockNumber),
// Return filtered traces over blocks
#[serde(rename = "trace_filter", with = "sequence")]
TraceFilter(TraceFilter),
// Custom endpoints, they're not extracted to a separate type out of serde convenience
/// send transactions impersonating specific account and contract addresses.
#[serde(
rename = "anvil_impersonateAccount",
alias = "hardhat_impersonateAccount",
with = "sequence"
)]
ImpersonateAccount(Address),
/// Stops impersonating an account if previously set with `anvil_impersonateAccount`
#[serde(
rename = "anvil_stopImpersonatingAccount",
alias = "hardhat_stopImpersonatingAccount",
with = "sequence"
)]
StopImpersonatingAccount(Address),
/// Will make every account impersonated
#[serde(
rename = "anvil_autoImpersonateAccount",
alias = "hardhat_autoImpersonateAccount",
with = "sequence"
)]
AutoImpersonateAccount(bool),
/// Registers a signature/address pair for faking `ecrecover` results
#[serde(rename = "anvil_impersonateSignature", with = "sequence")]
ImpersonateSignature(Bytes, Address),
/// Returns true if automatic mining is enabled, and false.
#[serde(rename = "anvil_getAutomine", alias = "hardhat_getAutomine", with = "empty_params")]
GetAutoMine(()),
/// Mines a series of blocks
#[serde(rename = "anvil_mine", alias = "hardhat_mine")]
Mine(
/// Number of blocks to mine, if not set `1` block is mined
#[serde(default, deserialize_with = "deserialize_number_opt")]
Option<U256>,
/// The time interval between each block in seconds, defaults to `1` seconds
/// The interval is applied only to blocks mined in the given method invocation, not to
/// blocks mined afterwards. Set this to `0` to instantly mine _all_ blocks
#[serde(default, deserialize_with = "deserialize_number_opt")]
Option<U256>,
),
/// Enables or disables, based on the single boolean argument, the automatic mining of new
/// blocks with each new transaction submitted to the network.
#[serde(rename = "anvil_setAutomine", alias = "evm_setAutomine", with = "sequence")]
SetAutomine(bool),
/// Sets the mining behavior to interval with the given interval (seconds)
#[serde(rename = "anvil_setIntervalMining", alias = "evm_setIntervalMining", with = "sequence")]
SetIntervalMining(u64),
/// Gets the current mining behavior
#[serde(rename = "anvil_getIntervalMining", with = "empty_params")]
GetIntervalMining(()),
/// Removes transactions from the pool
#[serde(rename = "anvil_dropTransaction", alias = "hardhat_dropTransaction", with = "sequence")]
DropTransaction(B256),
/// Removes transactions from the pool
#[serde(
rename = "anvil_dropAllTransactions",
alias = "hardhat_dropAllTransactions",
with = "empty_params"
)]
DropAllTransactions(),
/// Reset the fork to a fresh forked state, and optionally update the fork config
#[serde(rename = "anvil_reset", alias = "hardhat_reset")]
Reset(#[serde(default)] Option<Params<Option<Forking>>>),
/// Sets the backend rpc url
#[serde(rename = "anvil_setRpcUrl", with = "sequence")]
SetRpcUrl(String),
/// Modifies the balance of an account.
#[serde(
rename = "anvil_setBalance",
alias = "hardhat_setBalance",
alias = "tenderly_setBalance"
)]
SetBalance(Address, #[serde(deserialize_with = "deserialize_number")] U256),
/// Increases the balance of an account.
#[serde(
rename = "anvil_addBalance",
alias = "hardhat_addBalance",
alias = "tenderly_addBalance"
)]
AddBalance(Address, #[serde(deserialize_with = "deserialize_number")] U256),
/// Modifies the ERC20 balance of an account.
#[serde(
rename = "anvil_dealERC20",
alias = "hardhat_dealERC20",
alias = "anvil_setERC20Balance"
)]
DealERC20(Address, Address, #[serde(deserialize_with = "deserialize_number")] U256),
/// Sets the ERC20 allowance for a spender
#[serde(rename = "anvil_setERC20Allowance")]
SetERC20Allowance(
Address,
Address,
Address,
#[serde(deserialize_with = "deserialize_number")] U256,
),
/// Sets the code of a contract
#[serde(rename = "anvil_setCode", alias = "hardhat_setCode")]
SetCode(Address, Bytes),
/// Sets the nonce of an address
#[serde(rename = "anvil_setNonce", alias = "hardhat_setNonce", alias = "evm_setAccountNonce")]
SetNonce(Address, #[serde(deserialize_with = "deserialize_number")] U256),
/// Writes a single slot of the account's storage
#[serde(rename = "anvil_setStorageAt", alias = "hardhat_setStorageAt")]
SetStorageAt(
Address,
/// slot
U256,
/// value
B256,
),
/// Sets the coinbase address
#[serde(rename = "anvil_setCoinbase", alias = "hardhat_setCoinbase", with = "sequence")]
SetCoinbase(Address),
/// Sets the chain id
#[serde(rename = "anvil_setChainId", with = "sequence")]
SetChainId(u64),
/// Enable or disable logging
#[serde(
rename = "anvil_setLoggingEnabled",
alias = "hardhat_setLoggingEnabled",
with = "sequence"
)]
SetLogging(bool),
/// Set the minimum gas price for the node
#[serde(
rename = "anvil_setMinGasPrice",
alias = "hardhat_setMinGasPrice",
deserialize_with = "deserialize_number_seq"
)]
SetMinGasPrice(U256),
/// Sets the base fee of the next block
#[serde(
rename = "anvil_setNextBlockBaseFeePerGas",
alias = "hardhat_setNextBlockBaseFeePerGas",
deserialize_with = "deserialize_number_seq"
)]
SetNextBlockBaseFeePerGas(U256),
/// Sets the specific timestamp
/// Accepts timestamp (Unix epoch) with millisecond precision and returns the number of seconds
/// between the given timestamp and the current time.
#[serde(
rename = "anvil_setTime",
alias = "evm_setTime",
deserialize_with = "deserialize_number_seq"
)]
EvmSetTime(U256),
/// Serializes the current state (including contracts code, contract's storage, accounts
/// properties, etc.) into a saveable data blob
#[serde(rename = "anvil_dumpState", alias = "hardhat_dumpState")]
DumpState(#[serde(default)] Option<Params<Option<bool>>>),
/// Adds state previously dumped with `DumpState` to the current chain
#[serde(rename = "anvil_loadState", alias = "hardhat_loadState", with = "sequence")]
LoadState(Bytes),
/// Retrieves the Anvil node configuration params
#[serde(rename = "anvil_nodeInfo", with = "empty_params")]
NodeInfo(()),
/// Retrieves the Anvil node metadata.
#[serde(rename = "anvil_metadata", alias = "hardhat_metadata", with = "empty_params")]
AnvilMetadata(()),
// Ganache compatible calls
/// Snapshot the state of the blockchain at the current block.
///
/// Ref <https://github.com/trufflesuite/ganache/blob/ef1858d5d6f27e4baeb75cccd57fb3dc77a45ae8/src/chains/ethereum/ethereum/RPC-METHODS.md#evm_snapshot>
#[serde(rename = "anvil_snapshot", alias = "evm_snapshot", with = "empty_params")]
EvmSnapshot(()),
/// Revert the state of the blockchain to a previous snapshot.
/// Takes a single parameter, which is the snapshot id to revert to.
///
/// Ref <https://github.com/trufflesuite/ganache/blob/ef1858d5d6f27e4baeb75cccd57fb3dc77a45ae8/src/chains/ethereum/ethereum/RPC-METHODS.md#evm_revert>
#[serde(
rename = "anvil_revert",
alias = "evm_revert",
deserialize_with = "deserialize_number_seq"
)]
EvmRevert(U256),
/// Jump forward in time by the given amount of time, in seconds.
#[serde(
rename = "anvil_increaseTime",
alias = "evm_increaseTime",
deserialize_with = "deserialize_number_seq"
)]
EvmIncreaseTime(U256),
/// Similar to `evm_increaseTime` but takes the exact timestamp that you want in the next block
#[serde(
rename = "anvil_setNextBlockTimestamp",
alias = "evm_setNextBlockTimestamp",
deserialize_with = "deserialize_number_seq"
)]
EvmSetNextBlockTimeStamp(U256),
/// Set the exact gas limit that you want in the next block
#[serde(
rename = "anvil_setBlockGasLimit",
alias = "evm_setBlockGasLimit",
deserialize_with = "deserialize_number_seq"
)]
EvmSetBlockGasLimit(U256),
/// Similar to `evm_increaseTime` but takes sets a block timestamp `interval`.
///
/// The timestamp of the next block will be computed as `lastBlock_timestamp + interval`.
#[serde(rename = "anvil_setBlockTimestampInterval", with = "sequence")]
EvmSetBlockTimeStampInterval(u64),
/// Removes a `anvil_setBlockTimestampInterval` if it exists
#[serde(rename = "anvil_removeBlockTimestampInterval", with = "empty_params")]
EvmRemoveBlockTimeStampInterval(()),
/// Mine a single block
#[serde(rename = "evm_mine")]
EvmMine(#[serde(default)] Option<Params<Option<MineOptions>>>),
/// Mine a single block and return detailed data
///
/// This behaves exactly as `EvmMine` but returns different output, for compatibility reasons
/// this is a separate call since `evm_mine` is not an anvil original.
#[serde(rename = "anvil_mine_detailed", alias = "evm_mine_detailed")]
EvmMineDetailed(#[serde(default)] Option<Params<Option<MineOptions>>>),
/// Execute a transaction regardless of signature status
#[serde(rename = "eth_sendUnsignedTransaction", with = "sequence")]
EthSendUnsignedTransaction(Box<WithOtherFields<TransactionRequest>>),
/// Turn on call traces for transactions that are returned to the user when they execute a
/// transaction (instead of just txhash/receipt)
#[serde(rename = "anvil_enableTraces", with = "empty_params")]
EnableTraces(()),
/// Returns the number of transactions currently pending for inclusion in the next block(s), as
/// well as the ones that are being scheduled for future execution only.
/// Ref: <https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_status>
#[serde(rename = "txpool_status", with = "empty_params")]
TxPoolStatus(()),
/// Returns a summary of all the transactions currently pending for inclusion in the next
/// block(s), as well as the ones that are being scheduled for future execution only.
/// Ref: <https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_inspect>
#[serde(rename = "txpool_inspect", with = "empty_params")]
TxPoolInspect(()),
/// Returns the details of all transactions currently pending for inclusion in the next
/// block(s), as well as the ones that are being scheduled for future execution only.
/// Ref: <https://geth.ethereum.org/docs/rpc/ns-txpool#txpool_content>
#[serde(rename = "txpool_content", with = "empty_params")]
TxPoolContent(()),
/// Otterscan's `ots_getApiLevel` endpoint
/// Otterscan currently requires this endpoint, even though it's not part of the ots_*
/// <https://github.com/otterscan/otterscan/blob/071d8c55202badf01804f6f8d53ef9311d4a9e47/src/useProvider.ts#L71>
/// Related upstream issue: <https://github.com/otterscan/otterscan/issues/1081>
#[serde(rename = "erigon_getHeaderByNumber")]
ErigonGetHeaderByNumber(
#[serde(deserialize_with = "lenient_block_number::lenient_block_number_seq")] BlockNumber,
),
/// Otterscan's `ots_getApiLevel` endpoint
/// Used as a simple API versioning scheme for the ots_* namespace
#[serde(rename = "ots_getApiLevel", with = "empty_params")]
OtsGetApiLevel(()),
/// Otterscan's `ots_getInternalOperations` endpoint
/// Traces internal ETH transfers, contracts creation (CREATE/CREATE2) and self-destructs for a
/// certain transaction.
#[serde(rename = "ots_getInternalOperations", with = "sequence")]
OtsGetInternalOperations(B256),
/// Otterscan's `ots_hasCode` endpoint
/// Check if an ETH address contains code at a certain block number.
#[serde(rename = "ots_hasCode")]
OtsHasCode(
Address,
#[serde(deserialize_with = "lenient_block_number::lenient_block_number", default)]
BlockNumber,
),
/// Otterscan's `ots_traceTransaction` endpoint
/// Trace a transaction and generate a trace call tree.
#[serde(rename = "ots_traceTransaction", with = "sequence")]
OtsTraceTransaction(B256),
/// Otterscan's `ots_getTransactionError` endpoint
/// Given a transaction hash, returns its raw revert reason.
#[serde(rename = "ots_getTransactionError", with = "sequence")]
OtsGetTransactionError(B256),
/// Otterscan's `ots_getBlockDetails` endpoint
/// Given a block number, return its data. Similar to the standard eth_getBlockByNumber/Hash
/// method, but can be optimized by excluding unnecessary data such as transactions and
/// logBloom
#[serde(rename = "ots_getBlockDetails")]
OtsGetBlockDetails(
#[serde(deserialize_with = "lenient_block_number::lenient_block_number_seq", default)]
BlockNumber,
),
/// Otterscan's `ots_getBlockDetails` endpoint
/// Same as `ots_getBlockDetails`, but receiving a block hash instead of number
#[serde(rename = "ots_getBlockDetailsByHash", with = "sequence")]
OtsGetBlockDetailsByHash(B256),
/// Otterscan's `ots_getBlockTransactions` endpoint
/// Gets paginated transaction data for a certain block. Return data is similar to
/// eth_getBlockBy* + eth_getTransactionReceipt.
#[serde(rename = "ots_getBlockTransactions")]
OtsGetBlockTransactions(u64, usize, usize),
/// Otterscan's `ots_searchTransactionsBefore` endpoint
/// Address history navigation. searches backwards from certain point in time.
#[serde(rename = "ots_searchTransactionsBefore")]
OtsSearchTransactionsBefore(Address, u64, usize),
/// Otterscan's `ots_searchTransactionsAfter` endpoint
/// Address history navigation. searches forward from certain point in time.
#[serde(rename = "ots_searchTransactionsAfter")]
OtsSearchTransactionsAfter(Address, u64, usize),
/// Otterscan's `ots_getTransactionBySenderAndNonce` endpoint
/// Given a sender address and a nonce, returns the tx hash or null if not found. It returns
/// only the tx hash on success, you can use the standard eth_getTransactionByHash after that
/// to get the full transaction data.
#[serde(rename = "ots_getTransactionBySenderAndNonce")]
OtsGetTransactionBySenderAndNonce(
Address,
#[serde(deserialize_with = "deserialize_number")] U256,
),
/// Returns the transaction by sender and nonce
/// Returns the full transaction data.
#[serde(rename = "eth_getTransactionBySenderAndNonce")]
EthGetTransactionBySenderAndNonce(
Address,
#[serde(deserialize_with = "deserialize_number")] U256,
),
/// Otterscan's `ots_getTransactionBySenderAndNonce` endpoint
/// Given an ETH contract address, returns the tx hash and the direct address who created the
/// contract.
#[serde(rename = "ots_getContractCreator", with = "sequence")]
OtsGetContractCreator(Address),
/// Removes transactions from the pool by sender origin.
#[serde(rename = "anvil_removePoolTransactions", with = "sequence")]
RemovePoolTransactions(Address),
/// Reorg the chain
#[serde(rename = "anvil_reorg")]
Reorg(ReorgOptions),
/// Rollback the chain
#[serde(rename = "anvil_rollback", with = "sequence")]
Rollback(Option<u64>),
/// Wallet
#[serde(rename = "wallet_getCapabilities", with = "empty_params")]
WalletGetCapabilities(()),
/// Add an address to the delegation capability of the wallet
#[serde(rename = "anvil_addCapability", with = "sequence")]
AnvilAddCapability(Address),
/// Set the executor (sponsor) wallet
#[serde(rename = "anvil_setExecutor", with = "sequence")]
AnvilSetExecutor(String),
}
/// Represents ethereum JSON-RPC API
#[derive(Clone, Debug, PartialEq, Eq, serde::Deserialize)]
#[serde(tag = "method", content = "params")]
pub enum EthPubSub {
/// Subscribe to an eth subscription
#[serde(rename = "eth_subscribe")]
EthSubscribe(SubscriptionKind, #[serde(default)] Box<SubscriptionParams>),
/// Unsubscribe from an eth subscription
#[serde(rename = "eth_unsubscribe", with = "sequence")]
EthUnSubscribe(SubscriptionId),
}
/// Container type for either a request or a pub sub
#[derive(Clone, Debug, serde::Deserialize)]
#[serde(untagged)]
pub enum EthRpcCall {
Request(Box<EthRequest>),
PubSub(EthPubSub),
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_web3_client_version() {
let s = r#"{"method": "web3_clientVersion", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_web3_sha3() {
let s = r#"{"method": "web3_sha3", "params":["0x68656c6c6f20776f726c64"]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_accounts() {
let s = r#"{"method": "eth_accounts", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_network_id() {
let s = r#"{"method": "eth_networkId", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_get_proof() {
let s = r#"{"method":"eth_getProof","params":["0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842",["0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"],"latest"]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_chain_id() {
let s = r#"{"method": "eth_chainId", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_net_listening() {
let s = r#"{"method": "net_listening", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_block_number() {
let s = r#"{"method": "eth_blockNumber", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_max_priority_fee() {
let s = r#"{"method": "eth_maxPriorityFeePerGas", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_eth_syncing() {
let s = r#"{"method": "eth_syncing", "params":[]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_custom_impersonate_account() {
let s = r#"{"method": "anvil_impersonateAccount", "params":
["0xd84de507f3fada7df80908082d3239466db55a71"]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_custom_stop_impersonate_account() {
let s = r#"{"method": "anvil_stopImpersonatingAccount", "params":
["0x364d6D0333432C3Ac016Ca832fb8594A8cE43Ca6"]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_custom_auto_impersonate_account() {
let s = r#"{"method": "anvil_autoImpersonateAccount", "params": [true]}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_custom_get_automine() {
let s = r#"{"method": "anvil_getAutomine", "params": []}"#;
let value: serde_json::Value = serde_json::from_str(s).unwrap();
let _req = serde_json::from_value::<EthRequest>(value).unwrap();
}
#[test]
fn test_custom_mine() {
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/wallet.rs | crates/anvil/core/src/eth/wallet.rs | pub use alloy_eip5792::*;
/// Errors that can occur while validating delegated (EIP-7702 style) transaction
/// requests before the sequencer signs and submits them.
#[derive(Debug, thiserror::Error)]
pub enum WalletError {
    /// The transaction value is not 0.
    ///
    /// The value should be 0 to prevent draining the sequencer.
    #[error("transaction value must be zero for delegated transactions")]
    ValueNotZero,
    /// The from field is set on the transaction.
    ///
    /// Requests with the from field are rejected, since it is implied that it will always be the
    /// sequencer.
    #[error("transaction 'from' field should not be set for delegated transactions")]
    FromSet,
    /// The nonce field is set on the transaction.
    ///
    /// Requests with the nonce field set are rejected, as this is managed by the sequencer.
    #[error("transaction nonce should not be set for delegated transactions")]
    NonceSet,
    /// An authorization item was invalid.
    ///
    /// The item is invalid if it tries to delegate an account to a contract that is not
    /// whitelisted.
    #[error("invalid authorization address: contract is not whitelisted for delegation")]
    InvalidAuthorization,
    /// The to field of the transaction was invalid.
    ///
    /// The destination is invalid if:
    ///
    /// - There is no bytecode at the destination, or
    /// - The bytecode is not an EIP-7702 delegation designator, or
    /// - The delegation designator points to a contract that is not whitelisted
    #[error("transaction destination is not a valid delegated account")]
    IllegalDestination,
    /// The transaction request was invalid.
    ///
    /// This is likely an internal error, as most of the request is built by the sequencer.
    #[error("invalid transaction request format")]
    InvalidTransactionRequest,
    /// An internal error occurred.
    #[error("internal server error occurred")]
    InternalError,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/serde_helpers.rs | crates/anvil/core/src/eth/serde_helpers.rs | //! custom serde helper functions
pub mod sequence {
use serde::{
Deserialize, Deserializer, Serialize, Serializer, de::DeserializeOwned, ser::SerializeSeq,
};
pub fn serialize<S, T>(val: &T, s: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
T: Serialize,
{
let mut seq = s.serialize_seq(Some(1))?;
seq.serialize_element(val)?;
seq.end()
}
pub fn deserialize<'de, T, D>(d: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
T: DeserializeOwned,
{
let mut seq = Vec::<T>::deserialize(d)?;
if seq.len() != 1 {
return Err(serde::de::Error::custom(format!(
"expected params sequence with length 1 but got {}",
seq.len()
)));
}
Ok(seq.remove(0))
}
}
/// A module that deserializes `[]` optionally
pub mod empty_params {
use serde::{Deserialize, Deserializer};
pub fn deserialize<'de, D>(d: D) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
let seq = Option::<Vec<()>>::deserialize(d)?.unwrap_or_default();
if !seq.is_empty() {
return Err(serde::de::Error::custom(format!(
"expected params sequence with length 0 but got {}",
seq.len()
)));
}
Ok(())
}
}
/// A module that deserializes either a BlockNumberOrTag, or a simple number.
pub mod lenient_block_number {
    pub use alloy_eips::eip1898::LenientBlockNumberOrTag;
    use alloy_rpc_types::BlockNumberOrTag;
    use serde::{Deserialize, Deserializer};

    /// deserializes either a BlockNumberOrTag, or a simple number.
    pub use alloy_eips::eip1898::lenient_block_number_or_tag::deserialize as lenient_block_number;

    /// Same as `lenient_block_number` but requires to be `[num; 1]`
    pub fn lenient_block_number_seq<'de, D>(deserializer: D) -> Result<BlockNumberOrTag, D::Error>
    where
        D: Deserializer<'de>,
    {
        // A `[T; 1]` only deserializes from exactly one element, so the `[0]`
        // index cannot panic here.
        let num = <[LenientBlockNumberOrTag; 1]>::deserialize(deserializer)?[0].into();
        Ok(num)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/core/src/eth/transaction/mod.rs | crates/anvil/core/src/eth/transaction/mod.rs | //! Transaction related types
use alloy_consensus::{
Signed, Transaction, TxEnvelope, Typed2718, crypto::RecoveryError, transaction::Recovered,
};
use alloy_eips::eip2718::Encodable2718;
use alloy_primitives::{Address, B256, Bytes, TxHash};
use alloy_rlp::{Decodable, Encodable};
use alloy_rpc_types::Transaction as RpcTransaction;
use bytes::BufMut;
use foundry_evm::traces::CallTraceNode;
use foundry_primitives::FoundryTxEnvelope;
use revm::interpreter::InstructionResult;
use serde::{Deserialize, Serialize};
use std::ops::Deref;
/// A wrapper for [FoundryTxEnvelope] that allows impersonating accounts.
///
/// This is a helper that carries the `impersonated` sender so that the right hash
/// [FoundryTxEnvelope::impersonated_hash] can be created.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct MaybeImpersonatedTransaction {
    /// The wrapped transaction envelope.
    transaction: FoundryTxEnvelope,
    /// The sender to impersonate; `None` means the regular signature is used.
    impersonated_sender: Option<Address>,
}

impl Typed2718 for MaybeImpersonatedTransaction {
    /// Returns the EIP-2718 type byte of the wrapped transaction.
    fn ty(&self) -> u8 {
        self.transaction.ty()
    }
}
impl MaybeImpersonatedTransaction {
    /// Creates a new wrapper for the given transaction
    pub fn new(transaction: FoundryTxEnvelope) -> Self {
        Self { transaction, impersonated_sender: None }
    }

    /// Creates a new impersonated transaction wrapper using the given sender
    pub fn impersonated(transaction: FoundryTxEnvelope, impersonated_sender: Address) -> Self {
        Self { transaction, impersonated_sender: Some(impersonated_sender) }
    }

    /// Recovers the Ethereum address which was used to sign the transaction.
    pub fn recover(&self) -> Result<Address, RecoveryError> {
        // An impersonated sender short-circuits signature recovery.
        if let Some(sender) = self.impersonated_sender {
            return Ok(sender);
        }
        self.transaction.recover()
    }

    /// Returns whether the transaction is impersonated
    pub fn is_impersonated(&self) -> bool {
        self.impersonated_sender.is_some()
    }

    /// Returns the hash of the transaction
    pub fn hash(&self) -> B256 {
        // Impersonated transactions use a sender-derived hash, since the
        // embedded signature is not a real one.
        if let Some(sender) = self.impersonated_sender {
            return self.transaction.impersonated_hash(sender);
        }
        self.transaction.hash()
    }

    /// Converts the transaction into an [`RpcTransaction`]
    pub fn into_rpc_transaction(self) -> RpcTransaction {
        let hash = self.hash();
        let from = self.recover().unwrap_or_default();
        let envelope = self.transaction.try_into_eth().expect("cant build deposit transactions");
        // NOTE: we must update the hash because the tx can be impersonated, this requires forcing
        // the hash
        let inner_envelope = match envelope {
            TxEnvelope::Legacy(t) => {
                let (tx, sig, _) = t.into_parts();
                TxEnvelope::Legacy(Signed::new_unchecked(tx, sig, hash))
            }
            TxEnvelope::Eip2930(t) => {
                let (tx, sig, _) = t.into_parts();
                TxEnvelope::Eip2930(Signed::new_unchecked(tx, sig, hash))
            }
            TxEnvelope::Eip1559(t) => {
                let (tx, sig, _) = t.into_parts();
                TxEnvelope::Eip1559(Signed::new_unchecked(tx, sig, hash))
            }
            TxEnvelope::Eip4844(t) => {
                let (tx, sig, _) = t.into_parts();
                TxEnvelope::Eip4844(Signed::new_unchecked(tx, sig, hash))
            }
            TxEnvelope::Eip7702(t) => {
                let (tx, sig, _) = t.into_parts();
                TxEnvelope::Eip7702(Signed::new_unchecked(tx, sig, hash))
            }
        };
        // Block/receipt fields are unset: the transaction is not tied to a block here.
        RpcTransaction {
            block_hash: None,
            block_number: None,
            transaction_index: None,
            effective_gas_price: None,
            inner: Recovered::new_unchecked(inner_envelope, from),
        }
    }
}
// The impls below delegate to the wrapped envelope; the impersonated sender is
// not part of any wire format.
impl Encodable2718 for MaybeImpersonatedTransaction {
    fn encode_2718_len(&self) -> usize {
        self.transaction.encode_2718_len()
    }

    fn encode_2718(&self, out: &mut dyn BufMut) {
        self.transaction.encode_2718(out)
    }
}

impl Encodable for MaybeImpersonatedTransaction {
    fn encode(&self, out: &mut dyn bytes::BufMut) {
        self.transaction.encode(out)
    }
}

impl From<MaybeImpersonatedTransaction> for FoundryTxEnvelope {
    fn from(value: MaybeImpersonatedTransaction) -> Self {
        value.transaction
    }
}

impl From<FoundryTxEnvelope> for MaybeImpersonatedTransaction {
    fn from(value: FoundryTxEnvelope) -> Self {
        Self::new(value)
    }
}

// Decoding always yields a non-impersonated wrapper.
impl Decodable for MaybeImpersonatedTransaction {
    fn decode(buf: &mut &[u8]) -> alloy_rlp::Result<Self> {
        FoundryTxEnvelope::decode(buf).map(Self::new)
    }
}

impl AsRef<FoundryTxEnvelope> for MaybeImpersonatedTransaction {
    fn as_ref(&self) -> &FoundryTxEnvelope {
        &self.transaction
    }
}

impl Deref for MaybeImpersonatedTransaction {
    type Target = FoundryTxEnvelope;

    fn deref(&self) -> &Self::Target {
        &self.transaction
    }
}

impl From<MaybeImpersonatedTransaction> for RpcTransaction {
    fn from(value: MaybeImpersonatedTransaction) -> Self {
        value.into_rpc_transaction()
    }
}
/// Queued transaction
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct PendingTransaction {
    /// The actual transaction
    pub transaction: MaybeImpersonatedTransaction,
    /// the recovered sender of this transaction
    sender: Address,
    /// hash of `transaction`, so it can easily be reused with encoding and hashing again
    hash: TxHash,
}

impl PendingTransaction {
    /// Creates a new pending transaction, recovering and caching the sender and hash.
    pub fn new(transaction: FoundryTxEnvelope) -> Result<Self, RecoveryError> {
        let sender = transaction.recover()?;
        let hash = transaction.hash();
        Ok(Self { transaction: MaybeImpersonatedTransaction::new(transaction), sender, hash })
    }

    /// Creates a pending transaction for an impersonated sender; no signature
    /// recovery is performed and the impersonated hash is cached.
    pub fn with_impersonated(transaction: FoundryTxEnvelope, sender: Address) -> Self {
        let hash = transaction.impersonated_hash(sender);
        Self {
            transaction: MaybeImpersonatedTransaction::impersonated(transaction, sender),
            sender,
            hash,
        }
    }

    /// Converts a [`MaybeImpersonatedTransaction`] into a [`PendingTransaction`].
    pub fn from_maybe_impersonated(
        transaction: MaybeImpersonatedTransaction,
    ) -> Result<Self, RecoveryError> {
        if let Some(impersonated) = transaction.impersonated_sender {
            Ok(Self::with_impersonated(transaction.transaction, impersonated))
        } else {
            Self::new(transaction.transaction)
        }
    }

    /// Returns the transaction's nonce.
    pub fn nonce(&self) -> u64 {
        self.transaction.nonce()
    }

    /// Returns the cached transaction hash.
    pub fn hash(&self) -> &TxHash {
        &self.hash
    }

    /// Returns the cached sender address.
    pub fn sender(&self) -> &Address {
        &self.sender
    }
}
/// Represents all relevant information of an executed transaction
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct TransactionInfo {
    /// Hash of the executed transaction.
    pub transaction_hash: B256,
    /// Index of the transaction within its block.
    pub transaction_index: u64,
    /// Sender of the transaction.
    pub from: Address,
    /// Recipient; `None` for contract creations.
    pub to: Option<Address>,
    /// Address of the deployed contract, if any.
    pub contract_address: Option<Address>,
    /// Call traces recorded during execution.
    pub traces: Vec<CallTraceNode>,
    /// The instruction result the EVM exited with.
    pub exit: InstructionResult,
    /// Output/return data, if any.
    pub out: Option<Bytes>,
    /// Nonce used by the transaction.
    pub nonce: u64,
    /// Gas used by the transaction.
    pub gas_used: u64,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/config.rs | crates/anvil/server/src/config.rs | use crate::HeaderValue;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::str::FromStr;
/// Additional server options.
// With the `clap` feature these doc comments double as CLI help text.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "clap", derive(clap::Parser), command(next_help_heading = "Server options"))]
pub struct ServerConfig {
    /// The cors `allow_origin` header
    #[cfg_attr(feature = "clap", arg(long, default_value = "*"))]
    pub allow_origin: HeaderValueWrapper,
    /// Disable CORS.
    #[cfg_attr(feature = "clap", arg(long, conflicts_with = "allow_origin"))]
    pub no_cors: bool,
    /// Disable the default request body size limit. At time of writing the default limit is 2MB.
    #[cfg_attr(feature = "clap", arg(long))]
    pub no_request_size_limit: bool,
}
impl ServerConfig {
    /// Sets the "allow origin" header for CORS.
    pub fn with_allow_origin(mut self, allow_origin: impl Into<HeaderValueWrapper>) -> Self {
        self.allow_origin = allow_origin.into();
        self
    }

    /// Whether to enable CORS.
    pub fn set_cors(mut self, cors: bool) -> Self {
        // Stored inverted: the config flag disables CORS.
        self.no_cors = !cors;
        self
    }
}

impl Default for ServerConfig {
    fn default() -> Self {
        Self {
            // "*" is a statically valid header value, so the parse cannot fail.
            allow_origin: "*".parse::<HeaderValue>().unwrap().into(),
            no_cors: false,
            no_request_size_limit: false,
        }
    }
}
/// Newtype around [`HeaderValue`] so it can implement `FromStr` and the serde traits.
#[derive(Clone, Debug)]
pub struct HeaderValueWrapper(pub HeaderValue);

impl FromStr for HeaderValueWrapper {
    type Err = <HeaderValue as FromStr>::Err;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self(s.parse()?))
    }
}

impl Serialize for HeaderValueWrapper {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // `to_str` fails for non-visible-ASCII header values; surface that as a
        // serialization error instead of panicking.
        serializer.serialize_str(self.0.to_str().map_err(serde::ser::Error::custom)?)
    }
}

impl<'de> Deserialize<'de> for HeaderValueWrapper {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        Ok(Self(s.parse().map_err(serde::de::Error::custom)?))
    }
}

impl std::ops::Deref for HeaderValueWrapper {
    type Target = HeaderValue;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl From<HeaderValueWrapper> for HeaderValue {
    fn from(wrapper: HeaderValueWrapper) -> Self {
        wrapper.0
    }
}

impl From<HeaderValue> for HeaderValueWrapper {
    fn from(header: HeaderValue) -> Self {
        Self(header)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/lib.rs | crates/anvil/server/src/lib.rs | //! Bootstrap [axum] RPC servers.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate tracing;
use anvil_rpc::{
error::RpcError,
request::RpcMethodCall,
response::{ResponseResult, RpcResponse},
};
use axum::{
Router,
extract::DefaultBodyLimit,
http::{HeaderValue, Method, header},
routing::{MethodRouter, post},
};
use serde::de::DeserializeOwned;
use std::fmt;
use tower_http::{cors::CorsLayer, trace::TraceLayer};
mod config;
pub use config::ServerConfig;
mod error;
mod handler;
mod pubsub;
pub use pubsub::{PubSubContext, PubSubRpcHandler};
mod ws;
#[cfg(feature = "ipc")]
pub mod ipc;
/// Configures an [`axum::Router`] that handles JSON-RPC calls via both HTTP and WS.
pub fn http_ws_router<Http, Ws>(config: ServerConfig, http: Http, ws: Ws) -> Router
where
    Http: RpcHandler,
    Ws: PubSubRpcHandler,
{
    // POST requests go to the plain HTTP handler; GET requests upgrade to a websocket.
    router_inner(config, post(handler::handle).get(ws::handle_ws), (http, ws))
}

/// Configures an [`axum::Router`] that handles JSON-RPC calls via HTTP.
pub fn http_router<Http>(config: ServerConfig, http: Http) -> Router
where
    Http: RpcHandler,
{
    // The `()` placeholder keeps the state tuple shape shared with the WS variant.
    router_inner(config, post(handler::handle), (http, ()))
}
/// Builds the shared router: one root route, HTTP tracing, optional CORS and
/// an optional lift of axum's default request body size limit.
fn router_inner<S: Clone + Send + Sync + 'static>(
    config: ServerConfig,
    root_method_router: MethodRouter<S>,
    state: S,
) -> Router {
    let ServerConfig { allow_origin, no_cors, no_request_size_limit } = config;

    // Base router: the single root route plus request tracing.
    let mut router = Router::new()
        .route("/", root_method_router)
        .with_state(state)
        .layer(TraceLayer::new_for_http());

    if !no_cors {
        // See [`tower_http::cors`](https://docs.rs/tower-http/latest/tower_http/cors/index.html)
        // for more details.
        let cors = CorsLayer::new()
            .allow_origin(allow_origin.0)
            .allow_headers([header::CONTENT_TYPE])
            .allow_methods([Method::GET, Method::POST]);
        router = router.layer(cors);
    }

    if no_request_size_limit {
        router = router.layer(DefaultBodyLimit::disable());
    }

    router
}
/// Helper trait that is used to execute ethereum rpc calls
#[async_trait::async_trait]
pub trait RpcHandler: Clone + Send + Sync + 'static {
    /// The request type to expect
    type Request: DeserializeOwned + Send + Sync + fmt::Debug;

    /// Invoked when the request was received
    async fn on_request(&self, request: Self::Request) -> ResponseResult;

    /// Invoked for every incoming `RpcMethodCall`
    ///
    /// This will attempt to deserialize a `{ "method" : "<name>", "params": "<params>" }` message
    /// into the `Request` type of this handler. If a `Request` instance was deserialized
    /// successfully, [`Self::on_request`] will be invoked.
    ///
    /// **Note**: override this function if the expected `Request` deviates from `{ "method" :
    /// "<name>", "params": "<params>" }`
    async fn on_call(&self, call: RpcMethodCall) -> RpcResponse {
        trace!(target: "rpc", id = ?call.id , method = ?call.method, params = ?call.params, "received method call");
        let RpcMethodCall { method, params, id, .. } = call;
        // Re-shape the call into the `{method, params}` object the request type expects.
        let params: serde_json::Value = params.into();
        let call = serde_json::json!({
            "method": &method,
            "params": params
        });
        match serde_json::from_value::<Self::Request>(call) {
            Ok(req) => {
                let result = self.on_request(req).await;
                RpcResponse::new(id, result)
            }
            Err(err) => {
                let err = err.to_string();
                // An "unknown variant" error means the method itself is unsupported;
                // any other error points at malformed params.
                if err.contains("unknown variant") {
                    error!(target: "rpc", ?method, "failed to deserialize method due to unknown variant");
                    RpcResponse::new(id, RpcError::method_not_found())
                } else {
                    error!(target: "rpc", ?method, ?err, "failed to deserialize method");
                    RpcResponse::new(id, RpcError::invalid_params(err))
                }
            }
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/pubsub.rs | crates/anvil/server/src/pubsub.rs | use crate::{RpcHandler, error::RequestError, handler::handle_request};
use anvil_rpc::{
error::RpcError,
request::Request,
response::{Response, ResponseResult},
};
use futures::{FutureExt, Sink, SinkExt, Stream, StreamExt};
use parking_lot::Mutex;
use serde::de::DeserializeOwned;
use std::{
collections::VecDeque,
fmt,
hash::Hash,
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
/// The general purpose trait for handling RPC requests and subscriptions
#[async_trait::async_trait]
pub trait PubSubRpcHandler: Clone + Send + Sync + Unpin + 'static {
    /// The request type to expect
    type Request: DeserializeOwned + Send + Sync + fmt::Debug;
    /// The identifier to use for subscriptions
    type SubscriptionId: Hash + PartialEq + Eq + Send + Sync + fmt::Debug;
    /// The subscription type this handle may create
    type Subscription: Stream<Item = serde_json::Value> + Send + Sync + Unpin;

    /// Invoked when the request was received
    async fn on_request(&self, request: Self::Request, cx: PubSubContext<Self>) -> ResponseResult;
}

/// Shared, mutex-guarded list of `(id, subscription)` pairs for one connection.
type Subscriptions<SubscriptionId, Subscription> = Arc<Mutex<Vec<(SubscriptionId, Subscription)>>>;
/// Contains additional context and tracks subscriptions
pub struct PubSubContext<Handler: PubSubRpcHandler> {
    /// all active subscriptions `id -> Stream`
    subscriptions: Subscriptions<Handler::SubscriptionId, Handler::Subscription>,
}

impl<Handler: PubSubRpcHandler> PubSubContext<Handler> {
    /// Adds new active subscription
    ///
    /// Returns the previous subscription, if any
    pub fn add_subscription(
        &self,
        id: Handler::SubscriptionId,
        subscription: Handler::Subscription,
    ) -> Option<Handler::Subscription> {
        let mut subscriptions = self.subscriptions.lock();
        let mut removed = None;
        // Replace semantics: drop any existing subscription with the same id first.
        if let Some(idx) = subscriptions.iter().position(|(i, _)| id == *i) {
            trace!(target: "rpc", ?id, "removed subscription");
            removed = Some(subscriptions.swap_remove(idx).1);
        }
        trace!(target: "rpc", ?id, "added subscription");
        subscriptions.push((id, subscription));
        removed
    }

    /// Removes an existing subscription
    pub fn remove_subscription(
        &self,
        id: &Handler::SubscriptionId,
    ) -> Option<Handler::Subscription> {
        let mut subscriptions = self.subscriptions.lock();
        if let Some(idx) = subscriptions.iter().position(|(i, _)| id == i) {
            trace!(target: "rpc", ?id, "removed subscription");
            return Some(subscriptions.swap_remove(idx).1);
        }
        None
    }
}
// Hand-written so a clone only bumps the `Arc` refcount of the subscription list.
impl<Handler: PubSubRpcHandler> Clone for PubSubContext<Handler> {
    fn clone(&self) -> Self {
        Self { subscriptions: Arc::clone(&self.subscriptions) }
    }
}

impl<Handler: PubSubRpcHandler> Default for PubSubContext<Handler> {
    fn default() -> Self {
        Self { subscriptions: Arc::new(Mutex::new(Vec::new())) }
    }
}

/// A compatibility helper type to use common `RpcHandler` functions
struct ContextAwareHandler<Handler: PubSubRpcHandler> {
    handler: Handler,
    context: PubSubContext<Handler>,
}

impl<Handler: PubSubRpcHandler> Clone for ContextAwareHandler<Handler> {
    fn clone(&self) -> Self {
        Self { handler: self.handler.clone(), context: self.context.clone() }
    }
}

#[async_trait::async_trait]
impl<Handler: PubSubRpcHandler> RpcHandler for ContextAwareHandler<Handler> {
    type Request = Handler::Request;

    /// Forwards the request to the pub-sub handler together with a clone of the
    /// subscription context.
    async fn on_request(&self, request: Self::Request) -> ResponseResult {
        self.handler.on_request(request, self.context.clone()).await
    }
}
/// Represents a connection to a client via websocket
///
/// Contains the state for the entire connection
pub struct PubSubConnection<Handler: PubSubRpcHandler, Connection> {
    /// the handler for the websocket connection
    handler: Handler,
    /// contains all the subscription related context
    context: PubSubContext<Handler>,
    /// The established connection
    connection: Connection,
    /// currently in progress requests
    processing: Vec<Pin<Box<dyn Future<Output = Response> + Send>>>,
    /// pending messages to send
    pending: VecDeque<String>,
}

impl<Handler: PubSubRpcHandler, Connection> PubSubConnection<Handler, Connection> {
    /// Creates a new connection wrapper with empty queues and a fresh context.
    pub fn new(connection: Connection, handler: Handler) -> Self {
        Self {
            connection,
            handler,
            context: Default::default(),
            pending: Default::default(),
            processing: Default::default(),
        }
    }

    /// Returns a compatibility `RpcHandler`
    fn compat_helper(&self) -> ContextAwareHandler<Handler> {
        ContextAwareHandler { handler: self.handler.clone(), context: self.context.clone() }
    }

    /// Queues a future that resolves the request (or a deserialization error)
    /// to a `Response`; polled by the connection's `Future` impl.
    fn process_request(&mut self, req: serde_json::Result<Request>) {
        let handler = self.compat_helper();
        self.processing.push(Box::pin(async move {
            match req {
                Ok(req) => handle_request(req, handler)
                    .await
                    .unwrap_or_else(|| Response::error(RpcError::invalid_request())),
                Err(err) => {
                    error!(target: "rpc", ?err, "invalid request");
                    Response::error(RpcError::invalid_request())
                }
            }
        }));
    }
}
impl<Handler, Connection> Future for PubSubConnection<Handler, Connection>
where
    Handler: PubSubRpcHandler,
    Connection: Sink<String> + Stream<Item = Result<Option<Request>, RequestError>> + Unpin,
    <Connection as Sink<String>>::Error: fmt::Debug,
{
    type Output = ();

    /// Drives the whole session: sends queued responses, reads new requests,
    /// polls in-flight request futures and active subscription streams. Resolves
    /// when the client disconnects or the transport errors.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let pin = self.get_mut();
        loop {
            // drive the websocket
            while matches!(pin.connection.poll_ready_unpin(cx), Poll::Ready(Ok(()))) {
                // only start sending if socket is ready
                if let Some(msg) = pin.pending.pop_front() {
                    if let Err(err) = pin.connection.start_send_unpin(msg) {
                        error!(target: "rpc", ?err, "Failed to send message");
                    }
                } else {
                    break;
                }
            }
            // Ensure any pending messages are flushed
            // this needs to be called manually for tungsenite websocket: <https://github.com/foundry-rs/foundry/issues/6345>
            if let Poll::Ready(Err(err)) = pin.connection.poll_flush_unpin(cx) {
                trace!(target: "rpc", ?err, "websocket err");
                // close the connection
                return Poll::Ready(());
            }
            // Drain incoming requests until the stream is exhausted or pending.
            loop {
                match pin.connection.poll_next_unpin(cx) {
                    Poll::Ready(Some(req)) => match req {
                        Ok(Some(req)) => {
                            pin.process_request(Ok(req));
                        }
                        Err(err) => match err {
                            RequestError::Axum(err) => {
                                trace!(target: "rpc", ?err, "client disconnected");
                                return Poll::Ready(());
                            }
                            RequestError::Io(err) => {
                                trace!(target: "rpc", ?err, "client disconnected");
                                return Poll::Ready(());
                            }
                            RequestError::Serde(err) => {
                                // Malformed JSON still produces an error response.
                                pin.process_request(Err(err));
                            }
                            RequestError::Disconnect => {
                                trace!(target: "rpc", "client disconnected");
                                return Poll::Ready(());
                            }
                        },
                        // `Ok(None)`: a frame that carried no request; ignore it.
                        _ => {}
                    },
                    Poll::Ready(None) => {
                        trace!(target: "rpc", "socket connection finished");
                        return Poll::Ready(());
                    }
                    Poll::Pending => break,
                }
            }
            let mut progress = false;
            // Poll in-flight request futures; completed ones queue their response.
            for n in (0..pin.processing.len()).rev() {
                let mut req = pin.processing.swap_remove(n);
                match req.poll_unpin(cx) {
                    Poll::Ready(resp) => {
                        if let Ok(text) = serde_json::to_string(&resp) {
                            pin.pending.push_back(text);
                            progress = true;
                        }
                    }
                    Poll::Pending => pin.processing.push(req),
                }
            }
            {
                // process subscription events
                let mut subscriptions = pin.context.subscriptions.lock();
                'outer: for n in (0..subscriptions.len()).rev() {
                    let (id, mut sub) = subscriptions.swap_remove(n);
                    'inner: loop {
                        match sub.poll_next_unpin(cx) {
                            Poll::Ready(Some(res)) => {
                                if let Ok(text) = serde_json::to_string(&res) {
                                    pin.pending.push_back(text);
                                    progress = true;
                                }
                            }
                            // A finished stream is dropped (not re-inserted below).
                            Poll::Ready(None) => continue 'outer,
                            Poll::Pending => break 'inner,
                        }
                    }
                    subscriptions.push((id, sub));
                }
            }
            // Nothing advanced this round: park until a waker fires.
            if !progress {
                return Poll::Pending;
            }
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/error.rs | crates/anvil/server/src/error.rs | //! Error variants used to unify different connection streams
/// An error that can occur when reading an incoming request
#[derive(Debug, thiserror::Error)]
pub enum RequestError {
    /// Transport error reported by axum (websocket).
    #[error(transparent)]
    Axum(#[from] axum::Error),
    /// The payload was not valid JSON / not a valid request.
    #[error(transparent)]
    Serde(#[from] serde_json::Error),
    /// Underlying I/O failure (e.g. on the IPC socket).
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// The client closed the connection.
    #[error("Disconnect")]
    Disconnect,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/ws.rs | crates/anvil/server/src/ws.rs | use crate::{PubSubRpcHandler, error::RequestError, pubsub::PubSubConnection};
use anvil_rpc::request::Request;
use axum::{
extract::{
State, WebSocketUpgrade,
ws::{Message, WebSocket},
},
response::Response,
};
use futures::{Sink, Stream, ready};
use std::{
pin::Pin,
task::{Context, Poll},
};
/// Handles incoming Websocket upgrade
///
/// This is the entrypoint invoked by the axum server for a websocket request
pub async fn handle_ws<Http, Ws: PubSubRpcHandler>(
    ws: WebSocketUpgrade,
    State((_, handler)): State<(Http, Ws)>,
) -> Response {
    // `PubSubConnection` is itself a future that drives the whole session.
    ws.on_upgrade(|socket| PubSubConnection::new(SocketConn(socket), handler))
}

/// Newtype adapter so the axum [`WebSocket`] exposes the `Stream`/`Sink`
/// shapes `PubSubConnection` expects.
#[pin_project::pin_project]
struct SocketConn(#[pin] WebSocket);

impl Stream for SocketConn {
    type Item = Result<Option<Request>, RequestError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match ready!(self.project().0.poll_next(cx)) {
            Some(msg) => Poll::Ready(Some(on_message(msg))),
            _ => Poll::Ready(None),
        }
    }
}
impl Sink<String> for SocketConn {
    type Error = axum::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().0.poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: String) -> Result<(), Self::Error> {
        // Outgoing payloads are always sent as text frames.
        self.project().0.start_send(Message::Text(item.into()))
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().0.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().0.poll_close(cx)
    }
}
/// Translates one websocket frame into an optional JSON-RPC [`Request`].
///
/// Returns `Ok(None)` for control frames that carry no request, and
/// [`RequestError::Disconnect`] once the peer sends a close frame.
fn on_message(msg: Result<Message, axum::Error>) -> Result<Option<Request>, RequestError> {
    let frame = msg?;
    match frame {
        // Text frames carry the JSON-RPC request verbatim.
        Message::Text(text) => Ok(Some(serde_json::from_str(&text)?)),
        // the binary payload type is the request as-is but as bytes, if this is a valid
        // `Request` then we can deserialize the Json from the data Vec
        Message::Binary(data) => Ok(Some(serde_json::from_slice(&data)?)),
        Message::Close(_) => {
            trace!(target: "rpc::ws", "ws client disconnected");
            Err(RequestError::Disconnect)
        }
        // Ping/Pong and any other frames carry no request payload.
        _ => Ok(None),
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/handler.rs | crates/anvil/server/src/handler.rs | use crate::RpcHandler;
use anvil_rpc::{
error::RpcError,
request::{Request, RpcCall},
response::{Response, RpcResponse},
};
use axum::{
Json,
extract::{State, rejection::JsonRejection},
};
use futures::{FutureExt, future};
/// Handles incoming JSON-RPC Request.
// NOTE: `handler` must come first because the `request` extractor consumes the request body.
pub async fn handle<Http: RpcHandler, Ws>(
    State((handler, _)): State<(Http, Ws)>,
    request: Result<Json<Request>, JsonRejection>,
) -> Json<Response> {
    Json(match request {
        Ok(Json(req)) => handle_request(req, handler)
            .await
            // `None` (no responses produced) is reported as an invalid request.
            .unwrap_or_else(|| Response::error(RpcError::invalid_request())),
        Err(err) => {
            warn!(target: "rpc", ?err, "invalid request");
            Response::error(RpcError::invalid_request())
        }
    })
}
/// Handle the JSON-RPC [Request]
///
/// This will try to deserialize the payload into the request type of the handler and if successful
/// invoke the handler. Returns `None` when no response is warranted (e.g. a
/// batch consisting only of notifications).
pub async fn handle_request<Handler: RpcHandler>(
    req: Request,
    handler: Handler,
) -> Option<Response> {
    match req {
        Request::Single(call) => handle_call(call, handler).await.map(Response::Single),
        Request::Batch(calls) => {
            // Run all calls of the batch concurrently.
            let outs = future::join_all(
                calls.into_iter().map(move |call| handle_call(call, handler.clone())),
            )
            .await;
            // Keep only the calls that produced a response.
            let batch: Vec<_> = outs.into_iter().flatten().collect();
            (!batch.is_empty()).then_some(Response::Batch(batch))
        }
    }
}
/// handle a single RPC method call
async fn handle_call<Handler: RpcHandler>(call: RpcCall, handler: Handler) -> Option<RpcResponse> {
    match call {
        RpcCall::MethodCall(call) => {
            trace!(target: "rpc", id = ?call.id , method = ?call.method, "handling call");
            Some(handler.on_call(call).await)
        }
        // Notifications never get a response per the JSON-RPC spec.
        RpcCall::Notification(notification) => {
            trace!(target: "rpc", method = ?notification.method, "received rpc notification");
            None
        }
        RpcCall::Invalid { id } => {
            warn!(target: "rpc", ?id, "invalid rpc call");
            Some(RpcResponse::invalid_request(id))
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/anvil/server/src/ipc.rs | crates/anvil/server/src/ipc.rs | //! IPC handling
use crate::{PubSubRpcHandler, error::RequestError, pubsub::PubSubConnection};
use anvil_rpc::request::Request;
use bytes::{BufMut, BytesMut};
use futures::{Sink, Stream, StreamExt, ready};
use interprocess::local_socket::{self as ls, tokio::prelude::*};
use std::{
io,
pin::Pin,
task::{Context, Poll},
};
/// An IPC connection for anvil
///
/// A Future that listens for incoming connections and spawns new connections
pub struct IpcEndpoint<Handler> {
    /// the handler for the websocket connection
    handler: Handler,
    /// The path to the socket
    path: String,
}

impl<Handler: PubSubRpcHandler> IpcEndpoint<Handler> {
    /// Creates a new endpoint with the given handler
    pub fn new(handler: Handler, path: String) -> Self {
        Self { handler, path }
    }

    /// Returns a stream of incoming connection handlers.
    ///
    /// This establishes the IPC endpoint, converts the incoming connections into handled
    /// connections.
    #[instrument(target = "ipc", skip_all)]
    pub fn incoming(self) -> io::Result<impl Stream<Item = impl Future<Output = ()>>> {
        let Self { handler, path } = self;
        trace!(%path, "starting IPC server");
        if cfg!(unix) {
            // ensure the file does not exist
            // (a stale unix socket file would otherwise prevent binding)
            if std::fs::remove_file(&path).is_ok() {
                warn!(%path, "removed existing file");
            }
        }
        let name = to_name(path.as_ref())?;
        let listener = ls::ListenerOptions::new().name(name).create_tokio()?;
        // Turn the listener into an endless stream of accept results.
        let connections = futures::stream::unfold(listener, |listener| async move {
            let conn = listener.accept().await;
            Some((conn, listener))
        });
        trace!("established connection listener");
        // Map successful accepts to session-driver futures; failed accepts are dropped.
        Ok(connections.filter_map(move |stream| {
            let handler = handler.clone();
            async move {
                match stream {
                    Ok(stream) => {
                        trace!("successful incoming IPC connection");
                        let framed = tokio_util::codec::Decoder::framed(JsonRpcCodec, stream);
                        Some(PubSubConnection::new(IpcConn(framed), handler))
                    }
                    Err(err) => {
                        trace!(%err, "unsuccessful incoming IPC connection");
                        None
                    }
                }
            }
        }))
    }
}
/// Newtype over the framed IPC stream exposing the `Stream`/`Sink` shapes
/// `PubSubConnection` expects.
#[pin_project::pin_project]
struct IpcConn<T>(#[pin] T);

impl<T> Stream for IpcConn<T>
where
    T: Stream<Item = io::Result<String>>,
{
    type Item = Result<Option<Request>, RequestError>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Parse one framed string into a JSON-RPC request.
        fn on_request(msg: io::Result<String>) -> Result<Option<Request>, RequestError> {
            let text = msg?;
            Ok(Some(serde_json::from_str(&text)?))
        }
        match ready!(self.project().0.poll_next(cx)) {
            Some(req) => Poll::Ready(Some(on_request(req))),
            _ => Poll::Ready(None),
        }
    }
}

impl<T> Sink<String> for IpcConn<T>
where
    T: Sink<String, Error = io::Error>,
{
    type Error = io::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // NOTE: we always flush here this prevents any backpressure buffer in the underlying
        // `Framed` impl that would cause stalled requests
        self.project().0.poll_flush(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: String) -> Result<(), Self::Error> {
        self.project().0.start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().0.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project().0.poll_close(cx)
    }
}
struct JsonRpcCodec;
// Adapted from <https://github.com/paritytech/jsonrpc/blob/38af3c9439aa75481805edf6c05c6622a5ab1e70/server-utils/src/stream_codec.rs#L47-L105>
impl tokio_util::codec::Decoder for JsonRpcCodec {
type Item = String;
type Error = io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<Self::Item>> {
const fn is_whitespace(byte: u8) -> bool {
matches!(byte, 0x0D | 0x0A | 0x20 | 0x09)
}
let mut depth = 0;
let mut in_str = false;
let mut is_escaped = false;
let mut start_idx = 0;
let mut whitespaces = 0;
for idx in 0..buf.as_ref().len() {
let byte = buf.as_ref()[idx];
if (byte == b'{' || byte == b'[') && !in_str {
if depth == 0 {
start_idx = idx;
}
depth += 1;
} else if (byte == b'}' || byte == b']') && !in_str {
depth -= 1;
} else if byte == b'"' && !is_escaped {
in_str = !in_str;
} else if is_whitespace(byte) {
whitespaces += 1;
}
is_escaped = byte == b'\\' && !is_escaped && in_str;
if depth == 0 && idx != start_idx && idx - start_idx + 1 > whitespaces {
let bts = buf.split_to(idx + 1);
return match String::from_utf8(bts.as_ref().to_vec()) {
Ok(val) => Ok(Some(val)),
Err(_) => Ok(None),
};
}
}
Ok(None)
}
}
impl tokio_util::codec::Encoder<String> for JsonRpcCodec {
type Error = io::Error;
fn encode(&mut self, msg: String, buf: &mut BytesMut) -> io::Result<()> {
buf.extend_from_slice(msg.as_bytes());
// Add newline character
buf.put_u8(b'\n');
Ok(())
}
}
fn to_name(path: &std::ffi::OsStr) -> io::Result<ls::Name<'_>> {
if cfg!(windows) && !path.as_encoded_bytes().starts_with(br"\\.\pipe\") {
ls::ToNsName::to_ns_name::<ls::GenericNamespaced>(path)
} else {
ls::ToFsName::to_fs_name::<ls::GenericFilePath>(path)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/op.rs | crates/debugger/src/op.rs | /// Named parameter of an EVM opcode.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct OpcodeParam {
/// The name of the parameter.
pub(crate) name: &'static str,
/// The index of the parameter on the stack. This is relative to the top of the stack.
pub(crate) index: usize,
}
impl OpcodeParam {
/// Returns the list of named parameters for the given opcode, accounts for special opcodes
/// requiring immediate bytes to determine stack items.
#[inline]
pub(crate) fn of(op: u8) -> &'static [Self] {
MAP[op as usize]
}
}
static MAP: [&[OpcodeParam]; 256] = {
let mut table = [[].as_slice(); 256];
let mut i = 0;
while i < 256 {
table[i] = map_opcode(i as u8);
i += 1;
}
table
};
const fn map_opcode(op: u8) -> &'static [OpcodeParam] {
macro_rules! map {
($($op:literal($($idx:literal : $name:literal),* $(,)?)),* $(,)?) => {
match op {
$($op => &[
$(OpcodeParam {
name: $name,
index: $idx,
}),*
]),*
}
};
}
// https://www.evm.codes
// https://raw.githubusercontent.com/duneanalytics/evm.codes/refs/heads/main/opcodes.json
//
// jq -r '
// def mkargs(input):
// input
// | split(" | ")
// | to_entries
// | map("\(.key): \"\(.value)\"")
// | join(", ");
// to_entries[]
// | "0x\(.key)(\(mkargs(.value.input))),"
// ' opcodes.json
//
// NOTE: the labels generated for `DUPN` and `SWAPN` have incorrect indices and have been
// manually adjusted in the `map!` macro below.
map! {
0x00(),
0x01(0: "a", 1: "b"),
0x02(0: "a", 1: "b"),
0x03(0: "a", 1: "b"),
0x04(0: "a", 1: "b"),
0x05(0: "a", 1: "b"),
0x06(0: "a", 1: "b"),
0x07(0: "a", 1: "b"),
0x08(0: "a", 1: "b", 2: "N"),
0x09(0: "a", 1: "b", 2: "N"),
0x0a(0: "a", 1: "exponent"),
0x0b(0: "b", 1: "x"),
0x0c(),
0x0d(),
0x0e(),
0x0f(),
0x10(0: "a", 1: "b"),
0x11(0: "a", 1: "b"),
0x12(0: "a", 1: "b"),
0x13(0: "a", 1: "b"),
0x14(0: "a", 1: "b"),
0x15(0: "a"),
0x16(0: "a", 1: "b"),
0x17(0: "a", 1: "b"),
0x18(0: "a", 1: "b"),
0x19(0: "a"),
0x1a(0: "i", 1: "x"),
0x1b(0: "shift", 1: "value"),
0x1c(0: "shift", 1: "value"),
0x1d(0: "shift", 1: "value"),
0x1e(),
0x1f(),
0x20(0: "offset", 1: "size"),
0x21(),
0x22(),
0x23(),
0x24(),
0x25(),
0x26(),
0x27(),
0x28(),
0x29(),
0x2a(),
0x2b(),
0x2c(),
0x2d(),
0x2e(),
0x2f(),
0x30(),
0x31(0: "address"),
0x32(),
0x33(),
0x34(),
0x35(0: "i"),
0x36(),
0x37(0: "destOffset", 1: "offset", 2: "size"),
0x38(),
0x39(0: "destOffset", 1: "offset", 2: "size"),
0x3a(),
0x3b(0: "address"),
0x3c(0: "address", 1: "destOffset", 2: "offset", 3: "size"),
0x3d(),
0x3e(0: "destOffset", 1: "offset", 2: "size"),
0x3f(0: "address"),
0x40(0: "blockNumber"),
0x41(),
0x42(),
0x43(),
0x44(),
0x45(),
0x46(),
0x47(),
0x48(),
0x49(0: "index"),
0x4a(),
0x4b(),
0x4c(),
0x4d(),
0x4e(),
0x4f(),
0x50(0: "y"),
0x51(0: "offset"),
0x52(0: "offset", 1: "value"),
0x53(0: "offset", 1: "value"),
0x54(0: "key"),
0x55(0: "key", 1: "value"),
0x56(0: "counter"),
0x57(0: "counter", 1: "b"),
0x58(),
0x59(),
0x5a(),
0x5b(),
0x5c(0: "key"),
0x5d(0: "key", 1: "value"),
0x5e(0: "destOffset", 1: "offset", 2: "size"),
0x5f(),
0x60(),
0x61(),
0x62(),
0x63(),
0x64(),
0x65(),
0x66(),
0x67(),
0x68(),
0x69(),
0x6a(),
0x6b(),
0x6c(),
0x6d(),
0x6e(),
0x6f(),
0x70(),
0x71(),
0x72(),
0x73(),
0x74(),
0x75(),
0x76(),
0x77(),
0x78(),
0x79(),
0x7a(),
0x7b(),
0x7c(),
0x7d(),
0x7e(),
0x7f(),
// DUPN
0x80(0x00: "dup_value"),
0x81(0x01: "dup_value"),
0x82(0x02: "dup_value"),
0x83(0x03: "dup_value"),
0x84(0x04: "dup_value"),
0x85(0x05: "dup_value"),
0x86(0x06: "dup_value"),
0x87(0x07: "dup_value"),
0x88(0x08: "dup_value"),
0x89(0x09: "dup_value"),
0x8a(0x0a: "dup_value"),
0x8b(0x0b: "dup_value"),
0x8c(0x0c: "dup_value"),
0x8d(0x0d: "dup_value"),
0x8e(0x0e: "dup_value"),
0x8f(0x0f: "dup_value"),
// SWAPN
0x90(0: "a", 0x01: "swap_value"),
0x91(0: "a", 0x02: "swap_value"),
0x92(0: "a", 0x03: "swap_value"),
0x93(0: "a", 0x04: "swap_value"),
0x94(0: "a", 0x05: "swap_value"),
0x95(0: "a", 0x06: "swap_value"),
0x96(0: "a", 0x07: "swap_value"),
0x97(0: "a", 0x08: "swap_value"),
0x98(0: "a", 0x09: "swap_value"),
0x99(0: "a", 0x0a: "swap_value"),
0x9a(0: "a", 0x0b: "swap_value"),
0x9b(0: "a", 0x0c: "swap_value"),
0x9c(0: "a", 0x0d: "swap_value"),
0x9d(0: "a", 0x0e: "swap_value"),
0x9e(0: "a", 0x0f: "swap_value"),
0x9f(0: "a", 0x10: "swap_value"),
0xa0(0: "offset", 1: "size"),
0xa1(0: "offset", 1: "size", 2: "topic"),
0xa2(0: "offset", 1: "size", 2: "topic1", 3: "topic2"),
0xa3(0: "offset", 1: "size", 2: "topic1", 3: "topic2", 4: "topic3"),
0xa4(0: "offset", 1: "size", 2: "topic1", 3: "topic2", 4: "topic3", 5: "topic4"),
0xa5(),
0xa6(),
0xa7(),
0xa8(),
0xa9(),
0xaa(),
0xab(),
0xac(),
0xad(),
0xae(),
0xaf(),
0xb0(),
0xb1(),
0xb2(),
0xb3(),
0xb4(),
0xb5(),
0xb6(),
0xb7(),
0xb8(),
0xb9(),
0xba(),
0xbb(),
0xbc(),
0xbd(),
0xbe(),
0xbf(),
0xc0(),
0xc1(),
0xc2(),
0xc3(),
0xc4(),
0xc5(),
0xc6(),
0xc7(),
0xc8(),
0xc9(),
0xca(),
0xcb(),
0xcc(),
0xcd(),
0xce(),
0xcf(),
0xd0(0: "offset"),
0xd1(),
0xd2(),
0xd3(0: "mem_offset", 1: "offset", 2: "size"),
0xd4(),
0xd5(),
0xd6(),
0xd7(),
0xd8(),
0xd9(),
0xda(),
0xdb(),
0xdc(),
0xdd(),
0xde(),
0xdf(),
0xe0(),
0xe1(0: "condition"),
0xe2(0: "case"),
0xe3(),
0xe4(),
0xe5(),
0xe6(),
0xe7(),
0xe8(),
0xe9(),
0xea(),
0xeb(),
0xec(0: "value", 1: "salt", 2: "input_offset", 3: "input_size"),
0xed(),
0xee(0: "aux_data_offset", 1: "aux_data_size"),
0xef(),
0xf0(0: "value", 1: "offset", 2: "size"),
0xf1(0: "gas", 1: "address", 2: "value", 3: "argsOffset", 4: "argsSize", 5: "retOffset", 6: "retSize"),
0xf2(0: "gas", 1: "address", 2: "value", 3: "argsOffset", 4: "argsSize", 5: "retOffset", 6: "retSize"),
0xf3(0: "offset", 1: "size"),
0xf4(0: "gas", 1: "address", 2: "argsOffset", 3: "argsSize", 4: "retOffset", 5: "retSize"),
0xf5(0: "value", 1: "offset", 2: "size", 3: "salt"),
0xf6(),
0xf7(0: "offset"),
0xf8(0: "target_address", 1: "input_offset", 2: "input_size", 3: "value"),
0xf9(0: "target_address", 1: "input_offset", 2: "input_size"),
0xfa(0: "gas", 1: "address", 2: "argsOffset", 3: "argsSize", 4: "retOffset", 5: "retSize"),
0xfb(0: "target_address", 1: "input_offset", 2: "input_size"),
0xfc(),
0xfd(0: "offset", 1: "size"),
0xfe(),
0xff(0: "address"),
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/node.rs | crates/debugger/src/node.rs | use alloy_primitives::{Address, Bytes};
use foundry_evm_traces::{CallKind, CallTraceArena};
use revm_inspectors::tracing::types::{CallTraceStep, TraceMemberOrder};
use serde::{Deserialize, Serialize};
/// Represents a part of the execution frame before the next call or end of the execution.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct DebugNode {
/// Execution context.
///
/// Note that this is the address of the *code*, not necessarily the address of the storage.
pub address: Address,
/// The kind of call this is.
pub kind: CallKind,
/// Calldata of the call.
pub calldata: Bytes,
/// The debug steps.
pub steps: Vec<CallTraceStep>,
}
impl DebugNode {
/// Creates a new debug node.
pub fn new(
address: Address,
kind: CallKind,
steps: Vec<CallTraceStep>,
calldata: Bytes,
) -> Self {
Self { address, kind, steps, calldata }
}
}
/// Flattens given [CallTraceArena] into a list of [DebugNode]s.
///
/// This is done by recursively traversing the call tree and collecting the steps in-between the
/// calls.
pub fn flatten_call_trace(arena: CallTraceArena, out: &mut Vec<DebugNode>) {
#[derive(Debug, Clone, Copy)]
struct PendingNode {
node_idx: usize,
steps_count: usize,
}
fn inner(arena: &CallTraceArena, node_idx: usize, out: &mut Vec<PendingNode>) {
let mut pending = PendingNode { node_idx, steps_count: 0 };
let node = &arena.nodes()[node_idx];
for order in &node.ordering {
match order {
TraceMemberOrder::Call(idx) => {
out.push(pending);
pending.steps_count = 0;
inner(arena, node.children[*idx], out);
}
TraceMemberOrder::Step(_) => {
pending.steps_count += 1;
}
_ => {}
}
}
out.push(pending);
}
let mut nodes = Vec::new();
inner(&arena, 0, &mut nodes);
let mut arena_nodes = arena.into_nodes();
for pending in nodes {
let steps = {
let other_steps =
arena_nodes[pending.node_idx].trace.steps.split_off(pending.steps_count);
std::mem::replace(&mut arena_nodes[pending.node_idx].trace.steps, other_steps)
};
// Skip nodes with empty steps as there's nothing to display for them.
if steps.is_empty() {
continue;
}
let call = &arena_nodes[pending.node_idx].trace;
let calldata = if call.kind.is_any_create() { Bytes::new() } else { call.data.clone() };
let node = DebugNode::new(call.address, call.kind, steps, calldata);
out.push(node);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/builder.rs | crates/debugger/src/builder.rs | //! Debugger builder.
use crate::{DebugNode, Debugger, node::flatten_call_trace};
use alloy_primitives::{Address, map::AddressHashMap};
use foundry_common::get_contract_name;
use foundry_evm_core::Breakpoints;
use foundry_evm_traces::{CallTraceArena, CallTraceDecoder, Traces, debug::ContractSources};
/// Debugger builder.
#[derive(Debug, Default)]
#[must_use = "builders do nothing unless you call `build` on them"]
pub struct DebuggerBuilder {
/// Debug traces returned from the EVM execution.
debug_arena: Vec<DebugNode>,
/// Identified contracts.
identified_contracts: AddressHashMap<String>,
/// Map of source files.
sources: ContractSources,
/// Map of the debugger breakpoints.
breakpoints: Breakpoints,
}
impl DebuggerBuilder {
/// Creates a new debugger builder.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Extends the debug arena.
#[inline]
pub fn traces(mut self, traces: Traces) -> Self {
for (_, arena) in traces {
self = self.trace_arena(arena.arena);
}
self
}
/// Extends the debug arena.
#[inline]
pub fn trace_arena(mut self, arena: CallTraceArena) -> Self {
flatten_call_trace(arena, &mut self.debug_arena);
self
}
/// Extends the identified contracts from multiple decoders.
#[inline]
pub fn decoders(mut self, decoders: &[CallTraceDecoder]) -> Self {
for decoder in decoders {
self = self.decoder(decoder);
}
self
}
/// Extends the identified contracts from a decoder.
#[inline]
pub fn decoder(self, decoder: &CallTraceDecoder) -> Self {
let c = decoder.contracts.iter().map(|(k, v)| (*k, get_contract_name(v).to_string()));
self.identified_contracts(c)
}
/// Extends the identified contracts.
#[inline]
pub fn identified_contracts(
mut self,
identified_contracts: impl IntoIterator<Item = (Address, String)>,
) -> Self {
self.identified_contracts.extend(identified_contracts);
self
}
/// Sets the sources for the debugger.
#[inline]
pub fn sources(mut self, sources: ContractSources) -> Self {
self.sources = sources;
self
}
/// Sets the breakpoints for the debugger.
#[inline]
pub fn breakpoints(mut self, breakpoints: Breakpoints) -> Self {
self.breakpoints = breakpoints;
self
}
/// Builds the debugger.
#[inline]
pub fn build(self) -> Debugger {
let Self { debug_arena, identified_contracts, sources, breakpoints } = self;
Debugger::new(debug_arena, identified_contracts, sources, breakpoints)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/lib.rs | crates/debugger/src/lib.rs | //! # foundry-debugger
//!
//! Interactive Solidity TUI debugger and debugger data file dumper
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
mod op;
mod builder;
mod debugger;
mod dump;
mod tui;
mod node;
pub use node::DebugNode;
pub use builder::DebuggerBuilder;
pub use debugger::Debugger;
pub use tui::{ExitReason, TUI};
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/dump.rs | crates/debugger/src/dump.rs | use crate::{DebugNode, debugger::DebuggerContext};
use alloy_primitives::map::AddressMap;
use foundry_common::fs::write_json_file;
use foundry_compilers::{
artifacts::sourcemap::{Jump, SourceElement},
multi::MultiCompilerLanguage,
};
use foundry_evm_core::ic::PcIcMap;
use foundry_evm_traces::debug::{ArtifactData, ContractSources, SourceData};
use serde::Serialize;
use std::{collections::HashMap, path::Path};
/// Dumps debugger data to a JSON file.
pub(crate) fn dump(path: &Path, context: &DebuggerContext) -> eyre::Result<()> {
write_json_file(path, &DebuggerDump::new(context))?;
Ok(())
}
/// Holds info of debugger dump.
#[derive(Serialize)]
struct DebuggerDump<'a> {
contracts: ContractsDump<'a>,
debug_arena: &'a [DebugNode],
}
impl<'a> DebuggerDump<'a> {
fn new(debugger_context: &'a DebuggerContext) -> Self {
Self {
contracts: ContractsDump::new(debugger_context),
debug_arena: &debugger_context.debug_arena,
}
}
}
#[derive(Serialize)]
struct SourceElementDump {
offset: u32,
length: u32,
index: i32,
jump: u32,
modifier_depth: u32,
}
impl SourceElementDump {
fn new(v: &SourceElement) -> Self {
Self {
offset: v.offset(),
length: v.length(),
index: v.index_i32(),
jump: match v.jump() {
Jump::In => 0,
Jump::Out => 1,
Jump::Regular => 2,
},
modifier_depth: v.modifier_depth(),
}
}
}
#[derive(Serialize)]
struct ContractsDump<'a> {
identified_contracts: &'a AddressMap<String>,
sources: ContractsSourcesDump<'a>,
}
impl<'a> ContractsDump<'a> {
fn new(debugger_context: &'a DebuggerContext) -> Self {
Self {
identified_contracts: &debugger_context.identified_contracts,
sources: ContractsSourcesDump::new(&debugger_context.contracts_sources),
}
}
}
#[derive(Serialize)]
struct ContractsSourcesDump<'a> {
sources_by_id: HashMap<&'a str, HashMap<u32, SourceDataDump<'a>>>,
artifacts_by_name: HashMap<&'a str, Vec<ArtifactDataDump<'a>>>,
}
impl<'a> ContractsSourcesDump<'a> {
fn new(contracts_sources: &'a ContractSources) -> Self {
Self {
sources_by_id: contracts_sources
.sources_by_id
.iter()
.map(|(name, inner_map)| {
(
name.as_str(),
inner_map
.iter()
.map(|(id, source_data)| (*id, SourceDataDump::new(source_data)))
.collect(),
)
})
.collect(),
artifacts_by_name: contracts_sources
.artifacts_by_name
.iter()
.map(|(name, data)| {
(name.as_str(), data.iter().map(ArtifactDataDump::new).collect())
})
.collect(),
}
}
}
#[derive(Serialize)]
struct SourceDataDump<'a> {
source: &'a str,
language: MultiCompilerLanguage,
path: &'a Path,
}
impl<'a> SourceDataDump<'a> {
fn new(v: &'a SourceData) -> Self {
Self { source: &v.source, language: v.language, path: &v.path }
}
}
#[derive(Serialize)]
struct ArtifactDataDump<'a> {
source_map: Option<Vec<SourceElementDump>>,
source_map_runtime: Option<Vec<SourceElementDump>>,
pc_ic_map: Option<&'a PcIcMap>,
pc_ic_map_runtime: Option<&'a PcIcMap>,
build_id: &'a str,
file_id: u32,
}
impl<'a> ArtifactDataDump<'a> {
fn new(v: &'a ArtifactData) -> Self {
Self {
source_map: v
.source_map
.as_ref()
.map(|source_map| source_map.iter().map(SourceElementDump::new).collect()),
source_map_runtime: v
.source_map_runtime
.as_ref()
.map(|source_map| source_map.iter().map(SourceElementDump::new).collect()),
pc_ic_map: v.pc_ic_map.as_ref(),
pc_ic_map_runtime: v.pc_ic_map_runtime.as_ref(),
build_id: &v.build_id,
file_id: v.file_id,
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/debugger.rs | crates/debugger/src/debugger.rs | //! Debugger implementation.
use crate::{DebugNode, DebuggerBuilder, ExitReason, tui::TUI};
use alloy_primitives::map::AddressHashMap;
use eyre::Result;
use foundry_evm_core::Breakpoints;
use foundry_evm_traces::debug::ContractSources;
use std::path::Path;
pub struct DebuggerContext {
pub debug_arena: Vec<DebugNode>,
pub identified_contracts: AddressHashMap<String>,
/// Source map of contract sources
pub contracts_sources: ContractSources,
pub breakpoints: Breakpoints,
}
pub struct Debugger {
context: DebuggerContext,
}
impl Debugger {
/// Creates a new debugger builder.
#[inline]
pub fn builder() -> DebuggerBuilder {
DebuggerBuilder::new()
}
/// Creates a new debugger.
pub fn new(
debug_arena: Vec<DebugNode>,
identified_contracts: AddressHashMap<String>,
contracts_sources: ContractSources,
breakpoints: Breakpoints,
) -> Self {
Self {
context: DebuggerContext {
debug_arena,
identified_contracts,
contracts_sources,
breakpoints,
},
}
}
/// Starts the debugger TUI. Terminates the current process on failure or user exit.
pub fn run_tui_exit(mut self) -> ! {
let code = match self.try_run_tui() {
Ok(ExitReason::CharExit) => 0,
Err(e) => {
let _ = sh_eprintln!("{e}");
1
}
};
std::process::exit(code)
}
/// Starts the debugger TUI.
pub fn try_run_tui(&mut self) -> Result<ExitReason> {
eyre::ensure!(!self.context.debug_arena.is_empty(), "debug arena is empty");
let mut tui = TUI::new(&mut self.context);
tui.try_run()
}
/// Dumps debugger data to file.
pub fn dump_to_file(&mut self, path: &Path) -> Result<()> {
eyre::ensure!(!self.context.debug_arena.is_empty(), "debug arena is empty");
crate::dump::dump(path, &self.context)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/tui/draw.rs | crates/debugger/src/tui/draw.rs | //! TUI draw implementation.
use super::context::TUIContext;
use crate::op::OpcodeParam;
use foundry_compilers::artifacts::sourcemap::SourceElement;
use foundry_evm_core::buffer::{BufferKind, get_buffer_accesses};
use foundry_evm_traces::debug::SourceData;
use ratatui::{
Frame,
layout::{Alignment, Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
text::{Line, Span, Text},
widgets::{Block, Borders, List, ListItem, ListState, Paragraph, Wrap},
};
use revm_inspectors::tracing::types::CallKind;
use std::{collections::VecDeque, fmt::Write, io};
impl TUIContext<'_> {
/// Draws the TUI layout and subcomponents to the given terminal.
pub(crate) fn draw(&self, terminal: &mut super::DebuggerTerminal) -> io::Result<()> {
terminal.draw(|f| self.draw_layout(f)).map(drop)
}
fn draw_layout(&self, f: &mut Frame<'_>) {
// We need 100 columns to display a 32 byte word in the memory and stack panes.
let area = f.area();
let min_width = 100;
let min_height = 16;
if area.width < min_width || area.height < min_height {
self.size_too_small(f, min_width, min_height);
return;
}
// The horizontal layout draws these panes at 50% width.
let min_column_width_for_horizontal = 200;
if area.width >= min_column_width_for_horizontal {
self.horizontal_layout(f);
} else {
self.vertical_layout(f);
}
}
fn size_too_small(&self, f: &mut Frame<'_>, min_width: u16, min_height: u16) {
let mut lines = Vec::with_capacity(4);
let l1 = "Terminal size too small:";
lines.push(Line::from(l1));
let area = f.area();
let width_color = if area.width >= min_width { Color::Green } else { Color::Red };
let height_color = if area.height >= min_height { Color::Green } else { Color::Red };
let l2 = vec![
Span::raw("Width = "),
Span::styled(area.width.to_string(), Style::new().fg(width_color)),
Span::raw(" Height = "),
Span::styled(area.height.to_string(), Style::new().fg(height_color)),
];
lines.push(Line::from(l2));
let l3 = "Needed for current config:";
lines.push(Line::from(l3));
let l4 = format!("Width = {min_width} Height = {min_height}");
lines.push(Line::from(l4));
let paragraph =
Paragraph::new(lines).alignment(Alignment::Center).wrap(Wrap { trim: true });
f.render_widget(paragraph, area)
}
/// Draws the layout in vertical mode.
///
/// ```text
/// |-----------------------------|
/// | op |
/// |-----------------------------|
/// | stack |
/// |-----------------------------|
/// | buf |
/// |-----------------------------|
/// | |
/// | src |
/// | |
/// |-----------------------------|
/// ```
fn vertical_layout(&self, f: &mut Frame<'_>) {
let area = f.area();
let h_height = if self.show_shortcuts { 4 } else { 0 };
// NOTE: `Layout::split` always returns a slice of the same length as the number of
// constraints, so the `else` branch is unreachable.
// Split off footer.
let [app, footer] = Layout::new(
Direction::Vertical,
[Constraint::Ratio(100 - h_height, 100), Constraint::Ratio(h_height, 100)],
)
.split(area)[..] else {
unreachable!()
};
// Split the app in 4 vertically to construct all the panes.
let [op_pane, stack_pane, memory_pane, src_pane] = Layout::new(
Direction::Vertical,
[
Constraint::Ratio(1, 6),
Constraint::Ratio(1, 6),
Constraint::Ratio(1, 6),
Constraint::Ratio(3, 6),
],
)
.split(app)[..] else {
unreachable!()
};
if self.show_shortcuts {
self.draw_footer(f, footer);
}
self.draw_src(f, src_pane);
self.draw_op_list(f, op_pane);
self.draw_stack(f, stack_pane);
self.draw_buffer(f, memory_pane);
}
/// Draws the layout in horizontal mode.
///
/// ```text
/// |-----------------|-----------|
/// | op | stack |
/// |-----------------|-----------|
/// | | |
/// | src | buf |
/// | | |
/// |-----------------|-----------|
/// ```
fn horizontal_layout(&self, f: &mut Frame<'_>) {
let area = f.area();
let h_height = if self.show_shortcuts { 4 } else { 0 };
// Split off footer.
let [app, footer] = Layout::new(
Direction::Vertical,
[Constraint::Ratio(100 - h_height, 100), Constraint::Ratio(h_height, 100)],
)
.split(area)[..] else {
unreachable!()
};
// Split app in 2 horizontally.
let [app_left, app_right] =
Layout::new(Direction::Horizontal, [Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)])
.split(app)[..]
else {
unreachable!()
};
// Split left pane in 2 vertically to opcode list and source.
let [op_pane, src_pane] =
Layout::new(Direction::Vertical, [Constraint::Ratio(1, 4), Constraint::Ratio(3, 4)])
.split(app_left)[..]
else {
unreachable!()
};
// Split right pane horizontally to construct stack and memory.
let [stack_pane, memory_pane] =
Layout::new(Direction::Vertical, [Constraint::Ratio(1, 4), Constraint::Ratio(3, 4)])
.split(app_right)[..]
else {
unreachable!()
};
if self.show_shortcuts {
self.draw_footer(f, footer);
}
self.draw_src(f, src_pane);
self.draw_op_list(f, op_pane);
self.draw_stack(f, stack_pane);
self.draw_buffer(f, memory_pane);
}
fn draw_footer(&self, f: &mut Frame<'_>, area: Rect) {
let l1 = "[q]: quit | [k/j]: prev/next op | [a/s]: prev/next jump | [c/C]: prev/next call | [g/G]: start/end | [b]: cycle memory/calldata/returndata buffers";
let l2 = "[t]: stack labels | [m]: buffer decoding | [shift + j/k]: scroll stack | [ctrl + j/k]: scroll buffer | ['<char>]: goto breakpoint | [h] toggle help";
let dimmed = Style::new().add_modifier(Modifier::DIM);
let lines =
vec![Line::from(Span::styled(l1, dimmed)), Line::from(Span::styled(l2, dimmed))];
let paragraph =
Paragraph::new(lines).alignment(Alignment::Center).wrap(Wrap { trim: false });
f.render_widget(paragraph, area);
}
fn draw_src(&self, f: &mut Frame<'_>, area: Rect) {
let (text_output, source_name) = self.src_text(area);
let call_kind_text = match self.call_kind() {
CallKind::Create | CallKind::Create2 => "Contract creation",
CallKind::Call => "Contract call",
CallKind::StaticCall => "Contract staticcall",
CallKind::CallCode => "Contract callcode",
CallKind::DelegateCall => "Contract delegatecall",
CallKind::AuthCall => "Contract authcall",
};
let title = format!(
"{} {} ",
call_kind_text,
source_name.map(|s| format!("| {s}")).unwrap_or_default()
);
let block = Block::default().title(title).borders(Borders::ALL);
let paragraph = Paragraph::new(text_output).block(block).wrap(Wrap { trim: false });
f.render_widget(paragraph, area);
}
fn src_text(&self, area: Rect) -> (Text<'_>, Option<&str>) {
let (source_element, source) = match self.src_map() {
Ok(r) => r,
Err(e) => return (Text::from(e), None),
};
// We are handed a vector of SourceElements that give us a span of sourcecode that is
// currently being executed. This includes an offset and length.
// This vector is in instruction pointer order, meaning the location of the instruction
// minus `sum(push_bytes[..pc])`.
let offset = source_element.offset() as usize;
let len = source_element.length() as usize;
let max = source.source.len();
// Split source into before, relevant, and after chunks, split by line, for formatting.
let actual_start = offset.min(max);
let actual_end = (offset + len).min(max);
let mut before: Vec<_> = source.source[..actual_start].split_inclusive('\n').collect();
let actual: Vec<_> =
source.source[actual_start..actual_end].split_inclusive('\n').collect();
let mut after: VecDeque<_> = source.source[actual_end..].split_inclusive('\n').collect();
let num_lines = before.len() + actual.len() + after.len();
let height = area.height as usize;
let needed_highlight = actual.len();
let mid_len = before.len() + actual.len();
// adjust what text we show of the source code
let (start_line, end_line) = if needed_highlight > height {
// highlighted section is more lines than we have available
let start_line = before.len().saturating_sub(1);
(start_line, before.len() + needed_highlight)
} else if height > num_lines {
// we can fit entire source
(0, num_lines)
} else {
let remaining = height - needed_highlight;
let mut above = remaining / 2;
let mut below = remaining / 2;
if below > after.len() {
// unused space below the highlight
above += below - after.len();
} else if above > before.len() {
// we have unused space above the highlight
below += above - before.len();
} else {
// no unused space
}
// since above is subtracted from before.len(), and the resulting
// start_line is used to index into before, above must be at least
// 1 to avoid out-of-range accesses.
if above == 0 {
above = 1;
}
(before.len().saturating_sub(above), mid_len + below)
};
// Unhighlighted line number: gray.
let u_num = Style::new().fg(Color::Gray);
// Unhighlighted text: default, dimmed.
let u_text = Style::new().add_modifier(Modifier::DIM);
// Highlighted line number: cyan.
let h_num = Style::new().fg(Color::Cyan);
// Highlighted text: cyan, bold.
let h_text = Style::new().fg(Color::Cyan).add_modifier(Modifier::BOLD);
let mut lines = SourceLines::new(start_line, end_line);
// We check if there is other text on the same line before the highlight starts.
if let Some(last) = before.pop() {
let last_has_nl = last.ends_with('\n');
if last_has_nl {
before.push(last);
}
for line in &before[start_line..] {
lines.push(u_num, line, u_text);
}
let first = if !last_has_nl {
lines.push_raw(h_num, &[Span::raw(last), Span::styled(actual[0], h_text)]);
1
} else {
0
};
// Skip the first line if it has already been handled above.
for line in &actual[first..] {
lines.push(h_num, line, h_text);
}
} else {
// No text before the current line.
for line in &actual {
lines.push(h_num, line, h_text);
}
}
// Fill in the rest of the line as unhighlighted.
if let Some(last) = actual.last()
&& !last.ends_with('\n')
&& let Some(post) = after.pop_front()
&& let Some(last) = lines.lines.last_mut()
{
last.spans.push(Span::raw(post));
}
// Add after highlighted text.
while mid_len + after.len() > end_line {
after.pop_back();
}
for line in after {
lines.push(u_num, line, u_text);
}
// pad with empty to each line to ensure the previous text is cleared
for line in &mut lines.lines {
// note that the \n is not included in the line length
if area.width as usize > line.width() + 1 {
line.push_span(Span::raw(" ".repeat(area.width as usize - line.width() - 1)));
}
}
(Text::from(lines.lines), source.path.to_str())
}
/// Returns source map, source code and source name of the current line.
fn src_map(&self) -> Result<(SourceElement, &SourceData), String> {
let address = self.address();
let Some(contract_name) = self.debugger_context.identified_contracts.get(address) else {
return Err(format!("Unknown contract at address {address}"));
};
self.debugger_context
.contracts_sources
.find_source_mapping(
contract_name,
self.current_step().pc as u32,
self.debug_call().kind.is_any_create(),
)
.ok_or_else(|| format!("No source map for contract {contract_name}"))
}
fn draw_op_list(&self, f: &mut Frame<'_>, area: Rect) {
let debug_steps = self.debug_steps();
let max_pc = debug_steps.iter().map(|step| step.pc).max().unwrap_or(0);
let max_pc_len = hex_digits(max_pc);
let items = debug_steps
.iter()
.enumerate()
.map(|(i, step)| {
let mut content = String::with_capacity(64);
write!(content, "{:0>max_pc_len$x}|", step.pc).unwrap();
if let Some(op) = self.opcode_list.get(i) {
content.push_str(op);
}
ListItem::new(Span::styled(content, Style::new().fg(Color::White)))
})
.collect::<Vec<_>>();
let title = format!(
"Address: {} | PC: {} | Gas used in call: {}",
self.address(),
self.current_step().pc,
self.current_step().gas_used,
);
let block = Block::default().title(title).borders(Borders::ALL);
let list = List::new(items)
.block(block)
.highlight_symbol("▶")
.highlight_style(Style::new().fg(Color::White).bg(Color::DarkGray))
.scroll_padding(1);
let mut state = ListState::default().with_selected(Some(self.current_step));
f.render_stateful_widget(list, area, &mut state);
}
fn draw_stack(&self, f: &mut Frame<'_>, area: Rect) {
let step = self.current_step();
let stack = step.stack.as_ref();
let stack_len = stack.map_or(0, |s| s.len());
let min_len = decimal_digits(stack_len).max(2);
let params = OpcodeParam::of(step.op.get());
let text: Vec<Line<'_>> = stack
.map(|stack| {
stack
.iter()
.rev()
.enumerate()
.skip(self.draw_memory.current_stack_startline)
.map(|(i, stack_item)| {
let param = params.iter().find(|param| param.index == i);
let mut spans = Vec::with_capacity(1 + 32 * 2 + 3);
// Stack index.
spans.push(Span::styled(
format!("{i:0min_len$}| "),
Style::new().fg(Color::White),
));
// Item hex bytes.
hex_bytes_spans(&stack_item.to_be_bytes::<32>(), &mut spans, |_, _| {
if param.is_some() {
Style::new().fg(Color::Cyan)
} else {
Style::new().fg(Color::White)
}
});
if self.stack_labels
&& let Some(param) = param
{
spans.push(Span::raw("| "));
spans.push(Span::raw(param.name));
}
spans.push(Span::raw("\n"));
Line::from(spans)
})
.collect()
})
.unwrap_or_default();
let title = format!("Stack: {stack_len}");
let block = Block::default().title(title).borders(Borders::ALL);
let paragraph = Paragraph::new(text).block(block).wrap(Wrap { trim: true });
f.render_widget(paragraph, area);
}
fn draw_buffer(&self, f: &mut Frame<'_>, area: Rect) {
let call = self.debug_call();
let step = self.current_step();
let buf = match self.active_buffer {
BufferKind::Memory => step.memory.as_ref().unwrap().as_ref(),
BufferKind::Calldata => call.calldata.as_ref(),
BufferKind::Returndata => step.returndata.as_ref(),
};
let min_len = hex_digits(buf.len());
// Color memory region based on read/write.
let mut offset = None;
let mut len = None;
let mut write_offset = None;
let mut write_size = None;
let mut color = None;
let stack_len = step.stack.as_ref().map_or(0, |s| s.len());
if stack_len > 0
&& let Some(stack) = step.stack.as_ref()
&& let Some(accesses) = get_buffer_accesses(step.op.get(), stack)
{
if let Some(read_access) = accesses.read {
offset = Some(read_access.1.offset);
len = Some(read_access.1.len);
color = Some(Color::Cyan);
}
if let Some(write_access) = accesses.write
&& self.active_buffer == BufferKind::Memory
{
write_offset = Some(write_access.offset);
write_size = Some(write_access.len);
}
}
// color word on previous write op
// TODO: technically it's possible for this to conflict with the current op, ie, with
// subsequent MCOPYs, but solc can't seem to generate that code even with high optimizer
// settings
if self.current_step > 0 {
let prev_step = self.current_step - 1;
let prev_step = &self.debug_steps()[prev_step];
if let Some(stack) = prev_step.stack.as_ref()
&& let Some(write_access) =
get_buffer_accesses(prev_step.op.get(), stack).and_then(|a| a.write)
&& self.active_buffer == BufferKind::Memory
{
offset = Some(write_access.offset);
len = Some(write_access.len);
color = Some(Color::Green);
}
}
let height = area.height as usize;
let end_line = self.draw_memory.current_buf_startline + height;
let text: Vec<Line<'_>> = buf
.chunks(32)
.enumerate()
.skip(self.draw_memory.current_buf_startline)
.take_while(|(i, _)| *i < end_line)
.map(|(i, buf_word)| {
let mut spans = Vec::with_capacity(1 + 32 * 2 + 1 + 32 / 4 + 1);
// Buffer index.
spans.push(Span::styled(
format!("{:0min_len$x}| ", i * 32),
Style::new().fg(Color::White),
));
// Word hex bytes.
hex_bytes_spans(buf_word, &mut spans, |j, _| {
let mut byte_color = Color::White;
let mut end = None;
let idx = i * 32 + j;
if let (Some(offset), Some(len), Some(color)) = (offset, len, color) {
end = Some(offset + len);
if (offset..offset + len).contains(&idx) {
// [offset, offset + len] is the memory region to be colored.
// If a byte at row i and column j in the memory panel
// falls in this region, set the color.
byte_color = color;
}
}
if let (Some(write_offset), Some(write_size)) = (write_offset, write_size) {
// check for overlap with read region
let write_end = write_offset + write_size;
if let Some(read_end) = end {
let read_start = offset.unwrap();
if (write_offset..write_end).contains(&read_end) {
// if it contains end, start from write_start up to read_end
if (write_offset..read_end).contains(&idx) {
return Style::new().fg(Color::Yellow);
}
} else if (write_offset..write_end).contains(&read_start) {
// otherwise if it contains read start, start from read_start up to
// write_end
if (read_start..write_end).contains(&idx) {
return Style::new().fg(Color::Yellow);
}
}
}
if (write_offset..write_end).contains(&idx) {
byte_color = Color::Red;
}
}
Style::new().fg(byte_color)
});
if self.buf_utf {
spans.push(Span::raw("|"));
for utf in buf_word.chunks(4) {
if let Ok(utf_str) = std::str::from_utf8(utf) {
spans.push(Span::raw(utf_str.replace('\0', ".")));
} else {
spans.push(Span::raw("."));
}
}
}
spans.push(Span::raw("\n"));
Line::from(spans)
})
.collect();
let title = self.active_buffer.title(buf.len());
let block = Block::default().title(title).borders(Borders::ALL);
let paragraph = Paragraph::new(text).block(block).wrap(Wrap { trim: true });
f.render_widget(paragraph, area);
}
}
/// Wrapper around a list of [`Line`]s that prepends the line number on each new line.
struct SourceLines<'a> {
lines: Vec<Line<'a>>,
start_line: usize,
max_line_num: usize,
}
impl<'a> SourceLines<'a> {
fn new(start_line: usize, end_line: usize) -> Self {
Self { lines: Vec::new(), start_line, max_line_num: decimal_digits(end_line) }
}
fn push(&mut self, line_number_style: Style, line: &'a str, line_style: Style) {
self.push_raw(line_number_style, &[Span::styled(line, line_style)]);
}
fn push_raw(&mut self, line_number_style: Style, spans: &[Span<'a>]) {
let mut line_spans = Vec::with_capacity(4);
let line_number = format!(
"{number: >width$} ",
number = self.start_line + self.lines.len() + 1,
width = self.max_line_num
);
line_spans.push(Span::styled(line_number, line_number_style));
// Space between line number and line text.
line_spans.push(Span::raw(" "));
line_spans.extend_from_slice(spans);
self.lines.push(Line::from(line_spans));
}
}
fn hex_bytes_spans(bytes: &[u8], spans: &mut Vec<Span<'_>>, f: impl Fn(usize, u8) -> Style) {
for (i, &byte) in bytes.iter().enumerate() {
if i > 0 {
spans.push(Span::raw(" "));
}
spans.push(Span::styled(alloy_primitives::hex::encode([byte]), f(i, byte)));
}
}
/// Returns the number of decimal digits in the given number.
///
/// This is the same as `n.to_string().len()`.
fn decimal_digits(n: usize) -> usize {
n.checked_ilog10().unwrap_or(0) as usize + 1
}
/// Returns the number of hexadecimal digits in the given number.
///
/// This is the same as `format!("{n:x}").len()`.
fn hex_digits(n: usize) -> usize {
n.checked_ilog(16).unwrap_or(0) as usize + 1
}
#[cfg(test)]
mod tests {
#[test]
fn decimal_digits() {
assert_eq!(super::decimal_digits(0), 1);
assert_eq!(super::decimal_digits(1), 1);
assert_eq!(super::decimal_digits(2), 1);
assert_eq!(super::decimal_digits(9), 1);
assert_eq!(super::decimal_digits(10), 2);
assert_eq!(super::decimal_digits(11), 2);
assert_eq!(super::decimal_digits(50), 2);
assert_eq!(super::decimal_digits(99), 2);
assert_eq!(super::decimal_digits(100), 3);
assert_eq!(super::decimal_digits(101), 3);
assert_eq!(super::decimal_digits(201), 3);
assert_eq!(super::decimal_digits(999), 3);
assert_eq!(super::decimal_digits(1000), 4);
assert_eq!(super::decimal_digits(1001), 4);
}
#[test]
fn hex_digits() {
assert_eq!(super::hex_digits(0), 1);
assert_eq!(super::hex_digits(1), 1);
assert_eq!(super::hex_digits(2), 1);
assert_eq!(super::hex_digits(9), 1);
assert_eq!(super::hex_digits(10), 1);
assert_eq!(super::hex_digits(11), 1);
assert_eq!(super::hex_digits(15), 1);
assert_eq!(super::hex_digits(16), 2);
assert_eq!(super::hex_digits(17), 2);
assert_eq!(super::hex_digits(0xff), 2);
assert_eq!(super::hex_digits(0x100), 3);
assert_eq!(super::hex_digits(0x101), 3);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/tui/mod.rs | crates/debugger/src/tui/mod.rs | //! The debugger TUI.
use crossterm::{
event::{self, DisableMouseCapture, EnableMouseCapture},
execute,
terminal::{EnterAlternateScreen, LeaveAlternateScreen, disable_raw_mode, enable_raw_mode},
};
use eyre::Result;
use ratatui::{
Terminal,
backend::{Backend, CrosstermBackend},
};
use std::{io, ops::ControlFlow, sync::Arc};
mod context;
use crate::debugger::DebuggerContext;
use context::TUIContext;
mod draw;
type DebuggerTerminal = Terminal<CrosstermBackend<io::Stdout>>;
/// Debugger exit reason.
#[derive(Debug)]
pub enum ExitReason {
/// Exit using 'q'.
CharExit,
}
/// The debugger TUI.
pub struct TUI<'a> {
debugger_context: &'a mut DebuggerContext,
}
impl<'a> TUI<'a> {
/// Creates a new debugger.
pub fn new(debugger_context: &'a mut DebuggerContext) -> Self {
Self { debugger_context }
}
/// Starts the debugger TUI.
pub fn try_run(&mut self) -> Result<ExitReason> {
let backend = CrosstermBackend::new(io::stdout());
let terminal = Terminal::new(backend)?;
TerminalGuard::with(terminal, |terminal| self.run_inner(terminal))
}
#[instrument(target = "debugger", name = "run", skip_all, ret)]
fn run_inner(&mut self, terminal: &mut DebuggerTerminal) -> Result<ExitReason> {
let mut cx = TUIContext::new(self.debugger_context);
cx.init();
loop {
cx.draw(terminal)?;
match cx.handle_event(event::read()?) {
ControlFlow::Continue(()) => {}
ControlFlow::Break(reason) => return Ok(reason),
}
}
}
}
type PanicHandler = Box<dyn Fn(&std::panic::PanicHookInfo<'_>) + 'static + Sync + Send>;
/// Handles terminal state.
#[must_use]
struct TerminalGuard<B: Backend + io::Write> {
terminal: Terminal<B>,
hook: Option<Arc<PanicHandler>>,
}
impl<B: Backend + io::Write> TerminalGuard<B> {
fn with<T>(terminal: Terminal<B>, mut f: impl FnMut(&mut Terminal<B>) -> T) -> T {
let mut guard = Self { terminal, hook: None };
guard.setup();
f(&mut guard.terminal)
}
fn setup(&mut self) {
let previous = Arc::new(std::panic::take_hook());
self.hook = Some(previous.clone());
// We need to restore the terminal state before displaying the panic message.
// TODO: Use `std::panic::update_hook` when it's stable
std::panic::set_hook(Box::new(move |info| {
Self::half_restore(&mut std::io::stdout());
(previous)(info)
}));
let _ = enable_raw_mode();
let _ = execute!(*self.terminal.backend_mut(), EnterAlternateScreen, EnableMouseCapture);
let _ = self.terminal.hide_cursor();
let _ = self.terminal.clear();
}
fn restore(&mut self) {
if !std::thread::panicking() {
// Drop the current hook to guarantee that `self.hook` is the only reference to it.
let _ = std::panic::take_hook();
// Restore the previous panic hook.
let prev = self.hook.take().unwrap();
let prev = match Arc::try_unwrap(prev) {
Ok(prev) => prev,
Err(_) => unreachable!("`self.hook` is not the only reference to the panic hook"),
};
std::panic::set_hook(prev);
// NOTE: Our panic handler calls this function, so we only have to call it here if we're
// not panicking.
Self::half_restore(self.terminal.backend_mut());
}
let _ = self.terminal.show_cursor();
}
fn half_restore(w: &mut impl io::Write) {
let _ = disable_raw_mode();
let _ = execute!(*w, LeaveAlternateScreen, DisableMouseCapture);
}
}
impl<B: Backend + io::Write> Drop for TerminalGuard<B> {
#[inline]
fn drop(&mut self) {
self.restore();
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/debugger/src/tui/context.rs | crates/debugger/src/tui/context.rs | //! Debugger context and event handler implementation.
use crate::{DebugNode, ExitReason, debugger::DebuggerContext};
use alloy_primitives::{Address, hex};
use crossterm::event::{Event, KeyCode, KeyEvent, KeyModifiers, MouseEvent, MouseEventKind};
use foundry_evm_core::buffer::BufferKind;
use revm::bytecode::opcode::OpCode;
use revm_inspectors::tracing::types::{CallKind, CallTraceStep};
use std::ops::ControlFlow;
/// This is currently used to remember last scroll position so screen doesn't wiggle as much.
#[derive(Default)]
pub(crate) struct DrawMemory {
pub(crate) inner_call_index: usize,
pub(crate) current_buf_startline: usize,
pub(crate) current_stack_startline: usize,
}
pub(crate) struct TUIContext<'a> {
pub(crate) debugger_context: &'a mut DebuggerContext,
/// Buffer for keys prior to execution, i.e. '10' + 'k' => move up 10 operations.
pub(crate) key_buffer: String,
/// Current step in the debug steps.
pub(crate) current_step: usize,
pub(crate) draw_memory: DrawMemory,
pub(crate) opcode_list: Vec<String>,
pub(crate) last_index: usize,
pub(crate) stack_labels: bool,
/// Whether to decode active buffer as utf8 or not.
pub(crate) buf_utf: bool,
pub(crate) show_shortcuts: bool,
/// The currently active buffer (memory, calldata, returndata) to be drawn.
pub(crate) active_buffer: BufferKind,
}
impl<'a> TUIContext<'a> {
pub(crate) fn new(debugger_context: &'a mut DebuggerContext) -> Self {
TUIContext {
debugger_context,
key_buffer: String::with_capacity(64),
current_step: 0,
draw_memory: DrawMemory::default(),
opcode_list: Vec::new(),
last_index: 0,
stack_labels: false,
buf_utf: false,
show_shortcuts: true,
active_buffer: BufferKind::Memory,
}
}
pub(crate) fn init(&mut self) {
self.gen_opcode_list();
}
pub(crate) fn debug_arena(&self) -> &[DebugNode] {
&self.debugger_context.debug_arena
}
pub(crate) fn debug_call(&self) -> &DebugNode {
&self.debug_arena()[self.draw_memory.inner_call_index]
}
/// Returns the current call address.
pub(crate) fn address(&self) -> &Address {
&self.debug_call().address
}
/// Returns the current call kind.
pub(crate) fn call_kind(&self) -> CallKind {
self.debug_call().kind
}
/// Returns the current debug steps.
pub(crate) fn debug_steps(&self) -> &[CallTraceStep] {
&self.debug_call().steps
}
/// Returns the current debug step.
pub(crate) fn current_step(&self) -> &CallTraceStep {
&self.debug_steps()[self.current_step]
}
fn gen_opcode_list(&mut self) {
self.opcode_list.clear();
let debug_steps =
&self.debugger_context.debug_arena[self.draw_memory.inner_call_index].steps;
for step in debug_steps {
self.opcode_list.push(pretty_opcode(step));
}
}
fn gen_opcode_list_if_necessary(&mut self) {
if self.last_index != self.draw_memory.inner_call_index {
self.gen_opcode_list();
self.last_index = self.draw_memory.inner_call_index;
}
}
fn active_buffer(&self) -> &[u8] {
match self.active_buffer {
BufferKind::Memory => self.current_step().memory.as_ref().unwrap().as_bytes(),
BufferKind::Calldata => &self.debug_call().calldata,
BufferKind::Returndata => &self.current_step().returndata,
}
}
}
impl TUIContext<'_> {
pub(crate) fn handle_event(&mut self, event: Event) -> ControlFlow<ExitReason> {
let ret = match event {
Event::Key(event) => self.handle_key_event(event),
Event::Mouse(event) => self.handle_mouse_event(event),
_ => ControlFlow::Continue(()),
};
// Generate the list after the event has been handled.
self.gen_opcode_list_if_necessary();
ret
}
fn handle_key_event(&mut self, event: KeyEvent) -> ControlFlow<ExitReason> {
// Breakpoints
if let KeyCode::Char(c) = event.code
&& c.is_alphabetic()
&& self.key_buffer.starts_with('\'')
{
self.handle_breakpoint(c);
return ControlFlow::Continue(());
}
let control = event.modifiers.contains(KeyModifiers::CONTROL);
match event.code {
// Exit
KeyCode::Char('q') => return ControlFlow::Break(ExitReason::CharExit),
// Scroll up the memory buffer
KeyCode::Char('k') | KeyCode::Up if control => self.repeat(|this| {
this.draw_memory.current_buf_startline =
this.draw_memory.current_buf_startline.saturating_sub(1);
}),
// Scroll down the memory buffer
KeyCode::Char('j') | KeyCode::Down if control => self.repeat(|this| {
let max_buf = (this.active_buffer().len() / 32).saturating_sub(1);
if this.draw_memory.current_buf_startline < max_buf {
this.draw_memory.current_buf_startline += 1;
}
}),
// Move up
KeyCode::Char('k') | KeyCode::Up => self.repeat(Self::step_back),
// Move down
KeyCode::Char('j') | KeyCode::Down => self.repeat(Self::step),
// Scroll up the stack
KeyCode::Char('K') => self.repeat(|this| {
this.draw_memory.current_stack_startline =
this.draw_memory.current_stack_startline.saturating_sub(1);
}),
// Scroll down the stack
KeyCode::Char('J') => self.repeat(|this| {
let max_stack =
this.current_step().stack.as_ref().map_or(0, |s| s.len()).saturating_sub(1);
if this.draw_memory.current_stack_startline < max_stack {
this.draw_memory.current_stack_startline += 1;
}
}),
// Cycle buffers
KeyCode::Char('b') => {
self.active_buffer = self.active_buffer.next();
self.draw_memory.current_buf_startline = 0;
}
// Go to top of file
KeyCode::Char('g') => {
self.draw_memory.inner_call_index = 0;
self.current_step = 0;
}
// Go to bottom of file
KeyCode::Char('G') => {
self.draw_memory.inner_call_index = self.debug_arena().len() - 1;
self.current_step = self.n_steps() - 1;
}
// Go to previous call
KeyCode::Char('c') => {
self.draw_memory.inner_call_index =
self.draw_memory.inner_call_index.saturating_sub(1);
self.current_step = self.n_steps() - 1;
}
// Go to next call
KeyCode::Char('C') => {
if self.debug_arena().len() > self.draw_memory.inner_call_index + 1 {
self.draw_memory.inner_call_index += 1;
self.current_step = 0;
}
}
// Step forward
KeyCode::Char('s') => self.repeat(|this| {
let remaining_steps = &this.debug_steps()[this.current_step..];
if let Some((i, _)) =
remaining_steps.iter().enumerate().skip(1).find(|(i, step)| {
let prev = &remaining_steps[*i - 1];
is_jump(step, prev)
})
{
this.current_step += i
}
}),
// Step backwards
KeyCode::Char('a') => self.repeat(|this| {
let ops = &this.debug_steps()[..this.current_step];
this.current_step = ops
.iter()
.enumerate()
.skip(1)
.rev()
.find(|&(i, op)| {
let prev = &ops[i - 1];
is_jump(op, prev)
})
.map(|(i, _)| i)
.unwrap_or_default();
}),
// Toggle stack labels
KeyCode::Char('t') => self.stack_labels = !self.stack_labels,
// Toggle memory UTF-8 decoding
KeyCode::Char('m') => self.buf_utf = !self.buf_utf,
// Toggle help notice
KeyCode::Char('h') => self.show_shortcuts = !self.show_shortcuts,
// Numbers for repeating commands or breakpoints
KeyCode::Char(
other @ ('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | '\''),
) => {
// Early return to not clear the buffer.
self.key_buffer.push(other);
return ControlFlow::Continue(());
}
// Unknown/unhandled key code
_ => {}
};
self.key_buffer.clear();
ControlFlow::Continue(())
}
fn handle_breakpoint(&mut self, c: char) {
// Find the location of the called breakpoint in the whole debug arena (at this address with
// this pc)
if let Some((caller, pc)) = self.debugger_context.breakpoints.get(&c) {
for (i, node) in self.debug_arena().iter().enumerate() {
if node.address == *caller
&& let Some(step) = node.steps.iter().position(|step| step.pc == *pc)
{
self.draw_memory.inner_call_index = i;
self.current_step = step;
break;
}
}
}
self.key_buffer.clear();
}
fn handle_mouse_event(&mut self, event: MouseEvent) -> ControlFlow<ExitReason> {
match event.kind {
MouseEventKind::ScrollUp => self.step_back(),
MouseEventKind::ScrollDown => self.step(),
_ => {}
}
ControlFlow::Continue(())
}
fn step_back(&mut self) {
if self.current_step > 0 {
self.current_step -= 1;
} else if self.draw_memory.inner_call_index > 0 {
self.draw_memory.inner_call_index -= 1;
self.current_step = self.n_steps() - 1;
}
}
fn step(&mut self) {
if self.current_step < self.n_steps() - 1 {
self.current_step += 1;
} else if self.draw_memory.inner_call_index < self.debug_arena().len() - 1 {
self.draw_memory.inner_call_index += 1;
self.current_step = 0;
}
}
/// Calls a closure `f` the number of times specified in the key buffer, and at least once.
fn repeat(&mut self, mut f: impl FnMut(&mut Self)) {
for _ in 0..buffer_as_number(&self.key_buffer) {
f(self);
}
}
fn n_steps(&self) -> usize {
self.debug_steps().len()
}
}
/// Grab number from buffer. Used for something like '10k' to move up 10 operations
fn buffer_as_number(s: &str) -> usize {
const MIN: usize = 1;
const MAX: usize = 100_000;
s.parse().unwrap_or(MIN).clamp(MIN, MAX)
}
fn pretty_opcode(step: &CallTraceStep) -> String {
if let Some(immediate) = step.immediate_bytes.as_ref().filter(|b| !b.is_empty()) {
format!("{}(0x{})", step.op, hex::encode(immediate))
} else {
step.op.to_string()
}
}
fn is_jump(step: &CallTraceStep, prev: &CallTraceStep) -> bool {
if !matches!(prev.op, OpCode::JUMP | OpCode::JUMPI) {
return false;
}
let immediate_len = prev.immediate_bytes.as_ref().map_or(0, |b| b.len());
step.pc != prev.pc + 1 + immediate_len
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-views-derive/src/lib.rs | linera-views-derive/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! The procedural macros for the crate `linera-views`.
use proc_macro::TokenStream;
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{format_ident, quote};
use syn::{parse_macro_input, parse_quote, Error, ItemStruct, Type};
#[derive(Debug, deluxe::ParseAttributes)]
#[deluxe(attributes(view))]
struct StructAttrs {
context: Option<syn::Type>,
}
struct Constraints<'a> {
input_constraints: Vec<&'a syn::WherePredicate>,
impl_generics: syn::ImplGenerics<'a>,
type_generics: syn::TypeGenerics<'a>,
}
impl<'a> Constraints<'a> {
fn get(item: &'a syn::ItemStruct) -> Self {
let (impl_generics, type_generics, maybe_where_clause) = item.generics.split_for_impl();
let input_constraints = maybe_where_clause
.map(|w| w.predicates.iter())
.into_iter()
.flatten()
.collect();
Self {
input_constraints,
impl_generics,
type_generics,
}
}
}
fn get_extended_entry(e: Type) -> Result<TokenStream2, Error> {
let syn::Type::Path(typepath) = &e else {
return Err(Error::new_spanned(e, "Expected a path type"));
};
let Some(path_segment) = typepath.path.segments.first() else {
return Err(Error::new_spanned(&typepath.path, "Path has no segments"));
};
let ident = &path_segment.ident;
let arguments = &path_segment.arguments;
Ok(quote! { #ident :: #arguments })
}
fn generate_view_code(input: ItemStruct, root: bool) -> Result<TokenStream2, Error> {
// Validate that all fields are named
for field in &input.fields {
if field.ident.is_none() {
return Err(Error::new_spanned(field, "All fields must be named."));
}
}
let Constraints {
input_constraints,
impl_generics,
type_generics,
} = Constraints::get(&input);
let attrs: StructAttrs = deluxe::parse_attributes(&input)
.map_err(|e| Error::new_spanned(&input, format!("Failed to parse attributes: {e}")))?;
let context = attrs.context.or_else(|| {
input.generics.type_params().next().map(|param| {
let ident = ¶m.ident;
parse_quote! { #ident }
})
}).ok_or_else(|| {
Error::new_spanned(
&input,
"Missing context: either add a generic type parameter or specify the context with #[view(context = YourContextType)]"
)
})?;
let struct_name = &input.ident;
let field_types: Vec<_> = input.fields.iter().map(|field| &field.ty).collect();
let mut name_quotes = Vec::new();
let mut rollback_quotes = Vec::new();
let mut pre_save_quotes = Vec::new();
let mut delete_view_quotes = Vec::new();
let mut clear_quotes = Vec::new();
let mut has_pending_changes_quotes = Vec::new();
let mut num_init_keys_quotes = Vec::new();
let mut pre_load_keys_quotes = Vec::new();
let mut post_load_keys_quotes = Vec::new();
let num_fields = input.fields.len();
for (idx, e) in input.fields.iter().enumerate() {
let name = e.ident.clone().unwrap();
let delete_view_ident = format_ident!("deleted{}", idx);
let g = get_extended_entry(e.ty.clone())?;
name_quotes.push(quote! { #name });
rollback_quotes.push(quote! { self.#name.rollback(); });
pre_save_quotes.push(quote! { let #delete_view_ident = self.#name.pre_save(batch)?; });
delete_view_quotes.push(quote! { #delete_view_ident });
clear_quotes.push(quote! { self.#name.clear(); });
has_pending_changes_quotes.push(quote! {
if self.#name.has_pending_changes().await {
return true;
}
});
num_init_keys_quotes.push(quote! { #g :: NUM_INIT_KEYS });
let derive_key_logic = if num_fields < 256 {
let idx_u8 = idx as u8;
quote! {
let __linera_reserved_index = #idx_u8;
let __linera_reserved_base_key = context.base_key().derive_tag_key(linera_views::views::MIN_VIEW_TAG, &__linera_reserved_index)?;
}
} else {
assert!(num_fields < 65536);
let idx_u16 = idx as u16;
quote! {
let __linera_reserved_index = #idx_u16;
let __linera_reserved_base_key = context.base_key().derive_tag_key(linera_views::views::MIN_VIEW_TAG, &__linera_reserved_index)?;
}
};
pre_load_keys_quotes.push(quote! {
#derive_key_logic
keys.extend(#g :: pre_load(&context.clone_with_base_key(__linera_reserved_base_key))?);
});
post_load_keys_quotes.push(quote! {
#derive_key_logic
let __linera_reserved_pos_next = __linera_reserved_pos + #g :: NUM_INIT_KEYS;
let #name = #g :: post_load(context.clone_with_base_key(__linera_reserved_base_key), &values[__linera_reserved_pos..__linera_reserved_pos_next])?;
__linera_reserved_pos = __linera_reserved_pos_next;
});
}
// derive_key_logic above adds one byte to the key as a tag, and then either one or two more
// bytes for field indices, depending on how many fields there are. Thus, we need to trim 2
// bytes if there are less than 256 child fields (then the field index fits within one byte),
// or 3 bytes if there are more.
let trim_key_logic = if num_fields < 256 {
quote! {
let __bytes_to_trim = 2;
}
} else {
quote! {
let __bytes_to_trim = 3;
}
};
let first_name_quote = name_quotes.first().ok_or(Error::new_spanned(
&input,
"Struct must have at least one field",
))?;
let load_metrics = if root && cfg!(feature = "metrics") {
quote! {
#[cfg(not(target_arch = "wasm32"))]
linera_views::metrics::increment_counter(
&linera_views::metrics::LOAD_VIEW_COUNTER,
stringify!(#struct_name),
&context.base_key().bytes,
);
#[cfg(not(target_arch = "wasm32"))]
use linera_views::metrics::prometheus_util::MeasureLatency as _;
let _latency = linera_views::metrics::LOAD_VIEW_LATENCY.measure_latency();
}
} else {
quote! {}
};
Ok(quote! {
impl #impl_generics linera_views::views::View for #struct_name #type_generics
where
#context: linera_views::context::Context,
#(#input_constraints,)*
#(#field_types: linera_views::views::View<Context = #context>,)*
{
const NUM_INIT_KEYS: usize = #(<#field_types as linera_views::views::View>::NUM_INIT_KEYS)+*;
type Context = #context;
fn context(&self) -> #context {
use linera_views::{context::Context as _};
#trim_key_logic
let context = self.#first_name_quote.context();
context.clone_with_trimmed_key(__bytes_to_trim)
}
fn pre_load(context: &#context) -> Result<Vec<Vec<u8>>, linera_views::ViewError> {
use linera_views::context::Context as _;
let mut keys = Vec::new();
#(#pre_load_keys_quotes)*
Ok(keys)
}
fn post_load(context: #context, values: &[Option<Vec<u8>>]) -> Result<Self, linera_views::ViewError> {
use linera_views::context::Context as _;
let mut __linera_reserved_pos = 0;
#(#post_load_keys_quotes)*
Ok(Self {#(#name_quotes),*})
}
async fn load(context: #context) -> Result<Self, linera_views::ViewError> {
use linera_views::{context::Context as _, store::ReadableKeyValueStore as _};
#load_metrics
if Self::NUM_INIT_KEYS == 0 {
Self::post_load(context, &[])
} else {
let keys = Self::pre_load(&context)?;
let values = context.store().read_multi_values_bytes(&keys).await?;
Self::post_load(context, &values)
}
}
fn rollback(&mut self) {
#(#rollback_quotes)*
}
async fn has_pending_changes(&self) -> bool {
#(#has_pending_changes_quotes)*
false
}
fn pre_save(&self, batch: &mut linera_views::batch::Batch) -> Result<bool, linera_views::ViewError> {
#(#pre_save_quotes)*
Ok( #(#delete_view_quotes)&&* )
}
fn post_save(&mut self) {
#(self.#name_quotes.post_save();)*
}
fn clear(&mut self) {
#(#clear_quotes)*
}
}
})
}
fn generate_root_view_code(input: ItemStruct) -> TokenStream2 {
let Constraints {
input_constraints,
impl_generics,
type_generics,
} = Constraints::get(&input);
let struct_name = &input.ident;
let increment_counter = if cfg!(feature = "metrics") {
quote! {
#[cfg(not(target_arch = "wasm32"))]
linera_views::metrics::increment_counter(
&linera_views::metrics::SAVE_VIEW_COUNTER,
stringify!(#struct_name),
&self.context().base_key().bytes,
);
}
} else {
quote! {}
};
quote! {
impl #impl_generics linera_views::views::RootView for #struct_name #type_generics
where
#(#input_constraints,)*
Self: linera_views::views::View,
{
async fn save(&mut self) -> Result<(), linera_views::ViewError> {
use linera_views::{context::Context as _, batch::Batch, store::WritableKeyValueStore as _, views::View as _};
#increment_counter
let mut batch = Batch::new();
self.pre_save(&mut batch)?;
if !batch.is_empty() {
self.context().store().write_batch(batch).await?;
}
self.post_save();
Ok(())
}
}
}
}
fn generate_hash_view_code(input: ItemStruct) -> Result<TokenStream2, Error> {
// Validate that all fields are named
for field in &input.fields {
if field.ident.is_none() {
return Err(Error::new_spanned(field, "All fields must be named."));
}
}
let Constraints {
input_constraints,
impl_generics,
type_generics,
} = Constraints::get(&input);
let struct_name = &input.ident;
let field_types = input.fields.iter().map(|field| &field.ty);
let mut field_hashes_mut = Vec::new();
let mut field_hashes = Vec::new();
for e in &input.fields {
let name = e.ident.as_ref().unwrap();
field_hashes_mut.push(quote! { hasher.write_all(self.#name.hash_mut().await?.as_ref())?; });
field_hashes.push(quote! { hasher.write_all(self.#name.hash().await?.as_ref())?; });
}
Ok(quote! {
impl #impl_generics linera_views::views::HashableView for #struct_name #type_generics
where
#(#field_types: linera_views::views::HashableView,)*
#(#input_constraints,)*
Self: linera_views::views::View,
{
type Hasher = linera_views::sha3::Sha3_256;
async fn hash_mut(&mut self) -> Result<<Self::Hasher as linera_views::views::Hasher>::Output, linera_views::ViewError> {
use linera_views::views::Hasher as _;
use std::io::Write as _;
let mut hasher = Self::Hasher::default();
#(#field_hashes_mut)*
Ok(hasher.finalize())
}
async fn hash(&self) -> Result<<Self::Hasher as linera_views::views::Hasher>::Output, linera_views::ViewError> {
use linera_views::views::Hasher as _;
use std::io::Write as _;
let mut hasher = Self::Hasher::default();
#(#field_hashes)*
Ok(hasher.finalize())
}
}
})
}
fn generate_crypto_hash_code(input: ItemStruct) -> TokenStream2 {
let Constraints {
input_constraints,
impl_generics,
type_generics,
} = Constraints::get(&input);
let field_types = input.fields.iter().map(|field| &field.ty);
let struct_name = &input.ident;
let hash_type = syn::Ident::new(&format!("{struct_name}Hash"), Span::call_site());
quote! {
impl #impl_generics linera_views::views::CryptoHashView
for #struct_name #type_generics
where
#(#field_types: linera_views::views::HashableView,)*
#(#input_constraints,)*
Self: linera_views::views::View,
{
async fn crypto_hash(&self) -> Result<linera_base::crypto::CryptoHash, linera_views::ViewError> {
use linera_base::crypto::{BcsHashable, CryptoHash};
use linera_views::{
generic_array::GenericArray,
sha3::{digest::OutputSizeUser, Sha3_256},
views::HashableView as _,
};
#[derive(serde::Serialize, serde::Deserialize)]
struct #hash_type(GenericArray<u8, <Sha3_256 as OutputSizeUser>::OutputSize>);
impl<'de> BcsHashable<'de> for #hash_type {}
let hash = self.hash().await?;
Ok(CryptoHash::new(&#hash_type(hash)))
}
async fn crypto_hash_mut(&mut self) -> Result<linera_base::crypto::CryptoHash, linera_views::ViewError> {
use linera_base::crypto::{BcsHashable, CryptoHash};
use linera_views::{
generic_array::GenericArray,
sha3::{digest::OutputSizeUser, Sha3_256},
views::HashableView as _,
};
#[derive(serde::Serialize, serde::Deserialize)]
struct #hash_type(GenericArray<u8, <Sha3_256 as OutputSizeUser>::OutputSize>);
impl<'de> BcsHashable<'de> for #hash_type {}
let hash = self.hash_mut().await?;
Ok(CryptoHash::new(&#hash_type(hash)))
}
}
}
}
fn generate_clonable_view_code(input: ItemStruct) -> Result<TokenStream2, Error> {
// Validate that all fields are named
for field in &input.fields {
if field.ident.is_none() {
return Err(Error::new_spanned(field, "All fields must be named."));
}
}
let Constraints {
input_constraints,
impl_generics,
type_generics,
} = Constraints::get(&input);
let struct_name = &input.ident;
let mut clone_constraints = vec![];
let mut clone_fields = vec![];
for field in &input.fields {
let name = &field.ident;
let ty = &field.ty;
clone_constraints.push(quote! { #ty: ClonableView });
clone_fields.push(quote! { #name: self.#name.clone_unchecked()? });
}
Ok(quote! {
impl #impl_generics linera_views::views::ClonableView for #struct_name #type_generics
where
#(#input_constraints,)*
#(#clone_constraints,)*
Self: linera_views::views::View,
{
fn clone_unchecked(&mut self) -> Result<Self, linera_views::ViewError> {
Ok(Self {
#(#clone_fields,)*
})
}
}
})
}
/// Converts a code-generation result into the final `TokenStream`, turning
/// any error into a `compile_error!` invocation at the offending span.
fn to_token_stream(input: Result<TokenStream2, Error>) -> TokenStream {
    input.map_or_else(|err| err.to_compile_error().into(), Into::into)
}
/// Derives the `View` trait for a struct whose fields are all views.
#[proc_macro_derive(View, attributes(view))]
pub fn derive_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(generate_view_code(item, false))
}
/// Combines the `View` and `HashableView` implementations for `input`.
fn derive_hash_view_token_stream2(input: ItemStruct) -> Result<TokenStream2, Error> {
    // Generate the `View` impl first so its error, if any, is reported first.
    let mut tokens = generate_view_code(input.clone(), false)?;
    let hash_impl = generate_hash_view_code(input)?;
    tokens.extend(hash_impl);
    Ok(tokens)
}
/// Derives the `View` and `HashableView` traits for a struct of views.
#[proc_macro_derive(HashableView, attributes(view))]
pub fn derive_hash_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(derive_hash_view_token_stream2(item))
}
/// Combines the `View` and `RootView` implementations for `input`.
fn derive_root_view_token_stream2(input: ItemStruct) -> Result<TokenStream2, Error> {
    let mut tokens = generate_view_code(input.clone(), true)?;
    let root_impl = generate_root_view_code(input);
    tokens.extend(root_impl);
    Ok(tokens)
}
/// Derives the `View` and `RootView` traits for a struct of views.
#[proc_macro_derive(RootView, attributes(view))]
pub fn derive_root_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(derive_root_view_token_stream2(item))
}
/// Combines the `View`, `HashableView` and `CryptoHashView` implementations.
fn derive_crypto_hash_view_token_stream2(input: ItemStruct) -> Result<TokenStream2, Error> {
    // Keep generation order: `View`, then `HashableView`, then `CryptoHashView`.
    let mut tokens = generate_view_code(input.clone(), false)?;
    let hash_impl = generate_hash_view_code(input.clone())?;
    tokens.extend(hash_impl);
    tokens.extend(generate_crypto_hash_code(input));
    Ok(tokens)
}
/// Derives the `View`, `HashableView` and `CryptoHashView` traits.
#[proc_macro_derive(CryptoHashView, attributes(view))]
pub fn derive_crypto_hash_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(derive_crypto_hash_view_token_stream2(item))
}
/// Combines `View`, `RootView`, `HashableView` and `CryptoHashView` impls.
fn derive_crypto_hash_root_view_token_stream2(input: ItemStruct) -> Result<TokenStream2, Error> {
    // Keep generation order so errors are reported in the same order as before.
    let mut tokens = generate_view_code(input.clone(), true)?;
    tokens.extend(generate_root_view_code(input.clone()));
    let hash_impl = generate_hash_view_code(input.clone())?;
    tokens.extend(hash_impl);
    tokens.extend(generate_crypto_hash_code(input));
    Ok(tokens)
}
/// Derives the `View`, `RootView`, `HashableView` and `CryptoHashView` traits.
#[proc_macro_derive(CryptoHashRootView, attributes(view))]
pub fn derive_crypto_hash_root_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(derive_crypto_hash_root_view_token_stream2(item))
}
/// Combines the `View`, `RootView` and `HashableView` impls (test-only helper).
#[cfg(test)]
fn derive_hashable_root_view_token_stream2(input: ItemStruct) -> Result<TokenStream2, Error> {
    let mut tokens = generate_view_code(input.clone(), true)?;
    tokens.extend(generate_root_view_code(input.clone()));
    let hash_impl = generate_hash_view_code(input)?;
    tokens.extend(hash_impl);
    Ok(tokens)
}
/// Derives the `View`, `RootView` and `HashableView` traits (test builds only).
#[proc_macro_derive(HashableRootView, attributes(view))]
#[cfg(test)]
pub fn derive_hashable_root_view(input: TokenStream) -> TokenStream {
    let item = parse_macro_input!(input as ItemStruct);
    to_token_stream(derive_hashable_root_view_token_stream2(item))
}
#[proc_macro_derive(ClonableView, attributes(view))]
pub fn derive_clonable_view(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as ItemStruct);
match generate_clonable_view_code(input) {
Ok(tokens) => tokens.into(),
Err(err) => err.to_compile_error().into(),
}
}
/// Snapshot tests for the code generators, plus failure-scenario tests for
/// inputs the generators must reject.
#[cfg(test)]
pub mod tests {
    use quote::quote;
    use syn::{parse_quote, AngleBracketedGenericArguments};
    use crate::*;
    /// Pretty-prints generated tokens so snapshots are human-readable.
    fn pretty(tokens: TokenStream2) -> String {
        prettyplease::unparse(
            &syn::parse2::<syn::File>(tokens).expect("failed to parse test output"),
        )
    }
    /// Snapshots the generated `View` impl for each test context. The snapshot
    /// name carries a `_metrics` marker when the `metrics` feature is enabled,
    /// since that feature changes the generated code.
    #[test]
    fn test_generate_view_code() {
        for context in SpecificContextInfo::test_cases() {
            let input = context.test_view_input();
            insta::assert_snapshot!(
                format!(
                    "test_generate_view_code{}_{}",
                    if cfg!(feature = "metrics") {
                        "_metrics"
                    } else {
                        ""
                    },
                    context.name,
                ),
                pretty(generate_view_code(input, true).unwrap())
            );
        }
    }
    /// Snapshots the generated `HashableView` impl for each test context.
    #[test]
    fn test_generate_hash_view_code() {
        for context in SpecificContextInfo::test_cases() {
            let input = context.test_view_input();
            insta::assert_snapshot!(
                format!("test_generate_hash_view_code_{}", context.name),
                pretty(generate_hash_view_code(input).unwrap())
            );
        }
    }
    /// Snapshots the generated `RootView` impl for each test context; the name
    /// is feature-dependent, as in `test_generate_view_code`.
    #[test]
    fn test_generate_root_view_code() {
        for context in SpecificContextInfo::test_cases() {
            let input = context.test_view_input();
            insta::assert_snapshot!(
                format!(
                    "test_generate_root_view_code{}_{}",
                    if cfg!(feature = "metrics") {
                        "_metrics"
                    } else {
                        ""
                    },
                    context.name,
                ),
                pretty(generate_root_view_code(input))
            );
        }
    }
    /// Snapshots the generated `CryptoHashView` impl.
    // NOTE(review): unlike the tests above, these snapshots are not named per
    // context — confirm the auto-named snapshots distinguish the iterations.
    #[test]
    fn test_generate_crypto_hash_code() {
        for context in SpecificContextInfo::test_cases() {
            let input = context.test_view_input();
            insta::assert_snapshot!(pretty(generate_crypto_hash_code(input)));
        }
    }
    /// Snapshots the generated `ClonableView` impl.
    #[test]
    fn test_generate_clonable_view_code() {
        for context in SpecificContextInfo::test_cases() {
            let input = context.test_view_input();
            insta::assert_snapshot!(pretty(generate_clonable_view_code(input).unwrap()));
        }
    }
    /// One combination of context type, generics and `where` clause used to
    /// build a test view struct.
    #[derive(Clone)]
    pub struct SpecificContextInfo {
        /// Suffix identifying this case in snapshot names.
        name: String,
        /// Optional `#[view(context = …)]` attribute for the test struct.
        attribute: Option<TokenStream2>,
        /// The context type used in the test struct's field types.
        context: Type,
        /// Generic arguments of the test struct.
        generics: AngleBracketedGenericArguments,
        /// Optional `where` clause of the test struct.
        where_clause: Option<TokenStream2>,
    }
    impl SpecificContextInfo {
        /// Case with a plain `C` generic parameter and no `#[view]` attribute.
        pub fn empty() -> Self {
            SpecificContextInfo {
                name: "C".to_string(),
                attribute: None,
                context: syn::parse_quote! { C },
                generics: syn::parse_quote! { <C> },
                where_clause: None,
            }
        }
        /// Case with an explicit context type given via `#[view(context = …)]`.
        pub fn new(context: syn::Type) -> Self {
            let name = quote! { #context };
            SpecificContextInfo {
                // Sanitize the type's token representation into a snapshot-safe name.
                name: format!("{name}")
                    .replace(' ', "")
                    .replace([':', '<', '>'], "_"),
                attribute: Some(quote! { #[view(context = #context)] }),
                context,
                generics: parse_quote! { <> },
                where_clause: None,
            }
        }
        /// Sets the `where_clause` to a dummy value for test cases with a where clause.
        ///
        /// Also adds a `MyParam` generic type parameter to the `generics` field, which is the type
        /// constrained by the dummy predicate in the `where_clause`.
        pub fn with_dummy_where_clause(mut self) -> Self {
            self.generics.args.push(parse_quote! { MyParam });
            self.where_clause = Some(quote! {
                where MyParam: Send + Sync + 'static,
            });
            self.name.push_str("_with_where");
            self
        }
        /// Yields every test case: each context type, with and without a
        /// dummy `where` clause.
        pub fn test_cases() -> impl Iterator<Item = Self> {
            Some(Self::empty())
                .into_iter()
                .chain(
                    [
                        syn::parse_quote! { CustomContext },
                        syn::parse_quote! { custom::path::to::ContextType },
                        syn::parse_quote! { custom::GenericContext<T> },
                    ]
                    .into_iter()
                    .map(Self::new),
                )
                .flat_map(|case| [case.clone(), case.with_dummy_where_clause()])
        }
        /// Builds the `TestView` struct definition for this case.
        pub fn test_view_input(&self) -> ItemStruct {
            let SpecificContextInfo {
                attribute,
                context,
                generics,
                where_clause,
                ..
            } = self;
            parse_quote! {
                #attribute
                struct TestView #generics
                #where_clause
                {
                    register: RegisterView<#context, usize>,
                    collection: CollectionView<#context, usize, RegisterView<#context, usize>>,
                }
            }
        }
    }
    // Failure scenario tests
    /// Tuple structs are rejected by the `View` generator.
    #[test]
    fn test_tuple_struct_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C>(RegisterView<C, u64>);
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("All fields must be named"));
    }
    /// Structs without fields are rejected.
    #[test]
    fn test_empty_struct_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {}
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Struct must have at least one field"));
    }
    /// A struct with no generic parameters and no `#[view(context = …)]`
    /// attribute has no context and is rejected.
    #[test]
    fn test_missing_context_no_generics_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView {
                register: RegisterView<CustomContext, u64>,
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Missing context"));
    }
    /// An empty `<>` generics list likewise provides no context.
    #[test]
    fn test_missing_context_empty_generics_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<> {
                register: RegisterView<CustomContext, u64>,
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Missing context"));
    }
    /// Function-pointer field types are not path types and are rejected.
    #[test]
    fn test_non_path_type_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                field: fn() -> i32,
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Expected a path type"));
    }
    /// The `HashableView` generator also rejects unnamed fields.
    #[test]
    fn test_unnamed_field_in_hash_view_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C>(RegisterView<C, u64>);
        };
        let result = generate_hash_view_code(input);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("All fields must be named"));
    }
    /// The `ClonableView` generator also rejects unnamed fields.
    #[test]
    fn test_unnamed_field_in_clonable_view_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C>(RegisterView<C, u64>);
        };
        let result = generate_clonable_view_code(input);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("All fields must be named"));
    }
    /// Array field types are not path types and are rejected.
    #[test]
    fn test_array_type_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                field: [u8; 32],
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Expected a path type"));
    }
    /// Reference field types are not path types and are rejected.
    #[test]
    fn test_reference_type_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                field: &'static str,
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Expected a path type"));
    }
    /// Raw-pointer field types are not path types and are rejected.
    #[test]
    fn test_pointer_type_failure() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                field: *const i32,
            }
        };
        let result = generate_view_code(input, false);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Expected a path type"));
    }
    /// Empty structs fail root-view generation at the underlying view level.
    #[test]
    fn test_generate_root_view_code_with_empty_struct() {
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {}
        };
        // Root view generation depends on view generation, so this should fail at the view level
        let result = generate_view_code(input.clone(), true);
        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("Struct must have at least one field"));
    }
    /// Documents that only some generators validate field types.
    #[test]
    fn test_generate_functions_behavior_differences() {
        // Some generation functions validate field types while others don't
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                field: fn() -> i32,
            }
        };
        // View code generation validates field types and should fail
        let view_result = generate_view_code(input.clone(), false);
        assert!(view_result.is_err());
        let error_msg = view_result.unwrap_err().to_string();
        assert!(error_msg.contains("Expected a path type"));
        // Hash view generation doesn't validate field types in the same way
        let hash_result = generate_hash_view_code(input.clone());
        assert!(hash_result.is_ok());
        // Crypto hash code generation also succeeds
        let _result = generate_crypto_hash_code(input);
    }
    /// Crypto-hash generation does not validate field types, so it succeeds.
    #[test]
    fn test_crypto_hash_code_generation_failure() {
        // Crypto hash code generation should succeed as it doesn't validate field types directly
        let input: ItemStruct = parse_quote! {
            struct TestView<C> {
                register: RegisterView<C, usize>,
            }
        };
        let _result = generate_crypto_hash_code(input);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/scripts/check_copyright_header/src/main.rs | scripts/check_copyright_header/src/main.rs | use std::{
env,
fs::File,
io::{BufRead, BufReader},
};
use thiserror::Error;
/// Errors reported when a file's copyright header does not match the expected
/// Zefchain Labs / Apache-2.0 layout.
#[derive(Error, Debug, PartialEq)]
pub enum CheckFileHeaderError {
    /// A line that is neither a copyright line nor the SPDX line appeared
    /// before the end of the header.
    #[error("Unexpected line reached")]
    UnexpectedLineReachedError,
    /// The exact `// Copyright (c) Zefchain Labs, Inc.` line was never seen.
    #[error("Incorrect copyright header, Zefchain Labs header not found")]
    ZefchainLabsHeaderNotFoundError,
    /// The header was not followed by an empty separation line.
    #[error("Separation line not found")]
    SeparationLineNotFoundError,
    /// The file ended before a complete header was found.
    #[error("Incorrect copyright header")]
    IncorrectCopyrightHeaderError,
}
/// Checks that `lines` starts with the expected copyright header: one or more
/// `// Copyright (c)` lines (including the exact Zefchain Labs line), the
/// Apache-2.0 SPDX line, and then an empty separation line.
///
/// Panics if reading a line yields an I/O error.
fn check_file_header(
    lines: impl IntoIterator<Item = Result<String, std::io::Error>>,
) -> Result<(), CheckFileHeaderError> {
    let mut saw_zefchain_header = false;
    let mut header_finished = false;
    for line in lines {
        let line = line.expect("Failed to read line");
        if header_finished {
            // First line after the SPDX line: the Zefchain header must have
            // been seen, and this line must be the empty separator.
            if !saw_zefchain_header {
                return Err(CheckFileHeaderError::ZefchainLabsHeaderNotFoundError);
            }
            return if line.is_empty() {
                // Found separation line
                Ok(())
            } else {
                Err(CheckFileHeaderError::SeparationLineNotFoundError)
            };
        }
        match line.as_str() {
            "// Copyright (c) Zefchain Labs, Inc." => saw_zefchain_header = true,
            header if header.starts_with("// Copyright (c)") => {}
            "// SPDX-License-Identifier: Apache-2.0" => header_finished = true,
            _ => return Err(CheckFileHeaderError::UnexpectedLineReachedError),
        }
    }
    // The iterator ended before a complete header was found.
    Err(CheckFileHeaderError::IncorrectCopyrightHeaderError)
}
/// Checks the copyright header of every file named on the command line and
/// returns a failure exit code if any of them is malformed.
fn main() -> std::process::ExitCode {
    let mut failed = false;
    for file_path in env::args().skip(1) {
        let file = File::open(&file_path).expect("Failed to open file");
        let reader = BufReader::new(file);
        if let Err(e) = check_file_header(reader.lines()) {
            eprintln!("{}: {}", file_path, e);
            failed = true;
        }
    }
    if failed {
        std::process::ExitCode::FAILURE
    } else {
        std::process::ExitCode::SUCCESS
    }
}
/// Unit tests for `check_file_header`.
#[cfg(test)]
mod tests {
    use super::*;
    /// A minimal correct header passes.
    #[test]
    fn test_valid_file_with_header() {
        let lines = vec![
            "// Copyright (c) Zefchain Labs, Inc.",
            "// SPDX-License-Identifier: Apache-2.0",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert!(check_file_header(lines).is_ok());
    }
    /// Additional copyright lines besides the Zefchain one are allowed.
    #[test]
    fn test_valid_file_with_multiple_headers() {
        let lines = vec![
            "// Copyright (c) Zefchain Labs, Inc.",
            "// Copyright (c) Some Other Company",
            "// SPDX-License-Identifier: Apache-2.0",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert!(check_file_header(lines).is_ok());
    }
    /// Omitting the Zefchain line entirely is rejected.
    #[test]
    fn test_invalid_file_missing_zefchain_header() {
        let lines = vec![
            "// SPDX-License-Identifier: Apache-2.0",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert_eq!(
            check_file_header(lines).unwrap_err(),
            CheckFileHeaderError::ZefchainLabsHeaderNotFoundError,
        );
    }
    /// A copyright line from another company does not satisfy the Zefchain
    /// requirement.
    #[test]
    fn test_invalid_file_incorrect_zefchain_header() {
        let lines = vec![
            "// Copyright (c) Some Other Company",
            "// SPDX-License-Identifier: Apache-2.0",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert_eq!(
            check_file_header(lines).unwrap_err(),
            CheckFileHeaderError::ZefchainLabsHeaderNotFoundError,
        );
    }
    /// A stray line *after* the SPDX line is reported as a missing separation
    /// line, because the checker is already past the header at that point.
    #[test]
    fn test_invalid_file_unexpected_line() {
        let lines = vec![
            "// Copyright (c) Zefchain Labs, Inc.",
            "// SPDX-License-Identifier: Apache-2.0",
            "Unexpected line",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert_eq!(
            check_file_header(lines).unwrap_err(),
            CheckFileHeaderError::SeparationLineNotFoundError,
        );
    }
    /// An empty line *before* the SPDX line is an unexpected line inside the
    /// header.
    #[test]
    fn test_invalid_file_empty_line_before_header() {
        let lines = vec![
            "",
            "// SPDX-License-Identifier: Apache-2.0",
            "",
            "// Rest of the file...",
        ]
        .into_iter()
        .map(String::from)
        .map(Result::Ok);
        assert_eq!(
            check_file_header(lines).unwrap_err(),
            CheckFileHeaderError::UnexpectedLineReachedError,
        );
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/build.rs | linera-execution/build.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
/// Build script: registers `cfg` aliases used throughout the crate for
/// conditional compilation (e.g. `#[cfg(with_metrics)]`).
fn main() {
    cfg_aliases::cfg_aliases! {
        web: { all(target_arch = "wasm32", feature = "web") },
        // The next three are native-only: they require `not(target_arch = "wasm32")`.
        with_fs: { all(not(target_arch = "wasm32"), feature = "fs") },
        with_metrics: { all(not(target_arch = "wasm32"), feature = "metrics") },
        with_graphql: { not(web) },
        with_testing: { any(test, feature = "test") },
        with_tokio_multi_thread: { not(target_arch = "wasm32") },
        with_wasmer: { feature = "wasmer" },
        with_revm: { all(not(web), feature = "revm") },
        with_wasmtime: { all(not(target_arch = "wasm32"), feature = "wasmtime") },
        // If you change this, don't forget to update `WasmRuntime` and
        // `WasmRuntime::default_with_sanitizer`
        with_wasm_runtime: { any(with_wasmer, with_wasmtime) },
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/execution_state_actor.rs | linera-execution/src/execution_state_actor.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Handle requests from the synchronous execution thread of user applications.
use std::collections::{BTreeMap, BTreeSet};
use custom_debug_derive::Debug;
use futures::{channel::mpsc, StreamExt as _};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
data_types::{
Amount, ApplicationPermissions, ArithmeticError, BlobContent, BlockHeight, OracleResponse,
Timestamp,
},
ensure, hex_debug, hex_vec_debug, http,
identifiers::{Account, AccountOwner, BlobId, BlobType, ChainId, EventId, StreamId},
ownership::ChainOwnership,
time::Instant,
};
use linera_views::{batch::Batch, context::Context, views::View};
use oneshot::Sender;
use reqwest::{header::HeaderMap, Client, Url};
use crate::{
execution::UserAction,
runtime::ContractSyncRuntime,
system::{CreateApplicationResult, OpenChainConfig},
util::{OracleResponseExt as _, RespondExt as _},
ApplicationDescription, ApplicationId, ExecutionError, ExecutionRuntimeContext,
ExecutionStateView, JsVec, Message, MessageContext, MessageKind, ModuleId, Operation,
OperationContext, OutgoingMessage, ProcessStreamsContext, QueryContext, QueryOutcome,
ResourceController, SystemMessage, TransactionTracker, UserContractCode, UserServiceCode,
};
/// Actor for handling requests to the execution state.
pub struct ExecutionStateActor<'a, C> {
    /// The execution state read and mutated while serving requests.
    state: &'a mut ExecutionStateView<C>,
    /// Records per-transaction data (oracle responses, outgoing messages,
    /// events, created blobs, streams to process).
    txn_tracker: &'a mut TransactionTracker,
    /// Tracks resource usage (e.g. blob reads, published events) for the
    /// current execution.
    resource_controller: &'a mut ResourceController<Option<AccountOwner>>,
}
/// Prometheus metrics recorded by the execution state actor.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    /// Histogram of the latency to load a contract bytecode.
    pub static LOAD_CONTRACT_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "load_contract_latency",
            "Load contract latency",
            &[],
            exponential_bucket_latencies(250.0),
        )
    });
    /// Histogram of the latency to load a service bytecode.
    pub static LOAD_SERVICE_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "load_service_latency",
            "Load service latency",
            &[],
            exponential_bucket_latencies(250.0),
        )
    });
}
pub(crate) type ExecutionStateSender = mpsc::UnboundedSender<ExecutionRequest>;
impl<'a, C> ExecutionStateActor<'a, C>
where
C: Context + Clone + 'static,
C::Extra: ExecutionRuntimeContext,
{
/// Creates a new execution state actor.
pub fn new(
state: &'a mut ExecutionStateView<C>,
txn_tracker: &'a mut TransactionTracker,
resource_controller: &'a mut ResourceController<Option<AccountOwner>>,
) -> Self {
Self {
state,
txn_tracker,
resource_controller,
}
}
pub(crate) async fn load_contract(
&mut self,
id: ApplicationId,
) -> Result<(UserContractCode, ApplicationDescription), ExecutionError> {
#[cfg(with_metrics)]
let _latency = metrics::LOAD_CONTRACT_LATENCY.measure_latency();
let blob_id = id.description_blob_id();
let description = match self.txn_tracker.get_blob_content(&blob_id) {
Some(blob) => bcs::from_bytes(blob.bytes())?,
None => {
self.state
.system
.describe_application(id, self.txn_tracker)
.await?
}
};
let code = self
.state
.context()
.extra()
.get_user_contract(&description, self.txn_tracker)
.await?;
Ok((code, description))
}
pub(crate) async fn load_service(
&mut self,
id: ApplicationId,
) -> Result<(UserServiceCode, ApplicationDescription), ExecutionError> {
#[cfg(with_metrics)]
let _latency = metrics::LOAD_SERVICE_LATENCY.measure_latency();
let blob_id = id.description_blob_id();
let description = match self.txn_tracker.get_blob_content(&blob_id) {
Some(blob) => bcs::from_bytes(blob.bytes())?,
None => {
self.state
.system
.describe_application(id, self.txn_tracker)
.await?
}
};
let code = self
.state
.context()
.extra()
.get_user_service(&description, self.txn_tracker)
.await?;
Ok((code, description))
}
// TODO(#1416): Support concurrent I/O.
    /// Dispatches a single [`ExecutionRequest`] against the execution state and
    /// replies through the request's callback channel.
    ///
    /// Some arms report authorization failures through the callback as an
    /// `Err` value instead of returning an error from this method.
    pub(crate) async fn handle_request(
        &mut self,
        request: ExecutionRequest,
    ) -> Result<(), ExecutionError> {
        use ExecutionRequest::*;
        match request {
            // Bytecode loading requests; only compiled on non-web targets.
            #[cfg(not(web))]
            LoadContract { id, callback } => {
                let (code, description) = self.load_contract(id).await?;
                callback.respond((code, description))
            }
            #[cfg(not(web))]
            LoadService { id, callback } => {
                let (code, description) = self.load_service(id).await?;
                callback.respond((code, description))
            }
            // Balance queries against the system state.
            ChainBalance { callback } => {
                let balance = *self.state.system.balance.get();
                callback.respond(balance);
            }
            OwnerBalance { owner, callback } => {
                // An owner without an entry has a zero balance.
                let balance = self
                    .state
                    .system
                    .balances
                    .get(&owner)
                    .await?
                    .unwrap_or_default();
                callback.respond(balance);
            }
            OwnerBalances { callback } => {
                callback.respond(self.state.system.balances.index_values().await?);
            }
            BalanceOwners { callback } => {
                let owners = self.state.system.balances.indices().await?;
                callback.respond(owners);
            }
            // Token movements; any resulting message is recorded as outgoing.
            Transfer {
                source,
                destination,
                amount,
                signer,
                application_id,
                callback,
            } => {
                let maybe_message = self
                    .state
                    .system
                    .transfer(signer, Some(application_id), source, destination, amount)
                    .await?;
                self.txn_tracker.add_outgoing_messages(maybe_message);
                callback.respond(());
            }
            Claim {
                source,
                destination,
                amount,
                signer,
                application_id,
                callback,
            } => {
                let maybe_message = self
                    .state
                    .system
                    .claim(
                        signer,
                        Some(application_id),
                        source.owner,
                        source.chain_id,
                        destination,
                        amount,
                    )
                    .await?;
                self.txn_tracker.add_outgoing_messages(maybe_message);
                callback.respond(());
            }
            SystemTimestamp { callback } => {
                let timestamp = *self.state.system.timestamp.get();
                callback.respond(timestamp);
            }
            ChainOwnership { callback } => {
                let ownership = self.state.system.ownership.get().clone();
                callback.respond(ownership);
            }
            // Per-application key/value store operations. A missing view for
            // the application behaves as empty storage.
            ContainsKey { id, key, callback } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let result = match view {
                    Some(view) => view.contains_key(&key).await?,
                    None => false,
                };
                callback.respond(result);
            }
            ContainsKeys { id, keys, callback } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let result = match view {
                    Some(view) => view.contains_keys(&keys).await?,
                    None => vec![false; keys.len()],
                };
                callback.respond(result);
            }
            ReadMultiValuesBytes { id, keys, callback } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let values = match view {
                    Some(view) => view.multi_get(&keys).await?,
                    None => vec![None; keys.len()],
                };
                callback.respond(values);
            }
            ReadValueBytes { id, key, callback } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let result = match view {
                    Some(view) => view.get(&key).await?,
                    None => None,
                };
                callback.respond(result);
            }
            FindKeysByPrefix {
                id,
                key_prefix,
                callback,
            } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let result = match view {
                    Some(view) => view.find_keys_by_prefix(&key_prefix).await?,
                    None => Vec::new(),
                };
                callback.respond(result);
            }
            FindKeyValuesByPrefix {
                id,
                key_prefix,
                callback,
            } => {
                let view = self.state.users.try_load_entry(&id).await?;
                let result = match view {
                    Some(view) => view.find_key_values_by_prefix(&key_prefix).await?,
                    None => Vec::new(),
                };
                callback.respond(result);
            }
            WriteBatch {
                id,
                batch,
                callback,
            } => {
                let mut view = self.state.users.try_load_entry_mut(&id).await?;
                view.write_batch(batch).await?;
                callback.respond(());
            }
            // Chain management.
            OpenChain {
                ownership,
                balance,
                parent_id,
                block_height,
                application_permissions,
                timestamp,
                callback,
            } => {
                let config = OpenChainConfig {
                    ownership,
                    balance,
                    application_permissions,
                };
                let chain_id = self
                    .state
                    .system
                    .open_chain(config, parent_id, block_height, timestamp, self.txn_tracker)
                    .await?;
                callback.respond(chain_id);
            }
            // Authorization failures are reported through the callback.
            CloseChain {
                application_id,
                callback,
            } => {
                let app_permissions = self.state.system.application_permissions.get();
                if !app_permissions.can_close_chain(&application_id) {
                    callback.respond(Err(ExecutionError::UnauthorizedApplication(application_id)));
                } else {
                    self.state.system.close_chain();
                    callback.respond(Ok(()));
                }
            }
            ChangeApplicationPermissions {
                application_id,
                application_permissions,
                callback,
            } => {
                let app_permissions = self.state.system.application_permissions.get();
                if !app_permissions.can_change_application_permissions(&application_id) {
                    callback.respond(Err(ExecutionError::UnauthorizedApplication(application_id)));
                } else {
                    self.state
                        .system
                        .application_permissions
                        .set(application_permissions);
                    callback.respond(Ok(()));
                }
            }
            PeekApplicationIndex { callback } => {
                let index = self.txn_tracker.peek_application_index();
                callback.respond(index)
            }
            CreateApplication {
                chain_id,
                block_height,
                module_id,
                parameters,
                required_application_ids,
                callback,
            } => {
                let create_application_result = self
                    .state
                    .system
                    .create_application(
                        chain_id,
                        block_height,
                        module_id,
                        parameters,
                        required_application_ids,
                        self.txn_tracker,
                    )
                    .await?;
                callback.respond(create_application_result);
            }
            // Outbound HTTP, recorded/replayed through the oracle mechanism.
            // The target host must be on the committee's allow list, and the
            // response size is capped by the committee policy.
            PerformHttpRequest {
                request,
                http_responses_are_oracle_responses,
                callback,
            } => {
                let system = &mut self.state.system;
                let response = self
                    .txn_tracker
                    .oracle(|| async {
                        let headers = request
                            .headers
                            .into_iter()
                            .map(|http::Header { name, value }| {
                                Ok((name.parse()?, value.try_into()?))
                            })
                            .collect::<Result<HeaderMap, ExecutionError>>()?;
                        let url = Url::parse(&request.url)?;
                        let host = url
                            .host_str()
                            .ok_or_else(|| ExecutionError::UnauthorizedHttpRequest(url.clone()))?;
                        let (_epoch, committee) = system
                            .current_committee()
                            .ok_or_else(|| ExecutionError::UnauthorizedHttpRequest(url.clone()))?;
                        let allowed_hosts = &committee.policy().http_request_allow_list;
                        ensure!(
                            allowed_hosts.contains(host),
                            ExecutionError::UnauthorizedHttpRequest(url)
                        );
                        let request = Client::new()
                            .request(request.method.into(), url)
                            .body(request.body)
                            .headers(headers);
                        // Request timeouts are only supported natively.
                        #[cfg(not(web))]
                        let request = request.timeout(linera_base::time::Duration::from_millis(
                            committee.policy().http_request_timeout_ms,
                        ));
                        let response = request.send().await?;
                        let mut response_size_limit =
                            committee.policy().maximum_http_response_bytes;
                        if http_responses_are_oracle_responses {
                            response_size_limit = response_size_limit
                                .min(committee.policy().maximum_oracle_response_bytes);
                        }
                        Ok(OracleResponse::Http(
                            Self::receive_http_response(response, response_size_limit).await?,
                        ))
                    })
                    .await?
                    .to_http_response()?;
                callback.respond(response);
            }
            // Blob access; data-blob reads are charged to the resource controller.
            ReadBlobContent { blob_id, callback } => {
                let content = if let Some(content) = self.txn_tracker.get_blob_content(&blob_id) {
                    content.clone()
                } else {
                    let content = self.state.system.read_blob_content(blob_id).await?;
                    if blob_id.blob_type == BlobType::Data {
                        self.resource_controller
                            .with_state(&mut self.state.system)
                            .await?
                            .track_blob_read(content.bytes().len() as u64)?;
                    }
                    self.state
                        .system
                        .blob_used(self.txn_tracker, blob_id)
                        .await?;
                    content
                };
                callback.respond(content)
            }
            AssertBlobExists { blob_id, callback } => {
                self.state.system.assert_blob_exists(blob_id).await?;
                // Treating this as reading a size-0 blob for fee purposes.
                if blob_id.blob_type == BlobType::Data {
                    self.resource_controller
                        .with_state(&mut self.state.system)
                        .await?
                        .track_blob_read(0)?;
                }
                let is_new = self
                    .state
                    .system
                    .blob_used(self.txn_tracker, blob_id)
                    .await?;
                if is_new {
                    self.txn_tracker
                        .replay_oracle_response(OracleResponse::Blob(blob_id))?;
                }
                callback.respond(());
            }
            // Event streams.
            Emit {
                stream_id,
                value,
                callback,
            } => {
                let count = self
                    .state
                    .system
                    .stream_event_counts
                    .get_mut_or_default(&stream_id)
                    .await?;
                let index = *count;
                *count = count.checked_add(1).ok_or(ArithmeticError::Overflow)?;
                self.resource_controller
                    .with_state(&mut self.state.system)
                    .await?
                    .track_event_published(&value)?;
                self.txn_tracker.add_event(stream_id, index, value);
                callback.respond(index)
            }
            ReadEvent { event_id, callback } => {
                let context = self.state.context();
                let extra = context.extra();
                let event = self
                    .txn_tracker
                    .oracle(|| async {
                        let event = extra
                            .get_event(event_id.clone())
                            .await?
                            .ok_or(ExecutionError::EventsNotFound(vec![event_id.clone()]))?;
                        Ok(OracleResponse::Event(event_id.clone(), event))
                    })
                    .await?
                    .to_event(&event_id)?;
                self.resource_controller
                    .with_state(&mut self.state.system)
                    .await?
                    .track_event_read(event.len() as u64)?;
                callback.respond(event);
            }
            SubscribeToEvents {
                chain_id,
                stream_id,
                subscriber_app_id,
                callback,
            } => {
                let subscriptions = self
                    .state
                    .system
                    .event_subscriptions
                    .get_mut_or_default(&(chain_id, stream_id.clone()))
                    .await?;
                // Only a *new* subscription starts processing from the stream's
                // current index; re-subscribing starts from zero.
                let next_index = if subscriptions.applications.insert(subscriber_app_id) {
                    subscriptions.next_index
                } else {
                    0
                };
                self.txn_tracker.add_stream_to_process(
                    subscriber_app_id,
                    chain_id,
                    stream_id,
                    0,
                    next_index,
                );
                callback.respond(());
            }
            UnsubscribeFromEvents {
                chain_id,
                stream_id,
                subscriber_app_id,
                callback,
            } => {
                let key = (chain_id, stream_id.clone());
                let subscriptions = self
                    .state
                    .system
                    .event_subscriptions
                    .get_mut_or_default(&key)
                    .await?;
                subscriptions.applications.remove(&subscriber_app_id);
                // Drop the map entry entirely once no application is subscribed.
                if subscriptions.applications.is_empty() {
                    self.state.system.event_subscriptions.remove(&key)?;
                }
                if let crate::GenericApplicationId::User(app_id) = stream_id.application_id {
                    self.txn_tracker
                        .remove_stream_to_process(app_id, chain_id, stream_id);
                }
                callback.respond(());
            }
            GetApplicationPermissions { callback } => {
                let app_permissions = self.state.system.application_permissions.get();
                callback.respond(app_permissions.clone());
            }
            // Runs a service query as an oracle; the service must not schedule
            // any operations.
            QueryServiceOracle {
                deadline,
                application_id,
                next_block_height,
                query,
                callback,
            } => {
                let state = &mut self.state;
                let local_time = self.txn_tracker.local_time();
                let created_blobs = self.txn_tracker.created_blobs().clone();
                let bytes = self
                    .txn_tracker
                    .oracle(|| async {
                        let context = QueryContext {
                            chain_id: state.context().extra().chain_id(),
                            next_block_height,
                            local_time,
                        };
                        let QueryOutcome {
                            response,
                            operations,
                        } = Box::pin(state.query_user_application_with_deadline(
                            application_id,
                            context,
                            query,
                            deadline,
                            created_blobs,
                        ))
                        .await?;
                        ensure!(
                            operations.is_empty(),
                            ExecutionError::ServiceOracleQueryOperations(operations)
                        );
                        Ok(OracleResponse::Service(response))
                    })
                    .await?
                    .to_service_response()?;
                callback.respond(bytes);
            }
            AddOutgoingMessage { message, callback } => {
                self.txn_tracker.add_outgoing_message(message);
                callback.respond(());
            }
            SetLocalTime {
                local_time,
                callback,
            } => {
                self.txn_tracker.set_local_time(local_time);
                callback.respond(());
            }
            AssertBefore {
                timestamp,
                callback,
            } => {
                let result = if !self
                    .txn_tracker
                    .replay_oracle_response(OracleResponse::Assert)?
                {
                    // There are no recorded oracle responses, so we check the local time.
                    let local_time = self.txn_tracker.local_time();
                    if local_time >= timestamp {
                        Err(ExecutionError::AssertBefore {
                            timestamp,
                            local_time,
                        })
                    } else {
                        Ok(())
                    }
                } else {
                    Ok(())
                };
                callback.respond(result);
            }
            AddCreatedBlob { blob, callback } => {
                self.txn_tracker.add_created_blob(blob);
                callback.respond(());
            }
            ValidationRound { round, callback } => {
                let validation_round = self
                    .txn_tracker
                    .oracle(|| async { Ok(OracleResponse::Round(round)) })
                    .await?
                    .to_round()?;
                callback.respond(validation_round);
            }
            TotalStorageSize {
                application,
                callback,
            } => {
                let view = self.state.users.try_load_entry(&application).await?;
                let result = match view {
                    Some(view) => {
                        let total_size = view.total_size();
                        (total_size.key, total_size.value)
                    }
                    None => (0, 0),
                };
                callback.respond(result);
            }
            AllowApplicationLogs { callback } => {
                let allow = self
                    .state
                    .context()
                    .extra()
                    .execution_runtime_config()
                    .allow_application_logs;
                callback.respond(allow);
            }
            #[cfg(web)]
            Log { message, level } => {
                // Output directly to browser console with clean formatting
                let formatted: js_sys::JsString = format!("[CONTRACT {level}] {message}").into();
                match level {
                    tracing::log::Level::Trace | tracing::log::Level::Debug => {
                        web_sys::console::debug_1(&formatted)
                    }
                    tracing::log::Level::Info => web_sys::console::log_1(&formatted),
                    tracing::log::Level::Warn => web_sys::console::warn_1(&formatted),
                    tracing::log::Level::Error => web_sys::console::error_1(&formatted),
                }
            }
        }
        Ok(())
    }
/// Calls `process_streams` for all applications that are subscribed to streams with new
/// events or that have new subscriptions.
async fn process_subscriptions(
    &mut self,
    context: ProcessStreamsContext,
) -> Result<(), ExecutionError> {
    // Remember every (application, chain, stream) combination we have already handled,
    // so applications that unsubscribe and re-subscribe from within their own
    // `process_streams` call cannot cause the same update to be delivered twice.
    let mut seen = BTreeSet::new();
    loop {
        // Collect the next batch of not-yet-seen updates, keyed by application.
        let mut batch: BTreeMap<_, Vec<_>> = BTreeMap::new();
        for (app_id, updates) in self.txn_tracker.take_streams_to_process() {
            let mut fresh = Vec::new();
            for update in updates {
                let key = (app_id, update.chain_id, update.stream_id.clone());
                if seen.insert(key) {
                    fresh.push(update);
                }
            }
            if !fresh.is_empty() {
                batch.insert(app_id, fresh);
            }
        }
        // No unseen updates remain: every subscription has been processed.
        if batch.is_empty() {
            return Ok(());
        }
        for (app_id, updates) in batch {
            self.run_user_action(
                app_id,
                UserAction::ProcessStreams(context, updates),
                None,
                None,
            )
            .await?;
        }
    }
}
/// Executes a user action (operation, message, or stream processing) for the given
/// application.
///
/// Resource usage is charged against `grant` when one is provided; the unused part of
/// a grant is refunded to `refund_grant_to`, if set. This is a thin wrapper around
/// [`Self::run_user_action_with_runtime`].
pub(crate) async fn run_user_action(
    &mut self,
    application_id: ApplicationId,
    action: UserAction,
    refund_grant_to: Option<Account>,
    grant: Option<&mut Amount>,
) -> Result<(), ExecutionError> {
    self.run_user_action_with_runtime(application_id, action, refund_grant_to, grant)
        .await
}
// TODO(#5034): unify with `contract_and_dependencies`
/// Loads the service code of `application` together with that of all its
/// (transitively) required applications.
///
/// The returned vectors list dependencies before dependents, with `application`
/// itself last.
pub(crate) async fn service_and_dependencies(
    &mut self,
    application: ApplicationId,
) -> Result<(Vec<UserServiceCode>, Vec<ApplicationDescription>), ExecutionError> {
    // Recursive async calls would require boxing every frame, so we walk the
    // dependency graph with an explicit work stack instead.
    let mut pending = vec![application];
    let mut loaded = Vec::new();
    while let Some(id) = pending.pop() {
        let (code, description) = self.load_service(id).await?;
        // Push in reverse so popping visits dependencies in declaration order.
        pending.extend(description.required_application_ids.iter().rev().copied());
        loaded.push((code, description));
    }
    // Loading order is root-first (preorder); flip so dependencies precede dependents.
    let (mut codes, mut descriptions): (Vec<_>, Vec<_>) = loaded.into_iter().unzip();
    codes.reverse();
    descriptions.reverse();
    Ok((codes, descriptions))
}
// TODO(#5034): unify with `service_and_dependencies`
/// Loads the contract code of `application` together with that of all its
/// (transitively) required applications.
///
/// The returned vectors list dependencies before dependents, with `application`
/// itself last.
async fn contract_and_dependencies(
    &mut self,
    application: ApplicationId,
) -> Result<(Vec<UserContractCode>, Vec<ApplicationDescription>), ExecutionError> {
    // Cyclic futures are illegal, so instead of recursing we keep an explicit
    // stack of application IDs that still need loading.
    let mut to_visit = vec![application];
    let mut codes = Vec::new();
    let mut descriptions = Vec::new();
    while let Some(id) = to_visit.pop() {
        let (code, description) = self.load_contract(id).await?;
        // Reversed push so that popping yields dependencies in declaration order.
        to_visit.extend(description.required_application_ids.iter().rev().copied());
        codes.push(code);
        descriptions.push(description);
    }
    // Loading happened root-first; callers want dependencies first.
    codes.reverse();
    descriptions.reverse();
    Ok((codes, descriptions))
}
/// Runs a user action by spawning the contract runtime on a worker thread and
/// servicing its execution-state requests from this actor until it finishes.
///
/// Resource usage is accounted through a detached [`ResourceController`] that
/// travels with the runtime task; its balance and tracker are merged back into
/// `self.resource_controller` (and charged against `grant`, when provided)
/// once the task completes.
async fn run_user_action_with_runtime(
    &mut self,
    application_id: ApplicationId,
    action: UserAction,
    refund_grant_to: Option<Account>,
    grant: Option<&mut Amount>,
) -> Result<(), ExecutionError> {
    let chain_id = self.state.context().extra().chain_id();
    // Use a copy of the grant for the initial balance computation, so the
    // caller's grant itself is only passed to `with_state_and_grant` once, in
    // the final merge below.
    let mut cloned_grant = grant.as_ref().map(|x| **x);
    let initial_balance = self
        .resource_controller
        .with_state_and_grant(&mut self.state.system, cloned_grant.as_mut())
        .await?
        .balance()?;
    let controller = ResourceController::new(
        self.resource_controller.policy().clone(),
        self.resource_controller.tracker,
        initial_balance,
    );
    // Channel over which the runtime task sends execution-state requests back
    // to this actor.
    let (execution_state_sender, mut execution_state_receiver) =
        futures::channel::mpsc::unbounded();
    let (codes, descriptions): (Vec<_>, Vec<_>) =
        self.contract_and_dependencies(application_id).await?;
    let allow_application_logs = self
        .state
        .context()
        .extra()
        .execution_runtime_config()
        .allow_application_logs;
    let contract_runtime_task = self
        .state
        .context()
        .extra()
        .thread_pool()
        .run_send(JsVec(codes), move |codes| async move {
            let runtime = ContractSyncRuntime::new(
                execution_state_sender,
                chain_id,
                refund_grant_to,
                controller,
                &action,
                allow_application_logs,
            );
            // Preload the contract together with all its dependencies before
            // running the action.
            for (code, description) in codes.0.into_iter().zip(descriptions) {
                runtime.preload_contract(
                    ApplicationId::from(&description),
                    code,
                    description,
                )?;
            }
            runtime.run_action(application_id, chain_id, action)
        })
        .await;
    // Serve the runtime's requests; the loop ends when the runtime drops its
    // sender, i.e. when the action has finished executing.
    // NOTE(review): if `handle_request` fails we return before awaiting
    // `contract_runtime_task` — confirm that dropping the task handle here is
    // the intended cancellation behavior.
    while let Some(request) = execution_state_receiver.next().await {
        self.handle_request(request).await?;
    }
    let (result, controller) = contract_runtime_task.await??;
    self.txn_tracker.add_operation_result(result);
    // Merge the fees accrued during execution into the real grant/state and
    // adopt the updated resource tracker.
    self.resource_controller
        .with_state_and_grant(&mut self.state.system, grant)
        .await?
        .merge_balance(initial_balance, controller.balance()?)?;
    self.resource_controller.tracker = controller.tracker;
    Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/lib.rs | linera-execution/src/lib.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module manages the execution of the system application and the user applications in a
//! Linera chain.
pub mod committee;
pub mod evm;
mod execution;
pub mod execution_state_actor;
#[cfg(with_graphql)]
mod graphql;
mod policy;
mod resources;
mod runtime;
pub mod system;
#[cfg(with_testing)]
pub mod test_utils;
mod transaction_tracker;
mod util;
mod wasm;
use std::{any::Any, collections::BTreeMap, fmt, ops::RangeInclusive, str::FromStr, sync::Arc};
use allocative::Allocative;
use async_graphql::SimpleObject;
use async_trait::async_trait;
use custom_debug_derive::Debug;
use derive_more::Display;
#[cfg(web)]
use js_sys::wasm_bindgen::JsValue;
use linera_base::{
abi::Abi,
crypto::{BcsHashable, CryptoHash},
data_types::{
Amount, ApplicationDescription, ApplicationPermissions, ArithmeticError, Blob, BlockHeight,
Bytecode, DecompressionError, Epoch, NetworkDescription, SendMessageRequest, StreamUpdate,
Timestamp,
},
doc_scalar, ensure, hex_debug, http,
identifiers::{
Account, AccountOwner, ApplicationId, BlobId, BlobType, ChainId, DataBlobHash, EventId,
GenericApplicationId, ModuleId, StreamId, StreamName,
},
ownership::ChainOwnership,
vm::VmRuntime,
};
use linera_views::{batch::Batch, ViewError};
use serde::{Deserialize, Serialize};
use system::AdminOperation;
use thiserror::Error;
pub use web_thread_pool::Pool as ThreadPool;
use web_thread_select as web_thread;
#[cfg(with_revm)]
use crate::evm::EvmExecutionError;
use crate::system::{EpochEventData, EPOCH_STREAM_NAME};
#[cfg(with_testing)]
use crate::test_utils::dummy_chain_description;
#[cfg(all(with_testing, with_wasm_runtime))]
pub use crate::wasm::test as wasm_test;
#[cfg(with_wasm_runtime)]
pub use crate::wasm::{
BaseRuntimeApi, ContractEntrypoints, ContractRuntimeApi, RuntimeApiData, ServiceEntrypoints,
ServiceRuntimeApi, WasmContractModule, WasmExecutionError, WasmServiceModule,
};
pub use crate::{
committee::Committee,
execution::{ExecutionStateView, ServiceRuntimeEndpoint},
execution_state_actor::{ExecutionRequest, ExecutionStateActor},
policy::ResourceControlPolicy,
resources::{BalanceHolder, ResourceController, ResourceTracker},
runtime::{
ContractSyncRuntimeHandle, ServiceRuntimeRequest, ServiceSyncRuntime,
ServiceSyncRuntimeHandle,
},
system::{
SystemExecutionStateView, SystemMessage, SystemOperation, SystemQuery, SystemResponse,
},
transaction_tracker::{TransactionOutcome, TransactionTracker},
};
/// The `Linera.sol` library code to be included in solidity smart
/// contracts using Linera features.
pub const LINERA_SOL: &str = include_str!("../solidity/Linera.sol");
/// The `LineraTypes.sol` type definitions accompanying [`LINERA_SOL`].
pub const LINERA_TYPES_SOL: &str = include_str!("../solidity/LineraTypes.sol");
/// The maximum length of a stream name, in bytes.
const MAX_STREAM_NAME_LEN: usize = 64;
/// An implementation of [`UserContractModule`].
///
/// A type-erased, clonable handle to a loaded contract module.
#[derive(Clone)]
pub struct UserContractCode(Box<dyn UserContractModule>);
/// An implementation of [`UserServiceModule`].
///
/// A type-erased, clonable handle to a loaded service module.
#[derive(Clone)]
pub struct UserServiceCode(Box<dyn UserServiceModule>);
/// An implementation of [`UserContract`].
pub type UserContractInstance = Box<dyn UserContract>;
/// An implementation of [`UserService`].
pub type UserServiceInstance = Box<dyn UserService>;
/// A factory trait to obtain a [`UserContract`] from a [`UserContractModule`]
///
/// The `Any` supertrait allows downcasting to concrete module types (used by the
/// web `AsJs` implementation below); `DynClone` makes boxed modules clonable.
pub trait UserContractModule: dyn_clone::DynClone + Any + web_thread::Post + Send + Sync {
    /// Creates a fresh contract instance bound to the given runtime handle.
    fn instantiate(
        &self,
        runtime: ContractSyncRuntimeHandle,
    ) -> Result<UserContractInstance, ExecutionError>;
}
// Any concrete module can be wrapped into the type-erased `UserContractCode`.
impl<T: UserContractModule + Send + Sync + 'static> From<T> for UserContractCode {
    fn from(module: T) -> Self {
        Self(Box::new(module))
    }
}
dyn_clone::clone_trait_object!(UserContractModule);
/// A factory trait to obtain a [`UserService`] from a [`UserServiceModule`]
///
/// The `Any` supertrait allows downcasting to concrete module types (used by the
/// web `AsJs` implementation below); `DynClone` makes boxed modules clonable.
pub trait UserServiceModule: dyn_clone::DynClone + Any + web_thread::Post + Send + Sync {
    /// Creates a fresh service instance bound to the given runtime handle.
    fn instantiate(
        &self,
        runtime: ServiceSyncRuntimeHandle,
    ) -> Result<UserServiceInstance, ExecutionError>;
}
// Any concrete module can be wrapped into the type-erased `UserServiceCode`.
impl<T: UserServiceModule + Send + Sync + 'static> From<T> for UserServiceCode {
    fn from(module: T) -> Self {
        Self(Box::new(module))
    }
}
dyn_clone::clone_trait_object!(UserServiceModule);
impl UserServiceCode {
    /// Instantiates the wrapped service module with the given runtime handle.
    fn instantiate(
        &self,
        runtime: ServiceSyncRuntimeHandle,
    ) -> Result<UserServiceInstance, ExecutionError> {
        self.0.instantiate(runtime)
    }
}
impl UserContractCode {
    /// Instantiates the wrapped contract module with the given runtime handle.
    fn instantiate(
        &self,
        runtime: ContractSyncRuntimeHandle,
    ) -> Result<UserContractInstance, ExecutionError> {
        self.0.instantiate(runtime)
    }
}
/// A newtype around [`Vec`] so that collections of values can be passed across
/// the web-worker boundary (see the `#[cfg(web)]` `AsJs`/`Post` impls).
pub struct JsVec<T>(pub Vec<T>);
#[cfg(web)]
const _: () = {
    // TODO(#2775): add a vtable pointer into the JsValue rather than assuming the
    // implementor
    impl web_thread::AsJs for UserContractCode {
        // Converts to a JS value by downcasting to the only module type we
        // support on the Web (Wasm) and delegating to its `AsJs` impl.
        fn to_js(&self) -> Result<JsValue, JsValue> {
            ((&*self.0) as &dyn Any)
                .downcast_ref::<WasmContractModule>()
                .expect("we only support Wasm modules on the Web for now")
                .to_js()
        }
        fn from_js(value: JsValue) -> Result<Self, JsValue> {
            WasmContractModule::from_js(value).map(Into::into)
        }
    }
    impl web_thread::Post for UserContractCode {
        fn transferables(&self) -> js_sys::Array {
            self.0.transferables()
        }
    }
    impl web_thread::AsJs for UserServiceCode {
        // Same downcast-to-Wasm strategy as for `UserContractCode` above.
        fn to_js(&self) -> Result<JsValue, JsValue> {
            ((&*self.0) as &dyn Any)
                .downcast_ref::<WasmServiceModule>()
                .expect("we only support Wasm modules on the Web for now")
                .to_js()
        }
        fn from_js(value: JsValue) -> Result<Self, JsValue> {
            WasmServiceModule::from_js(value).map(Into::into)
        }
    }
    impl web_thread::Post for UserServiceCode {
        fn transferables(&self) -> js_sys::Array {
            self.0.transferables()
        }
    }
    // A `JsVec<T>` round-trips through a `js_sys::Array` of its elements.
    impl<T: web_thread::AsJs> web_thread::AsJs for JsVec<T> {
        fn to_js(&self) -> Result<JsValue, JsValue> {
            let array = self
                .0
                .iter()
                .map(T::to_js)
                .collect::<Result<js_sys::Array, _>>()?;
            Ok(array.into())
        }
        fn from_js(value: JsValue) -> Result<Self, JsValue> {
            let array = js_sys::Array::from(&value);
            let v = array
                .into_iter()
                .map(T::from_js)
                .collect::<Result<Vec<_>, _>>()?;
            Ok(JsVec(v))
        }
    }
    impl<T: web_thread::Post> web_thread::Post for JsVec<T> {
        // The transferables of a `JsVec` are the concatenation of its elements'.
        fn transferables(&self) -> js_sys::Array {
            let mut array = js_sys::Array::new();
            for x in &self.0 {
                array = array.concat(&x.transferables());
            }
            array
        }
    }
};
/// A type for errors happening during execution.
///
/// See [`ExecutionError::is_local`] for whether a given error indicates a
/// problem in the local node or may have been caused by a bad message from a
/// peer — new variants must be classified there.
#[derive(Error, Debug)]
pub enum ExecutionError {
    #[error(transparent)]
    ViewError(#[from] ViewError),
    #[error(transparent)]
    ArithmeticError(#[from] ArithmeticError),
    #[error("User application reported an error: {0}")]
    UserError(String),
    #[cfg(with_wasm_runtime)]
    #[error(transparent)]
    WasmError(#[from] WasmExecutionError),
    #[cfg(with_revm)]
    #[error(transparent)]
    EvmError(#[from] EvmExecutionError),
    #[error(transparent)]
    DecompressionError(#[from] DecompressionError),
    #[error("The given promise is invalid or was polled once already")]
    InvalidPromise,
    #[error("Attempted to perform a reentrant call to application {0}")]
    ReentrantCall(ApplicationId),
    #[error(
        "Application {caller_id} attempted to perform a cross-application to {callee_id} call \
        from `finalize`"
    )]
    CrossApplicationCallInFinalize {
        caller_id: Box<ApplicationId>,
        callee_id: Box<ApplicationId>,
    },
    #[error("Failed to load bytecode from storage {0:?}")]
    ApplicationBytecodeNotFound(Box<ApplicationDescription>),
    // TODO(#2927): support dynamic loading of modules on the Web
    #[error("Unsupported dynamic application load: {0:?}")]
    UnsupportedDynamicApplicationLoad(Box<ApplicationId>),
    #[error("Excessive number of bytes read from storage")]
    ExcessiveRead,
    #[error("Excessive number of bytes written to storage")]
    ExcessiveWrite,
    #[error("Block execution required too much fuel for VM {0}")]
    MaximumFuelExceeded(VmRuntime),
    #[error("Services running as oracles in block took longer than allowed")]
    MaximumServiceOracleExecutionTimeExceeded,
    #[error("Service running as an oracle produced a response that's too large")]
    ServiceOracleResponseTooLarge,
    #[error("Serialized size of the block exceeds limit")]
    BlockTooLarge,
    #[error("HTTP response exceeds the size limit of {limit} bytes, having at least {size} bytes")]
    HttpResponseSizeLimitExceeded { limit: u64, size: u64 },
    #[error("Runtime failed to respond to application")]
    MissingRuntimeResponse,
    #[error("Application is not authorized to perform system operations on this chain: {0:}")]
    UnauthorizedApplication(ApplicationId),
    #[error("Failed to make network reqwest: {0}")]
    ReqwestError(#[from] reqwest::Error),
    #[error("Encountered I/O error: {0}")]
    IoError(#[from] std::io::Error),
    #[error("More recorded oracle responses than expected")]
    UnexpectedOracleResponse,
    #[error("Invalid JSON: {0}")]
    JsonError(#[from] serde_json::Error),
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
    #[error("Recorded response for oracle query has the wrong type")]
    OracleResponseMismatch,
    #[error("Service oracle query tried to create operations: {0:?}")]
    ServiceOracleQueryOperations(Vec<Operation>),
    #[error("Assertion failed: local time {local_time} is not earlier than {timestamp}")]
    AssertBefore {
        timestamp: Timestamp,
        local_time: Timestamp,
    },
    // See `MAX_STREAM_NAME_LEN`.
    #[error("Stream names can be at most {MAX_STREAM_NAME_LEN} bytes.")]
    StreamNameTooLong,
    #[error("Blob exceeds size limit")]
    BlobTooLarge,
    #[error("Bytecode exceeds size limit")]
    BytecodeTooLarge,
    #[error("Attempt to perform an HTTP request to an unauthorized host: {0:?}")]
    UnauthorizedHttpRequest(reqwest::Url),
    #[error("Attempt to perform an HTTP request to an invalid URL")]
    InvalidUrlForHttpRequest(#[from] url::ParseError),
    #[error("Worker thread failure: {0:?}")]
    Thread(#[from] web_thread::Error),
    #[error("The chain being queried is not active {0}")]
    InactiveChain(ChainId),
    #[error("Blobs not found: {0:?}")]
    BlobsNotFound(Vec<BlobId>),
    #[error("Events not found: {0:?}")]
    EventsNotFound(Vec<EventId>),
    #[error("Invalid HTTP header name used for HTTP request")]
    InvalidHeaderName(#[from] reqwest::header::InvalidHeaderName),
    #[error("Invalid HTTP header value used for HTTP request")]
    InvalidHeaderValue(#[from] reqwest::header::InvalidHeaderValue),
    #[error("No NetworkDescription found in storage")]
    NoNetworkDescriptionFound,
    #[error("{epoch:?} is not recognized by chain {chain_id:}")]
    InvalidEpoch { chain_id: ChainId, epoch: Epoch },
    #[error("Transfer must have positive amount")]
    IncorrectTransferAmount,
    #[error("Transfer from owned account must be authenticated by the right owner")]
    UnauthenticatedTransferOwner,
    #[error("The transferred amount must not exceed the balance of the current account {account}: {balance}")]
    InsufficientBalance {
        balance: Amount,
        account: AccountOwner,
    },
    #[error("Required execution fees exceeded the total funding available. Fees {fees}, available balance: {balance}")]
    FeesExceedFunding { fees: Amount, balance: Amount },
    #[error("Claim must have positive amount")]
    IncorrectClaimAmount,
    #[error("Claim must be authenticated by the right owner")]
    UnauthenticatedClaimOwner,
    #[error("Admin operations are only allowed on the admin chain.")]
    AdminOperationOnNonAdminChain,
    #[error("Failed to create new committee: expected {expected}, but got {provided}")]
    InvalidCommitteeEpoch { expected: Epoch, provided: Epoch },
    #[error("Failed to remove committee")]
    InvalidCommitteeRemoval,
    #[error("No recorded response for oracle query")]
    MissingOracleResponse,
    #[error("process_streams was not called for all stream updates")]
    UnprocessedStreams,
    #[error("Internal error: {0}")]
    InternalError(&'static str),
    #[error("UpdateStreams is outdated")]
    OutdatedUpdateStreams,
}
impl ExecutionError {
    /// Returns whether this error is caused by an issue in the local node.
    ///
    /// Returns `false` whenever the error could be caused by a bad message from a peer.
    pub fn is_local(&self) -> bool {
        // The match is deliberately exhaustive (no `_` arm), so adding a new
        // error variant forces an explicit decision about its classification.
        match self {
            // Errors that execution of peer-supplied content can trigger.
            ExecutionError::ArithmeticError(_)
            | ExecutionError::UserError(_)
            | ExecutionError::DecompressionError(_)
            | ExecutionError::InvalidPromise
            | ExecutionError::CrossApplicationCallInFinalize { .. }
            | ExecutionError::ReentrantCall(_)
            | ExecutionError::ApplicationBytecodeNotFound(_)
            | ExecutionError::UnsupportedDynamicApplicationLoad(_)
            | ExecutionError::ExcessiveRead
            | ExecutionError::ExcessiveWrite
            | ExecutionError::MaximumFuelExceeded(_)
            | ExecutionError::MaximumServiceOracleExecutionTimeExceeded
            | ExecutionError::ServiceOracleResponseTooLarge
            | ExecutionError::BlockTooLarge
            | ExecutionError::HttpResponseSizeLimitExceeded { .. }
            | ExecutionError::UnauthorizedApplication(_)
            | ExecutionError::UnexpectedOracleResponse
            | ExecutionError::JsonError(_)
            | ExecutionError::BcsError(_)
            | ExecutionError::OracleResponseMismatch
            | ExecutionError::ServiceOracleQueryOperations(_)
            | ExecutionError::AssertBefore { .. }
            | ExecutionError::StreamNameTooLong
            | ExecutionError::BlobTooLarge
            | ExecutionError::BytecodeTooLarge
            | ExecutionError::UnauthorizedHttpRequest(_)
            | ExecutionError::InvalidUrlForHttpRequest(_)
            | ExecutionError::InactiveChain(_)
            | ExecutionError::BlobsNotFound(_)
            | ExecutionError::EventsNotFound(_)
            | ExecutionError::InvalidHeaderName(_)
            | ExecutionError::InvalidHeaderValue(_)
            | ExecutionError::InvalidEpoch { .. }
            | ExecutionError::IncorrectTransferAmount
            | ExecutionError::UnauthenticatedTransferOwner
            | ExecutionError::InsufficientBalance { .. }
            | ExecutionError::FeesExceedFunding { .. }
            | ExecutionError::IncorrectClaimAmount
            | ExecutionError::UnauthenticatedClaimOwner
            | ExecutionError::AdminOperationOnNonAdminChain
            | ExecutionError::InvalidCommitteeEpoch { .. }
            | ExecutionError::InvalidCommitteeRemoval
            | ExecutionError::MissingOracleResponse
            | ExecutionError::UnprocessedStreams
            | ExecutionError::OutdatedUpdateStreams
            | ExecutionError::ViewError(ViewError::NotFound(_)) => false,
            #[cfg(with_wasm_runtime)]
            ExecutionError::WasmError(_) => false,
            #[cfg(with_revm)]
            ExecutionError::EvmError(..) => false,
            // Errors that indicate a problem in the local node itself
            // (I/O, storage, networking, internal invariants).
            ExecutionError::MissingRuntimeResponse
            | ExecutionError::ViewError(_)
            | ExecutionError::ReqwestError(_)
            | ExecutionError::Thread(_)
            | ExecutionError::NoNetworkDescriptionFound
            | ExecutionError::InternalError(_)
            | ExecutionError::IoError(_) => true,
        }
    }
}
/// The public entry points provided by the contract part of an application.
///
/// All payloads are opaque byte vectors; their encoding is application-defined.
pub trait UserContract {
    /// Instantiate the application state on the chain that owns the application.
    fn instantiate(&mut self, argument: Vec<u8>) -> Result<(), ExecutionError>;
    /// Applies an operation from the current block.
    fn execute_operation(&mut self, operation: Vec<u8>) -> Result<Vec<u8>, ExecutionError>;
    /// Applies a message originating from a cross-chain message.
    fn execute_message(&mut self, message: Vec<u8>) -> Result<(), ExecutionError>;
    /// Reacts to new events on streams this application subscribes to.
    fn process_streams(&mut self, updates: Vec<StreamUpdate>) -> Result<(), ExecutionError>;
    /// Finishes execution of the current transaction.
    fn finalize(&mut self) -> Result<(), ExecutionError>;
}
/// The public entry points provided by the service part of an application.
pub trait UserService {
    /// Executes unmetered read-only queries on the state of this application.
    /// The query and response are opaque byte vectors; their encoding is
    /// application-defined.
    fn handle_query(&mut self, argument: Vec<u8>) -> Result<Vec<u8>, ExecutionError>;
}
/// Configuration options for the execution runtime available to applications.
///
/// The derived [`Default`] disables all options (e.g. application logs are off).
#[derive(Clone, Copy, Default)]
pub struct ExecutionRuntimeConfig {
    /// Whether contract log messages should be output.
    /// This is typically enabled for clients but disabled for validators.
    pub allow_application_logs: bool,
}
/// Requirements for the `extra` field in our state views (and notably the
/// [`ExecutionStateView`]).
#[cfg_attr(not(web), async_trait)]
#[cfg_attr(web, async_trait(?Send))]
pub trait ExecutionRuntimeContext {
    /// The ID of the chain this execution context belongs to.
    fn chain_id(&self) -> ChainId;
    /// The thread pool on which contract runtime tasks are executed.
    fn thread_pool(&self) -> &Arc<ThreadPool>;
    /// The runtime configuration options (e.g. whether application logs are allowed).
    fn execution_runtime_config(&self) -> ExecutionRuntimeConfig;
    /// Concurrent map of loaded contract code, keyed by application ID.
    fn user_contracts(&self) -> &Arc<papaya::HashMap<ApplicationId, UserContractCode>>;
    /// Concurrent map of loaded service code, keyed by application ID.
    fn user_services(&self) -> &Arc<papaya::HashMap<ApplicationId, UserServiceCode>>;
    /// Obtains the contract code for the described application.
    async fn get_user_contract(
        &self,
        description: &ApplicationDescription,
        txn_tracker: &TransactionTracker,
    ) -> Result<UserContractCode, ExecutionError>;
    /// Obtains the service code for the described application.
    async fn get_user_service(
        &self,
        description: &ApplicationDescription,
        txn_tracker: &TransactionTracker,
    ) -> Result<UserServiceCode, ExecutionError>;
    /// Reads the blob with the given ID, if available.
    async fn get_blob(&self, blob_id: BlobId) -> Result<Option<Blob>, ViewError>;
    /// Reads the event with the given ID, if available.
    async fn get_event(&self, event_id: EventId) -> Result<Option<Vec<u8>>, ViewError>;
    /// Returns the network description, if one is stored.
    async fn get_network_description(&self) -> Result<Option<NetworkDescription>, ViewError>;
    /// Returns the committees for the epochs in the given range.
    async fn get_committees(
        &self,
        epoch_range: RangeInclusive<Epoch>,
    ) -> Result<BTreeMap<Epoch, Committee>, ExecutionError> {
        let net_description = self
            .get_network_description()
            .await?
            .ok_or(ExecutionError::NoNetworkDescriptionFound)?;
        // Resolve each epoch to the hash of its committee blob, concurrently.
        let committee_hashes = futures::future::join_all(
            (epoch_range.start().0..=epoch_range.end().0).map(|epoch| async move {
                if epoch == 0 {
                    // Genesis epoch is stored in NetworkDescription.
                    Ok((epoch, net_description.genesis_committee_blob_hash))
                } else {
                    // Later epochs are announced as events on the admin chain's
                    // epoch stream, one event per epoch.
                    let event_id = EventId {
                        chain_id: net_description.admin_chain_id,
                        stream_id: StreamId::system(EPOCH_STREAM_NAME),
                        index: epoch,
                    };
                    let event = self
                        .get_event(event_id.clone())
                        .await?
                        .ok_or_else(|| ExecutionError::EventsNotFound(vec![event_id]))?;
                    let event_data: EpochEventData = bcs::from_bytes(&event)?;
                    Ok((epoch, event_data.blob_hash))
                }
            }),
        )
        .await;
        // Aggregate all missing events into a single error, so callers can
        // request them in one go.
        let missing_events = committee_hashes
            .iter()
            .filter_map(|result| {
                if let Err(ExecutionError::EventsNotFound(event_ids)) = result {
                    return Some(event_ids);
                }
                None
            })
            .flatten()
            .cloned()
            .collect::<Vec<_>>();
        ensure!(
            missing_events.is_empty(),
            ExecutionError::EventsNotFound(missing_events)
        );
        // Any remaining error is propagated as-is.
        let committee_hashes = committee_hashes
            .into_iter()
            .collect::<Result<Vec<_>, _>>()?;
        // Fetch and deserialize the committee blobs, again concurrently.
        let committees = futures::future::join_all(committee_hashes.into_iter().map(
            |(epoch, committee_hash)| async move {
                let blob_id = BlobId::new(committee_hash, BlobType::Committee);
                let committee_blob = self
                    .get_blob(blob_id)
                    .await?
                    .ok_or_else(|| ExecutionError::BlobsNotFound(vec![blob_id]))?;
                Ok((Epoch(epoch), bcs::from_bytes(committee_blob.bytes())?))
            },
        ))
        .await;
        // As above, aggregate all missing blobs into a single error.
        let missing_blobs = committees
            .iter()
            .filter_map(|result| {
                if let Err(ExecutionError::BlobsNotFound(blob_ids)) = result {
                    return Some(blob_ids);
                }
                None
            })
            .flatten()
            .cloned()
            .collect::<Vec<_>>();
        ensure!(
            missing_blobs.is_empty(),
            ExecutionError::BlobsNotFound(missing_blobs)
        );
        committees.into_iter().collect()
    }
    /// Returns whether the blob with the given ID is present.
    async fn contains_blob(&self, blob_id: BlobId) -> Result<bool, ViewError>;
    /// Returns whether the event with the given ID is present.
    async fn contains_event(&self, event_id: EventId) -> Result<bool, ViewError>;
    /// Stores the given blobs (test helper).
    #[cfg(with_testing)]
    async fn add_blobs(
        &self,
        blobs: impl IntoIterator<Item = Blob> + Send,
    ) -> Result<(), ViewError>;
    /// Stores the given events (test helper).
    #[cfg(with_testing)]
    async fn add_events(
        &self,
        events: impl IntoIterator<Item = (EventId, Vec<u8>)> + Send,
    ) -> Result<(), ViewError>;
}
/// The context in which an operation from a block is executed.
#[derive(Clone, Copy, Debug)]
pub struct OperationContext {
    /// The current chain ID.
    pub chain_id: ChainId,
    /// The authenticated owner of the operation, if any.
    #[debug(skip_if = Option::is_none)]
    pub authenticated_owner: Option<AccountOwner>,
    /// The current block height.
    pub height: BlockHeight,
    /// The consensus round number, if this is a block that gets validated in a multi-leader round.
    pub round: Option<u32>,
    /// The timestamp of the block containing the operation.
    pub timestamp: Timestamp,
}
/// The context in which an incoming cross-chain message is executed.
#[derive(Clone, Copy, Debug)]
pub struct MessageContext {
    /// The current chain ID.
    pub chain_id: ChainId,
    /// The chain ID where the message originated from.
    pub origin: ChainId,
    /// Whether the message was rejected by the original receiver and is now bouncing back.
    pub is_bouncing: bool,
    /// The authenticated owner of the operation that created the message, if any.
    #[debug(skip_if = Option::is_none)]
    pub authenticated_owner: Option<AccountOwner>,
    /// Where to send a refund for the unused part of each grant after execution, if any.
    #[debug(skip_if = Option::is_none)]
    pub refund_grant_to: Option<Account>,
    /// The current block height.
    pub height: BlockHeight,
    /// The consensus round number, if this is a block that gets validated in a multi-leader round.
    pub round: Option<u32>,
    /// The timestamp of the block executing the message.
    pub timestamp: Timestamp,
}
/// The context in which `process_streams` is invoked for an application.
#[derive(Clone, Copy, Debug)]
pub struct ProcessStreamsContext {
    /// The current chain ID.
    pub chain_id: ChainId,
    /// The current block height.
    pub height: BlockHeight,
    /// The consensus round number, if this is a block that gets validated in a multi-leader round.
    pub round: Option<u32>,
    /// The timestamp of the current block.
    pub timestamp: Timestamp,
}
impl From<MessageContext> for ProcessStreamsContext {
fn from(context: MessageContext) -> Self {
Self {
chain_id: context.chain_id,
height: context.height,
round: context.round,
timestamp: context.timestamp,
}
}
}
impl From<OperationContext> for ProcessStreamsContext {
fn from(context: OperationContext) -> Self {
Self {
chain_id: context.chain_id,
height: context.height,
round: context.round,
timestamp: context.timestamp,
}
}
}
/// The context for the `finalize` phase that finishes execution of a transaction.
#[derive(Clone, Copy, Debug)]
pub struct FinalizeContext {
    /// The current chain ID.
    pub chain_id: ChainId,
    /// The authenticated owner of the operation, if any.
    #[debug(skip_if = Option::is_none)]
    pub authenticated_owner: Option<AccountOwner>,
    /// The current block height.
    pub height: BlockHeight,
    /// The consensus round number, if this is a block that gets validated in a multi-leader round.
    pub round: Option<u32>,
}
/// The context in which a read-only service query is executed.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct QueryContext {
    /// The current chain ID.
    pub chain_id: ChainId,
    /// The height of the next block on this chain.
    pub next_block_height: BlockHeight,
    /// The local time in the node executing the query.
    pub local_time: Timestamp,
}
pub trait BaseRuntime {
type Read: fmt::Debug + Send + Sync;
type ContainsKey: fmt::Debug + Send + Sync;
type ContainsKeys: fmt::Debug + Send + Sync;
type ReadMultiValuesBytes: fmt::Debug + Send + Sync;
type ReadValueBytes: fmt::Debug + Send + Sync;
type FindKeysByPrefix: fmt::Debug + Send + Sync;
type FindKeyValuesByPrefix: fmt::Debug + Send + Sync;
/// The current chain ID.
fn chain_id(&mut self) -> Result<ChainId, ExecutionError>;
/// The current block height.
fn block_height(&mut self) -> Result<BlockHeight, ExecutionError>;
/// The current application ID.
fn application_id(&mut self) -> Result<ApplicationId, ExecutionError>;
/// The current application creator's chain ID.
fn application_creator_chain_id(&mut self) -> Result<ChainId, ExecutionError>;
/// The current application parameters.
fn application_parameters(&mut self) -> Result<Vec<u8>, ExecutionError>;
/// Reads the system timestamp.
fn read_system_timestamp(&mut self) -> Result<Timestamp, ExecutionError>;
/// Reads the balance of the chain.
fn read_chain_balance(&mut self) -> Result<Amount, ExecutionError>;
/// Reads the owner balance.
fn read_owner_balance(&mut self, owner: AccountOwner) -> Result<Amount, ExecutionError>;
/// Reads the balances from all owners.
fn read_owner_balances(&mut self) -> Result<Vec<(AccountOwner, Amount)>, ExecutionError>;
/// Reads balance owners.
fn read_balance_owners(&mut self) -> Result<Vec<AccountOwner>, ExecutionError>;
/// Reads the current ownership configuration for this chain.
fn chain_ownership(&mut self) -> Result<ChainOwnership, ExecutionError>;
/// Tests whether a key exists in the key-value store
#[cfg(feature = "test")]
fn contains_key(&mut self, key: Vec<u8>) -> Result<bool, ExecutionError> {
let promise = self.contains_key_new(key)?;
self.contains_key_wait(&promise)
}
/// Creates the promise to test whether a key exists in the key-value store
fn contains_key_new(&mut self, key: Vec<u8>) -> Result<Self::ContainsKey, ExecutionError>;
/// Resolves the promise to test whether a key exists in the key-value store
fn contains_key_wait(&mut self, promise: &Self::ContainsKey) -> Result<bool, ExecutionError>;
/// Tests whether multiple keys exist in the key-value store
#[cfg(feature = "test")]
fn contains_keys(&mut self, keys: Vec<Vec<u8>>) -> Result<Vec<bool>, ExecutionError> {
let promise = self.contains_keys_new(keys)?;
self.contains_keys_wait(&promise)
}
/// Creates the promise to test whether multiple keys exist in the key-value store
fn contains_keys_new(
&mut self,
keys: Vec<Vec<u8>>,
) -> Result<Self::ContainsKeys, ExecutionError>;
/// Resolves the promise to test whether multiple keys exist in the key-value store
fn contains_keys_wait(
&mut self,
promise: &Self::ContainsKeys,
) -> Result<Vec<bool>, ExecutionError>;
/// Reads several keys from the key-value store
#[cfg(feature = "test")]
fn read_multi_values_bytes(
&mut self,
keys: Vec<Vec<u8>>,
) -> Result<Vec<Option<Vec<u8>>>, ExecutionError> {
let promise = self.read_multi_values_bytes_new(keys)?;
self.read_multi_values_bytes_wait(&promise)
}
/// Creates the promise to access several keys from the key-value store
fn read_multi_values_bytes_new(
&mut self,
keys: Vec<Vec<u8>>,
) -> Result<Self::ReadMultiValuesBytes, ExecutionError>;
/// Resolves the promise to access several keys from the key-value store
fn read_multi_values_bytes_wait(
&mut self,
promise: &Self::ReadMultiValuesBytes,
) -> Result<Vec<Option<Vec<u8>>>, ExecutionError>;
/// Reads the key from the key-value store
#[cfg(feature = "test")]
fn read_value_bytes(&mut self, key: Vec<u8>) -> Result<Option<Vec<u8>>, ExecutionError> {
let promise = self.read_value_bytes_new(key)?;
self.read_value_bytes_wait(&promise)
}
/// Creates the promise to access a key from the key-value store
fn read_value_bytes_new(
&mut self,
key: Vec<u8>,
) -> Result<Self::ReadValueBytes, ExecutionError>;
/// Resolves the promise to access a key from the key-value store
fn read_value_bytes_wait(
&mut self,
promise: &Self::ReadValueBytes,
) -> Result<Option<Vec<u8>>, ExecutionError>;
/// Creates the promise to access keys having a specific prefix
fn find_keys_by_prefix_new(
&mut self,
key_prefix: Vec<u8>,
) -> Result<Self::FindKeysByPrefix, ExecutionError>;
/// Resolves the promise to access keys having a specific prefix
fn find_keys_by_prefix_wait(
&mut self,
promise: &Self::FindKeysByPrefix,
) -> Result<Vec<Vec<u8>>, ExecutionError>;
/// Reads the data from the key/values having a specific prefix.
#[cfg(feature = "test")]
#[expect(clippy::type_complexity)]
fn find_key_values_by_prefix(
&mut self,
key_prefix: Vec<u8>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ExecutionError> {
let promise = self.find_key_values_by_prefix_new(key_prefix)?;
self.find_key_values_by_prefix_wait(&promise)
}
/// Creates the promise to access key/values having a specific prefix
fn find_key_values_by_prefix_new(
&mut self,
key_prefix: Vec<u8>,
) -> Result<Self::FindKeyValuesByPrefix, ExecutionError>;
/// Resolves the promise to access key/values having a specific prefix
#[expect(clippy::type_complexity)]
fn find_key_values_by_prefix_wait(
&mut self,
promise: &Self::FindKeyValuesByPrefix,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ExecutionError>;
/// Makes an HTTP request to the given URL and returns the answer, if any.
fn perform_http_request(
&mut self,
request: http::Request,
) -> Result<http::Response, ExecutionError>;
/// Ensures that the current time at block validation is `< timestamp`. Note that block
/// validation happens at or after the block timestamp, but isn't necessarily the same.
///
/// Cannot be used in fast blocks: A block using this call should be proposed by a regular
/// owner, not a super owner.
fn assert_before(&mut self, timestamp: Timestamp) -> Result<(), ExecutionError>;
/// Reads a data blob specified by a given hash.
fn read_data_blob(&mut self, hash: DataBlobHash) -> Result<Vec<u8>, ExecutionError>;
/// Asserts the existence of a data blob with the given hash.
fn assert_data_blob_exists(&mut self, hash: DataBlobHash) -> Result<(), ExecutionError>;
/// Returns true if the corresponding contract uses a zero amount of storage.
fn has_empty_storage(&mut self, application: ApplicationId) -> Result<bool, ExecutionError>;
/// Returns the maximum blob size from the `ResourceControlPolicy`.
fn maximum_blob_size(&mut self) -> Result<u64, ExecutionError>;
/// Returns whether contract log messages should be output.
/// This is typically enabled for clients but disabled for validators.
fn allow_application_logs(&mut self) -> Result<bool, ExecutionError>;
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/policy.rs | linera-execution/src/policy.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module contains types related to fees and pricing.
//! Defines the economic parameters and hard limits for resource consumption
//! within the Linera network. It specifies prices for fundamental units like fuel,
//! individual read/write operations, costs per byte read/written,
//! base costs for messages and operations, and costs associated with publishing blobs.
//! It also sets overarching limits such as the maximum fuel allowed per block,
//! the maximum block size, and limits on concurrent operations.
use std::{collections::BTreeSet, fmt};
use allocative::Allocative;
use linera_base::{
data_types::{Amount, ArithmeticError, BlobContent, CompressedBytecode, Resources},
ensure,
identifiers::BlobType,
vm::VmRuntime,
};
use serde::{Deserialize, Serialize};
use crate::ExecutionError;
/// A collection of prices and limits associated with block execution.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Serialize, Deserialize, Allocative)]
pub struct ResourceControlPolicy {
    /// The price per unit of fuel (aka gas) for Wasm execution.
    pub wasm_fuel_unit: Amount,
    /// The price per unit of fuel (aka gas) for EVM execution.
    pub evm_fuel_unit: Amount,
    /// The price of one read operation.
    pub read_operation: Amount,
    /// The price of one write operation.
    pub write_operation: Amount,
    /// The price of accessing one byte from the runtime.
    pub byte_runtime: Amount,
    /// The price of reading a byte.
    pub byte_read: Amount,
    /// The price of writing a byte.
    pub byte_written: Amount,
    /// The base price to read a blob.
    pub blob_read: Amount,
    /// The base price to publish a blob.
    pub blob_published: Amount,
    /// The price to read a blob, per byte.
    pub blob_byte_read: Amount,
    /// The price to publish a blob, per byte.
    pub blob_byte_published: Amount,
    /// The price of increasing storage by a byte.
    // TODO(#1536): This is not fully supported.
    pub byte_stored: Amount,
    /// The base price of adding an operation to a block.
    pub operation: Amount,
    /// The additional price for each byte in the argument of a user operation.
    pub operation_byte: Amount,
    /// The base price of sending a message from a block.
    pub message: Amount,
    /// The additional price for each byte in the argument of a user message.
    pub message_byte: Amount,
    /// The price per query to a service as an oracle.
    pub service_as_oracle_query: Amount,
    /// The price for performing an HTTP request.
    pub http_request: Amount,
    // TODO(#1538): Cap the number of transactions per block and the total size of their
    // arguments.
    /// The maximum amount of Wasm fuel a block can consume.
    pub maximum_wasm_fuel_per_block: u64,
    /// The maximum amount of EVM fuel a block can consume.
    pub maximum_evm_fuel_per_block: u64,
    /// The maximum time in milliseconds that a block can spend executing services as oracles.
    pub maximum_service_oracle_execution_ms: u64,
    /// The maximum size of a block. This includes the block proposal itself as well as
    /// the execution outcome.
    pub maximum_block_size: u64,
    /// The maximum size of decompressed contract or service bytecode, in bytes.
    pub maximum_bytecode_size: u64,
    /// The maximum size of a blob, in bytes.
    pub maximum_blob_size: u64,
    /// The maximum number of published blobs per block.
    pub maximum_published_blobs: u64,
    /// The maximum size of a block proposal, in bytes.
    pub maximum_block_proposal_size: u64,
    /// The maximum amount of data to read per block, in bytes.
    pub maximum_bytes_read_per_block: u64,
    /// The maximum amount of data to write per block, in bytes.
    pub maximum_bytes_written_per_block: u64,
    /// The maximum size in bytes of an oracle response.
    pub maximum_oracle_response_bytes: u64,
    /// The maximum size in bytes of a received HTTP response.
    pub maximum_http_response_bytes: u64,
    /// The maximum amount of time allowed to wait for an HTTP response.
    pub http_request_timeout_ms: u64,
    /// The list of hosts that contracts and services can send HTTP requests to.
    pub http_request_allow_list: BTreeSet<String>,
}
impl fmt::Display for ResourceControlPolicy {
    /// Renders the full policy as a human-readable, multi-line listing of every
    /// price and limit.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Destructure exhaustively (no `..`) so that adding a field to the struct
        // without updating this listing becomes a compile error.
        let ResourceControlPolicy {
            wasm_fuel_unit,
            evm_fuel_unit,
            read_operation,
            write_operation,
            byte_runtime,
            byte_read,
            byte_written,
            blob_read,
            blob_published,
            blob_byte_read,
            blob_byte_published,
            byte_stored,
            operation,
            operation_byte,
            message,
            message_byte,
            service_as_oracle_query,
            http_request,
            maximum_wasm_fuel_per_block,
            maximum_evm_fuel_per_block,
            maximum_service_oracle_execution_ms,
            maximum_block_size,
            maximum_blob_size,
            maximum_published_blobs,
            maximum_bytecode_size,
            maximum_block_proposal_size,
            maximum_bytes_read_per_block,
            maximum_bytes_written_per_block,
            maximum_oracle_response_bytes,
            maximum_http_response_bytes,
            http_request_allow_list,
            http_request_timeout_ms,
        } = self;
        // Return the `write!` result directly instead of `write!(...)?; Ok(())` —
        // same behavior, one less indirection.
        write!(
            f,
            "Resource control policy:\n\
            {wasm_fuel_unit:.2} cost per Wasm fuel unit\n\
            {evm_fuel_unit:.2} cost per EVM fuel unit\n\
            {read_operation:.2} cost per read operation\n\
            {write_operation:.2} cost per write operation\n\
            {byte_runtime:.2} cost per runtime byte read operation\n\
            {byte_read:.2} cost per byte read\n\
            {byte_written:.2} cost per byte written\n\
            {blob_read:.2} base cost per read blob\n\
            {blob_published:.2} base cost per published blob\n\
            {blob_byte_read:.2} cost of reading blobs, per byte\n\
            {blob_byte_published:.2} cost of publishing blobs, per byte\n\
            {byte_stored:.2} cost per byte stored\n\
            {operation:.2} per operation\n\
            {operation_byte:.2} per byte in the argument of an operation\n\
            {service_as_oracle_query:.2} per query to a service as an oracle\n\
            {message:.2} per outgoing messages\n\
            {message_byte:.2} per byte in the argument of an outgoing messages\n\
            {http_request:.2} per HTTP request performed\n\
            {maximum_wasm_fuel_per_block} maximum Wasm fuel per block\n\
            {maximum_evm_fuel_per_block} maximum EVM fuel per block\n\
            {maximum_service_oracle_execution_ms} ms maximum service-as-oracle execution time per \
            block\n\
            {maximum_block_size} maximum size of a block\n\
            {maximum_blob_size} maximum size of a data blob, bytecode or other binary blob\n\
            {maximum_published_blobs} maximum number of blobs published per block\n\
            {maximum_bytecode_size} maximum size of service and contract bytecode\n\
            {maximum_block_proposal_size} maximum size of a block proposal\n\
            {maximum_bytes_read_per_block} maximum number of bytes read per block\n\
            {maximum_bytes_written_per_block} maximum number of bytes written per block\n\
            {maximum_oracle_response_bytes} maximum number of bytes of an oracle response\n\
            {maximum_http_response_bytes} maximum number of bytes of an HTTP response\n\
            {http_request_timeout_ms} ms timeout for HTTP requests\n\
            HTTP hosts allowed for contracts and services: {http_request_allow_list:#?}\n",
        )
    }
}
impl Default for ResourceControlPolicy {
fn default() -> Self {
Self::no_fees()
}
}
impl ResourceControlPolicy {
    /// Creates a policy with no cost for anything.
    ///
    /// This can be used in tests or benchmarks.
    pub fn no_fees() -> Self {
        Self {
            wasm_fuel_unit: Amount::ZERO,
            evm_fuel_unit: Amount::ZERO,
            read_operation: Amount::ZERO,
            write_operation: Amount::ZERO,
            byte_runtime: Amount::ZERO,
            byte_read: Amount::ZERO,
            byte_written: Amount::ZERO,
            blob_read: Amount::ZERO,
            blob_published: Amount::ZERO,
            blob_byte_read: Amount::ZERO,
            blob_byte_published: Amount::ZERO,
            byte_stored: Amount::ZERO,
            operation: Amount::ZERO,
            operation_byte: Amount::ZERO,
            message: Amount::ZERO,
            message_byte: Amount::ZERO,
            service_as_oracle_query: Amount::ZERO,
            http_request: Amount::ZERO,
            // All limits are effectively disabled.
            maximum_wasm_fuel_per_block: u64::MAX,
            maximum_evm_fuel_per_block: u64::MAX,
            maximum_service_oracle_execution_ms: u64::MAX,
            maximum_block_size: u64::MAX,
            maximum_blob_size: u64::MAX,
            maximum_published_blobs: u64::MAX,
            maximum_bytecode_size: u64::MAX,
            maximum_block_proposal_size: u64::MAX,
            maximum_bytes_read_per_block: u64::MAX,
            maximum_bytes_written_per_block: u64::MAX,
            maximum_oracle_response_bytes: u64::MAX,
            maximum_http_response_bytes: u64::MAX,
            http_request_timeout_ms: u64::MAX,
            http_request_allow_list: BTreeSet::new(),
        }
    }
    /// The maximum fuel per block according to the `VmRuntime`.
    pub fn maximum_fuel_per_block(&self, vm_runtime: VmRuntime) -> u64 {
        match vm_runtime {
            VmRuntime::Wasm => self.maximum_wasm_fuel_per_block,
            VmRuntime::Evm => self.maximum_evm_fuel_per_block,
        }
    }
    /// Creates a policy with no cost for anything except fuel.
    ///
    /// This can be used in tests that need whole numbers in their chain balance.
    #[cfg(with_testing)]
    pub fn only_fuel() -> Self {
        Self {
            wasm_fuel_unit: Amount::from_micros(1),
            evm_fuel_unit: Amount::from_micros(1),
            ..Self::no_fees()
        }
    }
    /// Creates a policy where all categories have a small non-zero cost.
    #[cfg(with_testing)]
    pub fn all_categories() -> Self {
        Self {
            wasm_fuel_unit: Amount::from_nanos(1),
            evm_fuel_unit: Amount::from_nanos(1),
            byte_read: Amount::from_attos(100),
            byte_written: Amount::from_attos(1_000),
            blob_read: Amount::from_nanos(1),
            blob_published: Amount::from_nanos(10),
            blob_byte_read: Amount::from_attos(100),
            blob_byte_published: Amount::from_attos(1_000),
            operation: Amount::from_attos(10),
            operation_byte: Amount::from_attos(1),
            message: Amount::from_attos(10),
            message_byte: Amount::from_attos(1),
            http_request: Amount::from_micros(1),
            ..Self::no_fees()
        }
    }
    /// Creates a policy that matches the Testnet.
    pub fn testnet() -> Self {
        Self {
            wasm_fuel_unit: Amount::from_nanos(10),
            evm_fuel_unit: Amount::from_nanos(10),
            byte_runtime: Amount::from_nanos(1),
            byte_read: Amount::from_nanos(10),
            byte_written: Amount::from_nanos(100),
            blob_read: Amount::from_nanos(100),
            blob_published: Amount::from_nanos(1000),
            blob_byte_read: Amount::from_nanos(10),
            blob_byte_published: Amount::from_nanos(100),
            read_operation: Amount::from_micros(10),
            write_operation: Amount::from_micros(20),
            byte_stored: Amount::from_nanos(10),
            message_byte: Amount::from_nanos(100),
            operation_byte: Amount::from_nanos(10),
            operation: Amount::from_micros(10),
            message: Amount::from_micros(10),
            service_as_oracle_query: Amount::from_millis(10),
            http_request: Amount::from_micros(50),
            maximum_wasm_fuel_per_block: 100_000_000,
            maximum_evm_fuel_per_block: 100_000_000,
            maximum_service_oracle_execution_ms: 10_000,
            maximum_block_size: 1_000_000,
            maximum_blob_size: 1_000_000,
            maximum_published_blobs: 10,
            maximum_bytecode_size: 10_000_000,
            maximum_block_proposal_size: 13_000_000,
            maximum_bytes_read_per_block: 100_000_000,
            maximum_bytes_written_per_block: 10_000_000,
            maximum_oracle_response_bytes: 10_000,
            maximum_http_response_bytes: 10_000,
            http_request_timeout_ms: 20_000,
            http_request_allow_list: BTreeSet::new(),
        }
    }
    /// Computes the total price of the given [`Resources`] under this policy, summing
    /// the price of every resource category tracked in [`Resources`].
    ///
    /// Returns an [`ArithmeticError`] if any intermediate sum or product overflows.
    pub fn total_price(&self, resources: &Resources) -> Result<Amount, ArithmeticError> {
        let mut amount = Amount::ZERO;
        amount.try_add_assign(self.fuel_price(resources.wasm_fuel, VmRuntime::Wasm)?)?;
        amount.try_add_assign(self.fuel_price(resources.evm_fuel, VmRuntime::Evm)?)?;
        amount.try_add_assign(self.read_operations_price(resources.read_operations)?)?;
        amount.try_add_assign(self.bytes_runtime_price(resources.bytes_runtime)?)?;
        amount.try_add_assign(self.write_operations_price(resources.write_operations)?)?;
        amount.try_add_assign(self.bytes_read_price(resources.bytes_to_read as u64)?)?;
        amount.try_add_assign(self.bytes_written_price(resources.bytes_to_write as u64)?)?;
        // Blob reads: per-byte cost plus a base cost per blob.
        amount.try_add_assign(
            self.blob_byte_read
                .try_mul(resources.blob_bytes_to_read as u128)?
                .try_add(self.blob_read.try_mul(resources.blobs_to_read as u128)?)?,
        )?;
        // Blob publications: per-byte cost plus a base cost per blob.
        amount.try_add_assign(
            self.blob_byte_published
                .try_mul(resources.blob_bytes_to_publish as u128)?
                .try_add(
                    self.blob_published
                        .try_mul(resources.blobs_to_publish as u128)?,
                )?,
        )?;
        amount.try_add_assign(self.message.try_mul(resources.messages as u128)?)?;
        amount.try_add_assign(self.message_bytes_price(resources.message_size as u64)?)?;
        amount.try_add_assign(self.bytes_stored_price(resources.storage_size_delta as u64)?)?;
        amount.try_add_assign(
            self.service_as_oracle_queries_price(resources.service_as_oracle_queries)?,
        )?;
        amount.try_add_assign(self.http_requests_price(resources.http_requests)?)?;
        Ok(amount)
    }
    /// Price of `size` bytes of operation arguments.
    pub(crate) fn operation_bytes_price(&self, size: u64) -> Result<Amount, ArithmeticError> {
        self.operation_byte.try_mul(size as u128)
    }
    /// Price of `size` bytes of outgoing message arguments.
    pub(crate) fn message_bytes_price(&self, size: u64) -> Result<Amount, ArithmeticError> {
        self.message_byte.try_mul(size as u128)
    }
    /// Price of `count` read operations.
    pub(crate) fn read_operations_price(&self, count: u32) -> Result<Amount, ArithmeticError> {
        self.read_operation.try_mul(count as u128)
    }
    /// Price of `count` write operations.
    pub(crate) fn write_operations_price(&self, count: u32) -> Result<Amount, ArithmeticError> {
        self.write_operation.try_mul(count as u128)
    }
    /// Price of reading `count` bytes from the runtime.
    pub(crate) fn bytes_runtime_price(&self, count: u32) -> Result<Amount, ArithmeticError> {
        self.byte_runtime.try_mul(count as u128)
    }
    /// Price of reading `count` bytes from storage.
    pub(crate) fn bytes_read_price(&self, count: u64) -> Result<Amount, ArithmeticError> {
        self.byte_read.try_mul(count as u128)
    }
    /// Price of writing `count` bytes to storage.
    pub(crate) fn bytes_written_price(&self, count: u64) -> Result<Amount, ArithmeticError> {
        self.byte_written.try_mul(count as u128)
    }
    /// Price of reading one blob of `count` bytes: the per-byte cost plus the base cost.
    pub(crate) fn blob_read_price(&self, count: u64) -> Result<Amount, ArithmeticError> {
        self.blob_byte_read
            .try_mul(count as u128)?
            .try_add(self.blob_read)
    }
    /// Price of publishing one blob of `count` bytes: the per-byte cost plus the base cost.
    pub(crate) fn blob_published_price(&self, count: u64) -> Result<Amount, ArithmeticError> {
        self.blob_byte_published
            .try_mul(count as u128)?
            .try_add(self.blob_published)
    }
    /// Price of storing `count` additional bytes.
    // TODO(#1536): This is not fully implemented.
    #[allow(dead_code)]
    pub(crate) fn bytes_stored_price(&self, count: u64) -> Result<Amount, ArithmeticError> {
        self.byte_stored.try_mul(count as u128)
    }
    /// Returns how much it would cost to perform `count` queries to services running as oracles.
    pub(crate) fn service_as_oracle_queries_price(
        &self,
        count: u32,
    ) -> Result<Amount, ArithmeticError> {
        self.service_as_oracle_query.try_mul(count as u128)
    }
    /// Price of performing `count` HTTP requests.
    pub(crate) fn http_requests_price(&self, count: u32) -> Result<Amount, ArithmeticError> {
        self.http_request.try_mul(count as u128)
    }
    /// The fuel unit price for the given VM runtime.
    fn fuel_unit_price(&self, vm_runtime: VmRuntime) -> Amount {
        match vm_runtime {
            VmRuntime::Wasm => self.wasm_fuel_unit,
            VmRuntime::Evm => self.evm_fuel_unit,
        }
    }
    /// Price of `fuel` units of execution in the given VM runtime.
    pub(crate) fn fuel_price(
        &self,
        fuel: u64,
        vm_runtime: VmRuntime,
    ) -> Result<Amount, ArithmeticError> {
        self.fuel_unit_price(vm_runtime).try_mul(u128::from(fuel))
    }
    /// Returns how much fuel can be paid with the given balance.
    // NOTE(review): relies on `Amount::saturating_ratio`; presumably yields `u64::MAX`
    // when the fuel unit price is zero — confirm against `Amount`'s implementation.
    pub(crate) fn remaining_fuel(&self, balance: Amount, vm_runtime: VmRuntime) -> u64 {
        let fuel_unit = self.fuel_unit_price(vm_runtime);
        u64::try_from(balance.saturating_ratio(fuel_unit)).unwrap_or(u64::MAX)
    }
    /// Checks that the blob does not exceed `maximum_blob_size` and, for bytecode blobs,
    /// that the decompressed bytecode does not exceed `maximum_bytecode_size`.
    ///
    /// Returns [`ExecutionError::BlobTooLarge`] or [`ExecutionError::BytecodeTooLarge`]
    /// respectively on violation.
    pub fn check_blob_size(&self, content: &BlobContent) -> Result<(), ExecutionError> {
        ensure!(
            u64::try_from(content.bytes().len())
                .ok()
                .is_some_and(|size| size <= self.maximum_blob_size),
            ExecutionError::BlobTooLarge
        );
        match content.blob_type() {
            BlobType::ContractBytecode | BlobType::ServiceBytecode | BlobType::EvmBytecode => {
                ensure!(
                    CompressedBytecode::decompressed_size_at_most(
                        content.bytes(),
                        self.maximum_bytecode_size
                    )?,
                    ExecutionError::BytecodeTooLarge
                );
            }
            // Non-bytecode blobs only need the size check above.
            BlobType::Data
            | BlobType::ApplicationDescription
            | BlobType::Committee
            | BlobType::ChainDescription => {}
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/transaction_tracker.rs | linera-execution/src/transaction_tracker.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{BTreeMap, BTreeSet},
future::Future,
mem, vec,
};
use custom_debug_derive::Debug;
use linera_base::{
data_types::{Blob, BlobContent, Event, OracleResponse, StreamUpdate, Timestamp},
ensure,
identifiers::{ApplicationId, BlobId, ChainId, StreamId},
};
use crate::{ExecutionError, OutgoingMessage};
/// For each `(chain_id, stream_id)` pair, the `(previous_index, next_index)` range of
/// stream events that still need to be processed (ranges are merged by widening).
type AppStreamUpdates = BTreeMap<(ChainId, StreamId), (u32, u32)>;
/// Tracks oracle responses and execution outcomes of an ongoing transaction execution, as well
/// as replayed oracle responses.
#[derive(Debug, Default)]
pub struct TransactionTracker {
    /// Recorded oracle responses being replayed; `None` when recording fresh responses.
    #[debug(skip_if = Option::is_none)]
    replaying_oracle_responses: Option<vec::IntoIter<OracleResponse>>,
    /// Oracle responses recorded (or re-recorded during replay) so far.
    #[debug(skip_if = Vec::is_empty)]
    oracle_responses: Vec<OracleResponse>,
    /// Messages scheduled to be sent by this transaction.
    #[debug(skip_if = Vec::is_empty)]
    outgoing_messages: Vec<OutgoingMessage>,
    /// The current local time.
    local_time: Timestamp,
    /// The index of the current transaction in the block.
    transaction_index: u32,
    /// The next index to assign to a newly created application.
    next_application_index: u32,
    /// The next index to assign to a newly created chain.
    next_chain_index: u32,
    /// Events recorded by contracts' `emit` calls.
    events: Vec<Event>,
    /// Blobs created by contracts.
    ///
    /// As of right now, blobs created by the contracts are one of the following types:
    /// - [`Data`]
    /// - [`ContractBytecode`]
    /// - [`ServiceBytecode`]
    /// - [`EvmBytecode`]
    /// - [`ApplicationDescription`]
    /// - [`ChainDescription`]
    blobs: BTreeMap<BlobId, BlobContent>,
    /// The blobs created in the previous transactions.
    previously_created_blobs: BTreeMap<BlobId, BlobContent>,
    /// Operation result.
    operation_result: Option<Vec<u8>>,
    /// Streams that have been updated but not yet processed during this transaction.
    streams_to_process: BTreeMap<ApplicationId, AppStreamUpdates>,
    /// Published blobs this transaction refers to by [`BlobId`].
    blobs_published: BTreeSet<BlobId>,
}
/// The [`TransactionTracker`] contents after a transaction has finished.
#[derive(Debug, Default)]
pub struct TransactionOutcome {
    /// Oracle responses recorded by the transaction.
    #[debug(skip_if = Vec::is_empty)]
    pub oracle_responses: Vec<OracleResponse>,
    /// Messages the transaction scheduled to be sent.
    #[debug(skip_if = Vec::is_empty)]
    pub outgoing_messages: Vec<OutgoingMessage>,
    /// The next application index after this transaction.
    pub next_application_index: u32,
    /// The next chain index after this transaction.
    pub next_chain_index: u32,
    /// Events recorded by contracts' `emit` calls.
    pub events: Vec<Event>,
    /// Blobs created by contracts.
    pub blobs: Vec<Blob>,
    /// Operation result.
    pub operation_result: Vec<u8>,
    /// Blobs published by this transaction.
    pub blobs_published: BTreeSet<BlobId>,
}
impl TransactionTracker {
    /// Creates a tracker for the transaction at `transaction_index` in a block.
    ///
    /// If `oracle_responses` is `Some`, the tracker runs in replay mode and yields the
    /// recorded responses instead of executing oracles. `blobs` holds the blobs created
    /// by the previous transactions of the block, grouped per transaction.
    pub fn new(
        local_time: Timestamp,
        transaction_index: u32,
        next_application_index: u32,
        next_chain_index: u32,
        oracle_responses: Option<Vec<OracleResponse>>,
        blobs: &[Vec<Blob>],
    ) -> Self {
        let mut previously_created_blobs = BTreeMap::new();
        for tx_blobs in blobs {
            for blob in tx_blobs {
                previously_created_blobs.insert(blob.id(), blob.content().clone());
            }
        }
        TransactionTracker {
            local_time,
            transaction_index,
            next_application_index,
            next_chain_index,
            replaying_oracle_responses: oracle_responses.map(Vec::into_iter),
            previously_created_blobs,
            ..Self::default()
        }
    }
    /// Replaces the set of blobs created so far (builder style).
    pub fn with_blobs(mut self, blobs: BTreeMap<BlobId, BlobContent>) -> Self {
        self.blobs = blobs;
        self
    }
    /// Returns the current local time.
    pub fn local_time(&self) -> Timestamp {
        self.local_time
    }
    /// Updates the current local time.
    pub fn set_local_time(&mut self, local_time: Timestamp) {
        self.local_time = local_time;
    }
    /// Returns the index of this transaction in the block.
    pub fn transaction_index(&self) -> u32 {
        self.transaction_index
    }
    /// Returns the next application index without consuming it.
    pub fn peek_application_index(&self) -> u32 {
        self.next_application_index
    }
    /// Returns the next application index and increments the counter.
    pub fn next_application_index(&mut self) -> u32 {
        let index = self.next_application_index;
        self.next_application_index += 1;
        index
    }
    /// Returns the next chain index and increments the counter.
    pub fn next_chain_index(&mut self) -> u32 {
        let index = self.next_chain_index;
        self.next_chain_index += 1;
        index
    }
    /// Schedules an outgoing message to be sent by this transaction.
    pub fn add_outgoing_message(&mut self, message: OutgoingMessage) {
        self.outgoing_messages.push(message);
    }
    /// Schedules multiple outgoing messages, preserving their order.
    pub fn add_outgoing_messages(&mut self, messages: impl IntoIterator<Item = OutgoingMessage>) {
        for message in messages {
            self.add_outgoing_message(message);
        }
    }
    /// Records an event emitted in the given stream at the given index.
    pub fn add_event(&mut self, stream_id: StreamId, index: u32, value: Vec<u8>) {
        self.events.push(Event {
            stream_id,
            index,
            value,
        });
    }
    /// Looks up a blob created by this transaction, falling back to blobs created by
    /// previous transactions in the block.
    pub fn get_blob_content(&self, blob_id: &BlobId) -> Option<&BlobContent> {
        if let Some(content) = self.blobs.get(blob_id) {
            return Some(content);
        }
        self.previously_created_blobs.get(blob_id)
    }
    /// Records a blob created by this transaction.
    pub fn add_created_blob(&mut self, blob: Blob) {
        self.blobs.insert(blob.id(), blob.into_content());
    }
    /// Records that this transaction published the blob with the given ID.
    pub fn add_published_blob(&mut self, blob_id: BlobId) {
        self.blobs_published.insert(blob_id);
    }
    /// Returns the blobs created by this transaction so far.
    pub fn created_blobs(&self) -> &BTreeMap<BlobId, BlobContent> {
        &self.blobs
    }
    /// Sets the operation result, replacing any previously recorded one.
    pub fn add_operation_result(&mut self, result: Option<Vec<u8>>) {
        self.operation_result = result
    }
    /// In replay mode, returns the next recorded oracle response. Otherwise executes `f` and
    /// records and returns the result. `f` is the implementation of the actual oracle and is
    /// only called in validation mode, so it does not have to be fully deterministic.
    pub async fn oracle<F, G>(&mut self, f: F) -> Result<&OracleResponse, ExecutionError>
    where
        F: FnOnce() -> G,
        G: Future<Output = Result<OracleResponse, ExecutionError>>,
    {
        let response = match self.next_replayed_oracle_response()? {
            Some(response) => response,
            None => f().await?,
        };
        self.oracle_responses.push(response);
        Ok(self.oracle_responses.last().unwrap())
    }
    /// Records that events `previous_index..next_index` of the given stream still need to be
    /// processed; an existing entry for the same stream is widened to cover both ranges.
    pub fn add_stream_to_process(
        &mut self,
        application_id: ApplicationId,
        chain_id: ChainId,
        stream_id: StreamId,
        previous_index: u32,
        next_index: u32,
    ) {
        if next_index == previous_index {
            return; // No new events in the stream.
        }
        self.streams_to_process
            .entry(application_id)
            .or_default()
            .entry((chain_id, stream_id))
            .and_modify(|(pi, ni)| {
                // Widen the pending range to cover both the old and the new update.
                *pi = (*pi).min(previous_index);
                *ni = (*ni).max(next_index);
            })
            .or_insert_with(|| (previous_index, next_index));
    }
    /// Removes a pending stream update, dropping the application's entry when it becomes empty.
    pub fn remove_stream_to_process(
        &mut self,
        application_id: ApplicationId,
        chain_id: ChainId,
        stream_id: StreamId,
    ) {
        let Some(streams) = self.streams_to_process.get_mut(&application_id) else {
            return;
        };
        if streams.remove(&(chain_id, stream_id)).is_some() && streams.is_empty() {
            self.streams_to_process.remove(&application_id);
        }
    }
    /// Takes and clears all pending stream updates, converting them into [`StreamUpdate`]s
    /// grouped by application.
    pub fn take_streams_to_process(&mut self) -> BTreeMap<ApplicationId, Vec<StreamUpdate>> {
        mem::take(&mut self.streams_to_process)
            .into_iter()
            .map(|(app_id, streams)| {
                let updates = streams
                    .into_iter()
                    .map(
                        |((chain_id, stream_id), (previous_index, next_index))| StreamUpdate {
                            chain_id,
                            stream_id,
                            previous_index,
                            next_index,
                        },
                    )
                    .collect();
                (app_id, updates)
            })
            .collect()
    }
    /// Adds the oracle response to the record.
    /// If replaying, it also checks that it matches the next replayed one and returns `true`.
    pub fn replay_oracle_response(
        &mut self,
        oracle_response: OracleResponse,
    ) -> Result<bool, ExecutionError> {
        let replaying = if let Some(recorded_response) = self.next_replayed_oracle_response()? {
            ensure!(
                recorded_response == oracle_response,
                ExecutionError::OracleResponseMismatch
            );
            true
        } else {
            false
        };
        self.oracle_responses.push(oracle_response);
        Ok(replaying)
    }
    /// If in replay mode, returns the next oracle response, or an error if it is missing.
    ///
    /// If not in replay mode, `None` is returned, and the caller must execute the actual oracle
    /// to obtain the value.
    ///
    /// In both cases, the value (returned or obtained from the oracle) must be recorded using
    /// `add_oracle_response`.
    fn next_replayed_oracle_response(&mut self) -> Result<Option<OracleResponse>, ExecutionError> {
        let Some(responses) = &mut self.replaying_oracle_responses else {
            return Ok(None); // Not in replay mode.
        };
        let response = responses
            .next()
            .ok_or_else(|| ExecutionError::MissingOracleResponse)?;
        Ok(Some(response))
    }
    /// Finalizes the transaction into a [`TransactionOutcome`].
    ///
    /// Fails with [`ExecutionError::UnprocessedStreams`] if stream updates are still pending,
    /// and with [`ExecutionError::UnexpectedOracleResponse`] if replayed responses were left
    /// unconsumed.
    pub fn into_outcome(self) -> Result<TransactionOutcome, ExecutionError> {
        let TransactionTracker {
            replaying_oracle_responses,
            oracle_responses,
            outgoing_messages,
            local_time: _,
            transaction_index: _,
            next_application_index,
            next_chain_index,
            events,
            blobs,
            previously_created_blobs: _,
            operation_result,
            streams_to_process,
            blobs_published,
        } = self;
        ensure!(
            streams_to_process.is_empty(),
            ExecutionError::UnprocessedStreams
        );
        if let Some(mut responses) = replaying_oracle_responses {
            ensure!(
                responses.next().is_none(),
                ExecutionError::UnexpectedOracleResponse
            );
        }
        // The blob IDs already carry the hash, so re-hashing the content is not needed.
        let blobs = blobs
            .into_iter()
            .map(|(blob_id, content)| Blob::new_with_hash_unchecked(blob_id, content))
            .collect::<Vec<_>>();
        Ok(TransactionOutcome {
            outgoing_messages,
            oracle_responses,
            next_application_index,
            next_chain_index,
            events,
            blobs,
            operation_result: operation_result.unwrap_or_default(),
            blobs_published,
        })
    }
}
#[cfg(with_testing)]
impl TransactionTracker {
    /// Creates a replaying [`TransactionTracker`] for tests, with default values and seeded
    /// with the given oracle responses.
    pub fn new_replaying(oracle_responses: Vec<OracleResponse>) -> Self {
        Self::new(Timestamp::from(0), 0, 0, 0, Some(oracle_responses), &[])
    }
    /// Creates a replaying [`TransactionTracker`] for tests whose recorded responses are
    /// `OracleResponse::Blob` entries, one per given blob ID.
    pub fn new_replaying_blobs<T>(blob_ids: T) -> Self
    where
        T: IntoIterator,
        T::Item: std::borrow::Borrow<BlobId>,
    {
        let oracle_responses: Vec<_> = blob_ids
            .into_iter()
            .map(|blob_id| OracleResponse::Blob(*std::borrow::Borrow::borrow(&blob_id)))
            .collect();
        Self::new_replaying(oracle_responses)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/resources.rs | linera-execution/src/resources.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! This module tracks the resources used during the execution of a transaction.
use std::{sync::Arc, time::Duration};
use custom_debug_derive::Debug;
use linera_base::{
data_types::{Amount, ArithmeticError, Blob},
ensure,
identifiers::AccountOwner,
ownership::ChainOwnership,
vm::VmRuntime,
};
use linera_views::{context::Context, ViewError};
use serde::Serialize;
use crate::{ExecutionError, Message, Operation, ResourceControlPolicy, SystemExecutionStateView};
/// Tracks resource usage and charges it against an account.
///
/// `Account` holds the balance being charged and `Tracker` accumulates the usage
/// (see [`ResourceTracker`]).
#[derive(Clone, Debug, Default)]
pub struct ResourceController<Account = Amount, Tracker = ResourceTracker> {
    /// The (fixed) policy used to charge fees and control resource usage.
    policy: Arc<ResourceControlPolicy>,
    /// How the resources were used so far.
    pub tracker: Tracker,
    /// The account paying for the resource usage.
    pub account: Account,
}
// Constructor and accessors, available for any account/tracker combination.
impl<Account, Tracker> ResourceController<Account, Tracker> {
    /// Creates a new resource controller with the given policy and account.
    pub fn new(policy: Arc<ResourceControlPolicy>, tracker: Tracker, account: Account) -> Self {
        Self {
            policy,
            tracker,
            account,
        }
    }
    /// Returns a reference to the policy.
    pub fn policy(&self) -> &Arc<ResourceControlPolicy> {
        &self.policy
    }
    /// Returns a reference to the tracker.
    pub fn tracker(&self) -> &Tracker {
        &self.tracker
    }
}
/// The runtime size of an `Amount`.
pub const RUNTIME_AMOUNT_SIZE: u32 = 16;
/// The runtime size of an `ApplicationId`.
pub const RUNTIME_APPLICATION_ID_SIZE: u32 = 32;
/// The runtime size of a `BlockHeight`.
pub const RUNTIME_BLOCK_HEIGHT_SIZE: u32 = 8;
/// The runtime size of a `ChainId`.
pub const RUNTIME_CHAIN_ID_SIZE: u32 = 32;
/// The runtime size of a `Timestamp`.
pub const RUNTIME_TIMESTAMP_SIZE: u32 = 8;
/// The runtime size of the weight of an owner.
pub const RUNTIME_OWNER_WEIGHT_SIZE: u32 = 8;
/// The runtime size of the constant part of a `ChainOwnership`.
/// It consists of one `u32` and four `TimeDelta`s (8 bytes each), which form the
/// constant part of the `ChainOwnership`. This accounting is approximate:
/// TODO(#4164): Implement a procedure for computing naive sizes.
pub const RUNTIME_CONSTANT_CHAIN_OWNERSHIP_SIZE: u32 = 4 + 4 * 8;
#[cfg(test)]
mod tests {
    use std::mem::size_of;
    use linera_base::{
        data_types::{Amount, BlockHeight, Timestamp},
        identifiers::{ApplicationId, ChainId},
    };
    use crate::resources::{
        RUNTIME_AMOUNT_SIZE, RUNTIME_APPLICATION_ID_SIZE, RUNTIME_BLOCK_HEIGHT_SIZE,
        RUNTIME_CHAIN_ID_SIZE, RUNTIME_OWNER_WEIGHT_SIZE, RUNTIME_TIMESTAMP_SIZE,
    };
    /// Pins the `RUNTIME_*_SIZE` constants to the actual in-memory sizes of the
    /// corresponding types, so a change in a type's layout fails this test instead of
    /// silently going unnoticed.
    #[test]
    fn test_size_of_runtime_operations() {
        assert_eq!(RUNTIME_AMOUNT_SIZE as usize, size_of::<Amount>());
        assert_eq!(
            RUNTIME_APPLICATION_ID_SIZE as usize,
            size_of::<ApplicationId>()
        );
        assert_eq!(RUNTIME_BLOCK_HEIGHT_SIZE as usize, size_of::<BlockHeight>());
        assert_eq!(RUNTIME_CHAIN_ID_SIZE as usize, size_of::<ChainId>());
        assert_eq!(RUNTIME_TIMESTAMP_SIZE as usize, size_of::<Timestamp>());
        assert_eq!(RUNTIME_OWNER_WEIGHT_SIZE as usize, size_of::<u64>());
    }
}
/// The resources used so far by an execution process.
/// Acts as an accumulator for all resources consumed during
/// a specific execution flow. This could be the execution of a block,
/// the processing of a single message, or a specific phase within these
/// broader operations.
#[derive(Copy, Debug, Clone, Default)]
pub struct ResourceTracker {
    /// The total size of the block so far.
    pub block_size: u64,
    /// The EVM fuel used so far.
    pub evm_fuel: u64,
    /// The Wasm fuel used so far.
    pub wasm_fuel: u64,
    /// The number of read operations.
    pub read_operations: u32,
    /// The number of write operations.
    pub write_operations: u32,
    /// The size of bytes read from runtime.
    pub bytes_runtime: u32,
    /// The number of bytes read.
    pub bytes_read: u64,
    /// The number of bytes written.
    pub bytes_written: u64,
    /// The number of blobs read.
    pub blobs_read: u32,
    /// The number of blobs published.
    pub blobs_published: u32,
    /// The number of blob bytes read.
    pub blob_bytes_read: u64,
    /// The number of blob bytes published.
    pub blob_bytes_published: u64,
    /// The number of events read.
    pub events_read: u32,
    /// The number of events published.
    pub events_published: u32,
    /// The number of event bytes read.
    pub event_bytes_read: u64,
    /// The number of event bytes published.
    pub event_bytes_published: u64,
    /// The change in the number of bytes being stored by user applications.
    /// Signed, since applications can also free storage.
    pub bytes_stored: i32,
    /// The number of operations executed.
    pub operations: u32,
    /// The total size of the arguments of user operations.
    pub operation_bytes: u64,
    /// The number of outgoing messages created (system and user).
    pub messages: u32,
    /// The total size of the arguments of outgoing user messages.
    pub message_bytes: u64,
    /// The number of HTTP requests performed.
    pub http_requests: u32,
    /// The number of calls to services as oracles.
    pub service_oracle_queries: u32,
    /// The time spent executing services as oracles.
    pub service_oracle_execution: Duration,
    /// The amount allocated to message grants.
    pub grants: Amount,
}
impl ResourceTracker {
    /// Returns the fuel consumed so far by the virtual machine of the given kind.
    fn fuel(&self, vm_runtime: VmRuntime) -> u64 {
        match vm_runtime {
            VmRuntime::Evm => self.evm_fuel,
            VmRuntime::Wasm => self.wasm_fuel,
        }
    }
}
/// How to access the balance of an account.
pub trait BalanceHolder {
    /// Returns the funds currently available, failing only on arithmetic overflow.
    fn balance(&self) -> Result<Amount, ArithmeticError>;
    /// Credits `other` to the balance, failing on arithmetic overflow.
    fn try_add_assign(&mut self, other: Amount) -> Result<(), ArithmeticError>;
    /// Debits `other` from the balance, failing if the funds are insufficient.
    fn try_sub_assign(&mut self, other: Amount) -> Result<(), ArithmeticError>;
}
// The main accounting functions for a ResourceController.
//
// Each `track_*` method follows the same pattern: (1) bump the corresponding
// counter in the tracker with overflow checking, (2) enforce any per-block
// limit from the policy, and (3) charge the price to the funding account via
// `update_balance`. The order matters: counters are updated even if the
// subsequent charge fails.
impl<Account, Tracker> ResourceController<Account, Tracker>
where
    Account: BalanceHolder,
    Tracker: AsRef<ResourceTracker> + AsMut<ResourceTracker>,
{
    /// Obtains the balance of the account. The only possible error is an arithmetic
    /// overflow, which should not happen in practice due to final token supply.
    pub fn balance(&self) -> Result<Amount, ArithmeticError> {
        self.account.balance()
    }
    /// Operates a 3-way merge by transferring the difference between `initial`
    /// and `other` to `self`.
    pub fn merge_balance(&mut self, initial: Amount, other: Amount) -> Result<(), ExecutionError> {
        if other <= initial {
            // The balance decreased: charge the difference as fees.
            let sub_amount = initial.try_sub(other).expect("other <= initial");
            self.account.try_sub_assign(sub_amount).map_err(|_| {
                ExecutionError::FeesExceedFunding {
                    fees: sub_amount,
                    balance: self.balance().unwrap_or(Amount::MAX),
                }
            })?;
        } else {
            // The balance increased (e.g. a refund): credit the difference.
            self.account
                .try_add_assign(other.try_sub(initial).expect("other > initial"))?;
        }
        Ok(())
    }
    /// Subtracts an amount from a balance and reports an error if that is impossible.
    fn update_balance(&mut self, fees: Amount) -> Result<(), ExecutionError> {
        self.account
            .try_sub_assign(fees)
            .map_err(|_| ExecutionError::FeesExceedFunding {
                fees,
                balance: self.balance().unwrap_or(Amount::MAX),
            })?;
        Ok(())
    }
    /// Obtains the amount of fuel that could be spent by consuming the entire balance.
    ///
    /// This is capped by the remaining per-block fuel budget for the given VM.
    pub(crate) fn remaining_fuel(&self, vm_runtime: VmRuntime) -> u64 {
        let balance = self.balance().unwrap_or(Amount::MAX);
        let fuel = self.tracker.as_ref().fuel(vm_runtime);
        let maximum_fuel_per_block = self.policy.maximum_fuel_per_block(vm_runtime);
        self.policy
            .remaining_fuel(balance, vm_runtime)
            .min(maximum_fuel_per_block.saturating_sub(fuel))
    }
    /// Tracks the allocation of a grant.
    pub fn track_grant(&mut self, grant: Amount) -> Result<(), ExecutionError> {
        self.tracker.as_mut().grants.try_add_assign(grant)?;
        self.update_balance(grant)
    }
    /// Tracks the execution of an operation in block.
    ///
    /// Charges a flat per-operation fee, plus a per-byte fee for user operations.
    pub fn track_operation(&mut self, operation: &Operation) -> Result<(), ExecutionError> {
        self.tracker.as_mut().operations = self
            .tracker
            .as_mut()
            .operations
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.operation)?;
        match operation {
            // System operations are covered by the flat fee only.
            Operation::System(_) => Ok(()),
            Operation::User { bytes, .. } => {
                let size = bytes.len();
                self.tracker.as_mut().operation_bytes = self
                    .tracker
                    .as_mut()
                    .operation_bytes
                    .checked_add(size as u64)
                    .ok_or(ArithmeticError::Overflow)?;
                self.update_balance(self.policy.operation_bytes_price(size as u64)?)?;
                Ok(())
            }
        }
    }
    /// Tracks the creation of an outgoing message.
    ///
    /// Charges a flat per-message fee, plus a per-byte fee for user messages.
    pub fn track_message(&mut self, message: &Message) -> Result<(), ExecutionError> {
        self.tracker.as_mut().messages = self
            .tracker
            .as_mut()
            .messages
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.message)?;
        match message {
            // System messages are covered by the flat fee only.
            Message::System(_) => Ok(()),
            Message::User { bytes, .. } => {
                let size = bytes.len();
                self.tracker.as_mut().message_bytes = self
                    .tracker
                    .as_mut()
                    .message_bytes
                    .checked_add(size as u64)
                    .ok_or(ArithmeticError::Overflow)?;
                self.update_balance(self.policy.message_bytes_price(size as u64)?)?;
                Ok(())
            }
        }
    }
    /// Tracks the execution of an HTTP request.
    pub fn track_http_request(&mut self) -> Result<(), ExecutionError> {
        self.tracker.as_mut().http_requests = self
            .tracker
            .as_ref()
            .http_requests
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.http_request)
    }
    /// Tracks a number of fuel units used.
    ///
    /// Enforces the per-VM maximum fuel per block before charging the fuel price.
    pub(crate) fn track_fuel(
        &mut self,
        fuel: u64,
        vm_runtime: VmRuntime,
    ) -> Result<(), ExecutionError> {
        match vm_runtime {
            VmRuntime::Wasm => {
                self.tracker.as_mut().wasm_fuel = self
                    .tracker
                    .as_ref()
                    .wasm_fuel
                    .checked_add(fuel)
                    .ok_or(ArithmeticError::Overflow)?;
                ensure!(
                    self.tracker.as_ref().wasm_fuel <= self.policy.maximum_wasm_fuel_per_block,
                    ExecutionError::MaximumFuelExceeded(vm_runtime)
                );
            }
            VmRuntime::Evm => {
                self.tracker.as_mut().evm_fuel = self
                    .tracker
                    .as_ref()
                    .evm_fuel
                    .checked_add(fuel)
                    .ok_or(ArithmeticError::Overflow)?;
                ensure!(
                    self.tracker.as_ref().evm_fuel <= self.policy.maximum_evm_fuel_per_block,
                    ExecutionError::MaximumFuelExceeded(vm_runtime)
                );
            }
        }
        self.update_balance(self.policy.fuel_price(fuel, vm_runtime)?)
    }
    /// Tracks runtime reading of `ChainId`
    pub(crate) fn track_runtime_chain_id(&mut self) -> Result<(), ExecutionError> {
        self.track_size_runtime_operations(RUNTIME_CHAIN_ID_SIZE)
    }
    /// Tracks runtime reading of `BlockHeight`
    pub(crate) fn track_runtime_block_height(&mut self) -> Result<(), ExecutionError> {
        self.track_size_runtime_operations(RUNTIME_BLOCK_HEIGHT_SIZE)
    }
    /// Tracks runtime reading of `ApplicationId`
    pub(crate) fn track_runtime_application_id(&mut self) -> Result<(), ExecutionError> {
        self.track_size_runtime_operations(RUNTIME_APPLICATION_ID_SIZE)
    }
    /// Tracks runtime reading of application parameters.
    pub(crate) fn track_runtime_application_parameters(
        &mut self,
        parameters: &[u8],
    ) -> Result<(), ExecutionError> {
        let parameters_len = parameters.len() as u32;
        self.track_size_runtime_operations(parameters_len)
    }
    /// Tracks runtime reading of `Timestamp`
    pub(crate) fn track_runtime_timestamp(&mut self) -> Result<(), ExecutionError> {
        self.track_size_runtime_operations(RUNTIME_TIMESTAMP_SIZE)
    }
    /// Tracks runtime reading of balance
    pub(crate) fn track_runtime_balance(&mut self) -> Result<(), ExecutionError> {
        self.track_size_runtime_operations(RUNTIME_AMOUNT_SIZE)
    }
    /// Tracks runtime reading of owner balances
    pub(crate) fn track_runtime_owner_balances(
        &mut self,
        owner_balances: &[(AccountOwner, Amount)],
    ) -> Result<(), ExecutionError> {
        let mut size = 0;
        for (account_owner, _) in owner_balances {
            size += account_owner.size() + RUNTIME_AMOUNT_SIZE;
        }
        self.track_size_runtime_operations(size)
    }
    /// Tracks runtime reading of owners
    pub(crate) fn track_runtime_owners(
        &mut self,
        owners: &[AccountOwner],
    ) -> Result<(), ExecutionError> {
        let mut size = 0;
        for owner in owners {
            size += owner.size();
        }
        self.track_size_runtime_operations(size)
    }
    /// Tracks runtime reading of the chain ownership configuration.
    pub(crate) fn track_runtime_chain_ownership(
        &mut self,
        chain_ownership: &ChainOwnership,
    ) -> Result<(), ExecutionError> {
        let mut size = 0;
        for account_owner in &chain_ownership.super_owners {
            size += account_owner.size();
        }
        for account_owner in chain_ownership.owners.keys() {
            size += account_owner.size() + RUNTIME_OWNER_WEIGHT_SIZE;
        }
        // Fixed overhead for the remaining `ChainOwnership` fields.
        size += RUNTIME_CONSTANT_CHAIN_OWNERSHIP_SIZE;
        self.track_size_runtime_operations(size)
    }
    /// Tracks runtime operations.
    fn track_size_runtime_operations(&mut self, size: u32) -> Result<(), ExecutionError> {
        self.tracker.as_mut().bytes_runtime = self
            .tracker
            .as_mut()
            .bytes_runtime
            .checked_add(size)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.bytes_runtime_price(size)?)
    }
    /// Tracks a read operation.
    pub(crate) fn track_read_operation(&mut self) -> Result<(), ExecutionError> {
        self.tracker.as_mut().read_operations = self
            .tracker
            .as_mut()
            .read_operations
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.read_operations_price(1)?)
    }
    /// Tracks a write operation.
    pub(crate) fn track_write_operations(&mut self, count: u32) -> Result<(), ExecutionError> {
        self.tracker.as_mut().write_operations = self
            .tracker
            .as_mut()
            .write_operations
            .checked_add(count)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.write_operations_price(count)?)
    }
    /// Tracks a number of bytes read.
    pub(crate) fn track_bytes_read(&mut self, count: u64) -> Result<(), ExecutionError> {
        self.tracker.as_mut().bytes_read = self
            .tracker
            .as_mut()
            .bytes_read
            .checked_add(count)
            .ok_or(ArithmeticError::Overflow)?;
        // `>=`: reaching the limit exactly is already an error — presumably
        // intentional; note this differs from the `<=` fuel checks above.
        if self.tracker.as_mut().bytes_read >= self.policy.maximum_bytes_read_per_block {
            return Err(ExecutionError::ExcessiveRead);
        }
        self.update_balance(self.policy.bytes_read_price(count)?)?;
        Ok(())
    }
    /// Tracks a number of bytes written.
    pub(crate) fn track_bytes_written(&mut self, count: u64) -> Result<(), ExecutionError> {
        self.tracker.as_mut().bytes_written = self
            .tracker
            .as_mut()
            .bytes_written
            .checked_add(count)
            .ok_or(ArithmeticError::Overflow)?;
        // `>=`: same limit semantics as `track_bytes_read`.
        if self.tracker.as_mut().bytes_written >= self.policy.maximum_bytes_written_per_block {
            return Err(ExecutionError::ExcessiveWrite);
        }
        self.update_balance(self.policy.bytes_written_price(count)?)?;
        Ok(())
    }
    /// Tracks the reading of a blob of `count` bytes.
    pub(crate) fn track_blob_read(&mut self, count: u64) -> Result<(), ExecutionError> {
        {
            let tracker = self.tracker.as_mut();
            tracker.blob_bytes_read = tracker
                .blob_bytes_read
                .checked_add(count)
                .ok_or(ArithmeticError::Overflow)?;
            tracker.blobs_read = tracker
                .blobs_read
                .checked_add(1)
                .ok_or(ArithmeticError::Overflow)?;
        }
        self.update_balance(self.policy.blob_read_price(count)?)?;
        Ok(())
    }
    /// Tracks a number of blob bytes published.
    ///
    /// Committee blobs are size-checked but otherwise free of charge.
    pub fn track_blob_published(&mut self, blob: &Blob) -> Result<(), ExecutionError> {
        self.policy.check_blob_size(blob.content())?;
        let size = blob.content().bytes().len() as u64;
        if blob.is_committee_blob() {
            return Ok(());
        }
        {
            let tracker = self.tracker.as_mut();
            tracker.blob_bytes_published = tracker
                .blob_bytes_published
                .checked_add(size)
                .ok_or(ArithmeticError::Overflow)?;
            tracker.blobs_published = tracker
                .blobs_published
                .checked_add(1)
                .ok_or(ArithmeticError::Overflow)?;
        }
        self.update_balance(self.policy.blob_published_price(size)?)?;
        Ok(())
    }
    /// Tracks a number of event bytes read.
    pub(crate) fn track_event_read(&mut self, count: u64) -> Result<(), ExecutionError> {
        {
            let tracker = self.tracker.as_mut();
            tracker.event_bytes_read = tracker
                .event_bytes_read
                .checked_add(count)
                .ok_or(ArithmeticError::Overflow)?;
            tracker.events_read = tracker
                .events_read
                .checked_add(1)
                .ok_or(ArithmeticError::Overflow)?;
        }
        // NOTE(review): events are charged at the *blob* read price — confirm
        // that events are meant to share blob pricing.
        self.update_balance(self.policy.blob_read_price(count)?)?;
        Ok(())
    }
    /// Tracks a number of event bytes published.
    pub(crate) fn track_event_published(
        &mut self,
        event_bytes: &[u8],
    ) -> Result<(), ExecutionError> {
        let size = event_bytes.len() as u64;
        {
            let tracker = self.tracker.as_mut();
            tracker.event_bytes_published = tracker
                .event_bytes_published
                .checked_add(size)
                .ok_or(ArithmeticError::Overflow)?;
            tracker.events_published = tracker
                .events_published
                .checked_add(1)
                .ok_or(ArithmeticError::Overflow)?;
        }
        // NOTE(review): events are charged at the *blob* published price —
        // confirm that events are meant to share blob pricing.
        self.update_balance(self.policy.blob_published_price(size)?)?;
        Ok(())
    }
    /// Tracks a change in the number of bytes stored.
    ///
    /// Only accumulates the signed delta; no fee is charged yet.
    // TODO(#1536): This is not fully implemented.
    #[allow(dead_code)]
    pub(crate) fn track_stored_bytes(&mut self, delta: i32) -> Result<(), ExecutionError> {
        self.tracker.as_mut().bytes_stored = self
            .tracker
            .as_mut()
            .bytes_stored
            .checked_add(delta)
            .ok_or(ArithmeticError::Overflow)?;
        Ok(())
    }
    /// Returns the remaining time services can spend executing as oracles.
    ///
    /// Errors once the policy limit has already been spent.
    pub(crate) fn remaining_service_oracle_execution_time(
        &self,
    ) -> Result<Duration, ExecutionError> {
        let tracker = self.tracker.as_ref();
        let spent_execution_time = tracker.service_oracle_execution;
        let limit = Duration::from_millis(self.policy.maximum_service_oracle_execution_ms);
        limit
            .checked_sub(spent_execution_time)
            .ok_or(ExecutionError::MaximumServiceOracleExecutionTimeExceeded)
    }
    /// Tracks a call to a service to run as an oracle.
    pub(crate) fn track_service_oracle_call(&mut self) -> Result<(), ExecutionError> {
        self.tracker.as_mut().service_oracle_queries = self
            .tracker
            .as_mut()
            .service_oracle_queries
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        self.update_balance(self.policy.service_as_oracle_query)
    }
    /// Tracks the time spent executing the service as an oracle.
    ///
    /// Accumulates saturating (no overflow), then enforces the strict limit.
    pub(crate) fn track_service_oracle_execution(
        &mut self,
        execution_time: Duration,
    ) -> Result<(), ExecutionError> {
        let tracker = self.tracker.as_mut();
        let spent_execution_time = &mut tracker.service_oracle_execution;
        let limit = Duration::from_millis(self.policy.maximum_service_oracle_execution_ms);
        *spent_execution_time = spent_execution_time.saturating_add(execution_time);
        ensure!(
            *spent_execution_time < limit,
            ExecutionError::MaximumServiceOracleExecutionTimeExceeded
        );
        Ok(())
    }
    /// Tracks the size of a response produced by an oracle.
    ///
    /// Only validates the size against the policy; nothing is accumulated or charged.
    pub(crate) fn track_service_oracle_response(
        &mut self,
        response_bytes: usize,
    ) -> Result<(), ExecutionError> {
        ensure!(
            response_bytes as u64 <= self.policy.maximum_oracle_response_bytes,
            ExecutionError::ServiceOracleResponseTooLarge
        );
        Ok(())
    }
}
impl<Account, Tracker> ResourceController<Account, Tracker>
where
    Tracker: AsMut<ResourceTracker>,
{
    /// Tracks the serialized size of a block, or parts of it.
    pub fn track_block_size_of(&mut self, data: &impl Serialize) -> Result<(), ExecutionError> {
        let serialized_size = bcs::serialized_size(data)?;
        self.track_block_size(serialized_size)
    }
    /// Tracks the serialized size of a block, or parts of it.
    ///
    /// Fails with `BlockTooLarge` on conversion failure, overflow, or when the
    /// accumulated size exceeds the policy's maximum block size.
    pub fn track_block_size(&mut self, size: usize) -> Result<(), ExecutionError> {
        let added = u64::try_from(size).map_err(|_| ExecutionError::BlockTooLarge)?;
        let tracker = self.tracker.as_mut();
        tracker.block_size = tracker
            .block_size
            .checked_add(added)
            .ok_or(ExecutionError::BlockTooLarge)?;
        ensure!(
            tracker.block_size <= self.policy.maximum_block_size,
            ExecutionError::BlockTooLarge
        );
        Ok(())
    }
}
impl ResourceController<Option<AccountOwner>, ResourceTracker> {
    /// Provides a reference to the current execution state and obtains a temporary object
    /// where the accounting functions of [`ResourceController`] are available.
    pub async fn with_state<'a, C>(
        &mut self,
        view: &'a mut SystemExecutionStateView<C>,
    ) -> Result<ResourceController<Sources<'a>, &mut ResourceTracker>, ViewError>
    where
        C: Context + Clone + 'static,
    {
        self.with_state_and_grant(view, None).await
    }
    /// Provides a reference to the current execution state as well as an optional grant,
    /// and obtains a temporary object where the accounting functions of
    /// [`ResourceController`] are available.
    pub async fn with_state_and_grant<'a, C>(
        &mut self,
        view: &'a mut SystemExecutionStateView<C>,
        grant: Option<&'a mut Amount>,
    ) -> Result<ResourceController<Sources<'a>, &mut ResourceTracker>, ViewError>
    where
        C: Context + Clone + 'static,
    {
        // First, use the grant (e.g. for messages) and otherwise use the chain account
        // (e.g. for blocks and operations).
        let primary_source = match grant {
            Some(grant) => grant,
            None => view.balance.get_mut(),
        };
        let mut sources = vec![primary_source];
        // Then the local account, if any. Currently, any negative fee (e.g. storage
        // refund) goes preferably to this account.
        if let Some(owner) = &self.account {
            if let Some(balance) = view.balances.get_mut(owner).await? {
                sources.push(balance);
            }
        }
        Ok(ResourceController {
            policy: self.policy.clone(),
            tracker: &mut self.tracker,
            account: Sources { sources },
        })
    }
}
// The simplest `BalanceHolder` is an `Amount`.
impl BalanceHolder for Amount {
    fn balance(&self) -> Result<Amount, ArithmeticError> {
        Ok(*self)
    }
    // NOTE(review): these calls presumably resolve to `Amount`'s *inherent*
    // `try_add_assign`/`try_sub_assign` (inherent methods take precedence over
    // trait methods), so they are not recursive — confirm `Amount` provides
    // inherent methods with these names.
    fn try_add_assign(&mut self, other: Amount) -> Result<(), ArithmeticError> {
        self.try_add_assign(other)
    }
    fn try_sub_assign(&mut self, other: Amount) -> Result<(), ArithmeticError> {
        self.try_sub_assign(other)
    }
}
// This is also needed for the default instantiation `ResourceController<Amount, ResourceTracker>`.
// See https://doc.rust-lang.org/std/convert/trait.AsMut.html#reflexivity for general context.
// (The standard library provides no blanket reflexive `AsMut` impl, so we write it ourselves.)
impl AsMut<ResourceTracker> for ResourceTracker {
    fn as_mut(&mut self) -> &mut Self {
        self
    }
}
// Reflexive `AsRef`, for the same reason as the `AsMut` impl above: the
// standard library has no blanket `impl AsRef<T> for T`.
impl AsRef<ResourceTracker> for ResourceTracker {
    fn as_ref(&self) -> &Self {
        self
    }
}
/// A temporary object holding a number of references to funding sources.
pub struct Sources<'a> {
    /// Funding sources in priority order: debits drain them front to back,
    /// credits go to the last one (see the `BalanceHolder` impl below).
    sources: Vec<&'a mut Amount>,
}
impl BalanceHolder for Sources<'_> {
    /// The combined balance is the checked sum of all sources.
    fn balance(&self) -> Result<Amount, ArithmeticError> {
        self.sources
            .iter()
            .try_fold(Amount::ZERO, |mut total, source| {
                total.try_add_assign(**source)?;
                Ok(total)
            })
    }
    fn try_add_assign(&mut self, other: Amount) -> Result<(), ArithmeticError> {
        // Try to credit the owner account first.
        // TODO(#1648): This may need some additional design work.
        let last_source = self.sources.last_mut().expect("at least one source");
        last_source.try_add_assign(other)
    }
    fn try_sub_assign(&mut self, mut other: Amount) -> Result<(), ArithmeticError> {
        // Drain the sources in order until the debit is fully covered.
        for source in self.sources.iter_mut() {
            match source.try_sub_assign(other) {
                // This source covered the remainder: done.
                Ok(()) => return Ok(()),
                Err(_) => {
                    // Insufficient: empty this source and carry on with the rest.
                    other.try_sub_assign(**source).expect("*source < other");
                    **source = Amount::ZERO;
                }
            }
        }
        if other > Amount::ZERO {
            Err(ArithmeticError::Underflow)
        } else {
            Ok(())
        }
    }
}
// Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
#[cfg(test)]
#[path = "./unit_tests/system_tests.rs"]
mod tests;
use std::collections::{BTreeMap, BTreeSet, HashSet};
use allocative::Allocative;
use custom_debug_derive::Debug;
use linera_base::{
crypto::CryptoHash,
data_types::{
Amount, ApplicationPermissions, ArithmeticError, Blob, BlobContent, BlockHeight,
ChainDescription, ChainOrigin, Epoch, InitialChainConfig, OracleResponse, Timestamp,
},
ensure, hex_debug,
identifiers::{Account, AccountOwner, BlobId, BlobType, ChainId, EventId, ModuleId, StreamId},
ownership::{ChainOwnership, TimeoutConfig},
};
use linera_views::{
context::Context,
map_view::MapView,
register_view::RegisterView,
set_view::SetView,
views::{ClonableView, ReplaceContext, View},
};
use serde::{Deserialize, Serialize};
#[cfg(test)]
use crate::test_utils::SystemExecutionState;
use crate::{
committee::Committee, util::OracleResponseExt as _, ApplicationDescription, ApplicationId,
ExecutionError, ExecutionRuntimeContext, MessageContext, MessageKind, OperationContext,
OutgoingMessage, QueryContext, QueryOutcome, ResourceController, TransactionTracker,
};
/// The event stream name for new epochs and committees.
// NOTE: stream names are single-byte tags; presumably they must stay distinct
// and stable across protocol versions — confirm before changing.
pub static EPOCH_STREAM_NAME: &[u8] = &[0];
/// The event stream name for removed epochs.
pub static REMOVED_EPOCH_STREAM_NAME: &[u8] = &[1];
/// The data stored in an epoch creation event.
///
/// Serialized with BCS and published on the `EPOCH_STREAM_NAME` system stream.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EpochEventData {
    /// The hash of the committee blob for this epoch.
    pub blob_hash: CryptoHash,
    /// The timestamp when the epoch was created on the admin chain.
    pub timestamp: Timestamp,
}
/// The number of times the [`SystemOperation::OpenChain`] was executed.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::register_int_counter_vec;
    use prometheus::IntCounterVec;
    // Registered lazily on first access via `LazyLock`.
    pub static OPEN_CHAIN_COUNT: LazyLock<IntCounterVec> = LazyLock::new(|| {
        register_int_counter_vec(
            "open_chain_count",
            "The number of times the `OpenChain` operation was executed",
            &[],
        )
    });
}
/// A view accessing the execution state of the system of a chain.
#[derive(Debug, ClonableView, View, Allocative)]
#[allocative(bound = "C")]
pub struct SystemExecutionStateView<C> {
    /// How the chain was created. May be unknown for inactive chains.
    pub description: RegisterView<C, Option<ChainDescription>>,
    /// The number identifying the current configuration.
    pub epoch: RegisterView<C, Epoch>,
    /// The admin of the chain.
    pub admin_id: RegisterView<C, Option<ChainId>>,
    /// The committees that we trust, indexed by epoch number.
    // Not using a `MapView` because the set active of committees is supposed to be
    // small. Plus, currently, we would create the `BTreeMap` anyway in various places
    // (e.g. the `OpenChain` operation).
    pub committees: RegisterView<C, BTreeMap<Epoch, Committee>>,
    /// Ownership of the chain.
    pub ownership: RegisterView<C, ChainOwnership>,
    /// Balance of the chain. (Available to any user able to create blocks in the chain.)
    pub balance: RegisterView<C, Amount>,
    /// Balances attributed to a given owner.
    pub balances: MapView<C, AccountOwner, Amount>,
    /// The timestamp of the most recent block.
    pub timestamp: RegisterView<C, Timestamp>,
    /// Whether this chain has been closed.
    pub closed: RegisterView<C, bool>,
    /// Permissions for applications on this chain.
    pub application_permissions: RegisterView<C, ApplicationPermissions>,
    /// Blobs that have been used or published on this chain.
    pub used_blobs: SetView<C, BlobId>,
    /// The event stream subscriptions of applications on this chain.
    /// Keyed by the (publisher chain, stream) pair.
    pub event_subscriptions: MapView<C, (ChainId, StreamId), EventSubscriptions>,
    /// The number of events in the streams that this chain is writing to.
    pub stream_event_counts: MapView<C, StreamId, u32>,
}
impl<C: Context, C2: Context> ReplaceContext<C2> for SystemExecutionStateView<C> {
    type Target = SystemExecutionStateView<C2>;
    // Rebuilds the view field by field under the new context. `ctx` is cloned
    // once per field because each `with_context` call consumes a closure.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        SystemExecutionStateView {
            description: self.description.with_context(ctx.clone()).await,
            epoch: self.epoch.with_context(ctx.clone()).await,
            admin_id: self.admin_id.with_context(ctx.clone()).await,
            committees: self.committees.with_context(ctx.clone()).await,
            ownership: self.ownership.with_context(ctx.clone()).await,
            balance: self.balance.with_context(ctx.clone()).await,
            balances: self.balances.with_context(ctx.clone()).await,
            timestamp: self.timestamp.with_context(ctx.clone()).await,
            closed: self.closed.with_context(ctx.clone()).await,
            application_permissions: self.application_permissions.with_context(ctx.clone()).await,
            used_blobs: self.used_blobs.with_context(ctx.clone()).await,
            event_subscriptions: self.event_subscriptions.with_context(ctx.clone()).await,
            stream_event_counts: self.stream_event_counts.with_context(ctx.clone()).await,
        }
    }
}
/// The applications subscribing to a particular stream, and the next event index.
#[derive(Debug, Default, Clone, Serialize, Deserialize, Allocative)]
pub struct EventSubscriptions {
    /// The next event index, i.e. the total number of events in this stream that have already
    /// been processed by this chain.
    pub next_index: u32,
    /// The applications on this chain that are subscribed to this stream.
    pub applications: BTreeSet<ApplicationId>,
}
/// The initial configuration for a new chain.
///
/// Carried by [`SystemOperation::OpenChain`]; see `init_chain_config` for the
/// conversion into a full [`InitialChainConfig`].
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub struct OpenChainConfig {
    /// The ownership configuration of the new chain.
    pub ownership: ChainOwnership,
    /// The initial chain balance.
    pub balance: Amount,
    /// The initial application permissions.
    pub application_permissions: ApplicationPermissions,
}
impl OpenChainConfig {
/// Creates an [`InitialChainConfig`] based on this [`OpenChainConfig`] and additional
/// parameters.
pub fn init_chain_config(
&self,
epoch: Epoch,
min_active_epoch: Epoch,
max_active_epoch: Epoch,
) -> InitialChainConfig {
InitialChainConfig {
application_permissions: self.application_permissions.clone(),
balance: self.balance,
epoch,
min_active_epoch,
max_active_epoch,
ownership: self.ownership.clone(),
}
}
}
/// A system operation.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub enum SystemOperation {
    /// Transfers `amount` units of value from the given owner's account to the recipient.
    /// If no owner is given, try to take the units out of the unattributed account.
    Transfer {
        owner: AccountOwner,
        recipient: Account,
        amount: Amount,
    },
    /// Claims `amount` units of value from the given owner's account in the remote
    /// `target` chain. Depending on its configuration, the `target` chain may refuse to
    /// process the message.
    Claim {
        owner: AccountOwner,
        target_id: ChainId,
        recipient: Account,
        amount: Amount,
    },
    /// Creates (or activates) a new chain.
    /// This will automatically subscribe to the future committees created by `admin_id`.
    OpenChain(OpenChainConfig),
    /// Closes the chain.
    CloseChain,
    /// Changes the ownership of the chain.
    ChangeOwnership {
        /// Super owners can propose fast blocks in the first round, and regular blocks in any round.
        #[debug(skip_if = Vec::is_empty)]
        super_owners: Vec<AccountOwner>,
        /// The regular owners, with their weights that determine how often they are round leader.
        #[debug(skip_if = Vec::is_empty)]
        owners: Vec<(AccountOwner, u64)>,
        /// The leader of the first single-leader round. If not set, this is random like other rounds.
        #[debug(skip_if = Option::is_none)]
        first_leader: Option<AccountOwner>,
        /// The number of initial rounds after 0 in which all owners are allowed to propose blocks.
        multi_leader_rounds: u32,
        /// Whether the multi-leader rounds are unrestricted, i.e. not limited to chain owners.
        /// This should only be `true` on chains with restrictive application permissions and an
        /// application-based mechanism to select block proposers.
        open_multi_leader_rounds: bool,
        /// The timeout configuration: how long fast, multi-leader and single-leader rounds last.
        timeout_config: TimeoutConfig,
    },
    /// Changes the application permissions configuration on this chain.
    ChangeApplicationPermissions(ApplicationPermissions),
    /// Publishes a new application module (its bytecode blobs).
    PublishModule { module_id: ModuleId },
    /// Publishes a new data blob.
    PublishDataBlob { blob_hash: CryptoHash },
    /// Verifies that the given blob exists. Otherwise the block fails.
    VerifyBlob { blob_id: BlobId },
    /// Creates a new application from a previously published module.
    CreateApplication {
        module_id: ModuleId,
        #[serde(with = "serde_bytes")]
        #[debug(with = "hex_debug")]
        parameters: Vec<u8>,
        #[serde(with = "serde_bytes")]
        #[debug(with = "hex_debug", skip_if = Vec::is_empty)]
        instantiation_argument: Vec<u8>,
        #[debug(skip_if = Vec::is_empty)]
        required_application_ids: Vec<ApplicationId>,
    },
    /// Operations that are only allowed on the admin chain.
    Admin(AdminOperation),
    /// Processes an event about a new epoch and committee.
    ProcessNewEpoch(Epoch),
    /// Processes an event about a removed epoch and committee.
    ProcessRemovedEpoch(Epoch),
    /// Updates the event stream trackers with (publisher chain, stream, next index) entries.
    UpdateStreams(Vec<(ChainId, StreamId, u32)>),
}
/// Operations that are only allowed on the admin chain.
/// (Enforced in `execute_operation` via `AdminOperationOnNonAdminChain`.)
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub enum AdminOperation {
    /// Publishes a new committee as a blob. This can be assigned to an epoch using
    /// [`AdminOperation::CreateCommittee`] in a later block.
    PublishCommitteeBlob { blob_hash: CryptoHash },
    /// Registers a new committee. Other chains can then migrate to the new epoch by executing
    /// [`SystemOperation::ProcessNewEpoch`].
    CreateCommittee { epoch: Epoch, blob_hash: CryptoHash },
    /// Removes a committee. Other chains should execute [`SystemOperation::ProcessRemovedEpoch`],
    /// so that blocks from the retired epoch will not be accepted until they are followed (hence
    /// re-certified) by a block certified by a recent committee.
    RemoveCommittee { epoch: Epoch },
}
/// A system message meant to be executed on a remote chain.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Allocative)]
pub enum SystemMessage {
    /// Credits `amount` units of value to the account `target` -- unless the message is
    /// bouncing, in which case `source` is credited instead.
    Credit {
        target: AccountOwner,
        amount: Amount,
        /// The originating account, credited back if the message bounces.
        source: AccountOwner,
    },
    /// Withdraws `amount` units of value from the account and starts a transfer to credit
    /// the recipient. The message must be properly authenticated. Receiver chains may
    /// refuse it depending on their configuration.
    Withdraw {
        owner: AccountOwner,
        amount: Amount,
        recipient: Account,
    },
}
/// A query to the system state. (Unit type: there is only one kind of system query.)
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct SystemQuery;
/// The response to a system query.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct SystemResponse {
    /// The chain ID.
    pub chain_id: ChainId,
    /// The chain balance.
    pub balance: Amount,
}
/// Optional user message attached to a transfer.
///
/// The payload is a fixed 32-byte array; see `from_option_string` below for
/// space-padded construction from text.
#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Hash, Default, Debug, Serialize, Deserialize)]
pub struct UserData(pub Option<[u8; 32]>);
impl UserData {
    /// Converts an optional string into `UserData`.
    ///
    /// The string's UTF-8 bytes are copied into a 32-byte array padded with
    /// ASCII spaces; `None` maps to `UserData(None)`.
    ///
    /// # Errors
    /// Returns the byte length of the string if it exceeds 32 bytes.
    pub fn from_option_string(opt_str: Option<String>) -> Result<Self, usize> {
        let option_array = match opt_str {
            None => None,
            Some(s) => {
                let bytes = s.into_bytes();
                if bytes.len() > 32 {
                    return Err(bytes.len());
                }
                // Space-pad to the fixed width; the length check above
                // guarantees the copy fits (the original's extra `.min(32)`
                // was redundant).
                let mut array = [b' '; 32];
                array[..bytes.len()].copy_from_slice(&bytes);
                Some(array)
            }
        };
        Ok(UserData(option_array))
    }
}
/// The outcome of successfully creating an application.
#[derive(Debug)]
pub struct CreateApplicationResult {
    /// The ID assigned to the newly created application.
    pub app_id: ApplicationId,
}
impl<C> SystemExecutionStateView<C>
where
C: Context + Clone + 'static,
C::Extra: ExecutionRuntimeContext,
{
/// Invariant for the states of active chains.
pub fn is_active(&self) -> bool {
self.description.get().is_some()
&& self.ownership.get().is_active()
&& self.current_committee().is_some()
&& self.admin_id.get().is_some()
}
/// Returns the current committee, if any.
pub fn current_committee(&self) -> Option<(Epoch, &Committee)> {
let epoch = self.epoch.get();
let committee = self.committees.get().get(epoch)?;
Some((*epoch, committee))
}
async fn get_event(&self, event_id: EventId) -> Result<Vec<u8>, ExecutionError> {
match self.context().extra().get_event(event_id.clone()).await? {
None => Err(ExecutionError::EventsNotFound(vec![event_id])),
Some(vec) => Ok(vec),
}
}
/// Executes the sender's side of an operation and returns a list of actions to be
/// taken.
///
/// Outgoing messages and events are recorded in `txn_tracker`; resource usage is
/// charged to `resource_controller`. Returns `Some((app_id, argument))` only for
/// `CreateApplication`, in which case the caller still has to run the new
/// application's instantiation code with `argument`.
pub async fn execute_operation(
    &mut self,
    context: OperationContext,
    operation: SystemOperation,
    txn_tracker: &mut TransactionTracker,
    resource_controller: &mut ResourceController<Option<AccountOwner>>,
) -> Result<Option<(ApplicationId, Vec<u8>)>, ExecutionError> {
    use SystemOperation::*;
    // Set only by the `CreateApplication` arm below.
    let mut new_application = None;
    match operation {
        OpenChain(config) => {
            let _chain_id = self
                .open_chain(
                    config,
                    context.chain_id,
                    context.height,
                    context.timestamp,
                    txn_tracker,
                )
                .await?;
            #[cfg(with_metrics)]
            metrics::OPEN_CHAIN_COUNT.with_label_values(&[]).inc();
        }
        ChangeOwnership {
            super_owners,
            owners,
            first_leader,
            multi_leader_rounds,
            open_multi_leader_rounds,
            timeout_config,
        } => {
            // Replaces the entire ownership configuration atomically.
            self.ownership.set(ChainOwnership {
                super_owners: super_owners.into_iter().collect(),
                owners: owners.into_iter().collect(),
                first_leader,
                multi_leader_rounds,
                open_multi_leader_rounds,
                timeout_config,
            });
        }
        ChangeApplicationPermissions(application_permissions) => {
            self.application_permissions.set(application_permissions);
        }
        CloseChain => self.close_chain(),
        Transfer {
            owner,
            amount,
            recipient,
        } => {
            // Same-chain transfers settle locally and yield no message;
            // cross-chain transfers produce an outgoing credit message.
            let maybe_message = self
                .transfer(context.authenticated_owner, None, owner, recipient, amount)
                .await?;
            txn_tracker.add_outgoing_messages(maybe_message);
        }
        Claim {
            owner,
            target_id,
            recipient,
            amount,
        } => {
            let maybe_message = self
                .claim(
                    context.authenticated_owner,
                    None,
                    owner,
                    target_id,
                    recipient,
                    amount,
                )
                .await?;
            txn_tracker.add_outgoing_messages(maybe_message);
        }
        Admin(admin_operation) => {
            // Admin operations may only run on the admin chain itself.
            ensure!(
                *self.admin_id.get() == Some(context.chain_id),
                ExecutionError::AdminOperationOnNonAdminChain
            );
            match admin_operation {
                AdminOperation::PublishCommitteeBlob { blob_hash } => {
                    self.blob_published(
                        &BlobId::new(blob_hash, BlobType::Committee),
                        txn_tracker,
                    )?;
                }
                AdminOperation::CreateCommittee { epoch, blob_hash } => {
                    // Epochs must be created strictly in sequence.
                    self.check_next_epoch(epoch)?;
                    let blob_id = BlobId::new(blob_hash, BlobType::Committee);
                    let committee =
                        bcs::from_bytes(self.read_blob_content(blob_id).await?.bytes())?;
                    self.blob_used(txn_tracker, blob_id).await?;
                    self.committees.get_mut().insert(epoch, committee);
                    self.epoch.set(epoch);
                    let event_data = EpochEventData {
                        blob_hash,
                        timestamp: context.timestamp,
                    };
                    // Announce the new epoch so other chains can process it.
                    txn_tracker.add_event(
                        StreamId::system(EPOCH_STREAM_NAME),
                        epoch.0,
                        bcs::to_bytes(&event_data)?,
                    );
                }
                AdminOperation::RemoveCommittee { epoch } => {
                    ensure!(
                        self.committees.get_mut().remove(&epoch).is_some(),
                        ExecutionError::InvalidCommitteeRemoval
                    );
                    // The removal event carries no payload; the epoch is in the index.
                    txn_tracker.add_event(
                        StreamId::system(REMOVED_EPOCH_STREAM_NAME),
                        epoch.0,
                        vec![],
                    );
                }
            }
        }
        PublishModule { module_id } => {
            for blob_id in module_id.bytecode_blob_ids() {
                self.blob_published(&blob_id, txn_tracker)?;
            }
        }
        CreateApplication {
            module_id,
            parameters,
            instantiation_argument,
            required_application_ids,
        } => {
            let CreateApplicationResult { app_id } = self
                .create_application(
                    context.chain_id,
                    context.height,
                    module_id,
                    parameters,
                    required_application_ids,
                    txn_tracker,
                )
                .await?;
            // Hand the instantiation argument back to the caller to execute.
            new_application = Some((app_id, instantiation_argument));
        }
        PublishDataBlob { blob_hash } => {
            self.blob_published(&BlobId::new(blob_hash, BlobType::Data), txn_tracker)?;
        }
        VerifyBlob { blob_id } => {
            self.assert_blob_exists(blob_id).await?;
            // Charged as a zero-length blob read.
            resource_controller
                .with_state(self)
                .await?
                .track_blob_read(0)?;
            self.blob_used(txn_tracker, blob_id).await?;
        }
        ProcessNewEpoch(epoch) => {
            self.check_next_epoch(epoch)?;
            let admin_id = self
                .admin_id
                .get()
                .ok_or_else(|| ExecutionError::InactiveChain(context.chain_id))?;
            let event_id = EventId {
                chain_id: admin_id,
                stream_id: StreamId::system(EPOCH_STREAM_NAME),
                index: epoch.0,
            };
            // Read the admin chain's epoch event through the oracle so the
            // response is recorded and replayable.
            let bytes = txn_tracker
                .oracle(|| async {
                    let bytes = self.get_event(event_id.clone()).await?;
                    Ok(OracleResponse::Event(event_id.clone(), bytes))
                })
                .await?
                .to_event(&event_id)?;
            let event_data: EpochEventData = bcs::from_bytes(&bytes)?;
            let blob_id = BlobId::new(event_data.blob_hash, BlobType::Committee);
            let committee = bcs::from_bytes(self.read_blob_content(blob_id).await?.bytes())?;
            self.blob_used(txn_tracker, blob_id).await?;
            self.committees.get_mut().insert(epoch, committee);
            self.epoch.set(epoch);
        }
        ProcessRemovedEpoch(epoch) => {
            ensure!(
                self.committees.get_mut().remove(&epoch).is_some(),
                ExecutionError::InvalidCommitteeRemoval
            );
            let admin_id = self
                .admin_id
                .get()
                .ok_or_else(|| ExecutionError::InactiveChain(context.chain_id))?;
            let event_id = EventId {
                chain_id: admin_id,
                stream_id: StreamId::system(REMOVED_EPOCH_STREAM_NAME),
                index: epoch.0,
            };
            // Only records the oracle response; the event payload is unused here.
            txn_tracker
                .oracle(|| async {
                    let bytes = self.get_event(event_id.clone()).await?;
                    Ok(OracleResponse::Event(event_id, bytes))
                })
                .await?;
        }
        UpdateStreams(streams) => {
            let mut missing_events = Vec::new();
            for (chain_id, stream_id, next_index) in streams {
                let subscriptions = self
                    .event_subscriptions
                    .get_mut_or_default(&(chain_id, stream_id.clone()))
                    .await?;
                // The update must make progress, otherwise it is stale.
                ensure!(
                    subscriptions.next_index < next_index,
                    ExecutionError::OutdatedUpdateStreams
                );
                for application_id in &subscriptions.applications {
                    txn_tracker.add_stream_to_process(
                        *application_id,
                        chain_id,
                        stream_id.clone(),
                        subscriptions.next_index,
                        next_index,
                    );
                }
                subscriptions.next_index = next_index;
                let index = next_index
                    .checked_sub(1)
                    .ok_or(ArithmeticError::Underflow)?;
                let event_id = EventId {
                    chain_id,
                    stream_id,
                    index,
                };
                let context = self.context();
                let extra = context.extra();
                // Check the newest claimed event exists; collect all misses so
                // they can be reported together below.
                txn_tracker
                    .oracle(|| async {
                        if !extra.contains_event(event_id.clone()).await? {
                            missing_events.push(event_id.clone());
                        }
                        Ok(OracleResponse::EventExists(event_id))
                    })
                    .await?;
            }
            ensure!(
                missing_events.is_empty(),
                ExecutionError::EventsNotFound(missing_events)
            );
        }
    }
    Ok(new_application)
}
/// Returns an error if the `provided` epoch is not exactly one higher than the chain's current
/// epoch.
fn check_next_epoch(&self, provided: Epoch) -> Result<(), ExecutionError> {
let expected = self.epoch.get().try_add_one()?;
ensure!(
provided == expected,
ExecutionError::InvalidCommitteeEpoch { provided, expected }
);
Ok(())
}
/// Credits `amount` to `owner`, saturating at the maximum balance.
async fn credit(&mut self, owner: &AccountOwner, amount: Amount) -> Result<(), ExecutionError> {
    // The chain's own balance lives in a dedicated register.
    if *owner == AccountOwner::CHAIN {
        let updated = self.balance.get().saturating_add(amount);
        self.balance.set(updated);
        return Ok(());
    }
    // Per-owner balances live in the `balances` map; missing entries start at zero.
    let account_balance = self.balances.get_mut_or_default(owner).await?;
    *account_balance = account_balance.saturating_add(amount);
    Ok(())
}
/// Credits `recipient` directly if it lives on this chain; otherwise returns the
/// tracked cross-chain credit message to send.
async fn credit_or_send_message(
    &mut self,
    source: AccountOwner,
    recipient: Account,
    amount: Amount,
) -> Result<Option<OutgoingMessage>, ExecutionError> {
    if recipient.chain_id != self.context().extra().chain_id() {
        // Cross-chain transfer: emit a tracked `Credit` message.
        let credit = SystemMessage::Credit {
            amount,
            source,
            target: recipient.owner,
        };
        let message =
            OutgoingMessage::new(recipient.chain_id, credit).with_kind(MessageKind::Tracked);
        return Ok(Some(message));
    }
    // Same-chain transfer: settle locally, no message needed.
    self.credit(&recipient.owner, amount).await?;
    Ok(None)
}
/// Transfers `amount` from `source` to `recipient`, after checking that the caller
/// is authorized to spend from `source`.
///
/// Spending from the chain account requires an authenticated chain owner; spending
/// from any other account requires authentication as that exact owner (or as the
/// application whose id maps to it). Returns the cross-chain credit message to
/// send, or `None` for a same-chain transfer.
pub async fn transfer(
    &mut self,
    authenticated_owner: Option<AccountOwner>,
    authenticated_application_id: Option<ApplicationId>,
    source: AccountOwner,
    recipient: Account,
    amount: Amount,
) -> Result<Option<OutgoingMessage>, ExecutionError> {
    if source == AccountOwner::CHAIN {
        // `is_some_and` replaces the previous `is_some()` + `unwrap()` pair.
        ensure!(
            authenticated_owner
                .is_some_and(|owner| self.ownership.get().is_owner(&owner)),
            ExecutionError::UnauthenticatedTransferOwner
        );
    } else {
        ensure!(
            authenticated_owner == Some(source)
                || authenticated_application_id.map(AccountOwner::from) == Some(source),
            ExecutionError::UnauthenticatedTransferOwner
        );
    }
    // Zero-value transfers are rejected rather than silently succeeding.
    ensure!(
        amount > Amount::ZERO,
        ExecutionError::IncorrectTransferAmount
    );
    self.debit(&source, amount).await?;
    self.credit_or_send_message(source, recipient, amount).await
}
pub async fn claim(
&mut self,
authenticated_owner: Option<AccountOwner>,
authenticated_application_id: Option<ApplicationId>,
source: AccountOwner,
target_id: ChainId,
recipient: Account,
amount: Amount,
) -> Result<Option<OutgoingMessage>, ExecutionError> {
ensure!(
authenticated_owner == Some(source)
|| authenticated_application_id.map(AccountOwner::from) == Some(source),
ExecutionError::UnauthenticatedClaimOwner
);
ensure!(amount > Amount::ZERO, ExecutionError::IncorrectClaimAmount);
let current_chain_id = self.context().extra().chain_id();
if target_id == current_chain_id {
// Handle same-chain claim locally by processing the withdraw operation directly
self.debit(&source, amount).await?;
self.credit_or_send_message(source, recipient, amount).await
} else {
// Handle cross-chain claim with Withdraw message
let message = SystemMessage::Withdraw {
amount,
owner: source,
recipient,
};
Ok(Some(
OutgoingMessage::new(target_id, message)
.with_authenticated_owner(authenticated_owner),
))
}
}
/// Debits an [`Amount`] of tokens from an account's balance.
///
/// Fails with `InsufficientBalance` if the account does not exist (reported as a
/// zero balance) or holds less than `amount`.
async fn debit(
    &mut self,
    account: &AccountOwner,
    amount: Amount,
) -> Result<(), ExecutionError> {
    // The chain balance lives in its own register; any other owner's balance
    // is an entry in the `balances` map, whose absence means zero.
    let balance = if account == &AccountOwner::CHAIN {
        self.balance.get_mut()
    } else {
        self.balances.get_mut(account).await?.ok_or_else(|| {
            ExecutionError::InsufficientBalance {
                balance: Amount::ZERO,
                account: *account,
            }
        })?
    };
    balance
        .try_sub_assign(amount)
        .map_err(|_| ExecutionError::InsufficientBalance {
            balance: *balance,
            account: *account,
        })?;
    // Drop per-owner entries that reach zero to keep the map compact.
    if account != &AccountOwner::CHAIN && balance.is_zero() {
        self.balances.remove(account)?;
    }
    Ok(())
}
/// Executes a cross-chain message that represents the recipient's side of an operation.
pub async fn execute_message(
&mut self,
context: MessageContext,
message: SystemMessage,
) -> Result<Vec<OutgoingMessage>, ExecutionError> {
let mut outcome = Vec::new();
use SystemMessage::*;
match message {
Credit {
amount,
source,
target,
} => {
let receiver = if context.is_bouncing { source } else { target };
self.credit(&receiver, amount).await?;
}
Withdraw {
amount,
owner,
recipient,
} => {
self.debit(&owner, amount).await?;
if let Some(message) = self
.credit_or_send_message(owner, recipient, amount)
.await?
{
outcome.push(message);
}
}
}
Ok(outcome)
}
/// Initializes the system application state on a newly opened chain.
/// Returns `Ok(true)` if the chain was already initialized, `Ok(false)` if it wasn't.
pub async fn initialize_chain(&mut self, chain_id: ChainId) -> Result<bool, ExecutionError> {
    if self.description.get().is_some() {
        // already initialized
        return Ok(true);
    }
    // The chain's configuration is stored as a `ChainDescription` blob keyed by
    // the chain id itself.
    let description_blob = self
        .read_blob_content(BlobId::new(chain_id.0, BlobType::ChainDescription))
        .await?;
    let description: ChainDescription = bcs::from_bytes(description_blob.bytes())?;
    let InitialChainConfig {
        ownership,
        epoch,
        balance,
        min_active_epoch,
        max_active_epoch,
        application_permissions,
    } = description.config().clone();
    self.timestamp.set(description.timestamp());
    self.description.set(Some(description));
    self.epoch.set(epoch);
    // Load the committees active in the configured epoch range.
    let committees = self
        .context()
        .extra()
        .get_committees(min_active_epoch..=max_active_epoch)
        .await?;
    let admin_chain_id = self
        .context()
        .extra()
        .get_network_description()
        .await?
        .ok_or(ExecutionError::NoNetworkDescriptionFound)?
        .admin_chain_id;
    self.committees.set(committees);
    self.admin_id.set(Some(admin_chain_id));
    self.ownership.set(ownership);
    self.balance.set(balance);
    self.application_permissions.set(application_permissions);
    Ok(false)
}
/// Answers a system query with the chain id and current chain balance.
/// The query payload itself carries no parameters and is ignored.
pub fn handle_query(
    &mut self,
    context: QueryContext,
    _query: SystemQuery,
) -> QueryOutcome<SystemResponse> {
    QueryOutcome {
        response: SystemResponse {
            chain_id: context.chain_id,
            balance: *self.balance.get(),
        },
        operations: Vec::new(),
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/runtime.rs | linera-execution/src/runtime.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
mem,
ops::{Deref, DerefMut},
sync::{Arc, Mutex},
};
use custom_debug_derive::Debug;
use linera_base::{
data_types::{
Amount, ApplicationPermissions, ArithmeticError, Blob, BlockHeight, Bytecode,
SendMessageRequest, Timestamp,
},
ensure, http,
identifiers::{
Account, AccountOwner, ChainId, EventId, GenericApplicationId, StreamId, StreamName,
},
ownership::ChainOwnership,
time::Instant,
vm::VmRuntime,
};
use linera_views::batch::Batch;
use oneshot::Receiver;
use crate::{
execution::UserAction,
execution_state_actor::{ExecutionRequest, ExecutionStateSender},
resources::ResourceController,
system::CreateApplicationResult,
util::{ReceiverExt, UnboundedSenderExt},
ApplicationDescription, ApplicationId, BaseRuntime, ContractRuntime, DataBlobHash,
ExecutionError, FinalizeContext, Message, MessageContext, MessageKind, ModuleId, Operation,
OutgoingMessage, QueryContext, QueryOutcome, ServiceRuntime, UserContractCode,
UserContractInstance, UserServiceCode, UserServiceInstance, MAX_STREAM_NAME_LEN,
};
#[cfg(test)]
#[path = "unit_tests/runtime_tests.rs"]
mod tests;
/// Associates a user-instance type with its runtime context and code-handle types.
pub trait WithContext {
    // Extra data threaded through the runtime for this kind of instance
    // (e.g. `Timestamp` for contracts, `()` for services).
    type UserContext;
    // The code handle used to instantiate this kind of instance.
    type Code;
}
// Contracts carry the block timestamp as their user context.
impl WithContext for UserContractInstance {
    type UserContext = Timestamp;
    type Code = UserContractCode;
}
// Services need no extra context.
impl WithContext for UserServiceInstance {
    type UserContext = ();
    type Code = UserServiceCode;
}
// Test-only stand-in instance type with no context or code.
#[cfg(test)]
impl WithContext for Arc<dyn std::any::Any + Send + Sync> {
    type UserContext = ();
    type Code = ();
}
/// Owning wrapper around a runtime handle; the `Option` lets `Drop` and
/// `into_inner` move the handle out exactly once.
#[derive(Debug)]
pub struct SyncRuntime<UserInstance: WithContext>(Option<SyncRuntimeHandle<UserInstance>>);
pub type ContractSyncRuntime = SyncRuntime<UserContractInstance>;
/// Service runtime bundled with the query context it is currently serving.
pub struct ServiceSyncRuntime {
    runtime: SyncRuntime<UserServiceInstance>,
    current_context: QueryContext,
}
/// Cloneable, lockable handle to the shared runtime internals.
#[derive(Debug)]
pub struct SyncRuntimeHandle<UserInstance: WithContext>(
    Arc<Mutex<SyncRuntimeInternal<UserInstance>>>,
);
pub type ContractSyncRuntimeHandle = SyncRuntimeHandle<UserContractInstance>;
pub type ServiceSyncRuntimeHandle = SyncRuntimeHandle<UserServiceInstance>;
/// Runtime data tracked during the execution of a transaction on the synchronous thread.
#[derive(Debug)]
pub struct SyncRuntimeInternal<UserInstance: WithContext> {
    /// The current chain ID.
    chain_id: ChainId,
    /// The height of the next block that will be added to this chain. During operations
    /// and messages, this is the current block height.
    height: BlockHeight,
    /// The current consensus round. Only available during block validation in multi-leader rounds.
    round: Option<u32>,
    /// The current message being executed, if there is one.
    #[debug(skip_if = Option::is_none)]
    executing_message: Option<ExecutingMessage>,
    /// How to interact with the storage view of the execution state.
    execution_state_sender: ExecutionStateSender,
    /// If applications are being finalized.
    ///
    /// If [`true`], disables cross-application calls.
    is_finalizing: bool,
    /// Applications that need to be finalized.
    applications_to_finalize: Vec<ApplicationId>,
    /// Code and descriptions of applications pre-loaded for this transaction,
    /// available before any instance is created.
    preloaded_applications: HashMap<ApplicationId, (UserInstance::Code, ApplicationDescription)>,
    /// Application instances loaded in this transaction.
    loaded_applications: HashMap<ApplicationId, LoadedApplication<UserInstance>>,
    /// The current stack of application descriptions.
    call_stack: Vec<ApplicationStatus>,
    /// The set of the IDs of the applications that are in the `call_stack`.
    active_applications: HashSet<ApplicationId>,
    /// The operations scheduled during this query.
    scheduled_operations: Vec<Operation>,
    /// Track application states based on views.
    view_user_states: BTreeMap<ApplicationId, ViewUserState>,
    /// The deadline this runtime should finish executing.
    ///
    /// Used to limit the execution time of services running as oracles.
    deadline: Option<Instant>,
    /// Where to send a refund for the unused part of the grant after execution, if any.
    #[debug(skip_if = Option::is_none)]
    refund_grant_to: Option<Account>,
    /// Controller to track fuel and storage consumption.
    resource_controller: ResourceController,
    /// Additional context for the runtime.
    user_context: UserInstance::UserContext,
    /// Whether contract log messages should be output.
    allow_application_logs: bool,
}
/// The runtime status of an application.
#[derive(Debug)]
struct ApplicationStatus {
    /// The caller application ID, if forwarded during the call.
    caller_id: Option<ApplicationId>,
    /// The application ID.
    id: ApplicationId,
    /// The application description.
    description: ApplicationDescription,
    /// The authenticated owner for the execution thread, if any.
    signer: Option<AccountOwner>,
}
/// A loaded application instance.
///
/// The instance is shared behind an `Arc<Mutex<_>>` so clones of this entry
/// refer to the same live instance.
#[derive(Debug)]
struct LoadedApplication<Instance> {
    instance: Arc<Mutex<Instance>>,
    description: ApplicationDescription,
}
impl<Instance> LoadedApplication<Instance> {
/// Creates a new [`LoadedApplication`] entry from the `instance` and its `description`.
fn new(instance: Instance, description: ApplicationDescription) -> Self {
LoadedApplication {
instance: Arc::new(Mutex::new(instance)),
description,
}
}
}
impl<Instance> Clone for LoadedApplication<Instance> {
// Manual implementation is needed to prevent the derive macro from adding an `Instance: Clone`
// bound
fn clone(&self) -> Self {
LoadedApplication {
instance: self.instance.clone(),
description: self.description.clone(),
}
}
}
/// A value that is either already available or still awaited on a channel.
#[derive(Debug)]
enum Promise<T> {
    // The response has been received and cached.
    Ready(T),
    // Still waiting on the response channel.
    Pending(Receiver<T>),
}
impl<T> Promise<T> {
    /// Blocks until the value is available, caching it as `Ready` in place.
    fn force(&mut self) -> Result<(), ExecutionError> {
        if let Promise::Pending(receiver) = self {
            // `recv_ref` keeps the receiver usable; a closed channel means the
            // responder went away without answering.
            let value = receiver
                .recv_ref()
                .map_err(|oneshot::RecvError| ExecutionError::MissingRuntimeResponse)?;
            *self = Promise::Ready(value);
        }
        Ok(())
    }
    /// Consumes the promise and returns its value, blocking if still pending.
    fn read(self) -> Result<T, ExecutionError> {
        match self {
            Promise::Pending(receiver) => {
                let value = receiver.recv_response()?;
                Ok(value)
            }
            Promise::Ready(value) => Ok(value),
        }
    }
}
/// Manages a set of pending queries returning values of type `T`.
#[derive(Debug, Default)]
struct QueryManager<T> {
    /// The queries in progress.
    pending_queries: BTreeMap<u32, Promise<T>>,
    /// The number of queries ever registered so far. Used for the index of the next query.
    query_count: u32,
    /// The number of active queries.
    active_query_count: u32,
}
impl<T> QueryManager<T> {
    /// Registers a new pending query and returns its id.
    fn register(&mut self, receiver: Receiver<T>) -> Result<u32, ExecutionError> {
        let id = self.query_count;
        self.pending_queries.insert(id, Promise::Pending(receiver));
        // Both counters grow monotonically; overflow is reported, not wrapped.
        self.query_count = id.checked_add(1).ok_or(ArithmeticError::Overflow)?;
        self.active_query_count = self
            .active_query_count
            .checked_add(1)
            .ok_or(ArithmeticError::Overflow)?;
        Ok(id)
    }
    /// Removes the query with the given id and blocks until its value arrives.
    fn wait(&mut self, id: u32) -> Result<T, ExecutionError> {
        let Some(promise) = self.pending_queries.remove(&id) else {
            return Err(ExecutionError::InvalidPromise);
        };
        let value = promise.read()?;
        self.active_query_count -= 1;
        Ok(value)
    }
    /// Resolves every pending query in place without consuming it.
    fn force_all(&mut self) -> Result<(), ExecutionError> {
        self.pending_queries
            .values_mut()
            .try_for_each(Promise::force)
    }
}
// Shorthand aliases for raw key/value payloads exchanged with the storage view.
type Keys = Vec<Vec<u8>>;
type Value = Vec<u8>;
type KeyValues = Vec<(Vec<u8>, Vec<u8>)>;
/// Per-application bookkeeping of in-flight storage queries, one manager per
/// query flavor.
#[derive(Debug, Default)]
struct ViewUserState {
    /// The contains-key queries in progress.
    contains_key_queries: QueryManager<bool>,
    /// The contains-keys queries in progress.
    contains_keys_queries: QueryManager<Vec<bool>>,
    /// The read-value queries in progress.
    read_value_queries: QueryManager<Option<Value>>,
    /// The read-multi-values queries in progress.
    read_multi_values_queries: QueryManager<Vec<Option<Value>>>,
    /// The find-keys queries in progress.
    find_keys_queries: QueryManager<Keys>,
    /// The find-key-values queries in progress.
    find_key_values_queries: QueryManager<KeyValues>,
}
impl ViewUserState {
    /// Forces every pending query of every flavor to resolve, caching the results.
    fn force_all_pending_queries(&mut self) -> Result<(), ExecutionError> {
        // Destructure so a newly added field triggers a compile error here.
        let Self {
            contains_key_queries,
            contains_keys_queries,
            read_value_queries,
            read_multi_values_queries,
            find_keys_queries,
            find_key_values_queries,
        } = self;
        contains_key_queries.force_all()?;
        contains_keys_queries.force_all()?;
        read_value_queries.force_all()?;
        read_multi_values_queries.force_all()?;
        find_keys_queries.force_all()?;
        find_key_values_queries.force_all()
    }
}
// Delegates to the inner handle; panics only if used after `into_inner`/`drop`
// already moved the handle out.
impl<UserInstance: WithContext> Deref for SyncRuntime<UserInstance> {
    type Target = SyncRuntimeHandle<UserInstance>;
    fn deref(&self) -> &Self::Target {
        self.0.as_ref().expect(
            "`SyncRuntime` should not be used after its `inner` contents have been moved out",
        )
    }
}
// Mutable counterpart of the `Deref` impl above; same moved-out invariant.
impl<UserInstance: WithContext> DerefMut for SyncRuntime<UserInstance> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0.as_mut().expect(
            "`SyncRuntime` should not be used after its `inner` contents have been moved out",
        )
    }
}
impl<UserInstance: WithContext> Drop for SyncRuntime<UserInstance> {
    fn drop(&mut self) {
        // Ensure the `loaded_applications` are cleared to prevent circular references in
        // the runtime: instances hold handles back to this runtime, so leaving
        // them in the map would keep the `Arc` cycle alive.
        if let Some(handle) = self.0.take() {
            handle.inner().loaded_applications.clear();
        }
    }
}
impl<UserInstance: WithContext> SyncRuntimeInternal<UserInstance> {
    /// Creates a fresh runtime state for one transaction, with empty call stack
    /// and no loaded applications.
    #[expect(clippy::too_many_arguments)]
    fn new(
        chain_id: ChainId,
        height: BlockHeight,
        round: Option<u32>,
        executing_message: Option<ExecutingMessage>,
        execution_state_sender: ExecutionStateSender,
        deadline: Option<Instant>,
        refund_grant_to: Option<Account>,
        resource_controller: ResourceController,
        user_context: UserInstance::UserContext,
        allow_application_logs: bool,
    ) -> Self {
        Self {
            chain_id,
            height,
            round,
            executing_message,
            execution_state_sender,
            is_finalizing: false,
            applications_to_finalize: Vec::new(),
            preloaded_applications: HashMap::new(),
            loaded_applications: HashMap::new(),
            call_stack: Vec::new(),
            active_applications: HashSet::new(),
            view_user_states: BTreeMap::new(),
            deadline,
            refund_grant_to,
            resource_controller,
            scheduled_operations: Vec::new(),
            user_context,
            allow_application_logs,
        }
    }
    /// Returns the [`ApplicationStatus`] of the current application.
    ///
    /// The current application is the last to be pushed to the `call_stack`.
    ///
    /// # Panics
    ///
    /// If the call stack is empty.
    fn current_application(&self) -> &ApplicationStatus {
        self.call_stack
            .last()
            .expect("Call stack is unexpectedly empty")
    }
    /// Inserts a new [`ApplicationStatus`] to the end of the `call_stack`.
    ///
    /// Ensures the application's ID is also tracked in the `active_applications` set.
    fn push_application(&mut self, status: ApplicationStatus) {
        self.active_applications.insert(status.id);
        self.call_stack.push(status);
    }
    /// Removes the [`current_application`][`Self::current_application`] from the `call_stack`.
    ///
    /// Ensures the application's ID is also removed from the `active_applications` set.
    ///
    /// # Panics
    ///
    /// If the call stack is empty.
    fn pop_application(&mut self) -> ApplicationStatus {
        let status = self
            .call_stack
            .pop()
            .expect("Can't remove application from empty call stack");
        // The stack and the set must stay in lockstep.
        assert!(self.active_applications.remove(&status.id));
        status
    }
    /// Ensures that a call to `application_id` is not-reentrant.
    ///
    /// Returns an error if there already is an entry for `application_id` in the call stack.
    fn check_for_reentrancy(
        &mut self,
        application_id: ApplicationId,
    ) -> Result<(), ExecutionError> {
        ensure!(
            !self.active_applications.contains(&application_id),
            ExecutionError::ReentrantCall(application_id)
        );
        Ok(())
    }
}
impl SyncRuntimeInternal<UserContractInstance> {
    /// Loads a contract instance, initializing it with this runtime if needed.
    fn load_contract_instance(
        &mut self,
        this: SyncRuntimeHandle<UserContractInstance>,
        id: ApplicationId,
    ) -> Result<LoadedApplication<UserContractInstance>, ExecutionError> {
        match self.loaded_applications.entry(id) {
            // Already instantiated in this transaction: reuse the shared entry.
            hash_map::Entry::Occupied(entry) => Ok(entry.get().clone()),
            hash_map::Entry::Vacant(entry) => {
                // First time actually using the application. Let's see if the code was
                // pre-loaded.
                let (code, description) = match self.preloaded_applications.entry(id) {
                    // TODO(#2927): support dynamic loading of modules on the Web
                    #[cfg(web)]
                    hash_map::Entry::Vacant(_) => {
                        drop(this);
                        return Err(ExecutionError::UnsupportedDynamicApplicationLoad(Box::new(
                            id,
                        )));
                    }
                    #[cfg(not(web))]
                    hash_map::Entry::Vacant(entry) => {
                        // Not pre-loaded: fetch the code from the execution state
                        // actor and cache it for later uses.
                        let (code, description) = self
                            .execution_state_sender
                            .send_request(move |callback| ExecutionRequest::LoadContract {
                                id,
                                callback,
                            })?
                            .recv_response()?;
                        entry.insert((code, description)).clone()
                    }
                    hash_map::Entry::Occupied(entry) => entry.get().clone(),
                };
                let instance = code.instantiate(this)?;
                // Every instantiated application must be finalized at the end.
                self.applications_to_finalize.push(id);
                Ok(entry
                    .insert(LoadedApplication::new(instance, description))
                    .clone())
            }
        }
    }
    /// Configures the runtime for executing a call to a different contract.
    fn prepare_for_call(
        &mut self,
        this: ContractSyncRuntimeHandle,
        authenticated: bool,
        callee_id: ApplicationId,
    ) -> Result<Arc<Mutex<UserContractInstance>>, ExecutionError> {
        self.check_for_reentrancy(callee_id)?;
        // Cross-application calls are forbidden once finalization has started.
        ensure!(
            !self.is_finalizing,
            ExecutionError::CrossApplicationCallInFinalize {
                caller_id: Box::new(self.current_application().id),
                callee_id: Box::new(callee_id),
            }
        );
        // Load the application.
        let application = self.load_contract_instance(this, callee_id)?;
        let caller = self.current_application();
        let caller_id = caller.id;
        let caller_signer = caller.signer;
        // Make the call to user code.
        let authenticated_owner = match caller_signer {
            Some(signer) if authenticated => Some(signer),
            _ => None,
        };
        let authenticated_caller_id = authenticated.then_some(caller_id);
        self.push_application(ApplicationStatus {
            caller_id: authenticated_caller_id,
            id: callee_id,
            description: application.description,
            // Allow further nested calls to be authenticated if this one is.
            signer: authenticated_owner,
        });
        Ok(application.instance)
    }
    /// Cleans up the runtime after the execution of a call to a different contract.
    fn finish_call(&mut self) {
        self.pop_application();
    }
    /// Runs the service in a separate thread as an oracle.
    fn run_service_oracle_query(
        &mut self,
        application_id: ApplicationId,
        query: Vec<u8>,
    ) -> Result<Vec<u8>, ExecutionError> {
        // The oracle query gets only the remaining time budget as its deadline.
        let timeout = self
            .resource_controller
            .remaining_service_oracle_execution_time()?;
        let execution_start = Instant::now();
        let deadline = Some(execution_start + timeout);
        let response = self
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::QueryServiceOracle {
                deadline,
                application_id,
                next_block_height: self.height,
                query,
                callback,
            })?
            .recv_response()?;
        // Charge for both the time spent and the size of the response.
        self.resource_controller
            .track_service_oracle_execution(execution_start.elapsed())?;
        self.resource_controller
            .track_service_oracle_response(response.len())?;
        Ok(response)
    }
}
impl SyncRuntimeInternal<UserServiceInstance> {
    /// Initializes a service instance with this runtime.
    ///
    /// Mirrors `load_contract_instance`, but requests `LoadService` instead.
    fn load_service_instance(
        &mut self,
        this: SyncRuntimeHandle<UserServiceInstance>,
        id: ApplicationId,
    ) -> Result<LoadedApplication<UserServiceInstance>, ExecutionError> {
        match self.loaded_applications.entry(id) {
            // Already instantiated in this transaction: reuse the shared entry.
            hash_map::Entry::Occupied(entry) => Ok(entry.get().clone()),
            hash_map::Entry::Vacant(entry) => {
                // First time actually using the application. Let's see if the code was
                // pre-loaded.
                let (code, description) = match self.preloaded_applications.entry(id) {
                    // TODO(#2927): support dynamic loading of modules on the Web
                    #[cfg(web)]
                    hash_map::Entry::Vacant(_) => {
                        drop(this);
                        return Err(ExecutionError::UnsupportedDynamicApplicationLoad(Box::new(
                            id,
                        )));
                    }
                    #[cfg(not(web))]
                    hash_map::Entry::Vacant(entry) => {
                        // Not pre-loaded: fetch the code from the execution state
                        // actor and cache it for later uses.
                        let (code, description) = self
                            .execution_state_sender
                            .send_request(move |callback| ExecutionRequest::LoadService {
                                id,
                                callback,
                            })?
                            .recv_response()?;
                        entry.insert((code, description)).clone()
                    }
                    hash_map::Entry::Occupied(entry) => entry.get().clone(),
                };
                let instance = code.instantiate(this)?;
                self.applications_to_finalize.push(id);
                Ok(entry
                    .insert(LoadedApplication::new(instance, description))
                    .clone())
            }
        }
    }
}
impl<UserInstance: WithContext> SyncRuntime<UserInstance> {
    /// Unwraps the internal runtime state, returning `None` if other clones of
    /// the handle are still alive (`Arc::into_inner` requires sole ownership).
    fn into_inner(mut self) -> Option<SyncRuntimeInternal<UserInstance>> {
        let handle = self.0.take().expect(
            "`SyncRuntime` should not be used after its `inner` contents have been moved out",
        );
        let runtime = Arc::into_inner(handle.0)?
            .into_inner()
            .expect("`SyncRuntime` should run in a single thread which should not panic");
        Some(runtime)
    }
}
impl<UserInstance: WithContext> From<SyncRuntimeInternal<UserInstance>>
    for SyncRuntimeHandle<UserInstance>
{
    /// Wraps the internal state in a shared, lockable handle.
    fn from(runtime: SyncRuntimeInternal<UserInstance>) -> Self {
        Self(Arc::new(Mutex::new(runtime)))
    }
}
impl<UserInstance: WithContext> SyncRuntimeHandle<UserInstance> {
    /// Locks the shared internal state; uses `try_lock` because contention
    /// would mean the single-thread invariant was violated.
    fn inner(&self) -> std::sync::MutexGuard<'_, SyncRuntimeInternal<UserInstance>> {
        self.0
            .try_lock()
            .expect("Synchronous runtimes run on a single execution thread")
    }
}
impl<UserInstance: WithContext> BaseRuntime for SyncRuntimeHandle<UserInstance>
where
Self: ContractOrServiceRuntime,
{
type Read = ();
type ReadValueBytes = u32;
type ContainsKey = u32;
type ContainsKeys = u32;
type ReadMultiValuesBytes = u32;
type FindKeysByPrefix = u32;
type FindKeyValuesByPrefix = u32;
    /// Returns the current chain ID, charging the corresponding runtime fee.
    fn chain_id(&mut self) -> Result<ChainId, ExecutionError> {
        let mut this = self.inner();
        let chain_id = this.chain_id;
        this.resource_controller.track_runtime_chain_id()?;
        Ok(chain_id)
    }
    /// Returns the current block height, charging the corresponding runtime fee.
    fn block_height(&mut self) -> Result<BlockHeight, ExecutionError> {
        let mut this = self.inner();
        let height = this.height;
        this.resource_controller.track_runtime_block_height()?;
        Ok(height)
    }
    /// Returns the id of the application currently on top of the call stack.
    fn application_id(&mut self) -> Result<ApplicationId, ExecutionError> {
        let mut this = self.inner();
        let application_id = this.current_application().id;
        this.resource_controller.track_runtime_application_id()?;
        Ok(application_id)
    }
    /// Returns the chain that created the current application.
    fn application_creator_chain_id(&mut self) -> Result<ChainId, ExecutionError> {
        let mut this = self.inner();
        let application_creator_chain_id = this.current_application().description.creator_chain_id;
        this.resource_controller.track_runtime_application_id()?;
        Ok(application_creator_chain_id)
    }
    /// Returns the current application's parameters; the fee scales with their size.
    fn application_parameters(&mut self) -> Result<Vec<u8>, ExecutionError> {
        let mut this = self.inner();
        let parameters = this.current_application().description.parameters.clone();
        this.resource_controller
            .track_runtime_application_parameters(&parameters)?;
        Ok(parameters)
    }
    /// Reads the system timestamp from the execution state actor.
    fn read_system_timestamp(&mut self) -> Result<Timestamp, ExecutionError> {
        let mut this = self.inner();
        let timestamp = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::SystemTimestamp { callback })?
            .recv_response()?;
        this.resource_controller.track_runtime_timestamp()?;
        Ok(timestamp)
    }
    /// Reads the chain's own balance from the execution state actor.
    fn read_chain_balance(&mut self) -> Result<Amount, ExecutionError> {
        let mut this = self.inner();
        let balance = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::ChainBalance { callback })?
            .recv_response()?;
        this.resource_controller.track_runtime_balance()?;
        Ok(balance)
    }
    /// Reads a single owner's balance.
    fn read_owner_balance(&mut self, owner: AccountOwner) -> Result<Amount, ExecutionError> {
        let mut this = self.inner();
        let balance = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::OwnerBalance { owner, callback })?
            .recv_response()?;
        this.resource_controller.track_runtime_balance()?;
        Ok(balance)
    }
    /// Reads all owner balances; the fee scales with the result size.
    fn read_owner_balances(&mut self) -> Result<Vec<(AccountOwner, Amount)>, ExecutionError> {
        let mut this = self.inner();
        let owner_balances = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::OwnerBalances { callback })?
            .recv_response()?;
        this.resource_controller
            .track_runtime_owner_balances(&owner_balances)?;
        Ok(owner_balances)
    }
    /// Reads the list of owners that hold a balance.
    fn read_balance_owners(&mut self) -> Result<Vec<AccountOwner>, ExecutionError> {
        let mut this = self.inner();
        let owners = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::BalanceOwners { callback })?
            .recv_response()?;
        this.resource_controller.track_runtime_owners(&owners)?;
        Ok(owners)
    }
    /// Reads the chain's ownership configuration.
    fn chain_ownership(&mut self) -> Result<ChainOwnership, ExecutionError> {
        let mut this = self.inner();
        let chain_ownership = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::ChainOwnership { callback })?
            .recv_response()?;
        this.resource_controller
            .track_runtime_chain_ownership(&chain_ownership)?;
        Ok(chain_ownership)
    }
    /// Starts an asynchronous contains-key query and returns a promise id.
    fn contains_key_new(&mut self, key: Vec<u8>) -> Result<Self::ContainsKey, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        this.resource_controller.track_read_operation()?;
        let receiver = this
            .execution_state_sender
            .send_request(move |callback| ExecutionRequest::ContainsKey { id, key, callback })?;
        let state = this.view_user_states.entry(id).or_default();
        state.contains_key_queries.register(receiver)
    }
    /// Resolves a previously registered contains-key promise.
    fn contains_key_wait(&mut self, promise: &Self::ContainsKey) -> Result<bool, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        let state = this.view_user_states.entry(id).or_default();
        let value = state.contains_key_queries.wait(*promise)?;
        Ok(value)
    }
    /// Starts an asynchronous contains-keys (batch) query and returns a promise id.
    fn contains_keys_new(
        &mut self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Self::ContainsKeys, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        this.resource_controller.track_read_operation()?;
        let receiver = this
            .execution_state_sender
            .send_request(move |callback| ExecutionRequest::ContainsKeys { id, keys, callback })?;
        let state = this.view_user_states.entry(id).or_default();
        state.contains_keys_queries.register(receiver)
    }
    /// Resolves a previously registered contains-keys promise.
    fn contains_keys_wait(
        &mut self,
        promise: &Self::ContainsKeys,
    ) -> Result<Vec<bool>, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        let state = this.view_user_states.entry(id).or_default();
        let value = state.contains_keys_queries.wait(*promise)?;
        Ok(value)
    }
    /// Starts an asynchronous multi-value read and returns a promise id.
    fn read_multi_values_bytes_new(
        &mut self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Self::ReadMultiValuesBytes, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        this.resource_controller.track_read_operation()?;
        let receiver = this.execution_state_sender.send_request(move |callback| {
            ExecutionRequest::ReadMultiValuesBytes { id, keys, callback }
        })?;
        let state = this.view_user_states.entry(id).or_default();
        state.read_multi_values_queries.register(receiver)
    }
    /// Resolves a multi-value read promise, charging for the bytes of every
    /// value that was actually found.
    fn read_multi_values_bytes_wait(
        &mut self,
        promise: &Self::ReadMultiValuesBytes,
    ) -> Result<Vec<Option<Vec<u8>>>, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        let state = this.view_user_states.entry(id).or_default();
        let values = state.read_multi_values_queries.wait(*promise)?;
        for value in &values {
            if let Some(value) = &value {
                this.resource_controller
                    .track_bytes_read(value.len() as u64)?;
            }
        }
        Ok(values)
    }
    /// Starts an asynchronous single-value read and returns a promise id.
    fn read_value_bytes_new(
        &mut self,
        key: Vec<u8>,
    ) -> Result<Self::ReadValueBytes, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        this.resource_controller.track_read_operation()?;
        let receiver = this
            .execution_state_sender
            .send_request(move |callback| ExecutionRequest::ReadValueBytes { id, key, callback })?;
        let state = this.view_user_states.entry(id).or_default();
        state.read_value_queries.register(receiver)
    }
    /// Resolves a single-value read promise, charging for the bytes read if found.
    fn read_value_bytes_wait(
        &mut self,
        promise: &Self::ReadValueBytes,
    ) -> Result<Option<Vec<u8>>, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        // Scoped so the state borrow ends before charging the controller.
        let value = {
            let state = this.view_user_states.entry(id).or_default();
            state.read_value_queries.wait(*promise)?
        };
        if let Some(value) = &value {
            this.resource_controller
                .track_bytes_read(value.len() as u64)?;
        }
        Ok(value)
    }
fn find_keys_by_prefix_new(
&mut self,
key_prefix: Vec<u8>,
) -> Result<Self::FindKeysByPrefix, ExecutionError> {
let mut this = self.inner();
let id = this.current_application().id;
this.resource_controller.track_read_operation()?;
let receiver = this.execution_state_sender.send_request(move |callback| {
ExecutionRequest::FindKeysByPrefix {
id,
key_prefix,
callback,
}
})?;
let state = this.view_user_states.entry(id).or_default();
state.find_keys_queries.register(receiver)
}
fn find_keys_by_prefix_wait(
&mut self,
promise: &Self::FindKeysByPrefix,
) -> Result<Vec<Vec<u8>>, ExecutionError> {
let mut this = self.inner();
let id = this.current_application().id;
let keys = {
let state = this.view_user_states.entry(id).or_default();
state.find_keys_queries.wait(*promise)?
};
let mut read_size = 0;
for key in &keys {
read_size += key.len();
}
this.resource_controller
.track_bytes_read(read_size as u64)?;
Ok(keys)
}
    /// Schedules a key/value-prefix search and registers a promise for it.
    /// Charges one read operation here; byte charges happen on `wait`.
    fn find_key_values_by_prefix_new(
        &mut self,
        key_prefix: Vec<u8>,
    ) -> Result<Self::FindKeyValuesByPrefix, ExecutionError> {
        let mut this = self.inner();
        let id = this.current_application().id;
        this.resource_controller.track_read_operation()?;
        let receiver = this.execution_state_sender.send_request(move |callback| {
            ExecutionRequest::FindKeyValuesByPrefix {
                id,
                key_prefix,
                callback,
            }
        })?;
        let state = this.view_user_states.entry(id).or_default();
        state.find_key_values_queries.register(receiver)
    }
fn find_key_values_by_prefix_wait(
&mut self,
promise: &Self::FindKeyValuesByPrefix,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, ExecutionError> {
let mut this = self.inner();
let id = this.current_application().id;
let state = this.view_user_states.entry(id).or_default();
let key_values = state.find_key_values_queries.wait(*promise)?;
let mut read_size = 0;
for (key, value) in &key_values {
read_size += key.len() + value.len();
}
this.resource_controller
.track_bytes_read(read_size as u64)?;
Ok(key_values)
}
    /// Performs an HTTP request on behalf of the current application.
    ///
    /// The chain's application permissions must allow this application to make
    /// HTTP requests; otherwise `UnauthorizedApplication` is returned.
    fn perform_http_request(
        &mut self,
        request: http::Request,
    ) -> Result<http::Response, ExecutionError> {
        let mut this = self.inner();
        // Fetch the permissions first so unauthorized callers are rejected
        // before any resource tracking or network work happens.
        let app_permissions = this
            .execution_state_sender
            .send_request(|callback| ExecutionRequest::GetApplicationPermissions { callback })?
            .recv_response()?;
        let app_id = this.current_application().id;
        ensure!(
            app_permissions.can_make_http_requests(&app_id),
            ExecutionError::UnauthorizedApplication(app_id)
        );
        this.resource_controller.track_http_request()?;
        this.execution_state_sender
            .send_request(|callback| ExecutionRequest::PerformHttpRequest {
                request,
                http_responses_are_oracle_responses:
                    Self::LIMIT_HTTP_RESPONSE_SIZE_TO_ORACLE_RESPONSE_SIZE,
                callback,
            })?
            .recv_response()
    }

    /// Asserts that the block's timestamp is before `timestamp`.
    fn assert_before(&mut self, timestamp: Timestamp) -> Result<(), ExecutionError> {
        let this = self.inner();
        // NOTE(review): the response channel appears to carry a nested
        // `Result`; the trailing `?` strips the transport-level error and the
        // inner assertion result becomes the return value — confirm against
        // the `recv_response` signature.
        this.execution_state_sender
            .send_request(|callback| ExecutionRequest::AssertBefore {
                timestamp,
                callback,
            })?
            .recv_response()?
    }
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/execution.rs | linera-execution/src/execution.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{
collections::BTreeMap,
ops::{Deref, DerefMut},
vec,
};
use allocative::Allocative;
use futures::{FutureExt, StreamExt};
use linera_base::{
crypto::{BcsHashable, CryptoHash},
data_types::{BlobContent, BlockHeight, StreamUpdate},
identifiers::{AccountOwner, BlobId, ChainId, StreamId},
time::Instant,
};
use linera_views::{
context::Context,
historical_hash_wrapper::HistoricallyHashableView,
key_value_store_view::KeyValueStoreView,
map_view::MapView,
reentrant_collection_view::ReentrantCollectionView,
views::{ClonableView, ReplaceContext, View},
ViewError,
};
use serde::{Deserialize, Serialize};
#[cfg(with_testing)]
use {
crate::{
ResourceControlPolicy, ResourceTracker, TestExecutionRuntimeContext, UserContractCode,
},
linera_base::data_types::Blob,
linera_views::context::MemoryContext,
std::sync::Arc,
};
use super::{execution_state_actor::ExecutionRequest, runtime::ServiceRuntimeRequest};
use crate::{
execution_state_actor::ExecutionStateActor, resources::ResourceController,
system::SystemExecutionStateView, ApplicationDescription, ApplicationId, ExecutionError,
ExecutionRuntimeContext, JsVec, MessageContext, OperationContext, ProcessStreamsContext, Query,
QueryContext, QueryOutcome, ServiceSyncRuntime, Timestamp, TransactionTracker,
};
/// An inner view accessing the execution state of a chain, for hashing purposes.
///
/// This is the raw state; it is always accessed through [`ExecutionStateView`],
/// which wraps it so that a historical hash can be maintained.
#[derive(Debug, ClonableView, View, Allocative)]
#[allocative(bound = "C")]
pub struct ExecutionStateViewInner<C> {
    /// System application.
    pub system: SystemExecutionStateView<C>,
    /// User applications.
    pub users: ReentrantCollectionView<C, ApplicationId, KeyValueStoreView<C>>,
    /// The heights of previous blocks that sent messages to the same recipients.
    pub previous_message_blocks: MapView<C, ChainId, BlockHeight>,
    /// The heights of previous blocks that published events to the same streams.
    pub previous_event_blocks: MapView<C, StreamId, BlockHeight>,
}
impl<C: Context, C2: Context> ReplaceContext<C2> for ExecutionStateViewInner<C> {
    type Target = ExecutionStateViewInner<C2>;

    /// Rebuilds this view over a new context by recursively migrating every
    /// sub-view with the `ctx` mapping.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        ExecutionStateViewInner {
            system: self.system.with_context(ctx.clone()).await,
            users: self.users.with_context(ctx.clone()).await,
            previous_message_blocks: self.previous_message_blocks.with_context(ctx.clone()).await,
            // Last use of `ctx`: move it instead of cloning it one more time.
            previous_event_blocks: self.previous_event_blocks.with_context(ctx).await,
        }
    }
}
/// A view accessing the execution state of a chain.
///
/// Wraps [`ExecutionStateViewInner`] in a historical-hash wrapper; field
/// access is forwarded through `Deref`/`DerefMut`.
#[derive(Debug, ClonableView, View, Allocative)]
#[allocative(bound = "C")]
pub struct ExecutionStateView<C> {
    inner: HistoricallyHashableView<C, ExecutionStateViewInner<C>>,
}

impl<C> Deref for ExecutionStateView<C> {
    type Target = ExecutionStateViewInner<C>;

    fn deref(&self) -> &ExecutionStateViewInner<C> {
        self.inner.deref()
    }
}

impl<C> DerefMut for ExecutionStateView<C> {
    fn deref_mut(&mut self) -> &mut ExecutionStateViewInner<C> {
        self.inner.deref_mut()
    }
}

impl<C> ExecutionStateView<C>
where
    C: Context + Clone + 'static,
    C::Extra: ExecutionRuntimeContext,
{
    /// Computes the historical hash of the execution state as a [`CryptoHash`].
    pub async fn crypto_hash_mut(&mut self) -> Result<CryptoHash, ViewError> {
        // Local newtype so the 32-byte hash gets a stable BCS serialization.
        #[derive(Serialize, Deserialize)]
        struct ExecutionStateViewHash([u8; 32]);

        impl BcsHashable<'_> for ExecutionStateViewHash {}

        let hash = self.inner.historical_hash().await?;
        Ok(CryptoHash::new(&ExecutionStateViewHash(hash.into())))
    }
}
impl<C: Context, C2: Context> ReplaceContext<C2> for ExecutionStateView<C> {
    type Target = ExecutionStateView<C2>;

    /// Rebuilds the view over a new context by migrating the wrapped inner view.
    async fn with_context(
        &mut self,
        ctx: impl FnOnce(&Self::Context) -> C2 + Clone,
    ) -> Self::Target {
        ExecutionStateView {
            // `ctx` is used exactly once, so it can be moved instead of cloned;
            // the `Clone` bound is only needed by the callee.
            inner: self.inner.with_context(ctx).await,
        }
    }
}
/// How to interact with a long-lived service runtime.
pub struct ServiceRuntimeEndpoint {
    /// How to receive requests.
    pub incoming_execution_requests: futures::channel::mpsc::UnboundedReceiver<ExecutionRequest>,
    /// How to query the runtime.
    pub runtime_request_sender: std::sync::mpsc::Sender<ServiceRuntimeRequest>,
}
#[cfg(with_testing)]
impl ExecutionStateView<MemoryContext<TestExecutionRuntimeContext>>
where
    MemoryContext<TestExecutionRuntimeContext>: Context + Clone + 'static,
{
    /// Simulates the instantiation of an application.
    ///
    /// Registers the application's blobs and contract code in the test
    /// runtime, then runs the `Instantiate` user action with a fee-free
    /// resource policy. Intended for tests only.
    pub async fn simulate_instantiation(
        &mut self,
        contract: UserContractCode,
        local_time: linera_base::data_types::Timestamp,
        application_description: ApplicationDescription,
        instantiation_argument: Vec<u8>,
        contract_blob: Blob,
        service_blob: Blob,
    ) -> Result<(), ExecutionError> {
        let chain_id = application_description.creator_chain_id;
        assert_eq!(chain_id, self.context().extra().chain_id);
        let context = OperationContext {
            chain_id,
            authenticated_owner: None,
            height: application_description.block_height,
            round: None,
            timestamp: local_time,
        };
        let action = UserAction::Instantiate(context, instantiation_argument);
        let next_application_index = application_description.application_index + 1;
        let next_chain_index = 0;
        let application_id = From::from(&application_description);
        // One copy of the description blob is tracked as created by the
        // transaction below; a second copy is stored in the test runtime.
        let blob = Blob::new_application_description(&application_description);
        self.system.used_blobs.insert(&blob.id())?;
        self.system.used_blobs.insert(&contract_blob.id())?;
        self.system.used_blobs.insert(&service_blob.id())?;
        self.context()
            .extra()
            .user_contracts()
            .pin()
            .insert(application_id, contract);
        self.context()
            .extra()
            .add_blobs([
                contract_blob,
                service_blob,
                Blob::new_application_description(&application_description),
            ])
            .await?;
        let tracker = ResourceTracker::default();
        let policy = ResourceControlPolicy::no_fees();
        let mut resource_controller = ResourceController::new(Arc::new(policy), tracker, None);
        let mut txn_tracker = TransactionTracker::new(
            local_time,
            0,
            next_application_index,
            next_chain_index,
            None,
            &[],
        );
        txn_tracker.add_created_blob(blob);
        // Boxed because the user-action future is large and recursive.
        Box::pin(
            ExecutionStateActor::new(self, &mut txn_tracker, &mut resource_controller)
                .run_user_action(application_id, action, context.refund_grant_to(), None),
        )
        .await?;
        Ok(())
    }
}
/// An action run by a user application, together with its execution context.
pub enum UserAction {
    /// First-time instantiation with the serialized instantiation argument.
    Instantiate(OperationContext, Vec<u8>),
    /// A block operation with its serialized payload.
    Operation(OperationContext, Vec<u8>),
    /// An incoming cross-chain message with its serialized payload.
    Message(MessageContext, Vec<u8>),
    /// Processing of new events on subscribed streams.
    ProcessStreams(ProcessStreamsContext, Vec<StreamUpdate>),
}

impl UserAction {
    /// The authenticated owner of this action, if any. Stream processing is
    /// never authenticated.
    pub(crate) fn signer(&self) -> Option<AccountOwner> {
        match self {
            UserAction::Instantiate(context, _) => context.authenticated_owner,
            UserAction::Operation(context, _) => context.authenticated_owner,
            UserAction::ProcessStreams(_, _) => None,
            UserAction::Message(context, _) => context.authenticated_owner,
        }
    }

    /// The block height at which this action executes.
    pub(crate) fn height(&self) -> BlockHeight {
        match self {
            UserAction::Instantiate(context, _) => context.height,
            UserAction::Operation(context, _) => context.height,
            UserAction::ProcessStreams(context, _) => context.height,
            UserAction::Message(context, _) => context.height,
        }
    }

    /// The consensus round of the enclosing block, if known.
    pub(crate) fn round(&self) -> Option<u32> {
        match self {
            UserAction::Instantiate(context, _) => context.round,
            UserAction::Operation(context, _) => context.round,
            UserAction::ProcessStreams(context, _) => context.round,
            UserAction::Message(context, _) => context.round,
        }
    }

    /// The timestamp of the enclosing block.
    pub(crate) fn timestamp(&self) -> Timestamp {
        match self {
            UserAction::Instantiate(context, _) => context.timestamp,
            UserAction::Operation(context, _) => context.timestamp,
            UserAction::ProcessStreams(context, _) => context.timestamp,
            UserAction::Message(context, _) => context.timestamp,
        }
    }
}
impl<C> ExecutionStateView<C>
where
    C: Context + Clone + 'static,
    C::Extra: ExecutionRuntimeContext,
{
    /// Handles a query against this chain's state.
    ///
    /// System queries are answered directly; user queries are dispatched to
    /// the application's service, either through a long-lived runtime
    /// (`endpoint` given) or a fresh short-lived one.
    pub async fn query_application(
        &mut self,
        context: QueryContext,
        query: Query,
        endpoint: Option<&mut ServiceRuntimeEndpoint>,
    ) -> Result<QueryOutcome, ExecutionError> {
        assert_eq!(context.chain_id, self.context().extra().chain_id());
        match query {
            Query::System(query) => {
                let outcome = self.system.handle_query(context, query);
                Ok(outcome.into())
            }
            Query::User {
                application_id,
                bytes,
            } => {
                let outcome = match endpoint {
                    Some(endpoint) => {
                        self.query_user_application_with_long_lived_service(
                            application_id,
                            context,
                            bytes,
                            &mut endpoint.incoming_execution_requests,
                            &mut endpoint.runtime_request_sender,
                        )
                        .await?
                    }
                    None => {
                        self.query_user_application(application_id, context, bytes)
                            .await?
                    }
                };
                Ok(outcome.into())
            }
        }
    }

    /// Runs a user-application query in a fresh service runtime with no
    /// deadline and no pre-created blobs.
    async fn query_user_application(
        &mut self,
        application_id: ApplicationId,
        context: QueryContext,
        query: Vec<u8>,
    ) -> Result<QueryOutcome<Vec<u8>>, ExecutionError> {
        self.query_user_application_with_deadline(
            application_id,
            context,
            query,
            None,
            BTreeMap::new(),
        )
        .await
    }

    /// Runs a user-application query in a fresh service runtime, optionally
    /// bounded by `deadline`, with `created_blobs` visible to the service.
    ///
    /// The runtime executes on the thread pool while this task services its
    /// execution-state requests until the channel closes.
    pub(crate) async fn query_user_application_with_deadline(
        &mut self,
        application_id: ApplicationId,
        context: QueryContext,
        query: Vec<u8>,
        deadline: Option<Instant>,
        created_blobs: BTreeMap<BlobId, BlobContent>,
    ) -> Result<QueryOutcome<Vec<u8>>, ExecutionError> {
        let (execution_state_sender, mut execution_state_receiver) =
            futures::channel::mpsc::unbounded();
        let mut txn_tracker = TransactionTracker::default().with_blobs(created_blobs);
        let mut resource_controller = ResourceController::default();
        let thread_pool = self.context().extra().thread_pool().clone();
        let mut actor = ExecutionStateActor::new(self, &mut txn_tracker, &mut resource_controller);
        let (codes, descriptions) = actor.service_and_dependencies(application_id).await?;
        let service_runtime_task = thread_pool
            .run_send(JsVec(codes), move |codes| async move {
                let mut runtime = ServiceSyncRuntime::new_with_deadline(
                    execution_state_sender,
                    context,
                    deadline,
                );
                // Preload the service and all its dependencies before running.
                for (code, description) in codes.0.into_iter().zip(descriptions) {
                    runtime.preload_service(
                        ApplicationId::from(&description),
                        code,
                        description,
                    )?;
                }
                runtime.run_query(application_id, query)
            })
            .await;
        // Serve the runtime's state requests until it drops its sender.
        while let Some(request) = execution_state_receiver.next().await {
            actor.handle_request(request).await?;
        }
        service_runtime_task.await?
    }

    /// Runs a user-application query against an already-running service
    /// runtime, concurrently serving its execution-state requests until the
    /// query outcome arrives.
    async fn query_user_application_with_long_lived_service(
        &mut self,
        application_id: ApplicationId,
        context: QueryContext,
        query: Vec<u8>,
        incoming_execution_requests: &mut futures::channel::mpsc::UnboundedReceiver<
            ExecutionRequest,
        >,
        runtime_request_sender: &mut std::sync::mpsc::Sender<ServiceRuntimeRequest>,
    ) -> Result<QueryOutcome<Vec<u8>>, ExecutionError> {
        let (outcome_sender, outcome_receiver) = oneshot::channel();
        let mut outcome_receiver = outcome_receiver.fuse();
        runtime_request_sender
            .send(ServiceRuntimeRequest::Query {
                application_id,
                context,
                query,
                callback: outcome_sender,
            })
            .expect("Service runtime thread should only stop when `request_sender` is dropped");
        let mut txn_tracker = TransactionTracker::default();
        let mut resource_controller = ResourceController::default();
        let mut actor = ExecutionStateActor::new(self, &mut txn_tracker, &mut resource_controller);
        // Interleave serving state requests with waiting for the outcome.
        loop {
            futures::select! {
                maybe_request = incoming_execution_requests.next() => {
                    if let Some(request) = maybe_request {
                        actor.handle_request(request).await?;
                    }
                }
                outcome = &mut outcome_receiver => {
                    return outcome.map_err(|_| ExecutionError::MissingRuntimeResponse)?;
                }
            }
        }
    }

    /// Lists all applications registered on this chain together with their
    /// descriptions, read back from the description blobs.
    pub async fn list_applications(
        &self,
    ) -> Result<Vec<(ApplicationId, ApplicationDescription)>, ExecutionError> {
        let mut applications = vec![];
        for app_id in self.users.indices().await? {
            let blob_id = app_id.description_blob_id();
            let blob_content = self.system.read_blob_content(blob_id).await?;
            let application_description = bcs::from_bytes(blob_content.bytes())?;
            applications.push((app_id, application_description));
        }
        Ok(applications)
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/graphql.rs | linera-execution/src/graphql.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::collections::BTreeMap;
use linera_base::{
crypto::ValidatorPublicKey,
data_types::{Amount, ChainDescription, Epoch, Timestamp},
doc_scalar,
identifiers::{AccountOwner, ChainId},
ownership::ChainOwnership,
};
use linera_views::{context::Context, map_view::MapView};
use crate::{
committee::{Committee, ValidatorState},
policy::ResourceControlPolicy,
system::UserData,
ExecutionStateView, SystemExecutionStateView,
};
doc_scalar!(UserData, "Optional user message attached to a transfer");
async_graphql::scalar!(
    ResourceControlPolicy,
    "ResourceControlPolicyScalar",
    "A collection of prices and limits associated with block execution"
);

// GraphQL resolvers below simply forward to the corresponding getters; the
// `derived(name = ...)` attribute exposes them without the leading underscore.
#[async_graphql::Object(cache_control(no_cache))]
impl Committee {
    #[graphql(derived(name = "validators"))]
    async fn _validators(&self) -> &BTreeMap<ValidatorPublicKey, ValidatorState> {
        self.validators()
    }

    #[graphql(derived(name = "total_votes"))]
    async fn _total_votes(&self) -> u64 {
        self.total_votes()
    }

    #[graphql(derived(name = "quorum_threshold"))]
    async fn _quorum_threshold(&self) -> u64 {
        self.quorum_threshold()
    }

    #[graphql(derived(name = "validity_threshold"))]
    async fn _validity_threshold(&self) -> u64 {
        self.validity_threshold()
    }

    #[graphql(derived(name = "policy"))]
    async fn _policy(&self) -> &ResourceControlPolicy {
        self.policy()
    }
}

#[async_graphql::Object(cache_control(no_cache))]
impl<C: Send + Sync + Context> ExecutionStateView<C> {
    #[graphql(derived(name = "system"))]
    async fn _system(&self) -> &SystemExecutionStateView<C> {
        &self.system
    }
}

#[async_graphql::Object(cache_control(no_cache))]
impl<C: Send + Sync + Context> SystemExecutionStateView<C> {
    #[graphql(derived(name = "description"))]
    async fn _description(&self) -> &Option<ChainDescription> {
        self.description.get()
    }

    #[graphql(derived(name = "epoch"))]
    async fn _epoch(&self) -> &Epoch {
        self.epoch.get()
    }

    #[graphql(derived(name = "admin_id"))]
    async fn _admin_id(&self) -> &Option<ChainId> {
        self.admin_id.get()
    }

    #[graphql(derived(name = "committees"))]
    async fn _committees(&self) -> &BTreeMap<Epoch, Committee> {
        self.committees.get()
    }

    #[graphql(derived(name = "ownership"))]
    async fn _ownership(&self) -> &ChainOwnership {
        self.ownership.get()
    }

    #[graphql(derived(name = "balance"))]
    async fn _balance(&self) -> &Amount {
        self.balance.get()
    }

    #[graphql(derived(name = "balances"))]
    async fn _balances(&self) -> &MapView<C, AccountOwner, Amount> {
        &self.balances
    }

    #[graphql(derived(name = "timestamp"))]
    async fn _timestamp(&self) -> &Timestamp {
        self.timestamp.get()
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/committee.rs | linera-execution/src/committee.rs | // Copyright (c) Facebook, Inc. and its affiliates.
// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use std::{borrow::Cow, collections::BTreeMap, str::FromStr};
use allocative::Allocative;
use linera_base::crypto::{AccountPublicKey, CryptoError, ValidatorPublicKey};
use serde::{Deserialize, Serialize};
use crate::policy::ResourceControlPolicy;
/// The identity of a validator.
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Debug)]
pub struct ValidatorName(pub ValidatorPublicKey);

impl Serialize for ValidatorName {
    /// Human-readable formats (e.g. JSON) get the string form of the key;
    /// binary formats get the raw newtype encoding.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            serializer.serialize_str(&self.to_string())
        } else {
            serializer.serialize_newtype_struct("ValidatorName", &self.0)
        }
    }
}

impl<'de> Deserialize<'de> for ValidatorName {
    /// Mirror of the custom `Serialize` impl above.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let s = String::deserialize(deserializer)?;
            let value = Self::from_str(&s).map_err(serde::de::Error::custom)?;
            Ok(value)
        } else {
            // Helper newtype so the binary encoding matches the serializer's
            // `serialize_newtype_struct("ValidatorName", ...)`.
            #[derive(Deserialize)]
            #[serde(rename = "ValidatorName")]
            struct ValidatorNameDerived(ValidatorPublicKey);

            let value = ValidatorNameDerived::deserialize(deserializer)?;
            Ok(Self(value.0))
        }
    }
}

/// Public state of a validator.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Serialize, Deserialize, Allocative)]
pub struct ValidatorState {
    /// The network address (in a string format understood by the networking layer).
    pub network_address: String,
    /// The voting power.
    pub votes: u64,
    /// The public key of the account associated with the validator.
    pub account_public_key: AccountPublicKey,
}

/// A set of validators (identified by their public keys) and their voting rights.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Default, Allocative)]
#[cfg_attr(with_graphql, derive(async_graphql::InputObject))]
pub struct Committee {
    /// The validators in the committee.
    pub validators: BTreeMap<ValidatorPublicKey, ValidatorState>,
    /// The sum of all voting rights.
    total_votes: u64,
    /// The threshold to form a quorum.
    quorum_threshold: u64,
    /// The threshold to prove the validity of a statement. I.e. the assumption is that strictly
    /// less than `validity_threshold` are faulty.
    validity_threshold: u64,
    /// The policy agreed on for this epoch.
    policy: ResourceControlPolicy,
}
impl Serialize for Committee {
    /// Human-readable formats carry the derived fields too (`CommitteeFull`);
    /// binary formats carry only the inputs (`CommitteeMinimal`) and recompute
    /// the rest on deserialization.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::ser::Serializer,
    {
        if serializer.is_human_readable() {
            CommitteeFull::from(self).serialize(serializer)
        } else {
            CommitteeMinimal::from(self).serialize(serializer)
        }
    }
}

impl<'de> Deserialize<'de> for Committee {
    /// Mirror of the custom `Serialize` impl; the human-readable path
    /// validates that the stored derived fields match the recomputed ones.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        if deserializer.is_human_readable() {
            let committee_full = CommitteeFull::deserialize(deserializer)?;
            Committee::try_from(committee_full).map_err(serde::de::Error::custom)
        } else {
            let committee_minimal = CommitteeMinimal::deserialize(deserializer)?;
            Ok(Committee::from(committee_minimal))
        }
    }
}

/// Serialization proxy including the derived threshold fields.
#[derive(Serialize, Deserialize)]
#[serde(rename = "Committee")]
struct CommitteeFull<'a> {
    validators: Cow<'a, BTreeMap<ValidatorPublicKey, ValidatorState>>,
    total_votes: u64,
    quorum_threshold: u64,
    validity_threshold: u64,
    policy: Cow<'a, ResourceControlPolicy>,
}

/// Serialization proxy carrying only the inputs of `Committee::new`.
#[derive(Serialize, Deserialize)]
#[serde(rename = "Committee")]
struct CommitteeMinimal<'a> {
    validators: Cow<'a, BTreeMap<ValidatorPublicKey, ValidatorState>>,
    policy: Cow<'a, ResourceControlPolicy>,
}

impl TryFrom<CommitteeFull<'static>> for Committee {
    type Error = String;

    /// Rebuilds the committee from its inputs and rejects the payload if any
    /// stored derived field disagrees with the recomputed value.
    fn try_from(committee_full: CommitteeFull) -> Result<Committee, Self::Error> {
        let CommitteeFull {
            validators,
            total_votes,
            quorum_threshold,
            validity_threshold,
            policy,
        } = committee_full;
        let committee = Committee::new(validators.into_owned(), policy.into_owned());
        if total_votes != committee.total_votes {
            Err(format!(
                "invalid committee: total_votes is {}; should be {}",
                total_votes, committee.total_votes,
            ))
        } else if quorum_threshold != committee.quorum_threshold {
            Err(format!(
                "invalid committee: quorum_threshold is {}; should be {}",
                quorum_threshold, committee.quorum_threshold,
            ))
        } else if validity_threshold != committee.validity_threshold {
            Err(format!(
                "invalid committee: validity_threshold is {}; should be {}",
                validity_threshold, committee.validity_threshold,
            ))
        } else {
            Ok(committee)
        }
    }
}

impl<'a> From<&'a Committee> for CommitteeFull<'a> {
    fn from(committee: &'a Committee) -> CommitteeFull<'a> {
        let Committee {
            validators,
            total_votes,
            quorum_threshold,
            validity_threshold,
            policy,
        } = committee;
        CommitteeFull {
            validators: Cow::Borrowed(validators),
            total_votes: *total_votes,
            quorum_threshold: *quorum_threshold,
            validity_threshold: *validity_threshold,
            policy: Cow::Borrowed(policy),
        }
    }
}

impl From<CommitteeMinimal<'static>> for Committee {
    fn from(committee_min: CommitteeMinimal) -> Committee {
        let CommitteeMinimal { validators, policy } = committee_min;
        Committee::new(validators.into_owned(), policy.into_owned())
    }
}

impl<'a> From<&'a Committee> for CommitteeMinimal<'a> {
    fn from(committee: &'a Committee) -> CommitteeMinimal<'a> {
        let Committee {
            validators,
            total_votes: _,
            quorum_threshold: _,
            validity_threshold: _,
            policy,
        } = committee;
        CommitteeMinimal {
            validators: Cow::Borrowed(validators),
            policy: Cow::Borrowed(policy),
        }
    }
}
impl std::fmt::Display for ValidatorName {
    /// Delegates to the inner key's `Display`, preserving any formatter flags.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
        self.0.fmt(f)
    }
}

impl std::str::FromStr for ValidatorName {
    type Err = CryptoError;

    /// Parses the string form of the underlying validator public key.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(ValidatorName(ValidatorPublicKey::from_str(s)?))
    }
}

impl From<ValidatorPublicKey> for ValidatorName {
    fn from(value: ValidatorPublicKey) -> Self {
        Self(value)
    }
}
impl Committee {
    /// Creates a committee, deriving the total votes and the quorum/validity
    /// thresholds from the validators' voting power.
    pub fn new(
        validators: BTreeMap<ValidatorPublicKey, ValidatorState>,
        policy: ResourceControlPolicy,
    ) -> Self {
        let total_votes = validators.values().fold(0, |sum, state| sum + state.votes);
        // The validity threshold is f + 1, where f is maximal so that it is less than a third.
        // So the threshold is N / 3, rounded up.
        let validity_threshold = total_votes.div_ceil(3);
        // The quorum threshold is minimal such that any two quorums intersect in at least one
        // validity threshold.
        let quorum_threshold = (total_votes + validity_threshold).div_ceil(2);
        Committee {
            validators,
            total_votes,
            quorum_threshold,
            validity_threshold,
            policy,
        }
    }

    /// Builds a test committee where every validator has 100 votes and a
    /// placeholder network address.
    #[cfg(with_testing)]
    pub fn make_simple(keys: Vec<(ValidatorPublicKey, AccountPublicKey)>) -> Self {
        let map = keys
            .into_iter()
            .map(|(validator_key, account_key)| {
                (
                    validator_key,
                    ValidatorState {
                        network_address: "Tcp:localhost:8080".to_string(),
                        votes: 100,
                        account_public_key: account_key,
                    },
                )
            })
            .collect();
        Committee::new(map, ResourceControlPolicy::default())
    }

    /// Returns the voting power of `author`, or 0 if it is not a member.
    pub fn weight(&self, author: &ValidatorPublicKey) -> u64 {
        match self.validators.get(author) {
            Some(state) => state.votes,
            None => 0,
        }
    }

    pub fn keys_and_weights(&self) -> impl Iterator<Item = (ValidatorPublicKey, u64)> + '_ {
        self.validators
            .iter()
            .map(|(name, validator)| (*name, validator.votes))
    }

    pub fn account_keys_and_weights(&self) -> impl Iterator<Item = (AccountPublicKey, u64)> + '_ {
        self.validators
            .values()
            .map(|validator| (validator.account_public_key, validator.votes))
    }

    /// Returns the network address of `author`, if it is a member.
    pub fn network_address(&self, author: &ValidatorPublicKey) -> Option<&str> {
        self.validators
            .get(author)
            .map(|state| state.network_address.as_ref())
    }

    pub fn quorum_threshold(&self) -> u64 {
        self.quorum_threshold
    }

    pub fn validity_threshold(&self) -> u64 {
        self.validity_threshold
    }

    pub fn validators(&self) -> &BTreeMap<ValidatorPublicKey, ValidatorState> {
        &self.validators
    }

    pub fn validator_addresses(&self) -> impl Iterator<Item = (ValidatorPublicKey, &str)> {
        self.validators
            .iter()
            .map(|(name, validator)| (*name, &*validator.network_address))
    }

    pub fn total_votes(&self) -> u64 {
        self.total_votes
    }

    pub fn policy(&self) -> &ResourceControlPolicy {
        &self.policy
    }

    /// Returns a mutable reference to this committee's [`ResourceControlPolicy`].
    pub fn policy_mut(&mut self) -> &mut ResourceControlPolicy {
        &mut self.policy
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/evm/inputs.rs | linera-execution/src/evm/inputs.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code specific to the input of functions, that is selectors,
//! constructor argument and instantiation argument.
use alloy_primitives::Bytes;
use linera_base::{
crypto::CryptoHash,
data_types::StreamUpdate,
ensure,
identifiers::{ApplicationId, ChainId, GenericApplicationId, StreamId, StreamName},
};
use revm_primitives::{address, Address, B256, U256};
use crate::EvmExecutionError;
// Solidity-compatible mirrors of Linera identifier types, used to ABI-encode
// values passed into EVM contracts (see the `From` impls below).
alloy_sol_types::sol! {
    struct InternalApplicationId {
        bytes32 application_description_hash;
    }

    struct InternalGenericApplicationId {
        uint8 choice;
        InternalApplicationId user;
    }

    struct InternalStreamName {
        bytes stream_name;
    }

    struct InternalStreamId {
        InternalGenericApplicationId application_id;
        InternalStreamName stream_name;
    }

    struct InternalChainId {
        bytes32 value;
    }

    struct InternalStreamUpdate {
        InternalChainId chain_id;
        InternalStreamId stream_id;
        uint32 previous_index;
        uint32 next_index;
    }

    function process_streams(InternalStreamUpdate[] internal_streams);
}

/// Converts a [`CryptoHash`] into a 32-byte EVM word, serializing its four
/// `u64` limbs in big-endian order.
fn crypto_hash_to_internal_crypto_hash(hash: CryptoHash) -> B256 {
    let hash = <[u64; 4]>::from(hash);
    let hash = linera_base::crypto::u64_array_to_be_bytes(hash);
    hash.into()
}
impl From<ApplicationId> for InternalApplicationId {
    fn from(application_id: ApplicationId) -> InternalApplicationId {
        let application_description_hash =
            crypto_hash_to_internal_crypto_hash(application_id.application_description_hash);
        InternalApplicationId {
            application_description_hash,
        }
    }
}

impl From<GenericApplicationId> for InternalGenericApplicationId {
    /// Encodes the enum as a tagged struct: `choice` 0 = system (with a zero
    /// hash placeholder), `choice` 1 = user application.
    fn from(generic_application_id: GenericApplicationId) -> InternalGenericApplicationId {
        match generic_application_id {
            GenericApplicationId::System => {
                let application_description_hash = B256::ZERO;
                InternalGenericApplicationId {
                    choice: 0,
                    user: InternalApplicationId {
                        application_description_hash,
                    },
                }
            }
            GenericApplicationId::User(application_id) => InternalGenericApplicationId {
                choice: 1,
                user: application_id.into(),
            },
        }
    }
}

impl From<ChainId> for InternalChainId {
    fn from(chain_id: ChainId) -> InternalChainId {
        let value = crypto_hash_to_internal_crypto_hash(chain_id.0);
        InternalChainId { value }
    }
}

impl From<StreamName> for InternalStreamName {
    fn from(stream_name: StreamName) -> InternalStreamName {
        let stream_name = Bytes::from(stream_name.0);
        InternalStreamName { stream_name }
    }
}

impl From<StreamId> for InternalStreamId {
    fn from(stream_id: StreamId) -> InternalStreamId {
        let application_id = stream_id.application_id.into();
        let stream_name = stream_id.stream_name.into();
        InternalStreamId {
            application_id,
            stream_name,
        }
    }
}

impl From<StreamUpdate> for InternalStreamUpdate {
    fn from(stream_update: StreamUpdate) -> InternalStreamUpdate {
        let chain_id = stream_update.chain_id.into();
        let stream_id = stream_update.stream_id.into();
        InternalStreamUpdate {
            chain_id,
            stream_id,
            previous_index: stream_update.previous_index,
            next_index: stream_update.next_index,
        }
    }
}
// This is the precompile address that contains the Linera specific
// functionalities accessed from the EVM.
pub(crate) const PRECOMPILE_ADDRESS: Address = address!("000000000000000000000000000000000000000b");

// This is the zero address used when no address can be obtained from `authenticated_owner`
// and `authenticated_caller_id`. This scenario does not occur if an Address20 user calls or
// if an EVM contract calls another EVM contract.
pub(crate) const ZERO_ADDRESS: Address = address!("0000000000000000000000000000000000000000");

// This is the address being used for service calls.
pub(crate) const SERVICE_ADDRESS: Address = address!("0000000000000000000000000000000000002000");

/// This is the address used for getting ethers and transfering them to.
pub(crate) const FAUCET_ADDRESS: Address = address!("0000000000000000000000000000000000004000");

// The faucet balance: the maximum positive value of a signed 256-bit integer.
pub(crate) const FAUCET_BALANCE: U256 = U256::from_limbs([
    0xffffffffffffffff,
    0xffffffffffffffff,
    0xffffffffffffffff,
    0x7fffffffffffffff,
]);

/// This is the selector of `execute_message` that should be called
/// only from a submitted message
pub(crate) const EXECUTE_MESSAGE_SELECTOR: &[u8] = &[173, 125, 234, 205];

/// This is the selector of `process_streams` that should be called
/// only from a submitted message
pub(crate) const PROCESS_STREAMS_SELECTOR: &[u8] = &[254, 72, 102, 28];

/// This is the selector of `instantiate` that should be called
/// only when creating a new instance of a shared contract
pub(crate) const INSTANTIATE_SELECTOR: &[u8] = &[156, 163, 60, 158];
pub(crate) fn forbid_execute_operation_origin(vec: &[u8]) -> Result<(), EvmExecutionError> {
ensure!(
vec != EXECUTE_MESSAGE_SELECTOR,
EvmExecutionError::IllegalOperationCall("function execute_message".to_string(),)
);
ensure!(
vec != PROCESS_STREAMS_SELECTOR,
EvmExecutionError::IllegalOperationCall("function process_streams".to_string(),)
);
ensure!(
vec != INSTANTIATE_SELECTOR,
EvmExecutionError::IllegalOperationCall("function instantiate".to_string(),)
);
Ok(())
}
pub(crate) fn ensure_message_length(
actual_length: usize,
min_length: usize,
) -> Result<(), EvmExecutionError> {
ensure!(
actual_length >= min_length,
EvmExecutionError::OperationIsTooShort
);
Ok(())
}
pub(crate) fn ensure_selector_presence(
module: &[u8],
selector: &[u8],
fct_name: &str,
) -> Result<(), EvmExecutionError> {
ensure!(
has_selector(module, selector),
EvmExecutionError::MissingFunction(fct_name.to_string())
);
Ok(())
}
/// Returns whether `module` contains the byte sequence `PUSH4 selector`,
/// the pattern Solidity emits when dispatching on a function selector.
///
/// Generalized: the scan window is sized from `selector.len()` instead of the
/// previous hard-coded 5 bytes, so selectors of any length are matched
/// correctly (behavior is unchanged for the usual 4-byte selectors).
pub(crate) fn has_selector(module: &[u8], selector: &[u8]) -> bool {
    let push4 = 0x63; // The EVM `PUSH4` opcode.
    let mut pattern = vec![push4];
    pattern.extend(selector);
    // `windows` yields nothing when `module` is shorter than the pattern,
    // so short or empty modules correctly report `false`.
    module.windows(pattern.len()).any(|window| window == pattern)
}
/// ABI-encodes a call to `instantiate(bytes)` carrying `value` as argument.
pub(crate) fn get_revm_instantiation_bytes(value: Vec<u8>) -> Vec<u8> {
    use alloy_primitives::Bytes;
    use alloy_sol_types::{sol, SolCall};
    sol! {
        function instantiate(bytes value);
    }
    let call = instantiateCall {
        value: Bytes::from(value),
    };
    call.abi_encode()
}
/// ABI-encodes a call to `execute_message(bytes)` carrying `value` as argument.
pub(crate) fn get_revm_execute_message_bytes(value: Vec<u8>) -> Vec<u8> {
    use alloy_primitives::Bytes;
    use alloy_sol_types::{sol, SolCall};
    sol! {
        function execute_message(bytes value);
    }
    let call = execute_messageCall {
        value: Bytes::from(value),
    };
    call.abi_encode()
}
/// ABI-encodes a call to `process_streams`, converting each Linera
/// `StreamUpdate` into its ABI-level counterpart first.
pub(crate) fn get_revm_process_streams_bytes(streams: Vec<StreamUpdate>) -> Vec<u8> {
    use alloy_sol_types::SolCall;
    // The collected type is inferred from the `internal_streams` field of the
    // generated `process_streamsCall` struct.
    let internal_streams = streams.into_iter().map(StreamUpdate::into).collect();
    let fct_call = process_streamsCall { internal_streams };
    fct_call.abi_encode()
}
#[cfg(test)]
mod tests {
    use revm_primitives::keccak256;
    use crate::evm::inputs::{
        process_streamsCall, EXECUTE_MESSAGE_SELECTOR, INSTANTIATE_SELECTOR,
        PROCESS_STREAMS_SELECTOR,
    };
    // The function `keccak256` is not `const`, so the selector constants cannot
    // be computed at compile time; these tests pin them to the signatures they
    // are derived from instead.
    #[test]
    fn check_execute_message_selector() {
        let selector = &keccak256("execute_message(bytes)".as_bytes())[..4];
        assert_eq!(selector, EXECUTE_MESSAGE_SELECTOR);
    }
    #[test]
    fn check_process_streams_selector() {
        use alloy_sol_types::SolCall;
        // Pin the full tuple-encoded signature so that a change to the
        // `StreamUpdate` ABI layout is caught here.
        assert_eq!(
            process_streamsCall::SIGNATURE,
            "process_streams(((bytes32),((uint8,(bytes32)),(bytes)),uint32,uint32)[])"
        );
        assert_eq!(process_streamsCall::SELECTOR, PROCESS_STREAMS_SELECTOR);
    }
    #[test]
    fn check_instantiate_selector() {
        let selector = &keccak256("instantiate(bytes)".as_bytes())[..4];
        assert_eq!(selector, INSTANTIATE_SELECTOR);
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/evm/database.rs | linera-execution/src/evm/database.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code specific to the usage of the [Revm](https://bluealloy.github.io/revm/) runtime.
//! Here we implement the Database traits of Revm.
use std::{
collections::HashMap,
sync::{Arc, Mutex},
};
use linera_base::{data_types::Amount, ensure, identifiers::Account, vm::VmRuntime};
use linera_views::common::from_bytes_option;
use revm::{primitives::keccak256, Database, DatabaseCommit, DatabaseRef};
use revm_context::BlockEnv;
use revm_context_interface::block::BlobExcessGasAndPrice;
use revm_database::{AccountState, DBErrorMarker};
use revm_primitives::{address, Address, B256, U256};
use revm_state::{AccountInfo, Bytecode, EvmState};
use crate::{
evm::inputs::{FAUCET_ADDRESS, FAUCET_BALANCE, ZERO_ADDRESS},
BaseRuntime, Batch, ContractRuntime, EvmExecutionError, ExecutionError, ServiceRuntime,
};
// The runtime costs are not available in service operations.
// We need to set a limit to gas usage in order to avoid blocking
// the validator.
// We set up the limit similarly to Infura to 20 million.
pub const EVM_SERVICE_GAS_LIMIT: u64 = 20_000_000;
// The following constants mirror the Ethereum gas schedule for storage
// access; they are used to reconstruct the EVM storage gas so it can be
// deducted from (or refunded to) the Linera bill.
/// The cost of loading from storage.
const SLOAD_COST: u64 = 2100;
/// The cost of storing a non-zero value in the storage for the first time.
const SSTORE_COST_SET: u64 = 20000;
/// The cost of not changing the state of the variable in the storage.
const SSTORE_COST_NO_OPERATION: u64 = 100;
/// The cost of overwriting the storage to a different value.
const SSTORE_COST_RESET: u64 = 2900;
/// The refund from releasing data.
const SSTORE_REFUND_RELEASE: u64 = 4800;
/// The number of key writes, reads, release, and no change in EVM has to be accounted for.
/// Then we remove those costs from the final bill.
#[derive(Clone, Default)]
pub(crate) struct StorageStats {
    // Writes that left the slot's value unchanged.
    key_no_operation: u64,
    // Writes that overwrote an existing non-zero value with a different one.
    key_reset: u64,
    // Writes that set a previously zero slot to a non-zero value.
    key_set: u64,
    // Writes that cleared a non-zero slot back to zero (refundable).
    key_release: u64,
    // Storage reads.
    key_read: u64,
}
impl StorageStats {
    /// Total EVM gas attributable to the recorded storage reads and writes.
    pub fn storage_costs(&self) -> u64 {
        self.key_no_operation * SSTORE_COST_NO_OPERATION
            + self.key_reset * SSTORE_COST_RESET
            + self.key_set * SSTORE_COST_SET
            + self.key_read * SLOAD_COST
    }

    /// Total EVM gas refund from the recorded storage releases.
    pub fn storage_refund(&self) -> u64 {
        self.key_release * SSTORE_REFUND_RELEASE
    }
}
/// This is the encapsulation of the `Runtime` corresponding to the contract.
pub(crate) struct DatabaseRuntime<Runtime> {
    /// This is the storage statistics of the read/write in order to adjust gas costs.
    storage_stats: Arc<Mutex<StorageStats>>,
    /// This is the EVM address of the contract.
    /// At the creation, it is set to `Address::ZERO` and then later set to the correct value
    /// (see `set_contract_address`).
    pub contract_address: Address,
    /// The caller to the smart contract.
    pub caller: Address,
    /// The value of the call to the smart contract.
    pub value: U256,
    /// The runtime of the contract, shared behind a lock so clones of this
    /// database observe the same runtime.
    pub runtime: Arc<Mutex<Runtime>>,
    /// The uncommitted changes to the contract, as produced by `commit` and
    /// later flushed by `commit_changes`.
    pub changes: EvmState,
    /// Whether the contract has been instantiated in REVM.
    pub is_revm_instantiated: bool,
    /// The error that can occur during runtime, recorded as a message via
    /// `insert_error` and surfaced by `process_any_error`.
    pub error: Arc<Mutex<Option<String>>>,
}
// Implemented manually because `#[derive(Clone)]` would require
// `Runtime: Clone`. The shared state (`storage_stats`, `runtime`, `error`)
// lives behind `Arc`s, so a clone aliases the same underlying runtime.
impl<Runtime> Clone for DatabaseRuntime<Runtime> {
    fn clone(&self) -> Self {
        Self {
            storage_stats: self.storage_stats.clone(),
            contract_address: self.contract_address,
            caller: self.caller,
            value: self.value,
            runtime: self.runtime.clone(),
            changes: self.changes.clone(),
            is_revm_instantiated: self.is_revm_instantiated,
            error: self.error.clone(),
        }
    }
}
/// Discriminant byte prepended to Linera storage keys to separate the
/// different kinds of data stored per EVM address.
#[repr(u8)]
pub enum KeyCategory {
    /// Serialized `AccountInfo` for an address.
    AccountInfo,
    /// Serialized `AccountState` for an address.
    AccountState,
    /// Prefix for the EVM storage slots of an address.
    Storage,
}
impl<Runtime: BaseRuntime> DatabaseRuntime<Runtime> {
/// Encodes the `index` of the EVM storage associated to the smart contract
/// in a Linera key.
fn get_linera_key(key_prefix: &[u8], index: U256) -> Vec<u8> {
let mut key = key_prefix.to_vec();
key.extend(index.as_le_slice());
key
}
/// Returns the tag associated to the contract.
fn get_address_key(prefix: u8, address: Address) -> Vec<u8> {
let mut key = vec![prefix];
key.extend(address);
key
}
/// Creates a new `DatabaseRuntime`.
pub fn new(runtime: Runtime) -> Self {
let storage_stats = StorageStats::default();
// We cannot acquire a lock on runtime here.
// So, we set the contract_address to a default value
// and update it later.
Self {
storage_stats: Arc::new(Mutex::new(storage_stats)),
contract_address: Address::ZERO,
caller: Address::ZERO,
value: U256::ZERO,
runtime: Arc::new(Mutex::new(runtime)),
changes: HashMap::new(),
is_revm_instantiated: false,
error: Arc::new(Mutex::new(None)),
}
}
/// Returns the current storage states and clears it to default.
pub fn take_storage_stats(&self) -> StorageStats {
let mut storage_stats_read = self.storage_stats.lock().unwrap();
let storage_stats = storage_stats_read.clone();
*storage_stats_read = StorageStats::default();
storage_stats
}
/// Insert error into the database
pub fn insert_error(&self, exec_error: ExecutionError) {
let mut error = self.error.lock().unwrap();
*error = Some(format!("Runtime error {:?}", exec_error));
}
/// Process the error.
pub fn process_any_error(&self) -> Result<(), EvmExecutionError> {
let error = self.error.lock().unwrap();
if let Some(error) = error.clone() {
return Err(EvmExecutionError::RuntimeError(error.clone()));
}
Ok(())
}
}
impl DBErrorMarker for ExecutionError {}
// The mutable `Database` API simply delegates to the read-only
// `DatabaseRef` implementation below: reads never mutate this type.
impl<Runtime> Database for DatabaseRuntime<Runtime>
where
    Runtime: BaseRuntime,
{
    type Error = ExecutionError;
    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, ExecutionError> {
        self.basic_ref(address)
    }
    // Deliberately unsupported: bytecode is resolved by address, never by
    // hash, in this backend.
    fn code_by_hash(&mut self, _code_hash: B256) -> Result<Bytecode, ExecutionError> {
        panic!("Functionality code_by_hash not implemented");
    }
    fn storage(&mut self, address: Address, index: U256) -> Result<U256, ExecutionError> {
        self.storage_ref(address, index)
    }
    fn block_hash(&mut self, number: u64) -> Result<B256, ExecutionError> {
        <Self as DatabaseRef>::block_hash_ref(self, number)
    }
}
impl<Runtime> DatabaseCommit for DatabaseRuntime<Runtime>
where
    Runtime: BaseRuntime,
{
    // Stages the EVM state produced by an execution; the changes are only
    // persisted to Linera storage later, by `commit_changes`.
    // NOTE(review): this replaces any previously staged changes wholesale
    // rather than merging — presumably at most one commit is staged at a
    // time; confirm against callers.
    fn commit(&mut self, changes: EvmState) {
        self.changes = changes;
    }
}
impl<Runtime> DatabaseRef for DatabaseRuntime<Runtime>
where
    Runtime: BaseRuntime,
{
    type Error = ExecutionError;
    /// The `basic_ref` is the function for reading the state of the application.
    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, ExecutionError> {
        // The faucet is virtual: it always reports a huge fixed balance and
        // is never read from or written to storage.
        if address == FAUCET_ADDRESS {
            return Ok(Some(AccountInfo {
                balance: FAUCET_BALANCE,
                ..AccountInfo::default()
            }));
        }
        if !self.changes.is_empty() {
            // This case occurs in only one scenario:
            // * A service call to a contract that has not yet been
            //   initialized by a contract call.
            // When we do a service call to a contract that has
            // already been initialized, then changes will be empty.
            let account = self.changes.get(&address);
            return Ok(account.map(|account| account.info.clone()));
        }
        let mut runtime = self.runtime.lock().unwrap();
        let account_owner = address.into();
        // The balances being used are the ones of Linera. So, we need to
        // access them at first.
        let balance = runtime.read_owner_balance(account_owner)?;
        let balance: U256 = balance.into();
        let key_info = Self::get_address_key(KeyCategory::AccountInfo as u8, address);
        let promise = runtime.read_value_bytes_new(key_info)?;
        let result = runtime.read_value_bytes_wait(&promise)?;
        let mut account_info = match result {
            None => AccountInfo::default(),
            Some(bytes) => bcs::from_bytes(&bytes)?,
        };
        // The design is the following:
        // * The funds have been deposited in deposit_funds.
        // * The order of the operations is the following:
        //   + Access to the storage (this function) of relevant accounts.
        //   + Transfer according to the input.
        //   + Running the constructor.
        // * So, the transfer is done twice: One at deposit_funds.
        //   Another in the transfer by REVM.
        // * So, we need to correct the balances so that when Revm
        //   is doing the transfer, the balances are the ones after
        //   deposit_funds.
        let start_balance = if self.caller == address {
            // Undo the deposit on the caller side so REVM's own transfer
            // starts from the pre-deposit balance.
            balance + self.value
        } else if self.contract_address == address {
            // Invariant: deposit_funds already credited the contract, so its
            // Linera balance must cover `self.value`.
            assert!(
                balance >= self.value,
                "We should have balance >= self.value"
            );
            balance - self.value
        } else {
            balance
        };
        account_info.balance = start_balance;
        // We return an account as there is no difference between
        // a default account and the absence of account.
        Ok(Some(account_info))
    }
    // Deliberately unsupported: bytecode is resolved by address, never by
    // hash, in this backend.
    fn code_by_hash_ref(&self, _code_hash: B256) -> Result<Bytecode, ExecutionError> {
        panic!("Functionality code_by_hash_ref not implemented");
    }
    /// Reads one EVM storage slot, either from staged changes or from Linera
    /// storage, counting the read for gas accounting.
    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, ExecutionError> {
        if !self.changes.is_empty() {
            // NOTE(review): this panics if `address` is absent from the staged
            // changes — presumably `basic_ref` has always been called for the
            // address first in this mode; confirm against callers.
            let account = self.changes.get(&address).unwrap();
            return Ok(match account.storage.get(&index) {
                None => U256::ZERO,
                Some(slot) => slot.present_value(),
            });
        }
        let key_prefix = Self::get_address_key(KeyCategory::Storage as u8, address);
        let key = Self::get_linera_key(&key_prefix, index);
        {
            // Scope the stats lock so it is released before the runtime lock
            // is taken below.
            let mut storage_stats = self.storage_stats.lock().unwrap();
            storage_stats.key_read += 1;
        }
        let result = {
            let mut runtime = self.runtime.lock().unwrap();
            let promise = runtime.read_value_bytes_new(key)?;
            runtime.read_value_bytes_wait(&promise)
        }?;
        // Missing slots read as zero, matching EVM semantics.
        Ok(from_bytes_option::<U256>(&result)?.unwrap_or_default())
    }
    // A deterministic pseudo block hash derived from the block number; there
    // is no real Ethereum chain history behind this backend.
    fn block_hash_ref(&self, number: u64) -> Result<B256, ExecutionError> {
        Ok(keccak256(number.to_string().as_bytes()))
    }
}
impl<Runtime> DatabaseRuntime<Runtime>
where
    Runtime: ContractRuntime,
{
    /// Effectively commits changes to storage.
    ///
    /// Flushes the staged `EvmState` (set by `DatabaseCommit::commit`) into a
    /// single Linera batch, updating the per-slot storage statistics used for
    /// gas accounting, then clears the staged changes.
    pub fn commit_changes(&mut self) -> Result<(), ExecutionError> {
        let mut storage_stats = self.storage_stats.lock().unwrap();
        let mut runtime = self.runtime.lock().unwrap();
        let mut batch = Batch::new();
        for (address, account) in &self.changes {
            if address == &FAUCET_ADDRESS {
                // We do not write the faucet address nor expect any coherency from it.
                continue;
            }
            // Sanity check: REVM's view of the balance must agree with the
            // Linera ledger before we persist anything for this address.
            let owner = (*address).into();
            let linera_balance: U256 = runtime.read_owner_balance(owner)?.into();
            let revm_balance = account.info.balance;
            ensure!(
                linera_balance == revm_balance,
                EvmExecutionError::IncoherentBalances(*address, linera_balance, revm_balance)
            );
            if !account.is_touched() {
                // Untouched accounts have nothing to persist.
                continue;
            }
            let key_prefix = Self::get_address_key(KeyCategory::Storage as u8, *address);
            let key_info = Self::get_address_key(KeyCategory::AccountInfo as u8, *address);
            let key_state = Self::get_address_key(KeyCategory::AccountState as u8, *address);
            if account.is_selfdestructed() {
                // Self-destruct: wipe all storage slots and reset the account
                // records to their empty markers.
                batch.delete_key_prefix(key_prefix);
                batch.put_key_value(key_info, &AccountInfo::default())?;
                batch.put_key_value(key_state, &AccountState::NotExisting)?;
            } else {
                let is_newly_created = account.is_created();
                // We write here the state of the user in question. But that does not matter
                batch.put_key_value(key_info, &account.info)?;
                let account_state = if is_newly_created {
                    // A freshly created account starts from cleared storage.
                    batch.delete_key_prefix(key_prefix.clone());
                    AccountState::StorageCleared
                } else {
                    // Preserve a previously recorded `StorageCleared` marker;
                    // otherwise the account is merely `Touched`.
                    let promise = runtime.read_value_bytes_new(key_state.clone())?;
                    let result = runtime.read_value_bytes_wait(&promise)?;
                    let account_state =
                        from_bytes_option::<AccountState>(&result)?.unwrap_or_default();
                    if account_state.is_storage_cleared() {
                        AccountState::StorageCleared
                    } else {
                        AccountState::Touched
                    }
                };
                batch.put_key_value(key_state, &account_state)?;
                // Persist each changed slot, classifying the write for the
                // EVM gas schedule (no-op / set / release / reset).
                for (index, value) in &account.storage {
                    if value.present_value() == value.original_value() {
                        storage_stats.key_no_operation += 1;
                    } else {
                        let key = Self::get_linera_key(&key_prefix, *index);
                        if value.original_value() == U256::ZERO {
                            batch.put_key_value(key, &value.present_value())?;
                            storage_stats.key_set += 1;
                        } else if value.present_value() == U256::ZERO {
                            batch.delete_key(key);
                            storage_stats.key_release += 1;
                        } else {
                            batch.put_key_value(key, &value.present_value())?;
                            storage_stats.key_reset += 1;
                        }
                    }
                }
            }
        }
        runtime.write_batch(batch)?;
        self.changes.clear();
        Ok(())
    }
}
impl<Runtime> DatabaseRuntime<Runtime>
where
    Runtime: BaseRuntime,
{
    /// Reads the nonce of the user, defaulting to 0 for unknown accounts.
    pub fn get_nonce(&self, address: &Address) -> Result<u64, ExecutionError> {
        let account_info: Option<AccountInfo> = self.basic_ref(*address)?;
        Ok(match account_info {
            None => 0,
            Some(account_info) => account_info.nonce,
        })
    }
    /// Returns the deployed bytecode of this contract, or an empty vector if
    /// no account record exists. Fails with `MissingBytecode` if the account
    /// exists but carries no code.
    pub fn get_deployed_bytecode(&self) -> Result<Vec<u8>, ExecutionError> {
        let account_info = self.basic_ref(self.contract_address)?;
        Ok(match account_info {
            None => Vec::new(),
            Some(account_info) => {
                let bytecode = account_info
                    .code
                    .ok_or(EvmExecutionError::MissingBytecode)?;
                bytecode.bytes_ref().to_vec()
            }
        })
    }
    /// Sets the EVM contract address from the value Address::ZERO.
    /// The value is set from the `ApplicationId`.
    pub fn set_contract_address(&mut self) -> Result<(), ExecutionError> {
        let mut runtime = self.runtime.lock().unwrap();
        let application_id = runtime.application_id()?;
        self.contract_address = application_id.evm_address();
        Ok(())
    }
    /// A contract is called initialized if the execution of the constructor
    /// with the constructor argument yield the storage and the deployed
    /// bytecode. The deployed bytecode is stored in the storage of the
    /// bytecode address.
    /// We determine whether the contract is already initialized, sets the
    /// `is_revm_instantiated` flag and then returns the result.
    pub fn set_is_initialized(&mut self) -> Result<bool, ExecutionError> {
        let mut runtime = self.runtime.lock().unwrap();
        let evm_address = runtime.application_id()?.evm_address();
        // Presence of the AccountInfo key is used as the initialization marker.
        let key_info = Self::get_address_key(KeyCategory::AccountInfo as u8, evm_address);
        let promise = runtime.contains_key_new(key_info)?;
        let result = runtime.contains_key_wait(&promise)?;
        self.is_revm_instantiated = result;
        Ok(result)
    }
    /// Builds the EVM block environment from the Linera runtime state
    /// (block height, timestamp, chain id), with EVM-only fields filled with
    /// fixed placeholder values.
    pub fn get_block_env(&self) -> Result<BlockEnv, ExecutionError> {
        let mut runtime = self.runtime.lock().unwrap();
        // The block height being used
        let block_height_linera = runtime.block_height()?;
        let block_height_evm = block_height_linera.0;
        // This is the receiver address of all the gas spent in the block.
        let beneficiary = address!("00000000000000000000000000000000000000bb");
        // The difficulty which is no longer relevant after The Merge.
        let difficulty = U256::ZERO;
        // We do not have access to the Resources so we keep it to the maximum
        // and the control is done elsewhere.
        let gas_limit = u64::MAX;
        // The timestamp. Both the EVM and Linera use the same UNIX epoch.
        // But the Linera epoch is in microseconds since the start and the
        // Ethereum epoch is in seconds
        let timestamp_linera = runtime.read_system_timestamp()?;
        let timestamp_evm = timestamp_linera.micros() / 1_000_000;
        // The base fee is the minimum fee for executing a transaction.
        // We have no such concept in Linera.
        let basefee = 0;
        let chain_id = runtime.chain_id()?;
        let entry = format!("{}{}", chain_id, block_height_linera);
        // The randomness beacon being used: deterministic per
        // (chain, block height).
        let prevrandao = keccak256(entry.as_bytes());
        // The blob excess gas and price is not relevant to the execution
        // on Linera. We set up a default value as in REVM.
        let entry = BlobExcessGasAndPrice {
            excess_blob_gas: 0,
            blob_gasprice: 1,
        };
        let blob_excess_gas_and_price = Some(entry);
        Ok(BlockEnv {
            number: block_height_evm,
            beneficiary,
            difficulty,
            gas_limit,
            timestamp: timestamp_evm,
            basefee,
            prevrandao: Some(prevrandao),
            blob_excess_gas_and_price,
        })
    }
    /// Returns the constructor argument, which is stored JSON-serialized in
    /// the application parameters.
    pub fn constructor_argument(&self) -> Result<Vec<u8>, ExecutionError> {
        let mut runtime = self.runtime.lock().unwrap();
        let constructor_argument = runtime.application_parameters()?;
        Ok(serde_json::from_slice::<Vec<u8>>(&constructor_argument)?)
    }
}
impl<Runtime> DatabaseRuntime<Runtime>
where
    Runtime: ContractRuntime,
{
    /// Builds the block environment for contract execution, with the gas
    /// limit taken from the runtime's per-block fuel budget.
    pub fn get_contract_block_env(&self) -> Result<BlockEnv, ExecutionError> {
        let mut block_env = self.get_block_env()?;
        let mut runtime = self.runtime.lock().unwrap();
        // We use the gas_limit from the runtime
        let gas_limit = runtime.maximum_fuel_per_block(VmRuntime::Evm)?;
        block_env.gas_limit = gas_limit;
        Ok(block_env)
    }
    /// Transfers `self.value` from the caller to the application account on
    /// the Linera ledger, ahead of the EVM execution (see the balance
    /// correction in `basic_ref`). No-op when the value is zero.
    pub fn deposit_funds(&self) -> Result<(), ExecutionError> {
        if self.value != U256::ZERO {
            // A non-zero transfer needs an identifiable signer.
            if self.caller == ZERO_ADDRESS {
                let error = EvmExecutionError::UnknownSigner;
                return Err(error.into());
            }
            let source = self.caller.into();
            let amount = Amount::try_from(self.value).map_err(EvmExecutionError::from)?;
            let mut runtime = self.runtime.lock().expect("The lock should be possible");
            let chain_id = runtime.chain_id()?;
            let application_id = runtime.application_id()?;
            let owner = application_id.into();
            let destination = Account { chain_id, owner };
            // Only transfer for direct (non-application) callers.
            // NOTE(review): when another application is the authenticated
            // caller, the deposit is skipped — presumably handled elsewhere;
            // confirm against the cross-application call path.
            let authenticated_caller = runtime.authenticated_caller_id()?;
            if authenticated_caller.is_none() {
                runtime.transfer(source, destination, amount)?;
            }
        }
        Ok(())
    }
}
impl<Runtime> DatabaseRuntime<Runtime>
where
    Runtime: ServiceRuntime,
{
    /// Builds the block environment for service execution; services have no
    /// fuel metering, so the gas limit is capped at `EVM_SERVICE_GAS_LIMIT`.
    pub fn get_service_block_env(&self) -> Result<BlockEnv, ExecutionError> {
        self.get_block_env().map(|mut block_env| {
            block_env.gas_limit = EVM_SERVICE_GAS_LIMIT;
            block_env
        })
    }
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/evm/revm.rs | linera-execution/src/evm/revm.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Code specific to the usage of the [Revm](https://bluealloy.github.io/revm/) runtime.
use core::ops::Range;
use std::{
collections::BTreeSet,
convert::TryFrom,
sync::{Arc, Mutex},
};
#[cfg(with_metrics)]
use linera_base::prometheus_util::MeasureLatency as _;
use linera_base::{
crypto::CryptoHash,
data_types::{
Amount, ApplicationDescription, Bytecode, Resources, SendMessageRequest, StreamUpdate,
},
ensure,
identifiers::{Account, AccountOwner, ApplicationId, ChainId, ModuleId, StreamName},
vm::{EvmInstantiation, EvmOperation, EvmQuery, VmRuntime},
};
use revm::{primitives::Bytes, InspectCommitEvm, InspectEvm, Inspector};
use revm_context::{
result::{ExecutionResult, Output, SuccessReason},
BlockEnv, Cfg, ContextTr, Evm, Journal, JournalTr, LocalContextTr as _, TxEnv,
};
use revm_database::WrapDatabaseRef;
use revm_handler::{
instructions::EthInstructions, EthPrecompiles, MainnetContext, PrecompileProvider,
};
use revm_interpreter::{
CallInput, CallInputs, CallOutcome, CallValue, CreateInputs, CreateOutcome, CreateScheme, Gas,
InputsImpl, InstructionResult, InterpreterResult,
};
use revm_primitives::{hardfork::SpecId, Address, Log, TxKind, U256};
use revm_state::EvmState;
use serde::{Deserialize, Serialize};
use crate::{
evm::{
data_types::AmountU256,
database::{DatabaseRuntime, StorageStats, EVM_SERVICE_GAS_LIMIT},
inputs::{
ensure_message_length, ensure_selector_presence, forbid_execute_operation_origin,
get_revm_execute_message_bytes, get_revm_instantiation_bytes,
get_revm_process_streams_bytes, has_selector, EXECUTE_MESSAGE_SELECTOR, FAUCET_ADDRESS,
INSTANTIATE_SELECTOR, PRECOMPILE_ADDRESS, PROCESS_STREAMS_SELECTOR, SERVICE_ADDRESS,
ZERO_ADDRESS,
},
},
BaseRuntime, ContractRuntime, ContractSyncRuntimeHandle, DataBlobHash, EvmExecutionError,
EvmRuntime, ExecutionError, ServiceRuntime, ServiceSyncRuntimeHandle, UserContract,
UserContractInstance, UserContractModule, UserService, UserServiceInstance, UserServiceModule,
};
/// The selector when calling for `InterpreterResult`. This is a fictional
/// selector that does not correspond to a real function.
const INTERPRETER_RESULT_SELECTOR: &[u8] = &[1, 2, 3, 4];
/// The selector when accessing the deployed bytecode. This is a fictional
/// selector that does not correspond to a real function.
const GET_DEPLOYED_BYTECODE_SELECTOR: &[u8] = &[21, 34, 55, 89];
/// The JSON serialization of an empty vector: the bytes of the string `[]`.
const JSON_EMPTY_VECTOR: &[u8] = &[91, 93];
// Prometheus histograms tracking how long EVM contract/service instantiation
// takes; only compiled in when metrics support is enabled.
#[cfg(with_metrics)]
mod metrics {
    use std::sync::LazyLock;
    use linera_base::prometheus_util::{exponential_bucket_latencies, register_histogram_vec};
    use prometheus::HistogramVec;
    pub static CONTRACT_INSTANTIATION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "evm_contract_instantiation_latency",
            "EVM contract instantiation latency",
            &[],
            exponential_bucket_latencies(1.0),
        )
    });
    pub static SERVICE_INSTANTIATION_LATENCY: LazyLock<HistogramVec> = LazyLock::new(|| {
        register_histogram_vec(
            "evm_service_instantiation_latency",
            "EVM service instantiation latency",
            &[],
            exponential_bucket_latencies(1.0),
        )
    });
}
/// A user contract in a compiled EVM module.
#[derive(Clone)]
pub enum EvmContractModule {
    /// The raw contract bytecode, executed with the Revm interpreter.
    #[cfg(with_revm)]
    Revm { module: Vec<u8> },
}
impl EvmContractModule {
    /// Creates a new [`EvmContractModule`] using the EVM module with the provided `contract_bytecode`.
    pub fn new(
        contract_bytecode: Bytecode,
        runtime: EvmRuntime,
    ) -> Result<Self, EvmExecutionError> {
        match runtime {
            #[cfg(with_revm)]
            EvmRuntime::Revm => Self::from_revm(contract_bytecode),
        }
    }
    /// Creates a new [`EvmContractModule`] using the EVM module in `contract_bytecode_file`.
    #[cfg(with_fs)]
    pub fn from_file(
        contract_bytecode_file: impl AsRef<std::path::Path>,
        runtime: EvmRuntime,
    ) -> Result<Self, EvmExecutionError> {
        Self::new(
            Bytecode::load_from_file(contract_bytecode_file)
                .map_err(anyhow::Error::from)
                .map_err(EvmExecutionError::LoadContractModule)?,
            runtime,
        )
    }
    /// Creates a new [`EvmContractModule`] for Revm from the provided bytecode.
    pub fn from_revm(contract_bytecode: Bytecode) -> Result<Self, EvmExecutionError> {
        let module = contract_bytecode.bytes;
        Ok(EvmContractModule::Revm { module })
    }
}
impl UserContractModule for EvmContractModule {
    /// Instantiates a contract instance bound to `runtime`, measuring
    /// instantiation latency when metrics are enabled.
    fn instantiate(
        &self,
        runtime: ContractSyncRuntimeHandle,
    ) -> Result<UserContractInstance, ExecutionError> {
        #[cfg(with_metrics)]
        let _instantiation_latency = metrics::CONTRACT_INSTANTIATION_LATENCY.measure_latency();
        let instance: UserContractInstance = match self {
            #[cfg(with_revm)]
            EvmContractModule::Revm { module } => {
                Box::new(RevmContractInstance::prepare(module.to_vec(), runtime))
            }
        };
        Ok(instance)
    }
}
/// A user service in a compiled EVM module.
#[derive(Clone)]
pub enum EvmServiceModule {
    /// The raw service bytecode, executed with the Revm interpreter.
    #[cfg(with_revm)]
    Revm { module: Vec<u8> },
}
impl EvmServiceModule {
    /// Creates a new [`EvmServiceModule`] using the EVM module with the provided bytecode.
    pub fn new(service_bytecode: Bytecode, runtime: EvmRuntime) -> Result<Self, EvmExecutionError> {
        match runtime {
            #[cfg(with_revm)]
            EvmRuntime::Revm => Self::from_revm(service_bytecode),
        }
    }
    /// Creates a new [`EvmServiceModule`] using the EVM module in `service_bytecode_file`.
    #[cfg(with_fs)]
    pub fn from_file(
        service_bytecode_file: impl AsRef<std::path::Path>,
        runtime: EvmRuntime,
    ) -> Result<Self, EvmExecutionError> {
        Self::new(
            Bytecode::load_from_file(service_bytecode_file)
                .map_err(anyhow::Error::from)
                .map_err(EvmExecutionError::LoadServiceModule)?,
            runtime,
        )
    }
    /// Creates a new [`EvmServiceModule`] for Revm from the provided bytecode.
    pub fn from_revm(contract_bytecode: Bytecode) -> Result<Self, EvmExecutionError> {
        let module = contract_bytecode.bytes;
        Ok(EvmServiceModule::Revm { module })
    }
}
impl UserServiceModule for EvmServiceModule {
    /// Instantiates a service instance bound to `runtime`, measuring
    /// instantiation latency when metrics are enabled.
    fn instantiate(
        &self,
        runtime: ServiceSyncRuntimeHandle,
    ) -> Result<UserServiceInstance, ExecutionError> {
        #[cfg(with_metrics)]
        let _instantiation_latency = metrics::SERVICE_INSTANTIATION_LATENCY.measure_latency();
        let instance: UserServiceInstance = match self {
            #[cfg(with_revm)]
            EvmServiceModule::Revm { module } => {
                Box::new(RevmServiceInstance::prepare(module.to_vec(), runtime))
            }
        };
        Ok(instance)
    }
}
type Ctx<'a, Runtime> = MainnetContext<WrapDatabaseRef<&'a mut DatabaseRuntime<Runtime>>>;
/// Derives an `ApplicationId` from a 20-byte EVM address by zero-padding it
/// to the 32 bytes of a `CryptoHash`.
fn address_to_user_application_id(address: Address) -> ApplicationId {
    let mut bytes = [0_u8; 32];
    bytes[..20].copy_from_slice(address.as_ref());
    ApplicationId::new(CryptoHash::try_from(&bytes[..]).unwrap())
}
/// Some functionalities from the BaseRuntime
#[derive(Debug, Serialize, Deserialize)]
enum BaseRuntimePrecompile {
    /// Calling `chain_id` of `BaseRuntime`
    ChainId,
    /// Calling `block_height` of `BaseRuntime`
    BlockHeight,
    /// Calling `application_creator_chain_id` of `BaseRuntime`
    ApplicationCreatorChainId,
    /// Calling `read_system_timestamp` of `BaseRuntime`
    ReadSystemTimestamp,
    /// Calling `read_chain_balance` of `BaseRuntime`
    ReadChainBalance,
    /// Calling `read_owner_balance` of `BaseRuntime`
    ReadOwnerBalance(AccountOwner),
    /// Calling `read_owner_balances` of `BaseRuntime`
    ReadOwnerBalances,
    /// Calling `read_balance_owners` of `BaseRuntime`
    ReadBalanceOwners,
    /// Calling `chain_ownership` of `BaseRuntime`
    ChainOwnership,
    /// Calling `read_data_blob` of `BaseRuntime`
    ReadDataBlob(DataBlobHash),
    /// Calling `assert_data_blob_exists` of `BaseRuntime`
    AssertDataBlobExists(DataBlobHash),
}
/// Some functionalities from the ContractRuntime not in BaseRuntime
#[derive(Debug, Serialize, Deserialize)]
enum ContractRuntimePrecompile {
    /// Calling `authenticated_owner` of `ContractRuntime`
    AuthenticatedOwner,
    /// Calling `message_origin_chain_id` of `ContractRuntime`
    MessageOriginChainId,
    /// Calling `message_is_bouncing` of `ContractRuntime`
    MessageIsBouncing,
    /// Calling `authenticated_caller_id` of `ContractRuntime`
    AuthenticatedCallerId,
    /// Calling `send_message` of `ContractRuntime`
    SendMessage {
        destination: ChainId,
        message: Vec<u8>,
    },
    /// Calling `try_call_application` of `ContractRuntime`
    TryCallApplication {
        target: ApplicationId,
        argument: Vec<u8>,
    },
    /// Calling `emit` of `ContractRuntime`
    Emit {
        stream_name: StreamName,
        value: Vec<u8>,
    },
    /// Calling `read_event` of `ContractRuntime`
    ReadEvent {
        chain_id: ChainId,
        stream_name: StreamName,
        index: u32,
    },
    /// Calling `subscribe_to_events` of `ContractRuntime`
    SubscribeToEvents {
        chain_id: ChainId,
        application_id: ApplicationId,
        stream_name: StreamName,
    },
    /// Calling `unsubscribe_from_events` of `ContractRuntime`
    UnsubscribeFromEvents {
        chain_id: ChainId,
        application_id: ApplicationId,
        stream_name: StreamName,
    },
    /// Calling `query_service` of `ContractRuntime`
    QueryService {
        application_id: ApplicationId,
        query: Vec<u8>,
    },
    /// Calling `validation_round` of `ContractRuntime`
    ValidationRound,
    /// Calling `transfer` of `ContractRuntime`
    Transfer {
        account: Account,
        amount: AmountU256,
    },
}
/// Some functionalities from the ServiceRuntime not in BaseRuntime
#[derive(Debug, Serialize, Deserialize)]
enum ServiceRuntimePrecompile {
    /// Calling `try_query_application` of `ServiceRuntime`
    TryQueryApplication {
        target: ApplicationId,
        argument: Vec<u8>,
    },
}
/// Envelope distinguishing which runtime API family a precompile request
/// targets; serialized as the payload of calls to the Linera precompile
/// address.
#[derive(Debug, Serialize, Deserialize)]
enum RuntimePrecompile {
    Base(BaseRuntimePrecompile),
    Contract(ContractRuntimePrecompile),
    Service(ServiceRuntimePrecompile),
}
/// Wraps a precompile's raw return bytes into an `InterpreterResult`.
///
/// The gas usage is set to `gas_limit` and no spending is being done on it.
/// This means that for Revm, it looks like the precompile call costs nothing.
/// This is because the costs of the EVM precompile calls is accounted for
/// separately in Linera.
fn get_precompile_output(output: Vec<u8>, gas_limit: u64) -> InterpreterResult {
    InterpreterResult {
        result: InstructionResult::default(),
        output: Bytes::from(output),
        gas: Gas::new(gas_limit),
    }
}
/// Extracts the raw call-data bytes from a `CallInput`, resolving
/// shared-buffer inputs through the context's local memory; an unresolvable
/// buffer range yields an empty vector.
fn get_argument<Ctx: ContextTr>(context: &mut Ctx, input: &CallInput) -> Vec<u8> {
    match input {
        CallInput::Bytes(bytes) => bytes.to_vec(),
        CallInput::SharedBuffer(range) => context
            .local()
            .shared_memory_buffer_slice(range.clone())
            .map_or_else(Vec::new, |slice| slice.to_vec()),
    }
}
/// Returns the transferred value of a call; apparent (delegate-call style)
/// values are rejected with `NoDelegateCall`.
fn get_value(call_value: &CallValue) -> Result<U256, EvmExecutionError> {
    if let CallValue::Transfer(value) = call_value {
        Ok(*value)
    } else {
        Err(EvmExecutionError::NoDelegateCall)
    }
}
/// Extracts the raw argument bytes for a precompile invocation.
/// The `Result` return mirrors the sibling argument helpers even though this
/// extraction itself cannot fail.
fn get_precompile_argument<Ctx: ContextTr>(
    context: &mut Ctx,
    inputs: &InputsImpl,
) -> Result<Vec<u8>, ExecutionError> {
    Ok(get_argument(context, &inputs.input))
}
/// Builds the argument for a cross-application service call: rejects any
/// value transfer (services cannot move funds), then prefixes the call data
/// with `INTERPRETER_RESULT_SELECTOR`.
fn get_call_service_argument<Ctx: ContextTr>(
    context: &mut Ctx,
    inputs: &CallInputs,
) -> Result<Vec<u8>, ExecutionError> {
    ensure!(
        get_value(&inputs.value)? == U256::ZERO,
        EvmExecutionError::NoTransferInServices
    );
    let mut argument = INTERPRETER_RESULT_SELECTOR.to_vec();
    argument.extend(&get_argument(context, &inputs.input));
    Ok(argument)
}
/// Builds the argument for a cross-application contract call: BCS-encodes the
/// call value and data as an `EvmOperation`, prefixed with
/// `INTERPRETER_RESULT_SELECTOR`. Also returns the original call-data length.
fn get_call_contract_argument<Ctx: ContextTr>(
    context: &mut Ctx,
    inputs: &CallInputs,
) -> Result<(Vec<u8>, usize), ExecutionError> {
    let mut final_argument = INTERPRETER_RESULT_SELECTOR.to_vec();
    let value = get_value(&inputs.value)?;
    let argument = get_argument(context, &inputs.input);
    let n_input = argument.len();
    let evm_operation = EvmOperation { value, argument };
    let argument = bcs::to_bytes(&evm_operation)?;
    final_argument.extend(&argument);
    Ok((final_argument, n_input))
}
/// Dispatches a `BaseRuntimePrecompile` request to the underlying runtime and
/// BCS-serializes the response bytes.
fn base_runtime_call<Runtime: BaseRuntime>(
    request: BaseRuntimePrecompile,
    context: &mut Ctx<'_, Runtime>,
) -> Result<Vec<u8>, ExecutionError> {
    let mut runtime = context.db().0.runtime.lock().unwrap();
    match request {
        BaseRuntimePrecompile::ChainId => {
            let chain_id = runtime.chain_id()?;
            Ok(bcs::to_bytes(&chain_id)?)
        }
        BaseRuntimePrecompile::BlockHeight => {
            let block_height = runtime.block_height()?;
            Ok(bcs::to_bytes(&block_height)?)
        }
        BaseRuntimePrecompile::ApplicationCreatorChainId => {
            let chain_id = runtime.application_creator_chain_id()?;
            Ok(bcs::to_bytes(&chain_id)?)
        }
        BaseRuntimePrecompile::ReadSystemTimestamp => {
            let timestamp = runtime.read_system_timestamp()?;
            Ok(bcs::to_bytes(&timestamp)?)
        }
        BaseRuntimePrecompile::ReadChainBalance => {
            // Balances cross the precompile boundary as `AmountU256`.
            let balance: linera_base::data_types::Amount = runtime.read_chain_balance()?;
            let balance: AmountU256 = balance.into();
            Ok(bcs::to_bytes(&balance)?)
        }
        BaseRuntimePrecompile::ReadOwnerBalance(account_owner) => {
            let balance = runtime.read_owner_balance(account_owner)?;
            let balance = Into::<U256>::into(balance);
            Ok(bcs::to_bytes(&balance)?)
        }
        BaseRuntimePrecompile::ReadOwnerBalances => {
            let owner_balances = runtime.read_owner_balances()?;
            let owner_balances = owner_balances
                .into_iter()
                .map(|(account_owner, balance)| (account_owner, balance.into()))
                .collect::<Vec<(AccountOwner, AmountU256)>>();
            Ok(bcs::to_bytes(&owner_balances)?)
        }
        BaseRuntimePrecompile::ReadBalanceOwners => {
            let owners = runtime.read_balance_owners()?;
            Ok(bcs::to_bytes(&owners)?)
        }
        BaseRuntimePrecompile::ChainOwnership => {
            let chain_ownership = runtime.chain_ownership()?;
            Ok(bcs::to_bytes(&chain_ownership)?)
        }
        // Blob contents are already raw bytes; no BCS wrapping needed.
        BaseRuntimePrecompile::ReadDataBlob(hash) => runtime.read_data_blob(hash),
        BaseRuntimePrecompile::AssertDataBlobExists(hash) => {
            runtime.assert_data_blob_exists(hash)?;
            Ok(Vec::new())
        }
    }
}
fn precompile_addresses() -> BTreeSet<Address> {
let mut addresses = BTreeSet::new();
for address in EthPrecompiles::default().warm_addresses() {
addresses.insert(address);
}
addresses.insert(PRECOMPILE_ADDRESS);
addresses
}
#[derive(Debug, Default)]
struct ContractPrecompile {
inner: EthPrecompiles,
}
impl<'a, Runtime: ContractRuntime> PrecompileProvider<Ctx<'a, Runtime>> for ContractPrecompile {
type Output = InterpreterResult;
fn set_spec(&mut self, spec: <<Ctx<'a, Runtime> as ContextTr>::Cfg as Cfg>::Spec) -> bool {
<EthPrecompiles as PrecompileProvider<Ctx<'a, Runtime>>>::set_spec(&mut self.inner, spec)
}
fn run(
&mut self,
context: &mut Ctx<'a, Runtime>,
address: &Address,
inputs: &InputsImpl,
is_static: bool,
gas_limit: u64,
) -> Result<Option<InterpreterResult>, String> {
if address == &PRECOMPILE_ADDRESS {
let output = Self::call_or_fail(inputs, context)
.map_err(|error| format!("ContractPrecompile error: {error}"))?;
return Ok(Some(get_precompile_output(output, gas_limit)));
}
self.inner
.run(context, address, inputs, is_static, gas_limit)
}
fn warm_addresses(&self) -> Box<impl Iterator<Item = Address>> {
let mut addresses = self.inner.warm_addresses().collect::<Vec<Address>>();
addresses.push(PRECOMPILE_ADDRESS);
Box::new(addresses.into_iter())
}
fn contains(&self, address: &Address) -> bool {
address == &PRECOMPILE_ADDRESS || self.inner.contains(address)
}
}
fn get_evm_destination<Runtime: ContractRuntime>(
context: &mut Ctx<'_, Runtime>,
account: Account,
) -> Result<Option<Address>, ExecutionError> {
let mut runtime = context.db().0.runtime.lock().unwrap();
if runtime.chain_id()? != account.chain_id {
return Ok(None);
}
Ok(account.owner.to_evm_address())
}
/// We are doing transfers of value from a source to a destination.
fn revm_transfer<Runtime: ContractRuntime>(
context: &mut Ctx<'_, Runtime>,
source: Address,
destination: Address,
value: U256,
) -> Result<(), ExecutionError> {
if let Some(error) = context.journal().transfer(source, destination, value)? {
let error = format!("{error:?}");
let error = EvmExecutionError::TransactError(error);
return Err(error.into());
}
Ok(())
}
impl<'a> ContractPrecompile {
fn contract_runtime_call<Runtime: ContractRuntime>(
request: ContractRuntimePrecompile,
context: &mut Ctx<'a, Runtime>,
) -> Result<Vec<u8>, ExecutionError> {
match request {
ContractRuntimePrecompile::AuthenticatedOwner => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let account_owner = runtime.authenticated_owner()?;
Ok(bcs::to_bytes(&account_owner)?)
}
ContractRuntimePrecompile::MessageOriginChainId => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let origin_chain_id = runtime.message_origin_chain_id()?;
Ok(bcs::to_bytes(&origin_chain_id)?)
}
ContractRuntimePrecompile::MessageIsBouncing => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let result = runtime.message_is_bouncing()?;
Ok(bcs::to_bytes(&result)?)
}
ContractRuntimePrecompile::AuthenticatedCallerId => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let application_id = runtime.authenticated_caller_id()?;
Ok(bcs::to_bytes(&application_id)?)
}
ContractRuntimePrecompile::SendMessage {
destination,
message,
} => {
let authenticated = true;
let is_tracked = true;
let grant = Resources::default();
let send_message_request = SendMessageRequest {
destination,
authenticated,
is_tracked,
grant,
message,
};
let mut runtime = context.db().0.runtime.lock().unwrap();
runtime.send_message(send_message_request)?;
Ok(vec![])
}
ContractRuntimePrecompile::TryCallApplication { target, argument } => {
let authenticated = true;
let mut runtime = context.db().0.runtime.lock().unwrap();
ensure!(
target != runtime.application_id()?,
EvmExecutionError::NoSelfCall
);
runtime.try_call_application(authenticated, target, argument)
}
ContractRuntimePrecompile::Emit { stream_name, value } => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let result = runtime.emit(stream_name, value)?;
Ok(bcs::to_bytes(&result)?)
}
ContractRuntimePrecompile::ReadEvent {
chain_id,
stream_name,
index,
} => {
let mut runtime = context.db().0.runtime.lock().unwrap();
runtime.read_event(chain_id, stream_name, index)
}
ContractRuntimePrecompile::SubscribeToEvents {
chain_id,
application_id,
stream_name,
} => {
let mut runtime = context.db().0.runtime.lock().unwrap();
runtime.subscribe_to_events(chain_id, application_id, stream_name)?;
Ok(vec![])
}
ContractRuntimePrecompile::UnsubscribeFromEvents {
chain_id,
application_id,
stream_name,
} => {
let mut runtime = context.db().0.runtime.lock().unwrap();
runtime.unsubscribe_from_events(chain_id, application_id, stream_name)?;
Ok(vec![])
}
ContractRuntimePrecompile::QueryService {
application_id,
query,
} => {
let mut runtime = context.db().0.runtime.lock().unwrap();
ensure!(
application_id != runtime.application_id()?,
EvmExecutionError::NoSelfCall
);
runtime.query_service(application_id, query)
}
ContractRuntimePrecompile::ValidationRound => {
let mut runtime = context.db().0.runtime.lock().unwrap();
let value = runtime.validation_round()?;
Ok(bcs::to_bytes(&value)?)
}
ContractRuntimePrecompile::Transfer { account, amount } => {
if amount.0 != U256::ZERO {
let destination = {
let destination = get_evm_destination(context, account)?;
destination.unwrap_or(FAUCET_ADDRESS)
};
let application_id = {
let mut runtime = context.db().0.runtime.lock().unwrap();
let application_id = runtime.application_id()?;
let source = application_id.into();
let value = Amount::try_from(amount.0).map_err(EvmExecutionError::from)?;
runtime.transfer(source, account, value)?;
application_id
};
let source: Address = application_id.evm_address();
revm_transfer(context, source, destination, amount.0)?;
}
Ok(vec![])
}
}
}
fn call_or_fail<Runtime: ContractRuntime>(
inputs: &InputsImpl,
context: &mut Ctx<'a, Runtime>,
) -> Result<Vec<u8>, ExecutionError> {
let input = get_precompile_argument(context, inputs)?;
match bcs::from_bytes(&input)? {
RuntimePrecompile::Base(base_tag) => base_runtime_call(base_tag, context),
RuntimePrecompile::Contract(contract_tag) => {
Self::contract_runtime_call(contract_tag, context)
}
RuntimePrecompile::Service(_) => Err(EvmExecutionError::PrecompileError(
"Service tags are not available in GeneralContractCall".to_string(),
)
.into()),
}
}
}
#[derive(Debug, Default)]
struct ServicePrecompile {
inner: EthPrecompiles,
}
impl<'a> ServicePrecompile {
fn service_runtime_call<Runtime: ServiceRuntime>(
request: ServiceRuntimePrecompile,
context: &mut Ctx<'a, Runtime>,
) -> Result<Vec<u8>, ExecutionError> {
let mut runtime = context.db().0.runtime.lock().unwrap();
match request {
ServiceRuntimePrecompile::TryQueryApplication { target, argument } => {
ensure!(
target != runtime.application_id()?,
EvmExecutionError::NoSelfCall
);
runtime.try_query_application(target, argument)
}
}
}
fn call_or_fail<Runtime: ServiceRuntime>(
inputs: &InputsImpl,
context: &mut Ctx<'a, Runtime>,
) -> Result<Vec<u8>, ExecutionError> {
let input = get_precompile_argument(context, inputs)?;
match bcs::from_bytes(&input)? {
RuntimePrecompile::Base(base_tag) => base_runtime_call(base_tag, context),
RuntimePrecompile::Contract(_) => Err(EvmExecutionError::PrecompileError(
"Contract calls are not available in GeneralServiceCall".to_string(),
)
.into()),
RuntimePrecompile::Service(service_tag) => {
Self::service_runtime_call(service_tag, context)
}
}
}
}
impl<'a, Runtime: ServiceRuntime> PrecompileProvider<Ctx<'a, Runtime>> for ServicePrecompile {
type Output = InterpreterResult;
fn set_spec(&mut self, spec: <<Ctx<'a, Runtime> as ContextTr>::Cfg as Cfg>::Spec) -> bool {
<EthPrecompiles as PrecompileProvider<Ctx<'a, Runtime>>>::set_spec(&mut self.inner, spec)
}
fn run(
&mut self,
context: &mut Ctx<'a, Runtime>,
address: &Address,
inputs: &InputsImpl,
is_static: bool,
gas_limit: u64,
) -> Result<Option<InterpreterResult>, String> {
if address == &PRECOMPILE_ADDRESS {
let output = Self::call_or_fail(inputs, context)
.map_err(|error| format!("ServicePrecompile error: {error}"))?;
return Ok(Some(get_precompile_output(output, gas_limit)));
}
self.inner
.run(context, address, inputs, is_static, gas_limit)
}
fn warm_addresses(&self) -> Box<impl Iterator<Item = Address>> {
let mut addresses = self.inner.warm_addresses().collect::<Vec<Address>>();
addresses.push(PRECOMPILE_ADDRESS);
Box::new(addresses.into_iter())
}
fn contains(&self, address: &Address) -> bool {
address == &PRECOMPILE_ADDRESS || self.inner.contains(address)
}
}
fn map_result_create_outcome<Runtime: BaseRuntime>(
database: &DatabaseRuntime<Runtime>,
result: Result<Option<CreateOutcome>, ExecutionError>,
) -> Option<CreateOutcome> {
match result {
Err(error) => {
database.insert_error(error);
// The use of Revert immediately stops the execution.
let result = InstructionResult::Revert;
let output = Bytes::default();
let gas = Gas::default();
let result = InterpreterResult {
result,
output,
gas,
};
Some(CreateOutcome {
result,
address: None,
})
}
Ok(result) => result,
}
}
fn map_result_call_outcome<Runtime: BaseRuntime>(
database: &DatabaseRuntime<Runtime>,
result: Result<Option<CallOutcome>, ExecutionError>,
) -> Option<CallOutcome> {
match result {
Err(error) => {
database.insert_error(error);
// The use of Revert immediately stops the execution.
let result = InstructionResult::Revert;
let output = Bytes::default();
let gas = Gas::default();
let result = InterpreterResult {
result,
output,
gas,
};
let memory_offset = Range::default();
Some(CallOutcome {
result,
memory_offset,
})
}
Ok(result) => result,
}
}
fn get_interpreter_result(
result: &[u8],
inputs: &mut CallInputs,
) -> Result<InterpreterResult, ExecutionError> {
let mut result = bcs::from_bytes::<InterpreterResult>(result)?;
// This effectively means that no cost is incurred by the call to another contract.
// This is fine since the costs are incurred by the other contract itself.
result.gas = Gas::new(inputs.gas_limit);
Ok(result)
}
struct CallInterceptorContract<Runtime> {
db: DatabaseRuntime<Runtime>,
// This is the contract address of the contract being created.
contract_address: Address,
precompile_addresses: BTreeSet<Address>,
error: Arc<Mutex<Option<U256>>>,
}
impl<Runtime> Clone for CallInterceptorContract<Runtime> {
fn clone(&self) -> Self {
Self {
db: self.db.clone(),
contract_address: self.contract_address,
precompile_addresses: self.precompile_addresses.clone(),
error: self.error.clone(),
}
}
}
impl<'a, Runtime: ContractRuntime> Inspector<Ctx<'a, Runtime>>
for CallInterceptorContract<Runtime>
{
fn create(
&mut self,
context: &mut Ctx<'a, Runtime>,
inputs: &mut CreateInputs,
) -> Option<CreateOutcome> {
let result = self.create_or_fail(context, inputs);
map_result_create_outcome(&self.db, result)
}
fn call(
&mut self,
context: &mut Ctx<'a, Runtime>,
inputs: &mut CallInputs,
) -> Option<CallOutcome> {
let result = self.call_or_fail(context, inputs);
map_result_call_outcome(&self.db, result)
}
}
impl<Runtime: ContractRuntime> CallInterceptorContract<Runtime> {
/// Gets the expected `ApplicationId`. We need to transfer
/// native tokens before the application is created (see below).
/// Therefore, we need to pre-compute the obtained application ID.
fn get_expected_application_id(
runtime: &mut Runtime,
module_id: ModuleId,
) -> Result<ApplicationId, ExecutionError> {
let chain_id = runtime.chain_id()?;
let block_height = runtime.block_height()?;
let application_index = runtime.peek_application_index()?;
let parameters = JSON_EMPTY_VECTOR.to_vec(); // No constructor
let required_application_ids = Vec::new();
let application_description = ApplicationDescription {
module_id,
creator_chain_id: chain_id,
block_height,
application_index,
parameters: parameters.clone(),
required_application_ids,
};
Ok(ApplicationId::from(&application_description))
}
/// Publishes the `inputs`.
fn publish_create_inputs(
context: &mut Ctx<'_, Runtime>,
inputs: &mut CreateInputs,
) -> Result<ModuleId, ExecutionError> {
let contract = linera_base::data_types::Bytecode::new(inputs.init_code.to_vec());
let service = linera_base::data_types::Bytecode::new(vec![]);
let mut runtime = context.db().0.runtime.lock().unwrap();
runtime.publish_module(contract, service, VmRuntime::Evm)
}
/// The function `fn create` of the inspector trait is called
/// when a contract is going to be instantiated. Since the
/// function can have some error case which are not supported
/// in `fn create`, we call a `fn create_or_fail` that can
/// return errors.
/// When the database runtime is created, the EVM contract
/// may or may not have been created. Therefore, at startup
/// we have `is_revm_instantiated = false`. That boolean
/// can be updated after `set_is_initialized`.
///
/// The inspector can do two things:
/// * It can change the inputs in `CreateInputs`. Here we
/// change the address being created.
/// * It can return some specific CreateInput to be used.
///
/// Therefore, the first case of the call is going to
/// be about the creation of the contract with just the
/// address being the one chosen by Linera.
///
/// The second case occurs when the first contract has
/// been created and that contract starts making new
/// contracts.
/// In relation to bytecode, the following notions are
/// relevant:
/// * The bytecode is created from the compilation.
/// * The bytecode concatenated with the constructor
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | true |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/evm/mod.rs | linera-execution/src/evm/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Support for user applications compiled for the EVM
//!
//! We are using Revm for implementing it.
#![cfg(with_revm)]
mod data_types;
mod database;
pub mod inputs;
pub mod revm;
use linera_base::data_types::AmountConversionError;
use revm_context::result::{HaltReason, Output, SuccessReason};
use revm_primitives::{Address, Log, U256};
use thiserror::Error;
#[derive(Debug, Error)]
pub enum EvmExecutionError {
#[error(transparent)]
AmountConversionError(#[from] AmountConversionError),
#[error("Failed to load contract EVM module: {_0}")]
LoadContractModule(#[source] anyhow::Error),
#[error("Failed to load service EVM module: {_0}")]
LoadServiceModule(#[source] anyhow::Error),
#[error("Commit error {0}")]
CommitError(String),
#[error("It is illegal to call {0} from an operation")]
IllegalOperationCall(String),
#[error("runtime error")]
RuntimeError(String),
#[error("The balances are incoherent for address {0}, balances {1}, {2}")]
IncoherentBalances(Address, U256, U256),
#[error("Unknown signer")]
UnknownSigner,
#[error("No delegate call")]
NoDelegateCall,
#[error("No transfer in services")]
NoTransferInServices,
#[error("No transfer in Wasm application call")]
NoTransferInRuntimeCall,
#[error("The function {0} is being called but is missing from the bytecode API")]
MissingFunction(String),
#[error("Incorrect contract creation: {0}")]
IncorrectContractCreation(String),
#[error("The operation should contain the evm selector and so have length 4 or more")]
OperationIsTooShort,
#[error("Missing bytecode")]
MissingBytecode,
#[error("Contracts cannot call themselves")]
NoSelfCall,
#[error("Transact error {0}")]
TransactError(String),
#[error("Impossible to create contracts in services")]
NoContractCreationInService,
#[error("Transact commit error {0}")]
TransactCommitError(String),
#[error("Precompile error: {0}")]
PrecompileError(String),
#[error("The operation was reverted with {gas_used} gas used and output {output:?}")]
Revert {
gas_used: u64,
output: revm_primitives::Bytes,
},
#[error("The operation was halted with {gas_used} gas used due to {reason:?}")]
Halt { gas_used: u64, reason: HaltReason },
#[error("The interpreter did not return, reason={:?}, gas_used={}, gas_refunded={}, logs={:?}, output={:?}",
reason, gas_used, gas_refunded, logs, output)]
NoReturnInterpreter {
reason: SuccessReason,
gas_used: u64,
gas_refunded: u64,
logs: Vec<Log>,
output: Output,
},
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/evm/data_types.rs | linera-execution/src/evm/data_types.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use alloy_primitives::U256;
use linera_base::data_types::Amount;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
#[derive(Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Hash, Default, Debug)]
/// An encapsulation of U256 in order to have a specific serialization
pub struct AmountU256(pub U256);
impl Serialize for AmountU256 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
use serde::ser::SerializeTuple;
let v: [u8; 32] = self.0.to_be_bytes();
let mut tuple = serializer.serialize_tuple(32)?;
for byte in &v {
tuple.serialize_element(byte)?;
}
tuple.end()
}
}
impl<'de> Deserialize<'de> for AmountU256 {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let bytes: [u8; 32] = Deserialize::deserialize(deserializer)?;
let value = U256::from_be_bytes(bytes);
Ok(AmountU256(value))
}
}
impl From<Amount> for AmountU256 {
fn from(amount: Amount) -> AmountU256 {
AmountU256(amount.into())
}
}
#[cfg(test)]
mod tests {
use linera_base::data_types::Amount;
use crate::evm::data_types::AmountU256;
#[test]
fn check_bcs_serialization() -> anyhow::Result<()> {
let value = 7837438347454859505557763535536363636;
let value = Amount::from_tokens(value);
let value = AmountU256::from(value);
let vec = bcs::to_bytes(&value)?;
assert_eq!(vec.len(), 32);
assert_eq!(value, bcs::from_bytes(&vec)?);
Ok(())
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/util/mod.rs | linera-execution/src/util/mod.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Helper traits and functions.
mod sync_response;
use futures::channel::mpsc;
use linera_base::{data_types::OracleResponse, http::Response, identifiers::EventId};
pub use self::sync_response::SyncSender;
use crate::ExecutionError;
/// Extension trait to help with sending requests to an actor.
///
/// Prepares a channel for the actor to send a response back to the sender of the request.
pub trait UnboundedSenderExt<Request> {
/// Sends a request built by `builder`, returning a [`oneshot::Receiver`] for receiving the
/// `Response`.
fn send_request<Response>(
&self,
builder: impl FnOnce(oneshot::Sender<Response>) -> Request,
) -> Result<oneshot::Receiver<Response>, ExecutionError>
where
Response: Send;
// TODO(#1416)
#[allow(dead_code)]
/// Sends a synchronous request built by `builder`, blocking until the `Response` is received.
fn send_sync_request<Response>(
&self,
builder: impl FnOnce(SyncSender<Response>) -> Request,
) -> Result<Response, ExecutionError>
where
Response: Send;
}
impl<Request> UnboundedSenderExt<Request> for mpsc::UnboundedSender<Request>
where
Request: Send,
{
fn send_request<Response>(
&self,
builder: impl FnOnce(oneshot::Sender<Response>) -> Request,
) -> Result<oneshot::Receiver<Response>, ExecutionError>
where
Response: Send,
{
let (response_sender, response_receiver) = oneshot::channel();
let request = builder(response_sender);
self.unbounded_send(request).map_err(|send_error| {
assert!(
send_error.is_disconnected(),
"`send_request` should only be used with unbounded senders"
);
ExecutionError::MissingRuntimeResponse
})?;
Ok(response_receiver)
}
// TODO(#1416)
#[allow(dead_code)]
fn send_sync_request<Response>(
&self,
builder: impl FnOnce(SyncSender<Response>) -> Request,
) -> Result<Response, ExecutionError>
where
Response: Send,
{
let (response_sender, response_receiver) = sync_response::channel();
let request = builder(response_sender);
self.unbounded_send(request).map_err(|send_error| {
assert!(
send_error.is_disconnected(),
"`send_request` should only be used with unbounded senders"
);
ExecutionError::MissingRuntimeResponse
})?;
response_receiver
.recv()
.map_err(|_| ExecutionError::MissingRuntimeResponse)
}
}
/// Extension trait to help with receiving responses with a [`oneshot::Receiver`].
pub trait ReceiverExt<T> {
/// Receives a response `T`, or returns an [`ExecutionError`] if the sender endpoint is closed.
fn recv_response(self) -> Result<T, ExecutionError>;
}
impl<T> ReceiverExt<T> for oneshot::Receiver<T> {
fn recv_response(self) -> Result<T, ExecutionError> {
self.recv()
.map_err(|oneshot::RecvError| ExecutionError::MissingRuntimeResponse)
}
}
/// Helper trait to send a response and log on failure.
pub trait RespondExt {
type Response;
/// Responds to a request using the `response_sender` channel endpoint.
fn respond(self, response: Self::Response);
}
impl<Response> RespondExt for oneshot::Sender<Response> {
type Response = Response;
fn respond(self, response: Self::Response) {
if self.send(response).is_err() {
tracing::debug!("Request sent to `RuntimeActor` was canceled");
}
}
}
impl<Response> RespondExt for SyncSender<Response> {
type Response = Response;
fn respond(self, response: Self::Response) {
if self.send(response).is_err() {
tracing::debug!("Request sent to `RuntimeActor` was canceled");
}
}
}
pub(crate) trait OracleResponseExt {
fn to_round(&self) -> Result<Option<u32>, ExecutionError>;
fn to_service_response(&self) -> Result<Vec<u8>, ExecutionError>;
fn to_http_response(&self) -> Result<Response, ExecutionError>;
fn to_event(&self, event_id: &EventId) -> Result<Vec<u8>, ExecutionError>;
}
impl OracleResponseExt for OracleResponse {
fn to_round(&self) -> Result<Option<u32>, ExecutionError> {
match self {
OracleResponse::Round(round) => Ok(*round),
_ => Err(ExecutionError::OracleResponseMismatch),
}
}
fn to_service_response(&self) -> Result<Vec<u8>, ExecutionError> {
match self {
OracleResponse::Service(bytes) => Ok(bytes.clone()),
_ => Err(ExecutionError::OracleResponseMismatch),
}
}
fn to_http_response(&self) -> Result<Response, ExecutionError> {
match self {
OracleResponse::Http(response) => Ok(response.clone()),
_ => Err(ExecutionError::OracleResponseMismatch),
}
}
fn to_event(&self, event_id: &EventId) -> Result<Vec<u8>, ExecutionError> {
match self {
OracleResponse::Event(recorded_event_id, event) if recorded_event_id == event_id => {
Ok(event.clone())
}
_ => Err(ExecutionError::OracleResponseMismatch),
}
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/util/sync_response.rs | linera-execution/src/util/sync_response.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
//! Types useful for sending synchronous responses from a [`RuntimeActor`]
// TODO(#1416)
#[allow(dead_code)]
/// Creates a channel that wraps a [`oneshot`] channel with the [`Sender`] type not
/// implementing [`std::future::Future`].
///
/// This forces the channel to be used in a blocking manner.
pub fn channel<T>() -> (SyncSender<T>, SyncReceiver<T>) {
let (sender, receiver) = oneshot::channel();
(SyncSender(sender), SyncReceiver(receiver))
}
/// A wrapper around [`oneshot::Sender`] that is connected to a synchronous [`SyncReceiver`].
pub struct SyncSender<T>(oneshot::Sender<T>);
impl<T> SyncSender<T> {
/// Sends a `message` to the synchronous [`SyncReceiver`] endpoint.
pub fn send(self, message: T) -> Result<(), oneshot::SendError<T>> {
self.0.send(message)
}
}
// TODO(#1416)
#[allow(dead_code)]
/// A wrapper around [`oneshot::Receiver`] that is connected to a synchronous [`SyncSender`].
///
/// This type does not implement [`std::future::Future`], so it can't be used to receive
/// messages asynchronously.
pub struct SyncReceiver<T>(oneshot::Receiver<T>);
// TODO(#1416)
#[allow(dead_code)]
impl<T> SyncReceiver<T> {
/// Blocks until a message from the [`SyncSender`] endpoint is received.
pub fn recv(self) -> Result<T, oneshot::RecvError> {
self.0.recv()
}
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
linera-io/linera-protocol | https://github.com/linera-io/linera-protocol/blob/69f159d2106f7fc70870ce70074c55850bf1303b/linera-execution/src/unit_tests/system_tests.rs | linera-execution/src/unit_tests/system_tests.rs | // Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
use linera_base::data_types::{Blob, BlockHeight, Bytecode};
#[cfg(with_testing)]
use linera_base::vm::VmRuntime;
use linera_views::context::MemoryContext;
use super::*;
use crate::{test_utils::dummy_chain_description, ExecutionStateView, TestExecutionRuntimeContext};
/// Returns an execution state view and a matching operation context, for epoch 1, with root
/// chain 0 as the admin ID and one empty committee.
async fn new_view_and_context() -> (
ExecutionStateView<MemoryContext<TestExecutionRuntimeContext>>,
OperationContext,
) {
let description = dummy_chain_description(5);
let context = OperationContext {
chain_id: ChainId::from(&description),
authenticated_owner: None,
height: BlockHeight::from(7),
round: Some(0),
timestamp: Default::default(),
};
let state = SystemExecutionState {
description: Some(description),
epoch: Epoch(1),
admin_id: Some(dummy_chain_description(0).id()),
committees: BTreeMap::new(),
..SystemExecutionState::default()
};
let view = state.into_view().await;
(view, context)
}
fn expected_application_id(
context: &OperationContext,
module_id: &ModuleId,
parameters: Vec<u8>,
required_application_ids: Vec<ApplicationId>,
application_index: u32,
) -> ApplicationId {
let description = ApplicationDescription {
module_id: *module_id,
creator_chain_id: context.chain_id,
block_height: context.height,
application_index,
parameters,
required_application_ids,
};
From::from(&description)
}
#[tokio::test]
async fn application_message_index() -> anyhow::Result<()> {
let (mut view, context) = new_view_and_context().await;
let contract = Bytecode::new(b"contract".into());
let service = Bytecode::new(b"service".into());
let contract_blob = Blob::new_contract_bytecode(contract.compress());
let service_blob = Blob::new_service_bytecode(service.compress());
let vm_runtime = VmRuntime::Wasm;
let module_id = ModuleId::new(contract_blob.id().hash, service_blob.id().hash, vm_runtime);
let operation = SystemOperation::CreateApplication {
module_id,
parameters: vec![],
instantiation_argument: vec![],
required_application_ids: vec![],
};
let mut txn_tracker = TransactionTracker::default();
view.context()
.extra()
.add_blobs([contract_blob, service_blob])
.await?;
let mut controller = ResourceController::default();
let new_application = view
.system
.execute_operation(context, operation, &mut txn_tracker, &mut controller)
.await?;
let id = expected_application_id(&context, &module_id, vec![], vec![], 0);
assert_eq!(new_application, Some((id, vec![])));
Ok(())
}
#[tokio::test]
async fn open_chain_message_index() {
let (mut view, context) = new_view_and_context().await;
let owner = linera_base::crypto::AccountPublicKey::test_key(0).into();
let ownership = ChainOwnership::single(owner);
let config = OpenChainConfig {
ownership,
balance: Amount::ZERO,
application_permissions: Default::default(),
};
let mut txn_tracker = TransactionTracker::default();
let operation = SystemOperation::OpenChain(config.clone());
let mut controller = ResourceController::default();
let new_application = view
.system
.execute_operation(context, operation, &mut txn_tracker, &mut controller)
.await
.unwrap();
assert_eq!(new_application, None);
assert_eq!(
txn_tracker.into_outcome().unwrap().blobs[0].id().blob_type,
BlobType::ChainDescription,
);
}
/// Tests if an account is removed from storage if it is drained.
#[tokio::test]
async fn empty_accounts_are_removed() -> anyhow::Result<()> {
let owner = AccountOwner::from(CryptoHash::test_hash("account owner"));
let amount = Amount::from_tokens(99);
let mut view = SystemExecutionState {
description: Some(dummy_chain_description(0)),
balances: BTreeMap::from([(owner, amount)]),
..SystemExecutionState::default()
}
.into_view()
.await;
view.system.debit(&owner, amount).await?;
assert!(view.system.balances.indices().await?.is_empty());
Ok(())
}
| rust | Apache-2.0 | 69f159d2106f7fc70870ce70074c55850bf1303b | 2026-01-04T15:33:08.660695Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.