repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
GyulyVGC/sniffnet | https://github.com/GyulyVGC/sniffnet/blob/a748d0a04dfc6f6c3be206d79c5df4f6beeeab85/src/gui/types/conf.rs | src/gui/types/conf.rs | use crate::gui::pages::types::running_page::RunningPage;
use crate::gui::pages::types::settings_page::SettingsPage;
use crate::gui::types::config_window::ConfigWindow;
use crate::gui::types::export_pcap::ExportPcap;
use crate::gui::types::filters::Filters;
use crate::gui::types::settings::Settings;
use crate::networking::types::capture_context::CaptureSourcePicklist;
use crate::networking::types::config_device::ConfigDevice;
use crate::networking::types::data_representation::DataRepr;
use crate::report::types::sort_type::SortType;
#[cfg(not(test))]
use crate::utils::error_logger::{ErrorLogger, Location};
#[cfg(not(test))]
use crate::{SNIFFNET_LOWERCASE, location};
#[cfg(not(test))]
use confy::ConfyError;
use serde::{Deserialize, Deserializer, Serialize};
pub static CONF: std::sync::LazyLock<Conf> = std::sync::LazyLock::new(Conf::load);
/// Application configurations structure
///
/// This structure holds all the configuration parameters for the application,
/// and is serialized/deserialized using `confy` crate to store/load from disk.
///
/// ### IMPORTANT NOTE
///
/// In order to load it in a robust, fault-tolerant, backward-compatible way,
/// there are various constraints to be satisfied when deserializing:
/// - missing fields must be filled with default values
/// - the main struct and all nested structs must implement `Default` and be annotated with `#[serde(default)]`
/// - this populates missing fields from the struct's `Default` implementation
/// - invalid fields must be replaced with default values
/// - all fields must be annotated with `#[serde(deserialize_with = "deserialize_or_default")]`
/// - this populates invalid fields from the field's type `Default` implementation
/// - extra fields must be ignored
/// - this is the default behavior of `serde`
/// - right after deserialization, certain fields must be sanitized
/// - this is to ensure that fields deserialized correctly but with "weird" values are fixed
#[derive(Serialize, Deserialize, Default, Clone, PartialEq, Debug)]
#[serde(default)]
pub struct Conf {
    /// Capture source picklist, to select the source of the capture
    #[serde(deserialize_with = "deserialize_or_default")]
    pub capture_source_picklist: CaptureSourcePicklist,
    /// Import path for PCAP file
    #[serde(deserialize_with = "deserialize_or_default")]
    pub import_pcap_path: String,
    /// Remembers the last opened settings page
    #[serde(deserialize_with = "deserialize_or_default")]
    pub last_opened_setting: SettingsPage,
    /// Remembers the last opened running page
    #[serde(deserialize_with = "deserialize_or_default")]
    pub last_opened_page: RunningPage,
    /// Data representation
    #[serde(deserialize_with = "deserialize_or_default")]
    pub data_repr: DataRepr,
    /// Host sort type (overview page)
    #[serde(deserialize_with = "deserialize_or_default")]
    pub host_sort_type: SortType,
    /// Service sort type (overview page)
    #[serde(deserialize_with = "deserialize_or_default")]
    pub service_sort_type: SortType,
    /// Report sort type (inspect page)
    #[serde(deserialize_with = "deserialize_or_default")]
    pub report_sort_type: SortType,
    // --- nested configuration structures below this line -----------------------------------------
    /// Window configuration, such as size and position
    #[serde(deserialize_with = "deserialize_or_default")]
    pub window: ConfigWindow,
    /// Last selected network device name
    #[serde(deserialize_with = "deserialize_or_default")]
    pub device: ConfigDevice,
    /// BPF filter program to be applied to the capture
    #[serde(deserialize_with = "deserialize_or_default")]
    pub filters: Filters,
    /// Information about PCAP file export
    #[serde(deserialize_with = "deserialize_or_default")]
    pub export_pcap: ExportPcap,
    /// Parameters from settings pages
    #[serde(deserialize_with = "deserialize_or_default")]
    pub settings: Settings,
}
impl Conf {
    const FILE_NAME: &'static str = "conf";

    /// This should only be used directly to load fresh configurations;
    /// use `CONF` instead to access the initial instance
    #[cfg(not(test))]
    fn load() -> Self {
        // If the stored file can't be read, persist and use the defaults.
        let mut conf =
            confy::load::<Conf>(SNIFFNET_LOWERCASE, Self::FILE_NAME).unwrap_or_else(|_| {
                let _ = Conf::default().store();
                Conf::default()
            });

        // Sanitize values that deserialized fine but carry "weird" contents...

        // scale factor must stay within a sensible zoom range
        if !(0.3..=3.0).contains(&conf.settings.scale_factor) {
            conf.settings.scale_factor = 1.0;
        }

        // window size/position depend on the (already sanitized) scale factor
        conf.window.sanitize(conf.settings.scale_factor);

        // notifications volume is a percentage
        if !(0..=100).contains(&conf.settings.notifications.volume) {
            conf.settings.notifications.volume = 50;
        }

        conf
    }

    #[cfg(not(test))]
    pub fn store(self) -> Result<(), ConfyError> {
        confy::store(SNIFFNET_LOWERCASE, Self::FILE_NAME, self).log_err(location!())
    }
}
#[allow(clippy::unnecessary_wraps)]
pub(crate) fn deserialize_or_default<'de, T, D>(deserializer: D) -> Result<T, D::Error>
where
    T: Deserialize<'de> + Default,
    D: Deserializer<'de>,
{
    // Never propagate a deserialization error: an invalid field is simply
    // replaced by its type's `Default` value.
    let value = match T::deserialize(deserializer) {
        Ok(v) => v,
        Err(_) => T::default(),
    };
    Ok(value)
}
#[cfg(test)]
mod tests {
    use crate::gui::types::conf::Conf;

    // Test-only replacements for `Conf::load`/`Conf::store`: they read/write
    // a `conf.toml` in the crate root instead of the user's config directory,
    // so tests never touch the real on-disk configuration.
    impl Conf {
        // Path of the test configuration file inside the crate directory.
        pub fn test_path() -> String {
            format!("{}/{}.toml", env!("CARGO_MANIFEST_DIR"), Self::FILE_NAME)
        }

        // Loads the test configuration, falling back to defaults on any error.
        pub fn load() -> Self {
            confy::load_path::<Conf>(Conf::test_path()).unwrap_or_else(|_| Conf::default())
        }

        // Stores the test configuration at the crate-local path.
        pub fn store(self) -> Result<(), confy::ConfyError> {
            confy::store_path(Conf::test_path(), self)
        }
    }
}
| rust | Apache-2.0 | a748d0a04dfc6f6c3be206d79c5df4f6beeeab85 | 2026-01-04T15:32:49.059067Z | false |
GyulyVGC/sniffnet | https://github.com/GyulyVGC/sniffnet/blob/a748d0a04dfc6f6c3be206d79c5df4f6beeeab85/src/gui/types/timing_events.rs | src/gui/types/timing_events.rs | use std::net::{IpAddr, Ipv4Addr};
use std::ops::Sub;
use std::time::Duration;
use crate::notifications::types::notifications::DataNotification;
/// Records the instants of recent UI events, so that the GUI can check
/// whether each of them happened within its timeout (see the `was_just_*`
/// methods in the `impl` block).
pub struct TimingEvents {
    /// Instant of the last window focus
    focus: std::time::Instant,
    /// Instant of the last press on Copy IP button, with the related IP address
    copy_ip: (std::time::Instant, IpAddr),
    /// Instant of the last thumbnail mode enter
    thumbnail_enter: std::time::Instant,
    /// Instant of the last click on the thumbnail window
    thumbnail_click: std::time::Instant,
    /// Instant of the last adjust of notifications settings threshold and storage of this
    /// threshold while editing
    threshold_adjust: (std::time::Instant, Option<DataNotification>),
}
impl TimingEvents {
const TIMEOUT_FOCUS: u64 = 200;
const TIMEOUT_COPY_IP: u64 = 1500;
const TIMEOUT_THUMBNAIL_ENTER: u64 = 1000;
const TIMEOUT_THUMBNAIL_CLICK: u64 = 800;
#[cfg(not(test))]
const TIMEOUT_THRESHOLD_ADJUST: u64 = 2000;
#[cfg(test)]
pub const TIMEOUT_THRESHOLD_ADJUST: u64 = 100;
pub fn focus_now(&mut self) {
self.focus = std::time::Instant::now();
}
pub fn was_just_focus(&self) -> bool {
self.focus.elapsed() < Duration::from_millis(TimingEvents::TIMEOUT_FOCUS)
}
pub fn copy_ip_now(&mut self, ip: IpAddr) {
self.copy_ip = (std::time::Instant::now(), ip);
}
pub fn was_just_copy_ip(&self, ip: &IpAddr) -> bool {
self.copy_ip.0.elapsed() < Duration::from_millis(TimingEvents::TIMEOUT_COPY_IP)
&& self.copy_ip.1.eq(ip)
}
pub fn thumbnail_enter_now(&mut self) {
self.thumbnail_enter = std::time::Instant::now();
}
pub fn was_just_thumbnail_enter(&self) -> bool {
self.thumbnail_enter.elapsed()
< Duration::from_millis(TimingEvents::TIMEOUT_THUMBNAIL_ENTER)
}
pub fn thumbnail_click_now(&mut self) {
self.thumbnail_click = std::time::Instant::now();
}
pub fn was_just_thumbnail_click(&self) -> bool {
self.thumbnail_click.elapsed()
< Duration::from_millis(TimingEvents::TIMEOUT_THUMBNAIL_CLICK)
}
pub fn threshold_adjust_now(&mut self, temp_threshold: DataNotification) {
self.threshold_adjust.0 = std::time::Instant::now();
self.threshold_adjust.1 = Some(temp_threshold);
}
/// If timeout has expired, take temporary threshold
pub fn threshold_adjust_expired_take(&mut self) -> Option<DataNotification> {
if self.threshold_adjust.0.elapsed()
> Duration::from_millis(TimingEvents::TIMEOUT_THRESHOLD_ADJUST)
{
self.threshold_adjust.1.take()
} else {
None
}
}
pub fn temp_threshold(&self) -> Option<DataNotification> {
self.threshold_adjust.1
}
}
impl Default for TimingEvents {
fn default() -> Self {
Self {
focus: std::time::Instant::now().sub(Duration::from_millis(400)),
copy_ip: (std::time::Instant::now(), IpAddr::V4(Ipv4Addr::UNSPECIFIED)),
thumbnail_enter: std::time::Instant::now(),
thumbnail_click: std::time::Instant::now(),
threshold_adjust: (std::time::Instant::now(), None),
}
}
}
| rust | Apache-2.0 | a748d0a04dfc6f6c3be206d79c5df4f6beeeab85 | 2026-01-04T15:32:49.059067Z | false |
GyulyVGC/sniffnet | https://github.com/GyulyVGC/sniffnet/blob/a748d0a04dfc6f6c3be206d79c5df4f6beeeab85/src/gui/types/mod.rs | src/gui/types/mod.rs | pub mod conf;
pub mod config_window;
pub mod export_pcap;
pub mod filters;
pub mod message;
pub mod settings;
pub mod timing_events;
| rust | Apache-2.0 | a748d0a04dfc6f6c3be206d79c5df4f6beeeab85 | 2026-01-04T15:32:49.059067Z | false |
GyulyVGC/sniffnet | https://github.com/GyulyVGC/sniffnet/blob/a748d0a04dfc6f6c3be206d79c5df4f6beeeab85/src/gui/types/message.rs | src/gui/types/message.rs | use iced::window;
use std::net::IpAddr;
use crate::gui::components::types::my_modal::MyModal;
use crate::gui::pages::types::running_page::RunningPage;
use crate::gui::pages::types::settings_page::SettingsPage;
use crate::gui::styles::types::gradient_type::GradientType;
use crate::networking::traffic_preview::TrafficPreview;
use crate::networking::types::capture_context::CaptureSourcePicklist;
use crate::networking::types::data_representation::DataRepr;
use crate::networking::types::host::{Host, HostMessage};
use crate::networking::types::info_traffic::InfoTraffic;
use crate::notifications::types::notifications::Notification;
use crate::report::types::search_parameters::SearchParameters;
use crate::report::types::sort_type::SortType;
use crate::utils::types::file_info::FileInfo;
use crate::utils::types::web_page::WebPage;
use crate::{Language, StyleType};
#[derive(Debug, Clone)]
/// Message types that allow reacting to application interactions/subscriptions
pub enum Message {
    /// Run tasks to initialize the app
    StartApp(Option<window::Id>),
    /// Animate welcome page
    Welcome,
    /// Sent by the backend parsing packets; includes the capture id, new data, new hosts batched data, and whether an offline capture has finished
    TickRun(usize, InfoTraffic, Vec<HostMessage>, bool),
    /// Capture source selected from the picklist
    SetCaptureSource(CaptureSourcePicklist),
    /// Select network device
    DeviceSelection(String),
    /// Toggle BPF filter checkbox
    ToggleFilters,
    /// Change BPF filter string
    BpfFilter(String),
    /// Select data representation to use
    DataReprSelection(DataRepr),
    /// Select report sort type to be displayed (inspect page)
    ReportSortSelection(SortType),
    /// Select host sort type to be displayed (overview page)
    HostSortSelection(SortType),
    /// Select service sort type to be displayed (overview page)
    ServiceSortSelection(SortType),
    /// Adds or removes the given host into/from the favorites
    AddOrRemoveFavorite(Host, bool),
    /// Open the supplied web page
    OpenWebPage(WebPage),
    /// Start sniffing packets
    Start,
    /// Stop sniffing process and return to initial page
    Reset,
    /// Change application style
    Style(StyleType),
    /// Deserialize a style from a path
    LoadStyle(String),
    /// Displays a modal
    ShowModal(MyModal),
    /// Opens the specified settings page
    OpenSettings(SettingsPage),
    /// Opens the last opened settings page
    OpenLastSettings,
    /// Hides the current modal
    HideModal,
    /// Hides the current setting page
    CloseSettings,
    /// Changes the current running page
    ChangeRunningPage(RunningPage),
    /// Select language
    LanguageSelection(Language),
    /// Set notification settings
    UpdateNotificationSettings(Notification, bool),
    /// Clear all received notifications
    ClearAllNotifications,
    /// Set notifications volume
    ChangeVolume(u8),
    /// Switch from a page to the next (previous) one if true (false), when the tab (shift+tab) key is pressed.
    SwitchPage(bool),
    /// The enter (return) key has been pressed
    ReturnKeyPressed,
    /// The esc key has been pressed
    EscKeyPressed,
    /// The reset button has been pressed or the backspace key has been pressed while running
    ResetButtonPressed,
    /// Ctrl+D keys have been pressed
    CtrlDPressed,
    /// Update search parameters of inspect page
    Search(SearchParameters),
    /// Update page result number in inspect
    UpdatePageNumber(bool),
    /// Left (false) or Right (true) arrow key has been pressed
    ArrowPressed(bool),
    /// Emitted when the main window is focused
    WindowFocused,
    /// Enable or disable gradients
    GradientsSelection(GradientType),
    /// Set UI scale factor
    ChangeScaleFactor(f32),
    /// The app window position has been changed
    WindowMoved(f32, f32),
    /// The app window size has been changed
    WindowResized(f32, f32),
    /// The country MMDB custom path has been updated
    CustomCountryDb(String),
    /// The ASN MMDB custom path has been updated
    CustomAsnDb(String),
    /// Wrapper around the Quit message
    QuitWrapper,
    /// Save the configurations of the app and quit
    Quit,
    /// Copies the given IP address to the clipboard
    CopyIp(IpAddr),
    /// Launch a new file dialog
    OpenFile(String, FileInfo, fn(String) -> Message),
    /// Toggle export pcap file
    ToggleExportPcap,
    /// The output PCAP directory has been updated
    OutputPcapDir(String),
    /// The output PCAP file name has been updated
    OutputPcapFile(String),
    /// Toggle thumbnail mode
    ToggleThumbnail(bool),
    /// Drag the window
    Drag,
    /// Ctrl+T keys have been pressed
    CtrlTPressed,
    /// Ctrl+Space keys have been pressed
    CtrlSpacePressed,
    /// Edit scale factor via keyboard shortcut
    ScaleFactorShortcut(bool),
    /// Set new release status
    SetNewerReleaseStatus(Option<bool>),
    /// Set the pcap import path
    SetPcapImport(String),
    /// Sent by the backend parsing packets at the end of an offline capture; includes all the pending hosts
    PendingHosts(usize, Vec<HostMessage>),
    /// Sent by offline captures: ticks without packets
    OfflineGap(usize, u32),
    /// Emitted every second to repeat certain tasks (such as fetching the network devices)
    Periodic,
    /// Expand or collapse the given logged notification
    ExpandNotification(usize, bool),
    /// Toggle remote notifications
    ToggleRemoteNotifications,
    /// The remote notifications URL has been updated
    RemoteNotificationsUrl(String),
    /// Pause or resume live capture
    Freeze,
    /// Traffic preview
    TrafficPreview(TrafficPreview),
}
| rust | Apache-2.0 | a748d0a04dfc6f6c3be206d79c5df4f6beeeab85 | 2026-01-04T15:32:49.059067Z | false |
GyulyVGC/sniffnet | https://github.com/GyulyVGC/sniffnet/blob/a748d0a04dfc6f6c3be206d79c5df4f6beeeab85/src/gui/types/config_window.rs | src/gui/types/config_window.rs | use crate::gui::types::conf::deserialize_or_default;
use iced::{Point, Size};
use serde::{Deserialize, Serialize};
/// Size and position of the app windows, persisted as part of the configuration
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug, Default)]
#[serde(default)]
pub struct ConfigWindow {
    /// Main window size
    #[serde(deserialize_with = "deserialize_or_default")]
    size: SizeTuple,
    /// Main window position
    #[serde(deserialize_with = "deserialize_or_default")]
    position: PositionTuple,
    /// Thumbnail window position
    #[serde(deserialize_with = "deserialize_or_default")]
    thumbnail_position: PositionTuple,
}
impl ConfigWindow {
#[cfg(test)]
pub(crate) fn new(
size: (f32, f32),
position: (f32, f32),
thumbnail_position: (f32, f32),
) -> Self {
Self {
size: SizeTuple(size.0, size.1),
position: PositionTuple(position.0, position.1),
thumbnail_position: PositionTuple(thumbnail_position.0, thumbnail_position.1),
}
}
#[cfg(not(test))]
pub(crate) fn sanitize(&mut self, scale_factor: f32) {
self.size.sanitize(scale_factor);
self.position.sanitize();
self.thumbnail_position.sanitize();
}
pub(crate) fn set_size(&mut self, width: f32, height: f32, scale_factor: f32) {
let mut size = SizeTuple(width, height);
size.sanitize(scale_factor);
self.size = size;
}
pub(crate) fn scale_size(&mut self, old_factor: f32, new_factor: f32) {
self.size.scale_and_sanitize(old_factor, new_factor);
}
pub(crate) fn set_position(&mut self, x: f32, y: f32, factor: f32) {
let mut position = PositionTuple(x, y);
position.scale_and_sanitize(factor);
self.position = position;
}
pub(crate) fn set_thumbnail_position(&mut self, x: f32, y: f32, factor: f32) {
let mut position = PositionTuple(x, y);
position.scale_and_sanitize(factor);
self.thumbnail_position = position;
}
pub(crate) fn size(&self) -> Size {
self.size.to_size()
}
pub(crate) fn position(&self) -> Point {
self.position.to_point()
}
pub(crate) fn thumbnail_position(&self) -> Point {
self.thumbnail_position.to_point()
}
}
/// Window position as an `(x, y)` pair
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug, Default)]
struct PositionTuple(f32, f32);

/// Window size as a `(width, height)` pair
#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Debug)]
struct SizeTuple(f32, f32);
impl Default for SizeTuple {
fn default() -> Self {
Self(1190.0, 670.0)
}
}
impl SizeTuple {
    // TODO: improve based on monitor size
    /// Clamps width and height to the allowed range for the given scale factor.
    fn sanitize(&mut self, scale_factor: f32) {
        let min_size = 100.0 / scale_factor;
        let max_size = 8192.0 / scale_factor;
        self.0 = Self::clamped(self.0, min_size, max_size);
        self.1 = Self::clamped(self.1, min_size, max_size);
    }

    /// Returns `v` limited to `[lo, hi]`; a NaN value passes through unchanged,
    /// exactly like the explicit comparisons this replaces.
    fn clamped(v: f32, lo: f32, hi: f32) -> f32 {
        let mut out = v;
        if out < lo {
            out = lo;
        }
        if out > hi {
            out = hi;
        }
        out
    }

    /// Rescales the size from `old_factor` to `new_factor`, then clamps it.
    fn scale_and_sanitize(&mut self, old_factor: f32, new_factor: f32) {
        let ratio = old_factor / new_factor;
        self.0 *= ratio;
        self.1 *= ratio;
        self.sanitize(new_factor);
    }

    fn to_size(self) -> Size {
        Size {
            width: self.0,
            height: self.1,
        }
    }
}
impl PositionTuple {
    // TODO: improve based on monitor size (and sanitized window size)
    /// Clamps the position to the allowed on-screen area.
    fn sanitize(&mut self) {
        const MIN_POS: f32 = -50.0;
        const MAX_POS_X: f32 = 1100.0;
        const MAX_POS_Y: f32 = 700.0;
        if self.0 < MIN_POS {
            self.0 = MIN_POS;
        }
        if self.1 < MIN_POS {
            self.1 = MIN_POS;
        }
        if self.0 > MAX_POS_X {
            self.0 = MAX_POS_X;
        }
        if self.1 > MAX_POS_Y {
            self.1 = MAX_POS_Y;
        }
    }

    /// Scales both coordinates by `factor`, then clamps them.
    fn scale_and_sanitize(&mut self, factor: f32) {
        self.0 *= factor;
        self.1 *= factor;
        self.sanitize();
    }

    fn to_point(self) -> Point {
        Point {
            x: self.0,
            y: self.1,
        }
    }
}
| rust | Apache-2.0 | a748d0a04dfc6f6c3be206d79c5df4f6beeeab85 | 2026-01-04T15:32:49.059067Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/benches/src/lib.rs | benches/src/lib.rs | //! Foundry benchmark runner.
use crate::results::{HyperfineOutput, HyperfineResult};
use eyre::{Result, WrapErr};
use foundry_common::{sh_eprintln, sh_println};
use foundry_compilers::project_util::TempProject;
use foundry_test_utils::util::clone_remote;
use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use std::{
env,
path::{Path, PathBuf},
process::Command,
str::FromStr,
};
pub mod results;
/// Default number of runs for benchmarks
pub const RUNS: u32 = 5;
/// Configuration for repositories to benchmark
#[derive(Debug, Clone)]
pub struct RepoConfig {
    /// Display name; `org-repo` for ad-hoc repos (see `FromStr`)
    pub name: String,
    /// GitHub organization or user
    pub org: String,
    /// Repository name
    pub repo: String,
    /// Git revision (branch, tag, or commit) to check out
    pub rev: String,
}
impl FromStr for RepoConfig {
type Err = eyre::Error;
fn from_str(spec: &str) -> Result<Self> {
// Split by ':' first to separate repo path from optional rev
let parts: Vec<&str> = spec.splitn(2, ':').collect();
let repo_path = parts[0];
let custom_rev = parts.get(1).copied();
// Now split the repo path by '/'
let path_parts: Vec<&str> = repo_path.split('/').collect();
if path_parts.len() != 2 {
eyre::bail!("Invalid repo format '{}'. Expected 'org/repo' or 'org/repo:rev'", spec);
}
let org = path_parts[0];
let repo = path_parts[1];
// Try to find this repo in BENCHMARK_REPOS to get the full config
let existing_config = BENCHMARK_REPOS.iter().find(|r| r.org == org && r.repo == repo);
let config = if let Some(existing) = existing_config {
// Use existing config but allow custom rev to override
let mut config = existing.clone();
if let Some(rev) = custom_rev {
config.rev = rev.to_string();
}
config
} else {
// Create new config with custom rev or default
// Name should follow the format: org-repo (with hyphen)
Self {
name: format!("{org}-{repo}"),
org: org.to_string(),
repo: repo.to_string(),
rev: custom_rev.unwrap_or("main").to_string(),
}
};
let _ = sh_println!("Parsed repo spec '{spec}' -> {config:?}");
Ok(config)
}
}
/// Available repositories for benchmarking
pub fn default_benchmark_repos() -> Vec<RepoConfig> {
    // (name, org, repo, rev) for each default benchmark target.
    [
        ("ithacaxyz-account", "ithacaxyz", "account", "main"),
        ("solady", "Vectorized", "solady", "main"),
    ]
    .into_iter()
    .map(|(name, org, repo, rev)| RepoConfig {
        name: name.to_string(),
        org: org.to_string(),
        repo: repo.to_string(),
        rev: rev.to_string(),
    })
    .collect()
}
// Keep a lazy static for compatibility
pub static BENCHMARK_REPOS: Lazy<Vec<RepoConfig>> = Lazy::new(default_benchmark_repos);
/// Foundry versions to benchmark
///
/// To add more versions for comparison, install them first:
/// ```bash
/// foundryup --install stable
/// foundryup --install nightly
/// foundryup --install v0.2.0 # Example specific version
/// ```
///
/// Then add the version strings to this array. Supported formats:
/// - "stable" - Latest stable release
/// - "nightly" - Latest nightly build
/// - "v0.2.0" - Specific version tag
/// - "commit-hash" - Specific commit hash
/// - "nightly-rev" - Nightly build with specific revision
pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"];
/// A benchmark project that represents a cloned repository ready for testing
pub struct BenchmarkProject {
    /// Repository name (taken from `RepoConfig::name`)
    pub name: String,
    /// Underlying temporary project whose directory hosts the clone
    /// NOTE(review): retained, presumably to keep the temp directory alive
    /// for the project's lifetime — confirm `TempProject` drop semantics
    pub temp_project: TempProject,
    /// Root directory of the cloned repository
    pub root_path: PathBuf,
}
impl BenchmarkProject {
    /// Set up a benchmark project by cloning the repository
    ///
    /// Clones `config`'s repository into a fresh temporary project directory,
    /// checks out the requested revision (unless it is `main`/`master`), and
    /// installs npm dependencies if a `package.json` is present.
    #[allow(unused_must_use)]
    pub fn setup(config: &RepoConfig) -> Result<Self> {
        let temp_project =
            TempProject::dapptools().wrap_err("Failed to create temporary project")?;
        // Get root path before clearing
        let root_path = temp_project.root().to_path_buf();
        let root = root_path.to_str().unwrap();
        // Remove all files in the directory, so the template project's
        // contents don't mix with the clone (removal errors are ignored)
        for entry in std::fs::read_dir(&root_path)? {
            let entry = entry?;
            let path = entry.path();
            if path.is_dir() {
                std::fs::remove_dir_all(&path).ok();
            } else {
                std::fs::remove_file(&path).ok();
            }
        }
        // Clone the repository
        let repo_url = format!("https://github.com/{}/{}.git", config.org, config.repo);
        clone_remote(&repo_url, root, true);
        // Checkout specific revision if provided
        if !config.rev.is_empty() && config.rev != "main" && config.rev != "master" {
            let status = Command::new("git")
                .current_dir(root)
                .args(["checkout", &config.rev])
                .status()
                .wrap_err("Failed to checkout revision")?;
            if !status.success() {
                eyre::bail!("Git checkout failed for {}", config.name);
            }
        }
        // Git submodules are already cloned via --recursive flag
        // But npm dependencies still need to be installed
        Self::install_npm_dependencies(&root_path)?;
        sh_println!(" ✅ Project {} setup complete at {}", config.name, root);
        Ok(Self { name: config.name.to_string(), root_path, temp_project })
    }

    /// Install npm dependencies if package.json exists
    ///
    /// A failed `npm install` only logs a warning — it is not fatal.
    #[allow(unused_must_use)]
    fn install_npm_dependencies(root: &Path) -> Result<()> {
        if root.join("package.json").exists() {
            sh_println!(" 📦 Running npm install...");
            // Inherit stdio so install progress/errors are visible to the user.
            let status = Command::new("npm")
                .current_dir(root)
                .args(["install"])
                .stdout(std::process::Stdio::inherit())
                .stderr(std::process::Stdio::inherit())
                .status()
                .wrap_err("Failed to run npm install")?;
            if !status.success() {
                sh_println!(
                    " ⚠️ Warning: npm install failed with exit code: {:?}",
                    status.code()
                );
            } else {
                sh_println!(" ✅ npm install completed successfully");
            }
        }
        Ok(())
    }

    /// Run a command with hyperfine and return the results
    ///
    /// # Arguments
    /// * `benchmark_name` - Name of the benchmark for organizing output
    /// * `version` - Foundry version being benchmarked
    /// * `command` - The command to benchmark
    /// * `runs` - Number of runs to perform
    /// * `setup` - Optional setup command to run before the benchmark series (e.g., "forge build")
    /// * `prepare` - Optional prepare command to run before each timing run (e.g., "forge clean")
    /// * `conclude` - Optional conclude command to run after each timing run (e.g., cleanup)
    /// * `verbose` - Whether to show command output
    ///
    /// # Hyperfine flags used:
    /// * `--runs` - Number of timing runs
    /// * `--setup` - Execute before the benchmark series (not before each run)
    /// * `--prepare` - Execute before each timing run
    /// * `--conclude` - Execute after each timing run
    /// * `--export-json` - Export results to JSON for parsing
    /// * `--shell=bash` - Use bash for shell command execution
    /// * `--show-output` - Show command output (when verbose)
    #[allow(clippy::too_many_arguments)]
    fn hyperfine(
        &self,
        benchmark_name: &str,
        version: &str,
        command: &str,
        runs: u32,
        setup: Option<&str>,
        prepare: Option<&str>,
        conclude: Option<&str>,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // Create structured temp directory for JSON output
        // Format: <temp_dir>/<benchmark_name>/<version>/<repo_name>/<benchmark_name>.json
        let temp_dir = std::env::temp_dir();
        let json_dir =
            temp_dir.join("foundry-bench").join(benchmark_name).join(version).join(&self.name);
        std::fs::create_dir_all(&json_dir)?;
        let json_path = json_dir.join(format!("{benchmark_name}.json"));
        // Build hyperfine command
        let mut hyperfine_cmd = Command::new("hyperfine");
        hyperfine_cmd
            .current_dir(&self.root_path)
            .arg("--runs")
            .arg(runs.to_string())
            .arg("--export-json")
            .arg(&json_path)
            .arg("--shell=bash");
        // Add optional setup command
        if let Some(setup_cmd) = setup {
            hyperfine_cmd.arg("--setup").arg(setup_cmd);
        }
        // Add optional prepare command
        if let Some(prepare_cmd) = prepare {
            hyperfine_cmd.arg("--prepare").arg(prepare_cmd);
        }
        // Add optional conclude command
        if let Some(conclude_cmd) = conclude {
            hyperfine_cmd.arg("--conclude").arg(conclude_cmd);
        }
        if verbose {
            hyperfine_cmd.arg("--show-output");
            hyperfine_cmd.stderr(std::process::Stdio::inherit());
            hyperfine_cmd.stdout(std::process::Stdio::inherit());
        }
        // Add the benchmark command last
        hyperfine_cmd.arg(command);
        let status = hyperfine_cmd.status().wrap_err("Failed to run hyperfine")?;
        if !status.success() {
            eyre::bail!("Hyperfine failed for command: {}", command);
        }
        // Read and parse the JSON output
        let json_content = std::fs::read_to_string(json_path)?;
        let output: HyperfineOutput = serde_json::from_str(&json_content)?;
        // Extract the first result (we only run one command at a time)
        output.results.into_iter().next().ok_or_else(|| eyre::eyre!("No results from hyperfine"))
    }

    /// Benchmark forge test
    pub fn bench_forge_test(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // Build before running tests
        self.hyperfine(
            "forge_test",
            version,
            "forge test",
            runs,
            Some("forge build"),
            None,
            None,
            verbose,
        )
    }

    /// Benchmark forge build with cache
    ///
    /// The prepare step builds once, so the timed run starts from a warm cache.
    pub fn bench_forge_build_with_cache(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        self.hyperfine(
            "forge_build_with_cache",
            version,
            "FOUNDRY_LINT_LINT_ON_BUILD=false forge build",
            runs,
            None,
            Some("forge build"),
            None,
            verbose,
        )
    }

    /// Benchmark forge build without cache
    ///
    /// `forge clean` runs before the series and after every timed run, so
    /// every build starts cold.
    pub fn bench_forge_build_no_cache(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // Clean before each timing run
        self.hyperfine(
            "forge_build_no_cache",
            version,
            "FOUNDRY_LINT_LINT_ON_BUILD=false forge build",
            runs,
            Some("forge clean"),
            None,
            Some("forge clean"),
            verbose,
        )
    }

    /// Benchmark forge fuzz tests
    ///
    /// The match pattern selects tests that take at least one argument,
    /// i.e. fuzzed tests.
    pub fn bench_forge_fuzz_test(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // Build before running fuzz tests
        self.hyperfine(
            "forge_fuzz_test",
            version,
            r#"forge test --match-test "test[^(]*\([^)]+\)""#,
            runs,
            Some("forge build"),
            None,
            None,
            verbose,
        )
    }

    /// Benchmark forge coverage
    pub fn bench_forge_coverage(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // No setup needed, forge coverage builds internally
        // Use --ir-minimum to avoid "Stack too deep" errors
        self.hyperfine(
            "forge_coverage",
            version,
            "forge coverage --ir-minimum",
            runs,
            None,
            None,
            None,
            verbose,
        )
    }

    /// Benchmark forge test with --isolate flag
    pub fn bench_forge_isolate_test(
        &self,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        // Build before running tests
        self.hyperfine(
            "forge_isolate_test",
            version,
            "forge test --isolate",
            runs,
            Some("forge build"),
            None,
            None,
            verbose,
        )
    }

    /// Get the root path of the project
    pub fn root(&self) -> &Path {
        &self.root_path
    }

    /// Run a specific benchmark by name
    ///
    /// Dispatches to the matching `bench_*` method; errors on unknown names.
    pub fn run(
        &self,
        benchmark: &str,
        version: &str,
        runs: u32,
        verbose: bool,
    ) -> Result<HyperfineResult> {
        match benchmark {
            "forge_test" => self.bench_forge_test(version, runs, verbose),
            "forge_build_no_cache" => self.bench_forge_build_no_cache(version, runs, verbose),
            "forge_build_with_cache" => self.bench_forge_build_with_cache(version, runs, verbose),
            "forge_fuzz_test" => self.bench_forge_fuzz_test(version, runs, verbose),
            "forge_coverage" => self.bench_forge_coverage(version, runs, verbose),
            "forge_isolate_test" => self.bench_forge_isolate_test(version, runs, verbose),
            _ => eyre::bail!("Unknown benchmark: {}", benchmark),
        }
    }
}
/// Switch to a specific foundry version
#[allow(unused_must_use)]
pub fn switch_foundry_version(version: &str) -> Result<()> {
    let cmd_output = Command::new("foundryup")
        .args(["--use", version])
        .output()
        .wrap_err("Failed to run foundryup")?;
    let stderr = String::from_utf8_lossy(&cmd_output.stderr);

    // A failing `forge --version` after the switch means the installed
    // binaries themselves are broken: a reinstall is needed, not a retry.
    if stderr.contains("command failed") && stderr.contains("forge --version") {
        eyre::bail!(
            "Foundry binaries maybe corrupted. Please reinstall by running `foundryup --install <version>`"
        );
    }
    if !cmd_output.status.success() {
        sh_eprintln!("foundryup stderr: {stderr}");
        eyre::bail!("Failed to switch to foundry version: {}", version);
    }

    sh_println!(" Successfully switched to version: {version}");
    Ok(())
}
/// Get the current forge version
pub fn get_forge_version() -> Result<String> {
    let output = Command::new("forge")
        .args(["--version"])
        .output()
        .wrap_err("Failed to get forge version")?;
    if !output.status.success() {
        eyre::bail!("forge --version failed");
    }
    let stdout =
        String::from_utf8(output.stdout).wrap_err("Invalid UTF-8 in forge version output")?;
    // Only the first output line carries the version string.
    let first_line = stdout.lines().next().unwrap_or("unknown");
    Ok(first_line.to_string())
}
/// Get the full forge version details including commit hash and date
///
/// Parses the three-line `forge --version` output (version, commit SHA,
/// build timestamp) and formats it as e.g.
/// `"forge 1.2.3-nightly (51650ea 2025-06-27)"`. Falls back to the first
/// output line when the format is unexpected.
pub fn get_forge_version_details() -> Result<String> {
    let output = Command::new("forge")
        .args(["--version"])
        .output()
        .wrap_err("Failed to get forge version")?;
    if !output.status.success() {
        eyre::bail!("forge --version failed");
    }
    let full_output =
        String::from_utf8(output.stdout).wrap_err("Invalid UTF-8 in forge version output")?;
    // Extract relevant lines and format them
    let lines: Vec<&str> = full_output.lines().collect();
    if lines.len() >= 3 {
        // Extract version, commit, and timestamp
        let version = lines[0].trim();
        let commit = lines[1].trim().replace("Commit SHA: ", "");
        let timestamp = lines[2].trim().replace("Build Timestamp: ", "");
        // First 7 chars of the commit hash; `get` avoids the panic that
        // direct slicing (`&commit[..7]`) would cause on a short hash.
        let short_commit = commit.get(..7).unwrap_or(&commit);
        // Keep only the date part of an RFC 3339 timestamp.
        // (Fixed: this argument was corrupted to `×tamp` — a mangled
        // `&timestamp` — which did not compile.)
        let date = timestamp.split('T').next().unwrap_or(&timestamp);
        Ok(format!("{version} ({short_commit} {date})"))
    } else {
        // Fallback to just the first line if format is unexpected
        Ok(lines.first().unwrap_or(&"unknown").to_string())
    }
}
/// Get Foundry versions to benchmark from environment variable or default
///
/// Reads from FOUNDRY_BENCH_VERSIONS environment variable if set,
/// otherwise returns the default versions from FOUNDRY_VERSIONS constant.
///
/// The environment variable should be a comma-separated list of versions,
/// e.g., "stable,nightly,v1.2.0"
pub fn get_benchmark_versions() -> Vec<String> {
    match env::var("FOUNDRY_BENCH_VERSIONS") {
        // Trim each entry and drop the empty ones.
        Ok(versions_env) => versions_env
            .split(',')
            .map(str::trim)
            .filter(|s| !s.is_empty())
            .map(str::to_string)
            .collect(),
        Err(_) => FOUNDRY_VERSIONS.iter().map(|&s| s.to_string()).collect(),
    }
}
/// Setup Repositories for benchmarking
///
/// Repo specs come from the `FOUNDRY_BENCH_REPOS` environment variable
/// (comma-separated `org/repo` entries) when set, otherwise from the built-in
/// `BENCHMARK_REPOS` list. Projects are prepared in parallel via rayon.
///
/// # Panics
/// Panics when a repo spec cannot be parsed or a project fails to set up.
pub fn setup_benchmark_repos() -> Vec<(RepoConfig, BenchmarkProject)> {
    let repos = match env::var("FOUNDRY_BENCH_REPOS") {
        // Format should be: "org1/repo1,org2/repo2"
        Ok(specs) => specs
            .split(',')
            .map(str::trim)
            .filter(|spec| !spec.is_empty())
            .map(str::parse::<RepoConfig>)
            .collect::<Result<Vec<_>>>()
            .expect("Failed to parse FOUNDRY_BENCH_REPOS"),
        Err(_) => BENCHMARK_REPOS.clone(),
    };
    repos
        .par_iter()
        .map(|config| {
            let project = BenchmarkProject::setup(config).expect("Failed to setup project");
            (config.clone(), project)
        })
        .collect()
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/benches/src/results.rs | benches/src/results.rs | use crate::RepoConfig;
use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, process::Command, thread};
/// Hyperfine benchmark result
///
/// Mirrors one entry of the `results` array in hyperfine's `--export-json`
/// output; durations are in seconds (see `format_duration_seconds` usage).
#[derive(Debug, Deserialize, Serialize)]
pub struct HyperfineResult {
    // The exact command line that was benchmarked.
    pub command: String,
    // Mean wall-clock time over all runs, in seconds.
    pub mean: f64,
    // Standard deviation of run times; absent for a single run.
    pub stddev: Option<f64>,
    // Median wall-clock time, in seconds.
    pub median: f64,
    // User-space CPU time, in seconds — TODO confirm against hyperfine docs.
    pub user: f64,
    // Kernel-space CPU time, in seconds — TODO confirm against hyperfine docs.
    pub system: f64,
    // Fastest run, in seconds.
    pub min: f64,
    // Slowest run, in seconds.
    pub max: f64,
    // Wall-clock time of every individual run, in seconds.
    pub times: Vec<f64>,
    // Exit code of each run, when recorded by hyperfine.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub exit_codes: Option<Vec<i32>>,
    // Values of any parameter sweeps, when present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parameters: Option<HashMap<String, serde_json::Value>>,
}
/// Hyperfine JSON output format
///
/// Top-level shape of a hyperfine `--export-json` file: a single `results`
/// array with one `HyperfineResult` per benchmarked command.
#[derive(Debug, Deserialize, Serialize)]
pub struct HyperfineOutput {
    pub results: Vec<HyperfineResult>,
}
/// Aggregated benchmark results
///
/// Collects every hyperfine run keyed by benchmark, version, and repository,
/// and renders them into the markdown report.
#[derive(Debug, Default)]
pub struct BenchmarkResults {
    /// Map of benchmark_name -> version -> repo -> result
    pub data: HashMap<String, HashMap<String, HashMap<String, HyperfineResult>>>,
    /// Track the baseline version for comparison
    pub baseline_version: Option<String>,
    /// Map of version name -> full version details
    pub version_details: HashMap<String, String>,
}
impl BenchmarkResults {
    /// Creates an empty result set.
    pub fn new() -> Self {
        Self::default()
    }

    /// Records which version all other versions are compared against.
    pub fn set_baseline_version(&mut self, version: String) {
        self.baseline_version = Some(version);
    }

    /// Stores one hyperfine result under `benchmark` -> `version` -> `repo`.
    pub fn add_result(
        &mut self,
        benchmark: &str,
        version: &str,
        repo: &str,
        result: HyperfineResult,
    ) {
        self.data
            .entry(benchmark.to_string())
            .or_default()
            .entry(version.to_string())
            .or_default()
            .insert(repo.to_string(), result);
    }

    /// Stores the full `forge --version` details for a version label.
    pub fn add_version_details(&mut self, version: &str, details: String) {
        self.version_details.insert(version.to_string(), details);
    }

    /// Renders the collected results as a complete markdown report: summary,
    /// one table per benchmark type, and basic system information.
    pub fn generate_markdown(&self, versions: &[String], repos: &[RepoConfig]) -> String {
        let mut output = String::new();
        // Header
        output.push_str("# Foundry Benchmark Results\n\n");
        output.push_str(&format!(
            "**Date**: {}\n\n",
            chrono::Local::now().format("%Y-%m-%d %H:%M:%S")
        ));
        // Summary
        output.push_str("## Summary\n\n");
        // Count repos that actually produced at least one result, which may be
        // fewer than the configured `repos`.
        let mut repos_with_results = std::collections::HashSet::new();
        for version_data in self.data.values() {
            for repo_data in version_data.values() {
                for repo_name in repo_data.keys() {
                    repos_with_results.insert(repo_name.clone());
                }
            }
        }
        output.push_str(&format!(
            "Benchmarked {} Foundry versions across {} repositories.\n\n",
            versions.len(),
            repos_with_results.len()
        ));
        // Repositories tested
        output.push_str("### Repositories Tested\n\n");
        for (i, repo) in repos.iter().enumerate() {
            output.push_str(&format!(
                "{}. [{}/{}](https://github.com/{}/{})\n",
                i + 1,
                repo.org,
                repo.repo,
                repo.org,
                repo.repo
            ));
        }
        output.push('\n');
        // Versions tested
        output.push_str("### Foundry Versions\n\n");
        for version in versions {
            if let Some(details) = self.version_details.get(version) {
                output.push_str(&format!("- **{version}**: {}\n", details.trim()));
            } else {
                output.push_str(&format!("- {version}\n"));
            }
        }
        output.push('\n');
        // Results for each benchmark type.
        // Iterate the benchmark names in sorted order: iterating the HashMap
        // directly made the section order nondeterministic between runs.
        let mut benchmark_names: Vec<&String> = self.data.keys().collect();
        benchmark_names.sort();
        for benchmark_name in benchmark_names {
            output.push_str(&self.generate_benchmark_table(
                benchmark_name,
                &self.data[benchmark_name],
                versions,
                repos,
            ));
        }
        // System info
        output.push_str("## System Information\n\n");
        output.push_str(&format!("- **OS**: {}\n", std::env::consts::OS));
        // NOTE(review): this reports the logical CPU count (available
        // parallelism), not a CPU model name, despite the "CPU" label.
        output.push_str(&format!(
            "- **CPU**: {}\n",
            thread::available_parallelism().map_or(1, |n| n.get())
        ));
        output.push_str(&format!(
            "- **Rustc**: {}\n",
            get_rustc_version().unwrap_or_else(|_| "unknown".to_string())
        ));
        output
    }

    /// Generate a complete markdown table for a single benchmark type
    ///
    /// This includes the section header, table header, separator, and all rows
    fn generate_benchmark_table(
        &self,
        benchmark_name: &str,
        version_data: &HashMap<String, HashMap<String, HyperfineResult>>,
        versions: &[String],
        repos: &[RepoConfig],
    ) -> String {
        let mut output = String::new();
        // Section header
        output.push_str(&format!("## {}\n\n", format_benchmark_name(benchmark_name)));
        // Create table header: one column per benchmarked version
        output.push_str("| Repository |");
        for version in versions {
            output.push_str(&format!(" {version} |"));
        }
        output.push('\n');
        // Table separator
        output.push_str("|------------|");
        for _ in versions {
            output.push_str("----------|");
        }
        output.push('\n');
        // Table rows
        output.push_str(&generate_table_rows(version_data, versions, repos));
        output.push('\n');
        output
    }
}
/// Generate table rows for benchmark results
///
/// Emits one markdown row per repository, with one cell per version holding
/// the formatted mean runtime (or "N/A" when no result was recorded).
fn generate_table_rows(
    version_data: &HashMap<String, HashMap<String, HyperfineResult>>,
    versions: &[String],
    repos: &[RepoConfig],
) -> String {
    let mut rows = String::new();
    for repo in repos {
        rows.push_str(&format!("| {} |", repo.name));
        let cells = versions
            .iter()
            .map(|version| get_benchmark_cell_content(version_data, version, &repo.name));
        for cell in cells {
            rows.push_str(&format!(" {cell} |"));
        }
        rows.push('\n');
    }
    rows
}
/// Get the content for a single benchmark table cell
///
/// Looks up the result recorded for (`version`, `repo_name`) and formats its
/// mean runtime; yields "N/A" when either level of the map has no entry.
fn get_benchmark_cell_content(
    version_data: &HashMap<String, HashMap<String, HyperfineResult>>,
    version: &str,
    repo_name: &str,
) -> String {
    version_data
        .get(version)
        .and_then(|repo_data| repo_data.get(repo_name))
        .map(|result| format_duration_seconds(result.mean))
        .unwrap_or_else(|| "N/A".to_string())
}
/// Maps an internal benchmark id to its human-readable report heading.
///
/// Unknown ids are passed through unchanged.
pub fn format_benchmark_name(name: &str) -> String {
    let pretty = match name {
        "forge_test" => "Forge Test",
        "forge_build_no_cache" => "Forge Build (No Cache)",
        "forge_build_with_cache" => "Forge Build (With Cache)",
        "forge_fuzz_test" => "Forge Fuzz Test",
        "forge_coverage" => "Forge Coverage",
        "forge_isolate_test" => "Forge Test (Isolated)",
        other => other,
    };
    pretty.to_string()
}
/// Formats a duration given in seconds for display in the report.
///
/// Sub-millisecond values are shown in milliseconds, values under a minute in
/// seconds (3 decimals below 1 s, otherwise 2), and longer values as `Xm Y.Ys`.
pub fn format_duration_seconds(seconds: f64) -> String {
    if seconds < 0.001 {
        return format!("{:.2} ms", seconds * 1000.0);
    }
    if seconds < 1.0 {
        return format!("{seconds:.3} s");
    }
    if seconds < 60.0 {
        return format!("{seconds:.2} s");
    }
    let minutes = (seconds / 60.0).floor();
    let remaining_seconds = seconds % 60.0;
    format!("{minutes:.0}m {remaining_seconds:.1}s")
}
/// Returns the `rustc --version` banner, trimmed of surrounding whitespace.
///
/// # Errors
/// Fails only when the `rustc` binary cannot be spawned.
pub fn get_rustc_version() -> Result<String> {
    let output = Command::new("rustc").arg("--version").output()?;
    let banner = String::from_utf8_lossy(&output.stdout);
    Ok(banner.trim().to_owned())
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/benches/src/main.rs | benches/src/main.rs | use clap::Parser;
use eyre::{Result, WrapErr};
use foundry_bench::{
BENCHMARK_REPOS, BenchmarkProject, FOUNDRY_VERSIONS, RUNS, RepoConfig, get_forge_version,
get_forge_version_details,
results::{BenchmarkResults, HyperfineResult},
switch_foundry_version,
};
use foundry_common::sh_println;
use rayon::prelude::*;
use std::{fs, path::PathBuf, process::Command, sync::Mutex};
// Every benchmark id that `--benchmarks` accepts; ids not in this list are
// silently filtered out of the CLI selection in `main`.
const ALL_BENCHMARKS: [&str; 6] = [
    "forge_test",
    "forge_build_no_cache",
    "forge_build_with_cache",
    "forge_fuzz_test",
    "forge_coverage",
    "forge_isolate_test",
];
/// Foundry Benchmark Runner
// NOTE: the `///` comments on the fields below double as clap `--help` text;
// keep them user-facing.
#[derive(Parser, Debug)]
#[clap(name = "foundry-bench", about = "Run Foundry benchmarks across multiple versions")]
struct Cli {
    /// Comma-separated list of Foundry versions to test (e.g., stable,nightly,v1.2.0)
    #[clap(long, value_delimiter = ',')]
    versions: Option<Vec<String>>,
    /// Force install Foundry versions
    #[clap(long)]
    force_install: bool,
    /// Show verbose output
    #[clap(long)]
    verbose: bool,
    /// Directory where the aggregated benchmark results will be written.
    #[clap(long, default_value = ".")]
    output_dir: PathBuf,
    /// Name of the output file (default: LATEST.md)
    #[clap(long, default_value = "LATEST.md")]
    output_file: String,
    /// Run only specific benchmarks (comma-separated:
    /// forge_test,forge_build_no_cache,forge_build_with_cache,forge_fuzz_test,forge_coverage)
    #[clap(long, value_delimiter = ',')]
    benchmarks: Option<Vec<String>>,
    /// Run only on specific repositories (comma-separated in org/repo[:rev] format:
    /// ithacaxyz/account,Vectorized/solady:main,foundry-rs/foundry:v1.0.0)
    #[clap(long, value_delimiter = ',')]
    repos: Option<Vec<String>>,
}
/// Mutex to prevent concurrent foundryup calls
static FOUNDRY_LOCK: Mutex<()> = Mutex::new(());

/// Switches the active Foundry toolchain while holding [`FOUNDRY_LOCK`], so
/// parallel tasks never run `foundryup` concurrently.
fn switch_version_safe(version: &str) -> Result<()> {
    // Recover from a poisoned lock instead of panicking: the mutex guards no
    // data (`()`), so a panic in a previous holder leaves nothing corrupt —
    // the plain `.unwrap()` would make every later switch panic too.
    let _lock = FOUNDRY_LOCK.lock().unwrap_or_else(|poisoned| poisoned.into_inner());
    switch_foundry_version(version)
}
/// Entry point: parses CLI options, prepares every benchmark repository,
/// runs each selected benchmark task under every requested Foundry version,
/// and writes the aggregated markdown report.
#[allow(unused_must_use)] // sh_println! returns a Result we deliberately ignore
fn main() -> Result<()> {
    color_eyre::install()?;
    let cli = Cli::parse();
    // Check if hyperfine is installed
    let hyperfine_check = Command::new("hyperfine").arg("--version").output();
    // `unwrap` is safe: `||` short-circuits, so it only runs when `is_err()` is false.
    if hyperfine_check.is_err() || !hyperfine_check.unwrap().status.success() {
        eyre::bail!(
            "hyperfine is not installed. Please install it first: https://github.com/sharkdp/hyperfine"
        );
    }
    // Determine versions to test
    let versions = if let Some(v) = cli.versions {
        v
    } else {
        FOUNDRY_VERSIONS.iter().map(|&s| s.to_string()).collect()
    };
    // Get repo configurations
    let repos = if let Some(repo_specs) = cli.repos.clone() {
        repo_specs.iter().map(|spec| spec.parse::<RepoConfig>()).collect::<Result<Vec<_>>>()?
    } else {
        BENCHMARK_REPOS.clone()
    };
    sh_println!("🚀 Foundry Benchmark Runner");
    sh_println!("Running with versions: {}", versions.join(", "));
    sh_println!(
        "Running on repos: {}",
        repos.iter().map(|r| format!("{}/{}", r.org, r.repo)).collect::<Vec<_>>().join(", ")
    );
    // Install versions if requested
    if cli.force_install {
        install_foundry_versions(&versions)?;
    }
    // Determine benchmarks to run; unknown ids are silently dropped.
    let benchmarks = if let Some(b) = cli.benchmarks {
        b.into_iter().filter(|b| ALL_BENCHMARKS.contains(&b.as_str())).collect()
    } else {
        // Default: run all benchmarks except fuzz tests and coverage (which can be slow)
        vec!["forge_test", "forge_build_no_cache", "forge_build_with_cache"]
            .into_iter()
            .map(String::from)
            .collect::<Vec<_>>()
    };
    sh_println!("Running benchmarks: {}", benchmarks.join(", "));
    let mut results = BenchmarkResults::new();
    // Set the first version as baseline
    if let Some(first_version) = versions.first() {
        results.set_baseline_version(first_version.clone());
    }
    // Setup all projects upfront before version loop, in parallel (rayon).
    sh_println!("📦 Setting up projects to benchmark");
    let projects: Vec<(RepoConfig, BenchmarkProject)> = repos
        .par_iter()
        .map(|repo_config| -> Result<(RepoConfig, BenchmarkProject)> {
            sh_println!("Setting up {}/{}", repo_config.org, repo_config.repo);
            let project = BenchmarkProject::setup(repo_config).wrap_err(format!(
                "Failed to setup project for {}/{}",
                repo_config.org, repo_config.repo
            ))?;
            Ok((repo_config.clone(), project))
        })
        .collect::<Result<Vec<_>>>()?;
    sh_println!("✅ All projects setup complete");
    // Create a list of all benchmark tasks (same for all versions):
    // the cartesian product of repos × selected benchmarks.
    let benchmark_tasks: Vec<_> = projects
        .iter()
        .flat_map(|(repo_config, project)| {
            benchmarks
                .iter()
                .map(move |benchmark| (repo_config.clone(), project, benchmark.clone()))
        })
        .collect();
    sh_println!("Will run {} benchmark tasks per version", benchmark_tasks.len());
    // Run benchmarks for each version
    for version in &versions {
        sh_println!("🔧 Switching to Foundry version: {version}");
        switch_version_safe(version)?;
        // Verify the switch and capture full version details
        let current = get_forge_version()?;
        sh_println!("Current version: {}", current.trim());
        // Get and store the full version details with commit hash and date
        let version_details = get_forge_version_details()?;
        results.add_version_details(version, version_details);
        sh_println!("Running benchmark tasks for version {version}...");
        // Run all benchmarks sequentially; the first failure aborts the run.
        let version_results = benchmark_tasks
            .iter()
            .map(|(repo_config, project, benchmark)| -> Result<(String, String, HyperfineResult)> {
                sh_println!("Running {} on {}/{}", benchmark, repo_config.org, repo_config.repo);
                // Determine runs based on benchmark type
                let runs = match benchmark.as_str() {
                    "forge_coverage" => 1, // Coverage runs only once as an exception
                    _ => RUNS, // Use default RUNS constant for all other benchmarks
                };
                // Run the appropriate benchmark
                let result = project.run(benchmark, version, runs, cli.verbose);
                match result {
                    Ok(hyperfine_result) => {
                        sh_println!(
                            " {} on {}/{}: {:.3}s ± {:.3}s",
                            benchmark,
                            repo_config.org,
                            repo_config.repo,
                            hyperfine_result.mean,
                            hyperfine_result.stddev.unwrap_or(0.0)
                        );
                        Ok((repo_config.name.clone(), benchmark.clone(), hyperfine_result))
                    }
                    Err(e) => {
                        eyre::bail!(
                            "Benchmark {} failed for {}/{}: {}",
                            benchmark,
                            repo_config.org,
                            repo_config.repo,
                            e
                        );
                    }
                }
            })
            .collect::<Result<Vec<_>>>()?;
        // Add all collected results to the main results structure
        for (repo_name, benchmark, hyperfine_result) in version_results {
            results.add_result(&benchmark, version, &repo_name, hyperfine_result);
        }
    }
    // Generate markdown report
    sh_println!("📝 Generating report...");
    let markdown = results.generate_markdown(&versions, &repos);
    let output_path = cli.output_dir.join(cli.output_file);
    fs::write(&output_path, markdown).wrap_err("Failed to write output file")?;
    sh_println!("✅ Report written to: {}", output_path.display());
    Ok(())
}
/// Installs each requested Foundry version via `foundryup --install <v> --force`.
///
/// # Errors
/// Fails when `foundryup` cannot be spawned or exits unsuccessfully for any
/// version.
#[allow(unused_must_use)] // sh_println! returns a Result we deliberately ignore
fn install_foundry_versions(versions: &[String]) -> Result<()> {
    sh_println!("Installing Foundry versions...");
    for version in versions {
        sh_println!("Installing {version}...");
        let status = Command::new("foundryup")
            .args(["--install", version, "--force"])
            .status()
            .wrap_err("Failed to run foundryup")?;
        eyre::ensure!(status.success(), "Failed to install Foundry version: {}", version);
    }
    sh_println!("✅ All versions installed successfully");
    Ok(())
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/sourcify.rs | crates/verify/src/sourcify.rs | use crate::{
provider::{VerificationContext, VerificationProvider},
retry::RETRY_CHECK_ON_VERIFY,
utils::ensure_solc_build_metadata,
verify::{ContractLanguage, VerifyArgs, VerifyCheckArgs},
};
use alloy_primitives::Address;
use async_trait::async_trait;
use eyre::{Context, Result, eyre};
use foundry_common::retry::RetryError;
use foundry_compilers::{
artifacts::{Source, StandardJsonCompilerInput, vyper::VyperInput},
solc::SolcLanguage,
};
use futures::FutureExt;
use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use std::path::Path;
use url::Url;
// Default base URL of the Sourcify verification server. Must end with `/`:
// endpoint paths are appended to it textually in the URL builders below.
pub static SOURCIFY_URL: &str = "https://sourcify.dev/server/";
/// The type that can verify a contract on `sourcify`
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct SourcifyVerificationProvider;
#[async_trait]
impl VerificationProvider for SourcifyVerificationProvider {
    /// Dry-run: builds the verification request body without submitting it,
    /// so configuration/compilation problems surface before any network call.
    async fn preflight_verify_check(
        &mut self,
        args: VerifyArgs,
        context: VerificationContext,
    ) -> Result<()> {
        let _ = self.prepare_verify_request(&args, &context).await?;
        Ok(())
    }

    /// Submits the contract for verification on Sourcify and, when `--watch`
    /// is set, polls the resulting job until it completes.
    async fn verify(&mut self, args: VerifyArgs, context: VerificationContext) -> Result<()> {
        let body = self.prepare_verify_request(&args, &context).await?;
        let chain_id = args.etherscan.chain.unwrap_or_default().id();
        // Skip submission entirely when the contract already has an exact
        // match, unless the user opted out of the check.
        if !args.skip_is_verified_check && self.is_contract_verified(&args).await? {
            sh_println!(
                "\nContract [{}] {:?} is already verified. Skipping verification.",
                context.target_name,
                args.address.to_string()
            )?;
            return Ok(());
        }
        trace!("submitting verification request {:?}", body);
        let client = reqwest::Client::new();
        let url =
            Self::get_verify_url(args.verifier.verifier_url.as_deref(), chain_id, args.address);
        // The whole POST is wrapped in the user-configurable retry loop.
        let resp = args
            .retry
            .into_retry()
            .run_async(|| {
                async {
                    sh_println!(
                        "\nSubmitting verification for [{}] {:?}.",
                        context.target_name,
                        args.address.to_string()
                    )?;
                    let response = client
                        .post(&url)
                        .header("Content-Type", "application/json")
                        .body(serde_json::to_string(&body)?)
                        .send()
                        .await?;
                    let status = response.status();
                    match status {
                        // 409 Conflict: the server already has a full match.
                        StatusCode::CONFLICT => {
                            sh_println!("Contract source code already fully verified")?;
                            Ok(None)
                        }
                        // 202 Accepted: submission queued; body carries the job id.
                        StatusCode::ACCEPTED => {
                            let text = response.text().await?;
                            let verify_response: SourcifyVerificationResponse =
                                serde_json::from_str(&text)
                                    .wrap_err("Failed to parse Sourcify verification response")?;
                            Ok(Some(verify_response))
                        }
                        _ => {
                            let error: serde_json::Value = response.json().await?;
                            eyre::bail!(
                                "Sourcify verification request for address ({}) \
                                failed with status code {status}\n\
                                Details: {error:#}",
                                args.address,
                            );
                        }
                    }
                }
                .boxed()
            })
            .await?;
        // `resp` is `None` when the contract was already fully verified (409).
        if let Some(resp) = resp {
            let job_url = Self::get_job_status_url(
                args.verifier.verifier_url.as_deref(),
                resp.verification_id.clone(),
            );
            sh_println!(
                "Submitted contract for verification:\n\tVerification Job ID: `{}`\n\tURL: {}",
                resp.verification_id,
                job_url
            )?;
            if args.watch {
                let check_args = VerifyCheckArgs {
                    id: resp.verification_id,
                    etherscan: args.etherscan,
                    retry: RETRY_CHECK_ON_VERIFY,
                    verifier: args.verifier,
                };
                return self.check(check_args).await;
            }
        }
        Ok(())
    }

    /// Polls a Sourcify verification job until it completes, mapping job
    /// states onto retry/break decisions for the retry loop.
    async fn check(&self, args: VerifyCheckArgs) -> Result<()> {
        let url = Self::get_job_status_url(args.verifier.verifier_url.as_deref(), args.id.clone());
        args.retry
            .into_retry()
            .run_async_until_break(|| async {
                let response = reqwest::get(&url)
                    .await
                    .wrap_err("Failed to request verification status")
                    .map_err(RetryError::Retry)?;
                // 404 means the job id is unknown — retrying cannot help.
                if response.status() == StatusCode::NOT_FOUND {
                    return Err(RetryError::Break(eyre!(
                        "No verification job found for ID {}",
                        args.id
                    )));
                }
                // Any other non-success status is treated as transient.
                if !response.status().is_success() {
                    return Err(RetryError::Retry(eyre!(
                        "Failed to request verification status with status code {}",
                        response.status()
                    )));
                }
                let job_response: SourcifyJobResponse = response
                    .json()
                    .await
                    .wrap_err("Failed to parse job response")
                    .map_err(RetryError::Retry)?;
                if !job_response.is_job_completed {
                    return Err(RetryError::Retry(eyre!("Verification is still pending...")));
                }
                if let Some(error) = job_response.error {
                    // "already verified" arrives as an API error but is a
                    // success from the user's point of view.
                    if error.custom_code == "already_verified" {
                        let _ = sh_println!("Contract source code already verified");
                        return Ok(());
                    }
                    return Err(RetryError::Break(eyre!(
                        "Verification job failed:\nError Code: `{}`\nMessage: `{}`",
                        error.custom_code,
                        error.message
                    )));
                }
                if let Some(contract_status) = job_response.contract.match_status {
                    let _ = sh_println!(
                        "Contract successfully verified:\nStatus: `{}`",
                        contract_status,
                    );
                }
                Ok(())
            })
            .await
            .wrap_err("Checking verification result failed")
    }
}
impl SourcifyVerificationProvider {
    /// Resolves the base URL: the user-supplied `--verifier-url` when it
    /// parses, otherwise the default Sourcify server.
    fn get_base_url(verifier_url: Option<&str>) -> Url {
        // note(onbjerg): a little ugly but makes this infallible as we guarantee `SOURCIFY_URL` to
        // be well formatted
        Url::parse(verifier_url.unwrap_or(SOURCIFY_URL))
            .unwrap_or_else(|_| Url::parse(SOURCIFY_URL).unwrap())
    }

    /// Endpoint for submitting a verification request.
    // NOTE(review): endpoint paths are appended textually, so a custom
    // `--verifier-url` without a trailing `/` would yield a malformed URL —
    // confirm whether the URL is normalized upstream.
    fn get_verify_url(
        verifier_url: Option<&str>,
        chain_id: u64,
        contract_address: Address,
    ) -> String {
        let base_url = Self::get_base_url(verifier_url);
        format!("{base_url}v2/verify/{chain_id}/{contract_address}")
    }

    /// Endpoint for polling a submitted verification job.
    fn get_job_status_url(verifier_url: Option<&str>, job_id: String) -> String {
        let base_url = Self::get_base_url(verifier_url);
        format!("{base_url}v2/verify/{job_id}")
    }

    /// Endpoint for looking up a contract's current verification status.
    fn get_lookup_url(
        verifier_url: Option<&str>,
        chain_id: u64,
        contract_address: Address,
    ) -> String {
        let base_url = Self::get_base_url(verifier_url);
        format!("{base_url}v2/contract/{chain_id}/{contract_address}")
    }

    /// Configures the API request to the sourcify API using the given [`VerifyArgs`].
    ///
    /// Builds a solc standard-json input for Solidity targets or a Vyper
    /// input for Vyper targets, plus the `path:name` contract identifier
    /// (relative to the project root) expected by the API.
    async fn prepare_verify_request(
        &self,
        args: &VerifyArgs,
        context: &VerificationContext,
    ) -> Result<SourcifyVerifyRequest> {
        let lang = args.detect_language(context);
        // Identifier format: "<path relative to project root>:<contract name>".
        let contract_identifier = format!(
            "{}:{}",
            context
                .target_path
                .strip_prefix(context.project.root())
                .unwrap_or(context.target_path.as_path())
                .display(),
            context.target_name
        );
        let creation_transaction_hash = args.creation_transaction_hash.map(|h| h.to_string());
        match lang {
            ContractLanguage::Solidity => {
                let mut input: StandardJsonCompilerInput = context
                    .project
                    .standard_json_input(&context.target_path)
                    .wrap_err("Failed to get standard json input")?
                    .normalize_evm_version(&context.compiler_version);
                let mut settings = context.compiler_settings.solc.settings.clone();
                // Library file paths are rewritten relative to the project root.
                settings.libraries.libs = input
                    .settings
                    .libraries
                    .libs
                    .into_iter()
                    .map(|(f, libs)| {
                        (f.strip_prefix(context.project.root()).unwrap_or(&f).to_path_buf(), libs)
                    })
                    .collect();
                settings.remappings = input.settings.remappings;
                // remove all incompatible settings
                settings.sanitize(&context.compiler_version, SolcLanguage::Solidity);
                input.settings = settings;
                let std_json_input = serde_json::to_value(&input)
                    .wrap_err("Failed to serialize standard json input")?;
                // Solc versions are sent with full build metadata
                // (e.g. "0.8.19+commit.7dd6d404" — see the test below).
                let compiler_version =
                    ensure_solc_build_metadata(context.compiler_version.clone()).await?.to_string();
                Ok(SourcifyVerifyRequest {
                    std_json_input,
                    compiler_version,
                    contract_identifier,
                    creation_transaction_hash,
                })
            }
            ContractLanguage::Vyper => {
                let path = Path::new(&context.target_path);
                let sources = Source::read_all_from(path, &["vy", "vyi"])?;
                let input = VyperInput::new(
                    sources,
                    context.clone().compiler_settings.vyper,
                    &context.compiler_version,
                );
                let std_json_input = serde_json::to_value(&input)
                    .wrap_err("Failed to serialize vyper json input")?;
                let compiler_version = context.compiler_version.to_string();
                Ok(SourcifyVerifyRequest {
                    std_json_input,
                    compiler_version,
                    contract_identifier,
                    creation_transaction_hash,
                })
            }
        }
    }

    /// Returns `true` only when Sourcify reports an exact match for both the
    /// creation and the runtime bytecode; any non-success lookup status
    /// (e.g. unknown contract) counts as "not verified".
    async fn is_contract_verified(&self, args: &VerifyArgs) -> Result<bool> {
        let chain_id = args.etherscan.chain.unwrap_or_default().id();
        let url =
            Self::get_lookup_url(args.verifier.verifier_url.as_deref(), chain_id, args.address);
        match reqwest::get(&url).await {
            Ok(response) => {
                if response.status().is_success() {
                    let contract_response: SourcifyContractResponse =
                        response.json().await.wrap_err("Failed to parse contract response")?;
                    let creation_exact = contract_response
                        .creation_match
                        .as_ref()
                        .map(|s| s == "exact_match")
                        .unwrap_or(false);
                    let runtime_exact = contract_response
                        .runtime_match
                        .as_ref()
                        .map(|s| s == "exact_match")
                        .unwrap_or(false);
                    Ok(creation_exact && runtime_exact)
                } else {
                    Ok(false)
                }
            }
            Err(error) => Err(error).wrap_err_with(|| {
                format!("Failed to query verification status for {}", args.address)
            }),
        }
    }
}
// Request body for the Sourcify `v2/verify` endpoint.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct SourcifyVerifyRequest {
    // Solc standard-json (or Vyper) compiler input, serialized as-is.
    std_json_input: serde_json::Value,
    // Full compiler version; includes build metadata for solc.
    compiler_version: String,
    // "<path>:<name>" of the target contract, relative to the project root.
    contract_identifier: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    creation_transaction_hash: Option<String>,
}
// Response to a successful (202) verification submission.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourcifyVerificationResponse {
    // Job id used to poll `v2/verify/{id}` for completion.
    verification_id: String,
}
// Status of a submitted verification job.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourcifyJobResponse {
    is_job_completed: bool,
    contract: SourcifyContractResponse,
    // Present only when the job failed (or was "already_verified").
    error: Option<SourcifyErrorResponse>,
}
// Per-contract match information returned by Sourcify.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourcifyContractResponse {
    // Overall match status reported on job completion.
    #[serde(rename = "match")]
    match_status: Option<String>,
    // e.g. "exact_match" when the creation bytecode matches exactly.
    creation_match: Option<String>,
    // e.g. "exact_match" when the runtime bytecode matches exactly.
    runtime_match: Option<String>,
}
// Error payload attached to a failed verification job.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SourcifyErrorResponse {
    // Machine-readable code, e.g. "already_verified".
    custom_code: String,
    // Human-readable explanation.
    message: String,
}
#[cfg(test)]
mod tests {
    use super::*;
    use clap::Parser;
    use foundry_test_utils::forgetest_async;
    // Builds a minimal project with a single contract and checks that the
    // prepared Sourcify request carries the expected compiler version
    // (with build metadata), contract identifier, and standard-json shape.
    forgetest_async!(creates_correct_verify_request_body, |prj, _cmd| {
        prj.add_source("Counter", "contract Counter {}");
        let args = VerifyArgs::parse_from([
            "foundry-cli",
            "0xd8509bee9c9bf012282ad33aba0d87241baf5064",
            "src/Counter.sol:Counter",
            "--compiler-version",
            "0.8.19",
            "--root",
            &prj.root().to_string_lossy(),
        ]);
        let context = args.resolve_context().await.unwrap();
        let provider = SourcifyVerificationProvider::default();
        let request = provider.prepare_verify_request(&args, &context).await.unwrap();
        assert_eq!(request.compiler_version, "0.8.19+commit.7dd6d404");
        assert_eq!(request.contract_identifier, "src/Counter.sol:Counter");
        assert!(request.creation_transaction_hash.is_none());
        assert!(request.std_json_input.is_object());
        let json_obj = request.std_json_input.as_object().unwrap();
        assert!(json_obj.contains_key("sources"));
        assert!(json_obj.contains_key("settings"));
        let sources = json_obj.get("sources").unwrap().as_object().unwrap();
        assert!(sources.contains_key("src/Counter.sol"));
        let counter_source = sources.get("src/Counter.sol").unwrap().as_object().unwrap();
        let content = counter_source.get("content").unwrap().as_str().unwrap();
        assert!(content.contains("contract Counter {}"));
    });
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/lib.rs | crates/verify/src/lib.rs | //! Smart contract verification.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
mod etherscan;
pub mod provider;
pub mod bytecode;
pub use bytecode::VerifyBytecodeArgs;
pub mod retry;
pub use retry::RetryArgs;
mod sourcify;
pub mod verify;
pub use verify::{VerifierArgs, VerifyArgs, VerifyCheckArgs};
mod types;
mod utils;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/types.rs | crates/verify/src/types.rs | use eyre::Result;
use serde::{Deserialize, Serialize};
use std::{fmt, str::FromStr};
/// Enum to represent the type of verification: `full` or `partial`.
/// Ref: <https://docs.sourcify.dev/docs/full-vs-partial-match/>
// NOTE: the serde renames below define the wire spellings; `FromStr` and
// `Display` in this file use the same lowercase names.
#[derive(Debug, Clone, clap::ValueEnum, Default, PartialEq, Eq, Serialize, Deserialize, Copy)]
pub enum VerificationType {
    #[default]
    #[serde(rename = "full")]
    Full,
    #[serde(rename = "partial")]
    Partial,
}
impl FromStr for VerificationType {
    type Err = eyre::Error;

    /// Parses `"full"` or `"partial"` (case-sensitive, matching the serde
    /// renames on the enum).
    ///
    /// # Errors
    /// Returns an error naming the rejected input for any other string —
    /// previously the message omitted the offending value, which made CLI
    /// diagnostics unnecessarily vague.
    fn from_str(s: &str) -> Result<Self> {
        match s {
            "full" => Ok(Self::Full),
            "partial" => Ok(Self::Partial),
            _ => eyre::bail!("Invalid verification type: {s}"),
        }
    }
}
impl fmt::Display for VerificationType {
    /// Writes the lowercase wire name, mirroring the `FromStr` spellings.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Full => "full",
            Self::Partial => "partial",
        };
        f.write_str(name)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/verify.rs | crates/verify/src/verify.rs | //! The `forge verify-bytecode` command.
use crate::{
RetryArgs,
etherscan::EtherscanVerificationProvider,
provider::{VerificationContext, VerificationProvider, VerificationProviderType},
utils::is_host_only,
};
use alloy_primitives::{Address, TxHash, map::HashSet};
use alloy_provider::Provider;
use clap::{Parser, ValueEnum, ValueHint};
use eyre::Result;
use foundry_cli::{
opts::{EtherscanOpts, RpcOpts},
utils::{self, LoadConfig},
};
use foundry_common::{ContractsByArtifact, compile::ProjectCompiler};
use foundry_compilers::{artifacts::EvmVersion, compilers::solc::Solc, info::ContractInfo};
use foundry_config::{Config, SolcReq, figment, impl_figment_convert, impl_figment_convert_cast};
use itertools::Itertools;
use reqwest::Url;
use semver::BuildMetadata;
use std::path::PathBuf;
/// The programming language used for smart contract development.
///
/// This enum represents the supported contract languages for verification.
// The derived `ValueEnum` exposes these on the CLI as `solidity` / `vyper`
// (see the `--language` flag on `VerifyArgs`).
#[derive(Copy, Clone, Debug, Eq, PartialEq, ValueEnum)]
pub enum ContractLanguage {
    /// Solidity programming language
    Solidity,
    /// Vyper programming language
    Vyper,
}
/// Verification provider arguments
// NOTE: the `///` comments on the fields below double as clap `--help` text.
#[derive(Clone, Debug, Parser)]
pub struct VerifierArgs {
    /// The contract verification provider to use.
    #[arg(long, help_heading = "Verifier options", default_value = "sourcify", value_enum)]
    pub verifier: VerificationProviderType,
    /// The verifier API KEY, if using a custom provider.
    #[arg(long, help_heading = "Verifier options", env = "VERIFIER_API_KEY")]
    pub verifier_api_key: Option<String>,
    /// The verifier URL, if using a custom provider.
    #[arg(long, help_heading = "Verifier options", env = "VERIFIER_URL")]
    pub verifier_url: Option<String>,
}
impl Default for VerifierArgs {
    /// Defaults to the Sourcify provider with no custom API key or URL,
    /// matching the clap `default_value` on the `verifier` flag.
    fn default() -> Self {
        Self {
            verifier: VerificationProviderType::Sourcify,
            verifier_api_key: Option::default(),
            verifier_url: Option::default(),
        }
    }
}
/// CLI arguments for `forge verify-contract`.
// NOTE: the `///` comments on the fields below double as clap `--help` text;
// keep them user-facing.
#[derive(Clone, Debug, Parser)]
pub struct VerifyArgs {
    /// The address of the contract to verify.
    pub address: Address,
    /// The contract identifier in the form `<path>:<contractname>`.
    pub contract: Option<ContractInfo>,
    /// The ABI-encoded constructor arguments. Only for Etherscan.
    #[arg(
        long,
        conflicts_with = "constructor_args_path",
        value_name = "ARGS",
        visible_alias = "encoded-constructor-args"
    )]
    pub constructor_args: Option<String>,
    /// The path to a file containing the constructor arguments.
    #[arg(long, value_hint = ValueHint::FilePath, value_name = "PATH")]
    pub constructor_args_path: Option<PathBuf>,
    /// Try to extract constructor arguments from on-chain creation code.
    #[arg(long)]
    pub guess_constructor_args: bool,
    /// The hash of the transaction which created the contract. Optional for Sourcify.
    #[arg(long)]
    pub creation_transaction_hash: Option<TxHash>,
    /// The `solc` version to use to build the smart contract.
    #[arg(long, value_name = "VERSION")]
    pub compiler_version: Option<String>,
    /// The compilation profile to use to build the smart contract.
    #[arg(long, value_name = "PROFILE_NAME")]
    pub compilation_profile: Option<String>,
    /// The number of optimization runs used to build the smart contract.
    #[arg(long, visible_alias = "optimizer-runs", value_name = "NUM")]
    pub num_of_optimizations: Option<usize>,
    /// Flatten the source code before verifying.
    #[arg(long)]
    pub flatten: bool,
    /// Do not compile the flattened smart contract before verifying (if --flatten is passed).
    #[arg(short, long)]
    pub force: bool,
    /// Do not check if the contract is already verified before verifying.
    #[arg(long)]
    pub skip_is_verified_check: bool,
    /// Wait for verification result after submission.
    #[arg(long)]
    pub watch: bool,
    /// Set pre-linked libraries.
    #[arg(long, help_heading = "Linker options", env = "DAPP_LIBRARIES")]
    pub libraries: Vec<String>,
    /// The project's root path.
    ///
    /// By default root of the Git repository, if in one,
    /// or the current working directory.
    #[arg(long, value_hint = ValueHint::DirPath, value_name = "PATH")]
    pub root: Option<PathBuf>,
    /// Prints the standard json compiler input.
    ///
    /// The standard json compiler input can be used to manually submit contract verification in
    /// the browser.
    #[arg(long, conflicts_with = "flatten")]
    pub show_standard_json_input: bool,
    /// Use the Yul intermediate representation compilation pipeline.
    #[arg(long)]
    pub via_ir: bool,
    /// The EVM version to use.
    ///
    /// Overrides the version specified in the config.
    #[arg(long)]
    pub evm_version: Option<EvmVersion>,
    /// Do not auto-detect the `solc` version.
    #[arg(long, help_heading = "Compiler options")]
    pub no_auto_detect: bool,
    /// Specify the solc version, or a path to a local solc, to build with.
    ///
    /// Valid values are in the format `x.y.z`, `solc:x.y.z` or `path/to/solc`.
    #[arg(long = "use", help_heading = "Compiler options", value_name = "SOLC_VERSION")]
    pub use_solc: Option<String>,
    #[command(flatten)]
    pub etherscan: EtherscanOpts,
    #[command(flatten)]
    pub rpc: RpcOpts,
    #[command(flatten)]
    pub retry: RetryArgs,
    #[command(flatten)]
    pub verifier: VerifierArgs,
    /// The contract language (`solidity` or `vyper`).
    ///
    /// Defaults to `solidity` if none provided.
    #[arg(long, value_enum)]
    pub language: Option<ContractLanguage>,
}
impl_figment_convert!(VerifyArgs);
impl figment::Provider for VerifyArgs {
fn metadata(&self) -> figment::Metadata {
figment::Metadata::named("Verify Provider")
}
fn data(
&self,
) -> Result<figment::value::Map<figment::Profile, figment::value::Dict>, figment::Error> {
let mut dict = self.etherscan.dict();
dict.extend(self.rpc.dict());
if let Some(root) = self.root.as_ref() {
dict.insert("root".to_string(), figment::value::Value::serialize(root)?);
}
if let Some(optimizer_runs) = self.num_of_optimizations {
dict.insert("optimizer".to_string(), figment::value::Value::serialize(true)?);
dict.insert(
"optimizer_runs".to_string(),
figment::value::Value::serialize(optimizer_runs)?,
);
}
if let Some(evm_version) = self.evm_version {
dict.insert("evm_version".to_string(), figment::value::Value::serialize(evm_version)?);
}
if self.via_ir {
dict.insert("via_ir".to_string(), figment::value::Value::serialize(self.via_ir)?);
}
if self.no_auto_detect {
dict.insert("auto_detect_solc".to_string(), figment::value::Value::serialize(false)?);
}
if let Some(ref solc) = self.use_solc {
let solc = solc.trim_start_matches("solc:");
dict.insert("solc".to_string(), figment::value::Value::serialize(solc)?);
}
if let Some(api_key) = &self.verifier.verifier_api_key {
dict.insert("etherscan_api_key".into(), api_key.as_str().into());
}
Ok(figment::value::Map::from([(Config::selected_profile(), dict)]))
}
}
impl VerifyArgs {
/// Run the verify command to submit the contract's source code for verification on etherscan
pub async fn run(mut self) -> Result<()> {
let config = self.load_config()?;
if self.guess_constructor_args && config.get_rpc_url().is_none() {
eyre::bail!(
"You have to provide a valid RPC URL to use --guess-constructor-args feature"
)
}
// If chain is not set, we try to get it from the RPC.
// If RPC is not set, the default chain is used.
let chain = match config.get_rpc_url() {
Some(_) => {
let provider = utils::get_provider(&config)?;
utils::get_chain(config.chain, provider).await?
}
None => config.chain.unwrap_or_default(),
};
let context = self.resolve_context().await?;
// Set Etherscan options.
self.etherscan.chain = Some(chain);
self.etherscan.key = config.get_etherscan_config_with_chain(Some(chain))?.map(|c| c.key);
if self.show_standard_json_input {
let args = EtherscanVerificationProvider::default()
.create_verify_request(&self, &context)
.await?;
sh_println!("{}", args.source)?;
return Ok(());
}
let verifier_url = self.verifier.verifier_url.clone();
sh_println!("Start verifying contract `{}` deployed on {chain}", self.address)?;
if let Some(version) = &self.evm_version {
sh_println!("EVM version: {version}")?;
}
if let Some(version) = &self.compiler_version {
sh_println!("Compiler version: {version}")?;
}
if let Some(optimizations) = &self.num_of_optimizations {
sh_println!("Optimizations: {optimizations}")?
}
if let Some(args) = &self.constructor_args
&& !args.is_empty()
{
sh_println!("Constructor args: {args}")?
}
self.verifier.verifier.client(self.etherscan.key().as_deref(), self.etherscan.chain, self.verifier.verifier_url.is_some())?.verify(self, context).await.map_err(|err| {
if let Some(verifier_url) = verifier_url {
match Url::parse(&verifier_url) {
Ok(url) => {
if is_host_only(&url) {
return err.wrap_err(format!(
"Provided URL `{verifier_url}` is host only.\n Did you mean to use the API endpoint`{verifier_url}/api` ?"
))
}
}
Err(url_err) => {
return err.wrap_err(format!(
"Invalid URL {verifier_url} provided: {url_err}"
))
}
}
}
err
})
}
/// Returns the configured verification provider
pub fn verification_provider(&self) -> Result<Box<dyn VerificationProvider>> {
self.verifier.verifier.client(
self.etherscan.key().as_deref(),
self.etherscan.chain,
self.verifier.verifier_url.is_some(),
)
}
/// Resolves [VerificationContext] object either from entered contract name or by trying to
/// match bytecode located at given address.
pub async fn resolve_context(&self) -> Result<VerificationContext> {
let mut config = self.load_config()?;
config.libraries.extend(self.libraries.clone());
let project = config.project()?;
if let Some(ref contract) = self.contract {
let contract_path = if let Some(ref path) = contract.path {
project.root().join(PathBuf::from(path))
} else {
project.find_contract_path(&contract.name)?
};
let cache = project.read_cache_file().ok();
let mut version = if let Some(ref version) = self.compiler_version {
version.trim_start_matches('v').parse()?
} else if let Some(ref solc) = config.solc {
match solc {
SolcReq::Version(version) => version.to_owned(),
SolcReq::Local(solc) => Solc::new(solc)?.version,
}
} else if let Some(entry) =
cache.as_ref().and_then(|cache| cache.files.get(&contract_path).cloned())
{
let unique_versions = entry
.artifacts
.get(&contract.name)
.map(|artifacts| artifacts.keys().collect::<HashSet<_>>())
.unwrap_or_default();
if unique_versions.is_empty() {
eyre::bail!(
"No matching artifact found for {}. This could be due to:\n\
- Compiler version mismatch - the contract was compiled with a different Solidity version than what's being used for verification",
contract.name
);
} else if unique_versions.len() > 1 {
warn!(
"Ambiguous compiler versions found in cache: {}",
unique_versions.iter().join(", ")
);
eyre::bail!(
"Compiler version has to be set in `foundry.toml`. If the project was not deployed with foundry, specify the version through `--compiler-version` flag."
)
}
unique_versions.into_iter().next().unwrap().to_owned()
} else {
eyre::bail!(
"If cache is disabled, compiler version must be either provided with `--compiler-version` option or set in foundry.toml"
)
};
let settings = if let Some(profile) = &self.compilation_profile {
if profile == "default" {
&project.settings
} else if let Some(settings) = project.additional_settings.get(profile.as_str()) {
settings
} else {
eyre::bail!("Unknown compilation profile: {}", profile)
}
} else if let Some((cache, entry)) = cache
.as_ref()
.and_then(|cache| Some((cache, cache.files.get(&contract_path)?.clone())))
{
let profiles = entry
.artifacts
.get(&contract.name)
.and_then(|artifacts| {
let mut cached_artifacts = artifacts.get(&version);
// If we try to verify with specific build version and no cached artifacts
// found, then check if we have artifacts cached for same version but
// without any build metadata.
// This could happen when artifacts are built / cached
// with a version like `0.8.20` but verify is using a compiler-version arg
// as `0.8.20+commit.a1b79de6`.
// See <https://github.com/foundry-rs/foundry/issues/9510>.
if cached_artifacts.is_none() && version.build != BuildMetadata::EMPTY {
version.build = BuildMetadata::EMPTY;
cached_artifacts = artifacts.get(&version);
}
cached_artifacts
})
.map(|artifacts| artifacts.keys().collect::<HashSet<_>>())
.unwrap_or_default();
if profiles.is_empty() {
eyre::bail!(
"No matching artifact found for {} with compiler version {}. This could be due to:\n\
- Compiler version mismatch - the contract was compiled with a different Solidity version",
contract.name,
version
);
} else if profiles.len() > 1 {
eyre::bail!(
"Ambiguous compilation profiles found in cache: {}, please specify the profile through `--compilation-profile` flag",
profiles.iter().join(", ")
)
}
let profile = profiles.into_iter().next().unwrap().to_owned();
cache.profiles.get(&profile).expect("must be present")
} else if project.additional_settings.is_empty() {
&project.settings
} else {
eyre::bail!(
"If cache is disabled, compilation profile must be provided with `--compiler-version` option or set in foundry.toml"
)
};
VerificationContext::new(
contract_path,
contract.name.clone(),
version,
config,
settings.clone(),
)
} else {
if config.get_rpc_url().is_none() {
eyre::bail!("You have to provide a contract name or a valid RPC URL")
}
let provider = utils::get_provider(&config)?;
let code = provider.get_code_at(self.address).await?;
let output = ProjectCompiler::new().compile(&project)?;
let contracts = ContractsByArtifact::new(
output.artifact_ids().map(|(id, artifact)| (id, artifact.clone().into())),
);
let Some((artifact_id, _)) = contracts.find_by_deployed_code_exact(&code) else {
eyre::bail!(format!(
"Bytecode at {} does not match any local contracts",
self.address
))
};
let settings = project
.settings_profiles()
.find_map(|(name, settings)| {
(name == artifact_id.profile.as_str()).then_some(settings)
})
.expect("must be present");
VerificationContext::new(
artifact_id.source.clone(),
artifact_id.name.split('.').next().unwrap().to_owned(),
artifact_id.version.clone(),
config,
settings.clone(),
)
}
}
/// Detects the language for verification from source file extension, if none provided.
pub fn detect_language(&self, ctx: &VerificationContext) -> ContractLanguage {
self.language.unwrap_or_else(|| {
match ctx.target_path.extension().and_then(|e| e.to_str()) {
Some("vy") => ContractLanguage::Vyper,
_ => ContractLanguage::Solidity,
}
})
}
}
/// Check verification status arguments
#[derive(Clone, Debug, Parser)]
pub struct VerifyCheckArgs {
/// The verification ID.
///
/// For Etherscan - Submission GUID.
///
/// For Sourcify - Verification Job ID.
pub id: String,
#[command(flatten)]
pub retry: RetryArgs,
#[command(flatten)]
pub etherscan: EtherscanOpts,
#[command(flatten)]
pub verifier: VerifierArgs,
}
impl_figment_convert_cast!(VerifyCheckArgs);
impl VerifyCheckArgs {
/// Run the verify command to submit the contract's source code for verification on etherscan
pub async fn run(self) -> Result<()> {
sh_println!(
"Checking verification status on {}",
self.etherscan.chain.unwrap_or_default()
)?;
self.verifier
.verifier
.client(
self.etherscan.key().as_deref(),
self.etherscan.chain,
self.verifier.verifier_url.is_some(),
)?
.check(self)
.await
}
}
impl figment::Provider for VerifyCheckArgs {
fn metadata(&self) -> figment::Metadata {
figment::Metadata::named("Verify Check Provider")
}
fn data(
&self,
) -> Result<figment::value::Map<figment::Profile, figment::value::Dict>, figment::Error> {
let mut dict = self.etherscan.dict();
if let Some(api_key) = &self.etherscan.key {
dict.insert("etherscan_api_key".into(), api_key.as_str().into());
}
Ok(figment::value::Map::from([(Config::selected_profile(), dict)]))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_parse_verify_contract() {
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0x0000000000000000000000000000000000000000",
"src/Domains.sol:Domains",
"--via-ir",
]);
assert!(args.via_ir);
}
#[test]
fn can_parse_new_compiler_flags() {
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0x0000000000000000000000000000000000000000",
"src/Domains.sol:Domains",
"--no-auto-detect",
"--use",
"0.8.23",
]);
assert!(args.no_auto_detect);
assert_eq!(args.use_solc.as_deref(), Some("0.8.23"));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/bytecode.rs | crates/verify/src/bytecode.rs | //! The `forge verify-bytecode` command.
use crate::{
etherscan::EtherscanVerificationProvider,
utils::{
BytecodeType, JsonResult, check_and_encode_args, check_explorer_args, configure_env_block,
maybe_predeploy_contract,
},
verify::VerifierArgs,
};
use alloy_primitives::{Address, Bytes, TxKind, U256, hex};
use alloy_provider::{
Provider,
ext::TraceApi,
network::{
AnyTxEnvelope, TransactionBuilder, TransactionResponse, primitives::BlockTransactions,
},
};
use alloy_rpc_types::{
BlockId, BlockNumberOrTag, TransactionInput, TransactionRequest, TransactionTrait,
trace::parity::{Action, CreateAction, CreateOutput, TraceOutput},
};
use clap::{Parser, ValueHint};
use eyre::{Context, OptionExt, Result};
use foundry_cli::{
opts::EtherscanOpts,
utils::{self, LoadConfig, read_constructor_args_file},
};
use foundry_common::{SYSTEM_TRANSACTION_TYPE, is_known_system_sender, shell};
use foundry_compilers::{artifacts::EvmVersion, info::ContractInfo};
use foundry_config::{Config, figment, impl_figment_convert};
use foundry_evm::{
constants::DEFAULT_CREATE2_DEPLOYER,
core::AsEnvMut,
executors::EvmError,
utils::{configure_tx_env, configure_tx_req_env},
};
use revm::state::AccountInfo;
use std::path::PathBuf;
impl_figment_convert!(VerifyBytecodeArgs);
/// CLI arguments for `forge verify-bytecode`.
#[derive(Clone, Debug, Parser)]
pub struct VerifyBytecodeArgs {
/// The address of the contract to verify.
pub address: Address,
/// The contract identifier in the form `<path>:<contractname>`.
pub contract: ContractInfo,
/// The block at which the bytecode should be verified.
#[arg(long, value_name = "BLOCK")]
pub block: Option<BlockId>,
/// The constructor args to generate the creation code.
#[arg(
long,
num_args(1..),
conflicts_with_all = &["constructor_args_path", "encoded_constructor_args"],
value_name = "ARGS",
)]
pub constructor_args: Option<Vec<String>>,
/// The ABI-encoded constructor arguments.
#[arg(
long,
conflicts_with_all = &["constructor_args_path", "constructor_args"],
value_name = "HEX",
)]
pub encoded_constructor_args: Option<String>,
/// The path to a file containing the constructor arguments.
#[arg(
long,
value_hint = ValueHint::FilePath,
value_name = "PATH",
conflicts_with_all = &["constructor_args", "encoded_constructor_args"]
)]
pub constructor_args_path: Option<PathBuf>,
/// The rpc url to use for verification.
#[arg(short = 'r', long, value_name = "RPC_URL", env = "ETH_RPC_URL")]
pub rpc_url: Option<String>,
/// Etherscan options.
#[command(flatten)]
pub etherscan: EtherscanOpts,
/// Verifier options.
#[command(flatten)]
pub verifier: VerifierArgs,
/// The project's root path.
///
/// By default root of the Git repository, if in one,
/// or the current working directory.
#[arg(long, value_hint = ValueHint::DirPath, value_name = "PATH")]
pub root: Option<PathBuf>,
/// Ignore verification for creation or runtime bytecode.
#[arg(long, value_name = "BYTECODE_TYPE")]
pub ignore: Option<BytecodeType>,
}
impl figment::Provider for VerifyBytecodeArgs {
fn metadata(&self) -> figment::Metadata {
figment::Metadata::named("Verify Bytecode Provider")
}
fn data(
&self,
) -> Result<figment::value::Map<figment::Profile, figment::value::Dict>, figment::Error> {
let mut dict = self.etherscan.dict();
if let Some(api_key) = &self.verifier.verifier_api_key {
dict.insert("etherscan_api_key".into(), api_key.as_str().into());
}
if let Some(block) = &self.block {
dict.insert("block".into(), figment::value::Value::serialize(block)?);
}
if let Some(rpc_url) = &self.rpc_url {
dict.insert("eth_rpc_url".into(), rpc_url.to_string().into());
}
Ok(figment::value::Map::from([(Config::selected_profile(), dict)]))
}
}
impl VerifyBytecodeArgs {
/// Run the `verify-bytecode` command to verify the bytecode onchain against the locally built
/// bytecode.
pub async fn run(mut self) -> Result<()> {
// Setup
let config = self.load_config()?;
let provider = utils::get_provider(&config)?;
// If chain is not set, we try to get it from the RPC.
// If RPC is not set, the default chain is used.
let chain = match config.get_rpc_url() {
Some(_) => utils::get_chain(config.chain, &provider).await?,
None => config.chain.unwrap_or_default(),
};
// Set Etherscan options.
self.etherscan.chain = Some(chain);
self.etherscan.key = config.get_etherscan_config_with_chain(Some(chain))?.map(|c| c.key);
// Etherscan client
let etherscan =
EtherscanVerificationProvider.client(&self.etherscan, &self.verifier, &config)?;
// Get the bytecode at the address, bailing if it doesn't exist.
let code = provider.get_code_at(self.address).await?;
if code.is_empty() {
eyre::bail!("No bytecode found at address {}", self.address);
}
if !shell::is_json() {
sh_println!(
"Verifying bytecode for contract {} at address {}",
self.contract.name,
self.address
)?;
}
let mut json_results: Vec<JsonResult> = vec![];
// Get creation tx hash.
let creation_data = etherscan.contract_creation_data(self.address).await;
// Check if contract is a predeploy
let (creation_data, maybe_predeploy) = maybe_predeploy_contract(creation_data)?;
trace!(maybe_predeploy = ?maybe_predeploy);
// Get the constructor args using `source_code` endpoint.
let source_code = etherscan.contract_source_code(self.address).await?;
// Check if the contract name matches.
let name = source_code.items.first().map(|item| item.contract_name.to_owned());
if name.as_ref() != Some(&self.contract.name) {
eyre::bail!("Contract name mismatch");
}
// Obtain Etherscan compilation metadata.
let etherscan_metadata = source_code.items.first().unwrap();
// Obtain local artifact
let artifact = crate::utils::build_project(&self, &config)?;
// Get local bytecode (creation code)
let local_bytecode = artifact
.bytecode
.as_ref()
.and_then(|b| b.to_owned().into_bytes())
.ok_or_eyre("Unlinked bytecode is not supported for verification")?;
// Get and encode user provided constructor args
let provided_constructor_args = if let Some(path) = self.constructor_args_path.to_owned() {
// Read from file
Some(read_constructor_args_file(path)?)
} else {
self.constructor_args.to_owned()
}
.map(|args| check_and_encode_args(&artifact, args))
.transpose()?
.or(self.encoded_constructor_args.to_owned().map(hex::decode).transpose()?);
let mut constructor_args = if let Some(provided) = provided_constructor_args {
provided.into()
} else {
// If no constructor args were provided, try to retrieve them from the explorer.
check_explorer_args(source_code.clone())?
};
// This fails only when the contract expects constructor args but NONE were provided OR
// retrieved from explorer (in case of predeploys).
crate::utils::check_args_len(&artifact, &constructor_args)?;
if maybe_predeploy {
if !shell::is_json() {
sh_warn!(
"Attempting to verify predeployed contract at {:?}. Ignoring creation code verification.",
self.address
)?;
}
// Append constructor args to the local_bytecode.
trace!(%constructor_args);
let mut local_bytecode_vec = local_bytecode.to_vec();
local_bytecode_vec.extend_from_slice(&constructor_args);
// Deploy at genesis
let gen_blk_num = 0_u64;
let (mut fork_config, evm_opts) = config.clone().load_config_and_evm_opts()?;
let (mut env, mut executor) = crate::utils::get_tracing_executor(
&mut fork_config,
gen_blk_num,
etherscan_metadata.evm_version()?.unwrap_or(EvmVersion::default()),
evm_opts,
)
.await?;
env.evm_env.block_env.number = U256::ZERO;
let genesis_block = provider.get_block(gen_blk_num.into()).full().await?;
// Setup genesis tx and env.
let deployer = Address::with_last_byte(0x1);
let mut gen_tx_req = TransactionRequest::default()
.with_from(deployer)
.with_input(Bytes::from(local_bytecode_vec))
.into_create();
if let Some(ref block) = genesis_block {
configure_env_block(&mut env.as_env_mut(), block, config.networks);
gen_tx_req.max_fee_per_gas = block.header.base_fee_per_gas.map(|g| g as u128);
gen_tx_req.gas = Some(block.header.gas_limit);
gen_tx_req.gas_price = block.header.base_fee_per_gas.map(|g| g as u128);
}
configure_tx_req_env(&mut env.as_env_mut(), &gen_tx_req, None)
.wrap_err("Failed to configure tx request env")?;
// Seed deployer account with funds
let account_info = AccountInfo {
balance: U256::from(100 * 10_u128.pow(18)),
nonce: 0,
..Default::default()
};
executor.backend_mut().insert_account_info(deployer, account_info);
let fork_address = crate::utils::deploy_contract(
&mut executor,
&env,
config.evm_spec_id(),
gen_tx_req.to,
)?;
// Compare runtime bytecode
let (deployed_bytecode, onchain_runtime_code) = crate::utils::get_runtime_codes(
&mut executor,
&provider,
self.address,
fork_address,
None,
)
.await?;
let match_type = crate::utils::match_bytecodes(
deployed_bytecode.original_byte_slice(),
&onchain_runtime_code,
&constructor_args,
true,
config.bytecode_hash,
);
crate::utils::print_result(
match_type,
BytecodeType::Runtime,
&mut json_results,
etherscan_metadata,
&config,
);
if shell::is_json() {
sh_println!("{}", serde_json::to_string(&json_results)?)?;
}
return Ok(());
}
// We can unwrap directly as maybe_predeploy is false
let creation_data = creation_data.unwrap();
// Get transaction and receipt.
trace!(creation_tx_hash = ?creation_data.transaction_hash);
let transaction = provider
.get_transaction_by_hash(creation_data.transaction_hash)
.await
.or_else(|e| eyre::bail!("Couldn't fetch transaction from RPC: {:?}", e))?
.ok_or_else(|| {
eyre::eyre!("Transaction not found for hash {}", creation_data.transaction_hash)
})?;
let tx_hash = transaction.tx_hash();
let receipt = provider
.get_transaction_receipt(creation_data.transaction_hash)
.await
.or_else(|e| eyre::bail!("Couldn't fetch transaction receipt from RPC: {:?}", e))?;
let receipt = if let Some(receipt) = receipt {
receipt
} else {
eyre::bail!(
"Receipt not found for transaction hash {}",
creation_data.transaction_hash
);
};
let mut transaction: TransactionRequest = match transaction.inner.inner.inner() {
AnyTxEnvelope::Ethereum(tx) => tx.clone().into(),
AnyTxEnvelope::Unknown(_) => unreachable!("Unknown transaction type"),
};
// Extract creation code from creation tx input.
let maybe_creation_code = if receipt.to.is_none()
&& receipt.contract_address == Some(self.address)
{
match &transaction.input.input {
Some(input) => &input[..],
None => unreachable!("creation tx input is None"),
}
} else if receipt.to == Some(DEFAULT_CREATE2_DEPLOYER) {
match &transaction.input.input {
Some(input) => &input[32..],
None => unreachable!("creation tx input is None"),
}
} else {
// Try to get creation bytecode from tx trace.
let traces = provider
.trace_transaction(creation_data.transaction_hash)
.await
.unwrap_or_default();
let creation_bytecode =
traces.iter().find_map(|trace| match (&trace.trace.result, &trace.trace.action) {
(
Some(TraceOutput::Create(CreateOutput { address, .. })),
Action::Create(CreateAction { init, .. }),
) if *address == self.address => Some(init.clone()),
_ => None,
});
&creation_bytecode.ok_or_else(|| {
eyre::eyre!(
"Could not extract the creation code for contract at address {}",
self.address
)
})?
};
// In some cases, Etherscan will return incorrect constructor arguments. If this
// happens, try extracting arguments ourselves.
if !maybe_creation_code.ends_with(&constructor_args) {
trace!("mismatch of constructor args with etherscan");
// If local bytecode is longer than on-chain one, this is probably not a match.
if maybe_creation_code.len() >= local_bytecode.len() {
constructor_args =
Bytes::copy_from_slice(&maybe_creation_code[local_bytecode.len()..]);
trace!(
target: "forge::verify",
"setting constructor args to latest {} bytes of bytecode",
constructor_args.len()
);
}
}
// Append constructor args to the local_bytecode.
trace!(%constructor_args);
let mut local_bytecode_vec = local_bytecode.to_vec();
local_bytecode_vec.extend_from_slice(&constructor_args);
trace!(ignore = ?self.ignore);
// Check if `--ignore` is set to `creation`.
if !self.ignore.is_some_and(|b| b.is_creation()) {
// Compare creation code with locally built bytecode and `maybe_creation_code`.
let match_type = crate::utils::match_bytecodes(
local_bytecode_vec.as_slice(),
maybe_creation_code,
&constructor_args,
false,
config.bytecode_hash,
);
crate::utils::print_result(
match_type,
BytecodeType::Creation,
&mut json_results,
etherscan_metadata,
&config,
);
// If the creation code does not match, the runtime also won't match. Hence return.
if match_type.is_none() {
crate::utils::print_result(
None,
BytecodeType::Runtime,
&mut json_results,
etherscan_metadata,
&config,
);
if shell::is_json() {
sh_println!("{}", serde_json::to_string(&json_results)?)?;
}
return Ok(());
}
}
if !self.ignore.is_some_and(|b| b.is_runtime()) {
// Get contract creation block.
let simulation_block = match self.block {
Some(BlockId::Number(BlockNumberOrTag::Number(block))) => block,
Some(_) => eyre::bail!("Invalid block number"),
None => {
let provider = utils::get_provider(&config)?;
provider
.get_transaction_by_hash(creation_data.transaction_hash)
.await.or_else(|e| eyre::bail!("Couldn't fetch transaction from RPC: {:?}", e))?.ok_or_else(|| {
eyre::eyre!("Transaction not found for hash {}", creation_data.transaction_hash)
})?
.block_number.ok_or_else(|| {
eyre::eyre!("Failed to get block number of the contract creation tx, specify using the --block flag")
})?
}
};
// Fork the chain at `simulation_block`.
let (mut fork_config, evm_opts) = config.clone().load_config_and_evm_opts()?;
let (mut env, mut executor) = crate::utils::get_tracing_executor(
&mut fork_config,
simulation_block - 1, // env.fork_block_number
etherscan_metadata.evm_version()?.unwrap_or(EvmVersion::default()),
evm_opts,
)
.await?;
env.evm_env.block_env.number = U256::from(simulation_block);
let block = provider.get_block(simulation_block.into()).full().await?;
// Workaround for the NonceTooHigh issue as we're not simulating prior txs of the same
// block.
let prev_block_id = BlockId::number(simulation_block - 1);
// Use `transaction.from` instead of `creation_data.contract_creator` to resolve
// blockscout creation data discrepancy in case of CREATE2.
let prev_block_nonce = provider
.get_transaction_count(transaction.from.unwrap())
.block_id(prev_block_id)
.await?;
transaction.set_nonce(prev_block_nonce);
if let Some(ref block) = block {
configure_env_block(&mut env.as_env_mut(), block, config.networks);
let BlockTransactions::Full(ref txs) = block.transactions else {
return Err(eyre::eyre!("Could not get block txs"));
};
// Replay txes in block until the contract creation one.
for tx in txs {
trace!("replay tx::: {}", tx.tx_hash());
if is_known_system_sender(tx.from())
|| tx.transaction_type() == Some(SYSTEM_TRANSACTION_TYPE)
{
continue;
}
if tx.tx_hash() == tx_hash {
break;
}
configure_tx_env(&mut env.as_env_mut(), &tx.inner);
if let TxKind::Call(_) = tx.inner.kind() {
executor.transact_with_env(env.clone()).wrap_err_with(|| {
format!(
"Failed to execute transaction: {:?} in block {}",
tx.tx_hash(),
env.evm_env.block_env.number
)
})?;
} else if let Err(error) = executor.deploy_with_env(env.clone(), None) {
match error {
// Reverted transactions should be skipped
EvmError::Execution(_) => (),
error => {
return Err(error).wrap_err_with(|| {
format!(
"Failed to deploy transaction: {:?} in block {}",
tx.tx_hash(),
env.evm_env.block_env.number
)
});
}
}
}
}
}
// Replace the `input` with local creation code in the creation tx.
if let Some(TxKind::Call(to)) = transaction.kind() {
if to == DEFAULT_CREATE2_DEPLOYER {
let mut input = transaction.input.input.unwrap()[..32].to_vec(); // Salt
input.extend_from_slice(&local_bytecode_vec);
transaction.input = TransactionInput::both(Bytes::from(input));
// Deploy default CREATE2 deployer
executor.deploy_create2_deployer()?;
}
} else {
transaction.input = TransactionInput::both(Bytes::from(local_bytecode_vec));
}
// configure_req__env(&mut env, &transaction.inner);
configure_tx_req_env(&mut env.as_env_mut(), &transaction, None)
.wrap_err("Failed to configure tx request env")?;
let fork_address = crate::utils::deploy_contract(
&mut executor,
&env,
config.evm_spec_id(),
transaction.to,
)?;
// State committed using deploy_with_env, now get the runtime bytecode from the db.
let (fork_runtime_code, onchain_runtime_code) = crate::utils::get_runtime_codes(
&mut executor,
&provider,
self.address,
fork_address,
Some(simulation_block),
)
.await?;
// Compare the onchain runtime bytecode with the runtime code from the fork.
let match_type = crate::utils::match_bytecodes(
fork_runtime_code.original_byte_slice(),
&onchain_runtime_code,
&constructor_args,
true,
config.bytecode_hash,
);
crate::utils::print_result(
match_type,
BytecodeType::Runtime,
&mut json_results,
etherscan_metadata,
&config,
);
}
if shell::is_json() {
sh_println!("{}", serde_json::to_string(&json_results)?)?;
}
Ok(())
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/utils.rs | crates/verify/src/utils.rs | use crate::{bytecode::VerifyBytecodeArgs, types::VerificationType};
use alloy_dyn_abi::DynSolValue;
use alloy_primitives::{Address, Bytes, TxKind, U256};
use alloy_provider::{
Provider,
network::{AnyNetwork, AnyRpcBlock},
};
use alloy_rpc_types::BlockId;
use clap::ValueEnum;
use eyre::{OptionExt, Result};
use foundry_block_explorers::{
contract::{ContractCreationData, ContractMetadata, Metadata},
errors::EtherscanError,
utils::lookup_compiler_version,
};
use foundry_common::{
abi::encode_args, compile::ProjectCompiler, ignore_metadata_hash, provider::RetryProvider,
shell,
};
use foundry_compilers::artifacts::{BytecodeHash, CompactContractBytecode, EvmVersion};
use foundry_config::Config;
use foundry_evm::{
Env, EnvMut, constants::DEFAULT_CREATE2_DEPLOYER, core::AsEnvMut, executors::TracingExecutor,
opts::EvmOpts, traces::TraceMode, utils::apply_chain_and_block_specific_env_changes,
};
use foundry_evm_networks::NetworkConfigs;
use reqwest::Url;
use revm::{bytecode::Bytecode, database::Database, primitives::hardfork::SpecId};
use semver::{BuildMetadata, Version};
use serde::{Deserialize, Serialize};
use yansi::Paint;
/// Enum to represent the type of bytecode being verified
#[derive(Debug, Serialize, Deserialize, Clone, Copy, ValueEnum)]
pub enum BytecodeType {
#[serde(rename = "creation")]
Creation,
#[serde(rename = "runtime")]
Runtime,
}
impl BytecodeType {
/// Check if the bytecode type is creation
pub fn is_creation(&self) -> bool {
matches!(self, Self::Creation)
}
/// Check if the bytecode type is runtime
pub fn is_runtime(&self) -> bool {
matches!(self, Self::Runtime)
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct JsonResult {
pub bytecode_type: BytecodeType,
pub match_type: Option<VerificationType>,
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
pub fn match_bytecodes(
local_bytecode: &[u8],
bytecode: &[u8],
constructor_args: &[u8],
is_runtime: bool,
bytecode_hash: BytecodeHash,
) -> Option<VerificationType> {
// 1. Try full match
if local_bytecode == bytecode {
// If the bytecode_hash = 'none' in Config. Then it's always a partial match according to
// sourcify definitions. Ref: https://docs.sourcify.dev/docs/full-vs-partial-match/.
if bytecode_hash == BytecodeHash::None {
return Some(VerificationType::Partial);
}
Some(VerificationType::Full)
} else {
is_partial_match(local_bytecode, bytecode, constructor_args, is_runtime)
.then_some(VerificationType::Partial)
}
}
pub fn build_project(
args: &VerifyBytecodeArgs,
config: &Config,
) -> Result<CompactContractBytecode> {
let project = config.project()?;
let compiler = ProjectCompiler::new();
let mut output = compiler.compile(&project)?;
let artifact = output
.remove_contract(&args.contract)
.ok_or_eyre("Build Error: Contract artifact not found locally")?;
Ok(artifact.into_contract_bytecode())
}
/// Reports the outcome of a bytecode comparison to the user.
///
/// Human-readable mode prints the result (green on match, red with a list of
/// detected settings mismatches on failure); JSON mode appends a
/// [`JsonResult`] entry to `json_results` instead.
pub fn print_result(
    res: Option<VerificationType>,
    bytecode_type: BytecodeType,
    json_results: &mut Vec<JsonResult>,
    etherscan_config: &Metadata,
    config: &Config,
) {
    match (res, shell::is_json()) {
        // Match found, human-readable output.
        (Some(res), false) => {
            let _ = sh_println!(
                "{} with status {}",
                format!("{bytecode_type:?} code matched").green().bold(),
                res.green().bold()
            );
        }
        // Match found, JSON output.
        (Some(res), true) => {
            json_results.push(JsonResult { bytecode_type, match_type: Some(res), message: None });
        }
        // No match, human-readable output: explain the likely cause.
        (None, false) => {
            let _ = sh_err!(
                "{bytecode_type:?} code did not match - this may be due to varying compiler settings"
            );
            for mismatch in find_mismatch_in_settings(etherscan_config, config) {
                let _ = sh_eprintln!("{}", mismatch.red().bold());
            }
        }
        // No match, JSON output.
        (None, true) => {
            json_results.push(JsonResult {
                bytecode_type,
                match_type: None,
                message: Some(format!(
                    "{bytecode_type:?} code did not match - this may be due to varying compiler settings"
                )),
            });
        }
    }
}
/// Checks whether two bytecodes match once trailing metadata hashes (and,
/// for creation code, trailing ABI-encoded constructor args) are ignored.
fn is_partial_match(
    mut local_bytecode: &[u8],
    mut bytecode: &[u8],
    constructor_args: &[u8],
    is_runtime: bool,
) -> bool {
    // 1. Runtime code, or creation code without constructor args:
    //    assume metadata is at the end of the bytecode and compare directly.
    if constructor_args.is_empty() || is_runtime {
        return try_extract_and_compare_bytecode(local_bytecode, bytecode);
    }
    // Creation code: the constructor args are appended at the end.
    // Guard against underflow before slicing — if either code is shorter
    // than the args it cannot contain them, so it cannot match.
    // (Previously this slicing would panic on such input.)
    if constructor_args.len() > bytecode.len() || constructor_args.len() > local_bytecode.len() {
        return false;
    }
    bytecode = &bytecode[..bytecode.len() - constructor_args.len()];
    local_bytecode = &local_bytecode[..local_bytecode.len() - constructor_args.len()];
    try_extract_and_compare_bytecode(local_bytecode, bytecode)
}
/// Compares two bytecodes after stripping the trailing metadata hash from each.
fn try_extract_and_compare_bytecode(local_bytecode: &[u8], bytecode: &[u8]) -> bool {
    ignore_metadata_hash(local_bytecode) == ignore_metadata_hash(bytecode)
}
/// Compares the compiler settings reported by Etherscan with the local
/// config and returns a human-readable description of every difference.
fn find_mismatch_in_settings(
    etherscan_settings: &Metadata,
    local_settings: &Config,
) -> Vec<String> {
    let mut mismatches = Vec::new();
    // EVM version: Etherscan reports it lowercased.
    let local_evm = local_settings.evm_version.to_string().to_lowercase();
    if etherscan_settings.evm_version != local_evm {
        mismatches.push(format!(
            "EVM version mismatch: local={}, onchain={}",
            local_settings.evm_version, etherscan_settings.evm_version
        ));
    }
    // Optimizer flag: reported on-chain as 0/1.
    let local_optimizer = u64::from(local_settings.optimizer == Some(true));
    if etherscan_settings.optimization_used != local_optimizer {
        mismatches.push(format!(
            "Optimizer mismatch: local={}, onchain={}",
            local_settings.optimizer.unwrap_or(false),
            etherscan_settings.optimization_used
        ));
    }
    // Optimizer runs: a mismatch when set locally and different, or unset
    // locally while non-zero on-chain.
    let runs_differ = match local_settings.optimizer_runs {
        Some(runs) => etherscan_settings.runs != runs as u64,
        None => etherscan_settings.runs > 0,
    };
    if runs_differ {
        mismatches.push(format!(
            "Optimizer runs mismatch: local={}, onchain={}",
            local_settings.optimizer_runs.map_or("unknown".to_string(), |runs| runs.to_string()),
            etherscan_settings.runs
        ));
    }
    mismatches
}
/// Interprets the explorer's contract-creation lookup, detecting predeploy
/// (genesis) contracts that legitimately have no creation transaction.
///
/// Returns `(creation_data, maybe_predeploy)`.
pub fn maybe_predeploy_contract(
    creation_data: Result<ContractCreationData, EtherscanError>,
) -> Result<(Option<ContractCreationData>, bool), eyre::ErrReport> {
    match creation_data {
        Ok(data) => Ok((Some(data), false)),
        // Some explorers answer status "1"/"OK" with an empty result for predeploys.
        // Ref: https://explorer.mode.network/api?module=contract&action=getcontractcreation&contractaddresses=0xC0d3c0d3c0D3c0d3C0D3c0D3C0d3C0D3C0D30010
        Err(EtherscanError::EmptyResult { status, message })
            if status == "1" && message == "OK" =>
        {
            Ok((None, true))
        }
        // Others return a "GENESIS" payload that fails deserialization.
        // Ref: https://api.basescan.org/api?module=contract&action=getcontractcreation&contractaddresses=0xC0d3c0d3c0D3c0d3C0D3c0D3C0d3C0D3C0D30010&apiKey=YourAPIKey
        Err(EtherscanError::Serde { error: _, content }) if content.contains("GENESIS") => {
            Ok((None, true))
        }
        Err(e) => eyre::bail!("Error fetching creation data from verifier-url: {:?}", e),
    }
}
/// Validates the provided constructor arguments against the artifact's ABI
/// and ABI-encodes them; returns an empty vec when there is no constructor.
pub fn check_and_encode_args(
    artifact: &CompactContractBytecode,
    args: Vec<String>,
) -> Result<Vec<u8>, eyre::ErrReport> {
    let Some(constructor) = artifact.abi.as_ref().and_then(|abi| abi.constructor()) else {
        // No constructor: nothing to encode.
        return Ok(Vec::new());
    };
    if constructor.inputs.len() != args.len() {
        eyre::bail!(
            "Mismatch of constructor arguments length. Expected {}, got {}",
            constructor.inputs.len(),
            args.len()
        );
    }
    let encoded = encode_args(&constructor.inputs, &args)?;
    Ok(DynSolValue::Tuple(encoded).abi_encode())
}
/// Extracts the constructor arguments from the block explorer's metadata,
/// erroring when no entry is present.
pub fn check_explorer_args(source_code: ContractMetadata) -> Result<Bytes, eyre::ErrReport> {
    match source_code.items.first() {
        Some(item) => Ok(item.constructor_arguments.clone()),
        None => eyre::bail!("No constructor arguments found from block explorer"),
    }
}
/// Errors when the contract's ABI declares constructor inputs but no
/// constructor arguments were supplied.
pub fn check_args_len(
    artifact: &CompactContractBytecode,
    args: &Bytes,
) -> Result<(), eyre::ErrReport> {
    let constructor = artifact.abi.as_ref().and_then(|abi| abi.constructor());
    if let Some(constructor) = constructor {
        if !constructor.inputs.is_empty() && args.is_empty() {
            eyre::bail!(
                "Contract expects {} constructor argument(s), but none were provided",
                constructor.inputs.len()
            );
        }
    }
    Ok(())
}
/// Builds a [`TracingExecutor`] forked at `fork_blk_num`.
///
/// Mutates `fork_config`: pins `fork_block_number` and `evm_version` before
/// fetching the fork material. Returns the environment together with the
/// configured executor.
pub async fn get_tracing_executor(
    fork_config: &mut Config,
    fork_blk_num: u64,
    evm_version: EvmVersion,
    evm_opts: EvmOpts,
) -> Result<(Env, TracingExecutor)> {
    fork_config.fork_block_number = Some(fork_blk_num);
    fork_config.evm_version = evm_version;
    // Grab before `evm_opts` is moved into get_fork_material below.
    let create2_deployer = evm_opts.create2_deployer;
    let (env, fork, _chain, networks) =
        TracingExecutor::get_fork_material(fork_config, evm_opts).await?;
    // Trace at call granularity.
    // NOTE(review): trailing `None` argument — see TracingExecutor::new for its meaning.
    let executor = TracingExecutor::new(
        env.clone(),
        fork,
        Some(fork_config.evm_version),
        TraceMode::Call,
        networks,
        create2_deployer,
        None,
    )?;
    Ok((env, executor))
}
/// Copies the relevant header fields of `block` into the EVM block
/// environment, then applies chain/block specific adjustments.
pub fn configure_env_block(env: &mut EnvMut<'_>, block: &AnyRpcBlock, config: NetworkConfigs) {
    env.block.timestamp = U256::from(block.header.timestamp);
    env.block.beneficiary = block.header.beneficiary;
    env.block.difficulty = block.header.difficulty;
    // prevrandao is sourced from mix_hash (zero when the header lacks it).
    env.block.prevrandao = Some(block.header.mix_hash.unwrap_or_default());
    // Pre-EIP-1559 blocks have no base fee; default to zero.
    env.block.basefee = block.header.base_fee_per_gas.unwrap_or_default();
    env.block.gas_limit = block.header.gas_limit;
    apply_chain_and_block_specific_env_changes::<AnyNetwork>(env.as_env_mut(), block, config);
}
/// Deploys the contract on the fork — either via a plain creation
/// transaction, or through the default CREATE2 deployer when `to` is a call
/// targeting it — and returns the deployed address.
///
/// # Errors
///
/// Fails when `to` is a call to anything other than the default CREATE2
/// deployer, or when the CREATE2 call does not return a 20-byte address.
pub fn deploy_contract(
    executor: &mut TracingExecutor,
    env: &Env,
    spec_id: SpecId,
    to: Option<TxKind>,
) -> Result<Address, eyre::ErrReport> {
    let env = Env::new_with_spec_id(
        env.evm_env.cfg_env.clone(),
        env.evm_env.block_env.clone(),
        env.tx.clone(),
        spec_id,
    );
    // Idiom fix: a single `if let` replaces the previous
    // `is_some_and(..)` + `unwrap()` + `unreachable!()` combination.
    if let Some(TxKind::Call(to)) = to {
        // A creation tx routed through a call is only valid when it targets
        // the default CREATE2 deployer.
        if to != DEFAULT_CREATE2_DEPLOYER {
            eyre::bail!(
                "Transaction `to` address is not the default create2 deployer i.e the tx is not a contract creation tx."
            );
        }
        let result = executor.transact_with_env(env)?;
        trace!(transact_result = ?result.exit_reason);
        // The CREATE2 deployer returns the deployed address as raw bytes.
        if result.result.len() != 20 {
            eyre::bail!(
                "Failed to deploy contract on fork at block: call result is not exactly 20 bytes"
            );
        }
        Ok(Address::from_slice(&result.result))
    } else {
        // Plain creation transaction (including `to == Some(TxKind::Create)`).
        let deploy_result = executor.deploy_with_env(env, None)?;
        trace!(deploy_result = ?deploy_result.raw.exit_reason);
        Ok(deploy_result.address)
    }
}
/// Fetches both the runtime code of the contract deployed on the local fork
/// (at `fork_address`) and the on-chain runtime code of `address` (pinned to
/// `block` when provided, otherwise latest).
pub async fn get_runtime_codes(
    executor: &mut TracingExecutor,
    provider: &RetryProvider,
    address: Address,
    fork_address: Address,
    block: Option<u64>,
) -> Result<(Bytecode, Bytes)> {
    // Runtime code of the locally deployed contract, read from the fork backend.
    let fork_runtime_code = executor
        .backend_mut()
        .basic(fork_address)?
        .ok_or_else(|| {
            eyre::eyre!(
                "Failed to get runtime code for contract deployed on fork at address {}",
                fork_address
            )
        })?
        .code
        .ok_or_else(|| {
            eyre::eyre!(
                "Bytecode does not exist for contract deployed on fork at address {}",
                fork_address
            )
        })?;
    // On-chain runtime code, optionally pinned to a specific block.
    let onchain_runtime_code = if let Some(block) = block {
        provider.get_code_at(address).block_id(BlockId::number(block)).await?
    } else {
        provider.get_code_at(address).await?
    };
    Ok((fork_runtime_code, onchain_runtime_code))
}
/// Returns `true` if the URL only consists of host.
///
/// This is used to check user input url for missing /api path
pub fn is_host_only(url: &Url) -> bool {
    let path = url.path();
    path.is_empty() || path == "/"
}
/// Given any solc [Version] return a [Version] with build metadata
///
/// # Example
///
/// ```ignore
/// use semver::{BuildMetadata, Version};
/// let version = Version::new(1, 2, 3);
/// let version = ensure_solc_build_metadata(version).await?;
/// assert_ne!(version.build, BuildMetadata::EMPTY);
/// ```
pub async fn ensure_solc_build_metadata(version: Version) -> Result<Version> {
    // Only look the version up when it doesn't already carry build metadata.
    if version.build == BuildMetadata::EMPTY {
        Ok(lookup_compiler_version(&version).await?)
    } else {
        Ok(version)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // `is_host_only` must be true only when the URL path is empty or "/".
    #[test]
    fn test_host_only() {
        assert!(!is_host_only(&Url::parse("https://blockscout.net/api").unwrap()));
        assert!(is_host_only(&Url::parse("https://blockscout.net/").unwrap()));
        assert!(is_host_only(&Url::parse("https://blockscout.net").unwrap()));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/retry.rs | crates/verify/src/retry.rs | use clap::{Parser, builder::RangedU64ValueParser};
use foundry_common::retry::Retry;
use std::time::Duration;
/// Retry config used when waiting for verification (8 attempts, 15s between tries)
pub const RETRY_CHECK_ON_VERIFY: RetryArgs = RetryArgs { retries: 8, delay: 15 };
/// Retry config used when waiting for a created contract (15 attempts, 5s between tries)
pub const RETRY_VERIFY_ON_CREATE: RetryArgs = RetryArgs { retries: 15, delay: 5 };
/// Retry arguments for contract verification.
// NOTE: the field doc comments double as clap help text — keep them user-facing.
#[derive(Clone, Copy, Debug, Parser)]
#[command(about = "Allows to use retry arguments for contract verification")] // override doc
pub struct RetryArgs {
    /// Number of attempts for retrying verification.
    // clap enforces retries >= 1; defaults to 5 attempts.
    #[arg(
        long,
        value_parser = RangedU64ValueParser::<u32>::new().range(1..),
        default_value = "5",
    )]
    pub retries: u32,
    /// Optional delay to apply in between verification attempts, in seconds.
    // clap caps the delay at 180 seconds; defaults to 5.
    #[arg(
        long,
        value_parser = RangedU64ValueParser::<u32>::new().range(0..=180),
        default_value = "5",
    )]
    pub delay: u32,
}
impl Default for RetryArgs {
    /// Defaults to the contract-creation wait profile (15 retries, 5s delay).
    fn default() -> Self {
        RETRY_VERIFY_ON_CREATE
    }
}
impl RetryArgs {
    /// Converts the arguments into a `Retry` instance.
    pub fn into_retry(self) -> Retry {
        let delay = Duration::from_secs(u64::from(self.delay));
        Retry::new(self.retries, delay)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_cli() {
        // (argv, expected retries, expected delay) — unset flags fall back
        // to the clap defaults (5 / 5).
        let cases: [(&[&str], u32, u32); 3] = [
            (&["foundry-cli", "--retries", "10"], 10, 5),
            (&["foundry-cli", "--delay", "10"], 5, 10),
            (&["foundry-cli", "--retries", "10", "--delay", "10"], 10, 10),
        ];
        for (argv, retries, delay) in cases {
            let args = RetryArgs::parse_from(argv.iter().copied());
            assert_eq!(args.retries, retries);
            assert_eq!(args.delay, delay);
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/provider.rs | crates/verify/src/provider.rs | use crate::{
etherscan::EtherscanVerificationProvider,
sourcify::SourcifyVerificationProvider,
verify::{VerifyArgs, VerifyCheckArgs},
};
use alloy_json_abi::JsonAbi;
use async_trait::async_trait;
use eyre::{OptionExt, Result};
use foundry_common::compile::ProjectCompiler;
use foundry_compilers::{
Graph, Project,
artifacts::{Metadata, Source, output_selection::OutputSelection},
compilers::solc::SolcCompiler,
multi::{MultiCompilerParser, MultiCompilerSettings},
solc::Solc,
};
use foundry_config::{Chain, Config, EtherscanConfigError};
use semver::Version;
use std::{fmt, path::PathBuf, str::FromStr};
/// Container with data required for contract verification.
#[derive(Debug, Clone)]
pub struct VerificationContext {
    /// Resolved foundry configuration.
    pub config: Config,
    /// Compiler project; `new` pins its solc to `compiler_version`.
    pub project: Project,
    /// Path of the source file containing the target contract.
    pub target_path: PathBuf,
    /// Name of the target contract.
    pub target_name: String,
    /// Exact compiler version to verify against.
    pub compiler_version: Version,
    /// Compiler settings for the build.
    pub compiler_settings: MultiCompilerSettings,
}
impl VerificationContext {
    /// Creates a new context, pinning the project to the exact
    /// `compiler_version` (installing that solc if missing) and disabling
    /// artifact output.
    pub fn new(
        target_path: PathBuf,
        target_name: String,
        compiler_version: Version,
        config: Config,
        compiler_settings: MultiCompilerSettings,
    ) -> Result<Self> {
        let mut project = config.project()?;
        // Verification builds are throw-away; don't write artifacts to disk.
        project.no_artifacts = true;
        let solc = Solc::find_or_install(&compiler_version)?;
        project.compiler.solc = Some(SolcCompiler::Specific(solc));
        Ok(Self { config, project, target_name, target_path, compiler_version, compiler_settings })
    }
    /// Compiles target contract requesting only ABI and returns it.
    pub fn get_target_abi(&self) -> Result<JsonAbi> {
        let mut project = self.project.clone();
        // Restrict compiler output to the ABI to keep the build cheap.
        project.update_output_selection(|selection| {
            *selection = OutputSelection::common_output_selection(["abi".to_string()])
        });
        let output = ProjectCompiler::new()
            .quiet(true)
            .files([self.target_path.clone()])
            .compile(&project)?;
        let artifact = output
            .find(&self.target_path, &self.target_name)
            .ok_or_eyre("failed to find target artifact when compiling for abi")?;
        artifact.abi.clone().ok_or_eyre("target artifact does not have an ABI")
    }
    /// Compiles target file requesting only metadata and returns it.
    pub fn get_target_metadata(&self) -> Result<Metadata> {
        let mut project = self.project.clone();
        // Restrict compiler output to the metadata section only.
        project.update_output_selection(|selection| {
            *selection = OutputSelection::common_output_selection(["metadata".to_string()]);
        });
        let output = ProjectCompiler::new()
            .quiet(true)
            .files([self.target_path.clone()])
            .compile(&project)?;
        let artifact = output
            .find(&self.target_path, &self.target_name)
            .ok_or_eyre("failed to find target artifact when compiling for metadata")?;
        artifact.metadata.clone().ok_or_eyre("target artifact does not have metadata")
    }
    /// Returns [Vec] containing imports of the target file.
    pub fn get_target_imports(&self) -> Result<Vec<PathBuf>> {
        let mut sources = self.project.paths.read_input_files()?;
        // Ensure the target itself is part of the source set before resolving.
        sources.insert(self.target_path.clone(), Source::read(&self.target_path)?);
        let graph = Graph::<MultiCompilerParser>::resolve_sources(&self.project.paths, sources)?;
        Ok(graph.imports(&self.target_path).into_iter().map(Into::into).collect())
    }
}
/// An abstraction for various verification providers such as etherscan, sourcify, blockscout
#[async_trait]
pub trait VerificationProvider {
    /// This should ensure the verify request can be prepared successfully.
    ///
    /// Caution: Implementers must ensure that this _never_ sends the actual verify request
    /// `[VerificationProvider::verify]`, instead this is supposed to evaluate whether the given
    /// [`VerifyArgs`] are valid to begin with. This should prevent situations where there's a
    /// contract deployment that's executed before the verify request and the subsequent verify task
    /// fails due to misconfiguration.
    async fn preflight_verify_check(
        &mut self,
        args: VerifyArgs,
        context: VerificationContext,
    ) -> Result<()>;
    /// Sends the actual verify request for the targeted contract.
    async fn verify(&mut self, args: VerifyArgs, context: VerificationContext) -> Result<()>;
    /// Checks whether the contract identified by `args` is verified,
    /// retrying per the provider's retry settings.
    async fn check(&self, args: VerifyCheckArgs) -> Result<()>;
}
impl FromStr for VerificationProviderType {
    type Err = String;
    /// Parses a provider from its full name or single-letter shorthand.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let provider = match s {
            "e" | "etherscan" => Self::Etherscan,
            "s" | "sourcify" => Self::Sourcify,
            "b" | "blockscout" => Self::Blockscout,
            "o" | "oklink" => Self::Oklink,
            "c" | "custom" => Self::Custom,
            other => return Err(format!("Unknown provider: {other}")),
        };
        Ok(provider)
    }
}
impl fmt::Display for VerificationProviderType {
    /// Writes the provider's canonical lowercase name (inverse of `FromStr`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Etherscan => "etherscan",
            Self::Sourcify => "sourcify",
            Self::Blockscout => "blockscout",
            Self::Oklink => "oklink",
            Self::Custom => "custom",
        };
        f.write_str(name)
    }
}
/// The verification backend to submit contract sources to.
// NOTE: variant doc comments double as clap value help; non-doc notes below.
#[derive(Clone, Debug, Default, PartialEq, Eq, clap::ValueEnum)]
pub enum VerificationProviderType {
    // Requires an API key (see `client`).
    Etherscan,
    // Default when no `--verifier` is passed.
    #[default]
    Sourcify,
    // Requires a verifier URL (see `client`).
    Blockscout,
    // Requires a verifier URL (see `client`).
    Oklink,
    /// Custom verification provider, requires compatibility with the Etherscan API.
    Custom,
}
impl VerificationProviderType {
    /// Returns the corresponding `VerificationProvider` for the key
    ///
    /// # Errors
    ///
    /// Fails when the chosen provider's requirements aren't met: Etherscan
    /// needs an API key (and a chain it knows), Blockscout/Oklink/Custom need
    /// a verifier URL, and otherwise no provider can be inferred.
    pub fn client(
        &self,
        key: Option<&str>,
        chain: Option<Chain>,
        has_url: bool,
    ) -> Result<Box<dyn VerificationProvider>> {
        let has_key = key.as_ref().is_some_and(|k| !k.is_empty());
        // 1. If no verifier or `--verifier sourcify` is set and no API key provided, use Sourcify.
        if !has_key && self.is_sourcify() {
            sh_println!(
                "Attempting to verify on Sourcify. Pass the --etherscan-api-key <API_KEY> to verify on Etherscan, \
                or use the --verifier flag to verify on another provider."
            )?;
            return Ok(Box::<SourcifyVerificationProvider>::default());
        }
        // 2. If `--verifier etherscan` is explicitly set, check if chain is supported and
        // enforce the API key requirement.
        if self.is_etherscan() {
            if let Some(chain) = chain
                && chain.etherscan_urls().is_none()
            {
                eyre::bail!(EtherscanConfigError::UnknownChain(
                    "when using Etherscan verifier".to_string(),
                    chain
                ))
            }
            if !has_key {
                eyre::bail!("ETHERSCAN_API_KEY must be set to use Etherscan as a verifier")
            }
            return Ok(Box::<EtherscanVerificationProvider>::default());
        }
        // 3. If `--verifier blockscout | oklink | custom` is explicitly set, use the chosen
        // verifier and make sure an URL was specified.
        // These all speak the Etherscan API, hence the Etherscan provider.
        if matches!(self, Self::Blockscout | Self::Oklink | Self::Custom) {
            if !has_url {
                eyre::bail!("No verifier URL specified for verifier {}", self);
            }
            return Ok(Box::<EtherscanVerificationProvider>::default());
        }
        // 4. If no `--verifier` is specified but `ETHERSCAN_API_KEY` is set, default to Etherscan.
        if has_key {
            return Ok(Box::<EtherscanVerificationProvider>::default());
        }
        // 5. If no valid provider is specified, bail.
        eyre::bail!(
            "No valid verification provider specified. Pass the --verifier flag to specify a provider or set the ETHERSCAN_API_KEY environment variable to use Etherscan as a verifier."
        )
    }
    /// Whether this is the Sourcify provider.
    pub fn is_sourcify(&self) -> bool {
        matches!(self, Self::Sourcify)
    }
    /// Whether this is the Etherscan provider.
    pub fn is_etherscan(&self) -> bool {
        matches!(self, Self::Etherscan)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/etherscan/standard_json.rs | crates/verify/src/etherscan/standard_json.rs | use super::{EtherscanSourceProvider, VerifyArgs};
use crate::{provider::VerificationContext, verify::ContractLanguage};
use eyre::{Context, Result};
use foundry_block_explorers::verify::CodeFormat;
use foundry_compilers::{
artifacts::{Source, StandardJsonCompilerInput, vyper::VyperInput},
solc::SolcLanguage,
};
use std::path::Path;
/// Source provider emitting the standard-json (or Vyper-json) verification payload.
#[derive(Debug)]
pub struct EtherscanStandardJsonSource;
impl EtherscanSourceProvider for EtherscanStandardJsonSource {
    /// Builds the standard-json (Solidity) or Vyper-json verification payload.
    ///
    /// Library paths are re-rooted relative to the project root and settings
    /// are sanitized for the target compiler version before serializing.
    /// Returns `(source_json, "path:ContractName", code_format)`.
    fn source(
        &self,
        args: &VerifyArgs,
        context: &VerificationContext,
    ) -> Result<(String, String, CodeFormat)> {
        let mut input: StandardJsonCompilerInput = context
            .project
            .standard_json_input(&context.target_path)
            .wrap_err("Failed to get standard json input")?
            .normalize_evm_version(&context.compiler_version);
        let lang = args.detect_language(context);
        let code_format = match lang {
            ContractLanguage::Solidity => CodeFormat::StandardJsonInput,
            ContractLanguage::Vyper => CodeFormat::VyperJson,
        };
        let mut settings = context.compiler_settings.solc.settings.clone();
        // Library file paths must be relative to the project root for the verifier.
        settings.libraries.libs = input
            .settings
            .libraries
            .libs
            .into_iter()
            .map(|(f, libs)| {
                (f.strip_prefix(context.project.root()).unwrap_or(&f).to_path_buf(), libs)
            })
            .collect();
        settings.remappings = input.settings.remappings;
        // remove all incompatible settings
        settings.sanitize(&context.compiler_version, SolcLanguage::Solidity);
        input.settings = settings;
        let source = match lang {
            ContractLanguage::Solidity => {
                serde_json::to_string(&input).wrap_err("Failed to parse standard json input")?
            }
            ContractLanguage::Vyper => {
                let path = Path::new(&context.target_path);
                let sources = Source::read_all_from(path, &["vy", "vyi"])?;
                // Fix: clone only the vyper settings — the previous
                // `context.clone()` copied the entire context (project,
                // config, …) just to obtain owned settings.
                let input = VyperInput::new(
                    sources,
                    context.compiler_settings.vyper.clone(),
                    &context.compiler_version,
                );
                serde_json::to_string(&input).wrap_err("Failed to parse vyper json input")?
            }
        };
        trace!(target: "forge::verify", standard_json=source, "determined standard json input");
        // Verifier expects "<path relative to project root>:<contract name>".
        let name = format!(
            "{}:{}",
            context
                .target_path
                .strip_prefix(context.project.root())
                .unwrap_or(context.target_path.as_path())
                .display(),
            context.target_name.clone()
        );
        Ok((source, name, code_format))
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/etherscan/flatten.rs | crates/verify/src/etherscan/flatten.rs | use super::{EtherscanSourceProvider, VerifyArgs};
use crate::provider::VerificationContext;
use eyre::Result;
use foundry_block_explorers::verify::CodeFormat;
use foundry_common::flatten;
use foundry_compilers::{
AggregatedCompilerOutput,
artifacts::{BytecodeHash, Source, Sources},
buildinfo::RawBuildInfo,
compilers::{
Compiler, CompilerInput,
solc::{SolcCompiler, SolcLanguage, SolcVersionedInput},
},
solc::Solc,
};
use semver::{BuildMetadata, Version};
use std::path::Path;
/// Source provider emitting a single flattened Solidity file.
#[derive(Debug)]
pub struct EtherscanFlattenedSource;
impl EtherscanSourceProvider for EtherscanFlattenedSource {
    /// Builds the single-file (flattened) verification payload.
    ///
    /// Requires `bytecode_hash = "ipfs"` since Etherscan recompiles flattened
    /// sources with IPFS metadata hashing. Unless `--force` is given, the
    /// flattened output is compiled locally first as a dry run to catch
    /// flattening conflicts early.
    fn source(
        &self,
        args: &VerifyArgs,
        context: &VerificationContext,
    ) -> Result<(String, String, CodeFormat)> {
        let metadata = context.project.settings.solc.metadata.as_ref();
        let bch = metadata.and_then(|m| m.bytecode_hash).unwrap_or_default();
        eyre::ensure!(
            bch == BytecodeHash::Ipfs,
            "When using flattened source, bytecodeHash must be set to ipfs because Etherscan uses IPFS in its Compiler Settings when re-compiling your code. BytecodeHash is currently: {}. Hint: Set the bytecodeHash key in your foundry.toml :)",
            bch,
        );
        let flattened_source = flatten(context.project.clone(), &context.target_path)?;
        if !args.force {
            // solc dry run of flattened code
            self.check_flattened(
                flattened_source.clone(),
                &context.compiler_version,
                &context.target_path,
            )
            .map_err(|err| {
                // Fix: the message previously rendered as
                // "…locally: `<err>`To skip this solc dry, …" — the string
                // continuation swallowed the separator and "run" was missing.
                eyre::eyre!(
                    "Failed to compile the flattened code locally: `{}`. \
                    To skip this solc dry run, have a look at the `--force` flag of this command.",
                    err
                )
            })?;
        }
        Ok((flattened_source, context.target_name.clone(), CodeFormat::SingleFile))
    }
}
impl EtherscanFlattenedSource {
    /// Attempts to compile the flattened content locally with the compiler version.
    ///
    /// This expects the completely flattened content and will try to compile it using the
    /// provided compiler. If the compiler is missing it will be installed.
    ///
    /// # Errors
    ///
    /// If it failed to install a missing solc compiler
    ///
    /// # Exits
    ///
    /// If the solc compiler output contains errors, this could either be due to a bug in the
    /// flattening code or could to conflict in the flattened code, for example if there are
    /// multiple interfaces with the same name.
    fn check_flattened(
        &self,
        content: impl Into<String>,
        version: &Version,
        contract_path: &Path,
    ) -> Result<()> {
        // Build metadata (commit hash) can't be part of an installable version.
        let version = strip_build_meta(version.clone());
        let solc = Solc::find_or_install(&version)?;
        // Compile the flattened source as a single in-memory file.
        let input = SolcVersionedInput::build(
            Sources::from([("contract.sol".into(), Source::new(content))]),
            Default::default(),
            SolcLanguage::Solidity,
            version.clone(),
        );
        let out = SolcCompiler::Specific(solc).compile(&input)?;
        if out.errors.iter().any(|e| e.is_error()) {
            // Aggregate the output so diagnostics render like a normal build.
            let mut o = AggregatedCompilerOutput::<SolcCompiler>::default();
            o.extend(version, RawBuildInfo::new(&input, &out, false)?, "default", out);
            let diags = o.diagnostics(&[], &[], Default::default());
            eyre::bail!(
                "\
Failed to compile the flattened code locally.
This could be a bug, please inspect the output of `forge flatten {}` and report an issue.
To skip this solc dry, pass `--force`.
Diagnostics: {diags}",
                contract_path.display()
            );
        }
        Ok(())
    }
}
/// Strips [BuildMetadata] from the [Version]
///
/// **Note:** this is only for local compilation as a dry run, therefore this will return a
/// sanitized variant of the specific version so that it can be installed. This is merely
/// intended to ensure the flattened code can be compiled without errors.
fn strip_build_meta(version: Version) -> Version {
    if version.build == BuildMetadata::EMPTY {
        version
    } else {
        Version::new(version.major, version.minor, version.patch)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/verify/src/etherscan/mod.rs | crates/verify/src/etherscan/mod.rs | use crate::{
VerifierArgs,
provider::{VerificationContext, VerificationProvider},
retry::RETRY_CHECK_ON_VERIFY,
utils::ensure_solc_build_metadata,
verify::{ContractLanguage, VerifyArgs, VerifyCheckArgs},
};
use alloy_json_abi::Function;
use alloy_primitives::hex;
use alloy_provider::Provider;
use alloy_rpc_types::TransactionTrait;
use eyre::{Context, OptionExt, Result, eyre};
use foundry_block_explorers::{
Client,
errors::EtherscanError,
verify::{CodeFormat, VerifyContract},
};
use foundry_cli::{
opts::EtherscanOpts,
utils::{LoadConfig, get_provider, read_constructor_args_file},
};
use foundry_common::{abi::encode_function_args, retry::RetryError};
use foundry_compilers::{Artifact, artifacts::BytecodeObject};
use foundry_config::Config;
use foundry_evm::constants::DEFAULT_CREATE2_DEPLOYER;
use regex::Regex;
use semver::BuildMetadata;
use std::{fmt::Debug, sync::LazyLock};
mod flatten;
mod standard_json;
/// Captures the `commit.xxxxxxxx` build-metadata fragment of a solc version string.
pub static RE_BUILD_COMMIT: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"(?P<commit>commit\.[0-9,a-f]{8})").unwrap());
/// Verification provider speaking the Etherscan (and Etherscan-compatible) API.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct EtherscanVerificationProvider;
/// The contract source provider for [EtherscanVerificationProvider]
///
/// Returns source, contract_name and the source [CodeFormat]
trait EtherscanSourceProvider: Send + Sync + Debug {
    /// Produces the verification payload for `args` in the format this
    /// provider implements (flattened single file, or standard/Vyper json).
    fn source(
        &self,
        args: &VerifyArgs,
        context: &VerificationContext,
    ) -> Result<(String, String, CodeFormat)>;
}
#[async_trait::async_trait]
impl VerificationProvider for EtherscanVerificationProvider {
    /// Builds the full verification request (client + payload) without
    /// sending it, so misconfiguration surfaces before any deployment.
    async fn preflight_verify_check(
        &mut self,
        args: VerifyArgs,
        context: VerificationContext,
    ) -> Result<()> {
        let _ = self.prepare_verify_request(&args, &context).await?;
        Ok(())
    }
    /// Submits the verification request (retrying per `args.retry`) and,
    /// when `--watch` is set, polls the status until it settles.
    async fn verify(&mut self, args: VerifyArgs, context: VerificationContext) -> Result<()> {
        let (etherscan, verify_args) = self.prepare_verify_request(&args, &context).await?;
        // Skip the round-trip when the explorer already knows this contract.
        if !args.skip_is_verified_check
            && self.is_contract_verified(&etherscan, &verify_args).await?
        {
            sh_println!(
                "\nContract [{}] {:?} is already verified. Skipping verification.",
                verify_args.contract_name,
                verify_args.address.to_checksum(None)
            )?;
            return Ok(());
        }
        trace!(?verify_args, "submitting verification request");
        // `Ok(None)` from the closure means "already verified"; `Ok(Some(_))`
        // carries the submission response (GUID in `result`).
        let resp = args
            .retry
            .into_retry()
            .run_async(|| async {
                sh_println!(
                    "\nSubmitting verification for [{}] {}.",
                    verify_args.contract_name,
                    verify_args.address
                )?;
                let resp = etherscan
                    .submit_contract_verification(&verify_args)
                    .await
                    .wrap_err_with(|| {
                        // valid json
                        let args = serde_json::to_string(&verify_args).unwrap();
                        format!("Failed to submit contract verification, payload:\n{args}")
                    })?;
                trace!(?resp, "Received verification response");
                if resp.status == "0" {
                    if resp.result == "Contract source code already verified"
                        // specific for blockscout response
                        || resp.result == "Smart-contract already verified."
                    {
                        return Ok(None);
                    }
                    // Deployment not indexed yet: return Err so the retry
                    // wrapper tries again.
                    if resp.result.starts_with("Unable to locate ContractCode at")
                        || resp.result.starts_with("The address is not a smart contract")
                        || resp.result.starts_with("Address is not a smart-contract")
                    {
                        warn!("{}", resp.result);
                        return Err(eyre!("Could not detect deployment: {}", resp.result));
                    }
                    // Any other status-0 response is terminal: report and exit
                    // the process (no point retrying a rejected payload).
                    sh_err!(
                        "Encountered an error verifying this contract:\nResponse: `{}`\nDetails:
`{}`",
                        resp.message,
                        resp.result
                    )?;
                    warn!("Failed verify submission: {:?}", resp);
                    std::process::exit(1);
                }
                Ok(Some(resp))
            })
            .await?;
        if let Some(resp) = resp {
            sh_println!(
                "Submitted contract for verification:\n\tResponse: `{}`\n\tGUID: `{}`\n\tURL: {}",
                resp.message,
                resp.result,
                etherscan.address_url(args.address)
            )?;
            // --watch: immediately poll the returned GUID until settled.
            if args.watch {
                let check_args = VerifyCheckArgs {
                    id: resp.result,
                    etherscan: args.etherscan,
                    retry: RETRY_CHECK_ON_VERIFY,
                    verifier: args.verifier,
                };
                return self.check(check_args).await;
            }
        } else {
            sh_println!("Contract source code already verified")?;
        }
        Ok(())
    }
    /// Executes the command to check verification status on Etherscan
    async fn check(&self, args: VerifyCheckArgs) -> Result<()> {
        let config = args.load_config()?;
        let etherscan = self.client(&args.etherscan, &args.verifier, &config)?;
        args.retry
            .into_retry()
            .run_async_until_break(|| async {
                let resp = etherscan
                    .check_contract_verification_status(args.id.clone())
                    .await
                    .wrap_err("Failed to request verification status")
                    .map_err(RetryError::Retry)?;
                trace!(?resp, "Received verification response")
                ;
                let _ = sh_println!(
                    "Contract verification status:\nResponse: `{}`\nDetails: `{}`",
                    resp.message,
                    resp.result
                );
                // Still queued (or not indexed yet): keep polling.
                if resp.result == "Pending in queue"
                    || resp.result.starts_with("Error: contract does not exist")
                {
                    return Err(RetryError::Retry(eyre!("Verification is still pending...")));
                }
                if resp.result == "Unable to verify" {
                    return Err(RetryError::Retry(eyre!("Unable to verify.")));
                }
                if resp.result == "Already Verified" {
                    let _ = sh_println!("Contract source code already verified");
                    return Ok(());
                }
                // Any other status "0" is terminal: stop retrying.
                if resp.status == "0" {
                    return Err(RetryError::Break(eyre!(
                        "Contract verification failed:\nStatus: `{}`\nResult: `{}`",
                        resp.status,
                        resp.result
                    )));
                }
                if resp.result == "Pass - Verified" {
                    let _ = sh_println!("Contract successfully verified");
                }
                Ok(())
            })
            .await
            .wrap_err("Checking verification result failed")
    }
}
impl EtherscanVerificationProvider {
/// Create a source provider
fn source_provider(&self, args: &VerifyArgs) -> Box<dyn EtherscanSourceProvider> {
if args.flatten {
Box::new(flatten::EtherscanFlattenedSource)
} else {
Box::new(standard_json::EtherscanStandardJsonSource)
}
}
    /// Configures the API request to the Etherscan API using the given [`VerifyArgs`].
    ///
    /// Returns the configured client together with the request payload;
    /// nothing is sent here.
    async fn prepare_verify_request(
        &mut self,
        args: &VerifyArgs,
        context: &VerificationContext,
    ) -> Result<(Client, VerifyContract)> {
        let config = args.load_config()?;
        let etherscan = self.client(&args.etherscan, &args.verifier, &config)?;
        let verify_args = self.create_verify_request(args, context).await?;
        Ok((etherscan, verify_args))
    }
/// Queries the Etherscan API to verify if the contract is already verified.
async fn is_contract_verified(
&self,
etherscan: &Client,
verify_contract: &VerifyContract,
) -> Result<bool> {
let check = etherscan.contract_abi(verify_contract.address).await;
if let Err(err) = check {
return match err {
EtherscanError::ContractCodeNotVerified(_) => Ok(false),
error => Err(error).wrap_err_with(|| {
format!("Failed to obtain contract ABI for {}", verify_contract.address)
}),
};
}
Ok(true)
}
/// Create an Etherscan client.
pub(crate) fn client(
&self,
etherscan_opts: &EtherscanOpts,
verifier_args: &VerifierArgs,
config: &Config,
) -> Result<Client> {
let chain = etherscan_opts.chain.unwrap_or_default();
let etherscan_key = etherscan_opts.key();
let verifier_type = &verifier_args.verifier;
let verifier_url = verifier_args.verifier_url.as_deref();
// Verifier is etherscan if explicitly set or if no verifier set (default sourcify) but
// API key passed.
let is_etherscan = verifier_type.is_etherscan()
|| (verifier_type.is_sourcify() && etherscan_key.is_some());
let etherscan_config = config.get_etherscan_config_with_chain(Some(chain))?;
let etherscan_api_url = verifier_url.or(None).map(str::to_owned);
let api_url = etherscan_api_url.as_deref();
let base_url = etherscan_config
.as_ref()
.and_then(|c| c.browser_url.as_deref())
.or_else(|| chain.etherscan_urls().map(|(_, url)| url));
let etherscan_key =
etherscan_key.or_else(|| etherscan_config.as_ref().map(|c| c.key.clone()));
let mut builder = Client::builder();
builder = if let Some(api_url) = api_url {
// we don't want any trailing slashes because this can cause cloudflare issues: <https://github.com/foundry-rs/foundry/pull/6079>
let api_url = api_url.trim_end_matches('/');
let base_url = if !is_etherscan {
// If verifier is not Etherscan then set base url as api url without /api suffix.
api_url.strip_suffix("/api").unwrap_or(api_url)
} else {
base_url.unwrap_or(api_url)
};
builder.with_api_url(api_url)?.with_url(base_url)?
} else {
builder.chain(chain)?
};
builder
.with_api_key(etherscan_key.unwrap_or_default())
.build()
.wrap_err("Failed to create Etherscan client")
}
/// Creates the `VerifyContract` Etherscan request in order to verify the contract
///
/// If `--flatten` is set to `true` then this will send with [`CodeFormat::SingleFile`]
/// otherwise this will use the [`CodeFormat::StandardJsonInput`]
pub async fn create_verify_request(
&mut self,
args: &VerifyArgs,
context: &VerificationContext,
) -> Result<VerifyContract> {
let (source, contract_name, code_format) =
self.source_provider(args).source(args, context)?;
let lang = args.detect_language(context);
let mut compiler_version = context.compiler_version.clone();
compiler_version.build = match RE_BUILD_COMMIT.captures(compiler_version.build.as_str()) {
Some(cap) => BuildMetadata::new(cap.name("commit").unwrap().as_str())?,
_ => BuildMetadata::EMPTY,
};
let compiler_version = if matches!(lang, ContractLanguage::Vyper) {
format!("vyper:{}", compiler_version.to_string().split('+').next().unwrap_or("0.0.0"))
} else {
format!("v{}", ensure_solc_build_metadata(context.compiler_version.clone()).await?)
};
let constructor_args = self.constructor_args(args, context).await?;
let mut verify_args =
VerifyContract::new(args.address, contract_name, source, compiler_version)
.constructor_arguments(constructor_args)
.code_format(code_format);
if args.via_ir {
// we explicitly set this __undocumented__ argument to true if provided by the user,
// though this info is also available in the compiler settings of the standard json
// object if standard json is used
// unclear how Etherscan interprets this field in standard-json mode
verify_args = verify_args.via_ir(true);
}
if code_format == CodeFormat::SingleFile {
verify_args = if let Some(optimizations) = args.num_of_optimizations {
verify_args.optimized().runs(optimizations as u32)
} else if context.config.optimizer == Some(true) {
verify_args
.optimized()
.runs(context.config.optimizer_runs.unwrap_or(200).try_into()?)
} else {
verify_args.not_optimized()
};
}
if code_format == CodeFormat::VyperJson {
verify_args =
if args.num_of_optimizations.is_some() || context.config.optimizer == Some(true) {
verify_args.optimized().runs(1)
} else {
verify_args.not_optimized().runs(0)
}
}
Ok(verify_args)
}
/// Return the optional encoded constructor arguments. If the path to
/// constructor arguments was provided, read them and encode. Otherwise,
/// return whatever was set in the [VerifyArgs] args.
async fn constructor_args(
&mut self,
args: &VerifyArgs,
context: &VerificationContext,
) -> Result<Option<String>> {
if let Some(ref constructor_args_path) = args.constructor_args_path {
let abi = context.get_target_abi()?;
let constructor = abi
.constructor()
.ok_or_else(|| eyre!("Can't retrieve constructor info from artifact ABI."))?;
let func = Function {
name: "constructor".to_string(),
inputs: constructor.inputs.clone(),
outputs: vec![],
state_mutability: alloy_json_abi::StateMutability::NonPayable,
};
let encoded_args = encode_function_args(
&func,
read_constructor_args_file(constructor_args_path.to_path_buf())?,
)?;
let encoded_args = hex::encode(encoded_args);
return Ok(Some(encoded_args[8..].into()));
}
if args.guess_constructor_args {
return Ok(Some(self.guess_constructor_args(args, context).await?));
}
Ok(args.constructor_args.clone())
}
/// Uses Etherscan API to fetch contract creation transaction.
/// If transaction is a create transaction or a invocation of default CREATE2 deployer, tries to
/// match provided creation code with local bytecode of the target contract.
/// If bytecode match, returns latest bytes of on-chain creation code as constructor arguments.
async fn guess_constructor_args(
&mut self,
args: &VerifyArgs,
context: &VerificationContext,
) -> Result<String> {
let provider = get_provider(&context.config)?;
let client = self.client(&args.etherscan, &args.verifier, &context.config)?;
let creation_data = client.contract_creation_data(args.address).await?;
let transaction = provider
.get_transaction_by_hash(creation_data.transaction_hash)
.await?
.ok_or_eyre("Transaction not found")?;
let receipt = provider
.get_transaction_receipt(creation_data.transaction_hash)
.await?
.ok_or_eyre("Couldn't fetch transaction receipt from RPC")?;
let maybe_creation_code = if receipt.contract_address == Some(args.address) {
transaction.inner.inner.input()
} else if transaction.to() == Some(DEFAULT_CREATE2_DEPLOYER) {
&transaction.inner.inner.input()[32..]
} else {
eyre::bail!(
"Fetching of constructor arguments is not supported for contracts created by contracts"
)
};
let output = context.project.compile_file(&context.target_path)?;
let artifact = output
.find(&context.target_path, &context.target_name)
.ok_or_eyre("Contract artifact wasn't found locally")?;
let bytecode = artifact
.get_bytecode_object()
.ok_or_eyre("Contract artifact does not contain bytecode")?;
let bytecode = match bytecode.as_ref() {
BytecodeObject::Bytecode(bytes) => Ok(bytes),
BytecodeObject::Unlinked(_) => {
Err(eyre!("You have to provide correct libraries to use --guess-constructor-args"))
}
}?;
if maybe_creation_code.starts_with(bytecode) {
let constructor_args = &maybe_creation_code[bytecode.len()..];
let constructor_args = hex::encode(constructor_args);
sh_println!("Identified constructor arguments: {constructor_args}")?;
Ok(constructor_args)
} else {
eyre::bail!("Local bytecode doesn't match on-chain bytecode")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::provider::VerificationProviderType;
use clap::Parser;
use foundry_common::fs;
use foundry_test_utils::{forgetest_async, str};
use tempfile::tempdir;
#[test]
fn can_extract_etherscan_verify_config() {
let temp = tempdir().unwrap();
let root = temp.path();
let config = r#"
[profile.default]
[etherscan]
amoy = { key = "dummykey", chain = 80002, url = "https://amoy.polygonscan.com/" }
"#;
let toml_file = root.join(Config::FILE_NAME);
fs::write(toml_file, config).unwrap();
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0xd8509bee9c9bf012282ad33aba0d87241baf5064",
"src/Counter.sol:Counter",
"--chain",
"amoy",
"--root",
root.as_os_str().to_str().unwrap(),
]);
let config = args.load_config().unwrap();
let etherscan = EtherscanVerificationProvider::default();
let client = etherscan.client(&args.etherscan, &args.verifier, &config).unwrap();
assert_eq!(
client.etherscan_api_url().as_str(),
"https://api.etherscan.io/v2/api?chainid=80002"
);
assert!(format!("{client:?}").contains("dummykey"));
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0xd8509bee9c9bf012282ad33aba0d87241baf5064",
"src/Counter.sol:Counter",
"--chain",
"amoy",
"--verifier-url",
"https://verifier-url.com/",
"--root",
root.as_os_str().to_str().unwrap(),
]);
let config = args.load_config().unwrap();
let etherscan = EtherscanVerificationProvider::default();
let client = etherscan.client(&args.etherscan, &args.verifier, &config).unwrap();
assert_eq!(client.etherscan_api_url().as_str(), "https://verifier-url.com/");
assert!(format!("{client:?}").contains("dummykey"));
}
#[test]
fn can_extract_etherscan_v2_verify_config() {
let temp = tempdir().unwrap();
let root = temp.path();
let config = r#"
[profile.default]
[etherscan]
amoy = { key = "dummykey", chain = 80002, url = "https://amoy.polygonscan.com/" }
"#;
let toml_file = root.join(Config::FILE_NAME);
fs::write(toml_file, config).unwrap();
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0xd8509bee9c9bf012282ad33aba0d87241baf5064",
"src/Counter.sol:Counter",
"--verifier",
"etherscan",
"--chain",
"amoy",
"--root",
root.as_os_str().to_str().unwrap(),
]);
let config = args.load_config().unwrap();
let etherscan = EtherscanVerificationProvider::default();
let client = etherscan.client(&args.etherscan, &args.verifier, &config).unwrap();
assert_eq!(
client.etherscan_api_url().as_str(),
"https://api.etherscan.io/v2/api?chainid=80002"
);
assert!(format!("{client:?}").contains("dummykey"));
let args: VerifyArgs = VerifyArgs::parse_from([
"foundry-cli",
"0xd8509bee9c9bf012282ad33aba0d87241baf5064",
"src/Counter.sol:Counter",
"--verifier",
"etherscan",
"--chain",
"amoy",
"--verifier-url",
"https://verifier-url.com/",
"--root",
root.as_os_str().to_str().unwrap(),
]);
let config = args.load_config().unwrap();
assert_eq!(args.verifier.verifier, VerificationProviderType::Etherscan);
let etherscan = EtherscanVerificationProvider::default();
let client = etherscan.client(&args.etherscan, &args.verifier, &config).unwrap();
assert_eq!(client.etherscan_api_url().as_str(), "https://verifier-url.com/");
assert!(format!("{client:?}").contains("dummykey"));
}
#[tokio::test(flavor = "multi_thread")]
async fn fails_on_disabled_cache_and_missing_info() {
let temp = tempdir().unwrap();
let root = temp.path();
let root_path = root.as_os_str().to_str().unwrap();
let config = r"
[profile.default]
cache = false
";
let toml_file = root.join(Config::FILE_NAME);
fs::write(toml_file, config).unwrap();
let address = "0xd8509bee9c9bf012282ad33aba0d87241baf5064";
let contract_name = "Counter";
let src_dir = "src";
fs::create_dir_all(root.join(src_dir)).unwrap();
let contract_path = format!("{src_dir}/Counter.sol");
fs::write(root.join(&contract_path), "").unwrap();
// No compiler argument
let args = VerifyArgs::parse_from([
"foundry-cli",
address,
&format!("{contract_path}:{contract_name}"),
"--root",
root_path,
]);
let result = args.resolve_context().await;
assert!(result.is_err());
assert_eq!(
result.unwrap_err().to_string(),
"If cache is disabled, compiler version must be either provided with `--compiler-version` option or set in foundry.toml"
);
}
forgetest_async!(respects_path_for_duplicate, |prj, cmd| {
prj.add_source("Counter1", "contract Counter {}");
prj.add_source("Counter2", "contract Counter {}");
cmd.args(["build", "--force"]).assert_success().stdout_eq(str![[r#"
[COMPILING_FILES] with [SOLC_VERSION]
...
[SOLC_VERSION] [ELAPSED]
Compiler run successful!
"#]]);
let args = VerifyArgs::parse_from([
"foundry-cli",
"0x0000000000000000000000000000000000000000",
"src/Counter1.sol:Counter",
"--root",
&prj.root().to_string_lossy(),
]);
let context = args.resolve_context().await.unwrap();
let mut etherscan = EtherscanVerificationProvider::default();
etherscan.preflight_verify_check(args, context).await.unwrap();
});
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/lib.rs | crates/evm/evm/src/lib.rs | //! # foundry-evm
//!
//! Main Foundry EVM backend abstractions.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate tracing;
pub mod executors;
pub mod inspectors;
pub use foundry_evm_core as core;
pub use foundry_evm_core::{
Env, EnvMut, EvmEnv, InspectorExt, backend, constants, decode, fork, hardfork, opts, utils,
};
pub use foundry_evm_coverage as coverage;
pub use foundry_evm_fuzz as fuzz;
pub use foundry_evm_traces as traces;
// TODO: We should probably remove these, but it's a pretty big breaking change.
#[doc(hidden)]
pub use revm;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/custom_printer.rs | crates/evm/evm/src/inspectors/custom_printer.rs | //! Custom print inspector, it has step level information of execution.
//! It is a great tool if some debugging is needed.
use foundry_common::sh_println;
use foundry_evm_core::backend::DatabaseError;
use revm::{
Database, Inspector,
bytecode::opcode::OpCode,
context::{ContextTr, JournalTr},
inspector::{JournalExt, inspectors::GasInspector},
interpreter::{
CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter,
interpreter::EthInterpreter,
interpreter_types::{Jumps, MemoryTr},
},
primitives::{Address, U256},
};
/// Custom print [Inspector], it has step level information of execution.
///
/// It is a great tool if some debugging is needed.
#[derive(Clone, Debug, Default)]
pub struct CustomPrintTracer {
gas_inspector: GasInspector,
}
impl<CTX, D> Inspector<CTX, EthInterpreter> for CustomPrintTracer
where
D: Database<Error = DatabaseError>,
CTX: ContextTr<Db = D>,
CTX::Journal: JournalExt,
{
fn initialize_interp(&mut self, interp: &mut Interpreter, _context: &mut CTX) {
self.gas_inspector.initialize_interp(&interp.gas);
}
// get opcode by calling `interp.contract.opcode(interp.program_counter())`.
// all other information can be obtained from interp.
fn step(&mut self, interp: &mut Interpreter, context: &mut CTX) {
let opcode = interp.bytecode.opcode();
let name = OpCode::name_by_op(opcode);
let gas_remaining = self.gas_inspector.gas_remaining();
let memory_size = interp.memory.size();
let _ = sh_println!(
"depth:{}, PC:{}, gas:{:#x}({}), OPCODE: {:?}({:?}) refund:{:#x}({}) Stack:{:?}, Data size:{}",
context.journal().depth(),
interp.bytecode.pc(),
gas_remaining,
gas_remaining,
name,
opcode,
interp.gas.refunded(),
interp.gas.refunded(),
interp.stack.data(),
memory_size,
);
self.gas_inspector.step(&interp.gas);
}
fn step_end(&mut self, interpreter: &mut Interpreter, _context: &mut CTX) {
self.gas_inspector.step_end(&interpreter.gas);
}
fn call_end(&mut self, _context: &mut CTX, _inputs: &CallInputs, outcome: &mut CallOutcome) {
self.gas_inspector.call_end(outcome)
}
fn create_end(
&mut self,
_context: &mut CTX,
_inputs: &CreateInputs,
outcome: &mut CreateOutcome,
) {
self.gas_inspector.create_end(outcome)
}
fn call(&mut self, _context: &mut CTX, inputs: &mut CallInputs) -> Option<CallOutcome> {
let _ = sh_println!(
"SM Address: {:?}, caller:{:?},target:{:?} is_static:{:?}, transfer:{:?}, input_size:{:?}",
inputs.bytecode_address,
inputs.caller,
inputs.target_address,
inputs.is_static,
inputs.value,
inputs.input.len(),
);
None
}
fn create(&mut self, _context: &mut CTX, inputs: &mut CreateInputs) -> Option<CreateOutcome> {
let _ = sh_println!(
"CREATE CALL: caller:{:?}, scheme:{:?}, value:{:?}, init_code:{:?}, gas:{:?}",
inputs.caller,
inputs.scheme,
inputs.value,
inputs.init_code,
inputs.gas_limit
);
None
}
fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) {
let _ = sh_println!(
"SELFDESTRUCT: contract: {:?}, refund target: {:?}, value {:?}",
contract,
target,
value
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/mod.rs | crates/evm/evm/src/inspectors/mod.rs | //! EVM inspectors.
pub use foundry_cheatcodes::{self as cheatcodes, Cheatcodes, CheatsConfig};
pub use foundry_evm_coverage::LineCoverageCollector;
pub use foundry_evm_fuzz::Fuzzer;
pub use foundry_evm_traces::{StackSnapshotType, TracingInspector, TracingInspectorConfig};
pub use revm_inspectors::access_list::AccessListInspector;
mod custom_printer;
pub use custom_printer::CustomPrintTracer;
mod chisel_state;
pub use chisel_state::ChiselState;
mod logs;
pub use logs::LogCollector;
mod script;
pub use script::ScriptExecutionInspector;
mod stack;
pub use stack::{InspectorData, InspectorStack, InspectorStackBuilder};
mod revert_diagnostic;
pub use revert_diagnostic::RevertDiagnostic;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/stack.rs | crates/evm/evm/src/inspectors/stack.rs | use super::{
Cheatcodes, CheatsConfig, ChiselState, CustomPrintTracer, Fuzzer, LineCoverageCollector,
LogCollector, RevertDiagnostic, ScriptExecutionInspector, TracingInspector,
};
use alloy_evm::{Evm, eth::EthEvmContext};
use alloy_primitives::{
Address, Bytes, Log, TxKind, U256,
map::{AddressHashMap, HashMap},
};
use foundry_cheatcodes::{CheatcodeAnalysis, CheatcodesExecutor, Wallets};
use foundry_common::compile::Analysis;
use foundry_compilers::ProjectPathsConfig;
use foundry_evm_core::{
ContextExt, Env, InspectorExt,
backend::{DatabaseExt, JournaledState},
evm::new_evm_with_inspector,
};
use foundry_evm_coverage::HitMaps;
use foundry_evm_networks::NetworkConfigs;
use foundry_evm_traces::{SparsedTraceArena, TraceMode};
use revm::{
Inspector,
context::{
BlockEnv,
result::{ExecutionResult, Output},
},
context_interface::CreateScheme,
interpreter::{
CallInputs, CallOutcome, CallScheme, CreateInputs, CreateOutcome, Gas, InstructionResult,
Interpreter, InterpreterResult,
},
state::{Account, AccountStatus},
};
use revm_inspectors::edge_cov::EdgeCovInspector;
use std::{
ops::{Deref, DerefMut},
sync::Arc,
};
#[derive(Clone, Debug, Default)]
#[must_use = "builders do nothing unless you call `build` on them"]
pub struct InspectorStackBuilder {
/// Solar compiler instance, to grant syntactic and semantic analysis capabilities.
pub analysis: Option<Analysis>,
/// The block environment.
///
/// Used in the cheatcode handler to overwrite the block environment separately from the
/// execution block environment.
pub block: Option<BlockEnv>,
/// The gas price.
///
/// Used in the cheatcode handler to overwrite the gas price separately from the gas price
/// in the execution environment.
pub gas_price: Option<u128>,
/// The cheatcodes config.
pub cheatcodes: Option<Arc<CheatsConfig>>,
/// The fuzzer inspector and its state, if it exists.
pub fuzzer: Option<Fuzzer>,
/// Whether to enable tracing and revert diagnostics.
pub trace_mode: TraceMode,
/// Whether logs should be collected.
pub logs: Option<bool>,
/// Whether line coverage info should be collected.
pub line_coverage: Option<bool>,
/// Whether to print all opcode traces into the console. Useful for debugging the EVM.
pub print: Option<bool>,
/// The chisel state inspector.
pub chisel_state: Option<usize>,
/// Whether to enable call isolation.
/// In isolation mode all top-level calls are executed as a separate transaction in a separate
/// EVM context, enabling more precise gas accounting and transaction state changes.
pub enable_isolation: bool,
/// Networks with enabled features.
pub networks: NetworkConfigs,
/// The wallets to set in the cheatcodes context.
pub wallets: Option<Wallets>,
/// The CREATE2 deployer address.
pub create2_deployer: Address,
}
impl InspectorStackBuilder {
/// Create a new inspector stack builder.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Set the solar compiler instance that grants syntactic and semantic analysis capabilities
#[inline]
pub fn set_analysis(mut self, analysis: Analysis) -> Self {
self.analysis = Some(analysis);
self
}
/// Set the block environment.
#[inline]
pub fn block(mut self, block: BlockEnv) -> Self {
self.block = Some(block);
self
}
/// Set the gas price.
#[inline]
pub fn gas_price(mut self, gas_price: u128) -> Self {
self.gas_price = Some(gas_price);
self
}
/// Enable cheatcodes with the given config.
#[inline]
pub fn cheatcodes(mut self, config: Arc<CheatsConfig>) -> Self {
self.cheatcodes = Some(config);
self
}
/// Set the wallets.
#[inline]
pub fn wallets(mut self, wallets: Wallets) -> Self {
self.wallets = Some(wallets);
self
}
/// Set the fuzzer inspector.
#[inline]
pub fn fuzzer(mut self, fuzzer: Fuzzer) -> Self {
self.fuzzer = Some(fuzzer);
self
}
/// Set the Chisel inspector.
#[inline]
pub fn chisel_state(mut self, final_pc: usize) -> Self {
self.chisel_state = Some(final_pc);
self
}
/// Set whether to collect logs.
#[inline]
pub fn logs(mut self, yes: bool) -> Self {
self.logs = Some(yes);
self
}
/// Set whether to collect line coverage information.
#[inline]
pub fn line_coverage(mut self, yes: bool) -> Self {
self.line_coverage = Some(yes);
self
}
/// Set whether to enable the trace printer.
#[inline]
pub fn print(mut self, yes: bool) -> Self {
self.print = Some(yes);
self
}
/// Set whether to enable the tracer.
/// Revert diagnostic inspector is activated when `mode != TraceMode::None`
#[inline]
pub fn trace_mode(mut self, mode: TraceMode) -> Self {
if self.trace_mode < mode {
self.trace_mode = mode
}
self
}
/// Set whether to enable the call isolation.
/// For description of call isolation, see [`InspectorStack::enable_isolation`].
#[inline]
pub fn enable_isolation(mut self, yes: bool) -> Self {
self.enable_isolation = yes;
self
}
/// Set networks with enabled features.
#[inline]
pub fn networks(mut self, networks: NetworkConfigs) -> Self {
self.networks = networks;
self
}
#[inline]
pub fn create2_deployer(mut self, create2_deployer: Address) -> Self {
self.create2_deployer = create2_deployer;
self
}
/// Builds the stack of inspectors to use when transacting/committing on the EVM.
pub fn build(self) -> InspectorStack {
let Self {
analysis,
block,
gas_price,
cheatcodes,
fuzzer,
trace_mode,
logs,
line_coverage,
print,
chisel_state,
enable_isolation,
networks,
wallets,
create2_deployer,
} = self;
let mut stack = InspectorStack::new();
// inspectors
if let Some(config) = cheatcodes {
let mut cheatcodes = Cheatcodes::new(config);
// Set analysis capabilities if they are provided
if let Some(analysis) = analysis {
stack.set_analysis(analysis.clone());
cheatcodes.set_analysis(CheatcodeAnalysis::new(analysis));
}
// Set wallets if they are provided
if let Some(wallets) = wallets {
cheatcodes.set_wallets(wallets);
}
stack.set_cheatcodes(cheatcodes);
}
if let Some(fuzzer) = fuzzer {
stack.set_fuzzer(fuzzer);
}
if let Some(chisel_state) = chisel_state {
stack.set_chisel(chisel_state);
}
stack.collect_line_coverage(line_coverage.unwrap_or(false));
stack.collect_logs(logs.unwrap_or(true));
stack.print(print.unwrap_or(false));
stack.tracing(trace_mode);
stack.enable_isolation(enable_isolation);
stack.networks(networks);
stack.set_create2_deployer(create2_deployer);
// environment, must come after all of the inspectors
if let Some(block) = block {
stack.set_block(&block);
}
if let Some(gas_price) = gas_price {
stack.set_gas_price(gas_price);
}
stack
}
}
/// Helper macro to call the same method on multiple inspectors without resorting to dynamic
/// dispatch.
#[macro_export]
macro_rules! call_inspectors {
([$($inspector:expr),+ $(,)?], |$id:ident $(,)?| $body:expr $(,)?) => {
$(
if let Some($id) = $inspector {
$crate::utils::cold_path();
$body;
}
)+
};
(#[ret] [$($inspector:expr),+ $(,)?], |$id:ident $(,)?| $body:expr $(,)?) => {{
$(
if let Some($id) = $inspector {
$crate::utils::cold_path();
if let Some(result) = $body {
return result;
}
}
)+
}};
}
/// The collected results of [`InspectorStack`].
pub struct InspectorData {
pub logs: Vec<Log>,
pub labels: AddressHashMap<String>,
pub traces: Option<SparsedTraceArena>,
pub line_coverage: Option<HitMaps>,
pub edge_coverage: Option<Vec<u8>>,
pub cheatcodes: Option<Box<Cheatcodes>>,
pub chisel_state: Option<(Vec<U256>, Vec<u8>)>,
pub reverter: Option<Address>,
}
/// Contains data about the state of outer/main EVM which created and invoked the inner EVM context.
/// Used to adjust EVM state while in inner context.
///
/// We need this to avoid breaking changes due to EVM behavior differences in isolated vs
/// non-isolated mode. For descriptions and workarounds for those changes see: <https://github.com/foundry-rs/foundry/pull/7186#issuecomment-1959102195>
#[derive(Debug, Clone)]
pub struct InnerContextData {
/// Origin of the transaction in the outer EVM context.
original_origin: Address,
}
/// An inspector that calls multiple inspectors in sequence.
///
/// If a call to an inspector returns a value (indicating a stop or revert) the remaining inspectors
/// are not called.
///
/// Stack is divided into [Cheatcodes] and `InspectorStackInner`. This is done to allow assembling
/// `InspectorStackRefMut` inside [Cheatcodes] to allow usage of it as [revm::Inspector]. This gives
/// us ability to create and execute separate EVM frames from inside cheatcodes while still having
/// access to entire stack of inspectors and correctly handling traces, logs, debugging info
/// collection, etc.
#[derive(Clone, Debug, Default)]
pub struct InspectorStack {
pub cheatcodes: Option<Box<Cheatcodes>>,
pub inner: InspectorStackInner,
}
impl InspectorStack {
pub fn paths_config(&self) -> Option<&ProjectPathsConfig> {
self.cheatcodes.as_ref().map(|c| &c.config.paths)
}
}
/// All used inpectors besides [Cheatcodes].
///
/// See [`InspectorStack`].
#[derive(Default, Clone, Debug)]
pub struct InspectorStackInner {
/// Solar compiler instance, to grant syntactic and semantic analysis capabilities.
pub analysis: Option<Analysis>,
// Inspectors.
// These are boxed to reduce the size of the struct and slightly improve performance of the
// `if let Some` checks.
pub chisel_state: Option<Box<ChiselState>>,
pub edge_coverage: Option<Box<EdgeCovInspector>>,
pub fuzzer: Option<Box<Fuzzer>>,
pub line_coverage: Option<Box<LineCoverageCollector>>,
pub log_collector: Option<Box<LogCollector>>,
pub printer: Option<Box<CustomPrintTracer>>,
pub revert_diag: Option<Box<RevertDiagnostic>>,
pub script_execution_inspector: Option<Box<ScriptExecutionInspector>>,
pub tracer: Option<Box<TracingInspector>>,
// InspectorExt and other internal data.
pub enable_isolation: bool,
pub networks: NetworkConfigs,
pub create2_deployer: Address,
/// Flag marking if we are in the inner EVM context.
pub in_inner_context: bool,
pub inner_context_data: Option<InnerContextData>,
pub top_frame_journal: HashMap<Address, Account>,
/// Address that reverted the call, if any.
pub reverter: Option<Address>,
}
/// Struct keeping mutable references to both parts of [InspectorStack] and implementing
/// [revm::Inspector]. This struct can be obtained via [InspectorStack::as_mut] or via
/// [CheatcodesExecutor::get_inspector] method implemented for [InspectorStackInner].
pub struct InspectorStackRefMut<'a> {
pub cheatcodes: Option<&'a mut Cheatcodes>,
pub inner: &'a mut InspectorStackInner,
}
impl CheatcodesExecutor for InspectorStackInner {
fn get_inspector<'a>(&'a mut self, cheats: &'a mut Cheatcodes) -> Box<dyn InspectorExt + 'a> {
Box::new(InspectorStackRefMut { cheatcodes: Some(cheats), inner: self })
}
fn tracing_inspector(&mut self) -> Option<&mut TracingInspector> {
self.tracer.as_deref_mut()
}
}
impl InspectorStack {
/// Creates a new inspector stack.
///
/// Note that the stack is empty by default, and you must add inspectors to it.
/// This is done by calling the `set_*` methods on the stack directly, or by building the stack
/// with [`InspectorStack`].
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Logs the status of the inspectors.
pub fn log_status(&self) {
trace!(enabled=%{
let mut enabled = Vec::with_capacity(16);
macro_rules! push {
($($id:ident),* $(,)?) => {
$(
if self.$id.is_some() {
enabled.push(stringify!($id));
}
)*
};
}
push!(cheatcodes, chisel_state, line_coverage, fuzzer, log_collector, printer, tracer);
if self.enable_isolation {
enabled.push("isolation");
}
format!("[{}]", enabled.join(", "))
});
}
/// Set the solar compiler instance.
#[inline]
pub fn set_analysis(&mut self, analysis: Analysis) {
self.analysis = Some(analysis);
}
/// Set variables from an environment for the relevant inspectors.
#[inline]
pub fn set_env(&mut self, env: &Env) {
self.set_block(&env.evm_env.block_env);
self.set_gas_price(env.tx.gas_price);
}
/// Sets the block for the relevant inspectors.
#[inline]
pub fn set_block(&mut self, block: &BlockEnv) {
if let Some(cheatcodes) = &mut self.cheatcodes {
cheatcodes.block = Some(block.clone());
}
}
/// Sets the gas price for the relevant inspectors.
#[inline]
pub fn set_gas_price(&mut self, gas_price: u128) {
if let Some(cheatcodes) = &mut self.cheatcodes {
cheatcodes.gas_price = Some(gas_price);
}
}
/// Set the cheatcodes inspector.
#[inline]
pub fn set_cheatcodes(&mut self, cheatcodes: Cheatcodes) {
self.cheatcodes = Some(cheatcodes.into());
}
/// Set the fuzzer inspector.
#[inline]
pub fn set_fuzzer(&mut self, fuzzer: Fuzzer) {
self.fuzzer = Some(fuzzer.into());
}
/// Set the Chisel inspector.
#[inline]
pub fn set_chisel(&mut self, final_pc: usize) {
self.chisel_state = Some(ChiselState::new(final_pc).into());
}
/// Set whether to enable the line coverage collector.
#[inline]
pub fn collect_line_coverage(&mut self, yes: bool) {
self.line_coverage = yes.then(Default::default);
}
/// Set whether to enable the edge coverage collector.
#[inline]
pub fn collect_edge_coverage(&mut self, yes: bool) {
// TODO: configurable edge size?
self.edge_coverage = yes.then(EdgeCovInspector::new).map(Into::into);
}
/// Set whether to enable call isolation.
#[inline]
pub fn enable_isolation(&mut self, yes: bool) {
self.enable_isolation = yes;
}
/// Set networks with enabled features.
#[inline]
pub fn networks(&mut self, networks: NetworkConfigs) {
self.networks = networks;
}
/// Set the CREATE2 deployer address.
#[inline]
pub fn set_create2_deployer(&mut self, deployer: Address) {
self.create2_deployer = deployer;
}
/// Set whether to enable the log collector.
#[inline]
pub fn collect_logs(&mut self, yes: bool) {
self.log_collector = yes.then(Default::default);
}
/// Set whether to enable the trace printer.
#[inline]
pub fn print(&mut self, yes: bool) {
self.printer = yes.then(Default::default);
}
/// Set whether to enable the tracer.
/// Revert diagnostic inspector is activated when `mode != TraceMode::None`
#[inline]
pub fn tracing(&mut self, mode: TraceMode) {
self.revert_diag = (!mode.is_none()).then(RevertDiagnostic::default).map(Into::into);
if let Some(config) = mode.into_config() {
*self.tracer.get_or_insert_with(Default::default).config_mut() = config;
} else {
self.tracer = None;
}
}
/// Set whether to enable script execution inspector.
#[inline]
pub fn script(&mut self, script_address: Address) {
self.script_execution_inspector.get_or_insert_with(Default::default).script_address =
script_address;
}
#[inline(always)]
fn as_mut(&mut self) -> InspectorStackRefMut<'_> {
InspectorStackRefMut { cheatcodes: self.cheatcodes.as_deref_mut(), inner: &mut self.inner }
}
/// Returns an [`InspectorExt`] using this stack's inspectors.
#[inline]
pub fn as_inspector(&mut self) -> impl InspectorExt + '_ {
self
}
/// Collects all the data gathered during inspection into a single struct.
pub fn collect(self) -> InspectorData {
let Self {
mut cheatcodes,
inner:
InspectorStackInner {
chisel_state,
line_coverage,
edge_coverage,
log_collector,
tracer,
reverter,
..
},
} = self;
let traces = tracer.map(|tracer| tracer.into_traces()).map(|arena| {
let ignored = cheatcodes
.as_mut()
.map(|cheatcodes| {
let mut ignored = std::mem::take(&mut cheatcodes.ignored_traces.ignored);
// If the last pause call was not resumed, ignore the rest of the trace
if let Some(last_pause_call) = cheatcodes.ignored_traces.last_pause_call {
ignored.insert(last_pause_call, (arena.nodes().len(), 0));
}
ignored
})
.unwrap_or_default();
SparsedTraceArena { arena, ignored }
});
InspectorData {
logs: log_collector.map(|logs| logs.logs).unwrap_or_default(),
labels: cheatcodes
.as_ref()
.map(|cheatcodes| cheatcodes.labels.clone())
.unwrap_or_default(),
traces,
line_coverage: line_coverage.map(|line_coverage| line_coverage.finish()),
edge_coverage: edge_coverage.map(|edge_coverage| edge_coverage.into_hitcount()),
cheatcodes,
chisel_state: chisel_state.and_then(|state| state.state),
reverter,
}
}
}
impl InspectorStackRefMut<'_> {
/// Adjusts the EVM data for the inner EVM context.
///
/// Should be called on the top-level call of an inner context (depth == 0 &&
/// `self.in_inner_context`). Restores `tx.caller` (i.e. `tx.origin`) to the
/// value it had before entering the inner context.
fn adjust_evm_data_for_inner_context(&mut self, ecx: &mut EthEvmContext<&mut dyn DatabaseExt>) {
    // Panics if invoked outside an inner context; callers guard on `in_inner_context`.
    let inner_context_data =
        self.inner_context_data.as_ref().expect("should be called in inner context");
    ecx.tx.caller = inner_context_data.original_origin;
}
/// Runs `call_end` on every inspector that observes call results, allowing any
/// of them to override the outcome, and returns the final outcome.
///
/// Also records the first address that reverted in `self.reverter`.
fn do_call_end(
    &mut self,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
    inputs: &CallInputs,
    outcome: &mut CallOutcome,
) -> CallOutcome {
    let result = outcome.result.result;
    call_inspectors!(
        #[ret]
        [
            &mut self.fuzzer,
            &mut self.tracer,
            &mut self.cheatcodes,
            &mut self.printer,
            &mut self.revert_diag
        ],
        |inspector| {
            let previous_outcome = outcome.clone();
            inspector.call_end(ecx, inputs, outcome);
            // If the inspector returns a different status or a revert with a non-empty message,
            // we assume it wants to tell us something
            let different = outcome.result.result != result
                || (outcome.result.result == InstructionResult::Revert
                    && outcome.output() != previous_outcome.output());
            different.then_some(outcome.clone())
        },
    );
    // Record first address that reverted the call.
    if result.is_revert() && self.reverter.is_none() {
        self.reverter = Some(inputs.target_address);
    }
    outcome.clone()
}
/// Runs `create_end` on the tracing, cheatcode and printer inspectors, allowing
/// any of them to override the outcome, and returns the final outcome.
fn do_create_end(
    &mut self,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
    call: &CreateInputs,
    outcome: &mut CreateOutcome,
) -> CreateOutcome {
    let result = outcome.result.result;
    call_inspectors!(
        #[ret]
        [&mut self.tracer, &mut self.cheatcodes, &mut self.printer],
        |inspector| {
            let previous_outcome = outcome.clone();
            inspector.create_end(ecx, call, outcome);
            // If the inspector returns a different status or a revert with a non-empty message,
            // we assume it wants to tell us something
            let different = outcome.result.result != result
                || (outcome.result.result == InstructionResult::Revert
                    && outcome.output() != previous_outcome.output());
            different.then_some(outcome.clone())
        },
    );
    outcome.clone()
}
/// Executes a transaction in a fresh, nested EVM ("inner context") and merges
/// the resulting state back into the outer journal.
///
/// Returns the interpreter result and, for CREATE transactions, the address of
/// the deployed contract (if any).
fn transact_inner(
    &mut self,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
    kind: TxKind,
    caller: Address,
    input: Bytes,
    gas_limit: u64,
    value: U256,
) -> (InterpreterResult, Option<Address>) {
    // Snapshot the environment so it can be restored after the inner execution.
    let cached_env = Env::from(ecx.cfg.clone(), ecx.block.clone(), ecx.tx.clone());
    ecx.block.basefee = 0;
    ecx.tx.chain_id = Some(ecx.cfg.chain_id);
    ecx.tx.caller = caller;
    ecx.tx.kind = kind;
    ecx.tx.data = input;
    ecx.tx.value = value;
    // Add 21000 to the gas limit to account for the base cost of transaction.
    ecx.tx.gas_limit = gas_limit + 21000;
    // If we haven't disabled gas limit checks, ensure that transaction gas limit will not
    // exceed block gas limit.
    if !ecx.cfg.disable_block_gas_limit {
        ecx.tx.gas_limit = std::cmp::min(ecx.tx.gas_limit, ecx.block.gas_limit);
    }
    ecx.tx.gas_price = 0;
    self.inner_context_data = Some(InnerContextData { original_origin: cached_env.tx.caller });
    self.in_inner_context = true;
    let res = self.with_inspector(|inspector| {
        let (db, journal, env) = ecx.as_db_env_and_journal();
        let mut evm = new_evm_with_inspector(db, env.to_owned(), inspector);
        evm.journaled_state.state = {
            let mut state = journal.state.clone();
            for (addr, acc_mut) in &mut state {
                // mark all accounts cold, besides preloaded addresses
                if journal.warm_addresses.is_cold(addr) {
                    acc_mut.mark_cold();
                }
                // mark all slots cold
                for slot_mut in acc_mut.storage.values_mut() {
                    slot_mut.is_cold = true;
                    slot_mut.original_value = slot_mut.present_value;
                }
            }
            state
        };
        // set depth to 1 to make sure traces are collected correctly
        evm.journaled_state.depth = 1;
        let res = evm.transact(env.tx.clone());
        // need to reset the env in case it was modified via cheatcodes during execution
        *env.cfg = evm.cfg.clone();
        *env.block = evm.block.clone();
        *env.tx = cached_env.tx;
        env.block.basefee = cached_env.evm_env.block_env.basefee;
        res
    });
    self.in_inner_context = false;
    self.inner_context_data = None;
    let mut gas = Gas::new(gas_limit);
    let Ok(res) = res else {
        // Should we match, encode and propagate error as a revert reason?
        let result =
            InterpreterResult { result: InstructionResult::Revert, output: Bytes::new(), gas };
        return (result, None);
    };
    // Merge the inner execution's state changes into the outer journal.
    for (addr, mut acc) in res.state {
        let Some(acc_mut) = ecx.journaled_state.state.get_mut(&addr) else {
            ecx.journaled_state.state.insert(addr, acc);
            continue;
        };
        // make sure accounts that were warmed earlier do not become cold
        if acc.status.contains(AccountStatus::Cold)
            && !acc_mut.status.contains(AccountStatus::Cold)
        {
            acc.status -= AccountStatus::Cold;
        }
        acc_mut.info = acc.info;
        acc_mut.status |= acc.status;
        for (key, val) in acc.storage {
            let Some(slot_mut) = acc_mut.storage.get_mut(&key) else {
                acc_mut.storage.insert(key, val);
                continue;
            };
            slot_mut.present_value = val.present_value;
            slot_mut.is_cold &= val.is_cold;
        }
    }
    // Translate the execution result into an interpreter result + optional CREATE address.
    let (result, address, output) = match res.result {
        ExecutionResult::Success { reason, gas_used, gas_refunded, logs: _, output } => {
            gas.set_refund(gas_refunded as i64);
            let _ = gas.record_cost(gas_used);
            let address = match output {
                Output::Create(_, address) => address,
                Output::Call(_) => None,
            };
            (reason.into(), address, output.into_data())
        }
        ExecutionResult::Halt { reason, gas_used } => {
            let _ = gas.record_cost(gas_used);
            (reason.into(), None, Bytes::new())
        }
        ExecutionResult::Revert { gas_used, output } => {
            let _ = gas.record_cost(gas_used);
            (InstructionResult::Revert, None, output)
        }
    };
    (InterpreterResult { result, output, gas }, address)
}
/// Moves out of references, constructs a new [`InspectorStackRefMut`] and runs the given
/// closure with it.
///
/// The cheatcodes inspector and the inner stack are temporarily replaced with
/// fresh values for the duration of the closure and restored afterwards.
fn with_inspector<O>(&mut self, f: impl FnOnce(InspectorStackRefMut<'_>) -> O) -> O {
    let mut cheatcodes = self
        .cheatcodes
        .as_deref_mut()
        .map(|cheats| core::mem::replace(cheats, Cheatcodes::new(cheats.config.clone())));
    let mut inner = std::mem::take(self.inner);
    let out = f(InspectorStackRefMut { cheatcodes: cheatcodes.as_mut(), inner: &mut inner });
    // Restore the moved-out values.
    if let Some(cheats) = self.cheatcodes.as_deref_mut() {
        *cheats = cheatcodes.unwrap();
    }
    *self.inner = inner;
    out
}
/// Invoked at the beginning of a new top-level (0 depth) frame.
fn top_level_frame_start(&mut self, ecx: &mut EthEvmContext<&mut dyn DatabaseExt>) {
    if self.enable_isolation {
        // If we're in isolation mode, we need to keep track of the state at the beginning of
        // the frame to be able to roll back on revert.
        // `clone_from` reuses `top_frame_journal`'s existing allocation where possible.
        self.top_frame_journal.clone_from(&ecx.journaled_state.state);
    }
}
/// Invoked at the end of root frame.
///
/// Only does work when the frame reverted: lets cheatcodes undo their state
/// changes and, in isolation mode, restores the pre-frame journal.
fn top_level_frame_end(
    &mut self,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
    result: InstructionResult,
) {
    if !result.is_revert() {
        return;
    }
    // Encountered a revert, since cheatcodes may have altered the evm state in such a way
    // that violates some constraints, e.g. `deal`, we need to manually roll back on revert
    // before revm reverts the state itself
    if let Some(cheats) = self.cheatcodes.as_mut() {
        cheats.on_revert(ecx);
    }
    // If we're in isolation mode, we need to rollback to state before the root frame was
    // created. We can't rely on revm's journal because it doesn't account for changes
    // made by isolated calls
    if self.enable_isolation {
        ecx.journaled_state.state = std::mem::take(&mut self.top_frame_journal);
    }
}
// We take extra care in optimizing `step` and `step_end`, as they are likely the most
// hot functions in all of Foundry.
// We want to `#[inline(always)]` these functions so that `InspectorStack` does not
// delegate to `InspectorStackRefMut` in this case.
/// Forwards an interpreter `step` to every inspector that observes steps.
#[inline(always)]
fn step_inlined(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
) {
    call_inspectors!(
        [
            // These are sorted in definition order.
            &mut self.edge_coverage,
            &mut self.fuzzer,
            &mut self.line_coverage,
            &mut self.printer,
            &mut self.revert_diag,
            &mut self.script_execution_inspector,
            &mut self.tracer,
            // Keep `cheatcodes` last to make use of the tail call.
            &mut self.cheatcodes,
        ],
        |inspector| (**inspector).step(interpreter, ecx),
    );
}
/// Forwards an interpreter `step_end` to every inspector that observes step ends.
#[inline(always)]
fn step_end_inlined(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
) {
    call_inspectors!(
        [
            // These are sorted in definition order.
            &mut self.chisel_state,
            &mut self.printer,
            &mut self.revert_diag,
            &mut self.tracer,
            // Keep `cheatcodes` last to make use of the tail call.
            &mut self.cheatcodes,
        ],
        |inspector| (**inspector).step_end(interpreter, ecx),
    );
}
}
impl Inspector<EthEvmContext<&mut dyn DatabaseExt>> for InspectorStackRefMut<'_> {
/// Forwards interpreter initialization to the interested inspectors.
fn initialize_interp(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
) {
    call_inspectors!(
        [
            &mut self.line_coverage,
            &mut self.tracer,
            &mut self.cheatcodes,
            &mut self.script_execution_inspector,
            &mut self.printer
        ],
        |inspector| inspector.initialize_interp(interpreter, ecx),
    );
}
/// Delegates to the inlined implementation; see `step_inlined` for why.
fn step(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
) {
    self.step_inlined(interpreter, ecx);
}
/// Delegates to the inlined implementation; see `step_end_inlined` for why.
fn step_end(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
) {
    self.step_end_inlined(interpreter, ecx);
}
/// Forwards an emitted log to the interested inspectors.
#[allow(clippy::redundant_clone)]
fn log(&mut self, ecx: &mut EthEvmContext<&mut dyn DatabaseExt>, log: Log) {
    call_inspectors!(
        [&mut self.tracer, &mut self.log_collector, &mut self.cheatcodes, &mut self.printer],
        // Each inspector receives its own copy of the log.
        |inspector| inspector.log(ecx, log.clone()),
    );
}
/// Forwards an emitted log (with interpreter access) to the interested inspectors.
#[allow(clippy::redundant_clone)]
fn log_full(
    &mut self,
    interpreter: &mut Interpreter,
    ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
    log: Log,
) {
    call_inspectors!(
        [&mut self.tracer, &mut self.log_collector, &mut self.cheatcodes, &mut self.printer],
        |inspector| inspector.log_full(interpreter, ecx, log.clone()),
    );
}
fn call(
&mut self,
ecx: &mut EthEvmContext<&mut dyn DatabaseExt>,
call: &mut CallInputs,
) -> Option<CallOutcome> {
if self.in_inner_context && ecx.journaled_state.depth == 1 {
self.adjust_evm_data_for_inner_context(ecx);
return None;
}
if ecx.journaled_state.depth == 0 {
self.top_level_frame_start(ecx);
}
call_inspectors!(
#[ret]
[
&mut self.fuzzer,
&mut self.tracer,
&mut self.log_collector,
&mut self.printer,
&mut self.revert_diag
],
|inspector| {
let mut out = None;
if let Some(output) = inspector.call(ecx, call) {
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/revert_diagnostic.rs | crates/evm/evm/src/inspectors/revert_diagnostic.rs | use alloy_primitives::{Address, U256};
use alloy_sol_types::SolValue;
use foundry_evm_core::{
backend::DatabaseError,
constants::{CHEATCODE_ADDRESS, HARDHAT_CONSOLE_ADDRESS},
};
use revm::{
Database, Inspector,
bytecode::opcode,
context::{ContextTr, JournalTr},
inspector::JournalExt,
interpreter::{
CallInputs, CallOutcome, CallScheme, InstructionResult, Interpreter, InterpreterAction,
interpreter::EthInterpreter,
interpreter_types::{Jumps, LoopControl},
},
};
use std::fmt;
/// Addresses excluded from diagnostics: the Hardhat console and the cheatcode handler.
const IGNORE: [Address; 2] = [HARDHAT_CONSOLE_ADDRESS, CHEATCODE_ADDRESS];
/// Checks if the call scheme corresponds to any sort of delegate call
/// (`DELEGATECALL` or the legacy `CALLCODE`).
pub fn is_delegatecall(scheme: CallScheme) -> bool {
    matches!(scheme, CallScheme::DelegateCall | CallScheme::CallCode)
}
/// Revert causes that this inspector can pinpoint.
#[derive(Debug, Clone, Copy)]
pub enum DetailedRevertReason {
    /// A call with calldata targeted an address that has no code.
    CallToNonContract(Address),
    /// A delegatecall targeted an address that has no code.
    DelegateCallToNonContract(Address),
}
impl fmt::Display for DetailedRevertReason {
    /// Renders the diagnostic as a human-readable revert message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::CallToNonContract(addr) => {
                write!(f, "call to non-contract address {addr}")
            }
            Self::DelegateCallToNonContract(addr) => write!(
                f,
                // Fixed typo in the user-facing message: "unliked" -> "unlinked".
                "delegatecall to non-contract address {addr} (usually an unlinked library)"
            ),
        }
    }
}
/// An inspector that tracks call context to enhance revert diagnostics.
/// Useful for understanding reverts that are not linked to custom errors or revert strings.
///
/// Supported diagnostics:
/// 1. **Non-void call to non-contract address:** the Solidity compiler adds some validation to the
///    return data of the call, so although the call succeeds, since it returns no data, the
///    validation causes a revert.
///
///    Identified when: a call with non-empty calldata is made to an address without bytecode,
///    followed by an empty revert at the same depth.
///
/// 2. **Void call to non-contract address:** in this case the Solidity compiler adds some checks
///    before doing the call, so it never takes place.
///
///    Identified when: extcodesize for the target address returns 0 + empty revert at the same
///    depth.
#[derive(Clone, Debug, Default)]
pub struct RevertDiagnostic {
    /// Tracks calls with calldata that target an address without executable code.
    non_contract_call: Option<(Address, CallScheme, usize)>,
    /// Tracks EXTCODESIZE checks that target an address without executable code.
    non_contract_size_check: Option<(Address, usize)>,
    /// Whether the step opcode is EXTCODESIZE or not.
    is_extcodesize_step: bool,
}
impl RevertDiagnostic {
    /// Returns the effective target address whose code would be executed.
    /// For delegate calls, this is the `bytecode_address`. Otherwise, it's the `target_address`.
    fn code_target_address(&self, inputs: &mut CallInputs) -> Address {
        if is_delegatecall(inputs.scheme) { inputs.bytecode_address } else { inputs.target_address }
    }

    /// Derives the revert reason based on the cached data. Should only be called after a revert.
    /// The non-contract-call diagnostic takes precedence over the size-check one.
    fn reason(&self) -> Option<DetailedRevertReason> {
        if let Some((addr, scheme, _)) = self.non_contract_call {
            let reason = if is_delegatecall(scheme) {
                DetailedRevertReason::DelegateCallToNonContract(addr)
            } else {
                DetailedRevertReason::CallToNonContract(addr)
            };
            return Some(reason);
        }
        if let Some((addr, _)) = self.non_contract_size_check {
            // unknown schema as the call never took place --> output most generic reason
            return Some(DetailedRevertReason::CallToNonContract(addr));
        }
        None
    }

    /// Injects the revert diagnostic into the debug traces. Should only be called after a revert.
    fn broadcast_diagnostic(&self, interpreter: &mut Interpreter) {
        if let Some(reason) = self.reason() {
            // Replace the empty revert with an ABI-encoded string carrying the diagnostic.
            interpreter.bytecode.set_action(InterpreterAction::new_return(
                InstructionResult::Revert,
                reason.to_string().abi_encode().into(),
                interpreter.gas,
            ));
        }
    }

    /// When a `REVERT` opcode with zero data size occurs:
    /// - if `non_contract_call` was set at the current depth, `broadcast_diagnostic` is called.
    ///   Otherwise, it is cleared.
    /// - if `non_contract_size_check` was set at the current depth, `broadcast_diagnostic` is
    ///   called. Otherwise, it is cleared.
    #[cold]
    fn handle_revert<CTX, D>(&mut self, interp: &mut Interpreter, ctx: &mut CTX)
    where
        D: Database<Error = DatabaseError>,
        CTX: ContextTr<Db = D>,
        CTX::Journal: JournalExt,
    {
        // REVERT (offset, size)
        if let Ok(size) = interp.stack.peek(1)
            && size.is_zero()
        {
            // Check empty revert with same depth as a non-contract call
            if let Some((_, _, depth)) = self.non_contract_call {
                if ctx.journal_ref().depth() == depth {
                    self.broadcast_diagnostic(interp);
                } else {
                    self.non_contract_call = None;
                }
                return;
            }
            // Check empty revert with same depth as a non-contract size check
            if let Some((_, depth)) = self.non_contract_size_check {
                if depth == ctx.journal_ref().depth() {
                    self.broadcast_diagnostic(interp);
                } else {
                    self.non_contract_size_check = None;
                }
            }
        }
    }

    /// When an `EXTCODESIZE` opcode occurs:
    /// - Optimistically caches the target address and current depth in `non_contract_size_check`,
    ///   pending later validation.
    #[cold]
    fn handle_extcodesize<CTX, D>(&mut self, interp: &mut Interpreter, ctx: &mut CTX)
    where
        D: Database<Error = DatabaseError>,
        CTX: ContextTr<Db = D>,
        CTX::Journal: JournalExt,
    {
        // EXTCODESIZE (address)
        if let Ok(word) = interp.stack.peek(0) {
            let addr = Address::from_word(word.into());
            if IGNORE.contains(&addr) || ctx.journal_ref().precompile_addresses().contains(&addr) {
                return;
            }
            // Optimistically cache --> validated and cleared (if necessary) at `fn
            // step_end()`
            self.non_contract_size_check = Some((addr, ctx.journal_ref().depth()));
            self.is_extcodesize_step = true;
        }
    }

    /// Tracks `EXTCODESIZE` output. If the bytecode size is NOT 0, clears the cache.
    #[cold]
    fn handle_extcodesize_output(&mut self, interp: &mut Interpreter) {
        if let Ok(size) = interp.stack.peek(0)
            && size != U256::ZERO
        {
            self.non_contract_size_check = None;
        }
        self.is_extcodesize_step = false;
    }
}
impl<CTX, D> Inspector<CTX, EthInterpreter> for RevertDiagnostic
where
    D: Database<Error = DatabaseError>,
    CTX: ContextTr<Db = D>,
    CTX::Journal: JournalExt,
{
    /// Tracks the first call with non-zero calldata that targets a non-contract address. Excludes
    /// precompiles and test addresses.
    fn call(&mut self, ctx: &mut CTX, inputs: &mut CallInputs) -> Option<CallOutcome> {
        let target = self.code_target_address(inputs);
        if IGNORE.contains(&target) || ctx.journal_ref().precompile_addresses().contains(&target) {
            return None;
        }
        // Empty code at the target plus non-empty calldata --> candidate for diagnostic 1.
        if let Ok(state) = ctx.journal_mut().code(target)
            && state.is_empty()
            && !inputs.input.is_empty()
        {
            self.non_contract_call = Some((target, inputs.scheme, ctx.journal_ref().depth()));
        }
        None
    }

    /// Handles `REVERT` and `EXTCODESIZE` opcodes for diagnostics.
    fn step(&mut self, interp: &mut Interpreter, ctx: &mut CTX) {
        match interp.bytecode.opcode() {
            opcode::REVERT => self.handle_revert(interp, ctx),
            opcode::EXTCODESIZE => self.handle_extcodesize(interp, ctx),
            _ => {}
        }
    }

    /// Validates the `EXTCODESIZE` result cached by the preceding `step`.
    fn step_end(&mut self, interp: &mut Interpreter, _ctx: &mut CTX) {
        if self.is_extcodesize_step {
            self.handle_extcodesize_output(interp);
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/script.rs | crates/evm/evm/src/inspectors/script.rs | use alloy_evm::Database;
use alloy_primitives::{Address, Bytes};
use foundry_evm_core::backend::DatabaseError;
use revm::{
Inspector,
bytecode::opcode::ADDRESS,
context::ContextTr,
inspector::JournalExt,
interpreter::{
InstructionResult, Interpreter, InterpreterAction,
interpreter::EthInterpreter,
interpreter_types::{Jumps, LoopControl},
},
};
/// An inspector that enforces certain rules during script execution.
///
/// Currently, it only reverts if the `ADDRESS` opcode is used within the script's main contract.
#[derive(Clone, Debug, Default)]
pub struct ScriptExecutionInspector {
    /// The address of the script contract being executed.
    pub script_address: Address,
}
impl<CTX, D> Inspector<CTX, EthInterpreter> for ScriptExecutionInspector
where
    D: Database<Error = DatabaseError>,
    CTX: ContextTr<Db = D>,
    CTX::Journal: JournalExt,
{
    /// Reverts execution when `ADDRESS` is executed inside the script contract itself.
    fn step(&mut self, interpreter: &mut Interpreter, _ecx: &mut CTX) {
        // Check if both target and bytecode address are the same as script contract address
        // (allow calling external libraries when bytecode address is different).
        if interpreter.bytecode.opcode() == ADDRESS
            && interpreter.input.target_address == self.script_address
            && interpreter.input.bytecode_address == Some(self.script_address)
        {
            interpreter.bytecode.set_action(InterpreterAction::new_return(
                InstructionResult::Revert,
                Bytes::from("Usage of `address(this)` detected in script contract. Script contracts are ephemeral and their addresses should not be relied upon."),
                interpreter.gas,
            ));
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/chisel_state.rs | crates/evm/evm/src/inspectors/chisel_state.rs | use alloy_primitives::U256;
use foundry_evm_core::backend::DatabaseError;
use revm::{
Database, Inspector,
context::ContextTr,
inspector::JournalExt,
interpreter::{Interpreter, interpreter::EthInterpreter, interpreter_types::Jumps},
};
/// An inspector for Chisel
#[derive(Clone, Debug, Default)]
pub struct ChiselState {
    /// The PC of the final instruction
    pub final_pc: usize,
    /// The final state of the REPL contract call: (stack contents, memory bytes).
    pub state: Option<(Vec<U256>, Vec<u8>)>,
}
impl ChiselState {
    /// Create a new Chisel state inspector targeting the given final program counter.
    #[inline]
    pub fn new(final_pc: usize) -> Self {
        Self { final_pc, state: None }
    }
}
impl<CTX, D> Inspector<CTX, EthInterpreter> for ChiselState
where
    D: Database<Error = DatabaseError>,
    CTX: ContextTr<Db = D>,
    CTX::Journal: JournalExt,
{
    #[cold]
    fn step_end(&mut self, interpreter: &mut Interpreter, _context: &mut CTX) {
        // If we are at the final pc of the REPL contract execution, set the state.
        // Subtraction can't overflow because `pc` is always at least 1 in `step_end`.
        if self.final_pc == interpreter.bytecode.pc() - 1 {
            // Snapshot the full stack and memory at this point.
            self.state = Some((
                interpreter.stack.data().clone(),
                interpreter.memory.context_memory().to_vec(),
            ))
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/inspectors/logs.rs | crates/evm/evm/src/inspectors/logs.rs | use alloy_primitives::Log;
use alloy_sol_types::{SolEvent, SolInterface, SolValue};
use foundry_common::{ErrorExt, fmt::ConsoleFmt};
use foundry_evm_core::{InspectorExt, abi::console, constants::HARDHAT_CONSOLE_ADDRESS};
use revm::{
Inspector,
context::ContextTr,
interpreter::{
CallInputs, CallOutcome, Gas, InstructionResult, InterpreterResult,
interpreter::EthInterpreter,
},
};
/// An inspector that collects logs during execution.
///
/// The inspector collects logs from the `LOG` opcodes as well as Hardhat-style `console.sol` logs.
#[derive(Clone, Debug, Default)]
pub struct LogCollector {
    /// The collected logs, in the order they were observed. Includes both `LOG` opcodes and
    /// Hardhat-style `console.sol` logs.
    pub logs: Vec<Log>,
}
impl LogCollector {
    /// Handles a call to the Hardhat console address: decodes and records the log,
    /// returning a revert outcome if the console calldata cannot be decoded.
    #[cold]
    fn do_hardhat_log<CTX>(&mut self, context: &mut CTX, inputs: &CallInputs) -> Option<CallOutcome>
    where
        CTX: ContextTr,
    {
        if let Err(err) = self.hardhat_log(&inputs.input.bytes(context)) {
            let result = InstructionResult::Revert;
            let output = err.abi_encode_revert();
            return Some(CallOutcome {
                result: InterpreterResult { result, output, gas: Gas::new(inputs.gas_limit) },
                memory_offset: inputs.return_memory_offset.clone(),
                was_precompile_called: true,
                precompile_call_logs: vec![],
            });
        }
        None
    }

    /// Decodes a Hardhat `console.sol` call and records it as a DSTest-style log.
    fn hardhat_log(&mut self, data: &[u8]) -> alloy_sol_types::Result<()> {
        let decoded = console::hh::ConsoleCalls::abi_decode(data)?;
        self.logs.push(hh_to_ds(&decoded));
        Ok(())
    }
}
impl<CTX> Inspector<CTX, EthInterpreter> for LogCollector
where
    CTX: ContextTr,
{
    /// Records a `LOG`-opcode log.
    fn log(&mut self, _context: &mut CTX, log: Log) {
        self.logs.push(log);
    }

    /// Intercepts calls to the Hardhat console address; all other calls pass through.
    fn call(&mut self, context: &mut CTX, inputs: &mut CallInputs) -> Option<CallOutcome> {
        if inputs.target_address == HARDHAT_CONSOLE_ADDRESS {
            return self.do_hardhat_log(context, inputs);
        }
        None
    }
}
impl InspectorExt for LogCollector {
    /// Records a plain string message as a `console.log(string)` event.
    fn console_log(&mut self, msg: &str) {
        self.logs.push(new_console_log(msg));
    }
}
/// Converts a Hardhat `console.log` call to a DSTest `log(string)` event, so all
/// console output is uniformly represented as `log(string)` events.
fn hh_to_ds(call: &console::hh::ConsoleCalls) -> Log {
    // Convert the parameters of the call to their string representation using `ConsoleFmt`.
    let msg = call.fmt(Default::default());
    new_console_log(&msg)
}
/// Creates a `console.log(string)` event.
fn new_console_log(msg: &str) -> Log {
    // `new_unchecked`: the single topic provided matches the `log(string)` signature hash.
    Log::new_unchecked(
        HARDHAT_CONSOLE_ADDRESS,
        vec![console::ds::log::SIGNATURE_HASH],
        msg.abi_encode().into(),
    )
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/builder.rs | crates/evm/evm/src/executors/builder.rs | use crate::{executors::Executor, inspectors::InspectorStackBuilder};
use foundry_evm_core::{Env, backend::Backend};
use revm::primitives::hardfork::SpecId;
/// The builder that allows configuring an evm [`Executor`] with a stack of optional
/// [`revm::Inspector`]s, such as [`Cheatcodes`].
///
/// By default, the [`Executor`] will be configured with an empty [`InspectorStack`].
///
/// [`Cheatcodes`]: super::Cheatcodes
/// [`InspectorStack`]: super::InspectorStack
#[derive(Debug, Clone)]
#[must_use = "builders do nothing unless you call `build` on them"]
pub struct ExecutorBuilder {
    /// The configuration used to build an `InspectorStack`.
    stack: InspectorStackBuilder,
    /// The gas limit.
    gas_limit: Option<u64>,
    /// The spec ID.
    spec_id: SpecId,
    /// Whether `failed()` should be called on the test contract to determine if the test
    /// failed (legacy assertion style); forwarded to [`Executor::new`] by `build`.
    legacy_assertions: bool,
}
impl Default for ExecutorBuilder {
    /// Empty inspector stack, no explicit gas limit, default spec ID, legacy assertions off.
    #[inline]
    fn default() -> Self {
        Self {
            stack: InspectorStackBuilder::new(),
            gas_limit: None,
            spec_id: SpecId::default(),
            legacy_assertions: false,
        }
    }
}
impl ExecutorBuilder {
    /// Create a new executor builder.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }

    /// Modify the inspector stack.
    #[inline]
    pub fn inspectors(
        mut self,
        f: impl FnOnce(InspectorStackBuilder) -> InspectorStackBuilder,
    ) -> Self {
        self.stack = f(self.stack);
        self
    }

    /// Sets the EVM spec to use.
    #[inline]
    pub fn spec_id(mut self, spec: SpecId) -> Self {
        self.spec_id = spec;
        self
    }

    /// Sets the executor gas limit.
    #[inline]
    pub fn gas_limit(mut self, gas_limit: u64) -> Self {
        self.gas_limit = Some(gas_limit);
        self
    }

    /// Sets the `legacy_assertions` flag.
    #[inline]
    pub fn legacy_assertions(mut self, legacy_assertions: bool) -> Self {
        self.legacy_assertions = legacy_assertions;
        self
    }

    /// Builds the executor as configured.
    ///
    /// Values not set on the builder are defaulted from `env`: the stack's block and
    /// gas price, and the executor gas limit, fall back to the environment's values.
    #[inline]
    pub fn build(self, env: Env, db: Backend) -> Executor {
        let Self { mut stack, gas_limit, spec_id, legacy_assertions } = self;
        if stack.block.is_none() {
            stack.block = Some(env.evm_env.block_env.clone());
        }
        if stack.gas_price.is_none() {
            stack.gas_price = Some(env.tx.gas_price);
        }
        let gas_limit = gas_limit.unwrap_or(env.evm_env.block_env.gas_limit);
        let env = Env::new_with_spec_id(
            env.evm_env.cfg_env.clone(),
            env.evm_env.block_env.clone(),
            env.tx,
            spec_id,
        );
        Executor::new(db, env, stack.build(), gas_limit, legacy_assertions)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/trace.rs | crates/evm/evm/src/executors/trace.rs | use crate::{
Env,
executors::{Executor, ExecutorBuilder},
};
use alloy_primitives::{Address, U256, map::HashMap};
use alloy_rpc_types::state::StateOverride;
use eyre::Context;
use foundry_compilers::artifacts::EvmVersion;
use foundry_config::{Chain, Config, utils::evm_spec_id};
use foundry_evm_core::{backend::Backend, fork::CreateFork, opts::EvmOpts};
use foundry_evm_networks::NetworkConfigs;
use foundry_evm_traces::TraceMode;
use revm::{primitives::hardfork::SpecId, state::Bytecode};
use std::ops::{Deref, DerefMut};
/// A default executor with tracing enabled
pub struct TracingExecutor {
    /// The wrapped executor; exposed via `Deref`/`DerefMut`.
    executor: Executor,
}
impl TracingExecutor {
    /// Creates a tracing executor backed by a fork, with optional state overrides applied.
    pub fn new(
        env: Env,
        fork: CreateFork,
        version: Option<EvmVersion>,
        trace_mode: TraceMode,
        networks: NetworkConfigs,
        create2_deployer: Address,
        state_overrides: Option<StateOverride>,
    ) -> eyre::Result<Self> {
        let db = Backend::spawn(Some(fork))?;
        // configures a bare version of the evm executor: no cheatcode inspector is enabled,
        // tracing will be enabled only for the targeted transaction
        let mut executor = ExecutorBuilder::new()
            .inspectors(|stack| {
                stack.trace_mode(trace_mode).networks(networks).create2_deployer(create2_deployer)
            })
            .spec_id(evm_spec_id(version.unwrap_or_default()))
            .build(env, db);
        // Apply the state overrides.
        if let Some(state_overrides) = state_overrides {
            for (address, overrides) in state_overrides {
                if let Some(balance) = overrides.balance {
                    executor.set_balance(address, balance)?;
                }
                if let Some(nonce) = overrides.nonce {
                    executor.set_nonce(address, nonce)?;
                }
                if let Some(code) = overrides.code {
                    let bytecode = Bytecode::new_raw_checked(code)
                        .wrap_err("invalid bytecode in state override")?;
                    executor.set_code(address, bytecode)?;
                }
                if let Some(state) = overrides.state {
                    // Full storage replacement for the account.
                    let state: HashMap<U256, U256> = state
                        .into_iter()
                        .map(|(slot, value)| (slot.into(), value.into()))
                        .collect();
                    executor.set_storage(address, state)?;
                }
                if let Some(state_diff) = overrides.state_diff {
                    // Per-slot updates, leaving other storage intact.
                    for (slot, value) in state_diff {
                        executor.set_storage_slot(address, slot.into(), value.into())?;
                    }
                }
            }
        }
        Ok(Self { executor })
    }

    /// Returns the spec id of the executor
    pub fn spec_id(&self) -> SpecId {
        self.executor.spec_id()
    }

    /// uses the fork block number from the config
    pub async fn get_fork_material(
        config: &mut Config,
        mut evm_opts: EvmOpts,
    ) -> eyre::Result<(Env, CreateFork, Chain, NetworkConfigs)> {
        evm_opts.fork_url = Some(config.get_rpc_url_or_localhost_http()?.into_owned());
        evm_opts.fork_block_number = config.fork_block_number;
        let env = evm_opts.evm_env().await?;
        // NOTE(review): both unwraps below assume a fork URL was set above and that the
        // resulting env carries a chain id — confirm these invariants hold for all callers.
        let fork = evm_opts.get_fork(config, env.clone()).unwrap();
        let networks = evm_opts.networks.with_chain_id(env.evm_env.cfg_env.chain_id);
        config.labels.extend(networks.precompiles_label());
        let chain = env.tx.chain_id.unwrap().into();
        Ok((env, fork, chain, networks))
    }
}
impl Deref for TracingExecutor {
    type Target = Executor;

    /// Lets [`Executor`] methods be called directly on a `TracingExecutor`.
    fn deref(&self) -> &Self::Target {
        &self.executor
    }
}
impl DerefMut for TracingExecutor {
    /// Mutable counterpart of `Deref`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.executor
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/mod.rs | crates/evm/evm/src/executors/mod.rs | //! EVM executor abstractions, which can execute calls.
//!
//! Used for running tests, scripts, and interacting with the inner backend which holds the state.
// TODO: The individual executors in this module should be moved into the respective crates, and the
// `Executor` struct should be accessed using a trait defined in `foundry-evm-core` instead of
// the concrete `Executor` type.
use crate::{
Env,
inspectors::{
Cheatcodes, InspectorData, InspectorStack, cheatcodes::BroadcastableTransactions,
},
};
use alloy_dyn_abi::{DynSolValue, FunctionExt, JsonAbiExt};
use alloy_json_abi::Function;
use alloy_primitives::{
Address, Bytes, Log, TxKind, U256, keccak256,
map::{AddressHashMap, HashMap},
};
use alloy_sol_types::{SolCall, sol};
use foundry_evm_core::{
EvmEnv,
backend::{Backend, BackendError, BackendResult, CowBackend, DatabaseExt, GLOBAL_FAIL_SLOT},
constants::{
CALLER, CHEATCODE_ADDRESS, CHEATCODE_CONTRACT_HASH, DEFAULT_CREATE2_DEPLOYER,
DEFAULT_CREATE2_DEPLOYER_CODE, DEFAULT_CREATE2_DEPLOYER_DEPLOYER,
},
decode::{RevertDecoder, SkipReason},
utils::StateChangeset,
};
use foundry_evm_coverage::HitMaps;
use foundry_evm_traces::{SparsedTraceArena, TraceMode};
use revm::{
bytecode::Bytecode,
context::{BlockEnv, TxEnv},
context_interface::{
result::{ExecutionResult, Output, ResultAndState},
transaction::SignedAuthorization,
},
database::{DatabaseCommit, DatabaseRef},
interpreter::{InstructionResult, return_ok},
primitives::hardfork::SpecId,
};
use std::{
borrow::Cow,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
time::{Duration, Instant},
};
mod builder;
pub use builder::ExecutorBuilder;
pub mod fuzz;
pub use fuzz::FuzzedExecutor;
pub mod invariant;
pub use invariant::InvariantExecutor;
mod corpus;
mod trace;
pub use trace::TracingExecutor;
// Interval between metrics reports; presumably used to throttle periodic reporting —
// the usage site is not visible in this part of the module.
const DURATION_BETWEEN_METRICS_REPORT: Duration = Duration::from_secs(5);
// Solidity interface of the standard test-contract hooks used by the executor.
sol! {
    interface ITest {
        function setUp() external;
        function failed() external view returns (bool failed);
        #[derive(Default)]
        function beforeTestSetup(bytes4 testSelector) public view returns (bytes[] memory beforeTestCalldata);
    }
}
/// EVM executor.
///
/// The executor can be configured with various `revm::Inspector`s, like `Cheatcodes`.
///
/// There are multiple ways of interacting with the EVM:
/// - `call`: executes a transaction, but does not persist any state changes; similar to `eth_call`,
///   where the EVM state is unchanged after the call.
/// - `transact`: executes a transaction and persists the state changes
/// - `deploy`: a special case of `transact`, specialized for persisting the state of a contract
///   deployment
/// - `setup`: a special case of `transact`, used to set up the environment for a test
#[derive(Clone, Debug)]
pub struct Executor {
    /// The underlying `revm::Database` that contains the EVM storage.
    // Note: We do not store an EVM here, since we are really
    // only interested in the database. REVM's `EVM` is a thin
    // wrapper around spawning a new EVM on every call anyway,
    // so the performance difference should be negligible.
    backend: Backend,
    /// The EVM environment.
    env: Env,
    /// The Revm inspector stack.
    inspector: InspectorStack,
    /// The gas limit for calls and deployments.
    gas_limit: u64,
    /// Whether `failed()` should be called on the test contract to determine if the test failed.
    legacy_assertions: bool,
}
impl Executor {
/// Creates a new `ExecutorBuilder`.
#[inline]
pub fn builder() -> ExecutorBuilder {
ExecutorBuilder::new()
}
/// Creates a new `Executor` with the given arguments.
#[inline]
pub fn new(
mut backend: Backend,
env: Env,
inspector: InspectorStack,
gas_limit: u64,
legacy_assertions: bool,
) -> Self {
// Need to create a non-empty contract on the cheatcodes address so `extcodesize` checks
// do not fail.
backend.insert_account_info(
CHEATCODE_ADDRESS,
revm::state::AccountInfo {
code: Some(Bytecode::new_raw(Bytes::from_static(&[0]))),
// Also set the code hash manually so that it's not computed later.
// The code hash value does not matter, as long as it's not zero or `KECCAK_EMPTY`.
code_hash: CHEATCODE_CONTRACT_HASH,
..Default::default()
},
);
Self { backend, env, inspector, gas_limit, legacy_assertions }
}
fn clone_with_backend(&self, backend: Backend) -> Self {
let env = Env::new_with_spec_id(
self.env.evm_env.cfg_env.clone(),
self.env.evm_env.block_env.clone(),
self.env.tx.clone(),
self.spec_id(),
);
Self::new(backend, env, self.inspector().clone(), self.gas_limit, self.legacy_assertions)
}
    /// Returns a reference to the EVM backend.
    pub fn backend(&self) -> &Backend {
        &self.backend
    }
    /// Returns a mutable reference to the EVM backend.
    pub fn backend_mut(&mut self) -> &mut Backend {
        &mut self.backend
    }
    /// Returns a reference to the EVM environment.
    pub fn env(&self) -> &Env {
        &self.env
    }
    /// Returns a mutable reference to the EVM environment.
    pub fn env_mut(&mut self) -> &mut Env {
        &mut self.env
    }
    /// Returns a reference to the EVM inspector.
    pub fn inspector(&self) -> &InspectorStack {
        &self.inspector
    }
    /// Returns a mutable reference to the EVM inspector.
    pub fn inspector_mut(&mut self) -> &mut InspectorStack {
        &mut self.inspector
    }
    /// Returns the EVM spec ID, read from the configuration environment.
    pub fn spec_id(&self) -> SpecId {
        self.env.evm_env.cfg_env.spec
    }
    /// Sets the EVM spec ID in the configuration environment.
    pub fn set_spec_id(&mut self, spec_id: SpecId) {
        self.env.evm_env.cfg_env.spec = spec_id;
    }
    /// Returns the gas limit for calls and deployments.
    ///
    /// This is different from the gas limit imposed by the passed in environment, as those limits
    /// are used by the EVM for certain opcodes like `gaslimit`.
    pub fn gas_limit(&self) -> u64 {
        self.gas_limit
    }
    /// Sets the gas limit for calls and deployments.
    pub fn set_gas_limit(&mut self, gas_limit: u64) {
        self.gas_limit = gas_limit;
    }
    /// Returns whether `failed()` should be called on the test contract to determine if the test
    /// failed.
    pub fn legacy_assertions(&self) -> bool {
        self.legacy_assertions
    }
    /// Sets whether `failed()` should be called on the test contract to determine if the test
    /// failed.
    pub fn set_legacy_assertions(&mut self, legacy_assertions: bool) {
        self.legacy_assertions = legacy_assertions;
    }
    /// Creates the default CREATE2 Contract Deployer for local tests and scripts.
    ///
    /// No-op when the deployer account already has code. The deployer-of-the-deployer's
    /// balance is temporarily maxed out to fund the deployment and restored afterwards.
    ///
    /// # Errors
    ///
    /// Fails if the deployer account is missing from the backend or the deployment reverts.
    pub fn deploy_create2_deployer(&mut self) -> eyre::Result<()> {
        trace!("deploying local create2 deployer");
        let create2_deployer_account = self
            .backend()
            .basic_ref(DEFAULT_CREATE2_DEPLOYER)?
            .ok_or_else(|| BackendError::MissingAccount(DEFAULT_CREATE2_DEPLOYER))?;
        // If the deployer is not currently deployed, deploy the default one.
        if create2_deployer_account.code.is_none_or(|code| code.is_empty()) {
            let creator = DEFAULT_CREATE2_DEPLOYER_DEPLOYER;
            // Probably 0, but just in case.
            let initial_balance = self.get_balance(creator)?;
            self.set_balance(creator, U256::MAX)?;
            let res =
                self.deploy(creator, DEFAULT_CREATE2_DEPLOYER_CODE.into(), U256::ZERO, None)?;
            trace!(create2=?res.address, "deployed local create2 deployer");
            self.set_balance(creator, initial_balance)?;
        }
        Ok(())
    }
    /// Set the balance of an account, creating the account if it does not exist.
    pub fn set_balance(&mut self, address: Address, amount: U256) -> BackendResult<()> {
        trace!(?address, ?amount, "setting account balance");
        let mut account = self.backend().basic_ref(address)?.unwrap_or_default();
        account.balance = amount;
        self.backend_mut().insert_account_info(address, account);
        Ok(())
    }
    /// Gets the balance of an account, returning zero for unknown accounts.
    pub fn get_balance(&self, address: Address) -> BackendResult<U256> {
        Ok(self.backend().basic_ref(address)?.map(|acc| acc.balance).unwrap_or_default())
    }
    /// Set the nonce of an account.
    ///
    /// Also mirrors the nonce into the transaction environment so subsequent
    /// transactions start from it.
    pub fn set_nonce(&mut self, address: Address, nonce: u64) -> BackendResult<()> {
        let mut account = self.backend().basic_ref(address)?.unwrap_or_default();
        account.nonce = nonce;
        self.backend_mut().insert_account_info(address, account);
        self.env_mut().tx.nonce = nonce;
        Ok(())
    }
    /// Returns the nonce of an account, returning zero for unknown accounts.
    pub fn get_nonce(&self, address: Address) -> BackendResult<u64> {
        Ok(self.backend().basic_ref(address)?.map(|acc| acc.nonce).unwrap_or_default())
    }
    /// Set the code of an account, recomputing and storing its code hash.
    pub fn set_code(&mut self, address: Address, code: Bytecode) -> BackendResult<()> {
        let mut account = self.backend().basic_ref(address)?.unwrap_or_default();
        account.code_hash = keccak256(code.original_byte_slice());
        account.code = Some(code);
        self.backend_mut().insert_account_info(address, account);
        Ok(())
    }
    /// Set the storage of an account, replacing the entire existing storage.
    pub fn set_storage(
        &mut self,
        address: Address,
        storage: HashMap<U256, U256>,
    ) -> BackendResult<()> {
        self.backend_mut().replace_account_storage(address, storage)?;
        Ok(())
    }
    /// Set a single storage slot of an account, leaving other slots untouched.
    pub fn set_storage_slot(
        &mut self,
        address: Address,
        slot: U256,
        value: U256,
    ) -> BackendResult<()> {
        self.backend_mut().insert_account_storage(address, slot, value)?;
        Ok(())
    }
    /// Returns `true` if the account has no code (unknown accounts count as empty).
    pub fn is_empty_code(&self, address: Address) -> BackendResult<bool> {
        Ok(self.backend().basic_ref(address)?.map(|acc| acc.is_empty_code_hash()).unwrap_or(true))
    }
    /// Configures the inspector's tracing mode; returns `self` for chaining.
    #[inline]
    pub fn set_tracing(&mut self, mode: TraceMode) -> &mut Self {
        self.inspector_mut().tracing(mode);
        self
    }
    /// Marks `script_address` as the script contract on the inspector stack.
    #[inline]
    pub fn set_script_execution(&mut self, script_address: Address) {
        self.inspector_mut().script(script_address);
    }
    /// Enables or disables the inspector's trace printer; returns `self` for chaining.
    #[inline]
    pub fn set_trace_printer(&mut self, trace_printer: bool) -> &mut Self {
        self.inspector_mut().print(trace_printer);
        self
    }
    /// Returns the CREATE2 deployer address configured on the inspector.
    #[inline]
    pub fn create2_deployer(&self) -> Address {
        self.inspector().create2_deployer
    }
    /// Deploys a contract and commits the new state to the underlying database.
    ///
    /// Executes a CREATE transaction with the contract `code` and persistent database state
    /// modifications.
    pub fn deploy(
        &mut self,
        from: Address,
        code: Bytes,
        value: U256,
        rd: Option<&RevertDecoder>,
    ) -> Result<DeployResult, EvmError> {
        let env = self.build_test_env(from, TxKind::Create, code, value);
        self.deploy_with_env(env, rd)
    }
    /// Deploys a contract using the given `env` and commits the new state to the underlying
    /// database.
    ///
    /// # Panics
    ///
    /// Panics if `env.tx.kind` is not `TxKind::Create(_)`, or if the transaction
    /// succeeded without returning a deployment address (an internal invariant violation).
    #[instrument(name = "deploy", level = "debug", skip_all)]
    pub fn deploy_with_env(
        &mut self,
        env: Env,
        rd: Option<&RevertDecoder>,
    ) -> Result<DeployResult, EvmError> {
        assert!(
            matches!(env.tx.kind, TxKind::Create),
            "Expected create transaction, got {:?}",
            env.tx.kind
        );
        trace!(sender=%env.tx.caller, "deploying contract");
        // A revert surfaces here as an `EvmError` via `into_result`.
        let mut result = self.transact_with_env(env)?;
        result = result.into_result(rd)?;
        let Some(Output::Create(_, Some(address))) = result.out else {
            panic!("Deployment succeeded, but no address was returned: {result:#?}");
        };
        // also mark this library as persistent, this will ensure that the state of the library is
        // persistent across fork swaps in forking mode
        self.backend_mut().add_persistent_account(address);
        trace!(%address, "deployed contract");
        Ok(DeployResult { raw: result, address })
    }
    /// Calls the `setUp()` function on a contract.
    ///
    /// This will commit any state changes to the underlying database.
    ///
    /// Any changes made during the setup call to env's block environment are persistent, for
    /// example `vm.chainId()` will change the `block.chainId` for all subsequent test calls.
    #[instrument(name = "setup", level = "debug", skip_all)]
    pub fn setup(
        &mut self,
        from: Option<Address>,
        to: Address,
        rd: Option<&RevertDecoder>,
    ) -> Result<RawCallResult, EvmError> {
        trace!(?from, ?to, "setting up contract");
        // Default to the test runner's caller when no sender is given.
        let from = from.unwrap_or(CALLER);
        self.backend_mut().set_test_contract(to).set_caller(from);
        let calldata = Bytes::from_static(&ITest::setUpCall::SELECTOR);
        let mut res = self.transact_raw(from, to, calldata, U256::ZERO)?;
        res = res.into_result(rd)?;
        // record any changes made to the block's environment during setup
        self.env_mut().evm_env.block_env = res.env.evm_env.block_env.clone();
        // and also the chainid, which can be set manually
        self.env_mut().evm_env.cfg_env.chain_id = res.env.evm_env.cfg_env.chain_id;
        let success =
            self.is_raw_call_success(to, Cow::Borrowed(&res.state_changeset), &res, false);
        if !success {
            return Err(res.into_execution_error("execution error".to_string()).into());
        }
        Ok(res)
    }
/// Performs a call to an account on the current state of the VM.
pub fn call(
&self,
from: Address,
to: Address,
func: &Function,
args: &[DynSolValue],
value: U256,
rd: Option<&RevertDecoder>,
) -> Result<CallResult, EvmError> {
let calldata = Bytes::from(func.abi_encode_input(args)?);
let result = self.call_raw(from, to, calldata, value)?;
result.into_decoded_result(func, rd)
}
/// Performs a call to an account on the current state of the VM.
pub fn call_sol<C: SolCall>(
&self,
from: Address,
to: Address,
args: &C,
value: U256,
rd: Option<&RevertDecoder>,
) -> Result<CallResult<C::Return>, EvmError> {
let calldata = Bytes::from(args.abi_encode());
let mut raw = self.call_raw(from, to, calldata, value)?;
raw = raw.into_result(rd)?;
Ok(CallResult { decoded_result: C::abi_decode_returns(&raw.result)?, raw })
}
/// Performs a call to an account on the current state of the VM.
pub fn transact(
&mut self,
from: Address,
to: Address,
func: &Function,
args: &[DynSolValue],
value: U256,
rd: Option<&RevertDecoder>,
) -> Result<CallResult, EvmError> {
let calldata = Bytes::from(func.abi_encode_input(args)?);
let result = self.transact_raw(from, to, calldata, value)?;
result.into_decoded_result(func, rd)
}
    /// Performs a raw call to an account on the current state of the VM.
    ///
    /// State changes are not persisted.
    pub fn call_raw(
        &self,
        from: Address,
        to: Address,
        calldata: Bytes,
        value: U256,
    ) -> eyre::Result<RawCallResult> {
        let env = self.build_test_env(from, TxKind::Call(to), calldata, value);
        self.call_with_env(env)
    }
    /// Performs a raw call to an account on the current state of the VM with an EIP-7702
    /// authorization list.
    ///
    /// State changes are not persisted.
    pub fn call_raw_with_authorization(
        &mut self,
        from: Address,
        to: Address,
        calldata: Bytes,
        value: U256,
        authorization_list: Vec<SignedAuthorization>,
    ) -> eyre::Result<RawCallResult> {
        let mut env = self.build_test_env(from, to.into(), calldata, value);
        env.tx.set_signed_authorization(authorization_list);
        // EIP-7702 set-code transaction type.
        env.tx.tx_type = 4;
        self.call_with_env(env)
    }
    /// Performs a raw call to an account on the current state of the VM,
    /// committing the resulting state changes.
    pub fn transact_raw(
        &mut self,
        from: Address,
        to: Address,
        calldata: Bytes,
        value: U256,
    ) -> eyre::Result<RawCallResult> {
        let env = self.build_test_env(from, TxKind::Call(to), calldata, value);
        self.transact_with_env(env)
    }
    /// Performs a raw call to an account on the current state of the VM with an EIP-7702
    /// authorization list, committing the resulting state changes.
    pub fn transact_raw_with_authorization(
        &mut self,
        from: Address,
        to: Address,
        calldata: Bytes,
        value: U256,
        authorization_list: Vec<SignedAuthorization>,
    ) -> eyre::Result<RawCallResult> {
        let mut env = self.build_test_env(from, TxKind::Call(to), calldata, value);
        env.tx.set_signed_authorization(authorization_list);
        // EIP-7702 set-code transaction type.
        env.tx.tx_type = 4;
        self.transact_with_env(env)
    }
    /// Execute the transaction configured in `env.tx`.
    ///
    /// The state after the call is **not** persisted: the backend is wrapped in a
    /// copy-on-write view so writes never reach the underlying database.
    #[instrument(name = "call", level = "debug", skip_all)]
    pub fn call_with_env(&self, mut env: Env) -> eyre::Result<RawCallResult> {
        let mut stack = self.inspector().clone();
        let mut backend = CowBackend::new_borrowed(self.backend());
        let result = backend.inspect(&mut env, stack.as_inspector())?;
        convert_executed_result(env, stack, result, backend.has_state_snapshot_failure())
    }
    /// Execute the transaction configured in `env.tx`, committing the resulting
    /// state changes and inspector state via [`Self::commit`].
    #[instrument(name = "transact", level = "debug", skip_all)]
    pub fn transact_with_env(&mut self, mut env: Env) -> eyre::Result<RawCallResult> {
        let mut stack = self.inspector().clone();
        let backend = self.backend_mut();
        let result = backend.inspect(&mut env, stack.as_inspector())?;
        let mut result =
            convert_executed_result(env, stack, result, backend.has_state_snapshot_failure())?;
        self.commit(&mut result);
        Ok(result)
    }
    /// Commit the changeset to the database and adjust `self.inspector_config` values according to
    /// the executed call result.
    ///
    /// This should not be exposed to the user, as it should be called only by `transact*`.
    #[instrument(name = "commit", level = "debug", skip_all)]
    fn commit(&mut self, result: &mut RawCallResult) {
        // Persist changes to db.
        self.backend_mut().commit(result.state_changeset.clone());
        // Persist cheatcode state.
        self.inspector_mut().cheatcodes = result.cheatcodes.take();
        if let Some(cheats) = self.inspector_mut().cheatcodes.as_mut() {
            // Clear broadcastable transactions
            cheats.broadcastable_transactions.clear();
            cheats.ignored_traces.ignored.clear();
            // if tracing was paused but never unpaused, we should begin next frame with tracing
            // still paused
            if let Some(last_pause_call) = cheats.ignored_traces.last_pause_call.as_mut() {
                *last_pause_call = (0, 0);
            }
        }
        // Persist the changed environment.
        self.inspector_mut().set_env(&result.env);
    }
    /// Returns `true` if a test can be considered successful.
    ///
    /// This is the same as [`Self::is_success`], but will consume the `state_changeset` map to use
    /// internally when calling `failed()`.
    pub fn is_raw_call_mut_success(
        &self,
        address: Address,
        call_result: &mut RawCallResult,
        should_fail: bool,
    ) -> bool {
        self.is_raw_call_success(
            address,
            // Take ownership of the changeset, leaving an empty map behind.
            Cow::Owned(std::mem::take(&mut call_result.state_changeset)),
            call_result,
            should_fail,
        )
    }
    /// Returns `true` if a test can be considered successful.
    ///
    /// This is the same as [`Self::is_success`], but intended for outcomes of [`Self::call_raw`].
    pub fn is_raw_call_success(
        &self,
        address: Address,
        state_changeset: Cow<'_, StateChangeset>,
        call_result: &RawCallResult,
        should_fail: bool,
    ) -> bool {
        if call_result.has_state_snapshot_failure {
            // a failure occurred in a reverted snapshot, which is considered a failed test
            return should_fail;
        }
        self.is_success(address, call_result.reverted, state_changeset, should_fail)
    }
    /// Returns `true` if a test can be considered successful.
    ///
    /// If the call succeeded, we also have to check the global and local failure flags.
    ///
    /// These are set by the test contract itself when an assertion fails, using the internal `fail`
    /// function. The global flag is located in [`CHEATCODE_ADDRESS`] at slot [`GLOBAL_FAIL_SLOT`],
    /// and the local flag is located in the test contract at an unspecified slot.
    ///
    /// This behavior is inherited from Dapptools, where initially only a public
    /// `failed` variable was used to track test failures, and later, a global failure flag was
    /// introduced to track failures across multiple contracts in
    /// [ds-test#30](https://github.com/dapphub/ds-test/pull/30).
    ///
    /// The assumption is that the test runner calls `failed` on the test contract to determine if
    /// it failed. However, we want to avoid this as much as possible, as it is relatively
    /// expensive to set up an EVM call just for checking a single boolean flag.
    ///
    /// See:
    /// - Newer DSTest: <https://github.com/dapphub/ds-test/blob/e282159d5170298eb2455a6c05280ab5a73a4ef0/src/test.sol#L47-L63>
    /// - Older DSTest: <https://github.com/dapphub/ds-test/blob/9ca4ecd48862b40d7b0197b600713f64d337af12/src/test.sol#L38-L49>
    /// - forge-std: <https://github.com/foundry-rs/forge-std/blob/19891e6a0b5474b9ea6827ddb90bb9388f7acfc0/src/StdAssertions.sol#L38-L44>
    pub fn is_success(
        &self,
        address: Address,
        reverted: bool,
        state_changeset: Cow<'_, StateChangeset>,
        should_fail: bool,
    ) -> bool {
        let success = self.is_success_raw(address, reverted, state_changeset);
        // An expected-to-fail test succeeds exactly when the raw outcome failed.
        should_fail ^ success
    }
#[instrument(name = "is_success", level = "debug", skip_all)]
fn is_success_raw(
&self,
address: Address,
reverted: bool,
state_changeset: Cow<'_, StateChangeset>,
) -> bool {
// The call reverted.
if reverted {
return false;
}
// A failure occurred in a reverted snapshot, which is considered a failed test.
if self.backend().has_state_snapshot_failure() {
return false;
}
// Check the global failure slot.
if let Some(acc) = state_changeset.get(&CHEATCODE_ADDRESS)
&& let Some(failed_slot) = acc.storage.get(&GLOBAL_FAIL_SLOT)
&& !failed_slot.present_value().is_zero()
{
return false;
}
if let Ok(failed_slot) = self.backend().storage_ref(CHEATCODE_ADDRESS, GLOBAL_FAIL_SLOT)
&& !failed_slot.is_zero()
{
return false;
}
if !self.legacy_assertions {
return true;
}
// Finally, resort to calling `DSTest::failed`.
{
// Construct a new bare-bones backend to evaluate success.
let mut backend = self.backend().clone_empty();
// We only clone the test contract and cheatcode accounts,
// that's all we need to evaluate success.
for address in [address, CHEATCODE_ADDRESS] {
let Ok(acc) = self.backend().basic_ref(address) else { return false };
backend.insert_account_info(address, acc.unwrap_or_default());
}
// If this test failed any asserts, then this changeset will contain changes
// `false -> true` for the contract's `failed` variable and the `globalFailure` flag
// in the state of the cheatcode address,
// which are both read when we call `"failed()(bool)"` in the next step.
backend.commit(state_changeset.into_owned());
// Check if a DSTest assertion failed
let executor = self.clone_with_backend(backend);
let call = executor.call_sol(CALLER, address, &ITest::failedCall {}, U256::ZERO, None);
match call {
Ok(CallResult { raw: _, decoded_result: failed }) => {
trace!(failed, "DSTest::failed()");
!failed
}
Err(err) => {
trace!(%err, "failed to call DSTest::failed()");
true
}
}
}
}
    /// Creates the environment to use when executing a transaction in a test context
    ///
    /// If using a backend with cheatcodes, `tx.gas_price` and `block.number` will be overwritten by
    /// the cheatcode state in between calls.
    fn build_test_env(&self, caller: Address, kind: TxKind, data: Bytes, value: U256) -> Env {
        Env {
            evm_env: EvmEnv {
                cfg_env: {
                    let mut cfg = self.env().evm_env.cfg_env.clone();
                    cfg.spec = self.spec_id();
                    cfg
                },
                // We always set the gas price to 0 so we can execute the transaction regardless of
                // network conditions - the actual gas price is kept in `self.block` and is applied
                // by the cheatcode handler if it is enabled
                block_env: BlockEnv {
                    basefee: 0,
                    gas_limit: self.gas_limit,
                    ..self.env().evm_env.block_env.clone()
                },
            },
            tx: TxEnv {
                caller,
                kind,
                data,
                value,
                // As above, we set the gas price to 0.
                gas_price: 0,
                gas_priority_fee: None,
                gas_limit: self.gas_limit,
                chain_id: Some(self.env().evm_env.cfg_env.chain_id),
                ..self.env().tx.clone()
            },
        }
    }
    /// Calls `args` on `to` from the default `CALLER` with zero value, returning the
    /// decoded result, or `C::Return::default()` on any failure (logging a warning).
    pub fn call_sol_default<C: SolCall>(&self, to: Address, args: &C) -> C::Return
    where
        C::Return: Default,
    {
        self.call_sol(CALLER, to, args, U256::ZERO, None)
            .map(|c| c.decoded_result)
            .inspect_err(|e| warn!(target: "forge::test", "failed calling {:?}: {e}", C::SIGNATURE))
            .unwrap_or_default()
    }
}
/// Represents the context after an execution error occurred.
#[derive(Debug, thiserror::Error)]
#[error("execution reverted: {reason} (gas: {})", raw.gas_used)]
pub struct ExecutionErr {
    /// The raw result of the call.
    pub raw: RawCallResult,
    /// The revert reason.
    pub reason: String,
}
// Deref to the raw result so callers can read call fields directly off the error.
impl std::ops::Deref for ExecutionErr {
    type Target = RawCallResult;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.raw
    }
}
impl std::ops::DerefMut for ExecutionErr {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.raw
    }
}
/// Errors that can occur when interacting with the [`Executor`].
#[derive(Debug, thiserror::Error)]
pub enum EvmError {
    /// Error which occurred during execution of a transaction.
    #[error(transparent)]
    Execution(#[from] Box<ExecutionErr>),
    /// Error which occurred during ABI encoding/decoding.
    #[error(transparent)]
    Abi(#[from] alloy_dyn_abi::Error),
    /// Error caused which occurred due to calling the `skip` cheatcode.
    #[error("{0}")]
    Skip(SkipReason),
    /// Any other error.
    #[error("{0}")]
    Eyre(
        #[from]
        #[source]
        eyre::Report,
    ),
}
// Boxes the error so `EvmError` stays small (see the `Execution` variant).
impl From<ExecutionErr> for EvmError {
    fn from(err: ExecutionErr) -> Self {
        Self::Execution(Box::new(err))
    }
}
// Routes `alloy_sol_types` errors through the dyn-abi error type.
impl From<alloy_sol_types::Error> for EvmError {
    fn from(err: alloy_sol_types::Error) -> Self {
        Self::Abi(err.into())
    }
}
/// The result of a deployment.
#[derive(Debug)]
pub struct DeployResult {
    /// The raw result of the deployment.
    pub raw: RawCallResult,
    /// The address of the deployed contract
    pub address: Address,
}
// Deref to the raw call result for convenient field access.
impl std::ops::Deref for DeployResult {
    type Target = RawCallResult;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.raw
    }
}
impl std::ops::DerefMut for DeployResult {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.raw
    }
}
// Discards the deployed address, keeping only the raw call result.
impl From<DeployResult> for RawCallResult {
    fn from(d: DeployResult) -> Self {
        d.raw
    }
}
/// The result of a raw call.
#[derive(Debug)]
pub struct RawCallResult {
    /// The status of the call
    pub exit_reason: Option<InstructionResult>,
    /// Whether the call reverted or not
    pub reverted: bool,
    /// Whether the call includes a snapshot failure
    ///
    /// This is tracked separately from revert because a snapshot failure can occur without a
    /// revert, since assert failures are stored in a global variable (ds-test legacy)
    pub has_state_snapshot_failure: bool,
    /// The raw result of the call.
    pub result: Bytes,
    /// The gas used for the call
    pub gas_used: u64,
    /// Refunded gas
    pub gas_refunded: u64,
    /// The initial gas stipend for the transaction
    pub stipend: u64,
    /// The logs emitted during the call
    pub logs: Vec<Log>,
    /// The labels assigned to addresses during the call
    pub labels: AddressHashMap<String>,
    /// The traces of the call
    pub traces: Option<SparsedTraceArena>,
    /// The line coverage info collected during the call
    pub line_coverage: Option<HitMaps>,
    /// The edge coverage info collected during the call
    pub edge_coverage: Option<Vec<u8>>,
    /// Scripted transactions generated from this call
    pub transactions: Option<BroadcastableTransactions>,
    /// The changeset of the state.
    pub state_changeset: StateChangeset,
    /// The `revm::Env` after the call
    pub env: Env,
    /// The cheatcode states after execution
    pub cheatcodes: Option<Box<Cheatcodes>>,
    /// The raw output of the execution
    pub out: Option<Output>,
    /// The chisel state
    pub chisel_state: Option<(Vec<U256>, Vec<u8>)>,
    /// The address that caused the revert, if any — presumably recorded by an
    /// inspector; producer is not visible in this file. TODO(review): confirm.
    pub reverter: Option<Address>,
}
// The zero-value result: no execution has happened, all outputs empty/unset.
impl Default for RawCallResult {
    fn default() -> Self {
        Self {
            exit_reason: None,
            reverted: false,
            has_state_snapshot_failure: false,
            result: Bytes::new(),
            gas_used: 0,
            gas_refunded: 0,
            stipend: 0,
            logs: Vec::new(),
            labels: HashMap::default(),
            traces: None,
            line_coverage: None,
            edge_coverage: None,
            transactions: None,
            state_changeset: HashMap::default(),
            env: Env::default(),
            cheatcodes: Default::default(),
            out: None,
            chisel_state: None,
            reverter: None,
        }
    }
}
impl RawCallResult {
    /// Unpacks an EVM result.
    ///
    /// Execution errors are flattened into `(raw result, Some(revert reason))`;
    /// all other error kinds are propagated.
    pub fn from_evm_result(r: Result<Self, EvmError>) -> eyre::Result<(Self, Option<String>)> {
        match r {
            Ok(r) => Ok((r, None)),
            Err(EvmError::Execution(e)) => Ok((e.raw, Some(e.reason))),
            Err(e) => Err(e.into()),
        }
    }
    /// Unpacks an execution result into `(raw result, optional revert reason)`.
    pub fn from_execution_result(r: Result<Self, ExecutionErr>) -> (Self, Option<String>) {
        match r {
            Ok(r) => (r, None),
            Err(e) => (e.raw, Some(e.reason)),
        }
    }
    /// Converts the result of the call into an `EvmError`.
    ///
    /// Returns `EvmError::Skip` when the output decodes as a skip reason; otherwise
    /// decodes the revert reason (using `rd` when provided) into an `ExecutionErr`.
    pub fn into_evm_error(self, rd: Option<&RevertDecoder>) -> EvmError {
        if let Some(reason) = SkipReason::decode(&self.result) {
            return EvmError::Skip(reason);
        }
        let reason = rd.unwrap_or_default().decode(&self.result, self.exit_reason);
        EvmError::Execution(Box::new(self.into_execution_error(reason)))
    }
    /// Converts the result of the call into an `ExecutionErr` with the given reason.
    pub fn into_execution_error(self, reason: String) -> ExecutionErr {
        ExecutionErr { raw: self, reason }
    }
// --- end of excerpt: remainder of `impl RawCallResult` not shown here ---
use crate::executors::{Executor, RawCallResult, invariant::execute_tx};
use alloy_dyn_abi::JsonAbiExt;
use alloy_json_abi::Function;
use alloy_primitives::Bytes;
use eyre::eyre;
use foundry_config::FuzzCorpusConfig;
use foundry_evm_fuzz::{
BasicTxDetails,
invariant::FuzzRunIdentifiedContracts,
strategies::{EvmFuzzState, mutate_param_value},
};
use proptest::{
prelude::{Just, Rng, Strategy},
prop_oneof,
strategy::{BoxedStrategy, ValueTree},
test_runner::TestRunner,
};
use serde::Serialize;
use std::{
fmt,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
use uuid::Uuid;
// Suffix of metadata files stored next to corpus entries; such files are skipped on load.
const METADATA_SUFFIX: &str = "metadata.json";
// Extension stripped from corpus file stems when recovering the entry uuid.
const JSON_EXTENSION: &str = ".json";
// An entry is "favored" when its new-finds / total-mutations ratio exceeds this threshold.
const FAVORABILITY_THRESHOLD: f64 = 0.3;
// Size of the edge-coverage history map, in hitcount bins.
const COVERAGE_MAP_SIZE: usize = 65536;
/// Possible mutation strategies to apply on a call sequence.
#[derive(Debug, Clone)]
enum MutationType {
    /// Splice original call sequence.
    Splice,
    /// Repeat selected call several times.
    Repeat,
    /// Interleave calls from two random call sequences.
    Interleave,
    /// Replace prefix of the original call sequence with new calls.
    Prefix,
    /// Replace suffix of the original call sequence with new calls.
    Suffix,
    /// ABI mutate random args of selected call in sequence.
    Abi,
}
/// Holds Corpus information: one persisted/replayable call sequence and its
/// mutation statistics.
#[derive(Serialize)]
struct CorpusEntry {
    // Unique corpus identifier (also used as the on-disk file stem).
    uuid: Uuid,
    // Total mutations of corpus as primary source.
    total_mutations: usize,
    // New coverage found as a result of mutating this corpus.
    new_finds_produced: usize,
    // Corpus call sequence. Not serialized with the stats (persisted separately).
    #[serde(skip_serializing)]
    tx_seq: Vec<BasicTxDetails>,
    // Whether this corpus is favored, i.e. producing new finds more often than
    // `FAVORABILITY_THRESHOLD`.
    is_favored: bool,
}
impl CorpusEntry {
    /// Builds a corpus entry for a call sequence loaded from `path`, recovering
    /// the uuid from the file stem; falls back to a fresh uuid when the stem is
    /// not valid UTF-8.
    pub fn new(tx_seq: Vec<BasicTxDetails>, path: PathBuf) -> eyre::Result<Self> {
        let uuid = match path.file_stem().and_then(|stem| stem.to_str()) {
            Some(stem) => {
                // Strip a leftover ".json" (from ".json.gz" names) before parsing.
                let raw = stem.strip_suffix(JSON_EXTENSION).unwrap_or(stem);
                Uuid::try_from(raw.to_string())?
            }
            None => Uuid::new_v4(),
        };
        Ok(Self { uuid, total_mutations: 0, new_finds_produced: 0, tx_seq, is_favored: false })
    }
    /// Builds a corpus entry with a freshly generated uuid for the given call sequence.
    pub fn from_tx_seq(tx_seq: &[BasicTxDetails]) -> Self {
        Self {
            uuid: Uuid::new_v4(),
            total_mutations: 0,
            new_finds_produced: 0,
            tx_seq: tx_seq.to_vec(),
            is_favored: false,
        }
    }
}
/// Running metrics for a coverage-guided fuzzing campaign.
#[derive(Serialize, Default)]
pub(crate) struct CorpusMetrics {
    // Number of edges seen during the invariant run.
    cumulative_edges_seen: usize,
    // Number of features (new hitcount bin of previously hit edge) seen during the invariant run.
    cumulative_features_seen: usize,
    // Number of corpus entries.
    corpus_count: usize,
    // Number of corpus entries that are favored.
    favored_items: usize,
}
impl fmt::Display for CorpusMetrics {
    /// Renders the metrics as an indented multi-line report, starting with a
    /// leading newline and without a trailing one.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f)?;
        writeln!(f, " - cumulative edges seen: {}", self.cumulative_edges_seen)?;
        writeln!(f, " - cumulative features seen: {}", self.cumulative_features_seen)?;
        writeln!(f, " - corpus count: {}", self.corpus_count)?;
        // Idiom: return the final `write!` result directly instead of `?; Ok(())`.
        write!(f, " - favored items: {}", self.favored_items)
    }
}
impl CorpusMetrics {
    /// Records a newly discovered edge (or, when `is_edge` is false, a new
    /// feature — a fresh hitcount bin of an already-seen edge).
    pub fn update_seen(&mut self, is_edge: bool) {
        let counter = if is_edge {
            &mut self.cumulative_edges_seen
        } else {
            &mut self.cumulative_features_seen
        };
        *counter += 1;
    }
    /// Adjusts the favored-item count when an entry's favored status flips
    /// between `corpus_favored` (old) and `is_favored` (new).
    pub fn update_favored(&mut self, is_favored: bool, corpus_favored: bool) {
        match (is_favored, corpus_favored) {
            (true, false) => self.favored_items += 1,
            (false, true) => self.favored_items -= 1,
            _ => {}
        }
    }
}
/// Fuzz corpus manager, used in coverage guided fuzzing mode by both stateless and stateful tests.
pub(crate) struct CorpusManager {
    // Fuzzed calls generator.
    tx_generator: BoxedStrategy<BasicTxDetails>,
    // Call sequence mutation strategy type generator.
    mutation_generator: BoxedStrategy<MutationType>,
    // Corpus configuration.
    config: FuzzCorpusConfig,
    // In-memory corpus, populated from persisted files and current runs.
    // Mutation is performed on these.
    in_memory_corpus: Vec<CorpusEntry>,
    // Identifier of current mutated entry (None when no mutation is in flight).
    current_mutated: Option<Uuid>,
    // Number of failed replays from persisted corpus.
    failed_replays: usize,
    // History of binned hitcount of edges seen during fuzzing
    // (length `COVERAGE_MAP_SIZE`).
    history_map: Vec<u8>,
    // Corpus metrics.
    pub(crate) metrics: CorpusMetrics,
}
impl CorpusManager {
    /// Creates a corpus manager, replaying any persisted corpus files from the
    /// configured corpus directory to warm up the coverage history map and
    /// populate the in-memory corpus.
    ///
    /// Exactly one of `fuzzed_function` (stateless) or `fuzzed_contracts`
    /// (stateful/invariant) is expected to be provided by callers — replay
    /// eligibility is decided by whichever is `Some`.
    pub fn new(
        config: FuzzCorpusConfig,
        tx_generator: BoxedStrategy<BasicTxDetails>,
        executor: &Executor,
        fuzzed_function: Option<&Function>,
        fuzzed_contracts: Option<&FuzzRunIdentifiedContracts>,
    ) -> eyre::Result<Self> {
        // Uniformly pick one of the sequence mutation strategies.
        let mutation_generator = prop_oneof![
            Just(MutationType::Splice),
            Just(MutationType::Repeat),
            Just(MutationType::Interleave),
            Just(MutationType::Prefix),
            Just(MutationType::Suffix),
            Just(MutationType::Abi),
        ]
        .boxed();
        let mut history_map = vec![0u8; COVERAGE_MAP_SIZE];
        let mut metrics = CorpusMetrics::default();
        let mut in_memory_corpus = vec![];
        let mut failed_replays = 0;
        // Early return if corpus dir / coverage guided fuzzing not configured.
        let Some(corpus_dir) = &config.corpus_dir else {
            return Ok(Self {
                tx_generator,
                mutation_generator,
                config,
                in_memory_corpus,
                current_mutated: None,
                failed_replays,
                history_map,
                metrics,
            });
        };
        // Ensure corpus dir for current test is created.
        if !corpus_dir.is_dir() {
            foundry_common::fs::create_dir_all(corpus_dir)?;
        }
        // A tx is replayable when it targets a known invariant target, or (for
        // stateless fuzzing) when its selector matches the fuzzed function.
        let can_replay_tx = |tx: &BasicTxDetails| -> bool {
            fuzzed_contracts.is_some_and(|contracts| contracts.targets.lock().can_replay(tx))
                || fuzzed_function.is_some_and(|function| {
                    tx.call_details
                        .calldata
                        .get(..4)
                        .is_some_and(|selector| function.selector() == selector)
                })
        };
        'corpus_replay: for entry in std::fs::read_dir(corpus_dir)? {
            let path = entry?.path();
            if path.is_file()
                && let Some(name) = path.file_name().and_then(|s| s.to_str())
                && name.contains(METADATA_SUFFIX)
            {
                // Ignore metadata files
                continue;
            }
            // Gzipped entries are stored as "<uuid>.json.gz"; plain ones as "<uuid>.json".
            let read_corpus_result = match path.extension().and_then(|ext| ext.to_str()) {
                Some("gz") => foundry_common::fs::read_json_gzip_file::<Vec<BasicTxDetails>>(&path),
                _ => foundry_common::fs::read_json_file::<Vec<BasicTxDetails>>(&path),
            };
            let Ok(tx_seq) = read_corpus_result else {
                trace!(target: "corpus", "failed to load corpus from {}", path.display());
                continue;
            };
            if !tx_seq.is_empty() {
                // Warm up history map from loaded sequences.
                let mut executor = executor.clone();
                for tx in &tx_seq {
                    if can_replay_tx(tx) {
                        let mut call_result = execute_tx(&mut executor, tx)?;
                        let (new_coverage, is_edge) =
                            call_result.merge_edge_coverage(&mut history_map);
                        if new_coverage {
                            metrics.update_seen(is_edge);
                        }
                        // Commit only when running invariant / stateful tests.
                        if fuzzed_contracts.is_some() {
                            executor.commit(&mut call_result);
                        }
                    } else {
                        failed_replays += 1;
                        // If the only input for the fuzzed function cannot be replayed, then move
                        // to the next corpus file without adding this one to memory.
                        if fuzzed_function.is_some() {
                            continue 'corpus_replay;
                        }
                    }
                }
                metrics.corpus_count += 1;
                trace!(
                    target: "corpus",
                    "load sequence with len {} from corpus file {}",
                    tx_seq.len(),
                    path.display()
                );
                // Populate in memory corpus with the sequence from corpus file.
                in_memory_corpus.push(CorpusEntry::new(tx_seq, path)?);
            }
        }
        Ok(Self {
            tx_generator,
            mutation_generator,
            config,
            in_memory_corpus,
            current_mutated: None,
            failed_replays,
            history_map,
            metrics,
        })
    }
/// Updates stats for the given call sequence, if new coverage produced.
/// Persists the call sequence (if corpus directory is configured and new coverage) and updates
/// in-memory corpus.
pub fn process_inputs(&mut self, inputs: &[BasicTxDetails], new_coverage: bool) {
    // Early return if corpus dir / coverage guided fuzzing is not configured.
    let Some(corpus_dir) = &self.config.corpus_dir else {
        return;
    };

    // Update stats of current mutated primary corpus.
    // `current_mutated` is consumed here (reset to `None`) so stats are attributed
    // exactly once per mutation round.
    if let Some(uuid) = &self.current_mutated {
        if let Some(corpus) =
            self.in_memory_corpus.iter_mut().find(|corpus| corpus.uuid.eq(uuid))
        {
            corpus.total_mutations += 1;
            if new_coverage {
                corpus.new_finds_produced += 1
            }
            // An entry is "favored" when its rate of new finds per mutation strictly
            // exceeds FAVORABILITY_THRESHOLD; favored entries are protected from
            // eviction (see `evict_oldest_corpus`).
            let is_favored = (corpus.new_finds_produced as f64 / corpus.total_mutations as f64)
                > FAVORABILITY_THRESHOLD;
            self.metrics.update_favored(is_favored, corpus.is_favored);
            corpus.is_favored = is_favored;
            trace!(
                target: "corpus",
                "updated corpus {}, total mutations: {}, new finds: {}",
                corpus.uuid, corpus.total_mutations, corpus.new_finds_produced
            );
        }
        self.current_mutated = None;
    }

    // Collect inputs only if current run produced new coverage.
    if !new_coverage {
        return;
    }

    let corpus = CorpusEntry::from_tx_seq(inputs);
    let corpus_uuid = corpus.uuid;

    // Persist to disk if corpus dir is configured; gzip-compressed when
    // `corpus_gzip` is set (`.gz` suffix).
    let write_result = if self.config.corpus_gzip {
        foundry_common::fs::write_json_gzip_file(
            corpus_dir.join(format!("{corpus_uuid}{JSON_EXTENSION}.gz")).as_path(),
            &corpus.tx_seq,
        )
    } else {
        foundry_common::fs::write_json_file(
            corpus_dir.join(format!("{corpus_uuid}{JSON_EXTENSION}")).as_path(),
            &corpus.tx_seq,
        )
    };

    // A failed write is logged but does not abort the campaign; the entry is still
    // added to the in-memory corpus below.
    if let Err(err) = write_result {
        debug!(target: "corpus", %err, "Failed to record call sequence {:?}", &corpus.tx_seq);
    } else {
        trace!(
            target: "corpus",
            "persisted {} inputs for new coverage in {corpus_uuid} corpus",
            &corpus.tx_seq.len()
        );
    }

    // This includes reverting txs in the corpus and `can_continue` removes
    // them. We want this as it is new coverage and may help reach the other branch.
    self.metrics.corpus_count += 1;
    self.in_memory_corpus.push(corpus);
}
/// Generates new call sequence from in memory corpus. Evicts oldest corpus mutated more than
/// configured max mutations value. Used by invariant test campaigns.
///
/// When coverage guided fuzzing is enabled and the corpus is non-empty, a mutation
/// strategy (splice / repeat / interleave / prefix / suffix / ABI) is sampled and
/// applied to one or two randomly chosen corpus entries. The mutated entry's uuid is
/// stored in `current_mutated` so `process_inputs` can attribute stats to it.
pub fn new_inputs(
    &mut self,
    test_runner: &mut TestRunner,
    fuzz_state: &EvmFuzzState,
    targeted_contracts: &FuzzRunIdentifiedContracts,
) -> eyre::Result<Vec<BasicTxDetails>> {
    let mut new_seq = vec![];

    // Early return with first_input only if corpus dir / coverage guided fuzzing not
    // configured.
    if !self.config.is_coverage_guided() {
        new_seq.push(self.new_tx(test_runner)?);
        return Ok(new_seq);
    };

    if !self.in_memory_corpus.is_empty() {
        self.evict_oldest_corpus()?;

        // Sample which mutation to apply for this round.
        let mutation_type = self
            .mutation_generator
            .new_tree(test_runner)
            .map_err(|err| eyre!("Could not generate mutation type {err}"))?
            .current();
        let rng = test_runner.rng();
        // Pick two (possibly identical) random corpus entries as mutation parents.
        let corpus_len = self.in_memory_corpus.len();
        let primary = &self.in_memory_corpus[rng.random_range(0..corpus_len)];
        let secondary = &self.in_memory_corpus[rng.random_range(0..corpus_len)];

        match mutation_type {
            MutationType::Splice => {
                // Concatenate a random window of `primary` with a random window of
                // `secondary`; stats are attributed to `primary`.
                trace!(target: "corpus", "splice {} and {}", primary.uuid, secondary.uuid);
                self.current_mutated = Some(primary.uuid);
                let start1 = rng.random_range(0..primary.tx_seq.len());
                let end1 = rng.random_range(start1..primary.tx_seq.len());
                let start2 = rng.random_range(0..secondary.tx_seq.len());
                let end2 = rng.random_range(start2..secondary.tx_seq.len());
                // `take(end).skip(start)` yields the indices `start..end`.
                for tx in primary.tx_seq.iter().take(end1).skip(start1) {
                    new_seq.push(tx.clone());
                }
                for tx in secondary.tx_seq.iter().take(end2).skip(start2) {
                    new_seq.push(tx.clone());
                }
            }
            MutationType::Repeat => {
                // Overwrite a random window of the sequence with copies of one
                // randomly chosen element.
                let corpus = if rng.random::<bool>() { primary } else { secondary };
                trace!(target: "corpus", "repeat {}", corpus.uuid);
                self.current_mutated = Some(corpus.uuid);
                new_seq = corpus.tx_seq.clone();
                let start = rng.random_range(0..corpus.tx_seq.len());
                let end = rng.random_range(start..corpus.tx_seq.len());
                let item_idx = rng.random_range(0..corpus.tx_seq.len());
                let repeated = vec![new_seq[item_idx].clone(); end - start];
                new_seq.splice(start..end, repeated);
            }
            MutationType::Interleave => {
                // Zip the two parents, choosing each position randomly from either.
                // Result length is the shorter of the two sequences.
                trace!(target: "corpus", "interleave {} with {}", primary.uuid, secondary.uuid);
                self.current_mutated = Some(primary.uuid);
                for (tx1, tx2) in primary.tx_seq.iter().zip(secondary.tx_seq.iter()) {
                    // TODO: consider interleaving larger chunks rather than single txs.
                    let tx = if rng.random::<bool>() { tx1.clone() } else { tx2.clone() };
                    new_seq.push(tx);
                }
            }
            MutationType::Prefix => {
                // Regenerate a random-length prefix with fresh txs from the strategy.
                let corpus = if rng.random::<bool>() { primary } else { secondary };
                trace!(target: "corpus", "overwrite prefix of {}", corpus.uuid);
                self.current_mutated = Some(corpus.uuid);
                new_seq = corpus.tx_seq.clone();
                for i in 0..rng.random_range(0..=new_seq.len()) {
                    new_seq[i] = self.new_tx(test_runner)?;
                }
            }
            MutationType::Suffix => {
                // Regenerate a random-length suffix with fresh txs from the strategy.
                let corpus = if rng.random::<bool>() { primary } else { secondary };
                trace!(target: "corpus", "overwrite suffix of {}", corpus.uuid);
                self.current_mutated = Some(corpus.uuid);
                new_seq = corpus.tx_seq.clone();
                for i in new_seq.len() - rng.random_range(0..new_seq.len())..corpus.tx_seq.len()
                {
                    new_seq[i] = self.new_tx(test_runner)?;
                }
            }
            MutationType::Abi => {
                // ABI-decode one random call of the sequence and mutate its args.
                let targets = targeted_contracts.targets.lock();
                let corpus = if rng.random::<bool>() { primary } else { secondary };
                trace!(target: "corpus", "ABI mutate args of {}", corpus.uuid);
                self.current_mutated = Some(corpus.uuid);
                new_seq = corpus.tx_seq.clone();
                let idx = rng.random_range(0..new_seq.len());
                let tx = new_seq.get_mut(idx).unwrap();
                if let (_, Some(function)) = targets.fuzzed_artifacts(tx) {
                    // TODO add call_value to call details and mutate it as well as sender some
                    // of the time
                    if !function.inputs.is_empty() {
                        self.abi_mutate(tx, function, test_runner, fuzz_state)?;
                    }
                }
            }
        }
    }

    // Make sure the new sequence contains at least one tx to start fuzzing from.
    if new_seq.is_empty() {
        new_seq.push(self.new_tx(test_runner)?);
    }
    trace!(target: "corpus", "new sequence of {} calls generated", new_seq.len());

    Ok(new_seq)
}
/// Generates new input from in memory corpus. Evicts oldest corpus mutated more than
/// configured max mutations value. Used by fuzz test campaigns.
///
/// Returns the (possibly mutated) calldata for the fuzzed function.
pub fn new_input(
    &mut self,
    test_runner: &mut TestRunner,
    fuzz_state: &EvmFuzzState,
    function: &Function,
) -> eyre::Result<Bytes> {
    // Early return if not running with coverage guided fuzzing.
    if !self.config.is_coverage_guided() {
        return Ok(self.new_tx(test_runner)?.call_details.calldata);
    }

    let tx = if !self.in_memory_corpus.is_empty() {
        self.evict_oldest_corpus()?;

        // Pick a random corpus entry and record it as the current mutation target
        // so `process_inputs` can attribute stats to it.
        let corpus = &self.in_memory_corpus
            [test_runner.rng().random_range(0..self.in_memory_corpus.len())];
        self.current_mutated = Some(corpus.uuid);
        // Clone only the first tx — fuzz campaigns mutate a single call, so cloning
        // the whole sequence (as before) was a wasted allocation.
        let mut tx = corpus
            .tx_seq
            .first()
            .expect("in-memory corpus entries contain at least one tx")
            .clone();
        self.abi_mutate(&mut tx, function, test_runner, fuzz_state)?;
        tx
    } else {
        self.new_tx(test_runner)?
    };
    Ok(tx.call_details.calldata)
}
/// Returns the next call to be used in call sequence.
/// If coverage guided fuzzing is not configured or if previous input was discarded then this is
/// a new tx from strategy.
/// If running with coverage guided fuzzing it returns a new call only when sequence
/// does not have enough entries, or randomly. Otherwise, returns the next call from initial
/// sequence.
pub fn generate_next_input(
    &mut self,
    test_runner: &mut TestRunner,
    sequence: &[BasicTxDetails],
    discarded: bool,
    depth: usize,
) -> eyre::Result<BasicTxDetails> {
    // Coverage guided fuzzing not configured, or the previous call was discarded:
    // always draw a fresh tx from the strategy.
    let coverage_guided = self.config.corpus_dir.is_some();
    if !coverage_guided || discarded {
        return self.new_tx(test_runner);
    }

    // Draw a fresh tx once the initial sequence is exhausted, and randomly
    // (1 in 10) to occasionally intermix new txs into the replay.
    let exhausted = depth > sequence.len().saturating_sub(1);
    if exhausted || test_runner.rng().random_ratio(1, 10) {
        return self.new_tx(test_runner);
    }

    // Otherwise continue with the next call of the initial sequence.
    Ok(sequence[depth].clone())
}
/// Generates single call from corpus strategy.
pub fn new_tx(&mut self, test_runner: &mut TestRunner) -> eyre::Result<BasicTxDetails> {
    // Sample a value tree from the proptest strategy and take its current value.
    let tree = self
        .tx_generator
        .new_tree(test_runner)
        .map_err(|_| eyre!("Could not generate case"))?;
    Ok(tree.current())
}
/// Returns the number of corpus replays that failed during this campaign.
///
/// Takes `&self` instead of consuming the manager (Rust API guidelines for getters);
/// existing `manager.failed_replays()` call sites are unaffected.
pub fn failed_replays(&self) -> usize {
    self.failed_replays
}
/// Collects coverage from call result and updates metrics.
///
/// Returns `true` when the call produced coverage that was not in the history map.
pub fn merge_edge_coverage(&mut self, call_result: &mut RawCallResult) -> bool {
    if self.config.collect_edge_coverage() {
        let (novel, is_edge) = call_result.merge_edge_coverage(&mut self.history_map);
        if novel {
            self.metrics.update_seen(is_edge);
        }
        novel
    } else {
        // Edge coverage collection disabled: nothing to merge.
        false
    }
}
/// Flush the oldest corpus mutated more than configured max mutations unless they are
/// favored.
///
/// The evicted entry's metadata is persisted to the corpus directory (file name
/// `<uuid>-<unix_secs>-<METADATA_SUFFIX>`) before the entry is removed from memory.
fn evict_oldest_corpus(&mut self) -> eyre::Result<()> {
    // Only evict when above the configured minimum corpus size (at least 1), and only
    // entries that exceeded the mutation budget and are not favored.
    if self.in_memory_corpus.len() > self.config.corpus_min_size.max(1)
        && let Some(index) = self.in_memory_corpus.iter().position(|corpus| {
            corpus.total_mutations > self.config.corpus_min_mutations && !corpus.is_favored
        })
    {
        let corpus = &self.in_memory_corpus[index];
        let uuid = corpus.uuid;
        debug!(target: "corpus", "evict corpus {uuid}");

        // Flush to disk the seed metadata at the time of eviction.
        let eviction_time = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs();
        // Borrow the corpus dir instead of cloning the `PathBuf` (the previous
        // `clone().unwrap()` allocated a copy only to call `join` on it).
        let corpus_dir = self
            .config
            .corpus_dir
            .as_ref()
            .expect("corpus dir is configured for coverage guided fuzzing");
        foundry_common::fs::write_json_file(
            corpus_dir.join(format!("{uuid}-{eviction_time}-{METADATA_SUFFIX}")).as_path(),
            &corpus,
        )?;

        // Remove corpus from memory.
        self.in_memory_corpus.remove(index);
    }
    Ok(())
}
/// Mutates calldata of provided tx by abi decoding current values and randomly selecting the
/// inputs to change.
///
/// No-op for functions without inputs — there is nothing to mutate.
fn abi_mutate(
    &self,
    tx: &mut BasicTxDetails,
    function: &Function,
    test_runner: &mut TestRunner,
    fuzz_state: &EvmFuzzState,
) -> eyre::Result<()> {
    // Guard: a zero-arg function has no values to mutate. Previously this indexed
    // the (empty) decoded inputs and panicked when reached via `new_input`, which
    // does not pre-check `function.inputs` like `new_inputs` does.
    if function.inputs.is_empty() {
        return Ok(());
    }

    // Number of mutation rounds: random in [0, len], clamped to at least one round.
    let arg_mutation_rounds =
        test_runner.rng().random_range(0..=function.inputs.len()).max(1);
    // Pre-select which argument gets mutated in each round (single-arg functions
    // always mutate argument 0).
    let round_arg_idx: Vec<usize> = if function.inputs.len() <= 1 {
        vec![0]
    } else {
        (0..arg_mutation_rounds)
            .map(|_| test_runner.rng().random_range(0..function.inputs.len()))
            .collect()
    };

    // Decode current argument values from the calldata (skipping the 4-byte selector).
    let mut prev_inputs = function
        .abi_decode_input(&tx.call_details.calldata[4..])
        .map_err(|err| eyre!("failed to load previous inputs: {err}"))?;

    // Apply the mutation rounds. Iterating the pre-selected indices in reverse
    // preserves the exact order of the original manual countdown loop (and thus the
    // RNG consumption order).
    for &idx in round_arg_idx.iter().rev() {
        prev_inputs[idx] = mutate_param_value(
            &function
                .inputs
                .get(idx)
                .expect("Could not get input to mutate")
                .selector_type()
                .parse()?,
            prev_inputs[idx].clone(),
            test_runner,
            fuzz_state,
        );
    }

    // Re-encode the mutated values back into the tx calldata.
    tx.call_details.calldata =
        function.abi_encode_input(&prev_inputs).map_err(|e| eyre!(e.to_string()))?.into();
    Ok(())
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::Address;
    use std::fs;

    // Minimal tx (zero addresses, empty calldata). Sufficient for corpus
    // bookkeeping tests since these txs are never executed.
    fn basic_tx() -> BasicTxDetails {
        BasicTxDetails {
            warp: None,
            roll: None,
            sender: Address::ZERO,
            call_details: foundry_evm_fuzz::CallDetails {
                target: Address::ZERO,
                calldata: Bytes::new(),
            },
        }
    }

    // Unique temporary corpus dir per test so parallel tests don't collide.
    fn temp_corpus_dir() -> PathBuf {
        let dir = std::env::temp_dir().join(format!("foundry-corpus-tests-{}", Uuid::new_v4()));
        let _ = fs::create_dir_all(&dir);
        dir
    }

    // Builds a manager holding exactly one in-memory corpus entry; returns the
    // manager and that entry's uuid.
    fn new_manager_with_single_corpus() -> (CorpusManager, Uuid) {
        let tx_gen = Just(basic_tx()).boxed();
        let config = FuzzCorpusConfig {
            corpus_dir: Some(temp_corpus_dir()),
            corpus_gzip: false,
            corpus_min_mutations: 0,
            corpus_min_size: 0,
            ..Default::default()
        };
        let tx_seq = vec![basic_tx()];
        let corpus = CorpusEntry::from_tx_seq(&tx_seq);
        let seed_uuid = corpus.uuid;
        let manager = CorpusManager {
            tx_generator: tx_gen,
            mutation_generator: Just(MutationType::Repeat).boxed(),
            config,
            in_memory_corpus: vec![corpus],
            current_mutated: Some(seed_uuid),
            failed_replays: 0,
            history_map: vec![0u8; COVERAGE_MAP_SIZE],
            metrics: CorpusMetrics::default(),
        };
        (manager, seed_uuid)
    }

    // `process_inputs` marks an entry favored once new_finds/total_mutations
    // strictly exceeds FAVORABILITY_THRESHOLD, and bumps the favored metric.
    #[test]
    fn favored_sets_true_and_metrics_increment_when_ratio_gt_threshold() {
        let (mut manager, uuid) = new_manager_with_single_corpus();
        let corpus = manager.in_memory_corpus.iter_mut().find(|c| c.uuid == uuid).unwrap();
        corpus.total_mutations = 4;
        corpus.new_finds_produced = 2; // ratio currently 0.5 if both increment → 3/5 = 0.6 > 0.3
        corpus.is_favored = false;
        // ensure metrics start at 0
        assert_eq!(manager.metrics.favored_items, 0);
        // mark this as the currently mutated corpus and process a run with new coverage
        manager.current_mutated = Some(uuid);
        manager.process_inputs(&[basic_tx()], true);
        let corpus = manager.in_memory_corpus.iter().find(|c| c.uuid == uuid).unwrap();
        assert!(corpus.is_favored, "expected favored to be true when ratio > threshold");
        assert_eq!(
            manager.metrics.favored_items, 1,
            "favored_items should increment on false→true"
        );
    }

    // A run without new coverage only bumps total_mutations, so the ratio can drop
    // below threshold and the favored flag/metric must be cleared.
    #[test]
    fn favored_sets_false_and_metrics_decrement_when_ratio_lt_threshold() {
        let (mut manager, uuid) = new_manager_with_single_corpus();
        let corpus = manager.in_memory_corpus.iter_mut().find(|c| c.uuid == uuid).unwrap();
        corpus.total_mutations = 9;
        corpus.new_finds_produced = 3; // 3/9 = 0.333.. > 0.3; after +1: 3/10 = 0.3 => not favored
        corpus.is_favored = true; // start as favored
        manager.metrics.favored_items = 1;
        // Next run does NOT produce coverage → only total_mutations increments, ratio drops
        manager.current_mutated = Some(uuid);
        manager.process_inputs(&[basic_tx()], false);
        let corpus = manager.in_memory_corpus.iter().find(|c| c.uuid == uuid).unwrap();
        assert!(!corpus.is_favored, "expected favored to be false when ratio < threshold");
        assert_eq!(
            manager.metrics.favored_items, 0,
            "favored_items should decrement on true→false"
        );
    }

    // The favorability comparison is strict `>`: a ratio exactly equal to the
    // threshold must NOT mark the entry favored.
    #[test]
    fn favored_is_false_on_ratio_equal_threshold() {
        let (mut manager, uuid) = new_manager_with_single_corpus();
        let corpus = manager.in_memory_corpus.iter_mut().find(|c| c.uuid == uuid).unwrap();
        // After this call with new_coverage=true, totals become 10 and 3 → 0.3
        corpus.total_mutations = 9;
        corpus.new_finds_produced = 2;
        corpus.is_favored = false;
        manager.current_mutated = Some(uuid);
        manager.process_inputs(&[basic_tx()], true);
        let corpus = manager.in_memory_corpus.iter().find(|c| c.uuid == uuid).unwrap();
        assert!(
            !(corpus.is_favored),
            "with strict '>' comparison, favored must be false when ratio == threshold"
        );
    }

    // Eviction removes over-mutated non-favored entries but never favored ones.
    #[test]
    fn eviction_skips_favored_and_evicts_non_favored() {
        // manager with two corpora
        let tx_gen = Just(basic_tx()).boxed();
        let config = FuzzCorpusConfig {
            corpus_dir: Some(temp_corpus_dir()),
            corpus_min_mutations: 0,
            corpus_min_size: 0,
            ..Default::default()
        };
        let mut favored = CorpusEntry::from_tx_seq(&[basic_tx()]);
        favored.total_mutations = 2;
        favored.is_favored = true;
        let mut non_favored = CorpusEntry::from_tx_seq(&[basic_tx()]);
        non_favored.total_mutations = 2;
        non_favored.is_favored = false;
        let non_favored_uuid = non_favored.uuid;
        let mut manager = CorpusManager {
            tx_generator: tx_gen,
            mutation_generator: Just(MutationType::Repeat).boxed(),
            config,
            in_memory_corpus: vec![favored, non_favored],
            current_mutated: None,
            failed_replays: 0,
            history_map: vec![0u8; COVERAGE_MAP_SIZE],
            metrics: CorpusMetrics::default(),
        };
        // First eviction should remove the non-favored one
        manager.evict_oldest_corpus().unwrap();
        assert_eq!(manager.in_memory_corpus.len(), 1);
        assert!(manager.in_memory_corpus.iter().all(|c| c.is_favored));
        // Attempt eviction again: only favored remains → should not remove
        manager.evict_oldest_corpus().unwrap();
        assert_eq!(manager.in_memory_corpus.len(), 1, "favored corpus must not be evicted");
        // ensure the evicted one was the non-favored uuid
        assert!(manager.in_memory_corpus.iter().all(|c| c.uuid != non_favored_uuid));
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/invariant/error.rs | crates/evm/evm/src/executors/invariant/error.rs | use super::InvariantContract;
use crate::executors::RawCallResult;
use alloy_primitives::{Address, Bytes};
use foundry_config::InvariantConfig;
use foundry_evm_core::decode::RevertDecoder;
use foundry_evm_fuzz::{BasicTxDetails, Reason, invariant::FuzzRunIdentifiedContracts};
use proptest::test_runner::TestError;
/// Stores information about failures and reverts of the invariant tests.
///
/// Only the first broken invariant is recorded in `error` (see `assert_invariants`).
#[derive(Clone, Default)]
pub struct InvariantFailures {
    /// Total number of reverts.
    pub reverts: usize,
    /// The latest revert reason of a run.
    pub revert_reason: Option<String>,
    /// Maps a broken invariant to its specific error.
    pub error: Option<InvariantFuzzError>,
}
impl InvariantFailures {
    /// Creates an empty failure tracker.
    pub fn new() -> Self {
        Self::default()
    }

    /// Consumes the tracker, returning the revert count and the recorded error (if any).
    pub fn into_inner(self) -> (usize, Option<InvariantFuzzError>) {
        let Self { reverts, error, .. } = self;
        (reverts, error)
    }
}
/// The cause of a failed invariant campaign.
#[derive(Clone, Debug)]
pub enum InvariantFuzzError {
    /// A call in the sequence reverted fatally (recorded when `fail_on_revert` is set).
    Revert(FailedInvariantCaseData),
    /// The invariant check itself failed.
    BrokenInvariant(FailedInvariantCaseData),
    /// Too many inputs were rejected via `vm.assume`; carries the configured limit.
    MaxAssumeRejects(u32),
}
impl InvariantFuzzError {
    /// Returns the revert reason of the failure, if one is available.
    pub fn revert_reason(&self) -> Option<String> {
        match self {
            Self::Revert(data) | Self::BrokenInvariant(data) => {
                // An empty decoded reason yields `None`.
                if data.revert_reason.is_empty() {
                    None
                } else {
                    Some(data.revert_reason.clone())
                }
            }
            Self::MaxAssumeRejects(allowed) => {
                Some(format!("`vm.assume` rejected too many inputs ({allowed} allowed)"))
            }
        }
    }
}
/// Captured data for a single failed invariant case, used for shrinking and replaying
/// the failure.
#[derive(Clone, Debug)]
pub struct FailedInvariantCaseData {
    /// The proptest error occurred as a result of a test case.
    pub test_error: TestError<Vec<BasicTxDetails>>,
    /// The return reason of the offending call.
    pub return_reason: Reason,
    /// The revert string of the offending call.
    pub revert_reason: String,
    /// Address of the invariant asserter.
    pub addr: Address,
    /// Function calldata for invariant check.
    pub calldata: Bytes,
    /// Inner fuzzing Sequence coming from overriding calls.
    pub inner_sequence: Vec<Option<BasicTxDetails>>,
    /// Shrink run limit.
    pub shrink_run_limit: u32,
    /// Fail on revert, used to check sequence when shrinking.
    pub fail_on_revert: bool,
}
impl FailedInvariantCaseData {
    /// Builds the failure data for a broken invariant or fatal revert.
    ///
    /// Decodes the revert reason using the ABIs of all fuzzed contracts plus the
    /// invariant contract itself (to cover custom errors), and wraps the offending
    /// call sequence into a proptest `TestError::Fail` for shrinking.
    pub fn new(
        invariant_contract: &InvariantContract<'_>,
        invariant_config: &InvariantConfig,
        targeted_contracts: &FuzzRunIdentifiedContracts,
        calldata: &[BasicTxDetails],
        call_result: RawCallResult,
        inner_sequence: &[Option<BasicTxDetails>],
    ) -> Self {
        // Collect abis of fuzzed and invariant contracts to decode custom error.
        let revert_reason = RevertDecoder::new()
            .with_abis(targeted_contracts.targets.lock().values().map(|c| &c.abi))
            .with_abi(invariant_contract.abi)
            .decode(call_result.result.as_ref(), call_result.exit_reason);

        let func = invariant_contract.invariant_function;
        // Invariant functions never take arguments.
        debug_assert!(func.inputs.is_empty());
        let origin = func.name.as_str();
        Self {
            test_error: TestError::Fail(
                format!("{origin}, reason: {revert_reason}").into(),
                calldata.to_vec(),
            ),
            return_reason: "".into(),
            revert_reason,
            addr: invariant_contract.address,
            // Calldata is just the 4-byte selector (no arguments).
            calldata: func.selector().to_vec().into(),
            inner_sequence: inner_sequence.to_vec(),
            shrink_run_limit: invariant_config.shrink_run_limit,
            fail_on_revert: invariant_config.fail_on_revert,
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/invariant/result.rs | crates/evm/evm/src/executors/invariant/result.rs | use super::{
InvariantFailures, InvariantFuzzError, InvariantMetrics, InvariantTest, InvariantTestRun,
call_after_invariant_function, call_invariant_function, error::FailedInvariantCaseData,
};
use crate::executors::{Executor, RawCallResult};
use alloy_dyn_abi::JsonAbiExt;
use eyre::Result;
use foundry_config::InvariantConfig;
use foundry_evm_core::utils::StateChangeset;
use foundry_evm_coverage::HitMaps;
use foundry_evm_fuzz::{
BasicTxDetails, FuzzedCases,
invariant::{FuzzRunIdentifiedContracts, InvariantContract},
};
use revm_inspectors::tracing::CallTraceArena;
use std::{borrow::Cow, collections::HashMap};
/// The outcome of an invariant fuzz test
#[derive(Debug)]
pub struct InvariantFuzzTestResult {
    /// The campaign error, if an invariant was broken or a fatal condition was hit.
    pub error: Option<InvariantFuzzError>,
    /// Every successful fuzz test case
    pub cases: Vec<FuzzedCases>,
    /// Number of reverted fuzz calls
    pub reverts: usize,
    /// The entire inputs of the last run of the invariant campaign, used for
    /// replaying the run for collecting traces.
    pub last_run_inputs: Vec<BasicTxDetails>,
    /// Additional traces used for gas report construction.
    pub gas_report_traces: Vec<Vec<CallTraceArena>>,
    /// The coverage info collected during the invariant test runs.
    pub line_coverage: Option<HitMaps>,
    /// Fuzzed selectors metrics collected during the invariant test runs.
    pub metrics: HashMap<String, InvariantMetrics>,
    /// Number of failed replays from persisted corpus.
    pub failed_corpus_replays: usize,
}
/// Enriched results of an invariant run check.
///
/// Contains the success condition and call results of the last run
pub(crate) struct RichInvariantResults {
    /// Whether the campaign may keep running after this check.
    pub(crate) can_continue: bool,
    /// Result of the last invariant function call, if available.
    pub(crate) call_result: Option<RawCallResult>,
}

impl RichInvariantResults {
    /// Bundles the continuation flag with the optional last call result.
    fn new(can_continue: bool, call_result: Option<RawCallResult>) -> Self {
        Self { can_continue, call_result }
    }
}
/// Given the executor state, asserts that no invariant has been broken. Otherwise, it fills the
/// external `invariant_failures.failed_invariant` map and returns a generic error.
/// Either returns the call result if successful, or nothing if there was an error.
pub(crate) fn assert_invariants(
    invariant_contract: &InvariantContract<'_>,
    invariant_config: &InvariantConfig,
    targeted_contracts: &FuzzRunIdentifiedContracts,
    executor: &Executor,
    calldata: &[BasicTxDetails],
    invariant_failures: &mut InvariantFailures,
) -> Result<Option<RawCallResult>> {
    // Capture the overriding-call sequence from the fuzzer inspector (if any) so a
    // failure can later be replayed with the same inner calls.
    let mut inner_sequence = vec![];
    if let Some(fuzzer) = &executor.inspector().fuzzer
        && let Some(call_generator) = &fuzzer.call_generator
    {
        inner_sequence.extend(call_generator.last_sequence.read().iter().cloned());
    }

    // Call the invariant function itself; it takes no arguments.
    let (call_result, success) = call_invariant_function(
        executor,
        invariant_contract.address,
        invariant_contract.invariant_function.abi_encode_input(&[])?.into(),
    )?;
    if !success {
        // We only care about invariants which we haven't broken yet.
        // Only the FIRST failure is recorded; if an error already exists the failed
        // check falls through and still returns the call result.
        if invariant_failures.error.is_none() {
            let case_data = FailedInvariantCaseData::new(
                invariant_contract,
                invariant_config,
                targeted_contracts,
                calldata,
                call_result,
                &inner_sequence,
            );
            invariant_failures.error = Some(InvariantFuzzError::BrokenInvariant(case_data));
            return Ok(None);
        }
    }

    Ok(Some(call_result))
}
/// Returns if invariant test can continue and last successful call result of the invariant test
/// function (if it can continue).
pub(crate) fn can_continue(
    invariant_contract: &InvariantContract<'_>,
    invariant_test: &mut InvariantTest,
    invariant_run: &mut InvariantTestRun,
    invariant_config: &InvariantConfig,
    call_result: RawCallResult,
    state_changeset: &StateChangeset,
) -> Result<RichInvariantResults> {
    let mut call_results = None;

    // Lazily evaluated: checks that every targeted handler contract is still in a
    // "success" state after the call.
    let handlers_succeeded = || {
        invariant_test.targeted_contracts.targets.lock().keys().all(|address| {
            invariant_run.executor.is_success(
                *address,
                false,
                Cow::Borrowed(state_changeset),
                false,
            )
        })
    };

    // Assert invariants if the call did not revert and the handlers did not fail.
    if !call_result.reverted && handlers_succeeded() {
        if let Some(traces) = call_result.traces {
            invariant_run.run_traces.push(traces);
        }
        call_results = assert_invariants(
            invariant_contract,
            invariant_config,
            &invariant_test.targeted_contracts,
            &invariant_run.executor,
            &invariant_run.inputs,
            &mut invariant_test.test_data.failures,
        )?;
        // `None` means an invariant was broken: stop the campaign.
        if call_results.is_none() {
            return Ok(RichInvariantResults::new(false, None));
        }
    } else {
        // Increase the amount of reverts.
        let invariant_data = &mut invariant_test.test_data;
        invariant_data.failures.reverts += 1;
        // If fail on revert is set, we must return immediately.
        if invariant_config.fail_on_revert {
            let case_data = FailedInvariantCaseData::new(
                invariant_contract,
                invariant_config,
                &invariant_test.targeted_contracts,
                &invariant_run.inputs,
                call_result,
                &[],
            );
            invariant_data.failures.revert_reason = Some(case_data.revert_reason.clone());
            invariant_data.failures.error = Some(InvariantFuzzError::Revert(case_data));
            return Ok(RichInvariantResults::new(false, None));
        } else if call_result.reverted {
            // If we don't fail test on revert then remove last reverted call from inputs.
            // This improves shrinking performance as irrelevant calls won't be checked again.
            invariant_run.inputs.pop();
        }
    }
    Ok(RichInvariantResults::new(true, call_results))
}
/// Given the executor state, asserts conditions within `afterInvariant` function.
/// If call fails then the invariant test is considered failed.
pub(crate) fn assert_after_invariant(
    invariant_contract: &InvariantContract<'_>,
    invariant_test: &mut InvariantTest,
    invariant_run: &InvariantTestRun,
    invariant_config: &InvariantConfig,
) -> Result<bool> {
    let (call_result, success) =
        call_after_invariant_function(&invariant_run.executor, invariant_contract.address)?;
    if success {
        return Ok(true);
    }

    // `afterInvariant` did not succeed: record the failure so the campaign reports it.
    let case_data = FailedInvariantCaseData::new(
        invariant_contract,
        invariant_config,
        &invariant_test.targeted_contracts,
        &invariant_run.inputs,
        call_result,
        &[],
    );
    invariant_test.set_error(InvariantFuzzError::BrokenInvariant(case_data));
    Ok(false)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/invariant/mod.rs | crates/evm/evm/src/executors/invariant/mod.rs | use crate::{
executors::{Executor, RawCallResult},
inspectors::Fuzzer,
};
use alloy_primitives::{
Address, Bytes, FixedBytes, Selector, U256,
map::{AddressMap, HashMap},
};
use alloy_sol_types::{SolCall, sol};
use eyre::{ContextCompat, Result, eyre};
use foundry_common::contracts::{ContractsByAddress, ContractsByArtifact};
use foundry_config::InvariantConfig;
use foundry_evm_core::{
constants::{
CALLER, CHEATCODE_ADDRESS, DEFAULT_CREATE2_DEPLOYER, HARDHAT_CONSOLE_ADDRESS, MAGIC_ASSUME,
},
precompiles::PRECOMPILES,
};
use foundry_evm_fuzz::{
BasicTxDetails, FuzzCase, FuzzFixtures, FuzzedCases,
invariant::{
ArtifactFilters, FuzzRunIdentifiedContracts, InvariantContract, RandomCallGenerator,
SenderFilters, TargetedContract, TargetedContracts,
},
strategies::{EvmFuzzState, invariant_strat, override_call_strat},
};
use foundry_evm_traces::{CallTraceArena, SparsedTraceArena};
use indicatif::ProgressBar;
use parking_lot::RwLock;
use proptest::{strategy::Strategy, test_runner::TestRunner};
use result::{assert_after_invariant, assert_invariants, can_continue};
use revm::state::Account;
use std::{
collections::{HashMap as Map, btree_map::Entry},
sync::Arc,
time::{Instant, SystemTime, UNIX_EPOCH},
};
mod error;
pub use error::{InvariantFailures, InvariantFuzzError};
use foundry_evm_coverage::HitMaps;
mod replay;
pub use replay::{replay_error, replay_run};
mod result;
use foundry_common::{TestFunctionExt, sh_println};
pub use result::InvariantFuzzTestResult;
use serde::{Deserialize, Serialize};
use serde_json::json;
mod shrink;
use crate::executors::{
DURATION_BETWEEN_METRICS_REPORT, EarlyExit, EvmError, FuzzTestTimer, corpus::CorpusManager,
};
pub use shrink::check_sequence;
// Solidity-side configuration surface for invariant tests. A test contract may
// implement any subset of these optional functions to steer the fuzzer (which
// contracts, senders, selectors, artifacts and interfaces to target or exclude),
// plus the optional `afterInvariant` hook called after the campaign.
// NOTE: comments are kept outside the macro body so the `sol!` token stream is
// untouched.
sol! {
    interface IInvariantTest {
        #[derive(Default)]
        struct FuzzSelector {
            address addr;
            bytes4[] selectors;
        }

        #[derive(Default)]
        struct FuzzArtifactSelector {
            string artifact;
            bytes4[] selectors;
        }

        #[derive(Default)]
        struct FuzzInterface {
            address addr;
            string[] artifacts;
        }

        function afterInvariant() external;

        #[derive(Default)]
        function excludeArtifacts() public view returns (string[] memory excludedArtifacts);

        #[derive(Default)]
        function excludeContracts() public view returns (address[] memory excludedContracts);

        #[derive(Default)]
        function excludeSelectors() public view returns (FuzzSelector[] memory excludedSelectors);

        #[derive(Default)]
        function excludeSenders() public view returns (address[] memory excludedSenders);

        #[derive(Default)]
        function targetArtifacts() public view returns (string[] memory targetedArtifacts);

        #[derive(Default)]
        function targetArtifactSelectors() public view returns (FuzzArtifactSelector[] memory targetedArtifactSelectors);

        #[derive(Default)]
        function targetContracts() public view returns (address[] memory targetedContracts);

        #[derive(Default)]
        function targetSelectors() public view returns (FuzzSelector[] memory targetedSelectors);

        #[derive(Default)]
        function targetSenders() public view returns (address[] memory targetedSenders);

        #[derive(Default)]
        function targetInterfaces() public view returns (FuzzInterface[] memory targetedInterfaces);
    }
}
/// Contains invariant metrics for a single fuzzed selector.
#[derive(Default, Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct InvariantMetrics {
    /// Count of fuzzed selector calls.
    pub calls: usize,
    /// Count of fuzzed selector reverts.
    pub reverts: usize,
    /// Count of fuzzed selector discards (through assume cheatcodes).
    pub discards: usize,
}
/// Contains data collected during invariant test runs.
struct InvariantTestData {
    // Consumed gas and calldata of every successful fuzz call.
    fuzz_cases: Vec<FuzzedCases>,
    // Data related to reverts or failed assertions of the test.
    failures: InvariantFailures,
    // Calldata in the last invariant run.
    last_run_inputs: Vec<BasicTxDetails>,
    // Additional traces for gas report.
    gas_report_traces: Vec<Vec<CallTraceArena>>,
    // Last call results of the invariant test.
    last_call_results: Option<RawCallResult>,
    // Line coverage information collected from all fuzzed calls.
    line_coverage: Option<HitMaps>,
    // Metrics for each fuzzed selector.
    metrics: Map<String, InvariantMetrics>,
    // Proptest runner used to query random values while extending a run.
    // The strategy only provides the first `input`; the remaining `inputs` are
    // filled up to the desired `depth` so the evolving fuzz dictionary can be
    // used during the run.
    branch_runner: TestRunner,
}
/// Contains invariant test data: the fuzz dictionary state, the fuzzed contracts and
/// all results collected across runs.
struct InvariantTest {
    // Fuzz state of invariant test.
    fuzz_state: EvmFuzzState,
    // Contracts fuzzed by the invariant test.
    targeted_contracts: FuzzRunIdentifiedContracts,
    // Data collected during invariant runs.
    test_data: InvariantTestData,
}
impl InvariantTest {
    /// Instantiates an invariant test.
    fn new(
        fuzz_state: EvmFuzzState,
        targeted_contracts: FuzzRunIdentifiedContracts,
        failures: InvariantFailures,
        last_call_results: Option<RawCallResult>,
        branch_runner: TestRunner,
    ) -> Self {
        let mut fuzz_cases = vec![];
        // When there are no initial call results, seed with an empty case set so
        // reporting always has at least one entry.
        if last_call_results.is_none() {
            fuzz_cases.push(FuzzedCases::new(vec![]));
        }
        let test_data = InvariantTestData {
            fuzz_cases,
            failures,
            last_run_inputs: vec![],
            gas_report_traces: vec![],
            last_call_results,
            line_coverage: None,
            metrics: Map::default(),
            branch_runner,
        };
        Self { fuzz_state, targeted_contracts, test_data }
    }

    /// Returns number of invariant test reverts.
    fn reverts(&self) -> usize {
        self.test_data.failures.reverts
    }

    /// Whether invariant test has errors or not.
    fn has_errors(&self) -> bool {
        self.test_data.failures.error.is_some()
    }

    /// Set invariant test error.
    fn set_error(&mut self, error: InvariantFuzzError) {
        self.test_data.failures.error = Some(error);
    }

    /// Set last invariant test call results.
    fn set_last_call_results(&mut self, call_result: Option<RawCallResult>) {
        self.test_data.last_call_results = call_result;
    }

    /// Set last invariant run call sequence.
    ///
    /// Takes a slice instead of `&Vec` (clippy::ptr_arg); existing callers passing
    /// `&Vec<BasicTxDetails>` coerce transparently.
    fn set_last_run_inputs(&mut self, inputs: &[BasicTxDetails]) {
        // Clear + extend reuses the existing allocation, mirroring the previous
        // `clone_from` behavior.
        self.test_data.last_run_inputs.clear();
        self.test_data.last_run_inputs.extend_from_slice(inputs);
    }

    /// Merge current collected line coverage with the new coverage from last fuzzed call.
    fn merge_line_coverage(&mut self, new_coverage: Option<HitMaps>) {
        HitMaps::merge_opt(&mut self.test_data.line_coverage, new_coverage);
    }

    /// Update metrics for a fuzzed selector, extracted from tx details.
    /// Always increments number of calls; discarded runs (through assume cheatcodes) are tracked
    /// separated from reverts.
    fn record_metrics(&mut self, tx_details: &BasicTxDetails, reverted: bool, discarded: bool) {
        if let Some(metric_key) =
            self.targeted_contracts.targets.lock().fuzzed_metric_key(tx_details)
        {
            let test_metrics = &mut self.test_data.metrics;
            let invariant_metrics = test_metrics.entry(metric_key).or_default();
            invariant_metrics.calls += 1;
            // Discards take precedence: a discarded run is not counted as a revert.
            if discarded {
                invariant_metrics.discards += 1;
            } else if reverted {
                invariant_metrics.reverts += 1;
            }
        }
    }

    /// End invariant test run by collecting results, cleaning collected artifacts and reverting
    /// created fuzz state.
    fn end_run(&mut self, run: InvariantTestRun, gas_samples: usize) {
        // We clear all the targeted contracts created during this run.
        self.targeted_contracts.clear_created_contracts(run.created_contracts);
        // Keep traces for at most `gas_samples` runs to bound gas-report memory.
        if self.test_data.gas_report_traces.len() < gas_samples {
            self.test_data
                .gas_report_traces
                .push(run.run_traces.into_iter().map(|arena| arena.arena).collect());
        }
        self.test_data.fuzz_cases.push(FuzzedCases::new(run.fuzz_runs));
        // Revert state to not persist values between runs.
        self.fuzz_state.revert();
    }
}
/// Contains data for an invariant test run.
struct InvariantTestRun {
    /// Invariant run call sequence.
    inputs: Vec<BasicTxDetails>,
    /// Current invariant run executor.
    executor: Executor,
    /// Invariant run stat reports (eg. gas usage).
    fuzz_runs: Vec<FuzzCase>,
    /// Contracts created during current invariant run.
    created_contracts: Vec<Address>,
    /// Traces of each call of the invariant run call sequence.
    run_traces: Vec<SparsedTraceArena>,
    /// Current depth of invariant run (starts at 0).
    depth: u32,
    /// Current assume rejects of the invariant run.
    rejects: u32,
    /// Whether new coverage was discovered during this run.
    new_coverage: bool,
}
impl InvariantTestRun {
    /// Instantiates an invariant test run, seeded with the first call of the sequence.
    ///
    /// `depth` is only used to pre-size the per-call stats buffer; the run itself
    /// starts at depth 0 with no rejects and no new coverage recorded yet.
    fn new(first_input: BasicTxDetails, executor: Executor, depth: usize) -> Self {
        let fuzz_runs = Vec::with_capacity(depth);
        Self {
            executor,
            fuzz_runs,
            inputs: vec![first_input],
            created_contracts: Vec::new(),
            run_traces: Vec::new(),
            depth: 0,
            rejects: 0,
            new_coverage: false,
        }
    }
}
/// Wrapper around any [`Executor`] implementer which provides fuzzing support using [`proptest`].
///
/// After instantiation, calling `invariant_fuzz` will proceed to hammer the deployed smart
/// contracts with inputs, until it finds a counterexample sequence. The provided [`TestRunner`]
/// contains all the configuration which can be overridden via [environment
/// variables](proptest::test_runner::Config)
pub struct InvariantExecutor<'a> {
    /// The EVM executor used to run the fuzzed call sequences.
    pub executor: Executor,
    /// Proptest runner.
    runner: TestRunner,
    /// The invariant configuration
    config: InvariantConfig,
    /// Contracts deployed with `setUp()`
    setup_contracts: &'a ContractsByAddress,
    /// Contracts that are part of the project but have not been deployed yet. We need the bytecode
    /// to identify them from the state changeset.
    project_contracts: &'a ContractsByArtifact,
    /// Filters contracts to be fuzzed through their artifact identifiers.
    artifact_filters: ArtifactFilters,
}
impl<'a> InvariantExecutor<'a> {
    /// Instantiates a fuzzed executor EVM given a testrunner.
    ///
    /// Artifact filters start out empty; they are populated later by
    /// `select_contract_artifacts` before the fuzz campaign starts.
    pub fn new(
        executor: Executor,
        runner: TestRunner,
        config: InvariantConfig,
        setup_contracts: &'a ContractsByAddress,
        project_contracts: &'a ContractsByArtifact,
    ) -> Self {
        Self {
            executor,
            runner,
            config,
            setup_contracts,
            project_contracts,
            artifact_filters: ArtifactFilters::default(),
        }
    }
    /// Returns the invariant configuration, consuming the executor.
    pub fn config(self) -> InvariantConfig {
        self.config
    }
    /// Fuzzes any deployed contract and checks any broken invariant at `invariant_address`.
    ///
    /// Runs up to `config.runs` call sequences (or until the configured timeout /
    /// `early_exit`), each up to `config.depth` calls deep, collecting metrics,
    /// line/edge coverage and gas stats along the way.
    pub fn invariant_fuzz(
        &mut self,
        invariant_contract: InvariantContract<'_>,
        fuzz_fixtures: &FuzzFixtures,
        fuzz_state: EvmFuzzState,
        progress: Option<&ProgressBar>,
        early_exit: &EarlyExit,
    ) -> Result<InvariantFuzzTestResult> {
        // Throw an error to abort test run if the invariant function accepts input params
        if !invariant_contract.invariant_function.inputs.is_empty() {
            return Err(eyre!("Invariant test function should have no inputs"));
        }
        let (mut invariant_test, mut corpus_manager) =
            self.prepare_test(&invariant_contract, fuzz_fixtures, fuzz_state)?;
        // Start timer for this invariant test.
        let mut runs = 0;
        let timer = FuzzTestTimer::new(self.config.timeout);
        let mut last_metrics_report = Instant::now();
        // The campaign continues until early exit is requested, the timeout is hit
        // (when one is configured), or the configured number of runs is reached.
        let continue_campaign = |runs: u32| {
            if early_exit.should_stop() {
                return false;
            }
            if timer.is_enabled() { !timer.is_timed_out() } else { runs < self.config.runs }
        };
        // Invariant runs with edge coverage if corpus dir is set or showing edge coverage.
        let edge_coverage_enabled = self.config.corpus.collect_edge_coverage();
        'stop: while continue_campaign(runs) {
            let initial_seq = corpus_manager.new_inputs(
                &mut invariant_test.test_data.branch_runner,
                &invariant_test.fuzz_state,
                &invariant_test.targeted_contracts,
            )?;
            // Create current invariant run data.
            let mut current_run = InvariantTestRun::new(
                initial_seq[0].clone(),
                // Before each run, we must reset the backend state.
                self.executor.clone(),
                self.config.depth as usize,
            );
            // We stop the run immediately if we have reverted, and `fail_on_revert` is set.
            if self.config.fail_on_revert && invariant_test.reverts() > 0 {
                return Err(eyre!("call reverted"));
            }
            while current_run.depth < self.config.depth {
                // Check if the timeout has been reached.
                if timer.is_timed_out() {
                    // Since we never record a revert here the test is still considered
                    // successful even though it timed out. We *want*
                    // this behavior for now, so that's ok, but
                    // future developers should be aware of this.
                    break 'stop;
                }
                let tx = current_run
                    .inputs
                    .last()
                    .ok_or_else(|| eyre!("no input generated to call fuzzed target."))?;
                // Execute call from the randomly generated sequence without committing state.
                // State is committed only if call is not a magic assume.
                let mut call_result = execute_tx(&mut current_run.executor, tx)?;
                let discarded = call_result.result.as_ref() == MAGIC_ASSUME;
                if self.config.show_metrics {
                    invariant_test.record_metrics(tx, call_result.reverted, discarded);
                }
                // Collect line coverage from last fuzzed call.
                invariant_test.merge_line_coverage(call_result.line_coverage.clone());
                // Collect edge coverage and set the flag in the current run.
                if corpus_manager.merge_edge_coverage(&mut call_result) {
                    current_run.new_coverage = true;
                }
                if discarded {
                    // Drop the rejected input; too many rejects aborts the whole test.
                    current_run.inputs.pop();
                    current_run.rejects += 1;
                    if current_run.rejects > self.config.max_assume_rejects {
                        invariant_test.set_error(InvariantFuzzError::MaxAssumeRejects(
                            self.config.max_assume_rejects,
                        ));
                        break 'stop;
                    }
                } else {
                    // Commit executed call result.
                    current_run.executor.commit(&mut call_result);
                    // Collect data for fuzzing from the state changeset.
                    // This step updates the state dictionary and therefore invalidates the
                    // ValueTree in use by the current run. This manifests itself in proptest
                    // observing a different input case than what it was called with, and creates
                    // inconsistencies whenever proptest tries to use the input case after test
                    // execution.
                    // See <https://github.com/foundry-rs/foundry/issues/9764>.
                    let mut state_changeset = call_result.state_changeset.clone();
                    if !call_result.reverted {
                        collect_data(
                            &invariant_test,
                            &mut state_changeset,
                            tx,
                            &call_result,
                            self.config.depth,
                        );
                    }
                    // Collect created contracts and add to fuzz targets only if targeted contracts
                    // are updatable.
                    if let Err(error) =
                        &invariant_test.targeted_contracts.collect_created_contracts(
                            &state_changeset,
                            self.project_contracts,
                            self.setup_contracts,
                            &self.artifact_filters,
                            &mut current_run.created_contracts,
                        )
                    {
                        warn!(target: "forge::test", "{error}");
                    }
                    current_run.fuzz_runs.push(FuzzCase {
                        calldata: tx.call_details.calldata.clone(),
                        gas: call_result.gas_used,
                        stipend: call_result.stipend,
                    });
                    // Determine if test can continue or should exit.
                    let result = can_continue(
                        &invariant_contract,
                        &mut invariant_test,
                        &mut current_run,
                        &self.config,
                        call_result,
                        &state_changeset,
                    )
                    .map_err(|e| eyre!(e.to_string()))?;
                    // Record the sequence on failure or on the final call of the run.
                    if !result.can_continue || current_run.depth == self.config.depth - 1 {
                        invariant_test.set_last_run_inputs(&current_run.inputs);
                    }
                    // If test cannot continue then stop current run and exit test suite.
                    if !result.can_continue {
                        break 'stop;
                    }
                    invariant_test.set_last_call_results(result.call_result);
                    current_run.depth += 1;
                }
                current_run.inputs.push(corpus_manager.generate_next_input(
                    &mut invariant_test.test_data.branch_runner,
                    &initial_seq,
                    discarded,
                    current_run.depth as usize,
                )?);
            }
            // Extend corpus with current run data.
            corpus_manager.process_inputs(&current_run.inputs, current_run.new_coverage);
            // Call `afterInvariant` only if it is declared and test didn't fail already.
            if invariant_contract.call_after_invariant && !invariant_test.has_errors() {
                assert_after_invariant(
                    &invariant_contract,
                    &mut invariant_test,
                    &current_run,
                    &self.config,
                )
                .map_err(|_| eyre!("Failed to call afterInvariant"))?;
            }
            // End current invariant test run.
            invariant_test.end_run(current_run, self.config.gas_report_samples as usize);
            if let Some(progress) = progress {
                // If running with progress then increment completed runs.
                progress.inc(1);
                // Display metrics in progress bar.
                if edge_coverage_enabled {
                    progress.set_message(format!("{}", &corpus_manager.metrics));
                }
            } else if edge_coverage_enabled
                && last_metrics_report.elapsed() > DURATION_BETWEEN_METRICS_REPORT
            {
                // Display metrics inline if corpus dir set.
                let metrics = json!({
                    "timestamp": SystemTime::now()
                        .duration_since(UNIX_EPOCH)?
                        .as_secs(),
                    "invariant": invariant_contract.invariant_function.name,
                    "metrics": &corpus_manager.metrics,
                });
                let _ = sh_println!("{}", serde_json::to_string(&metrics)?);
                last_metrics_report = Instant::now();
            }
            runs += 1;
        }
        trace!(?fuzz_fixtures);
        invariant_test.fuzz_state.log_stats();
        let result = invariant_test.test_data;
        Ok(InvariantFuzzTestResult {
            error: result.failures.error,
            cases: result.fuzz_cases,
            reverts: result.failures.reverts,
            last_run_inputs: result.last_run_inputs,
            gas_report_traces: result.gas_report_traces,
            line_coverage: result.line_coverage,
            metrics: result.metrics,
            failed_corpus_replays: corpus_manager.failed_replays(),
        })
    }
    /// Prepares certain structures to execute the invariant tests:
    /// * Invariant Fuzz Test.
    /// * Invariant Corpus Manager.
    ///
    /// Also asserts the invariant once in its initial state, so an unsound
    /// invariant aborts before any fuzzing starts.
    fn prepare_test(
        &mut self,
        invariant_contract: &InvariantContract<'_>,
        fuzz_fixtures: &FuzzFixtures,
        fuzz_state: EvmFuzzState,
    ) -> Result<(InvariantTest, CorpusManager)> {
        // Finds out the chosen deployed contracts and/or senders.
        self.select_contract_artifacts(invariant_contract.address)?;
        let (targeted_senders, targeted_contracts) =
            self.select_contracts_and_senders(invariant_contract.address)?;
        // Creates the invariant strategy.
        let strategy = invariant_strat(
            fuzz_state.clone(),
            targeted_senders,
            targeted_contracts.clone(),
            self.config.clone(),
            fuzz_fixtures.clone(),
        )
        .no_shrink();
        // Allows `override_call_strat` to use the address given by the Fuzzer inspector during
        // EVM execution.
        let mut call_generator = None;
        if self.config.call_override {
            let target_contract_ref = Arc::new(RwLock::new(Address::ZERO));
            call_generator = Some(RandomCallGenerator::new(
                invariant_contract.address,
                self.runner.clone(),
                override_call_strat(
                    fuzz_state.clone(),
                    targeted_contracts.clone(),
                    target_contract_ref.clone(),
                    fuzz_fixtures.clone(),
                ),
                target_contract_ref,
            ));
        }
        // If any of the targeted contracts have the storage layout enabled then we can sample
        // mapping values. To accomplish, we need to record the mapping storage slots and keys.
        // (`fuzz_state` is shadowed with the mapping-aware variant.)
        let fuzz_state =
            if targeted_contracts.targets.lock().iter().any(|(_, t)| t.storage_layout.is_some()) {
                fuzz_state.with_mapping_slots(AddressMap::default())
            } else {
                fuzz_state
            };
        self.executor.inspector_mut().set_fuzzer(Fuzzer {
            call_generator,
            fuzz_state: fuzz_state.clone(),
            collect: true,
        });
        // Let's make sure the invariant is sound before actually starting the run:
        // We'll assert the invariant in its initial state, and if it fails, we'll
        // already know if we can early exit the invariant run.
        // This does not count as a fuzz run. It will just register the revert.
        let mut failures = InvariantFailures::new();
        let last_call_results = assert_invariants(
            invariant_contract,
            &self.config,
            &targeted_contracts,
            &self.executor,
            &[],
            &mut failures,
        )?;
        if let Some(error) = failures.error {
            return Err(eyre!(error.revert_reason().unwrap_or_default()));
        }
        let corpus_manager = CorpusManager::new(
            self.config.corpus.clone(),
            strategy.boxed(),
            &self.executor,
            None,
            Some(&targeted_contracts),
        )?;
        let invariant_test = InvariantTest::new(
            fuzz_state,
            targeted_contracts,
            failures,
            last_call_results,
            self.runner.clone(),
        );
        Ok((invariant_test, corpus_manager))
    }
    /// Fills the `InvariantExecutor` with the artifact identifier filters (in `path:name` string
    /// format). They will be used to filter contracts after the `setUp`, and more importantly,
    /// during the runs.
    ///
    /// Also excludes any contract without any mutable functions.
    ///
    /// Priority:
    ///
    /// targetArtifactSelectors > excludeArtifacts > targetArtifacts
    pub fn select_contract_artifacts(&mut self, invariant_address: Address) -> Result<()> {
        let targeted_artifact_selectors = self
            .executor
            .call_sol_default(invariant_address, &IInvariantTest::targetArtifactSelectorsCall {});
        // Insert them into the executor `targeted_abi`.
        for IInvariantTest::FuzzArtifactSelector { artifact, selectors } in
            targeted_artifact_selectors
        {
            let identifier = self.validate_selected_contract(artifact, &selectors)?;
            self.artifact_filters.targeted.entry(identifier).or_default().extend(selectors);
        }
        let targeted_artifacts = self
            .executor
            .call_sol_default(invariant_address, &IInvariantTest::targetArtifactsCall {});
        let excluded_artifacts = self
            .executor
            .call_sol_default(invariant_address, &IInvariantTest::excludeArtifactsCall {});
        // Insert `excludeArtifacts` into the executor `excluded_abi`.
        for contract in excluded_artifacts {
            let identifier = self.validate_selected_contract(contract, &[])?;
            if !self.artifact_filters.excluded.contains(&identifier) {
                self.artifact_filters.excluded.push(identifier);
            }
        }
        // Exclude any artifact without mutable functions: a contract with only
        // `pure`/`view` functions cannot change state, so fuzzing it is pointless.
        for (artifact, contract) in self.project_contracts.iter() {
            if contract
                .abi
                .functions()
                .filter(|func| {
                    !matches!(
                        func.state_mutability,
                        alloy_json_abi::StateMutability::Pure
                            | alloy_json_abi::StateMutability::View
                    )
                })
                .count()
                == 0
                && !self.artifact_filters.excluded.contains(&artifact.identifier())
            {
                self.artifact_filters.excluded.push(artifact.identifier());
            }
        }
        // Insert `targetArtifacts` into the executor `targeted_abi`, if they have not been seen
        // before.
        for contract in targeted_artifacts {
            let identifier = self.validate_selected_contract(contract, &[])?;
            if !self.artifact_filters.targeted.contains_key(&identifier)
                && !self.artifact_filters.excluded.contains(&identifier)
            {
                self.artifact_filters.targeted.insert(identifier, vec![]);
            }
        }
        Ok(())
    }
/// Makes sure that the contract exists in the project. If so, it returns its artifact
/// identifier.
fn validate_selected_contract(
&mut self,
contract: String,
selectors: &[FixedBytes<4>],
) -> Result<String> {
if let Some((artifact, contract_data)) =
self.project_contracts.find_by_name_or_identifier(&contract)?
{
// Check that the selectors really exist for this contract.
for selector in selectors {
contract_data
.abi
.functions()
.find(|func| func.selector().as_slice() == selector.as_slice())
.wrap_err(format!("{contract} does not have the selector {selector:?}"))?;
}
return Ok(artifact.identifier());
}
eyre::bail!(
"{contract} not found in the project. Allowed format: `contract_name` or `contract_path:contract_name`."
);
}
    /// Selects senders and contracts based on the contract methods `targetSenders() -> address[]`,
    /// `targetContracts() -> address[]` and `excludeContracts() -> address[]`.
    ///
    /// Errors if the resulting target set is empty.
    pub fn select_contracts_and_senders(
        &self,
        to: Address,
    ) -> Result<(SenderFilters, FuzzRunIdentifiedContracts)> {
        let targeted_senders =
            self.executor.call_sol_default(to, &IInvariantTest::targetSendersCall {});
        let mut excluded_senders =
            self.executor.call_sol_default(to, &IInvariantTest::excludeSendersCall {});
        // Extend with default excluded addresses - https://github.com/foundry-rs/foundry/issues/4163
        excluded_senders.extend([
            CHEATCODE_ADDRESS,
            HARDHAT_CONSOLE_ADDRESS,
            DEFAULT_CREATE2_DEPLOYER,
        ]);
        // Extend with precompiles - https://github.com/foundry-rs/foundry/issues/4287
        excluded_senders.extend(PRECOMPILES);
        let sender_filters = SenderFilters::new(targeted_senders, excluded_senders);
        let selected = self.executor.call_sol_default(to, &IInvariantTest::targetContractsCall {});
        let excluded = self.executor.call_sol_default(to, &IInvariantTest::excludeContractsCall {});
        let contracts = self
            .setup_contracts
            .iter()
            .filter(|&(addr, (identifier, _))| {
                // Include to address if explicitly set as target.
                if *addr == to && selected.contains(&to) {
                    return true;
                }
                // Otherwise the test contract itself and the cheatcode/console addresses
                // are never fuzzed; empty selected/excluded lists mean "no filter".
                *addr != to
                    && *addr != CHEATCODE_ADDRESS
                    && *addr != HARDHAT_CONSOLE_ADDRESS
                    && (selected.is_empty() || selected.contains(addr))
                    && (excluded.is_empty() || !excluded.contains(addr))
                    && self.artifact_filters.matches(identifier)
            })
            .map(|(addr, (identifier, abi))| {
                (
                    *addr,
                    TargetedContract::new(identifier.clone(), abi.clone())
                        .with_project_contracts(self.project_contracts),
                )
            })
            .collect();
        let mut contracts = TargetedContracts { inner: contracts };
        self.target_interfaces(to, &mut contracts)?;
        self.select_selectors(to, &mut contracts)?;
        // There should be at least one contract identified as target for fuzz runs.
        if contracts.is_empty() {
            eyre::bail!("No contracts to fuzz.");
        }
        Ok((sender_filters, FuzzRunIdentifiedContracts::new(contracts, selected.is_empty())))
    }
/// Extends the contracts and selectors to fuzz with the addresses and ABIs specified in
/// `targetInterfaces() -> (address, string[])[]`. Enables targeting of addresses that are
/// not deployed during `setUp` such as when fuzzing in a forked environment. Also enables
/// targeting of delegate proxies and contracts deployed with `create` or `create2`.
pub fn target_interfaces(
&self,
invariant_address: Address,
targeted_contracts: &mut TargetedContracts,
) -> Result<()> {
let interfaces = self
.executor
.call_sol_default(invariant_address, &IInvariantTest::targetInterfacesCall {});
// Since `targetInterfaces` returns a tuple array there is no guarantee
// that the addresses are unique this map is used to merge functions of
// the specified interfaces for the same address. For example:
// `[(addr1, ["IERC20", "IOwnable"])]` and `[(addr1, ["IERC20"]), (addr1, ("IOwnable"))]`
// should be equivalent.
let mut combined = TargetedContracts::new();
// Loop through each address and its associated artifact identifiers.
// We're borrowing here to avoid taking full ownership.
for IInvariantTest::FuzzInterface { addr, artifacts } in &interfaces {
// Identifiers are specified as an array, so we loop through them.
for identifier in artifacts {
// Try to find the contract by name or identifier in the project's contracts.
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/invariant/replay.rs | crates/evm/evm/src/executors/invariant/replay.rs | use super::{call_after_invariant_function, call_invariant_function, execute_tx};
use crate::executors::{EarlyExit, Executor, invariant::shrink::shrink_sequence};
use alloy_dyn_abi::JsonAbiExt;
use alloy_primitives::{Log, map::HashMap};
use eyre::Result;
use foundry_common::{ContractsByAddress, ContractsByArtifact};
use foundry_config::InvariantConfig;
use foundry_evm_coverage::HitMaps;
use foundry_evm_fuzz::{BaseCounterExample, BasicTxDetails, invariant::InvariantContract};
use foundry_evm_traces::{TraceKind, TraceMode, Traces, load_contracts};
use indicatif::ProgressBar;
use parking_lot::RwLock;
use std::sync::Arc;
/// Replays a call sequence for collecting logs and traces.
/// Returns counterexample to be used when the call sequence is a failed scenario.
#[expect(clippy::too_many_arguments)]
pub fn replay_run(
    invariant_contract: &InvariantContract<'_>,
    mut executor: Executor,
    known_contracts: &ContractsByArtifact,
    mut ided_contracts: ContractsByAddress,
    logs: &mut Vec<Log>,
    traces: &mut Traces,
    line_coverage: &mut Option<HitMaps>,
    deprecated_cheatcodes: &mut HashMap<&'static str, Option<&'static str>>,
    inputs: &[BasicTxDetails],
    show_solidity: bool,
) -> Result<Vec<BaseCounterExample>> {
    // We want traces for a failed case.
    if executor.inspector().tracer.is_none() {
        executor.set_tracing(TraceMode::Call);
    }
    let mut counterexample_sequence = vec![];
    // Replay each call from the sequence, collect logs, traces and coverage.
    for tx in inputs {
        let call_result = execute_tx(&mut executor, tx)?;
        logs.extend(call_result.logs);
        // Tracing was enabled above, so `traces` is expected to be `Some` here.
        traces.push((TraceKind::Execution, call_result.traces.clone().unwrap()));
        HitMaps::merge_opt(line_coverage, call_result.line_coverage);
        // Identify newly generated contracts, if they exist.
        ided_contracts
            .extend(load_contracts(call_result.traces.iter().map(|a| &a.arena), known_contracts));
        // Create counter example to be used in failed case.
        counterexample_sequence.push(BaseCounterExample::from_invariant_call(
            tx,
            &ided_contracts,
            call_result.traces,
            show_solidity,
        ));
    }
    // Replay invariant to collect logs and traces.
    // We do this only once at the end of the replayed sequence.
    // Checking after each call doesn't add valuable info for passing scenario
    // (invariant call result is always success) nor for failed scenarios
    // (invariant call result is always success until the last call that breaks it).
    let (invariant_result, invariant_success) = call_invariant_function(
        &executor,
        invariant_contract.address,
        invariant_contract.invariant_function.abi_encode_input(&[])?.into(),
    )?;
    traces.push((TraceKind::Execution, invariant_result.traces.clone().unwrap()));
    logs.extend(invariant_result.logs);
    deprecated_cheatcodes.extend(
        invariant_result
            .cheatcodes
            .as_ref()
            .map_or_else(Default::default, |cheats| cheats.deprecated.clone()),
    );
    // Collect after invariant logs and traces.
    if invariant_contract.call_after_invariant && invariant_success {
        let (after_invariant_result, _) =
            call_after_invariant_function(&executor, invariant_contract.address)?;
        traces.push((TraceKind::Execution, after_invariant_result.traces.clone().unwrap()));
        logs.extend(after_invariant_result.logs);
    }
    Ok(counterexample_sequence)
}
/// Replays the error case, shrinks the failing sequence and collects all necessary traces.
#[expect(clippy::too_many_arguments)]
pub fn replay_error(
    config: InvariantConfig,
    mut executor: Executor,
    calls: &[BasicTxDetails],
    inner_sequence: Option<Vec<Option<BasicTxDetails>>>,
    invariant_contract: &InvariantContract<'_>,
    known_contracts: &ContractsByArtifact,
    ided_contracts: ContractsByAddress,
    logs: &mut Vec<Log>,
    traces: &mut Traces,
    line_coverage: &mut Option<HitMaps>,
    deprecated_cheatcodes: &mut HashMap<&'static str, Option<&'static str>>,
    progress: Option<&ProgressBar>,
    early_exit: &EarlyExit,
) -> Result<Vec<BaseCounterExample>> {
    // Shrink sequence of failed calls.
    let calls =
        shrink_sequence(&config, invariant_contract, calls, &executor, progress, early_exit)?;
    // Install the inner fuzzer call sequence (if any) so overridden calls replay identically.
    if let Some(sequence) = inner_sequence {
        set_up_inner_replay(&mut executor, &sequence);
    }
    // Replay calls to get the counterexample and to collect logs, traces and coverage.
    replay_run(
        invariant_contract,
        executor,
        known_contracts,
        ided_contracts,
        logs,
        traces,
        line_coverage,
        deprecated_cheatcodes,
        &calls,
        config.show_solidity,
    )
}
/// Sets up the calls generated by the internal fuzzer, if they exist.
///
/// No-op when the executor has no fuzzer inspector or no call generator installed.
fn set_up_inner_replay(executor: &mut Executor, inner_sequence: &[Option<BasicTxDetails>]) {
    let Some(fuzzer) = &mut executor.inspector_mut().fuzzer else { return };
    let Some(call_generator) = &mut fuzzer.call_generator else { return };
    // Hand the recorded sequence to the generator and switch it into replay mode.
    call_generator.last_sequence = Arc::new(RwLock::new(inner_sequence.to_vec()));
    call_generator.set_replay(true);
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/invariant/shrink.rs | crates/evm/evm/src/executors/invariant/shrink.rs | use crate::executors::{
EarlyExit, Executor,
invariant::{call_after_invariant_function, call_invariant_function, execute_tx},
};
use alloy_primitives::{Address, Bytes};
use foundry_config::InvariantConfig;
use foundry_evm_core::constants::MAGIC_ASSUME;
use foundry_evm_fuzz::{BasicTxDetails, invariant::InvariantContract};
use indicatif::ProgressBar;
use proptest::bits::{BitSetLike, VarBitSet};
/// Shrinker for a call sequence failure.
/// Iterates the call sequence top down and removes calls one by one.
/// If the failure is still reproducible with removed call then moves to the next one.
/// If the failure is not reproducible then restore removed call and moves to next one.
#[derive(Debug)]
struct CallSequenceShrinker {
    /// Length of call sequence to be shrunk.
    call_sequence_len: usize,
    /// Call ids contained in current shrunk sequence.
    included_calls: VarBitSet,
}
impl CallSequenceShrinker {
    /// Creates a shrinker over `call_sequence_len` calls, with every call initially included.
    fn new(call_sequence_len: usize) -> Self {
        let included_calls = VarBitSet::saturated(call_sequence_len);
        Self { call_sequence_len, included_calls }
    }
    /// Return candidate shrink sequence to be tested, by removing ids from original sequence.
    fn current(&self) -> impl Iterator<Item = usize> + '_ {
        let included = &self.included_calls;
        (0..self.call_sequence_len).filter(move |&idx| included.test(idx))
    }
}
/// Shrinks a failing call sequence by repeatedly removing calls while the failure still
/// reproduces, bounded by `config.shrink_run_limit` attempts.
/// Returns the (possibly shorter) failing sequence; an empty sequence means the invariant
/// was already broken before any call.
pub(crate) fn shrink_sequence(
    config: &InvariantConfig,
    invariant_contract: &InvariantContract<'_>,
    calls: &[BasicTxDetails],
    executor: &Executor,
    progress: Option<&ProgressBar>,
    early_exit: &EarlyExit,
) -> eyre::Result<Vec<BasicTxDetails>> {
    trace!(target: "forge::test", "Shrinking sequence of {} calls.", calls.len());
    // Reset run count and display shrinking message.
    if let Some(progress) = progress {
        progress.set_length(config.shrink_run_limit as u64);
        progress.reset();
        progress.set_message(" Shrink");
    }
    let target_address = invariant_contract.address;
    let calldata: Bytes = invariant_contract.invariant_function.selector().to_vec().into();
    // Special case test: the invariant is *unsatisfiable* - it took 0 calls to
    // break the invariant -- consider emitting a warning.
    let (_, success) = call_invariant_function(executor, target_address, calldata.clone())?;
    if !success {
        return Ok(vec![]);
    }
    let mut call_idx = 0;
    let mut shrinker = CallSequenceShrinker::new(calls.len());
    for _ in 0..config.shrink_run_limit {
        if early_exit.should_stop() {
            break;
        }
        // Remove call at current index.
        shrinker.included_calls.clear(call_idx);
        match check_sequence(
            executor.clone(),
            calls,
            shrinker.current().collect(),
            target_address,
            calldata.clone(),
            config.fail_on_revert,
            invariant_contract.call_after_invariant,
        ) {
            // If candidate sequence still fails, shrink until shortest possible.
            Ok((false, _)) if shrinker.included_calls.count() == 1 => break,
            // Restore last removed call as it caused sequence to pass invariant.
            Ok((true, _)) => shrinker.included_calls.set(call_idx),
            // Sequence still fails (or check errored): keep the call removed and move on.
            _ => {}
        }
        if let Some(progress) = progress {
            progress.inc(1);
        }
        // Restart from first call once we reach the end of sequence.
        if call_idx + 1 == shrinker.call_sequence_len {
            call_idx = 0;
        } else {
            call_idx += 1;
        };
    }
    Ok(shrinker.current().map(|idx| &calls[idx]).cloned().collect())
}
/// Checks if the given call sequence breaks the invariant.
///
/// Used in shrinking phase for checking candidate sequences and in replay failures phase to test
/// persisted failures.
/// Returns the result of invariant check (and afterInvariant call if needed) and if sequence was
/// entirely applied.
pub fn check_sequence(
    mut executor: Executor,
    calls: &[BasicTxDetails],
    sequence: Vec<usize>,
    test_address: Address,
    calldata: Bytes,
    fail_on_revert: bool,
    call_after_invariant: bool,
) -> eyre::Result<(bool, bool)> {
    // Apply the call sequence.
    for call_index in sequence {
        let tx = &calls[call_index];
        let mut call_result = execute_tx(&mut executor, tx)?;
        executor.commit(&mut call_result);
        // Ignore calls reverted with `MAGIC_ASSUME`. This is needed to handle failed scenarios that
        // are replayed with a modified version of test driver (that use new `vm.assume`
        // cheatcodes).
        if call_result.reverted && fail_on_revert && call_result.result.as_ref() != MAGIC_ASSUME {
            // Candidate sequence fails test.
            // We don't have to apply remaining calls to check sequence.
            return Ok((false, false));
        }
    }
    // Check the invariant for call sequence.
    let (_, mut success) = call_invariant_function(&executor, test_address, calldata)?;
    // Check after invariant result if invariant is success and `afterInvariant` function is
    // declared.
    if success && call_after_invariant {
        (_, success) = call_after_invariant_function(&executor, test_address)?;
    }
    Ok((success, true))
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/fuzz/types.rs | crates/evm/evm/src/executors/fuzz/types.rs | use crate::executors::RawCallResult;
use alloy_primitives::{Bytes, Log, map::HashMap};
use foundry_evm_core::Breakpoints;
use foundry_evm_coverage::HitMaps;
use foundry_evm_fuzz::FuzzCase;
use foundry_evm_traces::SparsedTraceArena;
use revm::interpreter::InstructionResult;
/// Returned by a single fuzz in the case of a successful run
#[derive(Debug)]
pub struct CaseOutcome {
    /// Data of a single fuzz test case.
    pub case: FuzzCase,
    /// The traces of the call.
    pub traces: Option<SparsedTraceArena>,
    /// The coverage info collected during the call.
    pub coverage: Option<HitMaps>,
    /// Breakpoints char pc map.
    pub breakpoints: Breakpoints,
    /// Logs of a single fuzz test case.
    pub logs: Vec<Log>,
    /// Deprecated cheatcodes mapped to their replacements.
    pub deprecated_cheatcodes: HashMap<&'static str, Option<&'static str>>,
}
/// Returned by a single fuzz when a counterexample has been discovered
#[derive(Debug)]
pub struct CounterExampleOutcome {
    /// Minimal reproduction test case for failing test: the calldata and raw call result.
    pub counterexample: (Bytes, RawCallResult),
    /// The status of the call, when one is available.
    pub exit_reason: Option<InstructionResult>,
    /// Breakpoints char pc map.
    pub breakpoints: Breakpoints,
}
/// Outcome of a single fuzz
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
pub enum FuzzOutcome {
    /// Successful run: stats, traces, coverage and logs for the case.
    Case(CaseOutcome),
    /// Failing run: the discovered counterexample.
    CounterExample(CounterExampleOutcome),
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/evm/src/executors/fuzz/mod.rs | crates/evm/evm/src/executors/fuzz/mod.rs | use crate::executors::{
DURATION_BETWEEN_METRICS_REPORT, EarlyExit, Executor, FuzzTestTimer, RawCallResult,
};
use alloy_dyn_abi::JsonAbiExt;
use alloy_json_abi::Function;
use alloy_primitives::{Address, Bytes, Log, U256, map::HashMap};
use eyre::Result;
use foundry_common::sh_println;
use foundry_config::FuzzConfig;
use foundry_evm_core::{
Breakpoints,
constants::{CHEATCODE_ADDRESS, MAGIC_ASSUME},
decode::{RevertDecoder, SkipReason},
};
use foundry_evm_coverage::HitMaps;
use foundry_evm_fuzz::{
BaseCounterExample, BasicTxDetails, CallDetails, CounterExample, FuzzCase, FuzzError,
FuzzFixtures, FuzzTestResult,
strategies::{EvmFuzzState, fuzz_calldata, fuzz_calldata_from_state},
};
use foundry_evm_traces::SparsedTraceArena;
use indicatif::ProgressBar;
use proptest::{
strategy::Strategy,
test_runner::{TestCaseError, TestRunner},
};
use serde_json::json;
use std::time::{Instant, SystemTime, UNIX_EPOCH};
mod types;
use crate::executors::corpus::CorpusManager;
pub use types::{CaseOutcome, CounterExampleOutcome, FuzzOutcome};
/// Contains data collected during fuzz test runs.
#[derive(Default)]
struct FuzzTestData {
    // Stores the first fuzz case.
    first_case: Option<FuzzCase>,
    // Stored gas usage per fuzz case.
    gas_by_case: Vec<(u64, u64)>,
    // Stores the result and calldata of the last failed call, if any.
    counterexample: (Bytes, RawCallResult),
    // Stores up to `max_traces_to_collect` traces.
    traces: Vec<SparsedTraceArena>,
    // Stores breakpoints for the last fuzz case.
    breakpoints: Option<Breakpoints>,
    // Stores coverage information for all fuzz cases.
    coverage: Option<HitMaps>,
    // Stores logs for all fuzz cases (when show_logs is true) or just the last run (when
    // show_logs is false).
    logs: Vec<Log>,
    // Deprecated cheatcodes mapped to their replacements.
    deprecated_cheatcodes: HashMap<&'static str, Option<&'static str>>,
    // Runs performed in fuzz test.
    runs: u32,
    // Current assume rejects of the fuzz run.
    rejects: u32,
    // Test failure, if any occurred.
    failure: Option<TestCaseError>,
}
/// Wrapper around an [`Executor`] which provides fuzzing support using [`proptest`].
///
/// After instantiation, calling `fuzz` will proceed to hammer the deployed smart contract with
/// inputs, until it finds a counterexample. The provided [`TestRunner`] contains all the
/// configuration which can be overridden via [environment variables](proptest::test_runner::Config)
pub struct FuzzedExecutor {
/// The EVM executor.
executor: Executor,
/// The fuzzer
runner: TestRunner,
/// The account that calls tests.
sender: Address,
/// The fuzz configuration.
config: FuzzConfig,
/// The persisted counterexample to be replayed, if any.
persisted_failure: Option<BaseCounterExample>,
}
impl FuzzedExecutor {
    /// Instantiates a fuzzed executor given a testrunner.
    ///
    /// `persisted_failure` optionally carries a previously recorded counterexample,
    /// which [`Self::fuzz`] replays before generating fresh inputs.
    pub fn new(
        executor: Executor,
        runner: TestRunner,
        sender: Address,
        config: FuzzConfig,
        persisted_failure: Option<BaseCounterExample>,
    ) -> Self {
        FuzzedExecutor { executor, runner, sender, config, persisted_failure }
    }
    /// Fuzzes the provided function, assuming it is available at the contract at `address`.
    ///
    /// Runs the fuzz campaign until a counterexample is found, the configured run count or
    /// timeout is exhausted, or `early_exit` requests a stop. A persisted failure, if present,
    /// is replayed first without counting as a run.
    ///
    /// Returns a [`FuzzTestResult`] aggregating gas usage, logs, traces, coverage and (on
    /// failure) the minimal counterexample.
    #[allow(clippy::too_many_arguments)]
    pub fn fuzz(
        &mut self,
        func: &Function,
        fuzz_fixtures: &FuzzFixtures,
        state: EvmFuzzState,
        address: Address,
        rd: &RevertDecoder,
        progress: Option<&ProgressBar>,
        early_exit: &EarlyExit,
    ) -> Result<FuzzTestResult> {
        let state = &state;
        // Stores the fuzz test execution data.
        let mut test_data = FuzzTestData::default();
        // Weighted choice between fixture/ABI-based calldata and state-dictionary calldata.
        let dictionary_weight = self.config.dictionary.dictionary_weight.min(100);
        let strategy = proptest::prop_oneof![
            100 - dictionary_weight => fuzz_calldata(func.clone(), fuzz_fixtures),
            dictionary_weight => fuzz_calldata_from_state(func.clone(), state),
        ]
        .prop_map(move |calldata| BasicTxDetails {
            warp: None,
            roll: None,
            sender: Default::default(),
            call_details: CallDetails { target: Default::default(), calldata },
        });
        // We want to collect at least one trace which will be displayed to user.
        let max_traces_to_collect = std::cmp::max(1, self.config.gas_report_samples) as usize;
        let mut corpus_manager = CorpusManager::new(
            self.config.corpus.clone(),
            strategy.boxed(),
            &self.executor,
            Some(func),
            None,
        )?;
        // Start timer for this fuzz test.
        let timer = FuzzTestTimer::new(self.config.timeout);
        let mut last_metrics_report = Instant::now();
        let max_runs = self.config.runs;
        // Campaign continues until early exit, timeout (if enabled), or run budget exhaustion.
        let continue_campaign = |runs: u32| {
            if early_exit.should_stop() {
                return false;
            }
            if timer.is_enabled() { !timer.is_timed_out() } else { runs < max_runs }
        };
        'stop: while continue_campaign(test_data.runs) {
            // If counterexample recorded, replay it first, without incrementing runs.
            // Only replayed when its selector matches the function under test.
            let input = if let Some(failure) = self.persisted_failure.take()
                && failure.calldata.get(..4).is_some_and(|selector| func.selector() == selector)
            {
                failure.calldata.clone()
            } else {
                // If running with progress, then increment current run.
                if let Some(progress) = progress {
                    progress.inc(1);
                    // Display metrics in progress bar.
                    if self.config.corpus.collect_edge_coverage() {
                        progress.set_message(format!("{}", &corpus_manager.metrics));
                    }
                } else if self.config.corpus.collect_edge_coverage()
                    && last_metrics_report.elapsed() > DURATION_BETWEEN_METRICS_REPORT
                {
                    // Display metrics inline, as a JSON line, at most once per report interval.
                    let metrics = json!({
                        "timestamp": SystemTime::now()
                            .duration_since(UNIX_EPOCH)?
                            .as_secs(),
                        "test": func.name,
                        "metrics": &corpus_manager.metrics,
                    });
                    let _ = sh_println!("{}", serde_json::to_string(&metrics)?);
                    last_metrics_report = Instant::now();
                };
                // Re-seed cheatcode randomness per run so each run is deterministic from the seed.
                if let Some(cheats) = self.executor.inspector_mut().cheatcodes.as_mut()
                    && let Some(seed) = self.config.seed
                {
                    cheats.set_seed(seed.wrapping_add(U256::from(test_data.runs)));
                }
                test_data.runs += 1;
                match corpus_manager.new_input(&mut self.runner, state, func) {
                    Ok(input) => input,
                    Err(err) => {
                        test_data.failure = Some(TestCaseError::fail(format!(
                            "failed to generate fuzzed input: {err}"
                        )));
                        break 'stop;
                    }
                }
            };
            match self.single_fuzz(address, input, &mut corpus_manager) {
                Ok(fuzz_outcome) => match fuzz_outcome {
                    FuzzOutcome::Case(case) => {
                        test_data.gas_by_case.push((case.case.gas, case.case.stipend));
                        if test_data.first_case.is_none() {
                            test_data.first_case.replace(case.case);
                        }
                        // Keep a bounded window of traces; drop the newest before pushing so
                        // the collection never exceeds `max_traces_to_collect`.
                        if let Some(call_traces) = case.traces {
                            if test_data.traces.len() == max_traces_to_collect {
                                test_data.traces.pop();
                            }
                            test_data.traces.push(call_traces);
                            test_data.breakpoints.replace(case.breakpoints);
                        }
                        // Always store logs from the last run in test_data.logs for display at
                        // verbosity >= 2. When show_logs is true,
                        // accumulate all logs. When false, only keep the last run's logs.
                        if self.config.show_logs {
                            test_data.logs.extend(case.logs);
                        } else {
                            test_data.logs = case.logs;
                        }
                        HitMaps::merge_opt(&mut test_data.coverage, case.coverage);
                        test_data.deprecated_cheatcodes = case.deprecated_cheatcodes;
                    }
                    FuzzOutcome::CounterExample(CounterExampleOutcome {
                        exit_reason: status,
                        counterexample: outcome,
                        ..
                    }) => {
                        // Decode the revert reason (if any) and stop the campaign.
                        let reason = rd.maybe_decode(&outcome.1.result, status);
                        test_data.logs.extend(outcome.1.logs.clone());
                        test_data.counterexample = outcome;
                        test_data.failure = Some(TestCaseError::fail(reason.unwrap_or_default()));
                        break 'stop;
                    }
                },
                Err(err) => {
                    match err {
                        TestCaseError::Fail(_) => {
                            test_data.failure = Some(err);
                            break 'stop;
                        }
                        TestCaseError::Reject(_) => {
                            // Discard run and apply max rejects if configured. Saturate to handle
                            // the case of replayed failure, which doesn't count as a run.
                            test_data.runs = test_data.runs.saturating_sub(1);
                            test_data.rejects += 1;
                            // Update progress bar to reflect rejected runs.
                            if let Some(progress) = progress {
                                progress.set_message(format!("([{}] rejected)", test_data.rejects));
                                progress.dec(1);
                            }
                            if self.config.max_test_rejects > 0
                                && test_data.rejects >= self.config.max_test_rejects
                            {
                                test_data.failure = Some(TestCaseError::reject(
                                    FuzzError::TooManyRejects(self.config.max_test_rejects),
                                ));
                                break 'stop;
                            }
                        }
                    }
                }
            }
        }
        let (calldata, call) = test_data.counterexample;
        let mut traces = test_data.traces;
        // On success the last collected trace is the "last run" trace; on failure the
        // counterexample's own traces and breakpoints are used instead.
        let (last_run_traces, last_run_breakpoints) = if test_data.failure.is_none() {
            (traces.pop(), test_data.breakpoints)
        } else {
            (call.traces.clone(), call.cheatcodes.map(|c| c.breakpoints))
        };
        // test_data.logs already contains the appropriate logs:
        // - For failed tests: logs from the counterexample
        // - For successful tests with show_logs=true: all logs from all runs
        // - For successful tests with show_logs=false: logs from the last run only
        let result_logs = test_data.logs;
        let mut result = FuzzTestResult {
            first_case: test_data.first_case.unwrap_or_default(),
            gas_by_case: test_data.gas_by_case,
            success: test_data.failure.is_none(),
            skipped: false,
            reason: None,
            counterexample: None,
            logs: result_logs,
            labels: call.labels,
            traces: last_run_traces,
            breakpoints: last_run_breakpoints,
            gas_report_traces: traces.into_iter().map(|a| a.arena).collect(),
            line_coverage: test_data.coverage,
            deprecated_cheatcodes: test_data.deprecated_cheatcodes,
            failed_corpus_replays: corpus_manager.failed_replays(),
        };
        match test_data.failure {
            Some(TestCaseError::Fail(reason)) => {
                let reason = reason.to_string();
                result.reason = (!reason.is_empty()).then_some(reason);
                // Decode the failing calldata (minus the 4-byte selector) back into ABI args.
                let args = if let Some(data) = calldata.get(4..) {
                    func.abi_decode_input(data).unwrap_or_default()
                } else {
                    vec![]
                };
                result.counterexample = Some(CounterExample::Single(
                    BaseCounterExample::from_fuzz_call(calldata, args, call.traces),
                ));
            }
            Some(TestCaseError::Reject(reason)) => {
                let reason = reason.to_string();
                result.reason = (!reason.is_empty()).then_some(reason);
            }
            None => {}
        }
        // A failure reason that decodes as a skip marker turns the result into a skip.
        if let Some(reason) = &result.reason
            && let Some(reason) = SkipReason::decode_self(reason)
        {
            result.skipped = true;
            result.reason = reason.0;
        }
        state.log_stats();
        Ok(result)
    }
    /// Granular and single-step function that runs only one fuzz and returns either a `CaseOutcome`
    /// or a `CounterExampleOutcome`.
    ///
    /// Also feeds the executed input back into the corpus manager so edge-coverage-guided
    /// fuzzing can evolve the corpus. Returns `Err(TestCaseError::Reject)` when the call hit a
    /// `vm.assume` rejection, and `Err(TestCaseError::Fail)` when the raw call itself errored.
    fn single_fuzz(
        &mut self,
        address: Address,
        calldata: Bytes,
        coverage_metrics: &mut CorpusManager,
    ) -> Result<FuzzOutcome, TestCaseError> {
        let mut call = self
            .executor
            .call_raw(self.sender, address, calldata.clone(), U256::ZERO)
            .map_err(|e| TestCaseError::fail(e.to_string()))?;
        // Record whether this input discovered new edges before handing it to the corpus.
        let new_coverage = coverage_metrics.merge_edge_coverage(&mut call);
        coverage_metrics.process_inputs(
            &[BasicTxDetails {
                warp: None,
                roll: None,
                sender: self.sender,
                call_details: CallDetails { target: address, calldata: calldata.clone() },
            }],
            new_coverage,
        );
        // Handle `vm.assume`: the magic return value marks the input as rejected.
        if call.result.as_ref() == MAGIC_ASSUME {
            return Err(TestCaseError::reject(FuzzError::AssumeReject));
        }
        let (breakpoints, deprecated_cheatcodes) =
            call.cheatcodes.as_ref().map_or_else(Default::default, |cheats| {
                (cheats.breakpoints.clone(), cheats.deprecated.clone())
            });
        // Consider call success if test should not fail on reverts and reverter is not the
        // cheatcode or test address.
        let success = if !self.config.fail_on_revert
            && call
                .reverter
                .is_some_and(|reverter| reverter != address && reverter != CHEATCODE_ADDRESS)
        {
            true
        } else {
            self.executor.is_raw_call_mut_success(address, &mut call, false)
        };
        if success {
            Ok(FuzzOutcome::Case(CaseOutcome {
                case: FuzzCase { calldata, gas: call.gas_used, stipend: call.stipend },
                traces: call.traces,
                coverage: call.line_coverage,
                breakpoints,
                logs: call.logs,
                deprecated_cheatcodes,
            }))
        } else {
            Ok(FuzzOutcome::CounterExample(CounterExampleOutcome {
                exit_reason: call.exit_reason,
                counterexample: (calldata, call),
                breakpoints,
            }))
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/coverage/src/lib.rs | crates/evm/coverage/src/lib.rs | //! # foundry-evm-coverage
//!
//! EVM bytecode coverage analysis.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate tracing;
use alloy_primitives::{
Bytes,
map::{B256HashMap, HashMap, rustc_hash::FxHashMap},
};
use analysis::SourceAnalysis;
use eyre::Result;
use foundry_compilers::artifacts::sourcemap::SourceMap;
use semver::Version;
use std::{
collections::BTreeMap,
fmt,
num::NonZeroU32,
ops::{Deref, DerefMut, Range},
path::{Path, PathBuf},
sync::Arc,
};
pub mod analysis;
pub mod anchors;
mod inspector;
pub use inspector::LineCoverageCollector;
/// A coverage report.
///
/// A coverage report contains coverage items and opcodes corresponding to those items (called
/// "anchors"). A single coverage item may be referred to by multiple anchors.
#[derive(Clone, Debug, Default)]
pub struct CoverageReport {
    /// A map of `(version, source ID)` to the source path.
    pub source_paths: HashMap<(Version, usize), PathBuf>,
    /// The inverse of `source_paths`: a map of `(version, source path)` to source IDs.
    pub source_paths_to_ids: HashMap<(Version, PathBuf), usize>,
    /// All coverage items for the codebase, keyed by the compiler version.
    pub analyses: HashMap<Version, SourceAnalysis>,
    /// All item anchors for the codebase, keyed by their contract ID.
    ///
    /// `(id, (creation, runtime))`
    pub anchors: HashMap<ContractId, (Vec<ItemAnchor>, Vec<ItemAnchor>)>,
    /// All the bytecode hits for the codebase.
    pub bytecode_hits: HashMap<ContractId, HitMap>,
    /// The bytecode -> source mappings, as `(creation, runtime)` source maps.
    pub source_maps: HashMap<ContractId, (SourceMap, SourceMap)>,
}
impl CoverageReport {
    /// Registers a source file path under the given compiler version and source ID,
    /// maintaining both the forward and the inverse lookup maps.
    pub fn add_source(&mut self, version: Version, source_id: usize, path: PathBuf) {
        let id_key = (version.clone(), source_id);
        let path_key = (version, path.clone());
        self.source_paths.insert(id_key, path);
        self.source_paths_to_ids.insert(path_key, source_id);
    }

    /// Looks up the source ID for a specific source file path.
    pub fn get_source_id(&self, version: Version, path: PathBuf) -> Option<usize> {
        let key = (version, path);
        self.source_paths_to_ids.get(&key).copied()
    }

    /// Adds the given `(id, (creation, runtime))` source maps to the report.
    pub fn add_source_maps(
        &mut self,
        source_maps: impl IntoIterator<Item = (ContractId, (SourceMap, SourceMap))>,
    ) {
        for (id, maps) in source_maps {
            self.source_maps.insert(id, maps);
        }
    }

    /// Adds a [`SourceAnalysis`] for a compiler version to this report.
    pub fn add_analysis(&mut self, version: Version, analysis: SourceAnalysis) {
        self.analyses.insert(version, analysis);
    }

    /// Adds `(id, (creation, runtime))` anchors to this report.
    pub fn add_anchors(
        &mut self,
        anchors: impl IntoIterator<Item = (ContractId, (Vec<ItemAnchor>, Vec<ItemAnchor>))>,
    ) {
        for (id, contract_anchors) in anchors {
            self.anchors.insert(id, contract_anchors);
        }
    }
    /// Returns an iterator over coverage summaries by source file path.
    pub fn summary_by_file(&self) -> impl Iterator<Item = (&Path, CoverageSummary)> {
        self.by_file(|summary: &mut CoverageSummary, item| summary.add_item(item))
    }

    /// Returns an iterator over coverage items by source file path.
    pub fn items_by_file(&self) -> impl Iterator<Item = (&Path, Vec<&CoverageItem>)> {
        self.by_file(|list: &mut Vec<_>, item| list.push(item))
    }

    /// Groups all coverage items by source file path, folding each file's items into an
    /// accumulator of type `T` via `f`.
    fn by_file<'a, T: Default>(
        &'a self,
        mut f: impl FnMut(&mut T, &'a CoverageItem),
    ) -> impl Iterator<Item = (&'a Path, T)> {
        // BTreeMap keeps the yielded entries ordered by path.
        let mut by_file: BTreeMap<&Path, T> = BTreeMap::new();
        for (version, items) in &self.analyses {
            for item in items.all_items() {
                let key = (version.clone(), item.loc.source_id);
                // Items whose source path was never registered are skipped.
                let Some(path) = self.source_paths.get(&key) else { continue };
                f(by_file.entry(path).or_default(), item);
            }
        }
        by_file.into_iter()
    }

    /// Processes data from a [`HitMap`] and sets hit counts for coverage items in this coverage
    /// map.
    ///
    /// This function should only be called *after* all the relevant sources have been processed and
    /// added to the map (see [`add_source`](Self::add_source)).
    ///
    /// # Panics
    ///
    /// Panics if an anchor refers to a coverage item that does not exist in the analyses.
    pub fn add_hit_map(
        &mut self,
        contract_id: &ContractId,
        hit_map: &HitMap,
        is_deployed_code: bool,
    ) -> Result<()> {
        // Add bytecode level hits.
        self.bytecode_hits
            .entry(contract_id.clone())
            .and_modify(|m| m.merge(hit_map))
            .or_insert_with(|| hit_map.clone());
        // Add source level hits.
        if let Some(anchors) = self.anchors.get(contract_id) {
            // Pick runtime vs creation anchors depending on which bytecode was executed.
            let anchors = if is_deployed_code { &anchors.1 } else { &anchors.0 };
            for anchor in anchors {
                if let Some(hits) = hit_map.get(anchor.instruction) {
                    self.analyses
                        .get_mut(&contract_id.version)
                        .and_then(|items| items.all_items_mut().get_mut(anchor.item_id as usize))
                        .expect("Anchor refers to non-existent coverage item")
                        .hits += hits.get();
                }
            }
        }
        Ok(())
    }
/// Retains all the coverage items specified by `predicate`.
///
/// This function should only be called after all the sources were used, otherwise, the output
/// will be missing the ones that are dependent on them.
pub fn retain_sources(&mut self, mut predicate: impl FnMut(&Path) -> bool) {
self.analyses.retain(|version, analysis| {
analysis.all_items_mut().retain(|item| {
self.source_paths
.get(&(version.clone(), item.loc.source_id))
.map(|path| predicate(path))
.unwrap_or(false)
});
!analysis.all_items().is_empty()
});
}
}
/// A collection of [`HitMap`]s, keyed by the code hash of the contract they belong to.
#[derive(Clone, Debug, Default)]
pub struct HitMaps(pub B256HashMap<HitMap>);
impl HitMaps {
/// Merges two `Option<HitMaps>`.
pub fn merge_opt(a: &mut Option<Self>, b: Option<Self>) {
match (a, b) {
(_, None) => {}
(a @ None, Some(b)) => *a = Some(b),
(Some(a), Some(b)) => a.merge(b),
}
}
/// Merges two `HitMaps`.
pub fn merge(&mut self, other: Self) {
self.reserve(other.len());
for (code_hash, other) in other.0 {
self.entry(code_hash).and_modify(|e| e.merge(&other)).or_insert(other);
}
}
/// Merges two `HitMaps`.
pub fn merged(mut self, other: Self) -> Self {
self.merge(other);
self
}
}
// Deref to the underlying map so callers can use `HashMap` methods directly.
impl Deref for HitMaps {
    type Target = B256HashMap<HitMap>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for HitMaps {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Hit data for an address.
///
/// Contains low-level data about hit counters for the instructions in the bytecode of a contract.
#[derive(Clone, Debug)]
pub struct HitMap {
    // Program counter -> number of times that instruction was executed.
    hits: FxHashMap<u32, u32>,
    // The bytecode these program counters refer to.
    bytecode: Bytes,
}
impl HitMap {
    /// Create a new hitmap for the given bytecode.
    #[inline]
    pub fn new(bytecode: Bytes) -> Self {
        let hits = HashMap::with_capacity_and_hasher(1024, Default::default());
        Self { bytecode, hits }
    }

    /// Returns the bytecode this hitmap refers to.
    #[inline]
    pub fn bytecode(&self) -> &Bytes {
        &self.bytecode
    }

    /// Returns the hit count for the given program counter, or `None` if it was never hit.
    #[inline]
    pub fn get(&self, pc: u32) -> Option<NonZeroU32> {
        match self.hits.get(&pc) {
            Some(&count) => NonZeroU32::new(count),
            None => None,
        }
    }

    /// Increase the hit counter by 1 for the given program counter.
    #[inline]
    pub fn hit(&mut self, pc: u32) {
        self.hits(pc, 1)
    }

    /// Increase the hit counter by `hits` for the given program counter.
    #[inline]
    pub fn hits(&mut self, pc: u32, hits: u32) {
        let counter = self.hits.entry(pc).or_default();
        *counter += hits;
    }

    /// Reserve space for `additional` hit entries.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        self.hits.reserve(additional);
    }

    /// Merge another hitmap into this one, assuming the bytecode is consistent.
    pub fn merge(&mut self, other: &Self) {
        self.reserve(other.len());
        for (pc, count) in other.iter() {
            self.hits(pc, count);
        }
    }

    /// Returns an iterator over all the program counters and their hit counts.
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = (u32, u32)> + '_ {
        self.hits.iter().map(|(pc, count)| (*pc, *count))
    }

    /// Returns the number of program counters hit in the hitmap.
    #[inline]
    pub fn len(&self) -> usize {
        self.hits.len()
    }

    /// Returns `true` if the hitmap is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.hits.is_empty()
    }
}
/// A unique identifier for a contract
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ContractId {
    /// The compiler version the contract was built with.
    pub version: Version,
    /// The ID of the source file the contract is defined in.
    pub source_id: usize,
    /// The name of the contract.
    pub contract_name: Arc<str>,
}
impl fmt::Display for ContractId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Contract \"{}\" (solc {}, source ID {})",
            self.contract_name, self.version, self.source_id
        )
    }
}
/// An item anchor describes what instruction marks a [CoverageItem] as covered.
#[derive(Clone, Debug)]
pub struct ItemAnchor {
    /// The program counter for the opcode of this anchor.
    pub instruction: u32,
    /// The item ID this anchor points to.
    pub item_id: u32,
}
impl fmt::Display for ItemAnchor {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "IC {} -> Item {}", self.instruction, self.item_id)
    }
}
/// The kind of a [CoverageItem].
#[derive(Clone, Debug)]
pub enum CoverageItemKind {
    /// An executable line in the code.
    Line,
    /// A statement in the code.
    Statement,
    /// A branch in the code.
    Branch {
        /// The ID that identifies the branch.
        ///
        /// There may be multiple items with the same branch ID - they belong to the same branch,
        /// but represent different paths.
        branch_id: u32,
        /// The path ID for this branch.
        ///
        /// The first path has ID 0, the next ID 1, and so on.
        path_id: u32,
        /// If true, then the branch anchor is the first opcode within the branch source range.
        is_first_opcode: bool,
    },
    /// A function in the code.
    Function {
        /// The name of the function.
        name: Box<str>,
    },
}
// Equality and ordering are defined on the kind's discriminant only (via `ord_key`), so
// e.g. two `Branch` items compare equal regardless of branch/path IDs.
impl PartialEq for CoverageItemKind {
    fn eq(&self, other: &Self) -> bool {
        self.ord_key() == other.ord_key()
    }
}
impl Eq for CoverageItemKind {}
impl PartialOrd for CoverageItemKind {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for CoverageItemKind {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.ord_key().cmp(&other.ord_key())
    }
}
impl CoverageItemKind {
    /// Maps each variant to a fixed rank used for equality and ordering:
    /// Line < Statement < Branch < Function.
    fn ord_key(&self) -> impl Ord + use<> {
        match *self {
            Self::Line => 0,
            Self::Statement => 1,
            Self::Branch { .. } => 2,
            Self::Function { .. } => 3,
        }
    }
}
/// A single coverage item: a line, statement, branch path, or function, together with its
/// location and hit count.
#[derive(Clone, Debug)]
pub struct CoverageItem {
    /// The coverage item kind.
    pub kind: CoverageItemKind,
    /// The location of the item in the source code.
    pub loc: SourceLocation,
    /// The number of times this item was hit.
    pub hits: u32,
}
// Equality and ordering ignore `hits`: items compare by source position and kind (see `ord_key`).
impl PartialEq for CoverageItem {
    fn eq(&self, other: &Self) -> bool {
        self.ord_key() == other.ord_key()
    }
}
impl Eq for CoverageItem {}
impl PartialOrd for CoverageItem {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for CoverageItem {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.ord_key().cmp(&other.ord_key())
    }
}
impl fmt::Display for CoverageItem {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.fmt_with_source(None).fmt(f)
    }
}
impl CoverageItem {
    /// Sort key: source ID, then line range, then kind rank, then byte range. `hits` is excluded.
    fn ord_key(&self) -> impl Ord + use<> {
        (
            self.loc.source_id,
            self.loc.lines.start,
            self.loc.lines.end,
            self.kind.ord_key(),
            self.loc.bytes.start,
            self.loc.bytes.end,
        )
    }
    /// Formats the item, optionally appending an excerpt of the covered source text.
    ///
    /// Excerpts longer than 64 characters are abbreviated to their first and last 32
    /// characters with `...` in between.
    pub fn fmt_with_source(&self, src: Option<&str>) -> impl fmt::Display {
        solar::data_structures::fmt::from_fn(move |f| {
            match &self.kind {
                CoverageItemKind::Line => {
                    write!(f, "Line")?;
                }
                CoverageItemKind::Statement => {
                    write!(f, "Statement")?;
                }
                CoverageItemKind::Branch { branch_id, path_id, .. } => {
                    write!(f, "Branch (branch: {branch_id}, path: {path_id})")?;
                }
                CoverageItemKind::Function { name } => {
                    write!(f, r#"Function "{name}""#)?;
                }
            }
            write!(f, " (location: ({}), hits: {})", self.loc, self.hits)?;
            if let Some(src) = src
                && let Some(src) = src.get(self.loc.bytes())
            {
                write!(f, " -> ")?;
                let max_len = 64;
                let max_half = max_len / 2;
                if src.len() > max_len {
                    write!(f, "\"{}", src[..max_half].escape_debug())?;
                    write!(f, "...")?;
                    write!(f, "{}\"", src[src.len() - max_half..].escape_debug())?;
                } else {
                    write!(f, "{src:?}")?;
                }
            }
            Ok(())
        })
    }
}
/// A source location.
#[derive(Clone, Debug)]
pub struct SourceLocation {
    /// The source ID.
    pub source_id: usize,
    /// The contract this source range is in.
    pub contract_name: Arc<str>,
    /// Byte range.
    pub bytes: Range<u32>,
    /// Line range. Indices are 1-based.
    pub lines: Range<u32>,
}
impl fmt::Display for SourceLocation {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "source ID: {}, lines: {:?}, bytes: {:?}", self.source_id, self.lines, self.bytes)
    }
}
impl SourceLocation {
    /// Returns the byte range as usize, suitable for indexing into source text.
    pub fn bytes(&self) -> Range<usize> {
        self.bytes.start as usize..self.bytes.end as usize
    }
    /// Returns the length of the byte range.
    // NOTE: uses the range's iterator length, which is 0 for an inverted range.
    pub fn len(&self) -> u32 {
        self.bytes.len() as u32
    }
    /// Returns true if the byte range is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
/// Coverage summary for a source file.
#[derive(Clone, Debug, Default)]
pub struct CoverageSummary {
    /// The number of executable lines in the source file.
    pub line_count: usize,
    /// The number of lines that were hit.
    pub line_hits: usize,
    /// The number of statements in the source file.
    pub statement_count: usize,
    /// The number of statements that were hit.
    pub statement_hits: usize,
    /// The number of branches in the source file.
    pub branch_count: usize,
    /// The number of branches that were hit.
    pub branch_hits: usize,
    /// The number of functions in the source file.
    pub function_count: usize,
    /// The number of functions hit.
    pub function_hits: usize,
}
impl CoverageSummary {
    /// Creates a new, empty coverage summary.
    pub fn new() -> Self {
        Self::default()
    }
    /// Creates a coverage summary from a collection of coverage items.
    pub fn from_items<'a>(items: impl IntoIterator<Item = &'a CoverageItem>) -> Self {
        let mut summary = Self::default();
        summary.add_items(items);
        summary
    }
/// Adds another coverage summary to this one.
pub fn merge(&mut self, other: &Self) {
let Self {
line_count,
line_hits,
statement_count,
statement_hits,
branch_count,
branch_hits,
function_count,
function_hits,
} = self;
*line_count += other.line_count;
*line_hits += other.line_hits;
*statement_count += other.statement_count;
*statement_hits += other.statement_hits;
*branch_count += other.branch_count;
*branch_hits += other.branch_hits;
*function_count += other.function_count;
*function_hits += other.function_hits;
}
/// Adds a coverage item to this summary.
pub fn add_item(&mut self, item: &CoverageItem) {
match item.kind {
CoverageItemKind::Line => {
self.line_count += 1;
if item.hits > 0 {
self.line_hits += 1;
}
}
CoverageItemKind::Statement => {
self.statement_count += 1;
if item.hits > 0 {
self.statement_hits += 1;
}
}
CoverageItemKind::Branch { .. } => {
self.branch_count += 1;
if item.hits > 0 {
self.branch_hits += 1;
}
}
CoverageItemKind::Function { .. } => {
self.function_count += 1;
if item.hits > 0 {
self.function_hits += 1;
}
}
}
}
/// Adds multiple coverage items to this summary.
pub fn add_items<'a>(&mut self, items: impl IntoIterator<Item = &'a CoverageItem>) {
for item in items {
self.add_item(item);
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/coverage/src/anchors.rs | crates/evm/coverage/src/anchors.rs | use super::{CoverageItemKind, ItemAnchor, SourceLocation};
use crate::analysis::SourceAnalysis;
use alloy_primitives::map::rustc_hash::FxHashSet;
use eyre::ensure;
use foundry_compilers::artifacts::sourcemap::{SourceElement, SourceMap};
use foundry_evm_core::{bytecode::InstIter, ic::IcPcMap};
use revm::bytecode::opcode;
/// Attempts to find anchors for the given items using the given source map and bytecode.
///
/// Each source referenced by the source map is visited once; for every coverage item of that
/// source an anchor is searched for. Items for which no anchor can be found are logged and
/// skipped rather than failing the whole pass.
pub fn find_anchors(
    bytecode: &[u8],
    source_map: &SourceMap,
    ic_pc_map: &IcPcMap,
    analysis: &SourceAnalysis,
) -> Vec<ItemAnchor> {
    // Visit each source ID at most once, in source-map order.
    let mut seen_sources = FxHashSet::default();
    source_map
        .iter()
        .filter_map(|element| element.index())
        .filter(|&source| seen_sources.insert(source))
        .flat_map(|source| analysis.items_for_source_enumerated(source))
        .filter_map(|(item_id, item)| {
            match item.kind {
                // Branch items (except first-opcode anchored ones) get dedicated handling:
                // path 0 anchors after the JUMPI, path 1 anchors at the jump destination.
                CoverageItemKind::Branch { path_id, is_first_opcode: false, .. } => {
                    find_anchor_branch(bytecode, source_map, item_id, &item.loc).map(|anchors| {
                        match path_id {
                            0 => anchors.0,
                            1 => anchors.1,
                            _ => panic!("too many path IDs for branch"),
                        }
                    })
                }
                _ => find_anchor_simple(source_map, ic_pc_map, item_id, &item.loc),
            }
            .inspect_err(|err| warn!(%item, %err, "could not find anchor"))
            .ok()
        })
        .collect()
}
/// Find an anchor representing the first opcode within the given source range.
///
/// # Errors
///
/// Returns an error if no instruction in the source map falls within `loc`, or if the found
/// instruction counter cannot be translated to a program counter.
pub fn find_anchor_simple(
    source_map: &SourceMap,
    ic_pc_map: &IcPcMap,
    item_id: u32,
    loc: &SourceLocation,
) -> eyre::Result<ItemAnchor> {
    // `position` yields an instruction counter (index into the source map), not a PC.
    let instruction =
        source_map.iter().position(|element| is_in_source_range(element, loc)).ok_or_else(
            || eyre::eyre!("Could not find anchor: No matching instruction in range {loc}"),
        )?;
    Ok(ItemAnchor {
        instruction: ic_pc_map.get(instruction as u32).ok_or_else(|| {
            eyre::eyre!("We found an anchor, but we can't translate it to a program counter")
        })?,
        item_id,
    })
}
/// Finds the anchor corresponding to a branch item.
///
/// This finds the relevant anchors for a branch coverage item. These anchors
/// are found using the bytecode of the contract in the range of the branching node.
///
/// For `IfStatement` nodes, the template is generally:
/// ```text
/// <condition>
/// PUSH <ic if false>
/// JUMPI
/// <true branch>
/// <...>
/// <false branch>
/// ```
///
/// For `assert` and `require`, the template is generally:
///
/// ```text
/// PUSH <ic if true>
/// JUMPI
/// <revert>
/// <...>
/// <true branch>
/// ```
///
/// This function will look for the last JUMPI instruction, backtrack to find the program
/// counter of the first branch, and return an item for that program counter, and the
/// program counter immediately after the JUMPI instruction.
pub fn find_anchor_branch(
    bytecode: &[u8],
    source_map: &SourceMap,
    item_id: u32,
    loc: &SourceLocation,
) -> eyre::Result<(ItemAnchor, ItemAnchor)> {
    let mut anchors: Option<(ItemAnchor, ItemAnchor)> = None;
    for (ic, (pc, inst)) in InstIter::new(bytecode).with_pc().enumerate() {
        // We found a push, so we do some PC -> IC translation accounting, but we also check if
        // this push is coupled with the JUMPI we are interested in.
        // Check if Opcode is PUSH
        if (opcode::PUSH1..=opcode::PUSH32).contains(&inst.opcode.get()) {
            let Some(element) = source_map.get(ic) else {
                // NOTE(onbjerg): For some reason the last few bytes of the bytecode do not have
                // a source map associated, so at that point we just stop searching
                break;
            };
            // Check if we are in the source range we are interested in, and if the next opcode
            // is a JUMPI
            let next_pc = pc + inst.immediate.len() + 1;
            let push_size = inst.immediate.len();
            if bytecode.get(next_pc).copied() == Some(opcode::JUMPI)
                && is_in_source_range(element, loc)
            {
                // We do not support program counters bigger than u32.
                ensure!(push_size <= 4, "jump destination overflow");
                // Convert the push bytes for the second branch's PC to a u32
                // (big-endian, right-aligned into 4 bytes).
                let mut pc_bytes = [0u8; 4];
                pc_bytes[4 - push_size..].copy_from_slice(inst.immediate);
                let pc_jump = u32::from_be_bytes(pc_bytes);
                // Later matches overwrite earlier ones, so the last matching JUMPI wins.
                anchors = Some((
                    ItemAnchor {
                        item_id,
                        // The first branch is the opcode directly after JUMPI
                        instruction: (next_pc + 1) as u32,
                    },
                    ItemAnchor { item_id, instruction: pc_jump },
                ));
            }
        }
    }
    anchors.ok_or_else(|| eyre::eyre!("Could not detect branches in source: {}", loc))
}
/// Calculates whether `element` is within the range of the target `location`.
fn is_in_source_range(element: &SourceElement, location: &SourceLocation) -> bool {
    // Source IDs must match.
    if element.index_i32() != location.source_id as i32 {
        return false;
    }
    // Needed because some source ranges in the source map mark the entire contract...
    if element.offset() < location.bytes.start {
        return false;
    }
    // The two ranges must overlap (or at least touch).
    let range_start = location.bytes.start.max(element.offset());
    let range_end =
        (location.bytes.start + location.len()).min(element.offset() + element.length());
    range_start <= range_end
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/coverage/src/analysis.rs | crates/evm/coverage/src/analysis.rs | use super::{CoverageItem, CoverageItemKind, SourceLocation};
use alloy_primitives::map::HashMap;
use foundry_common::TestFunctionExt;
use foundry_compilers::ProjectCompileOutput;
use rayon::prelude::*;
use solar::{
ast::{self, ExprKind, ItemKind, StmtKind, yul},
data_structures::{Never, map::FxHashSet},
interface::{BytePos, Span},
sema::{Gcx, hir},
};
use std::{
ops::{ControlFlow, Range},
path::PathBuf,
sync::Arc,
};
/// A visitor that walks the AST of a single contract and finds coverage items.
#[derive(Clone)]
struct SourceVisitor<'gcx> {
    /// The source ID of the contract.
    source_id: u32,
    /// The solar global context, used for span resolution.
    gcx: Gcx<'gcx>,
    /// The name of the contract being walked.
    contract_name: Arc<str>,
    /// The current branch ID
    branch_id: u32,
    /// Coverage items
    items: Vec<CoverageItem>,
    // All line numbers seen while visiting; deduplicated and turned into Line items later.
    all_lines: Vec<u32>,
    // Spans of function calls collected during the AST walk.
    function_calls: Vec<Span>,
    // Set form of `function_calls`, built when resolving calls against the HIR.
    function_calls_set: FxHashSet<Span>,
}
// Snapshot of the visitor's collection lengths, used to roll back a partial visit.
struct SourceVisitorCheckpoint {
    items: usize,
    all_lines: usize,
    function_calls: usize,
}
impl<'gcx> SourceVisitor<'gcx> {
    /// Creates a fresh visitor for the given source ID.
    fn new(source_id: u32, gcx: Gcx<'gcx>) -> Self {
        Self {
            source_id,
            gcx,
            contract_name: Arc::default(),
            branch_id: 0,
            all_lines: Default::default(),
            function_calls: Default::default(),
            function_calls_set: Default::default(),
            items: Default::default(),
        }
    }
    /// Captures the current lengths of the collections so a visit can be rolled back.
    fn checkpoint(&self) -> SourceVisitorCheckpoint {
        SourceVisitorCheckpoint {
            items: self.items.len(),
            all_lines: self.all_lines.len(),
            function_calls: self.function_calls.len(),
        }
    }
    /// Truncates the collections back to a previously captured checkpoint.
    fn restore_checkpoint(&mut self, checkpoint: SourceVisitorCheckpoint) {
        let SourceVisitorCheckpoint { items, all_lines, function_calls } = checkpoint;
        self.items.truncate(items);
        self.all_lines.truncate(all_lines);
        self.function_calls.truncate(function_calls);
    }
    /// Walks a contract item, collecting coverage items into `self`.
    fn visit_contract<'ast>(&mut self, contract: &'ast ast::ItemContract<'ast>) {
        let _ = ast::Visit::visit_item_contract(self, contract);
    }
/// Returns `true` if the contract has any test functions.
fn has_tests(&self, checkpoint: &SourceVisitorCheckpoint) -> bool {
self.items[checkpoint.items..].iter().any(|item| {
if let CoverageItemKind::Function { name } = &item.kind {
name.is_any_test()
} else {
false
}
})
}
/// Disambiguate functions with the same name in the same contract.
fn disambiguate_functions(&mut self) {
let mut dups = HashMap::<_, Vec<usize>>::default();
for (i, item) in self.items.iter().enumerate() {
if let CoverageItemKind::Function { name } = &item.kind {
dups.entry(name.clone()).or_default().push(i);
}
}
for dups in dups.values() {
if dups.len() > 1 {
for (i, &dup) in dups.iter().enumerate() {
let item = &mut self.items[dup];
if let CoverageItemKind::Function { name } = &item.kind {
item.kind =
CoverageItemKind::Function { name: format!("{name}.{i}").into() };
}
}
}
}
}
fn resolve_function_calls(&mut self, hir_source_id: hir::SourceId) {
self.function_calls_set = self.function_calls.iter().copied().collect();
let _ = hir::Visit::visit_nested_source(self, hir_source_id);
}
fn sort(&mut self) {
self.items.sort();
}
fn push_lines(&mut self) {
self.all_lines.sort_unstable();
self.all_lines.dedup();
let mut lines = Vec::new();
for &line in &self.all_lines {
if let Some(reference_item) =
self.items.iter().find(|item| item.loc.lines.start == line)
{
lines.push(CoverageItem {
kind: CoverageItemKind::Line,
loc: reference_item.loc.clone(),
hits: 0,
});
}
}
self.items.extend(lines);
}
fn push_stmt(&mut self, span: Span) {
self.push_item_kind(CoverageItemKind::Statement, span);
}
/// Creates a coverage item for a given kind and source location. Pushes item to the internal
/// collection (plus additional coverage line if item is a statement).
fn push_item_kind(&mut self, kind: CoverageItemKind, span: Span) {
let item = CoverageItem { kind, loc: self.source_location_for(span), hits: 0 };
debug_assert!(!matches!(item.kind, CoverageItemKind::Line));
self.all_lines.push(item.loc.lines.start);
self.items.push(item);
}
fn source_location_for(&self, mut span: Span) -> SourceLocation {
// Statements' ranges in the solc source map do not include the semicolon.
if let Ok(snippet) = self.gcx.sess.source_map().span_to_snippet(span)
&& let Some(stripped) = snippet.strip_suffix(';')
{
let stripped = stripped.trim_end();
let skipped = snippet.len() - stripped.len();
span = span.with_hi(span.hi() - BytePos::from_usize(skipped));
}
SourceLocation {
source_id: self.source_id as usize,
contract_name: self.contract_name.clone(),
bytes: self.byte_range(span),
lines: self.line_range(span),
}
}
fn byte_range(&self, span: Span) -> Range<u32> {
let bytes_usize = self.gcx.sess.source_map().span_to_source(span).unwrap().data;
bytes_usize.start as u32..bytes_usize.end as u32
}
fn line_range(&self, span: Span) -> Range<u32> {
let lines = self.gcx.sess.source_map().span_to_lines(span).unwrap().data;
assert!(!lines.is_empty());
let first = lines.first().unwrap();
let last = lines.last().unwrap();
first.line_index as u32 + 1..last.line_index as u32 + 2
}
fn next_branch_id(&mut self) -> u32 {
let id = self.branch_id;
self.branch_id = id + 1;
id
}
}
impl<'ast> ast::Visit<'ast> for SourceVisitor<'_> {
    type BreakValue = Never;

    /// Records the contract name before walking its items.
    fn visit_item_contract(
        &mut self,
        contract: &'ast ast::ItemContract<'ast>,
    ) -> ControlFlow<Self::BreakValue> {
        self.contract_name = contract.name.as_str().into();
        self.walk_item_contract(contract)
    }

    /// Pushes a `Function` coverage item for each implemented function and walks its body.
    /// Non-function items are skipped entirely.
    #[expect(clippy::single_match)]
    fn visit_item(&mut self, item: &'ast ast::Item<'ast>) -> ControlFlow<Self::BreakValue> {
        match &item.kind {
            ItemKind::Function(func) => {
                // TODO: We currently can only detect empty bodies in normal functions, not any of
                // the other kinds: https://github.com/foundry-rs/foundry/issues/9458
                if func.kind != ast::FunctionKind::Function && !has_statements(func.body.as_ref()) {
                    return ControlFlow::Continue(());
                }
                // Unnamed functions are special kinds (constructor/receive/fallback).
                let name = func.header.name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
                    match func.kind {
                        ast::FunctionKind::Constructor => "constructor",
                        ast::FunctionKind::Receive => "receive",
                        ast::FunctionKind::Fallback => "fallback",
                        ast::FunctionKind::Function | ast::FunctionKind::Modifier => unreachable!(),
                    }
                });
                // Exclude function from coverage report if it is virtual without implementation.
                let exclude_func = func.header.virtual_() && !func.is_implemented();
                if !exclude_func {
                    self.push_item_kind(
                        CoverageItemKind::Function { name: name.into() },
                        item.span,
                    );
                }
                self.walk_item(item)?;
            }
            _ => {}
        }
        // Only walk functions.
        ControlFlow::Continue(())
    }

    /// Pushes `Statement` and `Branch` coverage items for Solidity statements.
    fn visit_stmt(&mut self, stmt: &'ast ast::Stmt<'ast>) -> ControlFlow<Self::BreakValue> {
        match &stmt.kind {
            StmtKind::Break | StmtKind::Continue | StmtKind::Emit(..) | StmtKind::Revert(..) => {
                self.push_stmt(stmt.span);
                // TODO(dani): these probably shouldn't be excluded.
                return ControlFlow::Continue(());
            }
            StmtKind::Return(_) | StmtKind::DeclSingle(_) | StmtKind::DeclMulti(..) => {
                self.push_stmt(stmt.span);
            }
            StmtKind::If(_cond, then_stmt, else_stmt) => {
                let branch_id = self.next_branch_id();
                // Add branch coverage items only if one of true/branch bodies contains statements.
                if stmt_has_statements(then_stmt)
                    || else_stmt.as_ref().is_some_and(|s| stmt_has_statements(s))
                {
                    // The branch instruction is mapped to the first opcode within the true
                    // body source range.
                    self.push_item_kind(
                        CoverageItemKind::Branch { branch_id, path_id: 0, is_first_opcode: true },
                        then_stmt.span,
                    );
                    if else_stmt.is_some() {
                        // We use `stmt.span`, which includes `else_stmt.span`, since we need to
                        // include the condition so that this can be marked as covered.
                        // Initially implemented in https://github.com/foundry-rs/foundry/pull/3094.
                        self.push_item_kind(
                            CoverageItemKind::Branch {
                                branch_id,
                                path_id: 1,
                                is_first_opcode: false,
                            },
                            stmt.span,
                        );
                    }
                }
            }
            StmtKind::Try(ast::StmtTry { expr: _, clauses }) => {
                let branch_id = self.next_branch_id();
                let mut path_id = 0;
                for catch in clauses.iter() {
                    let ast::TryCatchClause { span, name: _, args, block } = catch;
                    // The first clause (the `try` success path) extends to the whole statement.
                    let span = if path_id == 0 { stmt.span.to(*span) } else { *span };
                    if path_id == 0 || has_statements(Some(block)) {
                        self.push_item_kind(
                            CoverageItemKind::Branch { branch_id, path_id, is_first_opcode: true },
                            span,
                        );
                        path_id += 1;
                    } else if !args.is_empty() {
                        // Add coverage for clause with parameters and empty statements.
                        // (`catch (bytes memory reason) {}`).
                        // Catch all clause without statements is ignored (`catch {}`).
                        self.push_stmt(span);
                    }
                }
            }
            // Skip placeholder statements as they are never referenced in source maps.
            StmtKind::Assembly(_)
            | StmtKind::Block(_)
            | StmtKind::UncheckedBlock(_)
            | StmtKind::Placeholder
            | StmtKind::Expr(_)
            | StmtKind::While(..)
            | StmtKind::DoWhile(..)
            | StmtKind::For { .. } => {}
        }
        self.walk_stmt(stmt)
    }

    /// Pushes `Statement` items for assignments/unary/binary/ternary expressions and
    /// collects call spans; `require(...)` calls additionally get two branch items.
    fn visit_expr(&mut self, expr: &'ast ast::Expr<'ast>) -> ControlFlow<Self::BreakValue> {
        match &expr.kind {
            ExprKind::Assign(..)
            | ExprKind::Unary(..)
            | ExprKind::Binary(..)
            | ExprKind::Ternary(..) => {
                self.push_stmt(expr.span);
                if matches!(expr.kind, ExprKind::Binary(..)) {
                    return self.walk_expr(expr);
                }
            }
            ExprKind::Call(callee, _args) => {
                // Resolve later.
                self.function_calls.push(expr.span);
                if let ExprKind::Ident(ident) = &callee.kind {
                    // Might be a require call, add branch coverage.
                    // Asserts should not be considered branches: <https://github.com/foundry-rs/foundry/issues/9460>.
                    if ident.as_str() == "require" {
                        let branch_id = self.next_branch_id();
                        self.push_item_kind(
                            CoverageItemKind::Branch {
                                branch_id,
                                path_id: 0,
                                is_first_opcode: false,
                            },
                            expr.span,
                        );
                        self.push_item_kind(
                            CoverageItemKind::Branch {
                                branch_id,
                                path_id: 1,
                                is_first_opcode: false,
                            },
                            expr.span,
                        );
                    }
                }
            }
            _ => {}
        }
        // Intentionally do not walk all expressions.
        ControlFlow::Continue(())
    }

    /// Pushes coverage items for Yul (inline assembly) statements.
    fn visit_yul_stmt(&mut self, stmt: &'ast yul::Stmt<'ast>) -> ControlFlow<Self::BreakValue> {
        use yul::StmtKind;
        match &stmt.kind {
            StmtKind::VarDecl(..)
            | StmtKind::AssignSingle(..)
            | StmtKind::AssignMulti(..)
            | StmtKind::Leave
            | StmtKind::Break
            | StmtKind::Continue => {
                self.push_stmt(stmt.span);
                // Don't walk assignments.
                return ControlFlow::Continue(());
            }
            StmtKind::If(..) => {
                // Yul `if` has no else path, so only a single branch item is pushed.
                let branch_id = self.next_branch_id();
                self.push_item_kind(
                    CoverageItemKind::Branch { branch_id, path_id: 0, is_first_opcode: false },
                    stmt.span,
                );
            }
            StmtKind::For(yul::StmtFor { body, .. }) => {
                self.push_stmt(body.span);
            }
            StmtKind::Switch(switch) => {
                for case in switch.cases.iter() {
                    self.push_stmt(case.span);
                    self.push_stmt(case.body.span);
                }
            }
            StmtKind::FunctionDef(func) => {
                let name = func.name.as_str();
                self.push_item_kind(CoverageItemKind::Function { name: name.into() }, stmt.span);
            }
            // TODO(dani): merge with Block below on next solar release: https://github.com/paradigmxyz/solar/pull/496
            StmtKind::Expr(_) => {
                self.push_stmt(stmt.span);
                return ControlFlow::Continue(());
            }
            StmtKind::Block(_) => {}
        }
        self.walk_yul_stmt(stmt)
    }

    /// Pushes a `Statement` item for each Yul call expression.
    fn visit_yul_expr(&mut self, expr: &'ast yul::Expr<'ast>) -> ControlFlow<Self::BreakValue> {
        use yul::ExprKind;
        match &expr.kind {
            ExprKind::Path(_) | ExprKind::Lit(_) => {}
            ExprKind::Call(_) => self.push_stmt(expr.span),
        }
        // Intentionally do not walk all expressions.
        ControlFlow::Continue(())
    }
}
impl<'gcx> hir::Visit<'gcx> for SourceVisitor<'gcx> {
    type BreakValue = Never;

    fn hir(&self) -> &'gcx hir::Hir<'gcx> {
        &self.gcx.hir
    }

    /// Pushes a statement item for every call expression previously collected from the
    /// AST, but only if it is a "regular" call (not a struct constructor or a type
    /// conversion, which `is_regular_call` filters out).
    fn visit_expr(&mut self, expr: &'gcx hir::Expr<'gcx>) -> ControlFlow<Self::BreakValue> {
        if let hir::ExprKind::Call(lhs, ..) = &expr.kind
            && self.function_calls_set.contains(&expr.span)
            && is_regular_call(lhs)
        {
            self.push_stmt(expr.span);
        }
        self.walk_expr(expr)
    }
}
// https://github.com/argotorg/solidity/blob/965166317bbc2b02067eb87f222a2dce9d24e289/libsolidity/ast/ASTAnnotations.h#L336-L341
// https://github.com/argotorg/solidity/blob/965166317bbc2b02067eb87f222a2dce9d24e289/libsolidity/analysis/TypeChecker.cpp#L2720
/// Returns `true` unless the call target is a struct constructor call or a type
/// conversion (the two non-"function call" kinds in solc's classification above).
fn is_regular_call(lhs: &hir::Expr<'_>) -> bool {
    !matches!(
        lhs.peel_parens().kind,
        // StructConstructorCall
        hir::ExprKind::Ident([hir::Res::Item(hir::ItemId::Struct(_))])
        // TypeConversion
        | hir::ExprKind::Type(_)
    )
}
/// Returns `true` if `block` is present and contains at least one statement.
fn has_statements(block: Option<&ast::Block<'_>>) -> bool {
    match block {
        Some(block) => !block.is_empty(),
        None => false,
    }
}
fn stmt_has_statements(stmt: &ast::Stmt<'_>) -> bool {
match &stmt.kind {
StmtKind::Assembly(a) => !a.block.is_empty(),
StmtKind::Block(b) | StmtKind::UncheckedBlock(b) => has_statements(Some(b)),
_ => true,
}
}
/// Coverage source analysis.
#[derive(Clone, Debug, Default)]
pub struct SourceAnalysis {
    /// All the coverage items.
    all_items: Vec<CoverageItem>,
    /// Source ID to `(offset, len)` into `all_items`.
    ///
    /// Indexed by source ID; sources without items keep `(u32::MAX, 0)` (see `new`).
    map: Vec<(u32, u32)>,
}
impl SourceAnalysis {
    /// Analyzes contracts in the sources held by the source analyzer.
    ///
    /// Coverage items are found by:
    /// - Walking the AST of each contract (except interfaces)
    /// - Recording the items of each contract
    ///
    /// Each coverage item contains relevant information to find opcodes corresponding to them: the
    /// source ID the item is in, the source code range of the item, and the contract name the item
    /// is in.
    ///
    /// Note: Source IDs are only unique per compilation job; that is, a code base compiled with
    /// two different solc versions will produce overlapping source IDs if the compiler version is
    /// not taken into account.
    #[instrument(name = "SourceAnalysis::new", skip_all)]
    pub fn new(data: &SourceFiles, output: &ProjectCompileOutput) -> eyre::Result<Self> {
        // Visit every source in parallel, producing per-source item lists.
        let mut sourced_items = output.parser().solc().compiler().enter(|compiler| {
            data.sources
                .par_iter()
                .map(|(&source_id, path)| {
                    let _guard = debug_span!("SourceAnalysis::new::visit", ?path).entered();
                    let (_, source) = compiler.gcx().get_ast_source(path).unwrap();
                    let ast = source.ast.as_ref().unwrap();
                    let (hir_source_id, _) = compiler.gcx().get_hir_source(path).unwrap();
                    let mut visitor = SourceVisitor::new(source_id, compiler.gcx());
                    for item in ast.items.iter() {
                        // Visit only top-level contracts.
                        let ItemKind::Contract(contract) = &item.kind else { continue };
                        // Skip interfaces which have no function implementations.
                        if contract.kind.is_interface() {
                            continue;
                        }
                        let checkpoint = visitor.checkpoint();
                        visitor.visit_contract(contract);
                        // Contracts containing tests are excluded from coverage entirely.
                        if visitor.has_tests(&checkpoint) {
                            visitor.restore_checkpoint(checkpoint);
                        }
                    }
                    if !visitor.function_calls.is_empty() {
                        visitor.resolve_function_calls(hir_source_id);
                    }
                    if !visitor.items.is_empty() {
                        visitor.disambiguate_functions();
                        // Sort before synthesizing line items, then re-sort to place them.
                        visitor.sort();
                        visitor.push_lines();
                        visitor.sort();
                    }
                    (source_id, visitor.items)
                })
                .collect::<Vec<(u32, Vec<CoverageItem>)>>()
        });

        // Create mapping and merge items.
        sourced_items.sort_by_key(|(id, items)| (*id, items.first().map(|i| i.loc.bytes.start)));
        let Some(&(max_idx, _)) = sourced_items.last() else { return Ok(Self::default()) };
        let len = max_idx + 1;
        let mut all_items = Vec::new();
        // `u32::MAX` marks "offset not yet assigned" for a source ID.
        let mut map = vec![(u32::MAX, 0); len as usize];
        for (idx, items) in sourced_items {
            // Assumes that all `idx` items are consecutive, guaranteed by the sort above.
            let idx = idx as usize;
            if map[idx].0 == u32::MAX {
                map[idx].0 = all_items.len() as u32;
            }
            map[idx].1 += items.len() as u32;
            all_items.extend(items);
        }

        Ok(Self { all_items, map })
    }

    /// Returns all the coverage items.
    pub fn all_items(&self) -> &[CoverageItem] {
        &self.all_items
    }

    /// Returns all the mutable coverage items.
    pub fn all_items_mut(&mut self) -> &mut Vec<CoverageItem> {
        &mut self.all_items
    }

    /// Returns an iterator over the coverage items and their IDs for the given source.
    pub fn items_for_source_enumerated(
        &self,
        source_id: u32,
    ) -> impl Iterator<Item = (u32, &CoverageItem)> {
        let (base_id, items) = self.items_for_source(source_id);
        items.iter().enumerate().map(move |(idx, item)| (base_id + idx as u32, item))
    }

    /// Returns the base item ID and all the coverage items for the given source.
    ///
    /// Unknown source IDs or sources without items yield an empty slice.
    pub fn items_for_source(&self, source_id: u32) -> (u32, &[CoverageItem]) {
        let (mut offset, len) = self.map.get(source_id as usize).copied().unwrap_or_default();
        // `u32::MAX` is the "no items" sentinel set in `new`; `len` is 0 in that case.
        if offset == u32::MAX {
            offset = 0;
        }
        (offset, &self.all_items[offset as usize..][..len as usize])
    }

    /// Returns the coverage item for the given item ID.
    #[inline]
    pub fn get(&self, item_id: u32) -> Option<&CoverageItem> {
        self.all_items.get(item_id as usize)
    }
}
/// A list of versioned sources and their ASTs.
#[derive(Default)]
pub struct SourceFiles {
    /// The versioned sources, keyed by source ID.
    pub sources: HashMap<u32, PathBuf>,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/coverage/src/inspector.rs | crates/evm/coverage/src/inspector.rs | use crate::{HitMap, HitMaps};
use alloy_primitives::B256;
use revm::{
Inspector,
context::ContextTr,
inspector::JournalExt,
interpreter::{Interpreter, interpreter_types::Jumps},
};
use std::ptr::NonNull;
/// Inspector implementation for collecting coverage information.
#[derive(Clone, Debug)]
pub struct LineCoverageCollector {
    // NOTE: `current_map` is always a valid reference into `maps`.
    // It is accessed only through `get_or_insert_map` which guarantees that it's valid.
    // Both of these fields are unsafe to access directly outside of `*insert_map`.
    current_map: NonNull<HitMap>,
    current_hash: B256,
    /// All hit maps collected so far, keyed by bytecode hash.
    maps: HitMaps,
}
// SAFETY: See comments on `current_map`: the raw pointer is only dereferenced via
// `get_or_insert_map`, which re-derives it from `maps` whenever the bytecode hash
// changes, so the pointee is always owned by this value.
unsafe impl Send for LineCoverageCollector {}
unsafe impl Sync for LineCoverageCollector {}
impl Default for LineCoverageCollector {
fn default() -> Self {
Self {
current_map: NonNull::dangling(),
current_hash: B256::ZERO,
maps: Default::default(),
}
}
}
impl<CTX> Inspector<CTX> for LineCoverageCollector
where
    CTX: ContextTr<Journal: JournalExt>,
{
    /// Called when the interpreter starts executing bytecode; pre-sizes the hit map.
    fn initialize_interp(&mut self, interpreter: &mut Interpreter, _context: &mut CTX) {
        let map = self.get_or_insert_map(interpreter);
        // Reserve some space early to avoid reallocating too often.
        map.reserve(8192.min(interpreter.bytecode.len()));
    }

    /// Records a hit for the current program counter on every executed instruction.
    fn step(&mut self, interpreter: &mut Interpreter, _context: &mut CTX) {
        let map = self.get_or_insert_map(interpreter);
        map.hit(interpreter.bytecode.pc() as u32);
    }
}
impl LineCoverageCollector {
    /// Finish collecting coverage information and return the [`HitMaps`].
    pub fn finish(self) -> HitMaps {
        self.maps
    }

    /// Gets the hit map for the current contract, or inserts a new one if it doesn't exist.
    ///
    /// The map is stored in `current_map` and returned as a mutable reference.
    /// See comments on `current_map` for more details.
    #[inline]
    fn get_or_insert_map(&mut self, interpreter: &mut Interpreter) -> &mut HitMap {
        let hash = interpreter.bytecode.get_or_calculate_hash();
        // Fast path: same bytecode as the previous step, pointer is still valid.
        if self.current_hash != *hash {
            self.insert_map(interpreter);
        }
        // SAFETY: See comments on `current_map`.
        unsafe { self.current_map.as_mut() }
    }

    /// Slow path: looks up (or creates) the hit map for the interpreter's bytecode hash
    /// and caches a raw pointer to it in `current_map`.
    #[cold]
    #[inline(never)]
    fn insert_map(&mut self, interpreter: &mut Interpreter) {
        let hash = interpreter.bytecode.hash().unwrap();
        self.current_hash = hash;
        // Converts the mutable reference to a `NonNull` pointer.
        self.current_map = self
            .maps
            .entry(hash)
            .or_insert_with(|| HitMap::new(interpreter.bytecode.original_bytes()))
            .into();
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/lib.rs | crates/evm/fuzz/src/lib.rs | //! # foundry-evm-fuzz
//!
//! EVM fuzzing implementation using [`proptest`].
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate tracing;
use alloy_dyn_abi::{DynSolValue, JsonAbiExt};
use alloy_primitives::{
Address, Bytes, Log, U256,
map::{AddressHashMap, HashMap},
};
use foundry_common::{calc, contracts::ContractsByAddress};
use foundry_evm_core::Breakpoints;
use foundry_evm_coverage::HitMaps;
use foundry_evm_traces::{CallTraceArena, SparsedTraceArena};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::{fmt, sync::Arc};
pub use proptest::test_runner::{Config as FuzzConfig, Reason};
mod error;
pub use error::FuzzError;
pub mod invariant;
pub mod strategies;
pub use strategies::LiteralMaps;
mod inspector;
pub use inspector::Fuzzer;
/// Details of a transaction generated by fuzz strategy for fuzzing a target.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BasicTxDetails {
    /// Time (in seconds) to increase block timestamp before executing the tx.
    pub warp: Option<U256>,
    /// Number to increase block number before executing the tx.
    pub roll: Option<U256>,
    /// Transaction sender address.
    pub sender: Address,
    /// Transaction call details.
    pub call_details: CallDetails,
}
/// Call details of a transaction generated to fuzz.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CallDetails {
    /// Address of target contract.
    pub target: Address,
    /// The data of the transaction.
    pub calldata: Bytes,
}
/// A counter example found for a failing fuzz or invariant test.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[expect(clippy::large_enum_variant)]
pub enum CounterExample {
    /// Call used as a counter example for fuzz tests.
    Single(BaseCounterExample),
    /// Original sequence size and sequence of calls used as a counter example for invariant tests.
    Sequence(usize, Vec<BaseCounterExample>),
}
/// A single call of a counter example, with optionally decoded call metadata and traces.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BaseCounterExample {
    /// Amount to increase block timestamp.
    pub warp: Option<U256>,
    /// Amount to increase block number.
    pub roll: Option<U256>,
    /// Address which makes the call.
    pub sender: Option<Address>,
    /// Address to which to call to.
    pub addr: Option<Address>,
    /// The data to provide.
    pub calldata: Bytes,
    /// Contract name if it exists.
    pub contract_name: Option<String>,
    /// Function name if it exists.
    pub func_name: Option<String>,
    /// Function signature if it exists.
    pub signature: Option<String>,
    /// Pretty formatted args used to call the function.
    pub args: Option<String>,
    /// Unformatted args used to call the function.
    pub raw_args: Option<String>,
    /// Counter example traces.
    #[serde(skip)]
    pub traces: Option<SparsedTraceArena>,
    /// Whether to display sequence as solidity.
    #[serde(skip)]
    pub show_solidity: bool,
}
impl BaseCounterExample {
    /// Creates counter example representing a step from invariant call sequence.
    ///
    /// If the target contract and called function can be resolved from `contracts` and
    /// the calldata decodes successfully, the contract/function metadata and formatted
    /// arguments are attached; otherwise a bare counter example with only the raw
    /// calldata is returned.
    pub fn from_invariant_call(
        tx: &BasicTxDetails,
        contracts: &ContractsByAddress,
        traces: Option<SparsedTraceArena>,
        show_solidity: bool,
    ) -> Self {
        let sender = tx.sender;
        let target = tx.call_details.target;
        let bytes = &tx.call_details.calldata;
        let warp = tx.warp;
        let roll = tx.roll;
        // Guard against calldata shorter than a 4-byte selector: indexing `bytes[..4]`
        // below would panic otherwise (e.g. for a bare call with empty calldata).
        if bytes.len() >= 4
            && let Some((name, abi)) = &contracts.get(&target)
            && let Some(func) = abi.functions().find(|f| f.selector() == bytes[..4])
        {
            // skip the function selector when decoding
            if let Ok(args) = func.abi_decode_input(&bytes[4..]) {
                return Self {
                    warp,
                    roll,
                    sender: Some(sender),
                    addr: Some(target),
                    calldata: bytes.clone(),
                    contract_name: Some(name.clone()),
                    func_name: Some(func.name.clone()),
                    signature: Some(func.signature()),
                    args: Some(foundry_common::fmt::format_tokens(&args).format(", ").to_string()),
                    raw_args: Some(
                        foundry_common::fmt::format_tokens_raw(&args).format(", ").to_string(),
                    ),
                    traces,
                    show_solidity,
                };
            }
        }

        // Fallback: undecodable call — keep only the raw details.
        Self {
            warp,
            roll,
            sender: Some(sender),
            addr: Some(target),
            calldata: bytes.clone(),
            contract_name: None,
            func_name: None,
            signature: None,
            args: None,
            raw_args: None,
            traces,
            show_solidity: false,
        }
    }

    /// Creates counter example for a fuzz test failure.
    pub fn from_fuzz_call(
        bytes: Bytes,
        args: Vec<DynSolValue>,
        traces: Option<SparsedTraceArena>,
    ) -> Self {
        Self {
            warp: None,
            roll: None,
            sender: None,
            addr: None,
            calldata: bytes,
            contract_name: None,
            func_name: None,
            signature: None,
            args: Some(foundry_common::fmt::format_tokens(&args).format(", ").to_string()),
            raw_args: Some(foundry_common::fmt::format_tokens_raw(&args).format(", ").to_string()),
            traces,
            show_solidity: false,
        }
    }
}
impl fmt::Display for BaseCounterExample {
    /// Formats the counter example either as a reproducible Solidity snippet (when
    /// `show_solidity` is set and all metadata is available) or as a one-line summary.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Display counterexample as solidity.
        if self.show_solidity
            && let (Some(sender), Some(contract), Some(address), Some(func_name), Some(args)) =
                (&self.sender, &self.contract_name, &self.addr, &self.func_name, &self.raw_args)
        {
            if let Some(warp) = &self.warp {
                writeln!(f, "\t\tvm.warp(block.timestamp + {warp});")?;
            }
            if let Some(roll) = &self.roll {
                writeln!(f, "\t\tvm.roll(block.number + {roll});")?;
            }
            writeln!(f, "\t\tvm.prank({sender});")?;
            write!(
                f,
                "\t\t{}({}).{}({});",
                // Strip the `path:` prefix from `path:ContractName` identifiers.
                contract.split_once(':').map_or(contract.as_str(), |(_, contract)| contract),
                address,
                func_name,
                args
            )?;
            return Ok(());
        }

        // Regular counterexample display.
        if let Some(sender) = self.sender {
            write!(f, "\t\tsender={sender} addr=")?
        }
        if let Some(name) = &self.contract_name {
            write!(f, "[{name}]")?
        }
        if let Some(addr) = &self.addr {
            write!(f, "{addr} ")?
        }
        if let Some(warp) = &self.warp {
            write!(f, "warp={warp} ")?;
        }
        if let Some(roll) = &self.roll {
            write!(f, "roll={roll} ")?;
        }
        // Prefer the decoded signature over raw calldata when available.
        if let Some(sig) = &self.signature {
            write!(f, "calldata={sig}")?
        } else {
            write!(f, "calldata={}", &self.calldata)?
        }
        if let Some(args) = &self.args {
            write!(f, " args=[{args}]")
        } else {
            write!(f, " args=[]")
        }
    }
}
/// The outcome of a fuzz test
#[derive(Debug, Default)]
pub struct FuzzTestResult {
    /// we keep this for the debugger
    pub first_case: FuzzCase,
    /// Gas usage (gas_used, call_stipend) per cases
    pub gas_by_case: Vec<(u64, u64)>,
    /// Whether the test case was successful. This means that the transaction executed
    /// properly, or that there was a revert and that the test was expected to fail
    /// (prefixed with `testFail`)
    pub success: bool,
    /// Whether the test case was skipped. `reason` will contain the skip reason, if any.
    pub skipped: bool,
    /// If there was a revert, this field will be populated. Note that the test can
    /// still be successful (i.e self.success == true) when it's expected to fail.
    pub reason: Option<String>,
    /// Minimal reproduction test case for failing fuzz tests
    pub counterexample: Option<CounterExample>,
    /// Any captured & parsed as strings logs along the test's execution which should
    /// be printed to the user.
    pub logs: Vec<Log>,
    /// Labeled addresses
    pub labels: AddressHashMap<String>,
    /// Exemplary traces for a fuzz run of the test function
    ///
    /// **Note** We only store a single trace of a successful fuzz call, otherwise we would get
    /// `num(fuzz_cases)` traces, one for each run, which is neither helpful nor performant.
    pub traces: Option<SparsedTraceArena>,
    /// Additional traces used for gas report construction.
    /// Those traces should not be displayed.
    pub gas_report_traces: Vec<CallTraceArena>,
    /// Raw line coverage info
    pub line_coverage: Option<HitMaps>,
    /// Breakpoints for debugger. Correspond to the same fuzz case as `traces`.
    pub breakpoints: Option<Breakpoints>,
    /// Deprecated cheatcodes mapped to their replacements.
    pub deprecated_cheatcodes: HashMap<&'static str, Option<&'static str>>,
    /// Number of failed replays from persisted corpus.
    pub failed_corpus_replays: usize,
}
impl FuzzTestResult {
    /// Returns the median gas of all test cases
    pub fn median_gas(&self, with_stipend: bool) -> u64 {
        let mut values = self.gas_values(with_stipend);
        // `median_sorted` requires the values to be in ascending order.
        values.sort_unstable();
        calc::median_sorted(&values)
    }

    /// Returns the average gas use of all test cases
    pub fn mean_gas(&self, with_stipend: bool) -> u64 {
        let mut values = self.gas_values(with_stipend);
        // NOTE(review): sorting looks unnecessary for computing a mean — presumably kept
        // for symmetry with `median_gas`; confirm against `calc::mean` before removing.
        values.sort_unstable();
        calc::mean(&values)
    }

    /// Gas usage per case; when `with_stipend` is false the call stipend is subtracted
    /// (saturating at 0).
    fn gas_values(&self, with_stipend: bool) -> Vec<u64> {
        self.gas_by_case
            .iter()
            .map(|gas| if with_stipend { gas.0 } else { gas.0.saturating_sub(gas.1) })
            .collect()
    }
}
/// Data of a single fuzz test case
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct FuzzCase {
    /// The calldata used for this fuzz test
    pub calldata: Bytes,
    /// Consumed gas
    pub gas: u64,
    /// The initial gas stipend for the transaction
    pub stipend: u64,
}
/// Container type for all successful test cases
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(transparent)]
pub struct FuzzedCases {
    /// Cases sorted by gas usage (ascending); see `FuzzedCases::new`.
    cases: Vec<FuzzCase>,
}
impl FuzzedCases {
    /// Creates a new collection, sorting the cases by gas usage (ascending).
    pub fn new(mut cases: Vec<FuzzCase>) -> Self {
        cases.sort_by_key(|c| c.gas);
        Self { cases }
    }

    /// Returns the cases, sorted by gas usage (ascending).
    pub fn cases(&self) -> &[FuzzCase] {
        &self.cases
    }

    /// Consumes the collection and returns the cases.
    pub fn into_cases(self) -> Vec<FuzzCase> {
        self.cases
    }

    /// Get the last [FuzzCase]
    pub fn last(&self) -> Option<&FuzzCase> {
        self.cases.last()
    }

    /// Returns the median gas of all test cases
    pub fn median_gas(&self, with_stipend: bool) -> u64 {
        let mut values = self.gas_values(with_stipend);
        // `median_sorted` requires the values to be in ascending order.
        values.sort_unstable();
        calc::median_sorted(&values)
    }

    /// Returns the average gas use of all test cases
    pub fn mean_gas(&self, with_stipend: bool) -> u64 {
        let mut values = self.gas_values(with_stipend);
        values.sort_unstable();
        calc::mean(&values)
    }

    /// Gas usage per case; when `with_stipend` is false the call stipend is subtracted
    /// (saturating, so a stipend larger than the recorded gas yields 0).
    fn gas_values(&self, with_stipend: bool) -> Vec<u64> {
        self.cases
            .iter()
            .map(|c| if with_stipend { c.gas } else { c.gas.saturating_sub(c.stipend) })
            .collect()
    }

    /// Returns the case with the highest gas usage
    pub fn highest(&self) -> Option<&FuzzCase> {
        self.cases.last()
    }

    /// Returns the case with the lowest gas usage
    pub fn lowest(&self) -> Option<&FuzzCase> {
        self.cases.first()
    }

    /// Returns the highest amount of gas spent on a fuzz case
    pub fn highest_gas(&self, with_stipend: bool) -> u64 {
        self.highest()
            // Use `saturating_sub` for consistency with `gas_values`: a stipend larger
            // than the recorded gas must not underflow (and panic in debug builds).
            .map(|c| if with_stipend { c.gas } else { c.gas.saturating_sub(c.stipend) })
            .unwrap_or_default()
    }

    /// Returns the lowest amount of gas spent on a fuzz case
    pub fn lowest_gas(&self) -> u64 {
        self.lowest().map(|c| c.gas).unwrap_or_default()
    }
}
/// Fixtures to be used for fuzz tests.
///
/// The key represents name of the fuzzed parameter, value holds possible fuzzed values.
/// For example, for a fixture function declared as
/// `function fixture_sender() external returns (address[] memory senders)`
/// the fuzz fixtures will contain `sender` key with `senders` array as value
#[derive(Clone, Default, Debug)]
pub struct FuzzFixtures {
    /// Fixture values keyed by normalized parameter name; `Arc` makes clones cheap.
    inner: Arc<HashMap<String, DynSolValue>>,
}
impl FuzzFixtures {
    /// Wraps the given fixture map, sharing it behind an `Arc`.
    pub fn new(fixtures: HashMap<String, DynSolValue>) -> Self {
        Self { inner: Arc::new(fixtures) }
    }

    /// Returns configured fixtures for `param_name` fuzzed parameter.
    ///
    /// The parameter name is normalized before lookup; the stored value must be a
    /// (fixed) array for fixtures to be returned.
    pub fn param_fixtures(&self, param_name: &str) -> Option<&[DynSolValue]> {
        let fixtures = self.inner.get(&normalize_fixture(param_name))?;
        fixtures.as_fixed_array().or_else(|| fixtures.as_array())
    }
}
/// Extracts fixture name from a function name.
/// For example: fixtures defined in `fixture_Owner` function will be applied for `owner` parameter.
///
/// # Panics
///
/// Panics if `function_name` does not start with the `fixture` prefix; callers must only
/// pass names already identified as fixture functions.
pub fn fixture_name(function_name: String) -> String {
    let stripped = function_name
        .strip_prefix("fixture")
        // Carry the invariant in the panic message instead of a bare `unwrap`.
        .expect("fixture function name must start with `fixture`");
    normalize_fixture(stripped)
}

/// Normalize fixture parameter name, for example `_Owner` to `owner`.
fn normalize_fixture(param_name: &str) -> String {
    param_name.trim_matches('_').to_ascii_lowercase()
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/error.rs | crates/evm/fuzz/src/error.rs | //! Errors related to fuzz tests.
use proptest::test_runner::Reason;
/// Possible errors when running fuzz tests
#[derive(Debug, thiserror::Error)]
pub enum FuzzError {
    /// A `vm.assume` cheatcode rejected the current input.
    #[error("`vm.assume` reject")]
    AssumeReject,
    /// `vm.assume` rejected more inputs than the configured limit (the contained value).
    #[error("`vm.assume` rejected too many inputs ({0} allowed)")]
    TooManyRejects(u32),
}
impl From<FuzzError> for Reason {
fn from(error: FuzzError) -> Self {
error.to_string().into()
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/inspector.rs | crates/evm/fuzz/src/inspector.rs | use crate::{invariant::RandomCallGenerator, strategies::EvmFuzzState};
use foundry_common::mapping_slots::step as mapping_step;
use revm::{
Inspector,
context::{ContextTr, Transaction},
inspector::JournalExt,
interpreter::{CallInput, CallInputs, CallOutcome, CallScheme, Interpreter},
};
/// An inspector that can fuzz and collect data for that effect.
#[derive(Clone, Debug)]
pub struct Fuzzer {
    /// If set, it collects `stack` and `memory` values for fuzzing purposes.
    /// Toggled on at call boundaries and off again after one collection (see `step`).
    pub collect: bool,
    /// Given a strategy, it generates a random call.
    pub call_generator: Option<RandomCallGenerator>,
    /// If `collect` is set, we store the collected values in this fuzz dictionary.
    pub fuzz_state: EvmFuzzState,
}
impl<CTX> Inspector<CTX> for Fuzzer
where
    CTX: ContextTr<Journal: JournalExt>,
{
    /// Collects stack/memory data on the first step after a call boundary.
    #[inline]
    fn step(&mut self, interp: &mut Interpreter, _context: &mut CTX) {
        // We only collect `stack` and `memory` data before and after calls.
        if self.collect {
            self.collect_data(interp);
            if let Some(mapping_slots) = &mut self.fuzz_state.mapping_slots {
                mapping_step(mapping_slots, interp);
            }
        }
    }

    /// Possibly overrides an outgoing call with a randomly generated one, and arms
    /// data collection for the next step.
    fn call(&mut self, ecx: &mut CTX, inputs: &mut CallInputs) -> Option<CallOutcome> {
        // We don't want to override the very first call made to the test contract.
        if self.call_generator.is_some() && ecx.tx().caller() != inputs.caller {
            self.override_call(inputs);
        }

        // We only collect `stack` and `memory` data before and after calls.
        // this will be turned off on the next `step`
        self.collect = true;

        None
    }

    /// Resets the call-generator's "used" flag and re-arms data collection.
    fn call_end(&mut self, _context: &mut CTX, _inputs: &CallInputs, _outcome: &mut CallOutcome) {
        if let Some(ref mut call_generator) = self.call_generator {
            call_generator.used = false;
        }

        // We only collect `stack` and `memory` data before and after calls.
        // this will be turned off on the next `step`
        self.collect = true;
    }
}
impl Fuzzer {
    /// Collects `stack` and `memory` values into the fuzz dictionary.
    #[cold]
    fn collect_data(&mut self, interpreter: &Interpreter) {
        self.fuzz_state.collect_values(interpreter.stack.data().iter().copied().map(Into::into));

        // TODO: disabled for now since it's flooding the dictionary
        // for index in 0..interpreter.shared_memory.len() / 32 {
        //     let mut slot = [0u8; 32];
        //     slot.clone_from_slice(interpreter.shared_memory.get_slice(index * 32, 32));

        //     state.insert(slot);
        // }

        // One-shot: disarm collection until the next call boundary re-arms it.
        self.collect = false;
    }

    /// Overrides an external call and tries to call any method of msg.sender.
    fn override_call(&mut self, call: &mut CallInputs) {
        if let Some(ref mut call_generator) = self.call_generator {
            // We only override external calls which are not coming from the test contract.
            if call.caller != call_generator.test_address
                && call.scheme == CallScheme::Call
                && !call_generator.used
            {
                // There's only a 30% chance that an override happens.
                if let Some(tx) = call_generator.next(call.caller, call.target_address) {
                    call.input = CallInput::Bytes(tx.call_details.calldata.0.into());
                    call.caller = tx.sender;
                    call.target_address = tx.call_details.target;
                    // TODO: in what scenarios can the following be problematic
                    call.bytecode_address = tx.call_details.target;
                    // Mark as used until `call_end` resets it, so at most one override
                    // happens per call frame.
                    call_generator.used = true;
                }
            }
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/invariant/filters.rs | crates/evm/fuzz/src/invariant/filters.rs | use alloy_json_abi::{Function, JsonAbi};
use alloy_primitives::{Address, Selector};
use foundry_compilers::ArtifactId;
use foundry_evm_core::utils::get_function;
use std::collections::BTreeMap;
/// Contains which contracts are to be targeted or excluded on an invariant test through their
/// artifact identifiers.
#[derive(Default)]
pub struct ArtifactFilters {
    /// List of `contract_path:contract_name` identifiers along with selectors which are to be
    /// targeted. If the list of selectors for an identifier is not empty, only those functions
    /// are targeted.
    pub targeted: BTreeMap<String, Vec<Selector>>,
    /// List of `contract_path:contract_name` identifiers which are to be excluded.
    pub excluded: Vec<String>,
}
impl ArtifactFilters {
    /// Returns `true` if the given identifier matches this filter.
    ///
    /// An identifier matches when it is targeted (or no target list was given at all)
    /// and it is not excluded.
    pub fn matches(&self, identifier: &str) -> bool {
        // No emptiness check is needed for the exclusion list: `!any(..)` on an empty
        // iterator is already `true`.
        (self.targeted.is_empty() || self.targeted.contains_key(identifier))
            && !self.excluded.iter().any(|id| id == identifier)
    }

    /// Gets all the targeted functions from `artifact`. Returns an error if selectors do not
    /// match the `artifact`.
    ///
    /// An empty vector means that it targets any mutable function.
    pub fn get_targeted_functions(
        &self,
        artifact: &ArtifactId,
        abi: &JsonAbi,
    ) -> eyre::Result<Option<Vec<Function>>> {
        // `identifier()` allocates a fresh `String`; compute it once instead of per check.
        let identifier = artifact.identifier();
        if let Some(selectors) = self.targeted.get(&identifier) {
            let functions = selectors
                .iter()
                .map(|selector| get_function(&artifact.name, *selector, abi).cloned())
                .collect::<eyre::Result<Vec<_>>>()?;
            // Precedence: targetArtifactSelectors > excludeArtifacts > targetArtifacts.
            if functions.is_empty() && self.excluded.contains(&identifier) {
                return Ok(None);
            }
            return Ok(Some(functions));
        }
        // If no contract is specifically targeted, and this contract is not excluded, then
        // accept all functions.
        if self.targeted.is_empty() && !self.excluded.contains(&identifier) {
            return Ok(Some(vec![]));
        }
        Ok(None)
    }
}
/// Filter for acceptable senders to use for invariant testing. Exclusion takes priority if
/// clashing.
///
/// `address(0)` is excluded by default.
#[derive(Default)]
pub struct SenderFilters {
    /// Senders allowed to be used; empty means no restriction.
    pub targeted: Vec<Address>,
    /// Senders that must never be used (always contains `address(0)` when built via `new`).
    pub excluded: Vec<Address>,
}
impl SenderFilters {
    /// Creates a new sender filter set, forcing `address(0)` into the exclusion list and
    /// dropping every targeted sender that is also excluded (exclusion takes priority).
    pub fn new(mut targeted: Vec<Address>, mut excluded: Vec<Address>) -> Self {
        // `address(0)` must never be used as a sender.
        if !excluded.contains(&Address::ZERO) {
            excluded.push(Address::ZERO);
        }
        // Exclusion wins over targeting on clashes.
        targeted.retain(|addr| !excluded.contains(addr));
        Self { targeted, excluded }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/invariant/mod.rs | crates/evm/fuzz/src/invariant/mod.rs | use alloy_json_abi::{Function, JsonAbi};
use alloy_primitives::{Address, Selector, map::HashMap};
use foundry_compilers::artifacts::StorageLayout;
use itertools::Either;
use parking_lot::Mutex;
use std::{collections::BTreeMap, sync::Arc};
mod call_override;
pub use call_override::RandomCallGenerator;
mod filters;
use crate::BasicTxDetails;
pub use filters::{ArtifactFilters, SenderFilters};
use foundry_common::{ContractsByAddress, ContractsByArtifact};
use foundry_evm_core::utils::{StateChangeset, get_function};
/// Contracts identified as targets during a fuzz run.
///
/// During execution, any newly created contract is added as target and used through the rest of
/// the fuzz run if the collection is updatable (no `targetContract` specified in `setUp`).
#[derive(Clone, Debug)]
pub struct FuzzRunIdentifiedContracts {
    /// Contracts identified as targets during a fuzz run.
    /// Shared behind `Arc<Mutex<..>>` so concurrent invariant workers see the same set.
    pub targets: Arc<Mutex<TargetedContracts>>,
    /// Whether target contracts are updatable or not.
    pub is_updatable: bool,
}
impl FuzzRunIdentifiedContracts {
    /// Creates a new `FuzzRunIdentifiedContracts` instance.
    pub fn new(targets: TargetedContracts, is_updatable: bool) -> Self {
        Self { targets: Arc::new(Mutex::new(targets)), is_updatable }
    }

    /// If targets are updatable, collects all contracts created during an invariant run which
    /// haven't been discovered yet: they are registered as new targets and their addresses are
    /// appended to `created_contracts`.
    pub fn collect_created_contracts(
        &self,
        state_changeset: &StateChangeset,
        project_contracts: &ContractsByArtifact,
        setup_contracts: &ContractsByAddress,
        artifact_filters: &ArtifactFilters,
        created_contracts: &mut Vec<Address>,
    ) -> eyre::Result<()> {
        // Nothing to do when the target set was pinned in `setUp`.
        if !self.is_updatable {
            return Ok(());
        }
        let mut targets = self.targets.lock();
        for (address, account) in state_changeset {
            // Skip contracts already known from the setup phase and untouched accounts.
            if setup_contracts.contains_key(address) || !account.is_touched() {
                continue;
            }
            // Only accounts holding non-empty code can be newly created contracts.
            let Some(code) = account.info.code.as_ref().filter(|c| !c.is_empty()) else {
                continue;
            };
            // Identify the artifact by its deployed bytecode.
            let Some((artifact, contract)) =
                project_contracts.find_by_deployed_code(code.original_byte_slice())
            else {
                continue;
            };
            // Respect the target/exclude artifact filters.
            let Some(functions) =
                artifact_filters.get_targeted_functions(artifact, &contract.abi)?
            else {
                continue;
            };
            created_contracts.push(*address);
            targets.insert(
                *address,
                TargetedContract {
                    identifier: artifact.name.clone(),
                    abi: contract.abi.clone(),
                    targeted_functions: functions,
                    excluded_functions: Vec::new(),
                    storage_layout: contract.storage_layout.as_ref().map(Arc::clone),
                },
            );
        }
        Ok(())
    }

    /// Clears targeted contracts created during an invariant run.
    pub fn clear_created_contracts(&self, created_contracts: Vec<Address>) {
        if created_contracts.is_empty() {
            return;
        }
        let mut targets = self.targets.lock();
        for addr in &created_contracts {
            targets.remove(addr);
        }
    }
}
/// A collection of contracts identified as targets for invariant testing.
///
/// Derefs to the inner `BTreeMap` for direct map access.
#[derive(Clone, Debug, Default)]
pub struct TargetedContracts {
    /// The inner map of targeted contracts, keyed by contract address.
    pub inner: BTreeMap<Address, TargetedContract>,
}
impl TargetedContracts {
/// Returns a new `TargetedContracts` instance.
pub fn new() -> Self {
Self::default()
}
/// Returns fuzzed contract abi and fuzzed function from address and provided calldata.
///
/// Used to decode return values and logs in order to add values into fuzz dictionary.
pub fn fuzzed_artifacts(&self, tx: &BasicTxDetails) -> (Option<&JsonAbi>, Option<&Function>) {
match self.inner.get(&tx.call_details.target) {
Some(c) => (
Some(&c.abi),
c.abi.functions().find(|f| f.selector() == tx.call_details.calldata[..4]),
),
None => (None, None),
}
}
/// Returns flatten target contract address and functions to be fuzzed.
/// Includes contract targeted functions if specified, else all mutable contract functions.
pub fn fuzzed_functions(&self) -> impl Iterator<Item = (&Address, &Function)> {
self.inner
.iter()
.filter(|(_, c)| !c.abi.functions.is_empty())
.flat_map(|(contract, c)| c.abi_fuzzed_functions().map(move |f| (contract, f)))
}
/// Returns whether the given transaction can be replayed or not with known contracts.
pub fn can_replay(&self, tx: &BasicTxDetails) -> bool {
match self.inner.get(&tx.call_details.target) {
Some(c) => c.abi.functions().any(|f| f.selector() == tx.call_details.calldata[..4]),
None => false,
}
}
/// Identifies fuzzed contract and function based on given tx details and returns unique metric
/// key composed from contract identifier and function name.
pub fn fuzzed_metric_key(&self, tx: &BasicTxDetails) -> Option<String> {
self.inner.get(&tx.call_details.target).and_then(|contract| {
contract
.abi
.functions()
.find(|f| f.selector() == tx.call_details.calldata[..4])
.map(|function| format!("{}.{}", contract.identifier.clone(), function.name))
})
}
/// Returns a map of contract addresses to their storage layouts.
pub fn get_storage_layouts(&self) -> HashMap<Address, Arc<StorageLayout>> {
self.inner
.iter()
.filter_map(|(addr, c)| {
c.storage_layout.as_ref().map(|layout| (*addr, Arc::clone(layout)))
})
.collect()
}
}
// Deref to the inner map so callers can use `BTreeMap` methods (get, insert, iterate)
// directly on `TargetedContracts`.
impl std::ops::Deref for TargetedContracts {
    type Target = BTreeMap<Address, TargetedContract>;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
impl std::ops::DerefMut for TargetedContracts {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
/// A contract identified as target for invariant testing.
#[derive(Clone, Debug)]
pub struct TargetedContract {
    /// The contract identifier (`source_path:contract_name`). This is only used in error
    /// messages.
    pub identifier: String,
    /// The contract's ABI.
    pub abi: JsonAbi,
    /// The targeted functions of the contract; empty means "all mutable functions".
    pub targeted_functions: Vec<Function>,
    /// The excluded functions of the contract.
    pub excluded_functions: Vec<Function>,
    /// The contract's storage layout, if available (requires the `storageLayout` extra
    /// compiler output).
    pub storage_layout: Option<Arc<StorageLayout>>,
}
impl TargetedContract {
    /// Returns a new `TargetedContract` instance with no function filters and no storage
    /// layout attached.
    pub fn new(identifier: String, abi: JsonAbi) -> Self {
        Self {
            identifier,
            abi,
            targeted_functions: Vec::new(),
            excluded_functions: Vec::new(),
            storage_layout: None,
        }
    }

    /// Determines contract storage layout from project contracts. Needs `storageLayout` to be
    /// enabled as extra output in project configuration.
    pub fn with_project_contracts(mut self, project_contracts: &ContractsByArtifact) -> Self {
        // `identifier` is `source_path:contract_name`; match both halves against the artifacts.
        if let Some((src, name)) = self.identifier.split_once(':') {
            let matching = project_contracts.iter().find(|(artifact, _)| {
                artifact.name == name && artifact.source.as_path().ends_with(src)
            });
            if let Some((_, contract_data)) = matching {
                self.storage_layout = contract_data.storage_layout.as_ref().map(Arc::clone);
            }
        }
        self
    }

    /// Helper to retrieve functions to fuzz for specified abi.
    /// Returns specified targeted functions if any, else mutable abi functions that are not
    /// marked as excluded.
    pub fn abi_fuzzed_functions(&self) -> impl Iterator<Item = &Function> {
        if self.targeted_functions.is_empty() {
            let mutable = self.abi.functions().filter(|&func| {
                let read_only = matches!(
                    func.state_mutability,
                    alloy_json_abi::StateMutability::Pure | alloy_json_abi::StateMutability::View
                );
                !read_only && !self.excluded_functions.contains(func)
            });
            Either::Right(mutable)
        } else {
            Either::Left(self.targeted_functions.iter())
        }
    }

    /// Returns the function for the given selector.
    pub fn get_function(&self, selector: Selector) -> eyre::Result<&Function> {
        get_function(&self.identifier, selector, &self.abi)
    }

    /// Adds the specified selectors to the targeted functions (or to the excluded functions
    /// when `should_exclude` is set).
    pub fn add_selectors(
        &mut self,
        selectors: impl IntoIterator<Item = Selector>,
        should_exclude: bool,
    ) -> eyre::Result<()> {
        for selector in selectors {
            // Resolve first so an unknown selector errors out before any mutation.
            let function = self.get_function(selector)?.clone();
            if should_exclude {
                self.excluded_functions.push(function);
            } else {
                self.targeted_functions.push(function);
            }
        }
        Ok(())
    }
}
/// Test contract which is testing its invariants.
#[derive(Clone, Debug)]
pub struct InvariantContract<'a> {
    /// Address of the test contract.
    pub address: Address,
    /// Invariant function present in the test contract.
    pub invariant_function: &'a Function,
    /// If true, `afterInvariant` function is called after each invariant run.
    pub call_after_invariant: bool,
    /// ABI of the test contract.
    pub abi: &'a JsonAbi,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/invariant/call_override.rs | crates/evm/fuzz/src/invariant/call_override.rs | use crate::{BasicTxDetails, CallDetails};
use alloy_primitives::Address;
use parking_lot::{Mutex, RwLock};
use proptest::{
option::weighted,
strategy::{SBoxedStrategy, Strategy, ValueTree},
test_runner::TestRunner,
};
use std::sync::Arc;
/// Given a TestRunner and a strategy, it generates calls. Used inside the Fuzzer inspector to
/// override external calls to test for potential reentrancy vulnerabilities.
#[derive(Clone, Debug)]
pub struct RandomCallGenerator {
    /// Address of the test contract.
    pub test_address: Address,
    /// Runner that will generate the call from the strategy.
    pub runner: Arc<Mutex<TestRunner>>,
    /// Strategy to be used to generate calls from `target_reference`.
    /// Wrapped in `weighted(0.9, ..)` by `new`, so a draw may yield `None` (no override).
    pub strategy: SBoxedStrategy<Option<CallDetails>>,
    /// Reference to which contract we want a fuzzed calldata from.
    pub target_reference: Arc<RwLock<Address>>,
    /// Flag to know if a call has been overridden. Don't allow nesting for now.
    pub used: bool,
    /// If set to `true`, consumes the next call from `last_sequence`, otherwise queries it from
    /// the strategy.
    pub replay: bool,
    /// Saves the sequence of generated calls (including `None` draws) that can be replayed
    /// later on.
    pub last_sequence: Arc<RwLock<Vec<Option<BasicTxDetails>>>>,
}
impl RandomCallGenerator {
    /// Creates a generator that overrides external calls with strategy-produced calls.
    ///
    /// The strategy is wrapped in `weighted(0.9, ..)`, so a draw yields a call most of the
    /// time and `None` (no override) otherwise.
    pub fn new(
        test_address: Address,
        runner: TestRunner,
        strategy: impl Strategy<Value = CallDetails> + Send + Sync + 'static,
        target_reference: Arc<RwLock<Address>>,
    ) -> Self {
        Self {
            test_address,
            runner: Arc::new(Mutex::new(runner)),
            strategy: weighted(0.9, strategy).sboxed(),
            target_reference,
            last_sequence: Arc::default(),
            replay: false,
            used: false,
        }
    }

    /// All `self.next()` calls will now pop `self.last_sequence`. Used to replay an invariant
    /// failure.
    pub fn set_replay(&mut self, status: bool) {
        self.replay = status;
        // Reverse the recorded sequence so `pop()` yields the calls in original order.
        if status {
            self.last_sequence.write().reverse();
        }
    }

    /// Gets the next call. Random if replay is not set. Otherwise, it pops from
    /// `last_sequence`.
    pub fn next(
        &mut self,
        original_caller: Address,
        original_target: Address,
    ) -> Option<BasicTxDetails> {
        if self.replay {
            return self.last_sequence.write().pop().expect(
                "to have same size as the number of (unsafe) external calls of the sequence.",
            );
        }
        // The overriding call is sent from the contract that was being called.
        let sender = original_target;
        // Point the strategy at the original caller so the generated calldata targets it.
        *self.target_reference.write() = original_caller;
        let generated = self
            .strategy
            .new_tree(&mut self.runner.lock())
            .unwrap()
            .current()
            .map(|call_details| BasicTxDetails { warp: None, roll: None, sender, call_details });
        // Record the draw (including `None`s) so the sequence can be replayed later.
        self.last_sequence.write().push(generated.clone());
        generated
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/mutators.rs | crates/evm/fuzz/src/strategies/mutators.rs | use alloy_dyn_abi::Word;
use alloy_primitives::{Address, I256, Sign, U256};
use proptest::{prelude::*, test_runner::TestRunner};
use rand::seq::IndexedRandom;
use std::fmt::Debug;
/// Interesting 8-bit values to inject (AFL-style boundary/magic values).
static INTERESTING_8: &[i8] = &[-128, -1, 0, 1, 16, 32, 64, 100, 127];
/// Interesting 16-bit values to inject.
static INTERESTING_16: &[i16] = &[
    -128, -1, 0, 1, 16, 32, 64, 100, 127, -32768, -129, 128, 255, 256, 512, 1000, 1024, 4096, 32767,
];
/// Interesting 32-bit values to inject.
static INTERESTING_32: &[i32] = &[
    -128,
    -1,
    0,
    1,
    16,
    32,
    64,
    100,
    127,
    -32768,
    -129,
    128,
    255,
    256,
    512,
    1000,
    1024,
    4096,
    32767,
    -2147483648,
    -100663046,
    -32769,
    32768,
    65535,
    65536,
    100663045,
    2147483647,
];
/// Multipliers used to define the 3 standard deviation range of a Gaussian-like curve.
/// For example, a multiplier of 0.25 means the +/-3 standard deviation bounds are +/-25% of the
/// original value.
static THREE_SIGMA_MULTIPLIERS: &[f64] = &[0.1, 0.25, 0.5, 1.0, 2.0, 5.0, 10.0];
/// Mutator that randomly increments or decrements an uint or int by one (with wrap-around).
pub(crate) trait IncrementDecrementMutator: Sized + Copy + Debug {
    /// The value `1` for the implementing type.
    const ONE: Self;

    fn wrapping_add(self, rhs: Self) -> Self;

    fn wrapping_sub(self, rhs: Self) -> Self;

    /// Checks that `new` is a genuine, in-range mutation of `old` for `size` bits.
    fn validate(old: Self, new: Self, size: usize) -> Option<Self>;

    #[instrument(
        name = "mutator::increment_decrement",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn increment_decrement(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Coin flip: wrap-around +1 or wrap-around -1.
        let mutated = match test_runner.rng().random::<bool>() {
            true => self.wrapping_add(Self::ONE),
            false => self.wrapping_sub(Self::ONE),
        };
        Self::validate(self, mutated, size)
    }
}
/// Implements [`IncrementDecrementMutator`] for a 256-bit numeric type by forwarding the
/// wrapping arithmetic to the type's inherent methods and delegating `validate` to the given
/// size-aware validation function.
macro_rules! impl_increment_decrement_mutator {
    ($ty:ty, $validate_fn:path) => {
        impl IncrementDecrementMutator for $ty {
            fn validate(old: Self, new: Self, size: usize) -> Option<Self> {
                $validate_fn(old, new, size)
            }
            fn wrapping_add(self, rhs: Self) -> Self {
                Self::wrapping_add(self, rhs)
            }
            fn wrapping_sub(self, rhs: Self) -> Self {
                Self::wrapping_sub(self, rhs)
            }
            const ONE: Self = Self::ONE;
        }
    };
}
impl_increment_decrement_mutator!(U256, validate_uint_mutation);
impl_increment_decrement_mutator!(I256, validate_int_mutation);
/// Mutator that changes the current value of an uint or int by applying gaussian noise.
pub(crate) trait GaussianNoiseMutator: Sized + Copy + Debug {
    /// Returns the value scaled by a random Gaussian-like factor, or `None` when the sampled
    /// scale degenerates or the result is unchanged / out of range for `size` bits.
    fn mutate_with_gaussian_noise(self, size: usize, test_runner: &mut TestRunner) -> Option<Self>;
}
impl GaussianNoiseMutator for U256 {
    #[instrument(
        name = "U256::mutate_with_gaussian_noise",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_with_gaussian_noise(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Only the low `size` bits (the trailing `size / 8` big-endian bytes) are scaled.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        let scale = sample_gaussian_scale(&mut test_runner.rng())?;
        apply_scale_to_bytes(&mut buf[start..], scale)?;
        validate_uint_mutation(self, Self::from_be_bytes(buf), size)
    }
}
impl GaussianNoiseMutator for I256 {
    #[instrument(
        name = "I256::mutate_with_gaussian_noise",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_with_gaussian_noise(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Only the low `size` bits (the trailing `size / 8` big-endian bytes) are scaled.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        let scale = sample_gaussian_scale(&mut test_runner.rng())?;
        apply_scale_to_bytes(&mut buf[start..], scale)?;
        validate_int_mutation(self, Self::from_be_bytes(buf), size)
    }
}
/// Mutator that bounds the current value of an uint or int in the given range.
/// The mutated value is always different from the current value.
pub trait BoundMutator: Sized + Copy + Debug {
    /// Returns a fresh value in `[min, max]` different from `self`, or `None` when the range
    /// is invalid, degenerate, or does not contain `self`.
    fn bound(self, min: Self, max: Self, test_runner: &mut TestRunner) -> Option<Self>;
}
impl BoundMutator for U256 {
    #[instrument(name = "U256::bound", level = "trace", skip(test_runner), ret)]
    fn bound(self, min: Self, max: Self, test_runner: &mut TestRunner) -> Option<Self> {
        // A mutation must be able to produce a value in [min, max] different from `self`:
        // reject invalid/degenerate ranges and out-of-range inputs.
        if min > max || self < min || self > max || min == max {
            return None;
        }
        let rng = test_runner.rng();
        loop {
            // Draw a candidate with a random bit-width so small magnitudes are well
            // represented.
            let bits = rng.random_range(8..=256);
            let mask = (Self::ONE << bits) - Self::ONE;
            // Use four random limbs for the full 256 bits of entropy; a single `u128` draw
            // would leave the upper half of wide masks permanently zero, so values beyond
            // `min + 2^128` could never be produced.
            let candidate =
                Self::from_limbs([rng.random(), rng.random(), rng.random(), rng.random()]) & mask;
            // Map to range (modulo bias is acceptable for fuzzing purposes).
            let candidate = min + (candidate % ((max - min).saturating_add(Self::ONE)));
            if candidate != self {
                return Some(candidate);
            }
        }
    }
}
impl BoundMutator for I256 {
    #[instrument(name = "I256::bound", level = "trace", skip(test_runner), ret)]
    fn bound(self, min: Self, max: Self, test_runner: &mut TestRunner) -> Option<Self> {
        // A mutation must be able to produce a value in [min, max] different from `self`:
        // reject invalid/degenerate ranges and out-of-range inputs.
        if min > max || self < min || self > max || min == max {
            return None;
        }
        let rng = test_runner.rng();
        loop {
            // Draw a candidate with a random bit-width (up to 255 value bits).
            let bits = rng.random_range(8..=255);
            let mask = (U256::ONE << bits) - U256::ONE;
            // Use four random limbs for full-width entropy; the previous two-`u64` draw
            // capped candidates at 128 bits even though `mask` can cover up to 255 bits.
            let rand_u = U256::from_limbs([
                rng.next_u64(),
                rng.next_u64(),
                rng.next_u64(),
                rng.next_u64(),
            ]);
            let unsigned_candidate = rand_u & mask;
            // Interpret the masked draw as a two's-complement signed value of `bits` bits.
            let signed_candidate = {
                let midpoint = U256::ONE << (bits - 1);
                if unsigned_candidate < midpoint {
                    Self::from_raw(unsigned_candidate)
                } else {
                    Self::from_raw(unsigned_candidate) - Self::from_raw(U256::ONE << bits)
                }
            };
            // Map to range: negative draws count down from `max`, positive ones up from `min`.
            let range = max.saturating_sub(min).saturating_add(Self::ONE).unsigned_abs();
            let wrapped = Self::from_raw(U256::from(signed_candidate.unsigned_abs()) % range);
            let candidate =
                if signed_candidate.is_negative() { max - wrapped } else { min + wrapped };
            if candidate != self {
                return Some(candidate);
            }
        }
    }
}
/// Mutator that changes the current value by flipping a random bit.
pub(crate) trait BitMutator: Sized + Copy + Debug {
    /// Flips one random bit within the significant `size` bits/bytes of the value; returns
    /// `None` when the result is unchanged or out of range.
    fn flip_random_bit(self, size: usize, test_runner: &mut TestRunner) -> Option<Self>;
}
impl BitMutator for U256 {
    #[instrument(name = "U256::flip_random_bit", level = "trace", skip(size, test_runner), ret)]
    fn flip_random_bit(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Restrict the flip to the low `size` bits (the trailing big-endian bytes).
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        flip_random_bit_in_slice(&mut buf[start..], test_runner)?;
        validate_uint_mutation(self, Self::from_be_bytes(buf), size)
    }
}
impl BitMutator for I256 {
    #[instrument(name = "I256::flip_random_bit", level = "trace", skip(size, test_runner), ret)]
    fn flip_random_bit(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Restrict the flip to the low `size` bits (the trailing big-endian bytes).
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        flip_random_bit_in_slice(&mut buf[start..], test_runner)?;
        validate_int_mutation(self, Self::from_be_bytes(buf), size)
    }
}
impl BitMutator for Address {
    #[instrument(name = "Address::flip_random_bit", level = "trace", skip(_size, test_runner), ret)]
    fn flip_random_bit(self, _size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Addresses are a fixed 20 bytes, so `_size` is ignored.
        let mut flipped = self;
        flip_random_bit_in_slice(flipped.as_mut_slice(), test_runner)?;
        (flipped != self).then_some(flipped)
    }
}
impl BitMutator for Word {
    #[instrument(name = "Word::flip_random_bit", level = "trace", skip(size, test_runner), ret)]
    fn flip_random_bit(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Fixed-bytes values are left-aligned: only the first `size` bytes are significant.
        let mut mutated = self;
        flip_random_bit_in_slice(&mut mutated[..size], test_runner)?;
        (mutated != self).then_some(mutated)
    }
}
/// Mutator that changes the current value by randomly injecting interesting words (for uint, int,
/// address and fixed bytes) - see <https://github.com/AFLplusplus/LibAFL/blob/90cb9a2919faf386e0678870e52784070cdac4b6/crates/libafl/src/mutators/mutations.rs#L88-L123>.
pub(crate) trait InterestingWordMutator: Sized + Copy + Debug {
    /// Overwrites one random byte with an interesting 8-bit value.
    fn mutate_interesting_byte(self, size: usize, test_runner: &mut TestRunner) -> Option<Self>;
    /// Overwrites a random 2-byte region with an interesting 16-bit value.
    fn mutate_interesting_word(self, size: usize, test_runner: &mut TestRunner) -> Option<Self>;
    /// Overwrites a random 4-byte region with an interesting 32-bit value.
    fn mutate_interesting_dword(self, size: usize, test_runner: &mut TestRunner) -> Option<Self>;
}
impl InterestingWordMutator for U256 {
    #[instrument(
        name = "U256::mutate_interesting_byte",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_byte(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Operate on the low `size` bits, i.e. the trailing `size / 8` big-endian bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_byte_slice(&mut buf[start..], test_runner)?;
        validate_uint_mutation(self, Self::from_be_bytes(buf), size)
    }

    #[instrument(
        name = "U256::mutate_interesting_word",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_word(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Inject a 16-bit interesting value somewhere in the significant bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_word_slice(&mut buf[start..], test_runner)?;
        validate_uint_mutation(self, Self::from_be_bytes(buf), size)
    }

    #[instrument(
        name = "U256::mutate_interesting_dword",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_dword(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Inject a 32-bit interesting value somewhere in the significant bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_dword_slice(&mut buf[start..], test_runner)?;
        validate_uint_mutation(self, Self::from_be_bytes(buf), size)
    }
}
impl InterestingWordMutator for I256 {
    #[instrument(
        name = "I256::mutate_interesting_byte",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_byte(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Operate on the low `size` bits, i.e. the trailing `size / 8` big-endian bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_byte_slice(&mut buf[start..], test_runner)?;
        validate_int_mutation(self, Self::from_be_bytes(buf), size)
    }

    #[instrument(
        name = "I256::mutate_interesting_word",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_word(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Inject a 16-bit interesting value somewhere in the significant bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_word_slice(&mut buf[start..], test_runner)?;
        validate_int_mutation(self, Self::from_be_bytes(buf), size)
    }

    #[instrument(
        name = "I256::mutate_interesting_dword",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_dword(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Inject a 32-bit interesting value somewhere in the significant bytes.
        let mut buf: [u8; 32] = self.to_be_bytes();
        let start = 32 - size / 8;
        mutate_interesting_dword_slice(&mut buf[start..], test_runner)?;
        validate_int_mutation(self, Self::from_be_bytes(buf), size)
    }
}
impl InterestingWordMutator for Address {
    #[instrument(
        name = "Address::mutate_interesting_byte",
        level = "trace",
        skip(_size, test_runner),
        ret
    )]
    fn mutate_interesting_byte(self, _size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Addresses are a fixed 20 bytes, so `_size` is ignored.
        let mut candidate = self;
        mutate_interesting_byte_slice(candidate.as_mut_slice(), test_runner)?;
        (candidate != self).then_some(candidate)
    }

    #[instrument(
        name = "Address::mutate_interesting_word",
        level = "trace",
        skip(_size, test_runner),
        ret
    )]
    fn mutate_interesting_word(self, _size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        let mut candidate = self;
        mutate_interesting_word_slice(candidate.as_mut_slice(), test_runner)?;
        (candidate != self).then_some(candidate)
    }

    #[instrument(
        name = "Address::mutate_interesting_dword",
        level = "trace",
        skip(_size, test_runner),
        ret
    )]
    fn mutate_interesting_dword(self, _size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        let mut candidate = self;
        mutate_interesting_dword_slice(candidate.as_mut_slice(), test_runner)?;
        (candidate != self).then_some(candidate)
    }
}
impl InterestingWordMutator for Word {
    #[instrument(
        name = "Word::mutate_interesting_byte",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_byte(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        // Fixed-bytes values are left-aligned: only the first `size` bytes are significant.
        let mut mutated = self;
        mutate_interesting_byte_slice(&mut mutated[..size], test_runner)?;
        (mutated != self).then_some(mutated)
    }

    #[instrument(
        name = "Word::mutate_interesting_word",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_word(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        let mut mutated = self;
        mutate_interesting_word_slice(&mut mutated[..size], test_runner)?;
        (mutated != self).then_some(mutated)
    }

    #[instrument(
        name = "Word::mutate_interesting_dword",
        level = "trace",
        skip(size, test_runner),
        ret
    )]
    fn mutate_interesting_dword(self, size: usize, test_runner: &mut TestRunner) -> Option<Self> {
        let mut mutated = self;
        mutate_interesting_dword_slice(&mut mutated[..size], test_runner)?;
        (mutated != self).then_some(mutated)
    }
}
/// Flips a random bit in the given mutable byte slice.
/// Returns `None` (slice untouched) when the slice is empty.
fn flip_random_bit_in_slice(bytes: &mut [u8], test_runner: &mut TestRunner) -> Option<()> {
    let total_bits = bytes.len() * 8;
    if total_bits == 0 {
        return None;
    }
    let bit = test_runner.rng().random_range(0..total_bits);
    bytes[bit / 8] ^= 1 << (bit % 8);
    Some(())
}
/// Mutates a random byte in the given byte slice by replacing it with a randomly chosen
/// interesting 8-bit value.
///
/// Returns `None` (slice untouched) when the slice is empty.
fn mutate_interesting_byte_slice(bytes: &mut [u8], test_runner: &mut TestRunner) -> Option<()> {
    // Guard against empty slices: `random_range(0..0)` panics. The sibling helpers
    // (`flip_random_bit_in_slice`, word/dword variants) already have this check.
    if bytes.is_empty() {
        return None;
    }
    let index = test_runner.rng().random_range(0..bytes.len());
    let val = *INTERESTING_8.choose(&mut test_runner.rng())? as u8;
    bytes[index] = val;
    Some(())
}
/// Mutates a random 2-byte (16-bit) region in the byte slice with a randomly chosen interesting
/// 16-bit value, written big-endian. Returns `None` when the slice is too short.
fn mutate_interesting_word_slice(bytes: &mut [u8], test_runner: &mut TestRunner) -> Option<()> {
    // Need room for a full 16-bit word.
    let last_start = bytes.len().checked_sub(2)?;
    let index = test_runner.rng().random_range(0..=last_start);
    let val = *INTERESTING_16.choose(&mut test_runner.rng())? as u16;
    bytes[index..index + 2].copy_from_slice(&val.to_be_bytes());
    Some(())
}
/// Mutates a random 4-byte (32-bit) region in the byte slice with a randomly chosen interesting
/// 32-bit value, written big-endian. Returns `None` when the slice is too short.
fn mutate_interesting_dword_slice(bytes: &mut [u8], test_runner: &mut TestRunner) -> Option<()> {
    // Need room for a full 32-bit dword.
    let last_start = bytes.len().checked_sub(4)?;
    let index = test_runner.rng().random_range(0..=last_start);
    let val = *INTERESTING_32.choose(&mut test_runner.rng())? as u32;
    bytes[index..index + 4].copy_from_slice(&val.to_be_bytes());
    Some(())
}
/// Samples a scale factor from a pseudo-Gaussian distribution centered around 1.0.
///
/// - Select a random standard deviation multiplier from a predefined set.
/// - Approximates a standard normal distribution using the Irwin-Hall method (sum of uniform
///   samples).
/// - Scales the normal value by the chosen standard deviation multiplier, divided by 3 to get
///   standard deviation.
/// - Adds 1.0 to center the scale factor around 1.0 (no mutation).
///
/// Returns a scale factor that, when applied to a number, mimics Gaussian noise; `None` when
/// the sampled factor is negative or indistinguishable from 1.0 (a no-op).
fn sample_gaussian_scale<R: Rng>(rng: &mut R) -> Option<f64> {
    const NUM_SAMPLES: usize = 8;
    let chosen_3rd_sigma = *THREE_SIGMA_MULTIPLIERS.choose(rng).unwrap_or(&1.0);
    // Irwin-Hall: the sum of `NUM_SAMPLES` uniform draws, recentered around zero.
    let mut sum = 0.0;
    for _ in 0..NUM_SAMPLES {
        sum += rng.random::<f64>();
    }
    let standard_normal = sum - (NUM_SAMPLES as f64 / 2.0);
    let scale_factor = (chosen_3rd_sigma / 3.0) * standard_normal + 1.0;
    if scale_factor < 0.0 || (scale_factor - 1.0).abs() < f64::EPSILON {
        None
    } else {
        Some(scale_factor)
    }
}
/// Applies a floating-point scale factor to a byte slice representing an unsigned or signed
/// integer.
///
/// The bytes are interpreted big-endian and multiplied in place, one byte at a time starting
/// from the least significant byte. If the most significant byte overflows, the slice is
/// saturated to all `0xFF`. Always returns `Some(())`.
fn apply_scale_to_bytes(bytes: &mut [u8], scale_factor: f64) -> Option<()> {
    // Fractional remainder carried from the previous (less significant) byte.
    let mut carry_down = 0.0;
    // Walk from the least significant byte (last index, big-endian) upward.
    for i in (0..bytes.len()).rev() {
        let byte_val = bytes[i] as f64;
        let scaled = (byte_val + carry_down * 256.0) * scale_factor;
        // Overflow of the most significant byte: saturate the whole number.
        if i == 0 && scaled >= 256.0 {
            bytes.iter_mut().for_each(|b| *b = 0xFF);
            return Some(());
        }
        bytes[i] = (scaled % 256.0).floor() as u8;
        // Whole-unit overflow propagates upward into more significant bytes.
        let mut carry_up = (scaled / 256.0).floor();
        // Un-scale the fractional part so it is re-scaled on the next iteration.
        // NOTE(review): the `* 256.0` re-weighting above treats this as a fraction of the
        // next byte — approximate by design; confirm against the intended rounding model.
        carry_down = (scaled % 1.0) / scale_factor;
        let mut j = i;
        // Propagate carry_up until it is zero or no more bytes left
        while carry_up > 0.0 && j > 0 {
            j -= 1;
            let new_val = bytes[j] as f64 + carry_up;
            // Same saturation rule when the carry overflows the top byte.
            if j == 0 && new_val >= 256.0 {
                bytes.iter_mut().for_each(|b| *b = 0xFF);
                return Some(());
            }
            bytes[j] = (new_val % 256.0).floor() as u8;
            carry_up = (new_val / 256.0).floor();
        }
    }
    Some(())
}
/// Returns mutated uint value if different from the original value and if it fits in the given
/// size, otherwise None.
fn validate_uint_mutation(original: U256, mutated: U256, size: usize) -> Option<U256> {
    // Early return if mutated value is the same as original value.
    if mutated == original {
        return None;
    }
    // Check if mutated value fits the given size. `max` is the largest representable
    // `uint<size>` and is itself a valid value, so the comparison must be inclusive —
    // the previous strict `<` wrongly rejected the type's maximum (e.g. `U256::MAX`
    // for `size == 256`).
    let max = if size < 256 { (U256::from(1) << size) - U256::from(1) } else { U256::MAX };
    (mutated <= max).then_some(mutated)
}
/// Returns mutated int value if different from the original value and if it fits in the given
/// size, otherwise None.
fn validate_int_mutation(original: I256, mutated: I256, size: usize) -> Option<I256> {
    // Early return if mutated value is the same as original value.
    if mutated == original {
        return None;
    }
    // Check if mutated value fits the given size: an `int<size>` covers the inclusive range
    // [-2^(size-1), 2^(size-1) - 1]. The previous strict comparisons against
    // ±(2^(size-1) - 1) rejected both range endpoints and could never accept the true
    // minimum at all.
    let half = U256::from(1) << (size - 1);
    let max = I256::overflowing_from_sign_and_abs(Sign::Positive, half - U256::from(1)).0;
    let min = I256::overflowing_from_sign_and_abs(Sign::Negative, half).0;
    (min <= mutated && mutated <= max).then_some(mutated)
}
#[cfg(test)]
mod tests {
    use super::*;
    use proptest::test_runner::Config;

    /// Validates a mutation of a numeric value: the outcome is acceptable when no mutation was
    /// produced, or when the produced value differs from the input.
    #[track_caller]
    fn check_numeric_mutation<T: std::fmt::Debug + PartialEq + Copy>(value: T, mutated: Option<T>) {
        assert!(
            mutated.is_none() || mutated.is_some_and(|m| m != value),
            "Mutation failed: value = {value:?}, mutated = {mutated:?}"
        );
    }

    /// Validates a mutation of a word-like value (`Address` / `Word`): acceptable when absent or
    /// different from the input.
    #[track_caller]
    fn check_word_mutation<T: std::fmt::Debug + PartialEq + Copy>(value: T, mutated: Option<T>) {
        assert!(
            mutated.is_none() || mutated.is_some_and(|mutated| mutated != value),
            "Mutation failed for value: {value:?}, result: {mutated:?}"
        );
    }

    #[test]
    fn test_mutate_uint() {
        let mut runner = TestRunner::new(Config::default());
        let size = 32;
        let candidates = [U256::ZERO, U256::ONE, U256::from(12345u64), U256::from(255), U256::MAX];
        for value in candidates {
            for _ in 0..100 {
                check_numeric_mutation(value, U256::increment_decrement(value, size, &mut runner));
                check_numeric_mutation(value, U256::flip_random_bit(value, size, &mut runner));
                check_numeric_mutation(value, U256::mutate_interesting_byte(value, size, &mut runner));
                check_numeric_mutation(value, U256::mutate_interesting_word(value, size, &mut runner));
                check_numeric_mutation(value, U256::mutate_interesting_dword(value, size, &mut runner));
            }
        }
    }

    #[test]
    fn test_mutate_int() {
        let mut runner = TestRunner::new(Config::default());
        let size = 32;
        let candidates = [
            I256::ZERO,
            I256::ONE,
            I256::MINUS_ONE,
            I256::from_dec_str("12345").unwrap(),
            I256::from_dec_str("-54321").unwrap(),
            I256::from_dec_str("340282366920938463463374607431768211455").unwrap(),
            I256::from_dec_str("-340282366920938463463374607431768211455").unwrap(),
        ];
        for value in candidates {
            for _ in 0..100 {
                check_numeric_mutation(value, I256::increment_decrement(value, size, &mut runner));
                check_numeric_mutation(value, I256::flip_random_bit(value, size, &mut runner));
                check_numeric_mutation(value, I256::mutate_interesting_byte(value, size, &mut runner));
                check_numeric_mutation(value, I256::mutate_interesting_word(value, size, &mut runner));
                check_numeric_mutation(value, I256::mutate_interesting_dword(value, size, &mut runner));
            }
        }
    }

    #[test]
    fn test_mutate_address() {
        let mut runner = TestRunner::new(Config::default());
        let value = Address::random();
        for _ in 0..100 {
            check_word_mutation(value, Address::flip_random_bit(value, 20, &mut runner));
            check_word_mutation(value, Address::mutate_interesting_byte(value, 20, &mut runner));
            check_word_mutation(value, Address::mutate_interesting_word(value, 20, &mut runner));
            check_word_mutation(value, Address::mutate_interesting_dword(value, 20, &mut runner));
        }
    }

    #[test]
    fn test_mutate_word() {
        let mut runner = TestRunner::new(Config::default());
        let value = Word::random();
        for _ in 0..100 {
            check_word_mutation(value, Word::flip_random_bit(value, 32, &mut runner));
            check_word_mutation(value, Word::mutate_interesting_byte(value, 32, &mut runner));
            check_word_mutation(value, Word::mutate_interesting_word(value, 32, &mut runner));
            check_word_mutation(value, Word::mutate_interesting_dword(value, 32, &mut runner));
        }
    }

    #[test]
    fn test_mutate_interesting_word_too_small_returns_none() {
        // An 8-bit value has no 16-bit word to overwrite.
        let mut runner = TestRunner::new(Config::default());
        assert!(U256::mutate_interesting_word(U256::from(123), 8, &mut runner).is_none());
    }

    #[test]
    fn test_mutate_interesting_dword_too_small_returns_none() {
        // A 16-bit value has no 32-bit dword to overwrite.
        let mut runner = TestRunner::new(Config::default());
        let value = I256::from_dec_str("123").unwrap();
        assert!(I256::mutate_interesting_dword(value, 16, &mut runner).is_none());
    }

    #[test]
    fn test_u256_bound() {
        let mut runner = TestRunner::new(Config::default());
        let (lo, hi) = (U256::from(0u64), U256::from(200u64));
        let original = U256::from(100u64);
        for _ in 0..50 {
            let result = original.bound(lo, hi, &mut runner);
            assert!(result.is_some(), "Mutation should occur");
            let mutated = result.unwrap();
            assert!(mutated >= lo, "Mutated value >= min");
            assert!(mutated <= hi, "Mutated value <= max");
            assert_ne!(mutated, original, "mutated value should differ from original");
        }
        // Bounding over the whole [MIN, MAX] range must still produce a mutation.
        let result = original.bound(U256::MIN, U256::MAX, &mut runner);
        assert!(result.is_some(), "Mutation should occur");
    }

    #[test]
    fn test_i256_bound() {
        let mut runner = TestRunner::new(Config::default());
        let (lo, hi) = (I256::from_dec_str("-100").unwrap(), I256::from_dec_str("100").unwrap());
        let original = I256::from_dec_str("10").unwrap();
        for _ in 0..50 {
            let result = original.bound(lo, hi, &mut runner);
            assert!(result.is_some(), "Mutation should occur");
            let mutated = result.unwrap();
            assert!(mutated >= lo, "Mutated value >= min");
            assert!(mutated <= hi, "Mutated value <= max");
            assert_ne!(mutated, original, "Mutated value should not equal current");
        }
        // Bounding over the whole [MIN, MAX] range must still produce a mutation.
        let result = original.bound(I256::MIN, I256::MAX, &mut runner);
        assert!(result.is_some(), "Mutation should occur");
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/invariants.rs | crates/evm/fuzz/src/strategies/invariants.rs | use super::{fuzz_calldata, fuzz_param_from_state};
use crate::{
BasicTxDetails, CallDetails, FuzzFixtures,
invariant::{FuzzRunIdentifiedContracts, SenderFilters},
strategies::{EvmFuzzState, fuzz_calldata_from_state, fuzz_param},
};
use alloy_json_abi::Function;
use alloy_primitives::{Address, U256};
use foundry_config::InvariantConfig;
use parking_lot::RwLock;
use proptest::prelude::*;
use rand::seq::IteratorRandom;
use std::{rc::Rc, sync::Arc};
/// Given a target address, we generate random calldata.
///
/// With weight 80 the current `target` override is used (read lazily so the latest value is
/// observed at generation time); with weight 20 a random contract from the identified fuzz-run
/// targets is chosen instead. A random fuzzed function of the chosen contract is then selected
/// and calldata is generated for it.
pub fn override_call_strat(
    fuzz_state: EvmFuzzState,
    contracts: FuzzRunIdentifiedContracts,
    target: Arc<RwLock<Address>>,
    fuzz_fixtures: FuzzFixtures,
) -> impl Strategy<Value = CallDetails> + Send + Sync + 'static {
    let contracts_ref = contracts.targets.clone();
    proptest::prop_oneof![
        // Use the overridden target address.
        80 => proptest::strategy::LazyJust::new(move || *target.read()),
        // Or pick any known target contract at random.
        20 => any::<prop::sample::Selector>()
            .prop_map(move |selector| *selector.select(contracts_ref.lock().keys())),
    ]
    .prop_flat_map(move |target_address| {
        let fuzz_state = fuzz_state.clone();
        let fuzz_fixtures = fuzz_fixtures.clone();
        // Select a random fuzzed function of the chosen contract. The targets lock is held only
        // inside this block; the functions are cloned out so the strategy below owns its data.
        let func = {
            let contracts = contracts.targets.lock();
            let contract = contracts.get(&target_address).unwrap_or_else(|| {
                // Choose a random contract if target selected by lazy strategy is not in fuzz run
                // identified contracts. This can happen when contract is created in `setUp` call
                // but is not included in targetContracts.
                contracts.values().choose(&mut rand::rng()).unwrap()
            });
            let fuzzed_functions: Vec<_> = contract.abi_fuzzed_functions().cloned().collect();
            any::<prop::sample::Index>().prop_map(move |index| index.get(&fuzzed_functions).clone())
        };
        // Finally, generate calldata for the selected function.
        func.prop_flat_map(move |func| {
            fuzz_contract_with_calldata(&fuzz_state, &fuzz_fixtures, target_address, func)
        })
    })
}
/// Creates the invariant strategy.
///
/// Given the known and future contracts, it generates the next call by fuzzing the `caller`,
/// `calldata` and `target`. The generated data is evaluated lazily for every single call to fully
/// leverage the evolving fuzz dictionary.
///
/// The fuzzed parameters can be filtered through different methods implemented in the test
/// contract:
///
/// `targetContracts()`, `targetSenders()`, `excludeContracts()`, `targetSelectors()`
pub fn invariant_strat(
    fuzz_state: EvmFuzzState,
    senders: SenderFilters,
    contracts: FuzzRunIdentifiedContracts,
    config: InvariantConfig,
    fuzz_fixtures: FuzzFixtures,
) -> impl Strategy<Value = BasicTxDetails> {
    // `Rc` so the sender filters can be cheaply shared into the per-sample closure below.
    let senders = Rc::new(senders);
    let dictionary_weight = config.dictionary.dictionary_weight;
    // Strategy to generate values for tx warp and roll.
    // Only produces a value when the corresponding delay is configured (`cond` is true).
    let warp_roll_strat = |cond: bool| {
        if cond { any::<U256>().prop_map(Some).boxed() } else { Just(None).boxed() }
    };
    any::<prop::sample::Selector>()
        .prop_flat_map(move |selector| {
            // Pick a random (target, function) pair among the fuzzed functions of all targets.
            let contracts = contracts.targets.lock();
            let functions = contracts.fuzzed_functions();
            let (target_address, target_function) = selector.select(functions);
            let sender = select_random_sender(&fuzz_state, senders.clone(), dictionary_weight);
            let call_details = fuzz_contract_with_calldata(
                &fuzz_state,
                &fuzz_fixtures,
                *target_address,
                target_function.clone(),
            );
            let warp = warp_roll_strat(config.max_time_delay.is_some());
            let roll = warp_roll_strat(config.max_block_delay.is_some());
            (warp, roll, sender, call_details)
        })
        .prop_map(move |(warp, roll, sender, call_details)| {
            // Clamp the raw random values into the configured delay ranges.
            // NOTE(review): `warp`/`roll` is `Some` only when the delay is configured, but if the
            // configured delay is `Some(0)` this computes `% 0` — presumably prevented by config
            // validation elsewhere; confirm.
            let warp =
                warp.map(|time| time % U256::from(config.max_time_delay.unwrap_or_default()));
            let roll =
                roll.map(|block| block % U256::from(config.max_block_delay.unwrap_or_default()));
            BasicTxDetails { warp, roll, sender, call_details }
        })
}
/// Strategy to select a sender address:
/// * If `senders` is empty, then it's either a random address (10%) or from the dictionary (90%).
/// * If `senders` is not empty, a random address is chosen from the list of senders.
fn select_random_sender(
    fuzz_state: &EvmFuzzState,
    senders: Rc<SenderFilters>,
    dictionary_weight: u32,
) -> impl Strategy<Value = Address> + use<> {
    if !senders.targeted.is_empty() {
        // Targeted senders take absolute priority: pick one uniformly.
        any::<prop::sample::Index>().prop_map(move |index| *index.get(&senders.targeted)).boxed()
    } else {
        // `prop_oneof!` weights must not exceed the 100 total.
        assert!(dictionary_weight <= 100, "dictionary_weight must be <= 100");
        proptest::prop_oneof![
            100 - dictionary_weight => fuzz_param(&alloy_dyn_abi::DynSolType::Address),
            dictionary_weight => fuzz_param_from_state(&alloy_dyn_abi::DynSolType::Address, fuzz_state),
        ]
        .prop_map(move |addr| {
            let mut addr = addr.as_address().unwrap();
            // Make sure the selected address is not in the list of excluded senders.
            // We don't use proptest's filter to avoid reaching the `PROPTEST_MAX_LOCAL_REJECTS`
            // max rejects and exiting test before all runs completes.
            // See <https://github.com/foundry-rs/foundry/issues/11369>.
            // NOTE(review): `Address::random()` draws from a non-proptest RNG, so when an
            // excluded sender is hit the replacement is presumably not reproducible from the
            // proptest seed — confirm whether this matters for run replay.
            loop {
                if !senders.excluded.contains(&addr) {
                    break;
                }
                addr = Address::random();
            }
            addr
        })
        .boxed()
    }
}
/// Given a function, it returns a proptest strategy which generates valid abi-encoded calldata
/// for that function's input types.
pub fn fuzz_contract_with_calldata(
fuzz_state: &EvmFuzzState,
fuzz_fixtures: &FuzzFixtures,
target: Address,
func: Function,
) -> impl Strategy<Value = CallDetails> + use<> {
// We need to compose all the strategies generated for each parameter in all possible
// combinations.
// `prop_oneof!` / `TupleUnion` `Arc`s for cheap cloning.
prop_oneof![
60 => fuzz_calldata(func.clone(), fuzz_fixtures),
40 => fuzz_calldata_from_state(func, fuzz_state),
]
.prop_map(move |calldata| {
trace!(input=?calldata);
CallDetails { target, calldata }
})
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/calldata.rs | crates/evm/fuzz/src/strategies/calldata.rs | use crate::{
FuzzFixtures,
strategies::{EvmFuzzState, fuzz_param_from_state, fuzz_param_with_fixtures},
};
use alloy_dyn_abi::JsonAbiExt;
use alloy_json_abi::Function;
use alloy_primitives::Bytes;
use proptest::prelude::Strategy;
/// Given a function, it returns a strategy which generates valid calldata
/// for that function's input types, following declared test fixtures.
///
/// # Panics
///
/// Panics at sample time if the generated values cannot be ABI-encoded for `func`.
pub fn fuzz_calldata(
    func: Function,
    fuzz_fixtures: &FuzzFixtures,
) -> impl Strategy<Value = Bytes> + use<> {
    // Build one strategy per input parameter, honoring any fixture declared for that
    // parameter name; the resulting `Vec` of strategies is composed so that all parameter
    // combinations are explored.
    let mut param_strategies = Vec::with_capacity(func.inputs.len());
    for input in &func.inputs {
        param_strategies.push(fuzz_param_with_fixtures(
            &input.selector_type().parse().unwrap(),
            fuzz_fixtures.param_fixtures(&input.name),
            &input.name,
        ));
    }
    param_strategies.prop_map(move |values| {
        func.abi_encode_input(&values)
            .unwrap_or_else(|_| {
                panic!(
                    "Fuzzer generated invalid arguments for function `{}` with inputs {:?}: {:?}",
                    func.name, func.inputs, values
                )
            })
            .into()
    })
}
/// Given a function and some state, it returns a strategy which generated valid calldata for the
/// given function's input types, based on state taken from the EVM.
///
/// # Panics
///
/// Panics at sample time if the generated values cannot be ABI-encoded for `func`.
pub fn fuzz_calldata_from_state(
    func: Function,
    state: &EvmFuzzState,
) -> impl Strategy<Value = Bytes> + use<> {
    // One state-driven strategy per input parameter.
    let mut param_strategies = Vec::with_capacity(func.inputs.len());
    for input in &func.inputs {
        param_strategies
            .push(fuzz_param_from_state(&input.selector_type().parse().unwrap(), state));
    }
    param_strategies
        .prop_map(move |values| {
            func.abi_encode_input(&values)
                .unwrap_or_else(|_| {
                    panic!(
                        "Fuzzer generated invalid arguments for function `{}` with inputs {:?}: {:?}",
                        func.name, func.inputs, values
                    )
                })
                .into()
        })
        // Values drawn from EVM state are already meaningful; don't shrink them.
        .no_shrink()
}
#[cfg(test)]
mod tests {
    use crate::{FuzzFixtures, strategies::fuzz_calldata};
    use alloy_dyn_abi::{DynSolValue, JsonAbiExt};
    use alloy_json_abi::Function;
    use alloy_primitives::{Address, map::HashMap};
    use proptest::prelude::Strategy;

    #[test]
    fn can_fuzz_with_fixtures() {
        // Declare a single-address fixture for the `addressFixture` parameter.
        let function = Function::parse("test_fuzzed_address(address addressFixture)").unwrap();
        let address_fixture = DynSolValue::Address(Address::random());
        let mut fixtures = HashMap::default();
        fixtures.insert(
            "addressFixture".to_string(),
            DynSolValue::Array(vec![address_fixture.clone()]),
        );
        let expected = function.abi_encode_input(&[address_fixture]).unwrap();
        let strategy = fuzz_calldata(function, &FuzzFixtures::new(fixtures));
        // NOTE(review): the mapped strategy is constructed but never sampled, so this closure
        // (and its assertion) never actually executes — the test effectively only checks that
        // the strategy builds. Driving it with a `TestRunner` would execute the assertion, but
        // the fixture arm is probabilistic, so an exact-equality check could be flaky; confirm
        // the intended guarantee before strengthening.
        let _ = strategy.prop_map(move |fuzzed| {
            assert_eq!(expected, fuzzed);
        });
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/state.rs | crates/evm/fuzz/src/strategies/state.rs | use crate::{
BasicTxDetails, invariant::FuzzRunIdentifiedContracts, strategies::literals::LiteralsDictionary,
};
use alloy_dyn_abi::{DynSolType, DynSolValue, EventExt, FunctionExt};
use alloy_json_abi::{Function, JsonAbi};
use alloy_primitives::{
Address, B256, Bytes, Log, U256,
map::{AddressIndexSet, AddressMap, B256IndexSet, HashMap, IndexSet},
};
use foundry_common::{
ignore_metadata_hash, mapping_slots::MappingSlots, slot_identifier::SlotIdentifier,
};
use foundry_compilers::artifacts::StorageLayout;
use foundry_config::FuzzDictionaryConfig;
use foundry_evm_core::{bytecode::InstIter, utils::StateChangeset};
use parking_lot::{RawRwLock, RwLock, lock_api::RwLockReadGuard};
use revm::{
database::{CacheDB, DatabaseRef, DbAccount},
state::AccountInfo,
};
use std::{collections::BTreeMap, fmt, sync::Arc};
/// The maximum number of bytes we will look at in bytecodes to find push bytes (24 KiB).
///
/// This is to limit the performance impact of fuzz tests that might deploy arbitrarily sized
/// bytecode (as is the case with Solmate).
const PUSH_BYTE_ANALYSIS_LIMIT: usize = 24 * 1024;
/// A set of arbitrary 32 byte data from the VM used to generate values for the strategy.
///
/// Wrapped in a shareable container.
#[derive(Clone, Debug)]
pub struct EvmFuzzState {
    /// Shared, lock-protected dictionary of collected values; cloning `EvmFuzzState` shares the
    /// same underlying dictionary.
    inner: Arc<RwLock<FuzzDictionary>>,
    /// Addresses of external libraries deployed in test setup, excluded from fuzz test inputs.
    pub deployed_libs: Vec<Address>,
    /// Records mapping accesses. Used to identify storage slots belonging to mappings and sampling
    /// the values in the [`FuzzDictionary`].
    ///
    /// Only needed when [`StorageLayout`] is available.
    pub(crate) mapping_slots: Option<AddressMap<MappingSlots>>,
}
impl EvmFuzzState {
    /// Creates a state backed by an empty in-memory DB and default config (tests only).
    #[cfg(test)]
    pub(crate) fn test() -> Self {
        Self::new(
            &[],
            &CacheDB::<revm::database::EmptyDB>::default(),
            FuzzDictionaryConfig::default(),
            None,
        )
    }

    /// Builds a new fuzz state, seeding a [`FuzzDictionary`] from the database accounts and,
    /// optionally, with literal values collected from the source code.
    pub fn new<DB: DatabaseRef>(
        deployed_libs: &[Address],
        db: &CacheDB<DB>,
        config: FuzzDictionaryConfig,
        literals: Option<&LiteralsDictionary>,
    ) -> Self {
        // Sort accounts to ensure deterministic dictionary generation from the same setUp state.
        let mut accs = db.cache.accounts.iter().collect::<Vec<_>>();
        accs.sort_by_key(|(address, _)| *address);
        // Create fuzz dictionary and insert values from db state.
        let mut dictionary = FuzzDictionary::new(config);
        dictionary.insert_db_values(accs);
        if let Some(literals) = literals {
            dictionary.literal_values = literals.clone();
        }
        Self {
            inner: Arc::new(RwLock::new(dictionary)),
            deployed_libs: deployed_libs.to_vec(),
            mapping_slots: None,
        }
    }

    /// Enables mapping-slot tracking (builder style); used to type storage slots that belong to
    /// Solidity mappings.
    pub fn with_mapping_slots(mut self, mapping_slots: AddressMap<MappingSlots>) -> Self {
        self.mapping_slots = Some(mapping_slots);
        self
    }

    /// Inserts raw 32-byte values into the dictionary under the write lock.
    pub fn collect_values(&self, values: impl IntoIterator<Item = B256>) {
        let mut dict = self.inner.write();
        for value in values {
            dict.insert_value(value);
        }
    }

    /// Collects state changes from a [StateChangeset] and logs into an [EvmFuzzState] according to
    /// the given [FuzzDictionaryConfig].
    pub fn collect_values_from_call(
        &self,
        fuzzed_contracts: &FuzzRunIdentifiedContracts,
        tx: &BasicTxDetails,
        result: &Bytes,
        logs: &[Log],
        state_changeset: &StateChangeset,
        run_depth: u32,
    ) {
        let mut dict = self.inner.write();
        {
            // The targets lock is scoped to this block, while the dictionary write lock spans
            // the whole collection.
            let targets = fuzzed_contracts.targets.lock();
            let (target_abi, target_function) = targets.fuzzed_artifacts(tx);
            dict.insert_logs_values(target_abi, logs, run_depth);
            dict.insert_result_values(target_function, result, run_depth);
            // Get storage layouts for contracts in the state changeset
            let storage_layouts = targets.get_storage_layouts();
            dict.insert_new_state_values(
                state_changeset,
                &storage_layouts,
                self.mapping_slots.as_ref(),
            );
        }
    }

    /// Removes all newly added entries from the dictionary.
    ///
    /// Should be called between fuzz/invariant runs to avoid accumulating data derived from fuzz
    /// inputs.
    pub fn revert(&self) {
        self.inner.write().revert();
    }

    /// Acquires a read guard on the underlying dictionary.
    pub fn dictionary_read(&self) -> RwLockReadGuard<'_, RawRwLock, FuzzDictionary> {
        self.inner.read()
    }

    /// Logs stats about the current state.
    pub fn log_stats(&self) {
        self.inner.read().log_stats();
    }

    /// Test-only helper to seed the dictionary with literal values.
    #[cfg(test)]
    pub(crate) fn seed_literals(&self, map: super::LiteralMaps) {
        self.inner.write().seed_literals(map);
    }
}
// We're using `IndexSet` to have a stable element order when restoring persisted state, as well as
// for performance when iterating over the sets.
pub struct FuzzDictionary {
    /// Collected state values.
    state_values: B256IndexSet,
    /// Addresses that already had their PUSH bytes collected.
    addresses: AddressIndexSet,
    /// Configuration for the dictionary.
    config: FuzzDictionaryConfig,
    /// Number of state values initially collected from db.
    /// Used to revert new collected values at the end of each run.
    db_state_values: usize,
    /// Number of address values initially collected from db.
    /// Used to revert new collected addresses at the end of each run.
    db_addresses: usize,
    /// Typed runtime sample values persisted across invariant runs.
    /// Initially seeded with literal values collected from the source code.
    sample_values: HashMap<DynSolType, B256IndexSet>,
    /// Lazily initialized dictionary of literal values collected from the source code.
    literal_values: LiteralsDictionary,
    /// Tracks whether literals from `literal_values` have been merged into `sample_values`.
    ///
    /// Set to `true` on first call to `seed_samples()`. Before seeding, `samples()` checks both
    /// maps separately. After seeding, literals are merged in, so only `sample_values` is checked.
    samples_seeded: bool,
    /// Number of `insert_value` calls that added a previously unseen value (stats only).
    misses: usize,
    /// Number of `insert_value` calls that hit an already present value (stats only).
    hits: usize,
}
impl fmt::Debug for FuzzDictionary {
    /// Compact debug view: the state-value set is summarized by its length, while the collected
    /// addresses are listed in full.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("FuzzDictionary");
        dbg.field("state_values", &self.state_values.len());
        dbg.field("addresses", &self.addresses);
        dbg.finish()
    }
}
impl Default for FuzzDictionary {
fn default() -> Self {
Self::new(Default::default())
}
}
impl FuzzDictionary {
    /// Creates an empty dictionary for the given config, prefilled with common values.
    pub fn new(config: FuzzDictionaryConfig) -> Self {
        let mut dictionary = Self {
            config,
            samples_seeded: false,
            state_values: Default::default(),
            addresses: Default::default(),
            db_state_values: Default::default(),
            db_addresses: Default::default(),
            sample_values: Default::default(),
            literal_values: Default::default(),
            misses: Default::default(),
            hits: Default::default(),
        };
        dictionary.prefill();
        dictionary
    }

    /// Insert common values into the dictionary at initialization.
    fn prefill(&mut self) {
        self.insert_value(B256::ZERO);
    }

    /// Seeds `sample_values` with all words from the [`LiteralsDictionary`].
    /// Should only be called once per dictionary lifetime.
    #[cold]
    fn seed_samples(&mut self) {
        trace!("seeding `sample_values` from literal dictionary");
        self.sample_values
            .extend(self.literal_values.get().words.iter().map(|(k, v)| (k.clone(), v.clone())));
        self.samples_seeded = true;
    }

    /// Insert values from initial db state into fuzz dictionary.
    /// These values are persisted across invariant runs.
    fn insert_db_values(&mut self, db_state: Vec<(&Address, &DbAccount)>) {
        for (address, account) in db_state {
            // Insert basic account information
            self.insert_value(address.into_word());
            // Insert push bytes
            self.insert_push_bytes_values(address, &account.info);
            // Insert storage values.
            if self.config.include_storage {
                // Sort storage values before inserting to ensure deterministic dictionary.
                let values = account.storage.iter().collect::<BTreeMap<_, _>>();
                for (slot, value) in values {
                    self.insert_storage_value(slot, value, None, None);
                }
            }
        }
        // We need at least some state data if DB is empty,
        // otherwise we can't select random data for state fuzzing.
        if self.values().is_empty() {
            // Prefill with a random address.
            self.insert_value(Address::random().into_word());
        }
        // Record number of values and addresses inserted from db to be used for reverting at the
        // end of each run.
        self.db_state_values = self.state_values.len();
        self.db_addresses = self.addresses.len();
    }

    /// Insert values collected from call result into fuzz dictionary.
    fn insert_result_values(
        &mut self,
        function: Option<&Function>,
        result: &Bytes,
        run_depth: u32,
    ) {
        if let Some(function) = function
            && !function.outputs.is_empty()
        {
            // Decode result and collect samples to be used in subsequent fuzz runs.
            if let Ok(decoded_result) = function.abi_decode_output(result) {
                self.insert_sample_values(decoded_result, run_depth);
            }
        }
    }

    /// Insert values from call log topics and data into fuzz dictionary.
    fn insert_logs_values(&mut self, abi: Option<&JsonAbi>, logs: &[Log], run_depth: u32) {
        let mut samples = Vec::new();
        // Decode logs with known events and collect samples from indexed fields and event body.
        for log in logs {
            let mut log_decoded = false;
            // Try to decode log with events from contract abi.
            if let Some(abi) = abi {
                for event in abi.events() {
                    if let Ok(decoded_event) = event.decode_log(log) {
                        samples.extend(decoded_event.indexed);
                        samples.extend(decoded_event.body);
                        log_decoded = true;
                        break;
                    }
                }
            }
            // If we weren't able to decode event then we insert raw data in fuzz dictionary.
            if !log_decoded {
                for &topic in log.topics() {
                    self.insert_value(topic);
                }
                // Split the raw data into 32-byte words; a trailing partial word is
                // right-padded with zeros.
                let chunks = log.data.data.chunks_exact(32);
                let rem = chunks.remainder();
                for chunk in chunks {
                    self.insert_value(chunk.try_into().unwrap());
                }
                if !rem.is_empty() {
                    self.insert_value(B256::right_padding_from(rem));
                }
            }
        }
        // Insert samples collected from current call in fuzz dictionary.
        self.insert_sample_values(samples, run_depth);
    }

    /// Insert values from call state changeset into fuzz dictionary.
    /// These values are removed at the end of current run.
    fn insert_new_state_values(
        &mut self,
        state_changeset: &StateChangeset,
        storage_layouts: &HashMap<Address, Arc<StorageLayout>>,
        mapping_slots: Option<&AddressMap<MappingSlots>>,
    ) {
        for (address, account) in state_changeset {
            // Insert basic account information.
            self.insert_value(address.into_word());
            // Insert push bytes.
            self.insert_push_bytes_values(address, &account.info);
            // Insert storage values.
            if self.config.include_storage {
                let storage_layout = storage_layouts.get(address).cloned();
                trace!(
                    "{address:?} has mapping_slots {}",
                    mapping_slots.is_some_and(|m| m.contains_key(address))
                );
                let mapping_slots = mapping_slots.and_then(|m| m.get(address));
                for (slot, value) in &account.storage {
                    self.insert_storage_value(
                        slot,
                        &value.present_value,
                        storage_layout.as_deref(),
                        mapping_slots,
                    );
                }
            }
        }
    }

    /// Insert values from push bytes into fuzz dictionary.
    /// Values are collected only once for a given address.
    /// If values are newly collected then they are removed at the end of current run.
    fn insert_push_bytes_values(&mut self, address: &Address, account_info: &AccountInfo) {
        if self.config.include_push_bytes
            && !self.addresses.contains(address)
            && let Some(code) = &account_info.code
        {
            // Mark the address as processed even if the dictionary is full, so the bytecode is
            // never scanned twice.
            self.insert_address(*address);
            if !self.values_full() {
                // Skip the trailing metadata hash so it doesn't pollute the dictionary.
                self.collect_push_bytes(ignore_metadata_hash(code.original_byte_slice()));
            }
        }
    }

    /// Scans bytecode (up to [`PUSH_BYTE_ANALYSIS_LIMIT`]) and inserts every non-zero PUSH
    /// immediate into the dictionary.
    fn collect_push_bytes(&mut self, code: &[u8]) {
        let len = code.len().min(PUSH_BYTE_ANALYSIS_LIMIT);
        let code = &code[..len];
        for inst in InstIter::new(code) {
            // Don't add 0 to the dictionary as it's already present.
            if !inst.immediate.is_empty()
                && let Some(push_value) = U256::try_from_be_slice(inst.immediate)
                && push_value != U256::ZERO
            {
                self.insert_value_u256(push_value);
            }
        }
    }

    /// Insert values from single storage slot and storage value into fuzz dictionary.
    /// Uses [`SlotIdentifier`] to identify storage slots types.
    fn insert_storage_value(
        &mut self,
        slot: &U256,
        value: &U256,
        layout: Option<&StorageLayout>,
        mapping_slots: Option<&MappingSlots>,
    ) {
        let slot = B256::from(*slot);
        let value = B256::from(*value);
        // Always insert the slot itself
        self.insert_value(slot);
        // If we have a storage layout, use SlotIdentifier for better type identification
        if let Some(slot_identifier) =
            layout.map(|l| SlotIdentifier::new(l.clone().into()))
            // Identify Slot Type
            && let Some(slot_info) = slot_identifier.identify(&slot, mapping_slots)
            && slot_info.decode(value).is_some()
        {
            trace!(?slot_info, "inserting typed storage value");
            // Typed values go into the sample map; make sure literals are merged first.
            if !self.samples_seeded {
                self.seed_samples();
            }
            self.sample_values.entry(slot_info.slot_type.dyn_sol_type).or_default().insert(value);
        } else {
            // Unidentified slots fall back to raw value insertion (plus +/- 1 neighbors).
            self.insert_value_u256(value.into());
        }
    }

    /// Insert address into fuzz dictionary.
    /// If address is newly collected then it is removed by index at the end of current run.
    fn insert_address(&mut self, address: Address) {
        if self.addresses.len() < self.config.max_fuzz_dictionary_addresses {
            self.addresses.insert(address);
        }
    }

    /// Insert raw value into fuzz dictionary.
    ///
    /// If value is newly collected then it is removed by index at the end of current run.
    ///
    /// Returns true if the value was inserted.
    fn insert_value(&mut self, value: B256) -> bool {
        let insert = !self.values_full();
        if insert {
            // Track hit/miss stats: a "miss" is a genuinely new value.
            let new_value = self.state_values.insert(value);
            let counter = if new_value { &mut self.misses } else { &mut self.hits };
            *counter += 1;
        }
        insert
    }

    /// Inserts `value` plus its two wrapping neighbors (`value - 1`, `value + 1`).
    /// Returns true if any of the three was inserted.
    fn insert_value_u256(&mut self, value: U256) -> bool {
        // Also add the value below and above the push value to the dictionary.
        // Note: non-short-circuiting `|` is deliberate so all three inserts are attempted.
        let one = U256::from(1);
        self.insert_value(value.into())
            | self.insert_value((value.wrapping_sub(one)).into())
            | self.insert_value((value.wrapping_add(one)).into())
    }

    /// Whether the state-value set has reached its configured capacity.
    fn values_full(&self) -> bool {
        self.state_values.len() >= self.config.max_fuzz_dictionary_values
    }

    /// Insert sample values that are reused across multiple runs.
    /// The number of samples is limited to invariant run depth.
    /// If collected samples limit is reached then values are inserted as regular values.
    pub fn insert_sample_values(
        &mut self,
        sample_values: impl IntoIterator<Item = DynSolValue>,
        limit: u32,
    ) {
        if !self.samples_seeded {
            self.seed_samples();
        }
        for sample in sample_values {
            // Only word-representable values with a known type are collected as samples.
            if let (Some(sample_type), Some(sample_value)) = (sample.as_type(), sample.as_word()) {
                if let Some(values) = self.sample_values.get_mut(&sample_type) {
                    if values.len() < limit as usize {
                        values.insert(sample_value);
                    } else {
                        // Insert as state value (will be removed at the end of the run).
                        self.insert_value(sample_value);
                    }
                } else {
                    self.sample_values.entry(sample_type).or_default().insert(sample_value);
                }
            }
        }
    }

    /// Returns all collected raw 32-byte state values.
    pub fn values(&self) -> &B256IndexSet {
        &self.state_values
    }

    /// Number of collected state values.
    pub fn len(&self) -> usize {
        self.state_values.len()
    }

    /// Whether no state values have been collected.
    pub fn is_empty(&self) -> bool {
        self.state_values.is_empty()
    }

    /// Returns sample values for a given type, checking both runtime samples and literals.
    ///
    /// Before `seed_samples()` is called, checks both `literal_values` and `sample_values`
    /// separately. After seeding, all literal values are merged into `sample_values`.
    #[inline]
    pub fn samples(&self, param_type: &DynSolType) -> Option<&B256IndexSet> {
        // If not seeded yet, return literals
        if !self.samples_seeded {
            return self.literal_values.get().words.get(param_type);
        }
        self.sample_values.get(param_type)
    }

    /// Returns the collected literal strings, triggering initialization if needed.
    #[inline]
    pub fn ast_strings(&self) -> &IndexSet<String> {
        &self.literal_values.get().strings
    }

    /// Returns the collected literal bytes (hex strings), triggering initialization if needed.
    #[inline]
    pub fn ast_bytes(&self) -> &IndexSet<Bytes> {
        &self.literal_values.get().bytes
    }

    /// Returns the addresses whose PUSH bytes have been collected.
    #[inline]
    pub fn addresses(&self) -> &AddressIndexSet {
        &self.addresses
    }

    /// Revert values and addresses collected during the run by truncating to initial db len.
    pub fn revert(&mut self) {
        self.state_values.truncate(self.db_state_values);
        self.addresses.truncate(self.db_addresses);
    }

    /// Emits a trace with dictionary size and hit/miss statistics.
    pub fn log_stats(&self) {
        trace!(
            addresses.len = self.addresses.len(),
            sample.len = self.sample_values.len(),
            state.len = self.state_values.len(),
            state.misses = self.misses,
            state.hits = self.hits,
            "FuzzDictionary stats",
        );
    }

    #[cfg(test)]
    /// Test-only helper to seed the dictionary with literal values.
    pub(crate) fn seed_literals(&mut self, map: super::LiteralMaps) {
        self.literal_values.set(map);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/uint.rs | crates/evm/fuzz/src/strategies/uint.rs | use alloy_dyn_abi::{DynSolType, DynSolValue};
use alloy_primitives::U256;
use proptest::{
prelude::Rng,
strategy::{NewTree, Strategy, ValueTree},
test_runner::TestRunner,
};
/// Value tree for unsigned ints (up to uint256).
///
/// Shrinks by binary search over the interval `[lo, hi]` (see the `ValueTree` impl below).
pub struct UintValueTree {
    /// Lower base (inclusive lower end of the current search interval)
    lo: U256,
    /// Current value
    curr: U256,
    /// Higher base (upper end of the current search interval)
    hi: U256,
    /// If true cannot be simplified or complexified
    fixed: bool,
}
impl UintValueTree {
    /// Builds a tree rooted at `start`, with the shrink interval `[0, start]`.
    ///
    /// When `fixed` is `true`, the tree holds exactly one element and will neither be
    /// simplified nor complicated.
    fn new(start: U256, fixed: bool) -> Self {
        Self { lo: U256::ZERO, curr: start, hi: start, fixed }
    }

    /// Moves `curr` to the midpoint of `[lo, hi]`; reports whether it actually moved.
    fn reposition(&mut self) -> bool {
        let midpoint = self.lo + (self.hi - self.lo) / U256::from(2);
        if midpoint == self.curr {
            return false;
        }
        self.curr = midpoint;
        true
    }
}
impl ValueTree for UintValueTree {
    type Value = U256;

    /// Returns the current candidate value.
    fn current(&self) -> Self::Value {
        self.curr
    }

    /// Shrinks towards `lo`: pulls the upper bound down to the current value and bisects.
    fn simplify(&mut self) -> bool {
        // Fixed trees and exhausted intervals cannot shrink further.
        if self.fixed || (self.hi <= self.lo) {
            return false;
        }
        self.hi = self.curr;
        self.reposition()
    }

    /// Grows back towards `hi`: pushes the lower bound just above the current value and bisects.
    fn complicate(&mut self) -> bool {
        if self.fixed || (self.hi <= self.lo) {
            return false;
        }
        self.lo = self.curr + U256::from(1);
        self.reposition()
    }
}
/// Value tree for unsigned ints (up to uint256).
/// The strategy combines 3 different strategies, each assigned a specific weight:
/// 1. Generate purely random value in a range. This will first choose bit size uniformly (up `bits`
///    param). Then generate a value for this bit size.
/// 2. Generate a random value around the edges (+/- 3 around 0 and max possible value)
/// 3. Generate a value from a predefined fixtures set
///
/// The three weights are relative and currently sum to 100 (see `new`).
///
/// To define uint fixtures:
/// - return an array of possible values for a parameter named `amount` declare a function `function
///   fixture_amount() public returns (uint32[] memory)`.
/// - use `amount` named parameter in fuzzed test in order to include fixtures in fuzzed values
///   `function testFuzz_uint32(uint32 amount)`.
///
/// If fixture is not a valid uint type then error is raised and random value generated.
#[derive(Debug)]
pub struct UintStrategy {
    /// Bit size of uint (e.g. 256)
    bits: usize,
    /// A set of fixtures to be generated
    fixtures: Vec<DynSolValue>,
    /// The weight for edge cases (+/- 3 around 0 and max possible value)
    edge_weight: usize,
    /// The weight for fixtures
    fixtures_weight: usize,
    /// The weight for purely random values
    random_weight: usize,
}
impl UintStrategy {
/// Create a new strategy.
/// #Arguments
/// * `bits` - Size of uint in bits
/// * `fixtures` - A set of fixed values to be generated (according to fixtures weight)
pub fn new(bits: usize, fixtures: Option<&[DynSolValue]>) -> Self {
Self {
bits,
fixtures: Vec::from(fixtures.unwrap_or_default()),
edge_weight: 10usize,
fixtures_weight: 40usize,
random_weight: 50usize,
}
}
fn generate_edge_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let rng = runner.rng();
// Choose if we want values around 0 or max
let is_min = rng.random::<bool>();
let offset = U256::from(rng.random_range(0..4));
let start = if is_min { offset } else { self.type_max().saturating_sub(offset) };
Ok(UintValueTree::new(start, false))
}
fn generate_fixtures_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
// generate random cases if there's no fixtures
if self.fixtures.is_empty() {
return self.generate_random_tree(runner);
}
// Generate value tree from fixture.
let fixture = &self.fixtures[runner.rng().random_range(0..self.fixtures.len())];
if let Some(uint_fixture) = fixture.as_uint()
&& uint_fixture.1 == self.bits
{
return Ok(UintValueTree::new(uint_fixture.0, false));
}
// If fixture is not a valid type, raise error and generate random value.
error!("{:?} is not a valid {} fixture", fixture, DynSolType::Uint(self.bits));
self.generate_random_tree(runner)
}
fn generate_random_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let rng = runner.rng();
// generate random number of bits uniformly
let bits = rng.random_range(0..=self.bits);
// init 2 128-bit randoms
let mut higher: u128 = rng.random_range(0..=u128::MAX);
let mut lower: u128 = rng.random_range(0..=u128::MAX);
// cut 2 randoms according to bits size
match bits {
x if x < 128 => {
lower &= (1u128 << x) - 1;
higher = 0;
}
x if (128..256).contains(&x) => higher &= (1u128 << (x - 128)) - 1,
_ => {}
};
// init U256 from 2 randoms
let mut inner: [u64; 4] = [0; 4];
inner[0] = lower as u64;
inner[1] = (lower >> 64) as u64;
inner[2] = higher as u64;
inner[3] = (higher >> 64) as u64;
let start: U256 = U256::from_limbs(inner);
Ok(UintValueTree::new(start, false))
}
fn type_max(&self) -> U256 {
if self.bits < 256 { (U256::from(1) << self.bits) - U256::from(1) } else { U256::MAX }
}
}
impl Strategy for UintStrategy {
type Tree = UintValueTree;
type Value = U256;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let total_weight = self.random_weight + self.fixtures_weight + self.edge_weight;
let bias = runner.rng().random_range(0..total_weight);
// randomly select one of 3 strategies
match bias {
x if x < self.edge_weight => self.generate_edge_tree(runner),
x if x < self.edge_weight + self.fixtures_weight => self.generate_fixtures_tree(runner),
_ => self.generate_random_tree(runner),
}
}
}
#[cfg(test)]
mod tests {
use crate::strategies::uint::UintValueTree;
use alloy_primitives::U256;
use proptest::strategy::ValueTree;
#[test]
fn test_uint_tree_complicate_max() {
let mut uint_tree = UintValueTree::new(U256::MAX, false);
assert_eq!(uint_tree.hi, U256::MAX);
assert_eq!(uint_tree.curr, U256::MAX);
uint_tree.complicate();
assert_eq!(uint_tree.lo, U256::MIN);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/mod.rs | crates/evm/fuzz/src/strategies/mod.rs | mod int;
pub use int::IntStrategy;
mod uint;
pub use uint::UintStrategy;
mod param;
pub use param::{fuzz_param, fuzz_param_from_state, fuzz_param_with_fixtures, mutate_param_value};
mod calldata;
pub use calldata::{fuzz_calldata, fuzz_calldata_from_state};
mod state;
pub use state::EvmFuzzState;
mod invariants;
pub use invariants::{fuzz_contract_with_calldata, invariant_strat, override_call_strat};
mod mutators;
pub use mutators::BoundMutator;
mod literals;
pub use literals::{LiteralMaps, LiteralsCollector, LiteralsDictionary};
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/param.rs | crates/evm/fuzz/src/strategies/param.rs | use super::state::EvmFuzzState;
use crate::strategies::mutators::{
BitMutator, GaussianNoiseMutator, IncrementDecrementMutator, InterestingWordMutator,
};
use alloy_dyn_abi::{DynSolType, DynSolValue, Word};
use alloy_primitives::{Address, B256, I256, U256};
use proptest::{prelude::*, test_runner::TestRunner};
use rand::{SeedableRng, prelude::IndexedMutRandom, rngs::StdRng};
use std::mem::replace;
/// The max length of arrays we fuzz for is 256.
const MAX_ARRAY_LEN: usize = 256;
/// Given a parameter type, returns a strategy for generating values for that type.
///
/// See [`fuzz_param_with_fixtures`] for more information.
pub fn fuzz_param(param: &DynSolType) -> BoxedStrategy<DynSolValue> {
fuzz_param_inner(param, None)
}
/// Given a parameter type and configured fixtures for param name, returns a strategy for generating
/// values for that type.
///
/// Fixtures can be currently generated for uint, int, address, bytes and
/// string types and are defined for parameter name.
/// For example, fixtures for parameter `owner` of type `address` can be defined in a function with
/// a `function fixture_owner() public returns (address[] memory)` signature.
///
/// Fixtures are matched on parameter name, hence fixtures defined in
/// `fixture_owner` function can be used in a fuzzed test function with a signature like
/// `function testFuzz_ownerAddress(address owner, uint amount)`.
///
/// Raises an error if all the fixture types are not of the same type as the input parameter.
///
/// Works with ABI Encoder v2 tuples.
pub fn fuzz_param_with_fixtures(
param: &DynSolType,
fixtures: Option<&[DynSolValue]>,
name: &str,
) -> BoxedStrategy<DynSolValue> {
fuzz_param_inner(param, fixtures.map(|f| (f, name)))
}
fn fuzz_param_inner(
param: &DynSolType,
mut fuzz_fixtures: Option<(&[DynSolValue], &str)>,
) -> BoxedStrategy<DynSolValue> {
if let Some((fixtures, name)) = fuzz_fixtures
&& !fixtures.iter().all(|f| f.matches(param))
{
error!("fixtures for {name:?} do not match type {param}");
fuzz_fixtures = None;
}
let fuzz_fixtures = fuzz_fixtures.map(|(f, _)| f);
let value = || {
let default_strategy = DynSolValue::type_strategy(param);
if let Some(fixtures) = fuzz_fixtures {
proptest::prop_oneof![
50 => {
let fixtures = fixtures.to_vec();
any::<prop::sample::Index>()
.prop_map(move |index| index.get(&fixtures).clone())
},
50 => default_strategy,
]
.boxed()
} else {
default_strategy.boxed()
}
};
match *param {
DynSolType::Address => value(),
DynSolType::Int(n @ 8..=256) => super::IntStrategy::new(n, fuzz_fixtures)
.prop_map(move |x| DynSolValue::Int(x, n))
.boxed(),
DynSolType::Uint(n @ 8..=256) => super::UintStrategy::new(n, fuzz_fixtures)
.prop_map(move |x| DynSolValue::Uint(x, n))
.boxed(),
DynSolType::Function | DynSolType::Bool => DynSolValue::type_strategy(param).boxed(),
DynSolType::Bytes => value(),
DynSolType::FixedBytes(_size @ 1..=32) => value(),
DynSolType::String => value()
.prop_map(move |value| {
DynSolValue::String(
value.as_str().unwrap().trim().trim_end_matches('\0').to_string(),
)
})
.boxed(),
DynSolType::Tuple(ref params) => params
.iter()
.map(|param| fuzz_param_inner(param, None))
.collect::<Vec<_>>()
.prop_map(DynSolValue::Tuple)
.boxed(),
DynSolType::FixedArray(ref param, size) => {
proptest::collection::vec(fuzz_param_inner(param, None), size)
.prop_map(DynSolValue::FixedArray)
.boxed()
}
DynSolType::Array(ref param) => {
proptest::collection::vec(fuzz_param_inner(param, None), 0..MAX_ARRAY_LEN)
.prop_map(DynSolValue::Array)
.boxed()
}
_ => panic!("unsupported fuzz param type: {param}"),
}
}
/// Given a parameter type, returns a strategy for generating values for that type, given some EVM
/// fuzz state.
///
/// Works with ABI Encoder v2 tuples.
pub fn fuzz_param_from_state(
param: &DynSolType,
state: &EvmFuzzState,
) -> BoxedStrategy<DynSolValue> {
// Value strategy that uses the state.
let value = || {
let state = state.clone();
let param = param.clone();
// Generate a bias and use it to pick samples or non-persistent values (50 / 50).
// Use `Index` instead of `Selector` when selecting a value to avoid iterating over the
// entire dictionary.
any::<(bool, prop::sample::Index)>().prop_map(move |(bias, index)| {
let state = state.dictionary_read();
let values = if bias { state.samples(¶m) } else { None }
.unwrap_or_else(|| state.values())
.as_slice();
values[index.index(values.len())]
})
};
// Convert the value based on the parameter type
match *param {
DynSolType::Address => {
let deployed_libs = state.deployed_libs.clone();
value()
.prop_map(move |value| {
let mut fuzzed_addr = Address::from_word(value);
if deployed_libs.contains(&fuzzed_addr) {
let mut rng = StdRng::seed_from_u64(0x1337); // use deterministic rng
// Do not use addresses of deployed libraries as fuzz input, instead return
// a deterministically random address. We cannot filter out this value (via
// `prop_filter_map`) as proptest can invoke this closure after test
// execution, and returning a `None` will cause it to panic.
// See <https://github.com/foundry-rs/foundry/issues/9764> and <https://github.com/foundry-rs/foundry/issues/8639>.
loop {
fuzzed_addr.randomize_with(&mut rng);
if !deployed_libs.contains(&fuzzed_addr) {
break;
}
}
}
DynSolValue::Address(fuzzed_addr)
})
.boxed()
}
DynSolType::Function => value()
.prop_map(move |value| {
DynSolValue::Function(alloy_primitives::Function::from_word(value))
})
.boxed(),
DynSolType::FixedBytes(size @ 1..=32) => value()
.prop_map(move |mut v| {
v[size..].fill(0);
DynSolValue::FixedBytes(B256::from(v), size)
})
.boxed(),
DynSolType::Bool => DynSolValue::type_strategy(param).boxed(),
DynSolType::String => {
let state = state.clone();
(proptest::bool::weighted(0.3), any::<prop::sample::Index>())
.prop_flat_map(move |(use_ast, select_index)| {
let dict = state.dictionary_read();
// AST string literals available: 30% probability
let ast_strings = dict.ast_strings();
if use_ast && !ast_strings.is_empty() {
let s = &ast_strings.as_slice()[select_index.index(ast_strings.len())];
return Just(DynSolValue::String(s.clone())).boxed();
}
// Fallback to random string generation
DynSolValue::type_strategy(&DynSolType::String)
.prop_map(|value| {
DynSolValue::String(
value.as_str().unwrap().trim().trim_end_matches('\0').to_string(),
)
})
.boxed()
})
.boxed()
}
DynSolType::Bytes => {
let state_clone = state.clone();
(
value(),
proptest::bool::weighted(0.1),
proptest::bool::weighted(0.2),
any::<prop::sample::Index>(),
)
.prop_map(move |(word, use_ast_string, use_ast_bytes, select_index)| {
let dict = state_clone.dictionary_read();
// Try string literals as bytes: 10% chance
let ast_strings = dict.ast_strings();
if use_ast_string && !ast_strings.is_empty() {
let s = &ast_strings.as_slice()[select_index.index(ast_strings.len())];
return DynSolValue::Bytes(s.as_bytes().to_vec());
}
// Try hex literals: 20% chance
let ast_bytes = dict.ast_bytes();
if use_ast_bytes && !ast_bytes.is_empty() {
let bytes = &ast_bytes.as_slice()[select_index.index(ast_bytes.len())];
return DynSolValue::Bytes(bytes.to_vec());
}
// Fallback to the generated word from the dictionary: 70% chance
DynSolValue::Bytes(word.0.into())
})
.boxed()
}
DynSolType::Int(n @ 8..=256) => match n / 8 {
32 => value()
.prop_map(move |value| DynSolValue::Int(I256::from_raw(value.into()), 256))
.boxed(),
1..=31 => value()
.prop_map(move |value| {
// Extract lower N bits
let uint_n = U256::from_be_bytes(value.0) % U256::from(1).wrapping_shl(n);
// Interpret as signed int (two's complement) --> check sign bit (bit N-1).
let sign_bit = U256::from(1) << (n - 1);
let num = if uint_n >= sign_bit {
// Negative number in two's complement
let modulus = U256::from(1) << n;
I256::from_raw(uint_n.wrapping_sub(modulus))
} else {
// Positive number
I256::from_raw(uint_n)
};
DynSolValue::Int(num, n)
})
.boxed(),
_ => unreachable!(),
},
DynSolType::Uint(n @ 8..=256) => match n / 8 {
32 => value()
.prop_map(move |value| DynSolValue::Uint(U256::from_be_bytes(value.0), 256))
.boxed(),
1..=31 => value()
.prop_map(move |value| {
let uint = U256::from_be_bytes(value.0) % U256::from(1).wrapping_shl(n);
DynSolValue::Uint(uint, n)
})
.boxed(),
_ => unreachable!(),
},
DynSolType::Tuple(ref params) => params
.iter()
.map(|p| fuzz_param_from_state(p, state))
.collect::<Vec<_>>()
.prop_map(DynSolValue::Tuple)
.boxed(),
DynSolType::FixedArray(ref param, size) => {
proptest::collection::vec(fuzz_param_from_state(param, state), size)
.prop_map(DynSolValue::FixedArray)
.boxed()
}
DynSolType::Array(ref param) => {
proptest::collection::vec(fuzz_param_from_state(param, state), 0..MAX_ARRAY_LEN)
.prop_map(DynSolValue::Array)
.boxed()
}
_ => panic!("unsupported fuzz param type: {param}"),
}
}
/// Mutates the current value of the given parameter type and value.
pub fn mutate_param_value(
param: &DynSolType,
value: DynSolValue,
test_runner: &mut TestRunner,
state: &EvmFuzzState,
) -> DynSolValue {
let new_value = |param: &DynSolType, test_runner: &mut TestRunner| {
fuzz_param_from_state(param, state)
.new_tree(test_runner)
.expect("Could not generate case")
.current()
};
match value {
DynSolValue::Bool(val) => {
// flip boolean value
trace!(target: "mutator", "Bool flip {val}");
Some(DynSolValue::Bool(!val))
}
DynSolValue::Uint(val, size) => match test_runner.rng().random_range(0..=6) {
0 => U256::increment_decrement(val, size, test_runner),
1 => U256::flip_random_bit(val, size, test_runner),
2 => U256::mutate_interesting_byte(val, size, test_runner),
3 => U256::mutate_interesting_word(val, size, test_runner),
4 => U256::mutate_interesting_dword(val, size, test_runner),
5 => U256::mutate_with_gaussian_noise(val, size, test_runner),
6 => None,
_ => unreachable!(),
}
.map(|v| DynSolValue::Uint(v, size)),
DynSolValue::Int(val, size) => match test_runner.rng().random_range(0..=6) {
0 => I256::increment_decrement(val, size, test_runner),
1 => I256::flip_random_bit(val, size, test_runner),
2 => I256::mutate_interesting_byte(val, size, test_runner),
3 => I256::mutate_interesting_word(val, size, test_runner),
4 => I256::mutate_interesting_dword(val, size, test_runner),
5 => I256::mutate_with_gaussian_noise(val, size, test_runner),
6 => None,
_ => unreachable!(),
}
.map(|v| DynSolValue::Int(v, size)),
DynSolValue::Address(val) => match test_runner.rng().random_range(0..=4) {
0 => Address::flip_random_bit(val, 20, test_runner),
1 => Address::mutate_interesting_byte(val, 20, test_runner),
2 => Address::mutate_interesting_word(val, 20, test_runner),
3 => Address::mutate_interesting_dword(val, 20, test_runner),
4 => None,
_ => unreachable!(),
}
.map(DynSolValue::Address),
DynSolValue::Array(mut values) => {
if let DynSolType::Array(param_type) = param
&& !values.is_empty()
{
match test_runner.rng().random_range(0..=2) {
// Decrease array size by removing a random element.
0 => {
values.remove(test_runner.rng().random_range(0..values.len()));
}
// Increase array size.
1 => values.push(new_value(param_type, test_runner)),
// Mutate random array element.
2 => mutate_random_array_value(&mut values, param_type, test_runner, state),
_ => unreachable!(),
}
Some(DynSolValue::Array(values))
} else {
None
}
}
DynSolValue::FixedArray(mut values) => {
if let DynSolType::FixedArray(param_type, _size) = param
&& !values.is_empty()
{
mutate_random_array_value(&mut values, param_type, test_runner, state);
Some(DynSolValue::FixedArray(values))
} else {
None
}
}
DynSolValue::FixedBytes(word, size) => match test_runner.rng().random_range(0..=4) {
0 => Word::flip_random_bit(word, size, test_runner),
1 => Word::mutate_interesting_byte(word, size, test_runner),
2 => Word::mutate_interesting_word(word, size, test_runner),
3 => Word::mutate_interesting_dword(word, size, test_runner),
4 => None,
_ => unreachable!(),
}
.map(|word| DynSolValue::FixedBytes(word, size)),
DynSolValue::CustomStruct { name, prop_names, tuple: mut values } => {
if let DynSolType::CustomStruct { name: _, prop_names: _, tuple: tuple_types }
| DynSolType::Tuple(tuple_types) = param
&& !values.is_empty()
{
// Mutate random struct element.
mutate_random_tuple_value(&mut values, tuple_types, test_runner, state);
Some(DynSolValue::CustomStruct { name, prop_names, tuple: values })
} else {
None
}
}
DynSolValue::Tuple(mut values) => {
if let DynSolType::Tuple(tuple_types) = param
&& !values.is_empty()
{
// Mutate random tuple element.
mutate_random_tuple_value(&mut values, tuple_types, test_runner, state);
Some(DynSolValue::Tuple(values))
} else {
None
}
}
_ => None,
}
.unwrap_or_else(|| new_value(param, test_runner))
}
/// Mutates random value from given tuples.
fn mutate_random_tuple_value(
tuple_values: &mut [DynSolValue],
tuple_types: &[DynSolType],
test_runner: &mut TestRunner,
state: &EvmFuzzState,
) {
let id = test_runner.rng().random_range(0..tuple_values.len());
let param_type = &tuple_types[id];
let old_val = replace(&mut tuple_values[id], DynSolValue::Bool(false));
let new_val = mutate_param_value(param_type, old_val, test_runner, state);
tuple_values[id] = new_val;
}
/// Mutates random value from given array.
fn mutate_random_array_value(
array_values: &mut [DynSolValue],
element_type: &DynSolType,
test_runner: &mut TestRunner,
state: &EvmFuzzState,
) {
let elem = array_values.choose_mut(&mut test_runner.rng()).unwrap();
let old_val = replace(elem, DynSolValue::Bool(false));
let new_val = mutate_param_value(element_type, old_val, test_runner, state);
*elem = new_val;
}
#[cfg(test)]
mod tests {
use crate::{
FuzzFixtures,
strategies::{EvmFuzzState, fuzz_calldata, fuzz_calldata_from_state},
};
use alloy_primitives::B256;
use foundry_common::abi::get_func;
use std::collections::HashSet;
#[test]
fn can_fuzz_array() {
let f = "testArray(uint64[2] calldata values)";
let func = get_func(f).unwrap();
let state = EvmFuzzState::test();
let strategy = proptest::prop_oneof![
60 => fuzz_calldata(func.clone(), &FuzzFixtures::default()),
40 => fuzz_calldata_from_state(func, &state),
];
let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() };
let mut runner = proptest::test_runner::TestRunner::new(cfg);
let _ = runner.run(&strategy, |_| Ok(()));
}
#[test]
fn can_fuzz_string_and_bytes_with_ast_literals_and_hashes() {
use super::fuzz_param_from_state;
use crate::strategies::LiteralMaps;
use alloy_dyn_abi::DynSolType;
use alloy_primitives::keccak256;
use proptest::strategy::Strategy;
// Seed dict with string values and their hashes --> mimic `CheatcodeAnalysis` behavior.
let mut literals = LiteralMaps::default();
literals.strings.insert("hello".to_string());
literals.strings.insert("world".to_string());
literals.words.entry(DynSolType::FixedBytes(32)).or_default().insert(keccak256("hello"));
literals.words.entry(DynSolType::FixedBytes(32)).or_default().insert(keccak256("world"));
let state = EvmFuzzState::test();
state.seed_literals(literals);
let cfg = proptest::test_runner::Config { failure_persistence: None, ..Default::default() };
let mut runner = proptest::test_runner::TestRunner::new(cfg);
// Verify strategies generates the seeded AST literals
let mut generated_bytes = HashSet::new();
let mut generated_hashes = HashSet::new();
let mut generated_strings = HashSet::new();
let bytes_strategy = fuzz_param_from_state(&DynSolType::Bytes, &state);
let string_strategy = fuzz_param_from_state(&DynSolType::String, &state);
let bytes32_strategy = fuzz_param_from_state(&DynSolType::FixedBytes(32), &state);
for _ in 0..256 {
let tree = bytes_strategy.new_tree(&mut runner).unwrap();
if let Some(bytes) = tree.current().as_bytes()
&& let Ok(s) = std::str::from_utf8(bytes)
{
generated_bytes.insert(s.to_string());
}
let tree = string_strategy.new_tree(&mut runner).unwrap();
if let Some(s) = tree.current().as_str() {
generated_strings.insert(s.to_string());
}
let tree = bytes32_strategy.new_tree(&mut runner).unwrap();
if let Some((bytes, size)) = tree.current().as_fixed_bytes()
&& size == 32
{
generated_hashes.insert(B256::from_slice(bytes));
}
}
assert!(generated_bytes.contains("hello"));
assert!(generated_bytes.contains("world"));
assert!(generated_strings.contains("hello"));
assert!(generated_strings.contains("world"));
assert!(generated_hashes.contains(&keccak256("hello")));
assert!(generated_hashes.contains(&keccak256("world")));
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/int.rs | crates/evm/fuzz/src/strategies/int.rs | use alloy_dyn_abi::{DynSolType, DynSolValue};
use alloy_primitives::{I256, Sign, U256};
use proptest::{
prelude::Rng,
strategy::{NewTree, Strategy, ValueTree},
test_runner::TestRunner,
};
/// Value tree for signed ints (up to int256).
pub struct IntValueTree {
/// Lower base (by absolute value)
lo: I256,
/// Current value
curr: I256,
/// Higher base (by absolute value)
hi: I256,
/// If true cannot be simplified or complexified
fixed: bool,
}
impl IntValueTree {
/// Create a new tree
/// # Arguments
/// * `start` - Starting value for the tree
/// * `fixed` - If `true` the tree would only contain one element and won't be simplified.
fn new(start: I256, fixed: bool) -> Self {
Self { lo: I256::ZERO, curr: start, hi: start, fixed }
}
fn reposition(&mut self) -> bool {
let interval = self.hi - self.lo;
let new_mid = self.lo + interval / I256::from_raw(U256::from(2));
if new_mid == self.curr {
false
} else {
self.curr = new_mid;
true
}
}
fn magnitude_greater(lhs: I256, rhs: I256) -> bool {
if lhs.is_zero() {
return false;
}
(lhs > rhs) ^ (lhs.is_negative())
}
}
impl ValueTree for IntValueTree {
type Value = I256;
fn current(&self) -> Self::Value {
self.curr
}
fn simplify(&mut self) -> bool {
if self.fixed || !Self::magnitude_greater(self.hi, self.lo) {
return false;
}
self.hi = self.curr;
self.reposition()
}
fn complicate(&mut self) -> bool {
if self.fixed || !Self::magnitude_greater(self.hi, self.lo) {
return false;
}
self.lo = if self.curr != I256::MIN && self.curr != I256::MAX {
self.curr + if self.hi.is_negative() { I256::MINUS_ONE } else { I256::ONE }
} else {
self.curr
};
self.reposition()
}
}
/// Value tree for signed ints (up to int256).
/// The strategy combines 3 different strategies, each assigned a specific weight:
/// 1. Generate purely random value in a range. This will first choose bit size uniformly (up `bits`
/// param). Then generate a value for this bit size.
/// 2. Generate a random value around the edges (+/- 3 around min, 0 and max possible value)
/// 3. Generate a value from a predefined fixtures set
///
/// To define int fixtures:
/// - return an array of possible values for a parameter named `amount` declare a function `function
/// fixture_amount() public returns (int32[] memory)`.
/// - use `amount` named parameter in fuzzed test in order to include fixtures in fuzzed values
/// `function testFuzz_int32(int32 amount)`.
///
/// If fixture is not a valid int type then error is raised and random value generated.
#[derive(Debug)]
pub struct IntStrategy {
/// Bit size of int (e.g. 256)
bits: usize,
/// A set of fixtures to be generated
fixtures: Vec<DynSolValue>,
/// The weight for edge cases (+/- 3 around 0 and max possible value)
edge_weight: usize,
/// The weight for fixtures
fixtures_weight: usize,
/// The weight for purely random values
random_weight: usize,
}
impl IntStrategy {
/// Create a new strategy.
/// #Arguments
/// * `bits` - Size of uint in bits
/// * `fixtures` - A set of fixed values to be generated (according to fixtures weight)
pub fn new(bits: usize, fixtures: Option<&[DynSolValue]>) -> Self {
Self {
bits,
fixtures: Vec::from(fixtures.unwrap_or_default()),
edge_weight: 10usize,
fixtures_weight: 40usize,
random_weight: 50usize,
}
}
fn generate_edge_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let rng = runner.rng();
let offset = I256::from_raw(U256::from(rng.random_range(0..4)));
let umax: U256 = (U256::from(1) << (self.bits - 1)) - U256::from(1);
// Choose if we want values around min, -0, +0, or max
let kind = rng.random_range(0..4);
let start = match kind {
0 => {
I256::overflowing_from_sign_and_abs(Sign::Negative, umax + U256::from(1)).0 + offset
}
1 => -offset - I256::ONE,
2 => offset,
3 => I256::overflowing_from_sign_and_abs(Sign::Positive, umax).0 - offset,
_ => unreachable!(),
};
Ok(IntValueTree::new(start, false))
}
fn generate_fixtures_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
// generate random cases if there's no fixtures
if self.fixtures.is_empty() {
return self.generate_random_tree(runner);
}
// Generate value tree from fixture.
let fixture = &self.fixtures[runner.rng().random_range(0..self.fixtures.len())];
if let Some(int_fixture) = fixture.as_int()
&& int_fixture.1 == self.bits
{
return Ok(IntValueTree::new(int_fixture.0, false));
}
// If fixture is not a valid type, raise error and generate random value.
error!("{:?} is not a valid {} fixture", fixture, DynSolType::Int(self.bits));
self.generate_random_tree(runner)
}
fn generate_random_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let rng = runner.rng();
// generate random number of bits uniformly
let bits = rng.random_range(0..=self.bits);
if bits == 0 {
return Ok(IntValueTree::new(I256::ZERO, false));
}
// init 2 128-bit randoms
let mut higher: u128 = rng.random_range(0..=u128::MAX);
let mut lower: u128 = rng.random_range(0..=u128::MAX);
// cut 2 randoms according to bits size
match bits - 1 {
x if x < 128 => {
lower &= (1u128 << x) - 1;
higher = 0;
}
x if (128..256).contains(&x) => higher &= (1u128 << (x - 128)) - 1,
_ => {}
};
// init I256 from 2 randoms
let mut inner: [u64; 4] = [0; 4];
inner[0] = lower as u64;
inner[1] = (lower >> 64) as u64;
inner[2] = higher as u64;
inner[3] = (higher >> 64) as u64;
// we have a small bias here, i.e. intN::min will never be generated
// but it's ok since it's generated in `fn generate_edge_tree(...)`
let sign = if rng.random::<bool>() { Sign::Positive } else { Sign::Negative };
let (start, _) = I256::overflowing_from_sign_and_abs(sign, U256::from_limbs(inner));
Ok(IntValueTree::new(start, false))
}
}
impl Strategy for IntStrategy {
type Tree = IntValueTree;
type Value = I256;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let total_weight = self.random_weight + self.fixtures_weight + self.edge_weight;
let bias = runner.rng().random_range(0..total_weight);
// randomly select one of 3 strategies
match bias {
x if x < self.edge_weight => self.generate_edge_tree(runner),
x if x < self.edge_weight + self.fixtures_weight => self.generate_fixtures_tree(runner),
_ => self.generate_random_tree(runner),
}
}
}
#[cfg(test)]
mod tests {
use crate::strategies::int::IntValueTree;
use alloy_primitives::I256;
use proptest::strategy::ValueTree;
#[test]
fn test_int_tree_complicate_should_not_overflow() {
let mut int_tree = IntValueTree::new(I256::MAX, false);
assert_eq!(int_tree.hi, I256::MAX);
assert_eq!(int_tree.curr, I256::MAX);
int_tree.complicate();
assert_eq!(int_tree.lo, I256::MAX);
let mut int_tree = IntValueTree::new(I256::MIN, false);
assert_eq!(int_tree.hi, I256::MIN);
assert_eq!(int_tree.curr, I256::MIN);
int_tree.complicate();
assert_eq!(int_tree.lo, I256::MIN);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/fuzz/src/strategies/literals.rs | crates/evm/fuzz/src/strategies/literals.rs | use alloy_dyn_abi::DynSolType;
use alloy_primitives::{
B256, Bytes, I256, U256, keccak256,
map::{B256IndexSet, HashMap, IndexSet},
};
use foundry_common::Analysis;
use foundry_compilers::ProjectPathsConfig;
use solar::{
ast::{self, Visit},
interface::source_map::FileName,
};
use std::{
ops::ControlFlow,
sync::{Arc, OnceLock},
};
#[derive(Clone, Debug)]
pub struct LiteralsDictionary {
maps: Arc<OnceLock<LiteralMaps>>,
}
impl Default for LiteralsDictionary {
fn default() -> Self {
Self::new(None, None, usize::MAX)
}
}
impl LiteralsDictionary {
pub fn new(
analysis: Option<Analysis>,
paths_config: Option<ProjectPathsConfig>,
max_values: usize,
) -> Self {
let maps = Arc::new(OnceLock::<LiteralMaps>::new());
if let Some(analysis) = analysis
&& max_values > 0
{
let maps = maps.clone();
// This can't be done in a rayon task (including inside of `get`) because it can cause a
// deadlock, since internally `solar` also uses rayon.
let _ = std::thread::Builder::new().name("literal-collector".into()).spawn(move || {
let _ = maps.get_or_init(|| {
let literals =
LiteralsCollector::process(&analysis, paths_config.as_ref(), max_values);
debug!(
words = literals.words.values().map(|set| set.len()).sum::<usize>(),
strings = literals.strings.len(),
bytes = literals.bytes.len(),
"collected source code literals for fuzz dictionary"
);
literals
});
});
} else {
maps.set(Default::default()).unwrap();
}
Self { maps }
}
/// Returns a reference to the `LiteralMaps`.
pub fn get(&self) -> &LiteralMaps {
self.maps.wait()
}
/// Test-only helper to seed the dictionary with literal values.
#[cfg(test)]
pub(crate) fn set(&mut self, map: super::LiteralMaps) {
self.maps = Arc::new(OnceLock::new());
self.maps.set(map).unwrap();
}
}
#[derive(Debug, Default)]
pub struct LiteralMaps {
pub words: HashMap<DynSolType, B256IndexSet>,
pub strings: IndexSet<String>,
pub bytes: IndexSet<Bytes>,
}
#[derive(Debug, Default)]
pub struct LiteralsCollector {
max_values: usize,
total_values: usize,
output: LiteralMaps,
}
impl LiteralsCollector {
fn new(max_values: usize) -> Self {
Self { max_values, ..Default::default() }
}
pub fn process(
analysis: &Analysis,
paths_config: Option<&ProjectPathsConfig>,
max_values: usize,
) -> LiteralMaps {
analysis.enter(|compiler| {
let mut literals_collector = Self::new(max_values);
for source in compiler.sources().iter() {
// Ignore scripts, and libs
if let Some(paths) = paths_config
&& let FileName::Real(source_path) = &source.file.name
&& !(source_path.starts_with(&paths.sources) || paths.is_test(source_path))
{
continue;
}
if let Some(ast) = &source.ast
&& literals_collector.visit_source_unit(ast).is_break()
{
break;
}
}
literals_collector.output
})
}
}
impl<'ast> ast::Visit<'ast> for LiteralsCollector {
type BreakValue = ();
fn visit_expr(&mut self, expr: &'ast ast::Expr<'ast>) -> ControlFlow<()> {
// Stop early if we've hit the limit
if self.total_values >= self.max_values {
return ControlFlow::Break(());
}
// Handle unary negation of number literals
if let ast::ExprKind::Unary(un_op, inner_expr) = &expr.kind
&& un_op.kind == ast::UnOpKind::Neg
&& let ast::ExprKind::Lit(lit, _) = &inner_expr.kind
&& let ast::LitKind::Number(n) = &lit.kind
{
// Compute the negative I256 value
if let Ok(pos_i256) = I256::try_from(*n) {
let neg_value = -pos_i256;
let neg_b256 = B256::from(neg_value.into_raw());
// Store under all intN sizes that can represent this value
for bits in [16, 32, 64, 128, 256] {
if can_fit_int(neg_value, bits)
&& self
.output
.words
.entry(DynSolType::Int(bits))
.or_default()
.insert(neg_b256)
{
self.total_values += 1;
}
}
}
// Continue walking the expression
return self.walk_expr(expr);
}
// Handle literals
if let ast::ExprKind::Lit(lit, _) = &expr.kind {
let is_new = match &lit.kind {
ast::LitKind::Number(n) => {
let pos_value = U256::from(*n);
let pos_b256 = B256::from(pos_value);
// Store under all uintN sizes that can represent this value
for bits in [8, 16, 32, 64, 128, 256] {
if can_fit_uint(pos_value, bits)
&& self
.output
.words
.entry(DynSolType::Uint(bits))
.or_default()
.insert(pos_b256)
{
self.total_values += 1;
}
}
false // already handled inserts individually
}
ast::LitKind::Address(addr) => self
.output
.words
.entry(DynSolType::Address)
.or_default()
.insert(addr.into_word()),
ast::LitKind::Str(ast::StrKind::Hex, sym, _) => {
self.output.bytes.insert(Bytes::copy_from_slice(sym.as_byte_str()))
}
ast::LitKind::Str(_, sym, _) => {
let s = String::from_utf8_lossy(sym.as_byte_str()).into_owned();
// For strings, also store the hashed version
let hash = keccak256(s.as_bytes());
if self.output.words.entry(DynSolType::FixedBytes(32)).or_default().insert(hash)
{
self.total_values += 1;
}
// And the right-padded version if it fits.
if s.len() <= 32 {
let padded = B256::right_padding_from(s.as_bytes());
if self
.output
.words
.entry(DynSolType::FixedBytes(32))
.or_default()
.insert(padded)
{
self.total_values += 1;
}
}
self.output.strings.insert(s)
}
ast::LitKind::Bool(..) | ast::LitKind::Rational(..) | ast::LitKind::Err(..) => {
false // ignore
}
};
if is_new {
self.total_values += 1;
}
}
self.walk_expr(expr)
}
}
/// Checks if a signed integer value can fit in intN type.
fn can_fit_int(value: I256, bits: usize) -> bool {
// Calculate the maximum positive value for intN: 2^(N-1) - 1
let max_val = I256::try_from((U256::from(1) << (bits - 1)) - U256::from(1))
.expect("max value should fit in I256");
// Calculate the minimum negative value for intN: -2^(N-1)
let min_val = -max_val - I256::ONE;
value >= min_val && value <= max_val
}
/// Checks if an unsigned integer value can fit in uintN type.
fn can_fit_uint(value: U256, bits: usize) -> bool {
if bits == 256 {
return true;
}
// Calculate the maximum value for uintN: 2^N - 1
let max_val = (U256::from(1) << bits) - U256::from(1);
value <= max_val
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::address;
use solar::interface::{Session, source_map};
const SOURCE: &str = r#"
contract Magic {
// plain literals
address constant DAI = 0x6B175474E89094C44Da98b954EedeAC495271d0F;
uint64 constant MAGIC_NUMBER = 1122334455;
int32 constant MAGIC_INT = -777;
bytes32 constant MAGIC_WORD = "abcd1234";
bytes constant MAGIC_BYTES = hex"deadbeef";
string constant MAGIC_STRING = "xyzzy";
// constant exprs with folding
uint256 constant NEG_FOLDING = uint(-2);
uint256 constant BIN_FOLDING = 2 * 2 ether;
bytes32 constant IMPLEMENTATION_SLOT = bytes32(uint256(keccak256('eip1967.proxy.implementation')) - 1);
}"#;
#[test]
fn test_literals_collector_coverage() {
let map = process_source_literals(SOURCE);
// Expected values from the SOURCE contract
let addr = address!("0x6B175474E89094C44Da98b954EedeAC495271d0F").into_word();
let num = B256::from(U256::from(1122334455u64));
let int = B256::from(I256::try_from(-777i32).unwrap().into_raw());
let word = B256::right_padding_from(b"abcd1234");
let dyn_bytes = Bytes::from_static(&[0xde, 0xad, 0xbe, 0xef]);
assert_word(&map, DynSolType::Address, addr, "Expected DAI in address set");
assert_word(&map, DynSolType::Uint(64), num, "Expected MAGIC_NUMBER in uint64 set");
assert_word(&map, DynSolType::Int(32), int, "Expected MAGIC_INT in int32 set");
assert_word(&map, DynSolType::FixedBytes(32), word, "Expected MAGIC_WORD in bytes32 set");
assert!(map.strings.contains("xyzzy"), "Expected MAGIC_STRING to be collected");
assert!(
map.strings.contains("eip1967.proxy.implementation"),
"Expected IMPLEMENTATION_SLOT in string set"
);
assert!(map.bytes.contains(&dyn_bytes), "Expected MAGIC_BYTES in bytes set");
}
#[test]
fn test_literals_collector_size() {
let literals = process_source_literals(SOURCE);
// Helper to get count for a type, returns 0 if not present
let count = |ty: DynSolType| literals.words.get(&ty).map_or(0, |set| set.len());
assert_eq!(count(DynSolType::Address), 1, "Address literal count mismatch");
assert_eq!(literals.strings.len(), 3, "String literals count mismatch");
assert_eq!(literals.bytes.len(), 1, "Byte literals count mismatch");
// Unsigned integers - MAGIC_NUMBER (1122334455) appears in multiple sizes
assert_eq!(count(DynSolType::Uint(8)), 2, "Uint(8) count mismatch");
assert_eq!(count(DynSolType::Uint(16)), 3, "Uint(16) count mismatch");
assert_eq!(count(DynSolType::Uint(32)), 4, "Uint(32) count mismatch");
assert_eq!(count(DynSolType::Uint(64)), 5, "Uint(64) count mismatch");
assert_eq!(count(DynSolType::Uint(128)), 5, "Uint(128) count mismatch");
assert_eq!(count(DynSolType::Uint(256)), 5, "Uint(256) count mismatch");
// Signed integers - MAGIC_INT (-777) appears in multiple sizes
assert_eq!(count(DynSolType::Int(16)), 2, "Int(16) count mismatch");
assert_eq!(count(DynSolType::Int(32)), 2, "Int(32) count mismatch");
assert_eq!(count(DynSolType::Int(64)), 2, "Int(64) count mismatch");
assert_eq!(count(DynSolType::Int(128)), 2, "Int(128) count mismatch");
assert_eq!(count(DynSolType::Int(256)), 2, "Int(256) count mismatch");
// FixedBytes(32) includes:
// - MAGIC_WORD
// - String literals (hashed and right-padded versions)
assert_eq!(count(DynSolType::FixedBytes(32)), 6, "FixedBytes(32) count mismatch");
// Total count check
assert_eq!(
literals.words.values().map(|set| set.len()).sum::<usize>(),
41,
"Total word values count mismatch"
);
}
// -- TEST HELPERS ---------------------------------------------------------
fn process_source_literals(source: &str) -> LiteralMaps {
let mut compiler =
solar::sema::Compiler::new(Session::builder().with_stderr_emitter().build());
compiler
.enter_mut(|c| -> std::io::Result<()> {
let mut pcx = c.parse();
pcx.set_resolve_imports(false);
pcx.add_file(
c.sess().source_map().new_source_file(source_map::FileName::Stdin, source)?,
);
pcx.parse();
let _ = c.lower_asts();
Ok(())
})
.expect("Failed to compile test source");
LiteralsCollector::process(&std::sync::Arc::new(compiler), None, usize::MAX)
}
fn assert_word(literals: &LiteralMaps, ty: DynSolType, value: B256, msg: &str) {
assert!(literals.words.get(&ty).is_some_and(|set| set.contains(&value)), "{}", msg);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/abi/src/lib.rs | crates/evm/abi/src/lib.rs | //! Solidity ABI-related utilities and [`sol!`](alloy_sol_types::sol) definitions.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod console;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/abi/src/console/hh.rs | crates/evm/abi/src/console/hh.rs | //! Hardhat `console.sol` interface.
use alloy_sol_types::sol;
use foundry_common_fmt::*;
use foundry_macros::ConsoleFmt;
sol!(
#[sol(abi)]
#[derive(ConsoleFmt)]
Console,
"src/Console.json"
);
pub use Console::*;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/abi/src/console/mod.rs | crates/evm/abi/src/console/mod.rs | use alloy_primitives::{I256, U256};
pub mod ds;
pub mod hh;
pub fn format_units_int(x: &I256, decimals: &U256) -> String {
let (sign, x) = x.into_sign_and_abs();
format!("{sign}{}", format_units_uint(&x, decimals))
}
pub fn format_units_uint(x: &U256, decimals: &U256) -> String {
match alloy_primitives::utils::Unit::new(decimals.saturating_to::<u8>()) {
Some(units) => alloy_primitives::utils::ParseUnits::U256(*x).format_units(units),
None => x.to_string(),
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/abi/src/console/ds.rs | crates/evm/abi/src/console/ds.rs | //! DSTest log interface.
use super::{format_units_int, format_units_uint};
use alloy_primitives::hex;
use alloy_sol_types::sol;
use derive_more::Display;
use foundry_common_fmt::UIfmt;
use itertools::Itertools;
// Using UIfmt for consistent and user-friendly formatting
sol! {
#[sol(abi)]
#[derive(Display)]
interface Console {
#[display("{}", val.pretty())]
event log(string val);
#[display("{}", hex::encode_prefixed(val))]
event logs(bytes val);
#[display("{}", val.pretty())]
event log_address(address val);
#[display("{}", val.pretty())]
event log_bytes32(bytes32 val);
#[display("{}", val.pretty())]
event log_int(int val);
#[display("{}", val.pretty())]
event log_uint(uint val);
#[display("{}", hex::encode_prefixed(val))]
event log_bytes(bytes val);
#[display("{}", val.pretty())]
event log_string(string val);
#[display("[{}]", val.iter().map(|v| v.pretty()).format(", "))]
event log_array(uint256[] val);
#[display("[{}]", val.iter().map(|v| v.pretty()).format(", "))]
event log_array(int256[] val);
#[display("[{}]", val.iter().map(|v| v.pretty()).format(", "))]
event log_array(address[] val);
#[display("{}: {}", key.pretty(), val.pretty())]
event log_named_address(string key, address val);
#[display("{}: {}", key.pretty(), val.pretty())]
event log_named_bytes32(string key, bytes32 val);
#[display("{}: {}", key.pretty(), format_units_int(val, decimals))]
event log_named_decimal_int(string key, int val, uint decimals);
#[display("{}: {}", key.pretty(), format_units_uint(val, decimals))]
event log_named_decimal_uint(string key, uint val, uint decimals);
#[display("{}: {}", key.pretty(), val.pretty())]
event log_named_int(string key, int val);
#[display("{}: {}", key.pretty(), val.pretty())]
event log_named_uint(string key, uint val);
#[display("{}: {}", key.pretty(), hex::encode_prefixed(val))]
event log_named_bytes(string key, bytes val);
#[display("{}: {}", key.pretty(), val.pretty())]
event log_named_string(string key, string val);
#[display("{}: [{}]", key.pretty(), val.iter().map(|v| v.pretty()).format(", "))]
event log_named_array(string key, uint256[] val);
#[display("{}: [{}]", key.pretty(), val.iter().map(|v| v.pretty()).format(", "))]
event log_named_array(string key, int256[] val);
#[display("{}: [{}]", key.pretty(), val.iter().map(|v| v.pretty()).format(", "))]
event log_named_array(string key, address[] val);
}
}
pub use Console::*;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/evm.rs | crates/evm/core/src/evm.rs | use std::{
marker::PhantomData,
ops::{Deref, DerefMut},
};
use crate::{
Env, InspectorExt, backend::DatabaseExt, constants::DEFAULT_CREATE2_DEPLOYER_CODEHASH,
};
use alloy_consensus::constants::KECCAK_EMPTY;
use alloy_evm::{Evm, EvmEnv, eth::EthEvmContext, precompiles::PrecompilesMap};
use alloy_primitives::{Address, Bytes, U256};
use foundry_fork_db::DatabaseError;
use revm::{
Context, Journal,
context::{
BlockEnv, CfgEnv, ContextTr, CreateScheme, Evm as RevmEvm, JournalTr, LocalContext,
LocalContextTr, TxEnv,
result::{EVMError, ExecResultAndState, ExecutionResult, HaltReason, ResultAndState},
},
handler::{
EthFrame, EthPrecompiles, EvmTr, FrameResult, FrameTr, Handler, ItemOrResult,
instructions::EthInstructions,
},
inspector::{InspectorEvmTr, InspectorHandler},
interpreter::{
CallInput, CallInputs, CallOutcome, CallScheme, CallValue, CreateInputs, CreateOutcome,
FrameInput, Gas, InstructionResult, InterpreterResult, SharedMemory,
interpreter::EthInterpreter, interpreter_action::FrameInit, return_ok,
},
precompile::{PrecompileSpecId, Precompiles},
primitives::hardfork::SpecId,
};
pub fn new_evm_with_inspector<'db, I: InspectorExt>(
db: &'db mut dyn DatabaseExt,
env: Env,
inspector: I,
) -> FoundryEvm<'db, I> {
let mut ctx = EthEvmContext {
journaled_state: {
let mut journal = Journal::new(db);
journal.set_spec_id(env.evm_env.cfg_env.spec);
journal
},
block: env.evm_env.block_env,
cfg: env.evm_env.cfg_env,
tx: env.tx,
chain: (),
local: LocalContext::default(),
error: Ok(()),
};
ctx.cfg.tx_chain_id_check = true;
let spec = ctx.cfg.spec;
let mut evm = FoundryEvm {
inner: RevmEvm::new_with_inspector(
ctx,
inspector,
EthInstructions::default(),
get_precompiles(spec),
),
};
evm.inspector().get_networks().inject_precompiles(evm.precompiles_mut());
evm
}
pub fn new_evm_with_existing_context<'a>(
ctx: EthEvmContext<&'a mut dyn DatabaseExt>,
inspector: &'a mut dyn InspectorExt,
) -> FoundryEvm<'a, &'a mut dyn InspectorExt> {
let spec = ctx.cfg.spec;
let mut evm = FoundryEvm {
inner: RevmEvm::new_with_inspector(
ctx,
inspector,
EthInstructions::default(),
get_precompiles(spec),
),
};
evm.inspector().get_networks().inject_precompiles(evm.precompiles_mut());
evm
}
/// Get the precompiles for the given spec.
fn get_precompiles(spec: SpecId) -> PrecompilesMap {
PrecompilesMap::from_static(
EthPrecompiles {
precompiles: Precompiles::new(PrecompileSpecId::from_spec_id(spec)),
spec,
}
.precompiles,
)
}
/// Get the call inputs for the CREATE2 factory.
fn get_create2_factory_call_inputs(
salt: U256,
inputs: &CreateInputs,
deployer: Address,
) -> CallInputs {
let calldata = [&salt.to_be_bytes::<32>()[..], &inputs.init_code[..]].concat();
CallInputs {
caller: inputs.caller,
bytecode_address: deployer,
known_bytecode: None,
target_address: deployer,
scheme: CallScheme::Call,
value: CallValue::Transfer(inputs.value),
input: CallInput::Bytes(calldata.into()),
gas_limit: inputs.gas_limit,
is_static: false,
return_memory_offset: 0..0,
}
}
pub struct FoundryEvm<'db, I: InspectorExt> {
#[allow(clippy::type_complexity)]
inner: RevmEvm<
EthEvmContext<&'db mut dyn DatabaseExt>,
I,
EthInstructions<EthInterpreter, EthEvmContext<&'db mut dyn DatabaseExt>>,
PrecompilesMap,
EthFrame<EthInterpreter>,
>,
}
impl<'db, I: InspectorExt> FoundryEvm<'db, I> {
/// Consumes the EVM and returns the inner context.
pub fn into_context(self) -> EthEvmContext<&'db mut dyn DatabaseExt> {
self.inner.ctx
}
pub fn run_execution(
&mut self,
frame: FrameInput,
) -> Result<FrameResult, EVMError<DatabaseError>> {
let mut handler = FoundryHandler::<I>::default();
// Create first frame
let memory =
SharedMemory::new_with_buffer(self.inner.ctx().local().shared_memory_buffer().clone());
let first_frame_input = FrameInit { depth: 0, memory, frame_input: frame };
// Run execution loop
let mut frame_result = handler.inspect_run_exec_loop(&mut self.inner, first_frame_input)?;
// Handle last frame result
handler.last_frame_result(&mut self.inner, &mut frame_result)?;
Ok(frame_result)
}
}
impl<'db, I: InspectorExt> Evm for FoundryEvm<'db, I> {
type Precompiles = PrecompilesMap;
type Inspector = I;
type DB = &'db mut dyn DatabaseExt;
type Error = EVMError<DatabaseError>;
type HaltReason = HaltReason;
type Spec = SpecId;
type Tx = TxEnv;
type BlockEnv = BlockEnv;
fn block(&self) -> &BlockEnv {
&self.inner.block
}
fn chain_id(&self) -> u64 {
self.inner.ctx.cfg.chain_id
}
fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
(&self.inner.ctx.journaled_state.database, &self.inner.inspector, &self.inner.precompiles)
}
fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
(
&mut self.inner.ctx.journaled_state.database,
&mut self.inner.inspector,
&mut self.inner.precompiles,
)
}
fn db_mut(&mut self) -> &mut Self::DB {
&mut self.inner.ctx.journaled_state.database
}
fn precompiles(&self) -> &Self::Precompiles {
&self.inner.precompiles
}
fn precompiles_mut(&mut self) -> &mut Self::Precompiles {
&mut self.inner.precompiles
}
fn inspector(&self) -> &Self::Inspector {
&self.inner.inspector
}
fn inspector_mut(&mut self) -> &mut Self::Inspector {
&mut self.inner.inspector
}
fn set_inspector_enabled(&mut self, _enabled: bool) {
unimplemented!("FoundryEvm is always inspecting")
}
fn transact_raw(
&mut self,
tx: Self::Tx,
) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
self.inner.ctx.tx = tx;
let mut handler = FoundryHandler::<I>::default();
let result = handler.inspect_run(&mut self.inner)?;
Ok(ResultAndState::new(result, self.inner.ctx.journaled_state.inner.state.clone()))
}
fn transact_system_call(
&mut self,
_caller: Address,
_contract: Address,
_data: Bytes,
) -> Result<ExecResultAndState<ExecutionResult>, Self::Error> {
unimplemented!()
}
fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>)
where
Self: Sized,
{
let Context { block: block_env, cfg: cfg_env, journaled_state, .. } = self.inner.ctx;
(journaled_state.database, EvmEnv { block_env, cfg_env })
}
}
impl<'db, I: InspectorExt> Deref for FoundryEvm<'db, I> {
type Target = Context<BlockEnv, TxEnv, CfgEnv, &'db mut dyn DatabaseExt>;
fn deref(&self) -> &Self::Target {
&self.inner.ctx
}
}
impl<I: InspectorExt> DerefMut for FoundryEvm<'_, I> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner.ctx
}
}
pub struct FoundryHandler<'db, I: InspectorExt> {
create2_overrides: Vec<(usize, CallInputs)>,
_phantom: PhantomData<(&'db mut dyn DatabaseExt, I)>,
}
impl<I: InspectorExt> Default for FoundryHandler<'_, I> {
fn default() -> Self {
Self { create2_overrides: Vec::new(), _phantom: PhantomData }
}
}
// Blanket Handler implementation for FoundryHandler, needed for implementing the InspectorHandler
// trait.
impl<'db, I: InspectorExt> Handler for FoundryHandler<'db, I> {
type Evm = RevmEvm<
EthEvmContext<&'db mut dyn DatabaseExt>,
I,
EthInstructions<EthInterpreter, EthEvmContext<&'db mut dyn DatabaseExt>>,
PrecompilesMap,
EthFrame<EthInterpreter>,
>;
type Error = EVMError<DatabaseError>;
type HaltReason = HaltReason;
}
impl<'db, I: InspectorExt> FoundryHandler<'db, I> {
/// Handles CREATE2 frame initialization, potentially transforming it to use the CREATE2
/// factory.
fn handle_create_frame(
&mut self,
evm: &mut <Self as Handler>::Evm,
init: &mut FrameInit,
) -> Result<Option<FrameResult>, <Self as Handler>::Error> {
if let FrameInput::Create(inputs) = &init.frame_input
&& let CreateScheme::Create2 { salt } = inputs.scheme
{
let (ctx, inspector) = evm.ctx_inspector();
if inspector.should_use_create2_factory(ctx, inputs) {
let gas_limit = inputs.gas_limit;
// Get CREATE2 deployer.
let create2_deployer = evm.inspector().create2_deployer();
// Generate call inputs for CREATE2 factory.
let call_inputs = get_create2_factory_call_inputs(salt, inputs, create2_deployer);
// Push data about current override to the stack.
self.create2_overrides.push((evm.journal().depth(), call_inputs.clone()));
// Sanity check that CREATE2 deployer exists.
let code_hash = evm.journal_mut().load_account(create2_deployer)?.info.code_hash;
if code_hash == KECCAK_EMPTY {
return Ok(Some(FrameResult::Call(CallOutcome {
result: InterpreterResult {
result: InstructionResult::Revert,
output: Bytes::from(
format!("missing CREATE2 deployer: {create2_deployer}")
.into_bytes(),
),
gas: Gas::new(gas_limit),
},
memory_offset: 0..0,
was_precompile_called: false,
precompile_call_logs: vec![],
})));
} else if code_hash != DEFAULT_CREATE2_DEPLOYER_CODEHASH {
return Ok(Some(FrameResult::Call(CallOutcome {
result: InterpreterResult {
result: InstructionResult::Revert,
output: "invalid CREATE2 deployer bytecode".into(),
gas: Gas::new(gas_limit),
},
memory_offset: 0..0,
was_precompile_called: false,
precompile_call_logs: vec![],
})));
}
// Rewrite the frame init
init.frame_input = FrameInput::Call(Box::new(call_inputs));
}
}
Ok(None)
}
/// Transforms CREATE2 factory call results back into CREATE outcomes.
fn handle_create2_override(
&mut self,
evm: &mut <Self as Handler>::Evm,
result: FrameResult,
) -> FrameResult {
if self.create2_overrides.last().is_some_and(|(depth, _)| *depth == evm.journal().depth()) {
let (_, call_inputs) = self.create2_overrides.pop().unwrap();
let FrameResult::Call(mut call) = result else {
unreachable!("create2 override should be a call frame");
};
// Decode address from output.
let address = match call.instruction_result() {
return_ok!() => Address::try_from(call.output().as_ref())
.map_err(|_| {
call.result = InterpreterResult {
result: InstructionResult::Revert,
output: "invalid CREATE2 factory output".into(),
gas: Gas::new(call_inputs.gas_limit),
};
})
.ok(),
_ => None,
};
FrameResult::Create(CreateOutcome { result: call.result, address })
} else {
result
}
}
}
impl<I: InspectorExt> InspectorHandler for FoundryHandler<'_, I> {
type IT = EthInterpreter;
fn inspect_run_exec_loop(
&mut self,
evm: &mut Self::Evm,
first_frame_input: <<Self::Evm as EvmTr>::Frame as FrameTr>::FrameInit,
) -> Result<FrameResult, Self::Error> {
let res = evm.inspect_frame_init(first_frame_input)?;
if let ItemOrResult::Result(frame_result) = res {
return Ok(frame_result);
}
loop {
let call_or_result = evm.inspect_frame_run()?;
let result = match call_or_result {
ItemOrResult::Item(mut init) => {
// Handle CREATE/CREATE2 frame initialization
if let Some(frame_result) = self.handle_create_frame(evm, &mut init)? {
return Ok(frame_result);
}
match evm.inspect_frame_init(init)? {
ItemOrResult::Item(_) => continue,
ItemOrResult::Result(result) => result,
}
}
ItemOrResult::Result(result) => result,
};
// Handle CREATE2 override transformation if needed
let result = self.handle_create2_override(evm, result);
if let Some(result) = evm.frame_return_result(result)? {
return Ok(result);
}
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/lib.rs | crates/evm/core/src/lib.rs | //! # foundry-evm-core
//!
//! Core EVM abstractions.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
use crate::constants::DEFAULT_CREATE2_DEPLOYER;
use alloy_evm::eth::EthEvmContext;
use alloy_primitives::{Address, map::HashMap};
use auto_impl::auto_impl;
use backend::DatabaseExt;
use revm::{Inspector, inspector::NoOpInspector, interpreter::CreateInputs};
use revm_inspectors::access_list::AccessListInspector;
/// Map keyed by breakpoints char to their location (contract address, pc)
pub type Breakpoints = HashMap<char, (Address, usize)>;
#[macro_use]
extern crate tracing;
pub mod abi {
pub use foundry_cheatcodes_spec::Vm;
pub use foundry_evm_abi::*;
}
pub mod env;
pub use env::*;
use foundry_evm_networks::NetworkConfigs;
pub mod backend;
pub mod buffer;
pub mod bytecode;
pub mod constants;
pub mod decode;
pub mod either_evm;
pub mod evm;
pub mod fork;
pub mod hardfork;
pub mod ic;
pub mod opts;
pub mod precompiles;
pub mod state_snapshot;
pub mod utils;
/// An extension trait that allows us to add additional hooks to Inspector for later use in
/// handlers.
#[auto_impl(&mut, Box)]
pub trait InspectorExt: for<'a> Inspector<EthEvmContext<&'a mut dyn DatabaseExt>> {
/// Determines whether the `DEFAULT_CREATE2_DEPLOYER` should be used for a CREATE2 frame.
///
/// If this function returns true, we'll replace CREATE2 frame with a CALL frame to CREATE2
/// factory.
fn should_use_create2_factory(
&mut self,
_context: &mut EthEvmContext<&mut dyn DatabaseExt>,
_inputs: &CreateInputs,
) -> bool {
false
}
/// Simulates `console.log` invocation.
fn console_log(&mut self, msg: &str) {
let _ = msg;
}
/// Returns configured networks.
fn get_networks(&self) -> NetworkConfigs {
NetworkConfigs::default()
}
/// Returns the CREATE2 deployer address.
fn create2_deployer(&self) -> Address {
DEFAULT_CREATE2_DEPLOYER
}
}
impl InspectorExt for NoOpInspector {}
impl InspectorExt for AccessListInspector {}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/decode.rs | crates/evm/core/src/decode.rs | //! Various utilities to decode test results.
use crate::abi::{Vm, console};
use alloy_dyn_abi::JsonAbiExt;
use alloy_json_abi::{Error, JsonAbi};
use alloy_primitives::{Log, Selector, hex, map::HashMap};
use alloy_sol_types::{
ContractError::Revert, RevertReason, RevertReason::ContractError, SolEventInterface,
SolInterface, SolValue,
};
use foundry_common::SELECTOR_LEN;
use itertools::Itertools;
use revm::interpreter::InstructionResult;
use std::{fmt, sync::OnceLock};
/// A skip reason.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SkipReason(pub Option<String>);
impl SkipReason {
/// Decodes a skip reason, if any.
pub fn decode(raw_result: &[u8]) -> Option<Self> {
raw_result.strip_prefix(crate::constants::MAGIC_SKIP).map(|reason| {
let reason = String::from_utf8_lossy(reason).into_owned();
Self((!reason.is_empty()).then_some(reason))
})
}
/// Decodes a skip reason from a string that was obtained by formatting `Self`.
///
/// This is a hack to support re-decoding a skip reason in proptest.
pub fn decode_self(s: &str) -> Option<Self> {
s.strip_prefix("skipped").map(|rest| Self(rest.strip_prefix(": ").map(ToString::to_string)))
}
}
impl fmt::Display for SkipReason {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("skipped")?;
if let Some(reason) = &self.0 {
f.write_str(": ")?;
f.write_str(reason)?;
}
Ok(())
}
}
/// Decode a set of logs, only returning logs from DSTest logging events and Hardhat's `console.log`
pub fn decode_console_logs(logs: &[Log]) -> Vec<String> {
logs.iter().filter_map(decode_console_log).collect()
}
/// Decode a single log.
///
/// This function returns [None] if it is not a DSTest log or the result of a Hardhat
/// `console.log`.
fn decode_console_log(log: &Log) -> Option<String> {
console::ds::ConsoleEvents::decode_log(log).ok().map(|decoded| decoded.to_string())
}
/// Decodes revert data.
#[derive(Clone, Debug, Default)]
pub struct RevertDecoder {
/// The custom errors to use for decoding.
errors: HashMap<Selector, Vec<Error>>,
}
impl Default for &RevertDecoder {
fn default() -> Self {
static EMPTY: OnceLock<RevertDecoder> = OnceLock::new();
EMPTY.get_or_init(RevertDecoder::new)
}
}
impl RevertDecoder {
/// Creates a new, empty revert decoder.
pub fn new() -> Self {
Self::default()
}
/// Sets the ABIs to use for error decoding.
///
/// Note that this is decently expensive as it will hash all errors for faster indexing.
pub fn with_abis<'a>(mut self, abi: impl IntoIterator<Item = &'a JsonAbi>) -> Self {
self.extend_from_abis(abi);
self
}
/// Sets the ABI to use for error decoding.
///
/// Note that this is decently expensive as it will hash all errors for faster indexing.
pub fn with_abi(mut self, abi: &JsonAbi) -> Self {
self.extend_from_abi(abi);
self
}
/// Extends the decoder with the given ABI's custom errors.
fn extend_from_abis<'a>(&mut self, abi: impl IntoIterator<Item = &'a JsonAbi>) {
for abi in abi {
self.extend_from_abi(abi);
}
}
/// Extends the decoder with the given ABI's custom errors.
fn extend_from_abi(&mut self, abi: &JsonAbi) {
for error in abi.errors() {
self.push_error(error.clone());
}
}
/// Adds a custom error to use for decoding.
pub fn push_error(&mut self, error: Error) {
self.errors.entry(error.selector()).or_default().push(error);
}
/// Tries to decode an error message from the given revert bytes.
///
/// Note that this is just a best-effort guess, and should not be relied upon for anything other
/// than user output.
pub fn decode(&self, err: &[u8], status: Option<InstructionResult>) -> String {
self.maybe_decode(err, status).unwrap_or_else(|| {
if err.is_empty() { "<empty revert data>".to_string() } else { trimmed_hex(err) }
})
}
/// Tries to decode an error message from the given revert bytes.
///
/// See [`decode`](Self::decode) for more information.
pub fn maybe_decode(&self, err: &[u8], status: Option<InstructionResult>) -> Option<String> {
if let Some(reason) = SkipReason::decode(err) {
return Some(reason.to_string());
}
// Solidity's `Error(string)` (handled separately in order to strip revert: prefix)
if let Some(ContractError(Revert(revert))) = RevertReason::decode(err) {
return Some(revert.reason);
}
// Solidity's `Panic(uint256)` and `Vm`'s custom errors.
if let Ok(e) = alloy_sol_types::ContractError::<Vm::VmErrors>::abi_decode(err) {
return Some(e.to_string());
}
let string_decoded = decode_as_non_empty_string(err);
if let Some((selector, data)) = err.split_first_chunk::<SELECTOR_LEN>() {
// Custom errors.
if let Some(errors) = self.errors.get(selector) {
for error in errors {
// If we don't decode, don't return an error, try to decode as a string
// later.
if let Ok(decoded) = error.abi_decode_input(data) {
return Some(format!(
"{}({})",
error.name,
decoded.iter().map(foundry_common::fmt::format_token).format(", ")
));
}
}
}
if string_decoded.is_some() {
return string_decoded;
}
// Generic custom error.
return Some({
let mut s = format!("custom error {}", hex::encode_prefixed(selector));
if !data.is_empty() {
s.push_str(": ");
match std::str::from_utf8(data) {
Ok(data) => s.push_str(data),
Err(_) => s.push_str(&hex::encode(data)),
}
}
s
});
}
if string_decoded.is_some() {
return string_decoded;
}
if let Some(status) = status
&& !status.is_ok()
{
return Some(format!("EvmError: {status:?}"));
}
if err.is_empty() {
None
} else {
Some(format!("custom error bytes {}", hex::encode_prefixed(err)))
}
}
}
/// Helper function that decodes provided error as an ABI encoded or an ASCII string (if not empty).
fn decode_as_non_empty_string(err: &[u8]) -> Option<String> {
// ABI-encoded `string`.
if let Ok(s) = String::abi_decode(err)
&& !s.is_empty()
{
return Some(s);
}
// ASCII string.
if err.is_ascii() {
let msg = std::str::from_utf8(err).unwrap().to_string();
if !msg.is_empty() {
return Some(msg);
}
}
None
}
fn trimmed_hex(s: &[u8]) -> String {
let n = 32;
if s.len() <= n {
hex::encode(s)
} else {
format!(
"{}…{} ({} bytes)",
&hex::encode(&s[..n / 2]),
&hex::encode(&s[s.len() - n / 2..]),
s.len(),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trimmed_hex() {
assert_eq!(trimmed_hex(&hex::decode("1234567890").unwrap()), "1234567890");
assert_eq!(
trimmed_hex(&hex::decode("492077697368207275737420737570706F72746564206869676865722D6B696E646564207479706573").unwrap()),
"49207769736820727573742073757070…6865722d6b696e646564207479706573 (41 bytes)"
);
}
// https://github.com/foundry-rs/foundry/issues/10162
#[test]
fn partial_decode() {
/*
error ValidationFailed(bytes);
error InvalidNonce();
*/
let mut decoder = RevertDecoder::default();
decoder.push_error("ValidationFailed(bytes)".parse().unwrap());
/*
abi.encodeWithSelector(ValidationFailed.selector, InvalidNonce.selector)
*/
let data = &hex!(
"0xe17594de"
"756688fe00000000000000000000000000000000000000000000000000000000"
);
assert_eq!(
decoder.decode(data, None),
"custom error 0xe17594de: 756688fe00000000000000000000000000000000000000000000000000000000"
);
/*
abi.encodeWithSelector(ValidationFailed.selector, abi.encodeWithSelector(InvalidNonce.selector))
*/
let data = &hex!(
"0xe17594de"
"0000000000000000000000000000000000000000000000000000000000000020"
"0000000000000000000000000000000000000000000000000000000000000004"
"756688fe00000000000000000000000000000000000000000000000000000000"
);
assert_eq!(decoder.decode(data, None), "ValidationFailed(0x756688fe)");
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/env.rs | crates/evm/core/src/env.rs | pub use alloy_evm::EvmEnv;
use revm::{
Context, Database, Journal, JournalEntry,
context::{BlockEnv, CfgEnv, JournalInner, JournalTr, TxEnv},
primitives::hardfork::SpecId,
};
/// Helper container type for [`EvmEnv`] and [`TxEnv`].
#[derive(Clone, Debug, Default)]
pub struct Env {
    /// Chain configuration and block environment.
    pub evm_env: EvmEnv,
    /// Environment of the transaction to execute.
    pub tx: TxEnv,
}
impl Env {
    /// Creates an otherwise-default environment with the given spec id set on the config.
    pub fn default_with_spec_id(spec_id: SpecId) -> Self {
        Self::new_with_spec_id(CfgEnv::default(), BlockEnv::default(), TxEnv::default(), spec_id)
    }

    /// Bundles the given configuration, block, and transaction environments.
    pub fn from(cfg: CfgEnv, block: BlockEnv, tx: TxEnv) -> Self {
        let evm_env = EvmEnv { cfg_env: cfg, block_env: block };
        Self { evm_env, tx }
    }

    /// Like [`Env::from`], but overrides the configuration's spec id first.
    pub fn new_with_spec_id(mut cfg: CfgEnv, block: BlockEnv, tx: TxEnv, spec_id: SpecId) -> Self {
        cfg.spec = spec_id;
        Self::from(cfg, block, tx)
    }
}
/// Helper struct with mutable references to the block and cfg environments.
pub struct EnvMut<'a> {
    /// Mutable reference to the block environment.
    pub block: &'a mut BlockEnv,
    /// Mutable reference to the chain configuration.
    pub cfg: &'a mut CfgEnv,
    /// Mutable reference to the transaction environment.
    pub tx: &'a mut TxEnv,
}
impl EnvMut<'_> {
    /// Clones the referenced environments into an owned [`Env`].
    pub fn to_owned(&self) -> Env {
        let evm_env = EvmEnv { cfg_env: self.cfg.clone(), block_env: self.block.clone() };
        Env { evm_env, tx: self.tx.clone() }
    }
}
/// Types that can hand out mutable references to their block, cfg, and tx environments.
pub trait AsEnvMut {
    /// Returns an [`EnvMut`] view over this value's environments.
    fn as_env_mut(&mut self) -> EnvMut<'_>;
}
impl AsEnvMut for EnvMut<'_> {
    // Reborrows the contained references, shortening their lifetime to the borrow of `self`.
    fn as_env_mut(&mut self) -> EnvMut<'_> {
        EnvMut { block: self.block, cfg: self.cfg, tx: self.tx }
    }
}
impl AsEnvMut for Env {
    /// Borrows the nested environments as an [`EnvMut`] view.
    fn as_env_mut(&mut self) -> EnvMut<'_> {
        // Destructure to split the borrow into disjoint field borrows.
        let Self { evm_env, tx } = self;
        EnvMut { block: &mut evm_env.block_env, cfg: &mut evm_env.cfg_env, tx }
    }
}
// Any revm `Context` parameterized with the standard env types can be viewed as an `EnvMut`.
impl<DB: Database, J: JournalTr<Database = DB>, C> AsEnvMut
    for Context<BlockEnv, TxEnv, CfgEnv, DB, J, C>
{
    fn as_env_mut(&mut self) -> EnvMut<'_> {
        EnvMut { block: &mut self.block, cfg: &mut self.cfg, tx: &mut self.tx }
    }
}
/// Extension trait for splitting a revm context into its database, journal, and env parts.
pub trait ContextExt {
    /// The database type backing the context.
    type DB: Database;

    /// Returns disjoint mutable borrows of the database, the journal, and the environments.
    fn as_db_env_and_journal(
        &mut self,
    ) -> (&mut Self::DB, &mut JournalInner<JournalEntry>, EnvMut<'_>);
}
impl<DB: Database, C> ContextExt
    for Context<BlockEnv, TxEnv, CfgEnv, DB, Journal<DB, JournalEntry>, C>
{
    type DB = DB;

    fn as_db_env_and_journal(
        &mut self,
    ) -> (&mut Self::DB, &mut JournalInner<JournalEntry>, EnvMut<'_>) {
        // All three borrows come from distinct fields, so they can coexist.
        (
            &mut self.journaled_state.database,
            &mut self.journaled_state.inner,
            EnvMut { block: &mut self.block, cfg: &mut self.cfg, tx: &mut self.tx },
        )
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/bytecode.rs | crates/evm/core/src/bytecode.rs | use revm::bytecode::{OpCode, opcode};
use std::{fmt, slice};
/// An iterator that yields opcodes and their immediate data.
///
/// If the bytecode is not well-formed, the iterator will still yield opcodes, but the immediate
/// data may be incorrect. For example, if the bytecode is `PUSH2 0x69`, the iterator will yield
/// `PUSH2, &[]`.
#[derive(Clone, Debug)]
pub struct InstIter<'a> {
    // Raw byte iterator over the remaining bytecode.
    iter: slice::Iter<'a, u8>,
}
impl fmt::Display for InstIter<'_> {
    /// Writes all remaining instructions, separated by single spaces.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Iterate a clone so `Display` does not consume the iterator.
        let mut sep = "";
        for inst in self.clone() {
            write!(f, "{sep}{inst}")?;
            sep = " ";
        }
        Ok(())
    }
}
impl<'a> InstIter<'a> {
    /// Create a new iterator over the given bytecode slice.
    #[inline]
    pub fn new(slice: &'a [u8]) -> Self {
        Self { iter: slice.iter() }
    }

    /// Returns a new iterator that also yields the program counter alongside the opcode and
    /// immediate data.
    #[inline]
    pub fn with_pc(self) -> InstIterWithPc<'a> {
        InstIterWithPc { iter: self, pc: 0 }
    }

    /// Returns a reference to the inner byte iterator.
    #[inline]
    pub fn inner(&self) -> &slice::Iter<'a, u8> {
        &self.iter
    }

    /// Returns a mutable reference to the inner byte iterator.
    #[inline]
    pub fn inner_mut(&mut self) -> &mut slice::Iter<'a, u8> {
        &mut self.iter
    }

    /// Consumes `self`, returning the inner byte iterator.
    #[inline]
    pub fn into_inner(self) -> slice::Iter<'a, u8> {
        self.iter
    }
}
impl<'a> Iterator for InstIter<'a> {
    type Item = Inst<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().map(|&opcode| {
            // SAFETY: presumably `OpCode::new_unchecked` accepts any byte value here (including
            // unknown opcodes) — TODO confirm against revm's `OpCode` safety contract.
            let opcode = unsafe { OpCode::new_unchecked(opcode) };
            let len = imm_len(opcode.get()) as usize;
            // If fewer than `len` bytes remain (truncated immediate), `split_at_checked` fails
            // and we fall back to an empty immediate AND an empty remainder, ending iteration.
            let (immediate, rest) = self.iter.as_slice().split_at_checked(len).unwrap_or_default();
            self.iter = rest.iter();
            Inst { opcode, immediate }
        })
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // At least one instruction remains if any byte does; at most one per remaining byte.
        let len = self.iter.len();
        ((len != 0) as usize, Some(len))
    }
}
// Once the underlying byte iterator is exhausted, `next` keeps returning `None`.
impl std::iter::FusedIterator for InstIter<'_> {}
/// A bytecode iterator that yields opcodes and their immediate data, alongside the program counter.
///
/// Created by calling [`InstIter::with_pc`].
#[derive(Debug)]
pub struct InstIterWithPc<'a> {
    // The underlying instruction iterator.
    iter: InstIter<'a>,
    // Byte offset of the next instruction within the original bytecode.
    pc: usize,
}
impl<'a> Iterator for InstIterWithPc<'a> {
    type Item = (usize, Inst<'a>);

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let inst = self.iter.next()?;
        let pc = self.pc;
        // Advance past the opcode byte plus any immediate bytes it consumed.
        self.pc += 1 + inst.immediate.len();
        Some((pc, inst))
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
// Fused because the inner `InstIter` is fused.
impl std::iter::FusedIterator for InstIterWithPc<'_> {}
/// An opcode and its immediate data. Returned by [`InstIter`].
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Inst<'a> {
    /// The opcode.
    pub opcode: OpCode,
    /// The immediate data, if any.
    ///
    /// If an opcode is missing immediate data, e.g. malformed or bytecode hash, this will be an
    /// empty slice.
    pub immediate: &'a [u8],
}
// Debug output intentionally mirrors Display (mnemonic + immediate).
impl fmt::Debug for Inst<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
impl fmt::Display for Inst<'_> {
    /// Formats as the opcode mnemonic, followed by ` 0x…` immediate bytes when present.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.opcode)?;
        if !self.immediate.is_empty() {
            write!(f, " {:#x}", alloy_primitives::hex::display(self.immediate))?;
        }
        Ok(())
    }
}
/// Returns the length of the immediate data for the given opcode, or `0` if none.
///
/// Only the `PUSH1`..=`PUSH32` family carries immediates (1..=32 bytes respectively).
#[inline]
const fn imm_len(op: u8) -> u8 {
    if op >= opcode::PUSH1 && op <= opcode::PUSH32 {
        op - opcode::PUSH0
    } else {
        0
    }
}
/// Returns a string representation of the given bytecode.
pub fn format_bytecode(bytecode: &[u8]) -> String {
    let mut out = String::new();
    // Writing into a `String` is infallible, so this `unwrap` cannot panic.
    format_bytecode_to(bytecode, &mut out).unwrap();
    out
}
/// Formats an EVM bytecode to the given writer.
///
/// Instructions are space-separated; see [`Inst`]'s `Display` impl for the per-instruction format.
pub fn format_bytecode_to<W: fmt::Write + ?Sized>(bytecode: &[u8], w: &mut W) -> fmt::Result {
    write!(w, "{}", InstIter::new(bytecode))
}
#[cfg(test)]
mod tests {
    use super::*;
    use revm::bytecode::opcode as op;

    // Shorthand: wrap a raw byte as an `OpCode` for comparisons.
    fn o(op: u8) -> OpCode {
        unsafe { OpCode::new_unchecked(op) }
    }

    #[test]
    fn iter_basic() {
        let bytecode = [0x01, 0x02, 0x03, 0x04, 0x05];
        let mut iter = InstIter::new(&bytecode);
        assert_eq!(iter.next(), Some(Inst { opcode: o(0x01), immediate: &[] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(0x02), immediate: &[] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(0x03), immediate: &[] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(0x04), immediate: &[] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(0x05), immediate: &[] }));
        assert_eq!(iter.next(), None);
    }

    #[test]
    fn iter_with_imm() {
        let bytecode = [op::PUSH0, op::PUSH1, 0x69, op::PUSH2, 0x01, 0x02];
        let mut iter = InstIter::new(&bytecode);
        assert_eq!(iter.next(), Some(Inst { opcode: o(op::PUSH0), immediate: &[] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(op::PUSH1), immediate: &[0x69] }));
        assert_eq!(iter.next(), Some(Inst { opcode: o(op::PUSH2), immediate: &[0x01, 0x02] }));
        assert_eq!(iter.next(), None);
    }

    // A truncated immediate yields an empty immediate and ends iteration.
    #[test]
    fn iter_with_imm_too_short() {
        let bytecode = [op::PUSH2, 0x69];
        let mut iter = InstIter::new(&bytecode);
        assert_eq!(iter.next(), Some(Inst { opcode: o(op::PUSH2), immediate: &[] }));
        assert_eq!(iter.next(), None);
    }

    #[test]
    fn display() {
        let bytecode = [op::PUSH0, op::PUSH1, 0x69, op::PUSH2, 0x01, 0x02];
        let s = format_bytecode(&bytecode);
        assert_eq!(s, "PUSH0 PUSH1 0x69 PUSH2 0x0102");
    }

    #[test]
    fn decode_push2_and_stop() {
        // 0x61 0xAA 0xBB = PUSH2 0xAABB
        // 0x00 = STOP
        let code = vec![0x61, 0xAA, 0xBB, 0x00];
        let insns = InstIter::new(&code).with_pc().collect::<Vec<_>>();
        // PUSH2 then STOP
        assert_eq!(insns.len(), 2);
        // PUSH2 at pc = 0
        let i0 = &insns[0];
        assert_eq!(i0.0, 0);
        assert_eq!(i0.1.opcode, op::PUSH2);
        assert_eq!(i0.1.immediate, &[0xAA, 0xBB]);
        // STOP at pc = 3 (opcode byte + 2 immediate bytes skipped)
        let i1 = &insns[1];
        assert_eq!(i1.0, 3);
        assert_eq!(i1.1.opcode, op::STOP);
        assert!(i1.1.immediate.is_empty());
    }

    #[test]
    fn decode_arithmetic_ops() {
        // 0x01 = ADD, 0x02 = MUL, 0x03 = SUB, 0x04 = DIV
        let code = vec![0x01, 0x02, 0x03, 0x04];
        let insns = InstIter::new(&code).with_pc().collect::<Vec<_>>();
        assert_eq!(insns.len(), 4);
        let expected = [(0, op::ADD), (1, op::MUL), (2, op::SUB), (3, op::DIV)];
        for ((pc, want_op), insn) in expected.iter().zip(insns.iter()) {
            assert_eq!(insn.0, *pc);
            assert_eq!(insn.1.opcode, *want_op);
            assert!(insn.1.immediate.is_empty());
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/utils.rs | crates/evm/core/src/utils.rs | use crate::EnvMut;
use alloy_chains::Chain;
use alloy_consensus::{BlockHeader, private::alloy_eips::eip7840::BlobParams};
use alloy_hardforks::EthereumHardfork;
use alloy_json_abi::{Function, JsonAbi};
use alloy_network::{AnyTxEnvelope, TransactionResponse};
use alloy_primitives::{Address, B256, ChainId, Selector, TxKind, U256};
use alloy_provider::{Network, network::BlockResponse};
use alloy_rpc_types::{Transaction, TransactionRequest};
use foundry_config::NamedChain;
use foundry_evm_networks::NetworkConfigs;
use revm::primitives::{
eip4844::{BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN, BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE},
hardfork::SpecId,
};
pub use revm::state::EvmState as StateChangeset;
/// Hints to the compiler that this is a cold path, i.e. unlikely to be taken.
///
/// Calling this inside a branch nudges codegen to lay out that branch out-of-line.
#[cold]
#[inline(always)]
pub fn cold_path() {
    // TODO: remove `#[cold]` and call `std::hint::cold_path` once stable.
}
/// Depending on the configured chain id and block number this should apply any specific changes
///
/// - checks for prevrandao mixhash after merge
/// - applies chain specifics: on Arbitrum `block.number` is the L1 block
///
/// Should be called with proper chain id (retrieved from provider if not provided).
pub fn apply_chain_and_block_specific_env_changes<N: Network>(
    env: EnvMut<'_>,
    block: &N::BlockResponse,
    configs: NetworkConfigs,
) {
    use NamedChain::*;
    if let Ok(chain) = NamedChain::try_from(env.cfg.chain_id) {
        let block_number = block.header().number();
        match chain {
            Mainnet => {
                // after merge difficulty is supplanted with prevrandao EIP-4399
                if block_number >= 15_537_351u64 {
                    env.block.difficulty = env.block.prevrandao.unwrap_or_default().into();
                }
                // Early return: the generic post-merge fixups below must not run again.
                return;
            }
            BinanceSmartChain | BinanceSmartChainTestnet => {
                // https://github.com/foundry-rs/foundry/issues/9942
                // As far as observed from the source code of bnb-chain/bsc, the `difficulty` field
                // is still in use and returned by the corresponding opcode but `prevrandao`
                // (`mixHash`) is always zero, even though bsc adopts the newer EVM
                // specification. This will confuse revm and causes emulation
                // failure.
                env.block.prevrandao = Some(env.block.difficulty.into());
                return;
            }
            c if c.is_arbitrum() => {
                // on arbitrum `block.number` is the L1 block which is included in the
                // `l1BlockNumber` field
                if let Some(l1_block_number) = block
                    .other_fields()
                    .and_then(|other| other.get("l1BlockNumber").cloned())
                    .and_then(|l1_block_number| {
                        serde_json::from_value::<U256>(l1_block_number).ok()
                    })
                {
                    env.block.number = l1_block_number.to();
                }
            }
            _ => {}
        }
    }
    // Some networks report no prevrandao at all; fill in a random one if configured to do so.
    if configs.bypass_prevrandao(env.cfg.chain_id) && env.block.prevrandao.is_none() {
        // <https://github.com/foundry-rs/foundry/issues/4232>
        env.block.prevrandao = Some(B256::random());
    }
    // if difficulty is `0` we assume it's past merge
    if block.header().difficulty().is_zero() {
        env.block.difficulty = env.block.prevrandao.unwrap_or_default().into();
    }
}
/// Derives the active [`BlobParams`] based on the given timestamp.
///
/// This falls back to regular ethereum blob params if no hardforks for the given chain id are
/// detected.
pub fn get_blob_params(chain_id: ChainId, timestamp: u64) -> BlobParams {
    let hardfork = EthereumHardfork::from_chain_and_timestamp(Chain::from_id(chain_id), timestamp)
        .unwrap_or_default();
    match hardfork {
        EthereumHardfork::Prague => BlobParams::prague(),
        EthereumHardfork::Osaka => BlobParams::osaka(),
        EthereumHardfork::Bpo1 => BlobParams::bpo1(),
        // BPO2 onwards (incl. future hardforks with undecided settings) use the BPO2 parameters.
        EthereumHardfork::Bpo2
        | EthereumHardfork::Bpo3
        | EthereumHardfork::Bpo4
        | EthereumHardfork::Bpo5
        | EthereumHardfork::Amsterdam => BlobParams::bpo2(),
        // Anything older falls back to the Cancun parameters.
        _ => BlobParams::cancun(),
    }
}
/// Derive the blob base fee update fraction based on the chain and timestamp by checking the
/// hardfork.
pub fn get_blob_base_fee_update_fraction(chain_id: ChainId, timestamp: u64) -> u64 {
    get_blob_params(chain_id, timestamp).update_fraction as u64
}
/// Returns the blob base fee update fraction based on the spec id.
///
/// Specs before Prague use the Cancun fraction; Prague and later use the Prague fraction.
pub fn get_blob_base_fee_update_fraction_by_spec_id(spec: SpecId) -> u64 {
    if spec < SpecId::PRAGUE {
        BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN
    } else {
        BLOB_BASE_FEE_UPDATE_FRACTION_PRAGUE
    }
}
/// Given an ABI and selector, it tries to find the respective function.
///
/// # Errors
/// Returns an error naming the contract and selector when no function matches.
pub fn get_function<'a>(
    contract_name: &str,
    selector: Selector,
    abi: &'a JsonAbi,
) -> eyre::Result<&'a Function> {
    for func in abi.functions() {
        if func.selector() == selector {
            return Ok(func);
        }
    }
    eyre::bail!("{contract_name} does not have the selector {selector}")
}
/// Configures the env for the given RPC transaction.
/// Accounts for an impersonated transaction by resetting the `env.tx.caller` field to `tx.from`.
///
/// NOTE(review): non-Ethereum envelopes (e.g. OP deposit transactions) are silently ignored here —
/// confirm callers only pass Ethereum transactions.
pub fn configure_tx_env(env: &mut EnvMut<'_>, tx: &Transaction<AnyTxEnvelope>) {
    let from = tx.from();
    if let AnyTxEnvelope::Ethereum(tx) = &tx.inner.inner() {
        configure_tx_req_env(
            env,
            &TransactionRequest::from_transaction_with_sender(tx.clone(), from),
            Some(from),
        )
        // A request built from a full transaction always has `from` and `gas` set.
        .expect("cannot fail");
    }
}
/// Configures the env for the given RPC transaction request.
/// `impersonated_from` is the address of the impersonated account. This helps account for an
/// impersonated transaction by resetting the `env.tx.caller` field to `impersonated_from`.
///
/// # Errors
/// Fails if `from` is missing (and no impersonated sender is given) or if `gas` is missing.
pub fn configure_tx_req_env(
    env: &mut EnvMut<'_>,
    tx: &TransactionRequest,
    impersonated_from: Option<Address>,
) -> eyre::Result<()> {
    // If no transaction type is provided, we need to infer it from the other fields.
    let tx_type = tx.transaction_type.unwrap_or_else(|| tx.minimal_tx_type() as u8);
    env.tx.tx_type = tx_type;
    // Exhaustive destructure so adding a field to `TransactionRequest` forces a review here.
    let TransactionRequest {
        nonce,
        from,
        to,
        value,
        gas_price,
        gas,
        max_fee_per_gas,
        max_priority_fee_per_gas,
        max_fee_per_blob_gas,
        ref input,
        chain_id,
        ref blob_versioned_hashes,
        ref access_list,
        ref authorization_list,
        transaction_type: _,
        sidecar: _,
    } = *tx;
    // If no `to` field then set create kind: https://eips.ethereum.org/EIPS/eip-2470#deployment-transaction
    env.tx.kind = to.unwrap_or(TxKind::Create);
    // If the transaction is impersonated, we need to set the caller to the from
    // address Ref: https://github.com/foundry-rs/foundry/issues/9541
    env.tx.caller = if let Some(caller) = impersonated_from {
        caller
    } else {
        from.ok_or_else(|| eyre::eyre!("missing `from` field"))?
    };
    env.tx.gas_limit = gas.ok_or_else(|| eyre::eyre!("missing `gas` field"))?;
    env.tx.nonce = nonce.unwrap_or_default();
    env.tx.value = value.unwrap_or_default();
    env.tx.data = input.input().cloned().unwrap_or_default();
    env.tx.chain_id = chain_id;
    // Type 1, EIP-2930
    env.tx.access_list = access_list.clone().unwrap_or_default();
    // Type 2, EIP-1559 (legacy `gas_price` takes precedence when both are set)
    env.tx.gas_price = gas_price.or(max_fee_per_gas).unwrap_or_default();
    env.tx.gas_priority_fee = max_priority_fee_per_gas;
    // Type 3, EIP-4844
    env.tx.blob_hashes = blob_versioned_hashes.clone().unwrap_or_default();
    env.tx.max_fee_per_blob_gas = max_fee_per_blob_gas.unwrap_or_default();
    // Type 4, EIP-7702
    env.tx.set_signed_authorization(authorization_list.clone().unwrap_or_default());
    Ok(())
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/hardfork.rs | crates/evm/core/src/hardfork.rs | use alloy_rpc_types::BlockNumberOrTag;
use op_revm::OpSpecId;
use revm::primitives::hardfork::SpecId;
pub use alloy_hardforks::EthereumHardfork;
pub use alloy_op_hardforks::OpHardfork;
/// A hardfork identifier for either an Ethereum (L1) or an OP-stack chain.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum FoundryHardfork {
    /// An Ethereum mainnet-style hardfork.
    Ethereum(EthereumHardfork),
    /// An OP-stack hardfork.
    Optimism(OpHardfork),
}
impl FoundryHardfork {
    /// Wraps an Ethereum hardfork.
    pub fn ethereum(h: EthereumHardfork) -> Self {
        Self::Ethereum(h)
    }

    /// Wraps an OP-stack hardfork.
    pub fn optimism(h: OpHardfork) -> Self {
        Self::Optimism(h)
    }
}
impl From<EthereumHardfork> for FoundryHardfork {
    fn from(value: EthereumHardfork) -> Self {
        Self::Ethereum(value)
    }
}
impl From<OpHardfork> for FoundryHardfork {
    fn from(value: OpHardfork) -> Self {
        Self::Optimism(value)
    }
}
// OP spec ids are widened to Ethereum `SpecId` via `Into`.
impl From<FoundryHardfork> for SpecId {
    fn from(fork: FoundryHardfork) -> Self {
        match fork {
            FoundryHardfork::Ethereum(hardfork) => spec_id_from_ethereum_hardfork(hardfork),
            FoundryHardfork::Optimism(hardfork) => spec_id_from_optimism_hardfork(hardfork).into(),
        }
    }
}
/// Map an EthereumHardfork enum into its corresponding SpecId.
///
/// # Panics
/// Panics on hardforks with no revm spec yet (BPO3+) or unknown future variants.
pub fn spec_id_from_ethereum_hardfork(hardfork: EthereumHardfork) -> SpecId {
    match hardfork {
        EthereumHardfork::Frontier => SpecId::FRONTIER,
        EthereumHardfork::Homestead => SpecId::HOMESTEAD,
        EthereumHardfork::Dao => SpecId::DAO_FORK,
        EthereumHardfork::Tangerine => SpecId::TANGERINE,
        EthereumHardfork::SpuriousDragon => SpecId::SPURIOUS_DRAGON,
        EthereumHardfork::Byzantium => SpecId::BYZANTIUM,
        EthereumHardfork::Constantinople => SpecId::CONSTANTINOPLE,
        EthereumHardfork::Petersburg => SpecId::PETERSBURG,
        EthereumHardfork::Istanbul => SpecId::ISTANBUL,
        EthereumHardfork::MuirGlacier => SpecId::MUIR_GLACIER,
        EthereumHardfork::Berlin => SpecId::BERLIN,
        EthereumHardfork::London => SpecId::LONDON,
        EthereumHardfork::ArrowGlacier => SpecId::ARROW_GLACIER,
        EthereumHardfork::GrayGlacier => SpecId::GRAY_GLACIER,
        EthereumHardfork::Paris => SpecId::MERGE,
        EthereumHardfork::Shanghai => SpecId::SHANGHAI,
        EthereumHardfork::Cancun => SpecId::CANCUN,
        EthereumHardfork::Prague => SpecId::PRAGUE,
        EthereumHardfork::Osaka => SpecId::OSAKA,
        // BPO forks reuse the Osaka spec until revm gains dedicated ids.
        EthereumHardfork::Bpo1 | EthereumHardfork::Bpo2 => SpecId::OSAKA,
        EthereumHardfork::Bpo3 | EthereumHardfork::Bpo4 | EthereumHardfork::Bpo5 => {
            unimplemented!()
        }
        f => unreachable!("unimplemented {}", f),
    }
}
/// Map an OptimismHardfork enum into its corresponding OpSpecId.
///
/// # Panics
/// Panics on unknown future variants.
pub fn spec_id_from_optimism_hardfork(hardfork: OpHardfork) -> OpSpecId {
    match hardfork {
        OpHardfork::Bedrock => OpSpecId::BEDROCK,
        OpHardfork::Regolith => OpSpecId::REGOLITH,
        OpHardfork::Canyon => OpSpecId::CANYON,
        OpHardfork::Ecotone => OpSpecId::ECOTONE,
        OpHardfork::Fjord => OpSpecId::FJORD,
        OpHardfork::Granite => OpSpecId::GRANITE,
        OpHardfork::Holocene => OpSpecId::HOLOCENE,
        OpHardfork::Isthmus => OpSpecId::ISTHMUS,
        OpHardfork::Interop => OpSpecId::INTEROP,
        OpHardfork::Jovian => OpSpecId::JOVIAN,
        f => unreachable!("unimplemented {}", f),
    }
}
/// Convert a `BlockNumberOrTag` into an `EthereumHardfork`.
///
/// Named tags other than `earliest` (latest, pending, …) resolve to the newest mainnet hardfork.
pub fn ethereum_hardfork_from_block_tag(block: impl Into<BlockNumberOrTag>) -> EthereumHardfork {
    let tag = block.into();
    let num = if let BlockNumberOrTag::Number(n) = tag {
        n
    } else if matches!(tag, BlockNumberOrTag::Earliest) {
        0
    } else {
        u64::MAX
    };
    EthereumHardfork::from_mainnet_block_number(num)
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_hardforks::ethereum::mainnet::*;

    #[test]
    fn test_ethereum_spec_id_mapping() {
        assert_eq!(spec_id_from_ethereum_hardfork(EthereumHardfork::Frontier), SpecId::FRONTIER);
        assert_eq!(spec_id_from_ethereum_hardfork(EthereumHardfork::Homestead), SpecId::HOMESTEAD);
        // Test latest hardforks
        assert_eq!(spec_id_from_ethereum_hardfork(EthereumHardfork::Cancun), SpecId::CANCUN);
        assert_eq!(spec_id_from_ethereum_hardfork(EthereumHardfork::Prague), SpecId::PRAGUE);
    }

    #[test]
    fn test_optimism_spec_id_mapping() {
        assert_eq!(spec_id_from_optimism_hardfork(OpHardfork::Bedrock), OpSpecId::BEDROCK);
        assert_eq!(spec_id_from_optimism_hardfork(OpHardfork::Regolith), OpSpecId::REGOLITH);
        // Test latest hardforks
        assert_eq!(spec_id_from_optimism_hardfork(OpHardfork::Holocene), OpSpecId::HOLOCENE);
        assert_eq!(spec_id_from_optimism_hardfork(OpHardfork::Interop), OpSpecId::INTEROP);
    }

    // Block numbers just below/above a fork boundary map to the previous/that fork.
    #[test]
    fn test_hardfork_from_block_tag_numbers() {
        assert_eq!(
            ethereum_hardfork_from_block_tag(MAINNET_HOMESTEAD_BLOCK - 1),
            EthereumHardfork::Frontier
        );
        assert_eq!(
            ethereum_hardfork_from_block_tag(MAINNET_LONDON_BLOCK + 1),
            EthereumHardfork::London
        );
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/either_evm.rs | crates/evm/core/src/either_evm.rs | use alloy_evm::{Database, EthEvm, Evm, EvmEnv, eth::EthEvmContext};
use alloy_op_evm::OpEvm;
use alloy_primitives::{Address, Bytes};
use op_revm::{OpContext, OpHaltReason, OpSpecId, OpTransaction, OpTransactionError};
use revm::{
DatabaseCommit, Inspector,
context::{
BlockEnv, TxEnv,
result::{EVMError, ExecResultAndState, ExecutionResult, ResultAndState},
},
handler::PrecompileProvider,
interpreter::InterpreterResult,
primitives::hardfork::SpecId,
};
/// Alias for result type returned by [`Evm::transact`] methods.
type EitherEvmResult<DBError, HaltReason, TxError> =
    Result<ResultAndState<HaltReason>, EVMError<DBError, TxError>>;

/// Alias for result type returned by [`Evm::transact_commit`] methods.
type EitherExecResult<DBError, HaltReason, TxError> =
    Result<ExecutionResult<HaltReason>, EVMError<DBError, TxError>>;
/// [`EitherEvm`] delegates its calls to one of the two evm implementations; either [`EthEvm`] or
/// [`OpEvm`].
///
/// Calls are delegated to [`OpEvm`] only if optimism is enabled.
///
/// The call delegation is handled via its own implementation of the [`Evm`] trait.
///
/// The [`Evm::transact`] and other such calls work over the [`OpTransaction<TxEnv>`] type.
///
/// However, the [`Evm::HaltReason`] and [`Evm::Error`] leverage the optimism [`OpHaltReason`] and
/// [`OpTransactionError`] as these are supersets of the eth types. This makes it easier to map eth
/// types to op types and also prevents ignoring of any error that maybe thrown by [`OpEvm`].
#[allow(clippy::large_enum_variant)]
pub enum EitherEvm<DB, I, P>
where
    DB: Database,
{
    /// [`EthEvm`] implementation.
    Eth(EthEvm<DB, I, P>),
    /// [`OpEvm`] implementation.
    Op(OpEvm<DB, I, P>),
}
// Private helpers that widen Eth results/errors into the OP superset types used by the
// `Evm` impl below.
impl<DB, I, P> EitherEvm<DB, I, P>
where
    DB: Database,
    I: Inspector<EthEvmContext<DB>> + Inspector<OpContext<DB>>,
    P: PrecompileProvider<EthEvmContext<DB>, Output = InterpreterResult>
        + PrecompileProvider<OpContext<DB>, Output = InterpreterResult>,
{
    /// Converts the [`EthEvm::transact`] result to [`EitherEvmResult`].
    fn map_eth_result(
        &self,
        result: Result<ExecResultAndState<ExecutionResult>, EVMError<DB::Error>>,
    ) -> EitherEvmResult<DB::Error, OpHaltReason, OpTransactionError> {
        match result {
            Ok(result) => Ok(ResultAndState {
                // Eth halt reasons are a subset of OP's; wrap them in `Base`.
                result: result.result.map_haltreason(OpHaltReason::Base),
                state: result.state,
            }),
            Err(e) => Err(self.map_eth_err(e)),
        }
    }

    /// Converts the [`EthEvm::transact_commit`] result to [`EitherExecResult`].
    fn map_exec_result(
        &self,
        result: Result<ExecutionResult, EVMError<DB::Error>>,
    ) -> EitherExecResult<DB::Error, OpHaltReason, OpTransactionError> {
        match result {
            Ok(result) => {
                // Map the halt reason
                Ok(result.map_haltreason(OpHaltReason::Base))
            }
            Err(e) => Err(self.map_eth_err(e)),
        }
    }

    /// Maps [`EVMError<DBError>`] to [`EVMError<DBError, OpTransactionError>`].
    fn map_eth_err(&self, err: EVMError<DB::Error>) -> EVMError<DB::Error, OpTransactionError> {
        match err {
            // Only the transaction-error variant needs re-wrapping; the rest pass through.
            EVMError::Transaction(invalid_tx) => {
                EVMError::Transaction(OpTransactionError::Base(invalid_tx))
            }
            EVMError::Database(e) => EVMError::Database(e),
            EVMError::Header(e) => EVMError::Header(e),
            EVMError::Custom(e) => EVMError::Custom(e),
        }
    }
}
// Pure delegation: every method forwards to the wrapped `EthEvm` or `OpEvm`. Eth results are
// widened into the OP superset types (`OpHaltReason`/`OpTransactionError`) via the helpers above,
// and eth transactions are extracted from `OpTransaction` via its `base` field.
impl<DB, I, P> Evm for EitherEvm<DB, I, P>
where
    DB: Database,
    I: Inspector<EthEvmContext<DB>> + Inspector<OpContext<DB>>,
    P: PrecompileProvider<EthEvmContext<DB>, Output = InterpreterResult>
        + PrecompileProvider<OpContext<DB>, Output = InterpreterResult>,
{
    type DB = DB;
    type Error = EVMError<DB::Error, OpTransactionError>;
    type HaltReason = OpHaltReason;
    type Tx = OpTransaction<TxEnv>;
    type Inspector = I;
    type Precompiles = P;
    type Spec = SpecId;
    type BlockEnv = BlockEnv;

    fn block(&self) -> &BlockEnv {
        match self {
            Self::Eth(evm) => evm.block(),
            Self::Op(evm) => evm.block(),
        }
    }

    fn chain_id(&self) -> u64 {
        match self {
            Self::Eth(evm) => evm.chain_id(),
            Self::Op(evm) => evm.chain_id(),
        }
    }

    fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) {
        match self {
            Self::Eth(evm) => evm.components(),
            Self::Op(evm) => evm.components(),
        }
    }

    fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) {
        match self {
            Self::Eth(evm) => evm.components_mut(),
            Self::Op(evm) => evm.components_mut(),
        }
    }

    fn db_mut(&mut self) -> &mut Self::DB {
        match self {
            Self::Eth(evm) => evm.db_mut(),
            Self::Op(evm) => evm.db_mut(),
        }
    }

    fn into_db(self) -> Self::DB
    where
        Self: Sized,
    {
        match self {
            Self::Eth(evm) => evm.into_db(),
            Self::Op(evm) => evm.into_db(),
        }
    }

    fn finish(self) -> (Self::DB, EvmEnv<Self::Spec>)
    where
        Self: Sized,
    {
        match self {
            Self::Eth(evm) => evm.finish(),
            Self::Op(evm) => {
                // OP env carries an `OpSpecId`; translate it to the eth `SpecId` env.
                let (db, env) = evm.finish();
                (db, map_env(env))
            }
        }
    }

    fn precompiles(&self) -> &Self::Precompiles {
        match self {
            Self::Eth(evm) => evm.precompiles(),
            Self::Op(evm) => evm.precompiles(),
        }
    }

    fn precompiles_mut(&mut self) -> &mut Self::Precompiles {
        match self {
            Self::Eth(evm) => evm.precompiles_mut(),
            Self::Op(evm) => evm.precompiles_mut(),
        }
    }

    fn inspector(&self) -> &Self::Inspector {
        match self {
            Self::Eth(evm) => evm.inspector(),
            Self::Op(evm) => evm.inspector(),
        }
    }

    fn inspector_mut(&mut self) -> &mut Self::Inspector {
        match self {
            Self::Eth(evm) => evm.inspector_mut(),
            Self::Op(evm) => evm.inspector_mut(),
        }
    }

    fn enable_inspector(&mut self) {
        match self {
            Self::Eth(evm) => evm.enable_inspector(),
            Self::Op(evm) => evm.enable_inspector(),
        }
    }

    fn disable_inspector(&mut self) {
        match self {
            Self::Eth(evm) => evm.disable_inspector(),
            Self::Op(evm) => evm.disable_inspector(),
        }
    }

    fn set_inspector_enabled(&mut self, enabled: bool) {
        match self {
            Self::Eth(evm) => evm.set_inspector_enabled(enabled),
            Self::Op(evm) => evm.set_inspector_enabled(enabled),
        }
    }

    fn into_env(self) -> EvmEnv<Self::Spec>
    where
        Self: Sized,
    {
        match self {
            Self::Eth(evm) => evm.into_env(),
            Self::Op(evm) => map_env(evm.into_env()),
        }
    }

    fn transact(
        &mut self,
        tx: impl alloy_evm::IntoTxEnv<Self::Tx>,
    ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
        match self {
            Self::Eth(evm) => {
                // The eth evm only understands the base (non-OP) part of the transaction.
                let eth = evm.transact(tx.into_tx_env().base);
                self.map_eth_result(eth)
            }
            Self::Op(evm) => evm.transact(tx),
        }
    }

    fn transact_commit(
        &mut self,
        tx: impl alloy_evm::IntoTxEnv<Self::Tx>,
    ) -> Result<ExecutionResult<Self::HaltReason>, Self::Error>
    where
        Self::DB: DatabaseCommit,
    {
        match self {
            Self::Eth(evm) => {
                let eth = evm.transact_commit(tx.into_tx_env().base);
                self.map_exec_result(eth)
            }
            Self::Op(evm) => evm.transact_commit(tx),
        }
    }

    fn transact_raw(
        &mut self,
        tx: Self::Tx,
    ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
        match self {
            Self::Eth(evm) => {
                let res = evm.transact_raw(tx.base);
                self.map_eth_result(res)
            }
            Self::Op(evm) => evm.transact_raw(tx),
        }
    }

    fn transact_system_call(
        &mut self,
        caller: Address,
        contract: Address,
        data: Bytes,
    ) -> Result<ResultAndState<Self::HaltReason>, Self::Error> {
        match self {
            Self::Eth(evm) => {
                let eth = evm.transact_system_call(caller, contract, data);
                self.map_eth_result(eth)
            }
            Self::Op(evm) => evm.transact_system_call(caller, contract, data),
        }
    }
}
/// Maps [`EvmEnv<OpSpecId>`] to [`EvmEnv`].
fn map_env(env: EvmEnv<OpSpecId>) -> EvmEnv {
    // Translate the OP-stack spec id into the corresponding Ethereum spec id.
    let spec = env.spec_id().into_eth_spec();
    let EvmEnv { cfg_env, block_env } = env;
    EvmEnv { cfg_env: cfg_env.with_spec(spec), block_env }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/ic.rs | crates/evm/core/src/ic.rs | use crate::bytecode::InstIter;
use alloy_primitives::map::rustc_hash::FxHashMap;
use serde::Serialize;
/// Maps from program counter to instruction counter.
///
/// Inverse of [`IcPcMap`].
#[derive(Debug, Clone, Serialize)]
#[serde(transparent)]
pub struct PcIcMap {
    // pc (byte offset) -> ic (instruction index).
    inner: FxHashMap<u32, u32>,
}
impl PcIcMap {
    /// Creates a new `PcIcMap` for the given code.
    pub fn new(code: &[u8]) -> Self {
        Self { inner: make_map::<true>(code) }
    }

    /// Returns the length of the map.
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns `true` if the map is empty.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Returns the instruction counter for the given program counter.
    pub fn get(&self, pc: u32) -> Option<u32> {
        self.inner.get(&pc).copied()
    }
}
/// Map from instruction counter to program counter.
///
/// Inverse of [`PcIcMap`].
pub struct IcPcMap {
inner: FxHashMap<u32, u32>,
}
impl IcPcMap {
    /// Creates a new `IcPcMap` for the given code.
    pub fn new(code: &[u8]) -> Self {
        Self { inner: make_map::<false>(code) }
    }

    /// Returns the length of the map.
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns `true` if the map is empty.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Returns the program counter for the given instruction counter.
    pub fn get(&self, ic: u32) -> Option<u32> {
        self.inner.get(&ic).copied()
    }

    /// Iterate over the IC-PC pairs.
    pub fn iter(&self) -> impl Iterator<Item = (&u32, &u32)> {
        self.inner.iter()
    }
}
/// Builds a pc→ic mapping (when `PC_FIRST`) or the inverse ic→pc mapping for the given bytecode.
fn make_map<const PC_FIRST: bool>(code: &[u8]) -> FxHashMap<u32, u32> {
    assert!(code.len() <= u32::MAX as usize, "bytecode is too big");
    let mut map = FxHashMap::with_capacity_and_hasher(code.len(), Default::default());
    for (ic, (pc, _)) in InstIter::new(code).with_pc().enumerate() {
        let (key, value) = if PC_FIRST { (pc, ic) } else { (ic, pc) };
        map.insert(key as u32, value as u32);
    }
    // Immediates make the instruction count smaller than the byte count; release the excess.
    map.shrink_to_fit();
    map
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/constants.rs | crates/evm/core/src/constants.rs | use alloy_primitives::{Address, B256, address, b256, hex};
/// The cheatcode handler address.
///
/// This is the same address as the one used in DappTools's HEVM.
///
/// This is calculated as:
/// `address(bytes20(uint160(uint256(keccak256('hevm cheat code')))))`
pub const CHEATCODE_ADDRESS: Address = address!("0x7109709ECfa91a80626fF3989D68f67F5b1DD12D");

/// The contract hash at [`CHEATCODE_ADDRESS`].
///
/// This is calculated as:
/// `keccak256(abi.encodePacked(CHEATCODE_ADDRESS))`.
pub const CHEATCODE_CONTRACT_HASH: B256 =
    b256!("0xb0450508e5a2349057c3b4c9c84524d62be4bb17e565dbe2df34725a26872291");

/// The Hardhat console address.
///
/// See: <https://github.com/NomicFoundation/hardhat/blob/main/v-next/hardhat/console.sol>
pub const HARDHAT_CONSOLE_ADDRESS: Address = address!("0x000000000000000000636F6e736F6c652e6c6f67");

/// Stores the caller address to be used as *sender* account for:
/// - deploying Test contracts
/// - deploying Script contracts
///
/// Derived from `address(uint160(uint256(keccak256("foundry default caller"))))`,
/// which is equal to `0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38`.
pub const CALLER: Address = address!("0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38");

/// The default test contract address.
///
/// Derived from `CALLER.create(1)`.
pub const TEST_CONTRACT_ADDRESS: Address = address!("0x7FA9385bE102ac3EAc297483Dd6233D62b3e1496");

/// Magic return value returned by the `assume` cheatcode.
pub const MAGIC_ASSUME: &[u8] = b"FOUNDRY::ASSUME";

/// Magic return value returned by the `skip` cheatcode. Optionally appended with a reason.
pub const MAGIC_SKIP: &[u8] = b"FOUNDRY::SKIP";

/// The address that deploys the default CREATE2 deployer contract.
pub const DEFAULT_CREATE2_DEPLOYER_DEPLOYER: Address =
    address!("0x3fAB184622Dc19b6109349B94811493BF2a45362");

/// The default CREATE2 deployer.
pub const DEFAULT_CREATE2_DEPLOYER: Address =
    address!("0x4e59b44847b379578588920ca78fbf26c0b4956c");

/// The initcode of the default CREATE2 deployer.
pub const DEFAULT_CREATE2_DEPLOYER_CODE: &[u8] = &hex!(
    "604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3"
);

/// The runtime code of the default CREATE2 deployer.
pub const DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE: &[u8] = &hex!(
    "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3"
);

/// The hash of the default CREATE2 deployer code.
///
/// This is calculated as `keccak256([`DEFAULT_CREATE2_DEPLOYER_RUNTIME_CODE`])`.
pub const DEFAULT_CREATE2_DEPLOYER_CODEHASH: B256 =
    b256!("0x2fa86add0aed31f33a762c9d88e807c475bd51d0f52bd0955754b2608f7e4989");
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create2_deployer() {
assert_eq!(DEFAULT_CREATE2_DEPLOYER_DEPLOYER.create(0), DEFAULT_CREATE2_DEPLOYER);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/buffer.rs | crates/evm/core/src/buffer.rs | use alloy_primitives::U256;
use revm::bytecode::opcode;
/// Used to keep track of which buffer is currently active to be drawn by the debugger.
#[derive(Debug, PartialEq)]
pub enum BufferKind {
    /// The EVM memory buffer (mutable during execution).
    Memory,
    /// The call's input data buffer (read-only).
    Calldata,
    /// The data returned by the most recent sub-call (read-only).
    Returndata,
}
impl BufferKind {
    /// Returns the buffer that follows `self` in the fixed display cycle:
    /// Memory -> Calldata -> Returndata -> Memory.
    pub fn next(&self) -> Self {
        if let Self::Memory = self {
            Self::Calldata
        } else if let Self::Calldata = self {
            Self::Returndata
        } else {
            Self::Memory
        }
    }

    /// Formats the title of the active buffer pane, embedding `size` in bytes.
    pub fn title(&self, size: usize) -> String {
        let (name, qualifier) = match self {
            Self::Memory => ("Memory", "max expansion"),
            Self::Calldata => ("Calldata", "size"),
            Self::Returndata => ("Returndata", "size"),
        };
        format!("{name} ({qualifier}: {size} bytes)")
    }
}
/// Container for buffer access information.
pub struct BufferAccess {
    // Byte offset into the buffer where the access starts.
    pub offset: usize,
    // Number of bytes accessed.
    pub len: usize,
}

/// Container for read and write buffer access information.
pub struct BufferAccesses {
    /// The read buffer kind and access information.
    pub read: Option<(BufferKind, BufferAccess)>,
    /// The only mutable buffer is the memory buffer, so don't store the buffer kind.
    pub write: Option<BufferAccess>,
}
/// A utility function to get the buffer regions read and/or written by the given opcode,
/// based on the current `stack` contents.
///
/// Each entry of the per-opcode table below is a pair
/// `(read, write)` where `read` is `Option<(buffer kind, offset index, len index)>` and
/// `write` is `Option<(offset index, len index)>` (writes always target memory, so no kind
/// is stored). The index values are interpreted as:
/// - \>= 1: the 1-based position from the top of the stack holding the offset/len operand
/// - 0: no memory access
/// - -1: a fixed len of 32 bytes
/// - -2: a fixed len of 1 byte
///
/// Returns `None` if the opcode accesses no buffer, or if the stack is too shallow to
/// resolve the required operands.
pub fn get_buffer_accesses(op: u8, stack: &[U256]) -> Option<BufferAccesses> {
    let buffer_access = match op {
        opcode::KECCAK256 | opcode::RETURN | opcode::REVERT => {
            (Some((BufferKind::Memory, 1, 2)), None)
        }
        opcode::CALLDATACOPY => (Some((BufferKind::Calldata, 2, 3)), Some((1, 3))),
        opcode::RETURNDATACOPY => (Some((BufferKind::Returndata, 2, 3)), Some((1, 3))),
        opcode::CALLDATALOAD => (Some((BufferKind::Calldata, 1, -1)), None),
        opcode::CODECOPY => (None, Some((1, 3))),
        opcode::EXTCODECOPY => (None, Some((2, 4))),
        opcode::MLOAD => (Some((BufferKind::Memory, 1, -1)), None),
        opcode::MSTORE => (None, Some((1, -1))),
        opcode::MSTORE8 => (None, Some((1, -2))),
        opcode::LOG0 | opcode::LOG1 | opcode::LOG2 | opcode::LOG3 | opcode::LOG4 => {
            (Some((BufferKind::Memory, 1, 2)), None)
        }
        opcode::CREATE | opcode::CREATE2 => (Some((BufferKind::Memory, 2, 3)), None),
        opcode::CALL | opcode::CALLCODE => (Some((BufferKind::Memory, 4, 5)), None),
        opcode::DELEGATECALL | opcode::STATICCALL => (Some((BufferKind::Memory, 3, 4)), None),
        opcode::MCOPY => (Some((BufferKind::Memory, 2, 3)), Some((1, 3))),
        // Opcode touches no buffer: (None, None).
        _ => Default::default(),
    };
    let stack_len = stack.len();
    // Resolves a table index to a concrete byte count, per the scheme documented above.
    // The stack top lives at the *end* of the slice, hence `stack_len - stack_index`.
    let get_size = |stack_index| match stack_index {
        -2 => Some(1),
        -1 => Some(32),
        0 => None,
        1.. => {
            if (stack_index as usize) <= stack_len {
                Some(stack[stack_len - stack_index as usize].saturating_to())
            } else {
                None
            }
        }
        _ => panic!("invalid stack index"),
    };
    if buffer_access.0.is_some() || buffer_access.1.is_some() {
        let (read, write) = buffer_access;
        let read_access = read.and_then(|b| {
            let (buffer, offset, len) = b;
            Some((buffer, BufferAccess { offset: get_size(offset)?, len: get_size(len)? }))
        });
        let write_access = write.and_then(|b| {
            let (offset, len) = b;
            Some(BufferAccess { offset: get_size(offset)?, len: get_size(len)? })
        });
        Some(BufferAccesses { read: read_access, write: write_access })
    } else {
        None
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/opts.rs | crates/evm/core/src/opts.rs | use super::fork::environment;
use crate::{
EvmEnv,
constants::DEFAULT_CREATE2_DEPLOYER,
fork::{CreateFork, configure_env},
};
use alloy_network::Network;
use alloy_primitives::{Address, B256, U256};
use alloy_provider::{Provider, network::AnyRpcBlock};
use eyre::WrapErr;
use foundry_common::{
ALCHEMY_FREE_TIER_CUPS,
provider::{ProviderBuilder, RetryProvider},
};
use foundry_config::{Chain, Config, GasLimit};
use foundry_evm_networks::NetworkConfigs;
use revm::context::{BlockEnv, TxEnv};
use serde::{Deserialize, Serialize};
use std::fmt::Write;
use url::Url;
/// User-configurable EVM execution options, including fork settings.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EvmOpts {
    /// The EVM environment configuration.
    #[serde(flatten)]
    pub env: Env,
    /// Fetch state over a remote instead of starting from empty state.
    #[serde(rename = "eth_rpc_url")]
    pub fork_url: Option<String>,
    /// Pins the block number for the state fork.
    pub fork_block_number: Option<u64>,
    /// The number of retries.
    pub fork_retries: Option<u32>,
    /// Initial retry backoff.
    pub fork_retry_backoff: Option<u64>,
    /// Headers to use with `fork_url`
    pub fork_headers: Option<Vec<String>>,
    /// The available compute units per second.
    ///
    /// See also <https://docs.alchemy.com/reference/compute-units#what-are-cups-compute-units-per-second>
    pub compute_units_per_second: Option<u64>,
    /// Disables RPC rate limiting entirely.
    pub no_rpc_rate_limit: bool,
    /// Disables storage caching entirely.
    pub no_storage_caching: bool,
    /// The initial balance of each deployed test contract.
    pub initial_balance: U256,
    /// The address which will be executing all tests.
    pub sender: Address,
    /// Enables the FFI cheatcode.
    pub ffi: bool,
    /// Use the create 2 factory in all cases including tests and non-broadcasting scripts.
    pub always_use_create_2_factory: bool,
    /// Verbosity mode of EVM output as number of occurrences.
    pub verbosity: u8,
    /// The memory limit per EVM execution in bytes.
    /// If this limit is exceeded, a `MemoryLimitOOG` result is thrown.
    pub memory_limit: u64,
    /// Whether to enable isolation of calls.
    pub isolate: bool,
    /// Whether to disable block gas limit checks.
    pub disable_block_gas_limit: bool,
    /// Whether to enable tx gas limit checks as imposed by Osaka (EIP-7825).
    pub enable_tx_gas_limit: bool,
    #[serde(flatten)]
    /// Networks with enabled features.
    pub networks: NetworkConfigs,
    /// The CREATE2 deployer's address.
    pub create2_deployer: Address,
}

// Manual `Default` impl: `create2_deployer` must default to the canonical deployer
// rather than the zero address, so `#[derive(Default)]` is not usable.
impl Default for EvmOpts {
    fn default() -> Self {
        Self {
            env: Env::default(),
            fork_url: None,
            fork_block_number: None,
            fork_retries: None,
            fork_retry_backoff: None,
            fork_headers: None,
            compute_units_per_second: None,
            no_rpc_rate_limit: false,
            no_storage_caching: false,
            initial_balance: U256::default(),
            sender: Address::default(),
            ffi: false,
            always_use_create_2_factory: false,
            verbosity: 0,
            memory_limit: 0,
            isolate: false,
            disable_block_gas_limit: false,
            enable_tx_gas_limit: false,
            networks: NetworkConfigs::default(),
            create2_deployer: DEFAULT_CREATE2_DEPLOYER,
        }
    }
}
impl EvmOpts {
    /// Returns a `RetryProvider` for the given fork URL configured with options in `self`.
    pub fn fork_provider_with_url(&self, fork_url: &str) -> eyre::Result<RetryProvider> {
        ProviderBuilder::new(fork_url)
            .maybe_max_retry(self.fork_retries)
            .maybe_initial_backoff(self.fork_retry_backoff)
            .maybe_headers(self.fork_headers.clone())
            .compute_units_per_second(self.get_compute_units_per_second())
            .build()
    }

    /// Configures a new `revm::Env`.
    ///
    /// If a `fork_url` is set, it gets configured with settings fetched from the endpoint
    /// (chain id, gas price, and block parameters); otherwise a purely local environment
    /// is built from the values in `self.env`.
    pub async fn evm_env(&self) -> eyre::Result<crate::Env> {
        if let Some(ref fork_url) = self.fork_url {
            Ok(self.fork_evm_env(fork_url).await?.0)
        } else {
            Ok(self.local_evm_env())
        }
    }

    /// Returns the `revm::Env` that is configured with settings retrieved from the endpoint,
    /// and the block that was used to configure the environment.
    pub async fn fork_evm_env(&self, fork_url: &str) -> eyre::Result<(crate::Env, AnyRpcBlock)> {
        let provider = self.fork_provider_with_url(fork_url)?;
        self.fork_evm_env_with_provider(fork_url, &provider).await
    }

    /// Returns the `revm::Env` that is configured with settings retrieved from the provider,
    /// and the block that was used to configure the environment.
    pub async fn fork_evm_env_with_provider<P: Provider<N>, N: Network>(
        &self,
        fork_url: &str,
        provider: &P,
    ) -> eyre::Result<(crate::Env, N::BlockResponse)> {
        environment(
            provider,
            self.memory_limit,
            self.env.gas_price.map(|v| v as u128),
            self.env.chain_id,
            self.fork_block_number,
            self.sender,
            self.disable_block_gas_limit,
            self.enable_tx_gas_limit,
            self.networks,
        )
        .await
        .wrap_err_with(|| {
            // `fork_url` is only used here to enrich the error with the provider host.
            let mut msg = "could not instantiate forked environment".to_string();
            if let Ok(url) = Url::parse(fork_url)
                && let Some(provider) = url.host()
            {
                write!(msg, " with provider {provider}").unwrap();
            }
            msg
        })
    }

    /// Returns the `revm::Env` configured with only local settings
    fn local_evm_env(&self) -> crate::Env {
        let cfg = configure_env(
            self.env.chain_id.unwrap_or(foundry_common::DEV_CHAIN_ID),
            self.memory_limit,
            self.disable_block_gas_limit,
            self.enable_tx_gas_limit,
        );
        crate::Env {
            evm_env: EvmEnv {
                cfg_env: cfg,
                block_env: BlockEnv {
                    number: self.env.block_number,
                    beneficiary: self.env.block_coinbase,
                    timestamp: self.env.block_timestamp,
                    difficulty: U256::from(self.env.block_difficulty),
                    prevrandao: Some(self.env.block_prevrandao),
                    basefee: self.env.block_base_fee_per_gas,
                    gas_limit: self.gas_limit(),
                    ..Default::default()
                },
            },
            tx: TxEnv {
                gas_price: self.env.gas_price.unwrap_or_default().into(),
                gas_limit: self.gas_limit(),
                caller: self.sender,
                ..Default::default()
            },
        }
    }

    /// Helper function that returns the [CreateFork] to use, if any.
    ///
    /// storage caching for the [CreateFork] will be enabled if
    /// - `fork_url` is present
    /// - `fork_block_number` is present
    /// - `StorageCachingConfig` allows the `fork_url` + chain ID pair
    /// - storage is allowed (`no_storage_caching = false`)
    ///
    /// If all these criteria are met, then storage caching is enabled and storage info will be
    /// written to `<Config::foundry_cache_dir()>/<str(chainid)>/<block>/storage.json`.
    ///
    /// for `mainnet` and `--fork-block-number 14435000` on mac the corresponding storage cache will
    /// be at `~/.foundry/cache/mainnet/14435000/storage.json`.
    pub fn get_fork(&self, config: &Config, env: crate::Env) -> Option<CreateFork> {
        let url = self.fork_url.clone()?;
        let enable_caching = config.enable_caching(&url, env.evm_env.cfg_env.chain_id);
        Some(CreateFork { url, enable_caching, env, evm_opts: self.clone() })
    }

    /// Returns the gas limit to use, preferring the explicit block gas limit if set.
    pub fn gas_limit(&self) -> u64 {
        self.env.block_gas_limit.unwrap_or(self.env.gas_limit).0
    }

    /// Returns the available compute units per second, which will be
    /// - u64::MAX, if `no_rpc_rate_limit` if set (as rate limiting is disabled)
    /// - the assigned compute units, if `compute_units_per_second` is set
    /// - ALCHEMY_FREE_TIER_CUPS (330) otherwise
    fn get_compute_units_per_second(&self) -> u64 {
        if self.no_rpc_rate_limit {
            u64::MAX
        } else if let Some(cups) = self.compute_units_per_second {
            cups
        } else {
            ALCHEMY_FREE_TIER_CUPS
        }
    }

    /// Returns the chain ID from the RPC, if any.
    pub async fn get_remote_chain_id(&self) -> Option<Chain> {
        if let Some(url) = &self.fork_url
            && let Ok(provider) = self.fork_provider_with_url(url)
        {
            trace!(?url, "retrieving chain via eth_chainId");
            if let Ok(id) = provider.get_chain_id().await {
                return Some(Chain::from(id));
            }
            // Provider URLs could be of the format `{CHAIN_IDENTIFIER}-mainnet`
            // (e.g. Alchemy `opt-mainnet`, `arb-mainnet`), fallback to this method only
            // if we're not able to retrieve chain id from `RetryProvider`.
            if url.contains("mainnet") {
                trace!(?url, "auto detected mainnet chain");
                return Some(Chain::mainnet());
            }
        }
        None
    }
}
/// The EVM environment parameters configurable by the user.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct Env {
    /// The block gas limit.
    pub gas_limit: GasLimit,
    /// The `CHAINID` opcode value.
    pub chain_id: Option<u64>,
    /// the tx.gasprice value during EVM execution
    ///
    /// This is an Option, so we can determine in fork mode whether to use the config's gas price
    /// (if set by user) or the remote client's gas price.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gas_price: Option<u64>,
    /// the base fee in a block
    pub block_base_fee_per_gas: u64,
    /// the tx.origin value during EVM execution
    pub tx_origin: Address,
    /// the block.coinbase value during EVM execution
    pub block_coinbase: Address,
    /// the block.timestamp value during EVM execution
    #[serde(
        deserialize_with = "foundry_config::deserialize_u64_to_u256",
        serialize_with = "foundry_config::serialize_u64_or_u256"
    )]
    pub block_timestamp: U256,
    /// the block.number value during EVM execution
    #[serde(
        deserialize_with = "foundry_config::deserialize_u64_to_u256",
        serialize_with = "foundry_config::serialize_u64_or_u256"
    )]
    pub block_number: U256,
    /// the block.difficulty value during EVM execution
    pub block_difficulty: u64,
    /// Previous block beacon chain random value. Before merge this field is used for mix_hash
    pub block_prevrandao: B256,
    /// the block.gaslimit value during EVM execution
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub block_gas_limit: Option<GasLimit>,
    /// EIP-170: Contract code size limit in bytes. Useful to increase this because of tests.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code_size_limit: Option<usize>,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/state_snapshot.rs | crates/evm/core/src/state_snapshot.rs | //! Support for snapshotting different states
use alloy_primitives::{U256, map::HashMap};
/// Represents all state snapshots
#[derive(Clone, Debug)]
pub struct StateSnapshots<T> {
    id: U256,
    state_snapshots: HashMap<U256, T>,
}

impl<T> StateSnapshots<T> {
    /// Hands out the current id and advances the counter, saturating at `U256::MAX`.
    fn next_id(&mut self) -> U256 {
        let bumped = self.id.saturating_add(U256::from(1));
        std::mem::replace(&mut self.id, bumped)
    }

    /// Returns the state snapshot with the given id `id`
    pub fn get(&self, id: U256) -> Option<&T> {
        self.state_snapshots.get(&id)
    }

    /// Removes the state snapshot with the given `id`.
    ///
    /// This also drops every snapshot taken after the one with `id`,
    /// e.g. reverting to id 1 deletes snapshots with ids 1, 2, 3, etc.
    pub fn remove(&mut self, id: U256) -> Option<T> {
        let removed = self.state_snapshots.remove(&id);
        // Walk forward from `id + 1` up to (but excluding) the next unissued id.
        let mut later = id + U256::from(1);
        while later < self.id {
            self.state_snapshots.remove(&later);
            later += U256::from(1);
        }
        removed
    }

    /// Removes all state snapshots.
    pub fn clear(&mut self) {
        self.state_snapshots.clear();
    }

    /// Removes only the snapshot with the given `id`; later snapshots are kept.
    pub fn remove_at(&mut self, id: U256) -> Option<T> {
        self.state_snapshots.remove(&id)
    }

    /// Stores a new snapshot under a freshly allocated id and returns that id.
    pub fn insert(&mut self, state_snapshot: T) -> U256 {
        let id = self.next_id();
        self.state_snapshots.insert(id, state_snapshot);
        id
    }

    /// Stores the snapshot under the caller-chosen `id`.
    ///
    /// Does not auto-increment the next `id`.
    pub fn insert_at(&mut self, state_snapshot: T, id: U256) {
        self.state_snapshots.insert(id, state_snapshot);
    }
}

impl<T> Default for StateSnapshots<T> {
    fn default() -> Self {
        Self { id: U256::ZERO, state_snapshots: HashMap::default() }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/precompiles.rs | crates/evm/core/src/precompiles.rs | use alloy_primitives::{Address, address};
// Addresses of the standard Ethereum precompiled contracts (0x01 through 0x0a).

/// The ECRecover precompile address.
pub const EC_RECOVER: Address = address!("0x0000000000000000000000000000000000000001");

/// The SHA-256 precompile address.
pub const SHA_256: Address = address!("0x0000000000000000000000000000000000000002");

/// The RIPEMD-160 precompile address.
pub const RIPEMD_160: Address = address!("0x0000000000000000000000000000000000000003");

/// The Identity precompile address.
pub const IDENTITY: Address = address!("0x0000000000000000000000000000000000000004");

/// The ModExp precompile address.
pub const MOD_EXP: Address = address!("0x0000000000000000000000000000000000000005");

/// The ECAdd precompile address.
pub const EC_ADD: Address = address!("0x0000000000000000000000000000000000000006");

/// The ECMul precompile address.
pub const EC_MUL: Address = address!("0x0000000000000000000000000000000000000007");

/// The ECPairing precompile address.
pub const EC_PAIRING: Address = address!("0x0000000000000000000000000000000000000008");

/// The Blake2F precompile address.
pub const BLAKE_2F: Address = address!("0x0000000000000000000000000000000000000009");

/// The PointEvaluation precompile address.
pub const POINT_EVALUATION: Address = address!("0x000000000000000000000000000000000000000a");

/// Precompile addresses, in ascending address order.
pub const PRECOMPILES: &[Address] = &[
    EC_RECOVER,
    SHA_256,
    RIPEMD_160,
    IDENTITY,
    MOD_EXP,
    EC_ADD,
    EC_MUL,
    EC_PAIRING,
    BLAKE_2F,
    POINT_EVALUATION,
];
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/fork/database.rs | crates/evm/core/src/fork/database.rs | //! A revm database that forks off a remote client
use crate::{
backend::{RevertStateSnapshotAction, StateSnapshot},
state_snapshot::StateSnapshots,
};
use alloy_primitives::{Address, B256, U256, map::HashMap};
use alloy_rpc_types::BlockId;
use foundry_fork_db::{BlockchainDb, DatabaseError, SharedBackend};
use parking_lot::Mutex;
use revm::{
Database, DatabaseCommit,
bytecode::Bytecode,
database::{CacheDB, DatabaseRef},
state::{Account, AccountInfo},
};
use std::sync::Arc;
/// a [revm::Database] that's forked off another client
///
/// The `backend` is used to retrieve (missing) data, which is then fetched from the remote
/// endpoint. The inner in-memory database holds this storage and will be used for write operations.
/// This database uses the `backend` for read and the `db` for write operations. But note the
/// `backend` will also write (missing) data to the `db` in the background
#[derive(Clone, Debug)]
pub struct ForkedDatabase {
    /// Responsible for fetching missing data.
    ///
    /// This is responsible for getting data.
    backend: SharedBackend,
    /// Cached Database layer, ensures that changes are not written to the database that
    /// exclusively stores the state of the remote client.
    ///
    /// This separates Read/Write operations
    /// - reads from the `SharedBackend as DatabaseRef` writes to the internal cache storage.
    cache_db: CacheDB<SharedBackend>,
    /// Contains all the data already fetched.
    ///
    /// This exclusively stores the _unchanged_ remote client state.
    db: BlockchainDb,
    /// Holds the state snapshots of a blockchain.
    state_snapshots: Arc<Mutex<StateSnapshots<ForkDbStateSnapshot>>>,
}
impl ForkedDatabase {
    /// Creates a new instance of this DB
    pub fn new(backend: SharedBackend, db: BlockchainDb) -> Self {
        Self {
            cache_db: CacheDB::new(backend.clone()),
            backend,
            db,
            state_snapshots: Arc::new(Mutex::new(Default::default())),
        }
    }

    /// Returns a reference to the local write overlay (`CacheDB`).
    pub fn database(&self) -> &CacheDB<SharedBackend> {
        &self.cache_db
    }

    /// Returns a mutable reference to the local write overlay (`CacheDB`).
    pub fn database_mut(&mut self) -> &mut CacheDB<SharedBackend> {
        &mut self.cache_db
    }

    /// Returns the shared registry of taken state snapshots.
    pub fn state_snapshots(&self) -> &Arc<Mutex<StateSnapshots<ForkDbStateSnapshot>>> {
        &self.state_snapshots
    }

    /// Reset the fork to a fresh forked state, and optionally update the fork config
    pub fn reset(
        &mut self,
        _url: Option<String>,
        block_number: impl Into<BlockId>,
    ) -> Result<(), String> {
        self.backend.set_pinned_block(block_number).map_err(|err| err.to_string())?;

        // TODO need to find a way to update generic provider via url

        // wipe the storage retrieved from remote
        self.inner().db().clear();
        // create a fresh `CacheDB`, effectively wiping modified state
        self.cache_db = CacheDB::new(self.backend.clone());
        trace!(target: "backend::forkdb", "Cleared database");
        Ok(())
    }

    /// Flushes the cache to disk if configured
    pub fn flush_cache(&self) {
        self.db.cache().flush()
    }

    /// Returns the database that holds the remote state
    pub fn inner(&self) -> &BlockchainDb {
        &self.db
    }

    /// Captures the current remote state and local overlay as a snapshot value.
    pub fn create_state_snapshot(&self) -> ForkDbStateSnapshot {
        let db = self.db.db();
        // Clone the three remote-state maps, each under its own read lock.
        let state_snapshot = StateSnapshot {
            accounts: db.accounts.read().clone(),
            storage: db.storage.read().clone(),
            block_hashes: db.block_hashes.read().clone(),
        };
        ForkDbStateSnapshot { local: self.cache_db.clone(), state_snapshot }
    }

    /// Takes a snapshot, registers it, and returns its id.
    pub fn insert_state_snapshot(&self) -> U256 {
        let state_snapshot = self.create_state_snapshot();
        let mut state_snapshots = self.state_snapshots().lock();
        let id = state_snapshots.insert(state_snapshot);
        trace!(target: "backend::forkdb", "Created new snapshot {}", id);
        id
    }

    /// Removes the snapshot from the tracked snapshot and sets it as the current state
    ///
    /// Returns `false` (and leaves state untouched) if no snapshot with `id` exists.
    pub fn revert_state_snapshot(&mut self, id: U256, action: RevertStateSnapshotAction) -> bool {
        // Scope the lock so it's released before we potentially re-lock below.
        let state_snapshot = { self.state_snapshots().lock().remove_at(id) };
        if let Some(state_snapshot) = state_snapshot {
            // `Keep` semantics: re-register the snapshot so it can be reverted to again.
            if action.is_keep() {
                self.state_snapshots().lock().insert_at(state_snapshot.clone(), id);
            }
            let ForkDbStateSnapshot {
                local,
                state_snapshot: StateSnapshot { accounts, storage, block_hashes },
            } = state_snapshot;
            let db = self.inner().db();
            // Restore each remote-state map in its own write-lock scope, so no two
            // locks are held at once.
            {
                let mut accounts_lock = db.accounts.write();
                accounts_lock.clear();
                accounts_lock.extend(accounts);
            }
            {
                let mut storage_lock = db.storage.write();
                storage_lock.clear();
                storage_lock.extend(storage);
            }
            {
                let mut block_hashes_lock = db.block_hashes.write();
                block_hashes_lock.clear();
                block_hashes_lock.extend(block_hashes);
            }
            self.cache_db = local;
            trace!(target: "backend::forkdb", "Reverted snapshot {}", id);
            true
        } else {
            warn!(target: "backend::forkdb", "No snapshot to revert for {}", id);
            false
        }
    }
}
impl Database for ForkedDatabase {
    type Error = DatabaseError;

    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        // Unlike `<CacheDB as Database>::basic`, this always yields `Some`: the
        // `SharedBackend` loads the account on demand. See also
        // [MemDb::ensure_loaded](crate::backend::MemDb::ensure_loaded).
        self.cache_db.basic(address)
    }

    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.cache_db.code_by_hash(code_hash)
    }

    fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
        self.cache_db.storage(address, index)
    }

    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
        self.cache_db.block_hash(number)
    }
}
impl DatabaseRef for ForkedDatabase {
    type Error = DatabaseError;

    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        DatabaseRef::basic_ref(&self.cache_db, address)
    }

    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        DatabaseRef::code_by_hash_ref(&self.cache_db, code_hash)
    }

    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
        self.cache_db.storage_ref(address, index)
    }

    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        DatabaseRef::block_hash_ref(&self.cache_db, number)
    }
}

impl DatabaseCommit for ForkedDatabase {
    fn commit(&mut self, changes: HashMap<Address, Account>) {
        // Changes only land in the local `CacheDB` overlay; the remote-state
        // `BlockchainDb` stays untouched.
        self.database_mut().commit(changes)
    }
}
/// Represents a snapshot of the database
///
/// This mimics `revm::CacheDB`: `local` holds the modified overlay state while
/// `state_snapshot` holds the captured remote state.
#[derive(Clone, Debug)]
pub struct ForkDbStateSnapshot {
    pub local: CacheDB<SharedBackend>,
    pub state_snapshot: StateSnapshot,
}

impl ForkDbStateSnapshot {
    /// Looks up a storage slot in the locally modified accounts, if present.
    fn get_storage(&self, address: Address, index: U256) -> Option<U256> {
        let account = self.local.cache.accounts.get(&address)?;
        account.storage.get(&index).copied()
    }
}
// This `DatabaseRef` implementation works similar to `CacheDB` which prioritizes modified elements,
// and uses another db as fallback
// We prioritize stored changed accounts/storage
impl DatabaseRef for ForkDbStateSnapshot {
    type Error = DatabaseError;

    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        match self.local.cache.accounts.get(&address) {
            // A locally modified account wins.
            Some(account) => Ok(Some(account.info.clone())),
            None => {
                // Fall back to the captured remote state, then to the local db itself.
                let mut acc = self.state_snapshot.accounts.get(&address).cloned();
                if acc.is_none() {
                    acc = self.local.basic_ref(address)?;
                }
                Ok(acc)
            }
        }
    }

    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.local.code_by_hash_ref(code_hash)
    }

    // NOTE(review): unlike `basic_ref` and `block_hash_ref`, this never consults
    // `self.state_snapshot.storage`; both `get_storage` and the final fallback read
    // from `self.local` only — confirm this is intentional.
    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
        match self.local.cache.accounts.get(&address) {
            Some(account) => match account.storage.get(&index) {
                Some(entry) => Ok(*entry),
                None => match self.get_storage(address, index) {
                    None => DatabaseRef::storage_ref(&self.local, address, index),
                    Some(storage) => Ok(storage),
                },
            },
            None => match self.get_storage(address, index) {
                None => DatabaseRef::storage_ref(&self.local, address, index),
                Some(storage) => Ok(storage),
            },
        }
    }

    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        // Prefer the hash captured in the snapshot; fall back to the local db.
        match self.state_snapshot.block_hashes.get(&U256::from(number)).copied() {
            None => self.local.block_hash_ref(number),
            Some(block_hash) => Ok(block_hash),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backend::BlockchainDbMeta;
    use foundry_common::provider::get_http_provider;

    /// Demonstrates that `Database::basic` for `ForkedDatabase` will always return the
    /// `AccountInfo`
    // Requires network access: forks off a live RPC endpoint.
    #[tokio::test(flavor = "multi_thread")]
    async fn fork_db_insert_basic_default() {
        let rpc = foundry_test_utils::rpc::next_http_rpc_endpoint();
        let provider = get_http_provider(rpc.clone());
        let meta = BlockchainDbMeta::new(Default::default(), rpc);
        let db = BlockchainDb::new(meta, None);
        let backend = SharedBackend::spawn_backend(Arc::new(provider), db.clone(), None).await;
        let mut db = ForkedDatabase::new(backend, db);
        let address = Address::random();

        // Even a random (non-existent) address yields `Some(AccountInfo)`.
        let info = Database::basic(&mut db, address).unwrap();
        assert!(info.is_some());
        let mut info = info.unwrap();
        info.balance = U256::from(500u64);

        // insert the modified account info
        db.database_mut().insert_account_info(address, info.clone());

        // The local overlay now shadows the remote state.
        let loaded = Database::basic(&mut db, address).unwrap();
        assert!(loaded.is_some());
        assert_eq!(loaded.unwrap(), info);
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/fork/mod.rs | crates/evm/core/src/fork/mod.rs | use super::opts::EvmOpts;
use crate::Env;
mod init;
pub use init::{configure_env, environment};
pub mod database;
mod multi;
pub use multi::{ForkId, MultiFork, MultiForkHandler};
/// Represents a _fork_ of a remote chain whose data is available only via the `url` endpoint.
#[derive(Clone, Debug)]
pub struct CreateFork {
    /// Whether to enable rpc storage caching for this fork
    pub enable_caching: bool,
    /// The URL to a node for fetching remote state
    pub url: String,
    /// The env to create this fork, main purpose is to provide some metadata for the fork
    pub env: Env,
    /// All env settings as configured by the user
    pub evm_opts: EvmOpts,
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/fork/init.rs | crates/evm/core/src/fork/init.rs | use crate::{AsEnvMut, Env, EvmEnv, utils::apply_chain_and_block_specific_env_changes};
use alloy_consensus::BlockHeader;
use alloy_primitives::{Address, U256};
use alloy_provider::{Network, Provider, network::BlockResponse};
use alloy_rpc_types::BlockNumberOrTag;
use foundry_common::NON_ARCHIVE_NODE_WARNING;
use foundry_evm_networks::NetworkConfigs;
use revm::context::{BlockEnv, CfgEnv, TxEnv};
/// Initializes a REVM block environment based on a forked
/// ethereum provider.
///
/// Fetches gas price, chain id, and the pinned (or latest) block from `provider`,
/// honoring the `override_*` values if given, and returns the configured [`Env`]
/// together with the block used to build it.
#[allow(clippy::too_many_arguments)]
pub async fn environment<N: Network, P: Provider<N>>(
    provider: &P,
    memory_limit: u64,
    override_gas_price: Option<u128>,
    override_chain_id: Option<u64>,
    pin_block: Option<u64>,
    origin: Address,
    disable_block_gas_limit: bool,
    enable_tx_gas_limit: bool,
    configs: NetworkConfigs,
) -> eyre::Result<(Env, N::BlockResponse)> {
    trace!(
        %memory_limit,
        ?override_gas_price,
        ?override_chain_id,
        ?pin_block,
        %origin,
        %disable_block_gas_limit,
        %enable_tx_gas_limit,
        ?configs,
        "creating fork environment"
    );
    // Pin to the requested block number, otherwise track the latest block.
    let bn = match pin_block {
        Some(bn) => BlockNumberOrTag::Number(bn),
        None => BlockNumberOrTag::Latest,
    };
    // Run the three RPC requests concurrently; gas price and chain id are only
    // fetched when not overridden.
    let (gas_price, chain_id, block) = tokio::try_join!(
        option_try_or_else(override_gas_price, async || provider.get_gas_price().await),
        option_try_or_else(override_chain_id, async || provider.get_chain_id().await),
        provider.get_block_by_number(bn)
    )?;
    let Some(block) = block else {
        let bn_msg = match bn {
            BlockNumberOrTag::Number(bn) => format!("block number: {bn}"),
            bn => format!("{bn} block"),
        };
        let latest_msg = if let Ok(latest_block) = provider.get_block_number().await {
            // If the `eth_getBlockByNumber` call succeeds, but returns null instead of
            // the block, and the block number is less than equal the latest block, then
            // the user is forking from a non-archive node with an older block number.
            if let Some(block_number) = pin_block
                && block_number <= latest_block
            {
                error!("{NON_ARCHIVE_NODE_WARNING}");
            }
            format!("; latest block number: {latest_block}")
        } else {
            Default::default()
        };
        eyre::bail!("failed to get {bn_msg}{latest_msg}");
    };
    let cfg = configure_env(chain_id, memory_limit, disable_block_gas_limit, enable_tx_gas_limit);
    let mut env = Env {
        evm_env: EvmEnv {
            cfg_env: cfg,
            block_env: BlockEnv {
                number: U256::from(block.header().number()),
                timestamp: U256::from(block.header().timestamp()),
                beneficiary: block.header().beneficiary(),
                difficulty: block.header().difficulty(),
                prevrandao: block.header().mix_hash(),
                basefee: block.header().base_fee_per_gas().unwrap_or_default(),
                gas_limit: block.header().gas_limit(),
                ..Default::default()
            },
        },
        tx: TxEnv {
            caller: origin,
            gas_price,
            chain_id: Some(chain_id),
            gas_limit: block.header().gas_limit(),
            ..Default::default()
        },
    };
    apply_chain_and_block_specific_env_changes::<N>(env.as_env_mut(), &block, configs);
    Ok((env, block))
}
/// Resolves to `Ok(value)` when `option` is `Some(value)`; otherwise evaluates the
/// async fallback `f` and returns whatever it produces.
async fn option_try_or_else<T, E>(
    option: Option<T>,
    f: impl AsyncFnOnce() -> Result<T, E>,
) -> Result<T, E> {
    match option {
        Some(value) => Ok(value),
        None => f().await,
    }
}
/// Configures the EVM `CfgEnv` for the given chain id and memory limit.
///
/// The returned config relaxes several protocol checks that get in the way of testing:
/// contract code size limits, EIP-3607 sender checks, nonce checks, and (optionally)
/// the block/transaction gas limits.
pub fn configure_env(
    chain_id: u64,
    memory_limit: u64,
    disable_block_gas_limit: bool,
    enable_tx_gas_limit: bool,
) -> CfgEnv {
    let mut env_cfg = CfgEnv::default();
    env_cfg.chain_id = chain_id;
    env_cfg.memory_limit = memory_limit;
    // Never reject deployments because of bytecode size.
    env_cfg.limit_contract_code_size = Some(usize::MAX);
    // EIP-3607 rejects transactions from senders with deployed code. This can cause
    // issues during fuzz/invariant tests when the caller is itself a contract, so the
    // check is disabled by default.
    env_cfg.disable_eip3607 = true;
    env_cfg.disable_nonce_check = true;
    env_cfg.disable_block_gas_limit = disable_block_gas_limit;
    // By default do not enforce the per-transaction gas cap introduced by Osaka
    // (EIP-7825); callers opt in by passing `enable_tx_gas_limit = true`.
    if !enable_tx_gas_limit {
        env_cfg.tx_gas_limit_cap = Some(u64::MAX);
    }
    env_cfg
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/fork/multi.rs | crates/evm/core/src/fork/multi.rs | //! Support for running multiple fork backends.
//!
//! The design is similar to the single `SharedBackend`, `BackendHandler` but supports multiple
//! concurrently active pairs at once.
use super::CreateFork;
use crate::Env;
use alloy_consensus::BlockHeader;
use alloy_primitives::{U256, map::HashMap};
use alloy_provider::network::BlockResponse;
use foundry_config::Config;
use foundry_fork_db::{BackendHandler, BlockchainDb, SharedBackend, cache::BlockchainDbMeta};
use futures::{
FutureExt, StreamExt,
channel::mpsc::{Receiver, Sender, channel},
stream::Fuse,
task::{Context, Poll},
};
use revm::context::BlockEnv;
use std::{
fmt::{self, Write},
pin::Pin,
sync::{
Arc,
atomic::AtomicUsize,
mpsc::{Sender as OneshotSender, channel as oneshot_channel},
},
time::Duration,
};
/// The _unique_ identifier for a specific fork; this could be the name of the network or a
/// custom descriptive name.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ForkId(pub String);

impl ForkId {
    /// Builds a fork identifier from an endpoint URL and an optional block number.
    ///
    /// The id has the shape `url@0x<block>` (hex), or `url@latest` when no block is given.
    pub fn new(url: &str, num: Option<u64>) -> Self {
        let suffix = match num {
            Some(n) => format!("{n:#x}"),
            None => "latest".to_string(),
        };
        Self(format!("{url}@{suffix}"))
    }

    /// Returns the identifier of the fork as a string slice.
    pub fn as_str(&self) -> &str {
        self.0.as_str()
    }
}

impl fmt::Display for ForkId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the inner `String` so formatting flags (width, fill, …) still apply.
        fmt::Display::fmt(&self.0, f)
    }
}

impl<T: Into<String>> From<T> for ForkId {
    fn from(id: T) -> Self {
        let inner: String = id.into();
        Self(inner)
    }
}
/// The sender half of the multi-fork pair.
/// Can send requests to the `MultiForkHandler` to create forks.
#[derive(Clone, Debug)]
#[must_use]
pub struct MultiFork {
    /// Channel to send `Request`s to the handler.
    handler: Sender<Request>,
    /// Ensures that all rpc resources get flushed properly on drop (see `ShutDownMultiFork`).
    _shutdown: Arc<ShutDownMultiFork>,
}
impl MultiFork {
    /// Creates a new pair and spawns the `MultiForkHandler` on a background thread.
    pub fn spawn() -> Self {
        trace!(target: "fork::multi", "spawning multifork");
        let (fork, mut handler) = Self::new();
        // Spawn a light-weight thread just for sending and receiving data from the remote
        // client(s).
        let fut = async move {
            // Flush the cache every 60s; this ensures that long-running fork tests get their
            // cache flushed from time to time.
            // NOTE: the interval is installed here (inside the async block) because
            // `tokio::time::interval_at` requires a running tokio runtime.
            handler.set_flush_cache_interval(Duration::from_secs(60));
            handler.await
        };
        match tokio::runtime::Handle::try_current() {
            // Already inside a tokio runtime: run the handler as a task on it.
            Ok(rt) => _ = rt.spawn(fut),
            // No ambient runtime: drive the handler on a dedicated single-threaded runtime.
            Err(_) => {
                trace!(target: "fork::multi", "spawning multifork backend thread");
                _ = std::thread::Builder::new()
                    .name("multi-fork-backend".into())
                    .spawn(move || {
                        tokio::runtime::Builder::new_current_thread()
                            .enable_all()
                            .build()
                            .expect("failed to build tokio runtime")
                            .block_on(fut)
                    })
                    .expect("failed to spawn thread")
            }
        }
        trace!(target: "fork::multi", "spawned MultiForkHandler thread");
        fork
    }

    /// Creates a new multi fork pair.
    ///
    /// Use [`spawn`](Self::spawn) instead.
    #[doc(hidden)]
    pub fn new() -> (Self, MultiForkHandler) {
        let (handler, handler_rx) = channel(1);
        let _shutdown = Arc::new(ShutDownMultiFork { handler: Some(handler.clone()) });
        (Self { handler, _shutdown }, MultiForkHandler::new(handler_rx))
    }

    /// Returns a fork backend.
    ///
    /// If no matching fork backend exists it will be created.
    pub fn create_fork(&self, fork: CreateFork) -> eyre::Result<(ForkId, SharedBackend, Env)> {
        trace!("Creating new fork, url={}, block={:?}", fork.url, fork.evm_opts.fork_block_number);
        let (sender, rx) = oneshot_channel();
        let req = Request::CreateFork(Box::new(fork), sender);
        self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?;
        rx.recv()?
    }

    /// Rolls the block of the fork.
    ///
    /// If no matching fork backend exists it will be created.
    pub fn roll_fork(
        &self,
        fork: ForkId,
        block: u64,
    ) -> eyre::Result<(ForkId, SharedBackend, Env)> {
        trace!(?fork, ?block, "rolling fork");
        let (sender, rx) = oneshot_channel();
        let req = Request::RollFork(fork, block, sender);
        self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?;
        rx.recv()?
    }

    /// Returns the `Env` of the given fork, if any.
    pub fn get_env(&self, fork: ForkId) -> eyre::Result<Option<Env>> {
        trace!(?fork, "getting env config");
        let (sender, rx) = oneshot_channel();
        let req = Request::GetEnv(fork, sender);
        self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?;
        Ok(rx.recv()?)
    }

    /// Updates block number and timestamp of given fork with new values.
    pub fn update_block(&self, fork: ForkId, number: U256, timestamp: U256) -> eyre::Result<()> {
        trace!(?fork, ?number, ?timestamp, "update fork block");
        self.handler
            .clone()
            .try_send(Request::UpdateBlock(fork, number, timestamp))
            .map_err(|e| eyre::eyre!("{:?}", e))
    }

    /// Updates the fork's entire block env.
    ///
    /// This is required for tx level forking where we need to fork off the `block - 1` state but
    /// still need to use the env settings of `env`.
    pub fn update_block_env(&self, fork: ForkId, env: BlockEnv) -> eyre::Result<()> {
        trace!(?fork, ?env, "update fork block");
        self.handler
            .clone()
            .try_send(Request::UpdateEnv(fork, env))
            .map_err(|e| eyre::eyre!("{:?}", e))
    }

    /// Returns the corresponding fork if it exists.
    ///
    /// Returns `None` if no matching fork backend is available.
    pub fn get_fork(&self, id: impl Into<ForkId>) -> eyre::Result<Option<SharedBackend>> {
        let id = id.into();
        trace!(?id, "get fork backend");
        let (sender, rx) = oneshot_channel();
        let req = Request::GetFork(id, sender);
        self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?;
        Ok(rx.recv()?)
    }

    /// Returns the corresponding fork url if it exists.
    ///
    /// Returns `None` if no matching fork is available.
    pub fn get_fork_url(&self, id: impl Into<ForkId>) -> eyre::Result<Option<String>> {
        let (sender, rx) = oneshot_channel();
        let req = Request::GetForkUrl(id.into(), sender);
        self.handler.clone().try_send(req).map_err(|e| eyre::eyre!("{:?}", e))?;
        Ok(rx.recv()?)
    }
}
/// The fork-db backend handler type driven to completion by `MultiForkHandler`.
type Handler = BackendHandler;

/// Future resolving to a newly established fork: its id, the created fork data, and its handler.
type CreateFuture =
    Pin<Box<dyn Future<Output = eyre::Result<(ForkId, CreatedFork, Handler)>> + Send>>;
/// Oneshot sender used to deliver the result of a fork-creation request.
type CreateSender = OneshotSender<eyre::Result<(ForkId, SharedBackend, Env)>>;
/// Oneshot sender used to deliver a fork's `Env`, if the fork exists.
type GetEnvSender = OneshotSender<Option<Env>>;
/// Request that's sent to the handler.
#[derive(Debug)]
enum Request {
    /// Creates a new ForkBackend.
    CreateFork(Box<CreateFork>, CreateSender),
    /// Returns the Fork backend for the `ForkId` if it exists.
    GetFork(ForkId, OneshotSender<Option<SharedBackend>>),
    /// Adjusts the block that's being forked, by creating a new fork at the new block.
    RollFork(ForkId, u64, CreateSender),
    /// Returns the environment of the fork.
    GetEnv(ForkId, GetEnvSender),
    /// Updates the block number and timestamp of the fork.
    UpdateBlock(ForkId, U256, U256),
    /// Replaces the fork's entire block environment.
    UpdateEnv(ForkId, BlockEnv),
    /// Shuts down the entire `MultiForkHandler`, see `ShutDownMultiFork`.
    ShutDown(OneshotSender<()>),
    /// Returns the Fork Url for the `ForkId` if it exists.
    GetForkUrl(ForkId, OneshotSender<Option<String>>),
}
/// In-flight background work owned by the handler.
enum ForkTask {
    /// Contains the future that will establish a new fork, the pending `ForkId`, the
    /// original requester, and any additional requesters that asked for the same fork
    /// while creation was still in progress.
    Create(CreateFuture, ForkId, CreateSender, Vec<CreateSender>),
}
/// The type that manages connections in the background.
#[must_use = "futures do nothing unless polled"]
pub struct MultiForkHandler {
    /// Incoming requests from the `MultiFork`.
    incoming: Fuse<Receiver<Request>>,
    /// All active handlers.
    ///
    /// It's expected that this list will be rather small (<10).
    handlers: Vec<(ForkId, Handler)>,
    /// Fork-creation tasks currently in progress.
    pending_tasks: Vec<ForkTask>,
    /// All _unique_ forkids mapped to their corresponding backend.
    ///
    /// Note: The backend can be shared by multiple ForkIds if they target the same provider and
    /// block number.
    forks: HashMap<ForkId, CreatedFork>,
    /// Optional periodic interval to flush rpc cache.
    flush_cache_interval: Option<tokio::time::Interval>,
}
impl MultiForkHandler {
    /// Creates a handler that reads requests from `incoming`; no flush interval is set yet.
    fn new(incoming: Receiver<Request>) -> Self {
        Self {
            incoming: incoming.fuse(),
            handlers: Default::default(),
            pending_tasks: Default::default(),
            forks: Default::default(),
            flush_cache_interval: None,
        }
    }

    /// Sets the interval after which all rpc caches should be flushed periodically.
    pub fn set_flush_cache_interval(&mut self, period: Duration) -> &mut Self {
        self.flush_cache_interval =
            Some(tokio::time::interval_at(tokio::time::Instant::now() + period, period));
        self
    }

    /// Returns the list of additional senders of a matching task for the given id, if any.
    #[expect(irrefutable_let_patterns)]
    fn find_in_progress_task(&mut self, id: &ForkId) -> Option<&mut Vec<CreateSender>> {
        for task in &mut self.pending_tasks {
            if let ForkTask::Create(_, in_progress, _, additional) = task
                && in_progress == id
            {
                return Some(additional);
            }
        }
        None
    }

    /// Starts creating the requested fork, or attaches `sender` to an identical in-flight task.
    fn create_fork(&mut self, fork: CreateFork, sender: CreateSender) {
        let fork_id = ForkId::new(&fork.url, fork.evm_opts.fork_block_number);
        trace!(?fork_id, "created new forkId");

        // There could already be a task for the requested fork in progress.
        if let Some(in_progress) = self.find_in_progress_task(&fork_id) {
            in_progress.push(sender);
            return;
        }

        // Need to create a new fork.
        let task = Box::pin(create_fork(fork));
        self.pending_tasks.push(ForkTask::Create(task, fork_id, sender, Vec::new()));
    }

    /// Registers a freshly created fork and notifies everyone waiting on it.
    fn insert_new_fork(
        &mut self,
        fork_id: ForkId,
        fork: CreatedFork,
        sender: CreateSender,
        additional_senders: Vec<CreateSender>,
    ) {
        self.forks.insert(fork_id.clone(), fork.clone());
        let _ = sender.send(Ok((fork_id.clone(), fork.backend.clone(), fork.opts.env.clone())));

        // Notify all additional senders and track unique forkIds.
        for sender in additional_senders {
            let next_fork_id = fork.inc_senders(fork_id.clone());
            self.forks.insert(next_fork_id.clone(), fork.clone());
            let _ = sender.send(Ok((next_fork_id, fork.backend.clone(), fork.opts.env.clone())));
        }
    }

    /// Updates the fork's entire block env.
    fn update_env(&mut self, fork_id: ForkId, env: BlockEnv) {
        if let Some(fork) = self.forks.get_mut(&fork_id) {
            fork.opts.env.evm_env.block_env = env;
        }
    }

    /// Update fork block number and timestamp. Used to preserve values set by `roll` and `warp`
    /// cheatcodes when new fork selected.
    fn update_block(&mut self, fork_id: ForkId, block_number: U256, block_timestamp: U256) {
        if let Some(fork) = self.forks.get_mut(&fork_id) {
            fork.opts.env.evm_env.block_env.number = block_number;
            fork.opts.env.evm_env.block_env.timestamp = block_timestamp;
        }
    }

    /// Dispatches a single request received from the `MultiFork` sender side.
    fn on_request(&mut self, req: Request) {
        match req {
            Request::CreateFork(fork, sender) => self.create_fork(*fork, sender),
            Request::GetFork(fork_id, sender) => {
                let fork = self.forks.get(&fork_id).map(|f| f.backend.clone());
                let _ = sender.send(fork);
            }
            Request::RollFork(fork_id, block, sender) => {
                if let Some(fork) = self.forks.get(&fork_id) {
                    trace!(target: "fork::multi", "rolling {} to {}", fork_id, block);
                    // Rolling is creating a new fork with the same opts at the new block.
                    let mut opts = fork.opts.clone();
                    opts.evm_opts.fork_block_number = Some(block);
                    self.create_fork(opts, sender)
                } else {
                    let _ =
                        sender.send(Err(eyre::eyre!("No matching fork exists for {}", fork_id)));
                }
            }
            Request::GetEnv(fork_id, sender) => {
                let _ = sender.send(self.forks.get(&fork_id).map(|fork| fork.opts.env.clone()));
            }
            Request::UpdateBlock(fork_id, block_number, block_timestamp) => {
                self.update_block(fork_id, block_number, block_timestamp);
            }
            Request::UpdateEnv(fork_id, block_env) => {
                self.update_env(fork_id, block_env);
            }
            Request::ShutDown(sender) => {
                trace!(target: "fork::multi", "received shutdown signal");
                // We're emptying all fork backends, this way we ensure all caches get flushed.
                self.forks.clear();
                self.handlers.clear();
                let _ = sender.send(());
            }
            Request::GetForkUrl(fork_id, sender) => {
                let fork = self.forks.get(&fork_id).map(|f| f.opts.url.clone());
                let _ = sender.send(fork);
            }
        }
    }
}
// Drives all handlers to completion.
// This future will finish once all underlying BackendHandlers are completed.
impl Future for MultiForkHandler {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        // Receive new requests.
        loop {
            match this.incoming.poll_next_unpin(cx) {
                Poll::Ready(Some(req)) => this.on_request(req),
                Poll::Ready(None) => {
                    // Channel closed, but we still need to drive the fork handlers to completion.
                    trace!(target: "fork::multi", "request channel closed");
                    break;
                }
                Poll::Pending => break,
            }
        }

        // Advance all fork-creation tasks; `swap_remove` + reverse iteration keeps this O(n).
        for n in (0..this.pending_tasks.len()).rev() {
            let task = this.pending_tasks.swap_remove(n);
            match task {
                ForkTask::Create(mut fut, id, sender, additional_senders) => {
                    if let Poll::Ready(resp) = fut.poll_unpin(cx) {
                        match resp {
                            Ok((fork_id, fork, handler)) => {
                                if let Some(fork) = this.forks.get(&fork_id).cloned() {
                                    // Fork already exists: reuse it under a new unique id
                                    // instead of registering a duplicate handler.
                                    this.insert_new_fork(
                                        fork.inc_senders(fork_id),
                                        fork,
                                        sender,
                                        additional_senders,
                                    );
                                } else {
                                    this.handlers.push((fork_id.clone(), handler));
                                    this.insert_new_fork(fork_id, fork, sender, additional_senders);
                                }
                            }
                            Err(err) => {
                                let _ = sender.send(Err(eyre::eyre!("{err}")));
                                for sender in additional_senders {
                                    let _ = sender.send(Err(eyre::eyre!("{err}")));
                                }
                            }
                        }
                    } else {
                        // Not ready yet: put the task back for the next poll.
                        this.pending_tasks.push(ForkTask::Create(
                            fut,
                            id,
                            sender,
                            additional_senders,
                        ));
                    }
                }
            }
        }

        // Advance all handlers.
        for n in (0..this.handlers.len()).rev() {
            let (id, mut handler) = this.handlers.swap_remove(n);
            match handler.poll_unpin(cx) {
                Poll::Ready(_) => {
                    trace!(target: "fork::multi", "fork {:?} completed", id);
                }
                Poll::Pending => {
                    this.handlers.push((id, handler));
                }
            }
        }

        if this.handlers.is_empty() && this.incoming.is_done() {
            trace!(target: "fork::multi", "completed");
            return Poll::Ready(());
        }

        // Periodically flush cached RPC state.
        if this
            .flush_cache_interval
            .as_mut()
            .map(|interval| interval.poll_tick(cx).is_ready())
            .unwrap_or_default()
            && !this.forks.is_empty()
        {
            trace!(target: "fork::multi", "tick flushing caches");
            let forks = this.forks.values().map(|f| f.backend.clone()).collect::<Vec<_>>();
            // Flush this on new thread to not block here.
            std::thread::Builder::new()
                .name("flusher".into())
                .spawn(move || {
                    forks.into_iter().for_each(|fork| fork.flush_cache());
                })
                .expect("failed to spawn thread");
        }
        Poll::Pending
    }
}
/// Tracks the created Fork
#[derive(Debug, Clone)]
struct CreatedFork {
    /// How the fork was initially created.
    opts: CreateFork,
    /// Copy of the sender.
    backend: SharedBackend,
    /// How many consumers there are, since a `SharedBackend` can be used by multiple
    /// consumers.
    num_senders: Arc<AtomicUsize>,
}
impl CreatedFork {
pub fn new(opts: CreateFork, backend: SharedBackend) -> Self {
Self { opts, backend, num_senders: Arc::new(AtomicUsize::new(1)) }
}
/// Increment senders and return unique identifier of the fork.
fn inc_senders(&self, fork_id: ForkId) -> ForkId {
format!(
"{}-{}",
fork_id.as_str(),
self.num_senders.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
)
.into()
}
}
/// A type that's used to signal the `MultiForkHandler` when it's time to shut down.
///
/// This is essentially a sync on drop, so that the `MultiForkHandler` can flush all rpc caches.
///
/// This type intentionally does not implement `Clone` since it's intended that there's only one
/// instance.
#[derive(Debug)]
struct ShutDownMultiFork {
    /// Sender used to deliver the final `ShutDown` request; taken on drop.
    handler: Option<Sender<Request>>,
}

impl Drop for ShutDownMultiFork {
    fn drop(&mut self) {
        trace!(target: "fork::multi", "initiating shutdown");
        let (sender, rx) = oneshot_channel();
        let req = Request::ShutDown(sender);
        // Best effort: if the request can be delivered, block until the handler
        // confirms it has cleared (and thereby flushed) all fork backends.
        if let Some(mut handler) = self.handler.take()
            && handler.try_send(req).is_ok()
        {
            let _ = rx.recv();
            trace!(target: "fork::cache", "multifork backend shutdown");
        }
    }
}
/// Creates a new fork.
///
/// Establishes a new `Provider` for the endpoint and returns the fork's id, the created
/// fork data, and the backend handler that must be driven to completion.
async fn create_fork(mut fork: CreateFork) -> eyre::Result<(ForkId, CreatedFork, Handler)> {
    let provider = fork.evm_opts.fork_provider_with_url(&fork.url)?;

    // Initialise the fork environment from the remote endpoint.
    let (env, block) = fork.evm_opts.fork_evm_env_with_provider(&fork.url, &provider).await?;
    fork.env = env;

    // Use the number reported by the fetched block: the env's number can differ on some
    // L2s (e.g. Arbitrum).
    let block_number = block.header().number();

    let db_meta = BlockchainDbMeta::new(fork.env.evm_env.block_env.clone(), fork.url.clone());
    // Only resolve a cache location when caching is enabled.
    let cache_path = fork
        .enable_caching
        .then(|| Config::foundry_block_cache_dir(fork.env.evm_env.cfg_env.chain_id, block_number))
        .flatten();
    let db = BlockchainDb::new(db_meta, cache_path);

    let (backend, handler) = SharedBackend::new(provider, db, Some(block_number.into()));
    let created = CreatedFork::new(fork, backend);
    let fork_id = ForkId::new(&created.opts.url, Some(block_number));
    Ok((fork_id, created, handler))
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/error.rs | crates/evm/core/src/backend/error.rs | use alloy_primitives::Address;
pub use foundry_fork_db::{DatabaseError, DatabaseResult};
use revm::context_interface::result::EVMError;
use std::convert::Infallible;
/// Result type alias for operations that may fail with a [`BackendError`].
pub type BackendResult<T> = Result<T, BackendError>;

/// Errors that can happen when working with [`revm::Database`]
#[derive(Debug, thiserror::Error)]
#[expect(missing_docs)]
pub enum BackendError {
    #[error("{0}")]
    Message(String),
    #[error("cheatcodes are not enabled for {0}; see `vm.allowCheatcodes(address)`")]
    NoCheats(Address),
    #[error(transparent)]
    Database(#[from] DatabaseError),
    #[error("failed to fetch account info for {0}")]
    MissingAccount(Address),
    #[error(
        "CREATE2 Deployer (0x4e59b44847b379578588920ca78fbf26c0b4956c) not present on this chain.\n\
        For a production environment, you can deploy it using the pre-signed transaction from \
        https://github.com/Arachnid/deterministic-deployment-proxy.\n\
        For a test environment, you can use `etch` to place the required bytecode at that address."
    )]
    MissingCreate2Deployer,
    #[error("{0}")]
    Other(String),
}
impl BackendError {
    /// Builds a [`BackendError::Message`] from anything convertible into a `String`.
    pub fn msg(msg: impl Into<String>) -> Self {
        let text: String = msg.into();
        Self::Message(text)
    }

    /// Builds a [`BackendError::Message`] from any displayable value.
    pub fn display(msg: impl std::fmt::Display) -> Self {
        Self::Message(format!("{msg}"))
    }
}
impl From<tokio::task::JoinError> for BackendError {
    fn from(value: tokio::task::JoinError) -> Self {
        Self::display(value)
    }
}

impl From<Infallible> for BackendError {
    fn from(value: Infallible) -> Self {
        // `Infallible` has no variants, so this conversion can never actually execute.
        match value {}
    }
}

// Note: this is mostly necessary to use some revm internals that return an [EVMError]
impl<T: Into<Self>> From<EVMError<T>> for BackendError {
    fn from(err: EVMError<T>) -> Self {
        match err {
            EVMError::Database(err) => err.into(),
            EVMError::Custom(err) => Self::msg(err),
            EVMError::Header(err) => Self::msg(err.to_string()),
            EVMError::Transaction(err) => Self::msg(err.to_string()),
        }
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/mod.rs | crates/evm/core/src/backend/mod.rs | //! Foundry's main executor backend abstraction and implementation.
use crate::{
AsEnvMut, Env, EnvMut, InspectorExt,
constants::{CALLER, CHEATCODE_ADDRESS, DEFAULT_CREATE2_DEPLOYER, TEST_CONTRACT_ADDRESS},
evm::new_evm_with_inspector,
fork::{CreateFork, ForkId, MultiFork},
state_snapshot::StateSnapshots,
utils::{configure_tx_env, configure_tx_req_env, get_blob_base_fee_update_fraction},
};
use alloy_consensus::Typed2718;
use alloy_evm::Evm;
use alloy_genesis::GenesisAccount;
use alloy_network::{AnyRpcBlock, AnyTxEnvelope, TransactionResponse};
use alloy_primitives::{Address, B256, TxKind, U256, keccak256, uint};
use alloy_rpc_types::{BlockNumberOrTag, Transaction, TransactionRequest};
use eyre::Context;
use foundry_common::{SYSTEM_TRANSACTION_TYPE, is_known_system_sender};
pub use foundry_fork_db::{BlockchainDb, SharedBackend, cache::BlockchainDbMeta};
use revm::{
Database, DatabaseCommit, JournalEntry,
bytecode::Bytecode,
context::JournalInner,
context_interface::{block::BlobExcessGasAndPrice, result::ResultAndState},
database::{CacheDB, DatabaseRef},
inspector::NoOpInspector,
precompile::{PrecompileSpecId, Precompiles},
primitives::{HashMap as Map, KECCAK_EMPTY, Log, hardfork::SpecId},
state::{Account, AccountInfo, EvmState, EvmStorageSlot},
};
use std::{
collections::{BTreeMap, HashMap, HashSet},
fmt::Debug,
time::Instant,
};
mod diagnostic;
pub use diagnostic::RevertDiagnostic;
mod error;
pub use error::{BackendError, BackendResult, DatabaseError, DatabaseResult};
mod cow;
pub use cow::CowBackend;
mod in_memory_db;
pub use in_memory_db::{EmptyDBWrapper, FoundryEvmInMemoryDB, MemDb};
mod snapshot;
pub use snapshot::{BackendStateSnapshot, RevertStateSnapshotAction, StateSnapshot};
// A `revm::Database` that is used in forking mode
type ForkDB = CacheDB<SharedBackend>;

/// Represents a numeric `ForkId` valid only for the existence of the `Backend`.
///
/// The difference between `ForkId` and `LocalForkId` is that `ForkId` tracks pairs of `endpoint +
/// block` which can be reused by multiple tests, whereas the `LocalForkId` is unique within a test.
pub type LocalForkId = U256;

/// Represents the index of a fork in the created forks vector.
/// This is used for fast lookup.
type ForkLookupIndex = usize;

/// All accounts that will have persistent storage across fork swaps.
const DEFAULT_PERSISTENT_ACCOUNTS: [Address; 3] =
    [CHEATCODE_ADDRESS, DEFAULT_CREATE2_DEPLOYER, CALLER];

/// `bytes32("failed")`, as a storage slot key into [`CHEATCODE_ADDRESS`].
///
/// Used by all `forge-std` test contracts and newer `DSTest` test contracts as a global marker for
/// a failed test.
pub const GLOBAL_FAIL_SLOT: U256 =
    uint!(0x6661696c65640000000000000000000000000000000000000000000000000000_U256);

/// The journal type used throughout the backend.
pub type JournaledState = JournalInner<JournalEntry>;
/// An extension trait that allows us to easily extend the `revm::Inspector` capabilities
#[auto_impl::auto_impl(&mut)]
pub trait DatabaseExt: Database<Error = DatabaseError> + DatabaseCommit + Debug {
/// Creates a new state snapshot at the current point of execution.
///
/// A state snapshot is associated with a new unique id that's created for the snapshot.
/// State snapshots can be reverted: [DatabaseExt::revert_state], however, depending on the
/// [RevertStateSnapshotAction], it will keep the snapshot alive or delete it.
fn snapshot_state(&mut self, journaled_state: &JournaledState, env: &mut EnvMut<'_>) -> U256;
/// Reverts the snapshot if it exists
///
/// Returns `true` if the snapshot was successfully reverted, `false` if no snapshot for that id
/// exists.
///
/// **N.B.** While this reverts the state of the evm to the snapshot, it keeps new logs made
/// since the snapshots was created. This way we can show logs that were emitted between
/// snapshot and its revert.
/// This will also revert any changes in the `Env` and replace it with the captured `Env` of
/// `Self::snapshot_state`.
///
/// Depending on [RevertStateSnapshotAction] it will keep the snapshot alive or delete it.
fn revert_state(
&mut self,
id: U256,
journaled_state: &JournaledState,
env: &mut EnvMut<'_>,
action: RevertStateSnapshotAction,
) -> Option<JournaledState>;
/// Deletes the state snapshot with the given `id`
///
/// Returns `true` if the snapshot was successfully deleted, `false` if no snapshot for that id
/// exists.
fn delete_state_snapshot(&mut self, id: U256) -> bool;
/// Deletes all state snapshots.
fn delete_state_snapshots(&mut self);
/// Creates and also selects a new fork
///
/// This is basically `create_fork` + `select_fork`
fn create_select_fork(
&mut self,
fork: CreateFork,
env: &mut EnvMut<'_>,
journaled_state: &mut JournaledState,
) -> eyre::Result<LocalForkId> {
let id = self.create_fork(fork)?;
self.select_fork(id, env, journaled_state)?;
Ok(id)
}
/// Creates and also selects a new fork
///
/// This is basically `create_fork` + `select_fork`
fn create_select_fork_at_transaction(
&mut self,
fork: CreateFork,
env: &mut EnvMut<'_>,
journaled_state: &mut JournaledState,
transaction: B256,
) -> eyre::Result<LocalForkId> {
let id = self.create_fork_at_transaction(fork, transaction)?;
self.select_fork(id, env, journaled_state)?;
Ok(id)
}
/// Creates a new fork but does _not_ select it
fn create_fork(&mut self, fork: CreateFork) -> eyre::Result<LocalForkId>;
/// Creates a new fork but does _not_ select it
fn create_fork_at_transaction(
&mut self,
fork: CreateFork,
transaction: B256,
) -> eyre::Result<LocalForkId>;
/// Selects the fork's state
///
/// This will also modify the current `Env`.
///
/// **Note**: this does not change the local state, but swaps the remote state
///
/// # Errors
///
/// Returns an error if no fork with the given `id` exists
fn select_fork(
&mut self,
id: LocalForkId,
env: &mut EnvMut<'_>,
journaled_state: &mut JournaledState,
) -> eyre::Result<()>;
/// Updates the fork to given block number.
///
/// This will essentially create a new fork at the given block height.
///
/// # Errors
///
/// Returns an error if not matching fork was found.
fn roll_fork(
&mut self,
id: Option<LocalForkId>,
block_number: u64,
env: &mut EnvMut<'_>,
journaled_state: &mut JournaledState,
) -> eyre::Result<()>;
/// Updates the fork to given transaction hash
///
/// This will essentially create a new fork at the block this transaction was mined and replays
/// all transactions up until the given transaction.
///
/// # Errors
///
/// Returns an error if not matching fork was found.
fn roll_fork_to_transaction(
&mut self,
id: Option<LocalForkId>,
transaction: B256,
env: &mut EnvMut<'_>,
journaled_state: &mut JournaledState,
) -> eyre::Result<()>;
/// Fetches the given transaction for the fork and executes it, committing the state in the DB
fn transact(
&mut self,
id: Option<LocalForkId>,
transaction: B256,
env: Env,
journaled_state: &mut JournaledState,
inspector: &mut dyn InspectorExt,
) -> eyre::Result<()>;
/// Executes a given TransactionRequest, commits the new state to the DB
fn transact_from_tx(
&mut self,
transaction: &TransactionRequest,
env: Env,
journaled_state: &mut JournaledState,
inspector: &mut dyn InspectorExt,
) -> eyre::Result<()>;
/// Returns the `ForkId` that's currently used in the database, if fork mode is on
fn active_fork_id(&self) -> Option<LocalForkId>;
/// Returns the Fork url that's currently used in the database, if fork mode is on
fn active_fork_url(&self) -> Option<String>;
/// Whether the database is currently in forked mode.
fn is_forked_mode(&self) -> bool {
self.active_fork_id().is_some()
}
/// Ensures that an appropriate fork exists
///
/// If `id` contains a requested `Fork` this will ensure it exists.
/// Otherwise, this returns the currently active fork.
///
/// # Errors
///
/// Returns an error if the given `id` does not match any forks
///
/// Returns an error if no fork exists
fn ensure_fork(&self, id: Option<LocalForkId>) -> eyre::Result<LocalForkId>;
/// Ensures that a corresponding `ForkId` exists for the given local `id`
fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId>;
/// Handling multiple accounts/new contracts in a multifork environment can be challenging since
/// every fork has its own standalone storage section. So this can be a common error to run
/// into:
///
/// ```solidity
/// function testCanDeploy() public {
/// vm.selectFork(mainnetFork);
/// // contract created while on `mainnetFork`
/// DummyContract dummy = new DummyContract();
/// // this will succeed
/// dummy.hello();
///
/// vm.selectFork(optimismFork);
///
/// vm.expectRevert();
/// // this will revert since `dummy` contract only exists on `mainnetFork`
/// dummy.hello();
/// }
/// ```
///
/// If this happens (`dummy.hello()`), or more general, a call on an address that's not a
/// contract, revm will revert without useful context. This call will check in this context if
/// `address(dummy)` belongs to an existing contract and if not will check all other forks if
/// the contract is deployed there.
///
/// Returns a more useful error message if that's the case
fn diagnose_revert(
&self,
callee: Address,
journaled_state: &JournaledState,
) -> Option<RevertDiagnostic>;
/// Loads the account allocs from the given `allocs` map into the passed [JournaledState].
///
/// Returns [Ok] if all accounts were successfully inserted into the journal, [Err] otherwise.
fn load_allocs(
&mut self,
allocs: &BTreeMap<Address, GenesisAccount>,
journaled_state: &mut JournaledState,
) -> Result<(), BackendError>;
/// Copies bytecode, storage, nonce and balance from the given genesis account to the target
/// address.
///
/// Returns [Ok] if data was successfully inserted into the journal, [Err] otherwise.
fn clone_account(
&mut self,
source: &GenesisAccount,
target: &Address,
journaled_state: &mut JournaledState,
) -> Result<(), BackendError>;
/// Returns true if the given account is currently marked as persistent.
fn is_persistent(&self, acc: &Address) -> bool;
/// Revokes persistent status from the given account.
fn remove_persistent_account(&mut self, account: &Address) -> bool;
/// Marks the given account as persistent.
fn add_persistent_account(&mut self, account: Address) -> bool;
/// Removes persistent status from all given accounts.
#[auto_impl(keep_default_for(&, &mut, Rc, Arc, Box))]
fn remove_persistent_accounts(&mut self, accounts: impl IntoIterator<Item = Address>)
where
Self: Sized,
{
for acc in accounts {
self.remove_persistent_account(&acc);
}
}
/// Extends the persistent accounts with the accounts the iterator yields.
#[auto_impl(keep_default_for(&, &mut, Rc, Arc, Box))]
fn extend_persistent_accounts(&mut self, accounts: impl IntoIterator<Item = Address>)
where
Self: Sized,
{
for acc in accounts {
self.add_persistent_account(acc);
}
}
/// Grants cheatcode access for the given `account`
///
/// Returns true if the `account` already has access
fn allow_cheatcode_access(&mut self, account: Address) -> bool;
/// Revokes cheatcode access for the given account
///
/// Returns true if the `account` was previously allowed cheatcode access
fn revoke_cheatcode_access(&mut self, account: &Address) -> bool;
/// Returns `true` if the given account is allowed to execute cheatcodes
fn has_cheatcode_access(&self, account: &Address) -> bool;
/// Ensures that `account` is allowed to execute cheatcodes
///
/// Returns an error if [`Self::has_cheatcode_access`] returns `false`
fn ensure_cheatcode_access(&self, account: &Address) -> Result<(), BackendError> {
    match self.has_cheatcode_access(account) {
        true => Ok(()),
        false => Err(BackendError::NoCheats(*account)),
    }
}
/// Same as [`Self::ensure_cheatcode_access()`] but only enforces it if the backend is currently
/// in forking mode
fn ensure_cheatcode_access_forking_mode(&self, account: &Address) -> Result<(), BackendError> {
    // Outside of forking mode every account may use cheatcodes.
    if !self.is_forked_mode() {
        return Ok(());
    }
    self.ensure_cheatcode_access(account)
}
/// Set the blockhash for a given block number.
///
/// # Arguments
///
/// * `block_number` - The block number to set the blockhash for
/// * `block_hash` - The blockhash to set
///
/// # Note
///
/// This function mimics the EVM limits of the `blockhash` operation:
/// - It sets the blockhash for blocks where `block.number - 256 <= number < block.number`
/// - Setting a blockhash for the current block (number == block.number) has no effect
/// - Setting a blockhash for future blocks (number > block.number) has no effect
/// - Setting a blockhash for blocks older than `block.number - 256` has no effect
fn set_blockhash(&mut self, block_number: U256, block_hash: B256);
}
/// Compile-time assertion that [`DatabaseExt`] is object-safe (usable as `dyn DatabaseExt`).
struct _ObjectSafe(dyn DatabaseExt);
/// Provides the underlying `revm::Database` implementation.
///
/// A `Backend` can be initialised in two forms:
///
/// # 1. Empty in-memory Database
/// This is the default variant: an empty `revm::Database`
///
/// # 2. Forked Database
/// A `revm::Database` that forks off a remote client
///
/// In addition to that we support forking manually on the fly.
/// Additional forks can be created. Each unique fork is identified by its unique `ForkId`. We treat
/// forks as unique if they have the same `(endpoint, block number)` pair.
///
/// When it comes to testing, it's intended that each contract will use its own `Backend`
/// (`Backend::clone`). This way each contract uses its own encapsulated evm state. For in-memory
/// testing, the database is just an owned `revm::InMemoryDB`.
///
/// Each `Fork`, identified by a unique id, uses completely separate storage, write operations are
/// performed only in the fork's own database, `ForkDB`.
///
/// A `ForkDB` consists of 2 halves:
/// - everything fetched from the remote is readonly
/// - all local changes (instructed by the contract) are written to the backend's `db` and don't
///   alter the state of the remote client.
///
/// # Fork swapping
///
/// Multiple "forks" can be created via `Backend::create_fork()`, however only 1 can be used by the
/// `db`. However, their state can be hot-swapped by swapping the read half of `db` from one fork to
/// another.
/// When swapping forks (`Backend::select_fork()`) we also update the current `Env` of the `EVM`
/// accordingly, so that all `block.*` config values match
///
/// When another fork is selected [`DatabaseExt::select_fork()`] the entire storage, including
/// `JournaledState` is swapped, but the storage of the caller's and the test contract account is
/// _always_ cloned. This way a fork has entirely separate storage but data can still be shared
/// across fork boundaries via stack and contract variables.
///
/// # Snapshotting
///
/// A snapshot of the current overall state can be taken at any point in time. A snapshot is
/// identified by a unique id that's returned when a snapshot is created. A snapshot can only be
/// reverted _once_. After a successful revert, the same snapshot id cannot be used again. Reverting
/// a snapshot replaces the current active state with the snapshot state, the snapshot is deleted
/// afterwards, as well as any snapshots taken after the reverted snapshot, (e.g.: reverting to id
/// 0x1 will delete snapshots with ids 0x1, 0x2, etc.)
///
/// **Note:** State snapshots work across fork-swaps, e.g. if fork `A` is currently active, then a
/// snapshot is created before fork `B` is selected, then fork `A` will be the active fork again
/// after reverting the snapshot.
#[derive(Clone, Debug)]
#[must_use]
pub struct Backend {
    /// The access point for managing forks
    forks: MultiFork,
    /// The default in memory db, used whenever no fork is active.
    mem_db: FoundryEvmInMemoryDB,
    /// The journaled_state to use to initialize new forks with
    ///
    /// The way [`JournaledState`] works is, that it holds the "hot" accounts loaded from the
    /// underlying `Database` that feeds the Account and State data to the journaled_state so it
    /// can apply changes to the state while the EVM executes.
    ///
    /// In a way the `JournaledState` is something like a cache that
    /// 1. check if account is already loaded (hot)
    /// 2. if not load from the `Database` (this will then retrieve the account via RPC in forking
    ///    mode)
    ///
    /// To properly initialize we store the `JournaledState` before the first fork is selected
    /// ([`DatabaseExt::select_fork`]).
    ///
    /// This will be an empty `JournaledState`, which will be populated with persistent accounts,
    /// See [`Self::update_fork_db()`].
    fork_init_journaled_state: JournaledState,
    /// The currently active fork database
    ///
    /// If this is set, then the Backend is currently in forking mode
    active_fork_ids: Option<(LocalForkId, ForkLookupIndex)>,
    /// holds additional Backend data
    inner: BackendInner,
}
impl Backend {
/// Creates a new Backend with a spawned multi fork thread.
///
/// If `fork` is `Some` this will use a `fork` database, otherwise with an in-memory
/// database.
pub fn spawn(fork: Option<CreateFork>) -> eyre::Result<Self> {
    // Spin up the multi-fork handler first, then delegate to the main constructor.
    let forks = MultiFork::spawn();
    Self::new(forks, fork)
}
/// Creates a new instance of `Backend`
///
/// If `fork` is `Some` this will use a `fork` database, otherwise with an in-memory
/// database.
///
/// Prefer using [`spawn`](Self::spawn) instead.
pub fn new(forks: MultiFork, fork: Option<CreateFork>) -> eyre::Result<Self> {
    trace!(target: "backend", forking_mode=?fork.is_some(), "creating executor backend");
    // Note: this takes care of registering the `fork`
    let inner = BackendInner {
        persistent_accounts: HashSet::from(DEFAULT_PERSISTENT_ACCOUNTS),
        ..Default::default()
    };
    let mut backend = Self {
        forks,
        mem_db: CacheDB::new(Default::default()),
        fork_init_journaled_state: inner.new_journaled_state(),
        active_fork_ids: None,
        inner,
    };
    // If an initial fork was requested, create it, register it with the inner
    // bookkeeping and make it the active database.
    if let Some(fork) = fork {
        let (fork_id, fork, _) = backend.forks.create_fork(fork)?;
        let fork_db = ForkDB::new(fork);
        let fork_ids = backend.inner.insert_new_fork(
            fork_id.clone(),
            fork_db,
            backend.inner.new_journaled_state(),
        );
        backend.inner.launched_with_fork = Some((fork_id, fork_ids.0, fork_ids.1));
        backend.active_fork_ids = Some(fork_ids);
    }
    trace!(target: "backend", forking_mode=? backend.active_fork_ids.is_some(), "created executor backend");
    Ok(backend)
}
/// Creates a new instance of `Backend` with fork added to the fork database and sets the fork
/// as active
pub(crate) fn new_with_fork(
    id: &ForkId,
    fork: Fork,
    journaled_state: JournaledState,
) -> eyre::Result<Self> {
    // Start from an empty (non-forking) backend, then register and activate the fork.
    let mut this = Self::spawn(None)?;
    let ids = this.inner.insert_new_fork(id.clone(), fork.db, journaled_state);
    this.inner.launched_with_fork = Some((id.clone(), ids.0, ids.1));
    this.active_fork_ids = Some(ids);
    Ok(this)
}
/// Creates a new instance with a `BackendDatabase::InMemory` cache layer for the `CacheDB`
pub fn clone_empty(&self) -> Self {
    // Keep the shared multi-fork handle, but reset all state to a fresh in-memory db.
    let forks = self.forks.clone();
    let fork_init_journaled_state = self.inner.new_journaled_state();
    Self {
        forks,
        mem_db: CacheDB::new(Default::default()),
        fork_init_journaled_state,
        active_fork_ids: None,
        inner: Default::default(),
    }
}
/// Inserts account info into the active database (the active fork's db if forking,
/// otherwise the in-memory db).
pub fn insert_account_info(&mut self, address: Address, account: AccountInfo) {
    match self.active_fork_db_mut() {
        Some(fork_db) => fork_db.insert_account_info(address, account),
        None => self.mem_db.insert_account_info(address, account),
    }
}
/// Inserts a value on an account's storage without overriding account info
pub fn insert_account_storage(
    &mut self,
    address: Address,
    slot: U256,
    value: U256,
) -> Result<(), DatabaseError> {
    // Route the write to the active fork's db when forking, else to the in-memory db.
    match self.active_fork_db_mut() {
        Some(fork_db) => fork_db.insert_account_storage(address, slot, value),
        None => self.mem_db.insert_account_storage(address, slot, value),
    }
}
/// Completely replace an account's storage without overriding account info.
///
/// When forking, this causes the backend to assume a `0` value for all
/// unset storage slots instead of trying to fetch it.
pub fn replace_account_storage(
    &mut self,
    address: Address,
    storage: Map<U256, U256>,
) -> Result<(), DatabaseError> {
    match self.active_fork_db_mut() {
        Some(fork_db) => fork_db.replace_account_storage(address, storage),
        None => self.mem_db.replace_account_storage(address, storage),
    }
}
/// Returns all state snapshots created in this backend
pub fn state_snapshots(
    &self,
) -> &StateSnapshots<BackendStateSnapshot<BackendDatabaseSnapshot>> {
    &self.inner.state_snapshots
}
/// Sets the address of the `DSTest` contract that is being executed
///
/// This marks the given account as persistent and grants it cheatcode access.
///
/// NOTE(review): earlier docs claimed persistent status is removed from the previous
/// test contract address, but no removal happens in this body — confirm intent.
pub fn set_test_contract(&mut self, acc: Address) -> &mut Self {
    trace!(?acc, "setting test account");
    self.add_persistent_account(acc);
    self.allow_cheatcode_access(acc);
    self
}
/// Sets the caller address
///
/// Also grants the caller cheatcode access.
pub fn set_caller(&mut self, acc: Address) -> &mut Self {
    trace!(?acc, "setting caller account");
    self.inner.caller = Some(acc);
    self.allow_cheatcode_access(acc);
    self
}
/// Sets the current spec id
pub fn set_spec_id(&mut self, spec_id: SpecId) -> &mut Self {
    trace!(?spec_id, "setting spec ID");
    self.inner.spec_id = spec_id;
    self
}
/// Returns the set caller address, if one was configured via [`Self::set_caller`].
pub fn caller_address(&self) -> Option<Address> {
    self.inner.caller
}
/// Failures occurred in state snapshots are tracked when the state snapshot is reverted.
///
/// If an error occurs in a restored state snapshot, the test is considered failed.
///
/// This returns whether there was a reverted state snapshot that recorded an error.
pub fn has_state_snapshot_failure(&self) -> bool {
    self.inner.has_state_snapshot_failure
}
/// Sets the state snapshot failure flag.
pub fn set_state_snapshot_failure(&mut self, has_state_snapshot_failure: bool) {
    self.inner.has_state_snapshot_failure = has_state_snapshot_failure
}
/// When creating or switching forks, we update the AccountInfo of the contract
///
/// Copies all accounts currently marked persistent from the active database into
/// `target_fork` (see [`Self::update_fork_db_contracts`]).
pub(crate) fn update_fork_db(
    &self,
    active_journaled_state: &mut JournaledState,
    target_fork: &mut Fork,
) {
    self.update_fork_db_contracts(
        self.inner.persistent_accounts.iter().copied(),
        active_journaled_state,
        target_fork,
    )
}
/// Merges the state of all `accounts` from the currently active db into the given `fork`
pub(crate) fn update_fork_db_contracts(
    &self,
    accounts: impl IntoIterator<Item = Address>,
    active_journaled_state: &mut JournaledState,
    target_fork: &mut Fork,
) {
    // The data source is the active fork's db when forking, otherwise the in-memory db.
    if let Some(db) = self.active_fork_db() {
        merge_account_data(accounts, db, active_journaled_state, target_fork)
    } else {
        merge_account_data(accounts, &self.mem_db, active_journaled_state, target_fork)
    }
}
/// Returns the memory db used if not in forking mode
pub fn mem_db(&self) -> &FoundryEvmInMemoryDB {
    &self.mem_db
}
/// Returns true if the `id` is currently active
pub fn is_active_fork(&self, id: LocalForkId) -> bool {
    self.active_fork_ids.map(|(i, _)| i == id).unwrap_or_default()
}
/// Returns `true` if the `Backend` is currently in forking mode
pub fn is_in_forking_mode(&self) -> bool {
    self.active_fork().is_some()
}
/// Returns the currently active `Fork`, if any
pub fn active_fork(&self) -> Option<&Fork> {
    self.active_fork_ids.map(|(_, idx)| self.inner.get_fork(idx))
}
/// Returns a mutable reference to the currently active `Fork`, if any
pub fn active_fork_mut(&mut self) -> Option<&mut Fork> {
    self.active_fork_ids.map(|(_, idx)| self.inner.get_fork_mut(idx))
}
/// Returns the currently active `ForkDB`, if any
pub fn active_fork_db(&self) -> Option<&ForkDB> {
    self.active_fork().map(|f| &f.db)
}
/// Returns a mutable reference to the currently active `ForkDB`, if any
pub fn active_fork_db_mut(&mut self) -> Option<&mut ForkDB> {
    self.active_fork_mut().map(|f| &mut f.db)
}
/// Returns the current database implementation as a `&dyn` value.
pub fn db(&self) -> &dyn Database<Error = DatabaseError> {
    match self.active_fork_db() {
        Some(fork_db) => fork_db,
        None => &self.mem_db,
    }
}
/// Returns the current database implementation as a `&mut dyn` value.
pub fn db_mut(&mut self) -> &mut dyn Database<Error = DatabaseError> {
    // Go through the fork index directly to obtain a mutable borrow without
    // conflicting with the fallback borrow of `self.mem_db`.
    match self.active_fork_ids.map(|(_, idx)| &mut self.inner.get_fork_mut(idx).db) {
        Some(fork_db) => fork_db,
        None => &mut self.mem_db,
    }
}
/// Creates a snapshot of the currently active database
pub(crate) fn create_db_snapshot(&self) -> BackendDatabaseSnapshot {
    match self.active_fork_ids {
        // Forking mode: clone the active fork and record which fork it was.
        Some((id, idx)) => {
            let fork = self.inner.get_fork(idx).clone();
            let fork_id = self.inner.ensure_fork_id(id).cloned().expect("Exists; qed");
            BackendDatabaseSnapshot::Forked(id, fork_id, idx, Box::new(fork))
        }
        // Otherwise snapshot the plain in-memory db.
        None => BackendDatabaseSnapshot::InMemory(self.mem_db.clone()),
    }
}
/// Since each `Fork` tracks logs separately, we need to merge them to get _all_ of them
pub fn merged_logs(&self, mut logs: Vec<Log>) -> Vec<Log> {
    // Not forking: the caller's logs are already the complete set.
    let Some((_, active)) = self.active_fork_ids else {
        return logs;
    };
    let mut all_logs = Vec::with_capacity(logs.len());
    // Walk all registered forks in index order; the active fork's slot receives the
    // caller-provided logs, every other fork contributes its own journaled logs.
    for (idx, fork) in self.inner.forks.iter().enumerate() {
        let Some(fork) = fork.as_ref() else { continue };
        if idx == active {
            all_logs.append(&mut logs);
        } else {
            all_logs.extend(fork.journaled_state.logs.clone());
        }
    }
    all_logs
}
/// Initializes settings we need to keep track of.
///
/// We need to track these mainly to prevent issues when switching between different evms
pub(crate) fn initialize(&mut self, env: &Env) {
    self.set_caller(env.tx.caller);
    self.set_spec_id(env.evm_env.cfg_env.spec);
    // The test contract is the call target, or — for CREATE transactions — the address
    // the caller would deploy to at its current nonce.
    let test_contract = match env.tx.kind {
        TxKind::Call(to) => to,
        TxKind::Create => {
            // Errors from `basic_ref` are swallowed; a missing account falls back to nonce 0.
            let nonce = self
                .basic_ref(env.tx.caller)
                .map(|b| b.unwrap_or_default().nonce)
                .unwrap_or_default();
            env.tx.caller.create(nonce)
        }
    };
    self.set_test_contract(test_contract);
}
/// Executes the configured test call of the `env` without committing state changes.
///
/// Note: in case there are any cheatcodes executed that modify the environment, this will
/// update the given `env` with the new values.
#[instrument(name = "inspect", level = "debug", skip_all)]
pub fn inspect<I: InspectorExt>(
    &mut self,
    env: &mut Env,
    inspector: I,
) -> eyre::Result<ResultAndState> {
    // Sync caller/spec/test-contract bookkeeping with the env before executing.
    self.initialize(env);
    let mut evm = crate::evm::new_evm_with_inspector(self, env.to_owned(), inspector);
    let res = evm.transact(env.tx.clone()).wrap_err("EVM error")?;
    // Write back any env mutations performed during execution (e.g. by cheatcodes).
    *env = evm.as_env_mut().to_owned();
    Ok(res)
}
/// Returns true if the address is a precompile
pub fn is_existing_precompile(&self, addr: &Address) -> bool {
    self.inner.precompiles().contains(addr)
}
/// Sets the initial journaled state to use when initializing forks
#[inline]
fn set_init_journaled_state(&mut self, journaled_state: JournaledState) {
    trace!("recording fork init journaled_state");
    self.fork_init_journaled_state = journaled_state;
}
/// Cleans up already loaded accounts that would be initialized without the correct data from
/// the fork.
///
/// It can happen that an account is loaded before the first fork is selected, like
/// `getNonce(addr)`, which will load an empty account by default.
///
/// This account data then would not match the account data of a fork if it exists.
/// So when the first fork is initialized we replace these accounts with the actual account as
/// it exists on the fork.
fn prepare_init_journal_state(&mut self) -> Result<(), BackendError> {
    // Collect the addresses that need refreshing: everything already loaded that is
    // neither a precompile nor a persistent account.
    let loaded_accounts = self
        .fork_init_journaled_state
        .state
        .iter()
        .filter(|(addr, _)| !self.is_existing_precompile(addr) && !self.is_persistent(addr))
        .map(|(addr, _)| addr)
        .copied()
        .collect::<Vec<_>>();
    // Give every fork its own copy of the init journaled state with refreshed accounts.
    for fork in self.inner.forks_iter_mut() {
        let mut journaled_state = self.fork_init_journaled_state.clone();
        for loaded_account in loaded_accounts.iter().copied() {
            trace!(?loaded_account, "replacing account on init");
            let init_account =
                journaled_state.state.get_mut(&loaded_account).expect("exists; qed");
            // here's an edge case where we need to check if this account has been created, in
            // which case we don't need to replace it with the account from the fork because the
            // created account takes precedence: for example contract creation in setups
            if init_account.is_created() {
                trace!(?loaded_account, "skipping created account");
                continue;
            }
            // otherwise we need to replace the account's info with the one from the fork's
            // database
            let fork_account = Database::basic(&mut fork.db, loaded_account)?
                .ok_or(BackendError::MissingAccount(loaded_account))?;
            init_account.info = fork_account;
        }
        fork.journaled_state = journaled_state;
    }
    Ok(())
}
/// Returns the block numbers required for replaying a transaction
fn get_block_number_and_block_for_transaction(
&self,
id: LocalForkId,
transaction: B256,
) -> eyre::Result<(u64, AnyRpcBlock)> {
let fork = self.inner.get_fork_by_id(id)?;
let tx = fork.db.db.get_transaction(transaction)?;
// get the block number we need to fork
if let Some(tx_block) = tx.block_number {
let block = fork.db.db.get_full_block(tx_block)?;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/in_memory_db.rs | crates/evm/core/src/backend/in_memory_db.rs | //! In-memory database.
use crate::state_snapshot::StateSnapshots;
use alloy_primitives::{Address, B256, U256};
use foundry_fork_db::DatabaseError;
use revm::{
Database, DatabaseCommit,
bytecode::Bytecode,
database::{CacheDB, DatabaseRef, EmptyDB},
primitives::HashMap as Map,
state::{Account, AccountInfo},
};
/// Type alias for an in-memory database.
///
/// See [`EmptyDBWrapper`].
pub type FoundryEvmInMemoryDB = CacheDB<EmptyDBWrapper>;
/// In-memory [`Database`] for Anvil.
///
/// This acts like a wrapper type for [`FoundryEvmInMemoryDB`] but is capable of applying snapshots.
#[derive(Debug)]
pub struct MemDb {
    // The wrapped cache database holding all account/storage state.
    pub inner: FoundryEvmInMemoryDB,
    // Point-in-time copies of `inner` that can be restored later.
    pub state_snapshots: StateSnapshots<FoundryEvmInMemoryDB>,
}
impl Default for MemDb {
fn default() -> Self {
Self { inner: CacheDB::new(Default::default()), state_snapshots: Default::default() }
}
}
/// Read-only access simply forwards to the wrapped cache db.
impl DatabaseRef for MemDb {
    type Error = DatabaseError;

    fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        self.inner.basic_ref(address)
    }

    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.inner.code_by_hash_ref(code_hash)
    }

    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
        self.inner.storage_ref(address, index)
    }

    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        self.inner.block_hash_ref(number)
    }
}
/// Mutable access likewise forwards to the wrapped cache db.
impl Database for MemDb {
    type Error = DatabaseError;

    fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        // Note: this will always return `Some(AccountInfo)`, See `EmptyDBWrapper`
        self.inner.basic(address)
    }

    fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        self.inner.code_by_hash(code_hash)
    }

    fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
        self.inner.storage(address, index)
    }

    fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
        self.inner.block_hash(number)
    }
}
impl DatabaseCommit for MemDb {
    /// Commits account changes straight into the wrapped cache db.
    fn commit(&mut self, changes: Map<Address, Account>) {
        self.inner.commit(changes)
    }
}
/// An empty database that always returns default values when queried.
///
/// This is just a simple wrapper for `revm::EmptyDB` but implements `DatabaseError` instead, this
/// way we can unify all different `Database` impls
///
/// This will also _always_ return `Some(AccountInfo)`:
///
/// The [`Database`] implementation for `CacheDB` manages an `AccountState` for the
/// `DbAccount`, this will be set to `AccountState::NotExisting` if the account does not exist yet.
/// This is because there's a distinction between "non-existing" and "empty",
/// see <https://github.com/bluealloy/revm/blob/8f4348dc93022cffb3730d9db5d3ab1aad77676a/crates/revm/src/db/in_memory_db.rs#L81-L83>.
/// If an account is `NotExisting`, `Database::basic_ref` will always return `None` for the
/// requested `AccountInfo`.
///
/// To prevent this, we ensure that a missing account is never marked as `NotExisting` by always
/// returning `Some` with this type, which will then insert a default [`AccountInfo`] instead
/// of one marked as `AccountState::NotExisting`.
#[derive(Clone, Debug, Default)]
pub struct EmptyDBWrapper(EmptyDB);
impl DatabaseRef for EmptyDBWrapper {
    type Error = DatabaseError;
    fn basic_ref(&self, _address: Address) -> Result<Option<AccountInfo>, Self::Error> {
        // Note: this will always return `Some(AccountInfo)`, for the reason explained above
        Ok(Some(AccountInfo::default()))
    }
    fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
        // `?` converts the inner `EmptyDB` error type into `DatabaseError`.
        Ok(self.0.code_by_hash_ref(code_hash)?)
    }
    fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
        Ok(self.0.storage_ref(address, index)?)
    }
    fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
        Ok(self.0.block_hash_ref(number)?)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::b256;
    /// Ensures the `Database(Ref)` implementation for `revm::CacheDB` works as expected
    ///
    /// Demonstrates how calling `Database::basic` works if an account does not exist
    #[test]
    fn cache_db_insert_basic_non_existing() {
        let mut db = CacheDB::new(EmptyDB::default());
        let address = Address::random();
        // call `basic` on a non-existing account
        let info = Database::basic(&mut db, address).unwrap();
        assert!(info.is_none());
        let mut info = info.unwrap_or_default();
        info.balance = U256::from(500u64);
        // insert the modified account info
        db.insert_account_info(address, info);
        // now we can call `basic` again and it should return the inserted account info
        let info = Database::basic(&mut db, address).unwrap();
        assert!(info.is_some());
    }
    /// Demonstrates how to insert a new account but not mark it as non-existing
    #[test]
    fn cache_db_insert_basic_default() {
        let mut db = CacheDB::new(EmptyDB::default());
        let address = Address::random();
        // We use `basic_ref` here to ensure that the account is not marked as `NotExisting`.
        let info = DatabaseRef::basic_ref(&db, address).unwrap();
        assert!(info.is_none());
        let mut info = info.unwrap_or_default();
        info.balance = U256::from(500u64);
        // insert the modified account info
        db.insert_account_info(address, info.clone());
        let loaded = Database::basic(&mut db, address).unwrap();
        assert!(loaded.is_some());
        assert_eq!(loaded.unwrap(), info)
    }
    /// Demonstrates that `Database::basic` for `MemDb` will always return the `AccountInfo`
    #[test]
    fn mem_db_insert_basic_default() {
        let mut db = MemDb::default();
        let address = Address::from_word(b256!(
            "0x000000000000000000000000d8da6bf26964af9d7eed9e03e53415d37aa96045"
        ));
        let info = Database::basic(&mut db, address).unwrap();
        // We know info exists, as MemDb always returns `Some(AccountInfo)` due to the
        // `EmptyDBWrapper`.
        assert!(info.is_some());
        let mut info = info.unwrap();
        info.balance = U256::from(500u64);
        // insert the modified account info
        db.inner.insert_account_info(address, info.clone());
        let loaded = Database::basic(&mut db, address).unwrap();
        assert!(loaded.is_some());
        assert_eq!(loaded.unwrap(), info)
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/cow.rs | crates/evm/core/src/backend/cow.rs | //! A wrapper around `Backend` that is clone-on-write used for fuzzing.
use super::BackendError;
use crate::{
AsEnvMut, Env, EnvMut, InspectorExt,
backend::{
Backend, DatabaseExt, JournaledState, LocalForkId, RevertStateSnapshotAction,
diagnostic::RevertDiagnostic,
},
fork::{CreateFork, ForkId},
};
use alloy_evm::Evm;
use alloy_genesis::GenesisAccount;
use alloy_primitives::{Address, B256, U256};
use alloy_rpc_types::TransactionRequest;
use eyre::WrapErr;
use foundry_fork_db::DatabaseError;
use revm::{
Database, DatabaseCommit,
bytecode::Bytecode,
context_interface::result::ResultAndState,
database::DatabaseRef,
primitives::{HashMap as Map, hardfork::SpecId},
state::{Account, AccountInfo},
};
use std::{borrow::Cow, collections::BTreeMap};
/// A wrapper around `Backend` that ensures only `revm::DatabaseRef` functions are called.
///
/// Any changes made during its existence that affect the caching layer of the underlying Database
/// will result in a clone of the initial Database. Therefore, this backend type is basically
/// a clone-on-write `Backend`, where cloning is only necessary if cheatcodes will modify the
/// `Backend`
///
/// Entire purpose of this type is for fuzzing. A test function fuzzer will repeatedly execute the
/// function via immutable raw (no state changes) calls.
///
/// **N.B.**: we're assuming cheatcodes that alter the state (like multi fork swapping) are niche.
/// If they executed, it will require a clone of the initial input database.
/// This way we can support these cheatcodes cheaply without adding overhead for tests that
/// don't make use of them. Alternatively each test case would require its own `Backend` clone,
/// which would add significant overhead for large fuzz sets even if the Database is not big after
/// setup.
#[derive(Clone, Debug)]
pub struct CowBackend<'a> {
    /// The underlying `Backend`.
    ///
    /// No calls on the `CowBackend` will ever persistently modify the `backend`'s state.
    pub backend: Cow<'a, Backend>,
    /// Keeps track of whether the backend is already initialized
    is_initialized: bool,
    /// The [SpecId] of the current backend.
    spec_id: SpecId,
}
impl<'a> CowBackend<'a> {
    /// Creates a new `CowBackend` with the given `Backend`.
    pub fn new_borrowed(backend: &'a Backend) -> Self {
        Self { backend: Cow::Borrowed(backend), is_initialized: false, spec_id: SpecId::default() }
    }
    /// Executes the configured transaction of the `env` without committing state changes
    ///
    /// Note: in case there are any cheatcodes executed that modify the environment, this will
    /// update the given `env` with the new values.
    #[instrument(name = "inspect", level = "debug", skip_all)]
    pub fn inspect<I: InspectorExt>(
        &mut self,
        env: &mut Env,
        inspector: I,
    ) -> eyre::Result<ResultAndState> {
        // this is a new call to inspect with a new env, so even if we've cloned the backend
        // already, we reset the initialized state
        self.is_initialized = false;
        self.spec_id = env.evm_env.cfg_env.spec;
        let mut evm = crate::evm::new_evm_with_inspector(self, env.to_owned(), inspector);
        let res = evm.transact(env.tx.clone()).wrap_err("EVM error")?;
        // Write back env mutations performed during execution (e.g. by cheatcodes).
        *env = evm.as_env_mut().to_owned();
        Ok(res)
    }
    /// Returns whether there was a state snapshot failure in the backend.
    ///
    /// This is bubbled up from the underlying Copy-On-Write backend when a revert occurs.
    pub fn has_state_snapshot_failure(&self) -> bool {
        self.backend.has_state_snapshot_failure()
    }
    /// Returns a mutable instance of the Backend.
    ///
    /// If this is the first time this is called, the backend is cloned and initialized.
    fn backend_mut(&mut self, env: &EnvMut<'_>) -> &mut Backend {
        if !self.is_initialized {
            // First mutable access: `to_mut` clones the borrowed backend if needed.
            let backend = self.backend.to_mut();
            let mut env = env.to_owned();
            // Initialize with the spec captured at the start of `inspect`.
            env.evm_env.cfg_env.spec = self.spec_id;
            backend.initialize(&env);
            self.is_initialized = true;
            return backend;
        }
        self.backend.to_mut()
    }
    /// Returns a mutable instance of the Backend if it is initialized.
    fn initialized_backend_mut(&mut self) -> Option<&mut Backend> {
        if self.is_initialized {
            return Some(self.backend.to_mut());
        }
        None
    }
}
/// `DatabaseExt` for the clone-on-write backend.
///
/// Read-only queries go through the (possibly still borrowed) inner backend; anything that
/// mutates state goes through `backend_mut`/`to_mut`, which clones the backend on first use.
impl DatabaseExt for CowBackend<'_> {
    fn snapshot_state(&mut self, journaled_state: &JournaledState, env: &mut EnvMut<'_>) -> U256 {
        self.backend_mut(env).snapshot_state(journaled_state, env)
    }
    fn revert_state(
        &mut self,
        id: U256,
        journaled_state: &JournaledState,
        current: &mut EnvMut<'_>,
        action: RevertStateSnapshotAction,
    ) -> Option<JournaledState> {
        self.backend_mut(current).revert_state(id, journaled_state, current, action)
    }
    fn delete_state_snapshot(&mut self, id: U256) -> bool {
        // delete state snapshot requires a previous snapshot to be initialized
        if let Some(backend) = self.initialized_backend_mut() {
            return backend.delete_state_snapshot(id);
        }
        false
    }
    fn delete_state_snapshots(&mut self) {
        // Only meaningful once the backend has been cloned/initialized.
        if let Some(backend) = self.initialized_backend_mut() {
            backend.delete_state_snapshots()
        }
    }
    fn create_fork(&mut self, fork: CreateFork) -> eyre::Result<LocalForkId> {
        self.backend.to_mut().create_fork(fork)
    }
    fn create_fork_at_transaction(
        &mut self,
        fork: CreateFork,
        transaction: B256,
    ) -> eyre::Result<LocalForkId> {
        self.backend.to_mut().create_fork_at_transaction(fork, transaction)
    }
    fn select_fork(
        &mut self,
        id: LocalForkId,
        env: &mut EnvMut<'_>,
        journaled_state: &mut JournaledState,
    ) -> eyre::Result<()> {
        self.backend_mut(env).select_fork(id, env, journaled_state)
    }
    fn roll_fork(
        &mut self,
        id: Option<LocalForkId>,
        block_number: u64,
        env: &mut EnvMut<'_>,
        journaled_state: &mut JournaledState,
    ) -> eyre::Result<()> {
        self.backend_mut(env).roll_fork(id, block_number, env, journaled_state)
    }
    fn roll_fork_to_transaction(
        &mut self,
        id: Option<LocalForkId>,
        transaction: B256,
        env: &mut EnvMut<'_>,
        journaled_state: &mut JournaledState,
    ) -> eyre::Result<()> {
        self.backend_mut(env).roll_fork_to_transaction(id, transaction, env, journaled_state)
    }
    fn transact(
        &mut self,
        id: Option<LocalForkId>,
        transaction: B256,
        mut env: Env,
        journaled_state: &mut JournaledState,
        inspector: &mut dyn InspectorExt,
    ) -> eyre::Result<()> {
        self.backend_mut(&env.as_env_mut()).transact(
            id,
            transaction,
            env,
            journaled_state,
            inspector,
        )
    }
    fn transact_from_tx(
        &mut self,
        transaction: &TransactionRequest,
        mut env: Env,
        journaled_state: &mut JournaledState,
        inspector: &mut dyn InspectorExt,
    ) -> eyre::Result<()> {
        self.backend_mut(&env.as_env_mut()).transact_from_tx(
            transaction,
            env,
            journaled_state,
            inspector,
        )
    }
    fn active_fork_id(&self) -> Option<LocalForkId> {
        self.backend.active_fork_id()
    }
    fn active_fork_url(&self) -> Option<String> {
        self.backend.active_fork_url()
    }
    fn ensure_fork(&self, id: Option<LocalForkId>) -> eyre::Result<LocalForkId> {
        self.backend.ensure_fork(id)
    }
    fn ensure_fork_id(&self, id: LocalForkId) -> eyre::Result<&ForkId> {
        self.backend.ensure_fork_id(id)
    }
    fn diagnose_revert(
        &self,
        callee: Address,
        journaled_state: &JournaledState,
    ) -> Option<RevertDiagnostic> {
        self.backend.diagnose_revert(callee, journaled_state)
    }
    fn load_allocs(
        &mut self,
        allocs: &BTreeMap<Address, GenesisAccount>,
        journaled_state: &mut JournaledState,
    ) -> Result<(), BackendError> {
        // No caller env is available here, so initialization uses a default `Env`.
        self.backend_mut(&Env::default().as_env_mut()).load_allocs(allocs, journaled_state)
    }
    fn clone_account(
        &mut self,
        source: &GenesisAccount,
        target: &Address,
        journaled_state: &mut JournaledState,
    ) -> Result<(), BackendError> {
        // Same as `load_allocs`: initialize with a default `Env` if needed.
        self.backend_mut(&Env::default().as_env_mut()).clone_account(
            source,
            target,
            journaled_state,
        )
    }
    fn is_persistent(&self, acc: &Address) -> bool {
        self.backend.is_persistent(acc)
    }
    fn remove_persistent_account(&mut self, account: &Address) -> bool {
        self.backend.to_mut().remove_persistent_account(account)
    }
    fn add_persistent_account(&mut self, account: Address) -> bool {
        self.backend.to_mut().add_persistent_account(account)
    }
    fn allow_cheatcode_access(&mut self, account: Address) -> bool {
        self.backend.to_mut().allow_cheatcode_access(account)
    }
    fn revoke_cheatcode_access(&mut self, account: &Address) -> bool {
        self.backend.to_mut().revoke_cheatcode_access(account)
    }
    fn has_cheatcode_access(&self, account: &Address) -> bool {
        self.backend.has_cheatcode_access(account)
    }
    fn set_blockhash(&mut self, block_number: U256, block_hash: B256) {
        self.backend.to_mut().set_blockhash(block_number, block_hash);
    }
}
impl DatabaseRef for CowBackend<'_> {
type Error = DatabaseError;
fn basic_ref(&self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
DatabaseRef::basic_ref(self.backend.as_ref(), address)
}
fn code_by_hash_ref(&self, code_hash: B256) -> Result<Bytecode, Self::Error> {
DatabaseRef::code_by_hash_ref(self.backend.as_ref(), code_hash)
}
fn storage_ref(&self, address: Address, index: U256) -> Result<U256, Self::Error> {
DatabaseRef::storage_ref(self.backend.as_ref(), address, index)
}
fn block_hash_ref(&self, number: u64) -> Result<B256, Self::Error> {
DatabaseRef::block_hash_ref(self.backend.as_ref(), number)
}
}
impl Database for CowBackend<'_> {
type Error = DatabaseError;
fn basic(&mut self, address: Address) -> Result<Option<AccountInfo>, Self::Error> {
DatabaseRef::basic_ref(self, address)
}
fn code_by_hash(&mut self, code_hash: B256) -> Result<Bytecode, Self::Error> {
DatabaseRef::code_by_hash_ref(self, code_hash)
}
fn storage(&mut self, address: Address, index: U256) -> Result<U256, Self::Error> {
DatabaseRef::storage_ref(self, address, index)
}
fn block_hash(&mut self, number: u64) -> Result<B256, Self::Error> {
DatabaseRef::block_hash_ref(self, number)
}
}
impl DatabaseCommit for CowBackend<'_> {
fn commit(&mut self, changes: Map<Address, Account>) {
self.backend.to_mut().commit(changes)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/snapshot.rs | crates/evm/core/src/backend/snapshot.rs | use super::JournaledState;
use crate::Env;
use alloy_primitives::{
B256, U256,
map::{AddressHashMap, HashMap},
};
use revm::state::AccountInfo;
use serde::{Deserialize, Serialize};
/// A minimal abstraction of a state at a certain point in time
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct StateSnapshot {
pub accounts: AddressHashMap<AccountInfo>,
pub storage: AddressHashMap<HashMap<U256, U256>>,
pub block_hashes: HashMap<U256, B256>,
}
/// Represents a state snapshot taken during evm execution
#[derive(Clone, Debug)]
pub struct BackendStateSnapshot<T> {
pub db: T,
/// The journaled_state state at a specific point
pub journaled_state: JournaledState,
/// Contains the env at the time of the snapshot
pub env: Env,
}
impl<T> BackendStateSnapshot<T> {
/// Takes a new state snapshot.
pub fn new(db: T, journaled_state: JournaledState, env: Env) -> Self {
Self { db, journaled_state, env }
}
/// Called when this state snapshot is reverted.
///
/// Since we want to keep all additional logs that were emitted since the snapshot was taken
/// we'll merge additional logs into the snapshot's `revm::JournaledState`. Additional logs are
/// those logs that are missing in the snapshot's journaled_state, since the current
/// journaled_state includes the same logs, we can simply replace use that See also
/// `DatabaseExt::revert`.
pub fn merge(&mut self, current: &JournaledState) {
self.journaled_state.logs.clone_from(¤t.logs);
}
}
/// What to do when reverting a state snapshot.
///
/// Whether to remove the state snapshot or keep it.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub enum RevertStateSnapshotAction {
/// Remove the state snapshot after reverting.
#[default]
RevertRemove,
/// Keep the state snapshot after reverting.
RevertKeep,
}
impl RevertStateSnapshotAction {
/// Returns `true` if the action is to keep the state snapshot.
pub fn is_keep(&self) -> bool {
matches!(self, Self::RevertKeep)
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/core/src/backend/diagnostic.rs | crates/evm/core/src/backend/diagnostic.rs | use crate::backend::LocalForkId;
use alloy_primitives::{Address, map::AddressHashMap};
use itertools::Itertools;
/// Represents possible diagnostic cases on revert
#[derive(Clone, Debug)]
pub enum RevertDiagnostic {
/// The `contract` does not exist on the `active` fork but exist on other fork(s)
ContractExistsOnOtherForks {
contract: Address,
active: LocalForkId,
available_on: Vec<LocalForkId>,
},
ContractDoesNotExist {
contract: Address,
active: LocalForkId,
persistent: bool,
},
}
impl RevertDiagnostic {
/// Converts the diagnostic to a readable error message
pub fn to_error_msg(&self, labels: &AddressHashMap<String>) -> String {
let get_label =
|addr: &Address| labels.get(addr).cloned().unwrap_or_else(|| addr.to_string());
match self {
Self::ContractExistsOnOtherForks { contract, active, available_on } => {
let contract_label = get_label(contract);
format!(
r#"Contract {} does not exist on active fork with id `{}`
But exists on non active forks: `[{}]`"#,
contract_label,
active,
available_on.iter().format(", ")
)
}
Self::ContractDoesNotExist { contract, persistent, .. } => {
let contract_label = get_label(contract);
if *persistent {
format!("Contract {contract_label} does not exist")
} else {
format!(
"Contract {contract_label} does not exist and is not marked as persistent, see `vm.makePersistent()`"
)
}
}
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/networks/src/lib.rs | crates/evm/networks/src/lib.rs | //! # foundry-evm-networks
//!
//! Foundry EVM network configuration.
use crate::celo::transfer::{
CELO_TRANSFER_ADDRESS, CELO_TRANSFER_LABEL, PRECOMPILE_ID_CELO_TRANSFER,
};
use alloy_chains::{
NamedChain,
NamedChain::{Chiado, Gnosis, Moonbase, Moonbeam, MoonbeamDev, Moonriver, Rsk, RskTestnet},
};
use alloy_eips::eip1559::BaseFeeParams;
use alloy_evm::precompiles::PrecompilesMap;
use alloy_op_hardforks::{OpChainHardforks, OpHardforks};
use alloy_primitives::{Address, map::AddressHashMap};
use clap::Parser;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
pub mod celo;
#[derive(Clone, Debug, Default, Parser, Copy, Serialize, Deserialize, PartialEq)]
pub struct NetworkConfigs {
/// Enable Optimism network features.
#[arg(help_heading = "Networks", long, conflicts_with = "celo")]
// Skipped from configs (forge) as there is no feature to be added yet.
#[serde(skip)]
optimism: bool,
/// Enable Celo network features.
#[arg(help_heading = "Networks", long, conflicts_with = "optimism")]
#[serde(default)]
celo: bool,
/// Whether to bypass prevrandao.
#[arg(skip)]
#[serde(default)]
bypass_prevrandao: bool,
}
impl NetworkConfigs {
pub fn with_optimism() -> Self {
Self { optimism: true, ..Default::default() }
}
pub fn with_celo() -> Self {
Self { celo: true, ..Default::default() }
}
pub fn is_optimism(&self) -> bool {
self.optimism
}
/// Returns the base fee parameters for the configured network.
///
/// For Optimism networks, returns Canyon parameters if the Canyon hardfork is active
/// at the given timestamp, otherwise returns pre-Canyon parameters.
pub fn base_fee_params(&self, timestamp: u64) -> BaseFeeParams {
if self.is_optimism() {
let op_hardforks = OpChainHardforks::op_mainnet();
if op_hardforks.is_canyon_active_at_timestamp(timestamp) {
BaseFeeParams::optimism_canyon()
} else {
BaseFeeParams::optimism()
}
} else {
BaseFeeParams::ethereum()
}
}
pub fn bypass_prevrandao(&self, chain_id: u64) -> bool {
if let Ok(
Moonbeam | Moonbase | Moonriver | MoonbeamDev | Rsk | RskTestnet | Gnosis | Chiado,
) = NamedChain::try_from(chain_id)
{
return true;
}
self.bypass_prevrandao
}
pub fn is_celo(&self) -> bool {
self.celo
}
pub fn with_chain_id(mut self, chain_id: u64) -> Self {
if let Ok(NamedChain::Celo | NamedChain::CeloSepolia) = NamedChain::try_from(chain_id) {
self.celo = true;
}
self
}
/// Inject precompiles for configured networks.
pub fn inject_precompiles(self, precompiles: &mut PrecompilesMap) {
if self.celo {
precompiles.apply_precompile(&CELO_TRANSFER_ADDRESS, move |_| {
Some(celo::transfer::precompile())
});
}
}
/// Returns precompiles label for configured networks, to be used in traces.
pub fn precompiles_label(self) -> AddressHashMap<String> {
let mut labels = AddressHashMap::default();
if self.celo {
labels.insert(CELO_TRANSFER_ADDRESS, CELO_TRANSFER_LABEL.to_string());
}
labels
}
/// Returns precompiles for configured networks.
pub fn precompiles(self) -> BTreeMap<String, Address> {
let mut precompiles = BTreeMap::new();
if self.celo {
precompiles
.insert(PRECOMPILE_ID_CELO_TRANSFER.name().to_string(), CELO_TRANSFER_ADDRESS);
}
precompiles
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/networks/src/celo/transfer.rs | crates/evm/networks/src/celo/transfer.rs | //! Celo precompile implementation for token transfers.
//!
//! This module implements the Celo transfer precompile that enables native token transfers from an
//! EVM contract. The precompile is part of Celo's token duality system, allowing transfer of
//! native tokens via ERC20.
//!
//! For more details, see: <https://specs.celo.org/token_duality.html#the-transfer-precompile>
//!
//! The transfer precompile is deployed at address 0xfd and accepts 96 bytes of input:
//! - from address (32 bytes, left-padded)
//! - to address (32 bytes, left-padded)
//! - value (32 bytes, big-endian U256)
use std::borrow::Cow;
use alloy_evm::precompiles::{DynPrecompile, PrecompileInput};
use alloy_primitives::{Address, U256, address};
use revm::precompile::{PrecompileError, PrecompileId, PrecompileOutput, PrecompileResult};
/// Label of the Celo transfer precompile to display in traces.
pub const CELO_TRANSFER_LABEL: &str = "CELO_TRANSFER_PRECOMPILE";
/// Address of the Celo transfer precompile.
pub const CELO_TRANSFER_ADDRESS: Address = address!("0x00000000000000000000000000000000000000fd");
/// ID for the [Celo transfer precompile](CELO_TRANSFER_ADDRESS).
pub static PRECOMPILE_ID_CELO_TRANSFER: PrecompileId =
PrecompileId::Custom(Cow::Borrowed("celo transfer"));
/// Gas cost for Celo transfer precompile.
const CELO_TRANSFER_GAS_COST: u64 = 9000;
/// Returns the Celo native transfer.
pub fn precompile() -> DynPrecompile {
DynPrecompile::new_stateful(PRECOMPILE_ID_CELO_TRANSFER.clone(), celo_transfer_precompile)
}
/// Celo transfer precompile implementation.
///
/// Uses load_account to modify balances directly, making it compatible with PrecompilesMap.
pub fn celo_transfer_precompile(mut input: PrecompileInput<'_>) -> PrecompileResult {
// Check minimum gas requirement
if input.gas < CELO_TRANSFER_GAS_COST {
return Err(PrecompileError::OutOfGas);
}
// Validate input length (must be exactly 96 bytes: 32 + 32 + 32)
if input.data.len() != 96 {
return Err(PrecompileError::Other(
format!(
"Invalid input length for Celo transfer precompile: expected 96 bytes, got {}",
input.data.len()
)
.into(),
));
}
// Parse input: from (bytes 12-32), to (bytes 44-64), value (bytes 64-96)
let from_bytes = &input.data[12..32];
let to_bytes = &input.data[44..64];
let value_bytes = &input.data[64..96];
let from_address = Address::from_slice(from_bytes);
let to_address = Address::from_slice(to_bytes);
let value = U256::from_be_slice(value_bytes);
// Perform the transfer using load_account to modify balances directly
let internals = input.internals_mut();
// Load and check the from account balance first
let from_account = match internals.load_account(from_address) {
Ok(account) => account,
Err(e) => {
return Err(PrecompileError::Other(
format!("Failed to load from account: {e:?}").into(),
));
}
};
// Check if from account has sufficient balance
if from_account.data.info.balance < value {
return Err(PrecompileError::Other("Insufficient balance".into()));
}
let to_account = match internals.load_account(to_address) {
Ok(account) => account,
Err(e) => {
return Err(PrecompileError::Other(format!("Failed to load to account: {e:?}").into()));
}
};
// Check for overflow in to account
if to_account.data.info.balance.checked_add(value).is_none() {
return Err(PrecompileError::Other("Balance overflow in to account".into()));
}
// Transfer the value between accounts
internals
.transfer(from_address, to_address, value)
.map_err(|e| PrecompileError::Other(format!("Failed to perform transfer: {e:?}").into()))?;
// No output data for successful transfer
Ok(PrecompileOutput::new(CELO_TRANSFER_GAS_COST, alloy_primitives::Bytes::new()))
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/networks/src/celo/mod.rs | crates/evm/networks/src/celo/mod.rs | pub mod transfer;
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/lib.rs | crates/evm/traces/src/lib.rs | //! # foundry-evm-traces
//!
//! EVM trace identifying and decoding.
#![cfg_attr(not(test), warn(unused_crate_dependencies))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate foundry_common;
#[macro_use]
extern crate tracing;
use foundry_common::{
contracts::{ContractsByAddress, ContractsByArtifact},
shell,
};
use revm::bytecode::opcode::OpCode;
use revm_inspectors::tracing::{
OpcodeFilter,
types::{DecodedTraceStep, TraceMemberOrder},
};
use serde::{Deserialize, Serialize};
use std::{
borrow::Cow,
collections::BTreeSet,
ops::{Deref, DerefMut},
};
use alloy_primitives::map::HashMap;
pub use revm_inspectors::tracing::{
CallTraceArena, FourByteInspector, GethTraceBuilder, ParityTraceBuilder, StackSnapshotType,
TraceWriter, TracingInspector, TracingInspectorConfig,
types::{
CallKind, CallLog, CallTrace, CallTraceNode, DecodedCallData, DecodedCallLog,
DecodedCallTrace,
},
};
/// Call trace address identifiers.
///
/// Identifiers figure out what ABIs and labels belong to all the addresses of the trace.
pub mod identifier;
use identifier::LocalTraceIdentifier;
mod decoder;
pub use decoder::{CallTraceDecoder, CallTraceDecoderBuilder};
pub mod debug;
pub use debug::DebugTraceIdentifier;
pub mod folded_stack_trace;
pub mod backtrace;
pub type Traces = Vec<(TraceKind, SparsedTraceArena)>;
/// Trace arena keeping track of ignored trace items.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparsedTraceArena {
/// Full trace arena.
#[serde(flatten)]
pub arena: CallTraceArena,
/// Ranges of trace steps to ignore in format (start_node, start_step) -> (end_node, end_step).
/// See `foundry_cheatcodes::utils::IgnoredTraces` for more information.
#[serde(default, skip_serializing_if = "HashMap::is_empty")]
pub ignored: HashMap<(usize, usize), (usize, usize)>,
}
impl SparsedTraceArena {
/// Goes over entire trace arena and removes ignored trace items.
fn resolve_arena(&self) -> Cow<'_, CallTraceArena> {
if self.ignored.is_empty() {
Cow::Borrowed(&self.arena)
} else {
let mut arena = self.arena.clone();
fn clear_node(
nodes: &mut [CallTraceNode],
node_idx: usize,
ignored: &HashMap<(usize, usize), (usize, usize)>,
cur_ignore_end: &mut Option<(usize, usize)>,
) {
// Prepend an additional None item to the ordering to handle the beginning of the
// trace.
let items = std::iter::once(None)
.chain(nodes[node_idx].ordering.clone().into_iter().map(Some))
.enumerate();
let mut internal_calls = Vec::new();
let mut items_to_remove = BTreeSet::new();
for (item_idx, item) in items {
if let Some(end_node) = ignored.get(&(node_idx, item_idx)) {
*cur_ignore_end = Some(*end_node);
}
let mut remove = cur_ignore_end.is_some() & item.is_some();
match item {
// we only remove calls if they did not start/pause tracing
Some(TraceMemberOrder::Call(child_idx)) => {
clear_node(
nodes,
nodes[node_idx].children[child_idx],
ignored,
cur_ignore_end,
);
remove &= cur_ignore_end.is_some();
}
// we only remove decoded internal calls if they did not start/pause tracing
Some(TraceMemberOrder::Step(step_idx)) => {
// If this is an internal call beginning, track it in `internal_calls`
if let Some(decoded) = &nodes[node_idx].trace.steps[step_idx].decoded
&& let DecodedTraceStep::InternalCall(_, end_step_idx) = &**decoded
{
internal_calls.push((item_idx, remove, *end_step_idx));
// we decide if we should remove it later
remove = false;
}
// Handle ends of internal calls
internal_calls.retain(|(start_item_idx, remove_start, end_idx)| {
if *end_idx != step_idx {
return true;
}
// only remove start if end should be removed as well
if *remove_start && remove {
items_to_remove.insert(*start_item_idx);
} else {
remove = false;
}
false
});
}
_ => {}
}
if remove {
items_to_remove.insert(item_idx);
}
if let Some((end_node, end_step_idx)) = cur_ignore_end
&& node_idx == *end_node
&& item_idx == *end_step_idx
{
*cur_ignore_end = None;
}
}
for (offset, item_idx) in items_to_remove.into_iter().enumerate() {
nodes[node_idx].ordering.remove(item_idx - offset - 1);
}
}
clear_node(arena.nodes_mut(), 0, &self.ignored, &mut None);
Cow::Owned(arena)
}
}
}
impl Deref for SparsedTraceArena {
type Target = CallTraceArena;
fn deref(&self) -> &Self::Target {
&self.arena
}
}
impl DerefMut for SparsedTraceArena {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.arena
}
}
/// Decode a collection of call traces.
///
/// The traces will be decoded using the given decoder, if possible.
pub async fn decode_trace_arena(arena: &mut CallTraceArena, decoder: &CallTraceDecoder) {
decoder.prefetch_signatures(arena.nodes()).await;
decoder.populate_traces(arena.nodes_mut()).await;
}
/// Render a collection of call traces to a string.
pub fn render_trace_arena(arena: &SparsedTraceArena) -> String {
render_trace_arena_inner(arena, false, false)
}
/// Prunes trace depth if depth is provided as an argument
pub fn prune_trace_depth(arena: &mut CallTraceArena, depth: usize) {
for node in arena.nodes_mut() {
if node.trace.depth >= depth {
node.ordering.clear();
}
}
}
/// Render a collection of call traces to a string optionally including contract creation bytecodes
/// and in JSON format.
pub fn render_trace_arena_inner(
arena: &SparsedTraceArena,
with_bytecodes: bool,
with_storage_changes: bool,
) -> String {
if shell::is_json() {
return serde_json::to_string(&arena.resolve_arena()).expect("Failed to write traces");
}
let mut w = TraceWriter::new(Vec::<u8>::new())
.color_cheatcodes(true)
.use_colors(convert_color_choice(shell::color_choice()))
.write_bytecodes(with_bytecodes)
.with_storage_changes(with_storage_changes);
w.write_arena(&arena.resolve_arena()).expect("Failed to write traces");
String::from_utf8(w.into_writer()).expect("trace writer wrote invalid UTF-8")
}
fn convert_color_choice(choice: shell::ColorChoice) -> revm_inspectors::ColorChoice {
match choice {
shell::ColorChoice::Auto => revm_inspectors::ColorChoice::Auto,
shell::ColorChoice::Always => revm_inspectors::ColorChoice::Always,
shell::ColorChoice::Never => revm_inspectors::ColorChoice::Never,
}
}
/// Specifies the kind of trace.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum TraceKind {
Deployment,
Setup,
Execution,
}
impl TraceKind {
/// Returns `true` if the trace kind is [`Deployment`].
///
/// [`Deployment`]: TraceKind::Deployment
#[must_use]
pub fn is_deployment(self) -> bool {
matches!(self, Self::Deployment)
}
/// Returns `true` if the trace kind is [`Setup`].
///
/// [`Setup`]: TraceKind::Setup
#[must_use]
pub fn is_setup(self) -> bool {
matches!(self, Self::Setup)
}
/// Returns `true` if the trace kind is [`Execution`].
///
/// [`Execution`]: TraceKind::Execution
#[must_use]
pub fn is_execution(self) -> bool {
matches!(self, Self::Execution)
}
}
/// Given a list of traces and artifacts, it returns a map connecting address to abi
pub fn load_contracts<'a>(
traces: impl IntoIterator<Item = &'a CallTraceArena>,
known_contracts: &ContractsByArtifact,
) -> ContractsByAddress {
let mut local_identifier = LocalTraceIdentifier::new(known_contracts);
let decoder = CallTraceDecoder::new();
let mut contracts = ContractsByAddress::new();
for trace in traces {
for address in decoder.identify_addresses(trace, &mut local_identifier) {
if let (Some(contract), Some(abi)) = (address.contract, address.abi) {
contracts.insert(address.address, (contract, abi.into_owned()));
}
}
}
contracts
}
/// Different kinds of internal functions tracing.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum InternalTraceMode {
#[default]
None,
/// Traces internal functions without decoding inputs/outputs from memory.
Simple,
/// Same as `Simple`, but also tracks memory snapshots.
Full,
}
impl From<InternalTraceMode> for TraceMode {
fn from(mode: InternalTraceMode) -> Self {
match mode {
InternalTraceMode::None => Self::None,
InternalTraceMode::Simple => Self::JumpSimple,
InternalTraceMode::Full => Self::Jump,
}
}
}
// Different kinds of traces used by different foundry components.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum TraceMode {
/// Disabled tracing.
#[default]
None,
/// Simple call trace, no steps tracing required.
Call,
/// Call trace with steps tracing for JUMP and JUMPDEST opcodes.
///
/// Does not enable tracking memory or stack snapshots.
Steps,
/// Call trace with tracing for JUMP and JUMPDEST opcode steps.
///
/// Used for internal functions identification. Does not track memory snapshots.
JumpSimple,
/// Call trace with tracing for JUMP and JUMPDEST opcode steps.
///
/// Same as `JumpSimple`, but tracks memory snapshots as well.
Jump,
/// Call trace with complete steps tracing.
///
/// Used by debugger.
Debug,
/// Debug trace with storage changes.
RecordStateDiff,
}
impl TraceMode {
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
pub const fn is_call(self) -> bool {
matches!(self, Self::Call)
}
pub const fn is_steps(self) -> bool {
matches!(self, Self::Steps)
}
pub const fn is_jump_simple(self) -> bool {
matches!(self, Self::JumpSimple)
}
pub const fn is_jump(self) -> bool {
matches!(self, Self::Jump)
}
pub const fn record_state_diff(self) -> bool {
matches!(self, Self::RecordStateDiff)
}
pub const fn is_debug(self) -> bool {
matches!(self, Self::Debug)
}
pub fn with_debug(self, yes: bool) -> Self {
if yes { std::cmp::max(self, Self::Debug) } else { self }
}
pub fn with_decode_internal(self, mode: InternalTraceMode) -> Self {
std::cmp::max(self, mode.into())
}
pub fn with_state_changes(self, yes: bool) -> Self {
if yes { std::cmp::max(self, Self::RecordStateDiff) } else { self }
}
pub fn with_verbosity(self, verbosity: u8) -> Self {
match verbosity {
0..3 => self,
3..=4 => std::cmp::max(self, Self::Call),
// Enable step recording for backtraces when verbosity is 5 or higher.
// We need to ensure we're recording JUMP AND JUMPDEST steps.
_ => std::cmp::min(self, Self::Steps),
}
}
pub fn into_config(self) -> Option<TracingInspectorConfig> {
if self.is_none() {
None
} else {
TracingInspectorConfig {
record_steps: self >= Self::Steps,
record_memory_snapshots: self >= Self::Jump,
record_stack_snapshots: if self > Self::Steps {
StackSnapshotType::Full
} else {
StackSnapshotType::None
},
record_logs: true,
record_state_diff: self.record_state_diff(),
record_returndata_snapshots: self.is_debug(),
record_opcodes_filter: (self.is_steps() || self.is_jump() || self.is_jump_simple())
.then(|| OpcodeFilter::new().enabled(OpCode::JUMP).enabled(OpCode::JUMPDEST)),
exclude_precompile_calls: false,
record_immediate_bytes: self.is_debug(),
}
.into()
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/folded_stack_trace.rs | crates/evm/traces/src/folded_stack_trace.rs | use alloy_primitives::hex::ToHexExt;
use revm_inspectors::tracing::{
CallTraceArena,
types::{CallTraceNode, CallTraceStep, DecodedTraceStep, TraceMemberOrder},
};
/// Builds a folded stack trace from a call trace arena.
pub fn build(arena: &CallTraceArena) -> Vec<String> {
let mut fst = EvmFoldedStackTraceBuilder::default();
fst.process_call_node(arena.nodes(), 0);
fst.build()
}
/// Wrapper for building a folded stack trace using EVM call trace node.
#[derive(Default)]
pub struct EvmFoldedStackTraceBuilder {
/// Raw folded stack trace builder.
fst: FoldedStackTraceBuilder,
}
impl EvmFoldedStackTraceBuilder {
/// Returns the folded stack trace.
pub fn build(self) -> Vec<String> {
self.fst.build()
}
/// Creates an entry for a EVM CALL in the folded stack trace. This method recursively processes
/// all the children nodes of the call node and at the end it exits.
pub fn process_call_node(&mut self, nodes: &[CallTraceNode], idx: usize) {
let node = &nodes[idx];
let func_name = if node.trace.kind.is_any_create() {
let contract_name = node
.trace
.decoded
.as_ref()
.and_then(|dc| dc.label.as_deref())
.unwrap_or("Contract");
format!("new {contract_name}")
} else {
let selector = node
.selector()
.map(|selector| selector.encode_hex_with_prefix())
.unwrap_or_else(|| "fallback".to_string());
let signature = node
.trace
.decoded
.as_ref()
.and_then(|dc| dc.call_data.as_ref())
.map(|dc| &dc.signature)
.unwrap_or(&selector);
if let Some(label) = node.trace.decoded.as_ref().and_then(|dc| dc.label.as_ref()) {
format!("{label}.{signature}")
} else {
signature.clone()
}
};
self.fst.enter(func_name, node.trace.gas_used as i64);
// Track internal function step exits to do in this call context.
let mut step_exits = vec![];
// Process children nodes.
for order in &node.ordering {
match order {
TraceMemberOrder::Call(child_idx) => {
let child_node_idx = node.children[*child_idx];
self.process_call_node(nodes, child_node_idx);
}
TraceMemberOrder::Step(step_idx) => {
self.exit_previous_steps(&mut step_exits, *step_idx);
self.process_step(&node.trace.steps, *step_idx, &mut step_exits)
}
TraceMemberOrder::Log(_) => {}
}
}
// Exit pending internal function calls if any.
for _ in 0..step_exits.len() {
self.fst.exit();
}
// Exit from this call context in the folded stack trace.
self.fst.exit();
}
/// Creates an entry for an internal function call in the folded stack trace. This method only
/// enters the function in the folded stack trace, we cannot exit since we need to exit at a
/// future step. Hence, we keep track of the step end index in the `step_exits`.
fn process_step(
&mut self,
steps: &[CallTraceStep],
step_idx: usize,
step_exits: &mut Vec<usize>,
) {
let step = &steps[step_idx];
if let Some(decoded_step) = &step.decoded {
match decoded_step.as_ref() {
DecodedTraceStep::InternalCall(decoded_internal_call, step_end_idx) => {
let gas_used = steps[*step_end_idx].gas_used.saturating_sub(step.gas_used);
self.fst.enter(decoded_internal_call.func_name.clone(), gas_used as i64);
step_exits.push(*step_end_idx);
}
DecodedTraceStep::Line(_) => {}
}
}
}
/// Exits all the previous internal calls that should end before starting step_idx.
fn exit_previous_steps(&mut self, step_exits: &mut Vec<usize>, step_idx: usize) {
let initial_length = step_exits.len();
step_exits.retain(|&number| number > step_idx);
let num_exits = initial_length - step_exits.len();
for _ in 0..num_exits {
self.fst.exit();
}
}
}
/// Helps to translate a function enter-exit flow into a folded stack trace.
///
/// Example:
/// ```solidity
/// function top() { child_a(); child_b() } // consumes 500 gas
/// function child_a() {} // consumes 100 gas
/// function child_b() {} // consumes 200 gas
/// ```
///
/// For execution of the `top` function looks like:
/// 1. enter `top`
/// 2. enter `child_a`
/// 3. exit `child_a`
/// 4. enter `child_b`
/// 5. exit `child_b`
/// 6. exit `top`
///
/// The translated folded stack trace lines look like:
/// 1. top
/// 2. top;child_a
/// 3. top;child_b
///
/// Including the gas consumed by the function by itself.
/// 1. top 200 // 500 - 100 - 200
/// 2. top;child_a 100
/// 3. top;child_b 200
#[derive(Debug, Default)]
pub struct FoldedStackTraceBuilder {
/// Trace entries.
traces: Vec<TraceEntry>,
/// Number of exits to be done before entering a new function.
exits: usize,
}
#[derive(Debug, Default)]
struct TraceEntry {
/// Names of all functions in the call stack of this trace.
names: Vec<String>,
/// Gas consumed by this function, allowed to be negative due to refunds.
gas: i64,
}
impl FoldedStackTraceBuilder {
/// Enter execution of a function call that consumes `gas`.
pub fn enter(&mut self, label: String, gas: i64) {
let mut names = self.traces.last().map(|entry| entry.names.clone()).unwrap_or_default();
while self.exits > 0 {
names.pop();
self.exits -= 1;
}
names.push(label);
self.traces.push(TraceEntry { names, gas });
}
/// Exit execution of a function call.
pub fn exit(&mut self) {
self.exits += 1;
}
/// Returns folded stack trace.
pub fn build(mut self) -> Vec<String> {
self.subtract_children();
self.build_without_subtraction()
}
/// Internal method to build the folded stack trace without subtracting gas consumed by
/// the children function calls.
fn build_without_subtraction(&mut self) -> Vec<String> {
let mut lines = Vec::new();
for TraceEntry { names, gas } in &self.traces {
lines.push(format!("{} {}", names.join(";"), gas));
}
lines
}
/// Subtracts gas consumed by the children function calls from the parent function calls.
fn subtract_children(&mut self) {
// Iterate over each trace to find the children and subtract their values from the parents.
for i in 0..self.traces.len() {
let (left, right) = self.traces.split_at_mut(i);
let TraceEntry { names, gas } = &right[0];
if names.len() > 1 {
let parent_trace_to_match = &names[..names.len() - 1];
for parent in left.iter_mut().rev() {
if parent.names == parent_trace_to_match {
parent.gas -= gas;
break;
}
}
}
}
}
}
mod tests {
#[test]
fn test_fst_1() {
let mut trace = super::FoldedStackTraceBuilder::default();
trace.enter("top".to_string(), 500);
trace.enter("child_a".to_string(), 100);
trace.exit();
trace.enter("child_b".to_string(), 200);
assert_eq!(
trace.build_without_subtraction(),
vec![
"top 500", //
"top;child_a 100",
"top;child_b 200",
]
);
assert_eq!(
trace.build(),
vec![
"top 200", // 500 - 100 - 200
"top;child_a 100",
"top;child_b 200",
]
);
}
#[test]
fn test_fst_2() {
let mut trace = super::FoldedStackTraceBuilder::default();
trace.enter("top".to_string(), 500);
trace.enter("child_a".to_string(), 300);
trace.enter("child_b".to_string(), 100);
trace.exit();
trace.exit();
trace.enter("child_c".to_string(), 100);
assert_eq!(
trace.build_without_subtraction(),
vec![
"top 500", //
"top;child_a 300",
"top;child_a;child_b 100",
"top;child_c 100",
]
);
assert_eq!(
trace.build(),
vec![
"top 100", // 500 - 300 - 100
"top;child_a 200", // 300 - 100
"top;child_a;child_b 100",
"top;child_c 100",
]
);
}
#[test]
fn test_fst_3() {
let mut trace = super::FoldedStackTraceBuilder::default();
trace.enter("top".to_string(), 1700);
trace.enter("child_a".to_string(), 500);
trace.exit();
trace.enter("child_b".to_string(), 500);
trace.enter("child_c".to_string(), 500);
trace.exit();
trace.exit();
trace.exit();
trace.enter("top2".to_string(), 1700);
assert_eq!(
trace.build_without_subtraction(),
vec![
"top 1700", //
"top;child_a 500",
"top;child_b 500",
"top;child_b;child_c 500",
"top2 1700",
]
);
assert_eq!(
trace.build(),
vec![
"top 700", //
"top;child_a 500",
"top;child_b 0",
"top;child_b;child_c 500",
"top2 1700",
]
);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/identifier/external.rs | crates/evm/traces/src/identifier/external.rs | use super::{IdentifiedAddress, TraceIdentifier};
use crate::debug::ContractSources;
use alloy_primitives::{
Address,
map::{Entry, HashMap},
};
use eyre::WrapErr;
use foundry_block_explorers::{contract::Metadata, errors::EtherscanError};
use foundry_common::compile::etherscan_project;
use foundry_config::{Chain, Config};
use futures::{
future::join_all,
stream::{FuturesUnordered, Stream, StreamExt},
task::{Context, Poll},
};
use revm_inspectors::tracing::types::CallTraceNode;
use serde::Deserialize;
use std::{
borrow::Cow,
pin::Pin,
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
};
use tokio::time::{Duration, Interval};
/// A trace identifier that tries to identify addresses using Etherscan.
pub struct ExternalIdentifier {
fetchers: Vec<Arc<dyn ExternalFetcherT>>,
/// Cached contracts.
contracts: HashMap<Address, (FetcherKind, Option<Metadata>)>,
}
impl ExternalIdentifier {
/// Creates a new external identifier with the given client
pub fn new(config: &Config, mut chain: Option<Chain>) -> eyre::Result<Option<Self>> {
if config.offline {
return Ok(None);
}
let config = match config.get_etherscan_config_with_chain(chain) {
Ok(Some(config)) => {
chain = config.chain;
Some(config)
}
Ok(None) => {
warn!(target: "evm::traces::external", "etherscan config not found");
None
}
Err(err) => {
warn!(target: "evm::traces::external", ?err, "failed to get etherscan config");
None
}
};
let mut fetchers = Vec::<Arc<dyn ExternalFetcherT>>::new();
if let Some(chain) = chain {
debug!(target: "evm::traces::external", ?chain, "using sourcify identifier");
fetchers.push(Arc::new(SourcifyFetcher::new(chain)));
}
if let Some(config) = config {
debug!(target: "evm::traces::external", chain=?config.chain, url=?config.api_url, "using etherscan identifier");
fetchers.push(Arc::new(EtherscanFetcher::new(config.into_client()?)));
}
if fetchers.is_empty() {
debug!(target: "evm::traces::external", "no fetchers enabled");
return Ok(None);
}
Ok(Some(Self { fetchers, contracts: Default::default() }))
}
/// Goes over the list of contracts we have pulled from the traces, clones their source from
/// Etherscan and compiles them locally, for usage in the debugger.
pub async fn get_compiled_contracts(&self) -> eyre::Result<ContractSources> {
// Collect contract info upfront so we can reference it in error messages
let contracts_info: Vec<_> = self
.contracts
.iter()
// filter out vyper files and contracts without metadata
.filter_map(|(addr, (_, metadata))| {
if let Some(metadata) = metadata.as_ref()
&& !metadata.is_vyper()
{
Some((*addr, metadata))
} else {
None
}
})
.collect();
let outputs_fut = contracts_info
.iter()
.map(|(addr, metadata)| async move {
sh_println!("Compiling: {} {addr}", metadata.contract_name)?;
let root = tempfile::tempdir()?;
let root_path = root.path();
let project = etherscan_project(metadata, root_path)?;
let output = project.compile()?;
if output.has_compiler_errors() {
eyre::bail!("{output}")
}
Ok((project, output, root))
})
.collect::<Vec<_>>();
// poll all the futures concurrently
let outputs = join_all(outputs_fut).await;
let mut sources: ContractSources = Default::default();
// construct the map
for (idx, res) in outputs.into_iter().enumerate() {
let (addr, metadata) = &contracts_info[idx];
let name = &metadata.contract_name;
let (project, output, _) =
res.wrap_err_with(|| format!("Failed to compile contract {name} at {addr}"))?;
sources
.insert(&output, project.root(), None)
.wrap_err_with(|| format!("Failed to insert contract {name} at {addr}"))?;
}
Ok(sources)
}
fn identify_from_metadata(
&self,
address: Address,
metadata: &Metadata,
) -> IdentifiedAddress<'static> {
let label = metadata.contract_name.clone();
let abi = metadata.abi().ok().map(Cow::Owned);
IdentifiedAddress {
address,
label: Some(label.clone()),
contract: Some(label),
abi,
artifact_id: None,
}
}
}
impl TraceIdentifier for ExternalIdentifier {
fn identify_addresses(&mut self, nodes: &[&CallTraceNode]) -> Vec<IdentifiedAddress<'_>> {
if nodes.is_empty() {
return Vec::new();
}
trace!(target: "evm::traces::external", "identify {} addresses", nodes.len());
let mut identities = Vec::new();
let mut to_fetch = Vec::new();
// Check cache first.
for &node in nodes {
let address = node.trace.address;
if let Some((_, metadata)) = self.contracts.get(&address) {
if let Some(metadata) = metadata {
identities.push(self.identify_from_metadata(address, metadata));
} else {
// Do nothing. We know that this contract was not verified.
}
} else {
to_fetch.push(address);
}
}
if to_fetch.is_empty() {
return identities;
}
trace!(target: "evm::traces::external", "fetching {} addresses", to_fetch.len());
let fetchers =
self.fetchers.iter().map(|fetcher| ExternalFetcher::new(fetcher.clone(), &to_fetch));
let fetched_identities = foundry_common::block_on(
futures::stream::select_all(fetchers)
.filter_map(|(address, value)| {
let addr = value
.1
.as_ref()
.map(|metadata| self.identify_from_metadata(address, metadata));
match self.contracts.entry(address) {
Entry::Occupied(mut occupied_entry) => {
// Override if:
// - new is from Etherscan and old is not
// - new is Some and old is None, meaning verified only in one source
if !matches!(occupied_entry.get().0, FetcherKind::Etherscan)
|| value.1.is_none()
{
occupied_entry.insert(value);
}
}
Entry::Vacant(vacant_entry) => {
vacant_entry.insert(value);
}
}
async move { addr }
})
.collect::<Vec<IdentifiedAddress<'_>>>(),
);
trace!(target: "evm::traces::external", "fetched {} addresses: {fetched_identities:#?}", fetched_identities.len());
identities.extend(fetched_identities);
identities
}
}
type FetchFuture =
Pin<Box<dyn Future<Output = (Address, Result<Option<Metadata>, EtherscanError>)>>>;
/// A rate limit aware fetcher.
///
/// Fetches information about multiple addresses concurrently, while respecting rate limits.
struct ExternalFetcher {
/// The fetcher
fetcher: Arc<dyn ExternalFetcherT>,
/// The time we wait if we hit the rate limit
timeout: Duration,
/// The interval we are currently waiting for before making a new request
backoff: Option<Interval>,
/// The maximum amount of requests to send concurrently
concurrency: usize,
/// The addresses we have yet to make requests for
queue: Vec<Address>,
/// The in progress requests
in_progress: FuturesUnordered<FetchFuture>,
}
impl ExternalFetcher {
fn new(fetcher: Arc<dyn ExternalFetcherT>, to_fetch: &[Address]) -> Self {
Self {
timeout: fetcher.timeout(),
backoff: None,
concurrency: fetcher.concurrency(),
fetcher,
queue: to_fetch.to_vec(),
in_progress: FuturesUnordered::new(),
}
}
fn queue_next_reqs(&mut self) {
while self.in_progress.len() < self.concurrency {
let Some(addr) = self.queue.pop() else { break };
let fetcher = Arc::clone(&self.fetcher);
self.in_progress.push(Box::pin(async move {
trace!(target: "evm::traces::external", ?addr, "fetching info");
let res = fetcher.fetch(addr).await;
(addr, res)
}));
}
}
}
impl Stream for ExternalFetcher {
type Item = (Address, (FetcherKind, Option<Metadata>));
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let pin = self.get_mut();
let _guard =
info_span!("evm::traces::external", kind=?pin.fetcher.kind(), "ExternalFetcher")
.entered();
if pin.fetcher.invalid_api_key().load(Ordering::Relaxed) {
return Poll::Ready(None);
}
loop {
if let Some(mut backoff) = pin.backoff.take()
&& backoff.poll_tick(cx).is_pending()
{
pin.backoff = Some(backoff);
return Poll::Pending;
}
pin.queue_next_reqs();
let mut made_progress_this_iter = false;
match pin.in_progress.poll_next_unpin(cx) {
Poll::Pending => {}
Poll::Ready(None) => return Poll::Ready(None),
Poll::Ready(Some((addr, res))) => {
made_progress_this_iter = true;
match res {
Ok(metadata) => {
return Poll::Ready(Some((addr, (pin.fetcher.kind(), metadata))));
}
Err(EtherscanError::ContractCodeNotVerified(_)) => {
return Poll::Ready(Some((addr, (pin.fetcher.kind(), None))));
}
Err(EtherscanError::RateLimitExceeded) => {
warn!(target: "evm::traces::external", "rate limit exceeded on attempt");
pin.backoff = Some(tokio::time::interval(pin.timeout));
pin.queue.push(addr);
}
Err(EtherscanError::InvalidApiKey) => {
warn!(target: "evm::traces::external", "invalid api key");
// mark key as invalid
pin.fetcher.invalid_api_key().store(true, Ordering::Relaxed);
return Poll::Ready(None);
}
Err(EtherscanError::BlockedByCloudflare) => {
warn!(target: "evm::traces::external", "blocked by cloudflare");
// mark key as invalid
pin.fetcher.invalid_api_key().store(true, Ordering::Relaxed);
return Poll::Ready(None);
}
Err(err) => {
warn!(target: "evm::traces::external", ?err, "could not get info");
}
}
}
}
if !made_progress_this_iter {
return Poll::Pending;
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FetcherKind {
Etherscan,
Sourcify,
}
#[async_trait::async_trait]
trait ExternalFetcherT: Send + Sync {
fn kind(&self) -> FetcherKind;
fn timeout(&self) -> Duration;
fn concurrency(&self) -> usize;
fn invalid_api_key(&self) -> &AtomicBool;
async fn fetch(&self, address: Address) -> Result<Option<Metadata>, EtherscanError>;
}
struct EtherscanFetcher {
client: foundry_block_explorers::Client,
invalid_api_key: AtomicBool,
}
impl EtherscanFetcher {
fn new(client: foundry_block_explorers::Client) -> Self {
Self { client, invalid_api_key: AtomicBool::new(false) }
}
}
#[async_trait::async_trait]
impl ExternalFetcherT for EtherscanFetcher {
fn kind(&self) -> FetcherKind {
FetcherKind::Etherscan
}
fn timeout(&self) -> Duration {
Duration::from_secs(1)
}
fn concurrency(&self) -> usize {
5
}
fn invalid_api_key(&self) -> &AtomicBool {
&self.invalid_api_key
}
async fn fetch(&self, address: Address) -> Result<Option<Metadata>, EtherscanError> {
self.client.contract_source_code(address).await.map(|mut metadata| metadata.items.pop())
}
}
struct SourcifyFetcher {
client: reqwest::Client,
url: String,
invalid_api_key: AtomicBool,
}
impl SourcifyFetcher {
fn new(chain: Chain) -> Self {
Self {
client: reqwest::Client::new(),
url: format!("https://sourcify.dev/server/v2/contract/{}", chain.id()),
invalid_api_key: AtomicBool::new(false),
}
}
}
#[async_trait::async_trait]
impl ExternalFetcherT for SourcifyFetcher {
fn kind(&self) -> FetcherKind {
FetcherKind::Sourcify
}
fn timeout(&self) -> Duration {
Duration::from_secs(1)
}
fn concurrency(&self) -> usize {
5
}
fn invalid_api_key(&self) -> &AtomicBool {
&self.invalid_api_key
}
async fn fetch(&self, address: Address) -> Result<Option<Metadata>, EtherscanError> {
let url = format!("{url}/{address}?fields=abi,compilation", url = self.url);
let response = self.client.get(url).send().await?;
let code = response.status();
let response: SourcifyResponse = response.json().await?;
trace!(target: "evm::traces::external", "Sourcify response for {address}: {response:#?}");
match code.as_u16() {
// Not verified.
404 => return Err(EtherscanError::ContractCodeNotVerified(address)),
// Too many requests.
429 => return Err(EtherscanError::RateLimitExceeded),
_ => {}
}
match response {
SourcifyResponse::Success(metadata) => Ok(Some(metadata.into())),
SourcifyResponse::Error(error) => Err(EtherscanError::Unknown(format!("{error:#?}"))),
}
}
}
/// Sourcify API response for `/v2/contract/{chainId}/{address}`.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
enum SourcifyResponse {
Success(SourcifyMetadata),
Error(SourcifyError),
}
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
#[expect(dead_code)] // Used in Debug.
struct SourcifyError {
custom_code: String,
message: String,
error_id: String,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SourcifyMetadata {
#[serde(default)]
abi: Option<Box<serde_json::value::RawValue>>,
#[serde(default)]
compilation: Option<Compilation>,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Compilation {
#[serde(default)]
compiler_version: String,
#[serde(default)]
name: String,
}
impl From<SourcifyMetadata> for Metadata {
fn from(metadata: SourcifyMetadata) -> Self {
let SourcifyMetadata { abi, compilation } = metadata;
let (contract_name, compiler_version) = compilation
.map(|c| (c.name, c.compiler_version))
.unwrap_or_else(|| (String::new(), String::new()));
// Defaulted fields may be fetched from sourcify but we don't make use of them.
Self {
source_code: foundry_block_explorers::contract::SourceCodeMetadata::Sources(
Default::default(),
),
abi: Box::<str>::from(abi.unwrap_or_default()).into(),
contract_name,
compiler_version,
optimization_used: 0,
runs: 0,
constructor_arguments: Default::default(),
evm_version: String::new(),
library: String::new(),
license_type: String::new(),
proxy: 0,
implementation: None,
swarm_source: String::new(),
}
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/identifier/local.rs | crates/evm/traces/src/identifier/local.rs | use super::{IdentifiedAddress, TraceIdentifier};
use alloy_dyn_abi::JsonAbiExt;
use alloy_json_abi::JsonAbi;
use alloy_primitives::{Address, Bytes, map::HashMap};
use foundry_common::contracts::{ContractsByArtifact, bytecode_diff_score};
use foundry_compilers::ArtifactId;
use revm_inspectors::tracing::types::CallTraceNode;
use std::borrow::Cow;
/// A trace identifier that tries to identify addresses using local contracts.
pub struct LocalTraceIdentifier<'a> {
/// Known contracts to search through.
known_contracts: &'a ContractsByArtifact,
/// Vector of pairs of artifact ID and the runtime code length of the given artifact.
ordered_ids: Vec<(&'a ArtifactId, usize)>,
/// The contracts bytecode.
contracts_bytecode: Option<&'a HashMap<Address, Bytes>>,
}
impl<'a> LocalTraceIdentifier<'a> {
/// Creates a new local trace identifier.
pub fn new(known_contracts: &'a ContractsByArtifact) -> Self {
let mut ordered_ids = known_contracts
.iter()
.filter_map(|(id, contract)| Some((id, contract.deployed_bytecode()?)))
.map(|(id, bytecode)| (id, bytecode.len()))
.collect::<Vec<_>>();
ordered_ids.sort_by_key(|(_, len)| *len);
Self { known_contracts, ordered_ids, contracts_bytecode: None }
}
pub fn with_bytecodes(mut self, contracts_bytecode: &'a HashMap<Address, Bytes>) -> Self {
self.contracts_bytecode = Some(contracts_bytecode);
self
}
/// Returns the known contracts.
#[inline]
pub fn contracts(&self) -> &'a ContractsByArtifact {
self.known_contracts
}
/// Identifies the artifact based on score computed for both creation and deployed bytecodes.
pub fn identify_code(
&self,
runtime_code: &[u8],
creation_code: &[u8],
) -> Option<(&'a ArtifactId, &'a JsonAbi)> {
let len = runtime_code.len();
let mut min_score = f64::MAX;
let mut min_score_id = None;
let mut check = |id, is_creation, min_score: &mut f64| {
let contract = self.known_contracts.get(id)?;
// Select bytecodes to compare based on `is_creation` flag.
let (contract_bytecode, current_bytecode) = if is_creation {
(contract.bytecode_without_placeholders(), creation_code)
} else {
(contract.deployed_bytecode_without_placeholders(), runtime_code)
};
if let Some(bytecode) = contract_bytecode {
let mut current_bytecode = current_bytecode;
if is_creation && current_bytecode.len() > bytecode.len() {
// Try to decode ctor args with contract abi.
if let Some(constructor) = contract.abi.constructor() {
let constructor_args = ¤t_bytecode[bytecode.len()..];
if constructor.abi_decode_input(constructor_args).is_ok() {
// If we can decode args with current abi then remove args from
// code to compare.
current_bytecode = ¤t_bytecode[..bytecode.len()]
}
}
}
let score = bytecode_diff_score(&bytecode, current_bytecode);
if score == 0.0 {
trace!(target: "evm::traces::local", "found exact match");
return Some((id, &contract.abi));
}
if score < *min_score {
*min_score = score;
min_score_id = Some((id, &contract.abi));
}
}
None
};
// Check `[len * 0.9, ..., len * 1.1]`.
let max_len = (len * 11) / 10;
// Start at artifacts with the same code length: `len..len*1.1`.
let same_length_idx = self.find_index(len);
for idx in same_length_idx..self.ordered_ids.len() {
let (id, len) = self.ordered_ids[idx];
if len > max_len {
break;
}
if let found @ Some(_) = check(id, true, &mut min_score) {
return found;
}
}
// Iterate over the remaining artifacts with less code length: `len*0.9..len`.
let min_len = (len * 9) / 10;
let idx = self.find_index(min_len);
for i in idx..same_length_idx {
let (id, _) = self.ordered_ids[i];
if let found @ Some(_) = check(id, true, &mut min_score) {
return found;
}
}
// Fallback to comparing deployed code if min score greater than threshold.
if min_score >= 0.85 {
for (artifact, _) in &self.ordered_ids {
if let found @ Some(_) = check(artifact, false, &mut min_score) {
return found;
}
}
}
trace!(target: "evm::traces::local", %min_score, "no exact match found");
// Note: the diff score can be inaccurate for small contracts so we're using a relatively
// high threshold here to avoid filtering out too many contracts.
if min_score < 0.85 { min_score_id } else { None }
}
/// Returns the index of the artifact with the given code length, or the index of the first
/// artifact with a greater code length if the exact code length is not found.
fn find_index(&self, len: usize) -> usize {
let (Ok(mut idx) | Err(mut idx)) =
self.ordered_ids.binary_search_by_key(&len, |(_, probe)| *probe);
// In case of multiple artifacts with the same code length, we need to find the first one.
while idx > 0 && self.ordered_ids[idx - 1].1 == len {
idx -= 1;
}
idx
}
}
impl TraceIdentifier for LocalTraceIdentifier<'_> {
fn identify_addresses(&mut self, nodes: &[&CallTraceNode]) -> Vec<IdentifiedAddress<'_>> {
if nodes.is_empty() {
return Vec::new();
}
trace!(target: "evm::traces::local", "identify {} addresses", nodes.len());
nodes
.iter()
.map(|&node| {
(
node.trace.address,
node.trace.kind.is_any_create().then_some(&node.trace.output[..]),
node.trace.kind.is_any_create().then_some(&node.trace.data[..]),
)
})
.filter_map(|(address, runtime_code, creation_code)| {
let _span =
trace_span!(target: "evm::traces::local", "identify", %address).entered();
// In order to identify the addresses, we need at least the runtime code. It can be
// obtained from the trace itself (if it's a CREATE* call), or from the fetched
// bytecodes.
let (runtime_code, creation_code) = match (runtime_code, creation_code) {
(Some(runtime_code), Some(creation_code)) => (runtime_code, creation_code),
(Some(runtime_code), _) => (runtime_code, &[] as &[u8]),
_ => {
let code = self.contracts_bytecode?.get(&address)?;
(code.as_ref(), &[] as &[u8])
}
};
let (id, abi) = self.identify_code(runtime_code, creation_code)?;
trace!(target: "evm::traces::local", id=%id.identifier(), "identified");
Some(IdentifiedAddress {
address,
contract: Some(id.identifier()),
label: Some(id.name.clone()),
abi: Some(Cow::Borrowed(abi)),
artifact_id: Some(id.clone()),
})
})
.collect()
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/identifier/signatures.rs | crates/evm/traces/src/identifier/signatures.rs | use alloy_json_abi::{Error, Event, Function, JsonAbi};
use alloy_primitives::{B256, Selector, map::HashMap};
use eyre::Result;
use foundry_common::{
abi::{get_error, get_event, get_func},
fs,
selectors::{OpenChainClient, SelectorKind},
};
use foundry_config::Config;
use serde::{Deserialize, Serialize};
use std::{
collections::BTreeMap,
path::{Path, PathBuf},
sync::Arc,
};
use tokio::sync::RwLock;
/// Cache for function, event and error signatures. Used by [`SignaturesIdentifier`].
#[derive(Debug, Default, Deserialize)]
#[serde(try_from = "SignaturesDiskCache")]
pub struct SignaturesCache {
signatures: HashMap<SelectorKind, Option<String>>,
}
/// Disk representation of the signatures cache.
#[derive(Serialize, Deserialize)]
struct SignaturesDiskCache {
functions: BTreeMap<Selector, String>,
errors: BTreeMap<Selector, String>,
events: BTreeMap<B256, String>,
}
impl From<SignaturesDiskCache> for SignaturesCache {
fn from(value: SignaturesDiskCache) -> Self {
let functions = value
.functions
.into_iter()
.map(|(selector, signature)| (SelectorKind::Function(selector), signature));
let errors = value
.errors
.into_iter()
.map(|(selector, signature)| (SelectorKind::Error(selector), signature));
let events = value
.events
.into_iter()
.map(|(selector, signature)| (SelectorKind::Event(selector), signature));
Self {
signatures: functions
.chain(errors)
.chain(events)
.map(|(sel, sig)| (sel, (!sig.is_empty()).then_some(sig)))
.collect(),
}
}
}
impl From<&SignaturesCache> for SignaturesDiskCache {
fn from(value: &SignaturesCache) -> Self {
let (functions, errors, events) = value.signatures.iter().fold(
(BTreeMap::new(), BTreeMap::new(), BTreeMap::new()),
|mut acc, (kind, signature)| {
let value = signature.clone().unwrap_or_default();
match *kind {
SelectorKind::Function(selector) => _ = acc.0.insert(selector, value),
SelectorKind::Error(selector) => _ = acc.1.insert(selector, value),
SelectorKind::Event(selector) => _ = acc.2.insert(selector, value),
}
acc
},
);
Self { functions, errors, events }
}
}
impl Serialize for SignaturesCache {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
SignaturesDiskCache::from(self).serialize(serializer)
}
}
impl SignaturesCache {
/// Loads the cache from a file.
#[instrument(target = "evm::traces", name = "SignaturesCache::load")]
pub fn load(path: &Path) -> Self {
trace!(target: "evm::traces", ?path, "reading signature cache");
fs::read_json_file(path)
.inspect_err(
|err| warn!(target: "evm::traces", ?path, ?err, "failed to read cache file"),
)
.unwrap_or_default()
}
/// Saves the cache to a file.
#[instrument(target = "evm::traces", name = "SignaturesCache::save", skip(self))]
pub fn save(&self, path: &Path) {
if let Some(parent) = path.parent()
&& let Err(err) = std::fs::create_dir_all(parent)
{
warn!(target: "evm::traces", ?parent, %err, "failed to create cache");
}
if let Err(err) = fs::write_json_file(path, self) {
warn!(target: "evm::traces", %err, "failed to flush signature cache");
} else {
trace!(target: "evm::traces", "flushed signature cache")
}
}
/// Updates the cache from an ABI.
pub fn extend_from_abi(&mut self, abi: &JsonAbi) {
self.extend(abi.items().filter_map(|item| match item {
alloy_json_abi::AbiItem::Function(f) => {
Some((SelectorKind::Function(f.selector()), f.signature()))
}
alloy_json_abi::AbiItem::Error(e) => {
Some((SelectorKind::Error(e.selector()), e.signature()))
}
alloy_json_abi::AbiItem::Event(e) => {
Some((SelectorKind::Event(e.selector()), e.full_signature()))
}
_ => None,
}));
}
/// Inserts a single signature into the cache.
pub fn insert(&mut self, key: SelectorKind, value: String) {
self.extend(std::iter::once((key, value)));
}
/// Extends the cache with multiple signatures.
pub fn extend(&mut self, signatures: impl IntoIterator<Item = (SelectorKind, String)>) {
self.signatures
.extend(signatures.into_iter().map(|(k, v)| (k, (!v.is_empty()).then_some(v))));
}
/// Gets a signature from the cache.
pub fn get(&self, key: &SelectorKind) -> Option<Option<String>> {
self.signatures.get(key).cloned()
}
/// Returns true if the cache contains a signature.
pub fn contains_key(&self, key: &SelectorKind) -> bool {
self.signatures.contains_key(key)
}
}
/// An identifier that tries to identify functions and events using signatures found at
/// `https://openchain.xyz` or a local cache.
#[derive(Clone, Debug)]
pub struct SignaturesIdentifier(Arc<SignaturesIdentifierInner>);
#[derive(Debug)]
struct SignaturesIdentifierInner {
/// Cached selectors for functions, events and custom errors.
cache: RwLock<SignaturesCache>,
/// Location where to save the signature cache.
cache_path: Option<PathBuf>,
/// The OpenChain client to fetch signatures from. `None` if disabled on construction.
client: Option<OpenChainClient>,
}
impl SignaturesIdentifier {
/// Creates a new `SignaturesIdentifier` with the default cache directory.
pub fn new(offline: bool) -> Result<Self> {
Self::new_with(Config::foundry_cache_dir().as_deref(), offline)
}
/// Creates a new `SignaturesIdentifier` from the global configuration.
pub fn from_config(config: &Config) -> Result<Self> {
Self::new(config.offline)
}
/// Creates a new `SignaturesIdentifier`.
///
/// - `cache_dir` is the cache directory to store the signatures.
/// - `offline` disables the OpenChain client.
pub fn new_with(cache_dir: Option<&Path>, offline: bool) -> Result<Self> {
let client = if !offline { Some(OpenChainClient::new()?) } else { None };
let (cache, cache_path) = if let Some(cache_dir) = cache_dir {
let path = cache_dir.join("signatures");
let cache = SignaturesCache::load(&path);
(cache, Some(path))
} else {
Default::default()
};
Ok(Self(Arc::new(SignaturesIdentifierInner {
cache: RwLock::new(cache),
cache_path,
client,
})))
}
/// Saves the cache to the file system.
pub fn save(&self) {
self.0.save();
}
/// Identifies `Function`s.
pub async fn identify_functions(
&self,
identifiers: impl IntoIterator<Item = Selector>,
) -> Vec<Option<Function>> {
self.identify_map(identifiers.into_iter().map(SelectorKind::Function), get_func).await
}
/// Identifies a `Function`.
pub async fn identify_function(&self, identifier: Selector) -> Option<Function> {
self.identify_functions([identifier]).await.pop().unwrap()
}
/// Identifies `Event`s.
pub async fn identify_events(
&self,
identifiers: impl IntoIterator<Item = B256>,
) -> Vec<Option<Event>> {
self.identify_map(identifiers.into_iter().map(SelectorKind::Event), get_event).await
}
/// Identifies an `Event`.
pub async fn identify_event(&self, identifier: B256) -> Option<Event> {
self.identify_events([identifier]).await.pop().unwrap()
}
/// Identifies `Error`s.
pub async fn identify_errors(
&self,
identifiers: impl IntoIterator<Item = Selector>,
) -> Vec<Option<Error>> {
self.identify_map(identifiers.into_iter().map(SelectorKind::Error), get_error).await
}
/// Identifies an `Error`.
pub async fn identify_error(&self, identifier: Selector) -> Option<Error> {
self.identify_errors([identifier]).await.pop().unwrap()
}
/// Identifies a list of selectors.
pub async fn identify(&self, selectors: &[SelectorKind]) -> Vec<Option<String>> {
if selectors.is_empty() {
return vec![];
}
trace!(target: "evm::traces", ?selectors, "identifying selectors");
let mut cache_r = self.0.cache.read().await;
if let Some(client) = &self.0.client {
let query =
selectors.iter().copied().filter(|v| !cache_r.contains_key(v)).collect::<Vec<_>>();
if !query.is_empty() {
drop(cache_r);
let mut cache_w = self.0.cache.write().await;
if let Ok(res) = client.decode_selectors(&query).await {
for (selector, signatures) in std::iter::zip(query, res) {
cache_w.signatures.insert(selector, signatures.into_iter().next());
}
}
drop(cache_w);
cache_r = self.0.cache.read().await;
}
}
selectors.iter().map(|selector| cache_r.get(selector).unwrap_or_default()).collect()
}
async fn identify_map<T>(
&self,
selectors: impl IntoIterator<Item = SelectorKind>,
get_type: impl Fn(&str) -> Result<T>,
) -> Vec<Option<T>> {
let results = self.identify(&Vec::from_iter(selectors)).await;
results.into_iter().map(|r| r.and_then(|r| get_type(&r).ok())).collect()
}
}
impl SignaturesIdentifierInner {
fn save(&self) {
// We only identify new signatures if the client is enabled.
if let Some(path) = &self.cache_path
&& self.client.is_some()
{
self.cache
.try_read()
.expect("SignaturesIdentifier cache is locked while attempting to save")
.save(path);
}
}
}
impl Drop for SignaturesIdentifierInner {
fn drop(&mut self) {
self.save();
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/identifier/mod.rs | crates/evm/traces/src/identifier/mod.rs | use alloy_json_abi::JsonAbi;
use alloy_primitives::{Address, Bytes, map::HashMap};
use foundry_common::ContractsByArtifact;
use foundry_compilers::ArtifactId;
use foundry_config::{Chain, Config};
use revm_inspectors::tracing::types::CallTraceNode;
use std::borrow::Cow;
mod local;
pub use local::LocalTraceIdentifier;
mod external;
pub use external::ExternalIdentifier;
mod signatures;
pub use signatures::{SignaturesCache, SignaturesIdentifier};
/// An address identified by a [`TraceIdentifier`].
#[derive(Debug)]
pub struct IdentifiedAddress<'a> {
/// The address.
pub address: Address,
/// The label for the address.
pub label: Option<String>,
/// The contract this address represents.
///
/// Note: This may be in the format `"<artifact>:<contract>"`.
pub contract: Option<String>,
/// The ABI of the contract at this address.
pub abi: Option<Cow<'a, JsonAbi>>,
/// The artifact ID of the contract, if any.
pub artifact_id: Option<ArtifactId>,
}
/// Trace identifiers figure out what ABIs and labels belong to all the addresses of the trace.
pub trait TraceIdentifier {
/// Attempts to identify an address in one or more call traces.
fn identify_addresses(&mut self, nodes: &[&CallTraceNode]) -> Vec<IdentifiedAddress<'_>>;
}
/// A collection of trace identifiers.
pub struct TraceIdentifiers<'a> {
/// The local trace identifier.
pub local: Option<LocalTraceIdentifier<'a>>,
/// The optional external trace identifier.
pub external: Option<ExternalIdentifier>,
}
impl Default for TraceIdentifiers<'_> {
fn default() -> Self {
Self::new()
}
}
impl TraceIdentifier for TraceIdentifiers<'_> {
fn identify_addresses(&mut self, nodes: &[&CallTraceNode]) -> Vec<IdentifiedAddress<'_>> {
if nodes.is_empty() {
return Vec::new();
}
let mut identities = Vec::with_capacity(nodes.len());
if let Some(local) = &mut self.local {
identities.extend(local.identify_addresses(nodes));
if identities.len() >= nodes.len() {
return identities;
}
}
if let Some(external) = &mut self.external {
identities.extend(external.identify_addresses(nodes));
}
identities
}
}
impl<'a> TraceIdentifiers<'a> {
/// Creates a new, empty instance.
pub const fn new() -> Self {
Self { local: None, external: None }
}
/// Sets the local identifier.
pub fn with_local(mut self, known_contracts: &'a ContractsByArtifact) -> Self {
self.local = Some(LocalTraceIdentifier::new(known_contracts));
self
}
/// Sets the local identifier.
pub fn with_local_and_bytecodes(
mut self,
known_contracts: &'a ContractsByArtifact,
contracts_bytecode: &'a HashMap<Address, Bytes>,
) -> Self {
self.local =
Some(LocalTraceIdentifier::new(known_contracts).with_bytecodes(contracts_bytecode));
self
}
/// Sets the external identifier.
pub fn with_external(mut self, config: &Config, chain: Option<Chain>) -> eyre::Result<Self> {
self.external = ExternalIdentifier::new(config, chain)?;
Ok(self)
}
/// Returns `true` if there are no set identifiers.
pub fn is_empty(&self) -> bool {
self.local.is_none() && self.external.is_none()
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/decoder/mod.rs | crates/evm/traces/src/decoder/mod.rs | use crate::{
CallTrace, CallTraceArena, CallTraceNode, DecodedCallData,
debug::DebugTraceIdentifier,
identifier::{IdentifiedAddress, LocalTraceIdentifier, SignaturesIdentifier, TraceIdentifier},
};
use alloy_dyn_abi::{DecodedEvent, DynSolValue, EventExt, FunctionExt, JsonAbiExt};
use alloy_json_abi::{Error, Event, Function, JsonAbi};
use alloy_primitives::{
Address, B256, LogData, Selector,
map::{HashMap, HashSet, hash_map::Entry},
};
use foundry_common::{
ContractsByArtifact, SELECTOR_LEN, abi::get_indexed_event, fmt::format_token,
get_contract_name, selectors::SelectorKind,
};
use foundry_evm_core::{
abi::{Vm, console},
constants::{CALLER, CHEATCODE_ADDRESS, DEFAULT_CREATE2_DEPLOYER, HARDHAT_CONSOLE_ADDRESS},
decode::RevertDecoder,
precompiles::{
BLAKE_2F, EC_ADD, EC_MUL, EC_PAIRING, EC_RECOVER, IDENTITY, MOD_EXP, POINT_EVALUATION,
RIPEMD_160, SHA_256,
},
};
use itertools::Itertools;
use revm_inspectors::tracing::types::{DecodedCallLog, DecodedCallTrace};
use std::{collections::BTreeMap, sync::OnceLock};
mod precompiles;
/// Build a new [CallTraceDecoder].
#[derive(Default)]
#[must_use = "builders do nothing unless you call `build` on them"]
pub struct CallTraceDecoderBuilder {
    // The decoder being configured; returned unchanged by `build`.
    decoder: CallTraceDecoder,
}
impl CallTraceDecoderBuilder {
    /// Create a new builder.
    #[inline]
    pub fn new() -> Self {
        // `CallTraceDecoder::new()` returns a cached `&'static` instance;
        // clone it so the builder owns a copy it can mutate.
        Self { decoder: CallTraceDecoder::new().clone() }
    }

    /// Add known labels to the decoder.
    #[inline]
    pub fn with_labels(mut self, labels: impl IntoIterator<Item = (Address, String)>) -> Self {
        self.decoder.labels.extend(labels);
        self
    }

    /// Collect all items (functions, events, and errors) of the given ABI into the decoder.
    #[inline]
    pub fn with_abi(mut self, abi: &JsonAbi) -> Self {
        self.decoder.collect_abi(abi, None);
        self
    }

    /// Add known contracts to the decoder.
    #[inline]
    pub fn with_known_contracts(mut self, contracts: &ContractsByArtifact) -> Self {
        trace!(target: "evm::traces", len=contracts.len(), "collecting known contract ABIs");
        for contract in contracts.values() {
            self.decoder.collect_abi(&contract.abi, None);
        }
        self
    }

    /// Add known contracts to the decoder from a `LocalTraceIdentifier`.
    #[inline]
    pub fn with_local_identifier_abis(self, identifier: &LocalTraceIdentifier<'_>) -> Self {
        self.with_known_contracts(identifier.contracts())
    }

    /// Sets the verbosity level of the decoder.
    #[inline]
    pub fn with_verbosity(mut self, level: u8) -> Self {
        self.decoder.verbosity = level;
        self
    }

    /// Sets the signature identifier for events and functions.
    #[inline]
    pub fn with_signature_identifier(mut self, identifier: SignaturesIdentifier) -> Self {
        self.decoder.signature_identifier = Some(identifier);
        self
    }

    /// Sets whether address labels should be hidden when decoding traces.
    #[inline]
    pub fn with_label_disabled(mut self, disable_alias: bool) -> Self {
        self.decoder.disable_labels = disable_alias;
        self
    }

    /// Sets the debug identifier for the decoder.
    #[inline]
    pub fn with_debug_identifier(mut self, identifier: DebugTraceIdentifier) -> Self {
        self.decoder.debug_identifier = Some(identifier);
        self
    }

    /// Build the decoder.
    #[inline]
    pub fn build(self) -> CallTraceDecoder {
        self.decoder
    }
}
/// The call trace decoder.
///
/// The decoder collects address labels and ABIs from any number of [TraceIdentifier]s, which it
/// then uses to decode the call trace.
///
/// Note that a call trace decoder is required for each new set of traces, since addresses in
/// different sets might overlap.
#[derive(Clone, Debug, Default)]
pub struct CallTraceDecoder {
    /// Addresses identified to be a specific contract.
    ///
    /// The values are in the form `"<artifact>:<contract>"`.
    pub contracts: HashMap<Address, String>,
    /// Address labels.
    pub labels: HashMap<Address, String>,
    /// Contract addresses that have a receive function.
    pub receive_contracts: HashSet<Address>,
    /// Contract addresses that have fallback functions, mapped to function selectors of that
    /// contract.
    pub fallback_contracts: HashMap<Address, HashSet<Selector>>,
    /// Contract addresses that do NOT have fallback functions, mapped to function selectors
    /// of that contract.
    pub non_fallback_contracts: HashMap<Address, HashSet<Selector>>,
    /// All known functions.
    pub functions: HashMap<Selector, Vec<Function>>,
    /// All known events.
    ///
    /// Key is: `(topics[0], topics.len() - 1)`.
    pub events: BTreeMap<(B256, usize), Vec<Event>>,
    /// Revert decoder. Contains all known custom errors.
    pub revert_decoder: RevertDecoder,
    /// A signature identifier for events and functions.
    pub signature_identifier: Option<SignaturesIdentifier>,
    /// Verbosity level
    pub verbosity: u8,
    /// Optional identifier of individual trace steps.
    pub debug_identifier: Option<DebugTraceIdentifier>,
    /// Disable showing of labels.
    pub disable_labels: bool,
}
impl CallTraceDecoder {
/// Creates a new call trace decoder.
///
/// The call trace decoder always knows how to decode calls to the cheatcode address, as well
/// as DSTest-style logs.
///
/// Returns a reference to a process-wide cached instance; `init` runs at most once.
pub fn new() -> &'static Self {
    // If you want to take arguments in this function, assign them to the fields of the cloned
    // lazy instead of removing it
    static INIT: OnceLock<CallTraceDecoder> = OnceLock::new();
    INIT.get_or_init(Self::init)
}
// Builds the baseline decoder state shared by all decoders: labels for well-known
// addresses, and pre-seeded function/event tables for cheatcodes and console logs.
#[instrument(name = "CallTraceDecoder::init", level = "debug")]
fn init() -> Self {
    Self {
        contracts: Default::default(),
        // Pre-label well-known addresses: the cheatcode VM, the hardhat console,
        // the default CREATE2 deployer/sender, and the standard precompiles.
        labels: HashMap::from_iter([
            (CHEATCODE_ADDRESS, "VM".to_string()),
            (HARDHAT_CONSOLE_ADDRESS, "console".to_string()),
            (DEFAULT_CREATE2_DEPLOYER, "Create2Deployer".to_string()),
            (CALLER, "DefaultSender".to_string()),
            (EC_RECOVER, "ECRecover".to_string()),
            (SHA_256, "SHA-256".to_string()),
            (RIPEMD_160, "RIPEMD-160".to_string()),
            (IDENTITY, "Identity".to_string()),
            (MOD_EXP, "ModExp".to_string()),
            (EC_ADD, "ECAdd".to_string()),
            (EC_MUL, "ECMul".to_string()),
            (EC_PAIRING, "ECPairing".to_string()),
            (BLAKE_2F, "Blake2F".to_string()),
            (POINT_EVALUATION, "PointEvaluation".to_string()),
        ]),
        receive_contracts: Default::default(),
        fallback_contracts: Default::default(),
        non_fallback_contracts: Default::default(),
        // Seed with the hardhat console and cheatcode (`Vm`) ABIs so those calls always decode.
        functions: console::hh::abi::functions()
            .into_values()
            .chain(Vm::abi::functions().into_values())
            .flatten()
            .map(|func| (func.selector(), vec![func]))
            .collect(),
        // Seed with DSTest-style console events, keyed by (topic0, indexed input count).
        events: console::ds::abi::events()
            .into_values()
            .flatten()
            .map(|event| ((event.selector(), indexed_inputs(&event)), vec![event]))
            .collect(),
        revert_decoder: Default::default(),
        signature_identifier: None,
        verbosity: 0,
        debug_identifier: None,
        disable_labels: false,
    }
}
/// Clears all known addresses.
///
/// Resets all per-address state so the decoder can be reused with a fresh set of
/// traces; the pre-seeded default labels (cheatcodes, precompiles) are kept.
pub fn clear_addresses(&mut self) {
    self.contracts.clear();
    // Restore the default label set without reallocating when no extra labels
    // were ever added (`len` can only grow relative to the defaults).
    let default_labels = &Self::new().labels;
    if self.labels.len() > default_labels.len() {
        self.labels.clone_from(default_labels);
    }
    self.receive_contracts.clear();
    self.fallback_contracts.clear();
    // Keep in sync with `fallback_contracts`: stale non-fallback entries would
    // otherwise make `decode_function` mislabel selectors for reused addresses.
    self.non_fallback_contracts.clear();
}
/// Identify unknown addresses in the specified call trace using the specified identifier.
///
/// Unknown contracts are contracts that either lack a label or an ABI.
pub fn identify(&mut self, arena: &CallTraceArena, identifier: &mut impl TraceIdentifier) {
    // Identify first (immutable borrow), then fold the results into our tables.
    self.collect_identified_addresses(self.identify_addresses(arena, identifier));
}
/// Identify unknown addresses in the specified call trace using the specified identifier.
///
/// Unknown contracts are contracts that either lack a label or an ABI.
pub fn identify_addresses<'a>(
    &self,
    arena: &CallTraceArena,
    identifier: &'a mut impl TraceIdentifier,
) -> Vec<IdentifiedAddress<'a>> {
    // Re-identify any node whose address is missing either a label or a contract name.
    let nodes = arena.nodes().iter().filter(|node| {
        let address = &node.trace.address;
        !self.labels.contains_key(address) || !self.contracts.contains_key(address)
    });
    identifier.identify_addresses(&nodes.collect::<Vec<_>>())
}
/// Adds a single event to the decoder.
///
/// Events are keyed by `(topic0, number of indexed inputs)`; multiple events may
/// share a key and are all kept as decoding candidates.
pub fn push_event(&mut self, event: Event) {
    self.events.entry((event.selector(), indexed_inputs(&event))).or_default().push(event);
}
/// Adds a single function to the decoder.
///
/// Selector collisions are kept as a list of candidates; exact duplicates are dropped.
pub fn push_function(&mut self, function: Function) {
    match self.functions.entry(function.selector()) {
        Entry::Occupied(entry) => {
            // This shouldn't happen that often.
            if entry.get().contains(&function) {
                return;
            }
            trace!(target: "evm::traces", selector=%entry.key(), new=%function.signature(), "duplicate function selector");
            entry.into_mut().push(function);
        }
        Entry::Vacant(entry) => {
            entry.insert(vec![function]);
        }
    }
}
/// Selects the appropriate function from a list of functions with the same selector
/// by checking which one belongs to the contract being called, this avoids collisions
/// where multiple different functions across different contracts have the same selector.
fn select_contract_function<'a>(
    &self,
    functions: &'a [Function],
    trace: &CallTrace,
) -> &'a [Function] {
    // Only disambiguate when there is an actual collision and the calldata carries
    // a selector. The correct candidate should decode the calldata successfully,
    // while the wrong ones fail due to parameter type mismatches.
    if functions.len() > 1 && trace.data.len() >= SELECTOR_LEN {
        let calldata = &trace.data[SELECTOR_LEN..];
        if let Some(func) = functions.iter().find(|f| f.abi_decode_input(calldata).is_ok()) {
            return std::slice::from_ref(func);
        }
    }
    functions
}
/// Adds a single error to the decoder.
///
/// Errors are accumulated in the revert decoder used when decoding revert data.
pub fn push_error(&mut self, error: Error) {
    self.revert_decoder.push_error(error);
}
/// Enables or disables the display of address labels in decoded traces.
pub fn without_label(&mut self, disable: bool) {
    self.disable_labels = disable;
}
// Folds identified addresses into the decoder's tables.
//
// Existing entries win: labels/contract names are only inserted for addresses
// not already known, while ABIs are always collected.
fn collect_identified_addresses(&mut self, mut addrs: Vec<IdentifiedAddress<'_>>) {
    // Dedupe by address; after the stable sort the first identity per address wins.
    addrs.sort_by_key(|identity| identity.address);
    addrs.dedup_by_key(|identity| identity.address);
    if addrs.is_empty() {
        return;
    }
    trace!(target: "evm::traces", len=addrs.len(), "collecting address identities");
    for IdentifiedAddress { address, label, contract, abi, artifact_id: _ } in addrs {
        let _span = trace_span!(target: "evm::traces", "identity", ?contract, ?label).entered();
        if let Some(contract) = contract {
            self.contracts.entry(address).or_insert(contract);
        }
        if let Some(label) = label {
            self.labels.entry(address).or_insert(label);
        }
        if let Some(abi) = abi {
            self.collect_abi(&abi, Some(address));
        }
    }
}
// Collects all items of `abi` into the decoder's tables.
//
// When `address` is given, also records whether that contract has `receive`
// and/or `fallback`, together with its full selector set.
fn collect_abi(&mut self, abi: &JsonAbi, address: Option<Address>) {
    let len = abi.len();
    if len == 0 {
        return;
    }
    trace!(target: "evm::traces", len, ?address, "collecting ABI");
    for function in abi.functions() {
        self.push_function(function.clone());
    }
    for event in abi.events() {
        self.push_event(event.clone());
    }
    for error in abi.errors() {
        self.push_error(error.clone());
    }
    if let Some(address) = address {
        if abi.receive.is_some() {
            self.receive_contracts.insert(address);
        }
        // A contract lands in exactly one of the two maps, depending on whether
        // it declares a fallback function.
        if abi.fallback.is_some() {
            self.fallback_contracts
                .insert(address, abi.functions().map(|f| f.selector()).collect());
        } else {
            self.non_fallback_contracts
                .insert(address, abi.functions().map(|f| f.selector()).collect());
        }
    }
}
/// Populates the traces with decoded data by mutating the
/// [CallTrace] in place. See [CallTraceDecoder::decode_function] and
/// [CallTraceDecoder::decode_event] for more details.
pub async fn populate_traces(&self, traces: &mut Vec<CallTraceNode>) {
    for node in traces {
        node.trace.decoded = Some(Box::new(self.decode_function(&node.trace).await));
        for log in &mut node.logs {
            log.decoded = Some(Box::new(self.decode_event(&log.raw_log).await));
        }
        // Only nodes whose contract was identified can have their steps decoded.
        if let Some(debug) = self.debug_identifier.as_ref()
            && let Some(identified) = self.contracts.get(&node.trace.address)
        {
            debug.identify_node_steps(node, get_contract_name(identified))
        }
    }
}
/// Decodes a call trace.
///
/// Resolution order: creates and precompiles first, then the CREATE2 deployer,
/// then ABI-shaped calldata via known functions (falling back to the signature
/// identifier), and finally `receive`/`fallback` handling.
pub async fn decode_function(&self, trace: &CallTrace) -> DecodedCallTrace {
    let label =
        if self.disable_labels { None } else { self.labels.get(&trace.address).cloned() };
    // Creates have no function selector to decode.
    if trace.kind.is_any_create() {
        return DecodedCallTrace { label, ..Default::default() };
    }
    if let Some(trace) = precompiles::decode(trace, 1) {
        return trace;
    }
    let cdata = &trace.data;
    if trace.address == DEFAULT_CREATE2_DEPLOYER {
        return DecodedCallTrace {
            label,
            call_data: Some(DecodedCallData { signature: "create2".to_string(), args: vec![] }),
            return_data: self.default_return_data(trace),
        };
    }
    if is_abi_call_data(cdata) {
        let selector = Selector::try_from(&cdata[..SELECTOR_LEN]).unwrap();
        // Fallback storage when the selector is unknown locally and must be
        // resolved through the external signature identifier.
        let mut functions = Vec::new();
        let functions = match self.functions.get(&selector) {
            Some(fs) => fs,
            None => {
                if let Some(identifier) = &self.signature_identifier
                    && let Some(function) = identifier.identify_function(selector).await
                {
                    functions.push(function);
                }
                &functions
            }
        };
        // Check if unsupported fn selector: calldata does NOT point to one of its selectors +
        // non-fallback contract + no receive
        if let Some(contract_selectors) = self.non_fallback_contracts.get(&trace.address)
            && !contract_selectors.contains(&selector)
            && (!cdata.is_empty() || !self.receive_contracts.contains(&trace.address))
        {
            let return_data = if !trace.success {
                let revert_msg = self.revert_decoder.decode(&trace.output, trace.status);
                // Prefer an explanatory message over a generic/empty revert.
                if trace.output.is_empty() || revert_msg.contains("EvmError: Revert") {
                    Some(format!(
                        "unrecognized function selector {} for contract {}, which has no fallback function.",
                        selector, trace.address
                    ))
                } else {
                    Some(revert_msg)
                }
            } else {
                None
            };
            return if let Some(func) = functions.first() {
                DecodedCallTrace {
                    label,
                    call_data: Some(self.decode_function_input(trace, func)),
                    return_data,
                }
            } else {
                DecodedCallTrace {
                    label,
                    call_data: self.fallback_call_data(trace),
                    return_data,
                }
            };
        }
        // Narrow selector collisions down to the function that actually decodes.
        let contract_functions = self.select_contract_function(functions, trace);
        let [func, ..] = contract_functions else {
            return DecodedCallTrace {
                label,
                call_data: self.fallback_call_data(trace),
                return_data: self.default_return_data(trace),
            };
        };
        // If traced contract is a fallback contract, check if it has the decoded function.
        // If not, then replace call data signature with `fallback`.
        let mut call_data = self.decode_function_input(trace, func);
        if let Some(fallback_functions) = self.fallback_contracts.get(&trace.address)
            && !fallback_functions.contains(&selector)
            && let Some(cd) = self.fallback_call_data(trace)
        {
            call_data.signature = cd.signature;
        }
        DecodedCallTrace {
            label,
            call_data: Some(call_data),
            return_data: self.decode_function_output(trace, contract_functions),
        }
    } else {
        // Non-ABI calldata: plain transfer or raw bytes into receive/fallback.
        DecodedCallTrace {
            label,
            call_data: self.fallback_call_data(trace),
            return_data: self.default_return_data(trace),
        }
    }
}
/// Decodes a function's input into the given trace.
///
/// Cheatcode calls get custom (redacting) decoding first; everything else falls
/// back to plain ABI decoding of the post-selector calldata.
fn decode_function_input(&self, trace: &CallTrace, func: &Function) -> DecodedCallData {
    let mut args = None;
    if trace.data.len() >= SELECTOR_LEN {
        if trace.address == CHEATCODE_ADDRESS {
            // Try to decode cheatcode inputs in a more custom way
            if let Some(v) = self.decode_cheatcode_inputs(func, &trace.data) {
                args = Some(v);
            }
        }
        if args.is_none()
            && let Ok(v) = func.abi_decode_input(&trace.data[SELECTOR_LEN..])
        {
            args = Some(v.iter().map(|value| self.format_value(value)).collect());
        }
    }
    // Decoding failures yield an empty argument list, not an error.
    DecodedCallData { signature: func.signature(), args: args.unwrap_or_default() }
}
/// Custom decoding for cheatcode inputs.
///
/// Mostly redacts sensitive values (private keys, secrets, RPC URLs) and
/// abbreviates noisy JSON/TOML payloads at low verbosity. Returns `None` to
/// fall back to plain ABI decoding.
fn decode_cheatcode_inputs(&self, func: &Function, data: &[u8]) -> Option<Vec<String>> {
    match func.name.as_str() {
        "expectRevert" => Some(vec![self.revert_decoder.decode(data, None)]),
        "addr" | "createWallet" | "deriveKey" | "rememberKey" => {
            // Redact private key in all cases
            Some(vec!["<pk>".to_string()])
        }
        "broadcast" | "startBroadcast" => {
            // Redact private key if defined
            // broadcast(uint256) / startBroadcast(uint256)
            if !func.inputs.is_empty() && func.inputs[0].ty == "uint256" {
                Some(vec!["<pk>".to_string()])
            } else {
                None
            }
        }
        "getNonce" => {
            // Redact private key if defined
            // getNonce(Wallet)
            if !func.inputs.is_empty() && func.inputs[0].ty == "tuple" {
                Some(vec!["<pk>".to_string()])
            } else {
                None
            }
        }
        "sign" | "signP256" => {
            let mut decoded = func.abi_decode_input(&data[SELECTOR_LEN..]).ok()?;
            // Redact private key and replace in trace
            // sign(uint256,bytes32) / signP256(uint256,bytes32) / sign(Wallet,bytes32)
            if !decoded.is_empty() &&
                (func.inputs[0].ty == "uint256" || func.inputs[0].ty == "tuple")
            {
                decoded[0] = DynSolValue::String("<pk>".to_string());
            }
            Some(decoded.iter().map(format_token).collect())
        }
        "signDelegation" | "signAndAttachDelegation" => {
            let mut decoded = func.abi_decode_input(&data[SELECTOR_LEN..]).ok()?;
            // Redact private key and replace in trace for
            // signAndAttachDelegation(address implementation, uint256 privateKey)
            // signDelegation(address implementation, uint256 privateKey)
            decoded[1] = DynSolValue::String("<pk>".to_string());
            Some(decoded.iter().map(format_token).collect())
        }
        "parseJson" |
        "parseJsonUint" |
        "parseJsonUintArray" |
        "parseJsonInt" |
        "parseJsonIntArray" |
        "parseJsonString" |
        "parseJsonStringArray" |
        "parseJsonAddress" |
        "parseJsonAddressArray" |
        "parseJsonBool" |
        "parseJsonBoolArray" |
        "parseJsonBytes" |
        "parseJsonBytesArray" |
        "parseJsonBytes32" |
        "parseJsonBytes32Array" |
        "writeJson" |
        // `keyExists` is being deprecated in favor of `keyExistsJson`. It will be removed in future versions.
        "keyExists" |
        "keyExistsJson" |
        "serializeBool" |
        "serializeUint" |
        "serializeUintToHex" |
        "serializeInt" |
        "serializeAddress" |
        "serializeBytes32" |
        "serializeString" |
        "serializeBytes" => {
            // At verbosity >= 5 show the raw JSON; otherwise abbreviate it.
            if self.verbosity >= 5 {
                None
            } else {
                let mut decoded = func.abi_decode_input(&data[SELECTOR_LEN..]).ok()?;
                let token = if func.name.as_str() == "parseJson" ||
                    // `keyExists` is being deprecated in favor of `keyExistsJson`. It will be removed in future versions.
                    func.name.as_str() == "keyExists" ||
                    func.name.as_str() == "keyExistsJson"
                {
                    "<JSON file>"
                } else {
                    "<stringified JSON>"
                };
                decoded[0] = DynSolValue::String(token.to_string());
                Some(decoded.iter().map(format_token).collect())
            }
        }
        s if s.contains("Toml") => {
            // Same abbreviation scheme as the JSON cheatcodes above.
            if self.verbosity >= 5 {
                None
            } else {
                let mut decoded = func.abi_decode_input(&data[SELECTOR_LEN..]).ok()?;
                let token = if func.name.as_str() == "parseToml" ||
                    func.name.as_str() == "keyExistsToml"
                {
                    "<TOML file>"
                } else {
                    "<stringified TOML>"
                };
                decoded[0] = DynSolValue::String(token.to_string());
                Some(decoded.iter().map(format_token).collect())
            }
        }
        "createFork" |
        "createSelectFork" |
        "rpc" => {
            let mut decoded = func.abi_decode_input(&data[SELECTOR_LEN..]).ok()?;
            // Redact RPC URL except if referenced by an alias
            if !decoded.is_empty() && func.inputs[0].ty == "string" {
                let url_or_alias = decoded[0].as_str().unwrap_or_default();
                if url_or_alias.starts_with("http") || url_or_alias.starts_with("ws") {
                    decoded[0] = DynSolValue::String("<rpc url>".to_string());
                }
            } else {
                return None;
            }
            Some(decoded.iter().map(format_token).collect())
        }
        _ => None,
    }
}
/// Decodes a function's output into the given trace.
///
/// Tries each candidate function until one decodes the output; cheatcode
/// outputs get custom (redacting) handling first.
fn decode_function_output(&self, trace: &CallTrace, funcs: &[Function]) -> Option<String> {
    if !trace.success {
        return self.default_return_data(trace);
    }
    if trace.address == CHEATCODE_ADDRESS
        && let Some(decoded) = funcs.iter().find_map(|func| self.decode_cheatcode_outputs(func))
    {
        return Some(decoded);
    }
    if let Some(values) =
        funcs.iter().find_map(|func| func.abi_decode_output(&trace.output).ok())
    {
        // Functions coming from an external database do not have any outputs specified,
        // and will lead to returning an empty list of values.
        if values.is_empty() {
            return None;
        }
        return Some(
            values.iter().map(|value| self.format_value(value)).format(", ").to_string(),
        );
    }
    None
}
/// Custom decoding for cheatcode outputs.
///
/// Returns a redaction placeholder for sensitive or noisy outputs, or `None`
/// to fall back to plain ABI decoding.
fn decode_cheatcode_outputs(&self, func: &Function) -> Option<String> {
    let redacted: &str = match func.name.as_str() {
        name if name.starts_with("env") => "<env var value>",
        "createWallet" | "deriveKey" => "<pk>",
        "promptSecret" | "promptSecretUint" => "<secret>",
        // JSON/file contents are only shown verbatim at verbosity >= 5.
        "parseJson" if self.verbosity < 5 => "<encoded JSON value>",
        "readFile" if self.verbosity < 5 => "<file>",
        "rpcUrl" | "rpcUrls" | "rpcUrlStructs" => "<rpc url>",
        _ => return None,
    };
    Some(redacted.to_string())
}
// Returns `receive()`/`fallback()` call data for contracts known to declare
// those functions, with the raw calldata (if any) as the single argument.
// Returns `None` when the target declares neither.
#[track_caller]
fn fallback_call_data(&self, trace: &CallTrace) -> Option<DecodedCallData> {
    let cdata = &trace.data;
    // Empty calldata into a `receive` contract is a plain ether transfer.
    let signature = if cdata.is_empty() && self.receive_contracts.contains(&trace.address) {
        "receive()"
    } else if self.fallback_contracts.contains_key(&trace.address) {
        "fallback()"
    } else {
        return None;
    }
    .to_string();
    let args = if cdata.is_empty() { Vec::new() } else { vec![cdata.to_string()] };
    Some(DecodedCallData { signature, args })
}
/// The default decoded return data for a trace.
///
/// Only reverted traces produce decoded return data (the revert reason).
fn default_return_data(&self, trace: &CallTrace) -> Option<String> {
    // For calls with status None or successful status, don't decode revert data
    // This is due to trace.status is derived from the revm_interpreter::InstructionResult in
    // revm-inspectors status will `None` post revm 27, as `InstructionResult::Continue` does
    // not exists anymore.
    if trace.status.is_none() || trace.status.is_some_and(|s| s.is_ok()) {
        return None;
    }
    (!trace.success).then(|| self.revert_decoder.decode(&trace.output, trace.status))
}
/// Decodes an event.
///
/// Looks up candidates by `(topic0, indexed topic count)`, falling back to the
/// external signature identifier; the first candidate that decodes wins.
pub async fn decode_event(&self, log: &LogData) -> DecodedCallLog {
    // Anonymous logs (no topic0) cannot be matched to an event.
    let &[t0, ..] = log.topics() else { return DecodedCallLog { name: None, params: None } };
    // Fallback storage when the event is resolved through the signature identifier.
    let mut events = Vec::new();
    let events = match self.events.get(&(t0, log.topics().len() - 1)) {
        Some(es) => es,
        None => {
            if let Some(identifier) = &self.signature_identifier
                && let Some(event) = identifier.identify_event(t0).await
            {
                events.push(get_indexed_event(event, log));
            }
            &events
        }
    };
    for event in events {
        if let Ok(decoded) = event.decode_log(log) {
            // Restore declaration order of indexed/unindexed params.
            let params = reconstruct_params(event, &decoded);
            return DecodedCallLog {
                name: Some(event.name.clone()),
                params: Some(
                    params
                        .into_iter()
                        .zip(event.inputs.iter())
                        .map(|(param, input)| {
                            // undo patched names
                            let name = input.name.clone();
                            (name, self.format_value(&param))
                        })
                        .collect(),
                ),
            };
        }
    }
    DecodedCallLog { name: None, params: None }
}
/// Prefetches function and event signatures into the identifier cache
///
/// Collects all unknown event topics and function selectors from the given
/// nodes and resolves them in a single batched identifier call.
pub async fn prefetch_signatures(&self, nodes: &[CallTraceNode]) {
    let Some(identifier) = &self.signature_identifier else { return };
    // Event topic0s not already present in the local event table.
    let events = nodes
        .iter()
        .flat_map(|node| {
            node.logs
                .iter()
                .map(|log| log.raw_log.topics())
                .filter(|&topics| {
                    if let Some(&first) = topics.first()
                        && self.events.contains_key(&(first, topics.len() - 1))
                    {
                        return false;
                    }
                    true
                })
                .filter_map(|topics| topics.first())
        })
        .copied();
    // Function selectors worth resolving: skip precompiles, creates,
    // non-ABI calldata, and selectors already known locally.
    let functions = nodes
        .iter()
        .filter(|&n| {
            // Ignore known addresses.
            if n.trace.address == DEFAULT_CREATE2_DEPLOYER
                || n.is_precompile()
                || precompiles::is_known_precompile(n.trace.address, 1)
            {
                return false;
            }
            // Ignore non-ABI calldata.
            if n.trace.kind.is_any_create() || !is_abi_call_data(&n.trace.data) {
                return false;
            }
            true
        })
        .filter_map(|n| n.trace.data.first_chunk().map(Selector::from))
        .filter(|selector| !self.functions.contains_key(selector));
    let selectors = events
        .map(SelectorKind::Event)
        .chain(functions.map(SelectorKind::Function))
        .unique()
        .collect::<Vec<_>>();
    let _ = identifier.identify(&selectors).await;
}
/// Pretty-prints a value.
///
/// Labeled addresses render as `label: [address]`; everything else uses the
/// generic token formatter.
fn format_value(&self, value: &DynSolValue) -> String {
    match value {
        DynSolValue::Address(addr) => match self.labels.get(addr) {
            Some(label) => format!("{label}: [{addr}]"),
            None => format_token(value),
        },
        _ => format_token(value),
    }
}
}
/// Returns `true` if the given function calldata (including function selector) is ABI-encoded.
///
/// This is a simple heuristic to avoid fetching non ABI-encoded selectors.
fn is_abi_call_data(data: &[u8]) -> bool {
    // Too short to even contain a selector.
    if data.len() < SELECTOR_LEN {
        return false;
    }
    // A bare selector is fine; otherwise the arguments must look ABI-encoded.
    let args = &data[SELECTOR_LEN..];
    args.is_empty() || is_abi_data(args)
}
/// Returns `true` if the given data is ABI-encoded.
///
/// See [`is_abi_call_data`] for more details.
fn is_abi_data(data: &[u8]) -> bool {
    match data.len() % 32 {
        // Empty data and exact multiples of the 32-byte word size are accepted.
        0 => true,
        // Otherwise, accept only when the trailing partial word is all zeros.
        rem => data[data.len() - rem..].iter().all(|&byte| byte == 0),
    }
}
/// Restore the order of the params of a decoded event,
/// as Alloy returns the indexed and unindexed params separately.
fn reconstruct_params(event: &Event, decoded: &DecodedEvent) -> Vec<DynSolValue> {
    // Cursors into the decoded indexed / unindexed value lists.
    let mut indexed = 0;
    let mut unindexed = 0;
    let mut inputs = vec![];
    for input in &event.inputs {
        // Prevent panic of event `Transfer(from, to)` decoded with a signature
        // `Transfer(address indexed from, address indexed to, uint256 indexed tokenId)` by making
        // sure the event inputs is not higher than decoded indexed / un-indexed values.
        if input.indexed && indexed < decoded.indexed.len() {
            inputs.push(decoded.indexed[indexed].clone());
            indexed += 1;
        } else if unindexed < decoded.body.len() {
            inputs.push(decoded.body[unindexed].clone());
            unindexed += 1;
        }
    }
    inputs
}
/// Returns the number of indexed parameters declared by `event`.
fn indexed_inputs(event: &Event) -> usize {
    event.inputs.iter().map(|param| usize::from(param.indexed)).sum()
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::hex;
#[test]
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | true |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/decoder/precompiles.rs | crates/evm/traces/src/decoder/precompiles.rs | use crate::{CallTrace, DecodedCallData};
use alloy_primitives::{Address, B256, U256, hex};
use alloy_sol_types::{SolCall, abi, sol};
use foundry_evm_core::precompiles::{
BLAKE_2F, EC_ADD, EC_MUL, EC_PAIRING, EC_RECOVER, IDENTITY, MOD_EXP, POINT_EVALUATION,
RIPEMD_160, SHA_256,
};
use itertools::Itertools;
use revm_inspectors::tracing::types::DecodedCallTrace;
// Strongly-typed views of the precompile call/return data. The `sol!` macro
// generates `*Call` structs (with `SIGNATURE` constants) used by the decoders below.
sol! {
    /// EVM precompiles interface. For illustration purposes only, as precompiles don't follow the
    /// Solidity ABI codec.
    ///
    /// Parameter names and types are taken from [evm.codes](https://www.evm.codes/precompiled).
    interface Precompiles {
        struct EcPairingInput {
            uint256 x1;
            uint256 y1;
            uint256 x2;
            uint256 y2;
            uint256 x3;
            uint256 y3;
        }
        /* 0x01 */ function ecrecover(bytes32 hash, uint8 v, uint256 r, uint256 s) returns (address publicAddress);
        /* 0x02 */ function sha256(bytes data) returns (bytes32 hash);
        /* 0x03 */ function ripemd(bytes data) returns (bytes20 hash);
        /* 0x04 */ function identity(bytes data) returns (bytes data);
        /* 0x05 */ function modexp(uint256 Bsize, uint256 Esize, uint256 Msize, bytes B, bytes E, bytes M) returns (bytes value);
        /* 0x06 */ function ecadd(uint256 x1, uint256 y1, uint256 x2, uint256 y2) returns (uint256 x, uint256 y);
        /* 0x07 */ function ecmul(uint256 x1, uint256 y1, uint256 s) returns (uint256 x, uint256 y);
        /* 0x08 */ function ecpairing(EcPairingInput[] input) returns (bool success);
        /* 0x09 */ function blake2f(uint32 rounds, uint64[8] h, uint64[16] m, uint64[2] t, bool f) returns (uint64[8] h);
        /* 0x0a */ function pointEvaluation(bytes32 versionedHash, bytes32 z, bytes32 y, bytes1[48] commitment, bytes1[48] proof) returns (bytes value);
    }
}
use Precompiles::*;
// Returns `true` if `address` is one of the standard precompiles handled here.
//
// `_chain_id` is currently unused: only the mainnet precompile set (0x01..=0x0a)
// is recognized.
pub(super) fn is_known_precompile(address: Address, _chain_id: u64) -> bool {
    // Fast path: all handled precompiles live in the last address byte, so the
    // first 19 bytes must be zero before comparing against the constants.
    address[..19].iter().all(|&x| x == 0)
        && matches!(
            address,
            EC_RECOVER
                | SHA_256
                | RIPEMD_160
                | IDENTITY
                | MOD_EXP
                | EC_ADD
                | EC_MUL
                | EC_PAIRING
                | BLAKE_2F
                | POINT_EVALUATION
        )
}
/// Tries to decode a precompile call. Returns `Some` if successful.
pub(super) fn decode(trace: &CallTrace, _chain_id: u64) -> Option<DecodedCallTrace> {
    if !is_known_precompile(trace.address, _chain_id) {
        return None;
    }
    // Find the decoder registered for this address; bail out gracefully if none matches.
    let precompile = *PRECOMPILES.iter().find(|p| p.address() == trace.address)?;

    let signature = precompile.signature(&trace.data);
    // On decode failure, fall back to the raw hex of the calldata/output.
    let args = precompile.decode_call(&trace.data).unwrap_or_else(|_| vec![trace.data.to_string()]);
    let mut ret =
        precompile.decode_return(&trace.output).unwrap_or_else(|_| vec![trace.output.to_string()]);
    // Single return values are shown bare; multiple values as a parenthesized tuple.
    let return_data = if ret.len() == 1 { ret.remove(0) } else { format!("({})", ret.join(", ")) };

    Some(DecodedCallTrace {
        label: Some("PRECOMPILES".to_string()),
        call_data: Some(DecodedCallData { signature: signature.to_string(), args }),
        return_data: Some(return_data),
    })
}
/// Per-precompile decoding hooks.
///
/// The default `decode_call`/`decode_return` implementations fall back to
/// hex-encoding the raw bytes.
pub(super) trait Precompile {
    /// The address of this precompile.
    fn address(&self) -> Address;
    /// The human-readable signature; may be chosen based on the calldata.
    fn signature(&self, data: &[u8]) -> &'static str;
    /// Decodes the calldata into a list of formatted arguments.
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        Ok(vec![hex::encode_prefixed(data)])
    }
    /// Decodes the return data into a list of formatted values.
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        Ok(vec![hex::encode_prefixed(data)])
    }
}
// Note: we use the ABI decoder, but this is not necessarily ABI-encoded data. It's just a
// convenient way to decode the data.
//
// All decoders, in precompile-address order (0x01..=0x0a).
const PRECOMPILES: &[&dyn Precompile] = &[
    &Ecrecover,
    &Sha256,
    &Ripemd160,
    &Identity,
    &ModExp,
    &EcAdd,
    &Ecmul,
    &Ecpairing,
    &Blake2f,
    &PointEvaluation,
];
/// 0x01: `ecrecover` — signature recovery.
struct Ecrecover;
impl Precompile for Ecrecover {
    fn address(&self) -> Address {
        EC_RECOVER
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        ecrecoverCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ecrecoverCall { hash, v, r, s } = ecrecoverCall::abi_decode_raw(data)?;
        Ok(vec![hash.to_string(), v.to_string(), r.to_string(), s.to_string()])
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ret = ecrecoverCall::abi_decode_returns(data)?;
        Ok(vec![ret.to_string()])
    }
}
/// 0x02: `sha256` — calldata is raw bytes, so only the return hash is decoded.
struct Sha256;
impl Precompile for Sha256 {
    fn address(&self) -> Address {
        SHA_256
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        sha256Call::SIGNATURE
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ret = sha256Call::abi_decode_returns(data)?;
        Ok(vec![ret.to_string()])
    }
}
/// 0x03: `ripemd` — calldata is raw bytes, so only the return hash is decoded.
struct Ripemd160;
impl Precompile for Ripemd160 {
    fn address(&self) -> Address {
        RIPEMD_160
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        ripemdCall::SIGNATURE
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ret = ripemdCall::abi_decode_returns(data)?;
        Ok(vec![ret.to_string()])
    }
}
/// 0x04: `identity` — input and output are the same raw bytes; the default
/// hex-encoding decoders suffice.
struct Identity;
impl Precompile for Identity {
    fn address(&self) -> Address {
        IDENTITY
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        identityCall::SIGNATURE
    }
}
/// 0x05: `modexp` — three 32-byte length words followed by the packed
/// base/exponent/modulus byte arrays (not standard ABI encoding).
struct ModExp;
impl Precompile for ModExp {
    fn address(&self) -> Address {
        MOD_EXP
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        modexpCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let mut decoder = abi::Decoder::new(data);
        // Length header: sizes of B, E and M in bytes.
        let b_size = decoder.take_offset()?;
        let e_size = decoder.take_offset()?;
        let m_size = decoder.take_offset()?;
        // The three operands follow back-to-back, each `*_size` bytes long.
        let b = decoder.take_slice(b_size)?;
        let e = decoder.take_slice(e_size)?;
        let m = decoder.take_slice(m_size)?;
        Ok(vec![
            b_size.to_string(),
            e_size.to_string(),
            m_size.to_string(),
            hex::encode_prefixed(b),
            hex::encode_prefixed(e),
            hex::encode_prefixed(m),
        ])
    }
}
/// 0x06: `ecadd` — BN254 point addition.
struct EcAdd;
impl Precompile for EcAdd {
    fn address(&self) -> Address {
        EC_ADD
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        ecaddCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ecaddCall { x1, y1, x2, y2 } = ecaddCall::abi_decode_raw(data)?;
        Ok(vec![x1.to_string(), y1.to_string(), x2.to_string(), y2.to_string()])
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ecaddReturn { x, y } = ecaddCall::abi_decode_returns(data)?;
        Ok(vec![x.to_string(), y.to_string()])
    }
}
/// 0x07: `ecmul` — BN254 scalar multiplication.
struct Ecmul;
impl Precompile for Ecmul {
    fn address(&self) -> Address {
        EC_MUL
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        ecmulCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ecmulCall { x1, y1, s } = ecmulCall::abi_decode_raw(data)?;
        Ok(vec![x1.to_string(), y1.to_string(), s.to_string()])
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ecmulReturn { x, y } = ecmulCall::abi_decode_returns(data)?;
        Ok(vec![x.to_string(), y.to_string()])
    }
}
/// 0x08: `ecpairing` — input is a packed sequence of 6-word point groups
/// (not standard ABI encoding).
struct Ecpairing;
impl Precompile for Ecpairing {
    fn address(&self) -> Address {
        EC_PAIRING
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        ecpairingCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let mut decoder = abi::Decoder::new(data);
        let mut values = Vec::new();
        // input must be either empty or a multiple of 6 32-byte values
        let mut tmp = <[&B256; 6]>::default();
        while !decoder.is_empty() {
            for tmp in &mut tmp {
                *tmp = decoder.take_word()?;
            }
            // Render each 6-word group as a bracketed list of uint256s.
            values.push(iter_to_string(tmp.iter().map(|x| U256::from_be_bytes(x.0))));
        }
        Ok(values)
    }
    fn decode_return(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        let ret = ecpairingCall::abi_decode_returns(data)?;
        Ok(vec![ret.to_string()])
    }
}
/// 0x09: `blake2f` — tightly packed input; see [`decode_blake2f`].
struct Blake2f;
impl Precompile for Blake2f {
    fn address(&self) -> Address {
        BLAKE_2F
    }
    fn signature(&self, _: &[u8]) -> &'static str {
        blake2fCall::SIGNATURE
    }
    fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
        decode_blake2f(data)
    }
}
// Decodes the tightly packed blake2f input: a big-endian u32 round count,
// then `h` (8), `m` (16) and `t` (2) as little-endian u64 arrays, then the
// final-block flag byte.
fn decode_blake2f<'a>(data: &'a [u8]) -> alloy_sol_types::Result<Vec<String>> {
    let mut decoder = abi::Decoder::new(data);
    let rounds = u32::from_be_bytes(decoder.take_slice(4)?.try_into().unwrap());
    // Helper: reinterpret a byte slice as a sequence of little-endian u64s.
    let u64_le_list =
        |x: &'a [u8]| x.chunks_exact(8).map(|x| u64::from_le_bytes(x.try_into().unwrap()));
    let h = u64_le_list(decoder.take_slice(64)?);
    let m = u64_le_list(decoder.take_slice(128)?);
    let t = u64_le_list(decoder.take_slice(16)?);
    let f = decoder.take_slice(1)?[0];
    Ok(vec![
        rounds.to_string(),
        iter_to_string(h),
        iter_to_string(m),
        iter_to_string(t),
        f.to_string(),
    ])
}
struct PointEvaluation;
impl Precompile for PointEvaluation {
fn address(&self) -> Address {
POINT_EVALUATION
}
fn signature(&self, _: &[u8]) -> &'static str {
pointEvaluationCall::SIGNATURE
}
fn decode_call(&self, data: &[u8]) -> alloy_sol_types::Result<Vec<String>> {
let mut decoder = abi::Decoder::new(data);
let versioned_hash = decoder.take_word()?;
let z = decoder.take_word()?;
let y = decoder.take_word()?;
let commitment = decoder.take_slice(48)?;
let proof = decoder.take_slice(48)?;
Ok(vec![
versioned_hash.to_string(),
z.to_string(),
y.to_string(),
hex::encode_prefixed(commitment),
hex::encode_prefixed(proof),
])
}
}
fn iter_to_string<I: Iterator<Item = T>, T: std::fmt::Display>(iter: I) -> String {
format!("[{}]", iter.format(", "))
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_primitives::hex;
#[test]
fn ecpairing() {
// https://github.com/foundry-rs/foundry/issues/5337#issuecomment-1627384480
let data = hex!(
"
26bbb723f965460ca7282cd75f0e3e7c67b15817f7cee60856b394936ed02917
0fbe873ac672168143a91535450bab6c412dce8dc8b66a88f2da6e245f9282df
13cd4f0451538ece5014fe6688b197aefcc611a5c6a7c319f834f2188ba04b08
126ff07e81490a1b6ae92b2d9e700c8e23e9d5c7f6ab857027213819a6c9ae7d
04183624c9858a56c54deb237c26cb4355bc2551312004e65fc5b299440b15a3
2e4b11aa549ad6c667057b18be4f4437fda92f018a59430ebb992fa3462c9ca1
2d4d9aa7e302d9df41749d5507949d05dbea33fbb16c643b22f599a2be6df2e2
14bedd503c37ceb061d8ec60209fe345ce89830a19230301f076caff004d1926
0967032fcbf776d1afc985f88877f182d38480a653f2decaa9794cbc3bf3060c
0e187847ad4c798374d0d6732bf501847dd68bc0e071241e0213bc7fc13db7ab
304cfbd1e08a704a99f5e847d93f8c3caafddec46b7a0d379da69a4d112346a7
1739c1b1a457a8c7313123d24d2f9192f896b7c63eea05a9d57f06547ad0cec8
001d6fedb032f70e377635238e0563f131670001f6abf439adb3a9d5d52073c6
1889afe91e4e367f898a7fcd6464e5ca4e822fe169bccb624f6aeb87e4d060bc
198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2
1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed
090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b
12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa
2dde6d7baf0bfa09329ec8d44c38282f5bf7f9ead1914edd7dcaebb498c84519
0c359f868a85c6e6c1ea819cfab4a867501a3688324d74df1fe76556558b1937
29f41c6e0e30802e2749bfb0729810876f3423e6f24829ad3e30adb1934f1c8a
030e7a5f70bb5daa6e18d80d6d447e772efb0bb7fb9d0ffcd54fc5a48af1286d
0ea726b117e48cda8bce2349405f006a84cdd3dcfba12efc990df25970a27b6d
30364cd4f8a293b1a04f0153548d3e01baad091c69097ca4e9f26be63e4095b5
"
);
let decoded = Ecpairing.decode_call(&data).unwrap();
// 4 arrays of 6 32-byte values
assert_eq!(decoded.len(), 4);
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/backtrace/source_map.rs | crates/evm/traces/src/backtrace/source_map.rs | //! Source map decoding and PC mapping utilities.
use alloy_primitives::Bytes;
use foundry_compilers::{ProjectCompileOutput, artifacts::sourcemap::SourceMap};
use foundry_evm_core::ic::IcPcMap;
use std::path::{Path, PathBuf};
/// Source data for a single contract.
#[derive(Debug, Clone)]
pub struct SourceData {
/// Runtime source map for the contract
pub source_map: SourceMap,
/// Deployed bytecode for accurate PC mapping
pub bytecode: Bytes,
}
/// Maps program counters to source locations.
pub struct PcSourceMapper<'a> {
/// Mapping from instruction counter to program counter.
ic_pc_map: IcPcMap,
/// Source data consists of the source_map and the deployed bytecode
source_data: SourceData,
/// Source files i.e source path and content (indexed by source_id)
sources: &'a [(PathBuf, String)],
/// Cached line offset mappings for each source file.
line_offsets: Vec<Vec<usize>>,
}
impl<'a> PcSourceMapper<'a> {
/// Creates a new PC to source mapper.
pub fn new(source_data: SourceData, sources: &'a [(PathBuf, String)]) -> Self {
// Build instruction counter to program counter mapping
let ic_pc_map = IcPcMap::new(source_data.bytecode.as_ref());
// Pre-calculate line offsets for each source file
let line_offsets =
sources.iter().map(|(_, content)| compute_line_offsets(content)).collect();
Self { ic_pc_map, source_data, sources, line_offsets }
}
/// Maps a program counter to source location.
pub fn map_pc(&self, pc: usize) -> Option<SourceLocation> {
// Find the instruction counter for this PC
let ic = self.find_instruction_counter(pc)?;
// Get the source element for this instruction
let element = self.source_data.source_map.get(ic)?;
// Get the source file index - returns None if index is -1
let source_idx_opt = element.index();
let source_idx = source_idx_opt? as usize;
if source_idx >= self.sources.len() {
return None;
}
// Get the source file info
let (file_path, content) = &self.sources[source_idx];
// Convert byte offset to line and column
let offset = element.offset() as usize;
// Check if offset is valid for this source file
if offset >= content.len() {
return None;
}
let (line, column) = self.offset_to_line_column(source_idx, offset)?;
trace!(
file = ?file_path,
line = line,
column = column,
offset = offset,
"Mapped PC to source location"
);
Some(SourceLocation {
file: file_path.clone(),
line,
column,
length: element.length() as usize,
offset,
})
}
/// Finds the instruction counter for a given program counter.
fn find_instruction_counter(&self, pc: usize) -> Option<usize> {
// The IcPcMap maps IC -> PC, we need the reverse
// We find the highest IC that has a PC <= our target PC
let mut best_ic = None;
let mut best_pc = 0;
for (ic, mapped_pc) in self.ic_pc_map.iter() {
let mapped_pc = *mapped_pc as usize;
if mapped_pc <= pc && mapped_pc >= best_pc {
best_pc = mapped_pc;
best_ic = Some(*ic as usize);
}
}
best_ic
}
/// Converts a byte offset to line and column numbers.
///
/// Returned lines and column numbers are 1-indexed.
fn offset_to_line_column(&self, source_idx: usize, offset: usize) -> Option<(usize, usize)> {
let line_offsets = self.line_offsets.get(source_idx)?;
// Find the line containing this offset
let line = line_offsets.binary_search(&offset).unwrap_or_else(|i| i.saturating_sub(1));
// Calculate column within the line
let line_start = if line == 0 { 0 } else { line_offsets[line - 1] + 1 };
let column = offset.saturating_sub(line_start);
// Lines and columns are 1-indexed
Some((line + 1, column + 1))
}
}
/// Represents a location in source code.
#[derive(Debug, Clone)]
pub struct SourceLocation {
pub file: PathBuf,
pub line: usize,
pub column: usize,
pub length: usize,
/// Byte offset in the source file
/// This specifically useful when one source file contains multiple contracts / libraries.
pub offset: usize,
}
/// Computes line offset positions in source content.
fn compute_line_offsets(content: &str) -> Vec<usize> {
let mut offsets = vec![0];
offsets.extend(memchr::memchr_iter(b'\n', content.as_bytes()));
offsets
}
/// Loads sources for a specific ArtifactId.build_id
pub fn load_build_sources(
build_id: &str,
output: &ProjectCompileOutput,
root: &Path,
) -> Option<Vec<(PathBuf, String)>> {
let build_ctx = output.builds().find(|(bid, _)| *bid == build_id).map(|(_, ctx)| ctx)?;
// Determine the size needed for sources vector
// Highest source_id
let max_source_id = build_ctx.source_id_to_path.keys().max().map_or(0, |id| *id) as usize;
// Vec of source path and it's content
let mut sources = vec![(PathBuf::new(), String::new()); max_source_id + 1];
// Populate sources at their correct indices
for (source_id, source_path) in &build_ctx.source_id_to_path {
let idx = *source_id as usize;
let full_path =
if source_path.is_absolute() { source_path.clone() } else { root.join(source_path) };
let mut source_content = foundry_common::fs::read_to_string(&full_path).unwrap_or_default();
// Normalize line endings for windows
if source_content.contains('\r') {
source_content = source_content.replace("\r\n", "\n");
}
// Convert path to relative PathBuf
let path_buf = source_path.strip_prefix(root).unwrap_or(source_path).to_path_buf();
sources[idx] = (path_buf, source_content);
}
Some(sources)
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/backtrace/mod.rs | crates/evm/traces/src/backtrace/mod.rs | //! Solidity stack trace support for test failures.
use crate::{CallTrace, SparsedTraceArena};
use alloy_primitives::{Address, Bytes, map::HashMap};
use foundry_compilers::{
Artifact, ArtifactId, ProjectCompileOutput,
artifacts::{ConfigurableContractArtifact, Libraries, sourcemap::SourceMap},
};
use std::{fmt, path::PathBuf};
use yansi::Paint;
mod source_map;
use source_map::load_build_sources;
pub use source_map::{PcSourceMapper, SourceData};
/// Linked library information for backtrace resolution.
///
/// Contains the path, name, and deployed address of a linked library
/// to enable proper frame resolution in backtraces.
#[derive(Debug, Clone)]
struct LinkedLib {
/// The source file path of the library
path: PathBuf,
/// The name of the library contract
name: String,
/// The deployed address of the library
address: Address,
}
/// Holds a reference to [`ProjectCompileOutput`] to fetch artifacts and sources for backtrace
/// generation.
pub struct BacktraceBuilder<'a> {
/// Linked libraries from configuration
linked_libraries: Vec<LinkedLib>,
/// Reference to project output for on-demand source loading
output: &'a ProjectCompileOutput,
/// Project root
root: PathBuf,
/// Disable source locations
///
/// Source locations will be inaccurately reported if the files have been compiled with via-ir
disable_source_locs: bool,
/// Sources grouped by [`ArtifactId::build_id`] to avoid re-reading files for artifacts from
/// the same build
///
/// The source [`Vec`] is indexed by the compiler source ID, and contains the source path and
/// source content.
build_sources_cache: HashMap<String, Vec<(PathBuf, String)>>,
}
impl<'a> BacktraceBuilder<'a> {
/// Instantiates a backtrace builder from a [`ProjectCompileOutput`].
pub fn new(
output: &'a ProjectCompileOutput,
root: PathBuf,
linked_libraries: Option<Libraries>,
disable_source_locs: bool,
) -> Self {
let linked_libs = linked_libraries
.map(|libs| {
libs.libs
.iter()
.flat_map(|(path, libs_map)| {
libs_map.iter().map(move |(name, addr_str)| (path, name, addr_str))
})
.filter_map(|(path, name, addr_str)| {
addr_str.parse().ok().map(|address| LinkedLib {
path: path.clone(),
name: name.clone(),
address,
})
})
.collect()
})
.unwrap_or_default();
Self {
linked_libraries: linked_libs,
output,
root,
disable_source_locs,
build_sources_cache: HashMap::default(),
}
}
/// Generates a backtrace from a [`SparsedTraceArena`].
pub fn from_traces(&mut self, arena: &SparsedTraceArena) -> Backtrace<'_> {
// Resolve addresses to artifacts using trace labels and linked libraries
let artifacts_by_address = self.resolve_addresses(arena);
for (artifact_id, _) in artifacts_by_address.values() {
let build_id = &artifact_id.build_id;
if !self.build_sources_cache.contains_key(build_id)
&& let Some(sources) = load_build_sources(build_id, self.output, &self.root)
{
self.build_sources_cache.insert(build_id.clone(), sources);
}
}
Backtrace::new(
artifacts_by_address,
&self.build_sources_cache,
self.linked_libraries.clone(),
self.disable_source_locs,
arena,
)
}
/// Resolves contract addresses to [`ArtifactId`] and their [`SourceData`] from trace labels and
/// linked libraries.
fn resolve_addresses(
&self,
arena: &SparsedTraceArena,
) -> HashMap<Address, (ArtifactId, SourceData)> {
let mut artifacts_by_address = HashMap::default();
// Collect all labels from traces first
let label_to_address = arena
.nodes()
.iter()
.filter_map(|node| {
if let Some(decoded) = &node.trace.decoded
&& let Some(label) = &decoded.label
{
return Some((label.as_str(), node.trace.address));
}
None
})
.collect::<HashMap<_, _>>();
// Build linked library target IDs
let linked_lib_targets = self
.linked_libraries
.iter()
.map(|lib| (format!("{}:{}", lib.path.display(), lib.name), lib.address))
.collect::<HashMap<_, _>>();
let get_source = |artifact: &ConfigurableContractArtifact| -> Option<(SourceMap, Bytes)> {
let source_map = artifact.get_source_map_deployed()?.ok()?;
let deployed_bytecode = artifact.get_deployed_bytecode_bytes()?.into_owned();
if deployed_bytecode.is_empty() {
return None;
}
Some((source_map, deployed_bytecode))
};
for (artifact_id, artifact) in self.output.artifact_ids() {
// Match and insert artifacts using trace labels
if let Some(address) = label_to_address.get(artifact_id.name.as_str())
&& let Some((source_map, bytecode)) = get_source(artifact)
{
// Match and insert artifacts using trace labels
artifacts_by_address
.insert(*address, (artifact_id.clone(), SourceData { source_map, bytecode }));
} else if let Some(&lib_address) =
// Match and insert the linked library artifacts
linked_lib_targets.get(&artifact_id.identifier()).or_else(|| {
let id = artifact_id
.clone()
.with_stripped_file_prefixes(&self.root)
.identifier();
linked_lib_targets.get(&id)
})
&& let Some((source_map, bytecode)) = get_source(artifact)
{
// Insert linked libraries
artifacts_by_address
.insert(lib_address, (artifact_id, SourceData { source_map, bytecode }));
}
}
artifacts_by_address
}
}
/// A Solidity stack trace for a test failure.
///
/// Generates a backtrace from a [`SparsedTraceArena`] by leveraging source maps and bytecode.
///
/// It uses the program counter (PC) from the traces to map to a specific source location for the
/// call.
///
/// Each step/call in the backtrace is classified as a BacktraceFrame
#[non_exhaustive]
pub struct Backtrace<'a> {
/// The frames of the backtrace, from innermost (where the revert happened) to outermost.
frames: Vec<BacktraceFrame>,
/// Map from address to PcSourceMapper
pc_mappers: HashMap<Address, PcSourceMapper<'a>>,
/// Linked libraries from configuration
linked_libraries: Vec<LinkedLib>,
/// Disable pinpointing source locations in files
///
/// Should be disabled when via-ir is enabled
disable_source_locs: bool,
}
impl<'a> Backtrace<'a> {
/// Creates a backtrace from collected artifacts and sources.
fn new(
artifacts_by_address: HashMap<Address, (ArtifactId, SourceData)>,
build_sources: &'a HashMap<String, Vec<(PathBuf, String)>>,
linked_libraries: Vec<LinkedLib>,
disable_source_locs: bool,
arena: &SparsedTraceArena,
) -> Self {
let mut pc_mappers = HashMap::default();
// Build PC source mappers for each contract
if !disable_source_locs {
for (addr, (artifact_id, source_data)) in artifacts_by_address {
if let Some(sources) = build_sources.get(&artifact_id.build_id) {
let mapper = PcSourceMapper::new(source_data, sources);
pc_mappers.insert(addr, mapper);
}
}
}
let mut backtrace =
Self { frames: Vec::new(), pc_mappers, linked_libraries, disable_source_locs };
backtrace.extract_frames(arena);
backtrace
}
/// Extracts backtrace frames from a trace arena.
fn extract_frames(&mut self, arena: &SparsedTraceArena) {
let resolved_arena = &arena.arena;
if resolved_arena.nodes().is_empty() {
return;
}
// Find the deepest failed node (where the actual revert happened)
let mut current_idx = None;
let mut max_depth = 0;
for (idx, node) in resolved_arena.nodes().iter().enumerate() {
if !node.trace.success && node.trace.depth >= max_depth {
max_depth = node.trace.depth;
current_idx = Some(idx);
}
}
if current_idx.is_none() {
return;
}
// Build the call stack by walking from the deepest node back to root
while let Some(idx) = current_idx {
let node = &resolved_arena.nodes()[idx];
let trace = &node.trace;
if let Some(frame) = self.create_frame(trace) {
self.frames.push(frame);
}
current_idx = node.parent;
}
}
/// Creates a frame from a call trace.
fn create_frame(&self, trace: &CallTrace) -> Option<BacktraceFrame> {
let contract_address = trace.address;
let mut frame = BacktraceFrame::new(contract_address);
// Try to get source location from PC mapper
if !self.disable_source_locs
&& let Some(source_location) = trace.steps.last().and_then(|last_step| {
self.pc_mappers.get(&contract_address).and_then(|m| m.map_pc(last_step.pc))
})
{
frame = frame
.with_source_location(
source_location.file,
source_location.line,
source_location.column,
)
.with_byte_offset(source_location.offset);
}
if let Some(decoded) = &trace.decoded {
if let Some(label) = &decoded.label {
frame = frame.with_contract_name(label.clone());
} else if let Some(lib) =
self.linked_libraries.iter().find(|l| l.address == contract_address)
{
frame = frame.with_contract_name(lib.name.clone());
}
if let Some(call_data) = &decoded.call_data {
let sig = &call_data.signature;
let func_name =
if let Some(paren_pos) = sig.find('(') { &sig[..paren_pos] } else { sig };
frame = frame.with_function_name(func_name.to_string());
}
}
Some(frame)
}
/// Returns true if the backtrace is empty.
pub fn is_empty(&self) -> bool {
self.frames.is_empty()
}
}
impl fmt::Display for Backtrace<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.frames.is_empty() {
return Ok(());
}
writeln!(f, "{}", Paint::yellow("Backtrace:"))?;
for frame in &self.frames {
write!(f, " ")?;
write!(f, "at ")?;
writeln!(f, "{frame}")?;
}
Ok(())
}
}
/// A single frame in a backtrace.
#[derive(Debug, Clone)]
struct BacktraceFrame {
/// The contract address where this frame is executing.
pub contract_address: Address,
/// The contract name, if known.
pub contract_name: Option<String>,
/// The function name, if known.
pub function_name: Option<String>,
/// The source file path.
pub file: Option<PathBuf>,
/// The line number in the source file.
pub line: Option<usize>,
/// The column number in the source file.
pub column: Option<usize>,
/// The byte offset in the source file.
pub byte_offset: Option<usize>,
}
impl BacktraceFrame {
/// Creates a new backtrace frame.
fn new(contract_address: Address) -> Self {
Self {
contract_address,
contract_name: None,
function_name: None,
file: None,
line: None,
column: None,
byte_offset: None,
}
}
/// Sets the contract name.
fn with_contract_name(mut self, name: String) -> Self {
self.contract_name = Some(name);
self
}
/// Sets the function name.
fn with_function_name(mut self, name: String) -> Self {
self.function_name = Some(name);
self
}
/// Sets the source location.
fn with_source_location(mut self, file: PathBuf, line: usize, column: usize) -> Self {
self.file = Some(file);
self.line = Some(line);
self.column = Some(column);
self
}
/// Sets the byte offset.
fn with_byte_offset(mut self, offset: usize) -> Self {
self.byte_offset = Some(offset);
self
}
}
// Format: <CONTRACT_NAME>.<FUNCTION_NAME> (FILE:LINE:COL)
impl fmt::Display for BacktraceFrame {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut result = String::new();
// No contract name, show address
result.push_str(self.contract_name.as_ref().unwrap_or(&self.contract_address.to_string()));
// Add function name if available
result.push_str(&self.function_name.as_ref().map_or(String::new(), |f| format!(".{f}")));
if let Some(file) = &self.file {
result.push_str(" (");
result.push_str(&file.display().to_string());
}
if let Some(line) = self.line {
result.push(':');
result.push_str(&line.to_string());
result.push(':');
result.push_str(&self.column.as_ref().map_or("0".to_string(), |c| c.to_string()));
}
// Add location in parentheses if available
if self.file.is_some() || self.line.is_some() {
result.push(')');
}
write!(f, "{result}")
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/debug/mod.rs | crates/evm/traces/src/debug/mod.rs | mod sources;
use crate::CallTraceNode;
use alloy_dyn_abi::{
DynSolType, DynSolValue, Specifier,
parser::{Parameters, Storage},
};
use alloy_primitives::U256;
use foundry_common::fmt::format_token;
use foundry_compilers::artifacts::sourcemap::{Jump, SourceElement};
use revm::bytecode::opcode::OpCode;
use revm_inspectors::tracing::types::{CallTraceStep, DecodedInternalCall, DecodedTraceStep};
pub use sources::{ArtifactData, ContractSources, SourceData};
#[derive(Clone, Debug)]
pub struct DebugTraceIdentifier {
/// Source map of contract sources
contracts_sources: ContractSources,
}
impl DebugTraceIdentifier {
pub fn new(contracts_sources: ContractSources) -> Self {
Self { contracts_sources }
}
/// Identifies internal function invocations in a given [CallTraceNode].
///
/// Accepts the node itself and identified name of the contract which node corresponds to.
pub fn identify_node_steps(&self, node: &mut CallTraceNode, contract_name: &str) {
DebugStepsWalker::new(node, &self.contracts_sources, contract_name).walk();
}
}
/// Walks through the [CallTraceStep]s attempting to match JUMPs to internal functions.
///
/// This is done by looking up jump kinds in the source maps. The structure of internal function
/// call always looks like this:
/// - JUMP
/// - JUMPDEST
/// ... function steps ...
/// - JUMP
/// - JUMPDEST
///
/// The assumption we rely on is that first JUMP into function will be marked as [Jump::In] in
/// source map, and second JUMP out of the function will be marked as [Jump::Out].
///
/// Also, we rely on JUMPDEST after first JUMP pointing to the source location of the body of
/// function which was entered. We pass this source part to [parse_function_from_loc] to extract the
/// function name.
///
/// When we find a [Jump::In] and identify the function name, we push it to the stack.
///
/// When we find a [Jump::Out] we try to find a matching [Jump::In] in the stack. A match is found
/// when source location of the JUMP-in matches the source location of final JUMPDEST (this would be
/// the location of the function invocation), or when source location of first JUMODEST matches the
/// source location of the JUMP-out (this would be the location of function body).
///
/// When a match is found, all items which were pushed after the matched function are removed. There
/// is a lot of such items due to source maps getting malformed during optimization.
struct DebugStepsWalker<'a> {
node: &'a mut CallTraceNode,
current_step: usize,
stack: Vec<(String, usize)>,
sources: &'a ContractSources,
contract_name: &'a str,
}
impl<'a> DebugStepsWalker<'a> {
pub fn new(
node: &'a mut CallTraceNode,
sources: &'a ContractSources,
contract_name: &'a str,
) -> Self {
Self { node, current_step: 0, stack: Vec::new(), sources, contract_name }
}
fn current_step(&self) -> &CallTraceStep {
&self.node.trace.steps[self.current_step]
}
fn src_map(&self, step: usize) -> Option<(SourceElement, &SourceData)> {
self.sources.find_source_mapping(
self.contract_name,
self.node.trace.steps[step].pc as u32,
self.node.trace.kind.is_any_create(),
)
}
fn prev_src_map(&self) -> Option<(SourceElement, &SourceData)> {
if self.current_step == 0 {
return None;
}
self.src_map(self.current_step - 1)
}
fn current_src_map(&self) -> Option<(SourceElement, &SourceData)> {
self.src_map(self.current_step)
}
fn is_same_loc(&self, step: usize, other: usize) -> bool {
let Some((loc, _)) = self.src_map(step) else {
return false;
};
let Some((other_loc, _)) = self.src_map(other) else {
return false;
};
loc.offset() == other_loc.offset()
&& loc.length() == other_loc.length()
&& loc.index() == other_loc.index()
}
/// Invoked when current step is a JUMPDEST preceded by a JUMP marked as [Jump::In].
fn jump_in(&mut self) {
// This usually means that this is a jump into the external function which is an
// entrypoint for the current frame. We don't want to include this to avoid
// duplicating traces.
if self.is_same_loc(self.current_step, self.current_step - 1) {
return;
}
let Some((source_element, source)) = self.current_src_map() else {
return;
};
if let Some(name) = parse_function_from_loc(source, &source_element) {
self.stack.push((name, self.current_step - 1));
}
}
/// Invoked when current step is a JUMPDEST preceded by a JUMP marked as [Jump::Out].
fn jump_out(&mut self) {
let Some((i, _)) = self.stack.iter().enumerate().rfind(|(_, (_, step_idx))| {
self.is_same_loc(*step_idx, self.current_step)
|| self.is_same_loc(step_idx + 1, self.current_step - 1)
}) else {
return;
};
// We've found a match, remove all records between start and end, those
// are considered invalid.
let (func_name, start_idx) = self.stack.split_off(i).swap_remove(0);
// Try to decode function inputs and outputs from the stack and memory.
let (inputs, outputs) = self
.src_map(start_idx + 1)
.map(|(source_element, source)| {
let start = source_element.offset() as usize;
let end = start + source_element.length() as usize;
let fn_definition = source.source[start..end].replace('\n', "");
let (inputs, outputs) = parse_types(&fn_definition);
(
inputs.and_then(|t| {
try_decode_args_from_step(&t, &self.node.trace.steps[start_idx + 1])
}),
outputs.and_then(|t| try_decode_args_from_step(&t, self.current_step())),
)
})
.unwrap_or_default();
self.node.trace.steps[start_idx].decoded = Some(Box::new(DecodedTraceStep::InternalCall(
DecodedInternalCall { func_name, args: inputs, return_data: outputs },
self.current_step,
)));
}
fn process(&mut self) {
// We are only interested in JUMPs.
if self.current_step().op != OpCode::JUMP && self.current_step().op != OpCode::JUMPDEST {
return;
}
let Some((prev_source_element, _)) = self.prev_src_map() else {
return;
};
match prev_source_element.jump() {
Jump::In => self.jump_in(),
Jump::Out => self.jump_out(),
_ => {}
};
}
fn step(&mut self) {
self.process();
self.current_step += 1;
}
pub fn walk(mut self) {
while self.current_step < self.node.trace.steps.len() {
self.step();
}
}
}
/// Tries to parse the function name from the source code and detect the contract name which
/// contains the given function.
///
/// Returns string in the format `Contract::function`.
fn parse_function_from_loc(source: &SourceData, loc: &SourceElement) -> Option<String> {
let start = loc.offset() as usize;
let end = start + loc.length() as usize;
let src_len = source.source.len();
// Handle special case of preprocessed test sources.
if start > src_len || end > src_len {
return None;
}
let source_part = &source.source[start..end];
if !source_part.starts_with("function") {
return None;
}
let function_name = source_part.split_once("function")?.1.split('(').next()?.trim();
let contract_name = source.find_contract_name(start, end)?;
Some(format!("{contract_name}::{function_name}"))
}
/// Parses function input and output types into [Parameters].
fn parse_types(source: &str) -> (Option<Parameters<'_>>, Option<Parameters<'_>>) {
let inputs = source.find('(').and_then(|params_start| {
let params_end = params_start + source[params_start..].find(')')?;
Parameters::parse(&source[params_start..params_end + 1]).ok()
});
let outputs = source.find("returns").and_then(|returns_start| {
let return_params_start = returns_start + source[returns_start..].find('(')?;
let return_params_end = return_params_start + source[return_params_start..].find(')')?;
Parameters::parse(&source[return_params_start..return_params_end + 1]).ok()
});
(inputs, outputs)
}
/// Given [Parameters] and [CallTraceStep], tries to decode parameters by using stack and memory.
fn try_decode_args_from_step(args: &Parameters<'_>, step: &CallTraceStep) -> Option<Vec<String>> {
let params = &args.params;
if params.is_empty() {
return Some(vec![]);
}
let types = params.iter().map(|p| p.resolve().ok().map(|t| (t, p.storage))).collect::<Vec<_>>();
let stack = step.stack.as_ref()?;
if stack.len() < types.len() {
return None;
}
let inputs = &stack[stack.len() - types.len()..];
let decoded = inputs
.iter()
.zip(types.iter())
.map(|(input, type_and_storage)| {
type_and_storage
.as_ref()
.and_then(|(type_, storage)| {
match (type_, storage) {
// HACK: alloy parser treats user-defined types as uint8: https://github.com/alloy-rs/core/pull/386
//
// filter out `uint8` params which are marked as storage or memory as this
// is not possible in Solidity and means that type is user-defined
(DynSolType::Uint(8), Some(Storage::Memory | Storage::Storage)) => None,
(_, Some(Storage::Memory)) => decode_from_memory(
type_,
step.memory.as_ref()?.as_bytes(),
input.try_into().ok()?,
),
// Read other types from stack
_ => type_.abi_decode(&input.to_be_bytes::<32>()).ok(),
}
})
.as_ref()
.map(format_token)
.unwrap_or_else(|| "<unknown>".to_string())
})
.collect();
Some(decoded)
}
/// Decodes given [DynSolType] from memory.
fn decode_from_memory(ty: &DynSolType, memory: &[u8], location: usize) -> Option<DynSolValue> {
let first_word = memory.get(location..location + 32)?;
match ty {
// For `string` and `bytes` layout is a word with length followed by the data
DynSolType::String | DynSolType::Bytes => {
let length: usize = U256::from_be_slice(first_word).try_into().ok()?;
let data = memory.get(location + 32..location + 32 + length)?;
match ty {
DynSolType::Bytes => Some(DynSolValue::Bytes(data.to_vec())),
DynSolType::String => {
Some(DynSolValue::String(String::from_utf8_lossy(data).to_string()))
}
_ => unreachable!(),
}
}
// Dynamic arrays are encoded as a word with length followed by words with elements
// Fixed arrays are encoded as words with elements
DynSolType::Array(inner) | DynSolType::FixedArray(inner, _) => {
let (length, start) = match ty {
DynSolType::FixedArray(_, length) => (*length, location),
DynSolType::Array(_) => {
(U256::from_be_slice(first_word).try_into().ok()?, location + 32)
}
_ => unreachable!(),
};
let mut decoded = Vec::with_capacity(length);
for i in 0..length {
let offset = start + i * 32;
let location = match inner.as_ref() {
// Arrays of variable length types are arrays of pointers to the values
DynSolType::String | DynSolType::Bytes | DynSolType::Array(_) => {
U256::from_be_slice(memory.get(offset..offset + 32)?).try_into().ok()?
}
_ => offset,
};
decoded.push(decode_from_memory(inner, memory, location)?);
}
Some(DynSolValue::Array(decoded))
}
_ => ty.abi_decode(first_word).ok(),
}
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
foundry-rs/foundry | https://github.com/foundry-rs/foundry/blob/271c34d2550474a8ded7fbeedff398b920a96689/crates/evm/traces/src/debug/sources.rs | crates/evm/traces/src/debug/sources.rs | use eyre::{Context, Result};
use foundry_common::{compact_to_contract, strip_bytecode_placeholders};
use foundry_compilers::{
Artifact, ProjectCompileOutput,
artifacts::{
Bytecode, ContractBytecodeSome, Libraries, Source,
sourcemap::{SourceElement, SourceMap},
},
multi::MultiCompilerLanguage,
};
use foundry_evm_core::ic::PcIcMap;
use foundry_linking::Linker;
use rayon::prelude::*;
use std::{
collections::{BTreeMap, HashMap, HashSet},
fmt::Write,
ops::Range,
path::{Path, PathBuf},
sync::Arc,
};
#[derive(Clone, Debug)]
pub struct SourceData {
pub source: Arc<String>,
pub language: MultiCompilerLanguage,
pub path: PathBuf,
/// Maps contract name to (start, end) of the contract definition in the source code.
/// This is useful for determining which contract contains given function definition.
pub contract_definitions: Vec<(String, Range<usize>)>,
}
impl SourceData {
    /// Builds a [`SourceData`] for one source file, collecting the byte ranges of all
    /// contract definitions it contains.
    ///
    /// For Vyper files the whole file is treated as a single contract named after the
    /// file stem; for Solidity the ranges are extracted from the parsed AST.
    pub fn new(
        output: &ProjectCompileOutput,
        source: Arc<String>,
        language: MultiCompilerLanguage,
        path: PathBuf,
        root: &Path,
    ) -> Self {
        let mut contract_definitions = Vec::new();
        match language {
            MultiCompilerLanguage::Vyper(_) => {
                // Vyper contracts have the same name as the file name.
                if let Some(stem) = path.file_stem() {
                    let name = stem.to_string_lossy().to_string();
                    contract_definitions.push((name, 0..source.len()));
                }
            }
            MultiCompilerLanguage::Solc(_) => {
                let parsed = output.parser().solc().compiler().enter(|compiler| -> Option<()> {
                    let (_, source) = compiler.gcx().get_ast_source(root.join(&path))?;
                    for item in source.ast.as_ref()?.items.iter() {
                        let solar::ast::ItemKind::Contract(contract) = &item.kind else {
                            continue;
                        };
                        let range =
                            compiler.sess().source_map().span_to_range(item.span).unwrap();
                        contract_definitions.push((contract.name.to_string(), range));
                    }
                    Some(())
                });
                if parsed.is_none() {
                    warn!("failed to parse contract definitions for {}", path.display());
                }
            }
        }
        Self { source, language, path, contract_definitions }
    }

    /// Finds name of contract that contains given loc.
    ///
    /// Returns the first definition whose range fully encloses `start..end`.
    pub fn find_contract_name(&self, start: usize, end: usize) -> Option<&str> {
        for (name, range) in &self.contract_definitions {
            if range.start <= start && end <= range.end {
                return Some(name.as_str());
            }
        }
        None
    }
}
/// Parsed bytecode data for a single artifact: source maps and program-counter to
/// instruction-counter mappings for both the creation and the runtime bytecode.
#[derive(Clone, Debug)]
pub struct ArtifactData {
    /// Source map of the creation (deploy) bytecode, if present and parseable.
    pub source_map: Option<SourceMap>,
    /// Source map of the deployed (runtime) bytecode, if present and parseable.
    pub source_map_runtime: Option<SourceMap>,
    /// PC -> IC map of the creation bytecode.
    pub pc_ic_map: Option<PcIcMap>,
    /// PC -> IC map of the deployed bytecode.
    pub pc_ic_map_runtime: Option<PcIcMap>,
    /// Identifier of the build that produced this artifact.
    pub build_id: String,
    /// Identifier of the source file within the build.
    pub file_id: u32,
}
impl ArtifactData {
fn new(bytecode: ContractBytecodeSome, build_id: String, file_id: u32) -> Result<Self> {
let parse = |b: &Bytecode, name: &str| {
// Only parse source map if it's not empty.
let source_map = if b.source_map.as_ref().is_none_or(|s| s.is_empty()) {
Ok(None)
} else {
b.source_map().transpose().wrap_err_with(|| {
format!("failed to parse {name} source map of file {file_id} in {build_id}")
})
};
// Only parse bytecode if it's not empty, stripping placeholders if necessary.
let pc_ic_map = if let Some(bytes) = strip_bytecode_placeholders(&b.object) {
(!bytes.is_empty()).then(|| PcIcMap::new(bytes.as_ref()))
} else {
None
};
source_map.map(|source_map| (source_map, pc_ic_map))
};
let (source_map, pc_ic_map) = parse(&bytecode.bytecode, "creation")?;
let (source_map_runtime, pc_ic_map_runtime) = bytecode
.deployed_bytecode
.bytecode
.map(|b| parse(&b, "runtime"))
.unwrap_or_else(|| Ok((None, None)))?;
Ok(Self { source_map, source_map_runtime, pc_ic_map, pc_ic_map_runtime, build_id, file_id })
}
}
/// Container with artifacts data useful for identifying individual execution steps.
#[derive(Clone, Debug, Default)]
pub struct ContractSources {
    /// Map over build_id -> file_id -> source data (content, language, contract ranges).
    pub sources_by_id: HashMap<String, HashMap<u32, Arc<SourceData>>>,
    /// Map over contract name -> Vec<(bytecode maps, build_id, file_id)>
    pub artifacts_by_name: HashMap<String, Vec<ArtifactData>>,
}
impl ContractSources {
    /// Collects the contract sources and artifacts from the project compile output.
    pub fn from_project_output(
        output: &ProjectCompileOutput,
        root: &Path,
        libraries: Option<&Libraries>,
    ) -> Result<Self> {
        let mut sources = Self::default();
        sources.insert(output, root, libraries)?;
        Ok(sources)
    }

    /// Inserts all artifacts and source files from `output` into this container.
    ///
    /// If `libraries` is provided, artifacts are linked against them before their
    /// bytecode and source maps are parsed.
    pub fn insert(
        &mut self,
        output: &ProjectCompileOutput,
        root: &Path,
        libraries: Option<&Libraries>,
    ) -> Result<()> {
        // Only construct a linker when we actually have libraries to link against.
        let link_data = libraries.map(|libraries| {
            let linker = Linker::new(root, output.artifact_ids().collect());
            (linker, libraries)
        });

        // Parse bytecode and source maps of all artifacts in parallel; artifacts
        // without a file id are skipped with a warning.
        let artifacts: Vec<_> = output
            .artifact_ids()
            .collect::<Vec<_>>()
            .par_iter()
            .map(|(id, artifact)| {
                let mut new_artifact = None;
                if let Some(file_id) = artifact.id {
                    let artifact = if let Some((linker, libraries)) = link_data.as_ref() {
                        linker.link(id, libraries)?
                    } else {
                        artifact.get_contract_bytecode()
                    };
                    let bytecode = compact_to_contract(artifact.into_contract_bytecode())?;
                    new_artifact = Some((
                        id.name.clone(),
                        ArtifactData::new(bytecode, id.build_id.clone(), file_id)?,
                    ));
                } else {
                    warn!(id = id.identifier(), "source not found");
                };
                Ok(new_artifact)
            })
            .collect::<Result<Vec<_>>>()?;

        for (name, artifact) in artifacts.into_iter().flatten() {
            self.artifacts_by_name.entry(name).or_default().push(artifact);
        }

        // Not all source files produce artifacts, so we are populating sources by using build
        // infos.
        // Cache file contents by path so each file is read from disk at most once,
        // even when referenced by multiple builds.
        let mut files: BTreeMap<PathBuf, Arc<SourceData>> = BTreeMap::new();
        let mut removed_files = HashSet::new();
        for (build_id, build) in output.builds() {
            for (source_id, path) in &build.source_id_to_path {
                if !path.exists() {
                    removed_files.insert(path);
                    continue;
                }

                let source_data = match files.entry(path.clone()) {
                    std::collections::btree_map::Entry::Vacant(entry) => {
                        let source = Source::read(path).wrap_err_with(|| {
                            format!("failed to read artifact source file for `{}`", path.display())
                        })?;
                        let stripped = path.strip_prefix(root).unwrap_or(path).to_path_buf();
                        let source_data = Arc::new(SourceData::new(
                            output,
                            source.content.clone(),
                            build.language,
                            stripped,
                            root,
                        ));
                        entry.insert(source_data.clone());
                        source_data
                    }
                    std::collections::btree_map::Entry::Occupied(entry) => entry.get().clone(),
                };
                self.sources_by_id
                    .entry(build_id.clone())
                    .or_default()
                    .insert(*source_id, source_data);
            }
        }

        // Stale artifacts referencing deleted source files are only warned about,
        // not treated as a hard error.
        if !removed_files.is_empty() {
            let mut warning = "Detected artifacts built from source files that no longer exist. \
                Run `forge clean` to make sure builds are in sync with project files."
                .to_string();
            for file in removed_files {
                write!(warning, "\n - {}", file.display())?;
            }
            let _ = sh_warn!("{}", warning);
        }

        Ok(())
    }

    /// Merges given contract sources.
    ///
    /// Note: `sources_by_id` entries for duplicate build ids are overwritten by the
    /// incoming map, while `artifacts_by_name` entries are appended.
    pub fn merge(&mut self, sources: Self) {
        self.sources_by_id.extend(sources.sources_by_id);
        for (name, artifacts) in sources.artifacts_by_name {
            self.artifacts_by_name.entry(name).or_default().extend(artifacts);
        }
    }

    /// Returns all sources for a contract by name.
    ///
    /// Artifacts whose build id or file id has no matching source entry are skipped.
    pub fn get_sources(
        &self,
        name: &str,
    ) -> Option<impl Iterator<Item = (&ArtifactData, &SourceData)>> {
        self.artifacts_by_name.get(name).map(|artifacts| {
            artifacts.iter().filter_map(|artifact| {
                let source =
                    self.sources_by_id.get(artifact.build_id.as_str())?.get(&artifact.file_id)?;
                Some((artifact, source.as_ref()))
            })
        })
    }

    /// Returns all (name, bytecode, source) sets.
    pub fn entries(&self) -> impl Iterator<Item = (&str, &ArtifactData, &SourceData)> {
        self.artifacts_by_name.iter().flat_map(|(name, artifacts)| {
            artifacts.iter().filter_map(|artifact| {
                let source =
                    self.sources_by_id.get(artifact.build_id.as_str())?.get(&artifact.file_id)?;
                Some((name.as_str(), artifact, source.as_ref()))
            })
        })
    }

    /// Resolves the source element and source file for the given program counter of
    /// `contract_name`'s creation (`init_code == true`) or runtime bytecode.
    ///
    /// Returns `None` if the contract, its source map, or the mapped source file
    /// cannot be found.
    pub fn find_source_mapping(
        &self,
        contract_name: &str,
        pc: u32,
        init_code: bool,
    ) -> Option<(SourceElement, &SourceData)> {
        self.get_sources(contract_name)?.find_map(|(artifact, source)| {
            let source_map = if init_code {
                artifact.source_map.as_ref()
            } else {
                artifact.source_map_runtime.as_ref()
            }?;

            // Solc indexes source maps by instruction counter, but Vyper indexes by program
            // counter.
            let source_element = if matches!(source.language, MultiCompilerLanguage::Solc(_)) {
                let pc_ic_map = if init_code {
                    artifact.pc_ic_map.as_ref()
                } else {
                    artifact.pc_ic_map_runtime.as_ref()
                }?;
                let ic = pc_ic_map.get(pc)?;
                source_map.get(ic as usize)
            } else {
                source_map.get(pc as usize)
            }?;

            // if the source element has an index, find the sourcemap for that index
            source_element
                .index()
                // if index matches current file_id, return current source code
                .and_then(|index| {
                    (index == artifact.file_id).then(|| (source_element.clone(), source))
                })
                .or_else(|| {
                    // otherwise find the source code for the element's index
                    self.sources_by_id
                        .get(&artifact.build_id)?
                        .get(&source_element.index()?)
                        .map(|source| (source_element.clone(), source.as_ref()))
                })
        })
    }
}
| rust | Apache-2.0 | 271c34d2550474a8ded7fbeedff398b920a96689 | 2026-01-04T15:43:23.630446Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.