repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/direct/test_via_direct.py | src/pytest_plugins/consume/direct/test_via_direct.py | """
Executes a JSON test fixture directly against a client using a dedicated client
interface similar to geth's EVM 'blocktest' command.
"""
from pathlib import Path
from ethereum_test_fixtures import FixtureConsumer
from ethereum_test_fixtures.consume import TestCaseIndexFile, TestCaseStream
def test_fixture(
    test_case: TestCaseIndexFile | TestCaseStream,
    fixture_consumer: FixtureConsumer,
    fixture_path: Path,
    test_dump_dir: Path | None,
) -> None:
    """
    Run a single test case by handing the fixture file and the test case's
    identifier over to the configured fixture consumer.
    """
    fixture_format = test_case.format
    fixture_name = test_case.id
    fixture_consumer.consume_fixture(
        fixture_format,
        fixture_path,
        fixture_name=fixture_name,
        debug_output_path=test_dump_dir,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/hive_engine_test/__init__.py | src/pytest_plugins/consume/hive_engine_test/__init__.py | """Hive engine test consumer plugin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/spec_version_checker/spec_version_checker.py | src/pytest_plugins/spec_version_checker/spec_version_checker.py | """
A pytest plugin that checks that the spec version specified in test/filler
modules matches that of https://github.com/ethereum/EIPs.
"""
import os
import re
import textwrap
from types import ModuleType
from typing import Any, List, Optional, Set
import pytest
from _pytest.nodes import Item, Node
from _pytest.python import Module
from ethereum_test_tools import ReferenceSpec, ReferenceSpecTypes
# Guidance appended to error/help messages wherever a Github token is required.
# NOTE: this is a single line built via implicit string concatenation, so the
# previous `textwrap.dedent()` wrapper was a no-op and has been removed.
GITHUB_TOKEN_HELP = (
    "Either set the GITHUB_TOKEN environment variable or specify one via --github-token. "
    "The Github CLI can be used: `--github-token $(gh auth token)` (https://cli.github.com/) "
    "or a PAT can be generated at https://github.com/settings/personal-access-tokens/new."
)
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the `--github-token` option with pytest's command line."""
    # The help text is the generic rate-limiting note plus the shared token
    # guidance; concatenation yields the same string as the original f-string.
    help_text = (
        "Specify a Github API personal access token (PAT) to avoid rate limiting. "
        + GITHUB_TOKEN_HELP
    )
    spec_group = parser.getgroup(
        "spec_version_checker", "Arguments defining the EIP spec version checker"
    )
    spec_group.addoption(
        "--github-token",
        action="store",
        dest="github_token",
        default=None,
        help=help_text,
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Register the plugin's custom markers and process command-line options.
    Custom marker registration:
    https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
    """
    config.addinivalue_line(
        "markers",
        "eip_version_check: a test that tests the reference spec defined in an EIP test module.",
    )
    # Prefer the command-line token; fall back to the environment variable.
    token = config.getoption("github_token")
    if not token:
        token = os.environ.get("GITHUB_TOKEN")
    if not token:
        pytest.exit(
            "A Github personal access token (PAT) is required but has not been provided. "
            f"{GITHUB_TOKEN_HELP}"
        )
    config.github_token = token  # type: ignore[attr-defined]
def get_ref_spec_from_module(
    module: ModuleType, github_token: Optional[str] = None
) -> None | ReferenceSpec:
    """
    Return the reference spec object defined in a module.

    Args:
        module: The module to extract reference spec from
        github_token: Optional GitHub token for API authentication

    Raises:
        Exception: If the module path contains "eip" and the module does
            not define a reference spec.

    Returns:
        spec_obj: Return None if the module path does not contain "eip",
            i.e., the module is not required to define a reference spec,
            otherwise, return the ReferenceSpec object as defined by the
            module.
    """
    if not is_test_for_an_eip(str(module.__file__)):
        # Not an EIP test module: no reference spec is required.
        return None
    module_dict = module.__dict__
    parseable_ref_specs = [
        ref_spec_type
        for ref_spec_type in ReferenceSpecTypes
        if ref_spec_type.parseable_from_module(module_dict)
    ]
    # Guard clause replaces the original if/else; the redundant second
    # `module_dict = module.__dict__` assignment has been removed.
    if not parseable_ref_specs:
        raise Exception("Test doesn't define REFERENCE_SPEC_GIT_PATH and REFERENCE_SPEC_VERSION")
    try:
        # Use the first spec type that can parse the module's globals.
        spec_obj = parseable_ref_specs[0].parse_from_module(
            module_dict, github_token=github_token
        )
    except Exception as e:
        raise Exception(f"Error in spec_version_checker: {e} (this test is generated).") from e
    return spec_obj
def is_test_for_an_eip(input_string: str) -> bool:
    """Return True if `input_string` contains an EIP number, i.e., eipNNNN."""
    # Same pattern/flags as before (the `re` module caches compiled patterns);
    # the if/else returning literal True/False is collapsed into one boolean.
    return re.match(r".*eip\d{1,4}", input_string, re.IGNORECASE) is not None
def test_eip_spec_version(module: ModuleType, github_token: Optional[str] = None) -> None:
    """
    Test that the ReferenceSpec object as defined in the test module is not
    outdated when compared to the remote hash from ethereum/EIPs.

    Args:
        module: Module to test
        github_token: Optional GitHub token for API authentication
    """
    ref_spec = get_ref_spec_from_module(module, github_token=github_token)
    assert ref_spec, "No reference spec object defined"
    # Build the failure message up-front (this queries the reference spec's
    # known/latest versions before the outdated check below).
    message = (
        "The version of the spec referenced in "
        f"{module} does not match that from ethereum/EIPs, "
        f"tests might be outdated: Spec: {ref_spec.name()}. "
        f"Referenced version: {ref_spec.known_version()}. "
        f"Latest version: {ref_spec.latest_version()}. The "
        f"version was retrieved from {ref_spec.api_url()}."
    )
    try:
        outdated = ref_spec.is_outdated()
    except Exception as e:
        raise Exception(
            f"Error in spec_version_checker: {e} (this test is generated). "
            f"Reference spec URL: {ref_spec.api_url()}."
        ) from e
    assert not outdated, message
class EIPSpecTestItem(Item):
    """Custom pytest test item to test EIP spec versions."""

    # Module under test and optional Github API token; both are injected by
    # `from_parent` after construction.
    module: ModuleType
    github_token: Optional[str]

    def __init__(self, name: str, parent: Node, **kwargs: Any):
        """
        Initialize the test item with empty module/token placeholders.

        Args:
            name: Name of the test
            parent: Parent node
            **kwargs: Additional keyword arguments
        """
        super().__init__(name, parent, **kwargs)
        self.module = None  # type: ignore
        self.github_token = None

    @classmethod
    def from_parent(cls, parent: Node, **kw: Any) -> "EIPSpecTestItem":
        """
        Public constructor to define new tests.
        https://docs.pytest.org/en/latest/reference/reference.html#pytest.nodes.Node.from_parent.

        Args:
            parent: The parent Node
            kw: Additional keyword arguments (module, github_token)
        """
        kw["name"] = "test_eip_spec_version"
        test_module = kw.pop("module", None)
        token = kw.pop("github_token", None)
        item = super().from_parent(parent, **kw)
        item.module = test_module
        item.github_token = token
        return item

    def runtest(self) -> None:
        """Define the test to execute for this item."""
        test_eip_spec_version(self.module, github_token=self.github_token)

    def reportinfo(self) -> tuple[str, int, str]:
        """
        Get location information for this test item to use test reports.

        Returns:
            A tuple of (path, line_number, description)
        """
        return "spec_version_checker", 0, f"{self.name}"
def pytest_collection_modifyitems(config: pytest.Config, items: List[Item]) -> None:
    """
    Insert a new test EIPSpecTestItem for every test module with 'eip' in its
    path.
    """
    # `github_token` is attached to the config in pytest_configure; fall back
    # to None defensively (idiomatic getattr replaces hasattr + access).
    github_token = getattr(config, "github_token", None)
    # De-duplicate parent modules; several collected items share one module.
    modules: Set[Module] = {item.parent for item in items if isinstance(item.parent, Module)}
    new_test_eip_spec_version_items = [
        EIPSpecTestItem.from_parent(parent=module, module=module.obj, github_token=github_token)
        for module in sorted(modules, key=lambda module: module.path)
        if is_test_for_an_eip(str(module.path))
    ]
    for item in new_test_eip_spec_version_items:
        item.add_marker("eip_version_check", append=True)
    items.extend(new_test_eip_spec_version_items)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/spec_version_checker/__init__.py | src/pytest_plugins/spec_version_checker/__init__.py | """
A pytest plugin that verifies the tested version of an EIP specification
against the latest version from the
[ethereum/EIPs](https://github.com/ethereum/EIPs) Github repository.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/help/__init__.py | src/pytest_plugins/help/__init__.py | """
Pytest plugin that prints help defined in other execution-spec-tests plugins.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/help/help.py | src/pytest_plugins/help/help.py | """
A small pytest plugin that shows the a concise help string that only contains
the options defined by the plugins defined in execution-spec-tests.
"""
import argparse
from pathlib import Path
import pytest
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest for specific help commands."""
    help_group = parser.getgroup("help_options", "Help options for different commands")
    # (flag, dest, help) triples; every option is a store_true with a False
    # default, so they can all be registered from one table.
    flag_specs = [
        (
            "--check-eip-versions-help",
            "show_check_eip_versions_help",
            "Show help options only for the check_eip_versions command and exit.",
        ),
        (
            "--fill-help",
            "show_fill_help",
            "Show help options only for the fill command and exit.",
        ),
        (
            "--consume-help",
            "show_consume_help",
            "Show help options specific to the consume command and exit.",
        ),
        (
            "--execute-remote-help",
            "show_execute_help",
            "Show help options specific to the execute's command remote and exit.",
        ),
        (
            "--execute-hive-help",
            "show_execute_hive_help",
            "Show help options specific to the execute's command hive and exit.",
        ),
        (
            "--execute-recover-help",
            "show_execute_recover_help",
            "Show help options specific to the execute's command recover and exit.",
        ),
        (
            "--execute-eth-config-help",
            "show_execute_eth_config_help",
            "Show help options specific to the execute's command eth_config and exit.",
        ),
    ]
    for flag, dest, help_text in flag_specs:
        help_group.addoption(
            flag,
            action="store_true",
            dest=dest,
            default=False,
            help=help_text,
        )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Handle specific help flags by displaying the corresponding help message.
    """
    # Ordered dispatch table mirroring the original if/elif chain: the first
    # set option wins. Each entry maps the option name to the ini file it is
    # valid for plus the substrings selecting the relevant argument groups.
    dispatch = [
        (
            "show_check_eip_versions_help",
            "pytest-check-eip-versions.ini",
            ["spec_version_checker", "EIP spec version"],
        ),
        (
            "show_fill_help",
            "pytest-fill.ini",
            [
                "evm",
                "solc",
                "fork range",
                "filler location",
                "defining debug",
                "pre-allocation behavior during test filling",
                "ported",
                "witness",
            ],
        ),
        (
            "show_consume_help",
            "pytest-consume.ini",
            ["consuming"],
        ),
        (
            "show_execute_help",
            "pytest-execute.ini",
            [
                "execute",
                "remote RPC configuration",
                "pre-allocation behavior during test execution",
                "sender key fixtures",
                "remote seed sender",
            ],
        ),
        (
            "show_execute_hive_help",
            "pytest-execute-hive.ini",
            [
                "execute",
                "hive RPC client",
                "pre-allocation behavior during test execution",
                "sender key fixtures",
                "remote seed sender",
            ],
        ),
        (
            "show_execute_recover_help",
            "pytest-execute-recover.ini",
            [
                "fund recovery",
                "remote RPC configuration",
                "remote seed sender",
            ],
        ),
        (
            "show_execute_eth_config_help",
            "pytest-execute-eth-config.ini",
            ["eth_config"],
        ),
    ]
    for option_name, expected_ini, substrings in dispatch:
        if config.getoption(option_name):
            # show_specific_help prints and exits pytest; break is defensive.
            show_specific_help(config, expected_ini, substrings)
            break
def show_specific_help(config: pytest.Config, expected_ini: str, substrings: list[str]) -> None:
    """
    Print help options filtered by specific substrings from the given
    configuration.

    Only argument groups whose title contains one of `substrings` are copied
    into a throwaway argparse parser, whose formatted help is then printed.
    Always exits pytest after printing.
    """
    pytest_ini = Path(config.inifile)  # type: ignore
    if pytest_ini.name != expected_ini:
        # Sanity check: the help flag used must match the ini file in effect.
        raise ValueError(
            f"Unexpected {expected_ini}!={pytest_ini.name} file option generating help."
        )
    test_parser = argparse.ArgumentParser()
    # NOTE(review): this walks pytest/argparse private attributes
    # (`_parser.optparser._action_groups`, `_group_actions`,
    # `argparse._StoreTrueAction`) and may break across pytest versions.
    for group in config._parser.optparser._action_groups:
        title: str | None = group.title
        if title and any(substring in title for substring in substrings):
            new_group = test_parser.add_argument_group(group.title, group.description)
            # Re-register each action on the throwaway parser, preserving
            # default/help/required and the store_true vs typed distinction.
            for action in group._group_actions:
                kwargs = {
                    "default": action.default,
                    "help": action.help,
                    "required": action.required,
                }
                if isinstance(action, argparse._StoreTrueAction):
                    kwargs["action"] = "store_true"
                else:
                    kwargs["type"] = action.type
                if action.nargs:
                    kwargs["nargs"] = action.nargs
                new_group.add_argument(*action.option_strings, **kwargs)
    print(test_parser.format_help())
    pytest.exit("After displaying help.", returncode=pytest.ExitCode.OK)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/help/tests/test_help.py | src/pytest_plugins/help/tests/test_help.py | """Test the help plugin."""
from typing import Any
import pytest
# Locally defined fill command-line flags expected to appear in the output of
# the custom `--fill-help` flag (see test below).
FILL_TEST_ARGS = (
    "--evm-bin",
    "--traces",
    "--filler-path",
    "--output",
    "--forks",
    "--fork",
    "--from",
    "--until",
    "--help",
)
@pytest.mark.parametrize("help_flag", ["--fill-help"])
def test_local_arguments_present_in_fill_help(pytester: Any, help_flag: str) -> None:
    """
    Test that locally defined command-line flags appear in the help if our
    custom help flag is used.
    """
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    result = pytester.runpytest("-c", "pytest-fill.ini", help_flag)
    # Join the captured stdout once instead of re-joining it for every flag.
    help_output = "\n".join(result.stdout.lines)
    for test_arg in FILL_TEST_ARGS:
        assert test_arg in help_output
# Locally defined consume command-line flags expected to appear in the output
# of the custom `--consume-help` flag (see test below).
CONSUME_TEST_ARGS = (
    "--input",
    "--no-html",
    "--help",
)
@pytest.mark.parametrize(
    "command, help_flag",
    [
        ("direct", "--consume-help"),
        ("rlp", "--consume-help"),
        ("engine", "--consume-help"),
    ],
)
def test_local_arguments_present_in_base_consume_help(
    pytester: Any, help_flag: str, command: str
) -> None:
    """
    Test that locally defined command-line flags appear in the help for consume
    subcommands.
    """
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-consume.ini")
    result = pytester.runpytest("-c", "pytest-consume.ini", command, help_flag)
    # Join the captured stdout once instead of re-joining it for every flag.
    help_output = "\n".join(result.stdout.lines)
    for test_arg in CONSUME_TEST_ARGS:
        assert test_arg in help_output
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/pytest_hive/hive_info.py | src/pytest_plugins/pytest_hive/hive_info.py | """Hive instance information structures."""
from typing import Any, Dict, List, Optional
import yaml
from pydantic import BaseModel, Field, RootModel
from typing_extensions import Self
from ethereum_test_base_types import CamelModel
class YAMLModel(BaseModel):
    """Mixin adding YAML dump/load helpers to pydantic models."""

    def yaml(self, **kwargs: Any) -> str:
        """Return the YAML representation of the model."""
        dumped = self.model_dump()
        return yaml.dump(dumped, **kwargs)

    @classmethod
    def parse_yaml(cls, yaml_string: str) -> Self:
        """Parse a YAML string into a model instance."""
        return cls(**yaml.safe_load(yaml_string))
class ClientConfig(YAMLModel):
    """
    Client configuration for YAML serialization.
    Represents a single client entry in the clients.yaml file.
    """

    client: str
    nametag: Optional[str] = None
    dockerfile: Optional[str] = None
    # `dict` itself is the idiomatic default factory; wrapping it in a lambda
    # added nothing.
    build_args: Optional[Dict[str, str]] = Field(default_factory=dict)
class ClientFile(RootModel, YAMLModel):
    """
    Represents the entire clients.yaml file structure.
    The clients.yaml file is a list of client configurations.
    """

    # The YAML document's top level is a plain list, hence a pydantic RootModel.
    root: List[ClientConfig]
class HiveInfo(CamelModel):
    """Hive instance information."""

    command: List[str]
    # Defaults to an empty client list when hive does not report a client file.
    client_file: ClientFile = Field(default_factory=lambda: ClientFile(root=[]))
    commit: str
    date: str
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/pytest_hive/pytest_hive.py | src/pytest_plugins/pytest_hive/pytest_hive.py | """
A pytest plugin providing common functionality for Hive simulators.
Simulators using this plugin must define two pytest fixtures:
1. `test_suite_name`: The name of the test suite.
2. `test_suite_description`: The description of the test suite.
These fixtures are used when creating the hive test suite.
Log Capture Architecture:
-------------------------
This module implements a log capture approach that ensures all logs,
including those generated during fixture teardown, are properly
captured and included in the test results.
The key insight is that we need to ensure that test finalization happens
*before* the test suite is finalized, but *after* all fixtures have been torn
down so we can capture their logs. This is accomplished through the fixture
teardown mechanism in pytest:
1. Since the `hive_test` fixture depends on the `test_suite` fixture, pytest
guarantees that the teardown of `hive_test` runs before the teardown of
`test_suite`
2. All logs are processed and the test is finalized in the
teardown phase of the `hive_test` fixture using the pytest test report data
3. This sequencing ensures that all logs are captured and the test is properly
finalized before its parent test suite is finalized
This approach relies on the pytest fixture dependency graph and teardown
ordering to ensure proper sequencing, which is more reliable than using hooks
which might run in an unpredictable order relative to fixture teardown.
"""
import json
import os
import warnings
from dataclasses import asdict
from pathlib import Path
from typing import Any, Generator, List
import pytest
from filelock import FileLock
from hive.client import ClientRole
from hive.simulation import Simulation
from hive.testing import HiveTest, HiveTestResult, HiveTestSuite
from ..custom_logging import get_logger
from .hive_info import ClientFile, HiveInfo
logger = get_logger(__name__)
def pytest_configure(config: pytest.Config) -> None:
    """Validate the hive simulator URL and cache simulator/client info on `config`."""
    hive_simulator_url = config.getoption("hive_simulator")
    if hive_simulator_url is None:
        pytest.exit(
            "The HIVE_SIMULATOR environment variable is not set.\n\n"
            "If running locally, start hive in --dev mode, for example:\n"
            "./hive --dev --client go-ethereum\n\n"
            "and set the HIVE_SIMULATOR to the reported URL. For example, in bash:\n"
            "export HIVE_SIMULATOR=http://127.0.0.1:3000\n"
            "or in fish:\n"
            "set -x HIVE_SIMULATOR http://127.0.0.1:3000"
        )
    # TODO: Try and get these into fixtures; this is only here due to the
    # "dynamic" parametrization of client_type with hive_execution_clients.
    config.hive_simulator_url = hive_simulator_url  # type: ignore[attr-defined]
    config.hive_simulator = Simulation(url=hive_simulator_url)  # type: ignore[attr-defined]
    try:
        config.hive_execution_clients = config.hive_simulator.client_types(  # type: ignore[attr-defined]
            role=ClientRole.ExecutionClient
        )
    except Exception as e:
        base_message = (
            f"Error connecting to hive simulator at {hive_simulator_url}.\n\n"
            "Did you forget to start hive in --dev mode?\n"
            "./hive --dev --client go-ethereum\n\n"
        )
        detail = (
            f"Error details:\n{str(e)}"
            if config.option.verbose > 0
            else "Re-run with -v for more details."
        )
        pytest.exit(base_message + detail)
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the `--hive-simulator` command-line option."""
    group = parser.getgroup("pytest_hive", "Arguments related to pytest hive")
    group.addoption(
        "--hive-simulator",
        action="store",
        dest="hive_simulator",
        # Default comes from the environment so hive can inject the endpoint.
        default=os.environ.get("HIVE_SIMULATOR"),
        help=(
            "The Hive simulator endpoint, e.g. http://127.0.0.1:3000. By default, the value is "
            "taken from the HIVE_SIMULATOR environment variable."
        ),
    )
def get_hive_info(simulator: Simulation) -> HiveInfo | None:
    """Fetch and return the Hive instance information, or None on failure."""
    try:
        # Both the RPC call and the model validation stay inside the try so
        # either failure mode degrades to a warning + None.
        return HiveInfo(**simulator.hive_instance())
    except Exception as e:
        warnings.warn(
            f"Error fetching hive information: {str(e)}\n\n"
            "Hive might need to be updated to a newer version.",
            stacklevel=2,
        )
        return None
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config, start_path: Path) -> List[str] | None:
    """Add lines to pytest's console output header."""
    del start_path
    if config.option.collectonly:
        return None
    lines = [f"hive simulator: {config.hive_simulator_url}"]  # type: ignore[attr-defined]
    hive_info = get_hive_info(config.hive_simulator)  # type: ignore[attr-defined]
    if hive_info:
        hive_command = " ".join(hive_info.command)
        lines.append(f"hive command: {hive_command}")
        lines.append(f"hive commit: {hive_info.commit}")
        lines.append(f"hive date: {hive_info.date}")
        for client in hive_info.client_file.root:
            lines.append(
                f"hive client ({client.client}): {client.model_dump_json(exclude_none=True)}"
            )
    return lines
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(
    item: pytest.Item, call: pytest.CallInfo[None]
) -> Generator[None, Any, None]:
    """
    Make the setup, call, and teardown results available in the teardown phase
    of a test fixture (i.e., after yield has been called).
    This is used to get the test result and pass it to the hive test suite.
    Available as:
    - result_setup - setup result
    - result_call - test result
    - result_teardown - teardown result
    """
    del call
    hook_outcome = yield
    test_report = hook_outcome.get_result()
    # Stash the phase report on the item, e.g. as `item.result_call`.
    setattr(item, f"result_{test_report.when}", test_report)
@pytest.fixture(scope="session")
def simulator(request: pytest.FixtureRequest) -> Simulation:
    """Return the Hive simulator instance."""
    # `hive_simulator` is attached to the config in `pytest_configure`.
    return request.config.hive_simulator  # type: ignore[attr-defined]
@pytest.fixture(scope="session")
def hive_info(simulator: Simulation) -> HiveInfo | None:
    """Fetch and return the Hive instance information."""
    # Returns None (after emitting a warning) if the instance can't be queried.
    return get_hive_info(simulator)
@pytest.fixture(scope="session")
def client_file(hive_info: HiveInfo | None) -> ClientFile:
    """Return the client file used when launching hive."""
    # Fall back to an empty client list when hive info is unavailable.
    return ClientFile(root=[]) if hive_info is None else hive_info.client_file
def get_test_suite_scope(fixture_name: str, config: pytest.Config) -> str:
"""
Return the appropriate scope of the test suite.
See: https://docs.pytest.org/en/stable/how-to/fixtures.html#dynamic-scope
"""
del fixture_name
if hasattr(config, "test_suite_scope"):
return config.test_suite_scope
return "module"
@pytest.fixture(scope=get_test_suite_scope)  # type: ignore[arg-type]
def test_suite(
    simulator: Simulation,
    session_temp_folder: Path,
    test_suite_name: str,
    test_suite_description: str,
) -> Generator[HiveTestSuite, None, None]:
    """Defines a Hive test suite and cleans up after all tests have run."""
    # The suite handle is persisted to a file under a file lock so that
    # multiple processes sharing `session_temp_folder` reuse one suite
    # (presumably pytest-xdist workers — TODO confirm). The first process to
    # acquire the lock starts the suite; later ones deserialize it.
    suite_file_name = f"test_suite_{test_suite_name}"
    suite_file = session_temp_folder / suite_file_name
    suite_lock_file = session_temp_folder / f"{suite_file_name}.lock"
    with FileLock(suite_lock_file):
        if suite_file.exists():
            with open(suite_file, "r") as f:
                suite = HiveTestSuite(**json.load(f))
        else:
            suite = simulator.start_suite(
                name=test_suite_name,
                description=test_suite_description,
            )
            with open(suite_file, "w") as f:
                json.dump(asdict(suite), f)
    # Reference count of suite users, also guarded by a file lock: each
    # process increments on setup and decrements on teardown below.
    users_file_name = f"test_suite_{test_suite_name}_users"
    users_file = session_temp_folder / users_file_name
    users_lock_file = session_temp_folder / f"{users_file_name}.lock"
    with FileLock(users_lock_file):
        if users_file.exists():
            with open(users_file, "r") as f:
                users = json.load(f)
        else:
            users = 0
        users += 1
        with open(users_file, "w") as f:
            json.dump(users, f)
    yield suite
    # Teardown: the last user (count reaches zero) ends the suite and removes
    # the shared bookkeeping files.
    with FileLock(users_lock_file):
        with open(users_file, "r") as f:
            users = json.load(f)
        users -= 1
        with open(users_file, "w") as f:
            json.dump(users, f)
        if users == 0:
            suite.end()
            suite_file.unlink()
            users_file.unlink()
@pytest.fixture(scope="function")
def hive_test(
    request: pytest.FixtureRequest, test_suite: HiveTestSuite
) -> Generator[HiveTest, None, None]:
    """
    Propagate the pytest test case and its result to the hive server.
    This fixture handles both starting the test and ending it with all logs,
    including those generated during teardown of other fixtures. The approach
    of processing teardown logs directly in the teardown phase of this fixture
    ensures that the test gets properly finalized before the test suite is torn
    down.
    """
    try:
        test_case_description = request.getfixturevalue("test_case_description")
    except pytest.FixtureLookupError:
        pytest.exit(
            "Error: The 'test_case_description' fixture has not been defined by the simulator "
            "or pytest plugin using this plugin!"
        )
    test_parameter_string = request.node.name
    test: HiveTest = test_suite.start_test(
        name=test_parameter_string,
        description=test_case_description,
    )
    yield test
    try:
        # Collect all logs from all phases (reports are stashed on the node by
        # the pytest_runtest_makereport hookwrapper above).
        captured = []
        setup_out = ""
        call_out = ""
        for phase in ("setup", "call", "teardown"):
            report = getattr(request.node, f"result_{phase}", None)
            if report:
                stdout = report.capstdout or "None"
                stderr = report.capstderr or "None"
                # Remove setup output from call phase output
                if phase == "setup":
                    setup_out = stdout
                if phase == "call":
                    call_out = stdout
                    # If call output starts with setup output, strip it
                    if call_out.startswith(setup_out):
                        stdout = call_out.removeprefix(setup_out)
                captured.append(
                    f"# Captured Output from Test {phase.capitalize()}\n\n"
                    f"## stdout:\n{stdout}\n"
                    f"## stderr:\n{stderr}\n"
                )
        captured_output = "\n".join(captured)
        if hasattr(request.node, "result_call") and request.node.result_call.passed:
            test_passed = True
            test_result_details = "Test passed.\n\n" + captured_output
        elif hasattr(request.node, "result_call") and not request.node.result_call.passed:
            test_passed = False
            # Bug fix: the "Test failed." header used to be assigned and then
            # immediately overwritten by a second assignment (dead store).
            # Keep the header, matching the setup/teardown failure branches.
            test_result_details = (
                "Test failed.\n\n"
                + request.node.result_call.longreprtext
                + "\n"
                + captured_output
            )
        elif hasattr(request.node, "result_setup") and not request.node.result_setup.passed:
            test_passed = False
            test_result_details = (
                "Test setup failed.\n\n"
                + request.node.result_setup.longreprtext
                + "\n"
                + captured_output
            )
        elif hasattr(request.node, "result_teardown") and not request.node.result_teardown.passed:
            test_passed = False
            test_result_details = (
                "Test teardown failed.\n\n"
                + request.node.result_teardown.longreprtext
                + "\n"
                + captured_output
            )
        else:
            test_passed = False
            test_result_details = (
                "Test failed for unknown reason (setup or call status unknown).\n\n"
                + captured_output
            )
        test.end(result=HiveTestResult(test_pass=test_passed, details=test_result_details))
        logger.verbose(f"Finished processing logs for test: {request.node.nodeid}")
    except Exception as e:
        # Never let log processing break teardown: still report the failure
        # to hive so the test is finalized before the suite ends.
        logger.verbose(f"Error processing logs for test {request.node.nodeid}: {str(e)}")
        test_passed = False
        test_result_details = f"Exception whilst processing test result: {str(e)}"
        test.end(result=HiveTestResult(test_pass=test_passed, details=test_result_details))
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/pre_alloc.py | src/pytest_plugins/filler/pre_alloc.py | """Pre-alloc specifically conditioned for test filling."""
import inspect
from enum import IntEnum
from functools import cache
from hashlib import sha256
from itertools import count
from typing import Any, Iterator, List, Literal
import pytest
from pydantic import PrivateAttr
from ethereum_test_base_types import (
Account,
Address,
Number,
Storage,
StorageRootType,
TestPrivateKey,
TestPrivateKey2,
ZeroPaddedHexNumber,
)
from ethereum_test_base_types.conversions import (
BytesConvertible,
FixedSizeBytesConvertible,
NumberConvertible,
)
from ethereum_test_fixtures import LabeledFixtureFormat
from ethereum_test_forks import Fork
from ethereum_test_specs import BaseTest
from ethereum_test_types import EOA
from ethereum_test_types import Alloc as BaseAlloc
from ethereum_test_types.eof.v1 import Container
from ethereum_test_vm import Bytecode, EVMCodeType, Opcodes
# Default first address from which tests deploy contracts (see --ca-start).
CONTRACT_START_ADDRESS_DEFAULT = 0x1000000000000000000000000000000000001000
# Default gap between consecutively deployed contract addresses (see --ca-incr).
CONTRACT_ADDRESS_INCREMENTS_DEFAULT = 0x100
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    group = parser.getgroup(
        "pre_alloc", "Arguments defining pre-allocation behavior during test filling."
    )
    group.addoption(
        "--strict-alloc",
        action="store_true",
        dest="strict_alloc",
        default=False,
        help="[DEBUG ONLY] Disallows deploying a contract in a predefined address.",
    )
    group.addoption(
        "--ca-start",
        "--contract-address-start",
        action="store",
        dest="test_contract_start_address",
        default=str(CONTRACT_START_ADDRESS_DEFAULT),
        type=str,
        help="The starting address from which tests will deploy contracts.",
    )
    group.addoption(
        "--ca-incr",
        "--contract-address-increment",
        action="store",
        dest="test_contract_address_increments",
        default=str(CONTRACT_ADDRESS_INCREMENTS_DEFAULT),
        type=str,
        help="The address increment value to each deployed contract by a test.",
    )
    group.addoption(
        "--evm-code-type",
        action="store",
        dest="evm_code_type",
        default=None,
        type=EVMCodeType,
        choices=list(EVMCodeType),
        help="Type of EVM code to deploy in each test by default.",
    )
class AllocMode(IntEnum):
    """Allocation mode for the state."""

    # PERMISSIVE allows direct item assignment and hard-coded contract
    # addresses; STRICT disallows both (see `Alloc.__setitem__` and
    # `Alloc.deploy_contract`).
    PERMISSIVE = 0
    STRICT = 1
# Code prefix 0xef0100 marking delegated accounts; presumably the EIP-7702
# delegation designator — confirm against the spec module that consumes it.
DELEGATION_DESIGNATION = b"\xef\x01\x00"
class Alloc(BaseAlloc):
    """Allocation of accounts in the state, pre and post test execution."""

    # Private (non-serialized) state, populated in `__init__`:
    _alloc_mode: AllocMode = PrivateAttr()  # strict vs permissive behavior
    _contract_address_iterator: Iterator[Address] = PrivateAttr()  # next deploy address
    _eoa_iterator: Iterator[EOA] = PrivateAttr()  # next unused funded EOA
    _evm_code_type: EVMCodeType | None = PrivateAttr(None)  # default code type
    _fork: Fork = PrivateAttr()  # active fork, used e.g. for max code size
def __init__(
self,
*args: Any,
alloc_mode: AllocMode,
contract_address_iterator: Iterator[Address],
eoa_iterator: Iterator[EOA],
fork: Fork,
evm_code_type: EVMCodeType | None = None,
**kwargs: Any,
) -> None:
"""Initialize allocation with the given properties."""
super().__init__(*args, **kwargs)
self._alloc_mode = alloc_mode
self._contract_address_iterator = contract_address_iterator
self._eoa_iterator = eoa_iterator
self._evm_code_type = evm_code_type
self._fork = fork
def __setitem__(
self,
address: Address | FixedSizeBytesConvertible,
account: Account | None,
) -> None:
"""Set account associated with an address."""
if self._alloc_mode == AllocMode.STRICT:
raise ValueError("Cannot set items in strict mode")
super().__setitem__(address, account)
def code_pre_processor(
self, code: BytesConvertible, *, evm_code_type: EVMCodeType | None
) -> BytesConvertible:
"""Pre-processes the code before setting it."""
if evm_code_type is None:
evm_code_type = self._evm_code_type
if evm_code_type == EVMCodeType.EOF_V1:
if not isinstance(code, Container):
if isinstance(code, Bytecode) and not code.terminating:
return Container.Code(code + Opcodes.STOP)
return Container.Code(code)
return code
def deploy_contract(
self,
code: BytesConvertible,
*,
storage: Storage | StorageRootType | None = None,
balance: NumberConvertible = 0,
nonce: NumberConvertible = 1,
address: Address | None = None,
evm_code_type: EVMCodeType | None = None,
label: str | None = None,
stub: str | None = None,
) -> Address:
"""
Deploy a contract to the allocation.
Warning: `address` parameter is a temporary solution to allow tests to
hard-code the contract address. Do NOT use in new tests as it will be
removed in the future!
"""
del stub
if storage is None:
storage = {}
if address is not None:
assert self._alloc_mode == AllocMode.PERMISSIVE, "address parameter is not supported"
assert address not in self, f"address {address} already in allocation"
contract_address = address
else:
contract_address = next(self._contract_address_iterator)
if self._alloc_mode == AllocMode.STRICT:
assert Number(nonce) >= 1, "impossible to deploy contract with nonce lower than one"
code = self.code_pre_processor(code, evm_code_type=evm_code_type)
code_bytes = bytes(code) if not isinstance(code, (bytes, str)) else code
max_code_size = self._fork.max_code_size()
assert len(code_bytes) <= max_code_size, (
f"code too large: {len(code_bytes)} > {max_code_size}"
)
super().__setitem__(
contract_address,
Account(
nonce=nonce,
balance=balance,
code=code,
storage=storage,
),
)
if label is None:
# Try to deduce the label from the code
frame = inspect.currentframe()
if frame is not None:
caller_frame = frame.f_back
if caller_frame is not None:
code_context = inspect.getframeinfo(caller_frame).code_context
if code_context is not None:
line = code_context[0].strip()
if "=" in line:
label = line.split("=")[0].strip()
contract_address.label = label
return contract_address
def fund_eoa(
self,
amount: NumberConvertible | None = None,
label: str | None = None,
storage: Storage | None = None,
delegation: Address | Literal["Self"] | None = None,
nonce: NumberConvertible | None = None,
) -> EOA:
"""
Add a previously unused EOA to the pre-alloc with the balance specified
by `amount`.
If amount is 0, nothing will be added to the pre-alloc but a new and
unique EOA will be returned.
"""
del label
eoa = next(self._eoa_iterator)
if amount is None:
amount = self._eoa_fund_amount_default
if (
Number(amount) > 0
or storage is not None
or delegation is not None
or (nonce is not None and Number(nonce) > 0)
):
if storage is None and delegation is None:
nonce = Number(0 if nonce is None else nonce)
account = Account(
nonce=nonce,
balance=amount,
)
if nonce > 0:
eoa.nonce = nonce
else:
# Type-4 transaction is sent to the EOA to set the storage, so
# the nonce must be 1
if not isinstance(delegation, Address) and delegation == "Self":
delegation = eoa
# If delegation is None but storage is not, realistically the
# nonce should be 2 because the account must have delegated to
# set the storage and then again to reset the delegation (but
# can be overridden by the test for a non-realistic scenario)
real_nonce = 2 if delegation is None else 1
nonce = Number(real_nonce if nonce is None else nonce)
account = Account(
nonce=nonce,
balance=amount,
storage=storage if storage is not None else {},
code=DELEGATION_DESIGNATION + bytes(delegation)
if delegation is not None
else b"",
)
eoa.nonce = nonce
super().__setitem__(eoa, account)
return eoa
def fund_address(self, address: Address, amount: NumberConvertible) -> None:
"""
Fund an address with a given amount.
If the address is already present in the pre-alloc the amount will be
added to its existing balance.
"""
if address in self:
account = self[address]
if account is not None:
current_balance = account.balance or 0
account.balance = ZeroPaddedHexNumber(current_balance + Number(amount))
return
super().__setitem__(address, Account(balance=amount))
def empty_account(self) -> Address:
"""
Add a previously unused account guaranteed to be empty to the
pre-alloc.
This ensures the account has:
- Zero balance
- Zero nonce
- No code
- No storage
This is different from precompiles or system contracts. The function
does not send any transactions, ensuring that the account remains
"empty."
Returns:
Address: The address of the created empty account.
"""
eoa = next(self._eoa_iterator)
return Address(eoa)
@pytest.fixture(scope="session")
def alloc_mode(request: pytest.FixtureRequest) -> AllocMode:
    """Resolve the session-wide allocation mode from the `strict_alloc` option."""
    strict_requested = request.config.getoption("strict_alloc")
    return AllocMode.STRICT if strict_requested else AllocMode.PERMISSIVE
@pytest.fixture(scope="session")
def contract_start_address(request: pytest.FixtureRequest) -> int:
    """Return the first address used for contract deployment."""
    configured = request.config.getoption("test_contract_start_address")
    # Base 0 lets the option accept decimal, hex ("0x...") or other prefixes.
    return int(configured, 0)
@pytest.fixture(scope="session")
def contract_address_increments(request: pytest.FixtureRequest) -> int:
    """Return the step between consecutive contract deployment addresses."""
    configured = request.config.getoption("test_contract_address_increments")
    # Base 0 lets the option accept decimal, hex ("0x...") or other prefixes.
    return int(configured, 0)
def sha256_from_string(s: str) -> int:
    """Return the SHA-256 digest of `s` (UTF-8 encoded) as a big-endian integer."""
    digest = sha256(s.encode("utf-8")).digest()
    return int.from_bytes(digest, byteorder="big")
# Collect the unique fixture-format names supported by every test spec type.
ALL_FIXTURE_FORMAT_NAMES: List[str] = []
for spec_type in BaseTest.spec_types.values():
    for supported_format in spec_type.supported_fixture_formats:
        if isinstance(supported_format, LabeledFixtureFormat):
            format_name = supported_format.label
        else:
            format_name = supported_format.format_name.lower()
        if format_name not in ALL_FIXTURE_FORMAT_NAMES:
            ALL_FIXTURE_FORMAT_NAMES.append(format_name)
# Sort by length, from longest to shortest, since some fixture format names
# contain others so we are always sure to catch the longest one first.
ALL_FIXTURE_FORMAT_NAMES.sort(key=len, reverse=True)
@pytest.fixture(scope="function")
def node_id_for_entropy(request: pytest.FixtureRequest, fork: Fork | None) -> str:
    """
    Return the node id with the fixture format name and fork name stripped.

    Used in cases where we are filling for pre-alloc groups, and we take the
    name of the test as source of entropy to get a deterministic address when
    generating the pre-alloc grouping.

    Removing the fixture format and the fork name from the node id before
    hashing results in the contracts and senders addresses being the same
    across fixture types and forks for the same test.
    """
    node_id: str = request.node.nodeid
    if fork is None:
        # FIXME: Static tests don't have a fork, so we need to get it from the
        # node.
        assert hasattr(request.node, "fork")
        fork = request.node.fork
    # ALL_FIXTURE_FORMAT_NAMES is sorted longest-first, so the longest
    # matching format name is found (and stripped) before any substring of it.
    for fixture_format_name in ALL_FIXTURE_FORMAT_NAMES:
        if fixture_format_name in node_id:
            # Split "<file>::<test-name[params]>" and strip format/fork from
            # the test-name part only.
            parts = request.node.nodeid.split("::")
            test_file_path = parts[0]
            test_name = "::".join(parts[1:])
            stripped_test_name = test_name.replace(fixture_format_name, "").replace(
                fork.name(), ""
            )
            return f"{test_file_path}::{stripped_test_name}"
    raise Exception(f"Fixture format name not found in test {node_id}")
@pytest.fixture(scope="function")
def contract_address_iterator(
    request: pytest.FixtureRequest,
    contract_start_address: int,
    contract_address_increments: int,
    node_id_for_entropy: str,
) -> Iterator[Address]:
    """Yield deterministic contract addresses, one per deployment."""
    # TODO: Ideally, we should check the fixture format instead of checking
    # parameters.
    pre_alloc_grouping = request.config.getoption(
        "generate_pre_alloc_groups", default=False
    ) or request.config.getoption("use_pre_alloc_groups", default=False)
    if pre_alloc_grouping:
        # Derive the start address from the test node id so addresses are
        # stable for the pre-alloc grouping.
        base_address = sha256_from_string(node_id_for_entropy)
    else:
        base_address = contract_start_address

    def _addresses() -> Iterator[Address]:
        for index in count():
            yield Address((base_address + index * contract_address_increments) % 2**160)

    return _addresses()
@cache
def eoa_by_index(i: int) -> EOA:
    """Return the memoized EOA for index `i` (index 1 uses the second test key)."""
    private_key = TestPrivateKey2 if i == 1 else TestPrivateKey + i
    return EOA(key=private_key, nonce=0)
@pytest.fixture(scope="function")
def eoa_iterator(
    request: pytest.FixtureRequest,
    node_id_for_entropy: str,
) -> Iterator[EOA]:
    """Return iterator over EOAs copies with dynamic scoping."""
    if request.config.getoption(
        # TODO: Ideally, we should check the fixture format instead of checking
        # parameters.
        "generate_pre_alloc_groups",
        default=False,
    ) or request.config.getoption("use_pre_alloc_groups", default=False):
        # Use a starting private key that is derived from the test node id so
        # senders are deterministic for the pre-alloc grouping.
        eoa_start_pk = sha256_from_string(node_id_for_entropy)
        return iter(
            EOA(
                # Reduced modulo the secp256k1 group order so the value is a
                # valid private key. NOTE(review): a result of 0 would be
                # invalid — astronomically unlikely, but unhandled.
                key=(eoa_start_pk + i)
                % 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,
                nonce=0,
            )
            for i in count()
        )
    # Default path: hand out copies of the cached test EOAs so per-test nonce
    # mutation does not leak across tests.
    return iter(eoa_by_index(i).copy() for i in count())
@pytest.fixture(autouse=True)
def evm_code_type(request: pytest.FixtureRequest) -> EVMCodeType:
    """Return the default EVM code type for all tests (LEGACY unless overridden)."""
    from_command_line = request.config.getoption("evm_code_type")
    if from_command_line is None:
        return EVMCodeType.LEGACY
    assert type(from_command_line) is EVMCodeType, "Invalid EVM code type"
    return from_command_line
@pytest.fixture(scope="function")
def pre(
    alloc_mode: AllocMode,
    contract_address_iterator: Iterator[Address],
    eoa_iterator: Iterator[EOA],
    evm_code_type: EVMCodeType,
    fork: Fork | None,
    request: pytest.FixtureRequest,
) -> Alloc:
    """Return default pre allocation for all tests (Empty alloc)."""
    # FIXME: Static tests dont have a fork so we need to get it from the node.
    actual_fork = fork
    if actual_fork is None:
        assert hasattr(request.node, "fork")
        actual_fork = request.node.fork
    # The alloc starts empty; tests populate it via deploy_contract/fund_eoa.
    return Alloc(
        alloc_mode=alloc_mode,
        contract_address_iterator=contract_address_iterator,
        eoa_iterator=eoa_iterator,
        fork=actual_fork,
        evm_code_type=evm_code_type,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/static_filler.py | src/pytest_plugins/filler/static_filler.py | """
Static filler pytest plugin that reads test cases from static files and fills
them into test fixtures.
"""
import inspect
import itertools
import json
import warnings
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Self, Tuple, Type
import pytest
import yaml
from _pytest.fixtures import TopRequest
from _pytest.mark import ParameterSet
from _pytest.python import Module
from ethereum_test_fixtures import BaseFixture, LabeledFixtureFormat
from ethereum_test_forks import Fork, get_closest_fork
from ethereum_test_specs import BaseStaticTest, BaseTest
from ethereum_test_tools.tools_code.yul import Yul
from ..forks.forks import ValidityMarker
from ..shared.helpers import labeled_format_parameter_set
def get_test_id_from_arg_names_and_values(
arg_names: List[str], arg_values: List[Any] | Tuple[Any, ...]
) -> str:
"""Get the test id from argument names and values."""
return "-".join(
[
f"{arg_name}={arg_value}"
for arg_name, arg_value in zip(arg_names, arg_values, strict=True)
]
)
def get_argument_names_and_values_from_parametrize_mark(
    mark: pytest.Mark,
) -> Tuple[List[str], List[ParameterSet]]:
    """Get the argument names and values from a parametrize mark.

    Normalizes every plain value/tuple/list into a `ParameterSet` with a
    generated (or `ids`-supplied) id; pre-built `ParameterSet`s pass through
    untouched.

    Raises:
        Exception: if the mark is not a parametrize mark, carries unsupported
            kwargs (anything other than `ids`/`marks`), or its positional
            args are malformed.
    """
    if mark.name != "parametrize":
        raise Exception("Mark is not a parametrize mark")
    kwargs_dict = dict(mark.kwargs)
    ids: Callable | List[str] | None = kwargs_dict.pop("ids") if "ids" in kwargs_dict else None
    marks: List[pytest.Mark] = kwargs_dict.pop("marks") if "marks" in kwargs_dict else []
    if kwargs_dict:
        raise Exception("Mark has kwargs which is not supported")
    args = mark.args
    if not isinstance(args, tuple):
        raise Exception("Args is not a tuple")
    if len(args) != 2:
        raise Exception("Args does not have 2 elements")
    # `argnames` may be given as a list or a comma-separated string.
    arg_names = args[0] if isinstance(args[0], list) else args[0].split(",")
    arg_values = []
    for arg_index, arg_value in enumerate(args[1]):
        if not isinstance(arg_value, ParameterSet):
            original_arg_value = arg_value
            # Wrap single values so they zip against `arg_names`.
            if not isinstance(arg_value, tuple) and not isinstance(arg_value, list):
                arg_value = (arg_value,)
            test_id: str = get_test_id_from_arg_names_and_values(arg_names, arg_value)
            if ids:
                if callable(ids):
                    # Callable ids receive the original (unwrapped) value.
                    test_id = ids(original_arg_value)
                else:
                    test_id = ids[arg_index]
            arg_values.append(ParameterSet(arg_value, marks, id=test_id))
        else:
            arg_values.append(arg_value)
    return arg_names, arg_values
def get_all_combinations_from_parametrize_marks(
    parametrize_marks: List[pytest.Mark],
) -> Tuple[List[str], List[ParameterSet]]:
    """Get all combinations of arguments from multiple parametrize marks.

    Mirrors stacked `@pytest.mark.parametrize` decorators: builds the
    cartesian product of every mark's parameter sets, concatenating the
    values, marks and ids of each combination.
    """
    assert parametrize_marks, "No parametrize marks found"
    list_of_values: List[List[ParameterSet]] = []
    all_argument_names = []
    for mark in parametrize_marks:
        arg_names, arg_values = get_argument_names_and_values_from_parametrize_mark(mark)
        list_of_values.append(arg_values)
        all_argument_names.extend(arg_names)
    all_value_combinations: List[ParameterSet] = []
    # use itertools to get all combinations
    test_ids = set()
    for combination in itertools.product(*list_of_values):
        values: List[Any] = []
        marks: List[pytest.Mark | pytest.MarkDecorator] = []
        for param_set in combination:
            values.extend(param_set.values)
            marks.extend(param_set.marks)
        test_id = "-".join([param.id or "" for param in combination])  # type: ignore[misc]
        # De-duplicate colliding ids by appending "-2", "-3", ...
        if test_id in test_ids:
            current_int = 2
            while f"{test_id}-{current_int}" in test_ids:
                current_int += 1
            test_id = f"{test_id}-{current_int}"
        all_value_combinations.append(
            ParameterSet(
                values=values,
                marks=marks,
                id=test_id,
            )
        )
        test_ids.add(test_id)
    return all_argument_names, all_value_combinations
def pytest_collect_file(file_path: Path, parent: Module) -> pytest.Collector | None:
    """
    Pytest hook that collects test cases from static files and fills them into
    test fixtures.
    """
    if not parent.config.getoption("fill_static_tests_enabled"):
        return None
    if not BaseStaticTest.formats:
        # No formats registered, so no need to collect any files.
        return None
    if file_path.suffix not in (".json", ".yml", ".yaml"):
        return None
    # Anchor the synthetic module at the directory's __init__.py so the
    # collected items resolve inside the package.
    init_file = file_path.parent / "__init__.py"
    module = Module.from_parent(parent=parent, path=init_file, nodeid=str(init_file))
    return FillerFile.from_parent(module, path=file_path)
class NoIntResolver(yaml.SafeLoader):
    """Class that tells yaml to not resolve int values."""

    pass


# Remove the implicit resolver for integers: YAML would otherwise treat
# unquoted numbers such as 000001000 as octal, so all numbers stay `str`.
for first_char in list(NoIntResolver.yaml_implicit_resolvers):
    NoIntResolver.yaml_implicit_resolvers[first_char] = [
        (tag, regexp)
        for tag, regexp in NoIntResolver.yaml_implicit_resolvers[first_char]
        if tag != "tag:yaml.org,2002:int"
    ]
class FillerFile(pytest.File):
    """
    Filler file that reads test cases from static files and fills them into
    test fixtures.
    """

    def collect(self: "FillerFile") -> Generator["FillerTestItem", None, None]:
        """Collect test cases from a single static file.

        Yields one `FillerTestItem` per (fixture format, fork, parameter
        combination) of every test defined in the file. Files whose stem does
        not end in "Filler" are ignored. Any error while loading/validating
        the file aborts the session via `pytest.fail`.
        """
        if not self.path.stem.endswith("Filler"):
            return
        with open(self.path, "r") as file:
            try:
                loaded_file = (
                    json.load(file)
                    if self.path.suffix == ".json"
                    else yaml.load(file, Loader=NoIntResolver)
                )
                for key in loaded_file:
                    filler = BaseStaticTest.model_validate(loaded_file[key])
                    func = filler.fill_function()
                    function_marks: List[pytest.Mark] = []
                    if hasattr(func, "pytestmark"):
                        function_marks = func.pytestmark[:]
                    parametrize_marks: List[pytest.Mark] = [
                        mark for mark in function_marks if mark.name == "parametrize"
                    ]
                    func_parameters = inspect.signature(func).parameters
                    # The spec parameter (e.g. "state_test") identifies the
                    # test type and determines the supported fixture formats.
                    fixture_formats: List[Type[BaseFixture] | LabeledFixtureFormat] = []
                    spec_parameter_name = ""
                    for test_type in BaseTest.spec_types.values():
                        if test_type.pytest_parameter_name() in func_parameters:
                            assert not spec_parameter_name, "Multiple spec parameters found"
                            spec_parameter_name = test_type.pytest_parameter_name()
                            session = self.config.filling_session  # type: ignore[attr-defined]
                            fixture_formats.extend(
                                fixture_format
                                for fixture_format in test_type.supported_fixture_formats
                                if session.should_generate_format(fixture_format)
                            )
                    test_fork_set = ValidityMarker.get_test_fork_set_from_markers(
                        iter(function_marks)
                    )
                    if not test_fork_set:
                        pytest.fail(
                            "The test function's "
                            f"'{key}' fork validity markers generate "
                            "an empty fork range. Please check the arguments to its "
                            f"markers: @pytest.mark.valid_from and "
                            f"@pytest.mark.valid_until."
                        )
                    # Only fill for forks selected on the command line.
                    intersection_set = test_fork_set & self.config.selected_fork_set  # type: ignore
                    extra_function_marks: List[pytest.Mark] = [
                        mark
                        for mark in function_marks
                        if mark.name != "parametrize"
                        and not ValidityMarker.is_validity_or_filter_marker(mark.name)
                    ]
                    for format_with_or_without_label in fixture_formats:
                        fixture_format_parameter_set = labeled_format_parameter_set(
                            format_with_or_without_label
                        )
                        fixture_format = (
                            format_with_or_without_label.format
                            if isinstance(format_with_or_without_label, LabeledFixtureFormat)
                            else format_with_or_without_label
                        )
                        for fork in sorted(intersection_set):
                            params: Dict[str, Any] = {spec_parameter_name: fixture_format}
                            fixturenames = [
                                spec_parameter_name,
                            ]
                            marks: List[pytest.Mark] = [
                                mark  # type: ignore
                                for mark in fixture_format_parameter_set.marks
                                if mark.name != "parametrize"
                            ]
                            test_id = f"fork_{fork.name()}-{fixture_format_parameter_set.id}"
                            if "fork" in func_parameters:
                                params["fork"] = fork
                            if "pre" in func_parameters:
                                fixturenames.append("pre")
                            if "request" in func_parameters:
                                fixturenames.append("request")
                            if parametrize_marks:
                                parameter_names, parameter_set_list = (
                                    get_all_combinations_from_parametrize_marks(parametrize_marks)
                                )
                                for parameter_set in parameter_set_list:
                                    # Copy and extend the params with the
                                    # parameter set
                                    case_marks = (
                                        marks[:]
                                        + [
                                            mark
                                            for mark in parameter_set.marks
                                            if mark.name != "parametrize"
                                        ]
                                        + extra_function_marks
                                    )
                                    case_params = params.copy() | dict(
                                        zip(parameter_names, parameter_set.values, strict=True)
                                    )
                                    yield FillerTestItem.from_parent(
                                        self,
                                        original_name=key,
                                        func=func,
                                        params=case_params,
                                        fixturenames=fixturenames,
                                        name=f"{key}[{test_id}-{parameter_set.id}]",
                                        fork=fork,
                                        fixture_format=fixture_format,
                                        marks=case_marks,
                                    )
                            else:
                                # NOTE(review): unlike the parametrized path,
                                # `extra_function_marks` are NOT applied here —
                                # confirm this asymmetry is intentional.
                                yield FillerTestItem.from_parent(
                                    self,
                                    original_name=key,
                                    func=func,
                                    params=params,
                                    fixturenames=fixturenames,
                                    name=f"{key}[{test_id}]",
                                    fork=fork,
                                    fixture_format=fixture_format,
                                    marks=marks,
                                )
            except Exception as e:
                # pytest.fail raises, aborting collection of this file.
                # (Dead code removed: a warnings.warn + return after this call
                # could never execute.)
                pytest.fail(f"Error loading file {self.path} as a test: {e}")
class FillerTestItem(pytest.Item):
    """Filler test item produced from a single test from a static file."""

    originalname: str  # Test name as it appears in the static file.
    func: Callable  # Fill function generated from the static test definition.
    params: Dict[str, Any]  # Arguments passed to `func` when the test runs.
    fixturenames: List[str]  # Fixture names resolved during `setup()`.
    github_url: str = ""
    fork: Fork  # Fork this item fills for.
    fixture_format: Type[BaseFixture]  # Output fixture format of this item.

    def __init__(
        self,
        *args: Any,
        original_name: str,
        func: Callable,
        params: Dict[str, Any],
        fixturenames: List[str],
        fork: Fork,
        fixture_format: Type[BaseFixture],
        marks: List[pytest.Mark],
        **kwargs: Any,
    ) -> None:
        """Initialize the filler test item."""
        super().__init__(*args, **kwargs)
        self.originalname = original_name
        self.func = func
        self.params = params
        self.fixturenames = fixturenames
        self.fork = fork
        self.fixture_format = fixture_format
        for marker in marks:
            # Plain `pytest.Mark` objects are appended directly; anything else
            # (e.g. MarkDecorator) goes through `add_marker`.
            if type(marker) is pytest.Mark:
                self.own_markers.append(marker)
            else:
                self.add_marker(marker)  # type: ignore

    def setup(self) -> None:
        """Resolve and apply fixtures before test execution."""
        self._fixtureinfo = self.session._fixturemanager.getfixtureinfo(
            self,
            None,
            None,
        )
        request = TopRequest(
            self,  # type: ignore[arg-type]
            _ispytest=True,
        )
        for fixture_name in self.fixturenames:
            if fixture_name == "request":
                # The test function asked for the pytest request object itself.
                self.params[fixture_name] = request
            else:
                self.params[fixture_name] = request.getfixturevalue(fixture_name)

    def runtest(self) -> None:
        """Execute the test logic for this specific static test."""
        self.func(**self.params)

    def reportinfo(self) -> Tuple[Path, int, str]:
        """Provide information for test reporting."""
        return self.fspath, 0, f"Static file test: {self.name}"
@pytest.fixture
def yul(fork: Fork, request: pytest.FixtureRequest) -> Type[Yul]:
    """
    Fixture that allows contract code to be defined with Yul code.

    This fixture defines a class that wraps the ::ethereum_test_tools.Yul class
    so that upon instantiation within the test case, it provides the test
    case's current fork parameter. The forks is then available for use in
    solc's arguments for the Yul code compilation.

    Test cases can override the default value by specifying a fixed version
    with the @pytest.mark.compile_yul_with(FORK) marker.
    """
    solc_target_fork: Fork | None
    marker = request.node.get_closest_marker("compile_yul_with")
    assert hasattr(request.config, "solc_version"), "solc_version not set in pytest config."
    if marker:
        # Explicit override: find the named fork in the configured fork list.
        if not marker.args[0]:
            pytest.fail(
                f"{request.node.name}: Expected one argument in 'compile_yul_with' marker."
            )
        for fork in request.config.all_forks:  # type: ignore
            if fork.name() == marker.args[0]:
                solc_target_fork = fork
                break
        else:
            pytest.fail(f"{request.node.name}: Fork {marker.args[0]} not found in forks list.")
    else:
        # Default: pick the closest fork supported by the solc in use.
        solc_target_fork = get_closest_fork(fork)
        assert solc_target_fork is not None, "No fork supports provided solc version."
        if solc_target_fork != fork and request.config.getoption("verbose") >= 1:
            warnings.warn(
                f"Compiling Yul for {solc_target_fork.name()}, not {fork.name()}.", stacklevel=2
            )

    class YulWrapper(Yul):
        # Inject the resolved target fork into every Yul instantiation.
        def __new__(cls, *args: Any, **kwargs: Any) -> Self:
            kwargs["fork"] = solc_target_fork
            return super(YulWrapper, cls).__new__(cls, *args, **kwargs)

    return YulWrapper
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/witness.py | src/pytest_plugins/filler/witness.py | """
Pytest plugin for witness functionality.
Provides --witness command-line option that checks for the witness-filler tool
in PATH and generates execution witness data for blockchain test fixtures when
enabled.
"""
import shutil
import subprocess
from typing import Callable, List
import pytest
from ethereum_test_base_types import EthereumTestRootModel
from ethereum_test_fixtures.blockchain import BlockchainFixture, FixtureBlock, WitnessChunk
from ethereum_test_forks import Paris
class WitnessFillerResult(EthereumTestRootModel[List[WitnessChunk]]):
    """
    Model that defines the expected result from the `witness-filler` command.

    The tool's stdout is parsed as a JSON list of witness chunks; they are
    attached one-per-block in `witness_generator` below.
    """

    root: List[WitnessChunk]
class Merge(Paris):
    """
    Paris fork that serializes as 'Merge' for witness-filler compatibility.

    IMPORTANT: This class MUST be named 'Merge' (not 'MergeForWitness' or
    similar) because the class name is used directly in Pydantic serialization,
    and witness-filler expects exactly 'Merge' for this fork.
    """

    # Intentionally empty: only the class name differs from `Paris`.
    pass
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add witness command-line options to pytest."""
    group = parser.getgroup("witness", "Arguments for witness functionality")
    group.addoption(
        "--witness",
        "--witness-the-fitness",
        dest="witness",
        action="store_true",
        default=False,
        help=(
            "Generate execution witness data for blockchain test fixtures using the "
            "witness-filler tool (must be installed separately)."
        ),
    )
def pytest_configure(config: pytest.Config) -> None:
    """
    Pytest hook called after command line options have been parsed.

    If --witness is enabled, checks that the witness-filler tool is available
    in PATH and exits the session otherwise.
    """
    if not config.getoption("witness"):
        return
    if shutil.which("witness-filler") is not None:
        return
    pytest.exit(
        "witness-filler tool not found in PATH. Please build and install witness-filler "
        "from https://github.com/kevaundray/reth.git before using --witness flag.\n"
        "Example: cargo install --git https://github.com/kevaundray/reth.git "
        "witness-filler",
        1,
    )
@pytest.fixture
def witness_generator(
    request: pytest.FixtureRequest,
) -> Callable[[BlockchainFixture], None] | None:
    """
    Provide a witness generator function if --witness is enabled.

    Returns: None if witness functionality is disabled. Callable that generates
    witness data for a BlockchainFixture if enabled.
    """
    if not request.config.getoption("witness"):
        return None

    def generate_witness(fixture: BlockchainFixture) -> None:
        """
        Generate witness data for a blockchain fixture using the witness-filler
        tool.

        Feeds the fixture JSON to the tool on stdin and attaches each returned
        witness chunk to the block at the same index. Non-blockchain fixtures
        are silently skipped.
        """
        if not isinstance(fixture, BlockchainFixture):
            return None
        # Hotfix: witness-filler expects "Merge" but execution-spec-tests uses
        # "Paris"
        original_fork = None
        if fixture.fork is Paris:
            original_fork = fixture.fork
            fixture.fork = Merge
        try:
            result = subprocess.run(
                ["witness-filler"],
                input=fixture.model_dump_json(by_alias=True),
                text=True,
                capture_output=True,
            )
        finally:
            # Always restore the real fork, even if the subprocess call raises.
            if original_fork is not None:
                fixture.fork = original_fork
        if result.returncode != 0:
            raise RuntimeError(
                f"witness-filler tool failed with exit code {result.returncode}. "
                f"stderr: {result.stderr}"
            )
        try:
            result_model = WitnessFillerResult.model_validate_json(result.stdout)
            witnesses = result_model.root
            # Attach witness i to block i; extra witnesses are ignored.
            for i, witness in enumerate(witnesses):
                if i < len(fixture.blocks):
                    block = fixture.blocks[i]
                    if isinstance(block, FixtureBlock):
                        block.execution_witness = witness
        except Exception as e:
            raise RuntimeError(
                f"Failed to parse witness data from witness-filler tool. "
                f"Output was: {result.stdout[:500]}{'...' if len(result.stdout) > 500 else ''}"
            ) from e

    return generate_witness
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/eip_checklist.py | src/pytest_plugins/filler/eip_checklist.py | """
Pytest plugin for generating EIP test completion checklists.
This plugin collects checklist markers from tests and generates a filled
checklist for each EIP based on the template at
docs/writing_tests/checklist_templates/eip_testing_checklist_template.md
"""
import logging
import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import ClassVar, Dict, List, Set, Tuple, Type
import pytest
from .gen_test_doc.page_props import EipChecklistPageProps
logger = logging.getLogger("mkdocs")
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options for checklist generation."""
    checklist_group = parser.getgroup("checklist", "EIP checklist generation options")
    checklist_group.addoption(
        "--checklist-output",
        dest="checklist_output",
        action="store",
        type=Path,
        default=Path("./checklists"),
        help="Directory to output the generated checklists",
    )
    checklist_group.addoption(
        "--checklist-eip",
        dest="checklist_eips",
        action="append",
        type=int,
        default=[],
        help="Generate checklist only for specific EIP(s)",
    )
    checklist_group.addoption(
        "--checklist-doc-gen",
        dest="checklist_doc_gen",
        action="store_true",
        default=False,
        help="Generate checklists for documentation (uses mkdocs_gen_files)",
    )
# Title line of the checklist template document.
TITLE_LINE = "# EIP Execution Layer Testing Checklist Template"
# Placeholder row in the template — presumably replaced with coverage totals
# by the generation code (not in view here).
PERCENTAGE_LINE = "| TOTAL_CHECKLIST_ITEMS | COVERED_CHECKLIST_ITEMS | PERCENTAGE |"
# Template path resolved relative to this source file (parents[3] should be
# the repository root, which contains docs/).
TEMPLATE_PATH = (
    Path(__file__).parents[3]
    / "docs"
    / "writing_tests"
    / "checklist_templates"
    / "eip_testing_checklist_template.md"
)
# NOTE: read eagerly at import time; importing this module fails if the
# template file is missing.
TEMPLATE_CONTENT = TEMPLATE_PATH.read_text()
# Per-EIP file listing items covered by external tests/procedures.
EXTERNAL_COVERAGE_FILE_NAME = "eip_checklist_external_coverage.txt"
# Per-EIP file listing items that do not apply to the EIP.
NOT_APPLICABLE_FILE_NAME = "eip_checklist_not_applicable.txt"
# Marker line — presumably where warnings are inserted into generated output.
WARNINGS_LINE = "<!-- WARNINGS LINE -->"
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:  # noqa: D103
    # tryfirst: run before other plugins' configure hooks; register the
    # collector under a fixed plugin name.
    config.pluginmanager.register(EIPChecklistCollector(), "eip-checklist-collector")
@dataclass(kw_only=True)
class EIPItem:
"""Represents an EIP checklist item."""
id: str
line_number: int
description: str
tests: Set[str]
not_applicable_reason: str = ""
external_coverage_reason: str = ""
@classmethod
def from_checklist_line(cls, *, line: str, line_number: int) -> "EIPItem | None":
"""Create an EIP item from a checklist line."""
match = re.match(r"\|\s*`([^`]+)`\s*\|\s*([^|]+)\s*\|", line)
if not match:
return None
return cls(
id=match.group(1),
line_number=line_number,
description=match.group(2),
tests=set(),
)
@property
def covered(self) -> bool:
"""Return True if the item is covered by at least one test."""
return len(self.tests) > 0 or self.external_coverage
@property
def external_coverage(self) -> bool:
"""Return True if the item is covered by an external test/procedure."""
return self.external_coverage_reason != ""
@property
def not_applicable(self) -> bool:
"""Return True if the item is not applicable."""
return self.not_applicable_reason != ""
def __str__(self) -> str:
"""Return a string representation of the EIP item."""
status = " "
tests = ""
if self.external_coverage:
status = "✅"
tests = self.external_coverage_reason
elif self.covered:
if self.not_applicable:
status = "❓"
else:
status = "✅"
tests = ", ".join(sorted(self.tests))
elif self.not_applicable:
status = "N/A"
tests = self.not_applicable_reason
return f"| `{self.id}` | {self.description} | {status} | {tests} |"
# Parse the template once at import time: checklist-item id -> parsed item.
TEMPLATE_ITEMS: Dict[str, EIPItem] = {}
for line_no, template_line in enumerate(TEMPLATE_CONTENT.splitlines(), start=1):
    # Only lines that look like table rows with a backticked id parse.
    parsed = EIPItem.from_checklist_line(line=template_line, line_number=line_no)
    if parsed is not None:
        TEMPLATE_ITEMS[parsed.id] = parsed
def template_items() -> Dict[str, EIPItem]:
    """Return a fresh, independent copy of the template items (empty tests)."""
    return {
        item_id: EIPItem(
            id=template_item.id,
            line_number=template_item.line_number,
            description=template_item.description,
            tests=set(),
        )
        for item_id, template_item in TEMPLATE_ITEMS.items()
    }
# Every checklist item id known to the template.
ALL_IDS = set(TEMPLATE_ITEMS.keys())


def resolve_id(item_id: str) -> Set[str]:
    """Expand `item_id` to every matching checklist id (exact or sub-items)."""
    prefix = item_id + "/"
    return {cid for cid in ALL_IDS if cid == item_id or cid.startswith(prefix)}
# Registry of all ChecklistWarning subclasses, keyed by their `title`
# (populated automatically by `__init_subclass__`).
ALL_CHECKLIST_WARNINGS: Dict[str, Type["ChecklistWarning"]] = {}


@dataclass(kw_only=True)
class ChecklistWarning:
    """Represents an EIP checklist warning."""

    title: ClassVar[str] = ""  # Section heading; must be unique per subclass.
    details: List[str]

    def __init_subclass__(cls) -> None:
        """Register the checklist warning subclass."""
        super().__init_subclass__()
        assert cls.title, "Title must be set"
        if cls.title in ALL_CHECKLIST_WARNINGS:
            raise ValueError(f"Duplicate checklist warning class: {cls}")
        ALL_CHECKLIST_WARNINGS[cls.title] = cls

    def lines(self) -> List[str]:
        """Return the warning as markdown lines: heading plus details."""
        return ["", f"### {self.title}", ""] + self.details + [""]

    @classmethod
    def from_items(cls, all_items: Dict[str, EIPItem]) -> "ChecklistWarning | None":
        """Generate a checklist warning from a list of items.

        Subclasses return an instance when their condition is detected, or
        None otherwise.
        """
        raise NotImplementedError(f"from_items not implemented for {cls}")
class ConflictingChecklistItemsWarning(ChecklistWarning):
    """Represents a conflicting checklist items warning."""

    title: ClassVar[str] = "Conflicting Checklist Items"

    @classmethod
    def from_items(cls, all_items: Dict[str, EIPItem]) -> ChecklistWarning | None:
        """
        Generate a conflicting checklist items warning from a list of items.

        An item conflicts when it is marked not-applicable but is also
        covered (by a test or by external coverage).
        """
        conflicting_items = [
            item for item in all_items.values() if item.not_applicable and item.covered
        ]
        if not conflicting_items:
            return None
        details = [
            "The following checklist items were marked both as not applicable and covered:",
            "",
            "| ID | Description | Not Applicable | Tests |",
            "|---|---|---|---|",
        ]
        for item in conflicting_items:
            details.append(
                f"| {item.id} | {item.description} | "
                + f"{item.not_applicable_reason} | {', '.join(sorted(item.tests))} |"
            )
        return cls(details=details)
@dataclass(kw_only=True)
class EIP:
    """Represents an EIP and its checklist."""

    # The EIP number this checklist belongs to.
    number: int
    # Checklist item id -> item, freshly copied from the template.
    items: Dict[str, EIPItem] = field(default_factory=template_items)
    # Directory containing the per-EIP external-coverage / not-applicable
    # override files (None when unknown).
    path: Path | None = None
    def add_covered_test(self, checklist_id: str, node_id: str) -> None:
        """Add a covered test to the EIP.

        Raises KeyError when `checklist_id` is not a known template item id.
        """
        self.items[checklist_id].tests.add(node_id)
@property
def covered_items(self) -> int:
"""Return the number of covered items."""
return sum(1 for item in self.items.values() if item.covered and not item.not_applicable)
return sum(1 for item in self.items.values() if item.covered and not item.not_applicable)
@property
def total_items(self) -> int:
"""Return the number of total items."""
return sum(1 for item in self.items.values() if not item.not_applicable)
@property
def percentage(self) -> float:
"""Return the percentage of covered items."""
return self.covered_items / self.total_items * 100 if self.total_items else 0
@property
def completeness_emoji(self) -> str:
"""Return the completeness emoji."""
return "🟢" if self.percentage == 100 else "🟡" if self.percentage > 50 else "🔴"
@property
def warnings(self) -> List[ChecklistWarning]:
"""Return the detected inconsistencies in the checklist."""
warnings = []
for warning_cls in ALL_CHECKLIST_WARNINGS.values():
if warning := warning_cls.from_items(self.items):
warnings.append(warning)
return warnings
def mark_not_applicable(self) -> None:
"""Read the not-applicable items from the EIP."""
if self.path is None:
return
not_applicable_path = self.path / NOT_APPLICABLE_FILE_NAME
if not not_applicable_path.exists():
return
with not_applicable_path.open() as f:
for line in f:
line = line.strip()
if not line:
continue
assert "=" in line
item_id, reason = line.split("=", 1)
item_id = item_id.strip()
reason = reason.strip()
assert reason, f"Reason is empty for {line}"
assert item_id, f"Item ID is empty for {line}"
ids = resolve_id(item_id)
if not ids:
logger.warning(
f"Item ID {item_id} not found in the checklist template, "
f"for EIP {self.number}"
)
continue
for id_covered in ids:
self.items[id_covered].not_applicable_reason = reason
def mark_external_coverage(self) -> None:
"""Read the externally covered items from the EIP."""
if self.path is None:
return
external_coverage_path = self.path / EXTERNAL_COVERAGE_FILE_NAME
if not external_coverage_path.exists():
return
with external_coverage_path.open() as f:
for line in f:
line = line.strip()
if not line:
continue
assert "=" in line
item_id, reason = line.split("=", 1)
item_id = item_id.strip()
reason = reason.strip()
assert item_id, f"Item ID is empty for {line}"
assert reason, f"Reason is empty for {line}"
ids = resolve_id(item_id)
if not ids:
logger.warning(
f"Item ID {item_id} not found in the checklist template, "
f"for EIP {self.number}"
)
continue
for id_covered in ids:
self.items[id_covered].external_coverage_reason = reason
def generate_filled_checklist_lines(self) -> List[str]:
"""Generate the filled checklist lines for a specific EIP."""
# Create a copy of the template content
lines = TEMPLATE_CONTENT.splitlines()
self.mark_not_applicable()
self.mark_external_coverage()
for checklist_item in self.items.values():
# Find the line with this item ID
lines[checklist_item.line_number - 1] = str(checklist_item)
lines[lines.index(PERCENTAGE_LINE)] = (
f"| {self.total_items} | {self.covered_items} | {self.completeness_emoji} "
f"{self.percentage:.2f}% |"
)
# Replace the title line with the EIP number
lines[lines.index(TITLE_LINE)] = f"# EIP-{self.number} Test Checklist"
# Last, add the warnings if there are any, this must be the last thing
# we do to avoid shifting the lines below the percentage line
if self.warnings:
warnings_line_idx = lines.index(WARNINGS_LINE)
warnings_lines = ["", "## ⚠️ Checklist Warnings ⚠️", ""]
for warning in self.warnings:
warnings_lines.extend(warning.lines())
lines[warnings_line_idx:warnings_line_idx] = warnings_lines
return lines
def generate_filled_checklist(self, output_dir: Path) -> Path:
"""Generate a filled checklist for a specific EIP."""
lines = self.generate_filled_checklist_lines()
output_dir = output_dir / f"eip{self.number}_checklist.md"
# Write the filled checklist
output_dir.parent.mkdir(exist_ok=True, parents=True)
output_dir.write_text("\n".join(lines))
return output_dir
class EIPChecklistCollector:
    """Collects and manages EIP checklist items from test markers."""

    def __init__(self: "EIPChecklistCollector") -> None:
        """Initialize the EIP checklist collector."""
        self.eips: Dict[int, EIP] = {}

    def extract_eip_from_path(self, test_path: Path) -> Tuple[int | None, Path | None]:
        """Extract the EIP number and its directory from a test file path.

        Returns ``(None, None)`` when no path component matches ``eipNNNN``.
        """
        # Look for patterns like eip1234_ or eip1234/ in the path
        for part_idx, part in enumerate(test_path.parts):
            match = re.match(r"eip(\d+)", part)
            if match:
                eip = int(match.group(1))
                # Path prefix up to (and including) the matched component.
                eip_path = test_path.parents[len(test_path.parents) - part_idx - 2]
                return eip, eip_path
        return None, None

    def get_eip_from_item(self, item: pytest.Item) -> EIP | None:
        """Get (creating or back-filling if needed) the EIP for a test item.

        Consistency fix: reuses `extract_eip_from_path` instead of duplicating
        the identical path-matching logic inline.
        """
        eip_number, eip_path = self.extract_eip_from_path(Path(item.location[0]))
        if eip_number is None:
            return None
        if eip_number not in self.eips:
            self.eips[eip_number] = EIP(number=eip_number, path=eip_path)
        elif self.eips[eip_number].path is None:
            # The EIP may have been created path-less via `get_eip`.
            self.eips[eip_number].path = eip_path
        return self.eips[eip_number]

    def get_eip(self, eip: int) -> EIP:
        """Get (creating a path-less entry if needed) the EIP for a number."""
        if eip not in self.eips:
            self.eips[eip] = EIP(number=eip, path=None)
        return self.eips[eip]

    def collect_from_item(self, item: pytest.Item, primary_eip: EIP | None) -> None:
        """Collect `eip_checklist` markers from a test item.

        Each marker's positional args are checklist item IDs; the optional
        ``eip=`` keyword names additional EIPs the coverage applies to.
        """
        for marker in item.iter_markers("eip_checklist"):
            if not marker.args:
                pytest.fail(
                    f"eip_checklist marker on {item.nodeid} must have at least one argument "
                    "(item_id)"
                )
            # `eip=` accepts a single int or a list of ints.
            additional_eips = marker.kwargs.get("eip", [])
            if not isinstance(additional_eips, list):
                additional_eips = [additional_eips]
            eips: List[EIP] = [primary_eip] if primary_eip else []
            if additional_eips:
                if any(not isinstance(eip, int) for eip in additional_eips):
                    pytest.fail(
                        "EIP numbers must be integers. Found non-integer EIPs in "
                        f"{item.nodeid}: {additional_eips}"
                    )
                eips += [self.get_eip(eip) for eip in additional_eips]
            for item_id in marker.args:
                item_id = str(item_id)
                covered_ids = resolve_id(item_id.strip())
                if not covered_ids:
                    logger.warning(
                        f"Item ID {item_id} not found in the checklist template, "
                        f"for test {item.nodeid}"
                    )
                    continue
                for id_covered in covered_ids:
                    for eip in eips:
                        eip.add_covered_test(id_covered, item.nodeid)

    @pytest.hookimpl(tryfirst=True)
    def pytest_runtestloop(self, session: pytest.Session) -> bool:
        """Skip test execution, only generate checklists.

        Returning True tells pytest the run loop has been fully handled.
        """
        session.testscollected = 0
        return True

    def pytest_collection_modifyitems(
        self, config: pytest.Config, items: List[pytest.Item]
    ) -> None:
        """Collect checklist markers during test collection and emit output."""
        for item in items:
            eip = self.get_eip_from_item(item)
            # Derived/skipped tests register the EIP but contribute no coverage.
            if item.get_closest_marker("derived_test") or item.get_closest_marker("skip"):
                continue
            self.collect_from_item(item, eip)
        # Check which mode we are in
        checklist_doc_gen = config.getoption("checklist_doc_gen", False)
        checklist_output = config.getoption("checklist_output", Path("checklists"))
        checklist_eips = config.getoption("checklist_eips", [])
        checklist_props = {}
        # Generate a checklist for each EIP
        for eip in self.eips.values():
            # Skip if specific EIPs were requested and this isn't one of them
            if checklist_eips and eip.number not in checklist_eips:
                continue
            if checklist_doc_gen:
                # Documentation-generation mode: collect page props instead of
                # writing markdown files directly.
                assert eip.path is not None
                checklist_path = eip.path / "checklist.md"
                checklist_props[checklist_path] = EipChecklistPageProps(
                    title=f"EIP-{eip.number} Test Checklist",
                    source_code_url="",
                    target_or_valid_fork="mainnet",
                    path=checklist_path,
                    pytest_node_id="",
                    package_name="checklist",
                    eip=eip.number,
                    lines=eip.generate_filled_checklist_lines(),
                )
            else:
                checklist_path = eip.generate_filled_checklist(checklist_output)
                print(f"\nGenerated EIP-{eip.number} checklist: {checklist_path}")
        if checklist_doc_gen:
            config.checklist_props = checklist_props  # type: ignore
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/__init__.py | src/pytest_plugins/filler/__init__.py | """A pytest plugin to fill tests and generate JSON fixtures."""
from .fixture_output import FixtureOutput
__all__ = [
"FixtureOutput",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/fixture_output.py | src/pytest_plugins/filler/fixture_output.py | """Fixture output configuration for generated test fixtures."""
import shutil
import tarfile
from pathlib import Path
import pytest
from pydantic import BaseModel, Field
from ethereum_test_fixtures.blockchain import BlockchainEngineXFixture
class FixtureOutput(BaseModel):
    """Represents the output destination for generated test fixtures."""

    output_path: Path = Field(description="Directory path to store the generated test fixtures")
    single_fixture_per_file: bool = Field(
        default=False,
        description=(
            "Don't group fixtures in JSON files by test function; "
            "write each fixture to its own file"
        ),
    )
    clean: bool = Field(
        default=False,
        description="Clean (remove) the output directory before filling fixtures.",
    )
    generate_pre_alloc_groups: bool = Field(
        default=False,
        description="Generate pre-allocation groups (phase 1).",
    )
    use_pre_alloc_groups: bool = Field(
        default=False,
        description="Use existing pre-allocation groups (phase 2).",
    )
    should_generate_all_formats: bool = Field(
        default=False,
        description="Generate all fixture formats including BlockchainEngineXFixture.",
    )

    @staticmethod
    def _is_tarball_path(path: Path) -> bool:
        """Return True if `path` names a gzipped tarball ('*.tar.gz').

        Single source of truth shared by `is_tarball` and `from_config`
        (which previously duplicated this check inline).
        """
        return path.suffix == ".gz" and path.with_suffix("").suffix == ".tar"

    @property
    def directory(self) -> Path:
        """Return the actual directory path where fixtures will be written."""
        return self.strip_tarball_suffix(self.output_path)

    @property
    def metadata_dir(self) -> Path:
        """Return metadata directory to store fixture meta files."""
        # When writing to stdout there is no separate metadata directory.
        if self.is_stdout:
            return self.directory
        return self.directory / ".meta"

    @property
    def is_tarball(self) -> bool:
        """Return True if the output should be packaged as a tarball."""
        return self._is_tarball_path(self.output_path)

    @property
    def is_stdout(self) -> bool:
        """Return True if the fixture output is configured to be stdout."""
        return self.directory.name == "stdout"

    @property
    def pre_alloc_groups_folder_path(self) -> Path:
        """Return the path for pre-allocation groups folder."""
        engine_x_dir = BlockchainEngineXFixture.output_base_dir_name()
        return self.directory / engine_x_dir / "pre_alloc"

    @property
    def should_auto_enable_all_formats(self) -> bool:
        """
        Check if all formats should be auto-enabled due to tarball output.
        """
        return self.is_tarball

    @staticmethod
    def strip_tarball_suffix(path: Path) -> Path:
        """Strip the '.tar.gz' suffix from the output path."""
        if str(path).endswith(".tar.gz"):
            return path.with_suffix("").with_suffix("")
        return path

    def is_directory_empty(self) -> bool:
        """Check if the output directory is empty (or does not exist)."""
        if not self.directory.exists():
            return True
        return not any(self.directory.iterdir())

    def is_directory_usable_for_phase(self) -> bool:
        """Check if the output directory is usable for the current phase."""
        if not self.directory.exists():
            return True
        if self.generate_pre_alloc_groups:
            # Phase 1: Directory must be completely empty
            return self.is_directory_empty()
        elif self.use_pre_alloc_groups:
            # Phase 2: Only pre-allocation groups must exist, no other files
            # allowed
            if not self.pre_alloc_groups_folder_path.exists():
                return False
            # Check that only the pre-allocation group files exist
            existing_files = {f for f in self.directory.rglob("*") if f.is_file()}
            allowed_files = set(self.pre_alloc_groups_folder_path.rglob("*.json"))
            return existing_files == allowed_files
        else:
            # Normal filling: Directory must be empty
            return self.is_directory_empty()

    @staticmethod
    def _preview(names: list[str], noun: str, max_shown: int) -> str:
        """Return e.g. "3 files (a, b, c)", eliding names beyond `max_shown`.

        Extracted from `get_directory_summary`, which previously built these
        strings twice inline (with dead `else ""` branches on conditions
        already known to be true).
        """
        overflow = f"... and {len(names) - max_shown} more" if len(names) > max_shown else ""
        return f"{len(names)} {noun} ({', '.join(names[:max_shown])}{overflow})"

    def get_directory_summary(self) -> str:
        """Return a summary of directory contents for error reporting."""
        if not self.directory.exists():
            return "directory does not exist"
        items = list(self.directory.iterdir())
        if not items:
            return "empty directory"
        dirs = [d.name for d in items if d.is_dir()]
        files = [f.name for f in items if f.is_file()]
        summary_parts = []
        if dirs:
            # Preview at most four directory names.
            summary_parts.append(self._preview(dirs, "directories", 4))
        if files:
            # Preview at most three file names.
            summary_parts.append(self._preview(files, "files", 3))
        return " and ".join(summary_parts)

    def create_directories(self, is_master: bool) -> None:
        """
        Create output and metadata directories if needed.

        If clean flag is set, remove and recreate the directory. Otherwise,
        verify the directory is empty before proceeding.

        Raises:
            ValueError: When the directory is not usable for the current
                phase (see `is_directory_usable_for_phase`).
        """
        if self.is_stdout:
            return
        # Only the master process should delete/create directories if using
        # pytest-xdist
        if not is_master:
            return
        if self.directory.exists() and self.clean:
            shutil.rmtree(self.directory)
        if self.directory.exists() and not self.is_directory_usable_for_phase():
            summary = self.get_directory_summary()
            if self.generate_pre_alloc_groups:
                raise ValueError(
                    f"Output directory '{self.directory}' must be completely empty for "
                    f"pre-allocation group generation (phase 1). Contains: {summary}. "
                    "Use --clean to remove all existing files."
                )
            elif self.use_pre_alloc_groups:
                if not self.pre_alloc_groups_folder_path.exists():
                    raise ValueError(
                        "Pre-allocation groups folder not found at "
                        f"'{self.pre_alloc_groups_folder_path}'. "
                        "Run phase 1 with --generate-pre-alloc-groups first."
                    )
            else:
                raise ValueError(
                    f"Output directory '{self.directory}' is not empty. "
                    f"Contains: {summary}. Use --clean to remove all existing files "
                    "or specify a different output directory."
                )
        # Create directories
        self.directory.mkdir(parents=True, exist_ok=True)
        self.metadata_dir.mkdir(parents=True, exist_ok=True)
        # Create pre-allocation groups directory for phase 1
        if self.generate_pre_alloc_groups:
            self.pre_alloc_groups_folder_path.parent.mkdir(parents=True, exist_ok=True)

    def create_tarball(self) -> None:
        """Create tarball of the output directory if configured to do so."""
        if not self.is_tarball:
            return
        with tarfile.open(self.output_path, "w:gz") as tar:
            # Only package fixture JSON and ini metadata files.
            for file in self.directory.rglob("*"):
                if file.suffix in {".json", ".ini"}:
                    arcname = Path("fixtures") / file.relative_to(self.directory)
                    tar.add(file, arcname=arcname)

    @classmethod
    def from_config(cls, config: pytest.Config) -> "FixtureOutput":
        """Create a FixtureOutput instance from pytest configuration."""
        output_path = Path(config.getoption("output"))
        should_generate_all_formats = config.getoption("generate_all_formats")
        # Tarball output automatically enables --generate-all-formats
        # (same check as the `is_tarball` property).
        if cls._is_tarball_path(output_path):
            should_generate_all_formats = True
        return cls(
            output_path=output_path,
            single_fixture_per_file=config.getoption("single_fixture_per_file"),
            clean=config.getoption("clean"),
            generate_pre_alloc_groups=config.getoption("generate_pre_alloc_groups"),
            use_pre_alloc_groups=config.getoption("use_pre_alloc_groups"),
            should_generate_all_formats=should_generate_all_formats,
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/filler.py | src/pytest_plugins/filler/filler.py | """
Top-level pytest configuration file providing:
- Command-line options,
- Test-fixtures that can be used by all test cases,
and that modifies pytest hooks in order to fill test specs for all tests
and writes the generated fixtures to file.
"""
import configparser
import datetime
import json
import os
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Generator, List, Self, Set, Type
import pytest
import xdist
from _pytest.compat import NotSetType
from _pytest.terminal import TerminalReporter
from filelock import FileLock
from pytest_metadata.plugin import metadata_key
from cli.gen_index import generate_fixtures_index
from ethereum_clis import TransitionTool
from ethereum_clis.clis.geth import FixtureConsumerTool
from ethereum_test_base_types import Account, Address, Alloc, ReferenceSpec
from ethereum_test_fixtures import (
BaseFixture,
FixtureCollector,
FixtureConsumer,
FixtureFillingPhase,
LabeledFixtureFormat,
PreAllocGroup,
PreAllocGroups,
TestInfo,
)
from ethereum_test_forks import Fork, get_transition_fork_predecessor, get_transition_forks
from ethereum_test_specs import BaseTest
from ethereum_test_specs.base import OpMode
from ethereum_test_tools.utility.versioning import (
generate_github_url,
get_current_commit_hash_or_tag,
)
from ethereum_test_types import EnvironmentDefaults
from ..shared.execute_fill import ALL_FIXTURE_PARAMETERS
from ..shared.helpers import (
get_spec_format_for_item,
is_help_or_collectonly_mode,
labeled_format_parameter_set,
)
from ..spec_version_checker.spec_version_checker import get_ref_spec_from_module
from .fixture_output import FixtureOutput
def print_migration_warning(terminalreporter: Any = None) -> None:
    """Print the repository-merge ("The Weld") migration warning banner.

    Writes through *terminalreporter* when one is given (headings bold and
    yellow, separators yellow); otherwise falls back to plain ``print``.
    """
    banner = [
        "",
        "=" * 80,
        "⚠️ IMPORTANT: Repository Migration in Progress - 'The Weld' ⚠️",
        "=" * 80,
        "",
        "This repository is being merged into ethereum/execution-specs (EELS) during the",
        "week of October 20-24, 2025.",
        "",
        "📅 Timeline:",
        "  • Week of Oct 13-17: Closing PRs, porting issues to EELS",
        "  • Week of Oct 20-24: Migration week - fixing CI and fixture building",
        "  • Oct 24 (ETA): Weld finalized - all development moves to EELS",
        "",
        "👉 What This Means:",
        "  • Test Contributors: After Oct 24, reopen draft PRs in ethereum/execution-specs",
        "  • All future test development happens in EELS after completion",
        "  • Fixture releases continue as usual during transition",
        "",
        "For details: https://steel.ethereum.foundation/blog/2025-09-11_weld-announcement/",
        "=" * 80,
        "",
    ]
    if not terminalreporter:
        for text in banner:
            print(text)
        return
    for text in banner:
        if "⚠️" in text or "IMPORTANT" in text:
            terminalreporter.write_line(text, bold=True, yellow=True)
        elif text.startswith("="):
            terminalreporter.write_line(text, yellow=True)
        else:
            terminalreporter.write_line(text)
@dataclass(kw_only=True)
class PhaseManager:
    """
    Manages the execution phase for fixture generation.

    Two-phase execution for pre-allocation group generation:

    - Phase 1 (``--generate-pre-alloc-groups``): generate pre-allocation
      groups.
    - Phase 2 (``--use-pre-alloc-groups``): fill fixtures using the groups
      produced by phase 1.

    The phases are separate pytest runs orchestrated by the CLI wrapper;
    each run builds a fresh PhaseManager (nothing persists between phases).
    """

    current_phase: FixtureFillingPhase
    previous_phases: Set[FixtureFillingPhase] = field(default_factory=set)

    @classmethod
    def from_config(cls, config: pytest.Config) -> "Self":
        """
        Create a PhaseManager from pytest configuration.

        Flag logic:
        - ``use_pre_alloc_groups``: phase 2 (FILL) following phase 1
          (PRE_ALLOC_GENERATION).
        - ``generate_pre_alloc_groups`` or ``generate_all_formats``: phase 1
          (PRE_ALLOC_GENERATION). ``generate_all_formats`` triggers this
          because the CLI passes it to phase 1 so all formats are considered
          for grouping.
        - Otherwise: normal single-phase filling (FILL).
        """
        if config.getoption("use_pre_alloc_groups", False):
            # Phase 2: filling on top of pre-generated groups.
            return cls(
                current_phase=FixtureFillingPhase.FILL,
                previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
            )
        wants_pre_alloc = config.getoption("generate_pre_alloc_groups", False)
        wants_all_formats = config.getoption("generate_all_formats", False)
        if wants_pre_alloc or wants_all_formats:
            # Phase 1: generating the pre-allocation groups.
            return cls(current_phase=FixtureFillingPhase.PRE_ALLOC_GENERATION)
        # Normal single-phase filling.
        return cls(current_phase=FixtureFillingPhase.FILL)

    @property
    def is_pre_alloc_generation(self) -> bool:
        """Check if we're in the pre-allocation generation phase."""
        return self.current_phase == FixtureFillingPhase.PRE_ALLOC_GENERATION

    @property
    def is_fill_after_pre_alloc(self) -> bool:
        """Check if we're filling after pre-allocation generation."""
        filling_now = self.current_phase == FixtureFillingPhase.FILL
        return filling_now and FixtureFillingPhase.PRE_ALLOC_GENERATION in self.previous_phases

    @property
    def is_single_phase_fill(self) -> bool:
        """Check if we're in single-phase fill mode (no pre-allocation)."""
        filling_now = self.current_phase == FixtureFillingPhase.FILL
        return (
            filling_now and FixtureFillingPhase.PRE_ALLOC_GENERATION not in self.previous_phases
        )
@dataclass(kw_only=True)
class FormatSelector:
    """
    Decides which fixture formats are generated in each phase.

    Encapsulates the format-selection rules of the two-phase execution
    model: pre-allocation generation first, then filling.
    """

    phase_manager: PhaseManager
    generate_all_formats: bool

    def should_generate(self, fixture_format: Type[BaseFixture] | LabeledFixtureFormat) -> bool:
        """
        Determine if a fixture format should be generated in the current phase.

        Args:
            fixture_format: The fixture format to check (may be wrapped in
                LabeledFixtureFormat).

        Returns:
            True if the format should be generated in the current phase.
        """
        phases = fixture_format.format_phases
        if self.phase_manager.is_pre_alloc_generation:
            return self._should_generate_pre_alloc(phases)
        # Otherwise we are in the FILL phase.
        return self._should_generate_fill(phases)

    def _should_generate_pre_alloc(self, format_phases: Set[FixtureFillingPhase]) -> bool:
        """Selection rule for the pre-alloc generation phase."""
        # Only formats that actually need pre-allocation groups.
        return FixtureFillingPhase.PRE_ALLOC_GENERATION in format_phases

    def _should_generate_fill(self, format_phases: Set[FixtureFillingPhase]) -> bool:
        """Selection rule for the fill phase."""
        ran_pre_alloc = (
            FixtureFillingPhase.PRE_ALLOC_GENERATION in self.phase_manager.previous_phases
        )
        if not ran_pre_alloc:
            # Single-phase run: only fill-only formats.
            return format_phases == {FixtureFillingPhase.FILL}
        if self.generate_all_formats:
            # Phase 2 with --generate-all-formats: everything, including
            # formats that never needed pre-alloc groups.
            return True
        # Phase 2 default: only formats that required pre-alloc groups.
        return FixtureFillingPhase.PRE_ALLOC_GENERATION in format_phases
@dataclass(kw_only=True)
class FillingSession:
    """
    Manages all state for a single pytest fill session.

    This class serves as the single source of truth for all filler state
    management, including phase management, format selection, and
    pre-allocation groups.

    Important: Each pytest run gets a fresh FillingSession instance. There is
    no persistence between phase 1 (generate pre-alloc) and phase 2 (use
    pre-alloc) except through file I/O.
    """

    # Output destination/layout configuration for generated fixtures.
    fixture_output: FixtureOutput
    # Current phase (and phase history) of the two-phase fill process.
    phase_manager: PhaseManager
    # Decides which fixture formats are produced in the current phase.
    format_selector: FormatSelector
    # Phase 1: groups collected in memory; phase 2: groups loaded from disk;
    # None until `_initialize_pre_alloc_groups` runs.
    pre_alloc_groups: PreAllocGroups | None

    @classmethod
    def from_config(cls, config: pytest.Config) -> "Self":
        """
        Initialize a filling session from pytest configuration.

        Args:
            config: The pytest configuration object.
        """
        phase_manager = PhaseManager.from_config(config)
        instance = cls(
            fixture_output=FixtureOutput.from_config(config),
            phase_manager=phase_manager,
            format_selector=FormatSelector(
                phase_manager=phase_manager,
                generate_all_formats=config.getoption("generate_all_formats", False),
            ),
            pre_alloc_groups=None,
        )
        # Initialize pre-alloc groups based on phase
        instance._initialize_pre_alloc_groups()
        return instance

    def _initialize_pre_alloc_groups(self) -> None:
        """Initialize pre-allocation groups based on the current phase."""
        if self.phase_manager.is_pre_alloc_generation:
            # Phase 1: Create empty container for collecting groups
            self.pre_alloc_groups = PreAllocGroups(root={})
        elif self.phase_manager.is_fill_after_pre_alloc:
            # Phase 2: Load pre-alloc groups from disk
            self._load_pre_alloc_groups_from_folder()

    def _load_pre_alloc_groups_from_folder(self) -> None:
        """Load pre-allocation groups from the output folder.

        Raises:
            FileNotFoundError: When phase 1 output is missing.
        """
        pre_alloc_folder = self.fixture_output.pre_alloc_groups_folder_path
        if pre_alloc_folder.exists():
            # Lazy loading defers reading each group file until first access.
            self.pre_alloc_groups = PreAllocGroups.from_folder(pre_alloc_folder, lazy_load=True)
        else:
            raise FileNotFoundError(
                f"Pre-allocation groups folder not found: {pre_alloc_folder}. "
                "Run phase 1 with --generate-pre-alloc-groups first."
            )

    def should_generate_format(
        self, fixture_format: Type[BaseFixture] | LabeledFixtureFormat
    ) -> bool:
        """
        Determine if a fixture format should be generated in the current
        session.

        Args:
            fixture_format: The fixture format to check.

        Returns:
            True if the format should be generated.
        """
        return self.format_selector.should_generate(fixture_format)

    def get_pre_alloc_group(self, hash_key: str) -> PreAllocGroup:
        """
        Get a pre-allocation group by hash.

        Args:
            hash_key: The hash of the pre-alloc group.

        Returns:
            The pre-allocation group.

        Raises:
            ValueError: If pre-alloc groups not initialized or hash not found.
        """
        if self.pre_alloc_groups is None:
            raise ValueError("Pre-allocation groups not initialized")
        if hash_key not in self.pre_alloc_groups:
            pre_alloc_path = self.fixture_output.pre_alloc_groups_folder_path / hash_key
            raise ValueError(
                f"Pre-allocation hash {hash_key} not found in pre-allocation groups. "
                f"Please check the pre-allocation groups file at: {pre_alloc_path}. "
                "Make sure phase 1 (--generate-pre-alloc-groups) was run before phase 2."
            )
        return self.pre_alloc_groups[hash_key]

    def update_pre_alloc_group(self, hash_key: str, group: PreAllocGroup) -> None:
        """
        Update or add a pre-allocation group.

        Args:
            hash_key: The hash of the pre-alloc group.
            group: The pre-allocation group.

        Raises:
            ValueError: If not in pre-alloc generation phase.
        """
        if not self.phase_manager.is_pre_alloc_generation:
            raise ValueError("Can only update pre-alloc groups in generation phase")
        if self.pre_alloc_groups is None:
            self.pre_alloc_groups = PreAllocGroups(root={})
        self.pre_alloc_groups[hash_key] = group

    def save_pre_alloc_groups(self) -> None:
        """Save pre-allocation groups to disk (no-op when uninitialized)."""
        if self.pre_alloc_groups is None:
            return
        pre_alloc_folder = self.fixture_output.pre_alloc_groups_folder_path
        pre_alloc_folder.mkdir(parents=True, exist_ok=True)
        self.pre_alloc_groups.to_folder(pre_alloc_folder)

    def aggregate_pre_alloc_groups(self, worker_groups: PreAllocGroups) -> None:
        """
        Aggregate pre-alloc groups from a worker process (xdist support).

        Args:
            worker_groups: Pre-alloc groups from a worker process.

        Raises:
            ValueError: If two workers produced differing pre-states for the
                same group hash.
        """
        if self.pre_alloc_groups is None:
            self.pre_alloc_groups = PreAllocGroups(root={})
        for hash_key, group in worker_groups.items():
            if hash_key in self.pre_alloc_groups:
                # Merge if exists (should not happen in practice)
                existing = self.pre_alloc_groups[hash_key]
                if existing.pre != group.pre:
                    raise ValueError(
                        f"Conflicting pre-alloc groups for hash {hash_key}: "
                        f"existing={self.pre_alloc_groups[hash_key].pre}, new={group.pre}"
                    )
            else:
                self.pre_alloc_groups[hash_key] = group
def calculate_post_state_diff(post_state: Alloc, genesis_state: Alloc) -> Alloc:
    """
    Calculate the state difference between post_state and genesis_state.

    Storing only the accounts that changed during test execution (rather
    than the full post-state, which may contain thousands of untouched
    accounts) yields significant space savings in Engine X fixtures.

    The returned Alloc contains only accounts that:
    - Were modified between genesis and post state (balance, nonce,
      storage, code)
    - Were created during test execution (new accounts)
    - Were deleted during test execution (represented as None)

    Args:
        post_state: Final state after test execution
        genesis_state: Genesis pre-allocation state

    Returns:
        Alloc containing only the state differences for efficient storage
    """
    delta: Dict[Address, Account | None] = {}
    # Every address present in either state.
    touched = set(post_state.root.keys()) | set(genesis_state.root.keys())
    for address in touched:
        before = genesis_state.root.get(address)
        after = post_state.root.get(address)
        # Created (before is None), deleted (after is None) and modified
        # accounts all reduce to the same rule: record the post value.
        # Unchanged accounts are omitted from the diff entirely.
        if before != after:
            delta[address] = after
    return Alloc(delta)
def default_output_directory() -> str:
    """Return the default directory for generated test fixtures.

    Kept as a function rather than a module constant so that tests can
    patch it easily.
    """
    return "./fixtures"
def default_html_report_file_path() -> str:
    """Return the default path of the generated HTML fill report.

    Kept as a function rather than a module constant so that tests can
    patch it easily.
    """
    return ".meta/report_fill.html"
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest.

    Registers four option groups: `evm` (transition-tool behavior), `tests`
    (filler input/output locations and phases), `optimize gas` (transaction
    gas minimization) and `debug` (t8n debug dumps).
    """
    # --- evm: transition-tool executable behavior -------------------------
    evm_group = parser.getgroup("evm", "Arguments defining evm executable behavior")
    evm_group.addoption(
        "--evm-bin",
        action="store",
        dest="evm_bin",
        type=Path,
        default=None,
        help=(
            "Path to an evm executable (or name of an executable in the PATH) that provides `t8n`."
            " Default: `ethereum-spec-evm-resolver`."
        ),
    )
    evm_group.addoption(
        "--t8n-server-url",
        action="store",
        dest="t8n_server_url",
        type=str,
        default=None,
        help=(
            "[INTERNAL USE ONLY] URL of the t8n server to use. Used by framework tests/ci; not "
            "intended for regular CLI use."
        ),
    )
    evm_group.addoption(
        "--traces",
        action="store_true",
        dest="evm_collect_traces",
        default=None,
        help="Collect traces of the execution information from the transition tool.",
    )
    evm_group.addoption(
        "--verify-fixtures",
        action="store_true",
        dest="verify_fixtures",
        default=False,
        help=(
            "Verify generated fixture JSON files using geth's evm blocktest command. "
            "By default, the same evm binary as for the t8n tool is used. A different (geth) evm "
            "binary may be specified via --verify-fixtures-bin, this must be specified if filling "
            "with a non-geth t8n tool that does not support blocktest."
        ),
    )
    evm_group.addoption(
        "--verify-fixtures-bin",
        action="store",
        dest="verify_fixtures_bin",
        type=Path,
        default=None,
        help=(
            "Path to an evm executable that provides the `blocktest` command. "
            "Default: The first (geth) 'evm' entry in PATH."
        ),
    )
    # --- tests: filler input location, output layout and fill phases -----
    test_group = parser.getgroup("tests", "Arguments defining filler location and output")
    test_group.addoption(
        "--filler-path",
        action="store",
        dest="filler_path",
        default="./tests/",
        type=Path,
        help="Path to filler directives",
    )
    test_group.addoption(
        "--output",
        action="store",
        dest="output",
        type=Path,
        default=Path(default_output_directory()),
        help=(
            "Directory path to store the generated test fixtures. Must be empty if it exists. "
            "If the specified path ends in '.tar.gz', then the specified tarball is additionally "
            "created (the fixtures are still written to the specified path without the '.tar.gz' "
            f"suffix). Tarball output automatically enables --generate-all-formats. "
            f"Can be deleted. Default: '{default_output_directory()}'."
        ),
    )
    test_group.addoption(
        "--clean",
        action="store_true",
        dest="clean",
        default=False,
        help="Clean (remove) the output directory before filling fixtures.",
    )
    test_group.addoption(
        "--single-fixture-per-file",
        action="store_true",
        dest="single_fixture_per_file",
        default=False,
        help=(
            "Don't group fixtures in JSON files by test function; write each fixture to its own "
            "file. This can be used to increase the granularity of --verify-fixtures."
        ),
    )
    test_group.addoption(
        "--no-html",
        action="store_true",
        dest="disable_html",
        default=False,
        help=(
            "Don't generate an HTML test report (in the output directory). "
            "The --html flag can be used to specify a different path."
        ),
    )
    test_group.addoption(
        "--build-name",
        action="store",
        dest="build_name",
        default=None,
        type=str,
        help="Specify a build name for the fixtures.ini file, e.g., 'stable'.",
    )
    test_group.addoption(
        "--skip-index",
        action="store_false",
        dest="generate_index",
        default=True,
        help="Skip generating an index file for all produced fixtures.",
    )
    test_group.addoption(
        "--block-gas-limit",
        action="store",
        dest="block_gas_limit",
        default=EnvironmentDefaults.gas_limit,
        type=int,
        help=(
            "Default gas limit used ceiling used for blocks and tests that attempt to "
            f"consume an entire block's gas. (Default: {EnvironmentDefaults.gas_limit})"
        ),
    )
    # Two-phase execution flags (see PhaseManager for how they interact).
    test_group.addoption(
        "--generate-pre-alloc-groups",
        action="store_true",
        dest="generate_pre_alloc_groups",
        default=False,
        help="Generate pre-allocation groups (phase 1 only).",
    )
    test_group.addoption(
        "--use-pre-alloc-groups",
        action="store_true",
        dest="use_pre_alloc_groups",
        default=False,
        help="Fill tests using existing pre-allocation groups (phase 2 only).",
    )
    test_group.addoption(
        "--generate-all-formats",
        action="store_true",
        dest="generate_all_formats",
        default=False,
        help=(
            "Generate all fixture formats including BlockchainEngineXFixture. "
            "This enables two-phase execution: Phase 1 generates pre-allocation groups, "
            "phase 2 generates all supported fixture formats."
        ),
    )
    # --- optimize gas: per-transaction gas minimization -------------------
    optimize_gas_group = parser.getgroup(
        "optimize gas",
        "Arguments defining test gas optimization behavior.",
    )
    optimize_gas_group.addoption(
        "--optimize-gas",
        action="store_true",
        dest="optimize_gas",
        default=False,
        help=(
            "Attempt to optimize the gas used in every transaction for the filled tests, "
            "then print the minimum amount of gas at which the test still produces a correct "
            "post state and the exact same trace."
        ),
    )
    optimize_gas_group.addoption(
        "--optimize-gas-output",
        action="store",
        dest="optimize_gas_output",
        default=Path("optimize-gas-output.json"),
        type=Path,
        help=(
            "Path to the JSON file that is output to the gas optimization. "
            "Requires `--optimize-gas`."
        ),
    )
    optimize_gas_group.addoption(
        "--optimize-gas-max-gas-limit",
        action="store",
        dest="optimize_gas_max_gas_limit",
        default=None,
        type=int,
        help=(
            "Maximum gas limit for gas optimization, if reached the search will stop and "
            "fail for that given test. Requires `--optimize-gas`."
        ),
    )
    optimize_gas_group.addoption(
        "--optimize-gas-post-processing",
        action="store_true",
        dest="optimize_gas_post_processing",
        default=False,
        help=(
            "Post process the traces during gas optimization in order to Account for "
            "opcodes that put the current gas in the stack, in order to remove "
            "remaining-gas from the comparison."
        ),
    )
    # --- debug: transition-tool debug dump output -------------------------
    debug_group = parser.getgroup("debug", "Arguments defining debug behavior")
    debug_group.addoption(
        "--evm-dump-dir",
        "--t8n-dump-dir",
        action="store",
        dest="base_dump_dir",
        default=None,
        help=(
            "Path to dump the transition tool debug output. "
            "Only creates debug output when explicitly specified."
        ),
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Pytest hook called after command line options have been parsed and before
    test collection begins.

    Couple of notes:
    1. Register the plugin's custom markers and process command-line options.
    Custom marker registration:
    https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
    2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
    called before the pytest-html plugin's pytest_configure to ensure that
    it uses the modified `htmlpath` option.
    """
    if not is_help_or_collectonly_mode(config):
        print_migration_warning()
    # Modify the block gas limit if specified.
    if config.getoption("block_gas_limit"):
        EnvironmentDefaults.gas_limit = config.getoption("block_gas_limit")
    # Initialize fixture output configuration
    config.fixture_output = FixtureOutput.from_config(  # type: ignore[attr-defined]
        config
    )
    # Initialize filling session
    config.filling_session = FillingSession.from_config(  # type: ignore[attr-defined]
        config
    )
    # Nothing below is needed for `--help`/`--collect-only` invocations.
    if is_help_or_collectonly_mode(config):
        return
    try:
        # Check whether the directory exists and is not empty; if --clean is
        # set, it will delete it
        config.fixture_output.create_directories(  # type: ignore[attr-defined]
            is_master=not hasattr(config, "workerinput")
        )
    except ValueError as e:
        pytest.exit(str(e), returncode=pytest.ExitCode.USAGE_ERROR)
    # Default the HTML report into the fixture output directory, except during
    # phase 1 (pre-allocation group generation), which produces no fixtures.
    if (
        not config.getoption("disable_html")
        and config.getoption("htmlpath") is None
        and config.filling_session.phase_manager.current_phase  # type: ignore[attr-defined]
        != FixtureFillingPhase.PRE_ALLOC_GENERATION
    ):
        config.option.htmlpath = (
            config.fixture_output.directory  # type: ignore[attr-defined]
            / default_html_report_file_path()
        )
    config.gas_optimized_tests = {}  # type: ignore[attr-defined]
    if config.getoption("optimize_gas", False):
        if config.getoption("optimize_gas_post_processing"):
            config.op_mode = (  # type: ignore[attr-defined]
                OpMode.OPTIMIZE_GAS_POST_PROCESSING
            )
        else:
            config.op_mode = OpMode.OPTIMIZE_GAS  # type: ignore[attr-defined]
    # Gas optimization needs traces even when not explicitly requested.
    config.collect_traces = (  # type: ignore[attr-defined]
        config.getoption("evm_collect_traces") or config.getoption("optimize_gas", False)
    )
    # Instantiate the transition tool here to check that the binary path/trace
    # option is valid. This ensures we only raise an error once, if
    # appropriate, instead of for every test.
    evm_bin = config.getoption("evm_bin")
    trace = config.getoption("evm_collect_traces")
    t8n_server_url = config.getoption("t8n_server_url")
    kwargs = {
        "trace": trace,
    }
    if t8n_server_url is not None:
        kwargs["server_url"] = t8n_server_url
    if evm_bin is None:
        assert TransitionTool.default_tool is not None, "No default transition tool found"
        t8n = TransitionTool.default_tool(**kwargs)
    else:
        t8n = TransitionTool.from_binary_path(binary_path=evm_bin, **kwargs)
    if (
        isinstance(config.getoption("numprocesses"), int)
        and config.getoption("numprocesses") > 0
        and not t8n.supports_xdist
    ):
        # Fixed: the two literals previously concatenated without a space,
        # producing "...xdist plugin;use -n=0.".
        pytest.exit(
            f"The {t8n.__class__.__name__} t8n tool does not work well with the xdist plugin; "
            "use -n=0.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )
    config.t8n = t8n  # type: ignore[attr-defined]
    # Record the t8n tool version in the report metadata.
    if "Tools" not in config.stash[metadata_key]:
        config.stash[metadata_key]["Tools"] = {
            "t8n": t8n.version(),
        }
    else:
        config.stash[metadata_key]["Tools"]["t8n"] = t8n.version()
    # Reconstruct the invocation for the report, quoting args containing spaces.
    args = ["fill"] + [str(arg) for arg in config.invocation_params.args]
    command_line_args = " ".join(f'"{arg}"' if " " in arg else arg for arg in args)
    config.stash[metadata_key]["Command-line args"] = f"<code>{command_line_args}</code>"
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config) -> List[str]:
    """Return extra console-header lines: the t8n tool version string."""
    if is_help_or_collectonly_mode(config):
        return []
    tools = config.stash[metadata_key]["Tools"]
    return [f"{tools['t8n']}"]
@pytest.hookimpl(tryfirst=True)
def pytest_report_teststatus(report: Any, config: pytest.Config) -> tuple[str, str, str] | None:
    """
    Suppress per-test progress characters when fixtures are streamed to stdout.

    When the JSON fixtures are written to stdout (to be piped into a consume
    command reading stdin), pytest must not interleave its usual ``...x...``
    progress output with the fixture stream, so the short-report letter is
    replaced with an empty string.
    """
    if not config.fixture_output.is_stdout:  # type: ignore[attr-defined]
        return None
    outcome = report.outcome
    return outcome, "", outcome.upper()
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_terminal_summary(
    terminalreporter: TerminalReporter,
    exitstatus: int,
    config: pytest.Config,
) -> Generator[None, None, None]:
    """
    Modify pytest's terminal summary to emphasize that no tests were ran.
    Emphasize that fixtures have only been filled; they must now be executed to
    actually run the tests.
    Runs as a hookwrapper: the default summary is produced at the ``yield``,
    then the fill-specific closing banner is appended.
    """
    del exitstatus
    yield
    # No banner when streaming fixtures to stdout, and none on xdist worker
    # processes (only the master owns the terminal).
    if config.fixture_output.is_stdout or hasattr(config, "workerinput"):  # type: ignore[attr-defined]
        return
    print_migration_warning(terminalreporter)
    stats = terminalreporter.stats
    if "passed" in stats and stats["passed"]:
        # Custom message for Phase 1 (pre-allocation group generation)
        session_instance: FillingSession = config.filling_session  # type: ignore[attr-defined]
        if session_instance.phase_manager.is_pre_alloc_generation:
            # Generate summary stats
            pre_alloc_groups: PreAllocGroups
            if config.pluginmanager.hasplugin("xdist"):
                # Load pre-allocation groups from disk: under xdist the groups
                # were produced by worker processes, not this session object.
                pre_alloc_groups = PreAllocGroups.from_folder(
                    config.fixture_output.pre_alloc_groups_folder_path,  # type: ignore[attr-defined]
                    lazy_load=False,
                )
            else:
                assert session_instance.pre_alloc_groups is not None
                pre_alloc_groups = session_instance.pre_alloc_groups
            total_groups = len(pre_alloc_groups.root)
            total_accounts = sum(group.pre_account_count for group in pre_alloc_groups.values())
            terminalreporter.write_sep(
                "=",
                f" Phase 1 Complete: Generated {total_groups} pre-allocation groups "
                f"({total_accounts} total accounts) ",
                bold=True,
                green=True,
            )
        else:
            # Normal message for fixture generation
            # append / to indicate this is a directory
            output_dir = str(config.fixture_output.directory) + "/"  # type: ignore[attr-defined]
            terminalreporter.write_sep(
                "=",
                (
                    f' No tests executed - the test fixtures in "{output_dir}" may now be '
                    "executed against a client "
                ),
                bold=True,
                yellow=True,
            )
def pytest_metadata(metadata: Any) -> None:
    """Strip the irrelevant JAVA_HOME entry from the pytest report metadata."""
    if "JAVA_HOME" in metadata:
        del metadata["JAVA_HOME"]
def pytest_html_results_table_header(cells: Any) -> None:
    """Insert fixture-path and EVM-dump-dir columns into the HTML report header."""
    extra_headers = [
        '<th class="sortable" data-column-type="fixturePath">JSON Fixture File</th>',
        '<th class="sortable" data-column-type="evmDumpDir">EVM Dump Dir</th>',
    ]
    cells[3:3] = extra_headers
    # Drop the trailing "Links" column.
    cells.pop()
def pytest_html_results_table_row(report: Any, cells: Any) -> None:
"""Customize the table rows of the HTML report table."""
if hasattr(report, "user_properties"):
user_props = dict(report.user_properties)
if (
report.passed
and "fixture_path_absolute" in user_props
and "fixture_path_relative" in user_props
):
fixture_path_absolute = user_props["fixture_path_absolute"]
fixture_path_relative = user_props["fixture_path_relative"]
fixture_path_link = (
f'<a href="{fixture_path_absolute}" target="_blank">{fixture_path_relative}</a>'
)
cells.insert(3, f"<td>{fixture_path_link}</td>")
elif report.failed:
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/ported_tests.py | src/pytest_plugins/filler/ported_tests.py | """
A pytest plugin that shows `ported_from` marker information.
This plugin extracts and displays information from @pytest.mark.ported_from
markers, showing either the static filler file paths or associated PR URLs.
Usage:
------
# Show static filler file paths:
# uv run fill --show-ported-from tests/
# Show PR URLs instead:
# uv run fill --show-ported-from=prs tests/
The plugin will:
1. Collect all test items with @pytest.mark.ported_from markers
2. Extract either the file paths (first positional argument) or PR URLs (pr
keyword argument)
3. Output a deduplicated, sorted list, one per line
4. Skip test execution (collection only)
5. Exclude tests with coverage_missed_reason from output
Marker Format:
--------------
@pytest.mark.ported_from(
["path/to/static_filler1.json",
"path/to/static_filler2.json"],
pr=[
"https://github.com/ethereum/execution-spec-tests/pull/1234",
"https://github.com/ethereum/execution-spec-tests/pull/5678",
],
coverage_missed_reason="Optional reason for accepted coverage miss",
)
"""
import re
from typing import List, Set
from urllib.parse import urlparse
import pytest
from _pytest.terminal import TerminalReporter
def convert_to_filled(file_path: str) -> str | None:
"""Convert source link to filler to filled test path."""
path = urlparse(file_path).path
if "/src/" in path:
path = path.split("/src/", 1)[1]
if path.endswith((".sh", ".js")):
return None
# Remove "Filler" from the path components
path = path.replace("TestsFiller", "Tests")
# Replace file extension to .json
path = re.sub(r"Filler\.(yml|yaml|json)$", ".json", path)
return path
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the ``ported_from`` reporting options on pytest's command line."""
    group = parser.getgroup(
        "ported_from", "Arguments for showing ported_from marker information"
    )
    group.addoption(
        "--show-ported-from",
        dest="show_ported_from",
        action="store",
        default=None,
        nargs="?",
        const="paths",
        help=(
            "Show information from @pytest.mark.ported_from markers. "
            "Use '--show-ported-from' or '--show-ported-from=paths' to show static filler paths. "
            "Use '--show-ported-from=prs' to show PR URLs."
        ),
    )
    group.addoption(
        "--skip-coverage-missed-reason",
        dest="skip_coverage_missed_reason",
        action="store_true",
        default=False,
        help=(
            "When using --show-ported-from, exclude tests that have "
            "coverage_missed_reason in their @pytest.mark.ported_from marker. "
            "These are tests that were intentionally not ported from the original "
            "static filler files, typically because they are redundant or obsolete. "
            "This helps filter out accepted coverage gaps when analyzing test coverage."
        ),
    )
    group.addoption(
        "--ported-from-output-file",
        dest="ported_from_output_file",
        action="store",
        default=None,
        help="Output file for ported_from information.",
    )
    group.addoption(
        "--links-as-filled",
        dest="links_as_filled",
        action="store_true",
        default=False,
        help=(
            "Convert URLs or paths to filled test file paths for coverage script. "
            "Used in combination with --show-ported-from."
        ),
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """Activate the ported-from display plugin when ``--show-ported-from`` is given."""
    if not config.getoption("show_ported_from"):
        return
    plugin = PortedFromDisplay(config)
    config.pluginmanager.register(plugin, "ported-from-display")
class PortedFromDisplay:
    """
    Pytest plugin class for displaying ported_from marker information.

    Only registered when ``--show-ported-from`` is passed; it gathers marker
    data after collection, prints it (or writes it to a file), and skips the
    actual test run entirely.
    """
    def __init__(self, config: pytest.Config) -> None:
        """Initialize the plugin, caching the relevant command-line options."""
        self.config = config
        # "paths" (default) or "prs" — selects which marker data is output.
        self.show_mode = config.getoption("show_ported_from")
        self.links_as_filled = config.getoption("links_as_filled")
        self.ported_from_output_file = config.getoption("ported_from_output_file")
        self.skip_coverage_missed_reason = config.getoption("skip_coverage_missed_reason")
    @pytest.hookimpl(hookwrapper=True, trylast=True)
    def pytest_collection_modifyitems(
        self,
        session: pytest.Session,
        config: pytest.Config,
        items: List[pytest.Item],
    ) -> object:
        """
        Extract ported_from information from collected test items.

        Runs as a hookwrapper so other plugins' filtering (e.g. ``-k``/``-m``)
        has already been applied to ``items`` by the time the data is read.
        """
        del session
        yield
        # Only run on master node when using pytest-xdist
        if hasattr(config, "workerinput"):
            return
        paths: Set[str] = set()
        prs: Set[str] = set()
        for item in items:
            ported_from_marker = item.get_closest_marker("ported_from")
            if ported_from_marker:
                # Skip tests with coverage_missed_reason (accepted coverage
                # gaps) when the user asked to exclude them.
                if (
                    "coverage_missed_reason" in ported_from_marker.kwargs
                    and self.skip_coverage_missed_reason
                ):
                    continue
                # Extract paths (first positional argument); a single string
                # or a list of strings is accepted.
                if ported_from_marker.args:
                    first_arg = ported_from_marker.args[0]
                    if isinstance(first_arg, list):
                        paths.update(first_arg)
                    elif isinstance(first_arg, str):
                        paths.add(first_arg)
                # Extract PRs (keyword argument 'pr'); same string-or-list rule.
                if "pr" in ported_from_marker.kwargs:
                    pr_arg = ported_from_marker.kwargs["pr"]
                    if isinstance(pr_arg, list):
                        prs.update(pr_arg)
                    elif isinstance(pr_arg, str):
                        prs.add(pr_arg)
        # Output results based on mode; sets deduplicate, sorted() gives a
        # stable, deterministic ordering.
        if self.show_mode == "prs":
            outputs = sorted(prs)
        else:  # default to "paths"
            outputs = sorted(paths)
        output_lines: List[str] = []
        if self.links_as_filled:
            for output in outputs:
                converted_link_output = convert_to_filled(output)
                # convert_to_filled returns None for script files (.sh/.js).
                if converted_link_output is not None:
                    output_lines.append(converted_link_output)
        else:
            output_lines.extend(outputs)
        if self.ported_from_output_file:
            with open(self.ported_from_output_file, "w") as f:
                f.write("\n".join(output_lines))
        else:
            for line in output_lines:
                print(line)
    @pytest.hookimpl(tryfirst=True)
    def pytest_runtestloop(self, session: pytest.Session) -> bool:
        """Skip test execution entirely; returning True tells pytest the run loop is done."""
        del session
        return True
    def pytest_terminal_summary(
        self,
        terminalreporter: TerminalReporter,
        exitstatus: int,
        config: pytest.Config,
    ) -> None:
        """Add a summary line (suppressed when quiet mode, ``-q``, lowers verbosity)."""
        del exitstatus
        if config.getoption("verbose") < 0:
            return
        mode_desc = "PR URLs" if self.show_mode == "prs" else "static filler paths"
        terminalreporter.write_sep("=", f"ported_from {mode_desc} displayed", bold=True)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_format_selector.py | src/pytest_plugins/filler/tests/test_format_selector.py | """Unit tests for the FormatSelector class."""
from typing import List, Set, Tuple
from ethereum_test_fixtures import BaseFixture, FixtureFillingPhase, LabeledFixtureFormat
from ..filler import FormatSelector, PhaseManager
class TestFormatSelector:
    """Test cases for FormatSelector class."""

    @staticmethod
    def _make_format(phases: Set[FixtureFillingPhase]) -> type:
        """
        Create a throwaway fixture-format class declaring the given phases.

        Deduplicates the repeated ``type("MockFixtureFormat", ...)``
        construction used throughout this class. The leading underscore keeps
        pytest from collecting it as a test.
        """
        return type("MockFixtureFormat", (BaseFixture,), {"format_phases": phases})

    def test_init(self) -> None:
        """Test basic initialization."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.FILL)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        assert format_selector.phase_manager is phase_manager

    def test_should_generate_pre_alloc_phase_with_pre_alloc_format(self) -> None:
        """Test pre-alloc phase with format that supports pre-alloc."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.PRE_ALLOC_GENERATION)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        format_with_pre_alloc = self._make_format(
            {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL}
        )
        assert format_selector.should_generate(format_with_pre_alloc)

    def test_should_generate_pre_alloc_phase_without_pre_alloc_format(self) -> None:
        """Test pre-alloc phase with format that doesn't support pre-alloc."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.PRE_ALLOC_GENERATION)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        format_without_pre_alloc = self._make_format({FixtureFillingPhase.FILL})
        assert not format_selector.should_generate(format_without_pre_alloc)

    def test_should_generate_single_phase_fill_only_format(self) -> None:
        """Test single-phase fill with fill-only format."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.FILL)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        fill_only_format = self._make_format({FixtureFillingPhase.FILL})
        assert format_selector.should_generate(fill_only_format)

    def test_should_generate_single_phase_pre_alloc_format(self) -> None:
        """Test single-phase fill with format that supports pre-alloc."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.FILL)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        format_with_pre_alloc = self._make_format(
            {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL}
        )
        # Should not generate because it needs pre-alloc but we're in single
        # phase
        assert not format_selector.should_generate(format_with_pre_alloc)

    def test_should_generate_phase2_with_pre_alloc_format(self) -> None:
        """
        Test phase 2 (after pre-alloc) with format that supports pre-alloc.
        """
        phase_manager = PhaseManager(
            current_phase=FixtureFillingPhase.FILL,
            previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
        )
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        format_with_pre_alloc = self._make_format(
            {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL}
        )
        # Should generate in phase 2
        assert format_selector.should_generate(format_with_pre_alloc)

    def test_should_generate_phase2_without_pre_alloc_format(self) -> None:
        """Test phase 2 (after pre-alloc) with fill-only format."""
        phase_manager = PhaseManager(
            current_phase=FixtureFillingPhase.FILL,
            previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
        )
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        fill_only_format = self._make_format({FixtureFillingPhase.FILL})
        # Should not generate because it doesn't need pre-alloc
        assert not format_selector.should_generate(fill_only_format)

    def test_should_generate_phase2_with_generate_all(self) -> None:
        """Test phase 2 with --generate-all-formats flag."""
        phase_manager = PhaseManager(
            current_phase=FixtureFillingPhase.FILL,
            previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
        )
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=True)
        fill_only_format = self._make_format({FixtureFillingPhase.FILL})
        format_with_pre_alloc = self._make_format(
            {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL}
        )
        # With generate_all=True, both formats should be generated
        assert format_selector.should_generate(fill_only_format)
        assert format_selector.should_generate(format_with_pre_alloc)

    def test_should_generate_labeled_format(self) -> None:
        """Test with LabeledFixtureFormat wrapper."""
        phase_manager = PhaseManager(current_phase=FixtureFillingPhase.FILL)
        format_selector = FormatSelector(phase_manager=phase_manager, generate_all_formats=False)
        labeled_format = LabeledFixtureFormat(
            self._make_format({FixtureFillingPhase.FILL}),
            "mock_labeled_format",
            "A mock labeled fixture format",
        )
        assert format_selector.should_generate(labeled_format)

    def test_comprehensive_scenarios(self) -> None:
        """
        Test comprehensive scenarios covering all phase and format
        combinations.
        """
        # Test matrix: (current_phase, previous_phases, format_phases,
        # generate_all) -> expected
        test_cases: List[
            Tuple[
                FixtureFillingPhase, Set[FixtureFillingPhase], Set[FixtureFillingPhase], bool, bool
            ]
        ] = [
            # Pre-alloc generation phase
            (
                FixtureFillingPhase.PRE_ALLOC_GENERATION,
                set(),
                {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL},
                False,
                True,
            ),
            (
                FixtureFillingPhase.PRE_ALLOC_GENERATION,
                set(),
                {FixtureFillingPhase.FILL},
                False,
                False,
            ),
            # Single-phase fill
            (FixtureFillingPhase.FILL, set(), {FixtureFillingPhase.FILL}, False, True),
            (
                FixtureFillingPhase.FILL,
                set(),
                {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL},
                False,
                False,
            ),
            # Phase 2 without generate_all
            (
                FixtureFillingPhase.FILL,
                {FixtureFillingPhase.PRE_ALLOC_GENERATION},
                {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL},
                False,
                True,
            ),
            (
                FixtureFillingPhase.FILL,
                {FixtureFillingPhase.PRE_ALLOC_GENERATION},
                {FixtureFillingPhase.FILL},
                False,
                False,
            ),
            # Phase 2 with generate_all
            (
                FixtureFillingPhase.FILL,
                {FixtureFillingPhase.PRE_ALLOC_GENERATION},
                {FixtureFillingPhase.PRE_ALLOC_GENERATION, FixtureFillingPhase.FILL},
                True,
                True,
            ),
            (
                FixtureFillingPhase.FILL,
                {FixtureFillingPhase.PRE_ALLOC_GENERATION},
                {FixtureFillingPhase.FILL},
                True,
                True,
            ),
        ]
        for current, previous, format_phases, gen_all, expected in test_cases:
            phase_manager = PhaseManager(current_phase=current, previous_phases=previous)
            format_selector = FormatSelector(
                phase_manager=phase_manager, generate_all_formats=gen_all
            )
            fixture_format = self._make_format(format_phases)
            result = format_selector.should_generate(fixture_format)
            assert result == expected, (
                f"Failed for phase={current}, previous={previous}, "
                f"format_phases={format_phases}, generate_all={gen_all}"
            )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_prealloc_group.py | src/pytest_plugins/filler/tests/test_prealloc_group.py | """Test the pre_alloc_group marker functionality."""
import textwrap
from pathlib import Path
from typing import Any, ClassVar, Dict, List
from unittest.mock import Mock
import pytest
from ethereum_clis import TransitionTool
from ethereum_test_fixtures import BaseFixture, PreAllocGroups
from ethereum_test_forks import Fork, Prague
from ethereum_test_specs.base import BaseTest
from ethereum_test_types import Alloc, Environment
from ..filler import default_output_directory
class MockTest(BaseTest):
    """Minimal BaseTest subclass used to exercise pre-alloc group hashing."""
    # Pre-allocation and genesis environment: the inputs exercised by the
    # compute_pre_alloc_group_hash tests below.
    pre: Alloc
    genesis_environment: Environment
    def __init__(
        self, pre: Alloc, genesis_environment: Environment, request: Mock | None = None
    ) -> None:
        """Initialize mock test, optionally attaching a mock pytest request."""
        super().__init__(  # type: ignore
            pre=pre,
            genesis_environment=genesis_environment,
        )
        # Stored so marker lookups (pre_alloc_group) can be driven from a
        # mock request node in the tests — presumably read by
        # compute_pre_alloc_group_hash; see the tests below.
        self._request = request
    def generate(self, *args: Any, **kwargs: Any) -> BaseFixture:
        """Unused abstract hook; this mock never generates fixtures."""
        raise NotImplementedError("This is a mock test class")
    def get_genesis_environment(self, fork: Fork) -> Environment:
        """Return the genesis environment adjusted to the fork's requirements."""
        return self.genesis_environment.set_fork_requirements(fork)
def test_pre_alloc_group_separate() -> None:
    """Test that pre_alloc_group("separate") forces unique grouping."""
    env = Environment()
    pre = Alloc()
    fork = Prague

    def separate_request(nodeid: str) -> Mock:
        """Build a mock pytest request carrying a pre_alloc_group("separate") marker."""
        request = Mock()
        request.node = Mock()
        request.node.nodeid = nodeid
        marker = Mock()
        marker.args = ("separate",)
        request.node.get_closest_marker = Mock(return_value=marker)
        return request

    unmarked_hash = MockTest(pre=pre, genesis_environment=env).compute_pre_alloc_group_hash(fork)
    marked_hash = MockTest(
        pre=pre,
        genesis_environment=env,
        request=separate_request("test_module.py::test_function"),
    ).compute_pre_alloc_group_hash(fork)

    # The "separate" marker must split the test out of the shared group.
    assert unmarked_hash != marked_hash
    # Unmarked tests with identical pre/env still share a group.
    assert (
        MockTest(pre=pre, genesis_environment=env).compute_pre_alloc_group_hash(fork)
        == unmarked_hash
    )
def test_pre_alloc_group_custom_salt() -> None:
    """Test that custom group names create consistent grouping."""
    env = Environment()
    pre = Alloc()
    fork = Prague

    def hash_for(nodeid: str, group_name: str) -> object:
        """Hash a MockTest carrying a pre_alloc_group(group_name) marker."""
        request = Mock()
        request.node = Mock()
        request.node.nodeid = nodeid
        marker = Mock()
        marker.args = (group_name,)
        request.node.get_closest_marker = Mock(return_value=marker)
        test = MockTest(pre=pre, genesis_environment=env, request=request)
        return test.compute_pre_alloc_group_hash(fork)

    hash1 = hash_for("test_module.py::test_function1", "eip1234")
    hash2 = hash_for("test_module.py::test_function2", "eip1234")
    hash3 = hash_for("test_module.py::test_function3", "eip5678")

    # Same custom group name -> same pre-alloc group, regardless of nodeid.
    assert hash1 == hash2
    # A different custom group name yields a distinct group.
    assert hash1 != hash3
    assert hash2 != hash3
def test_pre_alloc_group_separate_different_nodeids() -> None:
    """Test that different tests with "separate" get different hashes."""
    env = Environment()
    pre = Alloc()
    fork = Prague

    def hash_for(nodeid: str) -> object:
        """Hash a MockTest marked pre_alloc_group("separate") under the given nodeid."""
        request = Mock()
        request.node = Mock()
        request.node.nodeid = nodeid
        marker = Mock()
        marker.args = ("separate",)
        request.node.get_closest_marker = Mock(return_value=marker)
        test = MockTest(pre=pre, genesis_environment=env, request=request)
        return test.compute_pre_alloc_group_hash(fork)

    first = hash_for("test_module.py::test_function1")
    second = hash_for("test_module.py::test_function2")

    # "separate" salts the hash with the nodeid, so each test is unique.
    assert first != second
def test_no_pre_alloc_group_marker() -> None:
    """Test normal grouping without pre_alloc_group marker."""
    env = Environment()
    pre = Alloc()
    fork = Prague

    # A request whose node has no pre_alloc_group marker at all.
    request = Mock()
    request.node = Mock()
    request.node.nodeid = "test_module.py::test_function"
    request.node.get_closest_marker = Mock(return_value=None)

    with_request = MockTest(pre=pre, genesis_environment=env, request=request)
    without_request = MockTest(pre=pre, genesis_environment=env)

    # A marker-less request must group exactly like having no request at all.
    assert with_request.compute_pre_alloc_group_hash(
        fork
    ) == without_request.compute_pre_alloc_group_hash(fork)
def test_pre_alloc_group_with_reason() -> None:
    """Test that reason kwarg is accepted but doesn't affect grouping."""
    env = Environment()
    pre = Alloc()
    fork = Prague

    def hash_for(nodeid: str, reason: str) -> object:
        """Hash a MockTest in group "hardcoded_addresses" with the given reason."""
        request = Mock()
        request.node = Mock()
        request.node.nodeid = nodeid
        marker = Mock()
        marker.args = ("hardcoded_addresses",)
        marker.kwargs = {"reason": reason}
        request.node.get_closest_marker = Mock(return_value=marker)
        test = MockTest(pre=pre, genesis_environment=env, request=request)
        return test.compute_pre_alloc_group_hash(fork)

    first = hash_for(
        "test_module.py::test_function1",
        "Uses legacy hardcoded addresses for backwards compatibility",
    )
    second = hash_for(
        "test_module.py::test_function2",
        "Different reason but same group",
    )

    # Only the group name is hashed; the human-readable reason is ignored.
    assert first == second
class FormattedTest:
    """Base class for a test module rendered from a string template."""
    # Per-instance substitutions applied to ``template`` on format().
    kwargs: Dict[str, str]
    # Subclasses provide the module source with ``{placeholder}`` fields.
    template: ClassVar[str]
    def __init__(self, **kwargs: str) -> None:
        """Store the template substitutions for later rendering."""
        self.kwargs = kwargs
    def format(self) -> str:
        """Render the template with the stored substitutions."""
        return self.template.format(**self.kwargs)
class StateTest(FormattedTest):
    """State-test module template; ``{env}`` is filled with an Environment expression."""
    # Doubled braces ({{ }}) survive str.format and render as literal dicts.
    template: ClassVar[str] = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import (
            Account,
            Alloc,
            Environment,
            StateTestFiller,
            Transaction
        )
        from ethereum_test_vm import Opcodes as Op
        @pytest.mark.valid_from("Istanbul")
        def test_chainid(state_test: StateTestFiller, pre: Alloc) -> None:
            contract_address = pre.deploy_contract(Op.SSTORE(1, Op.CHAINID) + Op.STOP)
            sender = pre.fund_eoa()
            tx = Transaction(
                ty=0x0,
                chain_id=0x01,
                to=contract_address,
                gas_limit=100_000,
                sender=sender,
            )
            post = {{
                contract_address: Account(storage={{"0x01": "0x01"}}),
            }}
            state_test(env={env}, pre=pre, post=post, tx=tx)
        """
    )
class BlockchainTest(FormattedTest):
    """Blockchain-test module template; ``{env}`` is filled with an Environment expression."""
    # Doubled braces ({{ }}) survive str.format and render as literal dicts.
    template: ClassVar[str] = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import (
            Account,
            Alloc,
            Block,
            BlockchainTestFiller,
            Environment,
            Transaction
        )
        from ethereum_test_vm import Opcodes as Op
        @pytest.mark.valid_from("Istanbul")
        def test_chainid_blockchain(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
            contract_address = pre.deploy_contract(Op.SSTORE(1, Op.CHAINID) + Op.STOP)
            sender = pre.fund_eoa()
            tx = Transaction(
                ty=0x0,
                chain_id=0x01,
                to=contract_address,
                gas_limit=100_000,
                sender=sender,
            )
            post = {{
                contract_address: Account(storage={{"0x01": "0x01"}}),
            }}
            blockchain_test(
                genesis_environment={env},
                pre=pre,
                post=post,
                blocks=[Block(txs=[tx])],
            )
        """
    )
@pytest.mark.parametrize(
"test_definitions,expected_different_pre_alloc_groups",
[
# Environment fields not affecting the pre-alloc groups
pytest.param(
[
BlockchainTest(env="Environment()"),
StateTest(env="Environment()"),
],
1,
id="different_types_default_environment",
),
pytest.param(
[
StateTest(env="Environment(fee_recipient=pre.fund_eoa(amount=0))"),
StateTest(env="Environment(fee_recipient=1)"),
StateTest(env="Environment(fee_recipient=2)"),
],
1,
id="different_fee_recipients",
),
pytest.param(
[
StateTest(env="Environment(fee_recipient=1)"),
BlockchainTest(env="Environment(fee_recipient=1)"),
],
2,
id="different_fee_recipients_different_types",
),
pytest.param(
[
StateTest(env="Environment(prev_randao=1)"),
StateTest(env="Environment(prev_randao=2)"),
],
1,
id="different_prev_randaos",
),
pytest.param(
[
StateTest(env="Environment(prev_randao=1)"),
BlockchainTest(env="Environment(prev_randao=2)"),
],
2,
id="different_prev_randaos_different_types",
),
pytest.param(
[
StateTest(env="Environment(timestamp=1)"),
StateTest(env="Environment(timestamp=2)"),
],
1,
id="different_timestamps",
),
pytest.param(
[
StateTest(env="Environment(extra_data='0x01')"),
StateTest(env="Environment(extra_data='0x02')"),
],
1,
id="different_extra_data",
),
pytest.param(
[
StateTest(env="Environment(extra_data='0x01')"),
BlockchainTest(env="Environment(extra_data='0x02')"),
],
2,
id="different_extra_data_different_types",
marks=pytest.mark.xfail(
reason="Extra data is excluded=True in the Environment model, so it does not "
"propagate correctly to the genesis header without a lot of code changes.",
),
),
# Environment fields affecting the pre-alloc groups
pytest.param(
[
StateTest(env="Environment(gas_limit=100_000_000)"),
StateTest(env="Environment(gas_limit=200_000_000)"),
],
2,
id="different_gas_limits",
),
pytest.param(
[
StateTest(env="Environment(number=10)"),
StateTest(env="Environment(number=20)"),
],
2,
id="different_block_numbers",
),
pytest.param(
[
StateTest(env="Environment(base_fee_per_gas=10)"),
StateTest(env="Environment(base_fee_per_gas=20)"),
],
2,
id="different_base_fee",
),
pytest.param(
[
StateTest(env="Environment(excess_blob_gas=10)"),
StateTest(env="Environment(excess_blob_gas=20)"),
],
2,
id="different_excess_blob_gas",
),
],
)
def test_pre_alloc_grouping_by_test_type(
    pytester: pytest.Pytester,
    default_t8n: TransitionTool,
    test_definitions: List[FormattedTest],
    expected_different_pre_alloc_groups: int,
) -> None:
    """
    Test pre-alloc grouping when filling state tests, and the effect of the
    `state_test.env`.

    Writes each formatted test definition into its own module, runs `fill`
    with `--generate-pre-alloc-groups`, and then verifies that:
    - the number of generated pre-alloc group files matches expectations;
    - every group's recorded `environment` is consistent with its `genesis`
      header, field by field.
    """
    tests_dir = Path(pytester.mkdir("tests"))
    for i, test in enumerate(test_definitions):
        test_module = tests_dir / f"test_{i}.py"
        test_module.write_text(test.format())
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    args = [
        "-c",
        "pytest-fill.ini",
        "--generate-pre-alloc-groups",
        "--fork=Cancun",
        "--t8n-server-url",
    ]
    assert default_t8n.server_url is not None
    args.append(default_t8n.server_url)
    result = pytester.runpytest(*args)
    result.assert_outcomes(
        passed=len(test_definitions),
        failed=0,
        skipped=0,
        errors=0,
    )
    output_dir = (
        Path(default_output_directory()).absolute() / "blockchain_tests_engine_x" / "pre_alloc"
    )
    assert output_dir.exists()
    groups = PreAllocGroups.from_folder(output_dir, lazy_load=False)
    # One JSON file is written per pre-alloc group; count them once so the
    # check and the error message cannot disagree (previously the check
    # counted files on disk while the message reported `len(groups)`).
    actual_group_count = len([f for f in output_dir.iterdir() if f.name.endswith(".json")])
    if actual_group_count != expected_different_pre_alloc_groups:
        error_message = (
            f"Expected {expected_different_pre_alloc_groups} different pre-alloc groups, "
            f"but got {actual_group_count}"
        )
        for group_hash, group in groups.items():
            error_message += f"\n{group_hash}: \n"
            error_message += f"tests: {group.test_ids}\n"
            error_message += (
                f"env: {group.environment.model_dump_json(indent=2, exclude_none=True)}\n"
            )
        raise AssertionError(error_message)
    # Every environment field that affects the genesis header must round-trip
    # into the generated genesis of its group.
    for group_hash, group in groups.items():
        assert group.environment.fee_recipient == group.genesis.fee_recipient, (
            f"Fee recipient mismatch for group {group_hash}: {group.environment.fee_recipient} != "
            f"{group.genesis.fee_recipient}"
        )
        assert group.environment.prev_randao == group.genesis.prev_randao, (
            f"Prev randao mismatch for group {group_hash}: {group.environment.prev_randao} != "
            f"{group.genesis.prev_randao}"
        )
        assert group.environment.extra_data == group.genesis.extra_data, (
            f"Extra data mismatch for group {group_hash}: {group.environment.extra_data} != "
            f"{group.genesis.extra_data}"
        )
        assert group.environment.number == group.genesis.number, (
            f"Number mismatch for group {group_hash}: {group.environment.number} != "
            f"{group.genesis.number}"
        )
        assert group.environment.timestamp == group.genesis.timestamp, (
            f"Timestamp mismatch for group {group_hash}: {group.environment.timestamp} != "
            f"{group.genesis.timestamp}"
        )
        assert group.environment.difficulty == group.genesis.difficulty, (
            f"Difficulty mismatch for group {group_hash}: {group.environment.difficulty} != "
            f"{group.genesis.difficulty}"
        )
        assert group.environment.gas_limit == group.genesis.gas_limit, (
            f"Gas limit mismatch for group {group_hash}: {group.environment.gas_limit} != "
            f"{group.genesis.gas_limit}"
        )
        assert group.environment.base_fee_per_gas == group.genesis.base_fee_per_gas, (
            f"Base fee per gas mismatch for group {group_hash}: "
            f"{group.environment.base_fee_per_gas} != "
            f"{group.genesis.base_fee_per_gas}"
        )
        assert group.environment.excess_blob_gas == group.genesis.excess_blob_gas, (
            f"Excess blob gas mismatch for group {group_hash}: "
            f"{group.environment.excess_blob_gas} != "
            f"{group.genesis.excess_blob_gas}"
        )
        assert group.environment.blob_gas_used == group.genesis.blob_gas_used, (
            f"Blob gas used mismatch for group {group_hash}: {group.environment.blob_gas_used} != "
            f"{group.genesis.blob_gas_used}"
        )
        assert (
            group.environment.parent_beacon_block_root == group.genesis.parent_beacon_block_root
        ), (
            f"Parent beacon block root mismatch for group {group_hash}: "
            f"{group.environment.parent_beacon_block_root} != "
            f"{group.genesis.parent_beacon_block_root}"
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_phase_manager.py | src/pytest_plugins/filler/tests/test_phase_manager.py | """Unit tests for the PhaseManager class."""
from typing import Any
import pytest
from ethereum_test_fixtures import FixtureFillingPhase
from ..filler import PhaseManager
class MockConfig:
    """Minimal stand-in for a pytest ``Config``, exposing only ``getoption``."""

    def __init__(
        self,
        generate_pre_alloc_groups: bool = False,
        use_pre_alloc_groups: bool = False,
        generate_all_formats: bool = False,
    ) -> None:
        """Record the supplied flag values in an internal option map."""
        self._options = dict(
            generate_pre_alloc_groups=generate_pre_alloc_groups,
            use_pre_alloc_groups=use_pre_alloc_groups,
            generate_all_formats=generate_all_formats,
        )

    def getoption(self, name: str, default: Any = None) -> Any:
        """Return the stored value for *name*; fall back to *default*."""
        if name in self._options:
            return self._options[name]
        return default
class TestPhaseManager:
    """Test cases for PhaseManager class."""

    @staticmethod
    def _manager(**flags: bool) -> PhaseManager:
        """Build a PhaseManager via `from_config` using a MockConfig."""
        return PhaseManager.from_config(
            MockConfig(**flags)  # type: ignore[arg-type]
        )

    def test_init(self) -> None:
        """Test basic initialization."""
        pm = PhaseManager(
            current_phase=FixtureFillingPhase.FILL,
            previous_phases={FixtureFillingPhase.PRE_ALLOC_GENERATION},
        )
        assert pm.current_phase == FixtureFillingPhase.FILL
        assert pm.previous_phases == {FixtureFillingPhase.PRE_ALLOC_GENERATION}

    def test_from_config_normal_fill(self) -> None:
        """Test normal single-phase filling (no flags set)."""
        pm = self._manager()
        assert pm.current_phase == FixtureFillingPhase.FILL
        assert pm.previous_phases == set()
        assert pm.is_single_phase_fill
        assert not pm.is_pre_alloc_generation
        assert not pm.is_fill_after_pre_alloc

    def test_from_config_generate_pre_alloc(self) -> None:
        """Test phase 1: generate pre-allocation groups."""
        pm = self._manager(generate_pre_alloc_groups=True)
        assert pm.current_phase == FixtureFillingPhase.PRE_ALLOC_GENERATION
        assert pm.previous_phases == set()
        assert pm.is_pre_alloc_generation
        assert not pm.is_single_phase_fill
        assert not pm.is_fill_after_pre_alloc

    def test_from_config_use_pre_alloc(self) -> None:
        """Test phase 2: use pre-allocation groups."""
        pm = self._manager(use_pre_alloc_groups=True)
        assert pm.current_phase == FixtureFillingPhase.FILL
        assert pm.previous_phases == {FixtureFillingPhase.PRE_ALLOC_GENERATION}
        assert pm.is_fill_after_pre_alloc
        assert not pm.is_pre_alloc_generation
        assert not pm.is_single_phase_fill

    def test_from_config_generate_all_formats(self) -> None:
        """Generate_all_formats should trigger PRE_ALLOC_GENERATION phase."""
        pm = self._manager(generate_all_formats=True)
        assert pm.current_phase == FixtureFillingPhase.PRE_ALLOC_GENERATION
        assert pm.previous_phases == set()
        assert pm.is_pre_alloc_generation
        assert not pm.is_single_phase_fill
        assert not pm.is_fill_after_pre_alloc

    def test_from_config_generate_all_and_pre_alloc(self) -> None:
        """Test both generate_all_formats and generate_pre_alloc_groups set."""
        pm = self._manager(generate_pre_alloc_groups=True, generate_all_formats=True)
        assert pm.current_phase == FixtureFillingPhase.PRE_ALLOC_GENERATION
        assert pm.previous_phases == set()
        assert pm.is_pre_alloc_generation

    def test_from_config_use_pre_alloc_with_generate_all(self) -> None:
        """Test phase 2 with generate_all_formats (passed by CLI)."""
        pm = self._manager(use_pre_alloc_groups=True, generate_all_formats=True)
        # use_pre_alloc_groups takes precedence
        assert pm.current_phase == FixtureFillingPhase.FILL
        assert pm.previous_phases == {FixtureFillingPhase.PRE_ALLOC_GENERATION}
        assert pm.is_fill_after_pre_alloc

    def test_all_flag_combinations(self) -> None:
        """
        Test all 8 possible flag combinations to ensure correct phase
        determination.
        """
        # (generate_pre_alloc, use_pre_alloc, generate_all) ->
        # (current_phase, has_previous)
        cases = [
            (False, False, False, FixtureFillingPhase.FILL, False),
            # generate_all alone triggers phase 1
            (False, False, True, FixtureFillingPhase.PRE_ALLOC_GENERATION, False),
            (False, True, False, FixtureFillingPhase.FILL, True),  # Phase 2
            (False, True, True, FixtureFillingPhase.FILL, True),  # Phase 2 + generate_all
            (True, False, False, FixtureFillingPhase.PRE_ALLOC_GENERATION, False),  # Phase 1
            (True, False, True, FixtureFillingPhase.PRE_ALLOC_GENERATION, False),
            # Invalid combinations below: use_pre_alloc still wins.
            (True, True, False, FixtureFillingPhase.FILL, True),
            (True, True, True, FixtureFillingPhase.FILL, True),
        ]
        for gen_pre, use_pre, gen_all, expected_phase, has_previous in cases:
            pm = self._manager(
                generate_pre_alloc_groups=gen_pre,
                use_pre_alloc_groups=use_pre,
                generate_all_formats=gen_all,
            )
            assert pm.current_phase == expected_phase, (
                f"Failed for flags: generate_pre_alloc={gen_pre}, "
                f"use_pre_alloc={use_pre}, generate_all={gen_all}"
            )
            if has_previous:
                assert FixtureFillingPhase.PRE_ALLOC_GENERATION in pm.previous_phases
            else:
                assert pm.previous_phases == set()
# Allow running this module directly (e.g. `python test_phase_manager.py`)
# outside a normal pytest session.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_filler.py | src/pytest_plugins/filler/tests/test_filler.py | """
Test the filler plugin.
"""
import configparser
import json
import os
import textwrap
from datetime import datetime
from pathlib import Path
import pytest
from ethereum_test_tools import Environment
from ethereum_clis import ExecutionSpecsTransitionTool, TransitionTool
from ..filler import default_output_directory
# flake8: noqa
def get_all_files_in_directory(base_dir: str) -> list[Path]: # noqa: D103
base_path = Path(base_dir)
return [f.relative_to(os.getcwd()) for f in base_path.rglob("*") if f.is_file()]
def count_keys_in_fixture(file_path: Path) -> int:  # noqa: D103
    """Return the number of top-level keys in the JSON document at *file_path*."""
    with open(file_path, "r") as f:
        loaded = json.load(f)
    if isinstance(loaded, dict):
        return len(loaded)
    # Only a JSON object (fixture-name -> fixture mapping) is acceptable.
    raise ValueError(
        f"Expected a dictionary in {file_path}, but got {type(loaded).__name__}."
    )
test_module_paris = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Account, Environment, TestAddress, Transaction
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
def test_paris_one(state_test) -> None:
state_test(env=Environment(),
pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction())
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
def test_paris_two(state_test) -> None:
state_test(env=Environment(),
pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction())
"""
)
test_count_paris = 4
test_module_shanghai = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Account, Environment, TestAddress, Transaction
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
def test_shanghai_one(state_test) -> None:
state_test(env=Environment(),
pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction())
@pytest.mark.parametrize("x", [1, 2, 3])
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
def test_shanghai_two(state_test, x) -> None:
state_test(env=Environment(),
pre={TestAddress: Account(balance=1_000_000)}, post={}, tx=Transaction())
"""
)
test_count_shanghai = 8
total_test_count = test_count_paris + test_count_shanghai
@pytest.mark.parametrize(
    # Each case supplies: extra CLI args for `fill`, the fixture files
    # expected on disk afterwards, and the fixture count per file.
    "args, expected_fixture_files, expected_fixture_counts",
    [
pytest.param(
[],
[
Path("fixtures/blockchain_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_one.json"),
Path("fixtures/state_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_two.json"),
Path("fixtures/state_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_one.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_two.json"),
],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6],
id="default-args",
),
pytest.param(
["--skip-index"],
[
Path("fixtures/blockchain_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_one.json"),
Path("fixtures/state_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_two.json"),
Path("fixtures/state_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_one.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_two.json"),
],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6],
id="skip-index",
),
pytest.param(
["--build-name", "test_build"],
[
Path("fixtures/blockchain_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_one.json"),
Path("fixtures/state_tests/paris/module_paris/paris_one.json"),
Path("fixtures/blockchain_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests_engine/paris/module_paris/paris_two.json"),
Path("fixtures/state_tests/paris/module_paris/paris_two.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_one.json"),
Path("fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two.json"),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two.json"
),
Path("fixtures/state_tests/shanghai/module_shanghai/shanghai_two.json"),
],
[2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 6, 6],
id="build-name-in-fixtures-ini-file",
),
pytest.param(
["--single-fixture-per-file"],
[
Path(
"fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/paris/module_paris/paris_one__fork_Paris_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/paris/module_paris/paris_one__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/paris/module_paris/paris_one__fork_Shanghai_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/paris/module_paris/paris_two__fork_Paris_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/paris/module_paris/paris_two__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/paris/module_paris/paris_two__fork_Shanghai_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_state_test.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_1.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_1.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_1.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_2.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_2.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_2.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_3.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_3.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_3.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_1.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_1.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_1.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_2.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_2.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_2.json"
),
Path(
"fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_3.json"
),
Path(
"fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_3.json"
),
Path(
"fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_3.json"
),
],
[1] * 36,
id="single-fixture-per-file",
),
pytest.param(
["--single-fixture-per-file", "--output", "other_fixtures"],
[
Path(
"other_fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/paris/module_paris/paris_one__fork_Paris_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/paris/module_paris/paris_one__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/paris/module_paris/paris_one__fork_Shanghai_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/paris/module_paris/paris_one__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/paris/module_paris/paris_two__fork_Paris_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/paris/module_paris/paris_two__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/paris/module_paris/paris_two__fork_Shanghai_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/paris/module_paris/paris_two__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Paris_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one__fork_Paris_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_from_state_test.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_one__fork_Shanghai_state_test.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_one__fork_Shanghai_blockchain_test_engine_from_state_test.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_1.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_1.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_1.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_2.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_2.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_2.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_from_state_test_x_3.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Paris_state_test_x_3.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Paris_blockchain_test_engine_from_state_test_x_3.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_1.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_1.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_1.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_2.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_2.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_2.json"
),
Path(
"other_fixtures/blockchain_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_from_state_test_x_3.json"
),
Path(
"other_fixtures/state_tests/shanghai/module_shanghai/shanghai_two__fork_Shanghai_state_test_x_3.json"
),
Path(
"other_fixtures/blockchain_tests_engine/shanghai/module_shanghai/shanghai_two__fork_Shanghai_blockchain_test_engine_from_state_test_x_3.json"
),
],
[1] * 36,
id="single-fixture-per-file_custom_output_dir",
),
],
)
def test_fixture_output_based_on_command_line_args(
    testdir: pytest.Testdir,
    args: list[str],
    expected_fixture_files: list[Path],
    expected_fixture_counts: list[int],
    default_t8n: TransitionTool,
) -> None:
    """
    Test:
    - fixture files are created at the expected paths.
    - no other files are present in the output directory.
    - each fixture file contains the expected number of fixtures.
    The modules above generate the following test cases:
    tests/paris/test_module_paris.py::test_paris_one[fork_Paris] PASSED
    tests/paris/test_module_paris.py::test_paris_one[fork_Shanghai] PASSED
    tests/paris/test_module_paris.py::test_paris_two[fork_Paris] PASSED
    tests/paris/test_module_paris.py::test_paris_two[fork_Shanghai] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_one[fork_Paris] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_one[fork_Shanghai] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=1] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=2] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Paris-x=3] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=1] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=2] PASSED
    tests/shanghai/test_module_shanghai.py::test_shanghai_two[fork_Shanghai-x=3] PASSED
    """
    # Write the example test modules into a temporary tests/ tree.
    tests_dir = testdir.mkdir("tests")
    paris_tests_dir = tests_dir.mkdir("paris")
    test_module = paris_tests_dir.join("test_module_paris.py")
    test_module.write(test_module_paris)
    shanghai_tests_dir = tests_dir.mkdir("shanghai")
    test_module = shanghai_tests_dir.join("test_module_shanghai.py")
    test_module.write(test_module_shanghai)
    testdir.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    args.append("-c")
    args.append("pytest-fill.ini")
    args.append("-v")
    args.append("--no-html")
    args.append("--t8n-server-url")
    assert default_t8n.server_url is not None
    args.append(default_t8n.server_url)
    result = testdir.runpytest(*args)
    # Each test case is filled into three fixture formats, hence `* 3`.
    result.assert_outcomes(
        passed=total_test_count * 3,
        failed=0,
        skipped=0,
        errors=0,
    )
    if "--output" in args:
        output_dir = Path(args[args.index("--output") + 1]).absolute()
    else:
        output_dir = Path(default_output_directory()).absolute()
    assert output_dir.exists()
    all_files = get_all_files_in_directory(str(output_dir))
    meta_dir = os.path.join(output_dir, ".meta")
    assert os.path.exists(meta_dir), f"The directory {meta_dir} does not exist"
    expected_ini_file = "fixtures.ini"
    expected_index_file = "index.json"
    expected_resolver_file = None
    resolver_file = None
    if TransitionTool.default_tool == ExecutionSpecsTransitionTool:
        expected_resolver_file = "eels_resolutions.json"
    ini_file = None
    index_file = None
    for file in all_files:
        if file.name == expected_ini_file:
            ini_file = file
        elif file.name == expected_index_file:
            index_file = file
        elif expected_resolver_file and file.name == expected_resolver_file:
            resolver_file = file
            assert resolver_file.exists(), f"{resolver_file} does not exist"
    # Fix: a missing resolver file was previously ignored silently because
    # the existence assertion only ran once the file had been found.
    if expected_resolver_file:
        assert resolver_file is not None, (
            f"No {expected_resolver_file} file was found in {meta_dir}"
        )
    expected_additional_files = {expected_ini_file, expected_index_file}
    if resolver_file:
        expected_additional_files.add(str(expected_resolver_file))
    all_fixtures = [file for file in all_files if file.name not in expected_additional_files]
    for fixture_file, fixture_count in zip(expected_fixture_files, expected_fixture_counts):
        assert fixture_file.exists(), f"{fixture_file} does not exist"
        assert fixture_count == count_keys_in_fixture(fixture_file), (
            f"Fixture count mismatch for {fixture_file}"
        )
    assert set(all_fixtures) == set(expected_fixture_files), (
        f"Unexpected files in directory: {set(all_fixtures) - set(expected_fixture_files)}"
    )
    assert ini_file is not None, f"No {expected_ini_file} file was found in {meta_dir}"
    config = configparser.ConfigParser()
    ini_file_text = ini_file.read_text()
    # The t8n server URL differs between runs; normalize before parsing.
    ini_file_text = ini_file_text.replace(default_t8n.server_url, "t8n_server_path")
    config.read_string(ini_file_text)
    if "--skip-index" not in args:
        assert index_file is not None, f"No {expected_index_file} file was found in {meta_dir}"
    properties = {key: value for key, value in config.items("fixtures")}
    assert "timestamp" in properties
    timestamp = datetime.fromisoformat(properties["timestamp"])
    assert timestamp.year == datetime.now().year
    if "--build-name" in args:
        assert "build" in properties
        build_name = args[args.index("--build-name") + 1]
        assert properties["build"] == build_name
test_module_environment_variables = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Account, Environment, Transaction
@pytest.mark.parametrize("block_gas_limit", [Environment().gas_limit])
@pytest.mark.valid_at("Cancun")
def test_max_gas_limit(state_test, pre, block_gas_limit) -> None:
env = Environment()
assert block_gas_limit == {expected_gas_limit}
tx = Transaction(gas_limit=block_gas_limit, sender=pre.fund_eoa())
state_test(env=env, pre=pre, post={{}}, tx=tx)
"""
)
@pytest.mark.parametrize(
    # Each case supplies: extra CLI args for `fill`, the expected fixture
    # files, their fixture counts, and the gas limit the test should observe.
    "args, expected_fixture_files, expected_fixture_counts, expected_gas_limit",
    [
pytest.param(
[],
[
Path(
"fixtures/state_tests/cancun/module_environment_variables/max_gas_limit.json"
),
],
[1],
Environment().gas_limit,
id="default-args",
),
pytest.param(
["--block-gas-limit", str(Environment().gas_limit * 2)],
[
Path(
"fixtures/state_tests/cancun/module_environment_variables/max_gas_limit.json"
),
],
[1],
Environment().gas_limit * 2,
id="higher-gas-limit",
),
],
)
def test_fill_variables(
    testdir: pytest.Testdir,
    args: list[str],
    expected_fixture_files: list[Path],
    expected_fixture_counts: list[int],
    expected_gas_limit: int,
    default_t8n: TransitionTool,
) -> None:
    """
    Test filling tests that depend on variables such as the max block gas limit.

    NOTE: the output-verification section mirrors
    `test_fixture_output_based_on_command_line_args` above.
    """
    tests_dir = testdir.mkdir("tests")
    cancun_tests_dir = tests_dir.mkdir("cancun")
    test_module = cancun_tests_dir.join("test_module_environment_variables.py")
    # Substitute the expected gas limit into the module template before
    # writing it, so the generated test asserts against the right value.
    test_module.write(
        test_module_environment_variables.format(expected_gas_limit=expected_gas_limit)
    )
    testdir.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    args.append("-c")
    args.append("pytest-fill.ini")
    args.append("-v")
    args.append("-m")
    args.append("state_test")
    args.append("--no-html")
    args.append("--t8n-server-url")
    assert default_t8n.server_url is not None
    args.append(default_t8n.server_url)
    result = testdir.runpytest(*args)
    result.assert_outcomes(
        passed=1,
        failed=0,
        skipped=0,
        errors=0,
    )
    if "--output" in args:
        output_dir = Path(args[args.index("--output") + 1]).absolute()
    else:
        output_dir = Path(default_output_directory()).absolute()
    assert output_dir.exists()
    all_files = get_all_files_in_directory(str(output_dir))
    meta_dir = os.path.join(output_dir, ".meta")
    assert os.path.exists(meta_dir), f"The directory {meta_dir} does not exist"
    expected_ini_file = "fixtures.ini"
    expected_index_file = "index.json"
    expected_resolver_file = None
    resolver_file = None
    if TransitionTool.default_tool == ExecutionSpecsTransitionTool:
        expected_resolver_file = "eels_resolutions.json"
    ini_file = None
    index_file = None
    for file in all_files:
        if file.name == expected_ini_file:
            ini_file = file
        elif file.name == expected_index_file:
            index_file = file
        elif expected_resolver_file and file.name == expected_resolver_file:
            resolver_file = file
            assert resolver_file.exists(), f"{resolver_file} does not exist"
    # Fix: a missing resolver file was previously ignored silently because
    # the existence assertion only ran once the file had been found.
    if expected_resolver_file:
        assert resolver_file is not None, (
            f"No {expected_resolver_file} file was found in {meta_dir}"
        )
    expected_additional_files = {expected_ini_file, expected_index_file}
    if resolver_file:
        expected_additional_files.add(str(expected_resolver_file))
    all_fixtures = [file for file in all_files if file.name not in expected_additional_files]
    for fixture_file, fixture_count in zip(expected_fixture_files, expected_fixture_counts):
        assert fixture_file.exists(), f"{fixture_file} does not exist"
        assert fixture_count == count_keys_in_fixture(fixture_file), (
            f"Fixture count mismatch for {fixture_file}"
        )
    assert set(all_fixtures) == set(expected_fixture_files), (
        f"Unexpected files in directory: {set(all_fixtures) - set(expected_fixture_files)}"
    )
    assert ini_file is not None, f"No {expected_ini_file} file was found in {meta_dir}"
    config = configparser.ConfigParser()
    ini_file_text = ini_file.read_text()
    # The t8n server URL differs between runs; normalize before parsing.
    ini_file_text = ini_file_text.replace(default_t8n.server_url, "t8n_server_path")
    config.read_string(ini_file_text)
    if "--skip-index" not in args:
        assert index_file is not None, f"No {expected_index_file} file was found in {meta_dir}"
    properties = {key: value for key, value in config.items("fixtures")}
    assert "timestamp" in properties
    timestamp = datetime.fromisoformat(properties["timestamp"])
    assert timestamp.year == datetime.now().year
    if "--build-name" in args:
        assert "build" in properties
        build_name = args[args.index("--build-name") + 1]
        assert properties["build"] == build_name
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_pre_alloc.py | src/pytest_plugins/filler/tests/test_pre_alloc.py | """Test the pre-allocation methods in the filler module."""
from itertools import count
import pytest
from ethereum_test_base_types import Address, TestPrivateKey, TestPrivateKey2
from ethereum_test_forks import Fork, Prague
from ethereum_test_types import EOA
from ethereum_test_vm import EVMCodeType
from ethereum_test_vm import Opcodes as Op
from ..pre_alloc import (
CONTRACT_ADDRESS_INCREMENTS_DEFAULT,
CONTRACT_START_ADDRESS_DEFAULT,
Alloc,
AllocMode,
)
def create_test_alloc(
    alloc_mode: AllocMode = AllocMode.PERMISSIVE,
    fork: Fork = Prague,
    evm_code_type: EVMCodeType = EVMCodeType.LEGACY,
) -> Alloc:
    """Build an ``Alloc`` wired with fresh contract-address and EOA iterators."""

    def contract_addresses():
        # Addresses start at the default base and step by the default increment.
        for n in count():
            yield Address(
                CONTRACT_START_ADDRESS_DEFAULT + n * CONTRACT_ADDRESS_INCREMENTS_DEFAULT
            )

    def eoas():
        # The second EOA uses the alternate test key; all others derive from
        # the primary test key plus the index.
        for n in count():
            key = TestPrivateKey2 if n == 1 else TestPrivateKey + n
            yield EOA(key=key, nonce=0).copy()

    return Alloc(
        alloc_mode=alloc_mode,
        contract_address_iterator=contract_addresses(),
        eoa_iterator=eoas(),
        fork=fork,
        evm_code_type=evm_code_type,
    )
def test_alloc_deploy_contract_basic() -> None:
    """Test basic `Alloc.deploy_contract` functionality."""
    pre = create_test_alloc()
    first = pre.deploy_contract(Op.SSTORE(0, 1) + Op.STOP)
    second = pre.deploy_contract(Op.SSTORE(0, 2) + Op.STOP)

    # Deployments must land on distinct, predictable addresses.
    assert first != second
    assert [first, second] == [
        Address(CONTRACT_START_ADDRESS_DEFAULT),
        Address(CONTRACT_START_ADDRESS_DEFAULT + CONTRACT_ADDRESS_INCREMENTS_DEFAULT),
    ]

    # Both accounts exist in the pre-state and carry non-empty code.
    for address in (first, second):
        assert address in pre
        account = pre[address]
        assert account is not None
        assert account.code is not None
        assert len(account.code) > 0
def test_alloc_deploy_contract_with_balance() -> None:
    """Test `Alloc.deploy_contract` with balance."""
    pre = create_test_alloc()
    endowment = 10**18
    deployed = pre.deploy_contract(Op.STOP, balance=endowment)
    assert deployed in pre
    deployed_account = pre[deployed]
    assert deployed_account is not None
    # The requested balance is carried over into the pre-state account.
    assert deployed_account.balance == endowment
def test_alloc_deploy_contract_with_storage() -> None:
    """Test `Alloc.deploy_contract` with storage."""
    pre = create_test_alloc()
    initial_storage = {0: 42, 1: 100}
    deployed = pre.deploy_contract(
        Op.STOP,
        storage=initial_storage,  # type: ignore
    )
    assert deployed in pre
    deployed_account = pre[deployed]
    assert deployed_account is not None
    assert deployed_account.storage is not None
    # Every requested slot/value pair is present in the account's storage.
    for slot, value in initial_storage.items():
        assert deployed_account.storage[slot] == value
def test_alloc_fund_eoa_basic() -> None:
    """Test basic `Alloc.fund_eoa` functionality."""
    pre = create_test_alloc()
    funding = (10**18, 2 * 10**18)
    first = pre.fund_eoa(funding[0])
    second = pre.fund_eoa(funding[1])

    # Each call yields a distinct EOA.
    assert first != second

    # Both EOAs are registered in the pre-state with the requested balance.
    for eoa, expected_balance in zip((first, second), funding):
        assert eoa in pre
        account = pre[eoa]
        assert account is not None
        assert account.balance == expected_balance
def test_alloc_fund_address() -> None:
    """Test `Alloc.fund_address` functionality."""
    pre = create_test_alloc()
    target = Address(0x1234567890123456789012345678901234567890)
    funding = 5 * 10**18
    pre.fund_address(target, funding)
    # The funded address appears in the pre-state with the given balance.
    assert target in pre
    funded_account = pre[target]
    assert funded_account is not None
    assert funded_account.balance == funding
def test_alloc_empty_account() -> None:
    """Test `Alloc.empty_account` functionality."""
    pre = create_test_alloc()
    address = pre.empty_account()
    # Only the address generation is exercised here; `empty_account()`
    # returns an address without adding anything to the pre-state.
    assert isinstance(address, Address)
@pytest.mark.parametrize("evm_code_type", [EVMCodeType.LEGACY, EVMCodeType.EOF_V1])
def test_alloc_deploy_contract_code_types(evm_code_type: EVMCodeType) -> None:
    """Test `Alloc.deploy_contract` with different EVM code types."""
    pre = create_test_alloc(evm_code_type=evm_code_type)
    deployed = pre.deploy_contract(Op.SSTORE(0, 1) + Op.STOP)
    assert deployed in pre
    deployed_account = pre[deployed]
    assert deployed_account is not None
    assert deployed_account.code is not None
    if evm_code_type == EVMCodeType.EOF_V1:
        # EOF v1 containers begin with the magic + version prefix.
        assert deployed_account.code.startswith(b"\xef\x00\x01")
    elif evm_code_type == EVMCodeType.LEGACY:
        # Legacy deployment stores the raw opcode bytes verbatim.
        assert deployed_account.code == bytes.fromhex("600160005500")
@pytest.mark.parametrize("alloc_mode", [AllocMode.STRICT, AllocMode.PERMISSIVE])
def test_alloc_modes(alloc_mode: AllocMode) -> None:
    """Test different allocation modes."""
    pre = create_test_alloc(alloc_mode=alloc_mode)
    assert pre._alloc_mode == alloc_mode
    # Contract deployment works the same way in either mode.
    deployed = pre.deploy_contract(Op.STOP)
    assert deployed in pre
def test_global_address_allocation_consistency() -> None:
    """Test that address allocation produces consistent results."""
    # Two independent allocs built from identical parameters must hand out
    # identical address sequences.
    first_alloc = create_test_alloc()
    second_alloc = create_test_alloc()
    for step in range(2):
        expected = Address(
            CONTRACT_START_ADDRESS_DEFAULT + step * CONTRACT_ADDRESS_INCREMENTS_DEFAULT
        )
        from_first = first_alloc.deploy_contract(Op.STOP)
        from_second = second_alloc.deploy_contract(Op.STOP)
        assert from_first == from_second
        assert from_first == expected
def test_alloc_deploy_contract_nonce() -> None:
    """Test that deployed contracts have correct nonce."""
    pre = create_test_alloc()
    deployed = pre.deploy_contract(Op.STOP)
    deployed_account = pre[deployed]
    assert deployed_account is not None
    # A freshly deployed contract is expected to carry nonce 1.
    assert deployed_account.nonce == 1
def test_alloc_fund_eoa_returns_eoa_object() -> None:
    """Test that fund_eoa returns proper EOA object with private key access."""
    pre = create_test_alloc()
    funding = 10**18
    eoa = pre.fund_eoa(funding)

    # The return value is a full EOA object exposing its signing key...
    assert hasattr(eoa, "key")
    assert eoa.key is not None

    # ...and it is also registered in the pre-allocation with its balance.
    assert eoa in pre
    funded_account = pre[eoa]
    assert funded_account is not None
    assert funded_account.balance == funding
def test_alloc_multiple_contracts_sequential_addresses() -> None:
    """Test that multiple contracts get sequential addresses."""
    pre = create_test_alloc()
    deployed = [pre.deploy_contract(Op.PUSH1(i) + Op.STOP) for i in range(5)]
    # Addresses advance by the default increment from the default base.
    for index, contract in enumerate(deployed):
        assert contract == Address(
            CONTRACT_START_ADDRESS_DEFAULT + index * CONTRACT_ADDRESS_INCREMENTS_DEFAULT
        )
        assert contract in pre
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_filling_session.py | src/pytest_plugins/filler/tests/test_filling_session.py | """Unit tests for the FillingSession class."""
from pathlib import Path
from typing import Any
from unittest.mock import patch
import pytest
from ethereum_test_base_types import Alloc
from ethereum_test_fixtures import (
FixtureFillingPhase,
PreAllocGroup,
PreAllocGroups,
)
from ethereum_test_forks import Prague
from ethereum_test_types import Environment
from ..filler import FillingSession
class MockConfig:
    """Mock pytest config exposing only the ``getoption`` accessor."""

    def __init__(self, **options: Any) -> None:
        """Capture option values; operation mode defaults to "fill"."""
        self._options = dict(options)
        self.op_mode = "fill"  # Default operation mode

    def getoption(self, name: str, default: Any = None) -> Any:
        """Return the stored value for *name*, or *default* when unset."""
        if name in self._options:
            return self._options[name]
        return default
class MockFixtureOutput:
    """Mock fixture output exposing the pre-alloc groups folder path."""

    def __init__(self, pre_alloc_folder_exists: bool = True) -> None:
        """Record the simulated folder-existence flag and a fixed path."""
        self.pre_alloc_groups_folder_path = Path("/tmp/test_pre_alloc")
        self._folder_exists = pre_alloc_folder_exists

    @classmethod
    def from_config(cls, config: Any) -> "MockFixtureOutput":
        """Mock factory method; the config argument is ignored."""
        del config
        return cls()
class TestFillingSession:
    """
    Test cases for FillingSession class.

    All tests patch ``FixtureOutput`` inside the filler module with
    ``MockFixtureOutput`` so no real output directories are touched.
    """

    def test_init_normal_fill(self) -> None:
        """Test initialization for normal single-phase fill."""
        config = MockConfig()
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            assert session.phase_manager.is_single_phase_fill
            # No pre-alloc groups are created in a plain fill.
            assert session.pre_alloc_groups is None

    def test_init_pre_alloc_generation(self) -> None:
        """Test initialization for pre-alloc generation phase."""
        config = MockConfig(generate_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            assert session.phase_manager.is_pre_alloc_generation
            # Generation phase starts with an empty groups container.
            assert session.pre_alloc_groups is not None
            assert len(session.pre_alloc_groups.root) == 0

    def test_init_use_pre_alloc(self) -> None:
        """Test initialization for phase 2 (using pre-alloc groups)."""
        config = MockConfig(use_pre_alloc_groups=True)
        # Mock the file system operations
        test_group = PreAllocGroup(
            pre=Alloc().model_dump(mode="json"),
            environment=Environment().model_dump(mode="json"),
            network=Prague.name(),
        )
        mock_groups = PreAllocGroups(root={"test_hash": test_group})
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            with patch.object(Path, "exists", return_value=True):
                with patch.object(PreAllocGroups, "from_folder", return_value=mock_groups):
                    session = FillingSession.from_config(config)  # type: ignore[arg-type]
                    assert session.phase_manager.is_fill_after_pre_alloc
                    # The loaded groups object is used as-is (identity check).
                    assert session.pre_alloc_groups is mock_groups

    def test_init_use_pre_alloc_missing_folder(self) -> None:
        """Test initialization fails when pre-alloc folder is missing."""
        config = MockConfig(use_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            with patch.object(Path, "exists", return_value=False):
                with pytest.raises(
                    FileNotFoundError, match="Pre-allocation groups folder not found"
                ):
                    FillingSession.from_config(config)  # type: ignore[arg-type]

    def test_should_generate_format(self) -> None:
        """Test format generation decision."""
        config = MockConfig()
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]

        # Mock fixture format
        class MockFormat:
            format_phases = {FixtureFillingPhase.FILL}

        assert session.should_generate_format(MockFormat())  # type: ignore[arg-type]

    def test_should_generate_format_with_generate_all(self) -> None:
        """Test format generation with generate_all_formats flag."""
        config = MockConfig(generate_all_formats=True, use_pre_alloc_groups=True)
        mock_groups = PreAllocGroups(root={})
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            with patch.object(Path, "exists", return_value=True):
                with patch.object(PreAllocGroups, "from_folder", return_value=mock_groups):
                    session = FillingSession.from_config(config)  # type: ignore[arg-type]

        # Mock fixture format that normally wouldn't generate in phase 2
        class MockFormat:
            format_phases = {FixtureFillingPhase.FILL}

        # Should generate because generate_all=True
        assert session.should_generate_format(MockFormat())  # type: ignore[arg-type]

    def test_get_pre_alloc_group(self) -> None:
        """Test getting a pre-alloc group by hash."""
        config = MockConfig(use_pre_alloc_groups=True)
        test_group = PreAllocGroup(
            pre=Alloc().model_dump(mode="json"),
            environment=Environment().model_dump(mode="json"),
            network=Prague.name(),
        )
        mock_groups = PreAllocGroups(root={"test_hash": test_group})
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            with patch.object(Path, "exists", return_value=True):
                with patch.object(PreAllocGroups, "from_folder", return_value=mock_groups):
                    session = FillingSession.from_config(config)  # type: ignore[arg-type]
                    assert session.get_pre_alloc_group("test_hash") is test_group

    def test_get_pre_alloc_group_not_found(self) -> None:
        """Test getting a non-existent pre-alloc group."""
        config = MockConfig(use_pre_alloc_groups=True)
        mock_groups = PreAllocGroups(root={})
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            with patch.object(Path, "exists", return_value=True):
                with patch.object(PreAllocGroups, "from_folder", return_value=mock_groups):
                    session = FillingSession.from_config(config)  # type: ignore[arg-type]
                    with pytest.raises(ValueError, match="Pre-allocation hash .* not found"):
                        session.get_pre_alloc_group("missing_hash")

    def test_get_pre_alloc_group_not_initialized(self) -> None:
        """Test getting pre-alloc group when not initialized."""
        config = MockConfig()  # Normal fill, no pre-alloc groups
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            with pytest.raises(ValueError, match="Pre-allocation groups not initialized"):
                session.get_pre_alloc_group("any_hash")

    def test_update_pre_alloc_group(self) -> None:
        """Test updating a pre-alloc group."""
        config = MockConfig(generate_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            test_group = PreAllocGroup(
                pre=Alloc().model_dump(mode="json"),
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            session.update_pre_alloc_group("test_hash", test_group)
            assert "test_hash" in session.pre_alloc_groups  # type: ignore[operator]
            assert session.pre_alloc_groups["test_hash"] is test_group  # type: ignore[index]

    def test_update_pre_alloc_group_wrong_phase(self) -> None:
        """Test updating pre-alloc group in wrong phase."""
        config = MockConfig()  # Normal fill
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            test_group = PreAllocGroup(
                pre=Alloc().model_dump(mode="json"),
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            with pytest.raises(
                ValueError, match="Can only update pre-alloc groups in generation phase"
            ):
                session.update_pre_alloc_group("test_hash", test_group)

    def test_save_pre_alloc_groups(self) -> None:
        """Test saving pre-alloc groups to disk."""
        config = MockConfig(generate_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            # Add a test group
            test_group = PreAllocGroup(
                pre=Alloc().model_dump(mode="json"),
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            session.update_pre_alloc_group("test_hash", test_group)
            # Mock file operations
            with patch.object(Path, "mkdir") as mock_mkdir:
                with patch.object(PreAllocGroups, "to_folder") as mock_to_folder:
                    session.save_pre_alloc_groups()
                    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
                    mock_to_folder.assert_called_once()

    def test_save_pre_alloc_groups_none(self) -> None:
        """Test saving when no pre-alloc groups exist."""
        config = MockConfig()  # Normal fill
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            # Should not raise, just return
            session.save_pre_alloc_groups()

    def test_aggregate_pre_alloc_groups(self) -> None:
        """Test aggregating pre-alloc groups from workers (xdist)."""
        config = MockConfig(generate_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            # Worker groups to aggregate
            group1 = PreAllocGroup(
                pre=Alloc().model_dump(mode="json"),
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            group2 = PreAllocGroup(
                pre=Alloc().model_dump(mode="json"),
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            worker_groups = PreAllocGroups(root={"hash1": group1, "hash2": group2})
            session.aggregate_pre_alloc_groups(worker_groups)
            assert "hash1" in session.pre_alloc_groups  # type: ignore[operator]
            assert "hash2" in session.pre_alloc_groups  # type: ignore[operator]

    def test_aggregate_pre_alloc_groups_conflict(self) -> None:
        """Test aggregating conflicting pre-alloc groups."""
        config = MockConfig(generate_pre_alloc_groups=True)
        with patch("pytest_plugins.filler.filler.FixtureOutput", MockFixtureOutput):
            session = FillingSession.from_config(config)  # type: ignore[arg-type]
            # Add initial group
            alloc1 = Alloc().model_dump(mode="json")
            group1 = PreAllocGroup(
                pre=alloc1,
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            session.update_pre_alloc_group("hash1", group1)
            # Try to aggregate conflicting group with same hash but different pre
            alloc2_dict = Alloc().model_dump(mode="json")
            alloc2_dict["0x1234567890123456789012345678901234567890"] = None  # Make it different
            group2 = PreAllocGroup(
                pre=alloc2_dict,
                environment=Environment().model_dump(mode="json"),
                network=Prague.name(),
            )
            worker_groups = PreAllocGroups(root={"hash1": group2})
            with pytest.raises(ValueError, match="Conflicting pre-alloc groups"):
                session.aggregate_pre_alloc_groups(worker_groups)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_collect_only.py | src/pytest_plugins/filler/tests/test_collect_only.py | """Test the fill command's --collect-only pytest option."""
import textwrap
import pytest
test_module_dummy = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Environment
@pytest.mark.valid_at("Istanbul")
def test_dummy_collect_only_test(state_test) -> None:
state_test(env=Environment(), pre={}, post={}, tx=None)
"""
)
def test_collect_only_output(pytester: pytest.Pytester) -> None:
    """Test that --collect-only option produces expected output."""
    # Write the dummy module under tests/istanbul/dummy_test_module/.
    module_dir = pytester.mkdir("tests") / "istanbul" / "dummy_test_module"
    module_dir.mkdir(parents=True)
    (module_dir / "test_dummy_collect.py").write_text(test_module_dummy)

    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    result = pytester.runpytest(
        "-c",
        "pytest-fill.ini",
        "--fork",
        "Istanbul",
        "tests/istanbul/dummy_test_module/",
        "--collect-only",
        "-q",
    )
    assert result.ret == 0, f"Fill command failed:\n{result.outlines}"

    # Both the state_test and blockchain_test_from_state_test variants must
    # appear in the collection listing.
    expected_node_ids = (
        "tests/istanbul/dummy_test_module/test_dummy_collect.py::test_dummy_collect_only_test[fork_Istanbul-state_test]",
        "tests/istanbul/dummy_test_module/test_dummy_collect.py::test_dummy_collect_only_test[fork_Istanbul-blockchain_test_from_state_test]",
    )
    for node_id in expected_node_ids:
        assert any(node_id in line for line in result.outlines), (
            f"Expected test output: {result.outlines}"
        )

    # fill generates 3 test variants: state_test,
    # blockchain_test_from_state_test, blockchain_test_engine_from_state_test
    assert any("3 tests collected" in line for line in result.outlines)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py | src/pytest_plugins/filler/tests/test_slow_marker_pre_alloc.py | """Test automatic pre_alloc_group marker application to slow tests."""
import textwrap
from typing import Any
from ethereum_clis import TransitionTool
def test_slow_marker_gets_pre_alloc_group(pytester: Any, default_t8n: TransitionTool) -> None:
    """
    Test that slow tests without benchmark marker get pre_alloc_group
    automatically.
    """
    # Inline module: a single @pytest.mark.slow state test with no benchmark
    # marker, so the plugin is expected to add pre_alloc_group to it.
    test_module = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import Alloc, StateTestFiller, Transaction
        @pytest.mark.slow
        @pytest.mark.valid_from("Cancun")
        def test_slow_without_benchmark(state_test: StateTestFiller, pre: Alloc) -> None:
            sender = pre.fund_eoa()
            contract = pre.deploy_contract(code=b"")
            tx = Transaction(sender=sender, to=contract, gas_limit=100000)
            state_test(pre=pre, tx=tx, post={})
        """
    )
    # Create test directory structure
    tests_dir = pytester.mkdir("tests")
    cancun_dir = tests_dir / "cancun"
    cancun_dir.mkdir()
    test_file = cancun_dir / "test_slow.py"
    test_file.write_text(test_module)
    # Copy the pytest configuration
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Run pytest with our plugin and check collection
    args = [
        "-c",
        "pytest-fill.ini",
        "--collect-only",
        "-q",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/cancun/test_slow.py",
    ]
    result = pytester.runpytest(*args)
    # The test should be collected successfully
    # NOTE(review): this only asserts collection; it does not verify that the
    # pre_alloc_group marker was actually applied — confirm coverage elsewhere.
    result.stdout.fnmatch_lines(["*test_slow_without_benchmark*"])
def test_slow_with_benchmark_no_pre_alloc(pytester: Any, default_t8n: TransitionTool) -> None:
    """
    Test that slow tests WITH benchmark marker do NOT get pre_alloc_group.
    """
    # Inline module: slow + benchmark markers combined, which should exempt
    # the test from the automatic pre_alloc_group marker.
    test_module = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import Alloc, StateTestFiller, Transaction
        @pytest.mark.slow
        @pytest.mark.benchmark
        @pytest.mark.valid_from("Cancun")
        def test_slow_with_benchmark(state_test: StateTestFiller, pre: Alloc) -> None:
            sender = pre.fund_eoa()
            contract = pre.deploy_contract(code=b"")
            tx = Transaction(sender=sender, to=contract, gas_limit=100000)
            state_test(pre=pre, tx=tx, post={})
        """
    )
    # Create test directory structure
    tests_dir = pytester.mkdir("tests")
    benchmark_dir = tests_dir / "benchmark"
    benchmark_dir.mkdir()
    test_file = benchmark_dir / "test_slow_benchmark.py"
    test_file.write_text(test_module)
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Run with collection only to verify test is collected
    args = [
        "-c",
        "pytest-fill.ini",
        "--collect-only",
        "-q",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/benchmark/test_slow_benchmark.py",
    ]
    result = pytester.runpytest(*args)
    # The test should be collected
    # NOTE(review): only collection is asserted; the absence of the
    # pre_alloc_group marker is not checked here — confirm coverage elsewhere.
    result.stdout.fnmatch_lines(["*test_slow_with_benchmark*"])
def test_slow_with_existing_pre_alloc_unchanged(
    pytester: Any, default_t8n: TransitionTool
) -> None:
    """
    Test that slow tests with existing pre_alloc_group marker are unchanged.
    """
    # Inline module: the test already declares an explicit pre_alloc_group,
    # which should take precedence over any automatic marker.
    test_module = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import Alloc, StateTestFiller, Transaction
        @pytest.mark.slow
        @pytest.mark.pre_alloc_group("custom_group", reason="Custom reason")
        @pytest.mark.valid_from("Cancun")
        def test_slow_with_existing_pre_alloc(state_test: StateTestFiller, pre: Alloc) -> None:
            sender = pre.fund_eoa()
            contract = pre.deploy_contract(code=b"")
            tx = Transaction(sender=sender, to=contract, gas_limit=100000)
            state_test(pre=pre, tx=tx, post={})
        """
    )
    # Create test directory structure
    tests_dir = pytester.mkdir("tests")
    cancun_dir = tests_dir / "cancun"
    cancun_dir.mkdir()
    test_file = cancun_dir / "test_existing_pre_alloc.py"
    test_file.write_text(test_module)
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Run with collection only to verify test is collected
    args = [
        "-c",
        "pytest-fill.ini",
        "--collect-only",
        "-q",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/cancun/test_existing_pre_alloc.py",
    ]
    result = pytester.runpytest(*args)
    # The test should be collected successfully
    # NOTE(review): the "unchanged" property (custom group preserved) is not
    # asserted here, only collection — confirm coverage elsewhere.
    result.stdout.fnmatch_lines(["*test_slow_with_existing_pre_alloc*"])
def test_non_slow_no_pre_alloc(pytester: Any, default_t8n: TransitionTool) -> None:
    """Test that tests without slow marker do not get pre_alloc_group."""
    # Inline module: an ordinary (non-slow) state test.
    test_module = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import Alloc, StateTestFiller, Transaction
        @pytest.mark.valid_from("Cancun")
        def test_normal_speed(state_test: StateTestFiller, pre: Alloc) -> None:
            sender = pre.fund_eoa()
            contract = pre.deploy_contract(code=b"")
            tx = Transaction(sender=sender, to=contract, gas_limit=100000)
            state_test(pre=pre, tx=tx, post={})
        """
    )
    # Create test directory structure
    tests_dir = pytester.mkdir("tests")
    cancun_dir = tests_dir / "cancun"
    cancun_dir.mkdir()
    test_file = cancun_dir / "test_normal.py"
    test_file.write_text(test_module)
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Run with collection only to verify test is collected
    args = [
        "-c",
        "pytest-fill.ini",
        "--collect-only",
        "-q",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/cancun/test_normal.py",
    ]
    result = pytester.runpytest(*args)
    # The test should be collected successfully
    result.stdout.fnmatch_lines(["*test_normal_speed*"])
def test_integration_with_fill(pytester: Any, default_t8n: TransitionTool) -> None:
    """
    Integration test using actual fill command to verify marker application.
    """
    test_module = textwrap.dedent(
        """\
        import pytest
        from ethereum_test_tools import (
            Account,
            Alloc,
            StateTestFiller,
            Transaction,
        )
        @pytest.mark.slow
        @pytest.mark.valid_from("Cancun")
        def test_slow_for_integration(state_test: StateTestFiller, pre: Alloc) -> None:
            '''Test that should get pre_alloc_group marker automatically.'''
            sender = pre.fund_eoa()
            contract = pre.deploy_contract(code=b"")
            tx = Transaction(sender=sender, to=contract, gas_limit=100000)
            state_test(pre=pre, tx=tx, post={})
        """
    )
    # Create proper directory structure for tests
    tests_dir = pytester.mkdir("tests")
    cancun_tests_dir = tests_dir / "cancun"
    cancun_tests_dir.mkdir()
    slow_test_dir = cancun_tests_dir / "slow_test_module"
    slow_test_dir.mkdir()
    test_module_file = slow_test_dir / "test_slow_integration.py"
    test_module_file.write_text(test_module)
    # Copy pytest configuration
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Run fill command
    args = [
        "-c",
        "pytest-fill.ini",
        "-v",
        "--no-html",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/cancun/slow_test_module/",
    ]
    # The test generates 3 formats (state_test, blockchain_test,
    # blockchain_test_engine).
    # But it also runs on multiple forks (Cancun and
    # Prague), so expect more tests.
    # This is fine - the important thing is that they all pass.
    result = pytester.runpytest(*args)
    # Verify that tests passed (don't care about exact count due to fork
    # variations)
    assert result.ret == 0, "Fill command should succeed"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/conftest.py | src/pytest_plugins/filler/tests/conftest.py | """Local pytest configuration for filler tests."""
import os
import sysconfig
import pytest
@pytest.fixture(autouse=True)
def monkeypatch_path_for_entry_points(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """
    Prepend the interpreter's scripts ("bin") directory to PATH.

    Console-script entry points (e.g. ethereum-spec-evm-resolver) live in the
    active environment's scripts directory — with uv typically `./.venv/bin`.
    Pytester-driven fill runs need that directory on PATH to find them.
    """
    scripts_dir = sysconfig.get_path("scripts")
    monkeypatch.setenv("PATH", f"{scripts_dir}:{os.environ['PATH']}")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_output_directory.py | src/pytest_plugins/filler/tests/test_output_directory.py | """Test the filler plugin's output directory handling."""
from pathlib import Path
from typing import Any, Callable
from unittest.mock import patch
import pytest
from pytest import TempPathFactory
from ethereum_clis import TransitionTool
from ..fixture_output import FixtureOutput
MINIMAL_TEST_FILE_NAME = "test_example.py"
MINIMAL_TEST_CONTENTS = """
from ethereum_test_tools import Transaction
def test_function(state_test, pre) -> None:
tx = Transaction(to=0, gas_limit=21_000, sender=pre.fund_eoa())
state_test(pre=pre, post={}, tx=tx)
"""
@pytest.fixture
def minimal_test_path(pytester: pytest.Pytester) -> Path:
    """
    Write the minimal test module under ``tests/`` and return its path,
    ready to be passed to a fill invocation.
    """
    test_path = pytester.mkdir("tests") / MINIMAL_TEST_FILE_NAME
    test_path.write_text(MINIMAL_TEST_CONTENTS)
    return test_path
@pytest.fixture(scope="module")
def fill_fork_from() -> str:
    """Fork name passed to `fill` via its `--from` argument."""
    return "Paris"
@pytest.fixture(scope="module")
def fill_fork_until() -> str:
    """Fork name passed to `fill` via its `--until` argument."""
    return "Cancun"
@pytest.fixture
def run_fill(
    pytester: pytest.Pytester,
    minimal_test_path: Path,
    fill_fork_from: str,
    fill_fork_until: str,
    default_t8n: TransitionTool,
) -> Callable[..., pytest.RunResult]:
    """
    Create a function to run the fill command with various output directory
    scenarios.
    """

    def _run_fill(
        output_dir: Path,
        clean: bool = False,
        expect_failure: bool = False,
        disable_capture_output: bool = False,
    ) -> pytest.RunResult:
        """
        Run the fill command with the specified output directory and clean
        flag.

        Asserts on the exit code according to *expect_failure* and returns
        the pytest run result for further inspection by the caller.
        """
        pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
        # The -m expression skips engine fixtures and EIP version checks to
        # keep the run small and hermetic.
        args = [
            "-c",
            "pytest-fill.ini",
            "-m",
            "(not blockchain_test_engine) and (not eip_version_check)",
            f"--from={fill_fork_from}",
            f"--until={fill_fork_until}",
            f"--output={str(output_dir)}",
            f"--t8n-server-url={default_t8n.server_url}",
            str(minimal_test_path),
        ]
        if clean:
            args.append("--clean")
        if disable_capture_output:
            # Required for tests on stdout
            args.append("-s")
        result = pytester.runpytest(*args)
        if expect_failure:
            assert result.ret != 0, "Fill command was expected to fail but succeeded"
        else:
            assert result.ret == 0, f"Fill command failed:\n{result.outlines}"
        return result

    return _run_fill
def test_fill_to_empty_directory(tmp_path_factory: TempPathFactory, run_fill: Any) -> None:
    """Test filling to a new, empty directory."""
    fixtures_dir = tmp_path_factory.mktemp("empty_fixtures")
    run_fill(fixtures_dir)
    # A successful fill produces state-test fixtures plus a .meta directory.
    assert any(fixtures_dir.glob("state_tests/**/*.json")), "No fixture files were created"
    assert (fixtures_dir / ".meta").exists(), "Metadata directory was not created"
def test_fill_to_nonexistent_directory(tmp_path_factory: TempPathFactory, run_fill: Any) -> None:
    """Test filling to a nonexistent directory."""
    # Point fill at a path that does not exist yet; it must be created.
    target_dir = tmp_path_factory.mktemp("base") / "nonexistent_fixtures"
    run_fill(target_dir)
    assert any(target_dir.glob("state_tests/**/*.json")), "No fixture files were created"
    assert (target_dir / ".meta").exists(), "Metadata directory was not created"
def test_fill_to_nonempty_directory_fails(
    tmp_path_factory: TempPathFactory, run_fill: Any
) -> None:
    """Test filling to a non-empty directory fails without --clean."""
    # Create a directory with a file so the output directory is non-empty.
    output_dir = tmp_path_factory.mktemp("nonempty_fixtures")
    (output_dir / "existing_file.txt").write_text("This directory is not empty")
    result: pytest.RunResult = run_fill(output_dir, expect_failure=True)
    # The error is reported on stderr. (Fix: the original bound
    # `result.errlines` to a local misleadingly named `outlines`.)
    errlines = result.errlines
    assert isinstance(errlines, list)
    assert any("is not empty" in line for line in errlines), (
        f"Expected error about non-empty directory: {errlines}"
    )
    assert any("Use --clean" in line for line in errlines), (
        f"Expected suggestion to use --clean flag: {errlines}"
    )
def test_fill_to_nonempty_directory_with_clean(
    tmp_path_factory: TempPathFactory, run_fill: Any
) -> None:
    """
    Test filling to a non-empty directory succeeds with --clean.

    --clean must wipe the pre-existing contents before writing new fixtures.
    """
    # Create a directory with a file
    output_dir = tmp_path_factory.mktemp("nonempty_fixtures_clean")
    (output_dir / "existing_file.txt").write_text("This directory will be cleaned")
    run_fill(output_dir, clean=True)
    # Verify the existing file was removed
    assert not (output_dir / "existing_file.txt").exists(), "Existing file was not removed"
    assert any(output_dir.glob("state_tests/**/*.json")), "No fixture files were created"
def test_fill_to_directory_with_meta_fails(
    tmp_path_factory: TempPathFactory, run_fill: Any
) -> None:
    """
    Test filling to a directory with .meta subdirectory fails without --clean.

    A `.meta` directory counts as pre-existing content even when no fixture
    files are present.
    """
    # Create a directory with .meta
    output_dir = tmp_path_factory.mktemp("directory_with_meta")
    meta_dir = output_dir / ".meta"
    meta_dir.mkdir()
    (meta_dir / "existing_meta_file.txt").write_text("This is metadata")
    result: pytest.RunResult = run_fill(output_dir, expect_failure=True)
    # The error is reported on stderr.
    assert any("is not empty" in line for line in result.errlines), (
        "Expected error about non-empty directory"
    )
def test_fill_to_directory_with_meta_with_clean(
    tmp_path_factory: TempPathFactory, run_fill: Any
) -> None:
    """Test filling to a directory with .meta succeeds with --clean."""
    # Create a directory with .meta
    output_dir = tmp_path_factory.mktemp("directory_with_meta_clean")
    meta_dir = output_dir / ".meta"
    meta_dir.mkdir()
    (meta_dir / "existing_meta_file.txt").write_text("This is metadata")
    run_fill(output_dir, clean=True)
    assert any(output_dir.glob("state_tests/**/*.json")), "No fixture files were created"
    # --clean must also have removed the stale metadata file.
    assert not (meta_dir / "existing_meta_file.txt").exists(), "Existing meta file was not removed"
def test_fill_stdout_always_works(tmp_path_factory: TempPathFactory, run_fill: Any) -> None:
    """
    Test filling to stdout always works regardless of output state.

    The output value "stdout" is treated as a sentinel: fixtures are written
    to standard output, and no directory-emptiness checks apply even if a
    directory literally named "stdout" (with stale metadata) exists on disk.
    """
    stdout_path = Path("stdout")
    # create a directory called "stdout" - it should not have any effect
    output_dir = tmp_path_factory.mktemp(stdout_path.name, numbered=False)
    assert str(output_dir.stem) == "stdout"
    meta_dir = output_dir / ".meta"
    meta_dir.mkdir()
    (meta_dir / "existing_meta_file.txt").write_text("This is metadata")
    # disable_capture_output passes `-s` so the fixture output reaches stdout.
    result: pytest.RunResult = run_fill(stdout_path, disable_capture_output=True)
    assert any(
        "test_example.py::test_function[fork_Cancun-state_test]" in line
        for line in result.outlines
    ), f"Expected JSON output for state test: {result.outlines}"
    assert not any(stdout_path.glob("*.json")), "Fixture files were created when stdout is used"
def test_fill_to_tarball_directory(tmp_path_factory: TempPathFactory, run_fill: Any) -> None:
    """Test filling to a tarball output."""
    output_dir = tmp_path_factory.mktemp("tarball_fixtures")
    tarball_path = output_dir / "fixtures.tar.gz"
    run_fill(tarball_path)
    assert tarball_path.exists(), "Tarball was not created"
    # Filling to a .tar.gz also leaves the uncompressed fixtures directory
    # next to the archive.
    extracted_dir = output_dir / "fixtures"
    assert extracted_dir.exists(), "Extracted directory doesn't exist"
    assert any(extracted_dir.glob("state_tests/**/*.json")), "No fixture files were created"
# New tests for the is_master functionality
def test_create_directories_skips_when_not_master() -> None:
    """
    A worker process (is_master=False) must never touch the filesystem in
    create_directories.
    """
    output = FixtureOutput(
        output_path=Path("/fake/path"),
        clean=True,
    )
    # Stub out every filesystem entry point so any call is detectable.
    with (
        patch.object(FixtureOutput, "is_directory_empty") as is_empty_mock,
        patch.object(Path, "exists", return_value=True),
        patch.object(Path, "mkdir") as mkdir_mock,
        patch("shutil.rmtree") as rmtree_mock,
    ):
        # Worker process: directory management is the master's job.
        output.create_directories(is_master=False)
        # No inspection, creation, or deletion may have happened.
        is_empty_mock.assert_not_called()
        mkdir_mock.assert_not_called()
        rmtree_mock.assert_not_called()
def test_create_directories_operates_when_master() -> None:
    """
    Test that create_directories performs filesystem operations when running
    as the master process.
    """
    fixture_output = FixtureOutput(
        output_path=Path("/fake/path"),
        clean=True,
    )
    # Mock directory operations
    with (
        patch.object(FixtureOutput, "is_directory_empty", return_value=True),
        patch.object(Path, "exists", return_value=True),
        patch.object(Path, "mkdir") as mock_mkdir,
        patch("shutil.rmtree") as mock_rmtree,
    ):
        # Call with is_master=True (master process)
        fixture_output.create_directories(is_master=True)
        # clean=True on an existing path removes it exactly once, then the
        # output directories are (re)created.
        mock_rmtree.assert_called_once()
        mock_mkdir.assert_called()
def test_create_directories_checks_empty_when_master() -> None:
    """Test that directory emptiness is checked only when is_master=True."""
    fixture_output = FixtureOutput(
        output_path=Path("/fake/path"),
        clean=False,  # Don't clean, so we'll check if empty
    )
    # Mock directory operations
    with (
        patch.object(FixtureOutput, "is_directory_empty", return_value=False) as mock_is_empty,
        patch.object(
            FixtureOutput, "get_directory_summary", return_value="not empty"
        ) as mock_summary,
        patch.object(Path, "exists", return_value=True),
        patch.object(Path, "mkdir"),
    ):
        # Call with is_master=True and expect an error about non-empty
        # directory
        with pytest.raises(ValueError, match="not empty"):
            fixture_output.create_directories(is_master=True)
        # Verify emptiness check was performed; the summary is used to build
        # the error message.
        mock_is_empty.assert_called_once()
        mock_summary.assert_called_once()
def test_stdout_skips_directory_operations_regardless_of_master() -> None:
    """
    Test that stdout output skips directory operations regardless of is_master
    value.
    """
    # "stdout" is the sentinel output path: no filesystem work should happen.
    fixture_output = FixtureOutput(
        output_path=Path("stdout"),
        clean=True,
    )
    # Mock directory operations to ensure they aren't called
    with (
        patch.object(FixtureOutput, "is_directory_empty") as mock_is_empty,
        patch.object(Path, "exists") as mock_exists,
        patch.object(Path, "mkdir") as mock_mkdir,
        patch("shutil.rmtree") as mock_rmtree,
    ):
        # Should skip operations even with is_master=True
        fixture_output.create_directories(is_master=True)
        # Verify no directory operations occurred
        mock_is_empty.assert_not_called()
        mock_exists.assert_not_called()
        mock_mkdir.assert_not_called()
        mock_rmtree.assert_not_called()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/__init__.py | src/pytest_plugins/filler/tests/__init__.py | """Filler tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py | src/pytest_plugins/filler/tests/test_prealloc_group_usage_example.py | """
Example usage of the pre_alloc_group marker.
This file demonstrates how tests would use the marker in practice. Note: This
is just documentation, not executable tests.
"""
import pytest
# NOTE: tests that share a pre_alloc_group name (Examples 2 and 3) share one
# pre-allocation; "separate" isolates a test into its own group.


# Example 1: Test that deploys beacon root contract with hardcoded deployer
@pytest.mark.pre_alloc_group(
    "separate",
    reason="Deploys beacon root contract using actual hardcoded deployer address",
)
def test_beacon_root_contract_deployment() -> None:
    """
    Test beacon root contract deployment with the official deployer address.
    """
    # This test uses the actual beacon root deployer address (e.g.,
    # 0x4242...4242) which could conflict with dynamically allocated addresses
    # in other tests
    pass


# Example 2: Test with custom consolidation contract
@pytest.mark.pre_alloc_group(
    "custom_consolidation",
    reason="Deploys custom consolidation contract with different bytecode",
)
def test_custom_consolidation_contract() -> None:
    """Test that deploys a modified consolidation contract."""
    # This test deploys a consolidation contract with custom bytecode that
    # differs from the standard implementation, requiring isolation from other
    # consolidation tests
    pass


# Example 3: Group related tests that need custom contracts
@pytest.mark.pre_alloc_group(
    "custom_consolidation",
    reason="Uses same custom consolidation contract setup",
)
def test_custom_consolidation_edge_cases() -> None:
    """Test edge cases with the custom consolidation contract."""
    # This test can share the pre-allocation with
    # test_custom_consolidation_contract since they both use the same custom
    # contract setup
    pass


# Example 4: Test without marker - uses default grouping
def test_normal_consolidation() -> None:
    """Test that uses standard consolidation contract and default grouping."""
    # This test uses dynamic allocation and standard contracts,
    # so it can be grouped normally with other tests
    pass
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_generate_all_formats.py | src/pytest_plugins/filler/tests/test_generate_all_formats.py | """Test the --generate-all-formats functionality."""
from typing import Any
from pytest_plugins.filler.fixture_output import FixtureOutput
def test_fixture_output_with_generate_all_formats() -> None:
    """
    FixtureOutput must store should_generate_all_formats when given and
    default it to False when omitted.
    """
    # Explicitly enabled.
    enabled = FixtureOutput(
        output_path="/tmp/test",
        should_generate_all_formats=True,
    )
    assert enabled.should_generate_all_formats is True
    # Omitted: must default to False.
    defaulted = FixtureOutput(output_path="/tmp/test")
    assert defaulted.should_generate_all_formats is False
def test_fixture_output_from_config_includes_generate_all_formats() -> None:
    """
    FixtureOutput.from_config must read the generate_all_formats option from
    the pytest config.
    """

    class StubConfig:
        """Minimal stand-in for a pytest config object."""

        _options = {
            "output": "/tmp/test",
            "single_fixture_per_file": False,
            "clean": False,
            "generate_pre_alloc_groups": False,
            "use_pre_alloc_groups": False,
            "generate_all_formats": True,  # Test the new option
        }

        def getoption(self, option: str) -> Any:
            return self._options.get(option, False)

    fixture_output = FixtureOutput.from_config(StubConfig())  # type: ignore
    assert fixture_output.should_generate_all_formats is True
    assert fixture_output.output_path.name == "test"
def test_tarball_output_auto_enables_generate_all_formats() -> None:
    """
    Test that tarball output (.tar.gz) automatically enables
    should_generate_all_formats.
    """

    # Mock pytest config object with tarball output
    class MockConfig:
        def getoption(self, option: str) -> Any:
            option_values = {
                "output": "/tmp/fixtures.tar.gz",  # Tarball output
                "single_fixture_per_file": False,
                "clean": False,
                "generate_pre_alloc_groups": False,
                "use_pre_alloc_groups": False,
                "generate_all_formats": False,  # Explicitly False
            }
            return option_values.get(option, False)

    config = MockConfig()
    fixture_output = FixtureOutput.from_config(
        config  # type: ignore
    )
    # Should auto-enable should_generate_all_formats due to tarball output,
    # even though the option itself was explicitly False.
    assert fixture_output.should_generate_all_formats is True
    assert fixture_output.is_tarball is True
def test_regular_output_does_not_auto_enable_generate_all_formats() -> None:
    """
    Test that regular directory output doesn't auto-enable
    should_generate_all_formats.
    """

    # Mock pytest config object with regular output
    class MockConfig:
        def getoption(self, option: str) -> Any:
            option_values = {
                "output": "/tmp/fixtures",  # Regular directory output
                "single_fixture_per_file": False,
                "clean": False,
                "generate_pre_alloc_groups": False,
                "use_pre_alloc_groups": False,
                "generate_all_formats": False,  # Explicitly False
            }
            return option_values.get(option, False)

    config = MockConfig()
    fixture_output = FixtureOutput.from_config(
        config  # type: ignore
    )
    # Should remain False for regular directory output
    assert fixture_output.should_generate_all_formats is False
    assert fixture_output.is_tarball is False
def test_explicit_generate_all_formats_overrides_tarball_auto_enable() -> None:
    """
    Explicitly passing generate_all_formats=True together with tarball output
    must still yield should_generate_all_formats=True.
    """

    class StubConfig:
        """Minimal stand-in for a pytest config object."""

        _options = {
            "output": "/tmp/fixtures.tar.gz",  # Tarball output
            "single_fixture_per_file": False,
            "clean": False,
            "generate_pre_alloc_groups": False,
            "use_pre_alloc_groups": False,
            "generate_all_formats": True,  # Explicitly True
        }

        def getoption(self, option: str) -> Any:
            return self._options.get(option, False)

    fixture_output = FixtureOutput.from_config(StubConfig())  # type: ignore
    # True both by explicit request and by tarball auto-enable.
    assert fixture_output.should_generate_all_formats is True
    assert fixture_output.is_tarball is True
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_benchmarking.py | src/pytest_plugins/filler/tests/test_benchmarking.py | """Test the benchmarking pytest plugin for gas benchmark values."""
import textwrap
from pathlib import Path
import pytest
test_module_dummy = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Environment
@pytest.mark.valid_at("Istanbul")
def test_dummy_benchmark_test(state_test, gas_benchmark_value) -> None:
state_test(
env=env,pre={},post={},tx=None)
"""
)
test_module_without_fixture = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import Environment
@pytest.mark.valid_at("Istanbul")
def test_dummy_no_benchmark_test(state_test) -> None:
state_test(env=env, pre={}, post={}, tx=None)
"""
)
def setup_test_directory_structure(
    pytester: pytest.Pytester, test_content: str, test_filename: str
) -> Path:
    """
    Create the shared ``tests/istanbul/dummy_test_module`` layout and write
    the given test module into it.

    Args:
        pytester: The pytest Pytester fixture.
        test_content: Source code to write into the test module.
        test_filename: File name to use for the test module.

    Returns: The path to the created test module file.
    """
    tests_dir = pytester.mkdir("tests")
    module_dir = tests_dir / "istanbul" / "dummy_test_module"
    module_dir.mkdir(parents=True)
    module_path = module_dir / test_filename
    module_path.write_text(test_content)
    # The fill ini file must be available inside the pytester environment.
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    return module_path
def test_gas_benchmark_option_added(pytester: pytest.Pytester) -> None:
    """Test that the --gas-benchmark-values option is properly added."""
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Command: pytest -p pytest_plugins.filler.benchmarking --help
    result = pytester.runpytest("-c", "pytest-fill.ini", "--help")
    assert result.ret == 0
    # Both the option flag and its help text must appear in --help output.
    assert any("--gas-benchmark-values" in line for line in result.outlines)
    assert any("Specify gas benchmark values for tests" in line for line in result.outlines)
def test_benchmarking_mode_configured_with_option(pytester: pytest.Pytester) -> None:
    """
    Test that fill_mode is set to BENCHMARKING when --gas-benchmark-values is
    used.
    """
    setup_test_directory_structure(pytester, test_module_dummy, "test_dummy_benchmark.py")
    # Test with gas benchmark values
    result = pytester.runpytest(
        "-c",
        "pytest-fill.ini",
        "--fork",
        "Istanbul",
        "--gas-benchmark-values",
        "10,20,30",
        "tests/istanbul/dummy_test_module/",
        "--collect-only",
        "-q",
    )
    assert result.ret == 0
    # 3 variants collected without the option (see test below) x 3 benchmark
    # values = 9 collected tests.
    assert any("9 tests collected" in line for line in result.outlines)
    # Check that the test names include the benchmark gas values
    assert any("benchmark-gas-value_10M" in line for line in result.outlines)
    assert any("benchmark-gas-value_20M" in line for line in result.outlines)
    assert any("benchmark-gas-value_30M" in line for line in result.outlines)
def test_benchmarking_mode_not_configured_without_option(pytester: pytest.Pytester) -> None:
    """
    Test that fill_mode is not set to BENCHMARKING when --gas-benchmark-values
    is not used.
    """
    setup_test_directory_structure(pytester, test_module_dummy, "test_dummy_benchmark.py")
    # Test without gas benchmark values
    result = pytester.runpytest(
        "-c",
        "pytest-fill.ini",
        "--fork",
        "Istanbul",
        "tests/istanbul/dummy_test_module/",
        "--collect-only",
        "-q",
    )
    assert result.ret == 0
    # Should generate normal test variants (3) without parametrization
    assert any("3 tests collected" in line for line in result.outlines)
    # No benchmark-value parametrization may appear in the collected ids.
    assert not any("benchmark-gas-value_10M" in line for line in result.outlines)
    assert not any("benchmark-gas-value_20M" in line for line in result.outlines)
    assert not any("benchmark-gas-value_30M" in line for line in result.outlines)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_eip_checklist.py | src/pytest_plugins/filler/tests/test_eip_checklist.py | """Test the EIP checklist plugin functionality."""
import re
import textwrap
from typing import Any
def test_eip_checklist_collection(testdir: Any) -> None:
    """
    Test that checklist markers are collected correctly.

    Builds two EIP-specific test trees (EIP-7702 under prague, EIP-2930 under
    berlin), runs collection with the eip_checklist plugin, and verifies the
    generated per-EIP checklist markdown files.
    """
    # Create the test in an EIP-specific directory
    tests_dir = testdir.mkdir("tests")
    prague_tests_dir = tests_dir.mkdir("prague")
    eip_7702_tests_dir = prague_tests_dir.mkdir("eip7702_set_code_tx")
    test_7702_module = eip_7702_tests_dir.join("test_eip7702.py")
    # Two Prague tests carrying EIPChecklist markers; test_invalid_v also
    # cross-references EIP-2930 via eip=[2930].
    test_7702_module.write(
        textwrap.dedent(
            """
            import pytest
            from ethereum_test_tools import StateTestFiller
            from ethereum_test_checklists import EIPChecklist
            REFERENCE_SPEC_GIT_PATH = "N/A"
            REFERENCE_SPEC_VERSION = "N/A"
            @pytest.mark.valid_at("Prague")
            @EIPChecklist.TransactionType.Test.IntrinsicValidity.GasLimit.Exact()
            def test_exact_gas(state_test: StateTestFiller) -> None:
                pass
            @pytest.mark.valid_at("Prague")
            @EIPChecklist.TransactionType.Test.Signature.Invalid.V.Two(eip=[2930])
            def test_invalid_v(state_test: StateTestFiller) -> None:
                pass
            """
        )
    )
    eip_7702_external_coverage_file = eip_7702_tests_dir.join(
        "eip_checklist_external_coverage.txt"
    )
    eip_7702_external_coverage_file.write(
        textwrap.dedent(
            """
            general/code_coverage/eels = DEBUG EXTERNAL COVERAGE REASON
            """
        )
    )
    berlin_tests_dir = tests_dir.mkdir("berlin")
    eip_2930_tests_dir = berlin_tests_dir.mkdir("eip2930_access_list")
    test_2930_module = eip_2930_tests_dir.join("test_eip2930.py")
    test_2930_module.write(
        textwrap.dedent(
            """
            import pytest
            from ethereum_test_tools import StateTestFiller
            REFERENCE_SPEC_GIT_PATH = "N/A"
            REFERENCE_SPEC_VERSION = "N/A"
            @pytest.mark.valid_at("Berlin")
            def test_berlin_one(state_test: StateTestFiller) -> None:
                pass
            """
        )
    )
    test_2930_n_a_file = eip_2930_tests_dir.join("eip_checklist_not_applicable.txt")
    test_2930_n_a_file.write(
        textwrap.dedent(
            """
            system_contract = DEBUG NOT APPLICABLE REASON
            """
        )
    )
    # Run pytest with checklist-only mode
    testdir.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    result = testdir.runpytest(
        "-c",
        "pytest-fill.ini",
        "-p",
        "pytest_plugins.filler.eip_checklist",
        "--collect-only",
        "--checklist-output",
        str(testdir.tmpdir / "checklists"),
        str(tests_dir),
    )
    result.assert_outcomes(
        passed=0,
        failed=0,
        skipped=0,
        errors=0,
    )
    # Check that checklists were generated
    checklist_dir = testdir.tmpdir / "checklists"
    assert checklist_dir.exists()
    checklist_file = checklist_dir / "eip7702_checklist.md"
    assert checklist_file.exists()
    # Verify the checklist contains the expected markers
    content = checklist_file.readlines()
    assert any(re.search(r"✅.*test_exact_gas", line) for line in content)
    assert any(re.search(r"✅.*test_invalid_v", line) for line in content)
    assert any(re.search(r"✅.*DEBUG EXTERNAL COVERAGE REASON", line) for line in content)
    checklist_file = checklist_dir / "eip2930_checklist.md"
    assert checklist_file.exists()
    content = checklist_file.readlines()
    # test_invalid_v appears in the EIP-2930 checklist only because of its
    # eip=[2930] cross-reference; test_exact_gas must not.
    assert not any(re.search(r"✅.*test_exact_gas", line) for line in content)
    assert any(re.search(r"✅.*test_invalid_v", line) for line in content)
    assert any(re.search(r"N/A.*DEBUG NOT APPLICABLE REASON", line) for line in content)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/tests/test_verify_sync_marker.py | src/pytest_plugins/filler/tests/test_verify_sync_marker.py | """Test blockchain sync fixture generation with verify_sync pytest marker."""
import textwrap
from typing import Any
from ethereum_clis import TransitionTool
test_module_with_verify_sync = textwrap.dedent(
"""\
import pytest
from ethereum_test_tools import (
Account,
BlockException,
Block,
Environment,
Header,
TestAddress,
Transaction,
)
TEST_ADDRESS = Account(balance=1_000_000)
@pytest.mark.valid_at("Cancun")
def test_verify_sync_default(blockchain_test) -> None:
blockchain_test(
pre={TestAddress: TEST_ADDRESS},
post={},
blocks=[Block(txs=[Transaction()])]
)
@pytest.mark.valid_at("Cancun")
@pytest.mark.verify_sync
def test_verify_sync_with_marker(blockchain_test) -> None:
blockchain_test(
pre={TestAddress: TEST_ADDRESS},
post={},
blocks=[Block(txs=[Transaction()])]
)
@pytest.mark.valid_at("Cancun")
@pytest.mark.parametrize(
"has_exception",
[
pytest.param(False, id="no_exception", marks=pytest.mark.verify_sync),
pytest.param(
True, id="with_exception", marks=pytest.mark.exception_test
),
]
)
def test_verify_sync_with_param_marks(blockchain_test, has_exception) -> None:
blockchain_test(
pre={TestAddress: TEST_ADDRESS},
post={},
blocks=[
Block(
txs=[Transaction()],
rlp_modifier=Header(gas_limit=0) if has_exception else None,
exception=BlockException.INCORRECT_BLOCK_FORMAT if has_exception else None,
)
],
)
"""
)
def test_verify_sync_marker(
    pytester: Any,
    default_t8n: TransitionTool,
) -> None:
    """
    Test blockchain sync fixture generation with verify_sync marker.

    The test module has 3 test functions (4 test cases with parametrization):

    - test_verify_sync_default: generates all formats except sync
      (no verify_sync marker)
    - test_verify_sync_with_marker: generates all formats including sync
      (has verify_sync marker)
    - test_verify_sync_with_param_marks: tests parametrized marks with
      verify_sync (2 cases)

    Each test generates fixture formats:

    - BlockchainFixture (always)
    - BlockchainEngineFixture (always)
    - BlockchainEngineSyncFixture (only when marked with verify_sync marker)

    Expected outcomes:

    - 4 test cases total
    - Each generates BlockchainFixture (4) and BlockchainEngineFixture (4) =
      8 fixtures
    - Sync fixtures:
      - test_verify_sync_with_marker: 1 sync fixture
      - test_verify_sync_with_param_marks[no_exception]: 1 sync fixture
      - Total sync fixtures: 2
    - Not generated (due to exception_test marker):
      - test_verify_sync_with_param_marks[with_exception]: sync fixture
        not generated

    Final counts:

    - Passed: 8 (base fixtures) + 2 (sync fixtures) = 10 passed
    - Skipped: 0 skipped
    - Failed: 0 failed
    """
    # Create proper directory structure for tests
    tests_dir = pytester.mkdir("tests")
    cancun_tests_dir = tests_dir / "cancun"
    cancun_tests_dir.mkdir()
    verify_sync_test_dir = cancun_tests_dir / "verify_sync_test_module"
    verify_sync_test_dir.mkdir()
    test_module = verify_sync_test_dir / "test_verify_sync_marker.py"
    test_module.write_text(test_module_with_verify_sync)
    pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
    # Add the test directory to the arguments
    args = [
        "-c",
        "pytest-fill.ini",
        "-v",
        "--no-html",
        "--t8n-server-url",
        default_t8n.server_url,
        "tests/cancun/verify_sync_test_module/",
    ]
    expected_outcomes = {"passed": 10, "failed": 0, "skipped": 0, "errors": 0}
    result = pytester.runpytest(*args)
    result.assert_outcomes(**expected_outcomes)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/gen_test_doc/page_props.py | src/pytest_plugins/filler/gen_test_doc/page_props.py | """
Classes and helpers used for templates, navigation menus and file output.
The dataclass fields are used to define the page properties fields which are
used in the jinja2 templates when generating site content (located in
docs/templates). The classes also define each page's navigation menu entry and
target output file.
A few helpers are defined with EEST logic in order to sanitize strings from
file paths for use in navigation menu.
"""
import re
from abc import abstractmethod
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import IO, Any, ContextManager, Dict, List, Protocol
from jinja2 import Environment
from ethereum_test_tools import Opcodes
def apply_name_filters(input_string: str) -> str:
    """
    Normalize the capitalization of names used in titles and nav menus.

    Applies a fixed table of acronym fixes, then a case-normalizing pass over
    all opcode names (GAS is skipped since "gas" is also a plain English
    word), and finally canonicalizes EIP references such as "eip123" or
    "eip-123" to "EIP-123".

    Note: As of 2024-10-08, with 634 doc pages, this function constitutes ~2.0s
    of the total runtime (~5.5s). This seems to be insignificant next to the
    time taken by mkdocstrings to include the docstrings in the final output,
    which is a separate mkdocs "build-step" occurring outside this plugin.
    """
    replacements = {
        "acl": "ACL",
        "bls 12": "BLS12",
        "bls12 g1add": "BLS12_G1ADD",
        "bls12 g1msm": "BLS12_G1MSM",
        "bls12 g1mul": "BLS12_G1MUL",
        "bls12 g2add": "BLS12_G2ADD",
        "bls12 g2msm": "BLS12_G2MSM",
        "bls12 g2mul": "BLS12_G2MUL",
        "bls12 map fp2 to g2": "BLS12_MAP_FP2_TO_G2",
        "bls12 map fp to g1": "BLS12_MAP_FP_TO_G1",
        "bls12 pairing": "BLS12_PAIRING_CHECK",
        "eips": "EIPs",
        "eof": "EOF",
        "vm": "VM",
    }
    # Extending the table with every opcode name is the expensive part.
    for opcode in Opcodes:
        opcode_name = str(opcode)
        if opcode_name != "GAS":
            replacements[opcode_name] = opcode_name
    for word, canonical in replacements.items():
        input_string = re.sub(rf"(?i)\b{re.escape(word)}\b", canonical, input_string)
    # Matches "eip-123" or "eip123" and canonicalizes to "EIP-123".
    input_string = re.sub(
        r"eip-?([1-9]{1,5})", r"EIP-\1", input_string, flags=re.IGNORECASE
    )
    return input_string
def snake_to_capitalize(string: str) -> str:  # noqa: D103
    """
    Turn a snake_case identifier into space-separated capitalized words.

    Strings that are not valid Python identifiers are returned unchanged.
    """
    if not string.isidentifier():
        return string
    words = string.split("_")
    return " ".join(map(str.capitalize, words))
def sanitize_string_title(string: str) -> str:
    """
    Sanitize a string to be used as a title: capitalize snake_case words,
    then apply the acronym/opcode name filters.
    """
    return apply_name_filters(snake_to_capitalize(string))
def nav_path_to_sanitized_str_tuple(nav_path: Path) -> tuple:
    """
    Return the nav path's components as a tuple of sanitized title strings
    for use in mkdocs navigation.
    """
    return tuple(map(sanitize_string_title, nav_path.parts))
class FileOpener(Protocol):
    """
    Protocol to replace `mkdocs_gen_files` so it doesn't have to be
    imported/installed for unit tests.
    """

    def open(self, path: Path, mode: str) -> ContextManager[IO[Any]]:
        """Open `path` with `mode`, returning a context-managed file object."""
        raise NotImplementedError
@dataclass
class PagePropsBase:
    """
    Common test reference doc page properties and definitions.

    The dataclass attributes are made directly available in the jinja2 found
    in `docs/templates/*.j2`.
    """

    # Page title displayed in the rendered docs.
    title: str
    # URL to this item's source file on the repository host.
    source_code_url: str
    target_or_valid_fork: str
    # Path of the source item, e.g. `tests/berlin/eip2930_access_list`.
    path: Path
    pytest_node_id: str
    package_name: str
    is_benchmark: bool = False
    is_stateful: bool = False

    @property
    @abstractmethod
    def template(self) -> str:
        """Get the jinja2 template used to render this page."""
        raise NotImplementedError

    @property
    @abstractmethod
    def target_output_file(self) -> Path:
        """Get the target output file for this page."""
        raise NotImplementedError

    def nav_entry(self, top_level_nav_entry: str) -> tuple:
        """Get the mkdocs navigation entry for this page."""
        # Single-component path: the nav entry is just the top-level entry.
        if len(self.path.parts) == 1:
            return (top_level_nav_entry,)
        # Drop the first path component, strip the suffix, and sanitize each
        # remaining part for display (str / Path works via Path.__rtruediv__).
        path = top_level_nav_entry / Path(*self.path.parts[1:]).with_suffix("")
        return nav_path_to_sanitized_str_tuple(path)

    def write_page(self, file_opener: FileOpener, jinja2_env: Environment) -> None:
        """Render this page's template with its properties and write it out."""
        template = jinja2_env.get_template(self.template)
        rendered_content = template.render(**asdict(self))
        with file_opener.open(self.target_output_file, "w") as destination:
            for line in rendered_content.splitlines(keepends=True):
                destination.write(line)
@dataclass
class EipChecklistPageProps(PagePropsBase):
    """Properties used to generate the EIP checklist page."""

    # EIP number this checklist covers.
    eip: int = 0
    # Pre-rendered markdown lines, written verbatim to the output file.
    lines: List[str] = field(default_factory=list)

    @property
    def template(self) -> str:
        """Checklist pages are pre-rendered; there is no jinja2 template."""
        raise Exception("EipChecklistPageProps does not have a template")

    @property
    def target_output_file(self) -> Path:
        """Get the target output file for this page."""
        return self.path

    def write_page(self, file_opener: FileOpener, jinja2_env: Environment) -> None:
        """Write the pre-rendered checklist lines directly, bypassing jinja2."""
        del jinja2_env
        with file_opener.open(self.target_output_file, "w") as destination:
            destination.write("\n".join(self.lines))
@dataclass
class TestCase:
    """
    Properties used to define a single test case in test function parameter
    tables.
    """

    # Full pytest node id of the case.
    full_id: str
    # Shortened id for display in the table.
    abbreviated_id: str
    fork: str
    fixture_type: str
    # Parameter name -> value mapping for this parametrization.
    params: Dict[str, Any]
@dataclass
class FunctionPageProps(PagePropsBase):
    """
    Definitions used to generate test function (markdown) pages and their
    corresponding static HTML pages.
    """

    test_case_count: int = 0
    fixture_formats: List[str] = field(default_factory=list)
    test_type: str = ""
    docstring_one_liner: str = ""
    html_static_page_target: str = ""
    mkdocs_function_page_target: str = ""
    # Parametrized test cases rendered into the page's data table.
    cases: List[TestCase] = field(default_factory=list)

    @property
    def template(self) -> str:
        """Get the filename of the jinja2 template used to render this page."""
        return "function.md.j2"

    @property
    def target_output_file(self) -> Path:
        """Get the target output file for this page."""
        # Function pages live in a directory named after their module.
        return self.path.with_suffix("") / f"{self.title}.md"

    def nav_entry(self, top_level_nav_entry: str) -> tuple:
        """Get the mkdocs navigation entry for this page."""
        nav_path_prefix = super().nav_entry(top_level_nav_entry)  # already sanitized
        # Function names are shown in monospace in the nav menu.
        return (*nav_path_prefix, f"<code>{self.title}</code>")

    def write_page(self, file_opener: FileOpener, jinja2_env: Environment) -> None:
        """
        Test functions also get a static HTML page with parametrized test
        cases.

        This is intended for easier viewing (without mkdocs styling) of the
        data-table that documents the parametrized test cases.
        """
        super().write_page(file_opener, jinja2_env)
        # No parametrized cases -> no data table -> no extra HTML page.
        if not self.cases:
            return
        html_template = jinja2_env.get_template("function.html.j2")
        rendered_html_content = html_template.render(**asdict(self))
        html_output_file = self.target_output_file.with_suffix(".html")
        with file_opener.open(html_output_file, "w") as destination:
            for line in rendered_html_content.splitlines(keepends=True):
                destination.write(line)
@dataclass
class TestFunction:
    """
    Properties used to build the test function overview table in test module
    pages.
    """

    name: str
    test_type: str
    test_case_count: int
    # First line of the function's docstring, shown in the overview table.
    docstring_one_liner: str
@dataclass
class ModulePageProps(PagePropsBase):
    """
    Definitions used for test modules, e.g.,
    `tests/berlin/eip2930_access_list/test_acl.py`.
    """

    test_functions: List[TestFunction] = field(default_factory=list)

    @property
    def template(self) -> str:
        """Get the filename of the jinja2 template used to render this page."""
        return "module.md.j2"

    @property
    def target_output_file(self) -> Path:
        """Get the target output file for this page."""
        # BUGFIX: `Path.suffix` is only the final extension (".py"), so the
        # previous comparison `self.path.suffix == "spec.py"` could never be
        # true. Compare the file name instead so `spec.py` modules render to
        # a sibling `spec.md` rather than `spec/index.md`.
        if self.path.name == "spec.py":
            return self.path.with_suffix(".md")
        return self.path.with_suffix("") / "index.md"
@dataclass
class DirectoryPageProps(PagePropsBase):
    """
    Definitions used for parent directories in test paths, e.g.,
    `tests/berlin`.
    """

    @property
    def template(self) -> str:
        """Get the filename of the jinja2 template used to render this page."""
        return "directory.md.j2"

    @property
    def target_output_file(self) -> Path:
        """Get the target output file for this page."""
        # Directories render to an index page inside themselves.
        return self.path / "index.md"
@dataclass
class MarkdownPageProps(PagePropsBase):
    """
    Definitions used to verbatim include markdown files included in test paths.
    """

    @property
    def template(self) -> str:
        """Name of the jinja2 template rendered as the page header."""
        return "markdown_header.md.j2"

    @property
    def target_output_file(self) -> Path:
        """Output file: the markdown file keeps its original path."""
        return self.path

    def write_page(self, file_opener: FileOpener, jinja2_env: Environment) -> None:
        """
        Write the page: a rendered header followed by the source markdown
        file, copied verbatim through `mkdocs_gen_files`.
        """
        header_template = jinja2_env.get_template(self.template)
        header_content = header_template.render(**asdict(self))
        with open(self.path, "r") as md_source:
            with file_opener.open(self.target_output_file, "w") as destination:
                for header_line in header_content.splitlines(keepends=True):
                    destination.write(header_line)
                destination.writelines(md_source)
# Union of every page-property type that can be rendered to a doc page.
PageProps = (
    DirectoryPageProps
    | ModulePageProps
    | FunctionPageProps
    | MarkdownPageProps
    | EipChecklistPageProps
)
# Lookups keyed by a string identifier (file path or pytest node id).
PagePropsLookup = Dict[str, PageProps]
ModulePagePropsLookup = Dict[str, ModulePageProps]
FunctionPagePropsLookup = Dict[str, FunctionPageProps]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/gen_test_doc/__init__.py | src/pytest_plugins/filler/gen_test_doc/__init__.py | """A pytest plugin to generate test case documentation for mkdocs."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py | src/pytest_plugins/filler/gen_test_doc/gen_test_doc.py | """
A pytest plugin that generates test case documentation for use in mkdocs.
It generates the top-level "Test Case Reference" section in EEST's mkdocs site.
Note:
----
- No output directory is specified for the generated output; file IO
occurs via the `mkdocs-gen-files` plugin. `mkdocs serve` writes intermediate
files to our local `docs/` directory and then copies it to the site directory.
We modify `docs/navigation.md` and write all other output underneath
`docs/tests`. If mkdocs is interrupted, these intermediate artifacts are left
in `docs/`.
Usage:
------
!!! note "Ensuring a clean build"
In case mkdocs has polluted the `docs/` directory with intermediate files, run:
```console
git restore docs/navigation.md # Careful if you have local modifications!
rm -rf docs/tests docs/docs site
```
To test doc generation, run the plugin without mkdocs:
```console
uv run fill -p pytest_plugins.filler.gen_test_doc.gen_test_doc --gen-docs \
--fork=<fork> tests
```
Or to build and view the site:
```console
uv run mkdocs serve
```
"""
import glob
import logging
import os
import sys
import textwrap
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Set, Tuple, cast
import mkdocs_gen_files # type: ignore[unused-ignore, import-not-found]
import pytest
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from pytest import Item
from ethereum_test_forks import get_forks
from ethereum_test_specs import BaseTest
from ethereum_test_tools.utility.versioning import (
generate_github_url,
get_current_commit_hash_or_tag,
)
from .page_props import (
DirectoryPageProps,
FunctionPageProps,
FunctionPagePropsLookup,
MarkdownPageProps,
ModulePageProps,
ModulePagePropsLookup,
PageProps,
PagePropsLookup,
TestCase,
TestFunction,
sanitize_string_title,
)
# Report through the mkdocs logger so that warnings fail `mkdocs build --strict`.
logger = logging.getLogger("mkdocs")
# Docstring text -> id of the first test function seen using it; used to
# detect copy-pasted docstrings across different test functions.
docstring_test_function_history: Dict[str, str] = {}
def pytest_addoption(parser: pytest.Parser) -> None:  # noqa: D103
    """Add the doc-generation command-line options to pytest."""
    gen_docs = parser.getgroup(
        "gen_docs", "Arguments related to generating test case documentation"
    )
    gen_docs.addoption(
        "--gen-docs",
        action="store_true",
        dest="gen_docs",
        default=False,
        # Fixed garbled help text ("for use in for mkdocs").
        help="Generate documentation for all collected tests for use in mkdocs",
    )
    gen_docs.addoption(
        "--gen-docs-target-fork",
        action="store",
        dest="gen_docs_target_fork",
        default=None,
        # Fixed garbled help text ("fork to use generated in generated doc pages").
        help=(
            "The default fork to use in generated doc pages. Should be the name of the "
            "next upcoming fork."
        ),
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:  # noqa: D103
    if not config.getoption("gen_docs"):
        return
    # Docs generation replaces the regular test session; skip the HTML report.
    config.option.disable_html = True
    config.pluginmanager.register(TestDocsGenerator(config), "test-case-doc-generator")
def get_test_function_id(item: Item) -> str:
    """Return the item's node id with any parametrized suffix removed."""
    return item.nodeid.partition("[")[0]
def get_test_function_name(item: Item) -> str:
    """Return the item's name with any parametrized suffix removed."""
    return item.name.partition("[")[0]
def get_test_case_id(item: Item) -> str:
    """Return the parametrized part of the item's node id (without brackets)."""
    return item.nodeid.rpartition("[")[2].rstrip("]")
def get_test_function_import_path(item: pytest.Item) -> str:
    """
    Retrieve the fully qualified import path for an item's test function.

    Used in jinja2 templates so that mkdocstrings can resolve the test
    function's (or, for bound methods, the enclosing class') documentation.
    """
    item = cast(pytest.Function, item)  # help mypy infer type
    module_name = item.module.__name__
    if hasattr(item.obj, "__self__"):
        # Bound method: document the enclosing test class.
        owner_class = item.obj.__self__.__class__.__name__
        return f"{module_name}.{owner_class}"
    # Standalone function: document the function itself.
    return f"{module_name}.{item.obj.__name__}"
def get_import_path(path: Path) -> str:
    """
    Get the dotted import path for a given path.

    - For modules, the file extension is stripped.
    - For directories (packages such as `tests.berlin`), `with_suffix("")`
      is a no-op since there is no suffix to strip.

    To do:
    ------
    - This should be combined with `get_test_function_import_path`.
    """
    suffixless = path.with_suffix("")
    return str(suffixless).replace("/", ".")
def create_github_issue_url(title: str) -> str:
    """
    Create a "new issue" URL for the EEST GitHub repository.

    The title is percent-encoded with `urllib.parse.quote` so that characters
    such as backticks, parentheses, and colons - all common in the generated
    titles (e.g. "docs(bug): ... `test_x`") - form a valid URL query value.
    The previous `replace(" ", "%20")` only handled spaces.
    """
    from urllib.parse import quote  # local import keeps this fix self-contained

    url_base = "https://github.com/ethereum/execution-spec-tests/issues/new?"
    encoded_title = quote(title, safe="")
    labels = "scope:docs,type:bug"
    return f"{url_base}title={encoded_title}&labels={labels}"
def get_docstring_one_liner(item: pytest.Item) -> str:
    """
    Extract either the first 100 characters or the first line of the docstring
    from the function associated with the given pytest.Item.
    """
    item = cast(pytest.Function, item)  # help mypy infer type
    func_obj = item.obj
    docstring = func_obj.__doc__
    test_function_name = get_test_function_name(item)
    if not docstring:
        # Missing docstrings become a clickable "file a docs bug" link.
        github_issue_url = create_github_issue_url(
            f"docs(bug): No docstring available for `{test_function_name}`"
        )
        logger.warning(f"No docstring available for `{test_function_name}`.")
        return f"[📖🐛 No docstring available]({github_issue_url})"
    docstring = docstring.strip()
    test_function_id = get_test_function_id(item)
    # Track docstrings already seen to surface copy-pasted docs across tests.
    if (
        docstring in docstring_test_function_history
        and docstring_test_function_history[docstring] != test_function_id
    ):
        logger.info(
            f"Duplicate docstring for {test_function_id}: "
            f"{docstring_test_function_history[docstring]} and {test_function_id}"
        )
    else:
        docstring_test_function_history[docstring] = test_function_id
    lines = docstring.splitlines()
    bad_oneliner_issue_url = create_github_issue_url(
        f"docs(bug): Bad docstring oneliner for `{test_function_name}`"
    )
    report_bad_oneliner_link = f"([📖🐛?]({bad_oneliner_issue_url}))"
    if lines:
        first_line = lines[0].strip()
        if len(first_line) <= 100:
            # A summary ending in ":" suggests it continues below; flag it
            # with a "report a docs bug" link.
            return (
                first_line
                if not first_line.endswith(":")
                else first_line + report_bad_oneliner_link
            )
        else:
            # Too long for the overview table; truncate and link a docs bug.
            return first_line[:100] + f"... {report_bad_oneliner_link}"
    else:
        # Whitespace-only docstring: strip() left nothing to split into lines.
        return docstring[:100] + f"... {report_bad_oneliner_link}"
def get_test_function_test_type(item: pytest.Item) -> str:
    """Determine the test function's spec type from its requested fixtures."""
    known_types = [
        spec_type.pytest_parameter_name() for spec_type in BaseTest.spec_types.values()
    ]
    item = cast(pytest.Function, item)  # help mypy infer type
    matched = next((t for t in known_types if t in item.fixturenames), None)
    if matched is not None:
        return matched
    logger.warning(f"Could not determine the test function type for {item.nodeid}")
    return f"unknown ([📖🐛]({create_github_issue_url('docs(bug): unknown test function type')}))"
class TestDocsGenerator:
"""Pytest plugin class for generating test case documentation."""
    def __init__(self, config: pytest.Config) -> None:
        """Initialize the plugin with the given pytest config."""
        self.config = config
        # Fork shown for already-deployed tests (--gen-docs-target-fork).
        self.target_fork: str = config.getoption("gen_docs_target_fork")
        self.deployed_forks = [fork.name() for fork in get_forks() if fork.is_deployed()]
        self._setup_logger()
        # StrictUndefined: fail loudly if a template references a missing variable.
        self.jinja2_env = Environment(
            loader=FileSystemLoader("docs/templates"),
            trim_blocks=True,
            undefined=StrictUndefined,
        )
        self.source_dir = Path("tests")
        # Git ref used to build GitHub source-code links.
        self.ref = get_current_commit_hash_or_tag()
        self.top_level_nav_entry = "Test Case Reference"
        # intermediate collected pages and their properties
        self.function_page_props: FunctionPagePropsLookup = {}
        self.module_page_props: ModulePagePropsLookup = {}
        # the complete set of pages and their properties
        self.page_props: PagePropsLookup = {}
    @pytest.hookimpl(hookwrapper=True, trylast=True)
    def pytest_collection_modifyitems(
        self, config: pytest.Config, items: List[pytest.Item]
    ) -> object:
        """Generate html doc for each test item that pytest has collected."""
        # hookwrapper/trylast: run after all other plugins have filtered items.
        yield
        self.add_global_page_props_to_env()
        functions = defaultdict(list)
        for item in items:  # group test case by test function
            functions[get_test_function_id(item)].append(item)
        # Merge in EIP checklist pages if another plugin populated them.
        if hasattr(config, "checklist_props"):
            checklist_props = config.checklist_props
            self.page_props = {**self.page_props, **checklist_props}
        # the heavy work
        self.create_function_page_props(functions)
        self.create_module_page_props()
        # add the pages to the page_props dict
        self.page_props = {**self.page_props, **self.function_page_props, **self.module_page_props}
        # this adds pages for the intermediate directory structure (tests,
        # tests/berlin)
        self.add_directory_page_props()
        # add other interesting pages
        self.add_spec_page_props()
        self.add_markdown_page_props()
        # write pages and navigation menu
        self.write_pages()
        self.update_mkdocs_nav()
    @pytest.hookimpl(tryfirst=True)
    def pytest_runtestloop(self, session: pytest.Session) -> bool:
        """Skip test execution, only generate docs."""
        session.testscollected = 0
        # Returning True tells pytest the run-test loop was handled (no tests run).
        return True
def pytest_terminal_summary(
self,
terminalreporter: Any,
exitstatus: int,
config: pytest.Config,
) -> None:
"""Add a summary line for the docs."""
del exitstatus, config
terminalreporter.write_sep("=", f"{len(self.page_props)} doc pages generated", bold=True)
def _setup_logger(self) -> None:
"""
Configure the mkdocs logger and adds a StreamHandler if outside mkdocs.
We use the mkdocs logger to report warnings if conditions are invalid -
this will inform the user and fail the build with `mkdocs build
--strict`.
"""
if not logger.hasHandlers() or logger.level == logging.NOTSET:
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
    def get_doc_site_base_url(self) -> str:
        """
        Return site's base in its URL for inclusion of local files.

        This is required in order to include docs/javascripts/site.js, for
        example, in the standalone html pages.

        Github pages deploys to a sub-directory "execution-spec-tests" and mike
        deploys a version of the site underneath a sub-directory named after
        the version, e.g.:

        - https://eest.ethereum.org/main/
        - https://eest.ethereum.org/v4.1.0/

        We need to be able to include the javascript available at:

        - https://eest.ethereum.org/main/javascripts/site.js
        """
        ci = os.getenv("CI", None)
        github_ref_name = os.getenv("GITHUB_REF_NAME", None)
        doc_version = os.getenv("GEN_TEST_DOC_VERSION", None)
        # CI build: the branch/tag name determines the deployment sub-directory.
        if ci and github_ref_name:
            return f"/execution-spec-tests/{github_ref_name}/"
        if ci and not github_ref_name:
            raise Exception("Failed to determine target doc version (no GITHUB_REF_NAME env?).")
        if ("--strict" in sys.argv or "deploy" in sys.argv) and not doc_version:
            # assume we're trying to deploy manually via mike (locally)
            raise Exception(
                "Failed to determine target doc version during strict build (set "
                "GEN_TEST_DOC_VERSION env var)."
            )
        # local test build, e.g. via `uv run mkdocs serve`
        return "/execution-spec-tests/"
def add_global_page_props_to_env(self) -> None:
"""Populate global page properties used in j2 templates."""
global_page_props = {
"target_fork": self.target_fork,
"base_url": self.get_doc_site_base_url(),
"deployed_forks": self.deployed_forks,
"short_git_ref": get_current_commit_hash_or_tag(shorten_hash=True),
}
self.jinja2_env.globals.update(global_page_props)
    def create_function_page_props(self, test_functions: Dict["str", List[Item]]) -> None:
        """
        Traverse all test items and create a lookup of doc pages & required
        props.

        To do: Needs refactor.
        """
        # Parameters never shown as columns in the test case table.
        skip_params = ["fork"] + [
            spec_type.pytest_parameter_name() for spec_type in BaseTest.spec_types.values()
        ]
        for function_id, function_items in test_functions.items():
            assert all(isinstance(item, pytest.Function) for item in function_items)
            # help mypy infer type
            items = cast(List[pytest.Function], function_items)
            # extract parametrized test cases for each test function
            test_cases = []
            if getattr(items[0], "callspec", None):
                for item in items:
                    param_set = item.callspec.params
                    # Don't show skipped parameters as columns in the test case
                    # table
                    keys = [key for key in param_set.keys() if key not in skip_params]
                    values = [param_set[key] for key in keys]
                    # TODO: This formatting of bytes objects should be moved
                    # elsewhere
                    values = [
                        (
                            " ".join(
                                f"<code>{chunk}</code>" for chunk in textwrap.wrap(value.hex(), 32)
                            )
                            if isinstance(value, bytes)
                            else str(value)
                        )
                        for value in values
                    ]
                    fork = item.callspec.params.get("fork").name()  # type: ignore
                    test_type = get_test_function_test_type(item)
                    test_type_value = item.callspec.params.get(test_type)
                    fixture_type = test_type_value.format_name  # type: ignore
                    test_cases.append(
                        TestCase(
                            full_id=item.nodeid,
                            abbreviated_id=item.nodeid.split("[")[-1].rstrip("]"),
                            fork=fork,
                            fixture_type=fixture_type,
                            params=dict(zip(keys, values, strict=False)),
                        )
                    )
            module_relative_path = Path(items[0].module.__file__).relative_to(Path.cwd())
            # Deep-link to the test function's first source line on GitHub.
            source_url = generate_github_url(
                str(module_relative_path),
                branch_or_commit_or_tag=self.ref,
                line_number=items[0].function.__code__.co_firstlineno,
            )
            valid_from_marker = items[0].get_closest_marker("valid_from")
            if not valid_from_marker:
                valid_from_fork = "Frontier"
            else:
                # NOTE: The EOF tests cases contain two fork names in their
                # valid_from marker, separated by a comma. Take the last.
                valid_from_fork = valid_from_marker.args[0].split(",")[-1]
            # Already-deployed forks are documented against the target fork.
            target_or_valid_fork = (
                self.target_fork if valid_from_fork in self.deployed_forks else valid_from_fork
            )
            test_type = get_test_function_test_type(items[0])
            # Count only the cases shown on the page: matching fork and whose
            # fixture format equals the function's test type.
            test_case_count = len(
                [
                    case
                    for case in test_cases
                    if case.fork == target_or_valid_fork and case.fixture_type == test_type
                ]
            )
            is_benchmark = items[0].get_closest_marker("benchmark") is not None
            is_stateful = items[0].get_closest_marker("stateful") is not None
            self.function_page_props[function_id] = FunctionPageProps(
                title=get_test_function_name(items[0]),
                source_code_url=source_url,
                target_or_valid_fork=target_or_valid_fork,
                path=module_relative_path,
                pytest_node_id=function_id,
                package_name=get_test_function_import_path(items[0]),
                test_case_count=test_case_count,
                cases=test_cases,
                fixture_formats=sorted({case.fixture_type for case in test_cases}),
                test_type=test_type,
                docstring_one_liner=get_docstring_one_liner(items[0]),
                html_static_page_target=f"./{get_test_function_name(items[0])}.html",
                mkdocs_function_page_target=f"./{get_test_function_name(items[0])}/",
                is_benchmark=is_benchmark,
                is_stateful=is_stateful,
            )
def create_module_page_props(self) -> None:
"""Discover the test module doc pages and extract their properties."""
for _function_id, function_page in self.function_page_props.items():
if str(function_page.path) not in self.module_page_props:
module_path = function_page.path
self.module_page_props[str(function_page.path)] = ModulePageProps(
title=sanitize_string_title(function_page.path.stem),
source_code_url=function_page.source_code_url,
target_or_valid_fork=function_page.target_or_valid_fork,
path=module_path,
pytest_node_id=str(module_path),
package_name=get_import_path(module_path),
is_benchmark=function_page.is_benchmark,
is_stateful=function_page.is_stateful,
test_functions=[
TestFunction(
name=function_page.title,
test_type=function_page.test_type,
test_case_count=function_page.test_case_count,
docstring_one_liner=function_page.docstring_one_liner,
)
],
)
else:
existing_module_page = self.module_page_props[str(function_page.path)]
if function_page.is_benchmark:
existing_module_page.is_benchmark = True
if function_page.is_stateful:
existing_module_page.is_stateful = True
existing_module_page.test_functions.append(
TestFunction(
name=function_page.title,
test_type=function_page.test_type,
test_case_count=function_page.test_case_count,
docstring_one_liner=function_page.docstring_one_liner,
)
)
    def add_directory_page_props(self) -> None:
        """
        Discover the intermediate directory pages and extract their properties.

        These directories may not have any test modules within them, e.g.,
        tests/berlin/.
        """
        # Collect every ancestor directory of every collected test module.
        sub_paths: Set[Path] = set()
        for module_page in self.module_page_props.values():
            module_path_parts = module_page.path.parent.parts
            sub_paths.update(
                Path(*module_path_parts[: i + 1]) for i in range(len(module_path_parts))
            )
        for directory in sub_paths:
            # The first path component below `tests/` names the fork.
            directory_fork_name = (
                directory.relative_to(self.source_dir).parts[0].capitalize()
                if directory != self.source_dir
                else self.target_fork
            )
            if directory_fork_name in self.deployed_forks:
                fork = self.target_fork
            else:
                fork = directory_fork_name
            # Propagate benchmark/stateful flags up from contained modules.
            is_benchmark = any(
                module_page.is_benchmark
                for module_page in self.module_page_props.values()
                if directory in module_page.path.parents or module_page.path.parent == directory
            )
            is_stateful = any(
                module_page.is_stateful
                for module_page in self.module_page_props.values()
                if directory in module_page.path.parents or module_page.path.parent == directory
            )
            self.page_props[str(directory)] = DirectoryPageProps(
                title=sanitize_string_title(str(directory.name)),
                path=directory,
                pytest_node_id=str(directory),
                source_code_url=generate_github_url(
                    str(directory), branch_or_commit_or_tag=self.ref
                ),
                # TODO: This won't work in all cases; should be from the
                # development fork Currently breaks for
                # `tests/unscheduled/eip7692_eof_v1/index.md`
                target_or_valid_fork=fork.capitalize() if fork else "Unknown",
                # init.py will be used for docstrings
                package_name=get_import_path(directory),
                is_benchmark=is_benchmark,
                is_stateful=is_stateful,
            )
def find_files_within_collection_scope(self, file_pattern: str) -> List[Path]:
"""
Find all files that match the scope of the collected test modules.
This to avoid adding matching files in uncollected test directories.
Note: could be optimized!
"""
files = []
for module_page in self.module_page_props.values():
# all files found in and under the modules' directory
files += glob.glob(f"{module_page.path.parent}/**/{file_pattern}", recursive=True)
for parent in module_page.path.parent.parents:
if parent == self.source_dir:
break
# add files in a module's parent directory
files += glob.glob(f"{parent}/{file_pattern}")
return [Path(file) for file in set(files)]
def add_spec_page_props(self) -> None:
"""
Add page path properties for spec files discovered in the collection
scope.
"""
for spec_path in self.find_files_within_collection_scope("spec.py"):
self.page_props[str(spec_path)] = ModulePageProps(
title="Spec",
path=spec_path,
source_code_url=generate_github_url(
str(spec_path), branch_or_commit_or_tag=self.ref
),
pytest_node_id=str(spec_path),
package_name=get_import_path(spec_path),
target_or_valid_fork="",
test_functions=[],
)
def add_markdown_page_props(self) -> None:
"""
Add page path properties for markdown files discovered in the
collection scope.
"""
for md_path in self.find_files_within_collection_scope("*.md"):
self.page_props[str(md_path)] = MarkdownPageProps(
title=md_path.stem,
path=md_path,
source_code_url=generate_github_url(
str(md_path), branch_or_commit_or_tag=self.ref
),
# abuse: not a test, but used in source code link
pytest_node_id=str(md_path),
target_or_valid_fork="",
package_name="",
)
    def update_mkdocs_nav(self) -> None:
        """
        Add the generated 'Test Case Reference' entries to the mkdocs
        navigation menu.
        """
        # Chronological fork order: lower index means deployed earlier.
        fork_order = {fork.name().lower(): i for i, fork in enumerate(reversed(get_forks()))}

        def sort_by_fork_deployment_and_path(x: PageProps) -> Tuple[Any, ...]:
            """
            Key function used to sort navigation menu entries for test case ref
            docs.

            Nav entries / output files contain special cases such as:

            - ("Test Case Reference",) -> tests/index.md
            - ("Test Case Reference", "Berlin") -> tests/berlin/index.md
            - ("Test Case Reference", "EIP-7692 EOF V1", tracker.md")
              tests/unscheduled/eip7692_eof_v1/tracker.md
            - ("Test Case Reference", "Shanghai", "EIP-3855 PUSH0", "Spec") ->
              tests/shanghai/eip3855_push0/spec.py

            This function provides an ordering to sort nav menu entries as
            follows:

            1. Forks are listed in the chronological order that they were
               deployed.
            2. Special files listed first (before test pages): "*.md"
               and `Spec.py`,
            3. The page's corresponding file path under
               `./tests/`.
            """
            length = len(x.path.parts)
            if length > 1:
                # the fork folder from the relative path
                fork = str(x.path.parts[1]).lower()
                # unscheduled features added to the end
                if fork not in fork_order:
                    return (999, str(x.path))
            if length == 1:
                # the top-level `tests/` landing page sorts first
                return (0,)
            elif length == 2:
                # fork index pages (tests/<fork>) next, in deployment order
                return (1, fork_order[fork])
            elif x.path.name == "spec.py":
                return (2, fork_order[fork], length, 0, x.path)
            elif x.path.suffix == ".md":
                return (2, fork_order[fork], length, 1, x.path)
            else:
                return (2, fork_order[fork], length, 2, x.path)

        nav = mkdocs_gen_files.Nav()
        for page in sorted(self.page_props.values(), key=sort_by_fork_deployment_and_path):
            nav[page.nav_entry(self.top_level_nav_entry)] = str(page.target_output_file)
        # Append the generated entries to the literate-nav file used by mkdocs.
        with mkdocs_gen_files.open("navigation.md", "a") as nav_file:
            nav_file.writelines(nav.build_literate_nav())
def write_pages(self) -> None:
"""Write all pages to the target directory."""
for page in self.page_props.values():
page.write_page(mkdocs_gen_files, self.jinja2_env) # type: ignore[arg-type, unused-ignore]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/custom_logging/plugin_logging.py | src/pytest_plugins/custom_logging/plugin_logging.py | """
A Pytest plugin to configure logging for pytest sessions.
Note: While pytest's builtin logging is generally amazing, it does not write
timestamps when log output is written to pytest's caplog (the captured output
for a test). And having timestamps in this output is the main use case for
adding logging to our plugins. This output gets shown in the `FAILURES` summary
section, which is shown as the "simulator log" in hive simulations. For this
use case, timestamps are essential to verify timing issues against the clients
log.
This module provides both:
1. A standalone logging configuration system that can be used in any
Python project
2. A pytest plugin that automatically configures logging for pytest sessions
"""
import functools
import logging
import os
import sys
from datetime import datetime, timezone
from logging import LogRecord
from pathlib import Path
from typing import Any, ClassVar, Optional, cast
import pytest
from _pytest.terminal import TerminalReporter
file_handler: Optional[logging.FileHandler] = None
# Custom log levels
VERBOSE_LEVEL = 15 # Between INFO (10) and DEBUG (20)
FAIL_LEVEL = 35 # Between WARNING (30) and ERROR (40)
# Add custom log levels to the logging module
logging.addLevelName(VERBOSE_LEVEL, "VERBOSE")
logging.addLevelName(FAIL_LEVEL, "FAIL")
class EESTLogger(logging.Logger):
"""Define custom log levels via a dedicated Logger class."""
def verbose(
self,
msg: object,
*args: Any,
exc_info: BaseException | bool | None = None,
stack_info: bool = False,
stacklevel: int = 1,
extra: Optional[dict[str, Any]] = None,
) -> None:
"""
Log a message with VERBOSE level severity (15).
This level is between DEBUG (10) and INFO (20), intended for messages
more detailed than INFO but less verbose than DEBUG.
"""
if stacklevel is None:
stacklevel = 1
if self.isEnabledFor(VERBOSE_LEVEL):
self._log(VERBOSE_LEVEL, msg, args, exc_info, extra, stack_info, stacklevel)
def fail(
self,
msg: object,
*args: Any,
exc_info: BaseException | bool | None = None,
stack_info: bool = False,
stacklevel: int = 1,
extra: Optional[dict[str, Any]] = None,
) -> None:
"""
Log a message with FAIL level severity (35).
This level is between WARNING (30) and ERROR (40), intended for test
failures and similar issues.
"""
if stacklevel is None:
stacklevel = 1
if self.isEnabledFor(FAIL_LEVEL):
self._log(FAIL_LEVEL, msg, args, exc_info, extra, stack_info, stacklevel)
# Register the custom logger class
logging.setLoggerClass(EESTLogger)
def get_logger(name: str) -> EESTLogger:
"""Get a properly-typed logger with the EEST custom logging levels."""
return cast(EESTLogger, logging.getLogger(name))
# Module logger
logger = get_logger(__name__)
class UTCFormatter(logging.Formatter):
"""
Log formatter that formats UTC timestamps with milliseconds and +00:00
suffix.
"""
def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str: # noqa: D102,N802
# camelcase required
del datefmt
dt = datetime.fromtimestamp(record.created, tz=timezone.utc)
return dt.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + "+00:00"
class ColorFormatter(UTCFormatter):
"""
Formatter that adds ANSI color codes to log level names for terminal
output.
"""
running_in_docker: ClassVar[bool] = Path("/.dockerenv").exists()
COLORS = {
logging.DEBUG: "\033[37m", # Gray
VERBOSE_LEVEL: "\033[36m", # Cyan
logging.INFO: "\033[36m", # Cyan
logging.WARNING: "\033[33m", # Yellow
FAIL_LEVEL: "\033[35m", # Magenta
logging.ERROR: "\033[31m", # Red
logging.CRITICAL: "\033[41m", # Red background
}
RESET = "\033[0m"
def format(self, record: LogRecord) -> str:
"""Apply colorful formatting only when not running in Docker."""
# First make a copy of the record to avoid modifying the original
record_copy = logging.makeLogRecord(record.__dict__)
if not self.running_in_docker:
color = self.COLORS.get(record_copy.levelno, self.RESET)
record_copy.levelname = f"{color}{record_copy.levelname}{self.RESET}"
return super().format(record_copy)
class LogLevel:
    """Parse a log level supplied on the command line."""

    @classmethod
    def from_cli(cls, value: str) -> int:
        """
        Parse a logging level from CLI.

        Accepts standard level names (e.g. 'INFO', 'debug') or numeric values.
        """
        try:
            return int(value)
        except ValueError:
            pass
        level = logging._nameToLevel.get(value.upper())
        if level is not None:
            return level
        valid = ", ".join(logging._nameToLevel)
        raise ValueError(f"Invalid log level '{value}'. Expected one of: {valid} or a number.")
# =========================================================================
# Standalone logging configuration (usable without pytest)
# =========================================================================
def configure_logging(
    log_level: int | str = "INFO",
    log_file: Optional[str | Path] = None,
    log_to_stdout: bool = True,
    log_format: str = "%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    use_color: Optional[bool] = None,
) -> Optional[logging.FileHandler]:
    """
    Configure root logging with the EEST custom levels and formatters.

    Usable in any Python project to get the same logging setup as the
    pytest plugin.

    Args:
        log_level: The logging level to use (name or numeric value)
        log_file: Path to the log file (if None, no file logging is set up)
        log_to_stdout: Whether to log to stdout
        log_format: The log format string
        use_color: Whether to use colors in stdout output (auto-detected if None)

    Returns: The file handler if log_file is provided, otherwise None
    """
    root_logger = logging.getLogger()
    if isinstance(log_level, str):
        log_level = LogLevel.from_cli(log_level)
    root_logger.setLevel(log_level)
    # Start from a clean slate: drop any previously attached handlers.
    for existing_handler in list(root_logger.handlers):
        root_logger.removeHandler(existing_handler)
    # File handler (optional).
    created_file_handler = None
    if log_file:
        log_path = Path(log_file)
        log_path.parent.mkdir(exist_ok=True, parents=True)
        created_file_handler = logging.FileHandler(log_path, mode="w")
        created_file_handler.setFormatter(UTCFormatter(fmt=log_format))
        root_logger.addHandler(created_file_handler)
    # Stdout handler (optional).
    if log_to_stdout:
        stdout_handler = logging.StreamHandler(sys.stdout)
        colorize = use_color if use_color is not None else not ColorFormatter.running_in_docker
        if colorize:
            stdout_handler.setFormatter(ColorFormatter(fmt=log_format))
        else:
            stdout_handler.setFormatter(UTCFormatter(fmt=log_format))
        root_logger.addHandler(stdout_handler)
    logger.verbose("Logging configured successfully.")
    return created_file_handler
# ==========================================================================
# Pytest plugin integration
# ==========================================================================
def pytest_addoption(parser: pytest.Parser) -> None:  # noqa: D103
    logging_group = parser.getgroup(
        "logging", "Arguments related to logging from test fixtures and tests."
    )
    # --log-level is taken by pytest's built-in logging plugin, hence --eest-*.
    logging_group.addoption(
        "--eest-log-level",
        "--eestloglevel",
        action="store",
        default="INFO",
        type=LogLevel.from_cli,
        dest="eest_log_level",
        help=(
            "The logging level to use in the test session: DEBUG, INFO, WARNING, ERROR or "
            "CRITICAL, default - INFO. An integer in [0, 50] may be also provided."
        ),
    )
@functools.cache
def get_log_stem(argv0: str, argv1: Optional[str]) -> str:
    """
    Generate the stem (prefix-subcommand-timestamp) for log file names.

    Cached so that every caller in the process observes the same timestamp.
    """
    base = Path(argv0).stem
    if base in ("", "-c", "__main__"):
        base = "pytest"
    # Flag-like first arguments (e.g. "-k") are not subcommands.
    subcommand = argv1 if argv1 and not argv1.startswith("-") else None
    timestamp = datetime.now(timezone.utc).strftime("%Y%m%d-%H%M%S")
    components = [part for part in (base, subcommand, timestamp) if part]
    return "-".join(components)
def pytest_configure_node(node: Any) -> None:
    """Share the main process's log-file stem with an xdist worker (xdist hook)."""
    subcommand = sys.argv[1] if len(sys.argv) > 1 else None
    node.workerinput["log_stem"] = get_log_stem(sys.argv[0], subcommand)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Initialize logging for pytest sessions.

    This goes to a lot of effort to ensure that a log file is created per
    worker if xdist is used and that the timestamp used in the filename is the
    same across main and all workers.
    """
    global file_handler
    # Determine log file path with consistent timestamp across workers
    potential_subcommand = None
    if len(sys.argv) > 1:
        potential_subcommand = sys.argv[1]
    # Workers receive the stem computed on the main process via
    # pytest_configure_node; the main process computes (and caches) it here.
    log_stem = getattr(config, "workerinput", {}).get("log_stem") or get_log_stem(
        sys.argv[0], potential_subcommand
    )
    worker_id = os.getenv("PYTEST_XDIST_WORKER", "main")
    log_filename = f"{log_stem}-{worker_id}.log"
    log_path = Path("logs")
    log_path.mkdir(exist_ok=True)
    log_file_path = log_path / log_filename
    # Store the log file path in the pytest config
    config.option.eest_log_file_path = log_file_path
    # Configure logging using the standalone function
    file_handler = configure_logging(
        log_level=config.getoption("eest_log_level"),
        log_file=log_file_path,
        log_to_stdout=True,
    )
def pytest_report_header(config: pytest.Config) -> list[str]:
    """Add the session's log-file path to the test session header."""
    log_file = config.option.eest_log_file_path
    return [f"Log file: {log_file}"] if log_file else []
def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
    """Echo the log-file path in the terminal summary (as the HTML report does)."""
    options = terminalreporter.config.option
    if options.collectonly:
        return
    log_file = options.eest_log_file_path
    if log_file:
        terminalreporter.write_sep("-", f"Log file: {log_file.resolve()}", yellow=True)
def log_only_to_file(level: int, msg: str, *args: Any) -> None:
    """Write a log record through the file handler only, skipping stdout."""
    handler: logging.Handler | None = file_handler
    if not handler:
        return
    logger = logging.getLogger(__name__)
    if not logger.isEnabledFor(level):
        return
    handler.handle(
        logger.makeRecord(
            logger.name,
            level,
            fn=__file__,
            lno=0,
            msg=msg,
            args=args,
            exc_info=None,
            func=None,
            extra=None,
        )
    )
def pytest_runtest_logstart(nodeid: str, location: tuple[str, int, str]) -> None:
    """Record the start of a test in the log file."""
    del location  # unused; part of the hook signature
    message = f"ℹ️ - START TEST: {nodeid}"
    log_only_to_file(logging.INFO, message)
def pytest_runtest_logreport(report: pytest.TestReport) -> None:
    """Write each test's outcome and duration to the log file."""
    # Only the "call" phase is logged; setup/teardown reports are skipped.
    if report.when != "call":
        return
    level = logging.INFO
    if hasattr(report, "wasxfail"):
        # Expected-failure outcomes.
        if report.skipped:
            emoji, status = "💤", "XFAIL"
        elif report.passed:
            emoji, status = "🚨", "XPASS"
        else:
            emoji, status = "💣", "XFAIL ERROR"
            level = logging.ERROR
    elif report.skipped:
        emoji, status = "⏭️", "SKIPPED"
    elif report.failed:
        emoji, status = "❌", "FAILED"
        level = FAIL_LEVEL
    else:
        emoji, status = "✅", "PASSED"
    log_only_to_file(
        level, f"{emoji} - {status} in {report.duration:.2f}s: {report.nodeid}"
    )
def pytest_runtest_logfinish(nodeid: str, location: tuple[str, int, str]) -> None:
    """Record the end of a test in the log file."""
    del location  # unused; part of the hook signature
    message = f"ℹ️ - END TEST: {nodeid}"
    log_only_to_file(logging.INFO, message)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/custom_logging/__init__.py | src/pytest_plugins/custom_logging/__init__.py | """
Import the logging module content to make it available from
pytest_plugins.logging.
"""
from .plugin_logging import (
FAIL_LEVEL,
VERBOSE_LEVEL,
ColorFormatter,
EESTLogger,
LogLevel,
UTCFormatter,
configure_logging,
get_logger,
)
__all__ = [
"VERBOSE_LEVEL",
"FAIL_LEVEL",
"EESTLogger",
"UTCFormatter",
"ColorFormatter",
"LogLevel",
"get_logger",
"configure_logging",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/custom_logging/tests/test_logging.py | src/pytest_plugins/custom_logging/tests/test_logging.py | """
Tests for the logging module.
These tests verify the functionality of the custom logging system,
including both the standalone configuration and the pytest integration.
"""
import io
import logging
import re
import tempfile
from pathlib import Path
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from ..plugin_logging import (
FAIL_LEVEL,
VERBOSE_LEVEL,
ColorFormatter,
EESTLogger,
UTCFormatter,
configure_logging,
get_logger,
)
class TestLoggerSetup:
    """Checks for custom level registration and logger construction."""

    def test_custom_levels_registered(self) -> None:
        """Custom levels resolve both by number and by name."""
        assert logging.getLevelName("VERBOSE") == VERBOSE_LEVEL
        assert logging.getLevelName("FAIL") == FAIL_LEVEL
        assert logging.getLevelName(VERBOSE_LEVEL) == "VERBOSE"
        assert logging.getLevelName(FAIL_LEVEL) == "FAIL"

    def test_get_logger(self) -> None:
        """get_logger returns an EESTLogger exposing the extra methods."""
        log = get_logger("test_logger")
        assert isinstance(log, EESTLogger)
        assert log.name == "test_logger"
        for method_name in ("verbose", "fail"):
            assert hasattr(log, method_name)
class TestEESTLogger:
    """Exercise the custom logger methods against an in-memory stream."""

    def setup_method(self) -> None:
        """Attach a fresh StringIO handler and open all levels."""
        self.log_output = io.StringIO()
        self.logger = get_logger("test_eest_logger")
        # Start from a clean slate: drop any previously attached handlers.
        for stale_handler in list(self.logger.handlers):
            self.logger.removeHandler(stale_handler)
        stream_handler = logging.StreamHandler(self.log_output)
        stream_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
        self.logger.addHandler(stream_handler)
        self.logger.setLevel(logging.DEBUG)  # capture everything during tests

    def test_verbose_method(self) -> None:
        """verbose() emits at the VERBOSE level."""
        self.logger.verbose("This is a verbose message")
        assert "VERBOSE: This is a verbose message" in self.log_output.getvalue()

    def test_fail_method(self) -> None:
        """fail() emits at the FAIL level."""
        self.logger.fail("This is a fail message")
        assert "FAIL: This is a fail message" in self.log_output.getvalue()

    def test_standard_methods(self) -> None:
        """Stock logging methods are unaffected by the customizations."""
        self.logger.debug("Debug message")
        self.logger.info("Info message")
        self.logger.warning("Warning message")
        captured = self.log_output.getvalue()
        for expected in (
            "DEBUG: Debug message",
            "INFO: Info message",
            "WARNING: Warning message",
        ):
            assert expected in captured
class TestFormatters:
    """Checks for the UTC and color log formatters."""

    def test_utc_formatter(self) -> None:
        """Timestamps render in UTC with millisecond precision."""
        formatter = UTCFormatter(fmt="%(asctime)s: %(message)s")
        record = logging.makeLogRecord(
            {
                "msg": "Test message",
                "created": 1609459200.0,  # 2021-01-01 00:00:00 UTC
            }
        )
        rendered = formatter.format(record)
        assert re.match(r"2021-01-01 00:00:00\.\d{3}\+00:00: Test message", rendered)

    def test_color_formatter(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """ANSI colors are applied outside Docker and suppressed inside it."""
        formatter = ColorFormatter(fmt="[%(levelname)s] %(message)s")
        record = logging.makeLogRecord(
            {
                "levelno": logging.ERROR,
                "levelname": "ERROR",
                "msg": "Error message",
            }
        )
        # Outside Docker the level name is wrapped in red ANSI codes.
        monkeypatch.setattr(ColorFormatter, "running_in_docker", False)
        assert "\033[31mERROR\033[0m" in formatter.format(record)
        # Inside Docker plain text only.
        monkeypatch.setattr(ColorFormatter, "running_in_docker", True)
        rendered = formatter.format(record)
        assert "\033[31mERROR\033[0m" not in rendered
        assert "ERROR" in rendered
class TestStandaloneConfiguration:
    """
    Test the standalone logging configuration function.

    NOTE: these tests mutate the process-wide root logger; each test cleans
    up the handlers it installs.
    """

    def test_configure_logging_defaults(self) -> None:
        """Test configure_logging with default parameters."""
        # Patch stdout so the default stream handler does not pollute output.
        with patch("sys.stdout", new=io.StringIO()):
            # Configure logging with default settings
            handler = configure_logging()
            # Should log to stdout by default
            root_logger = logging.getLogger()
            assert any(isinstance(h, logging.StreamHandler) for h in root_logger.handlers)
            # Should set INFO level by default
            assert root_logger.level == logging.INFO
            # Should not return a file handler
            assert handler is None

    def test_configure_logging_with_file(self) -> None:
        """Test configure_logging with file output."""
        # Create a temporary directory for log files
        with tempfile.TemporaryDirectory() as temp_dir:
            log_file = Path(temp_dir) / "test.log"
            # Configure logging with a file
            handler = configure_logging(log_file=log_file, log_to_stdout=False)
            try:
                # Should return a file handler
                assert isinstance(handler, logging.FileHandler)
                # Should create the log file
                assert log_file.exists()
                # Log a message and check it appears in the file
                logger = get_logger("test_config")
                logger.info("Test log message")
                with open(log_file, "r") as f:
                    log_content = f.read()
                    assert "Test log message" in log_content
            finally:
                # Clean up: close the handler before the temp dir is removed.
                if handler:
                    handler.close()
                logging.getLogger().handlers = []  # Remove all handlers

    def test_configure_logging_with_level(self) -> None:
        """Test configure_logging with custom log level."""
        # Test with string level name
        configure_logging(log_level="DEBUG", log_to_stdout=False)
        assert logging.getLogger().level == logging.DEBUG
        # Test with numeric level
        configure_logging(log_level=VERBOSE_LEVEL, log_to_stdout=False)
        assert logging.getLogger().level == VERBOSE_LEVEL
        # Clean up
        logging.getLogger().handlers = []
# Only the TestPytestIntegration class tests require pytest to run properly
# We'll put the skip marker on that class instead of the whole module
class TestPytestIntegration:
    """Test the pytest integration of the logging module."""

    def test_pytest_configure(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """
        Test that pytest_configure sets up logging correctly.

        Saves and restores the root logger's handlers so the surrounding
        test session is not affected by the handlers this test installs.
        """
        from pytest_plugins.custom_logging.plugin_logging import pytest_configure

        # Create logs directory if it doesn't exist
        log_dir = Path("logs")
        if not log_dir.exists():
            log_dir.mkdir()
        # Save the original handlers to restore later
        original_handlers = logging.getLogger().handlers.copy()
        try:
            # Remove existing handlers to start clean
            for handler in logging.getLogger().handlers[:]:
                logging.getLogger().removeHandler(handler)

            # Create a mock pytest config with only the attributes
            # pytest_configure reads (option, workerinput, getoption).
            class MockConfig:
                def __init__(self) -> None:
                    self.option = MagicMock()
                    self.option.eest_log_level = logging.INFO
                    self.workerinput: dict[str, Any] = {}

                def getoption(self, name: str) -> Any:
                    if name == "eest_log_level":
                        return logging.INFO

            # Set up environment: pretend to be an xdist worker.
            monkeypatch.setattr("sys.argv", ["pytest"])
            monkeypatch.setenv("PYTEST_XDIST_WORKER", "worker1")
            # Call pytest_configure
            config = MockConfig()
            pytest_configure(config)  # type: ignore[arg-type]
            # Check that logging is configured
            assert hasattr(config.option, "eest_log_file_path")
            # Check that a file handler was added to the root logger
            file_handlers = [
                h for h in logging.getLogger().handlers if isinstance(h, logging.FileHandler)
            ]
            assert len(file_handlers) > 0
            # Find the log file handler's file
            log_file = Path(file_handlers[0].baseFilename)
            # Check that the log file was created
            assert log_file.exists()
            # Verify the file is in the logs directory
            assert log_file.parent.resolve() == log_dir.resolve()
            # Clean up the test log file
            log_file.unlink()
        finally:
            # Clean up: Remove any handlers we added
            for handler in logging.getLogger().handlers[:]:
                handler.close()
                logging.getLogger().removeHandler(handler)
            # Restore original handlers
            for handler in original_handlers:
                logging.getLogger().addHandler(handler)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/custom_logging/tests/__init__.py | src/pytest_plugins/custom_logging/tests/__init__.py | """Test package for the logging module."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/shared/execute_fill.py | src/pytest_plugins/shared/execute_fill.py | """
Shared pytest fixtures and hooks for EEST generation modes (fill and execute).
"""
from typing import List
import pytest
from ethereum_test_execution import BaseExecute, LabeledExecuteFormat
from ethereum_test_fixtures import BaseFixture, LabeledFixtureFormat
from ethereum_test_specs import BaseTest
from ethereum_test_specs.base import OpMode
from ethereum_test_types import EOA, Alloc, ChainConfig
from ..spec_version_checker.spec_version_checker import EIPSpecTestItem
ALL_FIXTURE_PARAMETERS = {
"gas_benchmark_value",
"genesis_environment",
"env",
}
"""
List of test parameters that have a default fixture value which can be
retrieved and used for the test instance if it was not explicitly specified
when calling from the test function.
All parameter names included in this list must define a fixture in one of the
plugins.
"""
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Pytest hook called after command line options have been parsed and before
    test collection begins.

    Couple of notes:
    1. Register the plugin's custom markers and process command-line options.

        Custom marker registration:
        https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers

    2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
        called before the pytest-html plugin's pytest_configure to ensure that
        it uses the modified `htmlpath` option.
    """
    # Register one marker per fixture format (fill mode) or per execute
    # format (execute mode), plus one per registered label.
    if config.pluginmanager.has_plugin("pytest_plugins.filler.filler"):
        for fixture_format in BaseFixture.formats.values():
            config.addinivalue_line(
                "markers",
                (f"{fixture_format.format_name.lower()}: {fixture_format.description}"),
            )
        for label, labeled_fixture_format in LabeledFixtureFormat.registered_labels.items():
            config.addinivalue_line(
                "markers",
                (f"{label}: {labeled_fixture_format.description}"),
            )
    elif config.pluginmanager.has_plugin("pytest_plugins.execute.execute"):
        for execute_format in BaseExecute.formats.values():
            config.addinivalue_line(
                "markers",
                (f"{execute_format.format_name.lower()}: {execute_format.description}"),
            )
        for label, labeled_execute_format in LabeledExecuteFormat.registered_labels.items():
            config.addinivalue_line(
                "markers",
                (f"{label}: {labeled_execute_format.description}"),
            )
    else:
        # This plugin is only meaningful under fill or execute.
        raise Exception("Neither the filler nor the execute plugin is loaded.")

    # Register the markers each spec type declares as supported.
    for spec_type in BaseTest.spec_types.values():
        for marker, description in spec_type.supported_markers.items():
            config.addinivalue_line(
                "markers",
                (f"{marker}: {description}"),
            )

    # Default operation mode unless another plugin already set it.
    if not hasattr(config, "op_mode"):
        config.op_mode = OpMode.CONSENSUS  # type: ignore[attr-defined]

    # Static marker registrations shared by fill and execute modes.
    config.addinivalue_line(
        "markers",
        "yul_test: a test case that compiles Yul code.",
    )
    config.addinivalue_line(
        "markers",
        "compile_yul_with(fork): Always compile Yul source using the corresponding evm version.",
    )
    config.addinivalue_line(
        "markers",
        "fill: Markers to be added in fill mode only.",
    )
    config.addinivalue_line(
        "markers",
        "execute: Markers to be added in execute mode only.",
    )
    config.addinivalue_line(
        "markers",
        "benchmark: Tests relevant to benchmarking EVMs.",
    )
    config.addinivalue_line(
        "markers",
        "stateful: Tests for stateful benchmarking scenarios.",
    )
    config.addinivalue_line(
        "markers",
        "exception_test: Negative tests that include an invalid block or transaction.",
    )
    config.addinivalue_line(
        "markers",
        "eip_checklist(item_id, eip=None): Mark a test as implementing a specific checklist item. "
        "The first positional parameter is the checklist item ID. "
        "The optional 'eip' keyword parameter specifies additional EIPs covered by the test.",
    )
    config.addinivalue_line(
        "markers",
        "derived_test: Mark a test as a derived test (E.g. a BlockchainTest that is derived "
        "from a StateTest).",
    )
    config.addinivalue_line(
        "markers",
        "tagged: Marks a static test as tagged. Tags are used to generate dynamic "
        "addresses for static tests at fill time. All tagged tests are compatible with "
        "dynamic address generation.",
    )
    config.addinivalue_line(
        "markers",
        "untagged: Marks a static test as untagged. Tags are used to generate dynamic "
        "addresses for static tests at fill time. Untagged tests are incompatible with "
        "dynamic address generation.",
    )
    config.addinivalue_line(
        "markers",
        "verify_sync: Marks a test to be run with `consume sync`, verifying blockchain "
        "engine tests and having hive clients sync after payload execution.",
    )
    config.addinivalue_line(
        "markers",
        "pre_alloc_group: Control shared pre-allocation grouping (use "
        '"separate" for isolated group or custom string for named groups)',
    )
    config.addinivalue_line(
        "markers",
        "pre_alloc_modify: Marks a test to apply plugin-specific pre_alloc_group modifiers",
    )
    config.addinivalue_line(
        "markers",
        "slow: Marks a test as slow (deselect with '-m \"not slow\"')",
    )
    config.addinivalue_line(
        "markers",
        "ported_from: Marks a test as ported from ethereum/tests",
    )
    config.addinivalue_line(
        "markers",
        "valid_for_bpo_forks: Marks a test as valid for BPO forks",
    )
    config.addinivalue_line(
        "markers",
        "mainnet: Specialty tests crafted for running on mainnet and sanity checking.",
    )
@pytest.fixture(scope="function")
def test_case_description(request: pytest.FixtureRequest) -> str:
    """
    Extract and combine docstrings from the test class and the test function.

    Returns a placeholder message when neither the class nor the function
    defines a docstring.
    """
    description_unavailable = (
        "No description available - add a docstring to the python test class or function."
    )
    test_class_doc = ""
    test_function_doc = ""
    if hasattr(request.node, "cls"):
        # Require a non-None __doc__: previously a class without a docstring
        # rendered the literal text "Test class documentation:\nNone".
        if request.cls and request.cls.__doc__:
            test_class_doc = f"Test class documentation:\n{request.cls.__doc__}"
    if hasattr(request.node, "function"):
        test_function_doc = f"{request.function.__doc__}" if request.function.__doc__ else ""
    if not test_class_doc and not test_function_doc:
        return description_unavailable
    return f"{test_class_doc}\n\n{test_function_doc}".strip()
def pytest_make_parametrize_id(config: pytest.Config, val: str, argname: str) -> str:
    """Generate readable parametrized test ids of the form ``<argname>_<value>``."""
    del config  # unused; part of the hook signature
    return "{}_{}".format(argname, val)
SPEC_TYPES_PARAMETERS: List[str] = list(BaseTest.spec_types.keys())
def pytest_runtest_call(item: pytest.Item) -> None:
    """Pytest hook called in the context of test execution."""
    # Spec-version check items are not fillers; nothing to validate.
    if isinstance(item, EIPSpecTestItem):
        return

    class InvalidFillerError(Exception):
        def __init__(self, message: str):
            super().__init__(message)

    if not isinstance(item, pytest.Function):
        return
    # A single filler must pick exactly one spec type.
    if "state_test" in item.fixturenames and "blockchain_test" in item.fixturenames:
        raise InvalidFillerError(
            "A filler should only implement either a state test or a blockchain test; not both."
        )
    # ... and it must pick at least one.
    if not any(name in SPEC_TYPES_PARAMETERS for name in item.funcargs):
        pytest.fail(
            "Test must define either one of the following parameters to "
            + "properly generate a test: "
            + ", ".join(SPEC_TYPES_PARAMETERS)
        )
# Global `sender` fixture that can be overridden by tests.
@pytest.fixture
def sender(pre: Alloc) -> EOA:
    """Fund an EOA from pre-alloc."""
    funded_account = pre.fund_eoa()
    return funded_account
@pytest.fixture(scope="session")
def chain_config() -> ChainConfig:
    """Return the default chain configuration for the session."""
    return ChainConfig()
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    group = parser.getgroup("static", "Arguments defining static filler behavior")
    group.addoption(
        "--fill-static-tests",
        action="store_true",
        dest="fill_static_tests_enabled",
        default=None,
        help="Enable reading and filling from static test files.",
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/shared/transaction_fixtures.py | src/pytest_plugins/shared/transaction_fixtures.py | """
Pytest plugin providing default transaction fixtures for each transaction type.
Each fixture can be overridden in test files to customize transaction behavior.
"""
import pytest
from ethereum_test_base_types import AccessList
from ethereum_test_forks import Fork
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import EOA, Alloc, AuthorizationTuple, Transaction, add_kzg_version
@pytest.fixture
def type_0_default_transaction(sender: EOA) -> Transaction:
    """Type 0 (legacy) default transaction available in all forks."""
    return Transaction(
        ty=0,
        sender=sender,
        gas_price=10**9,  # 1 gwei
        gas_limit=100_000,
        data=b"\x00" * 100,  # 100 bytes of zero calldata
        protected=True,  # replay-protected signature (presumably EIP-155 style)
    )
@pytest.fixture
def type_1_default_transaction(sender: EOA) -> Transaction:
    """Type 1 (access list) default transaction introduced in Berlin fork."""
    return Transaction(
        ty=1,
        sender=sender,
        gas_price=10**9,  # 1 gwei
        gas_limit=100_000,
        data=b"\x00" * 100,
        # Mix of populated and empty storage-key lists to exercise both cases.
        access_list=[
            AccessList(address=0x1234, storage_keys=[0, 1, 2]),
            AccessList(address=0x5678, storage_keys=[3, 4, 5]),
            AccessList(address=0x9ABC, storage_keys=[]),
        ],
    )
@pytest.fixture
def type_2_default_transaction(sender: EOA) -> Transaction:
    """Type 2 (dynamic fee) default transaction introduced in London fork."""
    return Transaction(
        ty=2,
        sender=sender,
        max_fee_per_gas=10**10,  # 10 gwei cap
        max_priority_fee_per_gas=10**9,  # 1 gwei tip
        gas_limit=100_000,
        data=b"\x00" * 200,
        access_list=[
            AccessList(address=0x2468, storage_keys=[10, 20, 30]),
            AccessList(address=0xACE0, storage_keys=[40, 50]),
        ],
    )
@pytest.fixture
def type_3_default_transaction(sender: EOA) -> Transaction:
    """Type 3 (blob) default transaction introduced in Cancun fork."""
    return Transaction(
        ty=3,
        sender=sender,
        max_fee_per_gas=10**10,
        max_priority_fee_per_gas=10**9,
        max_fee_per_blob_gas=10**9,  # blob-specific fee cap
        gas_limit=100_000,
        data=b"\x00" * 150,
        access_list=[
            AccessList(address=0x3690, storage_keys=[100, 200]),
            AccessList(address=0xBEEF, storage_keys=[300]),
        ],
        # Two placeholder blob commitments; add_kzg_version prepends the
        # version byte (0x01) to each hash.
        blob_versioned_hashes=add_kzg_version(
            [
                0x1111111111111111111111111111111111111111111111111111111111111111,
                0x2222222222222222222222222222222222222222222222222222222222222222,
            ],
            0x01,
        ),
    )
@pytest.fixture
def type_4_default_transaction(sender: EOA, pre: Alloc) -> Transaction:
    """Type 4 (set code) default transaction introduced in Prague fork."""
    # Create authorized accounts with funds
    auth_signer1 = pre.fund_eoa(amount=10**18)
    auth_signer2 = pre.fund_eoa(amount=10**18)

    # Create target addresses that will be authorized
    target1 = pre.deploy_contract(Op.SSTORE(0, 1))
    target2 = pre.deploy_contract(Op.SSTORE(0, 1))

    return Transaction(
        ty=4,
        sender=sender,
        max_fee_per_gas=10**10,
        max_priority_fee_per_gas=10**9,
        gas_limit=150_000,  # higher limit to cover the authorization list
        data=b"\x00" * 200,
        access_list=[
            AccessList(address=0x4567, storage_keys=[1000, 2000, 3000]),
            AccessList(address=0xCDEF, storage_keys=[4000, 5000]),
        ],
        # One authorization tuple per signer/target pair.
        authorization_list=[
            AuthorizationTuple(
                chain_id=1,
                address=target1,
                nonce=0,
                signer=auth_signer1,
            ),
            AuthorizationTuple(
                chain_id=1,
                address=target2,
                nonce=0,
                signer=auth_signer2,
            ),
        ],
    )
@pytest.fixture
def typed_transaction(request: pytest.FixtureRequest, fork: Fork) -> Transaction:
    """
    Provide a Transaction built from the parametrized transaction type.

    Works with the ``@pytest.mark.with_all_typed_transactions`` marker, which
    parametrizes the test with every transaction type the fork supports; the
    type integer arrives through the marker's parametrization.
    """
    # Guard clause: without the marker there is no parametrized tx type.
    if not hasattr(request, "param"):
        raise ValueError(
            "`typed_transaction` fixture must be used with "
            "`@pytest.mark.with_all_typed_transactions` marker"
        )
    tx_type = request.param
    fixture_name = f"type_{tx_type}_default_transaction"
    try:
        # Resolves fixtures defined either in the test file or in this plugin.
        return request.getfixturevalue(fixture_name)
    except pytest.FixtureLookupError as e:
        # Include the fork's full type list for a helpful error message.
        supported_types = fork.tx_types()
        raise NotImplementedError(
            f"Fork {fork} supports transaction type {tx_type} but "
            f"fixture '{fixture_name}' is not implemented!\n"
            f"Fork {fork} supports transaction types: {supported_types}\n"
            f"Please add the missing fixture to "
            f"src/pytest_plugins/shared/transaction_fixtures.py"
        ) from e
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/shared/helpers.py | src/pytest_plugins/shared/helpers.py | """Helpers for pytest plugins."""
from typing import Any, Dict, Tuple, Type
import pytest
from _pytest.mark.structures import ParameterSet
from ethereum_test_execution import ExecuteFormat, LabeledExecuteFormat
from ethereum_test_fixtures import FixtureFormat, LabeledFixtureFormat
from ethereum_test_tools import BaseTest
def is_help_or_collectonly_mode(config: pytest.Config) -> bool:
    """
    Check if pytest is running in a help or collect-only-like mode.

    Wraps the chain in ``bool()``: the previous implementation returned
    whichever truthy option value short-circuited the ``or`` chain, which
    violated the annotated ``bool`` return type (callers relying on
    truthiness are unaffected).
    """
    return bool(
        config.getoption("markers", default=False)
        or config.getoption("collectonly", default=False)
        or config.getoption("show_ported_from", default=False)
        or config.getoption("links_as_filled", default=False)
        or config.getoption("help", default=False)
        or config.pluginmanager.has_plugin("pytest_plugins.filler.eip_checklist")
        or config.pluginmanager.has_plugin("pytest_plugins.filler.gen_test_doc.gen_test_doc")
    )
def labeled_format_parameter_set(
    format_with_or_without_label: LabeledExecuteFormat
    | LabeledFixtureFormat
    | ExecuteFormat
    | FixtureFormat,
) -> ParameterSet:
    """
    Return a parameter set from a fixture/execute format, parsing a label if
    there's any.

    The label will be used in the test id and also will be added as a marker
    to the generated test case when filling/executing the test.

    Args:
        format_with_or_without_label: The (possibly labeled) format to wrap.

    Returns:
        A ``pytest.param`` whose id is the label (or lower-cased format name)
        and whose marks include the format name plus, for labeled formats,
        the label itself.

    """
    # Idiom fix: a single isinstance() call with a tuple of types replaces
    # the previous `isinstance(...) or isinstance(...)` pair.
    if isinstance(
        format_with_or_without_label, (LabeledExecuteFormat, LabeledFixtureFormat)
    ):
        return pytest.param(
            format_with_or_without_label.format,
            id=format_with_or_without_label.label,
            marks=[
                getattr(pytest.mark, format_with_or_without_label.format_name.lower()),
                getattr(pytest.mark, format_with_or_without_label.label.lower()),
            ],
        )
    return pytest.param(
        format_with_or_without_label,
        id=format_with_or_without_label.format_name.lower(),
        marks=[getattr(pytest.mark, format_with_or_without_label.format_name.lower())],
    )
def get_spec_format_for_item(
    params: Dict[str, Any],
) -> Tuple[Type[BaseTest], Any]:
    """Return the spec type and its parameter value for the given test item."""
    for spec_type in BaseTest.spec_types.values():
        parameter_name = spec_type.pytest_parameter_name()
        if parameter_name in params:
            return spec_type, params[parameter_name]
    raise ValueError("No spec type format found in the test item.")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/shared/benchmarking.py | src/pytest_plugins/shared/benchmarking.py | """The module contains the pytest hooks for the gas benchmark values."""
import pytest
from ethereum_test_tools import Environment
from ethereum_test_types import EnvironmentDefaults
from .execute_fill import OpMode
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command line options for gas benchmark values."""
    group = parser.getgroup("evm", "Arguments defining evm executable behavior")
    group.addoption(
        "--gas-benchmark-values",
        action="store",
        dest="gas_benchmark_value",
        type=str,
        default=None,
        help="Specify gas benchmark values for tests as a comma-separated list.",
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """Switch the fill/execute mode to benchmarking when gas values are given."""
    if not config.getoption("gas_benchmark_value"):
        return
    config.op_mode = OpMode.BENCHMARKING  # type: ignore[attr-defined]
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Parametrize tests over the requested gas benchmark values."""
    if "gas_benchmark_value" not in metafunc.fixturenames:
        return
    raw_option = metafunc.config.getoption("gas_benchmark_value")
    if not raw_option:
        return
    # The option holds whole-millions of gas, e.g. "10,50" -> 10M and 50M.
    gas_values = [int(entry.strip()) for entry in raw_option.split(",")]
    parameters = [
        pytest.param(value * 1_000_000, id=f"benchmark-gas-value_{value}M")
        for value in gas_values
    ]
    metafunc.parametrize("gas_benchmark_value", parameters, scope="function")
@pytest.fixture(scope="function")
def gas_benchmark_value(request: pytest.FixtureRequest) -> int:
    """Return the gas benchmark value for the current test."""
    try:
        # Present only when parametrized via --gas-benchmark-values.
        return request.param
    except AttributeError:
        return EnvironmentDefaults.gas_limit
BENCHMARKING_MAX_GAS = 1_000_000_000_000
@pytest.fixture
def genesis_environment(request: pytest.FixtureRequest) -> Environment:  # noqa: D103
    """
    Return an Environment instance whose gas limit depends on whether the
    test is marked as a benchmark.
    """
    if request.node.get_closest_marker("benchmark") is None:
        return Environment()
    return Environment(gas_limit=BENCHMARKING_MAX_GAS)
@pytest.fixture
def env(request: pytest.FixtureRequest) -> Environment:  # noqa: D103
    """
    Return an Environment instance whose gas limit depends on whether the
    test is marked as a benchmark.
    """
    is_benchmark = request.node.get_closest_marker("benchmark") is not None
    return Environment(gas_limit=BENCHMARKING_MAX_GAS) if is_benchmark else Environment()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/solc/solc.py | src/pytest_plugins/solc/solc.py | """Pytest plugin for configuring and verifying the solc compiler."""
import subprocess
from shutil import which
import pytest
from pytest_metadata.plugin import metadata_key
from semver import Version
SOLC_EXPECTED_MIN_VERSION: Version = Version.parse("0.8.24")
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    group = parser.getgroup("solc", "Arguments defining the solc executable")
    group.addoption(
        "--solc-bin",
        action="store",
        dest="solc_bin",
        type=str,
        default=None,
        help=(
            "Path to a solc executable (for Yul source compilation). Default: solc binary in PATH."
        ),
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Ensure that solc is available, determine its version and verify it meets
    the minimum supported version.

    Exits the session with a usage error when the binary is missing, its
    version cannot be determined or parsed, or the version is too old.
    The resolved version is stored in the session metadata and on `config`.
    """
    solc_bin = config.getoption("solc_bin")

    # Use provided solc binary or find it in PATH
    if solc_bin:
        if not which(solc_bin):
            pytest.exit(
                f"Specified solc binary not found: {solc_bin}",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )
    else:
        solc_bin = which("solc")
        if not solc_bin:
            pytest.exit(
                "solc binary not found in PATH. Please install solc and ensure it's in your PATH.",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )

    # Get solc version using subprocess
    try:
        result = subprocess.run(
            [solc_bin, "--version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            check=True,
            # Fix: a timeout is required for the TimeoutExpired handler below
            # to be reachable; previously no timeout was passed, so a hanging
            # solc binary would block the session forever.
            timeout=30,
        )
    except subprocess.CalledProcessError as e:
        pytest.exit(
            f"Failed to get solc version. Command output: {e.stdout}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )
    except subprocess.TimeoutExpired:
        pytest.exit("Timeout while getting solc version.", returncode=pytest.ExitCode.USAGE_ERROR)
    except Exception as e:
        pytest.exit(
            f"Unexpected error while getting solc version: {e}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Parse version from output
    version_output = result.stdout
    version_line = None
    # Look for version in output (format: "Version: X.Y.Z+commit.hash")
    for line in version_output.split("\n"):
        if line.startswith("Version:"):
            version_line = line
            break
    if not version_line:
        pytest.exit(
            f"Could not parse solc version from output:\n{version_output}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Extract version number
    try:
        # --version format is typically something like
        # "0.8.24+commit.e11b9ed9.Linux.g++"
        version_str = version_line.split()[1].split("+")[0]
        solc_version_semver = Version.parse(version_str)
    except (IndexError, ValueError) as e:
        pytest.exit(
            f"Failed to parse solc version from: {version_line}\nError: {e}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Store version in metadata
    if "Tools" not in config.stash[metadata_key]:
        config.stash[metadata_key]["Tools"] = {
            "solc": str(solc_version_semver),
        }
    else:
        config.stash[metadata_key]["Tools"]["solc"] = str(solc_version_semver)

    # Check minimum version requirement, stripping any prerelease suffix
    # ("-...") before the comparison.
    solc_version_semver = Version.parse(str(solc_version_semver).split()[0].split("-")[0])
    if solc_version_semver < SOLC_EXPECTED_MIN_VERSION:
        pytest.exit(
            f"Unsupported solc version: {solc_version_semver}. Minimum required version is "
            f"{SOLC_EXPECTED_MIN_VERSION}",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Store for later use
    config.solc_version = solc_version_semver  # type: ignore
    config.option.solc_bin = solc_bin  # save for fixture

    if config.getoption("verbose") > 0:
        print(f"Using solc version {solc_version_semver} from {solc_bin}")
@pytest.fixture(autouse=True, scope="session")
def solc_bin(request: pytest.FixtureRequest) -> str | None:
    """Return the solc binary path, preferring the configured option over PATH lookup."""
    configured = request.config.getoption("solc_bin")
    if configured:
        return configured
    return which("solc")
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config, start_path: object) -> list[str] | None:
    """Add solc version and path lines to pytest's console output header."""
    del start_path
    if config.option.collectonly:
        # No header needed when only collecting tests.
        return None
    version = config.stash[metadata_key]["Tools"]["solc"]
    binary_path = config.option.solc_bin or which("solc")
    header_lines = [f"solc: {version}", f"solc path: {binary_path}"]
    return header_lines
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/solc/__init__.py | src/pytest_plugins/solc/__init__.py | """A pytest plugin that provides solc functionality to fill/execute tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/recover.py | src/pytest_plugins/execute/recover.py | """Pytest plugin to recover funds from a failed remote execution."""
import pytest
from ethereum_test_base_types import Address, HexNumber
from ethereum_test_types import EOA
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    recover_group = parser.getgroup("execute", "Arguments defining fund recovery behavior.")
    # Data-driven registration keeps the three options uniform.
    option_specs = [
        (
            "--start-eoa-index",
            dict(
                action="store",
                dest="start_eoa_index",
                type=HexNumber,
                required=True,
                default=None,
                help="Starting private key index to use for EOA generation.",
            ),
        ),
        (
            "--destination",
            dict(
                action="store",
                dest="destination",
                type=Address,
                required=True,
                default=None,
                help="Address to send the recovered funds to.",
            ),
        ),
        (
            "--max-index",
            dict(
                action="store",
                dest="max_index",
                type=int,
                default=100,
                help="Maximum private key index to use for EOA generation.",
            ),
        ),
    ]
    for flag, kwargs in option_specs:
        recover_group.addoption(flag, **kwargs)
@pytest.fixture(scope="session")
def destination(request: pytest.FixtureRequest) -> Address:
    """Get the destination address."""
    configured_destination = request.config.getoption("destination")
    return configured_destination
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """Pytest hook used to dynamically generate test cases."""
    max_index = metafunc.config.option.max_index
    start_eoa_index = metafunc.config.option.start_eoa_index
    print(f"Generating {max_index} test cases starting from index {start_eoa_index}")
    # Build the (index, EOA) pairs and their test ids in a single pass.
    parameter_sets = []
    test_ids = []
    for index in range(max_index):
        eoa = EOA(key=start_eoa_index + index)
        parameter_sets.append((index, eoa))
        test_ids.append(f"{index}-{eoa}")
    metafunc.parametrize(["index", "eoa"], parameter_sets, ids=test_ids)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/pre_alloc.py | src/pytest_plugins/execute/pre_alloc.py | """Pre-allocation fixtures using for test filling."""
from itertools import count
from pathlib import Path
from random import randint
from typing import Any, Dict, Generator, Iterator, List, Literal, Self, Tuple
import pytest
import yaml
from pydantic import PrivateAttr
from ethereum_test_base_types import (
Bytes,
EthereumTestRootModel,
Number,
StorageRootType,
ZeroPaddedHexNumber,
)
from ethereum_test_base_types.conversions import (
BytesConvertible,
FixedSizeBytesConvertible,
NumberConvertible,
)
from ethereum_test_forks import Fork
from ethereum_test_rpc import EthRPC
from ethereum_test_rpc.rpc_types import TransactionByHashResponse
from ethereum_test_tools import (
EOA,
Account,
Address,
AuthorizationTuple,
Initcode,
Storage,
Transaction,
)
from ethereum_test_tools import Alloc as BaseAlloc
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import ChainConfig, TransactionTestMetadata
from ethereum_test_types.eof.v1 import Container
from ethereum_test_vm import Bytecode, EVMCodeType, Opcodes
MAX_BYTECODE_SIZE = 24576
MAX_INITCODE_SIZE = MAX_BYTECODE_SIZE * 2
class AddressStubs(EthereumTestRootModel[Dict[str, Address]]):
    """
    Address stubs class.

    The key represents the label that is used in the test to tag the contract,
    and the value is the address where the contract is already located at in
    the current network.
    """

    root: Dict[str, Address]

    def __contains__(self, item: str) -> bool:
        """Check if an item is in the address stubs."""
        return self.root.__contains__(item)

    def __getitem__(self, item: str) -> Address:
        """Get an item from the address stubs."""
        return self.root.__getitem__(item)

    @classmethod
    def model_validate_json_or_file(cls, json_data_or_path: str) -> Self:
        """
        Try to load from file if the value resembles a path that ends with
        .json/.yml and the file exists.
        """
        if json_data_or_path.lower().endswith((".json", ".yml", ".yaml")):
            path = Path(json_data_or_path)
            if path.is_file():
                suffix = path.suffix.lower()
                if suffix == ".json":
                    return cls.model_validate_json(path.read_text())
                if suffix in (".yml", ".yaml"):
                    parsed = yaml.safe_load(path.read_text())
                    # An empty YAML document parses to None -> empty stubs.
                    if parsed is None:
                        return cls(root={})
                    return cls.model_validate(parsed)
        # A blank string (e.g. unset option) means "no stubs".
        if not json_data_or_path.strip():
            return cls(root={})
        return cls.model_validate_json(json_data_or_path)
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    pre_alloc_group = parser.getgroup(
        "pre_alloc", "Arguments defining pre-allocation behavior during test execution"
    )
    # Data-driven registration; order matters only for --help output.
    option_specs = [
        (
            "--eoa-start",
            dict(
                action="store",
                dest="eoa_iterator_start",
                default=randint(0, 2**256),
                type=int,
                help="The start private key from which tests will deploy EOAs.",
            ),
        ),
        (
            "--evm-code-type",
            dict(
                action="store",
                dest="evm_code_type",
                default=None,
                type=EVMCodeType,
                choices=list(EVMCodeType),
                help="Type of EVM code to deploy in each test by default.",
            ),
        ),
        (
            "--eoa-fund-amount-default",
            dict(
                action="store",
                dest="eoa_fund_amount_default",
                default=10**18,
                type=int,
                help="The default amount of wei to fund each EOA in each test with.",
            ),
        ),
        (
            "--skip-cleanup",
            dict(
                action="store_true",
                dest="skip_cleanup",
                default=False,
                help="Skip cleanup phase after each test.",
            ),
        ),
    ]
    for flag, kwargs in option_specs:
        pre_alloc_group.addoption(flag, **kwargs)
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config) -> list[str]:
    """
    Pytest hook called to obtain the report header.

    Returns a single bold line announcing the EOA start seed so a failed
    session can be re-run (or recovered) with the same keys.
    """
    bold = "\033[1m"
    # Bug fix: the previous reset sequence ("\033[39;49m") only restores the
    # default foreground/background colors and leaves the bold attribute
    # (enabled by "\033[1m") active for all subsequent terminal output.
    # SGR 0 resets all attributes (ECMA-48).
    reset = "\033[0m"
    eoa_start = config.getoption("eoa_iterator_start")
    header = [
        (bold + f"Start seed for EOA: {hex(eoa_start)} " + reset),
    ]
    return header
@pytest.fixture(scope="session")
def address_stubs(
    request: pytest.FixtureRequest,
) -> AddressStubs | None:
    """
    Return an address stubs object.

    If the address stubs are not supported by the subcommand, return None.
    """
    # Default of None covers subcommands that never registered the option.
    stubs = request.config.getoption("address_stubs", None)
    return stubs
@pytest.fixture(scope="session")
def skip_cleanup(request: pytest.FixtureRequest) -> bool:
    """Return whether to skip cleanup phase after each test."""
    skip = request.config.getoption("skip_cleanup")
    return skip
@pytest.fixture(scope="session")
def eoa_iterator(request: pytest.FixtureRequest) -> Iterator[EOA]:
    """Return an iterator that generates EOAs."""
    start_key = request.config.getoption("eoa_iterator_start")
    print(f"Starting EOA index: {hex(start_key)}")

    def _generate() -> Iterator[EOA]:
        # Consecutive private keys starting from the configured seed.
        for key in count(start=start_key):
            yield EOA(key=key, nonce=0)

    return _generate()
class Alloc(BaseAlloc):
    """
    A custom class that inherits from the original Alloc class.

    Instead of recording a static pre-allocation, each request (contract
    deployment, EOA funding) is fulfilled by sending real transactions to a
    live network through the attached `EthRPC`, while still recording the
    resulting accounts in the base allocation for later verification.
    """

    # Pydantic private attributes holding the execution context; they are not
    # part of the serialized allocation.
    _fork: Fork = PrivateAttr()
    _sender: EOA = PrivateAttr()
    _eth_rpc: EthRPC = PrivateAttr()
    # All transactions sent during setup, in send order.
    _txs: List[Transaction] = PrivateAttr(default_factory=list)
    _deployed_contracts: List[Tuple[Address, Bytes]] = PrivateAttr(default_factory=list)
    # EOAs funded during the test; used by the `pre` fixture for refunds.
    _funded_eoa: List[EOA] = PrivateAttr(default_factory=list)
    _evm_code_type: EVMCodeType | None = PrivateAttr(None)
    _chain_id: int = PrivateAttr()
    _node_id: str = PrivateAttr("")
    _address_stubs: AddressStubs = PrivateAttr()

    def __init__(
        self,
        *args: Any,
        fork: Fork,
        sender: EOA,
        eth_rpc: EthRPC,
        eoa_iterator: Iterator[EOA],
        chain_id: int,
        eoa_fund_amount_default: int,
        evm_code_type: EVMCodeType | None = None,
        node_id: str = "",
        address_stubs: AddressStubs | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the pre-alloc with the given parameters."""
        super().__init__(*args, **kwargs)
        self._fork = fork
        self._sender = sender
        self._eth_rpc = eth_rpc
        # NOTE(review): `_eoa_iterator` and `_eoa_fund_amount_default` are
        # assigned without a matching `PrivateAttr` declaration above —
        # presumably the base model allows undeclared private attrs; confirm.
        self._eoa_iterator = eoa_iterator
        self._evm_code_type = evm_code_type
        self._chain_id = chain_id
        self._eoa_fund_amount_default = eoa_fund_amount_default
        self._node_id = node_id
        self._address_stubs = address_stubs or AddressStubs(root={})

    # always refresh _sender nonce from RPC ("pending") before building tx
    def _refresh_sender_nonce(self) -> None:
        """
        Synchronize self._sender.nonce with the node's view.

        Prefer 'pending' to account for in-flight transactions.
        """
        try:
            rpc_nonce = self._eth_rpc.get_transaction_count(self._sender, block_number="pending")
        except TypeError:
            # If EthRPC.get_transaction_count has no 'block' kwarg
            rpc_nonce = self._eth_rpc.get_transaction_count(self._sender)
        self._sender.nonce = Number(rpc_nonce)

    def __setitem__(
        self,
        address: Address | FixedSizeBytesConvertible,
        account: Account | None,
    ) -> None:
        """
        Set account associated with an address.

        Raises:
            ValueError: always — accounts can only be created via the helper
                methods (`deploy_contract`, `fund_eoa`, ...) in execute mode,
                because every account must be backed by a real transaction.
        """
        raise ValueError("Tests are not allowed to set pre-alloc items in execute mode")

    def code_pre_processor(
        self,
        code: Bytecode | Container,
        *,
        evm_code_type: EVMCodeType | None,
    ) -> Bytecode | Container:
        """
        Pre-processes the code before setting it.

        For EOF_V1 targets, wraps legacy `Bytecode` in an EOF code container,
        appending STOP first if the bytecode does not already terminate.
        """
        if evm_code_type is None:
            # Fall back to the session-wide default code type.
            evm_code_type = self._evm_code_type
        if evm_code_type == EVMCodeType.EOF_V1:
            if not isinstance(code, Container):
                if isinstance(code, Bytecode) and not code.terminating:
                    return Container.Code(code + Opcodes.STOP)
                return Container.Code(code)
        return code

    def deploy_contract(
        self,
        code: BytesConvertible,
        *,
        storage: Storage | StorageRootType | None = None,
        balance: NumberConvertible = 0,
        nonce: NumberConvertible = 1,
        address: Address | None = None,
        evm_code_type: EVMCodeType | None = None,
        label: str | None = None,
        stub: str | None = None,
    ) -> Address:
        """
        Deploy a contract to the allocation.

        Sends a real contract-creation transaction from the worker's sender
        key and records the resulting account. If `stub` names an entry in the
        configured address stubs, no deployment happens: the already-deployed
        contract's state is read from the network instead.

        Raises:
            ValueError: if `stub` is unknown or the stub address has no code.
        """
        if storage is None:
            storage = {}
        # Choosing the address is impossible when deploying via a live tx.
        assert address is None, "address parameter is not supported"
        if not isinstance(storage, Storage):
            storage = Storage(storage)  # type: ignore
        if stub is not None and self._address_stubs is not None:
            if stub not in self._address_stubs:
                raise ValueError(f"Stub name {stub} not found in address stubs")
            contract_address = self._address_stubs[stub]
            # Mirror the live contract's state into the recorded allocation.
            code = self._eth_rpc.get_code(contract_address)
            if code == b"":
                raise ValueError(f"Stub {stub} at {contract_address} has no code")
            balance = self._eth_rpc.get_balance(contract_address)
            nonce = self._eth_rpc.get_transaction_count(contract_address)
            super().__setitem__(
                contract_address,
                Account(
                    nonce=nonce,
                    balance=balance,
                    code=code,
                    storage={},
                ),
            )
            return contract_address
        initcode_prefix = Bytecode()
        # Intrinsic tx cost (21k) plus contract-creation surcharge (32k).
        deploy_gas_limit = 21_000 + 32_000
        if len(storage.root) > 0:
            # Initial storage is set by prepending SSTOREs to the initcode;
            # budget ~22.6k gas per slot for the cold stores.
            initcode_prefix += sum(Op.SSTORE(key, value) for key, value in storage.root.items())
            deploy_gas_limit += len(storage.root) * 22_600
        assert isinstance(code, Bytecode) or isinstance(code, Container), (
            f"incompatible code type: {type(code)}"
        )
        code = self.code_pre_processor(code, evm_code_type=evm_code_type)
        assert len(code) <= MAX_BYTECODE_SIZE, f"code too large: {len(code)} > {MAX_BYTECODE_SIZE}"
        # Code-deposit cost: 200 gas per deployed byte.
        deploy_gas_limit += len(bytes(code)) * 200
        initcode: Bytecode | Container
        if evm_code_type == EVMCodeType.EOF_V1:
            assert isinstance(code, Container)
            initcode = Container.Init(deploy_container=code, initcode_prefix=initcode_prefix)
        else:
            initcode = Initcode(deploy_code=code, initcode_prefix=initcode_prefix)
        memory_expansion_gas_calculator = self._fork.memory_expansion_gas_calculator()
        deploy_gas_limit += memory_expansion_gas_calculator(new_bytes=len(bytes(initcode)))
        assert len(initcode) <= MAX_INITCODE_SIZE, (
            f"initcode too large {len(initcode)} > {MAX_INITCODE_SIZE}"
        )
        calldata_gas_calculator = self._fork.calldata_gas_calculator(block_number=0, timestamp=0)
        deploy_gas_limit += calldata_gas_calculator(data=initcode)
        # Limit the gas limit
        # Double the estimate as a safety margin, capped at 30M gas.
        deploy_gas_limit = min(deploy_gas_limit * 2, 30_000_000)
        print(f"Deploying contract with gas limit: {deploy_gas_limit}")
        self._refresh_sender_nonce()
        deploy_tx = Transaction(
            sender=self._sender,
            to=None,
            data=initcode,
            value=balance,
            gas_limit=deploy_gas_limit,
        ).with_signature_and_sender()
        # Metadata tags the tx for debugging/tracing of the setup phase.
        deploy_tx.metadata = TransactionTestMetadata(
            test_id=self._node_id,
            phase="setup",
            action="deploy_contract",
            target=label,
            tx_index=len(self._txs),
        )
        self._eth_rpc.send_transaction(deploy_tx)
        self._txs.append(deploy_tx)
        contract_address = deploy_tx.created_contract
        self._deployed_contracts.append((contract_address, Bytes(code)))
        assert Number(nonce) >= 1, "impossible to deploy contract with nonce lower than one"
        super().__setitem__(
            contract_address,
            Account(
                nonce=nonce,
                balance=balance,
                code=code,
                storage=storage,
            ),
        )
        contract_address.label = label
        return contract_address

    def fund_eoa(
        self,
        amount: NumberConvertible | None = None,
        label: str | None = None,
        storage: Storage | None = None,
        delegation: Address | Literal["Self"] | None = None,
        nonce: NumberConvertible | None = None,
    ) -> EOA:
        """
        Add a previously unused EOA to the pre-alloc with the balance specified
        by `amount`.

        Optionally pre-sets the EOA's storage (via a temporary set-code
        delegation to an SSTORE helper contract) and/or installs a code
        delegation (EIP-7702 style authorization tuples).
        """
        assert nonce is None, "nonce parameter is not supported for execute"
        eoa = next(self._eoa_iterator)
        eoa.label = label
        # Send a transaction to fund the EOA
        if amount is None:
            amount = self._eoa_fund_amount_default
        fund_tx: Transaction | None = None
        if delegation is not None or storage is not None:
            if storage is not None:
                # Deploy a helper whose code writes the requested slots, then
                # delegate the EOA to it and call the EOA so the SSTOREs run
                # in the EOA's own storage context.
                sstore_address = self.deploy_contract(
                    code=(
                        sum(Op.SSTORE(key, value) for key, value in storage.root.items()) + Op.STOP
                    )
                )
                self._refresh_sender_nonce()
                set_storage_tx = Transaction(
                    sender=self._sender,
                    to=eoa,
                    authorization_list=[
                        AuthorizationTuple(
                            chain_id=self._chain_id,
                            address=sstore_address,
                            nonce=eoa.nonce,
                            signer=eoa,
                        ),
                    ],
                    gas_limit=100_000,
                ).with_signature_and_sender()
                # Each authorization the EOA signs consumes one of its nonces.
                eoa.nonce = Number(eoa.nonce + 1)
                set_storage_tx.metadata = TransactionTestMetadata(
                    test_id=self._node_id,
                    phase="setup",
                    action="eoa_storage_set",
                    target=label,
                    tx_index=len(self._txs),
                )
                self._eth_rpc.send_transaction(set_storage_tx)
                self._txs.append(set_storage_tx)
            self._refresh_sender_nonce()
            if delegation is not None:
                if not isinstance(delegation, Address) and delegation == "Self":
                    delegation = eoa
                # TODO: This tx has side-effects on the EOA state because of
                # the delegation
                fund_tx = Transaction(
                    sender=self._sender,
                    to=eoa,
                    value=amount,
                    authorization_list=[
                        AuthorizationTuple(
                            chain_id=self._chain_id,
                            address=delegation,
                            nonce=eoa.nonce,
                            signer=eoa,
                        ),
                    ],
                    gas_limit=100_000,
                ).with_signature_and_sender()
                eoa.nonce = Number(eoa.nonce + 1)
            else:
                # Storage was set but no delegation requested: undo the
                # temporary delegation installed by the storage step.
                fund_tx = Transaction(
                    sender=self._sender,
                    to=eoa,
                    value=amount,
                    authorization_list=[
                        AuthorizationTuple(
                            chain_id=self._chain_id,
                            # Reset delegation to an address without code
                            address=0,
                            nonce=eoa.nonce,
                            signer=eoa,
                        ),
                    ],
                    gas_limit=100_000,
                ).with_signature_and_sender()
                eoa.nonce = Number(eoa.nonce + 1)
        else:
            if Number(amount) > 0:
                # Plain value transfer; skipped entirely for zero amounts.
                self._refresh_sender_nonce()
                fund_tx = Transaction(
                    sender=self._sender,
                    to=eoa,
                    value=amount,
                ).with_signature_and_sender()
        if fund_tx is not None:
            fund_tx.metadata = TransactionTestMetadata(
                test_id=self._node_id,
                phase="setup",
                action="fund_eoa",
                target=label,
                tx_index=len(self._txs),
            )
            self._eth_rpc.send_transaction(fund_tx)
            self._txs.append(fund_tx)
        # Record the account even when no tx was sent (zero-amount case).
        super().__setitem__(
            eoa,
            Account(
                nonce=eoa.nonce,
                balance=amount,
            ),
        )
        self._funded_eoa.append(eoa)
        return eoa

    def fund_address(self, address: Address, amount: NumberConvertible) -> None:
        """
        Fund an address with a given amount.

        If the address is already present in the pre-alloc the amount will be
        added to its existing balance.
        """
        self._refresh_sender_nonce()
        fund_tx = Transaction(
            sender=self._sender,
            to=address,
            value=amount,
        ).with_signature_and_sender()
        fund_tx.metadata = TransactionTestMetadata(
            test_id=self._node_id,
            phase="setup",
            action="fund_address",
            target=address.label,
            tx_index=len(self._txs),
        )
        self._eth_rpc.send_transaction(fund_tx)
        self._txs.append(fund_tx)
        if address in self:
            # Known account: accumulate onto the recorded balance.
            account = self[address]
            if account is not None:
                current_balance = account.balance or 0
                account.balance = ZeroPaddedHexNumber(current_balance + Number(amount))
                return
        super().__setitem__(address, Account(balance=amount))

    def empty_account(self) -> Address:
        """
        Add a previously unused account guaranteed to be empty to the
        pre-alloc.

        This ensures the account has:
        - Zero balance
        - Zero nonce
        - No code
        - No storage

        This is different from precompiles or system contracts. The function
        does not send any transactions, ensuring that the account remains
        "empty."

        Returns:
            Address: The address of the created empty account.
        """
        eoa = next(self._eoa_iterator)
        super().__setitem__(
            eoa,
            Account(
                nonce=0,
                balance=0,
            ),
        )
        return Address(eoa)

    def wait_for_transactions(self) -> List[TransactionByHashResponse]:
        """Wait for all transactions to be included in blocks."""
        return self._eth_rpc.wait_for_transactions(self._txs)
@pytest.fixture(autouse=True)
def evm_code_type(request: pytest.FixtureRequest) -> EVMCodeType:
    """Return default EVM code type for all tests (LEGACY)."""
    override = request.config.getoption("evm_code_type")
    if override is None:
        return EVMCodeType.LEGACY
    assert type(override) is EVMCodeType, "Invalid EVM code type"
    return override
@pytest.fixture(scope="session")
def eoa_fund_amount_default(request: pytest.FixtureRequest) -> int:
    """Get the gas price for the funding transactions."""
    default_amount = request.config.getoption("eoa_fund_amount_default")
    return default_amount
@pytest.fixture(autouse=True, scope="function")
def pre(
    fork: Fork,
    sender_key: EOA,
    eoa_iterator: Iterator[EOA],
    eth_rpc: EthRPC,
    evm_code_type: EVMCodeType,
    chain_config: ChainConfig,
    eoa_fund_amount_default: int,
    default_gas_price: int,
    address_stubs: AddressStubs | None,
    skip_cleanup: bool,
    request: pytest.FixtureRequest,
) -> Generator[Alloc, None, None]:
    """
    Return default pre allocation for all tests (Empty alloc).

    After the test runs, unless `--skip-cleanup` was given, the remaining
    balance of every EOA funded during the test is swept back to the worker's
    sender key, and the sender's total spend is printed.
    """
    # FIXME: Static tests dont have a fork so we need to get it from the node.
    actual_fork = fork
    if actual_fork is None:
        assert hasattr(request.node, "fork")
        actual_fork = request.node.fork
    # Record the starting balance of the sender
    sender_test_starting_balance = eth_rpc.get_balance(sender_key)
    # Prepare the pre-alloc
    pre = Alloc(
        # Bug fix: previously the possibly-None `fork` was passed here, making
        # the `actual_fork` fallback (computed above for static tests) dead
        # code; Alloc needs a real fork for its gas calculators.
        fork=actual_fork,
        sender=sender_key,
        eth_rpc=eth_rpc,
        eoa_iterator=eoa_iterator,
        evm_code_type=evm_code_type,
        chain_id=chain_config.chain_id,
        eoa_fund_amount_default=eoa_fund_amount_default,
        node_id=request.node.nodeid,
        address_stubs=address_stubs,
    )
    # Yield the pre-alloc for usage during the test
    yield pre
    if not skip_cleanup:
        # Refund all EOAs (regardless of whether the test passed or failed)
        refund_txs = []
        for idx, eoa in enumerate(pre._funded_eoa):
            remaining_balance = eth_rpc.get_balance(eoa)
            # Re-sync the nonce from the node in case the test used the EOA.
            eoa.nonce = Number(eth_rpc.get_transaction_count(eoa))
            refund_gas_limit = 21_000
            tx_cost = refund_gas_limit * default_gas_price
            if remaining_balance < tx_cost:
                # Not worth sweeping: balance cannot cover the refund tx.
                continue
            refund_tx = Transaction(
                sender=eoa,
                to=sender_key,
                gas_limit=21_000,
                gas_price=default_gas_price,
                value=remaining_balance - tx_cost,
            ).with_signature_and_sender()
            refund_tx.metadata = TransactionTestMetadata(
                test_id=request.node.nodeid,
                phase="cleanup",
                action="refund_from_eoa",
                target=eoa.label,
                tx_index=idx,
            )
            refund_txs.append(refund_tx)
        eth_rpc.send_wait_transactions(refund_txs)
    # Record the ending balance of the sender
    sender_test_ending_balance = eth_rpc.get_balance(sender_key)
    used_balance = sender_test_starting_balance - sender_test_ending_balance
    print(f"Used balance={used_balance / 10**18:.18f}")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/sender.py | src/pytest_plugins/execute/sender.py | """Sender mutex class that allows sending transactions one at a time."""
from pathlib import Path
from typing import Generator, Iterator
import pytest
from filelock import FileLock
from pytest_metadata.plugin import metadata_key
from ethereum_test_base_types import Number, Wei
from ethereum_test_rpc import EthRPC
from ethereum_test_tools import EOA, Transaction
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest."""
    sender_group = parser.getgroup(
        "sender",
        "Arguments for the sender key fixtures",
    )
    # Data-driven registration of the three sender options.
    option_specs = [
        (
            "--seed-account-sweep-amount",
            dict(
                action="store",
                dest="seed_account_sweep_amount",
                type=Wei,
                default=None,
                help="Amount of wei to sweep from the seed account to the sender account. "
                "Default=None (Entire balance)",
            ),
        ),
        (
            "--sender-funding-txs-gas-price",
            dict(
                action="store",
                dest="sender_funding_transactions_gas_price",
                type=Wei,
                default=None,
                help="Gas price set for the funding transactions of each worker's sender key.",
            ),
        ),
        (
            "--sender-fund-refund-gas-limit",
            dict(
                action="store",
                dest="sender_fund_refund_gas_limit",
                type=Wei,
                default=21_000,
                help="Gas limit set for the funding transactions of each worker's sender key.",
            ),
        ),
    ]
    for flag, kwargs in option_specs:
        sender_group.addoption(flag, **kwargs)
@pytest.fixture(scope="session")
def sender_funding_transactions_gas_price(
    request: pytest.FixtureRequest, default_gas_price: int
) -> int:
    """Get the gas price for the funding transactions."""
    configured: int | None = request.config.option.sender_funding_transactions_gas_price
    # Fall back to the session default when the option was not given.
    effective_price = default_gas_price if configured is None else configured
    assert effective_price > 0, "Gas price must be greater than 0"
    return effective_price
@pytest.fixture(scope="session")
def sender_fund_refund_gas_limit(request: pytest.FixtureRequest) -> int:
    """Get the gas limit of the funding transactions."""
    return request.config.getoption("sender_fund_refund_gas_limit")
@pytest.fixture(scope="session")
def seed_account_sweep_amount(request: pytest.FixtureRequest) -> int | None:
    """Get the seed account sweep amount."""
    return request.config.getoption("seed_account_sweep_amount")
@pytest.fixture(scope="session")
def sender_key_initial_balance(
    seed_sender: EOA,
    eth_rpc: EthRPC,
    session_temp_folder: Path,
    worker_count: int,
    sender_funding_transactions_gas_price: int,
    sender_fund_refund_gas_limit: int,
    seed_account_sweep_amount: int | None,
) -> int:
    """
    Calculate the initial balance of each sender key.

    The way to do this is to fetch the seed sender balance and divide it by the
    number of workers. This way we can ensure that each sender key has the same
    initial balance.

    We also only do this once per session, because if we try to fetch the
    balance again, it could be that another worker has already sent a
    transaction and the balance is different.

    It's not really possible to calculate the transaction costs of each test
    that each worker is going to run, so we can't really calculate the initial
    balance of each sender key based on that.
    """
    balance_file = session_temp_folder / "sender_key_initial_balance"
    lock_file = session_temp_folder / "sender_key_initial_balance.lock"
    # Only the first worker computes the value; the rest read the cached file.
    with FileLock(lock_file):
        if balance_file.exists():
            return int(balance_file.read_text())
        if seed_account_sweep_amount is None:
            seed_account_sweep_amount = eth_rpc.get_balance(seed_sender)
        per_worker_balance = seed_account_sweep_amount // worker_count
        assert per_worker_balance > 100, "Seed sender balance too low"
        # Subtract the cost of the funding transaction sent by the seed sender.
        initial_balance = per_worker_balance - (
            sender_fund_refund_gas_limit * sender_funding_transactions_gas_price
        )
        balance_file.write_text(str(initial_balance))
    return initial_balance
@pytest.fixture(scope="session")
def sender_key(
    request: pytest.FixtureRequest,
    seed_sender: EOA,
    sender_key_initial_balance: int,
    eoa_iterator: Iterator[EOA],
    eth_rpc: EthRPC,
    session_temp_folder: Path,
    sender_funding_transactions_gas_price: int,
    sender_fund_refund_gas_limit: int,
) -> Generator[EOA, None, None]:
    """
    Get the sender keys for all tests.

    Funds a fresh EOA from the shared seed sender before the session's tests
    run, and on teardown sweeps the leftover balance back to the seed sender
    and records the amount spent in the report metadata.

    The seed sender is going to be shared among different processes, so we need
    to lock it before we produce each funding transaction.
    """
    # For the seed sender we do need to keep track of the nonce because it is
    # shared among different processes, and there might not be a new block
    # produced between the transactions.
    seed_sender_nonce_file_name = "seed_sender_nonce"
    seed_sender_lock_file_name = f"{seed_sender_nonce_file_name}.lock"
    seed_sender_nonce_file = session_temp_folder / seed_sender_nonce_file_name
    seed_sender_lock_file = session_temp_folder / seed_sender_lock_file_name
    sender = next(eoa_iterator)
    # prepare funding transaction
    with FileLock(seed_sender_lock_file):
        # Pick up the nonce left behind by whichever worker funded last.
        if seed_sender_nonce_file.exists():
            with seed_sender_nonce_file.open("r") as f:
                seed_sender.nonce = Number(f.read())
        fund_tx = Transaction(
            sender=seed_sender,
            to=sender,
            gas_limit=sender_fund_refund_gas_limit,
            gas_price=sender_funding_transactions_gas_price,
            value=sender_key_initial_balance,
        ).with_signature_and_sender()
        eth_rpc.send_transaction(fund_tx)
        # Persist the updated nonce for the next worker while still holding
        # the lock.
        with seed_sender_nonce_file.open("w") as f:
            f.write(str(seed_sender.nonce))
    # Wait for inclusion outside the lock so other workers can proceed.
    eth_rpc.wait_for_transaction(fund_tx)

    yield sender

    # refund seed sender
    remaining_balance = eth_rpc.get_balance(sender)

    sender.nonce = Number(eth_rpc.get_transaction_count(sender))
    used_balance = sender_key_initial_balance - remaining_balance
    request.config.stash[metadata_key]["Senders"][str(sender)] = (
        f"Used balance={used_balance / 10**18:.18f}"
    )

    refund_gas_limit = sender_fund_refund_gas_limit
    # double the gas price to ensure the transaction is included and overwrites
    # any other transaction that might have been sent by the sender.
    refund_gas_price = sender_funding_transactions_gas_price * 2
    tx_cost = refund_gas_limit * refund_gas_price
    # NOTE(review): the -1 here and in `value` below presumably leaves 1 wei
    # of headroom — confirm the intent.
    if (remaining_balance - 1) < tx_cost:
        return
    # Update the nonce of the sender in case one of the pre-alloc transactions
    # failed
    # NOTE(review): this repeats the nonce refresh done a few lines above;
    # the second fetch may observe a newer pending state.
    sender.nonce = Number(eth_rpc.get_transaction_count(sender))
    refund_tx = Transaction(
        sender=sender,
        to=seed_sender,
        gas_limit=refund_gas_limit,
        gas_price=refund_gas_price,
        value=remaining_balance - tx_cost - 1,
    ).with_signature_and_sender()
    eth_rpc.send_wait_transaction(refund_tx)
def pytest_sessionstart(session: pytest.Session) -> None:
    """Reset the sender info before the session starts."""
    metadata = session.config.stash[metadata_key]
    metadata["Senders"] = {}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/execute_recover.py | src/pytest_plugins/execute/execute_recover.py | """Pytest test to recover funds from a failed remote execution."""
import pytest
from ethereum_test_base_types import Address
from ethereum_test_rpc import EthRPC
from ethereum_test_types import EOA, Transaction
@pytest.fixture(scope="session")
def gas_price(eth_rpc: EthRPC) -> int:
    """Get the gas price for the funding transactions."""
    current_price = eth_rpc.gas_price()
    return current_price
def test_recover_funds(
    destination: Address,
    index: int,
    eoa: EOA,
    gas_price: int,
    eth_rpc: EthRPC,
) -> None:
    """Recover funds from a failed remote execution."""
    del index
    remaining_balance = eth_rpc.get_balance(eoa)
    refund_gas_limit = 21_000
    tx_cost = refund_gas_limit * gas_price
    # Nothing to recover if the balance cannot even pay for the sweep tx.
    if remaining_balance < tx_cost:
        pytest.skip(f"Balance {remaining_balance} is less than the transaction cost {tx_cost}")
    recovery_tx = Transaction(
        sender=eoa,
        to=destination,
        gas_limit=refund_gas_limit,
        gas_price=gas_price,
        value=remaining_balance - tx_cost,
    ).with_signature_and_sender()
    eth_rpc.send_wait_transaction(recovery_tx)
    print(f"Recovered {remaining_balance} from {eoa} to {destination}")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/__init__.py | src/pytest_plugins/execute/__init__.py | """
A pytest plugin that provides fixtures that execute tests in live
devnets/testnets.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/execute.py | src/pytest_plugins/execute/execute.py | """
Test execution plugin for pytest, to run Ethereum tests using in live networks.
"""
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Generator, List, Type
import pytest
from pytest_metadata.plugin import metadata_key
from ethereum_test_execution import BaseExecute
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC, EthRPC
from ethereum_test_tools import BaseTest
from ethereum_test_types import ChainConfigDefaults, EnvironmentDefaults, TransactionDefaults
from ..shared.execute_fill import ALL_FIXTURE_PARAMETERS
from ..shared.helpers import (
get_spec_format_for_item,
is_help_or_collectonly_mode,
labeled_format_parameter_set,
)
from ..spec_version_checker.spec_version_checker import EIPSpecTestItem
from .pre_alloc import Alloc
def print_migration_warning(terminalreporter: Any = None) -> None:
    """Print migration warning about repository merge."""
    lines = [
        "",
        "=" * 80,
        "⚠️ IMPORTANT: Repository Migration in Progress - 'The Weld' ⚠️",
        "=" * 80,
        "",
        "This repository is being merged into ethereum/execution-specs (EELS) during the",
        "week of October 20-24, 2025.",
        "",
        "📅 Timeline:",
        " • Week of Oct 13-17: Closing PRs, porting issues to EELS",
        " • Week of Oct 20-24: Migration week - fixing CI and fixture building",
        " • Oct 24 (ETA): Weld finalized - all development moves to EELS",
        "",
        "👉 What This Means:",
        " • Test Contributors: After Oct 24, reopen draft PRs in ethereum/execution-specs",
        " • All future test development happens in EELS after completion",
        " • Fixture releases continue as usual during transition",
        "",
        "For details: https://steel.ethereum.foundation/blog/2025-09-11_weld-announcement/",
        "=" * 80,
        "",
    ]
    if not terminalreporter:
        # No reporter available: plain stdout fallback.
        for line in lines:
            print(line)
        return
    for line in lines:
        # Highlight the headline in bold yellow, the banners in yellow.
        if "⚠️" in line or "IMPORTANT" in line:
            terminalreporter.write_line(line, bold=True, yellow=True)
        elif line.startswith("="):
            terminalreporter.write_line(line, yellow=True)
        else:
            terminalreporter.write_line(line)
def default_html_report_file_path() -> str:
    """
    File (default) to store the generated HTML test report. Defined as a
    function to allow for easier testing.
    """
    report_path = "./execution_results/report_execute.html"
    return report_path
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest.

    Registers two option groups: ``execute`` (transaction/gas behavior during
    live test execution) and ``tests`` (HTML report behavior).
    """
    execute_group = parser.getgroup("execute", "Arguments defining test execution behavior")
    # Gas-pricing knobs; all default to 1 gwei (10**9 wei).
    execute_group.addoption(
        "--default-gas-price",
        action="store",
        dest="default_gas_price",
        type=int,
        default=10**9,
        help=("Default gas price used for transactions, unless overridden by the test."),
    )
    execute_group.addoption(
        "--default-max-fee-per-gas",
        action="store",
        dest="default_max_fee_per_gas",
        type=int,
        default=10**9,
        help=("Default max fee per gas used for transactions, unless overridden by the test."),
    )
    execute_group.addoption(
        "--default-max-priority-fee-per-gas",
        action="store",
        dest="default_max_priority_fee_per_gas",
        type=int,
        default=10**9,
        help=(
            "Default max priority fee per gas used for transactions, "
            "unless overridden by the test."
        ),
    )
    # Per-transaction gas ceiling; note the default here is evaluated at import
    # time from EnvironmentDefaults.gas_limit (a quarter of the block limit).
    execute_group.addoption(
        "--transaction-gas-limit",
        action="store",
        dest="transaction_gas_limit",
        default=EnvironmentDefaults.gas_limit // 4,
        type=int,
        help=(
            "Maximum gas used to execute a single transaction. "
            "Will be used as ceiling for tests that attempt to consume the entire block gas limit."
            f"(Default: {EnvironmentDefaults.gas_limit // 4})"
        ),
    )
    execute_group.addoption(
        "--transactions-per-block",
        action="store",
        dest="transactions_per_block",
        type=int,
        default=None,
        help=("Number of transactions to send before producing the next block."),
    )
    execute_group.addoption(
        "--get-payload-wait-time",
        action="store",
        dest="get_payload_wait_time",
        type=float,
        default=0.3,
        help=("Time to wait after sending a forkchoice_updated before getting the payload."),
    )
    execute_group.addoption(
        "--chain-id",
        action="store",
        dest="chain_id",
        required=False,
        type=int,
        default=None,
        help="ID of the chain where the tests will be executed.",
    )
    report_group = parser.getgroup("tests", "Arguments defining html report behavior")
    # pytest-html's own --html flag still controls the report path; this flag
    # only opts out of generating one (see pytest_configure).
    report_group.addoption(
        "--no-html",
        action="store_true",
        dest="disable_html",
        default=False,
        help=(
            "Don't generate an HTML test report. "
            "The --html flag can be used to specify a different path."
        ),
    )
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Pytest hook called after command line options have been parsed and before
    test collection begins.
    Couple of notes:
    1. Register the plugin's custom markers and process command-line options.
    Custom marker registration:
    https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers
    2. `@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
    called before the pytest-html plugin's pytest_configure to ensure that
    it uses the modified `htmlpath` option.
    """
    print_migration_warning()
    # Modify the block gas limit if specified.
    if config.getoption("transaction_gas_limit"):
        EnvironmentDefaults.gas_limit = config.getoption("transaction_gas_limit")
    if is_help_or_collectonly_mode(config):
        return
    config.engine_rpc_supported = False  # type: ignore[attr-defined]
    # Generate an html report by default, unless explicitly disabled.
    # Bug fix: the condition was missing `not`, so the default report path was
    # only ever set when HTML output had been explicitly disabled with
    # --no-html, and never in the default case the comment describes.
    if not config.getoption("disable_html") and config.getoption("htmlpath") is None:
        config.option.htmlpath = Path(default_html_report_file_path())
    command_line_args = "execute " + " ".join(config.invocation_params.args)
    config.stash[metadata_key]["Command-line args"] = f"<code>{command_line_args}</code>"
    # Configuration for the forks pytest plugin
    config.skip_transition_forks = True  # type: ignore[attr-defined]
    config.single_fork_mode = True  # type: ignore[attr-defined]
    # Configure the chain ID for the tests. `--rpc-chain-id` is deprecated;
    # conflicting values abort the session, otherwise whichever flag was given
    # wins (with --chain-id taking precedence when both agree).
    rpc_chain_id = config.getoption("rpc_chain_id", None)
    chain_id = config.getoption("chain_id")
    if rpc_chain_id is not None or chain_id is not None:
        if rpc_chain_id is not None and chain_id is not None:
            if chain_id != rpc_chain_id:
                pytest.exit(
                    "Conflicting chain ID configuration. "
                    "The --rpc-chain-id flag is deprecated and will be removed in a future "
                    "release. Use --chain-id instead."
                )
        if rpc_chain_id is not None:
            ChainConfigDefaults.chain_id = rpc_chain_id
        if chain_id is not None:
            ChainConfigDefaults.chain_id = chain_id
def pytest_metadata(metadata: dict[str, Any]) -> None:
    """Add or remove metadata to/from the pytest report."""
    # Drop environment noise that is irrelevant to an execution-test report.
    if "JAVA_HOME" in metadata:
        del metadata["JAVA_HOME"]
def pytest_html_results_table_header(cells: list[str]) -> None:
    """Customize the table headers of the HTML report table.

    Inserts three sortable columns after the third default column and drops
    the trailing "Links" column.
    """
    extra_headers = (
        '<th class="sortable" data-column-type="sender">Sender</th>',
        '<th class="sortable" data-column-type="fundedAccounts">Funded Accounts</th>',
        '<th class="sortable" data-column-type="fundedAccounts">Deployed Contracts</th>',
    )
    for offset, header in enumerate(extra_headers):
        cells.insert(3 + offset, header)
    cells.pop()  # Remove the "Links" column
def pytest_html_results_table_row(report: Any, cells: list[str]) -> None:
    """Customize the table rows of the HTML report table.

    Fills the Sender / Funded Accounts columns from the report's
    ``user_properties`` (see ``pytest_runtest_makereport``), falling back to
    "Not available" when a value is missing or ``None``.
    """
    if hasattr(report, "user_properties"):
        props = dict(report.user_properties)
        # Columns 3 and 4 mirror the header order added in
        # pytest_html_results_table_header.
        for column, key in enumerate(("sender_address", "funded_accounts"), start=3):
            value = props.get(key)
            if value is not None:
                cells.insert(column, f"<td>{value}</td>")
            else:
                cells.insert(column, "<td>Not available</td>")
        del cells[-1]  # Remove the "Links" column
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(
    item: pytest.Item, call: pytest.CallInfo[None]
) -> Generator[None, Any, None]:
    """
    Make each test's fixture json path available to the test report via
    user_properties.
    This hook is called when each test is run and a report is being made.
    """
    outcome = yield
    report = outcome.get_result()
    # Only decorate the report of the "call" phase (not setup/teardown) with
    # the values the spec-fixture wrapper stashed on the config, so the HTML
    # table-row hook can render them.
    if call.when == "call":
        for property_name in ["sender_address", "funded_accounts"]:
            if hasattr(item.config, property_name):
                report.user_properties.append((property_name, getattr(item.config, property_name)))
def pytest_html_report_title(report: Any) -> None:
    """Set the HTML report title (pytest-html plugin)."""
    # pytest-html reads the mutated ``title`` attribute after this hook runs.
    setattr(report, "title", "Execute Test Report")
@pytest.fixture(scope="session")
def transactions_per_block(
    request: pytest.FixtureRequest,
) -> int:
    """
    Return the number of transactions to send before producing the next block.

    Falls back to one transaction per pytest-xdist worker (minimum one) when
    the --transactions-per-block option is not given.
    """
    configured = request.config.getoption("transactions_per_block")
    if configured:
        return configured
    worker_count_env = os.environ.get("PYTEST_XDIST_WORKER_COUNT")
    if worker_count_env:
        return max(int(worker_count_env), 1)
    return 1
@pytest.fixture(scope="session")
def default_gas_price(request: pytest.FixtureRequest) -> int:
    """Return default gas price used for transactions."""
    # A non-positive gas price would be rejected by clients, so fail early.
    price = request.config.getoption("default_gas_price")
    assert price > 0, "Gas price must be greater than 0"
    return price
@pytest.fixture(scope="session")
def default_max_fee_per_gas(
    request: pytest.FixtureRequest,
) -> int:
    """Return default max fee per gas used for transactions."""
    # Sourced from the --default-max-fee-per-gas command-line option.
    max_fee = request.config.getoption("default_max_fee_per_gas")
    return max_fee
@pytest.fixture(scope="session")
def default_max_priority_fee_per_gas(
    request: pytest.FixtureRequest,
) -> int:
    """Return default max priority fee per gas used for transactions."""
    # Sourced from the --default-max-priority-fee-per-gas command-line option.
    priority_fee = request.config.getoption("default_max_priority_fee_per_gas")
    return priority_fee
@pytest.fixture(autouse=True, scope="session")
def modify_transaction_defaults(
    default_gas_price: int,
    default_max_fee_per_gas: int,
    default_max_priority_fee_per_gas: int,
) -> None:
    """
    Modify transaction defaults to values better suited for live networks.
    """
    # Mutates process-wide TransactionDefaults; autouse + session scope
    # guarantees this runs once before any test constructs a transaction.
    TransactionDefaults.gas_price = default_gas_price
    TransactionDefaults.max_fee_per_gas = default_max_fee_per_gas
    TransactionDefaults.max_priority_fee_per_gas = default_max_priority_fee_per_gas
@dataclass(kw_only=True)
class Collector:
    """
    A class that collects transactions and post-allocations for every test
    case.
    """
    # RPC handle shared by all tests whose executions are being recorded.
    eth_rpc: EthRPC
    # Maps a pytest node id to the executed format collected for that test.
    collected_tests: Dict[str, BaseExecute] = field(default_factory=dict)
    def collect(self, test_name: str, execute_format: BaseExecute) -> None:
        """Collect transactions and post-allocations for the test case."""
        self.collected_tests[test_name] = execute_format
@pytest.fixture(scope="session")
def collector(
    request: pytest.FixtureRequest,
    eth_rpc: EthRPC,
) -> Generator[Collector, None, None]:
    """
    Return configured fixture collector instance used for all tests in one test
    module.
    """
    del request  # accepted for signature compatibility only; unused
    session_collector = Collector(eth_rpc=eth_rpc)
    yield session_collector
def base_test_parametrizer(cls: Type[BaseTest]) -> Any:
    """
    Generate pytest.fixture for a given BaseTest subclass.
    Implementation detail: All spec fixtures must be scoped on test function
    level to avoid leakage between tests.
    """
    # Only forward the generic fixture parameters this spec class declares.
    cls_fixture_parameters = [p for p in ALL_FIXTURE_PARAMETERS if p in cls.model_fields]
    @pytest.fixture(
        scope="function",
        name=cls.pytest_parameter_name(),
    )
    def base_test_parametrizer_func(
        request: Any,
        fork: Fork,
        pre: Alloc,
        eth_rpc: EthRPC,
        engine_rpc: EngineRPC | None,
        collector: Collector,
    ) -> Type[BaseTest]:
        """
        Fixture used to instantiate an auto-fillable BaseTest object from
        within a test function.
        Every test that defines a test filler must explicitly specify its
        parameter name (see `pytest_parameter_name` in each implementation of
        BaseTest) in its function arguments.
        When parametrize, indirect must be used along with the fixture format
        as value.
        """
        # The execute format arrives via indirect parametrization
        # (see pytest_generate_tests).
        execute_format = request.param
        assert execute_format in BaseExecute.formats.values()
        assert issubclass(execute_format, BaseExecute)
        if execute_format.requires_engine_rpc:
            assert engine_rpc is not None, "Engine RPC is required for this format."
        # The wrapper runs the whole execute pipeline inside __init__, so the
        # test body only needs to construct the spec object.
        class BaseTestWrapper(cls):  # type: ignore
            def __init__(self, *args: Any, **kwargs: Any) -> None:
                kwargs["t8n_dump_dir"] = None
                if "pre" not in kwargs:
                    kwargs["pre"] = pre
                elif kwargs["pre"] != pre:
                    raise ValueError("The pre-alloc object was modified by the test.")
                # Set default for expected_benchmark_gas_used
                if "expected_benchmark_gas_used" not in kwargs:
                    kwargs["expected_benchmark_gas_used"] = request.getfixturevalue(
                        "gas_benchmark_value"
                    )
                # Fill remaining declared parameters from their fixtures.
                kwargs |= {
                    p: request.getfixturevalue(p)
                    for p in cls_fixture_parameters
                    if p not in kwargs
                }
                # Stashed on the config so the HTML report hooks can pick it up.
                request.node.config.sender_address = str(pre._sender)
                super(BaseTestWrapper, self).__init__(*args, **kwargs)
                self._request = request
                # wait for pre-requisite transactions to be included in blocks
                pre.wait_for_transactions()
                # Verify on-chain code of every contract deployed during setup.
                for deployed_contract, expected_code in pre._deployed_contracts:
                    actual_code = eth_rpc.get_code(deployed_contract)
                    if actual_code != expected_code:
                        raise Exception(
                            f"Deployed test contract didn't match expected code at address "
                            f"{deployed_contract} (not enough gas_limit?).\n"
                            f"Expected: {expected_code}\n"
                            f"Actual: {actual_code}"
                        )
                request.node.config.funded_accounts = ", ".join(
                    [str(eoa) for eoa in pre._funded_eoa]
                )
                execute = self.execute(fork=fork, execute_format=execute_format)
                execute.execute(fork=fork, eth_rpc=eth_rpc, engine_rpc=engine_rpc, request=request)
                collector.collect(request.node.nodeid, execute)
        return BaseTestWrapper
    return base_test_parametrizer_func
# Dynamically generate a pytest fixture for each test spec type.
# Runs at import time; each spec type's parameter name becomes a module-level
# fixture (e.g. `state_test`) that tests request by name.
for cls in BaseTest.spec_types.values():
    # Fixture needs to be defined in the global scope so pytest can detect it.
    globals()[cls.pytest_parameter_name()] = base_test_parametrizer(cls)
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """
    Pytest hook used to dynamically generate test cases for each fixture format
    a given test spec supports.
    """
    # Set by pytest_configure; formats needing the Engine API are skipped
    # (not removed) when it is unavailable, so they stay visible in reports.
    engine_rpc_supported = metafunc.config.engine_rpc_supported  # type: ignore
    for test_type in BaseTest.spec_types.values():
        if test_type.pytest_parameter_name() in metafunc.fixturenames:
            parameter_set = []
            for format_with_or_without_label in test_type.supported_execute_formats:
                param = labeled_format_parameter_set(format_with_or_without_label)
                if format_with_or_without_label.requires_engine_rpc and not engine_rpc_supported:
                    param.marks.append(pytest.mark.skip(reason="Engine RPC is not supported"))  # type: ignore
                parameter_set.append(param)
            # indirect=True routes each param through the spec fixture
            # generated by base_test_parametrizer (received as request.param).
            metafunc.parametrize(
                [test_type.pytest_parameter_name()],
                parameter_set,
                scope="function",
                indirect=True,
            )
def pytest_collection_modifyitems(
    items: List[pytest.Item],
) -> None:
    """
    Remove transition tests and add the appropriate execute markers to the
    test.
    """
    # Indices of items to drop; collected first, removed in reverse below so
    # earlier indices stay valid while popping.
    items_for_removal = []
    for i, item in enumerate(items):
        if isinstance(item, EIPSpecTestItem):
            continue
        # NOTE(review): assumes every remaining item is parametrized (has a
        # callspec) — confirm non-parametrized items cannot reach this hook.
        params: Dict[str, Any] = item.callspec.params  # type: ignore
        if "fork" not in params or params["fork"] is None:
            items_for_removal.append(i)
            continue
        fork: Fork = params["fork"]
        spec_type, execute_format = get_spec_format_for_item(params)
        assert issubclass(execute_format, BaseExecute)
        markers = list(item.iter_markers())
        if spec_type.discard_execute_format_by_marks(execute_format, fork, markers):
            items_for_removal.append(i)
            continue
        for marker in markers:
            if marker.name == "execute":
                # Propagate marks nested inside @pytest.mark.execute(...).
                for mark in marker.args:
                    item.add_marker(mark)
            elif marker.name == "valid_at_transition_to":
                # Transition tests cannot run against a live network.
                # NOTE(review): this `continue` only skips the rest of the
                # marker loop iteration, not the outer item loop; the item is
                # still inspected for "yul" below. Appears harmless since the
                # item is already queued for removal.
                items_for_removal.append(i)
                continue
            elif marker.name == "pre_alloc_modify":
                item.add_marker(pytest.mark.skip(reason="Pre-alloc modification not supported"))
        if "yul" in item.fixturenames:  # type: ignore
            item.add_marker(pytest.mark.yul_test)
    # Pop from the end so earlier recorded indices remain correct.
    for i in reversed(items_for_removal):
        items.pop(i)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/tests/test_pre_alloc.py | src/pytest_plugins/execute/tests/test_pre_alloc.py | """Test the pre-allocation models used during test execution."""
from typing import Any
import pytest
from ethereum_test_base_types import Address
from ..pre_alloc import AddressStubs
@pytest.mark.parametrize(
    "input_value,expected",
    [
        pytest.param(
            "{}",
            AddressStubs({}),
            id="empty_address_stubs_string",
        ),
        pytest.param(
            '{"some_address": "0x0000000000000000000000000000000000000001"}',
            AddressStubs({"some_address": Address("0x0000000000000000000000000000000000000001")}),
            id="address_stubs_string_with_some_address",
        ),
    ],
)
def test_address_stubs(input_value: Any, expected: AddressStubs) -> None:
    """Test the address stubs."""
    # Inline JSON strings (not file paths) must parse into the equivalent
    # AddressStubs mapping.
    assert AddressStubs.model_validate_json_or_file(input_value) == expected
@pytest.mark.parametrize(
    "file_name,file_contents,expected",
    [
        pytest.param(
            "empty.json",
            "{}",
            AddressStubs({}),
            id="empty_address_stubs_json",
        ),
        pytest.param(
            "empty.yaml",
            "",
            AddressStubs({}),
            id="empty_address_stubs_yaml",
        ),
        pytest.param(
            "one_address.json",
            '{"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa"}',
            AddressStubs(
                {
                    "DEPOSIT_CONTRACT_ADDRESS": Address(
                        "0x00000000219ab540356cbb839cbe05303d7705fa"
                    ),
                }
            ),
            id="single_address_json",
        ),
        pytest.param(
            "one_address.yaml",
            "DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cbb839cbe05303d7705fa",
            AddressStubs(
                {
                    "DEPOSIT_CONTRACT_ADDRESS": Address(
                        "0x00000000219ab540356cbb839cbe05303d7705fa"
                    ),
                }
            ),
            id="single_address_yaml",
        ),
    ],
)
def test_address_stubs_from_files(
    pytester: pytest.Pytester,
    file_name: str,
    file_contents: str,
    expected: AddressStubs,
) -> None:
    """Test the address stubs."""
    # Write the fixture file into pytester's isolated temporary directory,
    # then validate by path (JSON and YAML are both dispatched on extension).
    filename = pytester.path.joinpath(file_name)
    filename.write_text(file_contents)
    assert AddressStubs.model_validate_json_or_file(str(filename)) == expected
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/tests/__init__.py | src/pytest_plugins/execute/tests/__init__.py | """Unit tests for the execute pytest plugin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/execute_eth_config.py | src/pytest_plugins/execute/eth_config/execute_eth_config.py | """
Pytest test to verify a client's configuration using `eth_config` RPC endpoint.
"""
import json
import time
from hashlib import sha256
from typing import Dict, List
import pytest
from ethereum_test_rpc import EthConfigResponse, EthRPC
from pytest_plugins.custom_logging import get_logger
from .execute_types import NetworkConfig
logger = get_logger(__name__)
@pytest.fixture(scope="function")
def eth_config_response(eth_rpc: List[EthRPC]) -> EthConfigResponse | None:
    """
    Get the `eth_config` response from the client to be verified by all tests.
    """
    # Try each endpoint in turn; the first non-None response wins. The
    # for/else fires only when no endpoint succeeded (successful attempts
    # return from inside the loop, so the loop never `break`s).
    for rpc in eth_rpc:
        try:
            response = rpc.config()
            if response is not None:
                return response
        except Exception:
            # Best-effort: a failing endpoint just means we try the next one.
            pass
    else:
        raise Exception("Could not connect to any RPC client.")
@pytest.fixture(scope="function")
def network(request: pytest.FixtureRequest) -> NetworkConfig:
    """Get the network that will be used to verify all tests."""
    # Populated on the config object by pytest_configure in the eth_config
    # plugin (from --network / --genesis-config-file / --genesis-config-url).
    return request.config.network  # type: ignore
@pytest.fixture(scope="function")
def current_time() -> int:
    """
    Return the current wall-clock time as Unix seconds, used to determine
    which fork configuration should currently be active.
    """
    return int(time.time())
@pytest.fixture(scope="function")
def expected_eth_config(network: NetworkConfig, current_time: int) -> EthConfigResponse:
    """
    Calculate the current fork value to verify against the client's response.
    """
    # Ground truth computed locally from the static network config; the tests
    # below compare the client's live response against this.
    return network.get_eth_config(current_time)
def test_eth_config_current(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `current` field of the `eth_config` RPC endpoint."""
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    assert eth_config_response.current is not None, (
        "Client did not return a valid `current` fork config."
    )
    # Full structural comparison of the active-fork configuration object.
    expected_current = expected_eth_config.current
    assert eth_config_response.current == expected_current, (
        "Client's `current` fork config does not match expected value: "
        f"{eth_config_response.current.model_dump_json(indent=2)} != "
        f"{expected_current.model_dump_json(indent=2)}"
    )
def test_eth_config_current_fork_id(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `forkId` field within the `current` configuration object."""
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    assert eth_config_response.current is not None, (
        "Client did not return a valid `current` fork config."
    )
    # The fork id is checked separately from the full config so a mismatch
    # here produces a focused failure message.
    assert eth_config_response.current.fork_id is not None, (
        "Client did not return a valid `forkId` in the current fork config."
    )
    assert eth_config_response.current.fork_id == expected_eth_config.current.fork_id, (
        "Client's `current.forkId` does not match expected value: "
        f"{eth_config_response.current.fork_id} != "
        f"{expected_eth_config.current.fork_id}"
    )
def test_eth_config_next(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `next` field of the `eth_config` RPC endpoint."""
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    # `next` is optional: when no upcoming fork is scheduled the client must
    # also report None.
    expected_next = expected_eth_config.next
    if expected_next is None:
        assert eth_config_response.next is None, (
            "Client returned a `next` fork config but expected None."
        )
    else:
        assert eth_config_response.next is not None, (
            "Client did not return a valid `next` fork config."
        )
        assert eth_config_response.next == expected_next, (
            "Client's `next` fork config does not match expected value: "
            f"{eth_config_response.next.model_dump_json(indent=2)} != "
            f"{expected_next.model_dump_json(indent=2)}"
        )
def test_eth_config_next_fork_id(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `forkId` field within the `next` configuration object."""
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    # Both the `next` object and its `forkId` are optional; the client must
    # mirror exactly which of them the expected config defines.
    expected_next = expected_eth_config.next
    if expected_next is None:
        assert eth_config_response.next is None, (
            "Client returned a `next` fork config but expected None."
        )
    else:
        assert eth_config_response.next is not None, (
            "Client did not return a valid `next` fork config."
        )
        expected_next_fork_id = expected_next.fork_id
        if expected_next_fork_id is None:
            assert eth_config_response.next.fork_id is None, (
                "Client returned a `next.forkId` but expected None."
            )
        else:
            received_fork_id = eth_config_response.next.fork_id
            assert received_fork_id is not None, "Client did not return a valid `next.forkId`."
            assert received_fork_id == expected_next_fork_id, (
                "Client's `next.forkId` does not match expected value: "
                f"{received_fork_id} != "
                f"{expected_next_fork_id}"
            )
def test_eth_config_last(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `last` field of the `eth_config` RPC endpoint."""
    # `last` is optional, mirroring the handling of `next` above.
    expected_last = expected_eth_config.last
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    if expected_last is None:
        assert eth_config_response.last is None, (
            "Client returned a `last` fork config but expected None."
        )
    else:
        assert eth_config_response.last is not None, (
            "Client did not return a valid `last` fork config."
        )
        assert eth_config_response.last == expected_last, (
            "Client's `last` fork config does not match expected value: "
            f"{eth_config_response.last.model_dump_json(indent=2)} != "
            f"{expected_last.model_dump_json(indent=2)}"
        )
def test_eth_config_last_fork_id(
    eth_config_response: EthConfigResponse | None,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Validate `forkId` field within the `last` configuration object."""
    assert eth_config_response is not None, "Client did not return a valid `eth_config` response."
    # Both the `last` object and its `forkId` are optional; the client must
    # mirror exactly which of them the expected config defines.
    expected_last = expected_eth_config.last
    if expected_last is None:
        assert eth_config_response.last is None, (
            "Client returned a `last` fork config but expected None."
        )
    else:
        assert eth_config_response.last is not None, (
            "Client did not return a valid `last` fork config."
        )
        expected_last_fork_id = expected_last.fork_id
        if expected_last_fork_id is None:
            assert eth_config_response.last.fork_id is None, (
                "Client returned a `last.forkId` but expected None."
            )
        else:
            received_fork_id = eth_config_response.last.fork_id
            assert received_fork_id is not None, "Client did not return a valid `last.forkId`."
            assert received_fork_id == expected_last_fork_id, (
                "Client's `last.forkId` does not match expected value: "
                f"{received_fork_id} != "
                f"{expected_last_fork_id}"
            )
def test_eth_config_majority(
    all_rpc_endpoints: Dict[str, List[EthRPC]],
) -> None:
    """
    Queries devnet exec clients for their eth_config and fails if not all have
    the same response.
    """
    responses = dict()  # Dict[exec_client_name : response] # noqa: C408
    client_to_url_used_dict = dict()  # noqa: C408
    for exec_client in all_rpc_endpoints.keys():
        # try only as many consensus+exec client combinations until you receive
        # a response if all combinations for a given exec client fail we panic
        for eth_rpc_target in all_rpc_endpoints[exec_client]:
            try:
                response = eth_rpc_target.config(timeout=5)
                if response is None:
                    logger.warning(f"Got 'None' as eth_config response from {eth_rpc_target}")
                    continue
            except Exception as e:
                logger.warning(
                    f"When trying to get eth_config from {eth_rpc_target} a problem occurred: {e}"
                )
                continue
            # Canonicalize with sorted keys so byte-level comparison (via
            # hash below) is order-insensitive.
            response_str = json.dumps(response.model_dump(mode="json"), sort_keys=True)
            responses[exec_client] = response_str
            client_to_url_used_dict[exec_client] = (
                eth_rpc_target.url
            )  # remember which cl+el combination was used
            logger.info(f"Response of {exec_client}: {response_str}\n\n")
            break  # no need to gather more responses for this client
    assert len(responses.keys()) == len(all_rpc_endpoints.keys()), (
        "Failed to get an eth_config response "
        f" from each specified execution client. Full list of execution clients is "
        f"{all_rpc_endpoints.keys()} but we were only able to gather eth_config responses "
        f"from: {responses.keys()}\n"
        "Will try again with a different consensus-execution client combination for "
        "this execution client"
    )
    # determine hashes of client responses
    client_to_hash_dict = {}  # Dict[exec_client : response hash] # noqa: C408
    for client in responses.keys():
        response_bytes = responses[client].encode("utf-8")
        response_hash = sha256(response_bytes).digest().hex()
        logger.info(f"Response hash of client {client}: {response_hash}")
        client_to_hash_dict[client] = response_hash
    # if not all responses have the same hash there is a critical consensus
    # issue
    # First client's hash becomes the reference; every other must match it.
    expected_hash = ""
    for h in client_to_hash_dict.keys():
        if expected_hash == "":
            expected_hash = client_to_hash_dict[h]
            continue
        assert client_to_hash_dict[h] == expected_hash, (
            "Critical consensus issue: Not all eth_config responses are the "
            " same!\n"
            "Here is an overview of client response hashes:\n"
            + "\n\t".join(f"{k}: {v}" for k, v in client_to_hash_dict.items())
            + "\n\n"
            "Here is an overview of which URLs were contacted:\n\t"
            + "\n\t".join(f"{k}: @{v.split('@')[1]}" for k, v in client_to_url_used_dict.items())
            + "\n\n"
            # log which cl+el combinations were used without leaking full url
            "Here is a dump of all client responses:\n"
            + "\n\n".join(f"{k}: {v}" for k, v in responses.items())
        )
    assert expected_hash != ""
    logger.info("All clients returned the same eth_config response. Test has been passed!")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/eth_config.py | src/pytest_plugins/execute/eth_config/eth_config.py | """Pytest plugin to test the `eth_config` RPC endpoint in a node."""
import re
from os.path import realpath
from pathlib import Path
from typing import Dict, List
from urllib.parse import urlparse
import pytest
import requests
from ethereum_test_rpc import EthRPC
from pytest_plugins.custom_logging import get_logger
from .execute_types import Genesis, NetworkConfigFile
# Location of this module; used to resolve the bundled default networks file.
CURRENT_FILE = Path(realpath(__file__))
CURRENT_FOLDER = CURRENT_FILE.parent
DEFAULT_NETWORK_CONFIGS_FILE = CURRENT_FOLDER / "networks.yml"
# Parsed at import time so --help can list the supported network names.
DEFAULT_NETWORKS = NetworkConfigFile.from_yaml(DEFAULT_NETWORK_CONFIGS_FILE)
# Client names recognized in majority mode and used to build endpoint URLs.
EXECUTION_CLIENTS = ["besu", "erigon", "geth", "nethermind", "nimbusel", "reth"]
CONSENSUS_CLIENTS = ["grandine", "lighthouse", "lodestar", "nimbus", "prysm", "teku"]
logger = get_logger(__name__)
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest.

    All options live in the ``execute`` group; exactly one of --network,
    --genesis-config-file or --genesis-config-url selects the network
    definition (enforced in pytest_configure), and --rpc-endpoint is required.
    """
    eth_config_group = parser.getgroup("execute", "Arguments defining eth_config test behavior.")
    eth_config_group.addoption(
        "--network",
        action="store",
        dest="network",
        required=False,
        type=str,
        default=None,
        help=(
            "Name of the network to verify for the RPC client. Supported networks by default: "
            f"{', '.join(DEFAULT_NETWORKS.root.keys())}."
        ),
    )
    eth_config_group.addoption(
        "--network-config-file",
        action="store",
        dest="network_config_file",
        required=False,
        type=Path,
        default=None,
        help="Path to the yml file that contains custom network configuration "
        "(e.g. ./src/pytest_plugins/execute/eth_config/networks.yml).\nIf no config is provided "
        "then majority mode will be used for devnet testing (clients that have a different "
        "response than the majority of clients will fail the test)",
    )
    eth_config_group.addoption(
        "--clients",
        required=False,
        action="store",
        dest="clients",
        type=str,
        default=None,
        help="Comma-separated list of clients to be tested in majority mode. Example: "
        '"besu,erigon,geth,nethermind,nimbusel,reth"\nIf you do not pass a value, majority mode '
        "testing will be disabled.",
    )
    eth_config_group.addoption(
        "--genesis-config-file",
        action="store",
        dest="genesis_config_file",
        required=False,
        type=Path,
        default=None,
        help="Path to a genesis JSON file from which a custom network configuration "
        "must be derived.",
    )
    eth_config_group.addoption(
        "--genesis-config-url",
        action="store",
        dest="genesis_config_url",
        required=False,
        type=str,
        default=None,
        help="URL to a genesis JSON file from which a custom network configuration "
        "must be derived.",
    )
    eth_config_group.addoption(
        "--rpc-endpoint",
        required=True,
        action="store",
        dest="rpc_endpoint",
        help="RPC endpoint to the execution client that will be tested.",
    )
def pytest_configure(config: pytest.Config) -> None:
    """
    Load the network configuration file and load the specific network to be
    used for the test.

    Also validates the mutually-exclusive network-definition flags, decides
    whether majority mode is active, and fail-fasts on an unreachable RPC
    endpoint or one that does not support `eth_config`.
    """
    genesis_config_file = config.getoption("genesis_config_file")
    genesis_config_url = config.getoption("genesis_config_url")
    network_configs_path = config.getoption("network_config_file")
    network_name = config.getoption("network")
    rpc_endpoint = config.getoption("rpc_endpoint")
    # majority mode
    clients = config.getoption("clients")
    config.option.majority_clients = []  # List[str]
    # The network-definition sources are mutually exclusive.
    if genesis_config_file and genesis_config_url:
        pytest.exit(
            "Cannot specify both the --genesis-config-file and --genesis-config-url flags."
        )
    if (genesis_config_file or genesis_config_url) and network_name:
        pytest.exit(
            "Cannot specify a network name when using the --genesis-config-file or "
            "--genesis-config-url flag."
        )
    # handle the one of the three flags that was passed
    # case 1: genesis_config_file
    if genesis_config_file:
        genesis_config_contents = genesis_config_file.read_text()
        genesis_config = Genesis.model_validate_json(genesis_config_contents)
        config.network = genesis_config.network_config()  # type: ignore
    # case 2: genesis_config_url
    elif genesis_config_url:
        genesis_config_contents = requests.get(genesis_config_url).text
        genesis_config = Genesis.model_validate_json(genesis_config_contents)
        config.network = genesis_config.network_config()  # type: ignore
    # case 3: network_name
    elif network_name:
        # load provided networks file
        if network_configs_path is None:
            network_configs_path = DEFAULT_NETWORK_CONFIGS_FILE
        if not network_configs_path.exists():
            pytest.exit(f'Specified networks file "{network_configs_path}" does not exist.')
        try:
            network_configs = NetworkConfigFile.from_yaml(network_configs_path)
        except Exception as e:
            pytest.exit(f"Could not load file {network_configs_path}: {e}")
        if network_name not in network_configs.root:
            pytest.exit(
                f'Network "{network_name}" could not be found in file "{network_configs_path}".'
            )
        config.network = network_configs.root[network_name]  # type: ignore
    # determine whether to activate majority mode or not
    if clients:
        # Bug fix: str.replace returns a new string and the original call
        # discarded the result, so values like "geth, reth" kept the embedded
        # spaces and failed the EXECUTION_CLIENTS membership check below.
        clients = clients.replace(" ", "")
        clients = clients.split(",")
        for c in clients:
            if c not in EXECUTION_CLIENTS:
                pytest.exit(f"Unsupported client was passed: {c}")
        logger.info(f"Provided client list: {clients}")
        # activate majority mode if also URL condition is met
        if ".ethpandaops.io" in rpc_endpoint:
            logger.info("Ethpandaops RPC detected")
            logger.info("Toggling majority test on")
            config.option.majority_clients = clients  # List[str]
    else:
        logger.info("Majority test mode is disabled because no --clients value was passed.")
    if config.getoption("collectonly", default=False):
        return
    # Test out the RPC endpoint to be able to fail fast if it's not working
    eth_rpc = EthRPC(rpc_endpoint)
    try:
        logger.debug("Will now perform a connection check (request chain_id)..")
        chain_id = eth_rpc.chain_id()
        logger.debug(f"Connection check ok (successfully got chain id {chain_id})")
    except Exception as e:
        pytest.exit(f"Could not connect to RPC endpoint {rpc_endpoint}: {e}")
    try:
        logger.debug("Will now briefly check whether eth_config is supported by target rpc..")
        eth_rpc.config()
        logger.debug("Connection check ok (successfully got eth_config response)")
    except Exception as e:
        pytest.exit(f"RPC endpoint {rpc_endpoint} does not support `eth_config`: {e}")
@pytest.fixture(autouse=True, scope="session")
def rpc_endpoint(request: pytest.FixtureRequest) -> str:
    """
    Return remote RPC endpoint to be used to make requests to the execution
    client.
    """
    # Required command-line option; validated for reachability in
    # pytest_configure before any test runs.
    return request.config.getoption("rpc_endpoint")
def all_rpc_endpoints(config: pytest.Config) -> Dict[str, List[EthRPC]]:
    """
    Derive a mapping of exec clients to the RPC URLs they are reachable at.

    When no majority clients were configured, the mapping has a single entry
    keyed by the endpoint's host name (falling back to the raw URL when the
    URL has no parsable host) pointing at the configured endpoint.  Otherwise
    one entry per execution client is produced, each holding one `EthRPC` per
    consensus-client combination derived from the ethpandaops URL pattern.
    """
    rpc_endpoint = config.getoption("rpc_endpoint")
    # besu, erigon, ..
    el_clients: List[str] = config.getoption("majority_clients")
    if not el_clients:
        endpoint_name = rpc_endpoint
        try:
            # `hostname` is None for URLs without a network location; keep
            # the full URL as the key in that case instead of a None key.
            hostname = urlparse(rpc_endpoint).hostname
            if hostname:
                endpoint_name = hostname
        except ValueError:
            # Unparsable URL: fall back to using the raw endpoint as the key.
            pass
        return {endpoint_name: [EthRPC(rpc_endpoint)]}
    # Matches "<creds>@rpc.<consensus>-<execution><rest>" so that both client
    # names can be substituted per combination below.
    pattern = r"(.*?@rpc\.)([^-]+)-([^-]+)(-.*)"
    url_dict: Dict[str, List[EthRPC]] = {
        exec_client: [
            EthRPC(
                re.sub(
                    pattern,
                    f"\\g<1>{consensus}-{exec_client}\\g<4>",
                    rpc_endpoint,
                )
            )
            for consensus in CONSENSUS_CLIENTS
        ]
        for exec_client in el_clients
    }
    # url_dict looks like this:
    # { 'besu': [<EthRPC that holds url for grandine+besu>,
    #            <EthRPC that holds url for lighthouse+besu>, ..],
    #   'erigon': ... ... }
    return url_dict
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """
    Generate tests for all clients under test.

    `test_eth_config_majority` is parametrized once with the entire
    client->endpoints mapping (and skipped when fewer than two execution
    clients are available, since a majority comparison needs at least two).
    Every other test is parametrized once per execution client, receiving
    that client's list of RPC endpoints as `eth_rpc`.
    """
    # all_rpc_endpoints is a dictionary with the name of the exec client as key
    # and the possible URLs to contact it (different cl combinations) as value
    # list
    all_rpc_endpoints_dict = all_rpc_endpoints(metafunc.config)
    if metafunc.definition.name == "test_eth_config_majority":
        if len(all_rpc_endpoints_dict) < 2:
            # The test function is not run because we only have a single
            # client, so no majority comparison
            logger.info(
                "Skipping eth_config majority because less than 2 exec clients were passed"
            )
            # Still parametrize (with a skip mark) so the test shows up as
            # skipped rather than erroring on a missing fixture.
            metafunc.parametrize(
                ["all_rpc_endpoints"],
                [
                    pytest.param(
                        all_rpc_endpoints_dict,
                        id=metafunc.definition.name,
                        marks=pytest.mark.skip("Only one client"),
                    )
                ],
            )
        else:
            metafunc.parametrize(
                ["all_rpc_endpoints"],
                [
                    pytest.param(
                        all_rpc_endpoints_dict,
                        id=metafunc.definition.name,
                    )
                ],
                scope="function",
            )
    else:
        # One test instance per execution client; the parameter value is that
        # client's list of EthRPC endpoints (one per consensus combination).
        metafunc.parametrize(
            ["eth_rpc"],
            [
                pytest.param(
                    rpc_endpoint,
                    id=f"{metafunc.definition.name}[{endpoint_name}]",
                )
                for endpoint_name, rpc_endpoint in all_rpc_endpoints_dict.items()
            ],
            scope="function",
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/__init__.py | src/pytest_plugins/execute/eth_config/__init__.py | """Execute module to test the `eth_config` RPC endpoint."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/execute_types.py | src/pytest_plugins/execute/eth_config/execute_types.py | """Types used to test `eth_config`."""
from binascii import crc32
from collections import defaultdict
from functools import cached_property
from pathlib import Path
from typing import Annotated, Any, ClassVar, Dict, List, Self, Set
import yaml
from pydantic import BaseModel, BeforeValidator, Field, model_validator
from ethereum_test_base_types import (
Address,
Bytes,
CamelModel,
EthereumTestRootModel,
ForkHash,
Hash,
HeaderNonce,
HexNumber,
Number,
)
from ethereum_test_fixtures.blockchain import FixtureHeader
from ethereum_test_forks import Fork, Frontier
from ethereum_test_rpc import (
EthConfigResponse,
ForkConfig,
ForkConfigBlobSchedule,
)
from ethereum_test_types import Alloc, Environment
class AddressOverrideDict(EthereumTestRootModel):
    """
    Dictionary with overrides to the default addresses specified for each fork.

    Required for testnets or devnets which have a different location of
    precompiles or system contracts.
    """

    # Maps a fork's default address (precompile or system contract) to the
    # address actually used on the network under test.
    root: Dict[Address, Address]
class ForkConfigBuilder(BaseModel):
    """Class to describe a current or next fork + bpo configuration."""

    # Fork whose precompiles/system contracts are exposed.
    fork: Fork
    # Time at which the fork activates on the network.
    activation_time: int
    chain_id: int
    # Network-specific relocations of precompiles/system contracts.
    address_overrides: AddressOverrideDict
    blob_schedule: ForkConfigBlobSchedule | None = None

    def _labeled_addresses(self, addresses: List[Address]) -> Dict[str, Address]:
        """
        Map each address' label to its address, applying any network-specific
        override configured for it.
        """
        labeled: Dict[str, Address] = {}
        for address in addresses:
            label = address.label
            # Relocate the address if the network overrides it.
            address = self.address_overrides.root.get(address, address)
            labeled[f"{label}"] = address
        return labeled

    @property
    def precompiles(self) -> Dict[str, Address]:
        """Get the precompiles active on `fork`, keyed by label."""
        return self._labeled_addresses(self.fork.precompiles())

    @property
    def system_contracts(self) -> Dict[str, Address]:
        """Get the system contracts active on `fork`, keyed by label."""
        return self._labeled_addresses(self.fork.system_contracts())

    def get_config(self, fork_id: ForkHash) -> ForkConfig:
        """
        Get the current and next fork configurations given the current time and
        the network configuration.
        """
        return ForkConfig(
            activation_time=self.activation_time,
            blob_schedule=self.blob_schedule,
            chain_id=self.chain_id,
            fork_id=fork_id,
            precompiles=self.precompiles,
            system_contracts=self.system_contracts,
        )
def calculate_fork_id(genesis_hash: Hash, activation_times: Set[int]) -> ForkHash:
    """
    Calculate the fork id: a CRC32 checksum over the genesis hash followed by
    every non-zero activation time encoded as a big-endian 8-byte word, in
    ascending order.
    """
    encoded_times = [
        activation_time.to_bytes(length=8, byteorder="big")
        for activation_time in sorted(activation_times)
        if activation_time != 0
    ]
    return ForkHash(crc32(b"".join([bytes(genesis_hash), *encoded_times])))
class ForkActivationTimes(EthereumTestRootModel[Dict[Fork, int]]):
    """Fork activation times."""

    # Maps each fork to its scheduled activation time.
    root: Dict[Fork, int]

    def forks_by_activation_time(self) -> Dict[int, Set[Fork]]:
        """Group the forks by their activation time."""
        forks_by_activation_time: Dict[int, Set[Fork]] = defaultdict(set)
        for fork, activation_time in self.root.items():
            forks_by_activation_time[activation_time].add(fork)
        return forks_by_activation_time

    def _forks_chronological(self, predicate) -> List[Fork]:
        """
        Return the forks whose activation time satisfies `predicate`, ordered
        by activation time (ties broken by fork ordering).
        """
        forks_by_activation_time = self.forks_by_activation_time()
        selected: List[Fork] = []
        for activation_time in sorted(forks_by_activation_time.keys()):
            if predicate(activation_time):
                selected.extend(sorted(forks_by_activation_time[activation_time]))
        return selected

    def active_forks(self, current_time: int) -> List[Fork]:
        """Get all forks already activated at `current_time`, in order."""
        return self._forks_chronological(lambda t: t <= current_time)

    def next_forks(self, current_time: int) -> List[Fork]:
        """Get all forks scheduled after `current_time`, in order."""
        return self._forks_chronological(lambda t: t > current_time)

    def active_fork(self, current_time: int) -> Fork:
        """
        Get the most recently activated fork.

        Raises IndexError when no fork is active at `current_time`.
        """
        return self.active_forks(current_time)[-1]

    def next_fork(self, current_time: int) -> Fork | None:
        """Get the next scheduled fork, or None when nothing is scheduled."""
        next_forks = self.next_forks(current_time)
        return next_forks[0] if next_forks else None

    def last_fork(self, current_time: int) -> Fork | None:
        """Get the last scheduled fork, or None when nothing is scheduled."""
        next_forks = self.next_forks(current_time)
        return next_forks[-1] if next_forks else None

    def __getitem__(self, key: Fork) -> int:
        """Get the activation time for a given fork."""
        return self.root[key]
class NetworkConfig(CamelModel):
    """Ethereum network config."""

    # Chain id of the network.
    chain_id: HexNumber
    # Genesis block hash; the seed of every fork id calculation below.
    genesis_hash: Hash
    # Scheduled activation time for every fork of the network.
    fork_activation_times: ForkActivationTimes
    # Per-fork blob parameters; forks without an entry have no blob schedule.
    blob_schedule: Dict[Fork, ForkConfigBlobSchedule] = Field(default_factory=dict)
    # Relocations of precompiles/system contracts (testnets/devnets only).
    address_overrides: AddressOverrideDict = Field(default_factory=lambda: AddressOverrideDict({}))

    def get_eth_config(self, current_time: int) -> EthConfigResponse:
        """
        Get the current and next forks based on the given time.

        Builds `current` from the most recently activated fork; when further
        forks are scheduled after `current_time`, also builds `next` (the
        earliest upcoming fork) and `last` (the final scheduled fork).  Each
        fork id is derived via `calculate_fork_id` from the genesis hash plus
        the relevant set of activation times.
        """
        network_kwargs = {
            "chain_id": self.chain_id,
            "address_overrides": self.address_overrides,
        }
        # Partition the distinct activation times into past (<= now) and
        # future (> now); these sets drive the fork id calculations below.
        activation_times = set(self.fork_activation_times.forks_by_activation_time().keys())
        current_activation_times = {
            activation_time
            for activation_time in activation_times
            if activation_time <= current_time
        }
        next_activation_times = {
            activation_time
            for activation_time in activation_times
            if activation_time > current_time
        }
        active_fork = self.fork_activation_times.active_fork(current_time)
        current_config_builder: ForkConfigBuilder = ForkConfigBuilder(
            fork=active_fork,
            activation_time=self.fork_activation_times[active_fork],
            blob_schedule=self.blob_schedule.get(active_fork),
            **network_kwargs,
        )
        # Current fork id covers only already-activated times.
        current_config = current_config_builder.get_config(
            calculate_fork_id(self.genesis_hash, current_activation_times)
        )
        kwargs = {"current": current_config}
        next_fork = self.fork_activation_times.next_fork(current_time)
        if next_fork:
            next_config_builder: ForkConfigBuilder = ForkConfigBuilder(
                fork=next_fork,
                activation_time=self.fork_activation_times[next_fork],
                blob_schedule=self.blob_schedule.get(next_fork),
                **network_kwargs,
            )
            # Next fork id additionally includes the earliest future
            # activation time.
            kwargs["next"] = next_config_builder.get_config(
                calculate_fork_id(
                    self.genesis_hash,
                    current_activation_times | {sorted(next_activation_times)[0]},
                )
            )
        last_fork = self.fork_activation_times.last_fork(current_time)
        if last_fork:
            last_config_builder: ForkConfigBuilder = ForkConfigBuilder(
                fork=last_fork,
                activation_time=self.fork_activation_times[last_fork],
                blob_schedule=self.blob_schedule.get(last_fork),
                **network_kwargs,
            )
            # Last fork id includes every scheduled activation time.
            kwargs["last"] = last_config_builder.get_config(
                calculate_fork_id(
                    self.genesis_hash,
                    current_activation_times | next_activation_times,
                )
            )
        return EthConfigResponse(**kwargs)
class NetworkConfigFile(EthereumTestRootModel):
    """Root model to describe a file that contains network configurations."""

    # Network name -> its configuration.
    root: Dict[str, NetworkConfig]

    @classmethod
    def from_yaml(cls, path: Path) -> Self:
        """Parse and validate a yaml file of network configurations."""
        raw_text = path.read_text()
        return cls.model_validate(yaml.safe_load(raw_text))
class GenesisConfig(CamelModel):
    """Config model contained in a Geth-type genesis file."""

    chain_id: int
    terminal_total_difficulty: int
    terminal_total_difficulty_passed: bool
    # Defaults to the mainnet deposit contract; any other value is surfaced
    # through `address_overrides` below.
    deposit_contract_address: Address = Address(0x00000000219AB540356CBB839CBE05303D7705FA)
    fork_activation_times: ForkActivationTimes
    blob_schedule: Dict[Fork, ForkConfigBlobSchedule]
    # Maps non-canonical genesis keys to a canonical fork name, or to None
    # when the key does not correspond to a fork tracked here.
    fork_synonyms: ClassVar[Dict[str, str | None]] = {
        # TODO: Ideally add fork synonyms, but not important for now.
        "eip150": None,
        "eip155": None,
        "eip158": None,
        "petersburg": None,
        "mergeNetsplit": "paris",
    }

    @property
    def address_overrides(self) -> AddressOverrideDict:
        """
        Get the address overrides.

        Empty when the deposit contract is at its default (mainnet) address;
        otherwise a single override mapping the default address to the
        configured one.
        """
        if self.deposit_contract_address == Address(0x00000000219AB540356CBB839CBE05303D7705FA):
            return AddressOverrideDict({})
        return AddressOverrideDict(
            {Address(0x00000000219AB540356CBB839CBE05303D7705FA): self.deposit_contract_address}
        )

    def fork(self) -> Fork:
        """Return the latest fork active at genesis (activation time/block 0)."""
        current_fork: Fork = Frontier
        for fork, activation_block_time in self.fork_activation_times.root.items():
            if activation_block_time == 0 and fork > current_fork:
                current_fork = fork
        return current_fork

    @model_validator(mode="before")
    @classmethod
    def preprocess_fork_times_blocks(cls, data: Any) -> Any:
        """
        Pre-process the dictionary to put fork block numbers and times in the
        correct format.

        Fork times and block numbers have the following format in the root of
        the object:

        ```
        "berlinBlock": 0,
        "londonBlock": 0,
        ...
        "pragueTime": 0,
        "osakaTime": 1753379304,
        ```

        This function strips the "*Block" and "*Time" part and moves the
        values into the "forkActivationTimes" key.
        """
        if isinstance(data, dict):
            fork_activation_times: Dict[str, int] = {}
            # Iterate over a copy of the keys since tracked keys are popped.
            for key in list(data.keys()):
                assert isinstance(key, str)
                if key.endswith("Block") or key.endswith("Time"):
                    if key.endswith("Block"):
                        stripped_key = key.removesuffix("Block")
                    else:
                        stripped_key = key.removesuffix("Time")
                    if stripped_key in cls.fork_synonyms:
                        synonym = cls.fork_synonyms[stripped_key]
                        if synonym:
                            stripped_key = synonym
                        else:
                            # Key does not map to a tracked fork.
                            # NOTE(review): the raw key is left in `data` here
                            # (only collected keys get popped below) — confirm
                            # extra keys are tolerated by the model.
                            continue
                    fork_activation_times[stripped_key] = data.pop(key)
            if fork_activation_times:
                data["forkActivationTimes"] = fork_activation_times
        return data
class Genesis(CamelModel):
    """Geth-type genesis file."""

    config: GenesisConfig
    # Genesis state (account allocations).
    alloc: Alloc
    fee_recipient: Address = Field(validation_alias="coinbase")
    difficulty: HexNumber
    extra_data: Bytes
    gas_limit: HexNumber
    # Genesis files encode the nonce as a hex number; convert before
    # validating it as a header nonce.
    nonce: Annotated[HeaderNonce, BeforeValidator(lambda x: HexNumber(x))]
    mixhash: Hash
    timestamp: Number
    parent_hash: Hash
    base_fee_per_gas: HexNumber = HexNumber(10**9)
    number: HexNumber = HexNumber(0)

    @cached_property
    def hash(self) -> Hash:
        """
        Calculate the genesis block hash.

        Builds a genesis header for the fork active at genesis from this
        file's environment values and the allocation's state root, then
        overwrites `extra_data` and `nonce` with the file's own values before
        hashing.
        """
        # Environment fields only; config and alloc are consumed separately.
        dumped_genesis = self.model_dump(mode="json", exclude={"config", "alloc"})
        genesis_fork = self.config.fork()
        env = Environment(**dumped_genesis).set_fork_requirements(genesis_fork)
        genesis_header = FixtureHeader.genesis(genesis_fork, env, self.alloc.state_root())
        # Re-apply fields not taken from the environment by the builder.
        genesis_header.extra_data = self.extra_data
        genesis_header.nonce = self.nonce
        return genesis_header.block_hash

    def network_config(self) -> NetworkConfig:
        """Get the network config derived from this genesis file."""
        return NetworkConfig(
            chain_id=self.config.chain_id,
            genesis_hash=self.hash,
            fork_activation_times=self.config.fork_activation_times,
            blob_schedule=self.config.blob_schedule,
            address_overrides=self.config.address_overrides,
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/tests/test_genesis.py | src/pytest_plugins/execute/eth_config/tests/test_genesis.py | """Test parsing a genesis file to generate a network configuration."""
from os.path import realpath
from pathlib import Path
import pytest
from ethereum_test_base_types import Hash
from ethereum_test_forks import (
BPO1,
BPO2,
BPO3,
BPO4,
BPO5,
Berlin,
Byzantium,
Cancun,
Constantinople,
Homestead,
Istanbul,
London,
Osaka,
Paris,
Prague,
Shanghai,
)
from ethereum_test_rpc import (
ForkConfigBlobSchedule,
)
from ..execute_types import ForkActivationTimes, Genesis, NetworkConfig
# Absolute path of this test module; used to locate fixture files that live
# next to it.
CURRENT_FILE = Path(realpath(__file__))
CURRENT_FOLDER = CURRENT_FILE.parent
@pytest.fixture
def genesis_contents(genesis_file_name: str) -> str:
    """Return the raw text of the parametrized genesis file."""
    return (CURRENT_FOLDER / genesis_file_name).read_text()
@pytest.mark.parametrize(
"genesis_file_name,expected_hash,expected_network_config",
[
pytest.param(
"genesis_example.json",
Hash(0x3A8C8CEF63859865AA1D40DED77B083EEF06A1702B8188D5586434B9C3ADC4BE),
NetworkConfig(
chain_id=7023102237,
genesis_hash=Hash(
0x3A8C8CEF63859865AA1D40DED77B083EEF06A1702B8188D5586434B9C3ADC4BE
),
fork_activation_times=ForkActivationTimes(
root={
Homestead: 0,
Byzantium: 0,
Constantinople: 0,
Istanbul: 0,
Berlin: 0,
London: 0,
Paris: 0,
Shanghai: 0,
Cancun: 0,
Prague: 0,
Osaka: 1753379304,
BPO1: 1753477608,
BPO2: 1753575912,
BPO3: 1753674216,
BPO4: 1753772520,
BPO5: 1753889256,
},
),
blob_schedule={
Cancun: ForkConfigBlobSchedule(target=3, max=6, baseFeeUpdateFraction=3338477),
Prague: ForkConfigBlobSchedule(target=6, max=9, baseFeeUpdateFraction=5007716),
Osaka: ForkConfigBlobSchedule(target=6, max=9, baseFeeUpdateFraction=5007716),
BPO1: ForkConfigBlobSchedule(target=9, max=12, baseFeeUpdateFraction=5007716),
BPO2: ForkConfigBlobSchedule(target=12, max=15, baseFeeUpdateFraction=5007716),
BPO3: ForkConfigBlobSchedule(target=15, max=18, baseFeeUpdateFraction=5007716),
BPO4: ForkConfigBlobSchedule(target=6, max=9, baseFeeUpdateFraction=5007716),
BPO5: ForkConfigBlobSchedule(target=15, max=20, baseFeeUpdateFraction=5007716),
},
),
),
],
)
def test_genesis_parsing(
    genesis_contents: str,
    expected_hash: Hash,
    expected_network_config: NetworkConfig,
) -> None:
    """
    Check that a Geth-style genesis file deserializes correctly and yields
    both the expected genesis hash and the expected network configuration.
    """
    genesis = Genesis.model_validate_json(genesis_contents)
    computed_hash = genesis.hash
    assert computed_hash == expected_hash, (
        f"Unexpected genesis hash: {computed_hash}, expected: {expected_hash}"
    )
    derived_config = genesis.network_config()
    assert derived_config == expected_network_config, (
        f"Unexpected network config: {derived_config}, expected: {expected_network_config}"
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py | src/pytest_plugins/execute/eth_config/tests/test_execute_eth_config.py | """Unit tests for the `eth_config` execute tests."""
import json
from os.path import realpath
from pathlib import Path
import pytest
import yaml
from ethereum_test_base_types import ForkHash
from ethereum_test_rpc import EthConfigResponse
from ..execute_types import NetworkConfig, NetworkConfigFile
EXPECTED_CANCUN = json.loads("""
{
"activationTime": 0,
"blobSchedule": {
"baseFeeUpdateFraction": 3338477,
"max": 6,
"target": 3
},
"chainId": "0x88bb0",
"forkId": "0xbef71d30",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02"
}
}
""")
EXPECTED_CANCUN_FORK_ID = ForkHash("0xbef71d30")
EXPECTED_PRAGUE = json.loads("""
{
"activationTime": 1742999832,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 9,
"target": 6
},
"chainId": "0x88bb0",
"forkId": "0x0929e24e",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_PRAGUE_FORK_ID = ForkHash("0x0929e24e")
EXPECTED_BPO1 = json.loads("""
{
"activationTime": 1753477608,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 12,
"target": 9
},
"chainId": "0x88bb0",
"forkId": "0x5e2e4e84",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"P256VERIFY": "0x0000000000000000000000000000000000000100",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_BPO1_FORK_ID = ForkHash("0x5e2e4e84")
EXPECTED_BPO2 = json.loads("""
{
"activationTime": 1753575912,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 15,
"target": 12
},
"chainId": "0x88bb0",
"forkId": "0x9d7b6bfb",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"P256VERIFY": "0x0000000000000000000000000000000000000100",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_BPO2_FORK_ID = ForkHash("0x9d7b6bfb")
EXPECTED_BPO3 = json.loads("""
{
"activationTime": 1753674216,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 18,
"target": 15
},
"chainId": "0x88bb0",
"forkId": "0xbebdd3a1",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"P256VERIFY": "0x0000000000000000000000000000000000000100",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_BPO3_FORK_ID = ForkHash("0xbebdd3a1")
EXPECTED_BPO4 = json.loads("""
{
"activationTime": 1753772520,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 9,
"target": 6
},
"chainId": "0x88bb0",
"forkId": "0x190c2054",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"P256VERIFY": "0x0000000000000000000000000000000000000100",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_BPO4_FORK_ID = ForkHash("0x190c2054")
EXPECTED_BPO5 = json.loads("""
{
"activationTime": 1753889256,
"blobSchedule": {
"baseFeeUpdateFraction": 5007716,
"max": 20,
"target": 15
},
"chainId": "0x88bb0",
"forkId": "0xd3a4880b",
"precompiles": {
"BLAKE2F": "0x0000000000000000000000000000000000000009",
"BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
"BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
"BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
"BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
"BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
"BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
"BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
"BN254_ADD": "0x0000000000000000000000000000000000000006",
"BN254_MUL": "0x0000000000000000000000000000000000000007",
"BN254_PAIRING": "0x0000000000000000000000000000000000000008",
"ECREC": "0x0000000000000000000000000000000000000001",
"ID": "0x0000000000000000000000000000000000000004",
"KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
"MODEXP": "0x0000000000000000000000000000000000000005",
"P256VERIFY": "0x0000000000000000000000000000000000000100",
"RIPEMD160": "0x0000000000000000000000000000000000000003",
"SHA256": "0x0000000000000000000000000000000000000002"
},
"systemContracts": {
"BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
"CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
"DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
"HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
"WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
}
}
""")
EXPECTED_BPO5_FORK_ID = ForkHash("0xd3a4880b")
CURRENT_FILE = Path(realpath(__file__))
CURRENT_FOLDER = CURRENT_FILE.parent
STATIC_NETWORK_CONFIGS = """
# Static network configs so updates to the network configs don't break the tests.
Mainnet:
chainId: 0x1
genesisHash: 0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3
forkActivationTimes:
Frontier: 0
Homestead: 1150000
DAOFork: 1920000
Tangerine: 2463000
SpuriousDragon: 2675000
Byzantium: 4370000
Constantinople: 7280000
Istanbul: 9069000
MuirGlacier: 9200000
Berlin: 12244000
London: 12965000
ArrowGlacier: 13773000
GrayGlacier: 15050000
Shanghai: 1681338455
Cancun: 1710338135
Prague: 1746612311
blobSchedule:
Cancun:
target: 3
max: 6
baseFeeUpdateFraction: 3338477
Prague:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
Sepolia:
chainId: 0xaa36a7
genesisHash: 0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9
forkActivationTimes:
Berlin: 0
London: 1735371
Shanghai: 1677557088
Cancun: 1706655072
Prague: 1741159776
blobSchedule:
Cancun:
target: 3
max: 6
baseFeeUpdateFraction: 3338477
Prague:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
addressOverrides:
0x00000000219ab540356cbb839cbe05303d7705fa: 0x7f02c3e3c98b133055b8b348b2ac625669ed295d
Hoodi:
chainId: 0x88BB0
genesisHash: 0xbbe312868b376a3001692a646dd2d7d1e4406380dfd86b98aa8a34d1557c971b
forkActivationTimes:
Cancun: 0
Prague: 1742999832
blobSchedule:
Cancun:
target: 3
max: 6
baseFeeUpdateFraction: 3338477
Prague:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
Holesky:
chainId: 0x4268
genesisHash: 0xb5f7f912443c940f21fd611f12828d75b534364ed9e95ca4e307729a4661bde4
forkActivationTimes:
Paris: 0
Shanghai: 1696000704
Cancun: 1707305664
Prague: 1740434112
addressOverrides:
0x00000000219ab540356cbb839cbe05303d7705fa: 0x4242424242424242424242424242424242424242
blobSchedule:
Cancun:
target: 3
max: 6
baseFeeUpdateFraction: 3338477
Prague:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
# Test-only network configs.
HoodiWithBPOs:
chainId: 0x88BB0
genesisHash: 0xbbe312868b376a3001692a646dd2d7d1e4406380dfd86b98aa8a34d1557c971b
forkActivationTimes:
Cancun: 0
Prague: 1742999832
Osaka: 1753477608
BPO1: 1753575912
BPO2: 1753674216
BPO3: 1753772520
BPO4: 1753889256
blobSchedule:
Cancun:
target: 3
max: 6
baseFeeUpdateFraction: 3338477
Prague:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
Osaka:
target: 9
max: 12
baseFeeUpdateFraction: 5007716
BPO1:
target: 12
max: 15
baseFeeUpdateFraction: 5007716
BPO2:
target: 15
max: 18
baseFeeUpdateFraction: 5007716
BPO3:
target: 6
max: 9
baseFeeUpdateFraction: 5007716
BPO4:
target: 15
max: 20
baseFeeUpdateFraction: 5007716
""" # W505
@pytest.fixture(scope="session")
def network_configs() -> NetworkConfigFile:
    """Parse the static in-module network configurations once per session."""
    parsed_yaml = yaml.safe_load(STATIC_NETWORK_CONFIGS)
    return NetworkConfigFile(root=parsed_yaml)
@pytest.fixture
def network(request: pytest.FixtureRequest, network_configs: NetworkConfigFile) -> NetworkConfig:
    """Resolve the indirectly-parametrized network name to its configuration."""
    requested = request.param
    assert requested in network_configs.root, (
        f"Network {requested} could not be found in network_configs."
    )
    return network_configs.root[requested]
@pytest.fixture
def eth_config(network: NetworkConfig, current_time: int) -> EthConfigResponse:
    """
    Compute the `eth_config` response for the network at the given time; this
    is the value all tests verify.
    """
    response = network.get_eth_config(current_time)
    return response
def _assert_same_fork_config(actual, expected) -> None:
    """Assert that two fork configurations match (JSON dump and fork id)."""
    assert actual.model_dump(mode="json", by_alias=True) == expected.model_dump(
        mode="json", by_alias=True
    ), (
        f"Expected {expected.model_dump_json()} but got "
        f"{actual.model_dump_json()}"
    )
    assert actual.fork_id == expected.fork_id, (
        f"Expected {expected.fork_id} but got {actual.fork_id}"
    )


@pytest.mark.parametrize(
    [
        "network",
        "current_time",
        "expected_eth_config",
    ],
    [
        pytest.param(
            "Hoodi",
            0,
            EthConfigResponse(
                current=EXPECTED_CANCUN,
                next=EXPECTED_PRAGUE,
                last=EXPECTED_PRAGUE,
            ),
            id="Hoodi_cancun",
        ),
        pytest.param(
            "Hoodi",
            1753477608,
            EthConfigResponse(
                current=EXPECTED_PRAGUE,
            ),
            id="Hoodi_prague",
        ),
        pytest.param(
            "HoodiWithBPOs",
            1742999832,
            EthConfigResponse(
                current=EXPECTED_PRAGUE,
                next=EXPECTED_BPO1,
                last=EXPECTED_BPO5,
            ),
            id="Hoodi_prague_with_bpos_1",
        ),
        pytest.param(
            "HoodiWithBPOs",
            1753575912,
            EthConfigResponse(
                current=EXPECTED_BPO2,
                next=EXPECTED_BPO3,
                last=EXPECTED_BPO5,
            ),
            id="Hoodi_prague_with_bpos_2",
        ),
        pytest.param(
            "HoodiWithBPOs",
            1753674216,
            EthConfigResponse(
                current=EXPECTED_BPO3,
                next=EXPECTED_BPO4,
                last=EXPECTED_BPO5,
            ),
            id="Hoodi_prague_with_bpos_3",
        ),
        pytest.param(
            "HoodiWithBPOs",
            1753772520,
            EthConfigResponse(
                current=EXPECTED_BPO4,
                next=EXPECTED_BPO5,
                last=EXPECTED_BPO5,
            ),
            id="Hoodi_prague_with_bpos_4",
        ),
        pytest.param(
            "HoodiWithBPOs",
            1753889256,
            EthConfigResponse(
                current=EXPECTED_BPO5,
            ),
            id="Hoodi_prague_with_bpos_5",
        ),
    ],
    indirect=["network"],
)
def test_fork_config_from_fork(
    eth_config: EthConfigResponse,
    expected_eth_config: EthConfigResponse,
) -> None:
    """Test the `fork_config_from_fork` function."""
    # `current` is mandatory in the response, so it is compared directly.
    _assert_same_fork_config(eth_config.current, expected_eth_config.current)
    # `next` and `last` are optional: verify presence first, then contents.
    if expected_eth_config.next is not None:
        assert eth_config.next is not None, "Expected next to be not None"
        _assert_same_fork_config(eth_config.next, expected_eth_config.next)
    else:
        assert eth_config.next is None, "Expected next to be None"
    if expected_eth_config.last is not None:
        assert eth_config.last is not None, "Expected last to be not None"
        _assert_same_fork_config(eth_config.last, expected_eth_config.last)
    else:
        assert eth_config.last is None, "Expected last to be None"
@pytest.mark.parametrize(
    [
        "network",
        "current_time",
        "expected_current_fork_id",
        "expected_next_fork_id",
        "expected_last_fork_id",
    ],
    [
        pytest.param(
            "Mainnet",
            1746612310,  # Right before Prague activation
            ForkHash(0x9F3D2254),
            ForkHash(0xC376CF8B),
            ForkHash(0xC376CF8B),
            id="mainnet_cancun",
        ),
        pytest.param(
            "Sepolia",
            1741159775,  # Right before Prague activation
            ForkHash(0x88CF81D9),
            ForkHash(0xED88B5FD),
            ForkHash(0xED88B5FD),
            id="sepolia_cancun",
        ),
        pytest.param(
            "Holesky",
            1740434111,  # Right before Prague activation
            ForkHash(0x9B192AD0),
            ForkHash(0xDFBD9BED),
            ForkHash(0xDFBD9BED),
            id="holesky_cancun",
        ),
        pytest.param(
            "Hoodi",
            1742999831,  # Right before Prague activation
            ForkHash(0xBEF71D30),
            ForkHash(0x0929E24E),
            ForkHash(0x0929E24E),
            id="hoodi_prague",
        ),
    ],
    indirect=["network"],
)
def test_fork_ids(
    eth_config: EthConfigResponse,
    expected_current_fork_id: ForkHash,
    expected_next_fork_id: ForkHash | None,
    expected_last_fork_id: ForkHash | None,
) -> None:
    """Test various configurations of fork Ids for different timestamps."""
    assert expected_current_fork_id == eth_config.current.fork_id, (
        f"Unexpected current fork id: {eth_config.current.fork_id} != {expected_current_fork_id}"
    )
    # `next` and `last` follow the same optional pattern; check them uniformly.
    for label, expected_fork_id in (
        ("next", expected_next_fork_id),
        ("last", expected_last_fork_id),
    ):
        actual_config = getattr(eth_config, label)
        if expected_fork_id is None:
            assert actual_config is None, f"Expected {label} to be None"
            continue
        assert actual_config is not None, f"Expected {label} to be not None"
        assert expected_fork_id == actual_config.fork_id, (
            f"Unexpected {label} fork id: {actual_config.fork_id} != {expected_fork_id}"
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/eth_config/tests/__init__.py | src/pytest_plugins/execute/eth_config/tests/__init__.py | """Unit tests for the `eth_config` pytest plugin package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py | src/pytest_plugins/execute/rpc/chain_builder_eth_rpc.py | """
Chain builder Ethereum RPC that can drive the chain when new transactions are
submitted.
"""
import time
from pathlib import Path
from typing import Any, Dict, Iterator, List
from filelock import FileLock
from pydantic import RootModel
from typing_extensions import Self
from ethereum_test_base_types import HexNumber
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC
from ethereum_test_rpc import EthRPC as BaseEthRPC
from ethereum_test_rpc.rpc_types import (
ForkchoiceState,
PayloadAttributes,
PayloadStatusEnum,
TransactionByHashResponse,
)
from ethereum_test_tools import (
Address,
Hash,
Transaction,
)
from ethereum_test_types.trie import keccak256
class HashList(RootModel[List[Hash]]):
    """Pydantic root model wrapping a mutable list of hashes."""

    root: List[Hash]

    def __len__(self) -> int:
        """Return how many hashes are currently stored."""
        return len(self.root)

    def __iter__(self) -> Iterator[Hash]:  # type: ignore
        """Yield the stored hashes in insertion order."""
        return iter(self.root)

    def __contains__(self, item: Hash) -> bool:
        """Return whether ``item`` is one of the stored hashes."""
        return item in self.root

    def append(self, item: Hash) -> None:
        """Add ``item`` to the end of the list."""
        self.root.append(item)

    def remove(self, item: Hash) -> None:
        """Delete the first occurrence of ``item`` from the list."""
        self.root.remove(item)

    def clear(self) -> None:
        """Drop every stored hash."""
        self.root.clear()
class AddressList(RootModel[List[Address]]):
    """Pydantic root model wrapping a mutable list of addresses."""

    root: List[Address]

    def __len__(self) -> int:
        """Return how many addresses are currently stored."""
        return len(self.root)

    def __iter__(self) -> Iterator[Address]:  # type: ignore
        """Yield the stored addresses in insertion order."""
        return iter(self.root)

    def __contains__(self, item: Address) -> bool:
        """Return whether ``item`` is one of the stored addresses."""
        return item in self.root

    def append(self, item: Address) -> None:
        """Add ``item`` to the end of the list."""
        self.root.append(item)

    def remove(self, item: Address) -> None:
        """Delete the first occurrence of ``item`` from the list."""
        self.root.remove(item)

    def clear(self) -> None:
        """Drop every stored address."""
        self.root.clear()
class PendingTxHashes:
    """
    A class to manage the pending transaction hashes in a multi-process
    environment.

    It uses a lock file to ensure that only one process can access the pending
    hashes file at a time. The hashes themselves are persisted as JSON in a
    shared file so every worker sees the same pending set.
    """

    # Path to the shared JSON file holding the pending hashes.
    pending_hashes_file: Path
    # Path to the lock file guarding access to `pending_hashes_file`.
    pending_hashes_lock: Path
    # In-memory copy of the hashes; only populated while the lock is held.
    pending_tx_hashes: HashList | None
    # Active file lock; None when not inside the context manager.
    lock: FileLock | None

    def __init__(self, temp_folder: Path):
        """Initialize the pending transaction hashes manager."""
        self.pending_hashes_file = temp_folder / "pending_tx_hashes"
        self.pending_hashes_lock = temp_folder / "pending_tx_hashes.lock"
        self.pending_tx_hashes = None
        self.lock = None

    def __enter__(self) -> Self:
        """Acquire the file lock and load the pending hashes from disk."""
        assert self.lock is None, "Lock already acquired"
        self.lock = FileLock(self.pending_hashes_lock, timeout=-1)
        self.lock.acquire()
        assert self.pending_tx_hashes is None, "Pending transaction hashes already loaded"
        if self.pending_hashes_file.exists():
            with open(self.pending_hashes_file, "r") as f:
                self.pending_tx_hashes = HashList.model_validate_json(f.read())
        else:
            self.pending_tx_hashes = HashList([])
        return self

    def __exit__(self, exc_type: object, exc_value: object, traceback: object) -> None:
        """Flush the pending hashes to the file and release the lock."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        try:
            with open(self.pending_hashes_file, "w") as f:
                f.write(self.pending_tx_hashes.model_dump_json())
        finally:
            # Always release the lock, even if flushing fails; otherwise
            # other workers would deadlock waiting on the lock file.
            self.lock.release()
            self.lock = None
            self.pending_tx_hashes = None

    def append(self, tx_hash: Hash) -> None:
        """Add a transaction hash to the pending list."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        self.pending_tx_hashes.append(tx_hash)

    def clear(self) -> None:
        """Remove all transaction hashes from the pending list."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        self.pending_tx_hashes.clear()

    def remove(self, tx_hash: Hash) -> None:
        """Remove a transaction hash from the pending list."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        self.pending_tx_hashes.remove(tx_hash)

    def __contains__(self, tx_hash: Hash) -> bool:
        """Check if a transaction hash is in the pending list."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        return tx_hash in self.pending_tx_hashes

    def __len__(self) -> int:
        """Get the number of pending transaction hashes."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        return len(self.pending_tx_hashes)

    def __iter__(self) -> Iterator[Hash]:
        """Iterate over the pending transaction hashes."""
        assert self.lock is not None, "Lock not acquired"
        assert self.pending_tx_hashes is not None, "Pending transaction hashes not loaded"
        return iter(self.pending_tx_hashes)
class ChainBuilderEthRPC(BaseEthRPC, namespace="eth"):
    """
    Special type of Ethereum RPC client that also has access to the Engine API
    and automatically coordinates block generation based on the number of
    pending transactions or a block generation interval.

    Blocks are built via the Engine API sequence:
    forkchoice_updated (with payload attributes) -> get_payload ->
    new_payload -> forkchoice_updated (to the new head).
    """

    # Fork under test; used to pick Engine API method versions and
    # fork-dependent payload attributes.
    fork: Fork
    engine_rpc: EngineRPC
    # Number of pending transactions that triggers building a new block.
    transactions_per_block: int
    # Seconds to wait between forkchoice_updated and get_payload so the
    # client has time to assemble the payload.
    get_payload_wait_time: float
    # Cross-worker shared set of transaction hashes not yet included.
    pending_tx_hashes: PendingTxHashes

    def __init__(
        self,
        *,
        rpc_endpoint: str,
        fork: Fork,
        engine_rpc: EngineRPC,
        transactions_per_block: int,
        session_temp_folder: Path,
        get_payload_wait_time: float,
        initial_forkchoice_update_retries: int = 5,
        transaction_wait_timeout: int = 60,
    ):
        """Initialize the Ethereum RPC client for the hive simulator."""
        super().__init__(
            rpc_endpoint,
            transaction_wait_timeout=transaction_wait_timeout,
        )
        self.fork = fork
        self.engine_rpc = engine_rpc
        self.transactions_per_block = transactions_per_block
        self.pending_tx_hashes = PendingTxHashes(session_temp_folder)
        self.get_payload_wait_time = get_payload_wait_time
        # Send initial forkchoice updated only if we are the first worker:
        # a marker file in the session temp folder records that it was done,
        # and an ".err" sentinel makes all later workers fail fast if the
        # first attempt did not complete.
        base_name = "eth_rpc_forkchoice_updated"
        base_file = session_temp_folder / base_name
        base_error_file = session_temp_folder / f"{base_name}.err"
        base_lock_file = session_temp_folder / f"{base_name}.lock"
        with FileLock(base_lock_file):
            if base_error_file.exists():
                raise Exception("Error occurred during initial forkchoice_updated")
            if not base_file.exists():
                base_error_file.touch()  # Assume error until proven otherwise
                # Get the head block hash
                head_block = self.get_block_by_number("latest")
                assert head_block is not None
                # Send initial forkchoice updated
                forkchoice_state = ForkchoiceState(
                    head_block_hash=head_block["hash"],
                )
                forkchoice_version = self.fork.engine_forkchoice_updated_version()
                assert forkchoice_version is not None, (
                    "Fork does not support engine forkchoice_updated"
                )
                # Retry a few times: the client may still be starting up and
                # return a non-VALID status on the first attempts.
                for _ in range(initial_forkchoice_update_retries):
                    response = self.engine_rpc.forkchoice_updated(
                        forkchoice_state,
                        None,
                        version=forkchoice_version,
                    )
                    if response.payload_status.status == PayloadStatusEnum.VALID:
                        break
                    time.sleep(0.5)
                else:
                    raise Exception("Initial forkchoice_updated was invalid")
                base_error_file.unlink()  # Success
                base_file.touch()

    def generate_block(self: "ChainBuilderEthRPC") -> None:
        """Generate a block using the Engine API."""
        # Get the head block hash
        head_block = self.get_block_by_number("latest")
        assert head_block is not None
        forkchoice_state = ForkchoiceState(
            head_block_hash=head_block["hash"],
        )
        parent_beacon_block_root = (
            Hash(0) if self.fork.header_beacon_root_required(block_number=0, timestamp=0) else None
        )
        # Build the payload attributes according to what the fork requires
        # (withdrawals, beacon root, blob counts).
        payload_attributes = PayloadAttributes(
            timestamp=HexNumber(head_block["timestamp"]) + 1,
            prev_randao=Hash(0),
            suggested_fee_recipient=Address(0),
            withdrawals=[] if self.fork.header_withdrawals_required() else None,
            parent_beacon_block_root=parent_beacon_block_root,
            target_blobs_per_block=(
                self.fork.target_blobs_per_block(block_number=0, timestamp=0)
                if self.fork.engine_payload_attribute_target_blobs_per_block(
                    block_number=0, timestamp=0
                )
                else None
            ),
            max_blobs_per_block=(
                self.fork.max_blobs_per_block(block_number=0, timestamp=0)
                if self.fork.engine_payload_attribute_max_blobs_per_block(
                    block_number=0, timestamp=0
                )
                else None
            ),
        )
        forkchoice_updated_version = self.fork.engine_forkchoice_updated_version()
        assert forkchoice_updated_version is not None, (
            "Fork does not support engine forkchoice_updated"
        )
        # Step 1: ask the client to start building a payload on top of head.
        response = self.engine_rpc.forkchoice_updated(
            forkchoice_state,
            payload_attributes,
            version=forkchoice_updated_version,
        )
        assert response.payload_status.status == PayloadStatusEnum.VALID, "Payload was invalid"
        assert response.payload_id is not None, "payload_id was not returned by the client"
        # Give the client time to include transactions in the payload.
        time.sleep(self.get_payload_wait_time)
        get_payload_version = self.fork.engine_get_payload_version()
        assert get_payload_version is not None, "Fork does not support engine get_payload"
        # Step 2: retrieve the built payload.
        new_payload = self.engine_rpc.get_payload(
            response.payload_id,
            version=get_payload_version,
        )
        # Step 3: feed the payload back via new_payload; the argument list
        # grows with fork requirements (blob hashes, beacon root, requests).
        new_payload_args: List[Any] = [new_payload.execution_payload]
        if new_payload.blobs_bundle is not None:
            new_payload_args.append(new_payload.blobs_bundle.blob_versioned_hashes())
        if parent_beacon_block_root is not None:
            new_payload_args.append(parent_beacon_block_root)
        if new_payload.execution_requests is not None:
            new_payload_args.append(new_payload.execution_requests)
        new_payload_version = self.fork.engine_new_payload_version()
        assert new_payload_version is not None, "Fork does not support engine new_payload"
        new_payload_response = self.engine_rpc.new_payload(
            *new_payload_args, version=new_payload_version
        )
        assert new_payload_response.status == PayloadStatusEnum.VALID, "Payload was invalid"
        # Step 4: make the new block the canonical head.
        new_forkchoice_state = ForkchoiceState(
            head_block_hash=new_payload.execution_payload.block_hash,
        )
        response = self.engine_rpc.forkchoice_updated(
            new_forkchoice_state,
            None,
            version=forkchoice_updated_version,
        )
        assert response.payload_status.status == PayloadStatusEnum.VALID, "Payload was invalid"
        # Drop the included transactions from the shared pending set.
        # NOTE: caller must already hold the pending_tx_hashes lock.
        for tx in new_payload.execution_payload.transactions:
            tx_hash = Hash(keccak256(tx))
            if tx_hash in self.pending_tx_hashes:
                self.pending_tx_hashes.remove(tx_hash)

    def send_transaction(self, transaction: Transaction) -> Hash:
        """`eth_sendRawTransaction`: Send a transaction to the client."""
        returned_hash = super().send_transaction(transaction)
        with self.pending_tx_hashes:
            # The locally computed hash is tracked; presumably it matches the
            # hash returned by the client — TODO(review): confirm.
            self.pending_tx_hashes.append(transaction.hash)
            if len(self.pending_tx_hashes) >= self.transactions_per_block:
                self.generate_block()
        return returned_hash

    def wait_for_transaction(self, transaction: Transaction) -> TransactionByHashResponse:
        """
        Wait for a specific transaction to be included in a block.

        Waits for a specific transaction to be included in a block by polling
        `eth_getTransactionByHash` until it is confirmed or a timeout occurs.

        Args:
            transaction: The transaction to track.

        Returns:
            The transaction details after it is included in a block.

        """
        return self.wait_for_transactions([transaction])[0]

    def wait_for_transactions(
        self, transactions: List[Transaction]
    ) -> List[TransactionByHashResponse]:
        """
        Wait for all transactions in the provided list to be included in a
        block.

        Waits for all transactions in the provided list to be included in a
        block by polling `eth_getTransactionByHash` until they are confirmed
        or a timeout occurs.

        Args:
            transactions: A list of transactions to track.

        Returns:
            A list of transaction details after they are included in a block.

        Raises:
            Exception: If one or more transactions are not included in a block
                within the timeout period.

        """
        tx_hashes = [tx.hash for tx in transactions]
        responses: List[TransactionByHashResponse] = []
        pending_responses: Dict[Hash, TransactionByHashResponse] = {}
        start_time = time.time()
        pending_transactions_handler = PendingTransactionHandler(self)
        while True:
            tx_id = 0
            pending_responses = {}
            # Sweep the remaining hashes; included ones are popped (so the
            # index is deliberately NOT incremented in that branch).
            while tx_id < len(tx_hashes):
                tx_hash = tx_hashes[tx_id]
                tx = self.get_transaction_by_hash(tx_hash)
                # NOTE(review): assumes the client returns a response for
                # known-but-pending transactions — a None aborts the wait.
                assert tx is not None, f"Transaction {tx_hash} not found"
                if tx.block_number is not None:
                    responses.append(tx)
                    tx_hashes.pop(tx_id)
                else:
                    pending_responses[tx_hash] = tx
                    tx_id += 1
            if not tx_hashes:
                return responses
            # Possibly trigger block generation to make progress.
            pending_transactions_handler.handle()
            if (time.time() - start_time) > self.transaction_wait_timeout:
                break
            time.sleep(0.1)
        missing_txs_strings = [
            f"{tx.hash} ({tx.model_dump_json()})" for tx in transactions if tx.hash in tx_hashes
        ]
        pending_tx_responses_string = "\n".join(
            [f"{tx_hash}: {tx.model_dump_json()}" for tx_hash, tx in pending_responses.items()]
        )
        raise Exception(
            f"Transactions {', '.join(missing_txs_strings)} were not included in a block "
            f"within {self.transaction_wait_timeout} seconds:\n"
            f"{pending_tx_responses_string}"
        )
class PendingTransactionHandler:
    """
    Drives block generation from the shared pending-transaction set.

    A block is generated when the pending count reaches the per-block limit,
    or — to avoid a deadlock when the count stalls — every
    ``block_generation_interval`` polls without new transactions.

    Attributes:
        block_generation_interval: The number of iterations after which a block
            is generated if no new transactions are added (default: 10).

    """

    chain_builder_eth_rpc: ChainBuilderEthRPC
    block_generation_interval: int
    last_pending_tx_hashes_count: int | None = None
    i: int = 0

    def __init__(
        self, chain_builder_eth_rpc: ChainBuilderEthRPC, block_generation_interval: int = 10
    ):
        """Initialize the pending transaction handler."""
        self.chain_builder_eth_rpc = chain_builder_eth_rpc
        self.block_generation_interval = block_generation_interval

    def handle(self) -> None:
        """
        Handle pending transactions and generate blocks if necessary.

        If the number of pending transactions reaches the limit, a block is
        generated. If the pending count has not changed since the last poll
        and the block generation interval has elapsed, a block is generated
        anyway to avoid a potential deadlock.
        """
        rpc = self.chain_builder_eth_rpc
        with rpc.pending_tx_hashes:
            pending_count = len(rpc.pending_tx_hashes)
            if pending_count >= rpc.transactions_per_block:
                rpc.generate_block()
            elif (
                self.last_pending_tx_hashes_count is not None
                and pending_count == self.last_pending_tx_hashes_count
                and self.i % self.block_generation_interval == 0
            ):
                # No progress since the last poll: force a block so waiting
                # transactions are not stuck forever.
                rpc.generate_block()
            # Re-read the count: generate_block() removes included hashes.
            self.last_pending_tx_hashes_count = len(rpc.pending_tx_hashes)
            self.i += 1
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/rpc/remote_seed_sender.py | src/pytest_plugins/execute/rpc/remote_seed_sender.py | """Seed sender on a remote execution client."""
import pytest
from ethereum_test_base_types import Hash, Number
from ethereum_test_rpc import EthRPC
from ethereum_test_types import EOA
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the remote-seed-sender command-line options."""
    group = parser.getgroup(
        "remote_seed_sender",
        "Arguments for the remote seed sender",
    )
    group.addoption(
        "--rpc-seed-key",
        action="store",
        required=True,
        dest="rpc_seed_key",
        help=(
            "Seed key used to fund all sender keys. This account must have a balance of at least "
            "`sender_key_initial_balance` * `workers` + gas fees. It should also be "
            "exclusively used by this command because the nonce is only checked once and if "
            "it's externally increased, the seed transactions might fail."
        ),
    )
@pytest.fixture(scope="session")
def seed_sender(request: pytest.FixtureRequest, eth_rpc: EthRPC) -> EOA:
    """Create seed sender account by checking its balance and nonce."""
    seed_key = Hash(request.config.getoption("rpc_seed_key"))
    sender = EOA(key=seed_key)
    # Query the chain's current nonce so seed transactions start from the
    # client's view of the account.
    sender.nonce = Number(eth_rpc.get_transaction_count(sender))
    return sender
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/rpc/hive.py | src/pytest_plugins/execute/rpc/hive.py | """Pytest plugin to run the test-execute in hive-mode."""
import io
import json
from dataclasses import asdict, replace
from pathlib import Path
from random import randint
from typing import Generator, Mapping, Tuple, cast
import pytest
from filelock import FileLock
from hive.client import Client, ClientType
from hive.simulation import Simulation
from hive.testing import HiveTest, HiveTestResult, HiveTestSuite
from ethereum_test_base_types import EmptyOmmersRoot, EmptyTrieRoot, to_json
from ethereum_test_fixtures.blockchain import FixtureHeader
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC, EthRPC
from ethereum_test_tools import (
EOA,
Account,
Alloc,
Environment,
Hash,
Withdrawal,
)
from ethereum_test_types import ChainConfig, Requests
from ...consume.simulators.helpers.ruleset import ruleset
from .chain_builder_eth_rpc import ChainBuilderEthRPC
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register the hive-RPC command-line options."""
    group = parser.getgroup(
        "hive_rpc", "Arguments defining the hive RPC client properties for the test."
    )
    group.addoption(
        "--sender-key-initial-balance",
        action="store",
        dest="sender_key_initial_balance",
        type=int,
        default=10**26,
        help=(
            "Initial balance of each sender key. There is one sender key per worker process "
            "(`-n` option)."
        ),
    )
    group.addoption(
        "--tx-wait-timeout",
        action="store",
        dest="tx_wait_timeout",
        type=int,
        default=10,  # Lowered from Remote RPC because of the consistent block production
        help="Maximum time in seconds to wait for a transaction to be included in a block",
    )
@pytest.hookimpl(trylast=True)
def pytest_configure(config: pytest.Config) -> None:  # noqa: D103
    # Runs after other implementations of this hook (trylast): one hive test
    # suite and one client are shared across the entire session.
    config.test_suite_scope = "session"  # type: ignore
    # This plugin drives the chain itself via the Engine API.
    config.engine_rpc_supported = True  # type: ignore
@pytest.fixture(scope="session")
def seed_sender(session_temp_folder: Path) -> EOA:
    """
    Determine the seed sender account for the client's genesis.

    All xdist workers must agree on the same account, so the first worker to
    acquire the file lock generates a random key and persists it; the others
    read it back from the shared file.
    """
    base_name = "seed_sender"
    base_file = session_temp_folder / base_name
    base_lock_file = session_temp_folder / f"{base_name}.lock"
    with FileLock(base_lock_file):
        if base_file.exists():
            # Another worker already created the key: reuse it.
            with base_file.open("r") as f:
                seed_sender_key = Hash(f.read())
            seed_sender = EOA(key=seed_sender_key)
        else:
            # First worker: generate a random key and persist it for others.
            seed_sender = EOA(key=randint(0, 2**256))
            with base_file.open("w") as f:
                f.write(str(seed_sender.key))
    return seed_sender
@pytest.fixture(scope="session")
def base_pre(
    request: pytest.FixtureRequest,
    seed_sender: EOA,
    worker_count: int,
) -> Alloc:
    """Pre-allocation for the client's genesis."""
    per_worker_balance = request.config.getoption("sender_key_initial_balance")
    # Fund enough for every worker's sender key, plus 1 ETH for gas headroom.
    total_balance = (worker_count * per_worker_balance) + 10**18
    return Alloc({seed_sender: Account(balance=total_balance)})
@pytest.fixture(scope="session")
def base_pre_genesis(
    session_fork: Fork,
    base_pre: Alloc,
) -> Tuple[Alloc, FixtureHeader]:
    """
    Create a genesis block from the blockchain test definition.

    Returns the merged pre-allocation (fork defaults + test funding) and the
    genesis header built from an environment adjusted to the session fork.
    """
    env = Environment().set_fork_requirements(session_fork)
    assert env.withdrawals is None or len(env.withdrawals) == 0, (
        "withdrawals must be empty at genesis"
    )
    assert env.parent_beacon_block_root is None or env.parent_beacon_block_root == Hash(0), (
        "parent_beacon_block_root must be empty at genesis"
    )
    # Merge the fork's mandatory pre-allocation (e.g. system contracts) with
    # the test's funded accounts.
    pre_alloc = Alloc.merge(
        Alloc.model_validate(session_fork.pre_allocation_blockchain()),
        base_pre,
    )
    if empty_accounts := pre_alloc.empty_accounts():
        raise Exception(f"Empty accounts in pre state: {empty_accounts}")
    state_root = pre_alloc.state_root()
    block_number = 0
    timestamp = 1
    genesis = FixtureHeader(
        parent_hash=0,
        ommers_hash=EmptyOmmersRoot,
        fee_recipient=0,
        state_root=state_root,
        transactions_trie=EmptyTrieRoot,
        receipts_root=EmptyTrieRoot,
        logs_bloom=0,
        # 0x20000 is the customary minimum pre-merge difficulty default.
        difficulty=0x20000 if env.difficulty is None else env.difficulty,
        number=block_number,
        gas_limit=env.gas_limit,
        gas_used=0,
        timestamp=timestamp,
        extra_data=b"\x00",
        prev_randao=0,
        nonce=0,
        base_fee_per_gas=env.base_fee_per_gas,
        blob_gas_used=env.blob_gas_used,
        excess_blob_gas=env.excess_blob_gas,
        withdrawals_root=(
            Withdrawal.list_root(env.withdrawals) if env.withdrawals is not None else None
        ),
        parent_beacon_block_root=env.parent_beacon_block_root,
        # Empty requests hash only on forks that require the header field.
        requests_hash=Requests()
        if session_fork.header_requests_required(block_number=block_number, timestamp=timestamp)
        else None,
    )
    return (pre_alloc, genesis)
@pytest.fixture(scope="session")
def client_genesis(base_pre_genesis: Tuple[Alloc, FixtureHeader]) -> dict:
    """
    Convert the fixture's genesis block header and pre-state to a client
    genesis state.
    """
    pre_alloc, genesis_header = base_pre_genesis
    genesis = to_json(genesis_header)  # NOTE: to_json() excludes None values
    alloc = to_json(pre_alloc)
    # NOTE: nethermind requires account keys without '0x' prefix
    genesis["alloc"] = {address.replace("0x", ""): account for address, account in alloc.items()}
    return genesis
@pytest.fixture(scope="session")
def buffered_genesis(client_genesis: dict) -> io.BufferedReader:
    """
    Create a buffered reader for the genesis block header of the current test
    fixture.
    """
    genesis_bytes = json.dumps(client_genesis).encode("utf-8")
    raw_stream = cast(io.RawIOBase, io.BytesIO(genesis_bytes))
    return io.BufferedReader(raw_stream)
@pytest.fixture(scope="session")
def client_files(
    buffered_genesis: io.BufferedReader,
) -> Mapping[str, io.BufferedReader]:
    """
    Define the files that hive will start the client with.

    For this type of test, only the genesis is passed
    """
    return {"/genesis.json": buffered_genesis}
@pytest.fixture(scope="session")
def environment(session_fork: Fork, chain_config: ChainConfig) -> dict:
    """
    Define the environment that hive will start the client with using the fork
    rules specific for the simulator.
    """
    assert session_fork in ruleset, f"fork '{session_fork}' missing in hive ruleset"
    env = {
        "HIVE_CHAIN_ID": str(chain_config.chain_id),
        "HIVE_FORK_DAO_VOTE": "1",
        "HIVE_NODETYPE": "full",
    }
    # Fork-activation variables from the ruleset, rendered as decimal strings.
    for key, value in ruleset[session_fork].items():
        env[key] = f"{value:d}"
    return env
@pytest.fixture(scope="session")
def test_suite_name() -> str:
    """The name of the hive test suite used in this simulator."""
    suite_name = "eest/execute, hive mode"
    return suite_name
@pytest.fixture(scope="session")
def test_suite_description() -> str:
    """The description of the hive test suite used in this simulator."""
    suite_description = "Execute EEST tests using hive endpoint."
    return suite_description
@pytest.fixture(autouse=True, scope="session")
def base_hive_test(
    request: pytest.FixtureRequest, test_suite: HiveTestSuite, session_temp_folder: Path
) -> Generator[HiveTest, None, None]:
    """
    Test (base) used to deploy the main client to be used throughout all tests.

    Workers coordinate via files in the session temp folder: the first worker
    starts the hive test and serializes it; the others deserialize it. A
    shared "users" counter tracks how many workers are still using the test
    so only the last one to finish ends it.
    """
    base_name = "base_hive_test"
    base_file = session_temp_folder / base_name
    base_lock_file = session_temp_folder / f"{base_name}.lock"
    with FileLock(base_lock_file):
        if base_file.exists():
            # Another worker already started the hive test: reuse it.
            with open(base_file, "r") as f:
                test = HiveTest(**json.load(f))
        else:
            test = test_suite.start_test(
                name="Base Hive Test",
                description=(
                    "Base test used to deploy the main client to be used throughout all tests."
                ),
            )
            with open(base_file, "w") as f:
                json.dump(asdict(test), f)
    # Reference-count the number of workers sharing this test.
    users_file_name = f"{base_name}_users"
    users_file = session_temp_folder / users_file_name
    users_lock_file = session_temp_folder / f"{users_file_name}.lock"
    with FileLock(users_lock_file):
        if users_file.exists():
            with open(users_file, "r") as f:
                users = json.load(f)
        else:
            users = 0
        users += 1
        with open(users_file, "w") as f:
            json.dump(users, f)
    yield test
    # Teardown: report failure if any test in this worker's session failed.
    test_pass = True
    test_details = "All tests have completed"
    if request.session.testsfailed > 0:
        test_pass = False
        test_details = "One or more tests have failed"
    with FileLock(users_lock_file):
        with open(users_file, "r") as f:
            users = json.load(f)
        users -= 1
        with open(users_file, "w") as f:
            json.dump(users, f)
        # Last worker out ends the hive test and removes the shared files.
        if users == 0:
            test.end(result=HiveTestResult(test_pass=test_pass, details=test_details))
            base_file.unlink()
            users_file.unlink()
@pytest.fixture(scope="session")
def client_type(simulator: Simulation) -> ClientType:
    """Type of client to be used in the test."""
    available_clients = simulator.client_types()
    return available_clients[0]
@pytest.fixture(autouse=True, scope="session")
def client(
    base_hive_test: HiveTest,
    client_files: dict,
    environment: dict,
    client_type: ClientType,
    session_temp_folder: Path,
) -> Generator[Client, None, None]:
    """
    Initialize the client with the appropriate files and environment variables.

    The first worker starts the container and serializes its handle; other
    workers deserialize it. An ".err" sentinel file marks a failed startup so
    later workers fail fast instead of retrying. A shared "users" counter
    ensures only the last worker stops the client.
    """
    base_name = "hive_client"
    base_file = session_temp_folder / base_name
    base_error_file = session_temp_folder / f"{base_name}.err"
    base_lock_file = session_temp_folder / f"{base_name}.lock"
    client: Client | None = None
    with FileLock(base_lock_file):
        if not base_error_file.exists():
            if base_file.exists():
                # Another worker already started the client: reuse its handle.
                with open(base_file, "r") as f:
                    client = Client(**json.load(f))
            else:
                base_error_file.touch()  # Assume error until proven otherwise
                client = base_hive_test.start_client(
                    client_type=client_type, environment=environment, files=client_files
                )
                if client is not None:
                    base_error_file.unlink()  # Success
                    with open(base_file, "w") as f:
                        # `config` is dropped: presumably not JSON-serializable
                        # — TODO(review): confirm.
                        json.dump(
                            asdict(replace(client, config=None)),  # type: ignore
                            f,
                        )
    error_message = (
        f"Unable to connect to the client container ({client_type.name}) via Hive during test "
        "setup. Check the client or Hive server logs for more information."
    )
    assert client is not None, error_message
    # Reference-count the number of workers sharing this client.
    users_file_name = f"{base_name}_users"
    users_file = session_temp_folder / users_file_name
    users_lock_file = session_temp_folder / f"{users_file_name}.lock"
    with FileLock(users_lock_file):
        if users_file.exists():
            with open(users_file, "r") as f:
                users = json.load(f)
        else:
            users = 0
        users += 1
        with open(users_file, "w") as f:
            json.dump(users, f)
    yield client
    with FileLock(users_lock_file):
        with open(users_file, "r") as f:
            users = json.load(f)
        users -= 1
        with open(users_file, "w") as f:
            json.dump(users, f)
        # Last worker out stops the container and removes the shared files.
        if users == 0:
            client.stop()
            base_file.unlink()
            users_file.unlink()
@pytest.fixture(scope="session")
def engine_rpc(client: Client) -> EngineRPC | None:
    """Return the engine RPC client."""
    engine_url = f"http://{client.ip}:8551"
    return EngineRPC(engine_url)
@pytest.fixture(autouse=True, scope="session")
def eth_rpc(
    request: pytest.FixtureRequest,
    client: Client,
    engine_rpc: EngineRPC,
    session_fork: Fork,
    transactions_per_block: int,
    session_temp_folder: Path,
) -> EthRPC:
    """Initialize ethereum RPC client for the execution client under test."""
    return ChainBuilderEthRPC(
        rpc_endpoint=f"http://{client.ip}:8545",
        fork=session_fork,
        engine_rpc=engine_rpc,
        transactions_per_block=transactions_per_block,
        session_temp_folder=session_temp_folder,
        get_payload_wait_time=request.config.getoption("get_payload_wait_time"),
        transaction_wait_timeout=request.config.getoption("tx_wait_timeout"),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/rpc/__init__.py | src/pytest_plugins/execute/rpc/__init__.py | """RPC plugins to execute tests in different environments."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/execute/rpc/remote.py | src/pytest_plugins/execute/rpc/remote.py | """Pytest plugin to run the execute in remote-rpc-mode."""
from pathlib import Path
import pytest
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC, EthRPC
from ethereum_test_types.chain_config_types import ChainConfigDefaults
from ..pre_alloc import AddressStubs
from .chain_builder_eth_rpc import ChainBuilderEthRPC
def pytest_addoption(parser: pytest.Parser) -> None:
    """
    Add command-line options to pytest.

    Registers two option groups:

    - ``remote_rpc``: the mandatory RPC endpoint, the deprecated
      ``--rpc-chain-id``, the transaction wait timeout and address stubs.
    - ``engine_rpc``: an optional engine endpoint plus the JWT secret
      (inline string or file) required to authenticate against it.

    Note: several multi-line help strings previously lacked separating
    spaces between their concatenated fragments (e.g. "clientis",
    "release.Use"); they are fixed here.
    """
    remote_rpc_group = parser.getgroup("remote_rpc", "Arguments defining remote RPC configuration")
    remote_rpc_group.addoption(
        "--rpc-endpoint",
        required=True,
        action="store",
        dest="rpc_endpoint",
        help="RPC endpoint to an execution client",
    )
    remote_rpc_group.addoption(
        "--rpc-chain-id",
        action="store",
        dest="rpc_chain_id",
        required=False,
        type=int,
        default=None,
        help="DEPRECATED: ID of the chain where the tests will be executed. "
        "This flag is deprecated and will be removed in a future release. "
        "Use --chain-id instead.",
    )
    remote_rpc_group.addoption(
        "--tx-wait-timeout",
        action="store",
        dest="tx_wait_timeout",
        type=int,
        default=60,
        help="Maximum time in seconds to wait for a transaction to be included in a block",
    )
    remote_rpc_group.addoption(
        "--address-stubs",
        action="store",
        dest="address_stubs",
        default=AddressStubs(root={}),
        type=AddressStubs.model_validate_json_or_file,
        help="The address stubs for contracts that have already been placed in the chain and to "
        "use for the test. Can be a JSON formatted string or a path to a YAML or JSON file.",
    )
    engine_rpc_group = parser.getgroup("engine_rpc", "Arguments defining engine RPC configuration")
    engine_rpc_group.addoption(
        "--engine-endpoint",
        required=False,
        action="store",
        default=None,
        dest="engine_endpoint",
        help="Engine endpoint to an execution client, which implies that the execute command "
        "will be used to drive the chain. If not provided, it's assumed that the execution "
        "client is connected to a beacon node and the chain progresses automatically. If "
        "provided, the JWT secret must be provided as well.",
    )
    engine_rpc_group.addoption(
        "--engine-jwt-secret",
        required=False,
        action="store",
        default=None,
        dest="engine_jwt_secret",
        help="JWT secret to be used to authenticate with the engine endpoint. Provided string "
        "will be converted to bytes using the UTF-8 encoding.",
    )
    engine_rpc_group.addoption(
        "--engine-jwt-secret-file",
        required=False,
        action="store",
        default=None,
        dest="engine_jwt_secret_file",
        help="Path to a file containing the JWT secret to be used to authenticate with the "
        "engine endpoint. The file must contain only the JWT secret as a hex string.",
    )
def pytest_configure(config: pytest.Config) -> None:
    """
    Validate the chain-ID and engine-RPC configuration before the session.

    Exits the session early when:
    - no chain ID was configured (neither ``--chain-id`` nor the deprecated
      ``--rpc-chain-id``);
    - the chain ID reported by the remote RPC endpoint disagrees with the
      configured one;
    - an engine endpoint was given without a usable JWT secret.

    On success stores an ``EngineRPC`` instance (or ``None`` when no engine
    endpoint was configured) on ``config.engine_rpc`` for the ``engine_rpc``
    fixture to return.
    """
    if config.getoption("rpc_chain_id") is None and config.getoption("chain_id") is None:
        pytest.exit("No chain ID configuration found. Please use --chain-id.")

    # Verify the chain ID configuration is consistent with the remote RPC
    # endpoint.
    rpc_endpoint = config.getoption("rpc_endpoint")
    eth_rpc = EthRPC(rpc_endpoint)
    remote_chain_id = eth_rpc.chain_id()
    if remote_chain_id != ChainConfigDefaults.chain_id:
        pytest.exit(
            f"Chain ID obtained from the remote RPC endpoint ({remote_chain_id}) does not match "
            f"the configured chain ID ({ChainConfigDefaults.chain_id}). "
            "Please check if the chain ID is correctly configured with the --chain-id flag."
        )

    engine_endpoint = config.getoption("engine_endpoint")
    engine_rpc = None
    if engine_endpoint is not None:
        jwt_secret = config.getoption("engine_jwt_secret")
        jwt_secret_file = config.getoption("engine_jwt_secret_file")
        if jwt_secret is None and jwt_secret_file is None:
            pytest.exit(
                "JWT secret must be provided if engine endpoint is provided. "
                "Please check if the JWT secret is correctly configured with the "
                "--engine-jwt-secret or --engine-jwt-secret-file flag."
            )
        elif jwt_secret_file is not None:
            # File-based secrets must be a hex string (optionally 0x-prefixed)
            # and are decoded to raw bytes.
            with open(jwt_secret_file, "r") as f:
                jwt_secret = f.read().strip()
            if jwt_secret.startswith("0x"):
                jwt_secret = jwt_secret[2:]
            try:
                jwt_secret = bytes.fromhex(jwt_secret)
            except ValueError:
                pytest.exit(
                    "JWT secret must be a hex string if provided as a file. "
                    "Please check if the JWT secret is correctly configured with the "
                    "--engine-jwt-secret-file flag."
                )
        if isinstance(jwt_secret, str):
            # Inline secrets are used verbatim as UTF-8 bytes (not hex-decoded).
            jwt_secret = jwt_secret.encode("utf-8")
        assert isinstance(jwt_secret, bytes), (
            f"JWT secret must be a bytes object, got {type(jwt_secret)}"
        )
        engine_rpc = EngineRPC(engine_endpoint, jwt_secret=jwt_secret)
        # TODO: Perform a request to the engine endpoint to verify that the
        # JWT secret is valid. Potentially could be `engine_getClientVersionV1`
        # but need to implement this in rpc.py.
    config.engine_rpc = engine_rpc  # type: ignore
@pytest.fixture(scope="session")
def engine_rpc(request: pytest.FixtureRequest) -> EngineRPC | None:
    """
    Return the engine RPC client stored on the config by `pytest_configure`,
    or None when no engine endpoint was configured.
    """
    return request.config.engine_rpc  # type: ignore
@pytest.fixture(autouse=True, scope="session")
def rpc_endpoint(request: pytest.FixtureRequest) -> str:
    """
    Return remote RPC endpoint to be used to make requests to the execution
    client, as given on the command line via --rpc-endpoint.
    """
    return request.config.getoption("rpc_endpoint")
@pytest.fixture(autouse=True, scope="session")
def eth_rpc(
    request: pytest.FixtureRequest,
    rpc_endpoint: str,
    engine_rpc: EngineRPC | None,
    session_fork: Fork,
    transactions_per_block: int,
    session_temp_folder: Path,
) -> EthRPC:
    """
    Initialize ethereum RPC client for the execution client under test.

    When no engine endpoint was configured, a plain `EthRPC` is returned and
    the chain is assumed to progress externally; otherwise a
    `ChainBuilderEthRPC` is returned, which drives block production through
    the engine RPC.
    """
    tx_wait_timeout = request.config.getoption("tx_wait_timeout")
    if engine_rpc is None:
        # No engine access: only send transactions and wait for inclusion.
        return EthRPC(rpc_endpoint, transaction_wait_timeout=tx_wait_timeout)
    get_payload_wait_time = request.config.getoption("get_payload_wait_time")
    return ChainBuilderEthRPC(
        rpc_endpoint=rpc_endpoint,
        fork=session_fork,
        engine_rpc=engine_rpc,
        transactions_per_block=transactions_per_block,
        session_temp_folder=session_temp_folder,
        get_payload_wait_time=get_payload_wait_time,
        transaction_wait_timeout=tx_wait_timeout,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_execution/blob_transaction.py | src/ethereum_test_execution/blob_transaction.py | """Test execution format to get blobs from the execution client."""
from hashlib import sha256
from typing import ClassVar, Dict, List
from pytest import FixtureRequest
from ethereum_test_base_types import Address, Hash
from ethereum_test_base_types.base_types import Bytes
from ethereum_test_forks import Fork
from ethereum_test_rpc import BlobAndProofV1, BlobAndProofV2, EngineRPC, EthRPC
from ethereum_test_rpc.rpc_types import GetBlobsResponse
from ethereum_test_types import NetworkWrappedTransaction, Transaction
from ethereum_test_types.transaction_types import TransactionTestMetadata
from pytest_plugins.custom_logging import get_logger
from .base import BaseExecute
logger = get_logger(__name__)
def versioned_hashes_with_blobs_and_proofs(
    tx: NetworkWrappedTransaction,
) -> Dict[Hash, BlobAndProofV1 | BlobAndProofV2]:
    """
    Map each blob's versioned hash to its blob data and proof(s).

    A single `Bytes` proof yields a `BlobAndProofV1` entry; a list of proofs
    yields a `BlobAndProofV2` entry. A blob without a proof is an error.
    """
    mapping: Dict[Hash, BlobAndProofV1 | BlobAndProofV2] = {}
    for blob_object in tx.blob_objects:
        proof = blob_object.proof
        if isinstance(proof, list):
            entry: BlobAndProofV1 | BlobAndProofV2 = BlobAndProofV2(
                blob=blob_object.data, proofs=proof
            )
        elif isinstance(proof, Bytes):
            entry = BlobAndProofV1(blob=blob_object.data, proof=proof)
        else:
            raise ValueError(
                f"Blob with versioned hash {blob_object.versioned_hash.hex()} requires a proof "
                "that is not None"
            )
        mapping[blob_object.versioned_hash] = entry
    return mapping
class BlobTransaction(BaseExecute):
    """
    Represents a test execution format to send blob transactions to the client
    and then use `engine_getBlobsV*` end points to validate the proofs
    generated by the execution client.
    """

    format_name: ClassVar[str] = "blob_transaction_test"
    description: ClassVar[str] = (
        "Send blob transactions to the execution client and validate their availability via "
        "`engine_getBlobsV*`"
    )
    # This format drives validation through the Engine API.
    requires_engine_rpc: ClassVar[bool] = True

    # Transactions to send; network-wrapped ones carry blob data + proofs.
    txs: List[NetworkWrappedTransaction | Transaction]
    # When set, these hashes are appended to the `engine_getBlobsV*` request
    # and the client is expected to return 'null' for the whole request.
    nonexisting_blob_hashes: List[Hash] | None = None

    def execute(
        self,
        fork: Fork,
        eth_rpc: EthRPC,
        engine_rpc: EngineRPC | None,
        request: FixtureRequest,
    ) -> None:
        """
        Execute the format.

        Sends every transaction, collects the expected (versioned hash ->
        blob/proof) mapping from the network-wrapped ones, then queries
        `engine_getBlobsV*` and compares the client's response entry-by-entry
        against the locally known blobs and proofs.
        """
        assert engine_rpc is not None, "Engine RPC is required for this format."
        versioned_hashes: Dict[Hash, BlobAndProofV1 | BlobAndProofV2] = {}
        sent_txs: List[Transaction] = []
        for tx_index, tx in enumerate(self.txs):
            if isinstance(tx, NetworkWrappedTransaction):
                # Sign the inner transaction; blob data/proofs stay in the
                # wrapper and feed the expected mapping.
                tx.tx = tx.tx.with_signature_and_sender()
                sent_txs.append(tx.tx)
                expected_hash = tx.tx.hash
                versioned_hashes.update(versioned_hashes_with_blobs_and_proofs(tx))
                to_address = tx.tx.to
            else:
                tx = tx.with_signature_and_sender()
                sent_txs.append(tx)
                expected_hash = tx.hash
                to_address = tx.to
            label = to_address.label if isinstance(to_address, Address) else None
            # Metadata is attached to the RPC request id for traceability.
            metadata = TransactionTestMetadata(
                test_id=request.node.nodeid,
                phase="testing",
                target=label,
                tx_index=tx_index,
            )
            received_hash = eth_rpc.send_raw_transaction(tx.rlp(), request_id=metadata.to_json())
            assert expected_hash == received_hash, (
                f"Expected hash {expected_hash} does not match received hash {received_hash}."
            )
        version = fork.engine_get_blobs_version()
        assert version is not None, "Engine get blobs version is not supported by the fork."
        # ensure that clients respond 'null' when they have no access to at
        # least one blob
        list_versioned_hashes = list(versioned_hashes.keys())
        if self.nonexisting_blob_hashes is not None:
            list_versioned_hashes.extend(self.nonexisting_blob_hashes)
        blob_response: GetBlobsResponse | None = engine_rpc.get_blobs(
            list_versioned_hashes, version=version
        )
        # if non-existing blob hashes were requested then the response must be
        # 'null' (partial responses are not allowed)
        if self.nonexisting_blob_hashes is not None:
            if blob_response is not None:
                raise ValueError(
                    f"Non-existing blob hashes were requested and "
                    "the client was expected to respond with 'null', but instead it replied: "
                    f"{blob_response.root}"
                )
            else:
                logger.info(
                    "Test was passed (partial responses are not allowed and the client "
                    "correctly returned 'null')"
                )
            # Still wait for inclusion of the sent transactions before ending.
            eth_rpc.wait_for_transactions(sent_txs)
            return
        assert blob_response is not None
        # dict preserves insertion order, so values line up with the request.
        local_blobs_and_proofs = list(versioned_hashes.values())
        assert len(blob_response) == len(local_blobs_and_proofs), (
            f"Expected {len(local_blobs_and_proofs)} blobs and proofs, got {len(blob_response)}."
        )
        for expected_blob, received_blob in zip(
            local_blobs_and_proofs, blob_response.root, strict=True
        ):
            if received_blob is None:
                raise ValueError("Received blob is empty.")
            if isinstance(expected_blob, BlobAndProofV1):
                if not isinstance(received_blob, BlobAndProofV1):
                    raise ValueError("Received blob is not a BlobAndProofV1.")
                if expected_blob.blob != received_blob.blob:
                    raise ValueError("Blob mismatch.")
                if expected_blob.proof != received_blob.proof:
                    raise ValueError("Proof mismatch.")
            elif isinstance(expected_blob, BlobAndProofV2):
                if not isinstance(received_blob, BlobAndProofV2):
                    raise ValueError("Received blob is not a BlobAndProofV2.")
                if expected_blob.blob != received_blob.blob:
                    raise ValueError("Blob mismatch.")
                if expected_blob.proofs != received_blob.proofs:
                    # Build a detailed per-proof diagnostic before raising.
                    error_message = "Proofs mismatch."
                    error_message += f"len(expected_blob.proofs) = {len(expected_blob.proofs)}, "
                    error_message += f"len(received_blob.proofs) = {len(received_blob.proofs)}\n"
                    if len(expected_blob.proofs) == len(received_blob.proofs):
                        index = 0
                        for expected_proof, received_proof in zip(
                            expected_blob.proofs, received_blob.proofs, strict=False
                        ):
                            if len(expected_proof) != len(received_proof):
                                error_message += f"Proof length mismatch. index = {index},"
                                error_message += f"expected_proof length = {len(expected_proof)}, "
                                error_message += f"received_proof length = {len(received_proof)}\n"
                                index += 1
                                continue
                            if expected_proof != received_proof:
                                error_message += f"Proof mismatch. index = {index},"
                                error_message += (
                                    f"expected_proof hash = {sha256(expected_proof).hexdigest()}, "
                                )
                                error_message += (
                                    f"received_proof hash = {sha256(received_proof).hexdigest()}\n"
                                )
                            index += 1
                    raise ValueError(error_message)
            else:
                raise ValueError(f"Unexpected blob type: {type(expected_blob)}")
        eth_rpc.wait_for_transactions(sent_txs)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_execution/__init__.py | src/ethereum_test_execution/__init__.py | """Ethereum test execution package."""
from .base import BaseExecute, ExecuteFormat, LabeledExecuteFormat
from .blob_transaction import BlobTransaction
from .transaction_post import TransactionPost
__all__ = [
"BaseExecute",
"ExecuteFormat",
"BlobTransaction",
"LabeledExecuteFormat",
"TransactionPost",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_execution/base.py | src/ethereum_test_execution/base.py | """Ethereum test execution base types."""
from abc import abstractmethod
from typing import Annotated, Any, ClassVar, Dict, Type
from pydantic import PlainSerializer, PlainValidator
from pytest import FixtureRequest
from ethereum_test_base_types import CamelModel
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC, EthRPC
class BaseExecute(CamelModel):
    """
    Represents a base execution format.

    Subclasses that set a non-empty `format_name` are automatically
    registered in the `formats` class-level registry.
    """

    # Registry of all concrete execute formats, keyed by `format_name`;
    # populated by `__pydantic_init_subclass__`.
    formats: ClassVar[Dict[str, Type["BaseExecute"]]] = {}

    # Execute format properties, overridden by each concrete format.
    format_name: ClassVar[str] = ""
    description: ClassVar[str] = "Unknown execute format; it has not been set."
    requires_engine_rpc: ClassVar[bool] = False

    @classmethod
    def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
        """
        Register all subclasses of BaseExecute with an execute format
        name set as possible execute formats.
        """
        if cls.format_name:
            # Register the new execute format
            BaseExecute.formats[cls.format_name] = cls

    @abstractmethod
    def execute(
        self,
        fork: Fork,
        eth_rpc: EthRPC,
        engine_rpc: EngineRPC | None,
        request: FixtureRequest,
    ) -> None:
        """Execute the format."""
        pass
class LabeledExecuteFormat:
    """
    Pairs an execution format with a custom label.

    This label will be used in the test id and also will be added as a marker
    to the generated test case when executing the test.
    """

    format: Type[BaseExecute]
    label: str
    description: str

    registered_labels: ClassVar[Dict[str, "LabeledExecuteFormat"]] = {}

    def __init__(
        self,
        execute_format: "Type[BaseExecute] | LabeledExecuteFormat",
        label: str,
        description: str,
    ):
        """Initialize the execute format with a custom label."""
        # Unwrap an already-labeled format so `self.format` is always a type.
        if isinstance(execute_format, LabeledExecuteFormat):
            self.format = execute_format.format
        else:
            self.format = execute_format
        self.label = label
        self.description = description
        # First registration of a label wins; later duplicates are ignored.
        registry = LabeledExecuteFormat.registered_labels
        if label not in registry:
            registry[label] = self

    @property
    def format_name(self) -> str:
        """Get the execute format name."""
        return self.format.format_name

    @property
    def requires_engine_rpc(self) -> bool:
        """Get the requires engine RPC flag."""
        return self.format.requires_engine_rpc

    def __eq__(self, other: Any) -> bool:
        """
        Check if two labeled execute formats are equal.

        A labeled format compares equal to another labeled format wrapping the
        same underlying format, and also to the format type itself.
        """
        if isinstance(other, LabeledExecuteFormat):
            return self.format == other.format
        if isinstance(other, type) and issubclass(other, BaseExecute):
            return self.format == other
        return False
# Type alias for a base execute class: serializes a format type to its
# `format_name` string, and validates a known format name back to its class
# (unknown values are passed through unchanged for pydantic to reject).
ExecuteFormat = Annotated[
    Type[BaseExecute],
    PlainSerializer(lambda f: f.format_name),
    PlainValidator(lambda f: BaseExecute.formats[f] if f in BaseExecute.formats else f),
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_execution/transaction_post.py | src/ethereum_test_execution/transaction_post.py | """Simple transaction-send then post-check execution format."""
from typing import ClassVar, List
import pytest
from pytest import FixtureRequest
from ethereum_test_base_types import Address, Alloc, Hash
from ethereum_test_forks import Fork
from ethereum_test_rpc import EngineRPC, EthRPC, SendTransactionExceptionError
from ethereum_test_types import Transaction, TransactionTestMetadata
from .base import BaseExecute
class TransactionPost(BaseExecute):
    """
    Represents a simple transaction-send then post-check execution format.
    """

    blocks: List[List[Transaction]]
    post: Alloc

    # Gas validation fields for benchmark tests
    expected_benchmark_gas_used: int | None = None  # Expected total gas to be consumed
    skip_gas_used_validation: bool = False  # Skip gas validation even if expected is set

    format_name: ClassVar[str] = "transaction_post_test"
    description: ClassVar[str] = (
        "Simple transaction sending, then post-check after all transactions are included"
    )

    def execute(
        self,
        fork: Fork,
        eth_rpc: EthRPC,
        engine_rpc: EngineRPC | None,
        request: FixtureRequest,
    ) -> None:
        """
        Execute the format.

        Sends every transaction in `blocks` (expecting failure for those with
        a non-None `error`), optionally validates total gas used against
        `expected_benchmark_gas_used`, then checks balances, code, nonces and
        storage against the `post` allocation.
        """
        del fork
        del engine_rpc
        # Type-3 (blob) transactions need the blob sidecar flow; not supported
        # by this simple format.
        assert not any(tx.ty == 3 for block in self.blocks for tx in block), (
            "Transaction type 3 is not supported in execute mode."
        )
        # Track transaction hashes for gas validation (benchmarking)
        all_tx_hashes = []
        for block in self.blocks:
            signed_txs = []
            for tx_index, tx in enumerate(block):
                # Add metadata
                tx = tx.with_signature_and_sender()
                to_address = tx.to
                label = to_address.label if isinstance(to_address, Address) else None
                phase = (
                    "testing"
                    if (tx.test_phase == "execution" or tx.test_phase is None)
                    else "setup"
                )
                tx.metadata = TransactionTestMetadata(
                    test_id=request.node.nodeid,
                    phase=phase,
                    target=label,
                    tx_index=tx_index,
                )
                signed_txs.append(tx)
            if any(tx.error is not None for tx in signed_txs):
                # Mixed block: send valid txs one by one and assert that the
                # invalid ones are rejected by the client.
                for transaction in signed_txs:
                    if transaction.error is None:
                        eth_rpc.send_wait_transaction(transaction)
                        all_tx_hashes.append(transaction.hash)
                    else:
                        with pytest.raises(SendTransactionExceptionError):
                            eth_rpc.send_transaction(transaction)
            else:
                # All valid: batch-send and wait for inclusion.
                eth_rpc.send_wait_transactions(signed_txs)
                all_tx_hashes.extend([tx.hash for tx in signed_txs])

        # Perform gas validation if required for benchmarking
        # Ensures benchmark tests consume exactly the expected gas
        if not self.skip_gas_used_validation and self.expected_benchmark_gas_used is not None:
            total_gas_used = 0
            # Fetch transaction receipts to get actual gas used
            for tx_hash in all_tx_hashes:
                receipt = eth_rpc.get_transaction_receipt(tx_hash)
                assert receipt is not None, f"Failed to get receipt for transaction {tx_hash}"
                # "gasUsed" is a 0x-prefixed hex string in the receipt.
                gas_used = int(receipt["gasUsed"], 16)
                total_gas_used += gas_used
            # Verify that the total gas consumed matches expectations
            assert total_gas_used == self.expected_benchmark_gas_used, (
                f"Total gas used ({total_gas_used}) does not match "
                f"expected benchmark gas ({self.expected_benchmark_gas_used}), "
                f"difference: {total_gas_used - self.expected_benchmark_gas_used}"
            )

        # Post-state verification: an account of `None` means "must be empty";
        # otherwise only the explicitly-set fields of the account are checked.
        for address, account in self.post.root.items():
            balance = eth_rpc.get_balance(address)
            code = eth_rpc.get_code(address)
            nonce = eth_rpc.get_transaction_count(address)
            if account is None:
                assert balance == 0, f"Balance of {address} is {balance}, expected 0."
                assert code == b"", f"Code of {address} is {code}, expected 0x."
                assert nonce == 0, f"Nonce of {address} is {nonce}, expected 0."
            else:
                if "balance" in account.model_fields_set:
                    assert balance == account.balance, (
                        f"Balance of {address} is {balance}, expected {account.balance}."
                    )
                if "code" in account.model_fields_set:
                    assert code == account.code, (
                        f"Code of {address} is {code}, expected {account.code}."
                    )
                if "nonce" in account.model_fields_set:
                    assert nonce == account.nonce, (
                        f"Nonce of {address} is {nonce}, expected {account.nonce}."
                    )
                if "storage" in account.model_fields_set:
                    for key, value in account.storage.items():
                        storage_value = eth_rpc.get_storage_at(address, Hash(key))
                        assert storage_value == value, (
                            f"Storage value at {key} of {address} is {storage_value},"
                            f"expected {value}."
                        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/__init__.py | src/ethereum_test_tools/__init__.py | """
Module containing tools for generating cross-client Ethereum execution layer
tests.
"""
from ethereum_test_base_types import (
AccessList,
Account,
Address,
Bytes,
Hash,
Storage,
TestAddress,
TestAddress2,
TestPrivateKey,
TestPrivateKey2,
)
from ethereum_test_base_types.reference_spec import ReferenceSpec, ReferenceSpecTypes
from ethereum_test_benchmark import (
BenchmarkCodeGenerator,
ExtCallGenerator,
JumpLoopGenerator,
)
from ethereum_test_exceptions import (
BlockException,
EngineAPIError,
EOFException,
TransactionException,
)
from ethereum_test_fixtures import BaseFixture, FixtureCollector
from ethereum_test_specs import (
BaseTest,
BenchmarkTest,
BenchmarkTestFiller,
BlobsTest,
BlobsTestFiller,
BlockchainTest,
BlockchainTestFiller,
EOFStateTest,
EOFStateTestFiller,
EOFTest,
EOFTestFiller,
StateTest,
StateTestFiller,
TransactionTest,
TransactionTestFiller,
)
from ethereum_test_specs.blockchain import Block, Header
from ethereum_test_types import (
EOA,
Alloc,
AuthorizationTuple,
BalAccountChange,
BalBalanceChange,
BalCodeChange,
BalNonceChange,
BalStorageChange,
BalStorageSlot,
Blob,
BlockAccessList,
ChainConfig,
ConsolidationRequest,
DepositRequest,
Environment,
NetworkWrappedTransaction,
Removable,
Requests,
TestParameterGroup,
Transaction,
TransactionReceipt,
Withdrawal,
WithdrawalRequest,
add_kzg_version,
ceiling_division,
compute_create2_address,
compute_create_address,
compute_eofcreate_address,
keccak256,
)
from ethereum_test_vm import (
Bytecode,
EVMCodeType,
Macro,
Macros,
MemoryVariable,
Opcode,
OpcodeCallArg,
Opcodes,
UndefinedOpcodes,
call_return_code,
)
from .tools_code import (
CalldataCase,
Case,
CodeGasMeasure,
Conditional,
Initcode,
Switch,
While,
)
from .utility.generators import (
DeploymentTestType,
generate_system_contract_deploy_test,
generate_system_contract_error_test,
)
from .utility.pytest import extend_with_defaults
__all__ = (
"AccessList",
"Account",
"Address",
"Alloc",
"AuthorizationTuple",
"BalAccountChange",
"BalBalanceChange",
"BalCodeChange",
"BalNonceChange",
"BalStorageChange",
"BalStorageSlot",
"BaseFixture",
"BaseTest",
"BenchmarkCodeGenerator",
"BenchmarkTest",
"BenchmarkTestFiller",
"Blob",
"BlockAccessList",
"BlobsTest",
"BlobsTestFiller",
"Block",
"BlockchainTest",
"BlockchainTestFiller",
"BlockException",
"Bytecode",
"Bytes",
"CalldataCase",
"Case",
"ChainConfig",
"CodeGasMeasure",
"Conditional",
"ConsolidationRequest",
"ExtCallGenerator",
"DeploymentTestType",
"DepositRequest",
"EngineAPIError",
"Environment",
"EOA",
"EOFException",
"EOFStateTest",
"EOFStateTestFiller",
"EOFTest",
"EOFTestFiller",
"EVMCodeType",
"FixtureCollector",
"Hash",
"Header",
"Initcode",
"JumpLoopGenerator",
"Macro",
"Macros",
"MemoryVariable",
"NetworkWrappedTransaction",
"Opcode",
"OpcodeCallArg",
"Opcodes",
"ReferenceSpec",
"ReferenceSpecTypes",
"Removable",
"Requests",
"StateTest",
"StateTestFiller",
"Storage",
"Switch",
"TestAddress",
"TestAddress2",
"TestParameterGroup",
"TestPrivateKey",
"TestPrivateKey2",
"Transaction",
"TransactionException",
"TransactionReceipt",
"TransactionTest",
"TransactionTestFiller",
"UndefinedOpcodes",
"While",
"Withdrawal",
"WithdrawalRequest",
"add_kzg_version",
"call_return_code",
"ceiling_division",
"compute_create_address",
"compute_create2_address",
"compute_eofcreate_address",
"extend_with_defaults",
"generate_system_contract_deploy_test",
"generate_system_contract_error_test",
"keccak256",
"vm",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/tools_code/yul.py | src/ethereum_test_tools/tools_code/yul.py | """Yul frontend."""
import re
import warnings
from functools import cached_property
from pathlib import Path
from shutil import which
from subprocess import CompletedProcess, run
from typing import Optional, Type
from semver import Version
from typing_extensions import Self
from ethereum_test_forks import Fork
from ethereum_test_vm import Bytecode
# Arguments always passed to solc: assemble Yul read from stdin ("-").
DEFAULT_SOLC_ARGS = ("--assemble", "-")
# Matches the version line of `solc --version` output.
VERSION_PATTERN = re.compile(r"Version: (.*)")
class Solc:
    """
    Solc compiler.

    Thin wrapper around a `solc` binary: resolves the executable path, runs
    it via subprocess, and exposes its parsed version.
    """

    # Resolved path to the `solc` executable.
    binary: Path

    def __init__(
        self,
        binary: Optional[Path | str] = None,
    ):
        """
        Initialize the solc compiler.

        When `binary` is not given, `solc` is looked up on PATH; raises if no
        existing executable can be found either way.
        """
        if not binary:
            which_path = which("solc")
            if which_path is not None:
                binary = Path(which_path)
        if not binary or not Path(binary).exists():
            raise Exception(
                """`solc` binary executable not found, please refer to
                https://docs.soliditylang.org/en/latest/installing-solidity.html
                for help downloading and installing `solc`"""
            )
        self.binary = Path(binary)

    def run(self, *args: str, input_value: str | None = None) -> CompletedProcess:
        """Run solc with the given arguments, optionally feeding `input_value` on stdin."""
        return run(
            [self.binary, *args],
            capture_output=True,
            text=True,
            input=input_value,
        )

    @cached_property
    def version(self) -> Version:
        """
        Return solc's version parsed from `solc --version`.

        Falls back to `Version(0)` (with a warning) when the version line
        cannot be found or parsed.
        """
        for line in self.run("--version").stdout.splitlines():
            if match := VERSION_PATTERN.search(line):
                # Sanitize: "g++" in the build metadata would not parse as
                # semver, so it is replaced before parsing.
                solc_version_string = match.group(1).replace("g++", "gpp")
                return Version.parse(solc_version_string)
        warnings.warn("Unable to determine solc version.", stacklevel=2)
        return Version(0)
class Yul(Bytecode):
    """
    Yul compiler.

    Compiles Yul source code into bytecode by invoking `solc --assemble` and
    extracting the binary representation from its stdout.
    """

    # Original Yul source the bytecode was compiled from.
    source: str
    # solc EVM version name used for compilation, or None if not specified.
    evm_version: str | None

    def __new__(
        cls,
        source: str,
        fork: Optional[Fork] = None,
        binary: Optional[Path | str] = None,
    ) -> Self:
        """
        Compile Yul source code into bytecode.

        Raises an Exception containing solc's stderr when compilation fails.
        """
        solc = Solc(binary)
        # Target the fork's EVM version when a fork is given.
        evm_version = fork.solc_name() if fork else None
        solc_args = ("--evm-version", evm_version) if evm_version else ()
        result = solc.run(*solc_args, *DEFAULT_SOLC_ARGS, input_value=source)
        if result.returncode:
            stderr_lines = result.stderr.splitlines()
            stderr_message = "\n".join(line.strip() for line in stderr_lines)
            # [7:] drops the first 7 characters of solc's message —
            # presumably the "Error: " prefix; verify against solc output.
            raise Exception(f"failed to compile yul source:\n{stderr_message[7:]}")
        lines = result.stdout.splitlines()
        # The compiled hex follows the "Binary representation:" marker line.
        hex_str = lines[lines.index("Binary representation:") + 1]
        bytecode = bytes.fromhex(hex_str)
        instance = super().__new__(
            cls,
            bytecode,
            popped_stack_items=0,
            pushed_stack_items=0,
        )
        instance.source = source
        instance.evm_version = evm_version
        return instance
# Alias for the Yul compiler class type, used where a compiler class (not an
# instance) is expected.
YulCompiler = Type[Yul]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/tools_code/generators.py | src/ethereum_test_tools/tools_code/generators.py | """Code generating classes and functions."""
from dataclasses import dataclass
from typing import Any, List, SupportsBytes
from typing_extensions import Self
from ethereum_test_base_types import Bytes
from ethereum_test_types import ceiling_division
from ethereum_test_vm import Bytecode, EVMCodeType
from ethereum_test_vm import Opcodes as Op
# Code-deposit cost charged per byte of deployed code (0xC8 = 200 gas).
GAS_PER_DEPLOYED_CODE_BYTE = 0xC8
class Initcode(Bytecode):
    """
    Helper class used to generate initcode for the specified deployment code.

    The execution gas cost of the initcode is calculated, and also the
    deployment gas costs for the deployed code.

    The initcode can be padded to a certain length if necessary, which does not
    affect the deployed code.

    Other costs such as the CREATE2 hashing costs or the initcode_word_cost of
    EIP-3860 are *not* taken into account by any of these calculated costs.
    """

    deploy_code: SupportsBytes | Bytes
    """
    Bytecode to be deployed by the initcode.
    """
    execution_gas: int
    """
    Gas cost of executing the initcode, without considering deployment gas
    costs.
    """
    deployment_gas: int
    """
    Gas cost of deploying the code, subtracted after initcode execution.
    """

    def __new__(
        cls,
        *,
        deploy_code: SupportsBytes | Bytes | None = None,
        initcode_length: int | None = None,
        initcode_prefix: Bytecode | None = None,
        initcode_prefix_execution_gas: int = 0,
        padding_byte: int = 0x00,
        name: str = "",
    ) -> Self:
        """
        Generate legacy initcode that inits a contract with the specified code.

        The initcode can be padded to a specified length for testing purposes.

        Args:
            deploy_code: Code the initcode returns for deployment (empty by
                default).
            initcode_length: Total initcode length to pad to; must be at least
                the unpadded length.
            initcode_prefix: Bytecode executed before the copy/return
                sequence.
            initcode_prefix_execution_gas: Execution gas consumed by the
                prefix, included in `execution_gas`.
            padding_byte: Byte value used for padding.
            name: Optional name assigned to the resulting bytecode.
        """
        if deploy_code is None:
            deploy_code = Bytecode()
        if initcode_prefix is None:
            initcode_prefix = Bytecode()
        initcode = initcode_prefix
        code_length = len(bytes(deploy_code))
        execution_gas = initcode_prefix_execution_gas
        # PUSH2: length=<bytecode length>
        initcode += Op.PUSH2(code_length)
        # Bug fix: accumulate instead of assigning; the plain `= 3` here
        # silently discarded `initcode_prefix_execution_gas`.
        execution_gas += 3
        # PUSH1: offset=0
        initcode += Op.PUSH1(0)
        execution_gas += 3
        # DUP2
        initcode += Op.DUP2
        execution_gas += 3
        # PUSH1: initcode_length=11 + len(initcode_prefix_bytes) (constant)
        no_prefix_length = 0x0B
        assert no_prefix_length + len(initcode_prefix) <= 0xFF, "initcode prefix too long"
        initcode += Op.PUSH1(no_prefix_length + len(initcode_prefix))
        execution_gas += 3
        # DUP3
        initcode += Op.DUP3
        execution_gas += 3
        # CODECOPY: destinationOffset=0, offset=0, length
        initcode += Op.CODECOPY
        execution_gas += (
            3
            + (3 * ceiling_division(code_length, 32))
            + (3 * code_length)
            + ((code_length * code_length) // 512)
        )
        # RETURN: offset=0, length (no static gas; memory expansion was
        # already accounted for above)
        initcode += Op.RETURN
        execution_gas += 0

        initcode_plus_deploy_code = bytes(initcode) + bytes(deploy_code)
        padding_bytes = bytes()

        if initcode_length is not None:
            assert initcode_length >= len(initcode_plus_deploy_code), (
                "specified invalid length for initcode"
            )
            # Padding follows the deploy code, so it never alters the
            # deployed bytes.
            padding_bytes = bytes(
                [padding_byte] * (initcode_length - len(initcode_plus_deploy_code))
            )

        initcode_bytes = initcode_plus_deploy_code + padding_bytes
        instance = super().__new__(
            cls,
            initcode_bytes,
            popped_stack_items=initcode.popped_stack_items,
            pushed_stack_items=initcode.pushed_stack_items,
            max_stack_height=initcode.max_stack_height,
            min_stack_height=initcode.min_stack_height,
        )
        instance._name_ = name
        instance.deploy_code = deploy_code
        instance.execution_gas = execution_gas
        instance.deployment_gas = GAS_PER_DEPLOYED_CODE_BYTE * len(bytes(instance.deploy_code))
        return instance
class CodeGasMeasure(Bytecode):
    """
    Helper class used to generate bytecode that measures gas usage of a
    bytecode, taking into account and subtracting any extra overhead gas costs
    required to execute. By default, the result gas calculation is saved to
    storage key 0.
    """

    code: Bytecode
    """
    Bytecode to be executed to measure the gas usage.
    """
    overhead_cost: int
    """
    Extra gas cost to be subtracted from extra operations.
    """
    extra_stack_items: int
    """
    Extra stack items that remain at the end of the execution.
    To be considered when subtracting the value of the previous GAS operation,
    and to be popped at the end of the execution.
    """
    sstore_key: int | Bytes
    """
    Storage key to save the gas used.
    """

    def __new__(
        cls,
        *,
        code: Bytecode,
        overhead_cost: int = 0,
        extra_stack_items: int = 0,
        sstore_key: int | Bytes = 0,
        stop: bool = True,
    ) -> Self:
        """
        Assemble the bytecode that measures gas usage.

        The stored value works out to:
        gas_before - gas_after - (overhead_cost + 2), where the extra 2
        cancels the cost of the second GAS opcode itself.
        """
        # Sandwich the measured code between two GAS reads.
        res = Op.GAS + code + Op.GAS
        # We need to swap and pop for each extra stack item that remained from
        # the execution of the code
        res += (Op.SWAP1 + Op.POP) * extra_stack_items
        # Compute (gas_before - gas_after) - (overhead_cost + 2) and store it
        # at `sstore_key`.
        res += (
            Op.SWAP1
            + Op.SUB
            + Op.PUSH1(overhead_cost + 2)
            + Op.SWAP1
            + Op.SSTORE(sstore_key, Op.SUB)
        )
        if stop:
            res += Op.STOP
        instance = super().__new__(cls, res)
        instance.code = code
        instance.overhead_cost = overhead_cost
        instance.extra_stack_items = extra_stack_items
        instance.sstore_key = sstore_key
        return instance
class Conditional(Bytecode):
    """Helper class used to generate conditional bytecode."""

    def __new__(
        cls,
        *,
        condition: Bytecode | Op,
        if_true: Bytecode | Op | None = None,
        if_false: Bytecode | Op | None = None,
        evm_code_type: EVMCodeType = EVMCodeType.LEGACY,
    ) -> Self:
        """
        Assemble the conditional bytecode by generating the necessary jump and
        jumpdest opcodes surrounding the condition and the two possible
        execution paths.

        In the future, PC usage should be replaced by using RJUMP and RJUMPI.
        """
        if if_true is None:
            if_true = Bytecode()
        if if_false is None:
            if_false = Bytecode()
        if evm_code_type == EVMCodeType.LEGACY:
            # First we append a jumpdest to the start of the true branch
            if_true = Op.JUMPDEST + if_true
            # Then we append the unconditional jump to the end of the false
            # branch, used to skip the true branch (the +3 offsets account for
            # the jump-target computation opcodes themselves).
            if_false += Op.JUMP(Op.ADD(Op.PC, len(if_true) + 3))
            # Then we need to do the conditional jump by skipping the false
            # branch
            condition = Op.JUMPI(Op.ADD(Op.PC, len(if_false) + 3), condition)
            # Finally we append the condition, false and true branches, plus
            # the jumpdest at the very end
            bytecode = condition + if_false + if_true + Op.JUMPDEST
        elif evm_code_type == EVMCodeType.EOF_V1:
            # EOF uses relative jumps; a terminating false branch needs no
            # skip over the true branch.
            if not if_false.terminating:
                if_false += Op.RJUMP[len(if_true)]
            condition = Op.RJUMPI[len(if_false)](condition)
            # Finally we append the condition, false and true branches
            bytecode = condition + if_false + if_true
        return super().__new__(cls, bytecode)
class While(Bytecode):
    """Helper class used to generate while-loop bytecode."""

    def __new__(
        cls,
        *,
        body: Bytecode | Op,
        condition: Bytecode | Op | None = None,
        evm_code_type: EVMCodeType = EVMCodeType.LEGACY,
    ) -> Self:
        """
        Assemble the loop bytecode.

        Neither the condition nor the body may leave an item on the stack.
        If `condition` is None, an unconditional (infinite) loop is emitted.
        """
        bytecode = Bytecode()
        if evm_code_type == EVMCodeType.LEGACY:
            # Loop entry point: the backward jump below targets this JUMPDEST.
            bytecode += Op.JUMPDEST
            bytecode += body
            if condition is not None:
                # Jump back to the JUMPDEST while `condition` is non-zero.
                # The offset is computed as PC minus the distance back to the
                # JUMPDEST; the constant 6 accounts for the jump bookkeeping
                # bytes themselves — NOTE(review): verify against opcode
                # encodings if modifying.
                bytecode += Op.JUMPI(
                    Op.SUB(Op.PC, Op.PUSH4[len(body) + len(condition) + 6]), condition
                )
            else:
                # Unconditional backward jump: loops forever.
                bytecode += Op.JUMP(Op.SUB(Op.PC, Op.PUSH4[len(body) + 6]))
        elif evm_code_type == EVMCodeType.EOF_V1:
            raise NotImplementedError("EOF while loops are not implemented")
        return super().__new__(cls, bytecode)
@dataclass(kw_only=True, slots=True)
class Case:
    """
    One generic entry of a `Switch` statement's case list: a condition plus
    the action to run when the condition evaluates to a non-zero value.
    """

    condition: Bytecode | Op
    action: Bytecode | Op
    terminating: bool | None = None

    @property
    def is_terminating(self) -> bool:
        """Whether execution stops after the action (explicit flag wins)."""
        if self.terminating is not None:
            return self.terminating
        return self.action.terminating
class CalldataCase(Case):
    """
    A `Switch` case whose condition checks the contract's calldata against a
    given value.

    The calldata word is read from offset `position` (zero by default). The
    `condition` is derived automatically from `value` and `position`, so it
    must not be supplied by the caller.
    """

    def __init__(self, value: int | str | Bytecode, position: int = 0, **kwargs: Any) -> None:
        """Build the EQ(CALLDATALOAD(position), value) condition and delegate to `Case`."""
        super().__init__(condition=Op.EQ(Op.CALLDATALOAD(position), value), **kwargs)
class Switch(Bytecode):
    """
    Helper class used to generate switch-case expressions in EVM bytecode.

    Switch-case behavior:
      - If no condition in the list of `Case` objects is met, the
        `default_action` bytecode is executed.
      - If multiple conditions are met, the action from the first valid
        condition is the only one executed.
      - There is no fall-through; it is not possible to execute multiple
        actions.
    """

    default_action: Bytecode | Op | None
    """
    The default bytecode to execute; if no condition is met, this bytecode is
    executed.
    """

    cases: List[Case]
    """
    A list of Cases: The first element with a condition that
    evaluates to a non-zero value is the one that is executed.
    """

    evm_code_type: EVMCodeType
    """
    The EVM code type to use for the switch-case bytecode.
    """

    def __new__(
        cls,
        *,
        default_action: Bytecode | Op | None = None,
        cases: List[Case],
        evm_code_type: EVMCodeType = EVMCodeType.LEGACY,
    ) -> Self:
        """
        Assemble the bytecode by looping over the list of cases and adding the
        necessary [R]JUMPI and JUMPDEST opcodes in order to replicate
        switch-case behavior.

        NOTE(review): an `evm_code_type` other than LEGACY/EOF_V1 would leave
        `condition_jump_length` unbound (UnboundLocalError) — confirm the enum
        has only these two members.
        """
        # The length required to jump over subsequent actions to the final
        # JUMPDEST at the end of the switch-case block:
        # - add 6 per case for the length of the JUMPDEST and
        #   JUMP(ADD(PC, action_jump_length)) bytecode
        # - add 3 to the total to account for this action's JUMP;
        #   the PC within the call requires a "correction" of 3.
        bytecode = Bytecode()

        # All conditions get prepended to this bytecode; if none are met, we
        # reach the default.
        # NOTE(review): a `None` default_action is exercised by the tests and
        # works here — presumably Bytecode/Op addition tolerates None; confirm
        # before relying on it elsewhere.
        if evm_code_type == EVMCodeType.LEGACY:
            action_jump_length = sum(len(case.action) + 6 for case in cases) + 3
            bytecode = default_action + Op.JUMP(Op.ADD(Op.PC, action_jump_length))
            # The length required to jump over the default action and its JUMP
            # bytecode
            condition_jump_length = len(bytecode) + 3
        elif evm_code_type == EVMCodeType.EOF_V1:
            action_jump_length = sum(
                len(case.action) + (len(Op.RJUMP[0]) if not case.is_terminating else 0)
                for case in cases
                # On not terminating cases, we need to add 3 bytes for the
                # RJUMP
            )
            bytecode = default_action + Op.RJUMP[action_jump_length]
            # The length required to jump over the default action and its JUMP
            # bytecode
            condition_jump_length = len(bytecode)

        # Reversed: first case in the list has priority; it will become the
        # outer-most onion layer. We build up layers around the default_action,
        # after 1 iteration of the loop, a simplified representation of the
        # bytecode is:
        #
        #  JUMPI(case[n-1].condition)
        #  + default_action + JUMP()
        #  + JUMPDEST + case[n-1].action + JUMP()
        #
        # and after n=len(cases) iterations:
        #
        #  JUMPI(case[0].condition)
        #  + JUMPI(case[1].condition)
        #  ...
        #  + JUMPI(case[n-1].condition) + default_action + JUMP() + JUMPDEST +
        #  case[n-1].action + JUMP() + ... + JUMPDEST + case[1].action + JUMP()
        #  + JUMPDEST + case[0].action + JUMP()
        for case in reversed(cases):
            action = case.action
            if evm_code_type == EVMCodeType.LEGACY:
                # Shrink the forward distance to the final JUMPDEST as actions
                # are wrapped in (innermost actions jump the farthest).
                action_jump_length -= len(action) + 6
                action = Op.JUMPDEST + action + Op.JUMP(Op.ADD(Op.PC, action_jump_length))
                condition = Op.JUMPI(Op.ADD(Op.PC, condition_jump_length), case.condition)
            elif evm_code_type == EVMCodeType.EOF_V1:
                action_jump_length -= len(action) + (
                    len(Op.RJUMP[0]) if not case.is_terminating else 0
                )
                if not case.is_terminating:
                    action += Op.RJUMP[action_jump_length]
                condition = Op.RJUMPI[condition_jump_length](case.condition)
            # wrap the current case around the onion as its next layer
            bytecode = condition + bytecode + action
            # Each added layer pushes the conditional jump target further out.
            condition_jump_length += len(condition) + len(action)

        # Common landing point after any executed action.
        # NOTE(review): this JUMPDEST is appended in EOF_V1 mode too — confirm
        # it is intended there.
        bytecode += Op.JUMPDEST

        instance = super().__new__(cls, bytecode)
        instance.default_action = default_action
        instance.cases = cases
        # NOTE(review): `evm_code_type` is annotated on the class but never
        # stored on the instance — confirm whether this is intentional.
        return instance
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/tools_code/__init__.py | src/ethereum_test_tools/tools_code/__init__.py | """Code related utilities and classes."""
from .generators import CalldataCase, Case, CodeGasMeasure, Conditional, Initcode, Switch, While
from .yul import Solc, Yul, YulCompiler
__all__ = (
"CalldataCase",
"Case",
"CodeGasMeasure",
"Conditional",
"Initcode",
"Solc",
"Switch",
"While",
"Yul",
"YulCompiler",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/tests/__init__.py | src/ethereum_test_tools/tests/__init__.py | """`ethereum_test_tools` verification tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/tests/test_code.py | src/ethereum_test_tools/tests/test_code.py | """Test suite for `ethereum_test.code` module."""
from string import Template
from typing import Mapping
import pytest
from semver import Version
from ethereum_clis import TransitionTool
from ethereum_test_base_types import Account, Address, Hash, TestAddress, TestPrivateKey
from ethereum_test_fixtures import BlockchainFixture
from ethereum_test_forks import (
Cancun,
Fork,
Homestead,
Shanghai,
get_deployed_forks,
)
from ethereum_test_specs import StateTest
from ethereum_test_types import Alloc, Environment, Transaction
from ethereum_test_vm import Opcodes as Op
from ethereum_test_vm import UndefinedOpcodes
from pytest_plugins.solc.solc import SOLC_EXPECTED_MIN_VERSION
from ..tools_code import CalldataCase, Case, Conditional, Initcode, Switch
@pytest.fixture(params=get_deployed_forks())
def fork(request: pytest.FixtureRequest) -> Fork:
    """Provide each deployed fork in turn as the target evm-version for solc."""
    selected_fork: Fork = request.param
    return selected_fork
@pytest.fixture()
def expected_bytes(request: pytest.FixtureRequest, solc_version: Version, fork: Fork) -> bytes:
    """
    Return the expected bytecode for the test, adjusted for solc version and
    target fork.

    Two parametrization styles are supported:
      - `Template`: `$solc_padding` is substituted with "00" for newer solc
        versions targeting forks after Homestead, and with "" otherwise.
      - `bytes`: for Shanghai and later the leading `PUSH1 00` (0x6000) is
        replaced with `PUSH0` (0x5f); a trailing zero byte is appended for
        newer solc versions targeting forks after Homestead.

    Raises:
        TypeError: If the parametrized value is neither a `Template` nor
            `bytes`.
    """
    expected_bytes = request.param
    if isinstance(expected_bytes, Template):
        # Newer solc emits an extra metadata byte for post-Homestead targets.
        if solc_version < SOLC_EXPECTED_MIN_VERSION or fork <= Homestead:
            solc_padding = ""
        else:
            solc_padding = "00"
        return bytes.fromhex(expected_bytes.substitute(solc_padding=solc_padding))
    if isinstance(expected_bytes, bytes):
        if fork >= Shanghai:
            # Shanghai introduces PUSH0: replace the leading PUSH1 00.
            expected_bytes = b"\x5f" + expected_bytes[2:]
        if solc_version < SOLC_EXPECTED_MIN_VERSION or fork <= Homestead:
            return expected_bytes
        else:
            return expected_bytes + b"\x00"
    # Fix: raise a precise exception type (TypeError, still an Exception
    # subclass) instead of bare Exception, and use an f-string.
    raise TypeError(f"Unsupported expected_bytes type: {type(expected_bytes)}")
@pytest.mark.parametrize(
"initcode,bytecode",
[
pytest.param(
Initcode(),
bytes(
[
0x61,
0x00,
0x00,
0x60,
0x00,
0x81,
0x60,
0x0B,
0x82,
0x39,
0xF3,
]
),
id="empty-deployed-code",
),
pytest.param(
Initcode(initcode_prefix=Op.STOP),
bytes(
[
0x00,
0x61,
0x00,
0x00,
0x60,
0x00,
0x81,
0x60,
0x0C,
0x82,
0x39,
0xF3,
]
),
id="empty-deployed-code-with-prefix",
),
pytest.param(
Initcode(initcode_length=20),
bytes(
[
0x61,
0x00,
0x00,
0x60,
0x00,
0x81,
0x60,
0x0B,
0x82,
0x39,
0xF3,
]
+ [0x00] * 9 # padding
),
id="empty-deployed-code-with-padding",
),
pytest.param(
Initcode(deploy_code=Op.STOP, initcode_length=20),
bytes(
[
0x61,
0x00,
0x01,
0x60,
0x00,
0x81,
0x60,
0x0B,
0x82,
0x39,
0xF3,
]
+ [0x00] # deployed code
+ [0x00] * 8 # padding
),
id="single-byte-deployed-code-with-padding",
),
pytest.param(
Initcode(
deploy_code=Op.STOP,
initcode_prefix=Op.SSTORE(0, 1),
initcode_length=20,
),
bytes(
[
0x60,
0x01,
0x60,
0x00,
0x55,
0x61,
0x00,
0x01,
0x60,
0x00,
0x81,
0x60,
0x10,
0x82,
0x39,
0xF3,
]
+ [0x00] # deployed code
+ [0x00] * 3 # padding
),
id="single-byte-deployed-code-with-padding-and-prefix",
),
],
)
def test_initcode(initcode: Initcode, bytecode: bytes) -> None:
    """Verify that each `Initcode` helper assembles to the exact expected bytes."""
    assert bytes(initcode) == bytecode
@pytest.mark.parametrize(
"conditional_bytecode,expected",
[
(
Conditional(
condition=Op.CALLDATALOAD(0),
if_true=Op.MSTORE(0, Op.SLOAD(0)) + Op.RETURN(0, 32),
if_false=Op.SSTORE(0, 69),
),
bytes.fromhex("600035600d5801576045600055600f5801565b60005460005260206000f35b"),
),
],
)
def test_opcodes_if(conditional_bytecode: bytes, expected: bytes) -> None:
    """
    Verify that the `Conditional` (if/else) macro is transformed into exactly
    the expected bytecode.
    """
    assert bytes(conditional_bytecode) == expected
@pytest.mark.parametrize(
"tx_data,switch_bytecode,expected_storage",
[
pytest.param(
Hash(1),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
],
default_action=None,
),
{0: 1},
id="no-default-action-condition-met",
),
pytest.param(
Hash(1),
Switch(
cases=[
CalldataCase(value=1, action=Op.SSTORE(0, 1)),
CalldataCase(value=2, action=Op.SSTORE(0, 2)),
],
default_action=None,
),
{0: 1},
id="no-default-action-condition-met-calldata",
),
pytest.param(
Hash(0),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
],
default_action=None,
),
{0: 0},
id="no-default-action-no-condition-met",
),
pytest.param(
Hash(1),
Switch(
cases=[],
default_action=Op.SSTORE(0, 3),
),
{0: 3},
id="no-cases",
),
pytest.param(
Hash(1),
Switch(
cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))],
default_action=Op.SSTORE(0, 3),
),
{0: 1},
id="one-case-condition-met",
),
pytest.param(
Hash(0),
Switch(
cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))],
default_action=Op.SSTORE(0, 3),
),
{0: 3},
id="one-case-condition-not-met",
),
pytest.param(
Hash(0),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
],
default_action=Op.SSTORE(0, 3),
),
{0: 3},
id="two-cases-no-condition-met",
),
pytest.param(
Hash(1),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
],
default_action=Op.SSTORE(0, 3),
),
{0: 1},
id="two-cases-first-condition-met",
),
pytest.param(
Hash(2),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
],
default_action=Op.SSTORE(0, 3),
),
{0: 2},
id="two-cases-second-condition-met",
),
pytest.param(
Hash(1),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 3)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 4), action=Op.SSTORE(0, 4)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 5), action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 1},
id="five-cases-first-condition-met",
),
pytest.param(
Hash(1),
Switch(
cases=[
CalldataCase(value=1, action=Op.SSTORE(0, 1)),
CalldataCase(value=2, action=Op.SSTORE(0, 2)),
CalldataCase(value=3, action=Op.SSTORE(0, 3)),
CalldataCase(value=4, action=Op.SSTORE(0, 4)),
CalldataCase(value=5, action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 1},
id="five-cases-first-condition-met-calldata",
),
pytest.param(
Hash(3),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 3)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 4), action=Op.SSTORE(0, 4)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 5), action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 3},
id="five-cases-third-condition-met",
),
pytest.param(
Hash(3),
Switch(
cases=[
CalldataCase(value=1, action=Op.SSTORE(0, 1)),
CalldataCase(value=2, action=Op.SSTORE(0, 2)),
CalldataCase(value=3, action=Op.SSTORE(0, 3)),
CalldataCase(value=4, action=Op.SSTORE(0, 4)),
CalldataCase(value=5, action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 3},
id="five-cases-third-condition-met-calldata",
),
pytest.param(
Hash(5),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 3)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 4), action=Op.SSTORE(0, 4)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 5), action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 5},
id="five-cases-last-met",
),
pytest.param(
Hash(3),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 3)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 4)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 3},
id="five-cases-multiple-conditions-met", # first in list should be evaluated
),
pytest.param(
Hash(9),
Switch(
cases=[
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 2), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 3), action=Op.SSTORE(0, 3)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 4), action=Op.SSTORE(0, 4)),
Case(condition=Op.EQ(Op.CALLDATALOAD(0), 5), action=Op.SSTORE(0, 5)),
],
default_action=Op.SSTORE(0, 6),
),
{0: 6},
id="five-cases-no-condition-met",
),
pytest.param(
Hash(0),
Switch(
cases=[
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(1, 1), action=Op.SSTORE(0, 2)),
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
],
default_action=None,
),
{0: 2},
id="no-calldataload-condition-met",
),
pytest.param(
Hash(0),
Switch(
cases=[
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
Case(
condition=Op.EQ(1, 2),
action=Op.SSTORE(0, 1) + Op.SSTORE(1, 1) + Op.SSTORE(2, 1),
),
Case(condition=Op.EQ(1, 1), action=Op.SSTORE(0, 2) + Op.SSTORE(1, 2)),
Case(condition=Op.EQ(1, 2), action=Op.SSTORE(0, 1)),
],
default_action=None,
),
{0: 2, 1: 2},
id="no-calldataload-condition-met-different-length-actions",
),
pytest.param(
Hash(0),
Switch(
cases=[
Case(
condition=Op.EQ(1, 2),
action=Op.SSTORE(0, 1),
),
Case(
condition=Op.EQ(Op.CALLDATALOAD(0), 1),
action=Op.SSTORE(0, 1),
),
Case(
condition=Op.EQ(1, 2),
action=Op.SSTORE(0, 1) + Op.SSTORE(1, 1) + Op.SSTORE(2, 1),
),
Case(
condition=Op.EQ(1, 1),
action=Op.SSTORE(0, 2) + Op.SSTORE(1, 2),
),
Case(
condition=Op.EQ(Op.CALLDATALOAD(0), 1),
action=Op.SSTORE(0, 1),
),
],
default_action=None,
),
{0: 2, 1: 2},
id="different-length-conditions-condition-met-different-length-actions",
),
pytest.param(
Hash(0),
Op.SSTORE(0x10, 1)
+ Switch(
cases=[
Case(
condition=Op.EQ(1, 2),
action=Op.SSTORE(0, 1),
),
Case(
condition=Op.EQ(Op.CALLDATALOAD(0), 1),
action=Op.SSTORE(0, 1),
),
Case(
condition=Op.EQ(1, 2),
action=Op.SSTORE(0, 1) + Op.SSTORE(1, 1) + Op.SSTORE(2, 1),
),
Case(
condition=Op.EQ(1, 1),
action=Op.SSTORE(0, 2) + Op.SSTORE(1, 2),
),
Case(
condition=Op.EQ(Op.CALLDATALOAD(0), 1),
action=Op.SSTORE(0, 1),
),
],
default_action=None,
)
+ Op.SSTORE(0x11, 1),
{0: 2, 1: 2, 0x10: 1, 0x11: 1},
id="nested-within-bytecode",
),
pytest.param(
Hash(1),
Switch(
cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))],
default_action=Op.PUSH32(2**256 - 1) * 8,
),
{0: 1},
id="jumpi-larger-than-1-byte",
),
pytest.param(
Hash(1),
Switch(
cases=[Case(condition=Op.EQ(Op.CALLDATALOAD(0), 1), action=Op.SSTORE(0, 1))],
default_action=Op.PUSH32(2**256 - 1) * 2048,
),
{0: 1},
id="jumpi-larger-than-4-bytes",
),
],
)
def test_switch(
    tx_data: bytes, switch_bytecode: bytes, expected_storage: Mapping, default_t8n: TransitionTool
) -> None:
    """
    Execute the `Switch` macro bytecode via the t8n tool and check that the
    resulting contract storage matches the expected case selection.
    """
    # Contract holding the switch bytecode under test.
    code_address = Address(0x1000)
    pre = Alloc(
        {
            code_address: Account(code=switch_bytecode),
            TestAddress: Account(balance=10_000_000),
        }
    )
    # The calldata (`tx_data`) drives which case condition is met.
    tx = Transaction(to=code_address, data=tx_data, gas_limit=1_000_000, secret_key=TestPrivateKey)
    post = {TestAddress: Account(nonce=1), code_address: Account(storage=expected_storage)}
    state_test = StateTest(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
    # Generating the fixture runs the transaction and verifies `post`.
    state_test.generate(
        t8n=default_t8n,
        fork=Cancun,
        fixture_format=BlockchainFixture,
    )
def test_full_opcode_range() -> None:
    """
    Check that `Opcodes` and `UndefinedOpcodes` are disjoint and that together
    they cover the full opcode range.
    """
    defined = set(Op)
    undefined = set(UndefinedOpcodes)
    # The two enums must not share any member.
    assert len(defined & undefined) == 0
    full_possible_opcode_set = defined | undefined
    # 257 members map onto exactly the 256 possible byte values.
    assert len(full_possible_opcode_set) == 257
    expected_hex_values = {f"{i:02x}" for i in range(256)}
    assert {op.hex() for op in full_possible_opcode_set} == expected_hex_values
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/utility/generators.py | src/ethereum_test_tools/utility/generators.py | """Test generator decorators."""
import json
from enum import StrEnum
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Protocol
import pytest
from ethereum_test_base_types import Account, Address, Hash
from ethereum_test_exceptions import BlockException
from ethereum_test_forks import Fork
from ethereum_test_specs import BlockchainTestFiller
from ethereum_test_specs.blockchain import Block
from ethereum_test_types import Alloc, Transaction
from ethereum_test_vm import Bytecode
from ethereum_test_vm import Opcodes as Op
class DeploymentTestType(StrEnum):
"""Represents the type of deployment test."""
DEPLOY_BEFORE_FORK = "deploy_before_fork"
DEPLOY_ON_FORK_BLOCK = "deploy_on_fork_block"
DEPLOY_AFTER_FORK = "deploy_after_fork"
class SystemContractTestType(StrEnum):
    """Failure (or success) mode to exercise for a system contract call."""

    GAS_LIMIT = "system_contract_reaches_gas_limit"
    OUT_OF_GAS_ERROR = "system_contract_out_of_gas"
    REVERT_ERROR = "system_contract_reverts"
    EXCEPTION_ERROR = "system_contract_throws"

    def param(self) -> Any:
        """
        Return this test type wrapped as a pytest param; every type except
        GAS_LIMIT is marked as an exception test.
        """
        if self == SystemContractTestType.GAS_LIMIT:
            marks: Any = []
        else:
            marks = pytest.mark.exception_test
        return pytest.param(self, id=self.value, marks=marks)
class ContractAddressHasBalance(StrEnum):
"""
Represents whether the target deployment test has a balance before
deployment.
"""
ZERO_BALANCE = "zero_balance"
NONZERO_BALANCE = "nonzero_balance"
class SystemContractDeployTestFunction(Protocol):
    """
    Structural type of a function decorated with the
    `generate_system_contract_deploy_test` decorator.
    """

    def __call__(
        self,
        *,
        fork: Fork,
        pre: Alloc,
        post: Alloc,
        test_type: DeploymentTestType,
    ) -> Generator[Block, None, None]:
        """
        Arguments:
            fork (Fork): The fork to test.
            pre (Alloc): The pre state of the blockchain.
            post (Alloc): The post state of the blockchain.
            test_type (DeploymentTestType): The type of deployment test
                currently being filled.

        Yields:
            Block: Blocks to add after the block where the contract was
                deployed (e.g. can contain extra transactions to execute
                after the system contract has been deployed, and/or a header
                object to verify that the headers are correct).
        """
        pass
def generate_system_contract_deploy_test(
    *,
    fork: Fork,
    tx_json_path: Path,
    expected_deploy_address: Address,
    fail_on_empty_code: bool,
    expected_system_contract_storage: Dict | None = None,
) -> Callable[[SystemContractDeployTestFunction], Callable]:
    """
    Generate a test that verifies the correct deployment of a system contract.

    Generates the following test cases:

    | test case id                           | deployed      | fail on    | invalid |
    |                                        | before/after  | empty code | block   |
    |----------------------------------------|---------------|------------|---------|
    | `deploy_before_fork-nonzero_balance`   | before        | False      | False   |
    | `deploy_before_fork-zero_balance`      | before        | True       | False   |
    | `deploy_on_fork_block-nonzero_balance` | on fork block | False      | False   |
    | `deploy_on_fork_block-zero_balance`    | on fork block | True       | False   |
    | `deploy_after_fork-nonzero_balance`    | after         | False      | False   |
    | `deploy_after_fork-zero_balance`       | after         | True       | True    |

    The `has balance` parametrization does not have an effect on the
    expectation of the test.

    Arguments:
        fork (Fork): The fork to test.
        tx_json_path (Path): Path to the JSON file with the transaction to
            deploy the system contract. Providing a JSON file is useful to
            copy-paste the transaction from the EIP.
        expected_deploy_address (Address): The expected address of the
            deployed contract.
        fail_on_empty_code (bool): If True, the test is expected to fail on
            empty code.
        expected_system_contract_storage (Dict | None): The expected storage
            of the system contract.
    """
    with open(tx_json_path, mode="r") as f:
        tx_json = json.loads(f.read())
    # Normalize EIP-style JSON field names to the Transaction model's names.
    if "gasLimit" not in tx_json and "gas" in tx_json:
        tx_json["gasLimit"] = tx_json["gas"]
        del tx_json["gas"]
    if "protected" not in tx_json:
        tx_json["protected"] = False
    deploy_tx = Transaction.model_validate(tx_json).with_signature_and_sender()

    gas_price = deploy_tx.gas_price
    assert gas_price is not None
    # The deployer only needs enough balance to cover the deployment gas.
    deployer_required_balance = deploy_tx.gas_limit * gas_price
    deployer_address = deploy_tx.sender

    # Sanity-check the recovered transaction against the JSON, if provided.
    if "hash" in tx_json:
        assert deploy_tx.hash == Hash(tx_json["hash"])
    if "sender" in tx_json:
        assert deploy_tx.sender == Address(tx_json["sender"])

    def decorator(func: SystemContractDeployTestFunction) -> Callable:
        @pytest.mark.parametrize(
            "has_balance",
            [
                pytest.param(ContractAddressHasBalance.NONZERO_BALANCE),
                pytest.param(ContractAddressHasBalance.ZERO_BALANCE),
            ],
            ids=lambda x: x.name.lower(),
        )
        @pytest.mark.parametrize(
            "test_type",
            [
                pytest.param(DeploymentTestType.DEPLOY_BEFORE_FORK),
                pytest.param(DeploymentTestType.DEPLOY_ON_FORK_BLOCK),
                pytest.param(
                    DeploymentTestType.DEPLOY_AFTER_FORK,
                    marks=[pytest.mark.exception_test] if fail_on_empty_code else [],
                ),
            ],
            ids=lambda x: x.name.lower(),
        )
        @pytest.mark.execute(pytest.mark.skip(reason="modifies pre-alloc"))
        @pytest.mark.valid_at_transition_to(fork.name())
        def wrapper(
            blockchain_test: BlockchainTestFiller,
            has_balance: ContractAddressHasBalance,
            pre: Alloc,
            test_type: DeploymentTestType,
            fork: Fork,
        ) -> None:
            assert deployer_address is not None
            assert deploy_tx.created_contract == expected_deploy_address
            blocks: List[Block] = []

            # Timestamp 15_000 is the fork transition point.
            if test_type == DeploymentTestType.DEPLOY_BEFORE_FORK:
                blocks = [
                    Block(  # Deployment block
                        txs=[deploy_tx],
                        timestamp=14_999,
                    ),
                    Block(  # Empty block on fork
                        txs=[],
                        timestamp=15_000,
                    ),
                ]
            elif test_type == DeploymentTestType.DEPLOY_ON_FORK_BLOCK:
                blocks = [
                    Block(  # Deployment on fork block
                        txs=[deploy_tx],
                        timestamp=15_000,
                    ),
                    Block(  # Empty block after fork
                        txs=[],
                        timestamp=15_001,
                    ),
                ]
            elif test_type == DeploymentTestType.DEPLOY_AFTER_FORK:
                blocks = [
                    Block(  # Empty block on fork
                        txs=[],
                        timestamp=15_000,
                        exception=BlockException.SYSTEM_CONTRACT_EMPTY
                        if fail_on_empty_code
                        else None,
                    )
                ]
                if not fail_on_empty_code:
                    blocks.append(
                        Block(  # Deployment after fork block
                            txs=[deploy_tx],
                            timestamp=15_001,
                        )
                    )
                    blocks.append(
                        Block(  # Empty block after deployment
                            txs=[],
                            timestamp=15_002,
                        ),
                    )

            balance = 1 if has_balance == ContractAddressHasBalance.NONZERO_BALANCE else 0
            pre[expected_deploy_address] = Account(
                code=b"",  # Remove the code that is automatically allocated on
                # the fork
                nonce=0,
                balance=balance,
            )
            pre[deployer_address] = Account(
                balance=deployer_required_balance,
            )

            expected_deploy_address_int = int.from_bytes(expected_deploy_address, "big")

            post = Alloc()
            # The fork's pre-allocation provides the canonical contract code.
            fork_pre_allocation = fork.pre_allocation_blockchain()
            assert expected_deploy_address_int in fork_pre_allocation
            expected_code = fork_pre_allocation[expected_deploy_address_int]["code"]
            # Note: balance check is omitted; it may be modified by the
            # underlying, decorated test
            account_kwargs = {
                "code": expected_code,
                "nonce": 1,
            }
            if expected_system_contract_storage:
                account_kwargs["storage"] = expected_system_contract_storage
            if test_type != DeploymentTestType.DEPLOY_AFTER_FORK or not fail_on_empty_code:
                post[expected_deploy_address] = Account(**account_kwargs)
            post[deployer_address] = Account(
                nonce=1,
            )

            # Extra blocks (if any) returned by the decorated function to add
            # after the contract is deployed.
            if test_type != DeploymentTestType.DEPLOY_AFTER_FORK or not fail_on_empty_code:
                # Only fill more blocks if the deploy block does not fail.
                blocks += list(func(fork=fork, pre=pre, post=post, test_type=test_type))

            blockchain_test(
                pre=pre,
                blocks=blocks,
                post=post,
            )

        # Preserve the decorated test's identity for pytest collection.
        wrapper.__name__ = func.__name__  # type: ignore
        wrapper.__doc__ = func.__doc__

        return wrapper

    return decorator
def generate_system_contract_error_test(
    *,
    max_gas_limit: int,
) -> Callable[[SystemContractDeployTestFunction], Callable]:
    """
    Generate a test that verifies the correct behavior when a system contract
    fails execution.

    Parametrizations required (supplied by the decorated test's module):
        - system_contract (Address): The address of the system contract.
        - valid_from (Fork): The fork from which the test is valid.

    Arguments:
        max_gas_limit (int): The maximum gas limit for the system transaction.
    """

    def decorator(func: SystemContractDeployTestFunction) -> Callable:
        @pytest.mark.parametrize("test_type", [v.param() for v in SystemContractTestType])
        @pytest.mark.execute(pytest.mark.skip(reason="modifies pre-alloc"))
        def wrapper(
            blockchain_test: BlockchainTestFiller,
            pre: Alloc,
            test_type: SystemContractTestType,
            system_contract: Address,
            fork: Fork,
        ) -> None:
            modified_system_contract_code = Bytecode()
            # Depending on the test case, we need to modify the system contract
            # code accordingly.
            if (
                test_type == SystemContractTestType.GAS_LIMIT
                or test_type == SystemContractTestType.OUT_OF_GAS_ERROR
            ):
                # Run code so that it reaches the gas limit.
                gas_costs = fork.gas_costs()
                # The code works by storing N values to storage, and N is
                # calculated based on the gas costs for the given fork. This
                # code will only work once, so if the system contract is re-
                # executed in a subsequent block, it will consume less gas.
                gas_used_per_storage = (
                    gas_costs.G_STORAGE_SET + gas_costs.G_COLD_SLOAD + (gas_costs.G_VERY_LOW * 2)
                )
                modified_system_contract_code += sum(
                    Op.SSTORE(i, 1) for i in range(max_gas_limit // gas_used_per_storage)
                )
                # If the gas limit is not divisible by the gas used per
                # storage, we need to add some NO-OP (JUMPDEST) to the code
                # that each consume 1 gas.
                assert gas_costs.G_JUMPDEST == 1, (
                    f"JUMPDEST gas cost should be 1, but got {gas_costs.G_JUMPDEST}. "
                    "Generator `generate_system_contract_error_test` needs to be updated."
                )
                modified_system_contract_code += sum(
                    Op.JUMPDEST for _ in range(max_gas_limit % gas_used_per_storage)
                )
                if test_type == SystemContractTestType.OUT_OF_GAS_ERROR:
                    # If the test type is OUT_OF_GAS_ERROR, we need to add a
                    # JUMPDEST to the code to ensure that we go over the limit
                    # by one gas.
                    modified_system_contract_code += Op.JUMPDEST
                modified_system_contract_code += Op.STOP
            elif test_type == SystemContractTestType.REVERT_ERROR:
                # Run a simple revert.
                modified_system_contract_code = Op.REVERT(0, 0)
            elif test_type == SystemContractTestType.EXCEPTION_ERROR:
                # Run a simple exception.
                modified_system_contract_code = Op.INVALID()
            else:
                raise ValueError(f"Invalid test type: {test_type}")
            pre[system_contract] = Account(
                code=modified_system_contract_code,
                nonce=1,
                balance=0,
            )
            # Simple test transaction to verify the block failed to modify the
            # state.
            value_receiver = pre.fund_eoa(amount=0)
            test_tx = Transaction(
                to=value_receiver,
                value=1,
                gas_limit=100_000,
                sender=pre.fund_eoa(),
            )
            post = Alloc()
            # Only the GAS_LIMIT case produces a valid block; all other cases
            # must invalidate the block, so the transfer never takes effect.
            post[value_receiver] = (
                Account.NONEXISTENT
                if test_type != SystemContractTestType.GAS_LIMIT
                else Account(
                    balance=1,
                )
            )
            blockchain_test(
                pre=pre,
                blocks=[
                    Block(  # Deployment block
                        txs=[test_tx],
                        exception=BlockException.SYSTEM_CONTRACT_CALL_FAILED
                        if test_type != SystemContractTestType.GAS_LIMIT
                        else None,
                    )
                ],
                post=post,
            )

        # Preserve the decorated test's identity for pytest collection.
        wrapper.__name__ = func.__name__  # type: ignore
        wrapper.__doc__ = func.__doc__

        return wrapper

    return decorator
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/utility/__init__.py | src/ethereum_test_tools/utility/__init__.py | """Sub-package for utility functions and classes."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/utility/pytest.py | src/ethereum_test_tools/utility/pytest.py | """Pytest utility functions used to write Ethereum tests."""
from typing import Any, Dict, List
import pytest
from _pytest.mark.structures import ParameterSet
class UnknownParameterInCasesError(Exception):
    """Raised when a test case supplies a parameter missing from the defaults."""

    # Fixed explanatory message; the exception carries no other state.
    _MESSAGE = "each case must only contain parameters present in defaults"

    def __init__(self) -> None:
        """Initialize the exception with its fixed message."""
        super().__init__(self._MESSAGE)
def extend_with_defaults(
    defaults: Dict[str, Any], cases: List[ParameterSet], **parametrize_kwargs: Any
) -> Dict[str, Any]:
    """
    Extend test cases with default parameter values.

    This utility function extends test case parameters by adding default values
    from the `defaults` dictionary to each case in the `cases` list. If a case
    already specifies a value for a parameter, its default is ignored.

    This function is particularly useful in scenarios where you want to define
    a common set of default values but allow individual test cases to override
    them as needed.

    The function returns a dictionary that can be directly unpacked and passed
    to the `@pytest.mark.parametrize` decorator.

    Arguments:
        defaults (Dict[str, Any]): A dictionary of default parameter names
                                   and their values. These values will be added
                                   to each case unless the case already defines
                                   a value for each parameter.
        cases (List[ParameterSet]): A list of `pytest.param` objects
                                    representing different test cases.
                                    Its first argument must be a dictionary
                                    defining parameter names and values.
        parametrize_kwargs (Any): Additional keyword arguments to be passed to
                                  `@pytest.mark.parametrize`. These arguments
                                  are not modified by this function and are
                                  passed through unchanged.

    Returns:
        Dict[str, Any]: A dictionary with the following structure:
            `argnames`: A list of parameter names.
            `argvalues`: A list of test cases with modified parameter values.
            `parametrize_kwargs`: Additional keyword arguments passed
                through unchanged.

    Raises:
        ValueError: If a case does not contain exactly one value that is a
            dict of parameter values.
        UnknownParameterInCasesError: If a case contains a parameter name
            that is not present in `defaults`.

    Example:
        ```python
        @pytest.mark.parametrize(**extend_with_defaults(
            defaults=dict(
                min_value=0,  # default minimum value is 0
                max_value=100,  # default maximum value is 100
                average=50,  # default average value is 50
            ),
            cases=[
                pytest.param(
                    dict(),  # use default values
                    id='default_case',
                ),
                pytest.param(
                    dict(min_value=10),  # override with min_value=10
                    id='min_value_10',
                ),
                pytest.param(
                    dict(max_value=200),  # override with max_value=200
                    id='max_value_200',
                ),
                pytest.param(
                    dict(min_value=-10, max_value=50),  # override both min_value
                    # and max_value
                    id='min_-10_max_50',
                ),
                pytest.param(
                    # all defaults are overridden
                    dict(min_value=20, max_value=80, average=50),
                    id="min_20_max_80_avg_50",
                ),
                pytest.param(
                    dict(min_value=100, max_value=0),  # invalid range
                    id='invalid_range',
                    marks=pytest.mark.xfail(reason='invalid range'),
                )
            ],
        ))
        def test_range(min_value, max_value, average):
            assert min_value <= max_value
            assert min_value <= average <= max_value
        ```

        The above test will execute with the following sets of parameters:

        ```python
        "default_case": {"min_value": 0, "max_value": 100, "average": 50}
        "min_value_10": {"min_value": 10, "max_value": 100, "average": 50}
        "max_value_200": {"min_value": 0, "max_value": 200, "average": 50}
        "min_-10_max_50": {"min_value": -10, "max_value": 50, "average": 50}
        "min_20_max_80_avg_50": {"min_value": 20, "max_value": 80, "average": 50}
        # expected to fail
        "invalid_range": {"min_value": 100, "max_value": 0, "average": 50}
        ```

    Notes:
        - Each case in `cases` must contain exactly one value, which is a
          dictionary of parameter values.
        - The function performs an in-place update of the `cases` list, so
          the original `cases` list is modified.

    """
    for i, case in enumerate(cases):
        if not (len(case.values) == 1 and isinstance(case.values[0], dict)):
            raise ValueError(
                "each case must contain exactly one value; a dict of parameter values"
            )
        if set(case.values[0].keys()) - set(defaults.keys()):
            raise UnknownParameterInCasesError()
        # Overwrite values in defaults if the parameter is present in the test
        # case values
        merged_params = {**defaults, **case.values[0]}
        cases[i] = pytest.param(*merged_params.values(), id=case.id, marks=case.marks)

    return {"argnames": list(defaults), "argvalues": cases, **parametrize_kwargs}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/utility/versioning.py | src/ethereum_test_tools/utility/versioning.py | """Utility module with helper functions for versioning."""
import re
from typing import Union
from git import InvalidGitRepositoryError, Repo
def get_current_commit_hash_or_tag(repo_path: str = ".", shorten_hash: bool = False) -> str:
    """
    Return the tag pointing at HEAD, or else the HEAD commit hash.

    If any tag references the current commit, that tag's name is returned.
    Otherwise the commit hash is returned, truncated to its first 8
    characters when ``shorten_hash`` is set.
    """
    try:
        repo = Repo(repo_path)
        head_commit = repo.head.commit
        # Prefer a tag name when one points at HEAD.
        tag_name = next(
            (tag.name for tag in repo.tags if tag.commit == head_commit), None
        )
        if tag_name is not None:
            return tag_name
        full_hash = head_commit.hexsha
        return full_hash[:8] if shorten_hash else full_hash
    except InvalidGitRepositoryError:
        # Handle the case where the repository is not a valid Git repository
        return "Not a git repository; this should only be seen in framework tests."
def generate_github_url(
    file_path: str, branch_or_commit_or_tag: str = "main", line_number: Union[str, int] = ""
) -> str:
    """Generate a permalink to a source file in Github."""
    # Release tags (e.g. "v1.2.3", "v1.2.3rc1") are linked via "tree";
    # branches and commit hashes are linked via "blob".
    tag_pattern = r"^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(a[0-9]+|b[0-9]+|rc[0-9]+)?$"
    ref_kind = "tree" if re.match(tag_pattern, branch_or_commit_or_tag) else "blob"
    anchor = f"#L{line_number}" if line_number else ""
    return (
        "https://github.com/ethereum/execution-spec-tests/"
        f"{ref_kind}/{branch_or_commit_or_tag}/{file_path}{anchor}"
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_tools/utility/tests/test_pytest.py | src/ethereum_test_tools/utility/tests/test_pytest.py | """Tests for ethereum_test_tools.utility.pytest."""
import pytest
from ethereum_test_tools import extend_with_defaults
from ethereum_test_tools.utility.pytest import UnknownParameterInCasesError
# TODO: This is from the docstring in extend_with_defaults; should be tested
# automatically
# Parametrization mirrors the usage example from `extend_with_defaults`'s
# docstring: each case dict overrides a subset of the three defaults.
@pytest.mark.parametrize(
    **extend_with_defaults(
        defaults={
            "min_value": 0,  # default minimum value is 0
            "max_value": 100,  # default maximum value is 100
            "average": 50,  # default average value is 50
        },
        cases=[
            pytest.param(
                {},  # use default values
                id="default_case",
            ),
            pytest.param(
                {"min_value": 10},  # override with min_value=10
                id="min_value_10",
            ),
            pytest.param(
                {"max_value": 200},  # override with max_value=200
                id="max_value_200",
            ),
            pytest.param(
                # override both min_value and max_value
                {"min_value": -10, "max_value": 50},
                id="min_-10_max_50",
            ),
            pytest.param(
                # all defaults are overridden
                {"min_value": 20, "max_value": 80, "average": 50},
                id="min_20_max_80_avg_50",
            ),
            pytest.param(
                {"min_value": 100, "max_value": 0},  # invalid range
                id="invalid_range",
                marks=pytest.mark.xfail(reason="invalid range"),
            ),
        ],
    )
)
def test_range(min_value: int, max_value: int, average: int) -> None:  # noqa: D103
    """Each merged parameter set must describe a consistent numeric range."""
    assert min_value <= max_value
    assert min_value <= average <= max_value
# Each case supplies (defaults, cases, parametrize_kwargs) inputs for
# `extend_with_defaults` together with the expected argnames/argvalues output.
@pytest.mark.parametrize(
    "defaults,cases,parametrize_kwargs,expected",
    [
        pytest.param(
            {"min_value": 0, "max_value": 100, "average": 50},
            [
                pytest.param(
                    {},
                    id="default_case",
                ),
                pytest.param(
                    {"min_value": 10},
                    id="min_value_10",
                ),
                pytest.param(
                    {"max_value": 200},
                    id="max_value_200",
                ),
                pytest.param(
                    {"min_value": -10, "max_value": 50},
                    id="min_-10_max_50",
                ),
                pytest.param(
                    {"min_value": 20, "max_value": 80, "average": 50},
                    id="min_20_max_80_avg_50",
                ),
                pytest.param(
                    {"min_value": 100, "max_value": 0},
                    id="invalid_range",
                    marks=pytest.mark.xfail(reason="invalid range"),
                ),
            ],
            {},
            {
                "argnames": ["min_value", "max_value", "average"],
                "argvalues": [
                    pytest.param(0, 100, 50, id="default_case"),
                    pytest.param(10, 100, 50, id="min_value_10"),
                    pytest.param(0, 200, 50, id="max_value_200"),
                    pytest.param(-10, 50, 50, id="min_-10_max_50"),
                    pytest.param(20, 80, 50, id="min_20_max_80_avg_50"),
                    pytest.param(
                        100,
                        0,
                        50,
                        id="invalid_range",
                        marks=pytest.mark.xfail(reason="invalid range"),
                    ),
                ],
            },
            id="defaults_and_cases_empty_parametrize_kwargs",
        ),
        pytest.param(
            {"min_value": 0, "max_value": 100, "average": 50},
            [
                pytest.param(
                    {},
                    id="default_case",
                ),
                pytest.param(
                    {"min_value": 10},
                    id="min_value_10",
                ),
            ],
            {"scope": "session"},
            {
                "argnames": ["min_value", "max_value", "average"],
                "argvalues": [
                    pytest.param(0, 100, 50, id="default_case"),
                    pytest.param(10, 100, 50, id="min_value_10"),
                ],
            },
            id="defaults_and_cases_with_parametrize_kwargs",
        ),
    ],
)
def test_extend_with_defaults(
    defaults: dict, cases: list, parametrize_kwargs: dict, expected: dict
) -> None:  # noqa: D103
    """Test the extend_with_defaults function."""
    result = extend_with_defaults(defaults, cases, **parametrize_kwargs)
    assert result["argnames"] == expected["argnames"]
    assert result["argvalues"] == expected["argvalues"]
    # After removing the computed keys, only the pass-through
    # parametrize kwargs should remain.
    result.pop("argnames")
    result.pop("argvalues")
    assert result == parametrize_kwargs
def test_extend_with_defaults_raises_for_unknown_default() -> None:  # noqa: D103
    """A case parameter that is absent from the defaults must raise."""
    defaults = {"a": 0, "b": 1}
    unknown_case = pytest.param({"c": 2})  # "c" is not among the defaults
    with pytest.raises(
        UnknownParameterInCasesError, match="only contain parameters present in defaults"
    ):
        extend_with_defaults(defaults, [unknown_case])
# Malformed cases: a `pytest.param` whose payload is not a single dict.
@pytest.mark.parametrize(
    "defaults, cases",
    [
        pytest.param(
            {"param_1": "default1"},
            [pytest.param({"param_1": "value1"}, {"param_2": "value2"})],
            id="multiple_values",
        ),
        pytest.param(
            {"param_1": "default1"},
            [pytest.param("not_a_dict")],
            id="non_dict_value",
        ),
    ],
)
def test_extend_with_defaults_raises_value_error(defaults: dict, cases: list) -> None:  # noqa: D103
    """A case whose payload is not exactly one dict must raise ValueError."""
    expected_message = "each case must contain exactly one value; a dict of parameter values"
    with pytest.raises(ValueError, match=expected_message):
        extend_with_defaults(defaults, cases)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/bytecode.py | src/ethereum_test_vm/bytecode.py | """Ethereum Virtual Machine bytecode primitives and utilities."""
from typing import Any, Self, SupportsBytes
from pydantic import GetCoreSchemaHandler
from pydantic_core.core_schema import (
PlainValidatorFunctionSchema,
no_info_plain_validator_function,
plain_serializer_function_ser_schema,
)
from ethereum_test_base_types import Bytes, Hash
class Bytecode:
    """
    Base class to represent EVM bytecode.

    Stack calculations are automatically done after an addition operation
    between two bytecode objects. The stack height is not guaranteed to be
    correct, so the user must take this into consideration.

    Parameters
    ----------
    - popped_stack_items: number of items the bytecode pops from the stack
    - pushed_stack_items: number of items the bytecode pushes to the stack
    - min_stack_height: minimum stack height required by the bytecode
    - max_stack_height: maximum stack height reached by the bytecode
    """

    # Name assigned at Enum creation (e.g. an opcode mnemonic); "" otherwise.
    _name_: str = ""
    # Raw byte representation of this bytecode.
    _bytes_: bytes
    popped_stack_items: int
    pushed_stack_items: int
    max_stack_height: int
    min_stack_height: int
    # True when execution cannot continue past this bytecode (e.g. STOP).
    terminating: bool

    def __new__(
        cls,
        bytes_or_byte_code_base: "bytes | Bytecode | None" = None,
        *,
        popped_stack_items: int | None = None,
        pushed_stack_items: int | None = None,
        max_stack_height: int | None = None,
        min_stack_height: int | None = None,
        terminating: bool = False,
        name: str = "",
    ) -> Self:
        """Create new opcode instance."""
        if bytes_or_byte_code_base is None:
            # Empty bytecode: zero bytes and neutral stack properties.
            instance = super().__new__(cls)
            instance._bytes_ = b""
            instance.popped_stack_items = 0
            instance.pushed_stack_items = 0
            instance.min_stack_height = 0
            instance.max_stack_height = 0
            instance.terminating = False
            instance._name_ = name
            return instance
        if isinstance(bytes_or_byte_code_base, Bytecode):
            # Required because Enum class calls the base class with the
            # instantiated object as parameter. Copies all properties.
            obj = super().__new__(cls)
            obj._bytes_ = bytes_or_byte_code_base._bytes_
            obj.popped_stack_items = bytes_or_byte_code_base.popped_stack_items
            obj.pushed_stack_items = bytes_or_byte_code_base.pushed_stack_items
            obj.min_stack_height = bytes_or_byte_code_base.min_stack_height
            obj.max_stack_height = bytes_or_byte_code_base.max_stack_height
            obj.terminating = bytes_or_byte_code_base.terminating
            obj._name_ = bytes_or_byte_code_base._name_
            return obj
        if isinstance(bytes_or_byte_code_base, bytes):
            obj = super().__new__(cls)
            obj._bytes_ = bytes_or_byte_code_base
            assert popped_stack_items is not None
            assert pushed_stack_items is not None
            obj.popped_stack_items = popped_stack_items
            obj.pushed_stack_items = pushed_stack_items
            # Defaults: a bytecode requires at least as many items as it
            # pops, and reaches at least the larger of its pop/push counts.
            if min_stack_height is None:
                obj.min_stack_height = obj.popped_stack_items
            else:
                obj.min_stack_height = min_stack_height
            if max_stack_height is None:
                obj.max_stack_height = max(obj.popped_stack_items, obj.pushed_stack_items)
            else:
                obj.max_stack_height = max_stack_height
            obj.terminating = terminating
            obj._name_ = name
            return obj

        raise TypeError("Bytecode constructor '__new__' didn't return an instance!")

    def __bytes__(self) -> bytes:
        """Return the opcode byte representation."""
        return self._bytes_

    def __len__(self) -> int:
        """Return the length of the opcode byte representation."""
        return len(self._bytes_)

    def __str__(self) -> str:
        """Return the name of the opcode, assigned at Enum creation."""
        return self._name_

    def __eq__(self, other: object) -> bool:
        """
        Allow comparison between Bytecode instances and bytes objects.

        Raises:
          - NotImplementedError: if the comparison is not between an
            Bytecode or a bytes object.

        """
        if isinstance(other, Bytecode):
            # Two Bytecode objects are equal only when bytes AND all stack
            # properties match (`terminating` is intentionally excluded).
            return (
                bytes(self) == bytes(other)
                and self.popped_stack_items == other.popped_stack_items
                and self.pushed_stack_items == other.pushed_stack_items
                and self.max_stack_height == other.max_stack_height
                and self.min_stack_height == other.min_stack_height
            )
        if isinstance(other, SupportsBytes) or isinstance(other, bytes):
            return bytes(self) == bytes(other)
        raise NotImplementedError(f"Unsupported type for comparison: {type(other)}")

    def __hash__(self) -> int:
        """Return the hash of the bytecode representation."""
        # Hash over the same fields compared in __eq__ to keep the
        # eq/hash contract consistent.
        return hash(
            (
                bytes(self),
                self.popped_stack_items,
                self.pushed_stack_items,
                self.max_stack_height,
                self.min_stack_height,
            )
        )

    def __add__(self, other: "Bytecode | bytes | int | None") -> "Bytecode":
        """
        Concatenate the bytecode representation with another bytecode object,
        computing the combined stack properties of the sequence.
        """
        if other is None or (isinstance(other, int) and other == 0):
            # Edge case for sum() function
            return self
        if isinstance(other, bytes):
            # Appending raw bytes leaves stack properties unchanged but
            # clears the name, since the result is no longer a pure opcode.
            c = Bytecode(self)
            c._bytes_ += other
            c._name_ = ""
            return c
        assert isinstance(other, Bytecode), "Can only concatenate Bytecode instances"
        # Figure out the stack height after executing the two opcodes.
        a_pop, a_push = self.popped_stack_items, self.pushed_stack_items
        a_min, a_max = self.min_stack_height, self.max_stack_height
        b_pop, b_push = other.popped_stack_items, other.pushed_stack_items
        b_min, b_max = other.min_stack_height, other.max_stack_height
        # NOTE: "_pop" is understood as the number of elements required by an
        # instruction or bytecode to be popped off the stack before it starts
        # returning (pushing).
        # Auxiliary variables representing "stages" of the execution of
        # `c = a + b` bytecode: Assume starting point 0 as reference:
        a_start = 0
        # A (potentially) pops some elements and reaches its "bottom", might be
        # negative:
        a_bottom = a_start - a_pop
        # After this A pushes some elements, then B pops and reaches its
        # "bottom":
        b_bottom = a_bottom + a_push - b_pop
        # C's bottom is either at the bottom of A or B:
        c_bottom = min(a_bottom, b_bottom)
        if c_bottom == a_bottom:
            # C pops the same as A to reach its bottom, then the rest of A and
            # B are C's "push"
            c_pop = a_pop
            c_push = a_push - b_pop + b_push
        else:
            # A and B are C's "pop" to reach its bottom, then pushes the same
            # as B
            c_pop = a_pop - a_push + b_pop
            c_push = b_push
        # C's minimum required stack is either A's or B's shifted by the net
        # stack balance of A
        c_min = max(a_min, b_min + a_pop - a_push)
        # C starts from c_min, then reaches max either in the spot where A
        # reached a_max or in the spot where B reached b_max, after A had
        # completed.
        c_max = max(c_min + a_max - a_min, c_min - a_pop + a_push + b_max - b_min)
        # The combined bytecode terminates iff its last component terminates.
        return Bytecode(
            bytes(self) + bytes(other),
            popped_stack_items=c_pop,
            pushed_stack_items=c_push,
            min_stack_height=c_min,
            max_stack_height=c_max,
            terminating=other.terminating,
        )

    def __radd__(self, other: "Bytecode | int | None") -> "Bytecode":
        """
        Concatenate another bytecode object (left operand) with this one.
        """
        if other is None or (isinstance(other, int) and other == 0):
            # Edge case for sum() function
            return self
        assert isinstance(other, Bytecode), "Can only concatenate Bytecode instances"
        return other.__add__(self)

    def __mul__(self, other: int) -> "Bytecode":
        """
        Return this bytecode repeated ``other`` times (0 gives an empty
        bytecode).
        """
        if other < 0:
            raise ValueError("Cannot multiply by a negative number")
        if other == 0:
            return Bytecode()
        # Repeated addition so the stack properties are recomputed for the
        # full repeated sequence, not just copied.
        output = self
        for _ in range(other - 1):
            output += self
        return output

    def hex(self) -> str:
        """
        Return the hexadecimal representation of the opcode byte
        representation.
        """
        return bytes(self).hex()

    def keccak256(self) -> Hash:
        """Return the keccak256 hash of the opcode byte representation."""
        return Bytes(self._bytes_).keccak256()

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Any, handler: GetCoreSchemaHandler
    ) -> PlainValidatorFunctionSchema:
        """
        Provide Pydantic core schema for Bytecode
        serialization and validation.
        """
        # Serializes as a 0x-prefixed hex string.
        return no_info_plain_validator_function(
            cls,
            serialization=plain_serializer_function_ser_schema(
                lambda bytecode: "0x" + bytecode.hex(),
                info_arg=False,
            ),
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/evm_types.py | src/ethereum_test_vm/evm_types.py | """EVM types definitions."""
from enum import Enum
class EVMCodeType(str, Enum):
    """The flavor of EVM code (legacy or EOF v1) supported in a given fork."""

    LEGACY = "legacy"
    EOF_V1 = "eof_v1"

    def __str__(self) -> str:
        """Render the member as its name (e.g. ``LEGACY``), not its value."""
        code_type_name: str = self.name
        return code_type_name
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/opcodes.py | src/ethereum_test_vm/opcodes.py | """
Ethereum Virtual Machine opcode definitions.
Acknowledgments: The individual opcode documentation below is due to the work
by [smlXL](https://github.com/smlxl) on [evm.codes](https://www.evm.codes/),
available as open source [github.com/smlxl/
evm.codes](https://github.com/smlxl/evm.codes) - thank you! And thanks
to @ThreeHrSleep for integrating it in the docstrings.
"""
from enum import Enum
from typing import Any, Callable, Iterable, List, Mapping, Optional, SupportsBytes
from ethereum_test_base_types import to_bytes
from .bytecode import Bytecode
def _get_int_size(n: int) -> int:
"""Return size of an integer in bytes."""
if n < 0:
# Negative numbers in the EVM are represented as two's complement
# of 32 bytes
return 32
byte_count = 0
while n:
byte_count += 1
n >>= 8
return byte_count
# Type of the mapping used for opcode keyword-argument default values.
KW_ARGS_DEFAULTS_TYPE = Mapping[str, "int | bytes | str | Opcode | Bytecode"]
def _stack_argument_to_bytecode(
    arg: "int | bytes | SupportsBytes | str | Opcode | Bytecode | Iterable[int]",
) -> Bytecode:
    """Convert a single stack argument of an opcode or macro into bytecode."""
    # Bytecode (and Opcode) arguments already emit themselves.
    if isinstance(arg, Bytecode):
        return arg
    # Anything else becomes a constant pushed onto the stack.
    if isinstance(arg, int):
        is_negative = arg < 0
        length = _get_int_size(arg)
        if length > 32:
            raise ValueError("Opcode stack data must be less than 32 bytes")
        if length == 0:
            # Pushing 0 is done with the PUSH1 opcode
            # for compatibility reasons.
            length = 1
        payload = arg.to_bytes(
            length=length,
            byteorder="big",
            signed=is_negative,
        )
    else:
        payload = to_bytes(arg).lstrip(b"\0")  # type: ignore
        if payload == b"":
            # Pushing 0 is done with the PUSH1 opcode for
            # compatibility reasons.
            payload = b"\x00"
    assert isinstance(payload, bytes)
    assert len(payload) > 0
    # Select the PUSH<n> opcode matching the payload width.
    return _push_opcodes_byte_list[len(payload) - 1][payload]
class Opcode(Bytecode):
    """
    Represents a single Opcode instruction in the EVM, with extra
    metadata useful to parametrize tests.

    Parameters
    ----------
    - data_portion_length: number of bytes after the opcode in the bytecode
        that represent data
    - data_portion_formatter: function to format the data portion of the
        opcode, if any
    - stack_properties_modifier: function to modify the stack properties of
        the opcode after the data portion has been processed
    - kwargs: list of keyword arguments that can be passed to the opcode,
        in the order they are meant to be placed in the stack
    - kwargs_defaults: default values for the keyword arguments if any,
        otherwise 0
    - unchecked_stack: whether the bytecode should ignore stack checks
        when being called
    """

    data_portion_length: int
    data_portion_formatter: Optional[Callable[[Any], bytes]]
    stack_properties_modifier: Optional[Callable[[Any], tuple[int, int, int, int]]]
    kwargs: List[str]
    kwargs_defaults: KW_ARGS_DEFAULTS_TYPE
    unchecked_stack: bool = False

    def __new__(
        cls,
        opcode_or_byte: "int | bytes | Opcode",
        *,
        popped_stack_items: int = 0,
        pushed_stack_items: int = 0,
        max_stack_height: int | None = None,
        min_stack_height: int | None = None,
        data_portion_length: int = 0,
        data_portion_formatter: Any = None,
        stack_properties_modifier: Any = None,
        unchecked_stack: bool = False,
        terminating: bool = False,
        kwargs: List[str] | None = None,
        kwargs_defaults: Optional[KW_ARGS_DEFAULTS_TYPE] = None,
    ) -> "Opcode":
        """Create new opcode instance."""
        if kwargs_defaults is None:
            kwargs_defaults = {}
        if type(opcode_or_byte) is Opcode:
            # Required because Enum class calls the base class
            # with the instantiated object as parameter.
            return opcode_or_byte
        elif isinstance(opcode_or_byte, int) or isinstance(opcode_or_byte, bytes):
            obj_bytes = (
                bytes([opcode_or_byte]) if isinstance(opcode_or_byte, int) else opcode_or_byte
            )
            # Defaults: an opcode needs at least as many items as it pops;
            # its max height is at least its min plus the net stack growth.
            if min_stack_height is None:
                min_stack_height = popped_stack_items
            if max_stack_height is None:
                max_stack_height = max(
                    min_stack_height - popped_stack_items + pushed_stack_items, min_stack_height
                )
            obj = super().__new__(
                cls,
                obj_bytes,
                popped_stack_items=popped_stack_items,
                pushed_stack_items=pushed_stack_items,
                max_stack_height=max_stack_height,
                min_stack_height=min_stack_height,
                terminating=terminating,
            )
            obj.data_portion_length = data_portion_length
            obj.data_portion_formatter = data_portion_formatter
            obj.stack_properties_modifier = stack_properties_modifier
            obj.unchecked_stack = unchecked_stack
            if kwargs is None:
                obj.kwargs = []
            else:
                obj.kwargs = kwargs
            obj.kwargs_defaults = kwargs_defaults
            return obj
        raise TypeError("Opcode constructor '__new__' didn't return an instance!")

    def __getitem__(self, *args: "int | bytes | str | Iterable[int]") -> "Opcode":
        """
        Initialize a new instance of the opcode with the data portion set,
        and also clear the data portion variables to avoid reusing them.

        Raises:
            ValueError: if the opcode has no data portion (or it was
                already set).
            TypeError: if the data portion argument has an unsupported type.

        """
        if self.data_portion_formatter is None and self.data_portion_length == 0:
            raise ValueError("Opcode does not have a data portion or has already been set")
        data_portion = bytes()

        if self.data_portion_formatter is not None:
            # A non-bytes iterable is unpacked into the formatter's
            # positional arguments (supports e.g. `Op.RJUMPV[range(5)]`).
            if len(args) == 1 and isinstance(args[0], Iterable) and not isinstance(args[0], bytes):
                data_portion = self.data_portion_formatter(*args[0])
            else:
                data_portion = self.data_portion_formatter(*args)
        elif self.data_portion_length > 0:
            # For opcodes with a data portion, the first argument is the
            # data and the rest of the arguments form the stack.
            assert len(args) == 1, "Opcode with data portion requires exactly one argument"
            data = args[0]
            if isinstance(data, bytes) or isinstance(data, SupportsBytes) or isinstance(data, str):
                if isinstance(data, str):
                    if data.startswith("0x"):
                        data = data[2:]
                    data = bytes.fromhex(data)
                elif isinstance(data, SupportsBytes):
                    data = bytes(data)
                assert len(data) <= self.data_portion_length
                # Left-pad with zeros to the declared data-portion width.
                data_portion = data.rjust(self.data_portion_length, b"\x00")
            elif isinstance(data, int):
                signed = data < 0
                data_portion = data.to_bytes(
                    length=self.data_portion_length,
                    byteorder="big",
                    signed=signed,
                )
            else:
                raise TypeError("Opcode data portion must be either an int or bytes/hex string")

        popped_stack_items = self.popped_stack_items
        pushed_stack_items = self.pushed_stack_items
        min_stack_height = self.min_stack_height
        max_stack_height = self.max_stack_height
        assert (
            popped_stack_items is not None
            and pushed_stack_items is not None
            and min_stack_height is not None
        )
        # Some opcodes (e.g. SWAPN/DUPN/EXCHANGE) derive their stack
        # properties from the data portion itself.
        if self.stack_properties_modifier is not None:
            (
                popped_stack_items,
                pushed_stack_items,
                min_stack_height,
                max_stack_height,
            ) = self.stack_properties_modifier(data_portion)

        new_opcode = Opcode(
            bytes(self) + data_portion,
            popped_stack_items=popped_stack_items,
            pushed_stack_items=pushed_stack_items,
            min_stack_height=min_stack_height,
            max_stack_height=max_stack_height,
            data_portion_length=0,
            data_portion_formatter=None,
            unchecked_stack=self.unchecked_stack,
            terminating=self.terminating,
            kwargs=self.kwargs,
            kwargs_defaults=self.kwargs_defaults,
        )
        new_opcode._name_ = f"{self._name_}_0x{data_portion.hex()}"
        return new_opcode

    def __call__(
        self,
        *args_t: "int | bytes | str | Opcode | Bytecode | Iterable[int]",
        unchecked: bool = False,
        **kwargs: "int | bytes | str | Opcode | Bytecode",
    ) -> Bytecode:
        """
        Make all opcode instances callable to return formatted bytecode, which
        constitutes a data portion, that is located after the opcode byte,
        and pre-opcode bytecode, which is normally used to set up the stack.

        This useful to automatically format, e.g., call opcodes and their
        stack arguments as
        `Opcodes.CALL(Opcodes.GAS, 0x1234, 0x0, 0x0, 0x0, 0x0, 0x0)`.

        Data sign is automatically detected but for this reason the range
        of the input must be:
        `[-2^(data_portion_bits-1), 2^(data_portion_bits)]`
        where:
        `data_portion_bits == data_portion_length * 8`

        For the stack, the arguments are set up in the opposite order they
        are given, so the first argument is the last item pushed to the stack.

        The resulting stack arrangement does not take into account
        opcode stack element consumption, so the stack height is not
        guaranteed to be correct and the user must take this into
        consideration.

        Integers can also be used as stack elements, in which case they
        are automatically converted to PUSH operations, and negative numbers
        always use a PUSH32 operation.

        Hex-strings will be automatically converted to bytes.

        Raises:
            ValueError: if an invalid keyword argument is provided, or if
                the number of stack arguments does not match the opcode's
                popped-item count (unless unchecked).

        """
        args: List["int | bytes | str | Opcode | Bytecode | Iterable[int]"] = list(args_t)

        if self.has_data_portion():
            if len(args) == 0:
                raise ValueError("Opcode with data portion requires at least one argument")
            assert type(self) is Opcode
            # The last positional argument is the data portion; the rest
            # form the stack.
            get_item_arg = args.pop()
            assert not isinstance(get_item_arg, Bytecode)
            return self[get_item_arg](*args)

        if self.kwargs is not None and len(kwargs) > 0:
            assert len(args) == 0, f"Cannot mix positional and keyword arguments {args} {kwargs}"
            # Validate that all provided kwargs are valid
            invalid_kwargs = set(kwargs.keys()) - set(self.kwargs)
            if invalid_kwargs:
                # Bug fix: the message previously repeated the
                # "Valid arguments are: ..." sentence twice.
                raise ValueError(
                    f"Invalid keyword argument(s) {list(invalid_kwargs)} for opcode "
                    f"{self._name_}. Valid arguments are: {self.kwargs}"
                )
            for kw in self.kwargs:
                args.append(kwargs[kw] if kw in kwargs else self.kwargs_defaults.get(kw, 0))

        # The rest of the arguments form the stack.
        if len(args) != self.popped_stack_items and not (unchecked or self.unchecked_stack):
            raise ValueError(
                f"Opcode {self._name_} requires {self.popped_stack_items} stack elements, but "
                f"{len(args)} were provided. Use 'unchecked=True' parameter to ignore this check."
            )

        # Arguments are pushed in reverse order so the first argument ends
        # up on top of the stack.
        pre_opcode_bytecode = Bytecode()
        while len(args) > 0:
            pre_opcode_bytecode += _stack_argument_to_bytecode(args.pop())
        return pre_opcode_bytecode + self

    def __lt__(self, other: "Opcode") -> bool:
        """Compare two opcodes by their integer value."""
        return self.int() < other.int()

    def __gt__(self, other: "Opcode") -> bool:
        """Compare two opcodes by their integer value."""
        return self.int() > other.int()

    def int(self) -> int:
        """Return integer representation of the opcode."""
        return int.from_bytes(self, byteorder="big")

    def has_data_portion(self) -> bool:
        """Return whether the opcode has a data portion."""
        return self.data_portion_length > 0 or self.data_portion_formatter is not None
# Any value accepted as a stack argument when calling an opcode or macro.
OpcodeCallArg = int | bytes | str | Bytecode | Iterable[int]
class Macro(Bytecode):
    """Opcode macro replacement: raw bytecode plus an optional lambda expansion."""

    # Optional callable producing the macro's bytecode expansion.
    lambda_operation: Callable[..., Bytecode] | None

    def __new__(
        cls,
        macro_or_bytes: Optional["Bytecode | Macro"] = None,
        *,
        lambda_operation: Callable[..., Bytecode] | None = None,
    ) -> "Macro":
        """Create new opcode macro instance."""
        # The Enum machinery re-invokes the base class with an already
        # constructed member, which must be returned untouched.
        if isinstance(macro_or_bytes, Macro):
            return macro_or_bytes
        source = Bytecode() if macro_or_bytes is None else macro_or_bytes
        macro = super().__new__(cls, source)
        macro.lambda_operation = lambda_operation
        return macro

    def __call__(self, *args_t: OpcodeCallArg, **kwargs: Any) -> Bytecode:
        """Expand the macro; without a lambda, push the args then append self."""
        del kwargs
        operation = self.lambda_operation
        if operation is not None:
            return operation(*args_t)
        stack_setup = Bytecode()
        for stack_arg in args_t:
            stack_setup += _stack_argument_to_bytecode(stack_arg)
        return stack_setup + self
# Constants
# Byte widths of the RJUMPV immediate: one byte for the max index, two
# bytes (signed, big-endian) per branch offset.
RJUMPV_MAX_INDEX_BYTE_LENGTH = 1
RJUMPV_BRANCH_OFFSET_BYTE_LENGTH = 2
# TODO: Allowing Iterable here is a hacky way to support `range`,
# because Python 3.11+ will allow `Op.RJUMPV[*range(5)]`.
# This is a temporary solution until Python 3.11+ is the minimum required
# version.
def _rjumpv_encoder(*args: int | bytes | Iterable[int]) -> bytes:
    """Encode RJUMPV data: a max-index byte followed by signed branch offsets."""

    def _join(max_index: int, offsets: List[int]) -> bytes:
        return max_index.to_bytes(RJUMPV_MAX_INDEX_BYTE_LENGTH, "big") + b"".join(
            offset.to_bytes(RJUMPV_BRANCH_OFFSET_BYTE_LENGTH, "big", signed=True)
            for offset in offsets
        )

    if len(args) == 1:
        sole = args[0]
        if isinstance(sole, bytes) or isinstance(sole, SupportsBytes):
            # Pre-encoded data portion: pass through verbatim.
            return bytes(sole)
        if isinstance(sole, Iterable):
            branch_offsets = list(sole)
            return _join(len(branch_offsets) - 1, branch_offsets)
    # Multiple (or a single int) positional offsets: the max-index byte
    # counts all args, but only ints are serialized as offsets.
    return _join(len(args) - 1, [a for a in args if isinstance(a, int)])
def _exchange_encoder(*args: int) -> bytes:
assert 1 <= len(args) <= 2, f"Exchange opcode requires one or two arguments, got {len(args)}"
if len(args) == 1:
return int.to_bytes(args[0], 1, "big")
# n = imm >> 4 + 1
# m = imm & 0xF + 1
# x = n + 1
# y = n + m + 1
# ...
# n = x - 1
# m = y - x
# m = y - n - 1
x, y = args
assert 2 <= x <= 0x11
assert x + 1 <= y <= x + 0x10
n = x - 1
m = y - x
imm = (n - 1) << 4 | m - 1
return int.to_bytes(imm, 1, "big")
def _swapn_stack_properties_modifier(data: bytes) -> tuple[int, int, int, int]:
imm = int.from_bytes(data, "big")
n = imm + 1
min_stack_height = n + 1
return 0, 0, min_stack_height, min_stack_height
def _dupn_stack_properties_modifier(data: bytes) -> tuple[int, int, int, int]:
imm = int.from_bytes(data, "big")
n = imm + 1
min_stack_height = n
return 0, 1, min_stack_height, min_stack_height + 1
def _exchange_stack_properties_modifier(data: bytes) -> tuple[int, int, int, int]:
imm = int.from_bytes(data, "big")
n = (imm >> 4) + 1
m = (imm & 0x0F) + 1
min_stack_height = n + m + 1
return 0, 0, min_stack_height, min_stack_height
class Opcodes(Opcode, Enum):
"""
Enum containing all known opcodes.
Contains deprecated and not yet implemented opcodes.
This enum is !! NOT !! meant to be iterated over by the tests.
Instead, create a list with cherry-picked opcodes from this Enum
within the test if iteration is needed.
Do !! NOT !! remove or modify existing opcodes from this list.
"""
STOP = Opcode(0x00, terminating=True)
"""
STOP()
----
Description
----
Stop execution
Inputs
----
- None
Outputs
----
- None
Fork
----
Frontier
Gas
----
0
Source: [evm.codes/#00](https://www.evm.codes/#00)
"""
ADD = Opcode(0x01, popped_stack_items=2, pushed_stack_items=1)
"""
ADD(a, b) = c
----
Description
----
Addition operation
Inputs
----
- a: first integer value to add
- b: second integer value to add
Outputs
----
- c: integer result of the addition modulo 2**256
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#01](https://www.evm.codes/#01)
"""
MUL = Opcode(0x02, popped_stack_items=2, pushed_stack_items=1)
"""
MUL(a, b) = c
----
Description
----
Multiplication operation
Inputs
----
- a: first integer value to multiply
- b: second integer value to multiply
Outputs
----
- c: integer result of the multiplication modulo 2**256
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#02](https://www.evm.codes/#02)
"""
SUB = Opcode(0x03, popped_stack_items=2, pushed_stack_items=1)
"""
SUB(a, b) = c
----
Description
----
Subtraction operation
Inputs
----
- a: first integer value
- b: second integer value
Outputs
----
- c: integer result of the subtraction modulo 2**256
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#03](https://www.evm.codes/#03)
"""
DIV = Opcode(0x04, popped_stack_items=2, pushed_stack_items=1)
"""
DIV(a, b) = c
----
Description
----
Division operation
Inputs
----
- a: numerator
- b: denominator (must be non-zero)
Outputs
----
- c: integer result of the division
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#04](https://www.evm.codes/#04)
"""
SDIV = Opcode(0x05, popped_stack_items=2, pushed_stack_items=1)
"""
SDIV(a, b) = c
----
Description
----
Signed division operation
Inputs
----
- a: signed numerator
- b: signed denominator
Outputs
----
- c: signed integer result of the division. If the denominator is 0,
the result will be 0
----
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#05](https://www.evm.codes/#05)
"""
MOD = Opcode(0x06, popped_stack_items=2, pushed_stack_items=1)
"""
MOD(a, b) = c
----
Description
----
Modulo operation
Inputs
----
- a: integer numerator
- b: integer denominator
Outputs
----
- a % b: integer result of the integer modulo. If the denominator is 0,
the result will be 0
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#06](https://www.evm.codes/#06)
"""
SMOD = Opcode(0x07, popped_stack_items=2, pushed_stack_items=1)
"""
SMOD(a, b) = c
----
Description
----
Signed modulo remainder operation
Inputs
----
- a: integer numerator
- b: integer denominator
Outputs
----
- a % b: integer result of the signed integer modulo. If the denominator
is 0, the result will be 0
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#07](https://www.evm.codes/#07)
"""
ADDMOD = Opcode(0x08, popped_stack_items=3, pushed_stack_items=1)
"""
ADDMOD(a, b, c) = d
----
Description
----
Modular addition operation with overflow check
Inputs
----
- a: first integer value
- b: second integer value
- c: integer denominator
Outputs
----
- (a + b) % N: integer result of the addition followed by a modulo.
If the denominator is 0, the result will be 0
Fork
----
Frontier
Gas
----
8
Source: [evm.codes/#08](https://www.evm.codes/#08)
"""
MULMOD = Opcode(0x09, popped_stack_items=3, pushed_stack_items=1)
"""
MULMOD(a, b, N) = d
----
Description
----
Modulo multiplication operation
Inputs
----
- a: first integer value to multiply
- b: second integer value to multiply
- N: integer denominator
Outputs
----
- (a * b) % N: integer result of the multiplication followed by a modulo.
If the denominator is 0, the result will be 0
Fork
----
Frontier
Gas
----
8
Source: [evm.codes/#09](https://www.evm.codes/#09)
"""
EXP = Opcode(0x0A, popped_stack_items=2, pushed_stack_items=1)
"""
EXP(a, exponent) = a ** exponent
----
Description
----
Exponential operation
Inputs
----
- a: integer base
- exponent: integer exponent
Outputs
----
- a ** exponent: integer result of the exponential operation modulo 2**256
Fork
----
Frontier
Gas
----
- static_gas = 10
- dynamic_gas = 50 * exponent_byte_size
Source: [evm.codes/#0A](https://www.evm.codes/#0A)
"""
SIGNEXTEND = Opcode(0x0B, popped_stack_items=2, pushed_stack_items=1)
"""
SIGNEXTEND(b, x) = y
----
Description
----
Sign extension operation
Inputs
----
- b: size in byte - 1 of the integer to sign extend
- x: integer value to sign extend
Outputs
----
- y: integer result of the sign extend
Fork
----
Frontier
Gas
----
5
Source: [evm.codes/#0B](https://www.evm.codes/#0B)
"""
LT = Opcode(0x10, popped_stack_items=2, pushed_stack_items=1)
"""
LT(a, b) = a < b
----
Description
----
Less-than comparison
Inputs
----
- a: left side integer value
- b: right side integer value
Outputs
----
- a < b: 1 if the left side is smaller, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#10](https://www.evm.codes/#10)
"""
GT = Opcode(0x11, popped_stack_items=2, pushed_stack_items=1)
"""
GT(a, b) = a > b
----
Description
----
Greater-than comparison
Inputs
----
- a: left side integer
- b: right side integer
Outputs
----
- a > b: 1 if the left side is bigger, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#11](https://www.evm.codes/#11)
"""
SLT = Opcode(0x12, popped_stack_items=2, pushed_stack_items=1)
"""
SLT(a, b) = a < b
----
Description
----
Signed less-than comparison
Inputs
----
- a: left side signed integer
- b: right side signed integer
Outputs
----
- a < b: 1 if the left side is smaller, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#12](https://www.evm.codes/#12)
"""
SGT = Opcode(0x13, popped_stack_items=2, pushed_stack_items=1)
"""
SGT(a, b) = a > b
----
Description
----
Signed greater-than comparison
Inputs
----
- a: left side signed integer
- b: right side signed integer
Outputs
----
- a > b: 1 if the left side is bigger, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#13](https://www.evm.codes/#13)
"""
EQ = Opcode(0x14, popped_stack_items=2, pushed_stack_items=1)
"""
EQ(a, b) = a == b
----
Description
----
Equality comparison
Inputs
----
- a: left side integer
- b: right side integer
Outputs
----
- a == b: 1 if the left side is equal to the right side, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#14](https://www.evm.codes/#14)
"""
ISZERO = Opcode(0x15, popped_stack_items=1, pushed_stack_items=1)
"""
ISZERO(a) = a == 0
----
Description
----
Is-zero comparison
Inputs
----
- a: integer
Outputs
----
- a == 0: 1 if a is 0, 0 otherwise
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#15](https://www.evm.codes/#15)
"""
AND = Opcode(0x16, popped_stack_items=2, pushed_stack_items=1)
"""
AND(a, b) = a & b
----
Description
----
Bitwise AND operation
Inputs
----
- a: first binary value
- b: second binary value
Outputs
----
- a & b: the bitwise AND result
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#16](https://www.evm.codes/#16)
"""
OR = Opcode(0x17, popped_stack_items=2, pushed_stack_items=1)
"""
OR(a, b) = a | b
----
Description
----
Bitwise OR operation
Inputs
----
- a: first binary value
- b: second binary value
Outputs
----
- a | b: the bitwise OR result
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#17](https://www.evm.codes/#17)
"""
XOR = Opcode(0x18, popped_stack_items=2, pushed_stack_items=1)
"""
XOR(a, b) = a ^ b
----
Description
----
Bitwise XOR operation
Inputs
----
- a: first binary value
- b: second binary value
Outputs
----
- a ^ b: the bitwise XOR result
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#18](https://www.evm.codes/#18)
"""
NOT = Opcode(0x19, popped_stack_items=1, pushed_stack_items=1)
"""
NOT(a) = ~a
----
Description
----
Bitwise NOT operation
Inputs
----
- a: binary value
Outputs
----
- ~a: the bitwise NOT result
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#19](https://www.evm.codes/#19)
"""
BYTE = Opcode(0x1A, popped_stack_items=2, pushed_stack_items=1)
"""
BYTE(i, x) = y
----
Description
----
Extract a byte from the given position in the value
Inputs
----
- i: byte offset starting from the most significant byte
- x: 32-byte value
Outputs
----
- y: the indicated byte at the least significant position.
If the byte offset is out of range, the result is 0
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#1A](https://www.evm.codes/#1A)
"""
SHL = Opcode(0x1B, popped_stack_items=2, pushed_stack_items=1)
"""
SHL(shift, value) = value << shift
----
Description
----
Shift left operation
Inputs
----
- shift: number of bits to shift to the left
- value: 32 bytes to shift
Outputs
----
- value << shift: the shifted value. If shift is bigger than 255, returns 0
Fork
----
Constantinople
Gas
----
3
Source: [evm.codes/#1B](https://www.evm.codes/#1B)
"""
SHR = Opcode(0x1C, popped_stack_items=2, pushed_stack_items=1)
"""
SHR(shift, value) = value >> shift
----
Description
----
Logical shift right operation
Inputs
----
- shift: number of bits to shift to the right.
- value: 32 bytes to shift
Outputs
----
- value >> shift: the shifted value. If shift is bigger than 255, returns 0
Fork
----
Constantinople
Gas
----
3
Source: [evm.codes/#1C](https://www.evm.codes/#1C)
"""
SAR = Opcode(0x1D, popped_stack_items=2, pushed_stack_items=1)
"""
SAR(shift, value) = value >> shift
----
Description
----
Arithmetic shift right operation
Inputs
----
- shift: number of bits to shift to the right
- value: integer to shift
Outputs
----
- value >> shift: the shifted value
Fork
----
Constantinople
Gas
----
3
Source: [evm.codes/#1D](https://www.evm.codes/#1D)
"""
CLZ = Opcode(0x1E, popped_stack_items=1, pushed_stack_items=1)
"""
CLZ(value) = count_leading_zeros(value)
----
Description
----
Counts leading zeros (bitwise).
Inputs
----
- value: integer to count zeros on
Outputs
----
- zeros: leading zero bits
Fork
----
Osaka
Gas
----
3
Source: [evm.codes/#1E](https://www.evm.codes/#1E)
"""
SHA3 = Opcode(0x20, popped_stack_items=2, pushed_stack_items=1, kwargs=["offset", "size"])
"""
SHA3(offset, size) = hash
----
Description
----
Compute Keccak-256 hash
Inputs
----
- offset: byte offset in the memory
- size: byte size to read in the memory
Outputs
----
- hash: Keccak-256 hash of the given data in memory
Fork
----
Frontier
Gas
----
- minimum_word_size = (size + 31) / 32
- static_gas = 30
- dynamic_gas = 6 * minimum_word_size + memory_expansion_cost
Source: [evm.codes/#20](https://www.evm.codes/#20)
"""
ADDRESS = Opcode(0x30, pushed_stack_items=1)
"""
ADDRESS() = address
----
Description
----
Get address of currently executing account
Inputs
----
- None
Outputs
----
- address: the 20-byte address of the current account
Fork
----
Frontier
Gas
----
2
Source: [evm.codes/#30](https://www.evm.codes/#30)
"""
BALANCE = Opcode(0x31, popped_stack_items=1, pushed_stack_items=1, kwargs=["address"])
"""
BALANCE(address) = balance
----
Description
----
Get the balance of the specified account
Inputs
----
- address: 20-byte address of the account to check
Outputs
----
- balance: balance of the given account in wei. Returns 0 if the
account doesn't exist
Fork
----
Frontier
Gas
----
- static_gas = 0
- dynamic_gas = 100 if warm_address, 2600 if cold_address
Source: [evm.codes/#31](https://www.evm.codes/#31)
"""
ORIGIN = Opcode(0x32, pushed_stack_items=1)
"""
ORIGIN() = address
----
Description
----
Get execution origination address
Inputs
----
- None
Outputs
----
- address: the 20-byte address of the sender of the transaction.
It can only be an account without code
Fork
----
Frontier
Gas
----
2
Source: [evm.codes/#32](https://www.evm.codes/#32)
"""
CALLER = Opcode(0x33, pushed_stack_items=1)
"""
CALLER() = address
----
Description
----
Get caller address
Inputs
----
- None
Outputs
----
- address: the 20-byte address of the caller account.
This is the account that did the last
call (except delegate call)
Fork
----
Frontier
Gas
----
2
Source: [evm.codes/#33](https://www.evm.codes/#33)
"""
CALLVALUE = Opcode(0x34, pushed_stack_items=1)
"""
CALLVALUE() = value
----
Description
----
Get deposited value by the instruction/transaction responsible
for this execution
Inputs
----
- None
Outputs
----
- value: the value of the current call in wei
Fork
----
Frontier
Gas
----
2
Source: [evm.codes/#34](https://www.evm.codes/#34)
"""
CALLDATALOAD = Opcode(0x35, popped_stack_items=1, pushed_stack_items=1, kwargs=["offset"])
"""
CALLDATALOAD(offset) = data[offset]
----
Description
----
Get input data of current environment
Inputs
----
- offset: byte offset in the calldata
Outputs
----
- data[offset]: 32-byte value starting from the given offset of
the calldata. All bytes after the end of the calldata
are set to 0
Fork
----
Frontier
Gas
----
3
Source: [evm.codes/#35](https://www.evm.codes/#35)
"""
CALLDATASIZE = Opcode(0x36, pushed_stack_items=1)
"""
CALLDATASIZE() = size
----
Description
----
Get size of input data in current environment
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/helpers.py | src/ethereum_test_vm/helpers.py | """Helper functions for the EVM."""
from .bytecode import Bytecode
from .opcodes import Opcodes as Op
class MemoryVariable(Bytecode):
    """
    Bytecode wrapper that models a named 32-byte variable living at a fixed
    memory offset.

    Reading: an instance used inside other bytecode expands to
    `Op.MLOAD(offset=<offset>)`, so the variable can be dropped in wherever a
    value is expected:

    ```python
    v = MemoryVariable(128)
    bytecode = Op.ADD(v, Op.CALLDATASIZE())
    # same as: Op.ADD(Op.MLOAD(offset=128), Op.CALLDATASIZE())
    ```

    Writing: `set`, `add` and `sub` emit the MSTORE sequences that update the
    slot in place:

    ```python
    v = MemoryVariable(128)
    bytecode = v.set(0xff) + v.add(1) + v.return_value()
    # same as:
    # Op.MSTORE(offset=128, value=0xff)
    # + Op.MSTORE(offset=128, value=Op.ADD(Op.MLOAD(offset=128), 1))
    # + Op.RETURN(offset=128, size=32)
    ```

    Offsets must be chosen so that distinct variables do not overlap.
    """

    # Fixed memory offset of this variable's 32-byte slot.
    offset: int

    def __new__(cls, offset: int) -> "MemoryVariable":
        """
        Create the variable; as plain bytecode the instance is simply an
        MLOAD of `offset`.
        """
        instance = super().__new__(cls, Op.MLOAD(offset=offset))
        instance.offset = offset
        return instance

    def set(self, value: int | Bytecode) -> Bytecode:
        """Overwrite this variable's memory slot with `value`."""
        return Op.MSTORE(offset=self.offset, value=value)

    def add(self, value: int | Bytecode) -> Bytecode:
        """Increment the value in this variable's slot by `value`, in place."""
        return self.set(Op.ADD(Op.MLOAD(offset=self.offset), value))

    def sub(self, value: int | Bytecode) -> Bytecode:
        """Decrement the value in this variable's slot by `value`, in place."""
        return self.set(Op.SUB(Op.MLOAD(offset=self.offset), value))

    def store_value(self, key: int | Bytecode) -> Bytecode:
        """SSTORE this variable's current value under storage `key`."""
        return Op.SSTORE(key, Op.MLOAD(offset=self.offset))

    def return_value(self) -> Bytecode:
        """RETURN the 32-byte value currently held in this variable's slot."""
        return Op.RETURN(offset=self.offset, size=32)
def call_return_code(opcode: Op, success: bool, *, revert: bool = False) -> int:
    """
    Return the stack value a *CALL opcode produces for the given outcome.

    CALL/CALLCODE/DELEGATECALL/STATICCALL yield 1 on success and 0 on
    failure. EXTCALL/EXTDELEGATECALL/EXTSTATICCALL yield 0 on success,
    1 on revert, and 2 on any other failure.

    Raises:
        ValueError: if `opcode` is not a call opcode.
    """
    if opcode in (Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL):
        return int(success)
    if opcode in (Op.EXTCALL, Op.EXTDELEGATECALL, Op.EXTSTATICCALL):
        if success:
            return 0
        return 1 if revert else 2
    raise ValueError(f"Not a call opcode: {opcode}")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/__init__.py | src/ethereum_test_vm/__init__.py | """Ethereum Virtual Machine related definitions and utilities."""
from .bytecode import Bytecode
from .evm_types import EVMCodeType
from .helpers import MemoryVariable, call_return_code
from .opcodes import Macro, Macros, Opcode, OpcodeCallArg, Opcodes, UndefinedOpcodes
# Explicit public API of the `ethereum_test_vm` package.
__all__ = (
    "Bytecode",
    "EVMCodeType",
    "Macro",
    "Macros",
    "MemoryVariable",
    "Opcode",
    "OpcodeCallArg",
    "Opcodes",
    "UndefinedOpcodes",
    "call_return_code",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/tests/test_vm.py | src/ethereum_test_vm/tests/test_vm.py | """Test suite for `ethereum_test_vm` module."""
import pytest
from ethereum_test_base_types import Address
from ..opcodes import Bytecode
from ..opcodes import Macros as Om
from ..opcodes import Opcodes as Op
@pytest.mark.parametrize(
"opcodes,expected",
[
pytest.param(Op.PUSH1(0x01), b"\x60\x01", id="PUSH1(0x01)"),
pytest.param(Op.PUSH1[0x01], b"\x60\x01", id="PUSH1[0x01]"),
pytest.param(Op.PUSH1("0x01"), b"\x60\x01", id="PUSH1('0x01')"),
pytest.param(Op.PUSH1["0x01"], b"\x60\x01", id="PUSH1['0x01']"),
pytest.param(Op.PUSH1(0xFF), b"\x60\xff", id="PUSH1(0xFF)"),
pytest.param(Op.PUSH1(-1), b"\x60\xff", id="PUSH1(-1)"),
pytest.param(Op.PUSH1[-1], b"\x60\xff", id="PUSH1[-1]"),
pytest.param(Op.PUSH1(-2), b"\x60\xfe", id="PUSH1(-2)"),
pytest.param(Op.PUSH20(0x01), b"\x73" + b"\x00" * 19 + b"\x01", id="PUSH20(0x01)"),
pytest.param(Op.PUSH20[0x01], b"\x73" + b"\x00" * 19 + b"\x01", id="PUSH20[0x01]"),
pytest.param(Op.PUSH32(0xFF), b"\x7f" + b"\x00" * 31 + b"\xff", id="PUSH32(0xFF)"),
pytest.param(Op.PUSH32(-1), b"\x7f" + b"\xff" * 32, id="PUSH32(-1)"),
pytest.param(
sum(Op.PUSH1(i) for i in range(0x2)),
b"\x60\x00\x60\x01",
id="sum(PUSH1(i) for i in range(0x2))",
),
pytest.param(
sum(Op.PUSH1[i] for i in range(0x2)),
b"\x60\x00\x60\x01",
id="sum(PUSH1[i] for i in range(0x2))",
),
pytest.param(
Op.SSTORE(
-1,
Op.CALL(
Op.GAS,
Op.ADDRESS,
Op.PUSH1(0x20),
0,
0,
0x20,
0x1234,
),
),
bytes(
[
0x61,
0x12,
0x34,
0x60,
0x20,
0x60,
0x00,
0x60,
0x00,
0x60,
0x20,
0x30,
0x5A,
0xF1,
0x7F,
]
+ [0xFF] * 32
+ [0x55]
),
id="SSTORE(-1, CALL(GAS, ADDRESS, PUSH1(0x20), 0, 0, 0x20, 0x1234))",
),
pytest.param(
Op.CALL(Op.GAS, Op.PUSH20(0x1234), 0, 0, 0, 0, 32),
b"\x60\x20\x60\x00\x60\x00\x60\x00\x60\x00\x73\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\x00\x00\x00\x00\x12\x34\x5a\xf1",
id="CALL(GAS, PUSH20(0x1234), 0, 0, 0, 0, 32)",
),
pytest.param(
Op.CALL(Op.GAS, Address(0x1234), 0, 0, 0, 0, 32),
b"\x60\x20\x60\x00\x60\x00\x60\x00\x60\x00\x61\x12\x34\x5a\xf1",
id="CALL(GAS, Address(0x1234), 0, 0, 0, 0, 32)",
),
pytest.param(Op.ADD(1, 2), bytes([0x60, 0x02, 0x60, 0x01, 0x01]), id="ADD(1, 2)"),
pytest.param(
Op.ADD(Op.ADD(1, 2), 3),
bytes([0x60, 0x03, 0x60, 0x02, 0x60, 0x01, 0x01, 0x01]),
id="ADD(ADD(1, 2), 3)",
),
pytest.param(
Op.CALL(1, 123, 4, 5, 6, 7, 8),
b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x60\x7b\x60\x01\xf1",
id="CALL(1, 123, 4, 5, 6, 7, 8)",
),
pytest.param(
Op.CALL(1, Address(0x0123), 4, 5, 6, 7, 8),
b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x61\x01\x23\x60\x01\xf1",
id="CALL(1, Address(0x0123), 4, 5, 6, 7, 8)",
),
pytest.param(
Op.CALL(1, 0x0123, 4, 5, 6, 7, 8),
b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x61\x01\x23\x60\x01\xf1",
id="CALL(1, 0x0123, 4, 5, 6, 7, 8)",
),
pytest.param(
Op.CALL(1, 123, 4, 5, 6, 7, 8),
b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x60\x7b\x60\x01\xf1",
id="CALL(1, 123, 4, 5, 6, 7, 8)",
),
pytest.param(
Op.CREATE(1, Address(12), 4, 5, 6, 7, 8, unchecked=True),
b"\x60\x08\x60\x07\x60\x06\x60\x05\x60\x04\x60\x0c\x60\x01\xf0",
id="CREATE(1, Address(12), 4, 5, 6, 7, 8, unchecked=True)",
),
pytest.param(
Om.OOG(),
bytes([0x64, 0x17, 0x48, 0x76, 0xE8, 0x00, 0x60, 0x00, 0x20]),
id="OOG()",
),
pytest.param(
Op.RJUMPV[1, 2, 3](Op.ORIGIN),
bytes(
[
Op.ORIGIN.int(),
Op.RJUMPV.int(),
0x02, # Data portion, defined by the [1, 2, 3] argument
0x00,
0x01,
0x00,
0x02,
0x00,
0x03,
]
),
id="RJUMPV[1, 2, 3](ORIGIN)",
),
pytest.param(
Op.RJUMPV[b"\x00"],
bytes(
[
Op.RJUMPV.int(),
0x00,
]
),
id="RJUMPV[b'\\x00']",
),
pytest.param(
Op.RJUMPV[-1, -2, -3],
bytes(
[
Op.RJUMPV.int(),
0x02,
0xFF,
0xFF,
0xFF,
0xFE,
0xFF,
0xFD,
]
),
id="RJUMPV[-1, -2, -3]",
),
pytest.param(
Op.RJUMPV[range(5)], # TODO: on Python 3.11+: Op.RJUMPV[*range(5)]
bytes(
[
Op.RJUMPV.int(),
0x04,
0x00,
0x00,
0x00,
0x01,
0x00,
0x02,
0x00,
0x03,
0x00,
0x04,
]
),
id="RJUMPV[range(5)]",
),
pytest.param(
Op.RJUMPV[1, 2, 3](Op.ORIGIN) + Op.STOP,
bytes(
[
Op.ORIGIN.int(),
Op.RJUMPV.int(),
0x02, # Data portion, defined by the [1, 2, 3] argument
0x00,
0x01,
0x00,
0x02,
0x00,
0x03,
Op.STOP.int(),
]
),
id="RJUMPV[1, 2, 3](ORIGIN) + STOP",
),
pytest.param(
Op.STOP * 2,
bytes(
[
Op.STOP.int(),
Op.STOP.int(),
]
),
id="STOP * 2",
),
pytest.param(
Op.RJUMPV[0, 3, 6, 9], bytes.fromhex("e2030000000300060009"), id="RJUMPV[0, 3, 6, 9]"
),
pytest.param(Op.RJUMPV[2, 0], bytes.fromhex("e20100020000"), id="RJUMPV[2, 0]"),
pytest.param(
Op.RJUMPV[b"\x02\x00\x02\xff\xff"],
bytes.fromhex("e2020002ffff"),
id="RJUMPV[b'\\x02\\x00\\x02\\xFF\\xFF']",
),
pytest.param(
Op.EXCHANGE[0x2 + 0x0, 0x3 + 0x0],
bytes.fromhex("e800"),
id="EXCHANGE[0x2 + 0x0, 0x3 + 0x0]",
),
pytest.param(
Op.EXCHANGE[0x2 + 0x0, 0x3 + 0xF],
bytes.fromhex("e80f"),
id="EXCHANGE[0x2 + 0x0, 0x3 + 0xF]",
),
pytest.param(
Op.EXCHANGE[0x2 + 0xF, 0x3 + 0xF + 0x0],
bytes.fromhex("e8f0"),
id="EXCHANGE[0x2 + 0xF, 0x3 + 0xF + 0x0]",
),
pytest.param(
Op.EXCHANGE[0x2 + 0xF, 0x3 + 0xF + 0xF],
bytes.fromhex("e8ff"),
id="EXCHANGE[0x2 + 0xF, 0x3 + 0xF + 0xF]",
),
pytest.param(Op.PUSH0 * 0, bytes(), id="PUSH0 * 0"),
pytest.param(
Op.CREATE(value=1, offset=2, size=3),
b"\x60\x03\x60\x02\x60\x01\xf0",
id="Op.CREATE(value=1, offset=2, size=3)",
),
pytest.param(
Op.CREATE2(value=1, offset=2, size=3),
b"\x60\x00\x60\x03\x60\x02\x60\x01\xf5",
id="Op.CREATE2(value=1, offset=2, size=3)",
),
pytest.param(
Op.CALL(address=1),
b"\x60\x00\x60\x00\x60\x00\x60\x00\x60\x00\x60\x01\x5a\xf1",
id="Op.CALL(address=1)",
),
pytest.param(
Op.STATICCALL(address=1),
b"\x60\x00\x60\x00\x60\x00\x60\x00\x60\x01\x5a\xfa",
id="Op.STATICCALL(address=1)",
),
pytest.param(
Op.CALLCODE(address=1),
b"\x60\x00\x60\x00\x60\x00\x60\x00\x60\x00\x60\x01\x5a\xf2",
id="Op.CALLCODE(address=1)",
),
pytest.param(
Op.DELEGATECALL(address=1),
b"\x60\x00\x60\x00\x60\x00\x60\x00\x60\x01\x5a\xf4",
id="Op.DELEGATECALL(address=1)",
),
pytest.param(
Op.EXTCALL(address=1),
b"\x60\x00\x60\x00\x60\x00\x60\x01\xf8",
id="Op.EXTCALL(address=1)",
),
pytest.param(
Op.EXTSTATICCALL(address=1),
b"\x60\x00\x60\x00\x60\x01\xfb",
id="Op.EXTSTATICCALL(address=1)",
),
pytest.param(
Op.EXTDELEGATECALL(address=1),
b"\x60\x00\x60\x00\x60\x01\xf9",
id="Op.EXTDELEGATECALL(address=1)",
),
pytest.param(
Om.MSTORE(b""),
b"",
id='Om.MSTORE(b"")',
),
pytest.param(
Om.MSTORE(bytes(range(32))),
bytes(Op.MSTORE(0, bytes(range(32)))),
id="Om.MSTORE(bytes(range(32)))",
),
pytest.param(
Om.MSTORE(bytes(range(64))),
bytes(Op.MSTORE(0, bytes(range(32))) + Op.MSTORE(32, bytes(range(32, 64)))),
id="Om.MSTORE(bytes(range(64)))",
),
pytest.param(
Om.MSTORE(bytes(range(33))),
bytes(
Op.MSTORE(0, bytes(range(32)))
+ Op.MLOAD(32)
+ Op.PUSH31[-1]
+ Op.AND
+ Op.PUSH32[b"\x20".ljust(32, b"\x00")]
+ Op.OR
+ Op.PUSH1(32)
+ Op.MSTORE
),
id="Om.MSTORE(bytes(range(33)))",
),
pytest.param(
Om.MSTORE(bytes(range(63))),
bytes(
Op.MSTORE(0, bytes(range(32)))
+ Op.MLOAD(32)
+ Op.PUSH1[-1]
+ Op.AND
+ Op.PUSH32[bytes(range(32, 63)).ljust(32, b"\x00")]
+ Op.OR
+ Op.PUSH1(32)
+ Op.MSTORE
),
id="Om.MSTORE(bytes(range(63)))",
),
],
)
def test_opcodes(opcodes: bytes, expected: bytes) -> None:
    """Test that the `opcodes` are transformed into bytecode as expected."""
    # Each parametrized case above supplies an opcode/bytecode expression and
    # the exact byte sequence its serialization must produce.
    assert bytes(opcodes) == expected
def test_opcodes_repr() -> None:
    """Test that the `repr` of an `Op` is the same as its name."""
    # Opcodes and macros stringify to their (possibly immediate-suffixed) name.
    for item, expected_name in (
        (Op.CALL, "CALL"),
        (Op.DELEGATECALL, "DELEGATECALL"),
        (Om.OOG, "OOG"),
        (Op.DUPN[1], "DUPN_0x01"),
        (Op.DATALOADN[1], "DATALOADN_0x0001"),
    ):
        assert f"{item}" == expected_name
    assert str(Op.ADD) == "ADD"
def test_macros() -> None:
    """Test opcode and macros interaction."""
    # A macro concatenates like its expansion...
    assert (Op.PUSH1(1) + Om.OOG) == (Op.PUSH1(1) + Op.SHA3(0, 100000000000))
    # ...but never compares equal to any single opcode.
    assert all(opcode != Om.OOG for opcode in Op)
@pytest.mark.parametrize(
"bytecode,expected_popped_items,expected_pushed_items,"
"expected_max_stack_height,expected_min_stack_height",
[
pytest.param(Op.PUSH1 + Op.POP, 0, 0, 1, 0, id="PUSH1 + POP"),
pytest.param(Op.PUSH1 + Op.PUSH1, 0, 2, 2, 0, id="PUSH1 + PUSH1"),
pytest.param(Op.PUSH1 * 3, 0, 3, 3, 0, id="PUSH1 * 3"),
pytest.param(Op.POP + Op.POP, 2, 0, 2, 2, id="POP + POP"),
pytest.param(Op.POP * 3, 3, 0, 3, 3, id="POP * 3"),
pytest.param((Op.POP * 3) + Op.PUSH1, 3, 1, 3, 3, id="(POP * 3) + PUSH1"),
pytest.param(Op.SWAP2 + Op.POP * 3, 3, 0, 3, 3, id="SWAP2 + POP * 3"),
pytest.param(Op.SWAP2 + Op.PUSH1 * 3, 0, 3, 6, 3, id="SWAP2 + PUSH1 * 3"),
pytest.param(Op.SWAP1 + Op.SWAP2, 0, 0, 3, 3, id="SWAP1 + SWAP2"),
pytest.param(
Op.POP * 2 + Op.PUSH1 + Op.POP * 2 + Op.PUSH1 * 3,
3,
3,
3,
3,
id="POP * 2 + PUSH1 + POP * 2 + PUSH1 * 3",
),
pytest.param(Op.CALL(1, 2, 3, 4, 5, 6, 7), 0, 1, 7, 0, id="CALL(1, 2, 3, 4, 5, 6, 7)"),
pytest.param(
Op.POP(Op.CALL(1, 2, 3, 4, 5, 6, 7)), 0, 0, 7, 0, id="POP(CALL(1, 2, 3, 4, 5, 6, 7))"
),
pytest.param(
Op.PUSH0 * 2 + Op.PUSH0 + Op.ADD + Op.PUSH0 + Op.POP * 2, 0, 1, 3, 0, id="parens1"
),
pytest.param(
Op.PUSH0 * 2 + (Op.PUSH0 + Op.ADD + Op.PUSH0 + Op.POP * 2), 0, 1, 3, 0, id="parens2"
),
pytest.param(
Op.PUSH0 * 2 + Op.PUSH0 + (Op.ADD + Op.PUSH0 + Op.POP * 2), 0, 1, 3, 0, id="parens3"
),
pytest.param(
Op.PUSH0 * 2 + Op.PUSH0 + (Op.ADD + Op.PUSH0) + Op.POP * 2, 0, 1, 3, 0, id="parens4"
),
pytest.param(
Op.PUSH0 * 2 + (Op.PUSH0 + Op.ADD + Op.PUSH0) + Op.POP * 2, 0, 1, 3, 0, id="parens5"
),
],
)
def test_bytecode_properties(
    bytecode: Bytecode,
    expected_popped_items: int,
    expected_pushed_items: int,
    expected_max_stack_height: int,
    expected_min_stack_height: int,
) -> None:
    """Test that the properties of the bytecode are as expected."""
    # Verifies the aggregated stack accounting that bytecode concatenation
    # computes: net items consumed/produced and the min/max stack heights
    # required by the whole sequence.
    assert bytecode.popped_stack_items == expected_popped_items, "Popped stack items mismatch"
    assert bytecode.pushed_stack_items == expected_pushed_items, "Pushed stack items mismatch"
    assert bytecode.max_stack_height == expected_max_stack_height, "Max stack height mismatch"
    assert bytecode.min_stack_height == expected_min_stack_height, "Min stack height mismatch"
def test_opcode_comparison() -> None:
    """Test that the opcodes are comparable."""
    # Equality and ordering between distinct opcodes.
    assert Op.ADD == Op.ADD
    assert Op.ADD != Op.STOP
    assert Op.ADD > Op.STOP
    assert Op.STOP < Op.ADD
def test_bytecode_concatenation_with_bytes() -> None:
    """
    Test that the bytecode can be concatenated with bytes.

    Bytes work as verbatim code and don't affect the bytecode properties.
    """
    prefix = Op.PUSH1[0xFF] + Op.NOT
    assert str(prefix) == ""
    combined = prefix + b"\x01\x02"
    # The raw bytes are appended verbatim after PUSH1 0xFF, NOT.
    assert combined == bytes([0x60, 0xFF, 0x19, 0x01, 0x02])
    assert str(combined) == ""
    # All stack/termination properties are inherited from the bytecode part.
    for attribute in (
        "popped_stack_items",
        "pushed_stack_items",
        "max_stack_height",
        "min_stack_height",
        "terminating",
    ):
        assert getattr(combined, attribute) == getattr(prefix, attribute)
def test_opcode_kwargs_validation() -> None:
    """Test that invalid keyword arguments raise ValueError."""
    # The accepted keyword names are declared per opcode (its `kwargs` list);
    # anything else must be rejected with a message naming the bad keys.
    # Test valid kwargs work
    Op.MSTORE(offset=0, value=1)
    Op.CALL(gas=1, address=2, value=3, args_offset=4, args_size=5, ret_offset=6, ret_size=7)
    # Test invalid kwargs raise ValueError
    with pytest.raises(
        ValueError, match=r"Invalid keyword argument\(s\) \['offest'\] for opcode MSTORE"
    ):
        Op.MSTORE(offest=0, value=1)  # codespell:ignore offest
    with pytest.raises(
        ValueError, match=r"Invalid keyword argument\(s\) \['wrong_arg'\] for opcode MSTORE"
    ):
        Op.MSTORE(offset=0, value=1, wrong_arg=2)
    with pytest.raises(
        ValueError, match=r"Invalid keyword argument\(s\) \['addres'\] for opcode CALL"
    ):
        Op.CALL(
            gas=1,
            addres=2,  # codespell:ignore
            value=3,
            args_offset=4,
            args_size=5,
            ret_offset=6,
            ret_size=7,
        )
    # Test multiple invalid kwargs
    with pytest.raises(ValueError, match=r"Invalid keyword argument\(s\).*for opcode MSTORE"):
        Op.MSTORE(offest=0, valu=1, extra=2)  # codespell:ignore offest,valu
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_vm/tests/__init__.py | src/ethereum_test_vm/tests/__init__.py | """Tests for the ethereum_test_vm package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_benchmark/benchmark_code_generator.py | src/ethereum_test_benchmark/benchmark_code_generator.py | """
Benchmark code generator classes for creating
optimized bytecode patterns.
"""
from dataclasses import dataclass
from ethereum_test_base_types import Address
from ethereum_test_forks import Fork
from ethereum_test_specs.benchmark import BenchmarkCodeGenerator
from ethereum_test_types import Alloc
from ethereum_test_vm.opcodes import Opcodes as Op
@dataclass(kw_only=True)
class JumpLoopGenerator(BenchmarkCodeGenerator):
    """Generates bytecode that loops execution using JUMP operations."""

    def deploy_contracts(self, *, pre: Alloc, fork: Fork) -> Address:
        """
        Deploy the looping contract and return its address.

        Layout produced by the shared scaffold:
        setup + JUMPDEST + attack * N + cleanup + JUMP(setup_length)
        """
        looping_code = self.generate_repeated_code(
            repeated_code=self.attack_block,
            setup=self.setup,
            cleanup=self.cleanup,
            fork=fork,
        )
        self._contract_address = pre.deploy_contract(code=looping_code)
        return self._contract_address
@dataclass(kw_only=True)
class ExtCallGenerator(BenchmarkCodeGenerator):
    """
    Generates bytecode that fills a separate target contract to the maximum
    allowed code size, driven by a looping caller contract.
    """

    # Balance assigned to the target contract at deployment.
    contract_balance: int = 0

    def deploy_contracts(self, *, pre: Alloc, fork: Fork) -> Address:
        """
        Deploy the target and caller contracts; return the caller's address.

        The target repeats the attack block (no loop of its own); the caller
        STATICCALLs the target repeatedly inside a JUMP loop.
        """
        # Repeat the attack block as many times as the code-size limit — and,
        # when the block grows the stack, the stack-height limit — allow.
        stack_delta = (
            self.attack_block.pushed_stack_items - self.attack_block.popped_stack_items
        )
        repetitions = fork.max_code_size() // len(self.attack_block)
        if stack_delta > 0:
            repetitions = min(fork.max_stack_height() // stack_delta, repetitions)

        # Target contract: setup followed by the repeated attack block.
        self._target_contract_address = pre.deploy_contract(
            code=self.setup + self.attack_block * repetitions,
            balance=self.contract_balance,
        )

        # Caller contract: loops POP(STATICCALL(GAS, target, 0, 0, 0, 0))
        # via the shared JUMPDEST/JUMP scaffold.
        call_block = Op.POP(Op.STATICCALL(Op.GAS, self._target_contract_address, 0, 0, 0, 0))
        caller_code = self.generate_repeated_code(
            repeated_code=call_block, cleanup=self.cleanup, fork=fork
        )
        self._contract_address = pre.deploy_contract(code=caller_code)
        return self._contract_address
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_benchmark/__init__.py | src/ethereum_test_benchmark/__init__.py | """
Benchmark code generator classes for
creating optimized bytecode patterns.
"""
from .benchmark_code_generator import (
BenchmarkCodeGenerator,
ExtCallGenerator,
JumpLoopGenerator,
)
__all__ = (
"BenchmarkCodeGenerator",
"ExtCallGenerator",
"JumpLoopGenerator",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/__init__.py | tests/__init__.py | """
Cross-client test cases organized by fork. Each directory underneath `tests/`
contains test cases corresponding [to the fork](https://ethereum.org/en/history)
in which the functionality-under-test was introduced.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/__init__.py | tests/static/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/__init__.py | tests/static/state_tests/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stSStoreTest/__init__.py | tests/static/state_tests/stSStoreTest/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stCreate2/__init__.py | tests/static/state_tests/stCreate2/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stZeroKnowledge2/__init__.py | tests/static/state_tests/stZeroKnowledge2/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stHomesteadSpecific/__init__.py | tests/static/state_tests/stHomesteadSpecific/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stEIP2930/__init__.py | tests/static/state_tests/stEIP2930/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stInitCodeTest/__init__.py | tests/static/state_tests/stInitCodeTest/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/static/state_tests/stMemExpandingEIP150Calls/__init__.py | tests/static/state_tests/stMemExpandingEIP150Calls/__init__.py | """Static State Tests Fillers from ethereum/tests repo."""
REFERENCE_SPEC_GIT_PATH = ""
REFERENCE_SPEC_VERSION = ""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.