repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/evm_bytes.py | src/cli/evm_bytes.py | """Define an entry point wrapper for pytest."""
from dataclasses import dataclass, field
from typing import Any, List
import click
from ethereum_test_base_types import ZeroPaddedHexNumber
from ethereum_test_vm import Bytecode, Macro
from ethereum_test_vm import Opcodes as Op
# Opcodes that end or redirect control flow; in assembly output a blank line
# is inserted after them to visually separate basic blocks.
OPCODES_WITH_EMPTY_LINES_AFTER = {
    Op.STOP,
    Op.REVERT,
    Op.INVALID,
    Op.JUMP,
    Op.JUMPI,
}
# Opcodes that start a new basic block; a blank line is inserted before them.
OPCODES_WITH_EMPTY_LINES_BEFORE = {
    Op.JUMPDEST,
}
@dataclass(kw_only=True)
class OpcodeWithOperands:
    """Simple opcode with its operands."""

    # The opcode itself; ``None`` acts as an empty-line placeholder when
    # rendering assembly output.
    opcode: Op | None
    # Immediate values belonging to the opcode (PUSH data, jump offsets, ...).
    operands: List[int] = field(default_factory=list)

    def format(self, assembly: bool) -> str:
        """
        Format the opcode with its operands.

        Args:
            assembly: If True, render as assembly; otherwise as EEST Python
                ``Op.*`` source code.

        Returns:
            The formatted string ("" for the empty-line placeholder).
        """
        if self.opcode is None:
            return ""
        if assembly:
            return self.format_assembly()
        if self.operands:
            operands = ", ".join(hex(operand) for operand in self.operands)
            return f"Op.{self.opcode._name_}[{operands}]"
        return f"Op.{self.opcode._name_}"

    def format_assembly(self) -> str:
        """Format the opcode with its operands as assembly."""
        if self.opcode is None:
            return ""
        opcode_name = self.opcode._name_.lower()
        if self.opcode.data_portion_length == 0:
            return opcode_name
        # Fix: the original special-cased Op.RJUMPV here, but its branch body
        # was byte-identical to the generic one, so both are merged.
        operands = ", ".join(str(ZeroPaddedHexNumber(operand)) for operand in self.operands)
        return f"{opcode_name} {operands}"

    @property
    def terminating(self) -> bool:
        """Terminating opcode boolean."""
        return self.opcode.terminating if self.opcode else False

    @property
    def bytecode(self) -> Bytecode:
        """Opcode as bytecode with its operands if any."""
        # opcode.opcode[*opcode.operands] crashes `black` formatter and doesn't
        # work.
        if self.opcode:
            return self.opcode.__getitem__(*self.operands) if self.operands else self.opcode
        return Bytecode()
def process_evm_bytes(evm_bytes: bytes) -> List[OpcodeWithOperands]:
    """
    Disassemble raw EVM bytes into a list of opcodes with their operands.

    Raises:
        ValueError: If a byte does not correspond to any known opcode.
    """
    evm_bytes_array = bytearray(evm_bytes)
    opcodes: List[OpcodeWithOperands] = []
    while evm_bytes_array:
        opcode_byte = evm_bytes_array.pop(0)
        opcode: Op
        # Resolve the byte to an opcode; macros are skipped because they do
        # not correspond to a single byte value.
        for op in Op:
            if not isinstance(op, Macro) and op.int() == opcode_byte:
                opcode = op
                break
        else:
            raise ValueError(f"Unknown opcode: {opcode_byte}")
        if opcode.data_portion_length > 0:
            # RJUMP/RJUMPI immediates are signed relative offsets; all other
            # data portions (e.g. PUSH data) are unsigned.
            signed = opcode in [Op.RJUMP, Op.RJUMPI]
            opcodes.append(
                OpcodeWithOperands(
                    opcode=opcode,
                    operands=[
                        int.from_bytes(
                            evm_bytes_array[: opcode.data_portion_length], "big", signed=signed
                        )
                    ],
                )
            )
            # Consume the data portion that was just decoded.
            evm_bytes_array = evm_bytes_array[opcode.data_portion_length :]
        elif opcode == Op.RJUMPV:
            # RJUMPV has a variable-length data portion: one max-index byte
            # followed by (max_index + 1) signed 2-byte jump offsets.
            if len(evm_bytes_array) == 0:
                # Truncated input: emit the opcode without operands.
                opcodes.append(OpcodeWithOperands(opcode=opcode))
            else:
                max_index = evm_bytes_array.pop(0)
                operands: List[int] = []
                for _ in range(max_index + 1):
                    operands.append(int.from_bytes(evm_bytes_array[:2], "big", signed=True))
                    evm_bytes_array = evm_bytes_array[2:]
                opcodes.append(OpcodeWithOperands(opcode=opcode, operands=operands))
        else:
            opcodes.append(OpcodeWithOperands(opcode=opcode))
    return opcodes
def format_opcodes(opcodes: List[OpcodeWithOperands], assembly: bool = False) -> str:
    """
    Join formatted opcodes into a single string.

    In Python mode the opcodes are joined with " + "; in assembly mode they
    are joined with newlines, with blank lines inserted around control-flow
    boundaries for readability.
    """
    if not assembly:
        return " + ".join(op.format(assembly) for op in opcodes)

    last_index = len(opcodes) - 1
    spaced: List[OpcodeWithOperands] = []
    for index, current in enumerate(opcodes):
        needs_blank_before = (
            current.opcode in OPCODES_WITH_EMPTY_LINES_BEFORE
            and spaced
            and spaced[-1].opcode is not None
        )
        if needs_blank_before:
            spaced.append(OpcodeWithOperands(opcode=None))
        spaced.append(current)
        if current.opcode in OPCODES_WITH_EMPTY_LINES_AFTER and index < last_index:
            spaced.append(OpcodeWithOperands(opcode=None))
    return "\n".join(op.format(assembly) for op in spaced)
def process_evm_bytes_string(evm_bytes_hex_string: str, assembly: bool = False) -> str:
    """
    Process the given EVM bytes hex string.

    Args:
        evm_bytes_hex_string: Hex-encoded EVM bytecode, with or without a
            leading "0x" prefix.
        assembly: Render the result as assembly instead of Python Opcodes.

    Returns:
        The formatted opcode string.

    Raises:
        ValueError: If the string is not valid hexadecimal.
    """
    # str.removeprefix is a no-op when the prefix is absent.
    evm_bytes = bytes.fromhex(evm_bytes_hex_string.removeprefix("0x"))
    return format_opcodes(process_evm_bytes(evm_bytes), assembly=assembly)
# Reusable click option shared by both CLI sub-commands below.
assembly_option = click.option(
    "-a",
    "--assembly",
    default=False,
    is_flag=True,
    help="Output the code as assembly instead of Python Opcodes.",
)
@click.group(
    "evm_bytes",
    context_settings={
        "help_option_names": ["-h", "--help"],
    },
)
def evm_bytes() -> None:
    """
    Convert EVM bytecode to EEST's Python Opcodes or an assembly string.

    The input can be either a hex string or a binary file.
    """
    # Group entry point only; the work happens in the sub-commands.
    pass
@evm_bytes.command(short_help="Convert a hex string to Python Opcodes or assembly.")
@assembly_option
@click.argument("hex_string")
def hex_string(hex_string: str, assembly: bool) -> None:
"""
Convert the HEX_STRING representing EVM bytes to EEST Python Opcodes.
HEX_STRING is a string containing EVM bytecode.
Returns:
(str): The processed EVM opcodes in Python or assembly format.
Example 1: Convert a hex string to EEST Python `Opcodes`
uv run evm_bytes hex-string 604260005260206000F3
Output 1:
\b
Op.PUSH1[0x42] + Op.PUSH1[0x0] + Op.MSTORE + Op.PUSH1[0x20] +
Op.PUSH1[0x0] + Op.RETURN
Example 2: Convert a hex string to assembly
uv run evm_bytes hex-string --assembly 604260005260206000F3
Output 2:
\b
push1 0x42
push1 0x00
mstore
push1 0x20
push1 0x00
return
""" # noqa: D301
processed_output = process_evm_bytes_string(hex_string, assembly=assembly)
click.echo(processed_output)
@evm_bytes.command(short_help="Convert a binary file to Python Opcodes or assembly.")
@assembly_option
@click.argument("binary_file", type=click.File("rb"))
def binary_file(binary_file: Any, assembly: bool) -> None:
"""
Convert the BINARY_FILE containing EVM bytes to Python Opcodes or assembly.
BINARY_FILE is a binary file containing EVM bytes, use `-` to read from
stdin.
Returns:
(str): The processed EVM opcodes in Python or assembly format.
Example: Convert the Withdrawal Request contract to assembly
\b
uv run evm_bytes binary-file ./src/ethereum_test_forks/forks/
contracts/withdrawal_request.bin --assembly
Output:
\b
caller
push20 0xfffffffffffffffffffffffffffffffffffffffe
eq
push1 0x90
jumpi
...
""" # noqa: D301
processed_output = format_opcodes(process_evm_bytes(binary_file.read()), assembly=assembly)
click.echo(processed_output)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/modify_static_test_gas_limits.py | src/cli/modify_static_test_gas_limits.py | """
Command to scan and overwrite the static tests' gas limits to new optimized
value given in the input file.
"""
import json
import re
from pathlib import Path
from typing import Dict, List, Set
import click
import yaml
from ethereum_test_base_types import EthereumTestRootModel, HexNumber, ZeroPaddedHexNumber
from ethereum_test_specs import StateStaticTest
from pytest_plugins.filler.static_filler import NoIntResolver
class GasLimitDict(EthereumTestRootModel):
    """Formatted JSON file with new gas limits in each test."""

    # Maps "<file path>::<test name>" ids to the new gas limit (or None when
    # no valid new value could be determined for that test).
    root: Dict[str, int | None]

    def unique_files(self) -> Set[Path]:
        """Return the set of unique test file paths referenced by the ids."""
        return {
            Path(filename)
            for filename, _ in (test.split("::") for test in self.root)
        }

    def get_tests_by_file_path(self, file: Path | str) -> Set[str]:
        """Return the set of test ids that belong to the given file path."""
        target = str(file)
        return {
            test
            for test, (current_file, _) in ((t, t.split("::")) for t in self.root)
            if current_file == target
        }
class StaticTestFile(EthereumTestRootModel):
    """A static test file."""

    # Maps test name -> parsed static state test definition.
    root: Dict[str, StateStaticTest]
def _check_fixtures(
    *,
    input_path: Path,
    max_gas_limit: int | None,
    dry_run: bool,
    verbose: bool,
) -> None:
    """
    Perform checks on fixtures in the specified directory.

    Reads a JSON mapping of "<file>::<test>" ids to optimized gas limits,
    rewrites each static test file whose current gas limit needs updating,
    and removes successfully processed entries from the input file.

    Args:
        input_path: JSON file produced beforehand with the new gas limits.
        max_gas_limit: Tests already at or below this limit are dropped from
            the input untouched; computed limits above it are rejected.
            ``None`` disables both checks.
        dry_run: If True, only print the operations that would be performed.
        verbose: If True, print extra progress information.
    """
    # Load the test dictionary from the input JSON file
    test_dict = GasLimitDict.model_validate_json(input_path.read_text())
    # Iterate through each unique test file that needs modification
    for test_file in test_dict.unique_files():
        tests = test_dict.get_tests_by_file_path(test_file)
        test_file_contents = test_file.read_text()
        # Parse the test file based on its format (YAML or JSON)
        if test_file.suffix == ".yml" or test_file.suffix == ".yaml":
            loaded_yaml = yaml.load(test_file.read_text(), Loader=NoIntResolver)
            try:
                parsed_test_file = StaticTestFile.model_validate(loaded_yaml)
            except Exception as e:
                raise Exception(
                    f"Unable to parse file {test_file}: {json.dumps(loaded_yaml, indent=2)}"
                ) from e
        else:
            parsed_test_file = StaticTestFile.model_validate_json(test_file_contents)
        # Validate that the file contains exactly one test
        assert len(parsed_test_file.root) == 1, f"File {test_file} contains more than one test."
        _, parsed_test = parsed_test_file.root.popitem()
        # Skip files with multiple gas limit values
        if len(parsed_test.transaction.gas_limit) != 1:
            if dry_run or verbose:
                print(
                    f"Test file {test_file} contains more than one test (after parsing), skipping."
                )
            continue
        # Get the current gas limit and check if modification is needed
        current_gas_limit = int(parsed_test.transaction.gas_limit[0])
        if max_gas_limit is not None and current_gas_limit <= max_gas_limit:
            # Nothing to do, finished; drop the entries from the input file.
            for test in tests:
                test_dict.root.pop(test)
            continue
        # Collect valid gas values for this test file
        gas_values: List[int] = []
        for gas_value in [test_dict.root[test] for test in tests]:
            if gas_value is None:
                if dry_run or verbose:
                    print(
                        f"Test file {test_file} contains at least one test that cannot "
                        "be updated, skipping."
                    )
                # NOTE(review): despite the "skipping" message, this only
                # skips the single value, not the whole file; if every value
                # is None, `max(gas_values)` below raises ValueError on an
                # empty list — confirm whether a file-level skip was intended.
                continue
            else:
                gas_values.append(gas_value)
        # Calculate the new gas limit (rounded up to nearest 100,000)
        new_gas_limit = max(gas_values)
        # NOTE(review): exact multiples of 100,000 are still bumped to the
        # next multiple by this formula.
        modified_new_gas_limit = ((new_gas_limit // 100000) + 1) * 100000
        if verbose:
            print(
                f"Changing exact new gas limit ({new_gas_limit}) to "
                f"rounded ({modified_new_gas_limit})"
            )
        new_gas_limit = modified_new_gas_limit
        # Check if the new gas limit exceeds the maximum allowed
        if max_gas_limit is not None and new_gas_limit > max_gas_limit:
            if dry_run or verbose:
                print(f"New gas limit ({new_gas_limit}) exceeds max ({max_gas_limit})")
            continue
        if dry_run or verbose:
            print(f"Test file {test_file} requires modification ({new_gas_limit})")
        # Find the appropriate pattern to replace the current gas limit: the
        # value may appear as decimal, hex, or zero-padded hex, so try each
        # representation until one matches the raw file contents.
        potential_types = [int, HexNumber, ZeroPaddedHexNumber]
        substitute_pattern = None
        substitute_string = None
        attempted_patterns = []
        for current_type in potential_types:
            potential_substitute_pattern = rf"\b{current_type(current_gas_limit)}\b"
            potential_substitute_string = f"{current_type(new_gas_limit)}"
            if (
                re.search(
                    potential_substitute_pattern, test_file_contents, flags=re.RegexFlag.MULTILINE
                )
                is not None
            ):
                substitute_pattern = potential_substitute_pattern
                substitute_string = potential_substitute_string
                break
            attempted_patterns.append(potential_substitute_pattern)
        # Validate that a replacement pattern was found
        assert substitute_pattern is not None, (
            f"Current gas limit ({attempted_patterns}) not found in {test_file}"
        )
        assert substitute_string is not None
        # Perform the replacement in the test file content
        new_test_file_contents = re.sub(substitute_pattern, substitute_string, test_file_contents)
        assert test_file_contents != new_test_file_contents, "Could not modify test file"
        # Skip writing changes if this is a dry run
        if dry_run:
            continue
        # Write the modified content back to the test file
        test_file.write_text(new_test_file_contents)
        for test in tests:
            test_dict.root.pop(test)
    if dry_run:
        return
    # Write changes to the input file (only the unprocessed entries remain)
    input_path.write_text(test_dict.model_dump_json(indent=2))
# 2**24; default value for the --max-gas-limit option below.
MAX_GAS_LIMIT = 16_777_216
@click.command()
@click.option(
    "--input",
    "-i",
    "input_str",
    type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),
    required=True,
    help="The input json file or directory containing json listing the new gas limits for the "
    "static test files files.",
)
@click.option(
    "--max-gas-limit",
    default=MAX_GAS_LIMIT,
    expose_value=True,
    help="Gas limit that triggers a test modification, and also the maximum value that a test "
    "should have after modification.",
)
@click.option(
    "--dry-run",
    "-d",
    "dry_run",
    is_flag=True,
    default=False,
    expose_value=True,
    help="Don't modify any files, simply print operations to be performed.",
)
@click.option(
    "--verbose",
    "-v",
    "verbose",
    is_flag=True,
    default=False,
    expose_value=True,
    help="Print extra information.",
)
def main(input_str: str, max_gas_limit: int | None, dry_run: bool, verbose: bool) -> None:
    """
    Perform checks on fixtures in the specified directory.
    """
    input_path = Path(input_str)
    if not dry_run:
        # Always dry-run first before actually modifying, so any assertion
        # failure surfaces before any file has been touched.
        _check_fixtures(
            input_path=input_path,
            max_gas_limit=max_gas_limit,
            dry_run=True,
            verbose=False,
        )
    _check_fixtures(
        input_path=input_path,
        max_gas_limit=max_gas_limit,
        dry_run=dry_run,
        verbose=verbose,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/__init__.py | src/cli/__init__.py | """Ethereum execution spec tests command-line tools."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/order_fixtures.py | src/cli/order_fixtures.py | """
Functions and CLI interface for recursively ordering and sorting .json files.
Usage Example:
```console
order_fixtures -i input_dir -o output_dir
```
The CLI interface takes the paths of an input directory and an output
directory. It recursively processes each .json file in the input directory and
its subdirectories, and sorts lists and dictionaries alphabetically and writes
the sorted output to .json files to the corresponding locations in the output
directory.
"""
import json
from pathlib import Path
from typing import Any, Dict, List, cast
import click
def recursive_sort(item: Dict[str, Any] | List[Any]) -> Dict[str, Any] | List[Any]:
"""
Recursively sorts an item.
If the item is a dictionary, it returns a new dictionary that is a sorted
version of the input dictionary. If the item is a list, it returns a new
list that is a sorted version of the input list. The elements of the list
are also sorted if they are lists or dictionaries.
Args:
item: The item to be sorted. This can be a list or a dictionary.
Returns:
The sorted item.
"""
if isinstance(item, dict):
return dict(sorted((k, recursive_sort(v)) for k, v in item.items()))
elif isinstance(item, list):
try:
return sorted(cast(List[Any], [recursive_sort(x) for x in item])) # type: ignore[redundant-cast]
except TypeError:
# If a TypeError is raised, we might be dealing with a list of
# dictionaries Sort them based on their string representation
return sorted((recursive_sort(x) for x in item), key=str)
else:
return item
def order_fixture(input_path: Path, output_path: Path) -> None:
    """
    Sort a single .json fixture.

    Loads the JSON document at ``input_path``, recursively sorts it, and
    writes the result (indented by 4 spaces) to ``output_path``.

    Args:
        input_path: The Path object of the input .json file.
        output_path: The Path object of the output .json file.
    """
    with input_path.open("r") as source:
        document = json.load(source)
    sorted_document = recursive_sort(document)
    with output_path.open("w") as sink:
        json.dump(sorted_document, sink, indent=4)
def process_directory(input_dir: Path, output_dir: Path) -> None:
    """
    Recursively process a directory of fixtures.

    Walks ``input_dir``, sorting every .json file found and writing the
    result to the corresponding location under ``output_dir`` (which is
    created if missing).

    Args:
        input_dir: The Path object of the input directory.
        output_dir: The Path object of the output directory.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    for entry in input_dir.iterdir():
        target = output_dir / entry.name
        if entry.is_dir():
            process_directory(entry, target)
        elif entry.suffix == ".json":
            order_fixture(entry, target)
@click.command()
@click.option(
    "--input",
    "-i",
    "input_dir",
    type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),
    required=True,
    help="input directory",
)
@click.option(
    "--output",
    "-o",
    "output_dir",
    type=click.Path(writable=True, file_okay=False, dir_okay=True),
    required=True,
    help="The output directory",
)
def order_fixtures(input_dir: str, output_dir: str) -> None:
    """Order json fixture by key recursively from the input directory."""
    # Click passes paths as strings; convert them to Path objects first.
    input_dir_path = Path(input_dir)
    output_dir_path = Path(output_dir)
    process_directory(input_dir_path, output_dir_path)
if __name__ == "__main__":
order_fixtures()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/extract_config.py | src/cli/extract_config.py | #!/usr/bin/env python
"""
CLI tool to extract client configuration files (chainspec/genesis.json) from
Ethereum clients.
This tool spawns an Ethereum client using Hive and extracts the generated
configuration files such as /chainspec/test.json, /configs/test.cfg, or
/genesis.json from the Docker container.
"""
import io
import json
import subprocess
import sys
from pathlib import Path
from typing import Dict, Optional, Tuple, cast
import click
from hive.simulation import Simulation
from hive.testing import HiveTestResult
from ethereum_test_base_types import Alloc, to_json
from ethereum_test_fixtures import BlockchainFixtureCommon
from ethereum_test_fixtures.blockchain import FixtureHeader
from ethereum_test_fixtures.file import Fixtures
from ethereum_test_fixtures.pre_alloc_groups import PreAllocGroup
from ethereum_test_forks import Fork
from pytest_plugins.consume.simulators.helpers.ruleset import ruleset
def get_docker_containers() -> set[str]:
    """Get the current list of Docker container IDs."""
    # `docker ps -q` prints one container ID per line (empty when none run).
    completed = subprocess.run(
        ["docker", "ps", "-q"], capture_output=True, text=True, check=True
    )
    container_ids = completed.stdout.strip()
    if not container_ids:
        return set()
    return set(container_ids.split("\n"))
def extract_client_files(
    container_id: str,
    output_dir: Path,
    fixture_name: str,
    client_name: str,
) -> Dict[str, Path]:
    """
    Extract configuration files from a running client container.

    Files are read via ``docker exec`` and written under
    ``<output_dir>/<fixture_name>/<client_name>/``.

    Returns a dictionary mapping file type to extracted file path.
    """
    extracted_files = {}
    # List of (path inside the container, local output filename) to try
    files_to_extract = [
        ("/chainspec/test.json", "chainspec.json"),
        ("/configs/test.cfg", "config.cfg"),
        ("/genesis.json", "genesis.json"),
    ]
    for container_path, output_filename in files_to_extract:
        try:
            # Use docker exec to read the file from the container
            # First check if file exists
            check_cmd = ["docker", "exec", container_id, "test", "-f", container_path]
            check_result = subprocess.run(check_cmd, capture_output=True)
            if check_result.returncode == 0:
                # File exists, now read it
                read_cmd = ["docker", "exec", container_id, "cat", container_path]
                result = subprocess.run(read_cmd, capture_output=True, text=True)
                if result.returncode == 0 and result.stdout:
                    output_folder = output_dir / fixture_name / client_name
                    if not output_folder.exists():
                        output_folder.mkdir(parents=True)
                    output_path = output_folder / output_filename
                    output = result.stdout
                    if output_filename == "genesis.json":
                        # Re-serialize the json with indentation for readability
                        output = json.dumps(json.loads(output), indent=4)
                    output_path.write_text(output)
                    extracted_files[container_path] = output_path
                    click.echo(f"✓ Extracted {container_path} to {output_path}")
                else:
                    click.echo(f"✗ Failed to read {container_path}: {result.stderr}", err=True)
            else:
                click.echo(f"- File {container_path} does not exist in container")
        except Exception as e:
            # Best-effort extraction: report the error and continue with the
            # next candidate file.
            click.echo(f"✗ Error extracting {container_path}: {e}", err=True)
    return extracted_files
def create_genesis_from_fixture(fixture_path: Path) -> Tuple[FixtureHeader, Alloc, int]:
    """
    Create a client genesis state from a fixture file.

    Supports two input formats: a regular fixture file (detected by the
    presence of the "_info" key) and a pre-allocation group file.

    Returns:
        A tuple of (genesis header, pre-allocation, chain id); the chain id
        defaults to 1 for pre-allocation group files.
    """
    genesis: FixtureHeader
    alloc: Alloc
    chain_id: int = 1
    with open(fixture_path, "r") as f:
        fixture_json = json.load(f)
    if "_info" in fixture_json:
        # Load the fixture
        fixtures = Fixtures.model_validate_json(fixture_path.read_text())
        # Get the first fixture (assuming single fixture file)
        fixture_id = list(fixtures.keys())[0]
        base_fixture = fixtures[fixture_id]
        if not isinstance(base_fixture, BlockchainFixtureCommon):
            raise ValueError(f"Fixture {fixture_id} is not a blockchain fixture")
        genesis = base_fixture.genesis
        alloc = base_fixture.pre
        chain_id = int(base_fixture.config.chain_id)
    else:
        # No "_info" key: treat the file as a pre-allocation group instead.
        pre_alloc_group = PreAllocGroup.model_validate(fixture_json)
        genesis = pre_alloc_group.genesis
        alloc = pre_alloc_group.pre
    return genesis, alloc, chain_id
def get_client_environment_for_fixture(fork: Fork, chain_id: int) -> dict:
    """
    Get the environment variables for starting a client with the given fixture.

    Raises:
        ValueError: If the fork is not present in the hive ruleset.
    """
    if fork not in ruleset:
        raise ValueError(f"Fork '{fork}' not found in hive ruleset")
    environment = {
        "HIVE_CHAIN_ID": str(chain_id),
        "HIVE_FORK_DAO_VOTE": "1",
        "HIVE_NODETYPE": "full",
        "HIVE_CHECK_LIVE_PORT": "8545",  # Using RPC port for liveness check
    }
    # Fork-activation settings from the ruleset, rendered as decimal strings.
    for key, value in ruleset[fork].items():
        environment[key] = f"{value:d}"
    return environment
@click.command()
@click.option(
    "--client",
    "-c",
    required=False,
    default=None,
    help="Client name (e.g., go-ethereum, besu, nethermind)",
)
@click.option(
    "--fixture",
    "-f",
    type=click.Path(exists=True, path_type=Path),
    help="Path to a fixture JSON file to use for genesis",
    default=None,
)
@click.option(
    "--output",
    "-o",
    type=click.Path(path_type=Path),
    default="./extracted_configs",
    help="Output directory for extracted files",
)
@click.option(
    "--hive-url",
    default="http://127.0.0.1:3000",
    help="Hive server URL",
)
@click.option(
    "--list-files",
    "-l",
    is_flag=True,
    help="List files in the container root before extraction",
)
def extract_config(
    client: str,
    fixture: Optional[Path],
    output: Path,
    hive_url: str,
    list_files: bool,
) -> None:
    """
    Extract client configuration files from Ethereum clients.

    This tool spawns an Ethereum client using Hive and extracts the generated
    configuration files such as /chainspec/test.json, /configs/test.cfg, or
    /genesis.json from the Docker container.
    """
    if not fixture:
        raise click.UsageError("No fixture provided, use --fixture to specify a fixture")
    # Accept either a single fixture file or a directory of fixture files.
    if fixture.is_dir():
        fixture_files = list(fixture.glob("*.json"))
    elif fixture.is_file():
        fixture_files = [fixture]
    else:
        raise click.UsageError(f"Invalid fixture path: {fixture}")
    # Create output directory
    output.mkdir(parents=True, exist_ok=True)
    # Initialize Hive test
    simulation = Simulation(url=hive_url)
    suite = simulation.start_suite(
        name="extract-config",
        description="Extract client configuration files",
    )
    hive_test = suite.start_test(
        name="extract-config",
        description="Extract client configuration files",
    )
    # Filter the available client types by the --client substring, if given.
    client_types = []
    for client_type in simulation.client_types():
        if client and client not in client_type.name:
            continue
        client_types.append(client_type)
    if not client_types:
        raise click.UsageError(f"No client types found for {client}")
    for fixture_path in fixture_files:
        # Prepare client files and environment
        click.echo(f"Using fixture: {fixture_path}")
        # Load fixture and create genesis
        genesis, alloc, chain_id = create_genesis_from_fixture(fixture_path)
        fork = genesis.fork
        assert fork is not None
        client_environment = get_client_environment_for_fixture(fork, chain_id)
        genesis_json = to_json(genesis)
        alloc_json = to_json(alloc)
        # Allocation keys are passed to the client without the "0x" prefix.
        genesis_json["alloc"] = {k.replace("0x", ""): v for k, v in alloc_json.items()}
        genesis_json_str = json.dumps(genesis_json)
        genesis_bytes = genesis_json_str.encode("utf-8")
        for client_type in client_types:
            client_files = {}
            client_files["/genesis.json"] = io.BufferedReader(
                cast(io.RawIOBase, io.BytesIO(genesis_bytes))
            )
            # Get containers before starting client, so the new container can
            # later be identified by set difference.
            containers_before = get_docker_containers()
            # Start the client
            click.echo(f"Starting client: {client_type.name}")
            client_instance = hive_test.start_client(
                client_type=client_type,
                environment=client_environment,
                files=client_files,
            )
            if not client_instance:
                click.echo("Failed to start client", err=True)
                sys.exit(1)
            try:
                # Get containers after starting client
                containers_after = get_docker_containers()
                new_containers = containers_after - containers_before
                if len(new_containers) != 1:
                    click.echo(
                        f"Expected exactly 1 new container, found {len(new_containers)}", err=True
                    )
                    sys.exit(1)
                container_id = new_containers.pop()
                click.echo(f"Client started successfully (Container ID: {container_id})")
                # Optionally list files in container
                if list_files:
                    click.echo("\nListing files in container root:")
                    list_cmd = ["docker", "exec", container_id, "ls", "-la", "/"]
                    result = subprocess.run(list_cmd, capture_output=True, text=True)
                    if result.returncode == 0:
                        click.echo(result.stdout)
                    else:
                        click.echo(f"Failed to list files: {result.stderr}", err=True)
                # Extract files
                click.echo("\nExtracting configuration files...")
                extract_client_files(container_id, output, fixture_path.stem, client_type.name)
            except Exception as e:
                click.echo(f"Error: {e}", err=True)
                import traceback

                traceback.print_exc()
                sys.exit(1)
            finally:
                # Clean up: always stop the client, even on failure.
                click.echo("\nStopping client...")
                client_instance.stop()
            click.echo()
    hive_test.end(result=HiveTestResult(test_pass=True, details=""))
    suite.end()
if __name__ == "__main__":
extract_config()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/compare_fixtures.py | src/cli/compare_fixtures.py | """
Compare two fixture folders and remove duplicates based on fixture hashes.
This tool reads the .meta/index.json files from two fixture directories and
identifies fixtures with identical hashes on a test case basis, then removes
the duplicates from both of the folders. Used within the coverage workflow.
"""
import json
import shutil
import sys
from collections import defaultdict
from pathlib import Path
from typing import List, Set
import click
from ethereum_test_base_types import HexNumber
from ethereum_test_fixtures.consume import IndexFile, TestCaseIndexFile
def get_index_path(folder: Path) -> Path:
    """Get the path to an index in a given folder."""
    return folder.joinpath(".meta", "index.json")
def load_index(folder: Path) -> IndexFile:
    """Load the index.json file from a fixture folder."""
    index_path = get_index_path(folder)
    # Fail with an explicit message when the folder has no index.
    if not index_path.exists():
        raise FileNotFoundError(f"Index file not found: {index_path}")
    raw_index = index_path.read_text()
    return IndexFile.model_validate_json(raw_index)
def get_fixture_hashes(index: IndexFile) -> Set[HexNumber]:
    """Return the set of non-None fixture hashes present in the index."""
    return {
        test_case.fixture_hash
        for test_case in index.test_cases
        if test_case.fixture_hash is not None
    }
def find_duplicates(base_hashes: Set[HexNumber], patch_hashes: Set[HexNumber]) -> Set[HexNumber]:
    """Find fixture hashes that exist in both base and patch."""
    return base_hashes.intersection(patch_hashes)
def pop_all_by_hash(index: IndexFile, fixture_hash: HexNumber) -> List[TestCaseIndexFile]:
    """
    Pop all test cases with the given hash out of an index file.

    Raises:
        Exception: If no test case in the index has the given hash.
    """
    matching = [tc for tc in index.test_cases if tc.fixture_hash == fixture_hash]
    if not matching:
        raise Exception(f"Hash {fixture_hash} not found in index.")
    # Keep only the non-matching test cases in the index.
    index.test_cases = [tc for tc in index.test_cases if tc.fixture_hash != fixture_hash]
    return matching
def remove_fixture_from_file(file: Path, test_case_id: str) -> None:
    """
    Remove a single fixture by its ID from a generic fixture file.

    Raises:
        FileNotFoundError: If the fixture file does not exist.
        KeyError: If the test case id is not present in the file.
    """
    try:
        # The file is a flat JSON object keyed by test case id.
        fixtures = json.loads(file.read_text())
        fixtures.pop(test_case_id)
        file.write_text(json.dumps(fixtures, indent=2))
    except FileNotFoundError:
        raise FileNotFoundError(f"Fixture file not found: {file}") from None
    except KeyError:
        raise KeyError(f"Test case {test_case_id} not found in {file}") from None
def batch_remove_fixtures_from_files(removals_by_file: dict[Path, list[str]]) -> None:
    """
    Batch process file removals to minimize I/O.

    Each file is read and rewritten once; files left empty after removal are
    deleted. Errors are reported per-file and do not stop the batch.
    """
    for file_path, test_case_ids in removals_by_file.items():
        try:
            fixtures = json.loads(file_path.read_text())
            for test_case_id in test_case_ids:
                # Missing ids are ignored on purpose (best-effort removal).
                fixtures.pop(test_case_id, None)
            if fixtures:
                file_path.write_text(json.dumps(fixtures, indent=2))
            else:
                file_path.unlink()
        except Exception as e:
            print(f"Error processing {file_path}: {e}")
def rewrite_index(folder: Path, index: IndexFile, dry_run: bool) -> None:
    """
    Rewrite index to file, or if test count is zero, delete directory.
    """
    if not index.test_cases:
        # No tests remain: the whole fixture folder is obsolete.
        if dry_run:
            print(f"Would delete {folder}")
        else:
            shutil.rmtree(folder)
        return
    if dry_run:
        print(f"Would rewrite index for {folder}")
        return
    with open(get_index_path(folder), "w") as f:
        f.write(index.model_dump_json(exclude_none=False, indent=2))
@click.command()
@click.argument("base", type=click.Path(exists=True, file_okay=False, path_type=Path))
@click.argument("patch", type=click.Path(exists=True, file_okay=False, path_type=Path))
@click.option(
    "--dry-run", is_flag=True, help="Show what would be removed without actually removing"
)
@click.option(
    "--abort-on-empty-patch",
    is_flag=True,
    help="Abort if the patch folder would be empty after fixture removal.",
)
def main(
    base: Path,
    patch: Path,
    dry_run: bool,
    abort_on_empty_patch: bool,
) -> None:
    """
    Compare two folders and remove duplicates based on fixture hashes.
    """
    try:
        # Load indices
        base_index = load_index(base)
        base_hashes = get_fixture_hashes(base_index)
        patch_index = load_index(patch)
        patch_hashes = get_fixture_hashes(patch_index)
        # Find duplicates
        duplicate_hashes = find_duplicates(base_hashes, patch_hashes)
        if not duplicate_hashes:
            click.echo("No duplicates found.")
            sys.exit(0)
        else:
            click.echo(f"Found {len(duplicate_hashes)} duplicates.")
        if abort_on_empty_patch and duplicate_hashes == patch_hashes:
            # Exits successfully without removing anything.
            click.echo("Patch folder would be empty after fixture removal.")
            sys.exit(0)
        # Collect removals by file for batching (many test cases can share a
        # fixture file; each file is rewritten only once).
        base_removals_by_file = defaultdict(list)
        patch_removals_by_file = defaultdict(list)
        for duplicate_hash in duplicate_hashes:
            base_test_cases = pop_all_by_hash(base_index, duplicate_hash)
            patch_test_cases = pop_all_by_hash(patch_index, duplicate_hash)
            for base_test_case in base_test_cases:
                base_file = base / base_test_case.json_path
                if dry_run:
                    print(f"Remove {base_test_case.id} from {base_file}")
                else:
                    base_removals_by_file[base_file].append(base_test_case.id)
            for patch_test_case in patch_test_cases:
                patch_file = patch / patch_test_case.json_path
                if dry_run:
                    print(f"Remove {patch_test_case.id} from {patch_file}")
                else:
                    patch_removals_by_file[patch_file].append(patch_test_case.id)
        # Batch process file operations
        if not dry_run:
            batch_remove_fixtures_from_files(base_removals_by_file)
            batch_remove_fixtures_from_files(patch_removals_by_file)
        # Rewrite indices if necessary
        rewrite_index(base, base_index, dry_run)
        rewrite_index(patch, patch_index, dry_run)
    except Exception as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(1)
if __name__ == "__main__":
main()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/show_pre_alloc_group_stats.py | src/cli/show_pre_alloc_group_stats.py | """Script to display statistics about pre-allocation groups."""
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set, Tuple
import click
from pydantic import Field
from rich.console import Console
from rich.table import Table
from ethereum_test_base_types import CamelModel
from ethereum_test_fixtures import PreAllocGroups
def extract_test_module(test_id: str) -> str:
"""Extract test module path from test ID."""
# Example:
# tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
# test_beacon_root_contract_calls[fork_Cancun]
if "::" in test_id:
return test_id.split("::")[0]
return "unknown"
def extract_test_function(test_id: str) -> str:
"""Extract test function name from test ID (without parameters)."""
# Example:
# tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
# test_beacon_root_contract_calls[fork_Cancun]
# Returns:
# tests/cancun/eip4788_beacon_root/test_beacon_root_contract.py::
# test_beacon_root_contract_calls
if "::" in test_id:
parts = test_id.split("::")
if len(parts) >= 2:
function_part = parts[1]
# Remove parameter brackets if present
if "[" in function_part:
function_part = function_part.split("[")[0]
return f"{parts[0]}::{function_part}"
return test_id
def calculate_size_distribution(
test_counts: List[int],
) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int, int, int]]]:
"""
Calculate frequency distribution of group sizes with appropriate binning.
Returns:
Group count distribution: [(range_label, group_count), ...]
Test count distribution: [(range_label, test_count,
cumulative_remaining,
group_count), ...]
"""
if not test_counts:
return [], []
# Define bins based on the data characteristics
# Using logarithmic-style bins for better distribution visibility
bins = [
(1, 1, "1"),
(2, 5, "2-5"),
(6, 10, "6-10"),
(11, 20, "11-20"),
(21, 50, "21-50"),
(51, 100, "51-100"),
(101, 200, "101-200"),
(201, 500, "201-500"),
(501, 1000, "501-1000"),
(1001, float("inf"), "1000+"),
]
# Calculate both distributions
group_distribution = []
test_distribution = []
for min_val, max_val, label in bins:
# Group count distribution
groups_in_bin = [tc for tc in test_counts if min_val <= tc <= max_val]
group_count = len(groups_in_bin)
if group_count > 0:
group_distribution.append((label, group_count))
# Test count distribution with group count
tests_in_bin = sum(groups_in_bin)
# Added group_count
test_distribution.append((label, tests_in_bin, 0, group_count))
# Calculate cumulative values For the table sorted from largest to
# smallest:
# Row N shows: if we exclude groups of size N and smaller, what
# percent of tests remain?
# Row N shows: if we include groups of size N and
# larger, how many groups is that?
cumulative_remaining_tests = 0
cumulative_groups = 0
# Process from bottom to top
for i in range(len(test_distribution) - 1, -1, -1):
label, tests_in_bin, _, group_count = test_distribution[i]
test_distribution[i] = (label, tests_in_bin, cumulative_remaining_tests, cumulative_groups)
cumulative_remaining_tests += tests_in_bin
cumulative_groups += group_count
return group_distribution, test_distribution
def analyze_pre_alloc_folder(folder: Path) -> Dict:
"""Analyze pre-allocation folder and return statistics."""
pre_alloc_groups = PreAllocGroups.from_folder(folder, lazy_load=False)
# Basic stats
total_groups = len(pre_alloc_groups)
total_tests = sum(group.test_count for group in pre_alloc_groups.values())
total_accounts = sum(group.pre_account_count for group in pre_alloc_groups.values())
# Group by fork
fork_stats: Dict[str, Dict] = defaultdict(lambda: {"groups": 0, "tests": 0})
for group in pre_alloc_groups.values():
fork_stats[group.fork.name()]["groups"] += 1
fork_stats[group.fork.name()]["tests"] += group.test_count
# Group by test module
module_stats: Dict[str, Dict] = defaultdict(lambda: {"groups": set(), "tests": 0})
for hash_key, group in pre_alloc_groups.items():
# Count tests per module in this group
module_test_count: defaultdict = defaultdict(int)
for test_id in group.test_ids:
module = extract_test_module(test_id)
module_test_count[module] += 1
# Add to module stats
for module, test_count in module_test_count.items():
module_stats[module]["groups"].add(hash_key)
module_stats[module]["tests"] += test_count
# Convert sets to counts
for module in module_stats:
module_stats[module]["groups"] = len(module_stats[module]["groups"])
# Per-group details
group_details = []
for hash_key, group in pre_alloc_groups.items():
group_details.append(
{
"hash": hash_key[:8] + "...", # Shortened hash for display
"tests": group.test_count,
"accounts": group.pre_account_count,
"fork": group.fork.name(),
}
)
# Calculate frequency distribution of group sizes
group_distribution, test_distribution = calculate_size_distribution(
[g["tests"] for g in group_details] # type: ignore
)
# Analyze test functions split across multiple size-1 groups
class SplitTestFunction(CamelModel):
groups: int = 0
forks: Set[str] = Field(default_factory=set)
split_test_functions: Dict[str, SplitTestFunction] = defaultdict(lambda: SplitTestFunction())
# Process all size-1 groups directly from pre_state
for _hash_key, group_data in pre_alloc_groups.items():
if group_data.test_count == 1: # Size-1 group
test_id = group_data.test_ids[0]
test_function = extract_test_function(test_id)
fork = group_data.fork.name()
split_test_functions[test_function].groups += 1
split_test_functions[test_function].forks.add(fork)
# Filter to only test functions with multiple size-1 groups and calculate
# ratios
split_functions = {}
for func, split_test_function in split_test_functions.items():
if split_test_function.groups > 1:
fork_count = len(split_test_function.forks)
groups_per_fork = (
split_test_function.groups / fork_count
if fork_count > 0
else split_test_function.groups
)
split_functions[func] = {
"total_groups": split_test_function.groups,
"fork_count": fork_count,
"groups_per_fork": groups_per_fork,
}
return {
"total_groups": total_groups,
"total_tests": total_tests,
"total_accounts": total_accounts,
"fork_stats": dict(fork_stats),
"module_stats": dict(module_stats),
"group_details": group_details,
"group_distribution": group_distribution,
"test_distribution": test_distribution,
"split_functions": split_functions,
}
def display_stats(stats: Dict, console: Console, verbose: int = 0) -> None:
"""Display statistics in a formatted way."""
# Overall summary
console.print("\n[bold cyan]Pre-Allocation Statistics Summary[/bold cyan]")
console.print(f"Total groups: [green]{stats['total_groups']}[/green]")
console.print(f"Total tests: [green]{stats['total_tests']}[/green]")
console.print(f"Total accounts: [green]{stats['total_accounts']}[/green]")
if stats.get("skipped_count", 0) > 0:
console.print(
f"Skipped groups: [yellow]{stats['skipped_count']}[/yellow] "
"(use --verbose to see details)"
)
# Per-group details table (only with -v or -vv)
if verbose >= 1:
console.print("\n[bold yellow]Tests and Accounts per Group[/bold yellow]")
group_table = Table(show_header=True, header_style="bold magenta")
group_table.add_column("Group Hash", style="dim")
group_table.add_column("Fork", style="cyan")
group_table.add_column("Tests", justify="right")
group_table.add_column("Accounts", justify="right")
# Sort by test count (descending)
sorted_groups = sorted(stats["group_details"], key=lambda x: -x["tests"])
# Show all groups if -vv, otherwise top 20
groups_to_show = sorted_groups if verbose >= 2 else sorted_groups[:20]
for group in groups_to_show:
group_table.add_row(
group["hash"],
group["fork"],
str(group["tests"]),
str(group["accounts"]),
)
if verbose < 2 and len(stats["group_details"]) > 20:
group_table.add_row(
"...",
"...",
"...",
"...",
)
console.print(group_table)
# Fork statistics table
console.print("\n[bold yellow]Groups and Tests per Fork[/bold yellow]")
fork_table = Table(show_header=True, header_style="bold magenta")
fork_table.add_column("Fork", style="cyan")
fork_table.add_column("Groups", justify="right")
fork_table.add_column("Tests", justify="right")
fork_table.add_column("Avg Tests/Group", justify="right")
# Sort forks by name
sorted_forks = sorted(stats["fork_stats"].items())
for fork, fork_data in sorted_forks:
avg_tests = fork_data["tests"] / fork_data["groups"] if fork_data["groups"] > 0 else 0
fork_table.add_row(
fork,
str(fork_data["groups"]),
str(fork_data["tests"]),
f"{avg_tests:.1f}",
)
console.print(fork_table)
# Group size frequency distribution table
console.print("\n[bold yellow]Group Size Distribution[/bold yellow]")
dist_table = Table(show_header=True, header_style="bold magenta")
dist_table.add_column("Test Count Range", style="cyan")
dist_table.add_column("Number of Groups", justify="right")
dist_table.add_column("Percentage", justify="right")
total_groups_in_dist = sum(count for _, count in stats.get("group_distribution", []))
for size_range, count in stats.get("group_distribution", []):
percentage = (count / total_groups_in_dist * 100) if total_groups_in_dist > 0 else 0
dist_table.add_row(
size_range,
str(count),
f"{percentage:.1f}%",
)
console.print(dist_table)
# Test coverage distribution table
console.print("\n[bold yellow]Test Coverage by Group Size[/bold yellow]")
coverage_table = Table(show_header=True, header_style="bold magenta")
coverage_table.add_column("Test Count Range", style="cyan")
coverage_table.add_column("Tests in Range", justify="right")
coverage_table.add_column("Coverage if Excluded (%)", justify="right")
coverage_table.add_column("Cumulative Groups", justify="right")
total_tests = stats.get("total_tests", 0)
total_groups = stats.get("total_groups", 0)
# Define bin order from largest to smallest for proper sorting
bin_order = [
"1000+",
"501-1000",
"201-500",
"101-200",
"51-100",
"21-50",
"11-20",
"6-10",
"2-5",
"1",
]
# Create a mapping for easy lookup
test_dist_map = {item[0]: item for item in stats.get("test_distribution", [])}
# Display in the defined order
test_dist_sorted = [
test_dist_map[bin_range] for bin_range in bin_order if bin_range in test_dist_map
]
# Need to recalculate cumulative groups from top for display
cumulative_groups_display = 0
for _i, (size_range, tests_in_range, cumulative_remaining_tests, _) in enumerate(
test_dist_sorted
):
coverage_percentage = (
(cumulative_remaining_tests / total_tests * 100) if total_tests > 0 else 0
)
# Find how many groups in this bin
groups_in_bin = next(
(count for label, count in stats.get("group_distribution", []) if label == size_range),
0,
)
cumulative_groups_display += groups_in_bin
coverage_table.add_row(
size_range,
str(tests_in_range),
f"{coverage_percentage:.1f}%",
f"{cumulative_groups_display} ({cumulative_groups_display / total_groups * 100:.1f}%)"
if total_groups > 0
else "0",
)
console.print(coverage_table)
# Module statistics table (only with -v or -vv)
if verbose >= 1:
console.print("\n[bold yellow]Groups and Tests per Test Module[/bold yellow]")
module_table = Table(show_header=True, header_style="bold magenta")
module_table.add_column("Test Module", style="dim")
module_table.add_column("Groups", justify="right")
module_table.add_column("Tests", justify="right")
module_table.add_column("Avg Tests/Group", justify="right")
# Sort modules by group count (descending) - shows execution complexity
sorted_modules = sorted(
stats["module_stats"].items(),
# Secondary sort by tests
key=lambda x: (-x[1]["groups"], -x[1]["tests"]),
)
# Show all modules if -vv, otherwise top 15
modules_to_show = sorted_modules if verbose >= 2 else sorted_modules[:15]
for module, module_data in modules_to_show:
# Shorten module path for display
if module.startswith("tests/"):
module_display = module[6:] # Remove "tests/" prefix
else:
module_display = module
avg_tests = (
module_data["tests"] / module_data["groups"] if module_data["groups"] > 0 else 0
)
module_table.add_row(
module_display,
str(module_data["groups"]),
str(module_data["tests"]),
f"{avg_tests:.1f}",
)
if verbose < 2 and len(stats["module_stats"]) > 15:
module_table.add_row(
"...",
"...",
"...",
"...",
)
console.print(module_table)
# Split test functions analysis (only show if there are any)
if stats.get("split_functions"):
console.print("\n[bold yellow]Test Functions Split Across Multiple Groups[/bold yellow]")
console.print(
"[dim]These test functions create multiple size-1 groups (due to different "
"forks/parameters), preventing pre-allocation group optimization:[/dim]",
highlight=False,
)
split_table = Table(show_header=True, header_style="bold magenta")
split_table.add_column("Test Function", style="dim")
split_table.add_column("Total Groups", justify="right")
split_table.add_column("Fork Count", justify="right")
split_table.add_column("Groups/Fork", justify="right", style="yellow")
# Sort by groups per fork (descending) to show worst offenders first
sorted_split = sorted(
stats["split_functions"].items(), key=lambda x: x[1]["groups_per_fork"], reverse=True
)
for test_function, data in sorted_split:
# Shorten function path for display
display_function = test_function
if display_function.startswith("tests/"):
display_function = display_function[6:] # Remove "tests/"
# prefix
split_table.add_row(
display_function,
str(data["total_groups"]),
str(data["fork_count"]),
f"{data['groups_per_fork']:.1f}",
)
console.print(split_table)
# Summary of optimization potential
total_split_groups = sum(
data["total_groups"] for data in stats["split_functions"].values()
)
total_split_functions = len(stats["split_functions"])
console.print(
f"\n[yellow]Optimization Potential:[/yellow] Excluding these {total_split_functions} "
f"split functions would save {total_split_groups} groups"
)
# Verbosity hint
console.print()
if verbose == 0:
console.print(
"[dim]Hint: Use -v to see detailed group and module statistics, or -vv to see all "
"groups and modules[/dim]"
)
elif verbose == 1:
console.print(
"[dim]Hint: Use -vv to see all groups and modules (currently showing top entries "
"only)[/dim]"
)
@click.command()
@click.argument(
"pre_alloc_folder",
type=click.Path(exists=True, path_type=Path),
default="fixtures/blockchain_tests_engine_x/pre_alloc",
)
@click.option(
"--verbose",
"-v",
count=True,
help="Show verbose output (-v for warnings, -vv for all groups)",
)
def main(pre_alloc_folder: Path, verbose: int) -> None:
"""
Display statistics about pre-allocation groups.
This script analyzes a pre_alloc folder generated by the test framework's
pre-allocation group optimization feature and displays:
- Total number of groups, tests, and accounts
- Number of tests and accounts per group (tabulated)
- Number of groups and tests per fork (tabulated)
- Number of groups and tests per test module (tabulated)
The pre_alloc file is generated when running tests with the
--generate-pre-alloc-groups and --use-pre-alloc-groups flags to optimize
test execution by grouping tests with identical pre-allocation state.
"""
console = Console()
try:
stats = analyze_pre_alloc_folder(pre_alloc_folder)
display_stats(stats, console, verbose=verbose)
except FileNotFoundError:
console.print(f"[red]Error: Folder not found: {pre_alloc_folder}[/red]")
raise click.Abort() from None
except Exception as e:
console.print(f"[red]Error: {e}[/red]")
raise click.Abort() from None
if __name__ == "__main__":
main()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/check_fixtures.py | src/cli/check_fixtures.py | """
Perform sanity checks on the framework's pydantic serialization and
deserialization using generated json fixtures files.
"""
from pathlib import Path
from typing import Generator
import click
from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeElapsedColumn
from ethereum_test_base_types import to_json
from ethereum_test_fixtures.file import Fixtures
from ethereum_test_specs.base import HashMismatchExceptionError
def count_json_files_exclude_index(start_path: Path) -> int:
"""
Return the number of json files in the specified directory, excluding
index.json files.
"""
json_file_count = sum(1 for file in start_path.rglob("*.json") if file.name != "index.json")
return json_file_count
def check_json(json_file_path: Path) -> None:
"""
Check all fixtures in the specified json file:
1. Load the json file into a pydantic model. This checks there are no
Validation errors when loading fixtures into EEST models.
2. Serialize the loaded pydantic model to "json" (actually python data
structures, ready to written as json).
3. Load the serialized data back into a pydantic model (to get an updated
hash) from step 2.
4. Compare hashes:
a. Compare the newly calculated hashes from step 2. and 3. and
b. If present, compare info["hash"] with the calculated hash from
step 2.
"""
fixtures: Fixtures = Fixtures.model_validate_json(json_file_path.read_text())
fixtures_json = to_json(fixtures)
fixtures_deserialized: Fixtures = Fixtures.model_validate(fixtures_json)
for fixture_name, fixture in fixtures.items():
new_hash = fixtures_deserialized[fixture_name].hash
if (original_hash := fixture.hash) != new_hash:
raise HashMismatchExceptionError(
original_hash,
new_hash,
message=f"Fixture hash attributes do not match for {fixture_name}",
)
if "hash" in fixture.info and fixture.info["hash"] != original_hash:
info_hash = fixture.info["hash"]
info_hash_str = str(info_hash) if not isinstance(info_hash, str) else info_hash
raise HashMismatchExceptionError(
original_hash,
info_hash_str,
message=(
f"Fixture info['hash'] does not match calculated "
f"hash for {fixture_name}: '{info_hash}' != "
f"'{original_hash}'"
),
)
@click.command()
@click.option(
"--input",
"-i",
"input_str",
type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True),
required=True,
help="The input json file or directory containing json fixture files",
)
@click.option(
"--quiet",
"-q",
"quiet_mode",
is_flag=True,
default=False,
expose_value=True,
help="Don't show the progress bar while processing fixture files.",
)
@click.option(
"--stop-on-error",
"--raise-on-error",
"-s",
"stop_on_error",
is_flag=True,
default=False,
expose_value=True,
help="Stop and raise any exceptions encountered while checking fixtures.",
)
def check_fixtures(input_str: str, quiet_mode: bool, stop_on_error: bool) -> bool:
"""
Perform checks on fixtures in the specified directory.
"""
input_path = Path(input_str)
success = True
file_count = 0
filename_display_width = 25
if input_path.is_file():
file_count = 1
elif not quiet_mode:
file_count = count_json_files_exclude_index(input_path)
def get_input_files() -> Generator[Path, None, None]:
if input_path.is_file():
yield input_path
else:
yield from input_path.rglob("*.json")
with Progress(
TextColumn(
f"[bold cyan]{{task.fields[filename]:<{filename_display_width}}}[/]", justify="left"
),
BarColumn(bar_width=None, complete_style="green3", finished_style="bold green3"),
TaskProgressColumn(),
TimeElapsedColumn(),
expand=True,
disable=quiet_mode,
) as progress: # type: Progress
task_id = progress.add_task("Checking fixtures", total=file_count, filename="...")
for json_file_path in get_input_files():
if json_file_path.name == "index.json":
continue
display_filename = json_file_path.name
if len(display_filename) > filename_display_width:
display_filename = display_filename[: filename_display_width - 3] + "..."
else:
display_filename = display_filename.ljust(filename_display_width)
try:
progress.update(task_id, advance=1, filename=f"Checking {display_filename}")
check_json(json_file_path)
except Exception as e:
success = False
if stop_on_error:
raise e
else:
progress.console.print(f"\nError checking {json_file_path}:")
progress.console.print(f" {e}")
reward_string = "🦄" if success else "🐢"
progress.update(
task_id, completed=file_count, filename=f"Completed checking all files {reward_string}"
)
return success
if __name__ == "__main__":
check_fixtures()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/diff_opcode_counts.py | src/cli/diff_opcode_counts.py | #!/usr/bin/env python
"""
Compare opcode counts between two folders of JSON fixtures.
This script crawls two folders for JSON files, parses them using the Fixtures
model, and compares the opcode_count field from the info section between
fixtures with the same name.
"""
import sys
from pathlib import Path
from typing import Dict, List, Optional
import click
from ethereum_clis.cli_types import OpcodeCount
from ethereum_test_fixtures.file import Fixtures
def find_json_files(directory: Path) -> List[Path]:
"""Find all JSON files in a directory, excluding index.json files."""
json_files = []
if directory.is_dir():
for file_path in directory.rglob("*.json"):
if file_path.name != "index.json":
json_files.append(file_path)
return json_files
def load_fixtures_from_file(
file_path: Path, remove_from_fixture_names: List[str]
) -> Optional[Fixtures]:
"""Load fixtures from a JSON file using the Fixtures model."""
try:
fixtures = Fixtures.model_validate_json(file_path.read_text())
renames = []
for k in fixtures.root:
new_name = None
for s in remove_from_fixture_names:
if s in k:
if new_name is None:
new_name = k.replace(s, "")
else:
new_name = new_name.replace(s, "")
if new_name is not None:
renames.append((k, new_name))
for old_name, new_name in renames:
fixtures.root[new_name] = fixtures.root.pop(old_name)
return fixtures
except Exception as e:
print(f"Error loading {file_path}: {e}", file=sys.stderr)
return None
def extract_opcode_counts_from_fixtures(fixtures: Fixtures) -> Dict[str, OpcodeCount]:
"""Extract opcode_count from info field for each fixture."""
opcode_counts = {}
for fixture_name, fixture in fixtures.items():
if hasattr(fixture, "info") and fixture.info and "opcode_count" in fixture.info:
try:
opcode_count = OpcodeCount.model_validate(fixture.info["opcode_count"])
opcode_counts[fixture_name] = opcode_count
except Exception as e:
print(f"Error parsing opcode_count for {fixture_name}: {e}", file=sys.stderr)
return opcode_counts
def load_all_opcode_counts(
directory: Path, remove_from_fixture_names: List[str]
) -> Dict[str, OpcodeCount]:
"""Load all opcode counts from all JSON files in a directory."""
all_opcode_counts = {}
json_files = find_json_files(directory)
for json_file in json_files:
fixtures = load_fixtures_from_file(
json_file, remove_from_fixture_names=remove_from_fixture_names
)
if fixtures:
file_opcode_counts = extract_opcode_counts_from_fixtures(fixtures)
# Use fixture name as key, if there are conflicts, choose the last
all_opcode_counts.update(file_opcode_counts)
return all_opcode_counts
def compare_opcode_counts(count1: OpcodeCount, count2: OpcodeCount) -> Dict[str, int]:
"""Compare two opcode counts and return the differences."""
differences = {}
# Get all unique opcodes from both counts
all_opcodes = set(count1.root.keys()) | set(count2.root.keys())
for opcode in all_opcodes:
val1 = count1.root.get(opcode, 0)
val2 = count2.root.get(opcode, 0)
diff = val2 - val1
if diff != 0:
differences[str(opcode)] = diff
return differences
@click.command()
@click.argument("base", type=click.Path(exists=True, file_okay=False, path_type=Path))
@click.argument("patch", type=click.Path(exists=True, file_okay=False, path_type=Path))
@click.option(
"--show-common",
is_flag=True,
help="Print fixtures that contain identical opcode counts.",
)
@click.option(
"--show-missing",
is_flag=True,
help="Print fixtures only found in one of the folders.",
)
@click.option(
"--remove-from-fixture-names",
"-r",
multiple=True,
help="String to be removed from the fixture name, in case the fixture names have changed, "
"in order to make the comparison easier. "
"Can be specified multiple times.",
)
def main(
base: Path,
patch: Path,
show_common: bool,
show_missing: bool,
remove_from_fixture_names: List[str],
) -> None:
"""Crawl two folders, compare and print opcode count diffs."""
print(f"Loading opcode counts from {base}...")
opcode_counts1 = load_all_opcode_counts(base, remove_from_fixture_names)
print(f"Found {len(opcode_counts1)} fixtures with opcode counts")
print(f"Loading opcode counts from {patch}...")
opcode_counts2 = load_all_opcode_counts(patch, remove_from_fixture_names)
print(f"Found {len(opcode_counts2)} fixtures with opcode counts")
# Find common fixture names
common_names = set(opcode_counts1.keys()) & set(opcode_counts2.keys())
only_in_1 = set(opcode_counts1.keys()) - set(opcode_counts2.keys())
only_in_2 = set(opcode_counts2.keys()) - set(opcode_counts1.keys())
print("\nSummary:")
print(f" Common fixtures: {len(common_names)}")
print(f" Only in {base.name}: {len(only_in_1)}")
print(f" Only in {patch.name}: {len(only_in_2)}")
# Show missing fixtures if requested
if show_missing:
if only_in_1:
print(f"\nFixtures only in {base.name}:")
for name in sorted(only_in_1):
print(f" {name}")
if only_in_2:
print(f"\nFixtures only in {patch.name}:")
for name in sorted(only_in_2):
print(f" {name}")
# Compare common fixtures
differences_found = False
common_with_same_counts = 0
for fixture_name in sorted(common_names):
count1 = opcode_counts1[fixture_name]
count2 = opcode_counts2[fixture_name]
differences = compare_opcode_counts(count1, count2)
if differences:
differences_found = True
print(f"\n{fixture_name}:")
for opcode, diff in sorted(differences.items()):
if diff > 0:
print(f" +{diff} {opcode}")
else:
print(f" {diff} {opcode}")
elif show_common:
print(f"\n{fixture_name}: No differences")
common_with_same_counts += 1
if not differences_found:
print("\nNo differences found in opcode counts between common fixtures!")
elif show_common:
print(f"\n{common_with_same_counts} fixtures have identical opcode counts")
if __name__ == "__main__":
main()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/test_context_providers.py | src/cli/gentest/test_context_providers.py | """
Various providers which generate contexts required to create test scripts.
Classes:
Provider: An provider generates required context for creating a
test.
BlockchainTestProvider: The BlockchainTestProvider takes a transaction
hash and creates required context to create a test.
Example:
provider = BlockchainTestContextProvider(transaction=transaction)
context = provider.get_context()
"""
from abc import ABC, abstractmethod
from sys import stderr
from typing import Any, Dict, Optional
from pydantic import BaseModel
from ethereum_test_base_types import Account, Hash
from ethereum_test_rpc.rpc_types import TransactionByHashResponse
from ethereum_test_tools import Environment, Transaction
from .request_manager import RPCRequest
class Provider(ABC, BaseModel):
"""An provider generates required context for creating a test."""
@abstractmethod
def get_context(self) -> Dict:
"""Get the context for generating a test."""
pass
class StateTestProvider(Provider):
"""Provides context required to generate a `state_test` using pytest."""
transaction_hash: Hash
block: Optional[Environment] = None
transaction_response: Optional[TransactionByHashResponse] = None
state: Optional[Dict[str, Dict]] = None
def _make_rpc_calls(self) -> None:
"""Make RPC calls to fetch transaction and block data."""
request = RPCRequest()
print(
f"Perform tx request: eth_get_transaction_by_hash({self.transaction_hash})",
file=stderr,
)
self.transaction_response = request.eth_get_transaction_by_hash(self.transaction_hash)
print("Perform debug_trace_call", file=stderr)
self.state = request.debug_trace_call(self.transaction_response)
print("Perform eth_get_block_by_number", file=stderr)
assert self.transaction_response.block_number is not None
self.block = request.eth_get_block_by_number(int(self.transaction_response.block_number))
print("Generate py test", file=stderr)
def _get_environment(self) -> Environment:
assert self.block is not None
return self.block
def _get_pre_state(self) -> Dict[str, Account]:
assert self.state is not None
assert self.transaction_response is not None
pre_state: Dict[str, Account] = {}
for address, account_data in self.state.items():
# TODO: Check if this is required. Ideally,
# the pre-state tracer should have the correct
# values without requiring any additional modifications.
if address == self.transaction_response.sender:
account_data["nonce"] = self.transaction_response.nonce
pre_state[address] = Account(**account_data)
return pre_state
def _get_transaction(self) -> Transaction:
assert self.transaction_response is not None
# Validate the RPC TransactionHashResponse and convert it to a
# Transaction instance.
return Transaction.model_validate(self.transaction_response.model_dump())
def get_context(self) -> Dict[str, Any]:
"""
Get the context for generating a blockchain test.
Returns:
Dict[str, Any]: A dictionary containing environment,
pre-state, a transaction and its hash.
"""
self._make_rpc_calls()
return {
"environment": self._get_environment(),
"pre_state": self._get_pre_state(),
"transaction": self._get_transaction(),
"tx_hash": self.transaction_hash,
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/source_code_generator.py | src/cli/gentest/source_code_generator.py | """
Pytest source code generator.
This module maps a test provider instance to pytest source code.
"""
import subprocess
import sys
import tempfile
from pathlib import Path
import jinja2
from config import AppConfig
from .test_context_providers import Provider
template_loader = jinja2.PackageLoader("cli.gentest")
template_env = jinja2.Environment(loader=template_loader, keep_trailing_newline=True)
# This filter maps python objects to string
template_env.filters["stringify"] = lambda value: repr(value)
# generates a formatted pytest source code by writing provided data on a given
# template.
def get_test_source(provider: Provider, template_path: str) -> str:
"""
Generate formatted pytest source code by rendering a template with provided
data.
This function uses the given template path to create a pytest-compatible
source code string. It retrieves context data from the specified provider
and applies it to the template.
Args:
provider: An object that provides the necessary context for rendering
the template.
template_path (str): The path to the Jinja2 template file
used to generate tests.
Returns:
str: The formatted pytest source code.
"""
template = template_env.get_template(template_path)
rendered_template = template.render(provider.get_context())
# return rendered_template
return format_code(rendered_template)
def format_code(code: str) -> str:
"""
Format the provided Python code using the Black code formatter.
This function writes the given code to a temporary Python file, formats it
using the Black formatter, and returns the formatted code as a string.
Args:
code (str): The Python code to be formatted.
Returns:
str: The formatted Python code.
"""
# Create a temporary python file
with tempfile.NamedTemporaryFile(suffix=".py") as temp_file:
# Write the code to the temporary file
temp_file.write(code.encode("utf-8"))
# Ensure the file is written
temp_file.flush()
# Create a Path object for the input file
input_file_path = Path(temp_file.name)
# Get the path to the formatter executable in the virtual environment
if sys.platform.startswith("win"):
formatter_path = Path(sys.prefix) / "Scripts" / "ruff.exe"
else:
formatter_path = Path(sys.prefix) / "bin" / "ruff"
# Call ruff to format the file
config_path = AppConfig().ROOT_DIR / "pyproject.toml"
try:
subprocess.run(
[
str(formatter_path),
"format",
str(input_file_path),
"--quiet",
"--config",
str(config_path),
],
check=True,
)
except subprocess.CalledProcessError as e:
raise Exception(f"Error formatting code using formatter '{formatter_path}'") from e
# Return the formatted source code
return input_file_path.read_text()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/cli.py | src/cli/gentest/cli.py | """
CLI interface for generating blockchain test scripts.
It extracts a specified transaction and its required state from a
blockchain network using the transaction hash and generates a Python
test script based on that information.
"""
from sys import stderr
from typing import TextIO
import click
from ethereum_test_base_types import Hash
from .source_code_generator import get_test_source
from .test_context_providers import StateTestProvider
@click.command()
@click.argument("transaction_hash")
@click.argument("output_file", type=click.File("w", lazy=True))
def generate(transaction_hash: str, output_file: TextIO) -> None:
"""
Extract a transaction and state from network to make blockchain test.
TRANSACTION_HASH is the hash of the transaction to be used.
OUTPUT_FILE is the path to the output python script.
"""
provider = StateTestProvider(transaction_hash=Hash(transaction_hash))
source = get_test_source(provider=provider, template_path="blockchain_test/transaction.py.j2")
output_file.write(source)
print("Finished", file=stderr)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/request_manager.py | src/cli/gentest/request_manager.py | """
A request manager Ethereum RPC calls.
The RequestManager handles transactions and block data retrieval from a remote
Ethereum node, utilizing Pydantic models to define the structure of
transactions and blocks.
Classes:
RequestManager: The main class for managing RPC requests and
responses.
RemoteTransaction: A Pydantic model representing a transaction
retrieved from the node.
RemoteBlock: A Pydantic model representing a block retrieved from
the node.
"""
from typing import Any, Dict
from config import EnvConfig
from ethereum_test_base_types import Hash
from ethereum_test_rpc import BlockNumberType, DebugRPC, EthRPC
from ethereum_test_rpc.rpc_types import TransactionByHashResponse
from ethereum_test_types import Environment
class RPCRequest:
"""Interface for the RPC interaction with remote node."""
node_url: str
headers: dict[str, str]
def __init__(self) -> None:
"""Initialize RequestManager with specific client config."""
node_config = EnvConfig().remote_nodes[0]
self.node_url = str(node_config.node_url)
headers = node_config.rpc_headers
self.rpc = EthRPC(node_config.node_url, extra_headers=headers)
self.debug_rpc = DebugRPC(node_config.node_url, extra_headers=headers)
def eth_get_transaction_by_hash(self, transaction_hash: Hash) -> TransactionByHashResponse:
"""Get transaction data."""
res = self.rpc.get_transaction_by_hash(transaction_hash)
assert res is not None, "Transaction not found"
block_number = res.block_number
assert block_number is not None, "Transaction does not seem to be included in any block"
return res
def eth_get_block_by_number(self, block_number: BlockNumberType) -> Environment:
"""Get block by number."""
res = self.rpc.get_block_by_number(block_number)
assert res is not None, "Block not found"
return Environment(
fee_recipient=res["miner"],
number=res["number"],
difficulty=res["difficulty"],
gas_limit=res["gasLimit"],
timestamp=res["timestamp"],
)
def debug_trace_call(
self, transaction: TransactionByHashResponse
) -> Dict[str, Dict[Any, Any]]:
"""Get pre-state required for transaction."""
assert transaction.sender is not None
assert transaction.to is not None
result = self.debug_rpc.trace_call(
{
"from": transaction.sender.hex(),
"to": transaction.to.hex(),
"data": transaction.data.hex(),
},
f"{transaction.block_number}",
)
assert result is not None, "trace_call returned None"
return result
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/__init__.py | src/cli/gentest/__init__.py | """
Generate a Python blockchain test from a transaction hash.
This script can be used to generate Python source for a blockchain test case
that replays a mainnet or testnet transaction from its transaction hash.
Requirements:
1. Access to an archive node for the network where the transaction
originates. A provider may be used.
2. The transaction hash of a type 0 transaction (currently only legacy
transactions are supported).
Example Usage:
1. Generate a test for a transaction with hash
```console
uv run gentest \
0xa41f343be7a150b740e5c939fa4d89f3a2850dbe21715df96b612fc20d1906be \
tests/paris/test_0xa41f.py
```
2. Fill the test:
```console
fill --fork=Paris tests/paris/test_0xa41f.py
```
Limitations:
1. Only legacy transaction types (type 0) are currently supported.
"""
from .cli import generate
__all__ = ["generate"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/test_providers.py | src/cli/gentest/test_providers.py | """
Contains various providers which generates context required to create test
scripts.
Classes:
BlockchainTestProvider: The BlockchainTestProvider class takes
information about a block, a transaction, and the
associated state, and provides methods to generate
various elements needed for testing, such as module
docstrings, test names, and pre-state items.
Example:
provider = BlockchainTestProvider(
block=block,
transaction=transaction,
state=state
)
context = provider.get_context()
"""
from typing import Any, Dict
from pydantic import BaseModel
from ethereum_test_base_types import Account, Address, ZeroPaddedHexNumber
from ethereum_test_rpc.rpc_types import TransactionByHashResponse
from ethereum_test_types import Environment
class BlockchainTestProvider(BaseModel):
"""
Provides context required to generate a `blockchain_test` using pytest.
"""
block: Environment
transaction: TransactionByHashResponse
state: Dict[Address, Account]
def _get_environment_kwargs(self) -> str:
env_str = ""
pad = " "
for field, value in self.block.dict().items():
env_str += (
f'{pad}{field}="{value}",\n' if field == "coinbase" else f"{pad}{field}={value},\n"
)
return env_str
# TODO: Output should be dict. Formatting should happen in the template.
def _get_pre_state_items(self) -> str:
# Print a nice .py storage pre
pad = " "
state_str = ""
for address, account_obj in self.state.items():
state_str += f' "{address}": Account(\n'
state_str += f"{pad}balance={str(account_obj.balance)},\n"
if address == self.transaction.sender:
state_str += f"{pad}nonce={self.transaction.nonce},\n"
else:
state_str += f"{pad}nonce={str(account_obj.nonce)},\n"
if account_obj.code is None:
state_str += f'{pad}code="0x",\n'
else:
state_str += f'{pad}code="{str(account_obj.code)}",\n'
state_str += pad + "storage={\n"
if account_obj.storage is not None:
for record, value in account_obj.storage.root.items():
pad_record = ZeroPaddedHexNumber(record)
pad_value = ZeroPaddedHexNumber(value)
state_str += f'{pad} "{pad_record}" : "{pad_value}",\n'
state_str += pad + "}\n"
state_str += " ),\n"
return state_str
# TODO: Output should be dict. Formatting should happen in the template.
def _get_transaction_items(self) -> str:
"""Print legacy transaction in .py."""
pad = " "
tr_str = ""
quoted_fields_array = ["data", "to"]
hex_fields_array = ["v", "r", "s"]
legacy_fields_array = [
"ty",
"chain_id",
"nonce",
"gas_price",
"protected",
"gas_limit",
"value",
]
for field, value in iter(self.transaction):
if value is None:
continue
if field in legacy_fields_array:
tr_str += f"{pad}{field}={value},\n"
if field in quoted_fields_array:
tr_str += f'{pad}{field}="{value}",\n'
if field in hex_fields_array:
tr_str += f"{pad}{field}={hex(value)},\n"
return tr_str
def get_context(self) -> Dict[str, Any]:
"""
Get the context for generating a blockchain test.
Returns:
Dict[str, Any]: A dictionary containing module docstring, test
name, test docstring, environment kwargs,
pre-state items, and transaction items.
"""
return {
"environment_kwargs": self._get_environment_kwargs(),
"pre_state_items": self._get_pre_state_items(),
"transaction_items": self._get_transaction_items(),
"tx_hash": self.transaction.hash,
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/tests/test_cli.py | src/cli/gentest/tests/test_cli.py | """Tests for the gentest CLI command."""
from pathlib import Path
from typing import Any
import pytest
from click.testing import CliRunner
from ethereum_test_base_types import Account
from ethereum_test_tools import Environment, Storage, Transaction
from ..cli import generate
from ..test_context_providers import StateTestProvider
transactions_by_type = {
0: {
"environment": Environment(
fee_recipient="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
gas_limit=9916577,
number=9974504,
timestamp=1588257377,
difficulty=2315196811272822,
parent_ommers_hash="0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
extra_data=b"\x00",
),
"pre_state": {
"0x5a0b54d5dc17e0aadc383d2db43b0a0d3e029c4c": Account(
nonce=6038603, balance=23760714652307793035, code=b"", storage=Storage(root={})
),
"0x8a4a4d396a06cba2a7a4a73245991de40cdec289": Account(
nonce=2, balance=816540000000000000, code=b"", storage=Storage(root={})
),
"0xc6d96786477f82491bfead8f00b8294688f77abc": Account(
nonce=25, balance=29020266497911578313, code=b"", storage=Storage(root={})
),
},
"transaction": Transaction(
ty=0,
chain_id=1,
nonce=2,
gas_price=10000000000,
gas_limit=21000,
to="0xc6d96786477f82491bfead8f00b8294688f77abc",
value=668250000000000000,
data=b"",
v=38,
r=57233334052658009540326312124836763247359579695589124499839562829147086216092,
s=49687643984819828983661675232336138386174947240467726918882054280625462464348,
sender="0x8a4a4d396a06cba2a7a4a73245991de40cdec289",
),
},
2: {
"environment": Environment(
fee_recipient="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
gas_limit=30172625,
number=21758000,
timestamp=1738489319,
parent_ommers_hash="0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
extra_data=b"\x00",
),
"pre_state": {
"0x24d6c74d811cfde65995ed26fd08af445f8aab06": Account(
nonce=1011, balance=139840767390685635650, code=b"", storage=Storage(root={})
),
"0xd5fbda4c79f38920159fe5f22df9655fde292d47": Account(
nonce=553563, balance=162510989019530720334, code=b"", storage=Storage(root={})
),
"0xe2e29f9a85cfecb9cdaa83a81c7aa2792f24d93f": Account(
nonce=104, balance=553317651330968100, code=b"", storage=Storage(root={})
),
},
"transaction": Transaction(
ty=2,
chain_id=1,
nonce=553563,
max_priority_fee_per_gas=1900000,
max_fee_per_gas=3992652948,
gas_limit=63000,
to="0xe2e29f9a85cfecb9cdaa83a81c7aa2792f24d93f",
value=221305417266040400,
v=1,
r=23565967349511399087318407428036702220029523660288023156323795583373026415631,
s=9175853102116430015855393834807954374677057556696757715994220939907579927771,
sender="0xd5fbda4c79f38920159fe5f22df9655fde292d47",
),
},
}
@pytest.fixture
def transaction_hash(tx_type: int) -> str: # noqa: D103
return str(transactions_by_type[tx_type]["transaction"].hash) # type: ignore
@pytest.mark.parametrize("tx_type", list(transactions_by_type.keys()))
def test_tx_type(
pytester: pytest.Pytester,
tmp_path: Path,
monkeypatch: Any,
tx_type: int,
transaction_hash: str,
default_t8n: Any,
) -> None:
"""Generates a test case for any transaction type."""
# This test is run in a CI environment, where connection to a
# node could be unreliable. Therefore, we mock the RPC request to avoid any
# network issues. This is done by patching the `get_context` method of the
# `StateTestProvider`.
runner = CliRunner()
tmp_path_tests = tmp_path / "tests"
tmp_path_tests.mkdir()
tmp_path_output = tmp_path / "output"
tmp_path_output.mkdir()
generated_py_file = str(tmp_path_tests / f"gentest_type_{tx_type}.py")
tx = transactions_by_type[tx_type]
def get_mock_context(self: StateTestProvider) -> dict:
del self
return tx
monkeypatch.setattr(StateTestProvider, "get_context", get_mock_context)
## Generate ##
gentest_result = runner.invoke(generate, [transaction_hash, generated_py_file])
assert gentest_result.exit_code == 0
## Fill ##
with open(generated_py_file, "r") as f:
pytester.makepyfile(f.read())
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
args = [
"-c",
"pytest-fill.ini",
"-m",
"state_test",
"--fork",
"Cancun",
"--t8n-server-url",
default_t8n.server_url,
]
result = pytester.runpytest("-v", *args)
assert result.ret == pytest.ExitCode.OK, f"Fill command failed:\n{result}"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gentest/tests/__init__.py | src/cli/gentest/tests/__init__.py | """Test cases for the `generate` CLI."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_order_fixtures.py | src/cli/tests/test_order_fixtures.py | """Tests for the order_fixtures module and click CLI."""
import json
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Generator
import pytest
from click.testing import CliRunner
from ..order_fixtures import order_fixtures, process_directory
def create_temp_json_file(directory: Path, name: str, content: dict[str, Any]) -> Path:
"""Create a temporary JSON file with specified content."""
file_path = directory / name
with file_path.open("w") as f:
json.dump(content, f)
return file_path
@pytest.fixture
def input_output_dirs() -> Generator[tuple[Path, Path], None, None]:
"""Create temporary input and output directories."""
with TemporaryDirectory() as input_dir, TemporaryDirectory() as output_dir:
yield Path(input_dir), Path(output_dir)
def test_order_fixture(input_output_dirs: tuple[Path, Path]) -> None:
"""Test sorting a single JSON fixture."""
input_dir, output_dir = input_output_dirs
create_temp_json_file(input_dir, "test.json", {"z": 0, "a": [3, 2, 1]})
expected_output = {"a": [1, 2, 3], "z": 0}
process_directory(input_dir, output_dir)
output_file = output_dir / "test.json"
assert output_file.exists()
with output_file.open("r") as f:
output_content = json.load(f)
assert output_content == expected_output
def test_cli_invocation(input_output_dirs: tuple[Path, Path]) -> None:
"""Test the CLI interface."""
runner = CliRunner()
input_dir, output_dir = input_output_dirs
create_temp_json_file(input_dir, "test.json", {"c": 2, "b": [4, 3, 5]})
result = runner.invoke(
order_fixtures, ["--input", str(input_dir), "--output", str(output_dir)]
)
assert result.exit_code == 0
assert (output_dir / "test.json").exists()
def test_input_is_file_instead_of_directory() -> None:
"""
Test the CLI interface when the input path is a file, not a directory.
"""
runner = CliRunner()
with TemporaryDirectory() as temp_dir:
temp_file = Path(temp_dir) / "temp_file.txt"
temp_file.touch()
result = runner.invoke(order_fixtures, ["--input", str(temp_file), "--output", temp_dir])
assert result.exit_code != 0
assert "Error: Invalid value for '--input'" in result.output
def test_input_directory_does_not_exist() -> None:
"""Test the CLI interface when the input directory does not exist."""
runner = CliRunner()
with TemporaryDirectory() as temp_dir:
non_existent_dir = Path(temp_dir) / "nonexistent"
result = runner.invoke(
order_fixtures, ["--input", str(non_existent_dir), "--output", temp_dir]
)
assert result.exit_code != 0
assert "Error: Invalid value for '--input'" in result.output
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_eofwrap.py | src/cli/tests/test_eofwrap.py | """Tests for the eofwrap module and click CLI."""
from typing import Any
import pytest
from ethereum_test_base_types.conversions import to_hex
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types.eof.v1 import Container
from ..eofwrap import wrap_code
@pytest.mark.parametrize(
"code,result",
[
[Op.STOP, Container.Code(Op.STOP)],
[Op.RETURN(0, 0), Container.Code(Op.RETURN(0, 0))],
[Op.REVERT(0, 0), Container.Code(Op.REVERT(0, 0))],
[Op.INVALID, Container.Code(Op.INVALID)],
[Op.PUSH1, Container.Code(Op.PUSH1[0] + Op.STOP)],
[Op.PUSH1[0], Container.Code(Op.PUSH1[0] + Op.STOP)],
[Op.PUSH1[0] + Op.STOP, Container.Code(Op.PUSH1[0] + Op.STOP)],
[Op.STOP + Op.STOP, Container.Code(Op.STOP)],
[Op.RETURN(0, 0) + Op.STOP, Container.Code(Op.RETURN(0, 0))],
[Op.REVERT(0, 0) + Op.STOP, Container.Code(Op.REVERT(0, 0))],
[Op.INVALID + Op.STOP, Container.Code(Op.INVALID)],
[Op.ADDRESS, Container.Code(Op.ADDRESS + Op.STOP)],
[Op.ADDRESS + Op.STOP, Container.Code(Op.ADDRESS + Op.STOP)],
[Op.ADDRESS + Op.RETURN(0, 0), Container.Code(Op.ADDRESS + Op.RETURN(0, 0))],
[Op.ADDRESS + Op.REVERT(0, 0), Container.Code(Op.ADDRESS + Op.REVERT(0, 0))],
[Op.ADDRESS + Op.INVALID, Container.Code(Op.ADDRESS + Op.INVALID)],
[Op.ADDRESS + Op.STOP + Op.STOP, Container.Code(Op.ADDRESS + Op.STOP)],
[Op.ADDRESS + Op.RETURN(0, 0) + Op.STOP, Container.Code(Op.ADDRESS + Op.RETURN(0, 0))],
[Op.ADDRESS + Op.REVERT(0, 0) + Op.STOP, Container.Code(Op.ADDRESS + Op.REVERT(0, 0))],
[Op.ADDRESS + Op.INVALID + Op.STOP, Container.Code(Op.ADDRESS + Op.INVALID)],
[Op.GAS + Op.STOP, Container.Code(Op.GAS + Op.STOP)],
[Op.GAS + Op.RETURN(0, 0), Container.Code(Op.GAS + Op.RETURN(0, 0))],
[Op.GAS + Op.REVERT(0, 0), Container.Code(Op.GAS + Op.REVERT(0, 0))],
[Op.GAS + Op.INVALID, Container.Code(Op.GAS + Op.INVALID)],
[Op.RJUMPV[1, 2, 3], Container.Code(Op.RJUMPV[1, 2, 3] + Op.STOP)],
[Op.RJUMPV, Container.Code(Op.RJUMPV + Op.STOP)],
[
Op.RJUMPV[-1, 0x7FFF, -0x7FFF],
Container.Code(Op.RJUMPV[-1, 0x7FFF, -0x7FFF] + Op.STOP),
],
],
ids=lambda param: to_hex(param),
)
def test_wrap_code(code: Any, result: Any) -> None:
"""Tests for the EOF wrapping logic and heuristics."""
assert wrap_code(bytes(code)) == result
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_pytest_execute_command.py | src/cli/tests/test_pytest_execute_command.py | """Tests for execute command click CLI."""
import pytest
from click.testing import CliRunner
from ..pytest_commands.execute import execute
@pytest.fixture
def runner() -> CliRunner:
"""Provide a Click CliRunner for invoking command-line interfaces."""
return CliRunner()
def test_execute_help_shows_subcommand_docstrings(runner: CliRunner) -> None:
"""Test that execute --help shows sub-command docstrings."""
result = runner.invoke(execute, ["--help"])
assert result.exit_code == 0
# Check that all sub-commands are shown with their help text
assert "hive" in result.output
assert "Execute tests using hive as a backend" in result.output
assert "remote" in result.output
assert "Execute tests using a remote RPC endpoint" in result.output
assert "recover" in result.output
assert "Recover funds from test executions" in result.output
def test_execute_subcommands_have_help_text() -> None:
"""Test that execute sub-commands have proper help text defined."""
from ..pytest_commands.execute import hive, recover, remote
# Test that each sub-command has a docstring
assert hive.__doc__ is not None
assert "hive" in hive.__doc__.lower()
assert remote.__doc__ is not None
assert "remote" in remote.__doc__.lower()
assert recover.__doc__ is not None
assert "recover" in recover.__doc__.lower()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_evm_bytes.py | src/cli/tests/test_evm_bytes.py | """Test suite for `cli.evm_bytes` module."""
import pytest
from ethereum_test_tools import Opcodes as Op
from ..evm_bytes import process_evm_bytes_string
basic_vector = [
"0x60008080808061AAAA612d5ff1600055",
"Op.PUSH1[0x0] + Op.DUP1 + Op.DUP1 + Op.DUP1 + Op.DUP1 + "
"Op.PUSH2[0xaaaa] + Op.PUSH2[0x2d5f] + Op.CALL + Op.PUSH1[0x0] + "
"Op.SSTORE",
]
complex_vector = [
"0x7fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebf5f527fc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedf6020527fe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff60405260786040356020355f35608a565b5f515f55602051600155604051600255005b5e56", # noqa: E501
"Op.PUSH32[0xa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebf] + Op.PUSH0 + Op.MSTORE + Op.PUSH32[0xc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedf] + Op.PUSH1[0x20] + Op.MSTORE + Op.PUSH32[0xe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff] + Op.PUSH1[0x40] + Op.MSTORE + Op.PUSH1[0x78] + Op.PUSH1[0x40] + Op.CALLDATALOAD + Op.PUSH1[0x20] + Op.CALLDATALOAD + Op.PUSH0 + Op.CALLDATALOAD + Op.PUSH1[0x8a] + Op.JUMP + Op.JUMPDEST + Op.PUSH0 + Op.MLOAD + Op.PUSH0 + Op.SSTORE + Op.PUSH1[0x20] + Op.MLOAD + Op.PUSH1[0x1] + Op.SSTORE + Op.PUSH1[0x40] + Op.MLOAD + Op.PUSH1[0x2] + Op.SSTORE + Op.STOP + Op.JUMPDEST + Op.MCOPY + Op.JUMP", # noqa: E501
]
rjump_vector = [
"0xe0fffe",
"Op.RJUMP[-0x2]",
]
rjumpi_vector = [
"0xe1fffe",
"Op.RJUMPI[-0x2]",
]
rjumpv_vector = [
"0xe213b1465aef60276095472e3250cf64736f6c63430008150033a26469706673582212206eab0a7969fe",
"Op.RJUMPV[-0x4eba, 0x5aef, 0x6027, 0x6095, 0x472e, 0x3250, -0x309c, "
"0x736f, 0x6c63, 0x4300," + " 0x815, 0x33, -0x5d9c, 0x6970, 0x6673, 0x5822, 0x1220, 0x6eab, "
"0xa79, 0x69fe]",
]
@pytest.mark.parametrize(
"evm_bytes, python_opcodes",
[
(basic_vector[0], basic_vector[1]),
(basic_vector[0][2:], basic_vector[1]), # no "0x" prefix
(complex_vector[0], complex_vector[1]),
(complex_vector[0][2:], complex_vector[1]), # no "0x" prefix
(rjump_vector[0], rjump_vector[1]),
(rjump_vector[0][2:], rjump_vector[1]), # no "0x" prefix
(rjumpi_vector[0], rjumpi_vector[1]),
(rjumpi_vector[0][2:], rjumpi_vector[1]), # no "0x" prefix
(rjumpv_vector[0], rjumpv_vector[1]),
(rjumpv_vector[0][2:], rjumpv_vector[1]), # no "0x" prefix
],
)
def test_evm_bytes(evm_bytes: str, python_opcodes: str) -> None:
"""Test evm_bytes using the basic and complex vectors."""
assert process_evm_bytes_string(evm_bytes) == python_opcodes
DUPLICATES = [Op.NOOP]
@pytest.mark.parametrize(
"opcode",
[op for op in Op if op not in DUPLICATES],
ids=lambda op: op._name_,
)
def test_individual_opcodes(opcode: Op) -> None:
"""Test each opcode individually."""
data_portion = b""
if opcode.data_portion_length > 0:
expected_output = f"Op.{opcode._name_}[0x0]"
data_portion = b"\x00" * opcode.data_portion_length
elif opcode == Op.RJUMPV:
expected_output = f"Op.{opcode._name_}[0x0]"
data_portion = b"\0\0\0"
else:
expected_output = f"Op.{opcode._name_}"
bytecode = opcode.int().to_bytes(1, byteorder="big") + data_portion
assert process_evm_bytes_string("0x" + bytecode.hex()) == expected_output
def test_invalid_opcode() -> None:
"""Invalid hex string."""
with pytest.raises(ValueError):
process_evm_bytes_string("0xZZ")
def test_unknown_opcode() -> None:
"""Opcode not defined in Op."""
with pytest.raises(ValueError):
process_evm_bytes_string("0x0F")
process_evm_bytes_string("0x0F")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_pytest_fill_command.py | src/cli/tests/test_pytest_fill_command.py | """Tests for pytest commands (e.g., fill) click CLI."""
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Generator
import pytest
from click.testing import CliRunner
from pytest import MonkeyPatch
import pytest_plugins.filler.filler
from ..pytest_commands.fill import fill
@pytest.fixture
def runner() -> CliRunner:
"""Provide a Click CliRunner for invoking command-line interfaces."""
return CliRunner()
def test_fill_help(runner: CliRunner) -> None:
"""Test the `--help` option of the `fill` command."""
result = runner.invoke(fill, ["--help"])
assert result.exit_code == pytest.ExitCode.OK
assert "[--evm-bin EVM_BIN]" in result.output
assert "[--traces]" in result.output
assert "[--evm-code-type EVM_CODE_TYPE]" in result.output
assert "--help" in result.output
assert "Arguments defining evm executable behavior:" in result.output
def test_fill_pytest_help(runner: CliRunner) -> None:
"""Test the `--pytest-help` option of the `fill` command."""
result = runner.invoke(fill, ["--pytest-help"])
assert result.exit_code == pytest.ExitCode.OK
assert "[options] [file_or_dir] [file_or_dir] [...]" in result.output
assert "-k EXPRESSION" in result.output
def test_fill_with_invalid_option(runner: CliRunner) -> None:
"""Test invoking `fill` with an invalid option."""
result = runner.invoke(fill, ["--invalid-option"])
assert result.exit_code != 0
assert "unrecognized arguments" in result.output
class TestHtmlReportFlags:
"""Test html report generation and output options."""
@pytest.fixture
def fill_args(self, default_t8n: Any) -> list[str]:
"""
Provide default arguments for the `fill` command when testing html
report generation.
Specifies a single existing example test case for faster fill
execution, and to allow for tests to check for the fixture generation
location.
"""
return [
"-k",
"test_dup and state_test-DUP16",
"--fork",
"Frontier",
f"--t8n-server-url={default_t8n.server_url}",
]
@pytest.fixture()
def default_html_report_file_path(self) -> str:
"""File path for fill's pytest html report."""
return pytest_plugins.filler.filler.default_html_report_file_path()
@pytest.fixture(scope="function")
def temp_dir(self) -> Generator[Path, None, None]:
"""Provide a temporary directory as a pytest fixture."""
temp_dir = TemporaryDirectory()
yield Path(temp_dir.name)
temp_dir.cleanup()
@pytest.fixture(scope="function", autouse=True)
def monkeypatch_default_output_directory(
self, monkeypatch: MonkeyPatch, temp_dir: Path
) -> None:
"""
Monkeypatch default output directory for the pytest commands.
This avoids using the local directory in user space for the output of
pytest commands and uses the a temporary directory instead.
"""
def mock_default_output_directory() -> Path:
return temp_dir
monkeypatch.setattr(
pytest_plugins.filler.filler,
"default_output_directory",
mock_default_output_directory,
)
def test_fill_default_output_options(
self,
runner: CliRunner,
temp_dir: Path,
fill_args: list[str],
default_html_report_file_path: str,
) -> None:
"""
Test default pytest html behavior: Neither `--html` or `--output` is
specified.
"""
default_html_path = temp_dir / default_html_report_file_path
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
assert default_html_path.exists()
def test_fill_no_html_option(
self,
runner: CliRunner,
temp_dir: Path,
fill_args: list[str],
default_html_report_file_path: str,
) -> None:
"""Test pytest html report is disabled with the `--no-html` flag."""
default_html_path = temp_dir / default_html_report_file_path
fill_args += ["--no-html"]
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
assert not default_html_path.exists()
def test_fill_html_option(
self,
runner: CliRunner,
temp_dir: Path,
fill_args: list[str],
) -> None:
"""Tests pytest html report generation with only the `--html` flag."""
non_default_html_path = temp_dir / "non_default_output_dir" / "report.html"
fill_args += ["--html", str(non_default_html_path)]
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
assert non_default_html_path.exists()
def test_fill_output_option(
self,
runner: CliRunner,
temp_dir: Path,
fill_args: list[str],
default_html_report_file_path: str,
) -> None:
"""
Tests pytest html report generation with only the `--output` flag.
"""
output_dir = temp_dir / "non_default_output_dir"
non_default_html_path = output_dir / default_html_report_file_path
fill_args += ["--output", str(output_dir)]
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
assert non_default_html_path.exists()
assert (output_dir / "state_tests").exists(), "No fixtures in output directory"
def test_fill_html_and_output_options(
self,
runner: CliRunner,
temp_dir: Path,
fill_args: list[str],
) -> None:
"""
Tests pytest html report generation with both `--output` and `--html`
flags.
"""
output_dir = temp_dir / "non_default_output_dir_fixtures"
html_path = temp_dir / "non_default_output_dir_html" / "non_default.html"
fill_args += ["--output", str(output_dir), "--html", str(html_path)]
result = runner.invoke(fill, fill_args)
assert result.exit_code == pytest.ExitCode.OK
assert html_path.exists()
assert (output_dir / "state_tests").exists(), "No fixtures in output directory"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_fuzzer_bridge.py | src/cli/tests/test_fuzzer_bridge.py | """Test suite for fuzzer bridge DTO parsing and conversion."""
import json
from pathlib import Path
from typing import Any, Dict
import pytest
from pydantic import ValidationError
from ethereum_test_base_types import Address, HexNumber
from ethereum_test_forks import Osaka
from ethereum_test_tools import Account, AuthorizationTuple, Transaction
from ethereum_test_types import Alloc, Environment
from ..fuzzer_bridge.converter import (
blockchain_test_from_fuzzer,
create_sender_eoa_map,
fuzzer_account_to_eest_account,
fuzzer_authorization_to_eest,
fuzzer_transaction_to_eest_transaction,
)
from ..fuzzer_bridge.models import (
FuzzerAccountInput,
FuzzerAuthorizationInput,
FuzzerOutput,
FuzzerTransactionInput,
)
def load_fuzzer_vector(filename: str) -> Dict[str, Any]:
"""
Load fuzzer test vector from vectors/ directory.
Follows the pattern from
tests/prague/eip2537_bls_12_381_precompiles/helpers.py
"""
vector_path = Path(__file__).parent / "vectors" / filename
with open(vector_path) as f:
return json.load(f)
class TestFuzzerOutputParsing:
    """Test parsing of fuzzer output JSON into DTOs."""
    # NOTE(review): the exact values asserted below (version "2.0", fork
    # Osaka, chain id 1, 17 transactions) mirror the on-disk vector file
    # vectors/fuzzer_test_0.json -- keep them in sync with that file.
    @pytest.fixture
    def fuzzer_data(self) -> Dict[str, Any]:
        """Load test vector."""
        return load_fuzzer_vector("fuzzer_test_0.json")
    def test_parse_fuzzer_output(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test parsing complete fuzzer output."""
        fuzzer_output = FuzzerOutput(**fuzzer_data)
        assert fuzzer_output.version == "2.0"
        assert fuzzer_output.fork == Osaka
        assert fuzzer_output.chain_id == HexNumber(1)
        assert len(fuzzer_output.transactions) == 17
        assert len(fuzzer_output.accounts) > 0
        assert fuzzer_output.parent_beacon_block_root is not None  # EIP-4788
    def test_parse_account_with_private_key(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test parsing account with private key."""
        # Pick any account that carries a signing key (i.e. an EOA).
        account_data = next(acc for acc in fuzzer_data["accounts"].values() if "privateKey" in acc)
        account = FuzzerAccountInput(**account_data)
        assert account.private_key is not None
        assert isinstance(account.balance, HexNumber)
        assert isinstance(account.nonce, HexNumber)
    def test_parse_account_without_private_key(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test parsing contract account (no private key)."""
        # The vector may contain only EOAs, hence the None default + guard.
        account_data = next(
            (acc for acc in fuzzer_data["accounts"].values() if "privateKey" not in acc),
            None,
        )
        if account_data:
            account = FuzzerAccountInput(**account_data)
            assert account.private_key is None
    def test_parse_transaction_with_authorization_list(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test parsing EIP-7702 transaction with authorization list."""
        # Find a tx whose authorizationList key exists AND is non-empty.
        tx_data = next(
            (
                tx
                for tx in fuzzer_data["transactions"]
                if "authorizationList" in tx and tx["authorizationList"]
            ),
            None,
        )
        if tx_data:
            tx = FuzzerTransactionInput(**tx_data)
            assert tx.authorization_list is not None
            assert len(tx.authorization_list) > 0
            assert isinstance(tx.authorization_list[0], FuzzerAuthorizationInput)
            # Verify authorization fields
            auth = tx.authorization_list[0]
            assert isinstance(auth.chain_id, HexNumber)
            assert isinstance(auth.address, Address)
            assert isinstance(auth.nonce, HexNumber)
    def test_parse_authorization_tuple(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test parsing individual authorization tuple."""
        tx_with_auth = next(
            (
                tx
                for tx in fuzzer_data["transactions"]
                if "authorizationList" in tx and tx["authorizationList"]
            ),
            None,
        )
        if tx_with_auth:
            # Parse a single tuple in isolation (signature fields included).
            auth_data = tx_with_auth["authorizationList"][0]
            auth = FuzzerAuthorizationInput(**auth_data)
            assert auth.chain_id is not None
            assert auth.address is not None
            assert auth.v is not None
            assert auth.r is not None
            assert auth.s is not None
    def test_parse_environment(self, fuzzer_data: Dict[str, Any]) -> None:
        """Test Environment parsing (using EEST Environment directly)."""
        env = Environment(**fuzzer_data["env"])
        assert env.fee_recipient is not None
        assert env.gas_limit is not None
        assert env.number is not None
        assert env.timestamp is not None
class TestDTOConversion:
    """Test conversion from DTOs to EEST domain models."""
    # NOTE(review): all conversions are exercised against the same fixture
    # file used by TestFuzzerOutputParsing (vectors/fuzzer_test_0.json).
    @pytest.fixture
    def fuzzer_output(self) -> FuzzerOutput:
        """Parsed fuzzer output."""
        data = load_fuzzer_vector("fuzzer_test_0.json")
        return FuzzerOutput(**data)
    def test_fuzzer_account_to_eest_account(self, fuzzer_output: FuzzerOutput) -> None:
        """Test account DTO to EEST Account conversion."""
        # Any account works; field values must round-trip unchanged.
        fuzzer_account = next(iter(fuzzer_output.accounts.values()))
        eest_account = fuzzer_account_to_eest_account(fuzzer_account)
        assert isinstance(eest_account, Account)
        assert eest_account.balance == fuzzer_account.balance
        assert eest_account.nonce == fuzzer_account.nonce
        assert eest_account.code == fuzzer_account.code
    def test_fuzzer_authorization_to_eest(self, fuzzer_output: FuzzerOutput) -> None:
        """Test authorization DTO to EEST AuthorizationTuple conversion."""
        tx_with_auth = next(
            (tx for tx in fuzzer_output.transactions if tx.authorization_list), None
        )
        # Skip silently when the vector has no 7702 transactions.
        if tx_with_auth and tx_with_auth.authorization_list:
            fuzzer_auth = tx_with_auth.authorization_list[0]
            eest_auth = fuzzer_authorization_to_eest(fuzzer_auth)
            assert isinstance(eest_auth, AuthorizationTuple)
            assert eest_auth.chain_id == fuzzer_auth.chain_id
            assert eest_auth.address == fuzzer_auth.address
            assert eest_auth.nonce == fuzzer_auth.nonce
    def test_create_sender_eoa_map(self, fuzzer_output: FuzzerOutput) -> None:
        """Test EOA map creation from accounts."""
        sender_map = create_sender_eoa_map(fuzzer_output.accounts)
        # Verify all senders are valid
        assert len(sender_map) > 0
        for addr, eoa in sender_map.items():
            # Verify private key matches address
            assert Address(eoa) == addr
    def test_sender_eoa_map_validates_address(self, fuzzer_output: FuzzerOutput) -> None:
        """Test that EOA map validates private key matches address."""
        # This test verifies the assertion in create_sender_eoa_map
        sender_map = create_sender_eoa_map(fuzzer_output.accounts)
        # All created EOAs should pass validation
        assert all(Address(eoa) == addr for addr, eoa in sender_map.items())
    def test_fuzzer_transaction_to_eest_transaction(self, fuzzer_output: FuzzerOutput) -> None:
        """Test transaction DTO to EEST Transaction conversion."""
        fuzzer_tx = fuzzer_output.transactions[0]
        sender_map = create_sender_eoa_map(fuzzer_output.accounts)
        sender_eoa = sender_map[fuzzer_tx.from_]
        eest_tx = fuzzer_transaction_to_eest_transaction(fuzzer_tx, sender_eoa)
        assert isinstance(eest_tx, Transaction)
        assert eest_tx.sender == sender_eoa
        assert eest_tx.to == fuzzer_tx.to
        assert eest_tx.gas_limit == fuzzer_tx.gas  # Key mapping!
        assert eest_tx.data == fuzzer_tx.data
    def test_transaction_gas_field_mapping(self, fuzzer_output: FuzzerOutput) -> None:
        """Test critical field mapping: gas → gas_limit."""
        fuzzer_tx = fuzzer_output.transactions[0]
        sender_map = create_sender_eoa_map(fuzzer_output.accounts)
        sender_eoa = sender_map[fuzzer_tx.from_]
        eest_tx = fuzzer_transaction_to_eest_transaction(fuzzer_tx, sender_eoa)
        # Fuzzer uses 'gas' (JSON-RPC), EEST uses 'gas_limit'
        eest_tx_gas_matches = eest_tx.gas_limit == fuzzer_tx.gas
        assert eest_tx_gas_matches
    def test_transaction_authorization_list_conversion(self, fuzzer_output: FuzzerOutput) -> None:
        """Test authorization list conversion in transaction."""
        tx_with_auth = next(
            (tx for tx in fuzzer_output.transactions if tx.authorization_list), None
        )
        if tx_with_auth and tx_with_auth.authorization_list:
            sender_map = create_sender_eoa_map(fuzzer_output.accounts)
            sender_eoa = sender_map[tx_with_auth.from_]
            eest_tx = fuzzer_transaction_to_eest_transaction(tx_with_auth, sender_eoa)
            # Every DTO tuple must come through as an EEST AuthorizationTuple.
            assert eest_tx.authorization_list is not None
            assert len(eest_tx.authorization_list) == len(tx_with_auth.authorization_list)
            assert all(isinstance(auth, AuthorizationTuple) for auth in eest_tx.authorization_list)
class TestBlockchainTestGeneration:
    """Test end-to-end conversion to BlockchainTest."""
    # NOTE(review): the magic number 17 is the transaction count of
    # vectors/fuzzer_test_0.json; update the asserts if the vector changes.
    @pytest.fixture
    def fuzzer_output(self) -> FuzzerOutput:
        """Parsed fuzzer output."""
        data = load_fuzzer_vector("fuzzer_test_0.json")
        return FuzzerOutput(**data)
    def test_blockchain_test_from_fuzzer_single_block(self, fuzzer_output: FuzzerOutput) -> None:
        """Test single-block blockchain test generation."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
            num_blocks=1,
        )
        # One block carrying every transaction from the vector.
        assert blockchain_test.pre is not None
        assert len(blockchain_test.blocks) == 1
        assert len(blockchain_test.blocks[0].txs) == 17
        assert blockchain_test.genesis_environment is not None
    def test_blockchain_test_multi_block_distribute(self, fuzzer_output: FuzzerOutput) -> None:
        """Test multi-block generation with distribute strategy."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
            num_blocks=3,
            block_strategy="distribute",
        )
        assert len(blockchain_test.blocks) == 3
        # Verify all transactions distributed
        total_txs = sum(len(block.txs) for block in blockchain_test.blocks)
        assert total_txs == 17
        # Verify transactions maintain nonce order
        assert len(blockchain_test.blocks[0].txs) > 0
    def test_blockchain_test_multi_block_first_block(self, fuzzer_output: FuzzerOutput) -> None:
        """Test multi-block generation with first-block strategy."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
            num_blocks=3,
            block_strategy="first-block",
        )
        # first-block packs everything into block 0; the rest stay empty.
        assert len(blockchain_test.blocks) == 3
        assert len(blockchain_test.blocks[0].txs) == 17
        assert len(blockchain_test.blocks[1].txs) == 0
        assert len(blockchain_test.blocks[2].txs) == 0
    def test_blockchain_test_pre_state(self, fuzzer_output: FuzzerOutput) -> None:
        """Test pre-state (Alloc) generation."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
        )
        assert isinstance(blockchain_test.pre, Alloc)
        # Verify all accounts are in pre-state
        for addr in fuzzer_output.accounts:
            assert addr in blockchain_test.pre
    def test_blockchain_test_genesis_environment(self, fuzzer_output: FuzzerOutput) -> None:
        """Test genesis environment derivation."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
        )
        genesis_env = blockchain_test.genesis_environment
        assert genesis_env.number == 0
        # Genesis timestamp should be 12 seconds before block 1
        assert int(genesis_env.timestamp) == int(fuzzer_output.env.timestamp) - 12
    def test_blockchain_test_block_timestamps(self, fuzzer_output: FuzzerOutput) -> None:
        """Test block timestamp incrementing."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
            num_blocks=3,
            block_time=12,
        )
        # Check timestamps increment correctly
        base_ts = int(fuzzer_output.env.timestamp)
        assert blockchain_test.blocks[0].timestamp == base_ts
        assert blockchain_test.blocks[1].timestamp == base_ts + 12
        assert blockchain_test.blocks[2].timestamp == base_ts + 24
    def test_blockchain_test_beacon_root_first_block_only(
        self, fuzzer_output: FuzzerOutput
    ) -> None:
        """Test parent beacon block root only in first block (EIP-4788)."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
            num_blocks=3,
        )
        # First block should have beacon root
        assert blockchain_test.blocks[0].parent_beacon_block_root is not None
        # Subsequent blocks should NOT have beacon root
        assert blockchain_test.blocks[1].parent_beacon_block_root is None
        assert blockchain_test.blocks[2].parent_beacon_block_root is None
class TestEIPFeatures:
    """Test EIP-specific feature handling."""
    # Covers EIP-7702 (authorization lists) and EIP-4788 (beacon root)
    # plumbing through the fuzzer -> BlockchainTest conversion.
    @pytest.fixture
    def fuzzer_output(self) -> FuzzerOutput:
        """Parsed fuzzer output."""
        data = load_fuzzer_vector("fuzzer_test_0.json")
        return FuzzerOutput(**data)
    def test_eip7702_authorization_lists(self, fuzzer_output: FuzzerOutput) -> None:
        """Test EIP-7702 authorization list handling."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
        )
        # Find transactions with authorization lists
        txs_with_auth = [
            tx for block in blockchain_test.blocks for tx in block.txs if tx.authorization_list
        ]
        assert len(txs_with_auth) > 0
        for tx in txs_with_auth:
            if tx.authorization_list:
                assert all(isinstance(auth, AuthorizationTuple) for auth in tx.authorization_list)
    def test_eip4788_parent_beacon_block_root(self, fuzzer_output: FuzzerOutput) -> None:
        """Test EIP-4788 parent beacon block root handling."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
        )
        # Beacon root should match fuzzer output
        assert (
            blockchain_test.blocks[0].parent_beacon_block_root
            == fuzzer_output.parent_beacon_block_root
        )
    def test_sender_is_eoa_not_test_address(self, fuzzer_output: FuzzerOutput) -> None:
        """Test that transaction senders are EOAs, not TestAddress."""
        blockchain_test = blockchain_test_from_fuzzer(
            fuzzer_output,
            fork=Osaka,
        )
        for block in blockchain_test.blocks:
            for tx in block.txs:
                # Verify sender is EOA with private key
                assert hasattr(tx.sender, "key")
                if tx.sender:
                    assert tx.sender.key is not None
class TestErrorHandling:
    """Test error handling and validation."""
    def test_invalid_version_fails(self) -> None:
        """Test that invalid version is rejected."""
        data = load_fuzzer_vector("fuzzer_test_0.json")
        data["version"] = "1.0"  # Invalid version
        # The FuzzerOutput model pins the supported schema version.
        with pytest.raises(ValidationError):
            FuzzerOutput(**data)
    def test_missing_private_key_fails(self) -> None:
        """Test that transaction without sender private key fails."""
        data = load_fuzzer_vector("fuzzer_test_0.json")
        # Remove all private keys
        for account in data["accounts"].values():
            if "privateKey" in account:
                del account["privateKey"]
        # Parsing still succeeds: keys are optional at the DTO level.
        fuzzer_output = FuzzerOutput(**data)
        # Conversion should fail due to missing sender keys
        with pytest.raises(AssertionError):
            blockchain_test_from_fuzzer(fuzzer_output, fork=Osaka)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/__init__.py | src/cli/tests/__init__.py | """Tests for scripts and apps in `cli` ."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tests/test_generate_all_formats.py | src/cli/tests/test_generate_all_formats.py | """Test the --generate-all-formats CLI flag functionality."""
from unittest.mock import patch
from cli.pytest_commands.fill import FillCommand
def test_generate_all_formats_creates_two_phase_execution() -> None:
    """Test that --generate-all-formats triggers two-phase execution."""
    command = FillCommand()
    # Bypass click context handling so create_executions can be driven directly.
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(["--generate-all-formats", "tests/somedir/"])
        assert len(executions) == 2, "Expected two-phase execution"
        phase1, phase2 = executions[0].args, executions[1].args
        # Phase 1 only generates the pre-alloc groups.
        assert "--generate-pre-alloc-groups" in phase1
        assert "--generate-all-formats" not in phase1
        # Phase 2 consumes them and emits every fixture format.
        assert "--use-pre-alloc-groups" in phase2
        assert "--generate-all-formats" in phase2
        assert "--generate-pre-alloc-groups" not in phase2
def test_generate_all_formats_preserves_other_args() -> None:
    """
    Test that --generate-all-formats preserves other command line arguments.
    """
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        extra_args = ["--output=custom-output", "--fork=Paris", "-v", "tests/somedir/"]
        executions = command.create_executions(["--generate-all-formats", *extra_args])
        assert len(executions) == 2
        # Every unrelated argument must survive into both phases.
        for execution in executions:
            for arg in extra_args:
                assert arg in execution.args
def test_generate_all_formats_removes_clean_from_phase2() -> None:
    """Test that --clean is removed from phase 2."""
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(
            ["--generate-all-formats", "--clean", "tests/somedir/"]
        )
        assert len(executions) == 2
        # Phase 1 keeps --clean (needed to wipe output before filling);
        # _remove_unwanted_phase1_args strips it from phase 2.
        assert "--clean" not in executions[1].args
def test_legacy_generate_pre_alloc_groups_still_works() -> None:
    """Test that the legacy --generate-pre-alloc-groups flag still works."""
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(
            ["--generate-pre-alloc-groups", "tests/somedir/"]
        )
        assert len(executions) == 2
        first = executions[0].args
        second = executions[1].args
        # Phase 1 generates the groups.
        assert "--generate-pre-alloc-groups" in first
        # Phase 2 consumes them but must NOT imply --generate-all-formats.
        assert "--use-pre-alloc-groups" in second
        assert "--generate-all-formats" not in second
        assert "--generate-pre-alloc-groups" not in second
def test_single_phase_without_flags() -> None:
    """Test that normal execution without flags creates single phase."""
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(["tests/somedir/"])
        assert len(executions) == 1
        args = executions[0].args
        # None of the two-phase machinery should appear.
        for flag in (
            "--generate-pre-alloc-groups",
            "--use-pre-alloc-groups",
            "--generate-all-formats",
        ):
            assert flag not in args
def test_tarball_output_auto_enables_generate_all_formats() -> None:
    """
    Test that tarball output automatically enables --generate-all-formats.
    """
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(["--output=fixtures.tar.gz", "tests/somedir/"])
        # A .tar.gz output alone must trigger the two-phase flow.
        assert len(executions) == 2
        assert "--generate-pre-alloc-groups" in executions[0].args
        phase2 = executions[1].args
        # --generate-all-formats is implied and the tarball path preserved.
        assert "--generate-all-formats" in phase2
        assert "--use-pre-alloc-groups" in phase2
        assert "--output=fixtures.tar.gz" in phase2
def test_tarball_output_with_explicit_generate_all_formats() -> None:
    """
    Test that explicit --generate-all-formats with tarball output works
    correctly.
    """
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(
            ["--output=fixtures.tar.gz", "--generate-all-formats", "tests/somedir/"]
        )
        assert len(executions) == 2
        phase2 = executions[1].args
        # Flag must be present exactly once -- not dropped, not duplicated.
        assert "--generate-all-formats" in phase2
        assert phase2.count("--generate-all-formats") == 1
def test_regular_output_does_not_auto_trigger_two_phase() -> None:
    """
    Test that regular directory output doesn't auto-trigger two-phase
    execution.
    """
    command = FillCommand()
    with patch.object(command, "process_arguments", side_effect=lambda x: x):
        executions = command.create_executions(["--output=fixtures/", "tests/somedir/"])
        # Plain directory output stays single phase with no group flags.
        assert len(executions) == 1
        args = executions[0].args
        for flag in (
            "--generate-pre-alloc-groups",
            "--use-pre-alloc-groups",
            "--generate-all-formats",
        ):
            assert flag not in args
def test_tarball_output_detection_various_formats() -> None:
    """Test tarball output detection with various argument formats."""
    command = FillCommand()
    # (argv, expected) pairs covering both --output forms plus negatives.
    cases = [
        (["--output=test.tar.gz", "tests/somedir/"], True),  # --output=FILE
        (["--output", "test.tar.gz", "tests/somedir/"], True),  # --output FILE
        (["--output=test/", "tests/somedir/"], False),  # plain directory
        (["tests/somedir/"], False),  # no output argument at all
    ]
    for args, expected in cases:
        assert command._is_tarball_output(args) is expected
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/performance_utils.py | src/cli/fuzzer_bridge/performance_utils.py | """Performance utilities for fuzzer bridge processing."""
import json
import mmap
import os
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, List, Optional
try:
import orjson # type: ignore # Fast JSON library (optional dependency)
HAS_ORJSON = True
except ImportError:
HAS_ORJSON = False
class FastJSONHandler:
    """Fast JSON operations using memory-mapped files and optimized parsing."""

    @staticmethod
    def load_json_mmap(file_path: Path) -> Dict[str, Any]:
        """
        Load JSON using a memory-mapped file for better I/O performance.

        Opens the file read-only ("rb"): an ``mmap.ACCESS_READ`` mapping does
        not require write access, and the previous "r+b" mode made loading
        fail on read-only files for no reason.

        :param file_path: Path to the JSON file to read.
        :return: The parsed top-level JSON object.
        """
        with open(file_path, "rb") as f:
            # Memory-map the whole file (length 0 == map to EOF).
            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped_file:
                # Read the entire content at once
                content = mmapped_file.read()
                if HAS_ORJSON:
                    return orjson.loads(content)
                else:
                    # Fallback to standard json
                    return json.loads(content.decode("utf-8"))

    @staticmethod
    def dump_json_fast(data: Dict[str, Any], file_path: Path, pretty: bool = False) -> None:
        """
        Dump JSON to *file_path* using the fastest available serializer.

        Uses orjson when installed (binary write, since orjson returns
        bytes), otherwise falls back to the stdlib ``json`` module.

        :param data: JSON-serializable mapping to write.
        :param file_path: Destination path (truncated/created).
        :param pretty: When True, emit 2-space indented output.
        """
        if HAS_ORJSON:
            # Use orjson for faster serialization
            if pretty:
                content = orjson.dumps(data, option=orjson.OPT_INDENT_2)
            else:
                content = orjson.dumps(data)
            with open(file_path, "wb") as f:
                f.write(content)
        else:
            # Fallback to standard json
            with open(file_path, "w") as f:
                if pretty:
                    json.dump(data, f, indent=2)
                else:
                    json.dump(data, f)
class BatchProcessor:
    """Optimized batch processing utilities."""

    @staticmethod
    def calculate_optimal_batch_size(file_count: int, num_workers: int) -> int:
        """
        Pick a batch size for *file_count* files split over *num_workers*.

        Targets roughly four batches per worker for load balancing, with a
        floor of five files per batch, then adjusts for extreme file counts.
        """
        floor = 5
        batches_per_worker = 4
        size = max(floor, file_count // (num_workers * batches_per_worker))
        if file_count > 10000:
            # Many files: larger batches keep per-batch overhead down.
            size = max(50, size)
        elif file_count < 100:
            # Few files: shrink batches so parallelism is maximized.
            size = max(1, file_count // (num_workers * 2))
        return size

    @staticmethod
    def calculate_optimal_workers(file_count: int) -> int:
        """Scale the worker count with the workload size (capped at 16)."""
        cpus = os.cpu_count() or 4
        # (upper bound on file_count, worker count) -- checked in order.
        tiers = (
            (10, 1),  # sequential for very small workloads
            (50, min(2, cpus)),
            (200, min(4, cpus)),
            (1000, min(cpus, 8)),
        )
        for limit, workers in tiers:
            if file_count < limit:
                return workers
        # Very large workloads: use every CPU, but never more than 16.
        return min(cpus, 16)
@lru_cache(maxsize=128)
def cached_fork_lookup(fork_name: str) -> Optional[Any]:
    """
    Resolve *fork_name* to a fork object, caching results across calls.

    Returns None when the forks package is unavailable or the name is
    unknown; the lazy import means the cache also avoids repeated imports.
    """
    try:
        import ethereum_test_forks
    except ImportError:
        return None
    return getattr(ethereum_test_forks, fork_name, None)
class ParallelProgressTracker:
    """Thread-safe progress tracking for parallel processing."""

    def __init__(self, total: int):
        """Set up counters for a run of *total* items."""
        from threading import Lock

        self.total = total
        self.completed, self.errors = 0, 0
        self.lock = Lock()

    def update(self, success: int = 0, error: int = 0) -> tuple[int, int]:
        """Bump the counters under the lock; return (completed, errors)."""
        with self.lock:
            self.completed = self.completed + success
            self.errors = self.errors + error
            return self.completed, self.errors

    def get_stats(self) -> tuple[int, int, int]:
        """Return a consistent (completed, errors, total) snapshot."""
        with self.lock:
            return self.completed, self.errors, self.total
# Memory pool for reusing buffers
class BufferPool:
    """Reusable buffer pool to reduce memory allocations."""

    def __init__(self, buffer_size: int = 1024 * 1024):  # 1MB default
        """Create an empty pool handing out *buffer_size*-byte buffers."""
        self.buffer_size = buffer_size
        self.buffers: List[bytearray] = []

    def get_buffer(self) -> bytearray:
        """Pop a pooled buffer, or allocate a fresh one when the pool is empty."""
        try:
            return self.buffers.pop()
        except IndexError:
            return bytearray(self.buffer_size)

    def return_buffer(self, buffer: bytearray) -> None:
        """Accept *buffer* back into the pool only if it still has the pool size."""
        if len(buffer) == self.buffer_size:
            self.buffers.append(buffer)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/cli.py | src/cli/fuzzer_bridge/cli.py | """Command-line interface for the fuzzer bridge."""
import json
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import partial
from pathlib import Path
from typing import Any, Dict, Generator, Optional, Tuple
import click
from rich.progress import BarColumn, Progress, TaskProgressColumn, TextColumn, TimeElapsedColumn
from ethereum_clis import GethTransitionTool, TransitionTool
from .blocktest_builder import BlocktestBuilder
def count_json_files(start_path: Path) -> int:
    """Return how many ``*.json`` files live under *start_path* (recursive)."""
    total = 0
    for _ in start_path.rglob("*.json"):
        total += 1
    return total
def get_input_files(input_path: Path) -> Generator[Path, None, None]:
    """Yield JSON inputs: the file itself, or every ``*.json`` under a directory."""
    if not input_path.is_file():
        # Directory (or missing path): walk it recursively for JSON files.
        yield from input_path.rglob("*.json")
    else:
        yield input_path
def generate_test_name(file_path: Path, index: int = 0) -> str:
    """Derive a test name from *file_path*'s stem; append *index* when positive."""
    suffix = f"_{index}" if index > 0 else ""
    return f"fuzzer_{file_path.stem}{suffix}"
def process_single_file(
    input_file: Path,
    output_path: Path,
    builder: BlocktestBuilder,
    fork: Optional[str],
    pretty: bool,
    quiet: bool,
    num_blocks: int = 1,
    block_strategy: str = "distribute",
    block_time: int = 12,
    random_blocks: bool = False,
) -> Dict[str, Any]:
    """
    Process a single fuzzer output file.

    Reads *input_file* as fuzzer JSON, builds a blocktest with *builder*,
    and writes ``{output_path}/{input_file.stem}.json`` containing a
    single ``{test_name: blocktest}`` mapping, which is also returned.

    :param fork: When set, overrides the ``fork`` field of the fuzzer data.
    :param pretty: Indent the written JSON with 2 spaces.
    :param quiet: Suppress the "Generated: ..." message on stderr.
    :param random_blocks: Choose the block count randomly from the number
        of transactions instead of using *num_blocks*.
    """
    with open(input_file) as f:
        fuzzer_data = json.load(f)
    # Override fork if specified
    if fork:
        fuzzer_data["fork"] = fork
    # Determine number of blocks
    if random_blocks:
        # Imported lazily so the randomness helper is only loaded on demand.
        from .blocktest_builder import choose_random_num_blocks
        actual_num_blocks = choose_random_num_blocks(len(fuzzer_data.get("transactions", [])))
    else:
        actual_num_blocks = num_blocks
    # Build blocktest
    blocktest = builder.build_blocktest(
        fuzzer_data,
        num_blocks=actual_num_blocks,
        block_strategy=block_strategy,
        block_time=block_time,
    )
    test_name = generate_test_name(input_file)
    fixtures = {test_name: blocktest}
    # Write output
    output_file = output_path / f"{input_file.stem}.json"
    with open(output_file, "w") as f:
        if pretty:
            json.dump(fixtures, f, indent=2)
        else:
            json.dump(fixtures, f)
    if not quiet:
        # Status goes to stderr so stdout stays clean for piping.
        click.echo(f"Generated: {output_file}", err=True)
    return fixtures
def process_single_file_worker(
    file_info: Tuple[Path, Path],
    fork: Optional[str],
    pretty: bool,
    merge: bool,
    evm_bin: Optional[Path],
    num_blocks: int = 1,
    block_strategy: str = "distribute",
    block_time: int = 12,
    random_blocks: bool = False,
) -> Tuple[Optional[Tuple[Path, Dict[str, Any]]], Optional[Tuple[Path, Exception]]]:
    """
    Process a single file in a worker process.

    :param file_info: ``(input_json_path, output_file_path)`` pair.
    :param merge: When True, nothing is written here; the fixtures are
        returned to the parent for merging instead.
    :return: ``(result, error)`` where exactly one element is non-None:
        ``result`` is ``(input_path, fixtures)`` on success, ``error`` is
        ``(input_path, exception)`` on failure. Exceptions are captured
        rather than raised so the parent pool can keep running.
    """
    json_file_path, output_file = file_info
    # Create transition tool and builder for this worker
    # (process-local: t8n/builder state cannot be shared across processes).
    t8n = GethTransitionTool(binary=evm_bin) if evm_bin else GethTransitionTool()
    builder = BlocktestBuilder(t8n)
    try:
        with open(json_file_path) as f:
            fuzzer_data = json.load(f)
        # Override fork if specified
        if fork:
            fuzzer_data["fork"] = fork
        # Determine number of blocks
        if random_blocks:
            from .blocktest_builder import choose_random_num_blocks
            actual_num_blocks = choose_random_num_blocks(len(fuzzer_data.get("transactions", [])))
        else:
            actual_num_blocks = num_blocks
        # Build blocktest
        blocktest = builder.build_blocktest(
            fuzzer_data,
            num_blocks=actual_num_blocks,
            block_strategy=block_strategy,
            block_time=block_time,
        )
        test_name = generate_test_name(json_file_path)
        fixtures = {test_name: blocktest}
        if not merge:
            # Write individual file preserving structure
            output_file.parent.mkdir(parents=True, exist_ok=True)
            with open(output_file, "w") as f:
                if pretty:
                    json.dump(fixtures, f, indent=2)
                else:
                    json.dump(fixtures, f)
        return (json_file_path, fixtures), None
    except Exception as e:
        # Deliberately broad: any per-file failure is reported, not fatal.
        return None, (json_file_path, e)
def process_file_batch(
    file_batch: list[Tuple[Path, Path]],
    fork: Optional[str],
    pretty: bool,
    merge: bool,
    evm_bin: Optional[Path],
    num_blocks: int = 1,
    block_strategy: str = "distribute",
    block_time: int = 12,
    random_blocks: bool = False,
) -> Tuple[list[Tuple[Path, Dict[str, Any]]], list[Tuple[Path, Exception]]]:
    """
    Process a batch of files in a worker process.

    Batch-oriented sibling of ``process_single_file_worker``: one t8n tool
    per worker is amortized over the whole batch, and per-file failures are
    collected instead of aborting the batch.

    :param file_batch: ``(input_json_path, rel_path)`` pairs.
    :return: ``(successes, errors)`` lists of ``(input_path, fixtures)``
        and ``(input_path, exception)`` respectively.

    NOTE(review): unlike the other writers, the output path here is
    ``rel_path.with_suffix(".json")`` taken as-is -- if callers pass a path
    relative to the input dir, files land relative to the CWD rather than
    under an output dir. Confirm intended before relying on this function.
    """
    # Create transition tool per worker
    t8n = GethTransitionTool(binary=evm_bin) if evm_bin else GethTransitionTool()
    builder = BlocktestBuilder(t8n)
    results = []
    errors = []
    for json_file_path, rel_path in file_batch:
        try:
            with open(json_file_path) as f:
                fuzzer_data = json.load(f)
            # Override fork if specified
            if fork:
                fuzzer_data["fork"] = fork
            # Determine number of blocks
            if random_blocks:
                from .blocktest_builder import choose_random_num_blocks
                actual_num_blocks = choose_random_num_blocks(
                    len(fuzzer_data.get("transactions", []))
                )
            else:
                actual_num_blocks = num_blocks
            # Build blocktest
            blocktest = builder.build_blocktest(
                fuzzer_data,
                num_blocks=actual_num_blocks,
                block_strategy=block_strategy,
                block_time=block_time,
            )
            test_name = generate_test_name(json_file_path)
            fixtures = {test_name: blocktest}
            if not merge:
                # Write individual file preserving structure
                output_file = rel_path.with_suffix(".json")
                output_file.parent.mkdir(parents=True, exist_ok=True)
                with open(output_file, "w") as f:
                    if pretty:
                        json.dump(fixtures, f, indent=2)
                    else:
                        json.dump(fixtures, f)
            results.append((json_file_path, fixtures))
        except Exception as e:
            # Record the failure and continue with the rest of the batch.
            errors.append((json_file_path, e))
    return results, errors
def process_directory_parallel(
    input_dir: Path,
    output_dir: Path,
    fork: Optional[str],
    pretty: bool,
    merge: bool,
    quiet: bool,
    evm_bin: Optional[Path],
    num_workers: Optional[int] = None,
    num_blocks: int = 1,
    block_strategy: str = "distribute",
    block_time: int = 12,
    random_blocks: bool = False,
) -> None:
    """
    Process directory of fuzzer output files with parallel processing.

    Fans every ``*.json`` under *input_dir* out to a process pool (one
    ``process_single_file_worker`` call per file), mirroring the input
    tree under *output_dir*. When *merge* is set, per-file output is
    skipped and everything is written to one ``merged_fixtures.json``.

    :param num_workers: Pool size; defaults to ``min(cpus, files // 10)``
        (at least 1), so small workloads stay near-sequential.
    """
    all_fixtures = {}
    # Collect all files to process
    files_to_process = []
    for json_file_path in get_input_files(input_dir):
        rel_path = json_file_path.relative_to(input_dir)
        output_file = output_dir / rel_path
        files_to_process.append((json_file_path, output_file))
    file_count = len(files_to_process)
    if file_count == 0:
        if not quiet:
            click.echo("No JSON files found to process.", err=True)
        return
    # Determine optimal number of workers
    if num_workers is None:
        num_workers = min(mp.cpu_count(), max(1, file_count // 10))
    success_count = 0
    error_count = 0
    with Progress(
        TextColumn("[bold cyan]{task.fields[filename]}", justify="left"),
        BarColumn(bar_width=None, complete_style="green3", finished_style="bold green3"),
        TaskProgressColumn(),
        TextColumn("[dim]({task.fields[workers]} workers)[/dim]"),
        TimeElapsedColumn(),
        expand=True,
        disable=quiet,
    ) as progress:
        task_id = progress.add_task(
            "Processing", total=file_count, filename="Starting...", workers=num_workers
        )
        # Process files individually in parallel (better progress tracking)
        # All per-file parameters are frozen here; only file_info varies.
        process_func = partial(
            process_single_file_worker,
            fork=fork,
            pretty=pretty,
            merge=merge,
            evm_bin=evm_bin,
            num_blocks=num_blocks,
            block_strategy=block_strategy,
            block_time=block_time,
            random_blocks=random_blocks,
        )
        with ProcessPoolExecutor(max_workers=num_workers) as executor:
            # Submit all files to the pool
            futures_to_files = {
                executor.submit(process_func, file_info): file_info[0]
                for file_info in files_to_process
            }
            # Process completions as they happen for real-time progress
            for future in as_completed(futures_to_files):
                file_path = futures_to_files[future]
                # Update progress with current file
                rel_path = file_path.relative_to(input_dir)
                display_name = str(rel_path)
                if len(display_name) > 40:
                    # Keep the tail of long paths -- the filename matters most.
                    display_name = "..." + display_name[-37:]
                try:
                    # Worker returns (result, error); exceptions inside the
                    # worker body were already converted into `error`.
                    result, error = future.result()
                    if result:
                        success_count += 1
                        _, fixtures = result
                        if merge:
                            all_fixtures.update(fixtures)
                    elif error:
                        error_count += 1
                        error_file, exception = error
                        if not quiet:
                            progress.console.print(
                                f"[red]Error processing {error_file}: {exception}[/red]"
                            )
                    # Update progress bar
                    progress.update(task_id, advance=1, filename=display_name, workers=num_workers)
                except Exception as e:
                    # future.result() itself failed (e.g. worker crash).
                    error_count += 1
                    if not quiet:
                        progress.console.print(f"[red]Worker error for {file_path}: {e}[/red]")
                    progress.update(task_id, advance=1, filename=display_name)
    # Write merged file if requested
    # NOTE(review): this runs after the Progress context has exited; the
    # console prints still work, but the final progress.update below will
    # not be rendered by the stopped live display -- confirm intended.
    if merge and all_fixtures:
        merged_file = output_dir / "merged_fixtures.json"
        with open(merged_file, "w") as f:
            if pretty:
                json.dump(all_fixtures, f, indent=2)
            else:
                json.dump(all_fixtures, f)
        if not quiet:
            progress.console.print(f"[green]Merged fixtures written to: {merged_file}[/green]")
    # Final status
    if not quiet:
        emoji = "✅" if error_count == 0 else "⚠️"
        progress.update(
            task_id,
            completed=file_count,
            filename=f"Done! {success_count} succeeded, {error_count} failed {emoji}",
            workers=num_workers,
        )
def process_directory(
    input_dir: Path,
    output_dir: Path,
    builder: BlocktestBuilder,
    fork: Optional[str],
    pretty: bool,
    merge: bool,
    quiet: bool,
    num_blocks: int = 1,
    block_strategy: str = "distribute",
    block_time: int = 12,
    random_blocks: bool = False,
) -> None:
    """
    Process directory of fuzzer output files.

    Sequential counterpart of ``process_directory_parallel``: walks every
    ``*.json`` under *input_dir*, builds a blocktest per file with the
    shared *builder*, and either mirrors the tree under *output_dir* or
    (with *merge*) collects everything into one ``merged_fixtures.json``.
    Per-file failures are reported and skipped, not fatal.
    """
    all_fixtures = {}
    # When quiet the progress bar is disabled, so skip the counting pass
    # and leave the total at 0.
    file_count = count_json_files(input_dir) if not quiet else 0
    success_count = 0
    error_count = 0
    with Progress(
        TextColumn("[bold cyan]{task.fields[filename]}", justify="left"),
        BarColumn(bar_width=None, complete_style="green3", finished_style="bold green3"),
        TaskProgressColumn(),
        TimeElapsedColumn(),
        expand=True,
        disable=quiet,
    ) as progress:
        task_id = progress.add_task("Processing", total=file_count, filename="...")
        for json_file_path in get_input_files(input_dir):
            # Preserve directory structure
            rel_path = json_file_path.relative_to(input_dir)
            display_name = str(rel_path)
            if len(display_name) > 40:
                display_name = "..." + display_name[-37:]
            progress.update(task_id, advance=1, filename=display_name)
            try:
                with open(json_file_path) as f:
                    fuzzer_data = json.load(f)
                # Override fork if specified
                if fork:
                    fuzzer_data["fork"] = fork
                # Determine number of blocks
                if random_blocks:
                    from .blocktest_builder import choose_random_num_blocks
                    actual_num_blocks = choose_random_num_blocks(
                        len(fuzzer_data.get("transactions", []))
                    )
                else:
                    actual_num_blocks = num_blocks
                # Build blocktest
                blocktest = builder.build_blocktest(
                    fuzzer_data,
                    num_blocks=actual_num_blocks,
                    block_strategy=block_strategy,
                    block_time=block_time,
                )
                test_name = generate_test_name(json_file_path)
                if merge:
                    # Add to merged fixtures
                    all_fixtures[test_name] = blocktest
                else:
                    # Write individual file preserving structure
                    output_file = output_dir / rel_path.with_suffix(".json")
                    output_file.parent.mkdir(parents=True, exist_ok=True)
                    fixtures = {test_name: blocktest}
                    with open(output_file, "w") as f:
                        if pretty:
                            json.dump(fixtures, f, indent=2)
                        else:
                            json.dump(fixtures, f)
                success_count += 1
            except Exception as e:
                # Report and continue with the next file.
                error_count += 1
                if not quiet:
                    progress.console.print(f"[red]Error processing {json_file_path}: {e}[/red]")
    # Write merged file if requested
    # NOTE(review): as in process_directory_parallel, this runs after the
    # Progress context has exited; the final update below is not rendered
    # by the stopped live display -- confirm intended.
    if merge and all_fixtures:
        merged_file = output_dir / "merged_fixtures.json"
        with open(merged_file, "w") as f:
            if pretty:
                json.dump(all_fixtures, f, indent=2)
            else:
                json.dump(all_fixtures, f)
        if not quiet:
            progress.console.print(f"[green]Merged fixtures written to: {merged_file}[/green]")
    # Final status
    if not quiet:
        emoji = "✅" if error_count == 0 else "⚠️"
        progress.update(
            task_id,
            completed=file_count,
            filename=f"Done! {success_count} succeeded, {error_count} failed {emoji}",
        )
def batch_mode(
fork: Optional[str],
evm_bin: Optional[Path],
pretty: bool,
num_blocks: int,
block_strategy: str,
block_time: int,
random_blocks: bool,
) -> None:
"""
Persistent batch processing mode.
Reads input/output pairs from stdin, processes each, and outputs status
to stdout. Protocol:
- INPUT (stdin): <input_json_path> <output_directory>
- OUTPUT (stdout): DONE <generated_blocktest_path> or ERROR <error_message>
"""
import sys
import traceback
# Pre-initialize transition tool and builder once for performance
t8n: TransitionTool
if evm_bin:
t8n = GethTransitionTool(binary=evm_bin)
else:
t8n = GethTransitionTool()
builder = BlocktestBuilder(t8n)
# Write ready signal to stderr for debugging
print("Batch mode initialized. Ready to process files.", file=sys.stderr, flush=True)
while True:
try:
# Read line from stdin
line = sys.stdin.readline()
if not line: # EOF
break
line = line.strip()
if not line:
continue
# Parse input/output paths
parts = line.split()
if len(parts) != 2:
print(f"ERROR: invalid input format: {line}", flush=True)
continue
input_path_str, output_dir_str = parts
input_path = Path(input_path_str)
output_dir = Path(output_dir_str)
# Process the file
try:
# Read fuzzer input
if not input_path.exists():
print(f"ERROR: file not found: {input_path}", flush=True)
continue
with open(input_path) as f:
fuzzer_data = json.load(f)
# Override fork if specified
if fork:
fuzzer_data["fork"] = fork
# Determine number of blocks
if random_blocks:
from .blocktest_builder import choose_random_num_blocks
actual_num_blocks = choose_random_num_blocks(
len(fuzzer_data.get("transactions", []))
)
else:
actual_num_blocks = num_blocks
# Build blocktest (existing logic)
blocktest = builder.build_blocktest(
fuzzer_data,
num_blocks=actual_num_blocks,
block_strategy=block_strategy,
block_time=block_time,
)
# Generate test name
test_name = generate_test_name(input_path)
fixtures = {test_name: blocktest}
# Write output
output_dir.mkdir(parents=True, exist_ok=True)
input_stem = input_path.stem
output_file = output_dir / f"{input_stem}.json"
with open(output_file, "w") as f:
if pretty:
json.dump(fixtures, f, indent=2)
else:
json.dump(fixtures, f)
# Report success (CRITICAL: must flush)
print(f"DONE {output_file}", flush=True)
except FileNotFoundError:
print(f"ERROR: file not found: {input_path}", flush=True)
except json.JSONDecodeError as e:
print(f"ERROR: invalid JSON in {input_path}: {e}", flush=True)
# Log full traceback to stderr for debugging
traceback.print_exc(file=sys.stderr)
except Exception as e:
print(f"ERROR: conversion failed for {input_path}: {e}", flush=True)
# Log full traceback to stderr for debugging
traceback.print_exc(file=sys.stderr)
except KeyboardInterrupt:
break
except Exception as e:
# Catch-all for unexpected errors in the main loop
print(f"ERROR: unexpected error in batch loop: {e}", flush=True)
traceback.print_exc(file=sys.stderr)
@click.command()
@click.argument(
"input_path",
type=click.Path(exists=True, dir_okay=True, file_okay=True, path_type=Path),
required=False,
)
@click.argument(
"output_path",
type=click.Path(dir_okay=True, file_okay=False, path_type=Path),
required=False,
)
@click.option(
"--fork",
default=None,
help="Override fork specified in fuzzer output",
)
@click.option(
"--evm-bin",
type=click.Path(exists=True, path_type=Path),
help="Path to evm binary for transition tool",
)
@click.option(
"--pretty",
is_flag=True,
help="Pretty-print JSON output",
)
@click.option(
"--merge",
is_flag=True,
help="Merge all tests into a single output file",
)
@click.option(
"--quiet",
is_flag=True,
help="Suppress progress output",
)
@click.option(
"--parallel/--no-parallel",
default=True,
help="Enable/disable parallel processing (default: enabled)",
)
@click.option(
"-n",
"--workers",
type=int,
default=None,
help="Number of parallel workers (default: auto-detect based on CPU count)",
)
@click.option(
"-b",
"--num-blocks",
type=int,
default=1,
help="Number of blocks to generate from fuzzer input (default: 1)",
)
@click.option(
"--block-strategy",
type=click.Choice(["distribute", "first-block"]),
default="distribute",
help="Transaction distribution strategy: 'distribute' splits txs evenly, "
"'first-block' puts all txs in first block (default: distribute)",
)
@click.option(
"--block-time",
type=int,
default=12,
help="Seconds between blocks (default: 12)",
)
@click.option(
"--random-blocks",
is_flag=True,
help="Randomly choose number of blocks (1 to min(num_txs, 10))",
)
@click.option(
"--batch",
is_flag=True,
help="Persistent batch mode: read file paths from stdin, output to stdout",
)
def main(
input_path: Optional[Path],
output_path: Optional[Path],
fork: Optional[str],
evm_bin: Optional[Path],
pretty: bool,
merge: bool,
quiet: bool,
parallel: bool,
workers: Optional[int],
num_blocks: int,
block_strategy: str,
block_time: int,
random_blocks: bool,
batch: bool,
) -> None:
"""
Convert fuzzer output to valid blocktest fixtures.
INPUT_PATH: Input JSON file or directory (not required in --batch mode)
OUTPUT_PATH: Output directory for fixtures (not required in --batch mode)
In batch mode (--batch), reads input/output pairs from stdin.
"""
# Batch mode: persistent stdin/stdout processing
if batch:
batch_mode(
fork=fork,
evm_bin=evm_bin,
pretty=pretty,
num_blocks=num_blocks,
block_strategy=block_strategy,
block_time=block_time,
random_blocks=random_blocks,
)
return
# Standard mode: require input_path and output_path
if input_path is None or output_path is None:
raise click.UsageError(
"INPUT_PATH and OUTPUT_PATH are required when not using --batch mode"
)
# Create transition tool
t8n: TransitionTool
if evm_bin:
t8n = GethTransitionTool(binary=evm_bin)
else:
t8n = GethTransitionTool()
# Create builder
builder = BlocktestBuilder(t8n)
# Ensure output directory exists
output_path.mkdir(parents=True, exist_ok=True)
# Process input
if input_path.is_file():
# Single file processing
process_single_file(
input_path,
output_path,
builder,
fork,
pretty,
quiet,
num_blocks,
block_strategy,
block_time,
random_blocks,
)
else:
# Directory processing with optional parallel mode
if parallel:
process_directory_parallel(
input_path,
output_path,
fork,
pretty,
merge,
quiet,
evm_bin,
workers,
num_blocks,
block_strategy,
block_time,
random_blocks,
)
else:
process_directory(
input_path,
output_path,
builder,
fork,
pretty,
merge,
quiet,
num_blocks,
block_strategy,
block_time,
random_blocks,
)
if __name__ == "__main__":
main()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/blocktest_builder.py | src/cli/fuzzer_bridge/blocktest_builder.py | """Build valid blocktests from fuzzer-generated transactions and pre-state."""
import json
import random
from pathlib import Path
from typing import Any, Dict, Optional
from ethereum_clis import GethTransitionTool, TransitionTool
from ethereum_test_fixtures import BlockchainFixture
from .converter import blockchain_test_from_fuzzer
from .models import FuzzerOutput
def choose_random_num_blocks(num_txs: int, max_blocks: int = 10) -> int:
"""
Choose random number of blocks for given transaction count.
Selects a random number between 1 and min(num_txs, max_blocks) to enable
testing of various block configurations.
Args:
num_txs: Number of transactions to distribute
max_blocks: Maximum number of blocks (default: 10)
Returns:
Random integer between 1 and min(num_txs, max_blocks)
"""
if num_txs == 0:
return 1 # Allow empty block testing
return random.randint(1, min(num_txs, max_blocks))
class BlocktestBuilder:
"""Build valid blocktests from fuzzer-generated transactions."""
def __init__(self, transition_tool: Optional[TransitionTool] = None):
"""Initialize the builder with optional transition tool."""
self.t8n = transition_tool or GethTransitionTool()
def build_blocktest(
self,
fuzzer_output: Dict[str, Any],
num_blocks: int = 1,
block_strategy: str = "distribute",
block_time: int = 12,
) -> Dict[str, Any]:
"""Build a valid blocktest from fuzzer output."""
# Parse and validate using Pydantic model
fuzzer_data = FuzzerOutput(**fuzzer_output)
# Get fork
fork = fuzzer_data.fork
# Create BlockchainTest using converter
test = blockchain_test_from_fuzzer(
fuzzer_data,
fork,
num_blocks=num_blocks,
block_strategy=block_strategy,
block_time=block_time,
)
# Generate fixture
fixture = test.generate(
t8n=self.t8n,
fork=fork,
fixture_format=BlockchainFixture,
)
return fixture.model_dump(exclude_none=True, by_alias=True, mode="json")
def build_and_save(self, fuzzer_output: Dict[str, Any], output_path: Path) -> Path:
"""Build blocktest and save to file."""
blocktest = self.build_blocktest(fuzzer_output)
fixtures = {"fuzzer_generated_test": blocktest}
with open(output_path, "w") as f:
json.dump(fixtures, f, indent=2)
return output_path
def build_blocktest_from_fuzzer(
fuzzer_data: Dict[str, Any], t8n: Optional[TransitionTool] = None
) -> Dict[str, Any]:
"""Build blocktest from fuzzer output."""
builder = BlocktestBuilder(t8n)
return builder.build_blocktest(fuzzer_data)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/models.py | src/cli/fuzzer_bridge/models.py | """
Pydantic models for fuzzer output format v2.
This module defines Data Transfer Objects (DTOs) for parsing
fuzzer output. These DTOs are intentionally separate from EEST
domain models (Transaction, Account) to maintain clean separation
between external data format and internal representation.
Design Principle:
- DTOs (this file): Parse external JSON-RPC standard format
- Domain Models (EEST): Internal test generation logic
- Converter (converter.py): Explicit transformation between the two
"""
from typing import Dict, List
from pydantic import BaseModel, Field
from ethereum_test_base_types import AccessList, Address, Bytes, CamelModel, Hash, HexNumber
from ethereum_test_forks import Fork
from ethereum_test_types import Environment
class FuzzerAccountInput(BaseModel):
"""
Raw account data from fuzzer output.
This is a DTO that accepts fuzzer's JSON format without triggering
EEST's Account validation logic or defaults.
"""
balance: HexNumber
nonce: HexNumber = HexNumber(0)
code: Bytes = Bytes(b"")
storage: Dict[HexNumber, HexNumber] = Field(default_factory=dict)
private_key: Hash | None = Field(None, alias="privateKey")
class Config:
"""Pydantic configuration."""
populate_by_name = True
class FuzzerAuthorizationInput(BaseModel):
"""
Raw authorization tuple from fuzzer output (EIP-7702).
Accepts fuzzer's camelCase JSON format.
"""
chain_id: HexNumber = Field(..., alias="chainId")
address: Address
nonce: HexNumber
v: HexNumber # yParity
r: HexNumber
s: HexNumber
class Config:
"""Pydantic configuration."""
populate_by_name = True
class FuzzerTransactionInput(BaseModel):
"""
Raw transaction data from fuzzer output.
This is a DTO that accepts standard Ethereum JSON-RPC transaction format
without triggering EEST's Transaction.model_post_init logic.
Key differences from EEST Transaction:
- Uses "gas" not "gas_limit" (JSON-RPC standard)
- Uses "data" not "input" (JSON-RPC standard)
- Uses "from" not "sender" (JSON-RPC standard)
- No automatic TestAddress injection
- No automatic transaction type detection
- No automatic signature handling
"""
from_: Address = Field(..., alias="from")
to: Address | None = None
gas: HexNumber # Will be mapped to gas_limit in converter
gas_price: HexNumber | None = Field(None, alias="gasPrice")
max_fee_per_gas: HexNumber | None = Field(None, alias="maxFeePerGas")
max_priority_fee_per_gas: HexNumber | None = Field(None, alias="maxPriorityFeePerGas")
nonce: HexNumber
data: Bytes = Bytes(b"") # Will be mapped to data/input in converter
value: HexNumber = HexNumber(0)
access_list: List[AccessList] | None = Field(None, alias="accessList")
blob_versioned_hashes: List[Hash] | None = Field(None, alias="blobVersionedHashes")
max_fee_per_blob_gas: HexNumber | None = Field(None, alias="maxFeePerBlobGas")
authorization_list: List[FuzzerAuthorizationInput] | None = Field(
None, alias="authorizationList"
)
class Config:
"""Pydantic configuration."""
populate_by_name = True
class FuzzerOutput(CamelModel):
"""
Main fuzzer output format v2.
This is the top-level DTO that parses the complete fuzzer
output JSON. It uses pure DTOs (FuzzerAccountInput,
FuzzerTransactionInput) to avoid triggering EEST domain
model logic during parsing.
After parsing, the converter will transform these DTOs into
EEST domain models.
"""
version: str = Field(..., pattern="^2\\.0$")
fork: Fork
chain_id: HexNumber = Field(HexNumber(1))
accounts: Dict[Address, FuzzerAccountInput]
transactions: List[FuzzerTransactionInput]
env: Environment
parent_beacon_block_root: Hash | None = None
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/converter.py | src/cli/fuzzer_bridge/converter.py | """
Converter module for transforming fuzzer DTOs to EEST domain models.
This module performs explicit transformation from fuzzer's
JSON-RPC format (captured in DTOs) to EEST's internal domain
models (Transaction, Account, etc.).
Key Responsibilities:
1. Field mapping (gas → gas_limit, from → sender, etc.)
2. Creating EOA objects from private keys
3. Building proper EEST domain models with all required context
4. Preventing TestAddress pollution by setting sender
BEFORE model_post_init
"""
from typing import Dict
from ethereum_test_base_types import Address, Hash, HexNumber
from ethereum_test_forks import Fork
from ethereum_test_specs import BlockchainTest
from ethereum_test_tools import Account, AuthorizationTuple, Block, Transaction
from ethereum_test_types import Alloc, Environment
from ethereum_test_types.account_types import EOA
from .models import (
FuzzerAccountInput,
FuzzerAuthorizationInput,
FuzzerOutput,
FuzzerTransactionInput,
)
def fuzzer_account_to_eest_account(fuzzer_account: FuzzerAccountInput) -> Account:
"""
Convert fuzzer account DTO to EEST Account domain model.
Args:
fuzzer_account: Raw account data from fuzzer
Returns:
EEST Account ready for pre-state
"""
return Account(
balance=fuzzer_account.balance,
nonce=fuzzer_account.nonce,
code=fuzzer_account.code,
storage=fuzzer_account.storage,
)
def fuzzer_authorization_to_eest(
fuzzer_auth: FuzzerAuthorizationInput,
) -> AuthorizationTuple:
"""
Convert fuzzer authorization DTO to EEST AuthorizationTuple.
Args:
fuzzer_auth: Raw authorization data from fuzzer
Returns:
EEST AuthorizationTuple for EIP-7702 transactions
"""
return AuthorizationTuple(
chain_id=fuzzer_auth.chain_id,
address=fuzzer_auth.address,
nonce=fuzzer_auth.nonce,
v=fuzzer_auth.v,
r=fuzzer_auth.r,
s=fuzzer_auth.s,
)
def fuzzer_transaction_to_eest_transaction(
fuzzer_tx: FuzzerTransactionInput,
sender_eoa: EOA,
) -> Transaction:
"""
Convert fuzzer transaction DTO to EEST Transaction domain model.
This function performs explicit field mapping and MUST set sender BEFORE
calling Transaction constructor to prevent TestAddress injection.
Key Mappings:
- fuzzer_tx.gas → transaction.gas_limit (JSON-RPC → EEST naming)
- fuzzer_tx.from_ → sender_eoa (Address → EOA with private key)
- fuzzer_tx.data → transaction.data (same field, explicit for clarity)
Args:
fuzzer_tx: Raw transaction data from fuzzer
sender_eoa: EOA object created from private key (prevents TestAddress)
Returns:
EEST Transaction ready for block generation
"""
# Build authorization list if present
auth_list = None
if fuzzer_tx.authorization_list:
auth_list = [fuzzer_authorization_to_eest(auth) for auth in fuzzer_tx.authorization_list]
# Create Transaction with sender set BEFORE model_post_init runs
# This prevents Transaction.model_post_init from injecting TestAddress
return Transaction(
sender=sender_eoa, # ✓ Set explicitly to prevent TestAddress
to=fuzzer_tx.to,
gas_limit=fuzzer_tx.gas, # ✓ Explicit mapping: gas → gas_limit
gas_price=fuzzer_tx.gas_price,
max_fee_per_gas=fuzzer_tx.max_fee_per_gas,
max_priority_fee_per_gas=fuzzer_tx.max_priority_fee_per_gas,
nonce=fuzzer_tx.nonce,
data=fuzzer_tx.data,
value=fuzzer_tx.value,
access_list=fuzzer_tx.access_list,
blob_versioned_hashes=fuzzer_tx.blob_versioned_hashes,
max_fee_per_blob_gas=fuzzer_tx.max_fee_per_blob_gas,
authorization_list=auth_list,
)
def create_sender_eoa_map(accounts: Dict[Address, FuzzerAccountInput]) -> Dict[Address, EOA]:
"""
Create map of addresses to EOA objects from accounts with private keys.
Args:
accounts: Dictionary of address to fuzzer account data
Returns:
Dictionary mapping addresses to EOA objects for transaction signing
Raises:
AssertionError: If private key doesn't match the account address
"""
senders: Dict[Address, EOA] = {}
for addr, account in accounts.items():
if account.private_key is None:
continue
# Create EOA from private key
sender = EOA(key=account.private_key)
# Verify private key matches address (safety check)
assert Address(sender) == addr, (
f"Private key for account {addr} does not match derived address {sender}"
)
senders[addr] = sender
return senders
def blockchain_test_from_fuzzer(
fuzzer_output: FuzzerOutput,
fork: Fork,
num_blocks: int = 1,
block_strategy: str = "distribute",
block_time: int = 12,
) -> BlockchainTest:
"""
Convert fuzzer output to BlockchainTest instance.
This is the main entry point for fuzzer-to-EEST conversion.
It orchestrates:
1. Parsing and validation (already done by FuzzerOutput DTO)
2. Creating EOA objects from private keys
3. Converting DTOs to domain models
4. Building blocks and test structure
Args:
fuzzer_output: Parsed and validated fuzzer output (DTO)
fork: Fork to use for the test
num_blocks: Number of blocks to generate
block_strategy: How to distribute transactions across blocks
- "distribute": Split evenly maintaining
nonce order
- "first-block": All transactions in first
block
block_time: Seconds between block timestamps
Returns:
BlockchainTest instance ready for fixture generation
Raises:
AssertionError: If invariants are violated
(sender validation, etc.)
"""
# Step 1: Convert accounts to EEST Account domain models
pre_dict: Dict[Address, Account | None] = {}
for addr, fuzzer_account in fuzzer_output.accounts.items():
pre_dict[addr] = fuzzer_account_to_eest_account(fuzzer_account)
pre = Alloc(pre_dict)
# Step 2: Create EOA map for transaction signing
sender_eoa_map = create_sender_eoa_map(fuzzer_output.accounts)
# Step 3: Convert transactions to EEST Transaction domain models
eest_transactions: list[Transaction] = []
for fuzzer_tx in fuzzer_output.transactions:
# Verify sender has private key
assert fuzzer_tx.from_ in sender_eoa_map, (
f"Sender {fuzzer_tx.from_} not found in accounts with private keys"
)
# Convert with explicit sender (prevents TestAddress injection)
eest_tx = fuzzer_transaction_to_eest_transaction(
fuzzer_tx,
sender_eoa=sender_eoa_map[fuzzer_tx.from_],
)
eest_transactions.append(eest_tx)
# Step 4: Build genesis environment
env = fuzzer_output.env
genesis_env = Environment(
fee_recipient=env.fee_recipient,
difficulty=0, # Post-merge
gas_limit=int(env.gas_limit),
number=0,
timestamp=HexNumber(int(env.timestamp) - 12),
prev_randao=env.prev_randao or Hash(0),
base_fee_per_gas=env.base_fee_per_gas if env.base_fee_per_gas else None,
excess_blob_gas=env.excess_blob_gas if env.excess_blob_gas else None,
blob_gas_used=env.blob_gas_used if env.blob_gas_used else None,
).set_fork_requirements(fork)
# Step 5: Distribute transactions across blocks
blocks = _distribute_transactions_to_blocks(
eest_transactions,
num_blocks,
block_strategy,
block_time,
env,
fuzzer_output.parent_beacon_block_root,
)
return BlockchainTest(
pre=pre,
blocks=blocks,
post={}, # Post-state verification can be added later
genesis_environment=genesis_env,
chain_id=fuzzer_output.chain_id,
)
def _distribute_transactions_to_blocks(
transactions: list[Transaction],
num_blocks: int,
strategy: str,
block_time: int,
base_env: Environment,
parent_beacon_block_root: Hash | None,
) -> list[Block]:
"""
Distribute transactions across multiple blocks.
Args:
transactions: List of EEST Transaction objects (ready for execution)
num_blocks: Number of blocks to create
strategy: Distribution strategy ("distribute" or "first-block")
block_time: Seconds between blocks
base_env: Base environment for first block
parent_beacon_block_root: Beacon root (only for first block)
Returns:
List of Block objects
"""
if strategy == "first-block":
# All transactions in first block, rest empty
tx_distribution = [transactions] + [[] for _ in range(num_blocks - 1)]
elif strategy == "distribute":
# Split transactions evenly maintaining nonce order
if not transactions:
tx_distribution = [[] for _ in range(num_blocks)]
else:
result = []
chunk_size = len(transactions) // num_blocks
remainder = len(transactions) % num_blocks
start = 0
for i in range(num_blocks):
# Distribute remainder across first blocks
current_chunk_size = chunk_size + (1 if i < remainder else 0)
end = start + current_chunk_size
result.append(transactions[start:end])
start = end
tx_distribution = result
else:
raise ValueError(f"Unknown block strategy: {strategy}")
# Create blocks with incrementing timestamps
base_timestamp = int(base_env.timestamp)
blocks = []
for i, block_txs in enumerate(tx_distribution):
blocks.append(
Block(
txs=block_txs,
timestamp=base_timestamp + (i * block_time),
fee_recipient=base_env.fee_recipient,
parent_beacon_block_root=parent_beacon_block_root if i == 0 else None,
)
)
return blocks
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/__init__.py | src/cli/fuzzer_bridge/__init__.py | """Fuzzer bridge for converting blocktest-fuzzer output to blocktests."""
from .blocktest_builder import BlocktestBuilder, build_blocktest_from_fuzzer
__all__ = ["BlocktestBuilder", "build_blocktest_from_fuzzer"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fuzzer_bridge/production_test.py | src/cli/fuzzer_bridge/production_test.py | #!/usr/bin/env python
"""
Production-ready test suite for fuzzer bridge with geth verification.
This script:
1. Loads fuzzer output
2. Converts to blockchain test
3. Generates fixtures
4. Verifies with go-ethereum
5. Reports comprehensive results
"""
import argparse
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Any, Dict, List, Optional
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from ethereum_clis import GethTransitionTool
from ethereum_test_fixtures.blockchain import BlockchainFixture
from ethereum_test_specs.blockchain import BlockchainTest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
Environment,
Transaction,
)
class FuzzerBridge:
"""Production-ready fuzzer bridge with validation and verification."""
def __init__(
self, t8n_path: Optional[str] = None, verbose: bool = False, keep_fixtures: bool = False
):
"""Initialize bridge with optional transition tool path."""
self.t8n = GethTransitionTool(binary=Path(t8n_path) if t8n_path else None)
self.verbose = verbose
self.keep_fixtures = keep_fixtures
self.stats: Dict[str, Any] = {
"tests_generated": 0,
"tests_passed": 0,
"tests_failed": 0,
"validation_errors": [],
}
def validate_fuzzer_output(self, data: Dict[str, Any]) -> List[str]:
"""Validate fuzzer output format and return list of errors."""
errors = []
# Check version
version = data.get("version", "1.0")
if version != "2.0":
errors.append(f"Unsupported version {version}, expected 2.0")
# Check required fields
required_fields = ["accounts", "transactions", "env", "fork", "chainId"]
for field in required_fields:
if field not in data:
errors.append(f"Missing required field: {field}")
# Validate accounts with transactions have private keys
if "accounts" in data and "transactions" in data:
senders = set()
for tx in data["transactions"]:
sender = tx.get("from") or tx.get("sender")
if sender:
senders.add(sender)
for sender in senders:
if sender not in data["accounts"]:
errors.append(f"Sender {sender} not in accounts")
elif "privateKey" not in data["accounts"][sender]:
errors.append(f"No private key for sender {sender}")
else:
# Validate private key matches address
if not self._validate_key_address(
data["accounts"][sender]["privateKey"], sender
):
errors.append(f"Private key doesn't match address {sender}")
return errors
def _validate_key_address(self, private_key: str, expected_address: str) -> bool:
"""Validate that private key generates expected address."""
try:
from ethereum_test_types import EOA
eoa = EOA(key=private_key)
# EOA class returns the address directly via str()
return str(eoa).lower() == expected_address.lower()
except Exception:
return False
def convert_to_test(self, fuzzer_data: Dict[str, Any]) -> Dict[str, Any]:
"""Convert fuzzer output to test parameters."""
# Validate first
errors = self.validate_fuzzer_output(fuzzer_data)
if errors:
raise ValueError("Validation failed:\n" + "\n".join(errors))
# Build pre-state
pre_state = Alloc()
private_keys = {}
for addr_str, account_data in fuzzer_data["accounts"].items():
addr = Address(addr_str)
if "privateKey" in account_data:
private_keys[addr_str] = account_data["privateKey"]
pre_state[addr] = Account(
balance=int(account_data.get("balance", "0x0"), 16),
nonce=int(account_data.get("nonce", "0x0"), 16),
code=account_data.get("code", ""),
storage=account_data.get("storage", {}),
)
# Create genesis environment (block 0)
env_data = fuzzer_data["env"]
genesis_env = Environment(
fee_recipient=Address(env_data.get("currentCoinbase")),
difficulty=0, # Post-merge
gas_limit=int(env_data.get("currentGasLimit", "0x1000000"), 16),
number=0, # Genesis is block 0
timestamp=int(env_data.get("currentTimestamp", "0x1000"), 16) - 12,
base_fee_per_gas=int(env_data.get("currentBaseFee", "0x7"), 16),
)
# Block 1 environment overrides
block1_env = {
"timestamp": int(env_data.get("currentTimestamp", "0x1000"), 16),
"fee_recipient": Address(env_data.get("currentCoinbase")),
}
# Create transactions
txs = []
for tx_data in fuzzer_data["transactions"]:
sender_addr = tx_data.get("from") or tx_data.get("sender")
secret_key = private_keys[sender_addr]
txs.append(
Transaction(
to=Address(tx_data["to"]) if tx_data.get("to") else None,
value=int(tx_data.get("value", "0x0"), 16),
gas_limit=int(tx_data.get("gas", "0x5208"), 16),
gas_price=int(tx_data.get("gasPrice", "0x1"), 16),
nonce=int(tx_data.get("nonce", "0x0"), 16),
data=bytes.fromhex(tx_data.get("data", "0x")[2:])
if tx_data.get("data", "0x") != "0x"
else b"",
secret_key=secret_key,
)
)
# Create block
block = Block(txs=txs, **block1_env)
return {
"genesis_environment": genesis_env,
"pre": pre_state,
"post": {},
"blocks": [block],
"chain_id": fuzzer_data.get("chainId", 1),
"fork": fuzzer_data.get("fork", "Prague"),
}
def generate_fixture(self, test_params: Dict[str, Any]) -> Dict[str, Any]:
"""Generate blockchain test fixture."""
# Get fork
from ethereum_test_forks import Cancun, Osaka, Prague, Shanghai
fork_map = {"Osaka": Osaka, "Prague": Prague, "Shanghai": Shanghai, "Cancun": Cancun}
fork = fork_map.get(test_params["fork"], Prague)
# Create test
test = BlockchainTest(
genesis_environment=test_params["genesis_environment"],
pre=test_params["pre"],
post=test_params["post"],
blocks=test_params["blocks"],
chain_id=test_params["chain_id"],
)
# Generate fixture
fixture = test.generate(
t8n=self.t8n,
fork=fork,
fixture_format=BlockchainFixture,
)
self.stats["tests_generated"] += 1
return fixture.model_dump(exclude_none=True, by_alias=True)
def verify_with_geth(
self, fixture: Dict[str, Any], geth_path: str, test_name: str = "test"
) -> Dict[str, Any]:
"""Verify fixture with go-ethereum evm tool."""
# Write fixture to temp file
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
json.dump({test_name: fixture}, f, indent=2)
fixture_path = f.name
try:
# Run geth blocktest
result = subprocess.run(
[geth_path, "blocktest", fixture_path], capture_output=True, text=True, timeout=30
)
# Parse output
output = result.stdout + result.stderr
# Check if test passed
if result.returncode == 0 and '"pass": true' in output:
self.stats["tests_passed"] += 1
return {"pass": True, "output": output, "fixture_path": fixture_path}
else:
self.stats["tests_failed"] += 1
# Extract error message
error = "Unknown error"
if '"error":' in output:
import re
match = re.search(r'"error":\s*"([^"]+)"', output)
if match:
error = match.group(1)
return {
"pass": False,
"error": error,
"output": output,
"fixture_path": fixture_path,
}
except subprocess.TimeoutExpired:
self.stats["tests_failed"] += 1
return {"pass": False, "error": "Timeout", "fixture_path": fixture_path}
except Exception as e:
self.stats["tests_failed"] += 1
return {"pass": False, "error": str(e), "fixture_path": fixture_path}
finally:
# Optionally clean up
# Note: args is not defined here - should be passed as parameter
pass
def run_full_test(self, fuzzer_file: str, geth_path: str) -> bool:
"""Run full test pipeline from fuzzer output to geth verification."""
print(f"🔧 Loading fuzzer output from {fuzzer_file}")
with open(fuzzer_file) as f:
fuzzer_data = json.load(f)
print("📋 Fuzzer Output Summary:")
print(f" Version: {fuzzer_data.get('version', 'unknown')}")
print(f" Fork: {fuzzer_data.get('fork', 'unknown')}")
print(f" Accounts: {len(fuzzer_data.get('accounts', {}))}")
print(f" Transactions: {len(fuzzer_data.get('transactions', []))}")
# Validate
print("\n✓ Validating fuzzer output...")
errors = self.validate_fuzzer_output(fuzzer_data)
if errors:
print("❌ Validation failed:")
for error in errors:
print(f" - {error}")
return False
# Convert
print("✓ Converting to test parameters...")
try:
test_params = self.convert_to_test(fuzzer_data)
except Exception as e:
print(f"❌ Conversion failed: {e}")
return False
# Generate fixture
print("✓ Generating blockchain test fixture...")
try:
fixture = self.generate_fixture(test_params)
except Exception as e:
print(f"❌ Fixture generation failed: {e}")
import traceback
traceback.print_exc()
return False
# Extract test info
genesis_hash = fixture.get("genesisBlockHeader", {}).get("hash", "unknown")
pre_count = len(fixture.get("pre", {}))
print(f" Genesis hash: {genesis_hash[:16]}...")
print(f" Pre-state accounts: {pre_count}")
# Verify with geth
print("\n🔍 Verifying with go-ethereum...")
result = self.verify_with_geth(fixture, geth_path, "fuzzer_test")
if result["pass"]:
print("✅ Test PASSED!")
if self.verbose:
print(f" Fixture: {result.get('fixture_path', 'N/A')}")
else:
print("❌ Test FAILED!")
print(f" Error: {result.get('error', 'Unknown')}")
if self.verbose:
print(f" Output:\n{result.get('output', '')}")
if self.keep_fixtures:
print(f" Fixture saved: {result.get('fixture_path', 'N/A')}")
# Print statistics
print("\n📊 Statistics:")
print(f" Tests generated: {self.stats['tests_generated']}")
print(f" Tests passed: {self.stats['tests_passed']}")
print(f" Tests failed: {self.stats['tests_failed']}")
return result["pass"]
def main() -> int:
"""Main entry point for production test."""
parser = argparse.ArgumentParser(
description="Production test for fuzzer bridge with geth verification"
)
parser.add_argument(
"--fuzzer-output",
required=True,
help="Path to fuzzer output JSON file",
)
parser.add_argument("--geth-path", required=True, help="Path to go-ethereum evm binary")
parser.add_argument("--t8n-path", help="Path to transition tool binary (optional)")
parser.add_argument(
"--keep-fixtures",
action="store_true",
help="Keep generated fixture files for debugging",
)
parser.add_argument("--verbose", action="store_true", help="Verbose output")
args = parser.parse_args()
# Check paths exist
if not Path(args.fuzzer_output).exists():
print(f"❌ Fuzzer output not found: {args.fuzzer_output}")
return 1
if not Path(args.geth_path).exists():
print(f"❌ Geth binary not found: {args.geth_path}")
return 1
# Run test
bridge = FuzzerBridge(t8n_path=args.t8n_path)
start_time = time.time()
success = bridge.run_full_test(args.fuzzer_output, args.geth_path)
elapsed = time.time() - start_time
print(f"\n⏱️ Completed in {elapsed:.2f} seconds")
return 0 if success else 1
if __name__ == "__main__":
sys.exit(main())
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/check_eip_versions.py | src/cli/pytest_commands/check_eip_versions.py | """CLI entry point for the EIP version checker pytest-based command."""
from typing import Any, List
import click
from config.check_eip_versions import CheckEipVersionsConfig
from .base import PytestCommand, common_pytest_options
from .processors import HelpFlagsProcessor
@click.command(context_settings={"ignore_unknown_options": True})
@common_pytest_options
def check_eip_versions(pytest_args: List[str], **kwargs: Any) -> None:
"""Run pytest with the `spec_version_checker` plugin."""
del kwargs
command = PytestCommand(
config_file="pytest-check-eip-versions.ini",
argument_processors=[HelpFlagsProcessor("check-eip-versions")],
)
args_with_until = [
"--until",
CheckEipVersionsConfig().UNTIL_FORK,
] + list(pytest_args)
command.execute(args_with_until)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/checklist.py | src/cli/pytest_commands/checklist.py | """CLI entry point for the `checklist` pytest-based command."""
from typing import Any
import click
from .fill import FillCommand
@click.command()
@click.option(
"--output",
"-o",
type=click.Path(file_okay=False, dir_okay=True, writable=True),
default="./checklists",
help="Directory to output the generated checklists (default: ./checklists)",
)
@click.option(
"--eip",
"-e",
type=int,
multiple=True,
help="Generate checklist only for specific EIP(s)",
)
def checklist(output: str, eip: tuple[int, ...], **kwargs: Any) -> None:
"""
Generate EIP test checklists based on pytest.mark.eip_checklist markers.
This command scans test files for eip_checklist markers and generates
filled checklists showing which checklist items have been implemented.
Examples:
# Generate checklists for all EIPs
uv run checklist
# Generate checklist for specific EIP
uv run checklist --eip 7702
# Generate checklists for specific test path
uv run checklist tests/prague/eip7702*
# Specify output directory
uv run checklist --output ./my-checklists
"""
del kwargs
# Add output directory to pytest args
args = ["--checklist-output", output]
# Add EIP filter if specified
for eip_num in eip:
args.extend(["--checklist-eip", str(eip_num)])
command = FillCommand(
plugins=["pytest_plugins.filler.eip_checklist"],
)
command.execute(args)
if __name__ == "__main__":
checklist()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/consume.py | src/cli/pytest_commands/consume.py | """CLI entry point for the `consume` pytest-based command."""
import functools
from pathlib import Path
from typing import Any, Callable, List
import click
from .base import ArgumentProcessor, PytestCommand, common_pytest_options
from .processors import ConsumeCommandProcessor, HelpFlagsProcessor, HiveEnvironmentProcessor
def create_consume_command(
*,
command_logic_test_paths: List[Path],
is_hive: bool = False,
command_name: str = "",
) -> PytestCommand:
"""Initialize consume command with paths and processors."""
processors: List[ArgumentProcessor] = [HelpFlagsProcessor("consume")]
if is_hive:
processors.extend(
[
HiveEnvironmentProcessor(command_name=command_name),
ConsumeCommandProcessor(is_hive=True),
]
)
else:
processors.append(ConsumeCommandProcessor(is_hive=False))
return PytestCommand(
config_file="pytest-consume.ini",
argument_processors=processors,
command_logic_test_paths=command_logic_test_paths,
)
def get_command_logic_test_paths(command_name: str) -> List[Path]:
"""Determine the command paths based on the command name and hive flag."""
base_path = Path("pytest_plugins/consume")
if command_name in ["engine", "rlp"]:
command_logic_test_paths = [
base_path / "simulators" / "simulator_logic" / f"test_via_{command_name}.py"
]
elif command_name == "sync":
command_logic_test_paths = [
base_path / "simulators" / "simulator_logic" / "test_via_sync.py"
]
elif command_name == "direct":
command_logic_test_paths = [base_path / "direct" / "test_via_direct.py"]
else:
raise ValueError(f"Unexpected command: {command_name}.")
return command_logic_test_paths
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def consume() -> None:
"""Consume command to aid client consumption of test fixtures."""
pass
def consume_command(
is_hive: bool = False,
) -> Callable[[Callable[..., Any]], click.Command]:
"""Generate a consume sub-command."""
def decorator(func: Callable[..., Any]) -> click.Command:
command_name = func.__name__
command_help = func.__doc__
command_logic_test_paths = get_command_logic_test_paths(command_name)
@consume.command(
name=command_name,
help=command_help,
context_settings={"ignore_unknown_options": True},
)
@common_pytest_options
@functools.wraps(func)
def command(pytest_args: List[str], **kwargs: Any) -> None:
del kwargs
consume_cmd = create_consume_command(
command_logic_test_paths=command_logic_test_paths,
is_hive=is_hive,
command_name=command_name,
)
consume_cmd.execute(list(pytest_args))
return command
return decorator
@consume_command(is_hive=False)
def direct() -> None:
"""Clients consume directly via the `blocktest` interface."""
pass
@consume_command(is_hive=True)
def rlp() -> None:
"""Client consumes RLP-encoded blocks on startup."""
pass
@consume_command(is_hive=True)
def engine() -> None:
"""Client consumes via the Engine API."""
pass
@consume_command(is_hive=True)
def sync() -> None:
"""Client consumes via the Engine API with sync testing."""
pass
@consume.command(
context_settings={"ignore_unknown_options": True},
)
@common_pytest_options
def cache(pytest_args: List[str], **kwargs: Any) -> None:
"""Consume command to cache test fixtures."""
del kwargs
cache_cmd = create_consume_command(command_logic_test_paths=[], is_hive=False)
cache_cmd.execute(list(pytest_args))
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/fill.py | src/cli/pytest_commands/fill.py | """CLI entry point for the `fill` pytest-based command."""
from typing import Any, List
import click
from .base import PytestCommand, PytestExecution, common_pytest_options
from .processors import HelpFlagsProcessor, StdoutFlagsProcessor, WatchFlagsProcessor
from .watcher import FileWatcher
class FillCommand(PytestCommand):
"""Pytest command for the fill operation."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize fill command with processors."""
super().__init__(
config_file="pytest-fill.ini",
argument_processors=[
HelpFlagsProcessor("fill"),
StdoutFlagsProcessor(),
WatchFlagsProcessor(),
],
**kwargs,
)
def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
"""
Create execution plan that supports two-phase pre-allocation group
generation.
Returns single execution for normal filling, or two-phase execution
when --generate-pre-alloc-groups or --generate-all-formats is
specified.
"""
processed_args = self.process_arguments(pytest_args)
# Check if we need two-phase execution
if self._should_use_two_phase_execution(processed_args):
processed_args = self._ensure_generate_all_formats_for_tarball(processed_args)
return self._create_two_phase_executions(processed_args)
elif "--use-pre-alloc-groups" in processed_args:
# Only phase 2: using existing pre-allocation groups
return self._create_single_phase_with_pre_alloc_groups(processed_args)
else:
# Normal single-phase execution
return [
PytestExecution(
config_file=self.config_path,
args=processed_args,
)
]
def _create_two_phase_executions(self, args: List[str]) -> List[PytestExecution]:
"""
Create two-phase execution: pre-allocation group generation + fixture
filling.
"""
# Phase 1: Pre-allocation group generation (clean and minimal output)
phase1_args = self._create_phase1_args(args)
# Phase 2: Main fixture generation (full user options)
phase2_args = self._create_phase2_args(args)
return [
PytestExecution(
config_file=self.config_path,
args=phase1_args,
description="generating pre-allocation groups",
),
PytestExecution(
config_file=self.config_path,
args=phase2_args,
description="filling test fixtures",
),
]
def _create_single_phase_with_pre_alloc_groups(self, args: List[str]) -> List[PytestExecution]:
"""Create single execution using existing pre-allocation groups."""
return [
PytestExecution(
config_file=self.config_path,
args=args,
)
]
def _create_phase1_args(self, args: List[str]) -> List[str]:
"""Create arguments for phase 1 (pre-allocation group generation)."""
# Start with all args, then remove what we don't want for phase 1
filtered_args = self._remove_unwanted_phase1_args(args)
# Add required phase 1 flags (with quiet output by default)
phase1_args = [
"--generate-pre-alloc-groups",
"-qq", # Quiet pytest output by default (user -v/-vv/-vvv can
# override)
] + filtered_args
return phase1_args
def _create_phase2_args(self, args: List[str]) -> List[str]:
"""Create arguments for phase 2 (fixture filling)."""
# Remove --generate-pre-alloc-groups and --clean, then add --use-pre-
# alloc-groups
phase2_args = self._remove_generate_pre_alloc_groups_flag(args)
phase2_args = self._remove_clean_flag(phase2_args)
phase2_args = self._add_use_pre_alloc_groups_flag(phase2_args)
return phase2_args
def _remove_unwanted_phase1_args(self, args: List[str]) -> List[str]:
"""Remove arguments we don't want in phase 1 (pre-state generation)."""
unwanted_flags = {
# Output format flags
"--html",
# Report flags (we'll add our own -qq)
"-q",
"--quiet",
"-qq",
"--tb",
# Pre-allocation group flags (we'll add our own)
"--generate-pre-alloc-groups",
"--use-pre-alloc-groups",
"--generate-all-formats",
}
filtered_args = []
i = 0
while i < len(args):
arg = args[i]
# Skip unwanted flags
if arg in unwanted_flags:
# Skip flag and its value if it takes one
if arg in ["--html", "--tb", "-n"] and i + 1 < len(args):
i += 2 # Skip flag and value
else:
i += 1 # Skip just the flag
# Skip unwanted flags with = format
elif any(arg.startswith(f"{flag}=") for flag in unwanted_flags):
i += 1
else:
filtered_args.append(arg)
i += 1
return filtered_args
def _remove_generate_pre_alloc_groups_flag(self, args: List[str]) -> List[str]:
"""
Remove --generate-pre-alloc-groups flag but keep --generate-all-formats
for phase 2.
"""
return [arg for arg in args if arg != "--generate-pre-alloc-groups"]
def _remove_clean_flag(self, args: List[str]) -> List[str]:
"""Remove --clean flag from argument list."""
return [arg for arg in args if arg != "--clean"]
def _add_use_pre_alloc_groups_flag(self, args: List[str]) -> List[str]:
"""Add --use-pre-alloc-groups flag to argument list."""
return args + ["--use-pre-alloc-groups"]
def _should_use_two_phase_execution(self, args: List[str]) -> bool:
"""Determine if two-phase execution is needed."""
return (
"--generate-pre-alloc-groups" in args
or "--generate-all-formats" in args
or self._is_tarball_output(args)
)
def _ensure_generate_all_formats_for_tarball(self, args: List[str]) -> List[str]:
"""Auto-add --generate-all-formats for tarball output."""
if self._is_tarball_output(args) and "--generate-all-formats" not in args:
return args + ["--generate-all-formats"]
return args
def _is_tarball_output(self, args: List[str]) -> bool:
"""Check if output argument specifies a tarball (.tar.gz) path."""
from pathlib import Path
for i, arg in enumerate(args):
if arg.startswith("--output="):
output_path = Path(arg.split("=", 1)[1])
return str(output_path).endswith(".tar.gz")
elif arg == "--output" and i + 1 < len(args):
output_path = Path(args[i + 1])
return str(output_path).endswith(".tar.gz")
return False
def _is_watch_mode(self, args: List[str]) -> bool:
"""Check if any watch flag is present in arguments."""
return any(flag in args for flag in ["--watch", "--watcherfall"])
def _is_verbose_watch_mode(self, args: List[str]) -> bool:
"""
Check if verbose watch flag (--watcherfall)
is present in arguments.
"""
return "--watcherfall" in args
def execute(self, pytest_args: List[str]) -> None:
"""Execute the command with optional watch mode support."""
if self._is_watch_mode(pytest_args):
self._execute_with_watch(pytest_args)
else:
super().execute(pytest_args)
def _execute_with_watch(self, pytest_args: List[str]) -> None:
"""Execute fill command in watch mode."""
verbose = self._is_verbose_watch_mode(pytest_args)
watcher = FileWatcher(console=self.runner.console, verbose=verbose)
watcher.run_with_watch(pytest_args)
class PhilCommand(FillCommand):
"""Friendly fill command with emoji reporting."""
def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
"""Create execution with emoji report options."""
processed_args = self.process_arguments(pytest_args)
emoji_args = processed_args + [
"-o",
"report_passed=🦄",
"-o",
"report_xpassed=🌈",
"-o",
"report_failed=👾",
"-o",
"report_xfailed=🦺",
"-o",
"report_skipped=🦘",
"-o",
"report_error=🚨",
]
return [
PytestExecution(
config_file=self.config_path,
args=emoji_args,
)
]
@click.command(
context_settings={
"ignore_unknown_options": True,
}
)
@common_pytest_options
def fill(pytest_args: List[str], **kwargs: Any) -> None:
"""Entry point for the fill command."""
del kwargs
command = FillCommand()
command.execute(list(pytest_args))
@click.command(
context_settings={
"ignore_unknown_options": True,
}
)
@common_pytest_options
def phil(pytest_args: List[str], **kwargs: Any) -> None:
"""Friendly alias for the fill command."""
del kwargs
command = PhilCommand()
command.execute(list(pytest_args))
if __name__ == "__main__":
# to allow debugging in vscode: in launch config, set "module":
# "cli.pytest_commands.fill"
fill(prog_name="fill")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/watcher.py | src/cli/pytest_commands/watcher.py | """File watcher implementation for --watch flag functionality."""
import os
import subprocess
import time
from pathlib import Path
from typing import Dict, List
from rich.console import Console
class FileWatcher:
"""Simple file watcher that re-runs the fill command on changes."""
def __init__(self, console: Console | None = None, verbose: bool = False) -> None:
"""Initialize the file watcher."""
self.console = console or Console(highlight=False)
self.verbose = verbose
def run_with_watch(self, args: List[str]) -> None:
"""Watch for file changes and re-run fill command."""
file_mtimes: Dict[Path, float] = {}
def get_file_mtimes() -> Dict[Path, float]:
"""Get current modification times of all test and source files."""
mtimes = {}
# Watch tests directory
tests_dir = Path("tests")
if tests_dir.exists():
for py_file in tests_dir.rglob("*.py"):
try:
mtimes[py_file] = py_file.stat().st_mtime
except (OSError, FileNotFoundError):
pass
# Watch src directory
src_dir = Path("src")
if src_dir.exists():
for py_file in src_dir.rglob("*.py"):
try:
mtimes[py_file] = py_file.stat().st_mtime
except (OSError, FileNotFoundError):
pass
return mtimes
def run_fill() -> None:
"""Run fill command without --watch / --watcherfall flag."""
clean_args = [arg for arg in args if arg not in ["--watch", "--watcherfall"]]
cmd = ["uv", "run", "fill"] + clean_args
result = subprocess.run(cmd)
if result.returncode == 0:
self.console.print("[green]✓ Fill completed[/green]")
else:
self.console.print(f"[red]✗ Fill failed (exit {result.returncode})[/red]")
# Setup
mode_desc = "watcherfall mode (verbose)" if self.verbose else "watch mode"
self.console.print(f"[blue]Starting {mode_desc}...[/blue]")
file_mtimes = get_file_mtimes()
# Initial run
self.console.print("[green]Running initial fill...[/green]")
run_fill()
file_count = len(file_mtimes)
self.console.print(
f"[blue]Watching {file_count} files in tests/ and src/ directories."
"\n Press Ctrl+C to stop.[/blue]"
)
# Watch loop
try:
while True:
time.sleep(0.5)
current_mtimes = get_file_mtimes()
if current_mtimes != file_mtimes:
if not self.verbose:
os.system("clear" if os.name != "nt" else "cls")
self.console.print("[yellow]File changes detected, re-running...[/yellow]\n")
run_fill()
file_mtimes = current_mtimes
self.console.print("\n[blue]Watching for changes...[/blue]")
except KeyboardInterrupt:
self.console.print("\n[yellow]Watch mode stopped.[/yellow]")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/processors.py | src/cli/pytest_commands/processors.py | """Argument processors for different pytest command types."""
import os
import sys
import warnings
from typing import List
import click
from .base import ArgumentProcessor
class HelpFlagsProcessor(ArgumentProcessor):
"""Processes help-related flags to provide cleaner help output."""
def __init__(self, command_type: str, required_args: List[str] | None = None):
"""
Initialize the help processor.
Args:
command_type: The type of command (e.g., "fill", "consume",
"execute")
required_args: The arguments that are required for the command to run
"""
self.command_type = command_type
self.required_args = required_args or []
def process_args(self, args: List[str]) -> List[str]:
"""
Modify help arguments to provide cleaner help output.
This makes `--help` more useful because `pytest --help` is extremely
verbose and lists all flags from pytest and pytest plugins.
"""
ctx = click.get_current_context()
if ctx.params.get("help_flag"):
# And also add the required arguments to the help output
return [f"--{self.command_type}-help"] + self.required_args
elif ctx.params.get("pytest_help_flag"):
return ["--help"]
return args
class StdoutFlagsProcessor(ArgumentProcessor):
"""Processes stdout-related flags for the fill command."""
def process_args(self, args: List[str]) -> List[str]:
"""
If the user has requested to write to stdout, add pytest arguments to
suppress pytest's test session header and summary output.
"""
if not self._is_writing_to_stdout(args):
return args
# Check for incompatible xdist plugin
if any(arg == "-n" or arg.startswith("-n=") for arg in args):
sys.exit("error: xdist-plugin not supported with --output=stdout (remove -n args).")
# Add flags to suppress pytest output when writing to stdout
return args + ["-qq", "-s", "--no-html"]
def _is_writing_to_stdout(self, args: List[str]) -> bool:
"""Check if the command is configured to write to stdout."""
if any(arg == "--output=stdout" for arg in args):
return True
if "--output" in args:
output_index = args.index("--output")
if output_index + 1 < len(args) and args[output_index + 1] == "stdout":
return True
return False
class HiveEnvironmentProcessor(ArgumentProcessor):
"""Processes Hive environment variables for consume commands."""
def __init__(self, command_name: str):
"""Initialize the processor with command name to determine plugin."""
self.command_name = command_name
def process_args(self, args: List[str]) -> List[str]:
"""Convert hive environment variables into pytest flags."""
modified_args = args[:]
hive_test_pattern = os.getenv("HIVE_TEST_PATTERN")
if hive_test_pattern and not self._has_regex_or_sim_limit(args):
modified_args.extend(["--sim.limit", hive_test_pattern])
hive_parallelism = os.getenv("HIVE_PARALLELISM")
if hive_parallelism not in [None, "", "1"] and not self._has_parallelism_flag(args):
modified_args.extend(["-n", str(hive_parallelism)])
if os.getenv("HIVE_RANDOM_SEED") is not None:
warnings.warn("HIVE_RANDOM_SEED is not yet supported.", stacklevel=2)
if os.getenv("HIVE_LOGLEVEL") is not None:
warnings.warn("HIVE_LOG_LEVEL is not yet supported.", stacklevel=2)
if self.command_name == "engine":
modified_args.extend(["-p", "pytest_plugins.consume.simulators.engine.conftest"])
elif self.command_name == "sync":
modified_args.extend(["-p", "pytest_plugins.consume.simulators.sync.conftest"])
elif self.command_name == "rlp":
modified_args.extend(["-p", "pytest_plugins.consume.simulators.rlp.conftest"])
else:
raise ValueError(f"Unknown command name: {self.command_name}")
return modified_args
def _has_regex_or_sim_limit(self, args: List[str]) -> bool:
"""Check if args already contain --regex or --sim.limit."""
return "--regex" in args or "--sim.limit" in args
def _has_parallelism_flag(self, args: List[str]) -> bool:
"""Check if args already contain parallelism flag."""
return "-n" in args
class WatchFlagsProcessor(ArgumentProcessor):
"""
Processes --watch and --watcherfall flags
for file watching functionality.
"""
def process_args(self, args: List[str]) -> List[str]:
"""
Remove --watch and --watcherfall
flags from args passed to pytest.
"""
return [arg for arg in args if arg not in ["--watch", "--watcherfall"]]
class ConsumeCommandProcessor(ArgumentProcessor):
"""Processes consume-specific command arguments."""
def __init__(self, is_hive: bool = False):
"""
Initialize the consume processor.
Args:
is_hive: Whether this is a hive-based consume command
"""
self.is_hive = is_hive
def process_args(self, args: List[str]) -> List[str]:
"""Process consume-specific arguments."""
if self.is_hive:
return self._handle_timing_data_stdout(args)
return args
def _handle_timing_data_stdout(self, args: List[str]) -> List[str]:
"""Ensure stdout is captured when timing data is enabled."""
if "--timing-data" in args and "-s" not in args:
return args + ["-s"]
return args
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/__init__.py | src/cli/pytest_commands/__init__.py | """
CLI entry points for the main pytest-based commands provided by
execution-spec-tests.
These can be directly accessed in a prompt if the user has directly installed
the package via:
```
python -m venv venv
source venv/bin/activate
pip install -e .
# or
pip install -e .[docs,lint,test]
```
Then, the entry points can be executed via:
```
fill --help
# for example, or
consume engine
# or
checklist --help
```
They can also be executed (and debugged) directly in an interactive python
shell:
```
from src.cli.pytest_commands.fill import fill
from click.testing import CliRunner
runner = CliRunner()
result = runner.invoke(fill, ["--help"])
print(result.output)
```
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/base.py | src/cli/pytest_commands/base.py | """Base classes and utilities for pytest-based CLI commands."""
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from os.path import realpath
from pathlib import Path
from typing import Any, Callable, List, Optional
import click
import pytest
from rich.console import Console
CURRENT_FOLDER = Path(realpath(__file__)).parent
PACKAGE_INSTALL_FOLDER = CURRENT_FOLDER.parent.parent
PYTEST_INI_FOLDER = CURRENT_FOLDER / "pytest_ini_files"
@dataclass
class PytestExecution:
"""Configuration for a single pytest execution."""
config_file: Path
"""Path to the pytest configuration file (e.g., 'pytest-fill.ini')."""
command_logic_test_paths: List[str] = field(default_factory=list)
"""
List of tests that have to be appended to the start of pytest command
arguments.
"""
args: List[str] = field(default_factory=list)
"""Arguments to pass to pytest."""
description: Optional[str] = None
"""Optional description for this execution phase."""
class ArgumentProcessor(ABC):
"""Base class for processing command-line arguments."""
@abstractmethod
def process_args(self, args: List[str]) -> List[str]:
"""Process the given arguments and return modified arguments."""
pass
@dataclass(kw_only=True)
class PytestRunner:
"""Handles execution of pytest commands."""
console: Console = field(default_factory=lambda: Console(highlight=False))
"""Console to use for output."""
def run_single(self, execution: PytestExecution) -> int:
"""Run pytest once with the given configuration and arguments."""
root_dir_arg = ["--rootdir", "."]
pytest_args = (
["-c", str(execution.config_file)]
+ root_dir_arg
+ [
str(PACKAGE_INSTALL_FOLDER / test_path)
for test_path in execution.command_logic_test_paths
]
+ execution.args
)
if execution.command_logic_test_paths:
pytest_args += [
"-p",
"pytest_plugins.fix_package_test_path",
]
if self._is_verbose(execution.args):
pytest_cmd = f"pytest {' '.join(pytest_args)}"
self.console.print(f"Executing: [bold]{pytest_cmd}[/bold]")
return pytest.main(pytest_args)
def _is_verbose(self, args: List[str]) -> bool:
"""Check if verbose output is requested."""
return any(arg in ["-v", "--verbose", "-vv", "-vvv"] for arg in args)
def run_multiple(self, executions: List[PytestExecution]) -> int:
"""
Run multiple pytest executions in sequence.
Returns the exit code of the final execution, or the first non-zero
exit code.
"""
for i, execution in enumerate(executions):
if execution.description and len(executions) > 1:
phase_text = (
f"[bold blue]phase {i + 1}/{len(executions)}: "
f"{execution.description}[/bold blue]"
)
self.console.rule(phase_text, style="bold blue")
result = self.run_single(execution)
if result != 0:
return result
return 0
@dataclass(kw_only=True)
class PytestCommand:
"""
Base class for pytest-based CLI commands.
Provides a standard structure for commands that execute pytest with
specific configurations and argument processing.
"""
config_file: str
"""File name of the pytest configuration file (e.g., 'pytest-fill.ini')."""
argument_processors: List[ArgumentProcessor] = field(default_factory=list)
"""Processors to apply to the pytest arguments."""
runner: PytestRunner = field(default_factory=PytestRunner)
"""Runner to execute the pytest command."""
plugins: List[str] = field(default_factory=list)
"""Plugins to load for the pytest command."""
command_logic_test_paths: List[Path] | None = None
"""Path to test files that contain the command logic."""
pytest_ini_folder: Path = PYTEST_INI_FOLDER
"""Folder where the pytest configuration files are located."""
@property
def config_path(self) -> Path:
"""Path to the pytest configuration file."""
return self.pytest_ini_folder / self.config_file
def execute(self, pytest_args: List[str]) -> None:
"""Execute the command with the given pytest arguments."""
executions = self.create_executions(pytest_args)
result = self.runner.run_multiple(executions)
sys.exit(result)
@property
def test_args(self) -> List[str]:
"""
Return the test-path arguments that have to be appended to all
PytestExecution instances.
"""
if self.command_logic_test_paths:
return [str(path) for path in self.command_logic_test_paths]
return []
def create_executions(self, pytest_args: List[str]) -> List[PytestExecution]:
"""
Create the list of pytest executions for this command.
This method can be overridden by subclasses to implement multi-phase
execution (e.g., for future fill command).
"""
processed_args = self.process_arguments(pytest_args)
return [
PytestExecution(
config_file=self.config_path,
command_logic_test_paths=self.test_args,
args=processed_args,
)
]
def process_arguments(self, args: List[str]) -> List[str]:
"""Apply all argument processors to the given arguments."""
processed_args = args[:]
for processor in self.argument_processors:
processed_args = processor.process_args(processed_args)
for plugin in self.plugins:
processed_args.extend(["-p", plugin])
return processed_args
def common_pytest_options(func: Callable[..., Any]) -> Callable[..., Any]:
"""
Apply common Click options for pytest-based commands.
This decorator adds the standard help options that all pytest commands use.
"""
func = click.option(
"-h",
"--help",
"help_flag",
is_flag=True,
default=False,
expose_value=True,
help="Show help message.",
)(func)
func = click.option(
"--pytest-help",
"pytest_help_flag",
is_flag=True,
default=False,
expose_value=True,
help="Show pytest's help message.",
)(func)
return click.argument("pytest_args", nargs=-1, type=click.UNPROCESSED)(func)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/pytest_commands/execute.py | src/cli/pytest_commands/execute.py | """CLI entry point for the `execute` pytest-based command."""
from pathlib import Path
from typing import Any, List
import click
from .base import PytestCommand, common_pytest_options
from .processors import HelpFlagsProcessor
@click.group(
context_settings={
"help_option_names": ["-h", "--help"],
}
)
def execute() -> None:
"""Execute command to run tests in hive or live networks."""
pass
def _create_execute_subcommand(
    command_name: str,
    config_file: str,
    help_text: str,
    required_args: List[str] | None = None,
    command_logic_test_paths: List[Path] | None = None,
) -> click.Command:
    """Build one `execute` subcommand wired to a shared PytestCommand runner."""
    # Runner shared by every invocation of this subcommand.
    runner = PytestCommand(
        config_file=config_file,
        argument_processors=[HelpFlagsProcessor(f"execute-{command_name}", required_args)],
        command_logic_test_paths=command_logic_test_paths,
    )

    @execute.command(
        name=command_name,
        help=help_text,
        context_settings={"ignore_unknown_options": True},
    )
    @common_pytest_options
    def command(pytest_args: List[str], **_kwargs: Any) -> None:
        runner.execute(list(pytest_args))

    # Click renders the command help from __doc__; keep it in sync.
    command.__doc__ = help_text
    return command
# Create the subcommands, using explicit keyword arguments for clarity.
hive = _create_execute_subcommand(
    command_name="hive",
    config_file="pytest-execute-hive.ini",
    help_text="Execute tests using hive as a backend (`./hive --dev`).",
)

remote = _create_execute_subcommand(
    command_name="remote",
    config_file="pytest-execute.ini",
    help_text="Execute tests using a remote RPC endpoint.",
    required_args=[
        "--rpc-endpoint=http://localhost:8545",
        "--rpc-chain-id=1",
        "--rpc-seed-key=1",
    ],
)

eth_config = _create_execute_subcommand(
    command_name="eth-config",
    config_file="pytest-execute-eth-config.ini",
    help_text="Test a client's configuration using the `eth_config` RPC endpoint.",
    required_args=["--network=Mainnet", "--rpc-endpoint=http://localhost:8545"],
    command_logic_test_paths=[Path("pytest_plugins/execute/eth_config/execute_eth_config.py")],
)

recover = _create_execute_subcommand(
    command_name="recover",
    config_file="pytest-execute-recover.ini",
    help_text="Recover funds from test executions using a remote RPC endpoint.",
    required_args=[
        "--rpc-endpoint=http://localhost:8545",
        "--rpc-chain-id=1",
        "--start-eoa-index=1",
        "--destination=0x0000000000000000000000000000000000000000",
    ],
    command_logic_test_paths=[Path("pytest_plugins/execute/execute_recover.py")],
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/input/input_repository.py | src/cli/input/input_repository.py | """An abstract base class for handling interactive CLI inputs."""
from abc import ABC, abstractmethod
from typing import List
class InputRepository(ABC):
    """
    Abstract interface for collecting interactive CLI input.

    Concrete implementations supply the prompt mechanics; callers depend only
    on this interface, so input backends can be swapped out.
    """

    @abstractmethod
    def input_text(self, question: str) -> str:
        """Ask a text input question."""

    @abstractmethod
    def input_password(self, question: str) -> str:
        """Ask a password input question (hidden)."""

    @abstractmethod
    def input_select(self, question: str, choices: List[str]) -> str:
        """Ask a single-choice selection question."""

    @abstractmethod
    def input_checkbox(self, question: str, choices: List[str]) -> List[str]:
        """Ask a multi-choice question."""

    @abstractmethod
    def input_confirm(self, question: str) -> bool:
        """Ask a yes/no confirmation question."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/input/questionary_input_repository.py | src/cli/input/questionary_input_repository.py | """
Interactive CLI inputs using questionary library.
See: https://questionary.readthedocs.io/.
"""
from questionary import checkbox, confirm, password, select, text
from .input_repository import InputRepository
class QuestionaryInputRepository(InputRepository):
    """
    Questionary-backed implementation of the input interface.

    See: https://questionary.readthedocs.io/.
    """

    def input_text(self, question: str) -> str:
        """Ask a text input question."""
        prompt = text(message=question)
        return prompt.ask()

    def input_password(self, question: str) -> str:
        """Ask a password input question (hidden)."""
        prompt = password(message=question)
        return prompt.ask()

    def input_select(self, question: str, choices: list) -> str:
        """Ask a single-choice selection question."""
        prompt = select(message=question, choices=choices)
        return prompt.ask()

    def input_checkbox(self, question: str, choices: list) -> list:
        """Ask a multi-choice question."""
        prompt = checkbox(message=question, choices=choices)
        return prompt.ask()

    def input_confirm(self, question: str) -> bool:
        """Ask a yes/no confirmation question."""
        prompt = confirm(message=question)
        return prompt.ask()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/input/__init__.py | src/cli/input/__init__.py | """A standard interface for interactive CLI inputs."""
from .questionary_input_repository import QuestionaryInputRepository
# Module-level singleton backing the convenience wrapper functions below.
input_repository = QuestionaryInputRepository()
def input_text(question: str) -> str:
    """Prompt the user with *question* and return their free-form text answer."""
    return input_repository.input_text(question)
def input_password(question: str) -> str:
    """Prompt the user with *question* and return the hidden (password) answer."""
    return input_repository.input_password(question)
def input_select(question: str, choices: list) -> str:
    """Prompt the user to pick exactly one entry from *choices*."""
    return input_repository.input_select(question, choices)
def input_checkbox(question: str, choices: list) -> list:
    """Prompt the user to pick any number of entries from *choices*."""
    return input_repository.input_checkbox(question, choices)
def input_confirm(question: str) -> bool:
    """Ask a yes/no question; True means 'yes'."""
    return input_repository.input_confirm(question)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/cli.py | src/cli/eest/cli.py | """
`eest` is a CLI tool that helps with routine tasks.
Invoke using `uv run eest`.
"""
import click
from .commands import clean, info
from .make.cli import make
@click.group(
    context_settings=dict(
        help_option_names=["-h", "--help"],
        max_content_width=120,
    )
)
def eest() -> None:
    """`eest` is a CLI tool that helps with routine tasks."""
"""
################################
|| ||
|| Command Registration ||
|| ||
################################
Register nested commands here. For more information, see Click documentation:
https://click.palletsprojects.com/en/8.0.x/commands/#nested-handling-and-contexts
"""
# Attach each subcommand to the top-level `eest` group.
eest.add_command(make)
eest.add_command(clean)
eest.add_command(info)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/quotes.py | src/cli/eest/quotes.py | """List of quotes related to system design."""
import random
import textwrap
# NOTE: adjacent string literals are implicitly concatenated, so every
# continuation line must start with a leading space.
make_something_great = [
    "🎨 Simplicity is the ultimate sophistication. - Leonardo D.",
    "🖌️ Simplicity is an acquired taste. - Katharine G.",
    "💡 To create a memorable design you need to start with a thought that’s worth remembering."
    " - Thomas M.",
    "🚀 Well begun is half done. - Aristotle",
    "🖌️ Designers are crazy and yet sane enough to know where to draw the line. - Benjamin W.",
    "🌟 Creativity is piercing the mundane to find the marvelous. - Bill M.",
    "🔍 Mistakes are the portals of discovery. - James J.",
    "🧠 It’s extremely difficult to be simultaneously concerned with the end-user experience of"
    " whatever it is that you’re building and the architecture of the program that delivers that"
    # Fixed: this continuation line was missing its leading space, which
    # produced "delivers thatexperience." in the rendered quote.
    " experience. - James H.",
    "🧠 Good design is a lot like clear thinking made visual. - Edward T.",
    "🚀 Innovation leads one to see the new in the old and distinguishes the ingenious from the"
    " ingenuous. - Paul R.",
    "🔮 The best way to predict the future is to invent it. - Alan K.",
    "🌟 Perfection is achieved, not when there is nothing more to add, but when there is nothing"
    " left to take away. - Antoine d.",
    "📏 You can’t improve what you don’t measure. - Tom D.",
]
def wrap_quote(quote: str, width: int = 80) -> str:
    """Re-flow *quote* so that no output line exceeds *width* characters."""
    return textwrap.fill(quote, width)
def box_quote(quote: str) -> str:
    """Return *quote* wrapped to 80 columns and enclosed in an ASCII box.

    Fixes the original rendering, which computed a border two characters
    wider than the content and never drew the `|` side borders that its
    own comment ("Create the sides of the box") promised.
    """
    # Wrap the quote first (80-column default, same as wrap_quote).
    lines = textwrap.fill(quote, 80).split("\n")
    # Width of the widest content line; borders add "| " and " |" around it.
    inner_width = max(len(line) for line in lines)
    top_bottom_border = "+" + "-" * (inner_width + 2) + "+"
    boxed_lines = [f"| {line.ljust(inner_width)} |" for line in lines]
    rendered = "\n".join([top_bottom_border, *boxed_lines, top_bottom_border])
    return f"\n{rendered}\n"
def get_quote() -> str:
    """Pick a random inspirational quote and return it rendered inside a box."""
    chosen = random.choice(make_something_great)
    return box_quote(chosen)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/__init__.py | src/cli/eest/__init__.py | """Entry point for the `eest` command line interface."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/commands/clean.py | src/cli/eest/commands/clean.py | """Clean CLI command removes generated files and directories."""
import glob
import os
import shutil
import click
@click.command(short_help="Remove all generated files and directories.")
@click.option(
    "--all",
    "all_files",
    is_flag=True,
    help="Remove the virtual environment and .tox directory as well.",
)
@click.option("--dry-run", is_flag=True, help="Simulate the cleanup without removing files.")
@click.option("-v", "--verbose", is_flag=True, help="Show verbose output.")
def clean(all_files: bool, dry_run: bool, verbose: bool) -> None:
    """
    Remove all generated files and directories from the repository.

    If `--all` is specified, the virtual environment and .tox directory will
    also be removed.

    Args:
        all_files (bool): Remove the virtual environment and .tox directory
            as well.
        dry_run (bool): Simulate the cleanup without removing files.
        verbose (bool): Show verbose output.

    Note: The virtual environment and .tox directory are not removed by
    default.

    Example: Cleaning all generated files and directories and show the deleted
    items.

        uv run eest clean --all -v

    Output:

        \b
        🗑️ Deleted: .tox
        🗑️ Deleted: .venv
        🗑️ Deleted: src/cli/et/__pycache__
        🗑️ Deleted: src/cli/et/commands/__pycache__
        🗑️ Deleted: src/cli/et/make/__pycache__
        🗑️ Deleted: src/cli/et/make/commands/__pycache__
        ...
        🧹 Cleanup complete!
    """  # noqa: D417, D301
    # Static targets (files and directories), extended below with
    # glob-matched __pycache__ directories.
    targets = [
        ".pytest_cache",
        ".mypy_cache",
        "fixtures",
        "build",
        "site",
        "cached_downloads",
        ".pyspelling_en.dict",
    ]
    for pattern in ("src/**/__pycache__", "tests/**/__pycache__"):
        targets.extend(glob.glob(pattern, recursive=True))
    if all_files:
        targets += [".tox", ".venv"]

    for item in targets:
        if not os.path.exists(item):
            continue
        if dry_run:
            click.echo(f"[🧐 Dry run] File would be deleted: {item}")
            continue
        try:
            if os.path.isdir(item):
                shutil.rmtree(item, ignore_errors=False)
            else:
                os.remove(item)
            # Verbose flag: report the name of the deleted item.
            if verbose:
                click.echo(f"🗑️ Deleted: {item}")
        except PermissionError:
            click.echo(f"❌ Permission denied to remove {item}.")
        except Exception as e:
            click.echo(f"❌ Failed to remove {item}: {e}")

    if not dry_run:
        click.echo("🧹 Cleanup complete!")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/commands/__init__.py | src/cli/eest/commands/__init__.py | """
A collection of commands supported by `eest` CLI.
Run `uv run eest` for complete list.
"""
from .clean import clean
from .info import info
__all__ = ["clean", "info"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/commands/info.py | src/cli/eest/commands/info.py | """Command to display EEST and system information."""
import platform
import subprocess
import sys
import click
from config.app import AppConfig
from ethereum_test_tools.utility.versioning import get_current_commit_hash_or_tag
def run_command(command: list[str]) -> str:
    """Execute *command* and return its stripped stdout, or "unknown" on any failure."""
    try:
        completed = subprocess.run(
            command,
            capture_output=True,
            text=True,
            check=True,
        )
    except (subprocess.SubprocessError, FileNotFoundError):
        # Missing binary or non-zero exit: report a placeholder instead of raising.
        return "unknown"
    return completed.stdout.strip()
def get_uv_version() -> str:
    """Return the output of ``uv --version`` ("unknown" if uv is unavailable)."""
    return run_command(["uv", "--version"])
@click.command(name="info")
def info() -> None:
"""Display EEST and system information."""
# Format headers
title = click.style("EEST", fg="green", bold=True)
version = AppConfig().version
info_text = f"""
{title} {click.style(f"v{version}", fg="blue", bold=True)}
{"─" * 50}
Git commit: {click.style(get_current_commit_hash_or_tag(shorten_hash=True), fg="yellow")}
Python: {click.style(platform.python_version(), fg="blue")}
uv: {click.style(get_uv_version(), fg="magenta")}
OS: {click.style(f"{platform.system()} {platform.release()}", fg="cyan")}
Platform: {click.style(platform.machine(), fg="cyan")}
Python Path: {click.style(sys.executable, dim=True)}
"""
click.echo(info_text)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/make/cli.py | src/cli/eest/make/cli.py | """
The `make` CLI streamlines the process of scaffolding tasks, such as generating
new test files, enabling developers to concentrate on the core aspects of
specification testing.
The module calls the appropriate function for the subcommand. If an invalid
subcommand is chosen, it throws an error and shows a list of valid subcommands.
If no subcommand is present, it shows a list of valid subcommands to choose
from.
"""
import click
from .commands import create_default_env, test
@click.group(short_help="Generate project files.")
def make() -> None:
    """Generate project files."""
"""
################################
|| ||
|| Command Registration ||
|| ||
################################
Register nested commands here. For more information, see Click documentation:
https://click.palletsprojects.com/en/8.0.x/commands/#nested-handling-and-contexts
"""
# Attach each subcommand to the `make` group.
make.add_command(test)
make.add_command(create_default_env)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/make/__init__.py | src/cli/eest/make/__init__.py | """
Make CLI.
This module provides the `make` CLI command that helps you quickly scaffold
files.
"""
from .cli import test
__all__ = ["test"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/make/commands/__init__.py | src/cli/eest/make/commands/__init__.py | """
Holds subcommands for the make command.
New subcommands must be created as modules and exported from this package,
then registered under the make command in `cli.py`.
"""
from .env import create_default_env
from .test import test
__all__ = ["test", "create_default_env"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/make/commands/test.py | src/cli/eest/make/commands/test.py | """
Provides a CLI command to scaffold a test file.
The `test` command guides the user through a series of prompts to generate a
test file based on the selected test type, fork, EIP number, and EIP name. The
generated test file is saved in the appropriate directory with a rendered
template using Jinja2.
"""
import os
import sys
from pathlib import Path
import click
import jinja2
from config.docs import DocsConfig
from ethereum_test_forks import get_development_forks, get_forks
from ....input import input_select, input_text
# Jinja2 environment that loads the test-file templates packaged with
# `cli.eest.make`; trim/lstrip keep template control lines out of the output.
template_loader = jinja2.PackageLoader("cli.eest.make")
template_env = jinja2.Environment(
    loader=template_loader, keep_trailing_newline=True, trim_blocks=True, lstrip_blocks=True
)
def exit_now() -> None:
    """Print a notice and terminate the process with exit code 0.

    Called when an interactive prompt returns ``None`` — presumably the
    prompt library's signal that the user pressed Ctrl+C (TODO confirm).
    """
    print("Ctrl+C detected, exiting..")
    # `sys.exit` (already imported at module level) instead of the
    # site-module `exit()` builtin, which is not guaranteed to exist in
    # every interpreter environment.
    sys.exit(0)
@click.command(
    short_help="Generate a new test file for an EIP.",
    epilog=f"Further help: {DocsConfig().DOCS_URL__WRITING_TESTS}",
)
def test() -> None:
    """
    Generate a new test file for an EIP.

    This function guides the user through a series of prompts to generate a
    test file for Ethereum execution specifications. The user is prompted to
    select the type of test, the fork to use, and to provide the EIP number and
    name. Based on the inputs, a test file is created in the appropriate
    directory with a rendered template.

    Example:
        uv run eest make test

    \f
    <figure class="video_container">
      <video controls="true" allowfullscreen="true">
        <source
          src="/execution-spec-tests/writing_tests/img/eest_make_test.mp4"
          type="video/mp4"
        />
      </video>
    </figure>
    """  # noqa: D301
    # Each prompt returns None when the user cancels (e.g. Ctrl+C);
    # exit_now() terminates the process cleanly in that case.
    test_type = input_select(
        "Choose the type of test to generate", choices=["State", "Blockchain"]
    )
    if test_type is None:
        exit_now()
    fork_choices = [str(fork) for fork in get_forks()]
    fork = input_select("Select the fork", choices=fork_choices)
    if fork is None:
        exit_now()
    # Tests live under tests/<fork-lowercase>/...
    base_path = Path("tests") / fork.lower()
    base_path.mkdir(parents=True, exist_ok=True)
    existing_dirs = [d.name for d in base_path.iterdir() if d.is_dir() and d.name != "__pycache__"]
    location_choice = input_select(
        "Select test directory",
        choices=[
            {"name": "Use current location", "value": "current"},
            *existing_dirs,
            {"name": "** Create new sub-directory **", "value": "new"},
        ],
    )
    if location_choice is None:
        exit_now()
    if location_choice == "new":
        # New sub-directory: collect EIP details and build `eip<N>_<name>`.
        eip_number = input_text("Enter the EIP number (int)").strip()
        eip_name = input_text("Enter the EIP name (spaces ok, only used in docstrings)").strip()
        directory_name = input_text(
            "Enter directory name (snake_case, part after eipXXXX_)"
        ).strip()
        dir_name = f"eip{eip_number}_{directory_name}"
        directory_path = base_path / dir_name
        raw_module = input_text("Enter module name (snake_case)").strip()
        # Module names are normalized to the pytest `test_` prefix.
        module_name = raw_module if raw_module.startswith("test_") else f"test_{raw_module}"
    elif location_choice == "current":
        eip_number = input_text("Enter the EIP number (int)").strip()
        eip_name = input_text("Enter the EIP name (spaces ok, only used in docstrings)").strip()
        raw_module = input_text("Enter module name (snake_case)").strip()
        module_name = raw_module if raw_module.startswith("test_") else f"test_{raw_module}"
        directory_path = base_path
    else:
        # Existing directory chosen: recover the EIP number and name from the
        # `eip<N>_<snake_case_name>` directory naming convention.
        dir_parts = location_choice.split("_")
        eip_number = dir_parts[0][3:]  # drop the leading "eip"
        eip_name = " ".join(dir_parts[1:]).title()
        raw_module = input_text("Enter module name (snake_case)").strip()
        module_name = raw_module if raw_module.startswith("test_") else f"test_{raw_module}"
        directory_path = base_path / location_choice
    file_name = f"{module_name}.py"
    module_path = directory_path / file_name
    # Never overwrite an existing test module.
    if module_path.exists():
        click.echo(
            click.style(
                f"\n 🛑 The target test module {module_path} already exists!",
                fg="red",
            ),
            err=True,
        )
        sys.exit(1)
    os.makedirs(directory_path, exist_ok=True)
    # Render the state/blockchain template with the collected details.
    template = template_env.get_template(f"{test_type.lower()}_test.py.j2")
    rendered_template = template.render(
        fork=fork,
        eip_number=eip_number,
        eip_name=eip_name,
        module_name=module_name,
    )
    with open(module_path, "w") as file:
        file.write(rendered_template)
    click.echo(
        click.style(
            f"\n 🎉 Success! Test file created at: {module_path}",
            fg="green",
        )
    )
    # Development forks require an explicit `--until=<fork>` when filling.
    fork_option = ""
    if fork in [dev_fork.name() for dev_fork in get_development_forks()]:
        fork_option = f" --until={fork}"
    click.echo(
        click.style(
            f"\n 📝 Get started with tests: {DocsConfig().DOCS_URL__WRITING_TESTS}"
            f"\n ⛽ To fill this test, run: `uv run fill {module_path}{fork_option}`",
            fg="cyan",
        )
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eest/make/commands/env.py | src/cli/eest/make/commands/env.py | """CLI commands for managing the environment file."""
import click
from jinja2 import Environment, PackageLoader
from config.env import ENV_PATH, Config
from ...quotes import get_quote
@click.command(
    short_help="Generate the default environment file (env.yaml).",
    name="env",
)
def create_default_env() -> None:
    """
    CLI command to generate the default environment file (env.yaml).

    If an `env.yaml` already exists, this command will NOT override it.
    In that case, it is recommended to manually make changes.

    _Easter egg: Shows a random quote after creating the environment file._

    Example:
        uv run eest make env

    Output:
        🎉 Success! Environment file created at: <path>/env.yaml
        🚀 Well begun is half done. - Aristotle
    """
    # Check if the env file already exists; never clobber user edits.
    if ENV_PATH.exists():
        click.echo(
            click.style(
                f"🚧 The env file '{ENV_PATH}' already exists. "
                "Please update it manually if needed.",
                fg="red",
            )
        )
        # `raise SystemExit(1)` instead of the site-provided `exit()`
        # builtin, which is not guaranteed to exist in every interpreter
        # environment (e.g. when run without the site module).
        raise SystemExit(1)
    # Render the packaged env.yaml template with the current config defaults.
    template_environment = Environment(
        loader=PackageLoader("config"), trim_blocks=True, lstrip_blocks=True
    )
    template = template_environment.get_template("env.yaml.j2")
    env_yaml = template.render(config=Config())
    with ENV_PATH.open("w") as file:
        file.write(env_yaml)
    click.echo(click.style(f"🎉 Success! Environment file created at: {ENV_PATH}", fg="green"))
    click.echo(get_quote())
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fillerconvert/verify_filled.py | src/cli/fillerconvert/verify_filled.py | """Verify refilled test vs original generated test."""
import re
from pathlib import Path
from pydantic import BaseModel, RootModel
# Define only relevant data we need to read from the files
class Indexes(BaseModel):
    """Post Section Indexes."""

    # d/g/v positions identifying which transaction variant (data, gas,
    # value) a post record belongs to.
    data: int
    gas: int
    value: int
class PostRecord(BaseModel):
    """Post results record: post-state hash plus the d/g/v indexes it covers."""

    hash: str
    indexes: Indexes
class StateTest(BaseModel):
    """StateTest in filled file."""

    # Mapping: fork name -> post records (one per d/g/v combination).
    post: dict[str, list[PostRecord]]
class FilledStateTest(RootModel[dict[str, StateTest]]):
    """State Test Wrapper: mapping of test name -> StateTest."""
def verify_refilled(refilled: Path, original: Path) -> int:
    """
    Verify post hash of the refilled test against original: Regex the original
    d,g,v from the refilled test name. Find the post record for this d,g,v and
    the fork of refilled test. Compare the post hash.
    """
    refilled_test_wrapper = FilledStateTest.model_validate_json(
        refilled.read_text(encoding="utf-8")
    )
    original_test_wrapper = FilledStateTest.model_validate_json(
        original.read_text(encoding="utf-8")
    )

    # Each original file holds a single test with posts for every fork/tx,
    # while each refilled test targets exactly one fork and one transaction.
    original_test_name, test_original = next(iter(original_test_wrapper.root.items()))

    verified_vectors = 0
    for refilled_test_name, refilled_test in refilled_test_wrapper.root.items():
        refilled_fork, refilled_result = next(iter(refilled_test.post.items()))
        match = re.search(r"v=(\d+)-g=(\d+)-d=(\d+)", refilled_test_name)
        if match is None:
            raise Exception("Could not regex match d.g.v indexes from refilled test name!")
        v, g, d = (int(group) for group in match.groups())
        # Locate the original post record with the same d/g/v for this fork.
        for res in test_original.post[refilled_fork]:
            if res.indexes.data == d and res.indexes.gas == g and res.indexes.value == v:
                print(f"check: {refilled_fork}, d:{d}, g:{g}, v:{v}")
                if res.hash != refilled_result[0].hash:
                    raise Exception(
                        "\nRefilled test post hash mismatch: \n"
                        f"test_name: {refilled_test_name}\n"
                        f"original_name: {original}\n"
                        f"refilled_hash: {refilled_result[0].hash}\n"
                        f"original_hash: {res.hash} f: {refilled_fork}, d: {d}, g: {g}, v: {v}"
                    )
                verified_vectors += 1
                break
        else:
            raise Exception(
                "\nRefilled test not found in original: \n"
                f"test_name: {refilled_test_name}\n"
                f"original_name: {original}\n"
            )
    return verified_vectors
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/fillerconvert/fillerconvert.py | src/cli/fillerconvert/fillerconvert.py | """Simple CLI tool that reads filler files in the `ethereum/tests` format."""
import argparse
from glob import glob
from pathlib import Path
from .verify_filled import verify_refilled
def main() -> None:
    """Entry point: parse CLI arguments and dispatch on the requested mode."""
    parser = argparse.ArgumentParser(description="Filler parser.")
    parser.add_argument(
        "mode", type=str, help="The type of filler we are trying to parse: blockchain/state."
    )
    parser.add_argument("folder_path", type=Path, help="The path to the JSON/YML filler directory")
    parser.add_argument("legacy_path", type=Path, help="The path to the legacy tests directory")
    args = parser.parse_args()

    # Tolerate values passed as `name=value`: keep only the part after `=`.
    args.folder_path = Path(str(args.folder_path).split("=")[-1])
    args.mode = str(args.mode).split("=")[-1]

    print("Scanning: " + str(args.folder_path))
    json_files = glob(str(args.folder_path / "**" / "*.json"), recursive=True)
    yml_files = glob(str(args.folder_path / "**" / "*.yml"), recursive=True)
    files = json_files + yml_files

    if args.mode == "blockchain":
        raise NotImplementedError("Blockchain filler not implemented yet.")

    if args.mode == "verify":
        verified_vectors = 0
        for file in files:
            print("Verify: " + file)
            # Path of the file relative to the scanned folder (strip the
            # folder prefix plus the leading separator).
            relative_file = file.removeprefix(str(args.folder_path))[1:]
            original_file = args.legacy_path / "GeneralStateTests" / relative_file
            verified_vectors += verify_refilled(Path(file), original_file)
        print(f"Total vectors verified: {verified_vectors}")
# Solidity skipped tests
# or file.endswith("stExample/solidityExampleFiller.yml")
# or file.endswith("vmPerformance/performanceTesterFiller.yml")
# or file.endswith("vmPerformance/loopExpFiller.yml")
# or file.endswith("vmPerformance/loopMulFiller.yml")
# or
# file.endswith("stRevertTest/RevertRemoteSubCallStorageOOGFiller.yml")
# or file.endswith("stSolidityTest/SelfDestructFiller.yml")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/fix_package_test_path.py | src/pytest_plugins/fix_package_test_path.py | """
Pytest plugin to fix the test IDs for all pytest command that use a
command-logic test file.
"""
from typing import List
import pytest
def pytest_collection_modifyitems(items: List[pytest.Item]) -> None:
    """
    Strip the runner-function wrapper (`<originalname>[...]`) from collected
    test names and node IDs so only the inner parametrized ID remains.
    """
    for item in items:
        prefix = f"{item.originalname}["  # type: ignore
        if item.name.startswith(prefix):
            # Drop the wrapper prefix and the trailing "]".
            item.name = item.name.removeprefix(prefix)[:-1]
        if prefix in item.nodeid:
            start = item.nodeid.index(prefix) + len(prefix)
            item._nodeid = item.nodeid[start:-1]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/__init__.py | src/pytest_plugins/__init__.py | """Package containing pytest plugins related to test filling."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/eels_resolver.py | src/pytest_plugins/eels_resolver.py | """
Pytest plugin to help working with the `ethereum-spec-evm-resolver`.
This plugin sets the `EELS_RESOLUTIONS_FILE` environment variable to the path
of the `eels_resolutions.json` file in the pytest root directory. If the
environment variable is already set, the plugin will not override it.
"""
import os
import shutil
from os.path import realpath
from pathlib import Path
import pytest
from pytest_metadata.plugin import metadata_key
# Directory containing this plugin; used to locate the default
# eels_resolutions.json shipped alongside it.
CURRENT_FOLDER = Path(realpath(__file__)).parent
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None:
    """
    Set the EELS_RESOLUTIONS_FILE environment variable.

    Args:
        config (pytest.Config): The pytest configuration object.
    """
    evm_bin = config.getoption("evm_bin", default=None)
    if evm_bin and "resolver" not in str(evm_bin):
        # evm_bin is not set for the framework tests: always set the env var.
        # NOTE(review): this early-returns when a non-resolver evm binary is
        # configured, i.e. resolutions are only set up when using the
        # ethereum-spec-evm-resolver or when no evm_bin is given.
        return
    env_var_name = "EELS_RESOLUTIONS_FILE"
    eels_resolutions_file = os.getenv(env_var_name)
    if os.getenv("EELS_RESOLUTIONS"):
        # If the user sets this variable, assume they know what they're doing.
        return
    if eels_resolutions_file:
        # Validate a user-provided path: it must be absolute and must exist.
        file_path = Path(eels_resolutions_file)
        if not file_path.is_absolute():
            raise ValueError(f"The path provided in {env_var_name} must be an absolute path.")
        if not file_path.exists():
            raise FileNotFoundError(
                f"The file {file_path} does not exist. "
                f"Ensure the {env_var_name} points to an existing file."
            )
    else:
        # No env var set: fall back to the resolutions file shipped next to
        # this plugin and export it for the resolver subprocess.
        default_file_path = CURRENT_FOLDER / "eels_resolutions.json"
        os.environ[env_var_name] = str(default_file_path)
        eels_resolutions_file = str(default_file_path)
    if "Tools" in config.stash[metadata_key]:
        # don't overwrite existing tools metadata added by other plugins
        config.stash[metadata_key]["Tools"]["EELS Resolutions"] = str(eels_resolutions_file)
    else:
        config.stash[metadata_key]["Tools"] = {"EELS Resolutions": str(eels_resolutions_file)}
    # Stash the resolved path on the config for the report header hook and
    # the teardown fixture defined in this module.
    config._eels_resolutions_file = eels_resolutions_file  # type: ignore
def pytest_report_header(config: pytest.Config, start_path: Path) -> str:
    """
    Report the EELS_RESOLUTIONS_FILE path in the pytest report header.

    Returns an empty string when no resolutions file was configured.
    """
    del start_path  # unused; required by the hook signature
    resolutions_file = getattr(config, "_eels_resolutions_file", None)
    return f"EELS resolutions file: {resolutions_file}" if resolutions_file else ""
@pytest.fixture(scope="session", autouse=True)
def output_metadata_dir_with_teardown(
request: pytest.FixtureRequest,
) -> object:
"""
Session-scoped fixture that attempts to retrieve the filler's
"output_metadata_dir" fixture value and copies the EELS resolutions
file there, if `_eels_resolutions_file` is set on the config object.
"""
yield
try:
output_metadata_dir = request.getfixturevalue("output_metadata_dir")
if output_metadata_dir.name == "stdout":
return
except pytest.FixtureLookupError:
output_metadata_dir = None
eels_resolutions_file = getattr(request.config, "_eels_resolutions_file", None)
if output_metadata_dir and eels_resolutions_file:
shutil.copy(
Path(eels_resolutions_file),
Path(output_metadata_dir) / Path(eels_resolutions_file).name,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/concurrency.py | src/pytest_plugins/concurrency.py | """
Pytest plugin to create a temporary folder for the session where multi-process
tests can store data that is shared between processes.
The provided `session_temp_folder` fixture is used, for example, by `consume`
when running hive simulators to ensure that only one `test_suite` is created
(used to create tests on the hive simulator) when they are being created using
multiple workers with pytest-xdist.
"""
import os
import shutil
from pathlib import Path
from tempfile import gettempdir as get_temp_dir
from typing import Generator
import pytest
from filelock import FileLock
@pytest.fixture(scope="session")
def session_temp_folder_name(testrun_uid: str) -> str:
"""
Define the name of the temporary folder that will be shared among all the
xdist workers to coordinate the tests.
"testrun_uid" is a fixture provided by the xdist plugin, and is unique for
each test run, so it is used to create the unique folder name.
"""
return f"pytest-{testrun_uid}"
@pytest.fixture(scope="session")
def session_temp_folder(
session_temp_folder_name: str,
) -> Generator[Path, None, None]:
"""
Create a global temporary folder that will be shared among all the xdist
workers to coordinate the tests.
We also create a file to keep track of how many workers are still using the
folder, so we can delete it when the last worker is done.
"""
session_temp_folder = Path(get_temp_dir()) / session_temp_folder_name
session_temp_folder.mkdir(exist_ok=True)
folder_users_file_name = "folder_users"
folder_users_file = session_temp_folder / folder_users_file_name
folder_users_lock_file = session_temp_folder / f"{folder_users_file_name}.lock"
with FileLock(folder_users_lock_file):
if folder_users_file.exists():
with folder_users_file.open("r") as f:
folder_users = int(f.read())
else:
folder_users = 0
folder_users += 1
with folder_users_file.open("w") as f:
f.write(str(folder_users))
yield session_temp_folder
with FileLock(folder_users_lock_file):
with folder_users_file.open("r") as f:
folder_users = int(f.read())
folder_users -= 1
if folder_users == 0:
shutil.rmtree(session_temp_folder)
else:
with folder_users_file.open("w") as f:
f.write(str(folder_users))
@pytest.fixture(scope="session")
def worker_count() -> int:
"""Get the number of workers for the test."""
worker_count_env = os.environ.get("PYTEST_XDIST_WORKER_COUNT", "1")
return max(int(worker_count_env), 1)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/forks.py | src/pytest_plugins/forks/forks.py | """Pytest plugin to enable fork range configuration for the test session."""
import itertools
import re
import sys
import textwrap
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from types import FunctionType
from typing import Any, Callable, ClassVar, Dict, Iterable, Iterator, List, Set, Tuple, Type
import pytest
from _pytest.mark.structures import ParameterSet
from pytest import Mark, Metafunc
from ethereum_clis import TransitionTool
from ethereum_test_forks import (
ALL_FORKS,
ALL_FORKS_WITH_TRANSITIONS,
Fork,
ForkSetAdapter,
InvalidForkError,
get_deployed_forks,
get_selected_fork_set,
get_transition_forks,
transition_fork_to,
)
from pytest_plugins.custom_logging import get_logger
logger = get_logger(__name__)
def pytest_addoption(parser: pytest.Parser) -> None:
"""Add command-line options to pytest."""
fork_group = parser.getgroup("Forks", "Specify the fork range to generate fixtures for")
fork_group.addoption(
"--forks",
action="store_true",
dest="show_fork_help",
default=False,
help="Display forks supported by the test framework and exit.",
)
fork_group.addoption(
"--fork",
action="store",
dest="single_fork",
default="",
help="Only fill tests for the specified fork.",
)
fork_group.addoption(
"--from",
action="store",
dest="forks_from",
default="",
help="Fill tests from and including the specified fork.",
)
fork_group.addoption(
"--until",
action="store",
dest="forks_until",
default="",
help="Fill tests until and including the specified fork.",
)
@dataclass(kw_only=True)
class ForkCovariantParameter:
"""Value list for a fork covariant parameter in a given fork."""
names: List[str]
values: List[ParameterSet]
class ForkParametrizer:
"""A parametrizer for a test case that is parametrized by the fork."""
fork: Fork
fork_covariant_parameters: List[ForkCovariantParameter] = field(default_factory=list)
def __init__(
self,
fork: Fork,
marks: List[pytest.MarkDecorator | pytest.Mark] | None = None,
fork_covariant_parameters: List[ForkCovariantParameter] | None = None,
):
"""
Initialize a new fork parametrizer object for a given fork.
Args:
fork: The fork for which the test cases will be parametrized.
marks: A list of pytest marks to apply to all the test cases
parametrized by the fork.
fork_covariant_parameters: A list of fork covariant parameters
for the test case, for unit testing
purposes only.
"""
if marks is None:
marks = []
self.fork_covariant_parameters = [
ForkCovariantParameter(
names=["fork"],
values=[
pytest.param(
fork,
marks=marks,
)
],
)
]
if fork_covariant_parameters is not None:
self.fork_covariant_parameters.extend(fork_covariant_parameters)
self.fork = fork
@property
def argnames(self) -> List[str]:
"""Return the parameter names for the test case."""
argnames = []
for p in self.fork_covariant_parameters:
argnames.extend(p.names)
return argnames
@property
def argvalues(self) -> List[ParameterSet]:
"""Return the parameter values for the test case."""
parameter_set_combinations = itertools.product(
# Add the values for each parameter, all of them are lists of at
# least one element.
*[p.values for p in self.fork_covariant_parameters],
)
parameter_set_list: List[ParameterSet] = []
for parameter_set_combination in parameter_set_combinations:
params: List[Any] = []
marks: List[pytest.Mark | pytest.MarkDecorator] = []
test_id: str | None = None
for p in parameter_set_combination:
assert isinstance(p, ParameterSet)
params.extend(p.values)
if p.marks:
marks.extend(p.marks)
if p.id:
if test_id is None:
test_id = f"fork_{self.fork.name()}-{p.id}"
else:
test_id = f"{test_id}-{p.id}"
parameter_set_list.append(pytest.param(*params, marks=marks, id=test_id))
return parameter_set_list
class CovariantDescriptor:
"""
A descriptor for a parameter that is covariant with the fork: the
parametrized values change depending on the fork.
"""
argnames: List[str] = []
fn: Callable[[Fork], List[Any] | Iterable[Any]] | None = None
selector: Callable[..., bool] | None = None
marks: None | pytest.Mark | pytest.MarkDecorator | List[pytest.Mark | pytest.MarkDecorator] = (
None
)
def __init__(
self,
argnames: List[str] | str,
fn: Callable[[Fork], List[Any] | Iterable[Any]] | None = None,
*,
selector: Callable[..., bool] | None = None,
marks: None
| pytest.Mark
| pytest.MarkDecorator
| List[pytest.Mark | pytest.MarkDecorator] = None,
):
"""
Initialize a new covariant descriptor.
Args:
argnames: The names of the parameters that are covariant with the
fork.
fn: A function that takes the fork as the single parameter and
returns the values for the parameter for each fork.
selector: A function that filters the values for the parameter.
marks: A list of pytest marks to apply to the test cases
parametrized by the parameter.
"""
self.argnames = (
[argname.strip() for argname in argnames.split(",")]
if isinstance(argnames, str)
else argnames
)
self.fn = fn
self.selector = selector
self.marks = marks
def process_value(
self,
parameters_values: Any | List[Any] | Tuple[Any] | ParameterSet,
) -> ParameterSet | None:
"""
Process a value for a covariant parameter.
The `selector` is applied to parameters_values in order to filter them.
"""
if isinstance(parameters_values, ParameterSet):
return parameters_values
if len(self.argnames) == 1:
# Wrap values that are meant for a single parameter in a list
parameters_values = [parameters_values]
marks = self.marks
if self.selector is None or self.selector(
*parameters_values[: self.selector.__code__.co_argcount]
):
if isinstance(marks, FunctionType):
marks = marks(*parameters_values[: marks.__code__.co_argcount])
assert not isinstance(marks, FunctionType), "marks must be a list or None"
if marks is None:
marks = []
elif not isinstance(marks, list):
marks = [marks]
return pytest.param(*parameters_values, marks=marks)
return None
def process_values(self, values: Iterable[Any]) -> List[ParameterSet]:
"""
Filter the values for the covariant parameter.
I.e. if the marker has an argument, the argument is interpreted as a
lambda function that filters the values.
"""
processed_values: List[ParameterSet] = []
for value in values:
processed_value = self.process_value(value)
if processed_value is not None:
processed_values.append(processed_value)
return processed_values
def add_values(self, fork_parametrizer: ForkParametrizer) -> None:
"""Add the values for the covariant parameter to the parametrizer."""
if self.fn is None:
return
fork = fork_parametrizer.fork
values = self.fn(fork)
values = self.process_values(values)
assert len(values) > 0
fork_parametrizer.fork_covariant_parameters.append(
ForkCovariantParameter(names=self.argnames, values=values)
)
class CovariantDecorator(CovariantDescriptor):
"""
A marker used to parametrize a function by a covariant parameter with the
values returned by a fork method.
The decorator must be subclassed with the appropriate class variables
before initialization.
Attributes:
marker_name: Name of the marker.
description: Description of the marker.
fork_attribute_name: Name of the method to call on the fork to
get the values.
marker_parameter_names: Names of the parameters to be parametrized
in the test function.
indirect: Whether the parameters should be passed through fixtures
(indirect parametrization).
"""
marker_name: ClassVar[str]
description: ClassVar[str]
fork_attribute_name: ClassVar[str]
marker_parameter_names: ClassVar[List[str]]
indirect: ClassVar[bool]
def __init__(self, metafunc: Metafunc):
"""
Initialize the covariant decorator.
The decorator must already be subclassed with the appropriate class
variables before initialization.
Args:
metafunc: The metafunc object that pytest uses when generating
tests.
"""
self.metafunc = metafunc
m = metafunc.definition.iter_markers(self.marker_name)
if m is None:
return
marker_list = list(m)
assert len(marker_list) <= 1, f"Multiple markers {self.marker_name} found"
if len(marker_list) == 0:
return
marker = marker_list[0]
assert marker is not None
assert len(marker.args) == 0, "Only keyword arguments are supported"
kwargs = dict(marker.kwargs)
selector = kwargs.pop("selector", lambda _: True)
assert isinstance(selector, FunctionType), "selector must be a function"
marks = kwargs.pop("marks", None)
if len(kwargs) > 0:
raise ValueError(f"Unknown arguments to {self.marker_name}: {kwargs}")
def fn(fork: Fork) -> List[Any]:
return getattr(fork, self.fork_attribute_name)(block_number=0, timestamp=0)
super().__init__(
argnames=self.marker_parameter_names,
fn=fn,
selector=selector,
marks=marks,
)
def covariant_decorator(
*,
marker_name: str,
description: str,
fork_attribute_name: str,
argnames: List[str],
indirect: bool = False,
) -> Type[CovariantDecorator]:
"""Generate a new covariant decorator subclass."""
return type(
marker_name,
(CovariantDecorator,),
{
"marker_name": marker_name,
"description": description,
"fork_attribute_name": fork_attribute_name,
"marker_parameter_names": argnames,
"indirect": indirect,
},
)
fork_covariant_decorators: List[Type[CovariantDecorator]] = [
covariant_decorator(
marker_name="with_all_tx_types",
description="marks a test to be parametrized for all tx types at parameter named tx_type"
" of type int",
fork_attribute_name="tx_types",
argnames=["tx_type"],
),
covariant_decorator(
marker_name="with_all_contract_creating_tx_types",
description="marks a test to be parametrized for all tx types that can create a contract"
" at parameter named tx_type of type int",
fork_attribute_name="contract_creating_tx_types",
argnames=["tx_type"],
),
covariant_decorator(
marker_name="with_all_typed_transactions",
description="marks a test to be parametrized with default typed transactions named "
"typed_transaction",
fork_attribute_name="tx_types",
argnames=["typed_transaction"],
# indirect means the values from `tx_types` will be passed to the
# `typed_transaction` fixture which will then be used in the test
indirect=True,
),
covariant_decorator(
marker_name="with_all_precompiles",
description="marks a test to be parametrized for all precompiles at parameter named"
" precompile of type int",
fork_attribute_name="precompiles",
argnames=["precompile"],
),
covariant_decorator(
marker_name="with_all_evm_code_types",
description="marks a test to be parametrized for all EVM code types at parameter named"
" `evm_code_type` of type `EVMCodeType`, such as `LEGACY` and `EOF_V1`",
fork_attribute_name="evm_code_types",
argnames=["evm_code_type"],
),
covariant_decorator(
marker_name="with_all_call_opcodes",
description="marks a test to be parametrized for all *CALL opcodes at parameter named"
" call_opcode, and also the appropriate EVM code type at parameter named evm_code_type",
fork_attribute_name="call_opcodes",
argnames=["call_opcode", "evm_code_type"],
),
covariant_decorator(
marker_name="with_all_create_opcodes",
description="marks a test to be parametrized for all *CREATE* opcodes at parameter named"
" create_opcode, and also the appropriate EVM code type at parameter named evm_code_type",
fork_attribute_name="create_opcodes",
argnames=["create_opcode", "evm_code_type"],
),
covariant_decorator(
marker_name="with_all_system_contracts",
description="marks a test to be parametrized for all system contracts at parameter named"
" system_contract of type int",
fork_attribute_name="system_contracts",
argnames=["system_contract"],
),
]
def pytest_configure(config: pytest.Config) -> None:
"""
Register the plugin's custom markers and process command-line options.
Custom marker registration:
https://docs.pytest.org/en/7.1.x/how-to/
writing_plugins.html# registering-custom-markers
"""
config.addinivalue_line(
"markers",
(
"valid_at_transition_to(fork, subsequent_forks: bool = False, "
"until: str | None = None): specifies a test case is only valid "
"at the specified fork transition boundaries"
),
)
config.addinivalue_line(
"markers",
"valid_from(fork): specifies from which fork a test case is valid",
)
config.addinivalue_line(
"markers",
"valid_until(fork): specifies until which fork a test case is valid",
)
config.addinivalue_line(
"markers",
"valid_at(fork): specifies at which fork a test case is valid",
)
config.addinivalue_line(
"markers",
(
"parametrize_by_fork(names, values_fn): parametrize a test case by fork using the "
"specified names and values returned by the function values_fn(fork)"
),
)
for d in fork_covariant_decorators:
config.addinivalue_line("markers", f"{d.marker_name}: {d.description}")
available_forks_help = textwrap.dedent(
f"""\
Available forks:
{", ".join(fork.name() for fork in ALL_FORKS)}
"""
)
available_forks_help += textwrap.dedent(
f"""\
Available transition forks:
{", ".join([fork.name() for fork in get_transition_forks()])}
"""
)
def get_fork_option(config: pytest.Config, option_name: str, parameter_name: str) -> Set[Fork]:
"""Post-process get option to allow for external fork conditions."""
config_str = config.getoption(option_name)
try:
return ForkSetAdapter.validate_python(config_str)
except InvalidForkError:
print(
f"Error: Unsupported fork provided to {parameter_name}:",
config_str,
"\n",
file=sys.stderr,
)
print(available_forks_help, file=sys.stderr)
pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)
single_fork = get_fork_option(config, "single_fork", "--fork")
forks_from = get_fork_option(config, "forks_from", "--from")
forks_until = get_fork_option(config, "forks_until", "--until")
show_fork_help = config.getoption("show_fork_help")
dev_forks_help = textwrap.dedent(
"To run tests for a fork under active development, it must be "
"specified explicitly via --until=FORK.\n"
"Tests are only ran for deployed mainnet forks by default, i.e., "
f"until {get_deployed_forks()[-1].name()}.\n"
)
if show_fork_help:
print(available_forks_help)
print(dev_forks_help)
pytest.exit("After displaying help.", returncode=0)
if single_fork and (forks_from or forks_until):
print(
"Error: --fork cannot be used in combination with --from or --until", file=sys.stderr
)
pytest.exit("Invalid command-line options.", returncode=pytest.ExitCode.USAGE_ERROR)
transition_forks = not getattr(config, "skip_transition_forks", False)
selected_fork_set = get_selected_fork_set(
single_fork=single_fork,
forks_from=forks_from,
forks_until=forks_until,
transition_forks=transition_forks,
)
if getattr(config, "single_fork_mode", False) and len(selected_fork_set) != 1:
pytest.exit(
f"""
Expected exactly one fork to be specified, got {len(selected_fork_set)}
({selected_fork_set}).
Make sure to specify exactly one fork using the --fork command line argument.
""",
returncode=pytest.ExitCode.USAGE_ERROR,
)
config.selected_fork_set = selected_fork_set # type: ignore
if not selected_fork_set:
print(
f"Error: --from {','.join(fork.name() for fork in forks_from)} "
f"--until {','.join(fork.name() for fork in forks_until)} "
"creates an empty fork range.",
file=sys.stderr,
)
pytest.exit(
"Command-line options produce empty fork range.",
returncode=pytest.ExitCode.USAGE_ERROR,
)
config.unsupported_forks: Set[Fork] = set() # type: ignore
t8n: TransitionTool | None = getattr(config, "t8n", None)
if t8n:
config.unsupported_forks = frozenset( # type: ignore
fork for fork in selected_fork_set if not t8n.is_fork_supported(fork)
)
logger.debug(f"List of unsupported forks: {list(config.unsupported_forks)}") # type: ignore
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config, start_path: Any) -> List[str]:
"""Pytest hook called to obtain the report header."""
del start_path
bold = "\033[1m"
warning = "\033[93m"
reset = "\033[39;49m"
header = [
(
bold
+ "Generating fixtures for: "
+ ", ".join([f.name() for f in sorted(config.selected_fork_set)]) # type: ignore[attr-defined]
+ reset
),
]
if all(fork.is_deployed() for fork in config.selected_fork_set): # type: ignore[attr-defined]
header += [
(
bold + warning + "Only generating fixtures with stable/deployed forks: "
"Specify an upcoming fork via --until=fork to "
"add forks under development." + reset
)
]
return header
@pytest.fixture(autouse=True)
def fork(request: pytest.FixtureRequest) -> None:
"""Parametrize test cases by fork."""
pass
@pytest.fixture(scope="session")
def session_fork(request: pytest.FixtureRequest) -> Fork | None:
"""
Session-wide fork object used if the plugin is configured in single-fork
mode.
"""
if hasattr(request.config, "single_fork_mode") and request.config.single_fork_mode:
return list(request.config.selected_fork_set)[0] # type: ignore
raise AssertionError(
"Plugin used `session_fork` fixture without the correct configuration (single_fork_mode)."
)
ALL_VALIDITY_MARKERS: Dict[str, "Type[ValidityMarker]"] = {}
MARKER_NAME_REGEX = re.compile(r"(?<!^)(?=[A-Z])")
@dataclass(kw_only=True)
class ValidityMarker(ABC):
"""
Abstract class to represent any fork validity marker.
Subclassing this class allows for the creation of new validity markers.
Instantiation must be done per test function, and the `process` method must
be called to process the fork arguments.
When subclassing, the following optional parameters can be set:
- marker_name: Name of the marker, if not set, the class name is
converted to underscore.
- mutually_exclusive: List of other marker types incompatible
with this one.
- flag: Whether the marker is a flag and should always be included.
"""
marker_name: ClassVar[str]
mutually_exclusive: ClassVar[List[Type["ValidityMarker"]]]
flag: ClassVar[bool]
mark: Mark | None
def __init_subclass__(
cls,
marker_name: str | None = None,
mutually_exclusive: List[Type["ValidityMarker"]] | None = None,
flag: bool = False,
**kwargs: Any,
) -> None:
"""Register the validity marker subclass."""
super().__init_subclass__(**kwargs)
if marker_name is None:
# Use the class name converted to underscore:
# https://stackoverflow.com/a/1176023
marker_name = MARKER_NAME_REGEX.sub("_", cls.__name__).lower()
cls.marker_name = marker_name
cls.mutually_exclusive = mutually_exclusive if mutually_exclusive else []
cls.flag = flag
if marker_name in ALL_VALIDITY_MARKERS:
raise ValueError(f"Duplicate validity marker class: {cls}")
ALL_VALIDITY_MARKERS[marker_name] = cls
def __post_init__(self) -> None:
"""Post-initialize the validity marker."""
if self.flag:
return
if self.mark is None:
raise Exception(f"Marker error '{self.marker_name}'")
if len(self.mark.args) == 0:
raise Exception(f"Missing fork argument with '{self.marker_name}' marker")
def process_fork_arguments(self, *fork_args: str) -> Set[Fork]:
"""Process the fork arguments."""
fork_set = ForkSetAdapter.validate_python(fork_args)
if len(fork_set) != len(fork_args):
raise Exception(f"Duplicate argument specified in '{self.marker_name}'")
return fork_set
@staticmethod
def get_all_validity_markers(markers: Iterator[pytest.Mark]) -> List["ValidityMarker"]:
"""Get all the validity markers applied to the test function."""
markers_dict: Dict[str, ValidityMarker] = {}
for marker in markers:
for marker_name in ALL_VALIDITY_MARKERS:
if marker.name == marker_name:
if marker_name in markers_dict:
raise Exception(f"Too many '{marker_name}' markers applied to test")
cls = ALL_VALIDITY_MARKERS[marker.name]
markers_dict[marker_name] = cls(mark=marker)
for cls in ALL_VALIDITY_MARKERS.values():
if cls.flag and cls.marker_name not in markers_dict:
markers_dict[cls.marker_name] = cls(mark=None)
for marker_name, validity_marker in markers_dict.items():
for incompatible_marker in validity_marker.mutually_exclusive:
if incompatible_marker.marker_name in markers_dict:
raise Exception(
f"The markers '{incompatible_marker.marker_name}' and "
f"'{marker_name}' can't be combined. "
)
return list(markers_dict.values())
@staticmethod
def get_test_fork_set(validity_markers: List["ValidityMarker"]) -> Set[Fork]:
"""
Get the set of forks where a test is valid from the validity markers
and filters.
"""
if not len(
[validity_marker for validity_marker in validity_markers if not validity_marker.flag]
):
# Limit to non-transition forks if no validity markers were applied
test_fork_set = set(ALL_FORKS)
else:
# Start with all forks and transitions if any validity markers were
# applied
test_fork_set = set(ALL_FORKS_WITH_TRANSITIONS)
for v in validity_markers:
# Apply the validity markers to the test function if applicable
test_fork_set = v.process(test_fork_set)
return test_fork_set
@staticmethod
def get_test_fork_set_from_markers(markers: Iterator[pytest.Mark]) -> Set[Fork]:
"""
Get the set of forks where a test is valid using the markers applied to
the test.
"""
return ValidityMarker.get_test_fork_set(ValidityMarker.get_all_validity_markers(markers))
@staticmethod
def get_test_fork_set_from_metafunc(
metafunc: Metafunc,
) -> Set[Fork]:
"""
Get the set of forks where a test is valid using its pytest
meta-function.
"""
return ValidityMarker.get_test_fork_set_from_markers(metafunc.definition.iter_markers())
@staticmethod
def is_validity_or_filter_marker(name: str) -> bool:
"""Check if a marker is a validity or filter marker."""
return name in ALL_VALIDITY_MARKERS
def process(self, forks: Set[Fork]) -> Set[Fork]:
"""Process the fork arguments."""
if self.mark is None:
fork_set = self._process_with_marker_args()
else:
fork_set = self._process_with_marker_args(*self.mark.args, **self.mark.kwargs)
if self.flag:
return forks - fork_set
return forks & fork_set
@abstractmethod
def _process_with_marker_args(self, *args: Any, **kwargs: Any) -> Set[Fork]:
"""
Process the fork arguments as specified for the marker.
Method must be implemented by the subclass.
If the validity marker is of flag type, the returned forks will be
subtracted from the fork set, otherwise the returned forks will be
intersected with the current set.
"""
pass
class ValidFrom(ValidityMarker):
"""
Marker used to specify the fork from which the test is valid. The test will
not be filled for forks before the specified fork.
```python
import pytest
from ethereum_test_tools import Alloc, StateTestFiller
@pytest.mark.valid_from("London")
def test_something_only_valid_after_london(
state_test: StateTestFiller,
pre: Alloc
):
pass
```
In this example, the test will only be filled for the London fork and
after, e.g. London, Paris, Shanghai, Cancun, etc.
"""
def _process_with_marker_args(self, *fork_args: str) -> Set[Fork]:
"""Process the fork arguments."""
forks: Set[Fork] = self.process_fork_arguments(*fork_args)
resulting_set: Set[Fork] = set()
for fork in forks:
resulting_set |= {f for f in ALL_FORKS if f >= fork}
return resulting_set
class ValidUntil(ValidityMarker):
"""
Marker to specify the fork until which the test is valid. The test will not
be filled for forks after the specified fork.
```python
import pytest
from ethereum_test_tools import Alloc, StateTestFiller
@pytest.mark.valid_until("London")
def test_something_only_valid_until_london(
state_test: StateTestFiller,
pre: Alloc
):
pass
```
In this example, the test will only be filled for the London fork and
before, e.g. London, Berlin, Istanbul, etc.
"""
def _process_with_marker_args(self, *fork_args: str) -> Set[Fork]:
"""Process the fork arguments."""
forks: Set[Fork] = self.process_fork_arguments(*fork_args)
resulting_set: Set[Fork] = set()
for fork in forks:
resulting_set |= {f for f in ALL_FORKS if f <= fork}
return resulting_set
class ValidAt(ValidityMarker):
"""
Marker to specify each fork individually for which the test is valid.
```python
import pytest
from ethereum_test_tools import Alloc, StateTestFiller
@pytest.mark.valid_at("London", "Cancun")
def test_something_only_valid_at_london_and_cancun(
state_test: StateTestFiller,
pre: Alloc
):
pass
```
In this example, the test will only be filled for the London and Cancun
forks.
"""
def _process_with_marker_args(self, *fork_args: str) -> Set[Fork]:
"""Process the fork arguments."""
return self.process_fork_arguments(*fork_args)
class ValidAtTransitionTo(ValidityMarker, mutually_exclusive=[ValidAt, ValidFrom, ValidUntil]):
"""
Marker to specify that a test is only meant to be filled at the transition
to the specified fork.
The test usually starts at the fork prior to the specified fork at genesis
and at block 5 (for pre-merge forks) or at timestamp 15,000 (for post-merge
forks) the fork transition occurs.
```python
import pytest
from ethereum_test_tools import Alloc, BlockchainTestFiller
@pytest.mark.valid_at_transition_to("London")
def test_something_that_happens_during_the_fork_transition_to_london(
blockchain_test: BlockchainTestFiller,
pre: Alloc
):
pass
```
In this example, the test will only be filled for the fork that transitions
to London at block number 5, `BerlinToLondonAt5`, and no other forks.
To see or add a new transition fork, see the
`ethereum_test_forks.forks.transition` module.
Note that the test uses a `BlockchainTestFiller` fixture instead of a
`StateTestFiller`, as the transition forks are used to test changes
throughout the blockchain progression, and not just the state change of a
single transaction.
This marker also accepts the following keyword arguments:
- `subsequent_transitions`: Force the test to also fill for subsequent fork
transitions.
- `until`: Implies `subsequent_transitions` and puts a limit
on which transition fork will the test filling will be limited to.
For example:
```python
@pytest.mark.valid_at_transition_to("Cancun", subsequent_transitions=True)
```
produces tests on `ShanghaiToCancunAtTime15k` and
`CancunToPragueAtTime15k`, and any transition fork after that.
And:
```python
@pytest.mark.valid_at_transition_to("Cancun",
subsequent_transitions=True, until="Prague")
```
produces tests on `ShanghaiToCancunAtTime15k` and
`CancunToPragueAtTime15k`, but no forks after Prague.
"""
def _process_with_marker_args(
self, *fork_args: str, subsequent_forks: bool = False, until: str | None = None
) -> Set[Fork]:
"""Process the fork arguments."""
forks: Set[Fork] = self.process_fork_arguments(*fork_args)
until_forks: Set[Fork] | None = (
None if until is None else self.process_fork_arguments(until)
)
if len(forks) == 0:
raise Exception("Missing fork argument with 'valid_at_transition_to' marker.")
if len(forks) > 1:
raise Exception("Too many forks specified to 'valid_at_transition_to' marker.")
resulting_set: Set[Fork] = set()
for fork in forks:
resulting_set |= transition_fork_to(fork)
if subsequent_forks:
for transition_forks in (transition_fork_to(f) for f in ALL_FORKS if f > fork):
for transition_fork in transition_forks:
if transition_fork and (
until_forks is None
or any(transition_fork <= until_fork for until_fork in until_forks)
):
resulting_set.add(transition_fork)
return resulting_set
class ValidForBPOForks(ValidityMarker, marker_name="valid_for_bpo_forks", flag=True):
"""
Marker to specify that a test is valid for BPO forks.
```python
import pytest
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/__init__.py | src/pytest_plugins/forks/__init__.py | """
A pytest plugin to configure the forks in the test session. It parametrizes
tests based on the user-provided fork range the tests' specified validity
markers.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_forks.py | src/pytest_plugins/forks/tests/test_forks.py | """Test the forks plugin."""
import pytest
from ethereum_test_fixtures import LabeledFixtureFormat
from ethereum_test_forks import ArrowGlacier, Fork, forks_from_until, get_deployed_forks, get_forks
from ethereum_test_tools import StateTest
@pytest.fixture
def fork_map() -> dict[str, Fork]:
"""Lookup fork.name() : fork class."""
return {fork.name(): fork for fork in get_forks()}
def test_no_options_no_validity_marker(pytester: pytest.Pytester) -> None:
"""
Test test parametrization with:
- no fork command-line options,
- no fork validity marker.
"""
pytester.makepyfile(
f"""
import pytest
def test_all_forks({StateTest.pytest_parameter_name()}):
pass
"""
)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini", "-v")
all_forks = get_deployed_forks()
forks_under_test = forks_from_until(all_forks[0], all_forks[-1])
expected_skipped = 2 # eels doesn't support Constantinople
expected_passed = (
len(forks_under_test) * len(StateTest.supported_fixture_formats) - expected_skipped
)
stdout = "\n".join(result.stdout.lines)
for test_fork in forks_under_test:
for fixture_format in StateTest.supported_fixture_formats:
if isinstance(fixture_format, LabeledFixtureFormat):
fixture_format_label = fixture_format.label
fixture_format = fixture_format.format
else:
fixture_format_label = fixture_format.format_name.lower()
if (
not fixture_format.supports_fork(test_fork)
or "blockchain_test_engine_x" in fixture_format_label
):
expected_passed -= 1
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" not in stdout
continue
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" in stdout
result.assert_outcomes(
passed=expected_passed,
failed=0,
skipped=expected_skipped,
errors=0,
)
@pytest.mark.parametrize("fork", ["London", "Paris"])
def test_from_london_option_no_validity_marker(
pytester: pytest.Pytester, fork_map: dict[str, Fork], fork: str
) -> None:
"""
Test test parametrization with:
- --from London command-line option,
- no until command-line option,
- no fork validity marker.
"""
pytester.makepyfile(
f"""
import pytest
def test_all_forks({StateTest.pytest_parameter_name()}):
pass
"""
)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini", "-v", "--from", fork)
all_forks = get_deployed_forks()
forks_under_test = forks_from_until(fork_map[fork], all_forks[-1])
expected_passed = len(forks_under_test) * len(StateTest.supported_fixture_formats)
stdout = "\n".join(result.stdout.lines)
for test_fork in forks_under_test:
for fixture_format in StateTest.supported_fixture_formats:
if isinstance(fixture_format, LabeledFixtureFormat):
fixture_format_label = fixture_format.label
fixture_format = fixture_format.format
else:
fixture_format_label = fixture_format.format_name.lower()
if (
not fixture_format.supports_fork(test_fork)
or "blockchain_test_engine_x" in fixture_format_label
):
expected_passed -= 1
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" not in stdout
continue
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" in stdout
result.assert_outcomes(
passed=expected_passed,
failed=0,
skipped=0,
errors=0,
)
def test_from_london_until_shanghai_option_no_validity_marker(
pytester: pytest.Pytester, fork_map: dict[str, Fork]
) -> None:
"""
Test test parametrization with:
- --from London command-line option,
- --until Shanghai command-line option,
- no fork validity marker.
"""
pytester.makepyfile(
f"""
import pytest
def test_all_forks({StateTest.pytest_parameter_name()}):
pass
"""
)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest(
"-c", "pytest-fill.ini", "-v", "--from", "London", "--until", "shanghai"
)
forks_under_test = forks_from_until(fork_map["London"], fork_map["Shanghai"])
expected_passed = len(forks_under_test) * len(StateTest.supported_fixture_formats)
stdout = "\n".join(result.stdout.lines)
if ArrowGlacier in forks_under_test:
forks_under_test.remove(ArrowGlacier)
expected_passed -= len(StateTest.supported_fixture_formats)
for test_fork in forks_under_test:
for fixture_format in StateTest.supported_fixture_formats:
if isinstance(fixture_format, LabeledFixtureFormat):
fixture_format_label = fixture_format.label
fixture_format = fixture_format.format
else:
fixture_format_label = fixture_format.format_name.lower()
if (
not fixture_format.supports_fork(test_fork)
or "blockchain_test_engine_x" in fixture_format_label
):
expected_passed -= 1
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" not in stdout
continue
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" in stdout
result.assert_outcomes(
passed=expected_passed,
failed=0,
skipped=0,
errors=0,
)
def test_from_paris_until_paris_option_no_validity_marker(
pytester: pytest.Pytester, fork_map: dict[str, Fork]
) -> None:
"""
Test test parametrization with:
- --from Paris command-line option,
- --until Paris command-line option,
- no fork validity marker.
"""
pytester.makepyfile(
f"""
import pytest
def test_all_forks({StateTest.pytest_parameter_name()}):
pass
"""
)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest(
"-c", "pytest-fill.ini", "-v", "--from", "paris", "--until", "paris"
)
forks_under_test = forks_from_until(fork_map["Paris"], fork_map["Paris"])
expected_passed = len(forks_under_test) * len(StateTest.supported_fixture_formats)
stdout: str = "\n".join(result.stdout.lines)
assert len(stdout) > 0, "stdout is empty string"
for test_fork in forks_under_test:
for fixture_format in StateTest.supported_fixture_formats:
if isinstance(fixture_format, LabeledFixtureFormat):
fixture_format_label = fixture_format.label
fixture_format = fixture_format.format
else:
fixture_format_label = fixture_format.format_name.lower()
if "blockchain_test_engine_x" in fixture_format_label:
expected_passed -= 1
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" not in stdout
continue
assert f":test_all_forks[fork_{test_fork}-{fixture_format_label}]" in stdout
result.assert_outcomes(
passed=expected_passed,
failed=0,
skipped=0,
errors=0,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_covariant_markers.py | src/pytest_plugins/forks/tests/test_covariant_markers.py | """Test fork covariant markers and their effect on test parametrization."""
import pytest
@pytest.mark.parametrize(
"test_function,outcomes,error_string",
[
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types()
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_tx_types",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(selector=lambda tx_type: tx_type != 0)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_tx_types_with_selector",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(
marks=lambda tx_type: pytest.mark.skip("incompatible") if tx_type == 1 else None,
)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
assert tx_type != 1
""",
{"passed": 2, "xpassed": 0, "failed": 0, "skipped": 1, "errors": 0},
None,
id="with_all_tx_types_with_marks_lambda",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(marks=pytest.mark.skip("incompatible"))
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
assert False
""",
{"passed": 0, "xpassed": 0, "failed": 0, "skipped": 3, "errors": 0},
None,
id="with_all_tx_types_with_marks_lambda",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(marks=[pytest.mark.skip("incompatible")])
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
assert False
""",
{"passed": 0, "xpassed": 0, "failed": 0, "skipped": 3, "errors": 0},
None,
id="with_all_tx_types_with_marks_lambda",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(
marks=(
lambda tx_type:
[pytest.mark.xfail, pytest.mark.slow]
if tx_type == 1 else None
),
)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(request, state_test, tx_type):
mark_names = [mark.name for mark in request.node.iter_markers()]
assert "state_test" in mark_names
if tx_type == 1:
assert "slow" in mark_names
""",
{"passed": 2, "xpassed": 1, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_tx_types_with_marks_lambda_multiple_marks",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_contract_creating_tx_types()
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_contract_creating_tx_types",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_contract_creating_tx_types()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_contract_creating_tx_types",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_precompiles()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, precompile):
pass
""",
{"passed": 10, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_precompiles",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_evm_code_types()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, evm_code_type):
pass
""",
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_evm_code_types",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_call_opcodes()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, call_opcode):
pass
""",
{"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_call_opcodes",
),
pytest.param(
"""
import pytest
from ethereum_test_tools import EVMCodeType
@pytest.mark.with_all_call_opcodes(
selector=(lambda _, evm_code_type: evm_code_type == EVMCodeType.LEGACY)
)
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, call_opcode):
pass
""",
{"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_call_opcodes_with_selector_for_evm_code_type",
),
pytest.param(
"""
import pytest
from ethereum_test_tools import Opcodes as Op
@pytest.mark.with_all_call_opcodes(selector=lambda call_opcode: call_opcode == Op.CALL)
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, call_opcode):
pass
""",
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_call_opcodes_with_selector",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_create_opcodes()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, create_opcode):
pass
""",
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_create_opcodes",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_call_opcodes()
@pytest.mark.with_all_precompiles()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, call_opcode, precompile):
pass
""",
{"passed": 4 * 10, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_call_opcodes_and_precompiles",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_call_opcodes()
@pytest.mark.with_all_create_opcodes()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, call_opcode, create_opcode):
pass
""",
{"passed": 2 * 4, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_call_opcodes_and_create_opcodes",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_system_contracts()
@pytest.mark.valid_from("Cancun")
@pytest.mark.valid_until("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, system_contract):
pass
""",
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_system_contracts",
),
pytest.param(
"""
import pytest
from ethereum_test_tools import Transaction
@pytest.mark.with_all_typed_transactions
@pytest.mark.valid_from("Berlin")
@pytest.mark.valid_until("Berlin")
@pytest.mark.state_test_only
def test_case(state_test, typed_transaction):
assert isinstance(typed_transaction, Transaction)
assert typed_transaction.ty in [0, 1] # Berlin supports types 0 and 1
""",
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_typed_transactions_berlin",
),
pytest.param(
"""
import pytest
from ethereum_test_tools import Transaction
@pytest.mark.with_all_typed_transactions()
@pytest.mark.valid_from("London")
@pytest.mark.valid_until("London")
@pytest.mark.state_test_only
def test_case(state_test, typed_transaction, pre):
assert isinstance(typed_transaction, Transaction)
assert typed_transaction.ty in [0, 1, 2] # London supports types 0, 1, 2
""",
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_typed_transactions_london",
),
pytest.param(
"""
import pytest
from ethereum_test_tools import Transaction
from ethereum_test_base_types import AccessList
# Override the type 3 transaction fixture
@pytest.fixture
def type_3_default_transaction(pre):
sender = pre.fund_eoa()
return Transaction(
ty=3,
sender=sender,
max_fee_per_gas=10**10,
max_priority_fee_per_gas=10**9,
max_fee_per_blob_gas=10**8,
gas_limit=300_000,
data=b"\\xFF" * 50,
access_list=[
AccessList(address=0x1111, storage_keys=[10, 20]),
],
blob_versioned_hashes=[
0x0111111111111111111111111111111111111111111111111111111111111111,
],
)
@pytest.mark.with_all_typed_transactions()
@pytest.mark.valid_at("Cancun")
@pytest.mark.state_test_only
def test_case(state_test, typed_transaction, pre):
assert isinstance(typed_transaction, Transaction)
if typed_transaction.ty == 3:
# Verify our override worked
assert typed_transaction.data == b"\\xFF" * 50
assert len(typed_transaction.blob_versioned_hashes) == 1
""",
{"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
None,
id="with_all_typed_transactions_with_override",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(invalid_parameter="invalid")
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 0, "failed": 0, "skipped": 0, "errors": 1},
"Unknown arguments to with_all_tx_types",
id="invalid_covariant_marker_parameter",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(selector=None)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 0, "failed": 0, "skipped": 0, "errors": 1},
"selector must be a function",
id="invalid_selector",
),
pytest.param(
"""
import pytest
@pytest.mark.with_all_tx_types(lambda tx_type: tx_type != 0)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Paris")
@pytest.mark.state_test_only
def test_case(state_test, tx_type):
pass
""",
{"passed": 0, "failed": 0, "skipped": 0, "errors": 1},
"Only keyword arguments are supported",
id="selector_as_positional_argument",
),
pytest.param(
"""
import pytest
def covariant_function(fork):
return [1, 2] if fork.name() == "Paris" else [3, 4, 5]
@pytest.mark.parametrize_by_fork(
argnames=["test_parameter"],
fn=covariant_function,
)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
@pytest.mark.state_test_only
def test_case(state_test, test_parameter):
pass
""",
{"passed": 5, "failed": 0, "skipped": 0, "errors": 0},
None,
id="custom_covariant_marker",
),
pytest.param(
"""
import pytest
def covariant_function(fork):
return [[1, 2], [3, 4]] if fork.name() == "Paris" else [[4, 5], [5, 6], [6, 7]]
@pytest.mark.parametrize_by_fork("test_parameter,test_parameter_2", covariant_function)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
@pytest.mark.state_test_only
def test_case(state_test, test_parameter, test_parameter_2):
pass
""",
{"passed": 5, "failed": 0, "skipped": 0, "errors": 0},
None,
id="multi_parameter_custom_covariant_marker",
),
pytest.param(
"""
import pytest
def covariant_function(fork):
return [
pytest.param(1, id="first_value"),
2,
] if fork.name() == "Paris" else [
pytest.param(3, id="third_value"),
4,
5,
]
@pytest.mark.parametrize_by_fork("test_parameter",covariant_function)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
@pytest.mark.state_test_only
def test_case(state_test, test_parameter):
pass
""",
{"passed": 5, "failed": 0, "skipped": 0, "errors": 0},
None,
id="custom_covariant_marker_pytest_param_id",
),
pytest.param(
"""
import pytest
def covariant_function(fork):
return [
pytest.param(1, 2, id="first_test"),
pytest.param(3, 4, id="second_test"),
] if fork.name() == "Paris" else [
pytest.param(4, 5, id="fourth_test"),
pytest.param(5, 6, id="fifth_test"),
pytest.param(6, 7, id="sixth_test"),
]
@pytest.mark.parametrize_by_fork(argnames=[
"test_parameter", "test_parameter_2"
], fn=covariant_function)
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Shanghai")
@pytest.mark.state_test_only
def test_case(state_test, test_parameter, test_parameter_2):
pass
""",
{"passed": 5, "failed": 0, "skipped": 0, "errors": 0},
None,
id="multi_parameter_custom_covariant_marker_pytest_param_id",
),
],
)
def test_fork_covariant_markers(
pytester: pytest.Pytester, test_function: str, outcomes: dict, error_string: str | None
) -> None:
"""
Test fork covariant markers in an isolated test session, i.e., in
a `fill` execution.
In the case of an error, check that the expected error string is in the
console output.
"""
pytester.makepyfile(test_function)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini")
result.assert_outcomes(**outcomes)
if outcomes["errors"]:
assert error_string is not None
assert error_string in "\n".join(result.stdout.lines)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_bad_command_line_options.py | src/pytest_plugins/forks/tests/test_bad_command_line_options.py | """
Test the correct error is produced with bad/invalid command-line arguments.
"""
import pytest
invalid_cli_option_test_cases = (
(
"from_nonexistent_fork",
(
("--from", "Marge"), # codespell:ignore marge
"Unsupported fork provided to --from: Marge", # codespell:ignore marge
),
),
(
"until_nonexistent_fork",
(
("--until", "Shangbye"),
"Unsupported fork provided to --until: Shangbye",
),
),
(
"fork_nonexistent_fork",
(
("--fork", "Cantcun"),
"Unsupported fork provided to --fork: Cantcun",
),
),
(
"fork_and_from",
(
("--fork", "Frontier", "--from", "Frontier"),
"--fork cannot be used in combination with --from or --until",
),
),
(
"fork_and_until",
(
("--fork", "Frontier", "--until", "Frontier"),
"--fork cannot be used in combination with --from or --until",
),
),
(
"invalid_fork_range",
(
("--from", "Paris", "--until", "Frontier"),
"--from Paris --until Frontier creates an empty fork range",
),
),
)
@pytest.mark.parametrize(
"options, error_string",
[test_case for _, test_case in invalid_cli_option_test_cases],
ids=[test_id for test_id, _ in invalid_cli_option_test_cases],
)
def test_bad_options(pytester: pytest.Pytester, options: tuple, error_string: str) -> None:
"""
Test that a test with an invalid command-line options:
- Creates an outcome with exactly one error.
- Triggers the expected error string in pytest's console output.
Each invalid marker/marker combination is tested with one test in its own
test session.
"""
pytester.makepyfile(
"""
def test_should_not_run(state_test):
assert 0
"""
)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini", "-v", *options)
assert result.ret == pytest.ExitCode.USAGE_ERROR
assert error_string in "\n".join(result.stderr.lines)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_markers.py | src/pytest_plugins/forks/tests/test_markers.py | """Test fork markers and their effect on test parametrization."""
from typing import List
import pytest
from ethereum_clis import TransitionTool
def generate_test(**kwargs: str) -> str:
"""Generate a test function with the given fork markers."""
markers = [f"@pytest.mark.{key}({value})" for key, value in kwargs.items()]
marker_lines = "\n".join(markers)
return f"""
import pytest
{marker_lines}
@pytest.mark.state_test_only
def test_case(state_test):
pass
"""
@pytest.mark.parametrize(
"test_function,pytest_args,outcomes",
[
pytest.param(
generate_test(
valid_until='"Cancun"',
),
[],
{"passed": 10, "failed": 0, "skipped": 1, "errors": 0},
id="valid_until",
),
pytest.param(
generate_test(
valid_until='"Cancun"',
),
["--from=Berlin"],
{"passed": 5, "failed": 0, "skipped": 0, "errors": 0},
id="valid_until,--from",
),
pytest.param(
generate_test(
valid_from='"Paris"',
),
["--until=Prague"],
{"passed": 4, "failed": 0, "skipped": 0, "errors": 0},
id="valid_from",
),
pytest.param(
generate_test(
valid_from='"Paris"',
valid_until='"Cancun"',
),
[],
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
id="valid_from_until",
),
pytest.param(
generate_test(
valid_from='"Paris"',
valid_until='"Cancun"',
),
["--until=Prague"],
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
id="valid_from_until,--until=Prague",
),
pytest.param(
generate_test(
valid_from='"Paris"',
valid_until='"Cancun"',
),
["--until=Shanghai"],
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
id="valid_from_until,--until=Shanghai",
),
pytest.param(
generate_test(
valid_at_transition_to='"Shanghai"',
),
[],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to",
),
pytest.param(
generate_test(
valid_at_transition_to='"Shanghai"',
),
["--until=Prague"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to,--until=Prague",
),
pytest.param(
generate_test(
valid_at_transition_to='"Shanghai"',
),
["--until=Berlin"],
{"passed": 0, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to,--until=Berlin",
),
pytest.param(
generate_test(
valid_at_transition_to='"Paris", subsequent_forks=True',
),
["--until=Prague"],
{"passed": 3, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to,subsequent_forks=True",
),
pytest.param(
generate_test(
valid_at_transition_to='"Paris", subsequent_forks=True, until="Cancun"',
),
["--until=Prague"],
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to,subsequent_forks=True,until",
),
pytest.param(
generate_test(
valid_at_transition_to='"Cancun"',
),
["--fork=ShanghaiToCancunAtTime15k"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to,--fork=transition_fork_only",
),
pytest.param(
generate_test(
valid_from='"Osaka"',
valid_until='"BPO1"',
),
["--until=BPO1"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_until_bpo_fork_without_bpo_test_marker",
),
pytest.param(
generate_test(
valid_from='"Osaka"',
valid_until='"BPO1"',
valid_for_bpo_forks="",
),
["--until=BPO1"],
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
id="valid_until_bpo_fork_with_bpo_test_marker",
),
pytest.param(
generate_test(
valid_at_transition_to='"Osaka", subsequent_forks=True, until="BPO1"',
),
["--until=BPO1"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_without_bpo_test_marker",
),
pytest.param(
generate_test(
valid_at_transition_to='"Osaka", subsequent_forks=True, until="BPO1"',
valid_for_bpo_forks="",
),
["--until=BPO1"],
{"passed": 2, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_with_bpo_test_marker",
),
pytest.param(
generate_test(
valid_at_transition_to='"Cancun"',
),
["--fork=Cancun"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to_with_exact_fork",
),
pytest.param(
generate_test(
valid_at_transition_to='"Cancun"',
),
["--from=Cancun", "--until=Prague"],
{"passed": 1, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_to_from_fork_until_later_fork",
),
pytest.param(
generate_test(
valid_at_transition_to='"BPO1"',
valid_for_bpo_forks="",
),
["--fork=Osaka"],
{"passed": 0, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_with_bpo_test_marker_fork_parent",
),
pytest.param(
generate_test(
valid_at_transition_to='"BPO1"',
valid_for_bpo_forks="",
),
["--from=Osaka", "--until=Osaka"],
{"passed": 0, "failed": 0, "skipped": 0, "errors": 0},
id="valid_at_transition_with_bpo_test_marker_from_parent",
),
],
)
def test_fork_markers(
pytester: pytest.Pytester,
test_function: str,
outcomes: dict,
pytest_args: List[str],
default_t8n: TransitionTool,
) -> None:
"""
Test fork markers in an isolated test session, i.e., in
a `fill` execution.
In the case of an error, check that the expected error string is in the
console output.
"""
pytester.makepyfile(test_function)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
assert default_t8n.server_url is not None
result = pytester.runpytest(
"-c",
"pytest-fill.ini",
"-v",
*pytest_args,
"--t8n-server-url",
default_t8n.server_url,
)
result.assert_outcomes(**outcomes)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_bad_validity_markers.py | src/pytest_plugins/forks/tests/test_bad_validity_markers.py | """
Test the correct error is produced with bad/invalid validity markers.
"""
import pytest
invalid_merge_marker = "Marge" # codespell:ignore marge
invalid_validity_marker_test_cases = (
(
"too_many_valid_from_markers",
(
"""
import pytest
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_from("Paris")
def test_case(state_test):
assert 0
""",
"Too many 'valid_from' markers applied to test",
),
),
(
"too_many_valid_until_markers",
(
"""
import pytest
@pytest.mark.valid_until("Paris")
@pytest.mark.valid_until("Paris")
def test_case(state_test):
assert 0
""",
"Too many 'valid_until' markers applied to test",
),
),
(
"too_many_valid_at_transition_to_markers",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Paris")
@pytest.mark.valid_at_transition_to("Paris")
def test_case(state_test):
assert 0
""",
"Too many 'valid_at_transition_to' markers applied to test",
),
),
(
"valid_from_no_args",
(
"""
import pytest
@pytest.mark.valid_from()
def test_case(state_test):
assert 0
""",
"Missing fork argument with 'valid_from' marker",
),
),
(
"valid_until_no_args",
(
"""
import pytest
@pytest.mark.valid_until()
def test_case(state_test):
assert 0
""",
"Missing fork argument with 'valid_until' marker",
),
),
(
"valid_at_transition_to_no_args",
(
"""
import pytest
@pytest.mark.valid_at_transition_to()
def test_case(state_test):
assert 0
""",
"Missing fork argument with 'valid_at_transition_to' marker",
),
),
(
"valid_from_duplicate_arg",
(
"""
import pytest
@pytest.mark.valid_from("Paris", "Paris")
def test_case(state_test):
assert 0
""",
"Duplicate argument specified in 'valid_from'",
),
),
(
"valid_until_duplicate_arg",
(
"""
import pytest
@pytest.mark.valid_until("Paris", "Paris")
def test_case(state_test):
assert 0
""",
"Duplicate argument specified in 'valid_until'",
),
),
(
"valid_at_transition_duplicate_arg",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Paris", "Paris")
def test_case(state_test):
assert 0
""",
"Duplicate argument specified in 'valid_at_transition_to'",
),
),
(
"valid_from_nonexistent_fork",
(
f"""
import pytest
@pytest.mark.valid_from("{invalid_merge_marker}")
def test_case(state_test):
assert 0
""",
f"Invalid fork '{invalid_merge_marker}'",
),
),
(
"valid_until_nonexistent_fork",
(
"""
import pytest
@pytest.mark.valid_until("Shangbye")
def test_case(state_test):
assert 0
""",
"Invalid fork 'Shangbye'",
),
),
(
"valid_at_transition_to_nonexistent_fork",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Cantcun")
def test_case(state_test):
assert 0
""",
"Invalid fork 'Cantcun'",
),
),
(
"valid_at_transition_to_until_nonexistent_fork",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Shanghai", until="Cantcun")
def test_case(state_test):
assert 0
""",
"Invalid fork 'Cantcun'",
),
),
(
"valid_at_transition_to_and_valid_from",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Cancun")
@pytest.mark.valid_from("Paris")
def test_case(state_test):
assert 0
""",
"The markers 'valid_from' and 'valid_at_transition_to' can't be combined",
),
),
(
"valid_at_transition_to_and_valid_until",
(
"""
import pytest
@pytest.mark.valid_at_transition_to("Shanghai")
@pytest.mark.valid_until("Cancun")
def test_case(state_test):
assert 0
""",
"The markers 'valid_until' and 'valid_at_transition_to' can't be combined",
),
),
(
"invalid_validity_range",
(
"""
import pytest
@pytest.mark.valid_from("Paris")
@pytest.mark.valid_until("Frontier")
def test_case(state_test):
assert 0
""",
"fork validity markers generate an empty fork range",
),
),
)
@pytest.mark.parametrize(
"test_function, error_string",
[test_case for _, test_case in invalid_validity_marker_test_cases],
ids=[test_id for test_id, _ in invalid_validity_marker_test_cases],
)
def test_invalid_validity_markers(
pytester: pytest.Pytester, error_string: str, test_function: str
) -> None:
"""
Test that a test with an invalid marker cases:
- Creates an outcome with exactly one error.
- Triggers the expected error string in pytest's console output.
Each invalid marker/marker combination is tested with one test in its own
test session.
"""
pytester.makepyfile(test_function)
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
result = pytester.runpytest("-c", "pytest-fill.ini")
result.assert_outcomes(
passed=0,
failed=0,
skipped=0,
errors=1,
)
assert error_string in "\n".join(result.stdout.lines)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py | src/pytest_plugins/forks/tests/test_fork_parametrizer_types.py | """Test the types used to parametrize forks."""
from typing import List
import pytest
from _pytest.mark.structures import ParameterSet
from ethereum_test_forks import Frontier
from ..forks import (
ForkCovariantParameter,
ForkParametrizer,
parameters_from_fork_parametrizer_list,
)
@pytest.mark.parametrize(
"fork_parametrizers,expected_names,expected_parameter_sets",
[
pytest.param(
[ForkParametrizer(fork=Frontier)],
["fork"],
[pytest.param(Frontier)],
id="only_fork",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(names=["some_value"], values=[pytest.param(1)])
],
)
],
["fork", "some_value"],
[pytest.param(Frontier, 1)],
id="fork_with_single_covariant_parameter",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(
names=["some_value"],
values=[pytest.param(1), pytest.param(2)],
)
],
)
],
["fork", "some_value"],
[pytest.param(Frontier, 1), pytest.param(Frontier, 2)],
id="fork_with_single_covariant_parameter_multiple_values",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(
names=["some_value"],
values=[
pytest.param(1, marks=[pytest.mark.some_mark]),
pytest.param(2),
],
)
],
)
],
["fork", "some_value"],
[pytest.param(Frontier, 1, marks=[pytest.mark.some_mark]), pytest.param(Frontier, 2)],
id="fork_with_single_covariant_parameter_multiple_values_one_mark",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(names=["some_value"], values=[pytest.param(1)]),
ForkCovariantParameter(names=["another_value"], values=[pytest.param(2)]),
],
)
],
["fork", "some_value", "another_value"],
[pytest.param(Frontier, 1, 2)],
id="fork_with_multiple_covariant_parameters",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(names=["some_value"], values=[pytest.param(1)]),
ForkCovariantParameter(
names=["another_value"],
values=[pytest.param(2), pytest.param(3)],
),
],
)
],
["fork", "some_value", "another_value"],
[pytest.param(Frontier, 1, 2), pytest.param(Frontier, 1, 3)],
id="fork_with_multiple_covariant_parameters_multiple_values",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(
names=["some_value", "another_value"],
values=[
pytest.param(1, "a"),
pytest.param(2, "b"),
],
)
],
)
],
["fork", "some_value", "another_value"],
[pytest.param(Frontier, 1, "a"), pytest.param(Frontier, 2, "b")],
id="fork_with_single_multi_value_covariant_parameter_multiple_values",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(
names=["some_value", "another_value"],
values=[
pytest.param(1, "a"),
pytest.param(2, "b"),
],
),
ForkCovariantParameter(
names=["yet_another_value", "last_value"],
values=[
pytest.param(3, "x"),
pytest.param(4, "y"),
],
),
],
)
],
["fork", "some_value", "another_value", "yet_another_value", "last_value"],
[
pytest.param(Frontier, 1, "a", 3, "x"),
pytest.param(Frontier, 1, "a", 4, "y"),
pytest.param(Frontier, 2, "b", 3, "x"),
pytest.param(Frontier, 2, "b", 4, "y"),
],
id="fork_with_multiple_multi_value_covariant_parameter_multiple_values",
),
pytest.param(
[
ForkParametrizer(
fork=Frontier,
fork_covariant_parameters=[
ForkCovariantParameter(
names=["shared_value", "different_value_1"],
values=[
pytest.param(1, "a"),
pytest.param(2, "b"),
],
),
ForkCovariantParameter(
names=["shared_value", "different_value_2"],
values=[
pytest.param(1, "x"),
pytest.param(2, "y"),
],
),
],
)
],
["fork", "shared_value", "different_value_1", "different_value_2"],
[
pytest.param(Frontier, 1, "a", "x"),
pytest.param(Frontier, 2, "b", "y"),
],
id="fork_with_multiple_multi_value_covariant_parameter_shared_values",
),
],
)
def test_fork_parametrizer(
fork_parametrizers: List[ForkParametrizer],
expected_names: List[str],
expected_parameter_sets: List[ParameterSet],
) -> None:
"""
Test the fork parametrizer correctly parametrizes using the fork name.
"""
argnames, values = parameters_from_fork_parametrizer_list(fork_parametrizers)
assert argnames == expected_names
assert len(values) == len(expected_parameter_sets)
for i in range(len(values)):
assert len(values[i].values) == len(expected_parameter_sets[i].values)
for j in range(len(values[i].values)):
assert values[i].values[j] == expected_parameter_sets[i].values[j]
assert len(values[i].marks) == len(expected_parameter_sets[i].marks)
for j in range(len(values[i].marks)):
assert values[i].marks[j] == expected_parameter_sets[i].marks[j] # type: ignore
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/forks/tests/__init__.py | src/pytest_plugins/forks/tests/__init__.py | """Tests for the forks plugin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/consume.py | src/pytest_plugins/consume/consume.py | """
A pytest plugin providing common functionality for consuming test fixtures.
"""
import re
import sys
import tarfile
from dataclasses import dataclass
from io import BytesIO
from pathlib import Path
from typing import Any, Generator, List, Optional, Tuple
from urllib.parse import urlparse
import platformdirs
import pytest
import requests
import rich
from cli.gen_index import generate_fixtures_index
from ethereum_test_fixtures import BaseFixture, FixtureFormat
from ethereum_test_fixtures.consume import IndexFile, TestCases
from ethereum_test_forks import get_forks, get_relative_fork_markers, get_transition_forks
from ethereum_test_tools.utility.versioning import get_current_commit_hash_or_tag
from .releases import ReleaseTag, get_release_page_url, get_release_url, is_release_url, is_url
CACHED_DOWNLOADS_DIRECTORY = (
Path(platformdirs.user_cache_dir("ethereum-execution-spec-tests")) / "cached_downloads"
)
def print_migration_warning(terminalreporter: Any = None) -> None:
"""Print migration warning about repository merge."""
lines = [
"",
"=" * 80,
"⚠️ IMPORTANT: Repository Migration in Progress - 'The Weld' ⚠️",
"=" * 80,
"",
"This repository is being merged into ethereum/execution-specs (EELS) during the",
"week of October 20-24, 2025.",
"",
"📅 Timeline:",
" • Week of Oct 13-17: Closing PRs, porting issues to EELS",
" • Week of Oct 20-24: Migration week - fixing CI and fixture building",
" • Oct 24 (ETA): Weld finalized - all development moves to EELS",
"",
"👉 What This Means:",
" • Test Contributors: After Oct 24, reopen draft PRs in EELS",
" • All future test development happens in EELS after completion",
" • Fixture releases continue as usual during transition",
"",
"For details: https://steel.ethereum.foundation/blog/2025-09-11_weld-announcement/",
"=" * 80,
"",
]
if terminalreporter:
for line in lines:
if "⚠️" in line or "IMPORTANT" in line:
terminalreporter.write_line(line, bold=True, yellow=True)
elif line.startswith("="):
terminalreporter.write_line(line, yellow=True)
else:
terminalreporter.write_line(line)
else:
for line in lines:
print(line)
def default_input() -> str:
"""
Directory (default) to consume generated test fixtures from. Defined as a
function to allow for easier testing.
"""
return "./fixtures"
def default_html_report_file_path() -> str:
"""
Filepath (default) to store the generated HTML test report. Defined as a
function to allow for easier testing.
"""
return ".meta/report_consume.html"
class FixtureDownloader:
"""Handles downloading and extracting fixture archives."""
def __init__(self, url: str, destination_folder: Path): # noqa: D107
self.url = url
self.destination_folder = destination_folder
self.parsed_url = urlparse(url)
self.archive_name = self.strip_archive_extension(Path(self.parsed_url.path).name)
def download_and_extract(self) -> Tuple[bool, Path]:
"""
Download the URL and extract it locally if it hasn't already been
downloaded.
"""
if self.destination_folder.exists():
return True, self.detect_extracted_directory()
return False, self.fetch_and_extract()
@staticmethod
def strip_archive_extension(filename: str) -> str:
"""Remove .tar.gz or .tgz extensions from filename."""
return filename.removesuffix(".tar.gz").removesuffix(".tgz")
@staticmethod
def get_cache_path(url: str, cache_folder: Path) -> Path:
"""Get the appropriate cache path for a given URL."""
parsed_url = urlparse(url)
archive_name = FixtureDownloader.strip_archive_extension(Path(parsed_url.path).name)
if is_release_url(url):
version = Path(parsed_url.path).parts[-2]
parts = parsed_url.path.strip("/").split("/")
org_repo = (
f"{parts[0]}/{parts[1]}"
if parsed_url.netloc == "github.com" and len(parts) >= 2
else "other"
)
return cache_folder / org_repo / version / archive_name
return cache_folder / "other" / archive_name
def fetch_and_extract(self) -> Path:
"""Download and extract an archive from the given URL."""
self.destination_folder.mkdir(parents=True, exist_ok=True)
response = requests.get(self.url)
response.raise_for_status()
with tarfile.open(fileobj=BytesIO(response.content), mode="r:gz") as tar:
tar.extractall(path=self.destination_folder)
return self.detect_extracted_directory()
def detect_extracted_directory(self) -> Path:
"""
Detect a single top-level dir within the extracted archive, otherwise
return destination_folder.
""" # noqa: D200
extracted_dirs = [
d for d in self.destination_folder.iterdir() if d.is_dir() and d.name != ".meta"
]
return extracted_dirs[0] if len(extracted_dirs) == 1 else self.destination_folder
@dataclass
class FixturesSource:
"""Represents the source of test fixtures."""
input_option: str
path: Path
url: str = ""
release_page: str = ""
is_local: bool = True
is_stdin: bool = False
was_cached: bool = False
extract_to_local_path: bool = False
@classmethod
def from_input(
cls,
input_source: str,
cache_folder: Optional[Path] = None,
extract_to: Optional[Path] = None,
) -> "FixturesSource":
"""Determine the fixture source type and return an instance."""
if cache_folder is None:
cache_folder = CACHED_DOWNLOADS_DIRECTORY
if input_source == "stdin":
return cls(input_option=input_source, path=Path(), is_local=False, is_stdin=True)
if is_release_url(input_source):
return cls.from_release_url(input_source, cache_folder, extract_to)
if is_url(input_source):
return cls.from_url(input_source, cache_folder, extract_to)
if ReleaseTag.is_release_string(input_source):
return cls.from_release_spec(input_source, cache_folder, extract_to)
return cls.validate_local_path(Path(input_source))
@classmethod
def from_release_url(
cls, url: str, cache_folder: Optional[Path] = None, extract_to: Optional[Path] = None
) -> "FixturesSource":
"""Create a fixture source from a supported github repo release URL."""
if cache_folder is None:
cache_folder = CACHED_DOWNLOADS_DIRECTORY
destination_folder = extract_to or FixtureDownloader.get_cache_path(url, cache_folder)
downloader = FixtureDownloader(url, destination_folder)
# Skip cache check for extract_to (always download fresh)
if extract_to is not None:
was_cached = False
path = downloader.fetch_and_extract()
else:
was_cached, path = downloader.download_and_extract()
return cls(
input_option=url,
path=path,
url=url,
release_page="",
is_local=False,
was_cached=was_cached,
extract_to_local_path=extract_to is not None,
)
@classmethod
def from_url(
cls, url: str, cache_folder: Optional[Path] = None, extract_to: Optional[Path] = None
) -> "FixturesSource":
"""Create a fixture source from a direct URL."""
if cache_folder is None:
cache_folder = CACHED_DOWNLOADS_DIRECTORY
destination_folder = extract_to or FixtureDownloader.get_cache_path(url, cache_folder)
downloader = FixtureDownloader(url, destination_folder)
# Skip cache check for extract_to (always download fresh)
if extract_to is not None:
was_cached = False
path = downloader.fetch_and_extract()
else:
was_cached, path = downloader.download_and_extract()
return cls(
input_option=url,
path=path,
url=url,
release_page="",
is_local=False,
was_cached=was_cached,
extract_to_local_path=extract_to is not None,
)
@classmethod
def from_release_spec(
cls, spec: str, cache_folder: Optional[Path] = None, extract_to: Optional[Path] = None
) -> "FixturesSource":
"""
Create a fixture source from a release spec (e.g., develop@latest).
"""
if cache_folder is None:
cache_folder = CACHED_DOWNLOADS_DIRECTORY
url = get_release_url(spec)
release_page = get_release_page_url(url)
destination_folder = extract_to or FixtureDownloader.get_cache_path(url, cache_folder)
downloader = FixtureDownloader(url, destination_folder)
# Skip cache check for extract_to (always download fresh)
if extract_to is not None:
was_cached = False
path = downloader.fetch_and_extract()
else:
was_cached, path = downloader.download_and_extract()
return cls(
input_option=spec,
path=path,
url=url,
release_page=release_page,
is_local=False,
was_cached=was_cached,
extract_to_local_path=extract_to is not None,
)
@staticmethod
def validate_local_path(path: Path) -> "FixturesSource":
"""
Validate that a local fixture path exists and contains JSON files.
"""
if not path.exists():
pytest.exit(f"Specified fixture directory '{path}' does not exist.")
if not any(path.glob("**/*.json")):
pytest.exit(f"Specified fixture directory '{path}' does not contain any JSON files.")
return FixturesSource(input_option=str(path), path=path)
class SimLimitBehavior:
"""Represents options derived from the `--sim.limit` argument."""
def __init__(self, pattern: str, collectonly: bool = False): # noqa: D107
self.pattern = pattern
self.collectonly = collectonly
@staticmethod
def _escape_id(pattern: str) -> str:
"""
Escape regex char in the pattern; prepend and append '.*' (for `fill`
IDs).
The `pattern` is prefixed and suffixed with a wildcard match to allow
`fill` test case IDs to be specified, otherwise the full `consume` test
ID must be specified.
"""
return f".*{re.escape(pattern)}.*"
@classmethod
def from_string(cls, pattern: str) -> "SimLimitBehavior":
"""
Parse the `--sim.limit` argument and return a `SimLimitBehavior`
instance.
If `pattern`:
- Is "collectonly", enable collection mode without filtering.
- Starts with "collectonly:", enable collection mode and use the
rest as a regex pattern.
- Starts with "id:", treat the rest as a literal test ID and escape
special regex chars.
- Starts with "collectonly:id:", enable collection mode with a
literal test ID.
"""
if pattern == "collectonly":
return cls(pattern=".*", collectonly=True)
if pattern.startswith("collectonly:id:"):
literal_id = pattern.removeprefix("collectonly:id:")
if not literal_id:
raise ValueError("Empty literal ID provided.")
return cls(pattern=cls._escape_id(literal_id), collectonly=True)
if pattern.startswith("collectonly:"):
return cls(pattern=pattern.removeprefix("collectonly:"), collectonly=True)
if pattern.startswith("id:"):
literal_id = pattern.removeprefix("id:")
if not literal_id:
raise ValueError("Empty literal ID provided.")
return cls(pattern=cls._escape_id(literal_id))
return cls(pattern=pattern)
def pytest_addoption(parser: pytest.Parser) -> None: # noqa: D103
consume_group = parser.getgroup(
"consume", "Arguments related to consuming fixtures via a client"
)
consume_group.addoption(
"--input",
action="store",
dest="fixtures_source",
default=None,
help=(
"Specify the JSON test fixtures source. Can be a local directory, a URL pointing to a "
" fixtures.tar.gz archive, a release name and version in the form of `NAME@v1.2.3` "
"(`stable` and `develop` are valid release names, and `latest` is a valid version), "
"or the special keyword 'stdin'. "
f"Defaults to the following local directory: '{default_input()}'."
),
)
consume_group.addoption(
"--cache-folder",
action="store",
dest="fixture_cache_folder",
default=CACHED_DOWNLOADS_DIRECTORY,
help=(
"Specify the path where the downloaded fixtures are cached. "
f"Defaults to the following directory: '{CACHED_DOWNLOADS_DIRECTORY}'."
),
)
consume_group.addoption(
"--extract-to",
action="store",
dest="extract_to_folder",
default=None,
help=(
"Extract downloaded fixtures to the specified directory. Only valid with 'cache' "
"command. When used, fixtures are extracted directly to this path instead of the "
"user's execution-spec-tests cache directory."
),
)
if "cache" in sys.argv:
return
consume_group.addoption(
"--no-html",
action="store_true",
dest="disable_html",
default=False,
help=(
"Don't generate an HTML test report (in the output directory). "
"The --html flag can be used to specify a different path."
),
)
consume_group.addoption(
"--sim.limit",
action="store",
dest="sim_limit",
type=SimLimitBehavior.from_string,
default=SimLimitBehavior(".*"),
help=(
"Filter tests by either a regex pattern or a literal test case ID. To match a "
"test case by its exact ID, prefix the ID with `id:`. The string following `id:` "
"will be automatically escaped so that all special regex characters are treated as "
"literals. Without the `id:` prefix, the argument is interpreted as a Python regex "
"pattern. To see which test cases are matched, without executing them, prefix with "
'`collectonly:`, e.g. `--sim.limit "collectonly:.*eip4788.*fork_Prague.*"`. '
"To list all available test case IDs, set the value to `collectonly`."
),
)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config: pytest.Config) -> None: # noqa: D103
"""
Pytest hook called after command line options have been parsed and before
test collection begins.
`@pytest.hookimpl(tryfirst=True)` is applied to ensure that this hook is
called before the pytest-html plugin's pytest_configure to ensure that it
uses the modified `htmlpath` option.
"""
print_migration_warning()
# Validate --extract-to usage
if config.option.extract_to_folder is not None and "cache" not in sys.argv:
pytest.exit("The --extract-to flag is only valid with the 'cache' command.")
fixtures_source: FixturesSource
if config.option.fixtures_source is None:
# NOTE: Setting the default value here is necessary for correct
# stdin/piping behavior.
fixtures_source = FixturesSource(input_option=default_input(), path=Path(default_input()))
else:
# NOTE: Setting `type=FixturesSource.from_input` in pytest_addoption()
# causes the option to be evaluated twice which breaks the result of
# `was_cached`; the work-around is to call it manually here.
fixtures_source = FixturesSource.from_input(
config.option.fixtures_source,
Path(config.option.fixture_cache_folder),
Path(config.option.extract_to_folder)
if config.option.extract_to_folder is not None
else None,
)
config.fixtures_source = fixtures_source # type: ignore[attr-defined]
config.fixture_source_flags = ["--input", fixtures_source.input_option] # type: ignore[attr-defined]
if "cache" in sys.argv and not fixtures_source:
pytest.exit("The --input flag is required when using the cache command.")
if "cache" in sys.argv:
reason = ""
if fixtures_source.extract_to_local_path:
reason += "Fixtures downloaded and extracted to specified directory."
elif fixtures_source.was_cached:
reason += "Fixtures already cached."
elif not fixtures_source.is_local:
reason += "Fixtures downloaded and cached."
reason += f"\nPath: {fixtures_source.path}"
reason += f"\nInput: {fixtures_source.url or fixtures_source.path}"
if fixtures_source.release_page:
reason += f"\nRelease page: {fixtures_source.release_page}"
pytest.exit(
returncode=0,
reason=reason,
)
if fixtures_source.is_stdin:
config.test_cases = TestCases.from_stream(sys.stdin) # type: ignore[attr-defined]
return
index_file = fixtures_source.path / ".meta" / "index.json"
index_file.parent.mkdir(parents=True, exist_ok=True)
if not index_file.exists():
rich.print(f"Generating index file [bold cyan]{index_file}[/]...")
generate_fixtures_index(
fixtures_source.path,
quiet_mode=False,
force_flag=False,
)
index = IndexFile.model_validate_json(index_file.read_text())
config.test_cases = index.test_cases # type: ignore[attr-defined]
for fixture_format in BaseFixture.formats.values():
config.addinivalue_line(
"markers",
f"{fixture_format.format_name}: Tests in `{fixture_format.format_name}` format ",
)
# All forked defined within EEST
all_forks = {fork for fork in set(get_forks()) | get_transition_forks() if not fork.ignore()}
# Append all forks within the index file (compatibility with
# `ethereum/tests`)
all_forks.update(getattr(index, "forks", []))
for fork in all_forks:
config.addinivalue_line("markers", f"{fork}: Tests for the {fork} fork")
if config.option.sim_limit:
if config.option.dest_regex != ".*":
pytest.exit(
"Both the --sim.limit (via env var?) and the --regex flags are set. "
"Please only set one of them."
)
config.option.dest_regex = config.option.sim_limit.pattern
if config.option.sim_limit.collectonly:
config.option.collectonly = True
config.option.verbose = -1 # equivalent to -q; only print test ids
if config.option.collectonly or config.option.markers:
return
if not config.getoption("disable_html") and config.getoption("htmlpath") is None:
# generate an html report by default, unless explicitly disabled
config.option.htmlpath = Path(default_html_report_file_path())
def pytest_html_report_title(report: Any) -> None:
"""Set the HTML report title (pytest-html plugin)."""
report.title = "Consume Test Report"
def pytest_report_header(config: pytest.Config) -> List[str]:
"""Add the consume version and fixtures source to the report header."""
source = config.fixtures_source # type: ignore[attr-defined]
lines = [
f"consume ref: {get_current_commit_hash_or_tag()}",
f"fixtures: {source.path}",
]
if not source.is_local and not source.is_stdin:
lines.append(f"fixtures url: {source.url}")
lines.append(f"fixtures release: {source.release_page}")
return lines
@pytest.fixture(scope="session")
def fixture_source_flags(request: pytest.FixtureRequest) -> List[str]:
"""Return the input flags used to specify the JSON test fixtures source."""
return request.config.fixture_source_flags # type: ignore[attr-defined]
@pytest.fixture(scope="session")
def fixtures_source(request: pytest.FixtureRequest) -> FixturesSource: # noqa: D103
return request.config.fixtures_source # type: ignore[attr-defined]
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""
Generate test cases for every test fixture in all the JSON fixture files
within the specified fixtures directory, or read from stdin if the
directory is 'stdin'.
"""
if "cache" in sys.argv:
return
test_cases = metafunc.config.test_cases # type: ignore[attr-defined]
supported_fixture_formats: List[FixtureFormat] = getattr(
metafunc.config, "supported_fixture_formats", []
)
param_list = []
for test_case in test_cases:
if test_case.format not in supported_fixture_formats:
continue
fork_markers = get_relative_fork_markers(test_case.fork, strict_mode=False)
param = pytest.param(
test_case,
id=test_case.id,
marks=[getattr(pytest.mark, m) for m in fork_markers]
+ [getattr(pytest.mark, test_case.format.format_name)],
)
param_list.append(param)
metafunc.parametrize("test_case", param_list)
if "client_type" in metafunc.fixturenames:
metafunc.parametrize(
"client_type",
metafunc.config.hive_execution_clients, # type: ignore[attr-defined]
ids=[client.name for client in metafunc.config.hive_execution_clients], # type: ignore[attr-defined]
)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_terminal_summary(
terminalreporter: Any,
exitstatus: int,
config: pytest.Config,
) -> Generator[None, None, None]:
"""Print migration warning at end of test session."""
del exitstatus
yield
if not hasattr(config, "workerinput"):
print_migration_warning(terminalreporter)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/__init__.py | src/pytest_plugins/consume/__init__.py | """Pytest plugins for consume commands."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/releases.py | src/pytest_plugins/consume/releases.py | """Procedures to consume fixtures from Github releases."""
import json
import os
import re
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import List
from urllib.parse import urlparse
import platformdirs
import requests
from pydantic import BaseModel, Field, RootModel
CACHED_RELEASE_INFORMATION_FILE = (
Path(platformdirs.user_cache_dir("ethereum-execution-spec-tests")) / "release_information.json"
)
SUPPORTED_REPOS = ["ethereum/execution-spec-tests", "ethereum/tests", "ethereum/legacytests"]
class NoSuchReleaseError(Exception):
"""Raised when a release does not exist."""
def __init__(self, release_string: str):
"""Initialize the exception."""
super().__init__(f"Unknown release source: {release_string}")
class AssetNotFoundError(Exception):
"""Raised when a release has no assets."""
def __init__(self, release_string: str):
"""Initialize the exception."""
super().__init__(f"Asset not found: {release_string}")
@dataclass(kw_only=True)
class ReleaseTag:
"""A descriptor for a release."""
tag_name: str
version: str | None
@classmethod
def from_string(cls, release_string: str) -> "ReleaseTag":
"""
Create a release descriptor from a string.
The release source can be in the format `tag_name@version` or just
`tag_name`.
"""
version: str | None
if "@" in release_string:
tag_name, version = release_string.split("@")
if version == "" or version.lower() == "latest":
version = None
else:
tag_name = release_string
version = None
return cls(tag_name=tag_name, version=version)
@staticmethod
def is_release_string(release_string: str) -> bool:
"""Check if the release string is in the correct format."""
return "@" in release_string
def __eq__(self, value: object) -> bool:
"""
Check if the release descriptor matches the string value.
Returns True if the value is the same as the tag name or the tag name
and version.
"""
assert isinstance(value, str), f"Expected a string, but got: {value}"
if self.version is not None:
# normal release, e.g., stable@v4.0.0
normal_release_match = value == self.version
# pre release, e.g., pectra-devnet-6@v1.0.0
pre_release_match = value == f"{self.tag_name}@{self.version}"
return normal_release_match or pre_release_match
return value.startswith(self.tag_name)
@property
def asset_name(self) -> str:
"""Get the asset name."""
return f"fixtures_{self.tag_name}.tar.gz"
class Asset(BaseModel):
"""Information about a release asset."""
url: str = Field(..., alias="browser_download_url")
id: int
name: str
content_type: str
size: int
class Assets(RootModel[List[Asset]]):
"""A list of assets and their information."""
root: List[Asset]
def __contains__(self, release_descriptor: ReleaseTag) -> bool:
"""Check if the assets contain the release descriptor."""
return any(release_descriptor.asset_name == asset.name for asset in self.root)
class ReleaseInformation(BaseModel):
"""Information about a release."""
url: str = Field(..., alias="html_url")
id: int
tag_name: str
name: str
created_at: datetime
published_at: datetime
assets: Assets
def __contains__(self, release_descriptor: ReleaseTag) -> bool:
"""Check if the release information contains the release descriptor."""
if release_descriptor.version is not None:
return release_descriptor == self.tag_name
for asset in self.assets.root:
if asset.name == release_descriptor.asset_name:
return True
return False
def get_asset(self, release_descriptor: ReleaseTag) -> Asset:
"""Get the asset URL."""
for asset in self.assets.root:
if asset.name == release_descriptor.asset_name:
return asset
raise AssetNotFoundError(release_descriptor.tag_name)
class Releases(RootModel[List[ReleaseInformation]]):
"""A list of releases and their information."""
root: List[ReleaseInformation]
def is_docker_or_ci() -> bool:
"""
Check if the code is running inside a Docker container or a CI environment.
"""
return "GITHUB_ACTIONS" in os.environ or Path("/.dockerenv").exists()
def is_url(string: str) -> bool:
"""Check if a string is a remote URL."""
result = urlparse(string)
return all([result.scheme, result.netloc])
def is_release_url(input_str: str) -> bool:
"""Check if the release string is a URL."""
if not is_url(input_str):
return False
repo_pattern = "|".join(re.escape(repo) for repo in SUPPORTED_REPOS)
regex_pattern = rf"https://github\.com/({repo_pattern})/releases/download/"
return re.match(regex_pattern, input_str) is not None
def parse_release_information(release_information: List) -> List[ReleaseInformation]:
"""Parse the release information from the Github API."""
return Releases.model_validate(release_information).root
def download_release_information(destination_file: Path | None) -> List[ReleaseInformation]:
"""
Download all releases from the GitHub API, handling pagination properly.
GitHub's API returns releases in pages of 30 by default. This function
follows the pagination links to ensure we get every release, which is
crucial for finding older version or latest releases.
"""
all_releases = []
for repo in SUPPORTED_REPOS:
current_url: str | None = f"https://api.github.com/repos/{repo}/releases"
max_pages = 2
while current_url and max_pages > 0:
max_pages -= 1
response = requests.get(current_url)
response.raise_for_status()
all_releases.extend(response.json())
current_url = None
if "link" in response.headers:
for link in requests.utils.parse_header_links(response.headers["link"]):
if link["rel"] == "next":
current_url = link["url"]
break
if destination_file:
destination_file.parent.mkdir(parents=True, exist_ok=True)
with open(destination_file, "w") as file:
json.dump(all_releases, file)
return parse_release_information(all_releases)
def parse_release_information_from_file(
release_information_file: Path,
) -> List[ReleaseInformation]:
"""Parse the release information from a file."""
with open(release_information_file, "r") as file:
release_information = json.load(file)
return parse_release_information(release_information)
def get_release_url_from_release_information(
release_string: str, release_information: List[ReleaseInformation]
) -> str:
"""Get the URL for a specific release."""
release_descriptor = ReleaseTag.from_string(release_string)
for release in release_information:
if release_descriptor in release:
return release.get_asset(release_descriptor).url
raise NoSuchReleaseError(release_string)
def get_release_page_url(release_string: str) -> str:
"""
Return the GitHub Release page URL for a specific release descriptor.
This function can handle:
- A standard release string (e.g., "eip7692@latest") from
execution-spec-tests only.
- A direct asset download link (e.g.,
"https://github.com/ethereum/execution-spec-tests/releases/
download/v4.0.0/fixtures_eip7692.tar.gz").
"""
release_information = get_release_information()
# Case 1: If it's a direct GitHub Releases download link, find which
# release in `release_information` has an asset with this exact URL.
repo_pattern = "|".join(re.escape(repo) for repo in SUPPORTED_REPOS)
regex_pattern = rf"https://github\.com/({repo_pattern})/releases/download/"
if re.match(regex_pattern, release_string):
for release in release_information:
for asset in release.assets.root:
if asset.url == release_string:
return release.url # The HTML page for this release
raise NoSuchReleaseError(f"No release found for asset URL: {release_string}")
# Case 2: Otherwise, treat it as a release descriptor (e.g.,
# "eip7692@latest")
release_descriptor = ReleaseTag.from_string(release_string)
for release in release_information:
if release_descriptor in release:
return release.url
# If nothing matched, raise
raise NoSuchReleaseError(release_string)
def get_release_information() -> List[ReleaseInformation]:
"""
Get the release information.
First check if the cached release information file exists. If it does, but
it is older than 4 hours, delete the file, unless running inside a CI
environment or a Docker container. Then download the release information
from the Github API and save it to the cache file.
"""
if CACHED_RELEASE_INFORMATION_FILE.exists():
last_modified = CACHED_RELEASE_INFORMATION_FILE.stat().st_mtime
if (datetime.now().timestamp() - last_modified) < 4 * 60 * 60 or is_docker_or_ci():
return parse_release_information_from_file(CACHED_RELEASE_INFORMATION_FILE)
CACHED_RELEASE_INFORMATION_FILE.unlink()
if not CACHED_RELEASE_INFORMATION_FILE.exists():
return download_release_information(CACHED_RELEASE_INFORMATION_FILE)
return parse_release_information_from_file(CACHED_RELEASE_INFORMATION_FILE)
def get_release_url(release_string: str) -> str:
"""Get the URL for a specific release."""
release_information = get_release_information()
return get_release_url_from_release_information(release_string, release_information)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/single_test_client.py | src/pytest_plugins/consume/simulators/single_test_client.py | """
Common pytest fixtures for simulators with single-test client architecture.
"""
import io
import json
import logging
from typing import Generator, Literal, cast
import pytest
from hive.client import Client, ClientType
from hive.testing import HiveTest
from ethereum_test_base_types import Number, to_json
from ethereum_test_fixtures import BlockchainFixtureCommon
from ethereum_test_fixtures.blockchain import FixtureHeader
from .helpers.ruleset import (
ruleset, # TODO: generate dynamically
)
from .helpers.timing import TimingData
logger = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def client_genesis(fixture: BlockchainFixtureCommon) -> dict:
"""
Convert the fixture genesis block header and pre-state to a client genesis
state.
"""
genesis = to_json(fixture.genesis)
alloc = to_json(fixture.pre)
# NOTE: nethermind requires account keys without '0x' prefix
genesis["alloc"] = {k.replace("0x", ""): v for k, v in alloc.items()}
return genesis
@pytest.fixture(scope="function")
def environment(
fixture: BlockchainFixtureCommon,
check_live_port: Literal[8545, 8551],
) -> dict:
"""Define the environment that hive will start the client with."""
assert fixture.fork in ruleset, f"fork '{fixture.fork}' missing in hive ruleset"
chain_id = str(Number(fixture.config.chain_id))
return {
"HIVE_CHAIN_ID": chain_id,
"HIVE_NETWORK_ID": chain_id, # Use same value for P2P network compatibility
"HIVE_FORK_DAO_VOTE": "1",
"HIVE_NODETYPE": "full",
"HIVE_CHECK_LIVE_PORT": str(check_live_port),
**{k: f"{v:d}" for k, v in ruleset[fixture.fork].items()},
}
@pytest.fixture(scope="function")
def buffered_genesis(client_genesis: dict) -> io.BufferedReader:
"""
Create a buffered reader for the genesis block header of the current test
fixture.
"""
genesis_json = json.dumps(client_genesis)
genesis_bytes = genesis_json.encode("utf-8")
return io.BufferedReader(cast(io.RawIOBase, io.BytesIO(genesis_bytes)))
@pytest.fixture(scope="function")
def genesis_header(fixture: BlockchainFixtureCommon) -> FixtureHeader:
"""Provide the genesis header from the shared pre-state group."""
return fixture.genesis
@pytest.fixture(scope="function")
def client(
    hive_test: HiveTest,
    client_files: dict,  # configured within: rlp/conftest.py & engine/conftest.py
    environment: dict,
    client_type: ClientType,
    total_timing_data: TimingData,
) -> Generator[Client, None, None]:
    """
    Initialize the client with the appropriate files and environment variables.

    Yields the started client and stops it during teardown; both the start and
    stop phases are recorded in the total timing data.
    """
    logger.info(f"Starting client ({client_type.name})...")
    logger.debug(f"Main client Network ID: {environment.get('HIVE_NETWORK_ID', 'NOT SET!')}")
    logger.debug(f"Main client Chain ID: {environment.get('HIVE_CHAIN_ID', 'NOT SET!')}")
    with total_timing_data.time("Start client"):
        started_client = hive_test.start_client(
            client_type=client_type, environment=environment, files=client_files
        )
    assert started_client is not None, (
        f"Unable to connect to the client container ({client_type.name}) via Hive during test "
        "setup. Check the client or Hive server logs for more information."
    )
    logger.info(f"Client ({client_type.name}) ready!")
    yield started_client
    logger.info(f"Stopping client ({client_type.name})...")
    with total_timing_data.time("Stop client"):
        started_client.stop()
    logger.info(f"Client ({client_type.name}) stopped!")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/exceptions.py | src/pytest_plugins/consume/simulators/exceptions.py | """Pytest plugin that defines options and fixtures for client exceptions."""
from typing import Dict, List
import pytest
from hive.client import ClientType
from ethereum_test_exceptions import ExceptionMapper
from ethereum_test_fixtures import (
BlockchainFixtureCommon,
)
from .helpers.exceptions import EXCEPTION_MAPPERS
def pytest_addoption(parser: pytest.Parser) -> None:
    """Hive simulator specific consume command line options."""
    group = parser.getgroup("consume", "Arguments related to consuming fixtures via a client")
    group.addoption(
        "--disable-strict-exception-matching",
        action="store",
        dest="disable_strict_exception_matching",
        default="",
        help=(
            "Comma-separated list of client names and/or forks which should NOT use strict "
            "exception matching."
        ),
    )
@pytest.fixture(scope="session")
def client_exception_mapper_cache() -> Dict[str, ExceptionMapper | None]:
    """Session-wide cache of exception mappers, keyed by client type name."""
    return dict()
@pytest.fixture(scope="function")
def client_exception_mapper(
    client_type: ClientType, client_exception_mapper_cache: Dict[str, ExceptionMapper | None]
) -> ExceptionMapper | None:
    """Return the exception mapper for the client type, with caching."""
    cache = client_exception_mapper_cache
    if client_type.name not in cache:
        # Substring match: the hive client type name may carry a suffix.
        resolved: ExceptionMapper | None = None
        for known_client, known_mapper in EXCEPTION_MAPPERS.items():
            if known_client in client_type.name:
                resolved = known_mapper
                break
        cache[client_type.name] = resolved
    return cache[client_type.name]
@pytest.fixture(scope="session")
def disable_strict_exception_matching(request: pytest.FixtureRequest) -> List[str]:
    """
    Return the list of clients or forks that should NOT use strict exception
    matching.
    """
    raw_value = request.config.getoption("disable_strict_exception_matching")
    if not raw_value:
        return []
    return raw_value.split(",")
@pytest.fixture(scope="function")
def client_strict_exception_matching(
    client_type: ClientType,
    disable_strict_exception_matching: List[str],
) -> bool:
    """Return True if the client type should use strict exception matching."""
    client_name = client_type.name.lower()
    for excluded in disable_strict_exception_matching:
        if excluded.lower() in client_name:
            return False
    return True
@pytest.fixture(scope="function")
def fork_strict_exception_matching(
    fixture: BlockchainFixtureCommon,
    disable_strict_exception_matching: List[str],
) -> bool:
    """Return True if the fork should use strict exception matching."""
    # NOTE: substring matching makes it easier for transition forks
    # ("Prague" in "CancunToPragueAtTime15k")
    fork_name = str(fixture.fork).lower()
    return all(
        excluded.lower() not in fork_name for excluded in disable_strict_exception_matching
    )
@pytest.fixture(scope="function")
def strict_exception_matching(
    client_strict_exception_matching: bool,
    fork_strict_exception_matching: bool,
) -> bool:
    """Return True if the test should use strict exception matching."""
    # Strict matching is only used when neither the client nor the fork
    # has been excluded.
    if not client_strict_exception_matching:
        return False
    return fork_strict_exception_matching
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/timing_data.py | src/pytest_plugins/consume/simulators/timing_data.py | """Pytest plugin that helps measure and log timing data in Hive simulators."""
from typing import Generator
import pytest
import rich
from hive.client import Client
from .helpers.timing import TimingData
def pytest_addoption(parser: pytest.Parser) -> None:
    """Hive simulator specific consume command line options."""
    group = parser.getgroup("consume", "Arguments related to consuming fixtures via a client")
    group.addoption(
        "--timing-data",
        action="store_true",
        dest="timing_data",
        default=False,
        help="Log the timing data for each test case execution.",
    )
@pytest.fixture(scope="function", autouse=True)
def total_timing_data(request: pytest.FixtureRequest) -> Generator[TimingData, None, None]:
    """Record timing data for various stages of executing test case."""
    with TimingData("Total (seconds)") as timings:
        yield timings
    # Teardown: optionally print the collected timings and attach them to
    # the test report if one was produced for the call phase.
    if request.config.getoption("timing_data"):
        rich.print(f"\n{timings.formatted()}")
    if hasattr(request.node, "rep_call"):  # make available for test reports
        request.node.rep_call.timings = timings
@pytest.fixture(scope="function", autouse=True)
def timing_data(
    total_timing_data: TimingData, client: Client
) -> Generator[TimingData, None, None]:
    """Record timing data for the main execution of the test case."""
    # The client fixture is requested only to guarantee the client is up
    # before the execution timer starts.
    del client
    with total_timing_data.time("Test case execution") as execution_timing:
        yield execution_timing
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/__init__.py | src/pytest_plugins/consume/simulators/__init__.py | """Consume hive simulators test functions."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/base.py | src/pytest_plugins/consume/simulators/base.py | """Common pytest fixtures for the Hive simulators."""
from pathlib import Path
from typing import Dict, Literal
import pytest
from hive.client import Client
from ethereum_test_fixtures import (
BaseFixture,
)
from ethereum_test_fixtures.consume import TestCaseIndexFile, TestCaseStream
from ethereum_test_fixtures.file import Fixtures
from ethereum_test_rpc import EthRPC
from ..consume import FixturesSource
@pytest.fixture(scope="function")
def eth_rpc(client: Client) -> EthRPC:
    """Initialize ethereum RPC client for the execution client under test."""
    rpc_endpoint = f"http://{client.ip}:8545"
    return EthRPC(rpc_endpoint)
@pytest.fixture(scope="function")
def check_live_port(test_suite_name: str) -> Literal[8545, 8551]:
    """Port used by hive to check for liveness of the client."""
    # RLP suites poll the JSON-RPC port; engine/sync suites poll Engine API.
    port_by_suite: Dict[str, Literal[8545, 8551]] = {
        "eest/consume-rlp": 8545,
        "eest/consume-engine": 8551,
        "eest/consume-sync": 8551,
    }
    if test_suite_name in port_by_suite:
        return port_by_suite[test_suite_name]
    raise ValueError(
        f"Unexpected test suite name '{test_suite_name}' while setting HIVE_CHECK_LIVE_PORT."
    )
class FixturesDict(Dict[Path, Fixtures]):
    """
    A dictionary that caches loaded fixture files to avoid reloading the same
    file multiple times.
    """

    def __init__(self) -> None:
        """Initialize the dictionary that caches loaded fixture files."""
        super().__init__()

    def __getitem__(self, key: Path) -> Fixtures:
        """
        Return the fixtures for `key` from the cache; on a miss, load them
        from disk and cache them.

        Raises AssertionError if `key` is not an existing file.
        """
        assert key.is_file(), f"Expected a file path, got '{key}'"
        # Fix: store entries in the inherited dict storage instead of a shadow
        # `_fixtures` attribute, so that `len()`, iteration and `in` checks on
        # the mapping itself reflect the cached contents.
        if key not in self:
            super().__setitem__(key, Fixtures.model_validate_json(key.read_text()))
        return super().__getitem__(key)
@pytest.fixture(scope="session")
def fixture_file_loader() -> Dict[Path, Fixtures]:
    """
    Return a singleton dictionary that caches loaded fixture files used in all
    tests.
    """
    loader: Dict[Path, Fixtures] = FixturesDict()
    return loader
@pytest.fixture(scope="function")
def fixture(
    fixtures_source: FixturesSource,
    fixture_file_loader: Dict[Path, Fixtures],
    test_case: TestCaseIndexFile | TestCaseStream,
) -> BaseFixture:
    """
    Load the fixture from a file or from stream in any of the supported fixture
    formats.

    The fixture is either already available within the test case (if consume is
    taking input on stdin) or loaded from the fixture json file if taking input
    from disk (fixture directory with index file).
    """
    if fixtures_source.is_stdin:
        assert isinstance(test_case, TestCaseStream), "Expected a stream test case"
        loaded_fixture: BaseFixture = test_case.fixture
    else:
        assert isinstance(test_case, TestCaseIndexFile), "Expected an index file test case"
        fixtures_file_path = fixtures_source.path / test_case.json_path
        loaded_fixture = fixture_file_loader[fixtures_file_path][test_case.id]
    assert isinstance(loaded_fixture, test_case.format), (
        f"Expected a {test_case.format.format_name} test fixture"
    )
    return loaded_fixture
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/test_case_description.py | src/pytest_plugins/consume/simulators/test_case_description.py | """
Pytest fixtures that help create the test case "Description" displayed in the
Hive UI.
"""
import logging
import textwrap
import urllib
import warnings
from typing import List
import pytest
from hive.client import ClientType
from ethereum_test_fixtures import BaseFixture
from ethereum_test_fixtures.consume import TestCaseIndexFile, TestCaseStream
from ...pytest_hive.hive_info import ClientFile, HiveInfo
logger = logging.getLogger(__name__)
@pytest.fixture(scope="function")
def hive_clients_yaml_target_filename() -> str:
    """Return the name of the target clients YAML file."""
    target_filename = "clients_eest.yaml"
    return target_filename
@pytest.fixture(scope="function")
def hive_clients_yaml_generator_command(
    client_type: ClientType,
    client_file: ClientFile,
    hive_clients_yaml_target_filename: str,
    hive_info: HiveInfo,
) -> str:
    """
    Generate a shell command that creates a clients YAML file for the current
    client.

    On failure (no client info, client missing from the client file, or a
    YAML serialization error) the returned string is instead an error message
    containing a link to open a pre-filled GitHub issue.
    """
    try:
        if not client_file:
            raise ValueError("No client information available - try updating hive")
        # Substring match: the hive client type name may carry a suffix
        # (e.g. a branch/tag), so compare with `in` rather than equality.
        client_config = [c for c in client_file.root if c.client in client_type.name]
        if not client_config:
            raise ValueError(f"Client '{client_type.name}' not found in client file")
        try:
            # NOTE(review): `.replace(" ", " ")` reads as an identity
            # replacement; presumably one of the two literals was originally a
            # different whitespace character (e.g. non-breaking space) that was
            # lost in transit -- confirm against the repository source.
            yaml_content = ClientFile(root=[client_config[0]]).yaml().replace(" ", " ")
            return f'echo "\\\n{yaml_content}" > {hive_clients_yaml_target_filename}'
        except Exception as e:
            raise ValueError(f"Failed to generate YAML: {str(e)}") from e
    except ValueError as e:
        # Degrade gracefully: warn and return an HTML error message that the
        # Hive UI can render in place of the command.
        error_message = str(e)
        warnings.warn(
            f"{error_message}. The Hive clients YAML generator command will not be available.",
            stacklevel=2,
        )
        issue_title = f"Client {client_type.name} configuration issue"
        issue_body = f"Error: {error_message}\nHive version: {hive_info.commit}\n"
        issue_url = f"https://github.com/ethereum/execution-spec-tests/issues/new?title={urllib.parse.quote(issue_title)}&body={urllib.parse.quote(issue_body)}"
        return (
            f"Error: {error_message}\n"
            f'Please <a href="{issue_url}">create an issue</a> to report this problem.'
        )
@pytest.fixture(scope="function")
def filtered_hive_options(hive_info: HiveInfo) -> List[str]:
    """Filter Hive command options to remove unwanted options."""
    logger.info("Hive info: %s", hive_info.command)
    unwanted_options = [
        "--client",  # gets overwritten: we specify a single client; the one
        # from the test case
        "--client-file",  # gets overwritten: we'll write our own client file
        "--results-root",  # use default value instead (or you have to pass it
        # to ./hiveview)
        "--sim.limit",  # gets overwritten: we only run the current test case
        # id
        "--sim.parallelism",  # skip; we'll only be running a single test
    ]
    filtered: List[str] = []
    token_iterator = iter(hive_info.command)
    for token in token_iterator:
        if token in unwanted_options:
            # Space-separated form: also drop the option's value token.
            next(token_iterator, None)
            continue
        if any(token.startswith(f"{option}=") for option in unwanted_options):
            # `--option=value` form: drop the single token.
            continue
        filtered.append(token)
    return filtered
@pytest.fixture(scope="function")
def hive_client_config_file_parameter(hive_clients_yaml_target_filename: str) -> str:
    """Return the hive client config file parameter."""
    return "--client-file " + hive_clients_yaml_target_filename
@pytest.fixture(scope="function")
def hive_consume_command(
    test_case: TestCaseIndexFile | TestCaseStream,
    hive_client_config_file_parameter: str,
    filtered_hive_options: List[str],
    client_type: ClientType,
) -> str:
    """Command to run the test within hive."""
    # Re-append the options we filtered out, now pinned to this test case.
    extra_arguments = [
        hive_client_config_file_parameter,
        f"--client={client_type.name}",
        f'--sim.limit="id:{test_case.id}"',
    ]
    return " ".join(filtered_hive_options + extra_arguments)
@pytest.fixture(scope="function")
def hive_dev_command(
    client_type: ClientType,
    hive_client_config_file_parameter: str,
) -> str:
    """
    Return the command used to instantiate hive alongside the `consume`
    command.
    """
    parts = ["./hive", "--dev", hive_client_config_file_parameter, "--client", client_type.name]
    return " ".join(parts)
@pytest.fixture(scope="function")
def eest_consume_command(
    test_suite_name: str,
    test_case: TestCaseIndexFile | TestCaseStream,
    fixture_source_flags: List[str],
) -> str:
    """Commands to run the test within EEST using a hive dev back-end."""
    # e.g. "eest/consume-rlp" -> "rlp" sub-command.
    sub_command = test_suite_name.split("-")[-1]
    joined_flags = " ".join(fixture_source_flags)
    return f'uv run consume {sub_command} {joined_flags} --sim.limit="id:{test_case.id}" -v -s'
@pytest.fixture(scope="function")
def test_case_description(
    fixture: BaseFixture,
    test_case: TestCaseIndexFile | TestCaseStream,
    hive_clients_yaml_generator_command: str,
    hive_consume_command: str,
    hive_dev_command: str,
    eest_consume_command: str,
) -> str:
    """
    Create the description of the current blockchain fixture test case.

    The returned string is HTML rendered by the Hive UI; it includes the test
    id, a source link (when available), the test docstring, and copy-pasteable
    commands to reproduce the test locally.
    """
    test_url = fixture.info.get("url", "")
    if "description" not in fixture.info or fixture.info["description"] is None:
        test_docstring = "No documentation available."
    else:
        # this prefix was included in the fixture description field for
        # fixtures <= v4.3.0
        test_docstring = fixture.info["description"].replace("Test function documentation:\n", "")  # type: ignore
    description = textwrap.dedent(f"""
    <b>Test Details</b>
    <code>{test_case.id}</code>
    {f'<a href="{test_url}">[source]</a>' if test_url else ""}
    {test_docstring}
    <b>Run This Test Locally:</b>
    To run this test in <a href="https://github.com/ethereum/hive">
    hive
    </a></i>:
    <code>{hive_clients_yaml_generator_command}
    {hive_consume_command}</code>
    <b>Advanced: Run the test against a hive developer backend using
    EEST's <code>consume</code> command</b>
    Create the client YAML file, as above, then:
    1. Start hive in dev mode: <code>{hive_dev_command}</code>
    2. In the EEST repository root: <code>{eest_consume_command}</code>
    """)
    description = description.strip()
    # Hive's UI renders the description as HTML; convert newlines to <br/>.
    description = description.replace("\n", "<br/>")
    return description
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/rlp/conftest.py | src/pytest_plugins/consume/simulators/rlp/conftest.py | """Pytest fixtures and classes for the `consume rlp` hive simulator."""
import io
from typing import List, Mapping, cast
import pytest
from ethereum_test_base_types import Bytes
from ethereum_test_fixtures import BlockchainFixture
from ethereum_test_fixtures.consume import TestCaseIndexFile, TestCaseStream
TestCase = TestCaseIndexFile | TestCaseStream
pytest_plugins = (
"pytest_plugins.pytest_hive.pytest_hive",
"pytest_plugins.consume.simulators.base",
"pytest_plugins.consume.simulators.single_test_client",
"pytest_plugins.consume.simulators.test_case_description",
"pytest_plugins.consume.simulators.timing_data",
"pytest_plugins.consume.simulators.exceptions",
)
def pytest_configure(config: pytest.Config) -> None:
    """Set the supported fixture formats for the rlp simulator."""
    # Only blockchain fixtures can be fed to a client as RLP-encoded blocks.
    config.supported_fixture_formats = [BlockchainFixture]  # type: ignore[attr-defined]
@pytest.fixture(scope="module")
def test_suite_name() -> str:
    """The name of the hive test suite used in this simulator."""
    suite_name = "eest/consume-rlp"
    return suite_name
@pytest.fixture(scope="module")
def test_suite_description() -> str:
    """The description of the hive test suite used in this simulator."""
    suite_description = (
        "Execute blockchain tests by providing RLP-encoded blocks to a client upon start-up."
    )
    return suite_description
@pytest.fixture(scope="function")
def blocks_rlp(fixture: BlockchainFixture) -> List[Bytes]:
    """List of the fixture's blocks encoded as RLP."""
    encoded_blocks: List[Bytes] = []
    for block in fixture.blocks:
        encoded_blocks.append(block.rlp)
    return encoded_blocks
@pytest.fixture(scope="function")
def buffered_blocks_rlp(blocks_rlp: List[bytes]) -> list[io.BufferedReader]:
    """
    Convert the RLP-encoded blocks of the current test fixture to buffered
    readers.
    """
    # Idiom fix: the previous implementation used `enumerate` and discarded
    # the index; a comprehension over the blocks suffices.
    return [
        io.BufferedReader(cast(io.RawIOBase, io.BytesIO(block_rlp)))
        for block_rlp in blocks_rlp
    ]
@pytest.fixture(scope="function")
def client_files(
    buffered_genesis: io.BufferedReader,
    buffered_blocks_rlp: list[io.BufferedReader],
) -> Mapping[str, io.BufferedReader]:
    """
    Define the files that hive will start the client with.

    The files are specified as a dictionary whose:

    - Keys are the target file paths in the client's docker container, and,
    - Values are in-memory buffered file objects.
    """
    files: dict[str, io.BufferedReader] = {}
    # Blocks are numbered from 1 with zero-padding so the client imports
    # them in order.
    for block_number, block_reader in enumerate(buffered_blocks_rlp, start=1):
        files[f"/blocks/{block_number:04d}.rlp"] = block_reader
    files["/genesis.json"] = buffered_genesis
    return files
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/rlp/__init__.py | src/pytest_plugins/consume/simulators/rlp/__init__.py | """Consume RLP test functions."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/helpers/exceptions.py | src/pytest_plugins/consume/simulators/helpers/exceptions.py | """Custom exceptions utilized within consume simulators."""
import pprint
from typing import Dict, List, Tuple
from ethereum_clis.clis.besu import BesuExceptionMapper
from ethereum_clis.clis.erigon import ErigonExceptionMapper
from ethereum_clis.clis.ethereumjs import EthereumJSExceptionMapper
from ethereum_clis.clis.ethrex import EthrexExceptionMapper
from ethereum_clis.clis.geth import GethExceptionMapper
from ethereum_clis.clis.nethermind import NethermindExceptionMapper
from ethereum_clis.clis.nimbus import NimbusExceptionMapper
from ethereum_clis.clis.reth import RethExceptionMapper
from ethereum_test_exceptions import ExceptionMapper
from ethereum_test_fixtures.blockchain import FixtureHeader
class GenesisBlockMismatchExceptionError(Exception):
    """
    Defines a mismatch exception between the client and fixture genesis
    blockhash.
    """

    def __init__(self, *, expected_header: FixtureHeader, got_genesis_block: Dict[str, str]):
        """
        Initialize the exception with the expected and received genesis block
        headers.
        """
        message = (
            "Genesis block hash mismatch.\n\n"
            f"Expected: {expected_header.block_hash}\n"
            f" Got: {got_genesis_block['hash']}."
        )
        # Diff the fixture header against the header reconstructed from the
        # client's response to pinpoint which fields differ.
        differences, unexpected_fields = self.compare_models(
            expected_header, FixtureHeader(**got_genesis_block)
        )
        if differences:
            message += (
                "\n\nGenesis block header field differences:\n"
                f"{pprint.pformat(differences, indent=4)}"
            )
        elif unexpected_fields:
            message += (
                "\n\nUn-expected genesis block header fields from client:\n"
                f"{pprint.pformat(unexpected_fields, indent=4)}"
                "\nIs the fork configuration correct?"
            )
        else:
            # NOTE(review): this sentence is appended without a separating
            # newline/space after the hash line -- confirm intended formatting.
            message += (
                "There were no differences in the expected and received genesis block headers."
            )
        super().__init__(message)

    @staticmethod
    def compare_models(expected: FixtureHeader, got: FixtureHeader) -> Tuple[Dict, List]:
        """
        Compare two FixtureHeader model instances and return their differences.

        Returns a tuple of (differences, unexpected_fields): `differences`
        maps field names to the expected/got string values; `unexpected_fields`
        lists field names where the received value is None.
        """
        differences = {}
        unexpected_fields = []
        # Iterating pydantic models yields (field_name, value) pairs; both
        # headers share the same field order.
        for (exp_name, exp_value), (got_name, got_value) in zip(expected, got, strict=False):
            if "rlp" in exp_name or "fork" in exp_name:  # ignore rlp as not verbose enough
                continue
            if exp_value != got_value:
                differences[exp_name] = {
                    "expected ": str(exp_value),
                    "got (via rpc)": str(got_value),
                }
                if got_value is None:
                    unexpected_fields.append(got_name)
        return differences, unexpected_fields
# Maps a substring of the hive client type name to the exception mapper used
# to translate that client's error messages into canonical exceptions.
EXCEPTION_MAPPERS: Dict[str, ExceptionMapper] = {
    "go-ethereum": GethExceptionMapper(),
    "nethermind": NethermindExceptionMapper(),
    "erigon": ErigonExceptionMapper(),
    "besu": BesuExceptionMapper(),
    "reth": RethExceptionMapper(),
    "nimbus": NimbusExceptionMapper(),
    "ethereumjs": EthereumJSExceptionMapper(),
    "ethrex": EthrexExceptionMapper(),
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/helpers/ruleset.py | src/pytest_plugins/consume/simulators/helpers/ruleset.py | """
Fork rules for consume hive simulators.
TODO: Create the ruleset dynamically per fixture test case based on the fork.
Remove this file afterwards.
"""
from typing import Dict, List
from ethereum_test_forks import (
BPO1,
BPO2,
BPO3,
BPO4,
Amsterdam,
Berlin,
BerlinToLondonAt5,
BPO1ToBPO2AtTime15k,
BPO2ToBPO3AtTime15k,
BPO3ToBPO4AtTime15k,
Byzantium,
Cancun,
CancunToPragueAtTime15k,
Constantinople,
ConstantinopleFix,
Fork,
Frontier,
Homestead,
Istanbul,
London,
Osaka,
OsakaToBPO1AtTime15k,
Paris,
ParisToShanghaiAtTime15k,
Prague,
PragueToOsakaAtTime15k,
Shanghai,
ShanghaiToCancunAtTime15k,
)
def get_blob_schedule_entries(fork: Fork) -> Dict[str, int]:
    """
    Generate blob schedule entries for each fork (and respective parent forks).

    Adds the following entries to the ruleset for the given fork (and parent
    forks):

        HIVE_{FORK}_BLOB_TARGET: target_blobs_per_block()
        HIVE_{FORK}_BLOB_MAX: max_blobs_per_block()
        HIVE_{FORK}_BLOB_BASE_FEE_UPDATE_FRACTION: blob_base_fee_update_fraction()
    """
    entries: Dict = {}
    # Walk up the fork lineage, collecting the fork itself and every ancestor
    # that supports blobs (newest first).
    blob_forks: List[Fork] = []
    candidate = fork
    while candidate.supports_blobs():
        blob_forks.append(candidate)
        candidate = candidate.parent()  # type: ignore
    for blob_fork in blob_forks:
        prefix = blob_fork.__name__.upper()
        entries[f"HIVE_{prefix}_BLOB_TARGET"] = blob_fork.target_blobs_per_block()
        entries[f"HIVE_{prefix}_BLOB_MAX"] = blob_fork.max_blobs_per_block()
        entries[f"HIVE_{prefix}_BLOB_BASE_FEE_UPDATE_FRACTION"] = (
            blob_fork.blob_base_fee_update_fraction()
        )
    return entries
ruleset: Dict[Fork, Dict[str, int]] = {
Frontier: {
"HIVE_FORK_HOMESTEAD": 2000,
"HIVE_FORK_DAO_BLOCK": 2000,
"HIVE_FORK_TANGERINE": 2000,
"HIVE_FORK_SPURIOUS": 2000,
"HIVE_FORK_BYZANTIUM": 2000,
"HIVE_FORK_CONSTANTINOPLE": 2000,
"HIVE_FORK_PETERSBURG": 2000,
"HIVE_FORK_ISTANBUL": 2000,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
Homestead: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_DAO_BLOCK": 2000,
"HIVE_FORK_TANGERINE": 2000,
"HIVE_FORK_SPURIOUS": 2000,
"HIVE_FORK_BYZANTIUM": 2000,
"HIVE_FORK_CONSTANTINOPLE": 2000,
"HIVE_FORK_PETERSBURG": 2000,
"HIVE_FORK_ISTANBUL": 2000,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
Byzantium: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 2000,
"HIVE_FORK_PETERSBURG": 2000,
"HIVE_FORK_ISTANBUL": 2000,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
Constantinople: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 2000,
"HIVE_FORK_ISTANBUL": 2000,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
ConstantinopleFix: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 2000,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
Istanbul: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 2000,
"HIVE_FORK_LONDON": 2000,
},
Berlin: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 2000,
},
BerlinToLondonAt5: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 5,
},
London: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
},
Paris: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
},
Shanghai: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
},
ParisToShanghaiAtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 15000,
},
Cancun: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
**get_blob_schedule_entries(Cancun),
},
ShanghaiToCancunAtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 15000,
**get_blob_schedule_entries(Cancun),
},
Prague: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
**get_blob_schedule_entries(Prague),
},
CancunToPragueAtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 15000,
**get_blob_schedule_entries(Prague),
},
Osaka: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
**get_blob_schedule_entries(Osaka),
},
PragueToOsakaAtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 15000,
**get_blob_schedule_entries(Osaka),
},
BPO1: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
**get_blob_schedule_entries(BPO1),
},
OsakaToBPO1AtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 15000,
**get_blob_schedule_entries(BPO1),
},
BPO2: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
**get_blob_schedule_entries(BPO2),
},
BPO1ToBPO2AtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 15000,
**get_blob_schedule_entries(BPO2),
},
BPO3: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
"HIVE_BPO3_TIMESTAMP": 0,
**get_blob_schedule_entries(BPO3),
},
BPO2ToBPO3AtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
"HIVE_BPO3_TIMESTAMP": 15000,
**get_blob_schedule_entries(BPO3),
},
BPO4: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
"HIVE_BPO3_TIMESTAMP": 0,
"HIVE_BPO4_TIMESTAMP": 0,
**get_blob_schedule_entries(BPO4),
},
BPO3ToBPO4AtTime15k: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
"HIVE_BPO3_TIMESTAMP": 0,
"HIVE_BPO4_TIMESTAMP": 15000,
**get_blob_schedule_entries(BPO4),
},
Amsterdam: {
"HIVE_FORK_HOMESTEAD": 0,
"HIVE_FORK_TANGERINE": 0,
"HIVE_FORK_SPURIOUS": 0,
"HIVE_FORK_BYZANTIUM": 0,
"HIVE_FORK_CONSTANTINOPLE": 0,
"HIVE_FORK_PETERSBURG": 0,
"HIVE_FORK_ISTANBUL": 0,
"HIVE_FORK_BERLIN": 0,
"HIVE_FORK_LONDON": 0,
"HIVE_FORK_MERGE": 0,
"HIVE_TERMINAL_TOTAL_DIFFICULTY": 0,
"HIVE_SHANGHAI_TIMESTAMP": 0,
"HIVE_CANCUN_TIMESTAMP": 0,
"HIVE_PRAGUE_TIMESTAMP": 0,
"HIVE_OSAKA_TIMESTAMP": 0,
"HIVE_BPO1_TIMESTAMP": 0,
"HIVE_BPO2_TIMESTAMP": 0,
"HIVE_BPO3_TIMESTAMP": 0,
"HIVE_BPO4_TIMESTAMP": 0,
"HIVE_AMSTERDAM_TIMESTAMP": 0,
**get_blob_schedule_entries(Amsterdam),
},
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/helpers/__init__.py | src/pytest_plugins/consume/simulators/helpers/__init__.py | """Helper classes and functions for consume hive simulators."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/helpers/timing.py | src/pytest_plugins/consume/simulators/helpers/timing.py | """Test timing class used to time tests."""
import time
from typing import Any, List, Self
class TimingData:
"""
The times taken to perform the various steps of a test case (seconds).
"""
name: str
start_time: float | None
end_time: float | None
parent: "TimingData | None"
timings: "List[TimingData]"
def __init__(self, name: str, parent: "TimingData | None" = None):
"""Initialize the timing data."""
self.name = name
self.start_time = None
self.end_time = None
self.parent = parent
self.timings = []
@staticmethod
def format_float(num: float | None, precision: int = 4) -> str | None:
"""Format a float to a specific precision in significant figures."""
if num is None:
return None
return f"{num:.{precision}f}"
def __enter__(self) -> Self:
"""Start timing the test case."""
self.start_time = time.perf_counter()
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
"""Record the time taken since the last time recorded."""
self.end_time = time.perf_counter()
def time(self, sub_name: str) -> "TimingData":
"""Record the time taken in an execution section."""
new_timing = TimingData(sub_name, self)
self.timings.append(new_timing)
return new_timing
def formatted(self, precision: int = 4, indent: int = 0) -> str:
"""Recursively format the timing data with correct indentation."""
assert self.start_time is not None
assert self.end_time is not None
formatted = (
f"{' ' * indent}{self.name}: "
f"{TimingData.format_float(self.end_time - self.start_time, precision)}\n"
)
for timing in self.timings:
formatted += timing.formatted(precision, indent + 2)
return formatted
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/engine/conftest.py | src/pytest_plugins/consume/simulators/engine/conftest.py | """
Pytest fixtures for the `consume engine` simulator.
Configures the hive back-end & EL clients for each individual test execution.
"""
import io
from typing import Mapping
import pytest
from hive.client import Client
from ethereum_test_exceptions import ExceptionMapper
from ethereum_test_fixtures import BlockchainEngineFixture
from ethereum_test_rpc import EngineRPC
pytest_plugins = (
"pytest_plugins.pytest_hive.pytest_hive",
"pytest_plugins.consume.simulators.base",
"pytest_plugins.consume.simulators.single_test_client",
"pytest_plugins.consume.simulators.test_case_description",
"pytest_plugins.consume.simulators.timing_data",
"pytest_plugins.consume.simulators.exceptions",
)
def pytest_configure(config: pytest.Config) -> None:
"""Set the supported fixture formats for the engine simulator."""
config.supported_fixture_formats = [BlockchainEngineFixture] # type: ignore[attr-defined]
@pytest.fixture(scope="function")
def engine_rpc(client: Client, client_exception_mapper: ExceptionMapper | None) -> EngineRPC:
"""Initialize engine RPC client for the execution client under test."""
if client_exception_mapper:
return EngineRPC(
f"http://{client.ip}:8551",
response_validation_context={
"exception_mapper": client_exception_mapper,
},
)
return EngineRPC(f"http://{client.ip}:8551")
@pytest.fixture(scope="module")
def test_suite_name() -> str:
"""The name of the hive test suite used in this simulator."""
return "eest/consume-engine"
@pytest.fixture(scope="module")
def test_suite_description() -> str:
"""The description of the hive test suite used in this simulator."""
return "Execute blockchain tests against clients using the Engine API."
@pytest.fixture(scope="function")
def client_files(buffered_genesis: io.BufferedReader) -> Mapping[str, io.BufferedReader]:
"""Define the files that hive will start the client with."""
files = {}
files["/genesis.json"] = buffered_genesis
return files
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/engine/__init__.py | src/pytest_plugins/consume/simulators/engine/__init__.py | """Consume Engine test functions."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py | src/pytest_plugins/consume/simulators/simulator_logic/test_via_rlp.py | """
A hive based simulator that executes RLP-encoded blocks against clients. The
simulator uses the `BlockchainFixtures` to test this against clients.
Clients consume the genesis and RLP-encoded blocks from input files upon
start-up.
"""
import logging
from ethereum_test_fixtures import BlockchainFixture
from ethereum_test_fixtures.blockchain import FixtureBlock, FixtureHeader
from ethereum_test_rpc import EthRPC
from ..helpers.exceptions import GenesisBlockMismatchExceptionError
from ..helpers.timing import TimingData
logger = logging.getLogger(__name__)
def test_via_rlp(
timing_data: TimingData,
eth_rpc: EthRPC,
fixture: BlockchainFixture,
) -> None:
"""
1. Check the client genesis block hash matches
`fixture.genesis.block_hash`.
2. Check the client last block hash matches `fixture.last_block_hash`.
"""
with timing_data.time("Get genesis block"):
logger.info("Calling getBlockByNumber to get genesis block...")
genesis_block = eth_rpc.get_block_by_number(0)
assert genesis_block, "`getBlockByNumber` didn't return a block."
if genesis_block["hash"] != str(fixture.genesis.block_hash):
raise GenesisBlockMismatchExceptionError(
expected_header=fixture.genesis,
got_genesis_block=genesis_block,
)
with timing_data.time("Get latest block"):
logger.info("Calling getBlockByNumber to get latest block...")
block = eth_rpc.get_block_by_number("latest")
assert block, "`getBlockByNumber` didn't return a block."
if block["hash"] != str(fixture.last_block_hash):
try:
block_header = FixtureHeader.model_validate(block).model_dump()
last_block = FixtureBlock.model_validate(fixture.blocks[-1])
last_block_header = last_block.header.model_dump()
if block_header["number"] != last_block_header["number"]:
# raise with clearer message if block number mismatches
raise AssertionError(
f"block number mismatch in last block: got "
f"`{block_header['number']}`, "
f"expected `{last_block_header['number']}``"
)
# find all mismatched fields
mismatches = []
for block_field, block_value in block_header.items():
fixture_value = last_block_header[block_field]
if str(block_value) != str(fixture_value):
mismatches.append(
f" {block_field}: got `{block_value}`, expected `{fixture_value}`"
)
raise AssertionError(
"blockHash mismatch in last block - field mismatches:"
"\n" + "\n".join(mismatches)
)
except Exception:
raise AssertionError(
f"blockHash mismatch in last block: got `{block['hash']}`, "
f"expected `{fixture.last_block_hash}`"
) from None
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py | src/pytest_plugins/consume/simulators/simulator_logic/test_via_sync.py | """
A hive based simulator that executes blocks against clients using the
`engine_newPayloadV*` method from the Engine API with sync testing. The
simulator uses the `BlockchainEngineSyncFixtures` to test against clients with
client synchronization.
This simulator:
1. Spins up two clients: one as the client under test and another as the sync
client
2. Executes payloads on the client under test
3. Has the sync client synchronize from the client under test
4. Verifies that the sync was successful
"""
import time
import pytest
from ethereum_test_exceptions import UndefinedException
from ethereum_test_fixtures import BlockchainEngineSyncFixture
from ethereum_test_rpc import AdminRPC, EngineRPC, EthRPC, NetRPC
from ethereum_test_rpc.rpc_types import (
ForkchoiceState,
JSONRPCError,
PayloadStatusEnum,
)
from ....custom_logging import get_logger
from ..helpers.exceptions import GenesisBlockMismatchExceptionError
from ..helpers.timing import TimingData
logger = get_logger(__name__)
class LoggedError(Exception):
"""Exception that uses the logger to log the failure."""
def __init__(self, *args: object) -> None:
"""Initialize the exception and log the failure."""
super().__init__(*args)
logger.fail(str(self))
def test_blockchain_via_sync(
timing_data: TimingData,
eth_rpc: EthRPC,
engine_rpc: EngineRPC,
net_rpc: NetRPC,
sync_eth_rpc: EthRPC,
sync_engine_rpc: EngineRPC,
sync_net_rpc: NetRPC,
sync_admin_rpc: AdminRPC,
client_enode_url: str,
fixture: BlockchainEngineSyncFixture,
strict_exception_matching: bool,
) -> None:
"""
Test blockchain synchronization between two clients.
1. Initialize the client under test with the genesis block
2. Execute all payloads on the client under test
3. Initialize the sync client with the genesis block
4. Send sync payload and forkchoice_updated to the sync client to trigger
synchronization
5. Verify that the sync client successfully syncs to the same state
"""
# Initialize client under test
with timing_data.time("Initialize client under test"):
logger.info("Initializing client under test with genesis block...")
# Send initial forkchoice update to client under test
delay = 0.5
for attempt in range(3):
forkchoice_response = engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=fixture.genesis.block_hash,
),
payload_attributes=None,
version=fixture.payloads[0].forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Initial forkchoice update response attempt {attempt + 1}: {status}")
if status != PayloadStatusEnum.SYNCING:
break
if attempt < 2:
time.sleep(delay)
delay *= 2
if forkchoice_response.payload_status.status != PayloadStatusEnum.VALID:
logger.error(
f"Client under test failed to initialize properly after 3 attempts, "
f"final status: {forkchoice_response.payload_status.status}"
)
raise LoggedError(
f"unexpected status on forkchoice updated to genesis: {forkchoice_response}"
)
# Verify genesis block on client under test
with timing_data.time("Verify genesis on client under test"):
logger.info("Verifying genesis block on client under test...")
genesis_block = eth_rpc.get_block_by_number(0)
assert genesis_block is not None, "genesis_block is None"
if genesis_block["hash"] != str(fixture.genesis.block_hash):
expected = fixture.genesis.block_hash
got = genesis_block["hash"]
logger.fail(f"Genesis block hash mismatch. Expected: {expected}, Got: {got}")
raise GenesisBlockMismatchExceptionError(
expected_header=fixture.genesis,
got_genesis_block=genesis_block,
)
# Execute all payloads on client under test
last_valid_block_hash = fixture.genesis.block_hash
with timing_data.time("Execute payloads on client under test") as total_payload_timing:
logger.info(f"Starting execution of {len(fixture.payloads)} payloads...")
for i, payload in enumerate(fixture.payloads):
logger.info(f"Processing payload {i + 1}/{len(fixture.payloads)}...")
with total_payload_timing.time(f"Payload {i + 1}") as payload_timing:
with payload_timing.time(f"engine_newPayloadV{payload.new_payload_version}"):
logger.info(f"Sending engine_newPayloadV{payload.new_payload_version}...")
# Note: This is similar to the logic in test_via_engine.py
try:
payload_response = engine_rpc.new_payload(
*payload.params,
version=payload.new_payload_version,
)
logger.info(f"Payload response status: {payload_response.status}")
expected_validity = (
PayloadStatusEnum.VALID
if payload.valid()
else PayloadStatusEnum.INVALID
)
if payload_response.status != expected_validity:
raise LoggedError(
f"unexpected status: want {expected_validity},"
f" got {payload_response.status}"
)
if payload.error_code is not None:
raise LoggedError(
f"Client failed to raise expected Engine API error code: "
f"{payload.error_code}"
)
elif payload_response.status == PayloadStatusEnum.INVALID:
if payload_response.validation_error is None:
raise LoggedError(
"Client returned INVALID but no validation error was provided."
)
if isinstance(payload_response.validation_error, UndefinedException):
message = (
"Undefined exception message: "
f'expected exception: "{payload.validation_error}", '
f'returned exception: "{payload_response.validation_error}" '
f'(mapper: "{payload_response.validation_error.mapper_name}")'
)
if strict_exception_matching:
raise LoggedError(message)
else:
logger.warning(message)
else:
if (
payload.validation_error
not in payload_response.validation_error
):
message = (
"Client returned unexpected validation error: "
f'got: "{payload_response.validation_error}" '
f'expected: "{payload.validation_error}"'
)
if strict_exception_matching:
raise LoggedError(message)
else:
logger.warning(message)
except JSONRPCError as e:
logger.info(f"JSONRPC error encountered: {e.code} - {e.message}")
if payload.error_code is None:
raise LoggedError(f"Unexpected error: {e.code} - {e.message}") from e
if e.code != payload.error_code:
raise LoggedError(
f"Unexpected error code: {e.code}, expected: {payload.error_code}"
) from e
if payload.valid():
with payload_timing.time(
f"engine_forkchoiceUpdatedV{payload.forkchoice_updated_version}"
):
# Send a forkchoice update to the engine
version = payload.forkchoice_updated_version
logger.info(f"Sending engine_forkchoiceUpdatedV{version}...")
forkchoice_response = engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=payload.params[0].block_hash,
),
payload_attributes=None,
version=payload.forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Forkchoice update response: {status}")
if forkchoice_response.payload_status.status != PayloadStatusEnum.VALID:
raise LoggedError(
f"unexpected status: want {PayloadStatusEnum.VALID},"
f" got {forkchoice_response.payload_status.status}"
)
last_valid_block_hash = payload.params[0].block_hash
logger.info("All payloads processed successfully on client under test.")
# sync_payload creates the final block that the sync client will sync to
if not fixture.sync_payload:
pytest.fail("Sync tests require a syncPayload that is not present in this test.")
with timing_data.time("Send sync payload to client under test"):
logger.info("Sending sync payload (empty block) to client under test...")
try:
sync_response = engine_rpc.new_payload(
*fixture.sync_payload.params,
version=fixture.sync_payload.new_payload_version,
)
logger.info(f"Client sync payload response status: {sync_response.status}")
if sync_response.status == PayloadStatusEnum.VALID:
# Update forkchoice on client under test to include sync block
forkchoice_response = engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=fixture.sync_payload.params[0].block_hash,
),
payload_attributes=None,
version=fixture.sync_payload.forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Client forkchoice update to sync block: {status}")
last_valid_block_hash = fixture.sync_payload.params[0].block_hash
else:
logger.error(f"Sync payload was not valid: {sync_response.status}")
raise LoggedError(f"Sync payload validation failed: {sync_response.status}")
except JSONRPCError as e:
logger.error(
f"Error sending sync payload to client under test: {e.code} - {e.message}"
)
raise
# Initialize sync client
with timing_data.time("Initialize sync client"):
logger.info("Initializing sync client with genesis block...")
# Send initial forkchoice update to sync client
delay = 0.5
for attempt in range(3):
forkchoice_response = sync_engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=fixture.genesis.block_hash,
),
payload_attributes=None,
version=fixture.payloads[0].forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Sync client forkchoice update response attempt {attempt + 1}: {status}")
if status != PayloadStatusEnum.SYNCING:
break
if attempt < 2:
time.sleep(delay)
delay *= 2
if forkchoice_response.payload_status.status != PayloadStatusEnum.VALID:
logger.error(
f"Sync client failed to initialize properly after 3 attempts, "
f"final status: {forkchoice_response.payload_status.status}"
)
raise LoggedError(
f"Unexpected status on sync client forkchoice updated to genesis: "
f"{forkchoice_response}"
)
# Add peer using admin_addPeer This seems to be required... TODO: we can
# maybe improve flow here if not required
logger.info(f"Adding peer: {client_enode_url}")
assert sync_admin_rpc is not None, "sync_admin_rpc is required"
try:
add_result = sync_admin_rpc.add_peer(client_enode_url)
logger.info(f"admin_addPeer result: {add_result}")
except Exception as e:
raise LoggedError(f"admin_addPeer failed: {e}") from e
# quick sleep to allow for connection - TODO: is this necessary?
time.sleep(1)
try:
sync_peer_count = sync_net_rpc.peer_count()
client_peer_count = net_rpc.peer_count()
logger.info(
f"Peer count: sync_client={sync_peer_count}, client_under_test={client_peer_count}"
)
if sync_peer_count == 0 and client_peer_count == 0:
raise LoggedError("No P2P connection established between clients")
except Exception as e:
logger.warning(f"Could not verify peer connection: {e}")
# Trigger sync by sending the target block via newPayload followed by
# forkchoice update
logger.info(f"Triggering sync to block {last_valid_block_hash}")
# Find the last valid payload to send to sync client
last_valid_payload = None
if fixture.sync_payload and last_valid_block_hash == fixture.sync_payload.params[0].block_hash:
last_valid_payload = fixture.sync_payload
else:
# Find the payload that matches last_valid_block_hash
for payload in fixture.payloads:
if payload.params[0].block_hash == last_valid_block_hash and payload.valid():
last_valid_payload = payload
break
if last_valid_payload:
last_valid_block_forkchoice_state = ForkchoiceState(
head_block_hash=last_valid_block_hash,
safe_block_hash=last_valid_block_hash,
finalized_block_hash=fixture.genesis.block_hash,
)
try:
# log version used for debugging
version = last_valid_payload.new_payload_version
logger.info(f"Sending target payload via engine_newPayloadV{version}")
# send the payload to sync client
assert sync_engine_rpc is not None, "sync_engine_rpc is required"
sync_payload_response = sync_engine_rpc.new_payload(
*last_valid_payload.params,
version=last_valid_payload.new_payload_version,
)
logger.info(f"Sync client newPayload response: {sync_payload_response.status}")
# send forkchoice update pointing to latest block
logger.info("Sending forkchoice update with last valid block to trigger sync...")
sync_forkchoice_response = sync_engine_rpc.forkchoice_updated(
forkchoice_state=last_valid_block_forkchoice_state,
payload_attributes=None,
version=last_valid_payload.forkchoice_updated_version,
)
status = sync_forkchoice_response.payload_status.status
logger.info(f"Sync trigger forkchoice response: {status}")
if sync_forkchoice_response.payload_status.status == PayloadStatusEnum.SYNCING:
logger.info("Sync client is now syncing!")
elif sync_forkchoice_response.payload_status.status == PayloadStatusEnum.ACCEPTED:
logger.info("Sync client accepted the block, may start syncing ancestors")
# Give a moment for P2P connections to establish after sync starts
time.sleep(1)
# Check peer count after triggering sync Note: Reth does not
# actually raise the peer count but doesn't seem to need this to
# sync.
try:
assert sync_net_rpc is not None, "sync_net_rpc is required"
client_peer_count = net_rpc.peer_count()
sync_peer_count = sync_net_rpc.peer_count()
if sync_peer_count > 0 or client_peer_count > 0:
logger.debug(
f"Peers connected: client_under_test={client_peer_count}, "
f"sync_client={sync_peer_count}"
)
except Exception as e:
logger.debug(f"Could not check peer count: {e}")
except Exception as e:
logger.warning(f"Failed to trigger sync with newPayload/forkchoice update: {e}")
else:
logger.warning(
f"Could not find payload for block {last_valid_block_hash} to send to sync client"
)
# Wait for synchronization with continuous forkchoice updates
with timing_data.time("Wait for synchronization"):
# Get the target block number for logging
target_block = eth_rpc.get_block_by_hash(last_valid_block_hash)
target_block_number = int(target_block["number"], 16) if target_block else "unknown"
logger.info(
f"Waiting for sync client to reach block #{target_block_number} "
f"(hash: {last_valid_block_hash})"
)
# Start monitoring sync progress
sync_start_time = time.time()
last_forkchoice_time = time.time()
forkchoice_interval = 2.0 # Send forkchoice updates every 2 seconds
while time.time() - sync_start_time < 15: # 15 second timeout
# Send periodic forkchoice updates to keep sync alive
if time.time() - last_forkchoice_time >= forkchoice_interval:
try:
# Send forkchoice update to sync client to trigger/maintain
# sync
assert sync_engine_rpc is not None, "sync_engine_rpc is required"
sync_fc_response = sync_engine_rpc.forkchoice_updated(
forkchoice_state=last_valid_block_forkchoice_state,
payload_attributes=None,
version=fixture.sync_payload.forkchoice_updated_version
if fixture.sync_payload
else fixture.payloads[-1].forkchoice_updated_version,
)
status = sync_fc_response.payload_status.status
logger.debug(f"Periodic forkchoice update status: {status}")
if status.VALID:
break
last_forkchoice_time = time.time()
except Exception as fc_err:
logger.debug(f"Periodic forkchoice update failed: {fc_err}")
time.sleep(0.5)
else:
raise LoggedError(
f"Sync client failed to synchronize to block {last_valid_block_hash} "
f"within timeout"
)
logger.info("Sync verification successful!")
# Verify the final state but give a few tries
assert eth_rpc is not None, "eth_rpc is required"
assert sync_eth_rpc is not None, "sync_eth_rpc is required"
for attempt in range(5):
try:
sync_block = sync_eth_rpc.get_block_by_hash(last_valid_block_hash)
client_block = eth_rpc.get_block_by_hash(last_valid_block_hash)
if sync_block is None or client_block is None:
raise LoggedError(
f"Failed to retrieve block {last_valid_block_hash} "
f"on attempt {attempt + 1}"
)
if sync_block["stateRoot"] != client_block["stateRoot"]:
raise LoggedError(
f"State root mismatch after sync. "
f"Sync client: {sync_block['stateRoot']}, "
f"Client under test: {client_block['stateRoot']}"
)
if fixture.post_state_hash:
if sync_block["stateRoot"] != str(fixture.post_state_hash):
raise LoggedError(
f"Final state root mismatch. "
f"Expected: {fixture.post_state_hash}, "
f"Got: {sync_block['stateRoot']}"
)
break
except Exception as e:
if attempt < 4:
time.sleep(1)
continue
raise e
logger.info("Sync test completed successfully!")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/simulator_logic/__init__.py | src/pytest_plugins/consume/simulators/simulator_logic/__init__.py | """Defines the Pytest test functions used by Hive Consume Simulators."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py | src/pytest_plugins/consume/simulators/simulator_logic/test_via_engine.py | """
A hive based simulator that executes blocks against clients using the
`engine_newPayloadVX` method from the Engine API. The simulator uses the
`BlockchainEngineFixtures` to test against clients.
Each `engine_newPayloadVX` is verified against the appropriate VALID/INVALID
responses.
"""
import time
from ethereum_test_exceptions import UndefinedException
from ethereum_test_fixtures import BlockchainEngineFixture
from ethereum_test_rpc import EngineRPC, EthRPC
from ethereum_test_rpc.rpc_types import ForkchoiceState, JSONRPCError, PayloadStatusEnum
from ....custom_logging import get_logger
from ..helpers.exceptions import GenesisBlockMismatchExceptionError
from ..helpers.timing import TimingData
logger = get_logger(__name__)
MAX_RETRIES = 30
DELAY_BETWEEN_RETRIES_IN_SEC = 1
class LoggedError(Exception):
"""Exception that uses the logger to log the failure."""
def __init__(self, *args: object) -> None:
"""Initialize the exception and log the failure."""
super().__init__(*args)
logger.fail(str(self))
def test_blockchain_via_engine(
timing_data: TimingData,
eth_rpc: EthRPC,
engine_rpc: EngineRPC,
fixture: BlockchainEngineFixture,
strict_exception_matching: bool,
) -> None:
"""
1. Check the client genesis block hash matches
`fixture.genesis.block_hash`.
2. Execute the test case fixture blocks against the client under test using
the `engine_newPayloadVX` method from the Engine API.
3. For valid payloads a forkchoice update is performed to finalize the
chain.
"""
# Send a initial forkchoice update
with timing_data.time("Initial forkchoice update"):
logger.info("Sending initial forkchoice update to genesis block...")
for attempt in range(1, MAX_RETRIES + 1):
forkchoice_response = engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=fixture.genesis.block_hash,
),
payload_attributes=None,
version=fixture.payloads[0].forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Initial forkchoice update response attempt {attempt}: {status}")
if status != PayloadStatusEnum.SYNCING:
break
if attempt < MAX_RETRIES:
time.sleep(DELAY_BETWEEN_RETRIES_IN_SEC)
if forkchoice_response.payload_status.status != PayloadStatusEnum.VALID:
logger.error(
f"Client failed to initialize properly after {MAX_RETRIES} attempts, "
f"final status: {forkchoice_response.payload_status.status}"
)
raise LoggedError(
f"unexpected status on forkchoice updated to genesis: {forkchoice_response}"
)
with timing_data.time("Get genesis block"):
logger.info("Calling getBlockByNumber to get genesis block...")
genesis_block = eth_rpc.get_block_by_number(0)
assert genesis_block is not None, "genesis_block is None"
if genesis_block["hash"] != str(fixture.genesis.block_hash):
expected = fixture.genesis.block_hash
got = genesis_block["hash"]
logger.fail(f"Genesis block hash mismatch. Expected: {expected}, Got: {got}")
raise GenesisBlockMismatchExceptionError(
expected_header=fixture.genesis,
got_genesis_block=genesis_block,
)
with timing_data.time("Payloads execution") as total_payload_timing:
logger.info(f"Starting execution of {len(fixture.payloads)} payloads...")
for i, payload in enumerate(fixture.payloads):
logger.info(f"Processing payload {i + 1}/{len(fixture.payloads)}...")
with total_payload_timing.time(f"Payload {i + 1}") as payload_timing:
with payload_timing.time(f"engine_newPayloadV{payload.new_payload_version}"):
logger.info(f"Sending engine_newPayloadV{payload.new_payload_version}...")
try:
payload_response = engine_rpc.new_payload(
*payload.params,
version=payload.new_payload_version,
)
logger.info(f"Payload response status: {payload_response.status}")
expected_validity = (
PayloadStatusEnum.VALID
if payload.valid()
else PayloadStatusEnum.INVALID
)
if payload_response.status != expected_validity:
raise LoggedError(
f"unexpected status: want {expected_validity},"
f" got {payload_response.status}"
)
if payload.error_code is not None:
raise LoggedError(
f"Client failed to raise expected Engine API error code: "
f"{payload.error_code}"
)
elif payload_response.status == PayloadStatusEnum.INVALID:
if payload_response.validation_error is None:
raise LoggedError(
"Client returned INVALID but no validation error was provided."
)
if isinstance(payload_response.validation_error, UndefinedException):
message = (
"Undefined exception message: "
f'expected exception: "{payload.validation_error}", '
f'returned exception: "{payload_response.validation_error}" '
f'(mapper: "{payload_response.validation_error.mapper_name}")'
)
if strict_exception_matching:
raise LoggedError(message)
else:
logger.warning(message)
else:
if (
payload.validation_error
not in payload_response.validation_error
):
message = (
"Client returned unexpected validation error: "
f'got: "{payload_response.validation_error}" '
f'expected: "{payload.validation_error}"'
)
if strict_exception_matching:
raise LoggedError(message)
else:
logger.warning(message)
except JSONRPCError as e:
logger.info(f"JSONRPC error encountered: {e.code} - {e.message}")
if payload.error_code is None:
raise LoggedError(f"Unexpected error: {e.code} - {e.message}") from e
if e.code != payload.error_code:
raise LoggedError(
f"Unexpected error code: {e.code}, expected: {payload.error_code}"
) from e
if payload.valid():
with payload_timing.time(
f"engine_forkchoiceUpdatedV{payload.forkchoice_updated_version}"
):
# Send a forkchoice update to the engine
version = payload.forkchoice_updated_version
logger.info(f"Sending engine_forkchoiceUpdatedV{version}...")
forkchoice_response = engine_rpc.forkchoice_updated(
forkchoice_state=ForkchoiceState(
head_block_hash=payload.params[0].block_hash,
),
payload_attributes=None,
version=payload.forkchoice_updated_version,
)
status = forkchoice_response.payload_status.status
logger.info(f"Forkchoice update response: {status}")
if forkchoice_response.payload_status.status != PayloadStatusEnum.VALID:
raise LoggedError(
f"unexpected status: want {PayloadStatusEnum.VALID},"
f" got {forkchoice_response.payload_status.status}"
)
logger.info("All payloads processed successfully.")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/sync/conftest.py | src/pytest_plugins/consume/simulators/sync/conftest.py | """
Pytest fixtures for the `consume sync` simulator.
Configures the hive back-end & EL clients for each individual test execution.
"""
import io
import json
from typing import Dict, Generator, Mapping, cast
import pytest
from hive.client import Client, ClientType
from hive.testing import HiveTest
from ethereum_test_base_types import to_json
from ethereum_test_exceptions import ExceptionMapper
from ethereum_test_fixtures import BlockchainEngineSyncFixture
from ethereum_test_rpc import AdminRPC, EngineRPC, EthRPC, NetRPC
pytest_plugins = (
"pytest_plugins.pytest_hive.pytest_hive",
"pytest_plugins.consume.simulators.base",
"pytest_plugins.consume.simulators.single_test_client",
"pytest_plugins.consume.simulators.test_case_description",
"pytest_plugins.consume.simulators.timing_data",
"pytest_plugins.consume.simulators.exceptions",
)
def pytest_configure(config: pytest.Config) -> None:
"""Set the supported fixture formats for the engine sync simulator."""
config.supported_fixture_formats = [BlockchainEngineSyncFixture] # type: ignore[attr-defined]
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Parametrize sync_client_type separately from client_type."""
if "sync_client_type" in metafunc.fixturenames:
client_ids = [f"sync_{client.name}" for client in metafunc.config.hive_execution_clients] # type: ignore[attr-defined]
metafunc.parametrize(
"sync_client_type",
metafunc.config.hive_execution_clients, # type: ignore[attr-defined]
ids=client_ids,
)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(
session: pytest.Session, config: pytest.Config, items: list[pytest.Item]
) -> None:
"""Modify test IDs to show both client and sync client clearly."""
del session, config
for item in items:
# Auto-mark all verify_sync tests as flaky with 3 reruns
if item.get_closest_marker("blockchain_test_sync"):
item.add_marker(pytest.mark.flaky(reruns=3))
# Check if this test has both client_type and sync_client_type
if (
hasattr(item, "callspec")
and "client_type" in item.callspec.params
and "sync_client_type" in item.callspec.params
):
# Get the client names and remove fork suffix if present
client_name = item.callspec.params["client_type"].name.replace("-", "_")
sync_client_name = item.callspec.params["sync_client_type"].name.replace("-", "_")
# Format: ``-{client}_sync_{sync_client}``
new_suffix = f"-{client_name}::sync_{sync_client_name}"
# client_param-
# tests/path/to/test.py::test_name[test_params]-sync_client_param
# 1. Remove the client prefix from the beginning
# 2. Replace the -client_param part at the end with our new format
nodeid = item.nodeid
prefix_index = item.nodeid.find("-tests/")
if prefix_index != -1:
nodeid = item.nodeid[prefix_index + 1 :]
# Find the last hyphen followed by client name pattern and replace
if "-" in nodeid:
# Split by the last hyphen to separate the client suffix
parts = nodeid.rsplit("]-", 1)
assert len(parts) == 2, (
# expect "..._end_of_test]-client_name" suffix...
f"Unexpected format to parse client name: {nodeid}"
)
base = parts[0]
if base.endswith("sync_test"):
# Insert suffix before the closing bracket
base = base + new_suffix + "]"
item._nodeid = base
else:
item._nodeid = base + new_suffix
@pytest.fixture(scope="function")
def engine_rpc(client: Client, client_exception_mapper: ExceptionMapper | None) -> EngineRPC:
"""Initialize engine RPC client for the execution client under test."""
if client_exception_mapper:
return EngineRPC(
f"http://{client.ip}:8551",
response_validation_context={
"exception_mapper": client_exception_mapper,
},
)
return EngineRPC(f"http://{client.ip}:8551")
@pytest.fixture(scope="function")
def eth_rpc(client: Client) -> EthRPC:
"""Initialize eth RPC client for the execution client under test."""
return EthRPC(f"http://{client.ip}:8545")
@pytest.fixture(scope="function")
def net_rpc(client: Client) -> NetRPC:
"""Initialize net RPC client for the execution client under test."""
return NetRPC(f"http://{client.ip}:8545")
@pytest.fixture(scope="function")
def admin_rpc(client: Client) -> AdminRPC:
"""Initialize admin RPC client for the execution client under test."""
return AdminRPC(f"http://{client.ip}:8545")
@pytest.fixture(scope="function")
def sync_genesis(fixture: BlockchainEngineSyncFixture) -> Dict:
"""
Convert the fixture genesis block header and pre-state to a sync client
genesis state.
"""
genesis = to_json(fixture.genesis)
alloc = to_json(fixture.pre)
# NOTE: nethermind requires account keys without '0x' prefix
genesis["alloc"] = {k.replace("0x", ""): v for k, v in alloc.items()}
return genesis
@pytest.fixture(scope="function")
def sync_buffered_genesis(sync_genesis: Dict) -> io.BufferedReader:
"""
Create a buffered reader for the genesis block header of the sync client.
"""
genesis_json = json.dumps(sync_genesis)
genesis_bytes = genesis_json.encode("utf-8")
return io.BufferedReader(cast(io.RawIOBase, io.BytesIO(genesis_bytes)))
@pytest.fixture(scope="function")
def sync_client_files(sync_buffered_genesis: io.BufferedReader) -> Mapping[str, io.BufferedReader]:
"""Define the files that hive will start the sync client with."""
files = {}
files["/genesis.json"] = sync_buffered_genesis
return files
@pytest.fixture(scope="function")
def client_enode_url(client: Client) -> str:
"""Get the enode URL from the client under test."""
import logging
logger = logging.getLogger(__name__)
enode = client.enode()
logger.info(f"Client enode object: {enode}")
# Build the enode URL string with container IP
enode_url = f"enode://{enode.id}@{client.ip}:{enode.port}"
logger.info(f"Client enode URL: {enode_url}")
return enode_url
@pytest.fixture(scope="function")
def sync_client(
hive_test: HiveTest,
sync_client_files: Dict,
environment: Dict,
sync_client_type: ClientType, # Separate parametrization for sync client
client_enode_url: str, # Get the enode URL from fixture
) -> Generator[Client, None, None]:
"""Start a sync client that will sync from the client under test."""
import logging
logger = logging.getLogger(__name__)
logger.info(f"Starting sync client setup for {sync_client_type.name}")
# Start with the same environment as the main client
sync_environment = environment.copy()
# Only override what's necessary for sync client
sync_environment["HIVE_MINER"] = "" # Disable mining on sync client
# Set bootnode even though we also use admin_addPeer
# Some clients use this for initial P2P configuration
sync_environment["HIVE_BOOTNODE"] = client_enode_url
# Ensure both network and chain IDs are properly set
if "HIVE_NETWORK_ID" not in sync_environment and "HIVE_CHAIN_ID" in sync_environment:
# Some clients need explicit HIVE_NETWORK_ID
sync_environment["HIVE_NETWORK_ID"] = sync_environment["HIVE_CHAIN_ID"]
logger.info(f"Starting sync client ({sync_client_type.name})")
logger.info(f" Network ID: {sync_environment.get('HIVE_NETWORK_ID', 'NOT SET!')}")
logger.info(f" Chain ID: {sync_environment.get('HIVE_CHAIN_ID', 'NOT SET!')}")
# Debug: log all HIVE_ variables
hive_vars = {k: v for k, v in sync_environment.items() if k.startswith("HIVE_")}
logger.debug(f"All HIVE_ environment variables: {hive_vars}")
# Use the separately parametrized sync client type
sync_client = hive_test.start_client(
client_type=sync_client_type,
environment=sync_environment,
files=sync_client_files,
)
error_message = (
f"Unable to start sync client ({sync_client_type.name}) via Hive. "
"Check the client or Hive server logs for more information."
)
assert sync_client is not None, error_message
logger.info(f"Sync client ({sync_client_type.name}) started with IP: {sync_client.ip}")
yield sync_client
# Cleanup
sync_client.stop()
@pytest.fixture(scope="function")
def sync_client_exception_mapper(
sync_client_type: ClientType, client_exception_mapper_cache: Dict[str, ExceptionMapper | None]
) -> ExceptionMapper | None:
"""Return the exception mapper for the sync client type, with caching."""
if sync_client_type.name not in client_exception_mapper_cache:
from ..exceptions import EXCEPTION_MAPPERS
for client in EXCEPTION_MAPPERS:
if client in sync_client_type.name:
client_exception_mapper_cache[sync_client_type.name] = EXCEPTION_MAPPERS[client]
break
else:
client_exception_mapper_cache[sync_client_type.name] = None
return client_exception_mapper_cache[sync_client_type.name]
@pytest.fixture(scope="function")
def sync_engine_rpc(
sync_client: Client, sync_client_exception_mapper: ExceptionMapper | None
) -> EngineRPC:
"""Initialize engine RPC client for the sync client."""
if sync_client_exception_mapper:
return EngineRPC(
f"http://{sync_client.ip}:8551",
response_validation_context={
"exception_mapper": sync_client_exception_mapper,
},
)
return EngineRPC(f"http://{sync_client.ip}:8551")
@pytest.fixture(scope="function")
def sync_eth_rpc(sync_client: Client) -> EthRPC:
"""Initialize eth RPC client for the sync client."""
return EthRPC(f"http://{sync_client.ip}:8545")
@pytest.fixture(scope="function")
def sync_net_rpc(sync_client: Client) -> NetRPC:
"""Initialize net RPC client for the sync client."""
return NetRPC(f"http://{sync_client.ip}:8545")
@pytest.fixture(scope="function")
def sync_admin_rpc(sync_client: Client) -> AdminRPC:
"""Initialize admin RPC client for the sync client."""
return AdminRPC(f"http://{sync_client.ip}:8545")
@pytest.fixture(scope="module")
def test_suite_name() -> str:
"""The name of the hive test suite used in this simulator."""
return "eest/consume-sync"
@pytest.fixture(scope="module")
def test_suite_description() -> str:
"""The description of the hive test suite used in this simulator."""
return "Execute blockchain sync tests against clients using the Engine API."
@pytest.fixture(scope="function")
def client_files(buffered_genesis: io.BufferedReader) -> Mapping[str, io.BufferedReader]:
"""Define the files that hive will start the client with."""
files = {}
files["/genesis.json"] = buffered_genesis
return files
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/simulators/sync/__init__.py | src/pytest_plugins/consume/simulators/sync/__init__.py | """Consume Sync test functions."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/tests/test_consume_args.py | src/pytest_plugins/consume/tests/test_consume_args.py | """Test the consume plugins with various cli arguments."""
import re
import shutil
from pathlib import Path
from typing import List
import pytest
from filelock import FileLock
from pytest import Pytester, TempPathFactory
from ethereum_clis import TransitionTool
MINIMAL_TEST_FILE_NAME = "test_example.py"
MINIMAL_TEST_CONTENTS = """
from ethereum_test_tools import Transaction
def test_function(state_test, pre):
tx = Transaction(to=0, gas_limit=21_000, sender=pre.fund_eoa())
state_test(pre=pre, post={}, tx=tx)
"""
@pytest.fixture
def minimal_test_path(pytester: pytest.Pytester) -> Path:
"""
Minimal test file that's written to a file using pytester and ready to
fill.
"""
tests_dir = pytester.mkdir("tests")
test_file = tests_dir / MINIMAL_TEST_FILE_NAME
test_file.write_text(MINIMAL_TEST_CONTENTS)
return test_file
@pytest.fixture(scope="module")
def consume_test_case_ids() -> list[str]:
"""Hard-coded expected output of `consume direct --collectonly -q`."""
return [
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Cancun-blockchain_test_from_state_test]]",
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Paris-blockchain_test_from_state_test]]",
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Shanghai-blockchain_test_from_state_test]]",
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Cancun-state_test]]",
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Paris-state_test]]",
f"src/pytest_plugins/consume/direct/test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/{MINIMAL_TEST_FILE_NAME}::test_function[fork_Shanghai-state_test]]",
]
@pytest.fixture(scope="module")
def fill_fork_from() -> str:
"""Specify the value for `fill`'s `--from` argument."""
return "Paris"
@pytest.fixture(scope="module")
def fill_fork_until() -> str:
"""Specify the value for `fill`'s `--until` argument."""
return "Cancun"
@pytest.fixture(scope="module")
def fixtures_dir(tmp_path_factory: TempPathFactory) -> Path:
"""Define the temporary test fixture directory for fill output."""
return tmp_path_factory.mktemp("fixtures")
@pytest.fixture(autouse=True)
def fill_tests(
pytester: Pytester,
fixtures_dir: Path,
fill_fork_from: str,
fill_fork_until: str,
minimal_test_path: Path,
default_t8n: TransitionTool,
) -> None:
"""
Run fill to generate test fixtures for use with testing consume.
We only need to do this once so ideally the scope of this fixture should be
"module", however the `pytester` fixture's scope is function and cannot be
accessed from a higher scope fixture.
Instead we use a file lock and only write the fixtures once to the
directory.
"""
with FileLock(fixtures_dir.with_suffix(".lock")):
meta_folder = fixtures_dir / ".meta"
if not meta_folder.exists():
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-fill.ini")
args = [
"-c",
"pytest-fill.ini",
"-m",
"not blockchain_test_engine",
f"--from={fill_fork_from}",
f"--until={fill_fork_until}",
f"--output={str(fixtures_dir)}",
f"--t8n-server-url={default_t8n.server_url}",
str(minimal_test_path),
]
fill_result = pytester.runpytest(*args)
assert fill_result.ret == 0, f"Fill command failed:\n{str(fill_result.stdout)}"
@pytest.fixture(autouse=True, scope="function")
def test_fixtures(pytester: Pytester, fixtures_dir: Path, fill_tests: None) -> List[Path]:
"""
Copy test fixtures from the regular temp path to the pytester temporary
dir.
We intentionally copy the `.meta/index.json` file to test its compatibility
with consume.
"""
del fill_tests
test_fixtures = []
for json_file in fixtures_dir.rglob("*.json"):
target_dir = Path(pytester.path) / json_file.parent
if not target_dir.exists():
target_dir.mkdir(parents=True)
pytester.copy_example(name=json_file.as_posix())
shutil.move(json_file.name, target_dir / json_file.name)
if ".meta" not in str(json_file):
test_fixtures.append(json_file)
return test_fixtures
@pytest.fixture(autouse=True)
def copy_consume_test_paths(pytester: Pytester) -> None:
"""Specify and copy the consume test paths to the testdir."""
local_test_paths = [Path("src/pytest_plugins/consume/direct/test_via_direct.py")]
for test_path in local_test_paths:
target_dir = Path(pytester.path) / test_path.parent
target_dir.mkdir(parents=True, exist_ok=True)
pytester.copy_example(name=str(test_path))
pytester.copy_example(name=str(test_path.parent / "conftest.py"))
shutil.move(test_path.name, target_dir / test_path.name)
shutil.move("conftest.py", target_dir / "conftest.py")
single_test_id = (
"src/pytest_plugins/consume/direct/"
"test_via_direct.py::test_fixture[CollectOnlyFixtureConsumer-tests/"
f"{MINIMAL_TEST_FILE_NAME}::test_function[fork_Shanghai-state_test]]"
)
@pytest.mark.parametrize(
"extra_args, expected_filter_pattern",
[
pytest.param(
["--collect-only", "-q"],
re.compile(r".*"),
id="no_extra_args",
),
pytest.param(
["--collect-only", "-q", "--sim.limit", ".*fork_Cancun.*"],
re.compile(".*Cancun.*"),
id="sim_limit_regex",
),
pytest.param(
["--sim.limit", "collectonly:.*fork_Cancun.*"],
re.compile(".*Cancun.*"),
id="sim_limit_collect_only_regex",
),
pytest.param(
[
"--collect-only",
"-q",
"--sim.limit",
f"id:{single_test_id}",
],
re.compile(re.escape(f"{single_test_id}")),
id="sim_limit_id",
),
pytest.param(
[
"--sim.limit",
f"collectonly:id:{single_test_id}",
],
re.compile(
re.compile(re.escape(f"{single_test_id}")),
),
id="sim_limit_collect_only_id",
),
],
)
def test_consume_simlimit_collectonly(
pytester: Pytester,
fixtures_dir: Path,
consume_test_case_ids: List[str],
extra_args: List[str],
expected_filter_pattern: re.Pattern,
) -> None:
"""Test consume's --sim.limit argument in collect-only mode."""
pytester.copy_example(name="src/cli/pytest_commands/pytest_ini_files/pytest-consume.ini")
consume_test_path = "src/pytest_plugins/consume/direct/test_via_direct.py"
args = [
"-c",
"pytest-consume.ini",
"--input",
str(fixtures_dir),
consume_test_path,
*extra_args,
]
result = pytester.runpytest(*args)
assert result.ret == 0
stdout_lines = str(result.stdout).splitlines()
test_id_pattern = r"^(?:\s*)([^:\s]+\.py::[^:\s]+(?:::[^:\s]+)?)(?:\[[^\]]*\])?(?:\s*)$"
collected_test_ids = [
line for line in stdout_lines if line.strip() and re.match(test_id_pattern, line)
]
expected_collected_test_ids = [
line for line in consume_test_case_ids if expected_filter_pattern.search(line)
]
assert set(collected_test_ids) == set(expected_collected_test_ids)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/tests/test_releases.py | src/pytest_plugins/consume/tests/test_releases.py | """Test release parsing given the github repository release JSON data."""
from os.path import realpath
from pathlib import Path
from typing import List
import pytest
from ..releases import (
ReleaseInformation,
get_release_url_from_release_information,
parse_release_information_from_file,
)
CURRENT_FILE = Path(realpath(__file__))
CURRENT_FOLDER = CURRENT_FILE.parent
@pytest.fixture(scope="session")
def release_information() -> List[ReleaseInformation]:
"""Return the release information from a file."""
return parse_release_information_from_file(CURRENT_FOLDER / "release_information.json")
@pytest.mark.parametrize(
"release_name,expected_release_download_url",
[
(
"pectra-devnet-5",
"pectra-devnet-5%40v1.0.0/fixtures_pectra-devnet-5.tar.gz",
),
(
"pectra-devnet-4@v1.0.0",
"pectra-devnet-4%40v1.0.0/fixtures_pectra-devnet-4.tar.gz",
),
(
"stable",
"v3.0.0/fixtures_stable.tar.gz",
),
(
"develop",
"v3.0.0/fixtures_develop.tar.gz",
),
(
"eip7692-prague",
"eip7692%40v1.1.1/fixtures_eip7692-prague.tar.gz",
),
],
)
def test_release_parsing(
release_name: str,
expected_release_download_url: str,
release_information: List[ReleaseInformation],
) -> None:
"""Test release parsing."""
assert (
"https://github.com/ethereum/execution-spec-tests/releases/download/"
+ expected_release_download_url
) == get_release_url_from_release_information(release_name, release_information)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/tests/__init__.py | src/pytest_plugins/consume/tests/__init__.py | """Tests for the consume plugin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py | src/pytest_plugins/consume/tests/test_fixtures_source_input_types.py | """Test the simplified consume behavior for different input types."""
from pathlib import Path
from unittest.mock import MagicMock, patch
from ..consume import CACHED_DOWNLOADS_DIRECTORY, FixturesSource
class TestSimplifiedConsumeBehavior:
"""Test suite for the simplified consume behavior."""
def test_fixtures_source_from_release_url_no_api_calls(self) -> None:
"""
Test that direct release URLs do not make API calls for release page.
"""
test_url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_develop.tar.gz"
with patch("pytest_plugins.consume.consume.FixtureDownloader") as mock_downloader:
mock_instance = MagicMock()
mock_instance.download_and_extract.return_value = (False, Path("/tmp/test"))
mock_downloader.return_value = mock_instance
source = FixturesSource.from_release_url(test_url)
# Verify no release page is set for direct URLs
assert source.release_page == ""
assert source.url == test_url
assert source.input_option == test_url
def test_fixtures_source_from_release_spec_makes_api_calls(self) -> None:
"""
Test that release specs still make API calls and get release page.
"""
test_spec = "stable@latest"
with patch("pytest_plugins.consume.consume.get_release_url") as mock_get_url:
mock_get_url.return_value = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_stable.tar.gz"
with patch("pytest_plugins.consume.consume.get_release_page_url") as mock_get_page:
mock_get_page.return_value = (
"https://github.com/ethereum/execution-spec-tests/releases/tag/v3.0.0"
)
with patch("pytest_plugins.consume.consume.FixtureDownloader") as mock_downloader:
mock_instance = MagicMock()
mock_instance.download_and_extract.return_value = (False, Path("/tmp/test"))
mock_downloader.return_value = mock_instance
source = FixturesSource.from_release_spec(test_spec)
# Verify API calls were made and release page is set
mock_get_url.assert_called_once_with(test_spec)
mock_get_page.assert_called_once_with(
"https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_stable.tar.gz"
)
assert (
source.release_page
== "https://github.com/ethereum/execution-spec-tests/releases/tag/v3.0.0"
)
def test_fixtures_source_from_regular_url_no_release_page(self) -> None:
"""Test that regular URLs (non-GitHub) don't have release page."""
test_url = "http://example.com/fixtures.tar.gz"
with patch("pytest_plugins.consume.consume.FixtureDownloader") as mock_downloader:
mock_instance = MagicMock()
mock_instance.download_and_extract.return_value = (False, Path("/tmp/test"))
mock_downloader.return_value = mock_instance
source = FixturesSource.from_url(test_url)
# Verify no release page for regular URLs
assert source.release_page == ""
assert source.url == test_url
def test_output_formatting_without_release_page_for_direct_urls(self) -> None:
"""
Test output formatting when release page is empty for direct URLs.
"""
from unittest.mock import MagicMock
from pytest import Config
config = MagicMock(spec=Config)
config.fixtures_source = MagicMock()
config.fixtures_source.was_cached = False
config.fixtures_source.is_local = False
config.fixtures_source.path = Path("/tmp/test")
config.fixtures_source.url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_develop.tar.gz"
config.fixtures_source.release_page = "" # Empty for direct URLs
# Simulate the output generation logic from pytest_configure
reason = ""
if config.fixtures_source.was_cached:
reason += "Fixtures already cached."
elif not config.fixtures_source.is_local:
reason += "Fixtures downloaded and cached."
reason += f"\nPath: {config.fixtures_source.path}"
reason += f"\nInput: {config.fixtures_source.url or config.fixtures_source.path}"
if config.fixtures_source.release_page:
reason += f"\nRelease page: {config.fixtures_source.release_page}"
assert "Release page:" not in reason
assert "Path:" in reason
assert "Input:" in reason
def test_output_formatting_with_release_page_for_specs(self) -> None:
"""
Test output formatting when release page is present for release specs.
"""
from unittest.mock import MagicMock
from pytest import Config
config = MagicMock(spec=Config)
config.fixtures_source = MagicMock()
config.fixtures_source.was_cached = False
config.fixtures_source.is_local = False
config.fixtures_source.path = Path("/tmp/test")
config.fixtures_source.url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_stable.tar.gz"
config.fixtures_source.release_page = (
"https://github.com/ethereum/execution-spec-tests/releases/tag/v3.0.0"
)
# Simulate the output generation logic from pytest_configure
reason = ""
if config.fixtures_source.was_cached:
reason += "Fixtures already cached."
elif not config.fixtures_source.is_local:
reason += "Fixtures downloaded and cached."
reason += f"\nPath: {config.fixtures_source.path}"
reason += f"\nInput: {config.fixtures_source.url or config.fixtures_source.path}"
if config.fixtures_source.release_page:
reason += f"\nRelease page: {config.fixtures_source.release_page}"
assert (
"Release page: https://github.com/ethereum/execution-spec-tests/releases/tag/v3.0.0"
in reason
)
class TestFixturesSourceFromInput:
"""Test the from_input method without no_api_calls parameter."""
def test_from_input_handles_release_url(self) -> None:
"""Test that from_input properly handles release URLs."""
test_url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_develop.tar.gz"
with patch.object(FixturesSource, "from_release_url") as mock_from_release_url:
mock_from_release_url.return_value = MagicMock()
FixturesSource.from_input(test_url)
mock_from_release_url.assert_called_once_with(
test_url, CACHED_DOWNLOADS_DIRECTORY, None
)
def test_from_input_handles_release_spec(self) -> None:
"""Test that from_input properly handles release specs."""
test_spec = "stable@latest"
with patch.object(FixturesSource, "from_release_spec") as mock_from_release_spec:
mock_from_release_spec.return_value = MagicMock()
FixturesSource.from_input(test_spec)
mock_from_release_spec.assert_called_once_with(
test_spec, CACHED_DOWNLOADS_DIRECTORY, None
)
def test_from_input_handles_regular_url(self) -> None:
"""Test that from_input properly handles regular URLs."""
test_url = "http://example.com/fixtures.tar.gz"
with patch.object(FixturesSource, "from_url") as mock_from_url:
mock_from_url.return_value = MagicMock()
FixturesSource.from_input(test_url)
mock_from_url.assert_called_once_with(test_url, CACHED_DOWNLOADS_DIRECTORY, None)
def test_from_input_handles_extract_to_parameter(self) -> None:
"""Test that from_input properly passes extract_to parameter."""
test_url = "https://github.com/ethereum/execution-spec-tests/releases/download/v3.0.0/fixtures_develop.tar.gz"
extract_to_path = Path("/custom/extract/path")
with patch.object(FixturesSource, "from_release_url") as mock_from_release_url:
mock_from_release_url.return_value = MagicMock()
FixturesSource.from_input(test_url, extract_to=extract_to_path)
mock_from_release_url.assert_called_once_with(
test_url, CACHED_DOWNLOADS_DIRECTORY, extract_to_path
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/hive_simulators_reorg/__init__.py | src/pytest_plugins/consume/hive_simulators_reorg/__init__.py | """Hive simulators reorganization consumer plugin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/direct/conftest.py | src/pytest_plugins/consume/direct/conftest.py | """
A pytest plugin that configures the consume command to act as a test runner for
"direct" client fixture consumer interfaces.
For example, via go-ethereum's `evm blocktest` or `evm statetest` commands.
"""
import json
import tempfile
import warnings
from pathlib import Path
from typing import Any, Generator
import pytest
from ethereum_clis.ethereum_cli import EthereumCLI
from ethereum_clis.fixture_consumer_tool import FixtureConsumerTool
from ethereum_test_base_types import to_json
from ethereum_test_fixtures import (
BaseFixture,
BlockchainFixture,
EOFFixture,
StateFixture,
)
from ethereum_test_fixtures.consume import TestCaseIndexFile, TestCaseStream
from ethereum_test_fixtures.file import Fixtures
from pytest_plugins.consume.consume import FixturesSource
class CollectOnlyCLI(EthereumCLI):
"""A dummy CLI for use with `--collect-only`."""
def __init__(self) -> None: # noqa: D107
pass
class CollectOnlyFixtureConsumer(
FixtureConsumerTool, CollectOnlyCLI, fixture_formats=list(BaseFixture.formats.values())
):
"""A dummy fixture consumer for use with `--collect-only`."""
def consume_fixture(self, *args: Any, **kwargs: Any) -> None: # noqa: D102
pass
def pytest_addoption(parser: pytest.Parser) -> None: # noqa: D103
consume_group = parser.getgroup(
"consume_direct", "Arguments related to consuming fixtures via a client"
)
consume_group.addoption(
"--bin",
action="append",
dest="fixture_consumer_bin",
type=Path,
default=[],
help=(
"Path to a geth evm executable that provides `blocktest` or `statetest`. "
"Flag can be used multiple times to specify multiple fixture consumer binaries."
),
)
consume_group.addoption(
"--traces",
action="store_true",
dest="consumer_collect_traces",
default=False,
help="Collect traces of the execution information from the fixture consumer tool.",
)
debug_group = parser.getgroup("debug", "Arguments defining debug behavior")
debug_group.addoption(
"--dump-dir",
action="store",
dest="base_dump_dir",
type=Path,
default=None,
help="Path to dump the fixture consumer tool debug output.",
)
def pytest_configure(config: pytest.Config) -> None: # noqa: D103
config.supported_fixture_formats = [StateFixture, BlockchainFixture, EOFFixture] # type: ignore[attr-defined]
fixture_consumers = []
for fixture_consumer_bin_path in config.getoption("fixture_consumer_bin"):
fixture_consumers.append(
FixtureConsumerTool.from_binary_path(
binary_path=Path(fixture_consumer_bin_path),
trace=config.getoption("consumer_collect_traces"),
)
)
if config.option.markers:
return
elif not fixture_consumers and config.option.collectonly:
warnings.warn(
(
"No fixture consumer binaries provided; using a dummy consumer for collect-only; "
"all possible fixture formats will be collected. "
"Specify fixture consumer(s) via `--bin` to see actual collection results."
),
stacklevel=1,
)
fixture_consumers = [CollectOnlyFixtureConsumer()]
elif not fixture_consumers:
pytest.exit(
"No fixture consumer binaries provided; please specify a binary path via `--bin`."
)
config.fixture_consumers = fixture_consumers # type: ignore[attr-defined]
@pytest.fixture(scope="function")
def test_dump_dir(
request: pytest.FixtureRequest, fixture_path: Path, fixture_name: str
) -> Path | None:
"""The directory to write evm debug output to."""
base_dump_dir = request.config.getoption("base_dump_dir")
if not base_dump_dir:
return None
if len(fixture_name) > 142:
# ensure file name is not too long for eCryptFS
fixture_name = fixture_name[:70] + "..." + fixture_name[-70:]
return base_dump_dir / fixture_path.stem / fixture_name.replace("/", "-")
@pytest.fixture
def fixture_path(
test_case: TestCaseIndexFile | TestCaseStream, fixtures_source: FixturesSource
) -> Generator[Path, None, None]:
"""
Path to the current JSON fixture file.
If the fixture source is stdin, the fixture is written to a temporary json
file.
"""
if fixtures_source.is_stdin:
assert isinstance(test_case, TestCaseStream)
temp_dir = tempfile.TemporaryDirectory()
fixture_path = Path(temp_dir.name) / f"{test_case.id.replace('/', '_')}.json"
fixtures = Fixtures({test_case.id: test_case.fixture})
with open(fixture_path, "w") as f:
json.dump(to_json(fixtures), f, indent=4)
yield fixture_path
temp_dir.cleanup()
else:
assert isinstance(test_case, TestCaseIndexFile)
yield fixtures_source.path / test_case.json_path
@pytest.fixture(scope="function")
def fixture_name(test_case: TestCaseIndexFile | TestCaseStream) -> str:
"""Name of the current fixture."""
return test_case.id
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Parametrize test cases for every fixture consumer."""
metafunc.parametrize(
"fixture_consumer",
(
pytest.param(fixture_consumer, id=str(fixture_consumer.__class__.__name__))
for fixture_consumer in metafunc.config.fixture_consumers # type: ignore[attr-defined]
),
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/pytest_plugins/consume/direct/__init__.py | src/pytest_plugins/consume/direct/__init__.py | """Consume direct test functions."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.