repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/geth.py | src/ethereum_clis/clis/geth.py | """Go-ethereum Transition tool interface."""
import json
import re
import shlex
import shutil
import subprocess
import textwrap
from functools import cache
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional
from ethereum_test_exceptions import (
BlockException,
ExceptionBase,
ExceptionMapper,
TransactionException,
)
from ethereum_test_fixtures import BlockchainFixture, FixtureFormat, StateFixture
from ethereum_test_forks import Fork
from ..ethereum_cli import EthereumCLI
from ..fixture_consumer_tool import FixtureConsumerTool
from ..transition_tool import TransitionTool, dump_files_to_directory
class GethExceptionMapper(ExceptionMapper):
"""Translate between EEST exceptions and error strings returned by Geth."""
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.SENDER_NOT_EOA: "sender not an eoa",
TransactionException.GAS_ALLOWANCE_EXCEEDED: "gas limit reached",
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS: (
"insufficient funds for gas * price + value"
),
TransactionException.INTRINSIC_GAS_TOO_LOW: "intrinsic gas too low",
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST: (
"insufficient gas for floor data gas cost"
),
TransactionException.NONCE_IS_MAX: "nonce has max value",
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
"would exceed maximum allowance"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS: (
"max fee per blob gas less than block blob gas fee"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS: (
"max fee per gas less than block base fee"
),
TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS: (
"max priority fee per gas higher than max fee per gas"
),
TransactionException.TYPE_3_TX_PRE_FORK: ("transaction type not supported"),
TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH: "has invalid hash version",
# This message is the same as TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED
TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED: "blob transaction has too many blobs",
TransactionException.TYPE_3_TX_ZERO_BLOBS: "blob transaction missing blob hashes",
TransactionException.TYPE_3_TX_WITH_FULL_BLOBS: (
"unexpected blob sidecar in transaction at index"
),
TransactionException.TYPE_3_TX_CONTRACT_CREATION: (
"input string too short for common.Address, decoding into (types.BlobTx).To"
),
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: (
"EIP-7702 transaction with empty auth list"
),
TransactionException.TYPE_4_TX_CONTRACT_CREATION: (
"input string too short for common.Address, decoding into (types.SetCodeTx).To"
),
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM: "transaction gas limit too high",
TransactionException.TYPE_4_TX_PRE_FORK: ("transaction type not supported"),
TransactionException.INITCODE_SIZE_EXCEEDED: "max initcode size exceeded",
TransactionException.NONCE_MISMATCH_TOO_LOW: "nonce too low",
TransactionException.NONCE_MISMATCH_TOO_HIGH: "nonce too high",
BlockException.INCORRECT_BLOB_GAS_USED: "blob gas used mismatch",
BlockException.INCORRECT_EXCESS_BLOB_GAS: "invalid excessBlobGas",
BlockException.INVALID_VERSIONED_HASHES: "invalid number of versionedHashes",
BlockException.INVALID_REQUESTS: "invalid requests hash",
BlockException.SYSTEM_CONTRACT_CALL_FAILED: "system call failed to execute:",
BlockException.INVALID_BLOCK_HASH: "blockhash mismatch",
BlockException.RLP_BLOCK_LIMIT_EXCEEDED: "block RLP-encoded size exceeds maximum",
BlockException.INVALID_BAL_EXTRA_ACCOUNT: "BAL change not reported in computed",
BlockException.INVALID_BAL_MISSING_ACCOUNT: "additional mutations compared to BAL",
BlockException.INVALID_BLOCK_ACCESS_LIST: "unequal",
}
mapping_regex: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
r"blob gas used \d+ exceeds maximum allowance \d+"
),
BlockException.BLOB_GAS_USED_ABOVE_LIMIT: (
r"blob gas used \d+ exceeds maximum allowance \d+"
),
BlockException.INVALID_GAS_USED_ABOVE_LIMIT: r"invalid gasUsed: have \d+, gasLimit \d+",
BlockException.INVALID_DEPOSIT_EVENT_LAYOUT: (
r"invalid requests hash|failed to parse deposit logs"
),
# Geth does not validate the sizes or offsets of the deposit
# contract logs. As a workaround we have set
# INVALID_DEPOSIT_EVENT_LAYOUT equal to INVALID_REQUESTS.
#
# Although this is out of spec, it is understood that this
# will not cause an issue so long as the mainnet/testnet
# deposit contracts don't change.
#
# The offsets are checked second and the sizes are checked
# third within the `is_valid_deposit_event_data` function:
# https://eips.ethereum.org/EIPS/eip-6110#block-validity
#
# EELS definition for `is_valid_deposit_event_data`:
# https://github.com/ethereum/execution-specs/blob/5ddb904fa7ba27daeff423e78466744c51e8cb6a/src/ethereum/forks/prague/requests.py#L51
}
class GethEvm(EthereumCLI):
"""go-ethereum `evm` base class."""
default_binary = Path("evm")
detect_binary_pattern = re.compile(r"^evm(.exe)? version\b")
cached_version: Optional[str] = None
trace: bool
def __init__(
self,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the GethEvm class."""
self.binary = binary if binary else self.default_binary
self.trace = trace
self._info_metadata: Optional[Dict[str, Any]] = {}
def _run_command(self, command: List[str]) -> subprocess.CompletedProcess:
try:
return subprocess.run(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
except subprocess.CalledProcessError as e:
raise Exception("Command failed with non-zero status.") from e
except Exception as e:
raise Exception("Unexpected exception calling evm tool.") from e
def _consume_debug_dump(
self,
command: List[str],
result: subprocess.CompletedProcess,
fixture_path: Path,
debug_output_path: Path,
) -> None:
# our assumption is that each command element is a string
assert all(isinstance(x, str) for x in command), (
f"Not all elements of 'command' list are strings: {command}"
)
assert len(command) > 0
# replace last value with debug fixture path
debug_fixture_path = str(debug_output_path / "fixtures.json")
command[-1] = debug_fixture_path
# ensure that flags with spaces are wrapped in double-quotes
consume_direct_call = " ".join(shlex.quote(arg) for arg in command)
consume_direct_script = textwrap.dedent(
f"""\
#!/bin/bash
{consume_direct_call}
"""
)
dump_files_to_directory(
str(debug_output_path),
{
"consume_direct_args.py": command,
"consume_direct_returncode.txt": result.returncode,
"consume_direct_stdout.txt": result.stdout,
"consume_direct_stderr.txt": result.stderr,
"consume_direct.sh+x": consume_direct_script,
},
)
shutil.copyfile(fixture_path, debug_fixture_path)
@cache # noqa
def help(self, subcommand: str | None = None) -> str:
"""Return the help string, optionally for a subcommand."""
help_command = [str(self.binary)]
if subcommand:
help_command.append(subcommand)
help_command.append("--help")
return self._run_command(help_command).stdout
class GethTransitionTool(GethEvm, TransitionTool):
"""go-ethereum `evm` Transition tool interface wrapper class."""
subcommand: Optional[str] = "t8n"
trace: bool
t8n_use_stream = True
def __init__(
self,
*,
exception_mapper: Optional[ExceptionMapper] = None,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the GethTransitionTool class."""
if not exception_mapper:
exception_mapper = GethExceptionMapper()
GethEvm.__init__(self, binary=binary, trace=trace)
TransitionTool.__init__(
self, binary=binary, exception_mapper=exception_mapper, trace=trace
)
help_command = [str(self.binary), str(self.subcommand), "--help"]
result = self._run_command(help_command)
self.help_string = result.stdout
def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
If the fork is a transition fork, we want to check the fork it
transitions to.
"""
return fork.transition_tool_name() in self.help_string
class GethFixtureConsumer(
GethEvm,
FixtureConsumerTool,
fixture_formats=[StateFixture, BlockchainFixture],
):
"""Geth's implementation of the fixture consumer."""
def consume_blockchain_test(
self,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Consume a single blockchain test.
The `evm blocktest` command takes the `--run` argument which can be
used to select a specific fixture from the fixture file when executing.
"""
subcommand = "blocktest"
global_options = []
subcommand_options = []
if debug_output_path:
global_options += ["--verbosity", "100"]
subcommand_options += ["--trace"]
if fixture_name:
subcommand_options += ["--run", re.escape(fixture_name)]
command = (
[str(self.binary)]
+ global_options
+ [subcommand]
+ subcommand_options
+ [str(fixture_path)]
)
result = self._run_command(command)
if debug_output_path:
self._consume_debug_dump(command, result, fixture_path, debug_output_path)
if result.returncode != 0:
raise Exception(
f"Unexpected exit code:\n{' '.join(command)}\n\n Error:\n{result.stderr}"
)
result_json = json.loads(result.stdout)
if not isinstance(result_json, list):
raise Exception(f"Unexpected result from evm blocktest: {result_json}")
if any(not test_result["pass"] for test_result in result_json):
exception_text = "Blockchain test failed: \n" + "\n".join(
f"{test_result['name']}: " + test_result["error"]
for test_result in result_json
if not test_result["pass"]
)
raise Exception(exception_text)
@cache # noqa
def consume_state_test_file(
self,
fixture_path: Path,
debug_output_path: Optional[Path] = None,
) -> List[Dict[str, Any]]:
"""
Consume an entire state test file.
The `evm statetest` will always execute all the tests contained in a
file without the possibility of selecting a single test, so this
function is cached in order to only call the command once and
`consume_state_test` can simply select the result that was requested.
"""
subcommand = "statetest"
global_options: List[str] = []
subcommand_options: List[str] = []
if debug_output_path:
global_options += ["--verbosity", "100"]
subcommand_options += ["--trace"]
command = (
[str(self.binary)]
+ global_options
+ [subcommand]
+ subcommand_options
+ [str(fixture_path)]
)
result = self._run_command(command)
if debug_output_path:
self._consume_debug_dump(command, result, fixture_path, debug_output_path)
if result.returncode != 0:
raise Exception(
f"Unexpected exit code:\n{' '.join(command)}\n\n Error:\n{result.stderr}"
)
result_json = json.loads(result.stdout)
if not isinstance(result_json, list):
raise Exception(f"Unexpected result from evm statetest: {result_json}")
return result_json
def consume_state_test(
self,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Consume a single state test.
Uses the cached result from `consume_state_test_file` in order to not
call the command every time an select a single result from there.
"""
file_results = self.consume_state_test_file(
fixture_path=fixture_path,
debug_output_path=debug_output_path,
)
if fixture_name:
test_result = [
test_result for test_result in file_results if test_result["name"] == fixture_name
]
assert len(test_result) < 2, f"Multiple test results for {fixture_name}"
assert len(test_result) == 1, f"Test result for {fixture_name} missing"
assert test_result[0]["pass"], f"State test failed: {test_result[0]['error']}"
else:
if any(not test_result["pass"] for test_result in file_results):
exception_text = "State test failed: \n" + "\n".join(
f"{test_result['name']}: " + test_result["error"]
for test_result in file_results
if not test_result["pass"]
)
raise Exception(exception_text)
def consume_fixture(
self,
fixture_format: FixtureFormat,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Execute the appropriate geth fixture consumer for the fixture at
`fixture_path`.
"""
if fixture_format == BlockchainFixture:
self.consume_blockchain_test(
fixture_path=fixture_path,
fixture_name=fixture_name,
debug_output_path=debug_output_path,
)
elif fixture_format == StateFixture:
self.consume_state_test(
fixture_path=fixture_path,
fixture_name=fixture_name,
debug_output_path=debug_output_path,
)
else:
raise Exception(
f"Fixture format {fixture_format.format_name} not supported by {self.binary}"
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/execution_specs.py | src/ethereum_clis/clis/execution_specs.py | """
Ethereum Specs EVM Resolver Transition Tool Interface.
https://github.com/petertdavies/ethereum-spec-evm-resolver
"""
import os
import re
import subprocess
import time
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import ClassVar, Dict, List, Optional
from ethereum_test_exceptions import (
BlockException,
ExceptionBase,
ExceptionMapper,
TransactionException,
)
from ethereum_test_forks import Fork
from pytest_plugins.custom_logging import get_logger
from ..transition_tool import TransitionTool
DAEMON_STARTUP_TIMEOUT_SECONDS = 5
logger = get_logger(__name__)
class ExecutionSpecsTransitionTool(TransitionTool):
"""
Ethereum Specs EVM Resolver `ethereum-spec-evm-resolver` Transition Tool
wrapper class.
`ethereum-spec-evm-resolver` is installed by default for
`execution-spec-tests`:
```console
uv run fill --evm-bin=ethereum-spec-evm-resolver
```
To use a specific version of the `ethereum-spec-evm-resolver` tool, update
it to the desired version in `pyproject.toml`.
The `ethereum-spec-evm-resolver` tool essentially wraps around the EELS evm
daemon. It can handle requests for different EVM forks, even when those
forks are implemented by different versions of EELS hosted in different
places.
"""
default_binary = Path("ethereum-spec-evm-resolver")
detect_binary_pattern = re.compile(r"^ethereum-spec-evm-resolver\b")
t8n_use_server: bool = True
server_dir: Optional[TemporaryDirectory] = None
server_url: str | None = None
def __init__(
self,
*,
binary: Optional[Path] = None,
trace: bool = False,
server_url: str | None = None,
):
"""
Initialize the Ethereum Specs EVM Resolver Transition Tool interface.
"""
os.environ.setdefault("NO_PROXY", "*") # Disable proxy for local connections
super().__init__(
exception_mapper=ExecutionSpecsExceptionMapper(), binary=binary, trace=trace
)
args = [str(self.binary), "--help"]
try:
result = subprocess.run(args, capture_output=True, text=True)
except subprocess.CalledProcessError as e:
raise Exception(
"ethereum-spec-evm-resolver process unexpectedly returned a non-zero status code: "
f"{e}."
) from e
except Exception as e:
raise Exception(
f"Unexpected exception calling ethereum-spec-evm-resolver: {e}."
) from e
self.help_string = result.stdout
self.server_url = server_url
def start_server(self) -> None:
"""
Start the t8n-server process, extract the port, and leave it
running for future reuse.
"""
self.server_dir = TemporaryDirectory()
self.server_file_path = Path(self.server_dir.name) / "t8n.sock"
replaced_str = str(self.server_file_path).replace("/", "%2F")
self.server_url = f"http+unix://{replaced_str}/"
self.process = subprocess.Popen(
args=[
str(self.binary),
"daemon",
"--uds",
self.server_file_path,
],
)
start = time.time()
while True:
if self.server_file_path.exists():
break
if time.time() - start > DAEMON_STARTUP_TIMEOUT_SECONDS:
raise Exception("Failed starting ethereum-spec-evm subprocess")
time.sleep(0) # yield to other processes
def shutdown(self) -> None:
"""Stop the t8n-server process if it was started."""
if self.process:
self.process.terminate()
if self.server_dir:
self.server_dir.cleanup()
self.server_dir = None
def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool.
If the fork is a transition fork, we want to check the fork it
transitions to.
`ethereum-spec-evm` appends newlines to forks in the help string.
"""
fork_is_supported = (fork.transition_tool_name() + "\n") in self.help_string
logger.debug(f"EELS supports fork {fork}: {fork_is_supported}")
return fork_is_supported
def _generate_post_args(
self, t8n_data: TransitionTool.TransitionToolData
) -> Dict[str, List[str] | str]:
"""
Generate the arguments for the POST request to the t8n-server.
EELS T8N expects `--state-test` when running a state test.
"""
return {"arg": "--state-test"} if t8n_data.state_test else {}
class ExecutionSpecsExceptionMapper(ExceptionMapper):
"""
Translate between EEST exceptions and error strings returned by
ExecutionSpecs.
"""
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: "EmptyAuthorizationListError",
TransactionException.SENDER_NOT_EOA: "InvalidSenderError",
TransactionException.TYPE_4_TX_CONTRACT_CREATION: (
"TransactionTypeContractCreationError("
"'transaction type `SetCodeTransaction` not allowed to create contracts')"
),
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS: "InsufficientBalanceError",
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
"BlobGasLimitExceededError"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS: (
"InsufficientMaxFeePerBlobGasError"
),
TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH: (
"InvalidBlobVersionedHashError"
),
# This message is the same as TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED
TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED: "BlobCountExceededError",
TransactionException.TYPE_3_TX_ZERO_BLOBS: "NoBlobDataError",
TransactionException.INTRINSIC_GAS_TOO_LOW: "InsufficientTransactionGasError",
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST: "InsufficientTransactionGasError",
TransactionException.INITCODE_SIZE_EXCEEDED: "InitCodeTooLargeError",
TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS: (
"PriorityFeeGreaterThanMaxFeeError"
),
TransactionException.NONCE_MISMATCH_TOO_HIGH: "NonceMismatchError('nonce too high')",
TransactionException.NONCE_MISMATCH_TOO_LOW: "NonceMismatchError('nonce too low')",
TransactionException.TYPE_3_TX_CONTRACT_CREATION: (
"TransactionTypeContractCreationError("
"'transaction type `BlobTransaction` not allowed to create contracts')"
),
TransactionException.NONCE_IS_MAX: "NonceOverflowError",
TransactionException.GAS_ALLOWANCE_EXCEEDED: "GasUsedExceedsLimitError",
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM: "TransactionGasLimitExceededError",
BlockException.SYSTEM_CONTRACT_EMPTY: "System contract address",
BlockException.SYSTEM_CONTRACT_CALL_FAILED: "call failed:",
BlockException.INVALID_DEPOSIT_EVENT_LAYOUT: "deposit",
}
mapping_regex: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS: (
r"InsufficientMaxFeePerGasError|InvalidBlock" # Temporary solution for issue #1981.
),
TransactionException.TYPE_3_TX_PRE_FORK: (
r"module '.*transactions' has no attribute 'BlobTransaction'"
),
TransactionException.TYPE_4_TX_PRE_FORK: (
r"'.*transactions' has no attribute 'SetCodeTransaction'"
),
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/__init__.py | src/ethereum_clis/clis/__init__.py | """Package containing concrete implementations of Ethereum CL interfaces."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/erigon.py | src/ethereum_clis/clis/erigon.py | """Erigon execution client transition tool."""
from ethereum_test_exceptions import BlockException, ExceptionMapper, TransactionException
class ErigonExceptionMapper(ExceptionMapper):
"""Erigon exception mapper."""
mapping_substring = {
TransactionException.SENDER_NOT_EOA: "sender not an eoa",
TransactionException.INITCODE_SIZE_EXCEEDED: "max initcode size exceeded",
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS: (
"insufficient funds for gas * price + value"
),
TransactionException.NONCE_IS_MAX: "nonce has max value",
TransactionException.INTRINSIC_GAS_TOO_LOW: "intrinsic gas too low",
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST: "intrinsic gas too low",
TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS: "fee cap less than block base fee",
TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS: "tip higher than fee cap",
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS: "max fee per blob gas too low",
TransactionException.NONCE_MISMATCH_TOO_LOW: "nonce too low",
TransactionException.GAS_ALLOWANCE_EXCEEDED: "gas limit reached",
TransactionException.TYPE_3_TX_PRE_FORK: "blob txn is not supported by signer",
TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH: (
"invalid blob versioned hash, must start with VERSIONED_HASH_VERSION_KZG"
),
TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED: "blob transaction has too many blobs",
TransactionException.TYPE_3_TX_ZERO_BLOBS: "a blob stx must contain at least one blob",
TransactionException.TYPE_3_TX_WITH_FULL_BLOBS: "rlp: expected String or Byte",
TransactionException.TYPE_3_TX_CONTRACT_CREATION: "wrong size for To: 0",
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
"blobs/blobgas exceeds max"
),
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: (
"SetCodeTransaction without authorizations is invalid"
),
TransactionException.TYPE_4_TX_CONTRACT_CREATION: "wrong size for To: 0",
TransactionException.TYPE_4_TX_PRE_FORK: "setCode tx is not supported by signer",
BlockException.INVALID_DEPOSIT_EVENT_LAYOUT: "could not parse requests logs",
BlockException.SYSTEM_CONTRACT_EMPTY: "Syscall failure: Empty Code at",
BlockException.SYSTEM_CONTRACT_CALL_FAILED: "Unprecedented Syscall failure",
BlockException.INVALID_REQUESTS: "invalid requests root hash in header",
BlockException.INVALID_BLOCK_HASH: "invalid block hash",
BlockException.RLP_BLOCK_LIMIT_EXCEEDED: "block exceeds max rlp size",
}
mapping_regex = {
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM: (
r"invalid block, txnIdx=\d+,.*gas limit too high"
),
BlockException.INCORRECT_BLOB_GAS_USED: r"blobGasUsed by execution: \d+, in header: \d+",
BlockException.INCORRECT_EXCESS_BLOB_GAS: r"invalid excessBlobGas: have \d+, want \d+",
BlockException.INVALID_GAS_USED: r"gas used by execution: \w+, in header: \w+",
BlockException.INVALID_GAS_USED_ABOVE_LIMIT: r"invalid gasUsed: have \d+, gasLimit \d+",
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/evmone.py | src/ethereum_clis/clis/evmone.py | """Evmone Transition tool interface."""
import json
import re
import shlex
import shutil
import subprocess
import tempfile
import textwrap
from functools import cache
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional
import pytest
from ethereum_clis.file_utils import dump_files_to_directory
from ethereum_clis.fixture_consumer_tool import FixtureConsumerTool
from ethereum_test_exceptions import (
EOFException,
ExceptionBase,
ExceptionMapper,
TransactionException,
)
from ethereum_test_fixtures.base import FixtureFormat
from ethereum_test_fixtures.blockchain import BlockchainFixture
from ethereum_test_fixtures.state import StateFixture
from ethereum_test_forks import Fork
from ..transition_tool import TransitionTool
class EvmOneTransitionTool(TransitionTool):
"""Evmone `evmone-t8n` Transition tool interface wrapper class."""
default_binary = Path("evmone-t8n")
detect_binary_pattern = re.compile(r"^evmone-t8n\b")
t8n_use_stream = False
binary: Path
cached_version: Optional[str] = None
trace: bool
supports_opcode_count: ClassVar[bool] = True
supports_blob_params: ClassVar[bool] = True
def __init__(
self,
*,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the Evmone Transition tool interface."""
super().__init__(exception_mapper=EvmoneExceptionMapper(), binary=binary, trace=trace)
def is_fork_supported(self, fork: Fork) -> bool:
"""
Return True if the fork is supported by the tool. Currently, evmone-t8n
provides no way to determine supported forks.
"""
del fork
return True
class EvmoneFixtureConsumerCommon:
"""Common functionality for Evmone fixture consumers."""
binary: Path
version_flag: str = "--version"
cached_version: Optional[str] = None
def __init__(
self,
trace: bool = False,
):
"""Initialize the EvmoneFixtureConsumerCommon class."""
del trace
self._info_metadata: Optional[Dict[str, Any]] = {}
def _run_command(self, command: List[str]) -> subprocess.CompletedProcess:
try:
return subprocess.run(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
except subprocess.CalledProcessError as e:
raise Exception("Command failed with non-zero status.") from e
except Exception as e:
raise Exception("Unexpected exception calling evm tool.") from e
# TODO: copied from geth.py, needs to be deduplicated, but nethermind.py
# also has its version
def _consume_debug_dump(
self,
command: List[str],
result: subprocess.CompletedProcess,
fixture_path: Path,
debug_output_path: Path,
) -> None:
# our assumption is that each command element is a string
assert all(isinstance(x, str) for x in command), (
f"Not all elements of 'command' list are strings: {command}"
)
assert len(command) > 0
# replace last value with debug fixture path
debug_fixture_path = str(debug_output_path / "fixtures.json")
command[-1] = debug_fixture_path
# ensure that flags with spaces are wrapped in double-quotes
consume_direct_call = " ".join(shlex.quote(arg) for arg in command)
consume_direct_script = textwrap.dedent(
f"""\
#!/bin/bash
{consume_direct_call}
"""
)
dump_files_to_directory(
str(debug_output_path),
{
"consume_direct_args.py": command,
"consume_direct_returncode.txt": result.returncode,
"consume_direct_stdout.txt": result.stdout,
"consume_direct_stderr.txt": result.stderr,
"consume_direct.sh+x": consume_direct_script,
},
)
shutil.copyfile(fixture_path, debug_fixture_path)
def _skip_message(self, fixture_format: FixtureFormat) -> str:
return f"Fixture format {fixture_format.format_name} not supported by {self.binary}"
@cache # noqa
def consume_test_file(
self,
fixture_path: Path,
debug_output_path: Optional[Path] = None,
) -> Dict[str, Any]:
"""
Consume an entire state or blockchain test file.
The `evmone-...test` will always execute all the tests contained in a
file without the possibility of selecting a single test, so this
function is cached in order to only call the command once and
`consume_test` can simply select the result that was requested.
"""
global_options: List[str] = []
if debug_output_path:
global_options += ["--trace"]
with tempfile.NamedTemporaryFile() as tempfile_json:
# `evmone` uses `gtest` and generates JSON output to a file,
# c.f. https://google.github.io/googletest/advanced.html#generating-a-json-report
# see there for the JSON schema.
global_options += ["--gtest_output=json:{}".format(tempfile_json.name)]
command = [str(self.binary)] + global_options + [str(fixture_path)]
result = self._run_command(command)
if result.returncode not in [0, 1]:
raise Exception(
f"Unexpected exit code:\n{' '.join(command)}\n\n Error:\n{result.stderr}"
)
try:
output_data = json.load(tempfile_json)
except json.JSONDecodeError as e:
raise Exception(
f"Failed to parse JSON output from evmone-state/blockchaintest: {e}"
) from e
if debug_output_path:
self._consume_debug_dump(command, result, fixture_path, debug_output_path)
return output_data
def _failure_msg(self, file_results: Dict[str, Any]) -> str:
# Assumes only one test has run and there has been a failure,
# as asserted before.
failures = file_results["testsuites"][0]["testsuite"][0]["failures"]
return ", ".join([f["failure"] for f in failures])
def consume_test(
self,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Consume a single state or blockchain test.
Uses the cached result from `consume_test_file` in order to not
call the command every time an select a single result from there.
"""
file_results = self.consume_test_file(
fixture_path=fixture_path,
debug_output_path=debug_output_path,
)
assert len(file_results["testsuites"]) < 2, f"Multiple testsuites for {fixture_name}"
assert len(file_results["testsuites"]) == 1, f"testsuite for {fixture_name} missing"
test_suite = file_results["testsuites"][0]["testsuite"]
assert fixture_name is not None, "fixture_name must be provided for evmone tests"
test_results = [
test_result for test_result in test_suite if test_result["name"] == fixture_name
]
assert len(test_results) < 2, f"Multiple test results for {fixture_name}"
assert len(test_results) == 1, f"Test result for {fixture_name} missing"
assert "failures" not in test_results[0], (
f"Test failed: {test_results[0]['failures'][0]['failure']}"
)
class EvmOneStateFixtureConsumer(
EvmoneFixtureConsumerCommon,
FixtureConsumerTool,
fixture_formats=[StateFixture],
):
"""Evmone's implementation of the fixture consumer for state tests."""
default_binary = Path("evmone-statetest")
detect_binary_pattern = re.compile(r"^evmone-statetest\b")
def __init__(
self,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the EvmOneStateFixtureConsumer class."""
self.binary = binary if binary else self.default_binary
super().__init__(trace=trace)
def consume_fixture(
self,
fixture_format: FixtureFormat,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Execute the appropriate fixture consumer for the fixture at
`fixture_path`.
"""
if fixture_format == StateFixture:
self.consume_test(
fixture_path=fixture_path,
fixture_name=fixture_name,
debug_output_path=debug_output_path,
)
else:
pytest.skip(self._skip_message(fixture_format))
class EvmOneBlockchainFixtureConsumer(
EvmoneFixtureConsumerCommon,
FixtureConsumerTool,
fixture_formats=[BlockchainFixture],
):
"""Evmone's implementation of the fixture consumer for blockchain tests."""
default_binary = Path("evmone-blockchaintest")
detect_binary_pattern = re.compile(r"^evmone-blockchaintest\b")
def __init__(
self,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the EvmOneBlockchainFixtureConsumer class."""
self.binary = binary if binary else self.default_binary
super().__init__(trace=trace)
def consume_fixture(
self,
fixture_format: FixtureFormat,
fixture_path: Path,
fixture_name: Optional[str] = None,
debug_output_path: Optional[Path] = None,
) -> None:
"""
Execute the appropriate fixture consumer for the fixture at
`fixture_path`.
"""
if fixture_format == BlockchainFixture:
self.consume_test(
fixture_path=fixture_path,
fixture_name=fixture_name,
debug_output_path=debug_output_path,
)
else:
pytest.skip(self._skip_message(fixture_format))
class EvmoneExceptionMapper(ExceptionMapper):
"""
Translate between EEST exceptions and error strings returned by Evmone.
"""
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.SENDER_NOT_EOA: "sender not an eoa:",
TransactionException.GAS_ALLOWANCE_EXCEEDED: "gas limit reached",
TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS: (
"max priority fee per gas higher than max fee per gas"
),
TransactionException.NONCE_IS_MAX: "nonce has max value:",
TransactionException.TYPE_4_TX_CONTRACT_CREATION: "set code transaction must ",
TransactionException.TYPE_4_INVALID_AUTHORITY_SIGNATURE: "invalid authorization signature",
TransactionException.TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH: (
"authorization signature s value too high"
),
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: "empty authorization list",
TransactionException.INTRINSIC_GAS_TOO_LOW: "intrinsic gas too low",
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST: "intrinsic gas too low",
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: "blob gas limit exceeded",
TransactionException.INITCODE_SIZE_EXCEEDED: "max initcode size exceeded",
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS: (
"insufficient funds for gas * price + value"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS: (
"max fee per gas less than block base fee"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS: (
"max blob fee per gas less than block base fee"
),
TransactionException.TYPE_4_TX_PRE_FORK: "transaction type not supported",
TransactionException.TYPE_3_TX_PRE_FORK: "transaction type not supported",
TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH: "invalid blob hash version",
TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED: "blob gas limit exceeded",
TransactionException.TYPE_3_TX_ZERO_BLOBS: "empty blob hashes list",
TransactionException.TYPE_3_TX_CONTRACT_CREATION: (
"blob transaction must not be a create transaction"
),
TransactionException.NONCE_MISMATCH_TOO_LOW: "nonce too low",
TransactionException.NONCE_MISMATCH_TOO_HIGH: "nonce too high",
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM: "max gas limit exceeded",
# TODO EVMONE needs to differentiate when the section is missing in the
# header or body
EOFException.MISSING_STOP_OPCODE: "err: no_terminating_instruction",
EOFException.MISSING_CODE_HEADER: "err: code_section_missing",
EOFException.MISSING_TYPE_HEADER: "err: type_section_missing",
# TODO EVMONE these exceptions are too similar, this leeds to ambiguity
EOFException.MISSING_TERMINATOR: "err: header_terminator_missing",
EOFException.MISSING_HEADERS_TERMINATOR: "err: section_headers_not_terminated",
EOFException.INVALID_VERSION: "err: eof_version_unknown",
EOFException.INVALID_NON_RETURNING_FLAG: "err: invalid_non_returning_flag",
EOFException.INVALID_MAGIC: "err: invalid_prefix",
EOFException.INVALID_FIRST_SECTION_TYPE: "err: invalid_first_section_type",
EOFException.INVALID_SECTION_BODIES_SIZE: "err: invalid_section_bodies_size",
EOFException.INVALID_TYPE_SECTION_SIZE: "err: invalid_type_section_size",
EOFException.INCOMPLETE_SECTION_SIZE: "err: incomplete_section_size",
EOFException.INCOMPLETE_SECTION_NUMBER: "err: incomplete_section_number",
EOFException.TOO_MANY_CODE_SECTIONS: "err: too_many_code_sections",
EOFException.ZERO_SECTION_SIZE: "err: zero_section_size",
EOFException.MISSING_DATA_SECTION: "err: data_section_missing",
EOFException.UNDEFINED_INSTRUCTION: "err: undefined_instruction",
EOFException.INPUTS_OUTPUTS_NUM_ABOVE_LIMIT: "err: inputs_outputs_num_above_limit",
EOFException.UNREACHABLE_INSTRUCTIONS: "err: unreachable_instructions",
EOFException.INVALID_RJUMP_DESTINATION: "err: invalid_rjump_destination",
EOFException.UNREACHABLE_CODE_SECTIONS: "err: unreachable_code_sections",
EOFException.STACK_UNDERFLOW: "err: stack_underflow",
EOFException.STACK_OVERFLOW: "err: stack_overflow",
EOFException.MAX_STACK_INCREASE_ABOVE_LIMIT: "err: max_stack_increase_above_limit",
EOFException.STACK_HIGHER_THAN_OUTPUTS: "err: stack_higher_than_outputs_required",
EOFException.JUMPF_DESTINATION_INCOMPATIBLE_OUTPUTS: (
"err: jumpf_destination_incompatible_outputs"
),
EOFException.INVALID_MAX_STACK_INCREASE: "err: invalid_max_stack_increase",
EOFException.INVALID_DATALOADN_INDEX: "err: invalid_dataloadn_index",
EOFException.TRUNCATED_INSTRUCTION: "err: truncated_instruction",
EOFException.TOPLEVEL_CONTAINER_TRUNCATED: "err: toplevel_container_truncated",
EOFException.ORPHAN_SUBCONTAINER: "err: unreferenced_subcontainer",
EOFException.CONTAINER_SIZE_ABOVE_LIMIT: "err: container_size_above_limit",
EOFException.INVALID_CONTAINER_SECTION_INDEX: "err: invalid_container_section_index",
EOFException.INCOMPATIBLE_CONTAINER_KIND: "err: incompatible_container_kind",
EOFException.AMBIGUOUS_CONTAINER_KIND: "err: ambiguous_container_kind",
EOFException.STACK_HEIGHT_MISMATCH: "err: stack_height_mismatch",
EOFException.TOO_MANY_CONTAINERS: "err: too_many_container_sections",
EOFException.INVALID_CODE_SECTION_INDEX: "err: invalid_code_section_index",
EOFException.CALLF_TO_NON_RETURNING: "err: callf_to_non_returning_function",
EOFException.EOFCREATE_WITH_TRUNCATED_CONTAINER: "err: eofcreate_with_truncated_container",
}
mapping_regex: ClassVar[Dict[ExceptionBase, str]] = {}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_clis/clis/besu.py | src/ethereum_clis/clis/besu.py | """Hyperledger Besu Transition tool frontend."""
import json
import os
import re
import subprocess
import tempfile
import textwrap
from pathlib import Path
from typing import ClassVar, Dict, Optional
import requests
from ethereum_test_exceptions import (
BlockException,
ExceptionBase,
ExceptionMapper,
TransactionException,
)
from ethereum_test_forks import Fork
from ..cli_types import TransitionToolOutput
from ..transition_tool import TransitionTool, dump_files_to_directory, model_dump_config
class BesuTransitionTool(TransitionTool):
"""Besu EvmTool Transition tool frontend wrapper class."""
default_binary = Path("evm")
detect_binary_pattern = re.compile(r"^Besu evm .*$")
binary: Path
cached_version: Optional[str] = None
trace: bool
process: Optional[subprocess.Popen] = None
server_url: str
besu_trace_dir: Optional[tempfile.TemporaryDirectory]
supports_xdist: ClassVar[bool] = False
def __init__(
self,
*,
binary: Optional[Path] = None,
trace: bool = False,
):
"""Initialize the BesuTransitionTool class."""
super().__init__(exception_mapper=BesuExceptionMapper(), binary=binary, trace=trace)
args = [str(self.binary), "t8n", "--help"]
try:
result = subprocess.run(args, capture_output=True, text=True)
except subprocess.CalledProcessError as e:
raise Exception(
f"evm process unexpectedly returned a non-zero status code: {e}."
) from e
except Exception as e:
raise Exception(f"Unexpected exception calling evm tool: {e}.") from e
self.help_string = result.stdout
self.besu_trace_dir = tempfile.TemporaryDirectory() if self.trace else None
def start_server(self) -> None:
"""
Start the t8n-server process, extract the port, and leave it
running for future reuse.
"""
args = [
str(self.binary),
"t8n-server",
"--port=0", # OS assigned server port
]
if self.trace:
args.append("--trace")
if self.besu_trace_dir:
args.append(f"--output.basedir={self.besu_trace_dir.name}")
self.process = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
while True:
if self.process.stdout is None:
raise Exception("Failed starting Besu subprocess")
line = str(self.process.stdout.readline())
if not line or "Failed to start transition server" in line:
raise Exception("Failed starting Besu subprocess\n" + line)
if "Transition server listening on" in line:
match = re.search("Transition server listening on (\\d+)", line)
if match:
port = match.group(1)
self.server_url = f"http://localhost:{port}/"
break
def shutdown(self) -> None:
"""Stop the t8n-server process if it was started."""
if self.process:
self.process.kill()
if self.besu_trace_dir:
self.besu_trace_dir.cleanup()
def evaluate(
self,
*,
transition_tool_data: TransitionTool.TransitionToolData,
debug_output_path: str = "",
slow_request: bool = False,
) -> TransitionToolOutput:
"""Execute `evm t8n` with the specified arguments."""
del slow_request
if not self.process:
self.start_server()
input_json = transition_tool_data.to_input().model_dump(mode="json", **model_dump_config)
state_json = {
"fork": transition_tool_data.fork_name,
"chainid": transition_tool_data.chain_id,
"reward": transition_tool_data.reward,
}
post_data = {"state": state_json, "input": input_json}
if debug_output_path:
post_data_string = json.dumps(post_data, indent=4)
additional_indent = " " * 16 # for pretty indentation in t8n.sh
indented_post_data_string = "{\n" + "\n".join(
additional_indent + line for line in post_data_string[1:].splitlines()
)
t8n_script = textwrap.dedent(
f"""\
#!/bin/bash
# Use $1 as t8n-server port if provided, else default to 3000
PORT=${{1:-3000}}
curl http://localhost:${{PORT}}/ -X POST -H "Content-Type: application/json" \\
--data '{indented_post_data_string}'
"""
)
dump_files_to_directory(
debug_output_path,
{
"state.json": state_json,
"input/alloc.json": input_json["alloc"],
"input/env.json": input_json["env"],
"input/txs.json": input_json["txs"],
"t8n.sh+x": t8n_script,
},
)
response = requests.post(self.server_url, json=post_data, timeout=5)
response.raise_for_status() # exception visible in pytest failure output
output: TransitionToolOutput = TransitionToolOutput.model_validate(
response.json(), context={"exception_mapper": self.exception_mapper}
)
if debug_output_path:
dump_files_to_directory(
debug_output_path,
{
"response.txt": response.text,
"status_code.txt": response.status_code,
"time_elapsed_seconds.txt": response.elapsed.total_seconds(),
},
)
if response.status_code != 200:
raise Exception(
f"t8n-server returned status code {response.status_code}, "
f"response: {response.text}"
)
if debug_output_path:
dump_files_to_directory(
debug_output_path,
{
"output/alloc.json": output.alloc.model_dump(mode="json", **model_dump_config),
"output/result.json": output.result.model_dump(
mode="json", **model_dump_config
),
"output/txs.rlp": str(output.body),
},
)
if self.trace and self.besu_trace_dir:
self.collect_traces(output.result.receipts, self.besu_trace_dir, debug_output_path)
for i, r in enumerate(output.result.receipts):
trace_file_name = f"trace-{i}-{r.transaction_hash}.jsonl"
os.remove(os.path.join(self.besu_trace_dir.name, trace_file_name))
return output
def is_fork_supported(self, fork: Fork) -> bool:
"""Return True if the fork is supported by the tool."""
return fork.transition_tool_name() in self.help_string
class BesuExceptionMapper(ExceptionMapper):
"""Translate between EEST exceptions and error strings returned by Besu."""
mapping_substring: ClassVar[Dict[ExceptionBase, str]] = {
TransactionException.NONCE_IS_MAX: "invalid Nonce must be less than",
TransactionException.INSUFFICIENT_MAX_FEE_PER_BLOB_GAS: (
"transaction invalid tx max fee per blob gas less than block blob gas fee"
),
TransactionException.GASLIMIT_PRICE_PRODUCT_OVERFLOW: (
"invalid Upfront gas cost cannot exceed 2^256 Wei"
),
TransactionException.INSUFFICIENT_MAX_FEE_PER_GAS: (
"transaction invalid gasPrice is less than the current BaseFee"
),
TransactionException.GAS_ALLOWANCE_EXCEEDED: "provided gas insufficient",
TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS: (
"transaction invalid max priority fee per gas cannot be greater than max fee per gas"
),
TransactionException.TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH: "Invalid versionedHash",
TransactionException.TYPE_3_TX_CONTRACT_CREATION: (
"transaction invalid transaction blob transactions must have a to address"
),
TransactionException.TYPE_3_TX_WITH_FULL_BLOBS: (
"Failed to decode transactions from block parameter"
),
TransactionException.TYPE_3_TX_ZERO_BLOBS: (
"Failed to decode transactions from block parameter"
),
TransactionException.TYPE_3_TX_PRE_FORK: (
"Transaction type BLOB is invalid, accepted transaction types are"
),
TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST: (
"transaction invalid transaction code delegation transactions must have a "
"non-empty code delegation list"
),
TransactionException.TYPE_4_TX_CONTRACT_CREATION: (
"transaction invalid transaction code delegation transactions must have a to address"
),
TransactionException.TYPE_4_TX_PRE_FORK: (
"transaction invalid Transaction type DELEGATE_CODE is invalid"
),
BlockException.RLP_STRUCTURES_ENCODING: (
"Failed to decode transactions from block parameter"
),
BlockException.INCORRECT_EXCESS_BLOB_GAS: (
"Payload excessBlobGas does not match calculated excessBlobGas"
),
BlockException.BLOB_GAS_USED_ABOVE_LIMIT: (
"Payload BlobGasUsed does not match calculated BlobGasUsed"
),
BlockException.INCORRECT_BLOB_GAS_USED: (
"Payload BlobGasUsed does not match calculated BlobGasUsed"
),
BlockException.INVALID_GAS_USED_ABOVE_LIMIT: "Header validation failed (FULL)",
}
mapping_regex = {
BlockException.INVALID_REQUESTS: (
r"Invalid execution requests|Requests hash mismatch, calculated: 0x[0-9a-f]+ header: "
r"0x[0-9a-f]+"
),
BlockException.INVALID_BLOCK_HASH: (
r"Computed block hash 0x[0-9a-f]+ does not match block hash parameter 0x[0-9a-f]+"
),
BlockException.SYSTEM_CONTRACT_CALL_FAILED: (
r"System call halted|System call did not execute to completion"
),
BlockException.SYSTEM_CONTRACT_EMPTY: (
r"(Invalid system call, no code at address)|" r"(Invalid system call address:)"
),
BlockException.INVALID_DEPOSIT_EVENT_LAYOUT: (
r"Invalid (amount|index|pubKey|signature|withdrawalCred) (offset|size): "
r"expected (\d+), but got (-?\d+)|"
r"Invalid deposit log length\. Must be \d+ bytes, but is \d+ bytes"
),
BlockException.RLP_BLOCK_LIMIT_EXCEEDED: (
r"Block size of \d+ bytes exceeds limit of \d+ bytes"
),
TransactionException.INITCODE_SIZE_EXCEEDED: (
r"transaction invalid Initcode size of \d+ exceeds maximum size of \d+"
),
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS: (
r"transaction invalid transaction up-front cost 0x[0-9a-f]+ exceeds transaction "
r"sender account balance 0x[0-9a-f]+"
),
TransactionException.INTRINSIC_GAS_TOO_LOW: (
r"transaction invalid intrinsic gas cost \d+ exceeds gas limit \d+"
),
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST: (
r"transaction invalid intrinsic gas cost \d+ exceeds gas limit \d+"
),
TransactionException.SENDER_NOT_EOA: (
r"transaction invalid Sender 0x[0-9a-f]+ has deployed code and so is not authorized "
r"to send transactions"
),
TransactionException.NONCE_MISMATCH_TOO_LOW: (
r"transaction invalid transaction nonce \d+ below sender account nonce \d+"
),
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM: (
r"transaction invalid Transaction gas limit must be at most \d+"
),
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED: (
r"Blob transaction 0x[0-9a-f]+ exceeds block blob gas limit: \d+ > \d+"
),
TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED: (
r"Blob transaction has too many blobs: \d+|Invalid Blob Count: \d+"
),
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exception_mapper.py | src/ethereum_test_exceptions/exception_mapper.py | """EEST Exception mapper."""
import re
from abc import ABC
from typing import Any, ClassVar, Dict, Generic, List
from pydantic import BaseModel, BeforeValidator, ValidationInfo
from .exceptions import ExceptionBase, ExceptionBoundTypeVar, UndefinedException
class ExceptionMapper(ABC):
"""
Translate between EEST exceptions and error strings returned by client's
t8n or other tools.
"""
mapper_name: str
_mapping_compiled_regex: Dict[ExceptionBase, re.Pattern]
mapping_substring: ClassVar[Dict[ExceptionBase, str]]
"""
Mapping of exception to substring that should be present in the error
message.
Items in this mapping are used for substring matching (`substring in
message`).
"""
mapping_regex: ClassVar[Dict[ExceptionBase, str]]
"""
Mapping of exception to regex that should be present in the error message.
Items in this mapping are compiled into regex patterns for faster matching,
and then used for regex matching (`pattern.search(message)`).
"""
reliable: ClassVar[bool] = True
"""
Whether the exceptions returned by the tool are reliable and can be
accurately mapped to the exceptions in this class.
"""
def __init__(self) -> None:
"""Initialize the exception mapper."""
# Ensure that the subclass has properly defined mapping_substring
# before accessing it
assert self.mapping_substring is not None, "mapping_substring must be defined in subclass"
assert self.mapping_regex is not None, "mapping_regex must be defined in subclass"
self.mapper_name = self.__class__.__name__
self._mapping_compiled_regex = {
exception: re.compile(message) for exception, message in self.mapping_regex.items()
}
def message_to_exception(
self, exception_string: str
) -> List[ExceptionBase] | UndefinedException:
"""Match a formatted string to an exception."""
exceptions: List[ExceptionBase] = []
for exception, substring in self.mapping_substring.items():
if substring in exception_string:
exceptions.append(exception)
for exception, pattern in self._mapping_compiled_regex.items():
if pattern.search(exception_string):
exceptions.append(exception)
if exceptions:
return exceptions
return UndefinedException(exception_string, mapper_name=self.mapper_name)
class ExceptionWithMessage(BaseModel, Generic[ExceptionBoundTypeVar]):
"""
Class that contains the exception along with the verbatim message from the
external tool/client.
"""
exceptions: List[ExceptionBoundTypeVar]
message: str
def __contains__(self, item: Any) -> bool:
"""Check if the item is in the exceptions list."""
if isinstance(item, list):
return any(exception in self.exceptions for exception in item)
return item in self.exceptions
def __str__(self) -> str:
"""Return the string representation of the exception message."""
return f"[{' | '.join(str(e) for e in self.exceptions)}] {self.message}"
def mapper_validator(v: str, info: ValidationInfo) -> Dict[str, Any] | UndefinedException | None:
"""
Use the exception mapper that must be included in the context to map the
exception from the external tool.
"""
if v is None:
return v
if not isinstance(info.context, dict):
return UndefinedException(v, mapper_name="UndefinedExceptionMapper: No context")
exception_mapper = info.context.get("exception_mapper")
if exception_mapper is None:
return UndefinedException(v, mapper_name="UndefinedExceptionMapper: No mapper")
assert isinstance(exception_mapper, ExceptionMapper), (
f"Invalid mapper provided {exception_mapper}"
)
exceptions = exception_mapper.message_to_exception(v)
if isinstance(exceptions, UndefinedException):
return exceptions
return {
"exceptions": exceptions,
"message": v,
}
ExceptionMapperValidator = BeforeValidator(mapper_validator)
"""
Validator that can be used to annotate a pydantic field in a model that is
meant to be parsed from an external tool or client.
The annotated type must be an union that can include `None`,
`UndefinedException` and a custom model as:
```
class BlockExceptionWithMessage(ExceptionWithMessage[BlockException]):
pass
```
where `BlockException` can be any derivation of `ExceptionBase`.
The `message` attribute is the verbatim message received from the external tool
or client, and can be used to be printed for extra context information in case
of failures.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions.py | src/ethereum_test_exceptions/exceptions.py | """Exceptions for invalid execution."""
from enum import Enum, auto, unique
from typing import Annotated, Any, Dict, List, TypeVar
from pydantic import BeforeValidator, GetCoreSchemaHandler, PlainSerializer
from pydantic_core.core_schema import (
PlainValidatorFunctionSchema,
no_info_plain_validator_function,
to_string_ser_schema,
)
_exception_classes: Dict[str, type] = {}
class ExceptionBase(Enum):
"""Base class for exceptions."""
def __init_subclass__(cls) -> None:
"""Register the exception class."""
super().__init_subclass__()
_exception_classes[cls.__name__] = cls
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
cls.from_str,
serialization=to_string_ser_schema(),
)
@classmethod
def from_str(cls, value: "str | ExceptionBase") -> "ExceptionBase":
"""Return ContainerKind enum value from a string."""
if isinstance(value, ExceptionBase):
return value
class_name, enum_name = value.split(".")
if cls == ExceptionBase:
# Exception base automatically resolves the class
assert class_name in _exception_classes, f"No such exception class: {class_name}"
exception_class = _exception_classes[class_name]
else:
# Otherwise, use the class that the method is called on
assert cls.__name__ == class_name, (
f"Unexpected exception type: {class_name}, expected {cls.__name__}"
)
exception_class = cls
exception = getattr(exception_class, enum_name, None)
if exception is not None:
return exception
raise ValueError(f"No such exception in {class_name}: {value}")
def __contains__(self, exception) -> bool:
"""Check if provided exception is equal to this."""
return self == exception
def __str__(self) -> str:
"""Return string representation of the exception."""
return f"{self.__class__.__name__}.{self.name}"
def to_pipe_str(value: Any) -> str:
"""
Single pipe-separated string representation of an exception list.
Obtain a deterministic ordering by ordering using the exception string
representations.
"""
if isinstance(value, list):
return "|".join(str(exception) for exception in value)
return str(value)
def from_pipe_str(value: Any) -> str | List[str]:
"""Parse a single string as a pipe separated list into enum exceptions."""
if isinstance(value, str):
exception_list = value.split("|")
if len(exception_list) == 1:
return exception_list[0]
return exception_list
return value
class UndefinedException(str):
"""Undefined Exception."""
mapper_name: str | None
def __new__(cls, value: str, *, mapper_name: str | None = None) -> "UndefinedException":
"""Create a new UndefinedException instance."""
if isinstance(value, UndefinedException):
return value
assert isinstance(value, str)
instance = super().__new__(cls, value)
instance.mapper_name = mapper_name
return instance
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
cls,
serialization=to_string_ser_schema(),
)
@unique
class TransactionException(ExceptionBase):
"""
Exception raised when a transaction is invalid, and thus cannot be
executed.
If a transaction with any of these exceptions is included in a block, the
block is invalid.
"""
TYPE_NOT_SUPPORTED = auto()
"""Transaction type is not supported on this chain configuration."""
SENDER_NOT_EOA = auto()
"""Transaction is coming from address that is not exist anymore."""
ADDRESS_TOO_SHORT = auto()
"""Transaction `to` is not allowed to be less than 20 bytes."""
ADDRESS_TOO_LONG = auto()
"""Transaction `to` is not allowed to be more than 20 bytes."""
NONCE_MISMATCH_TOO_HIGH = auto()
"""Transaction nonce > sender.nonce."""
NONCE_MISMATCH_TOO_LOW = auto()
"""Transaction nonce < sender.nonce."""
NONCE_TOO_BIG = auto()
"""
Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably
TransactionTest).
"""
NONCE_IS_MAX = auto()
"""
Transaction `nonce` is not allowed to be max_uint64 - 1 (this is
StateTests).
"""
NONCE_OVERFLOW = auto()
"""Transaction `nonce` is not allowed to be more than uint64."""
GASLIMIT_OVERFLOW = auto()
"""Transaction gaslimit exceeds 2^64-1 maximum value."""
VALUE_OVERFLOW = auto()
"""Transaction value exceeds 2^256-1 maximum value."""
GASPRICE_OVERFLOW = auto()
"""Transaction gasPrice exceeds 2^256-1 maximum value."""
GASLIMIT_PRICE_PRODUCT_OVERFLOW = auto()
"""Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value."""
INVALID_SIGNATURE_VRS = auto()
"""Invalid transaction v, r, s values."""
RLP_INVALID_SIGNATURE_R = auto()
"""Error reading transaction signature R value."""
RLP_INVALID_SIGNATURE_S = auto()
"""Error reading transaction signature S value."""
RLP_LEADING_ZEROS_GASLIMIT = auto()
"""Error reading transaction gaslimit field RLP."""
RLP_LEADING_ZEROS_GASPRICE = auto()
"""Error reading transaction gasprice field RLP."""
RLP_LEADING_ZEROS_VALUE = auto()
"""Error reading transaction value field RLP."""
RLP_LEADING_ZEROS_NONCE = auto()
"""Error reading transaction nonce field RLP."""
RLP_LEADING_ZEROS_R = auto()
"""Error reading transaction signature R field RLP."""
RLP_LEADING_ZEROS_S = auto()
"""Error reading transaction signature S field RLP."""
RLP_LEADING_ZEROS_V = auto()
"""Error reading transaction signature V field RLP."""
RLP_LEADING_ZEROS_BASEFEE = auto()
"""Error reading transaction basefee field RLP."""
RLP_LEADING_ZEROS_PRIORITY_FEE = auto()
"""Error reading transaction priority fee field RLP."""
RLP_LEADING_ZEROS_DATA_SIZE = auto()
"""
Error reading transaction data field RLP, (rlp field length has leading
zeros).
"""
RLP_LEADING_ZEROS_NONCE_SIZE = auto()
"""
Error reading transaction nonce field RLP, (rlp field length has leading
zeros).
"""
RLP_TOO_FEW_ELEMENTS = auto()
"""
Error reading transaction RLP, structure has too few elements than
expected.
"""
RLP_TOO_MANY_ELEMENTS = auto()
"""
Error reading transaction RLP, structure has too many elements than
expected.
"""
RLP_ERROR_EOF = auto()
"""Error reading transaction RLP, rlp stream unexpectedly finished."""
RLP_ERROR_SIZE = auto()
"""Error reading transaction RLP, rlp size is invalid."""
RLP_ERROR_SIZE_LEADING_ZEROS = auto()
"""Error reading transaction RLP, field size has leading zeros."""
INVALID_CHAINID = auto()
"""Transaction chain id encoding is incorrect."""
RLP_INVALID_DATA = auto()
"""Transaction data field is invalid rlp."""
RLP_INVALID_GASLIMIT = auto()
"""Transaction gaslimit field is invalid rlp."""
RLP_INVALID_NONCE = auto()
"""Transaction nonce field is invalid rlp."""
RLP_INVALID_TO = auto()
"""Transaction to field is invalid rlp."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_LONG = auto()
"""Transaction access list address is > 20 bytes."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_SHORT = auto()
"""Transaction access list address is < 20 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_LONG = auto()
"""Transaction access list storage hash > 32 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_SHORT = auto()
"""Transaction access list storage hash < 32 bytes."""
RLP_INVALID_HEADER = auto()
"""Transaction failed to read from RLP as rlp header is invalid."""
RLP_INVALID_VALUE = auto()
"""Transaction value field is invalid rlp/structure."""
EC_RECOVERY_FAIL = auto()
"""Transaction has correct signature, but ec recovery failed."""
INSUFFICIENT_ACCOUNT_FUNDS = auto()
"""
Transaction's sender does not have enough funds to pay for the transaction.
"""
INSUFFICIENT_MAX_FEE_PER_GAS = auto()
"""Transaction's max-fee-per-gas is lower than the block base-fee."""
PRIORITY_OVERFLOW = auto()
"""
Transaction's max-priority-fee-per-gas is exceeds 2^256-1 maximum value.
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS = auto()
"""
Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas.
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS_2 = auto()
"""
Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas
(TransactionTests).
"""
INSUFFICIENT_MAX_FEE_PER_BLOB_GAS = auto()
"""
Transaction's max-fee-per-blob-gas is lower than the block's blob-gas
price.
"""
INTRINSIC_GAS_TOO_LOW = auto()
"""Transaction's gas limit is too low."""
INTRINSIC_GAS_BELOW_FLOOR_GAS_COST = auto()
"""Transaction's gas limit is below the floor gas cost."""
INITCODE_SIZE_EXCEEDED = auto()
"""
Transaction's initcode for a contract-creating transaction is too large.
"""
TYPE_3_TX_PRE_FORK = auto()
"""Transaction type 3 included before activation fork."""
TYPE_3_TX_ZERO_BLOBS_PRE_FORK = auto()
"""Transaction type 3, with zero blobs, included before activation fork."""
TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH = auto()
"""Transaction contains a blob versioned hash with an invalid version."""
TYPE_3_TX_WITH_FULL_BLOBS = auto()
"""Transaction contains full blobs (network-version of the transaction)."""
TYPE_3_TX_BLOB_COUNT_EXCEEDED = auto()
"""Transaction contains too many blob versioned hashes."""
TYPE_3_TX_CONTRACT_CREATION = auto()
"""Transaction is a type 3 transaction and has an empty `to`."""
TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED = auto()
"""Transaction causes block to go over blob gas limit."""
GAS_ALLOWANCE_EXCEEDED = auto()
"""Transaction causes block to go over blob gas limit."""
GAS_LIMIT_EXCEEDS_MAXIMUM = auto()
"""
Transaction gas limit exceeds the maximum allowed limit of 30 million.
"""
TYPE_3_TX_ZERO_BLOBS = auto()
"""Transaction is type 3, but has no blobs."""
TYPE_4_EMPTY_AUTHORIZATION_LIST = auto()
"""Transaction is type 4, but has an empty authorization list."""
TYPE_4_INVALID_AUTHORITY_SIGNATURE = auto()
"""Transaction authority signature is invalid"""
TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH = auto()
"""Transaction authority signature is invalid"""
TYPE_4_TX_CONTRACT_CREATION = auto()
"""Transaction is a type 4 transaction and has an empty `to`."""
TYPE_4_INVALID_AUTHORIZATION_FORMAT = auto()
"""
Transaction is type 4, but contains an authorization that has an invalid
format.
"""
TYPE_4_TX_PRE_FORK = auto()
"""Transaction type 4 included before activation fork."""
@unique
class BlockException(ExceptionBase):
"""
Exception raised when a block is invalid, but not due to a transaction.
E.g. all transactions in the block are valid, and can be applied to the
state, but the block header contains an invalid field.
"""
TOO_MANY_UNCLES = auto()
"""Block declares too many uncles over the allowed limit."""
UNCLE_IN_CHAIN = auto()
"""Block declares uncle header that is already imported into chain."""
UNCLE_IS_ANCESTOR = auto()
"""Block declares uncle header that is directly a parent of this block."""
UNCLE_IS_BROTHER = auto()
"""Block declares two similar uncle headers."""
UNCLE_PARENT_INCORRECT = auto()
"""Block declares uncle header that is an outdated block to be an uncle."""
EXTRA_DATA_TOO_BIG = auto()
"""Block header's extra data >32 bytes."""
EXTRA_DATA_INVALID_DAO = auto()
"""
Block header's extra data after dao fork must be a fixed pre defined hash.
"""
UNKNOWN_PARENT = auto()
"""
Block header's parent hash does not correspond to any of existing blocks on
chain.
"""
UNCLE_UNKNOWN_PARENT = auto()
"""
Uncle header's parent hash does not correspond to any of existing blocks on
chain.
"""
UNKNOWN_PARENT_ZERO = auto()
"""Block header's parent hash is zero hash."""
GASLIMIT_TOO_BIG = auto()
"""Block header's gas limit > 0x7fffffffffffffff."""
INVALID_BLOCK_NUMBER = auto()
"""Block header's number != parent header's number + 1."""
INVALID_BLOCK_TIMESTAMP_OLDER_THAN_PARENT = auto()
"""Block header's timestamp <= parent header's timestamp."""
INVALID_DIFFICULTY = auto()
"""
Block header's difficulty does not match the difficulty formula calculated
from previous block.
"""
INVALID_LOG_BLOOM = auto()
"""
Block header's logs bloom hash does not match the actually computed log
bloom.
"""
INVALID_STATE_ROOT = auto()
"""
Block header's state root hash does not match the actually computed hash of
the state.
"""
INVALID_RECEIPTS_ROOT = auto()
"""
Block header's receipts root hash does not match the actually computed hash
of receipts.
"""
INVALID_TRANSACTIONS_ROOT = auto()
"""
Block header's transactions root hash does not match the actually computed
hash of tx tree.
"""
INVALID_UNCLES_HASH = auto()
"""
Block header's uncle hash does not match the actually computed hash of
block's uncles.
"""
GAS_USED_OVERFLOW = auto()
"""Block transactions consume more gas than block header allow."""
INVALID_GASLIMIT = auto()
"""
Block header's gas limit does not match the gas limit formula calculated
from previous block.
"""
INVALID_BASEFEE_PER_GAS = auto()
"""Block header's base_fee_per_gas field is calculated incorrect."""
INVALID_GAS_USED = auto()
"""
Block header's actual gas used does not match the provided header's value
"""
INVALID_GAS_USED_ABOVE_LIMIT = auto()
"""Block header's gas used value is above the gas limit field's value."""
INVALID_WITHDRAWALS_ROOT = auto()
"""
Block header's withdrawals root does not match calculated withdrawals root.
"""
INCORRECT_BLOCK_FORMAT = auto()
"""
Block's format is incorrect, contains invalid fields, is missing fields, or
contains fields of a fork that is not active yet.
"""
BLOB_GAS_USED_ABOVE_LIMIT = auto()
"""Block's blob gas used in header is above the limit."""
INCORRECT_BLOB_GAS_USED = auto()
"""Block's blob gas used in header is incorrect."""
INCORRECT_EXCESS_BLOB_GAS = auto()
"""Block's excess blob gas in header is incorrect."""
INVALID_VERSIONED_HASHES = auto()
"""Incorrect number of versioned hashes in a payload."""
RLP_STRUCTURES_ENCODING = auto()
"""
Block's rlp encoding is valid but ethereum structures in it are invalid.
"""
RLP_WITHDRAWALS_NOT_READ = auto()
"""Block's rlp encoding is missing withdrawals."""
RLP_INVALID_FIELD_OVERFLOW_64 = auto()
"""One of block's fields rlp is overflow 2**64 value."""
RLP_INVALID_ADDRESS = auto()
"""Block withdrawals address is rlp of invalid address != 20 bytes."""
RLP_BLOCK_LIMIT_EXCEEDED = auto()
"""Block's rlp encoding is larger than the allowed limit."""
INVALID_REQUESTS = auto()
"""Block's requests are invalid."""
IMPORT_IMPOSSIBLE_LEGACY = auto()
"""Legacy block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_LEGACY_WRONG_PARENT = auto()
"""
Legacy block import is impossible, trying to import on top of a block that
is not legacy.
"""
IMPORT_IMPOSSIBLE_LONDON_WRONG_PARENT = auto()
"""
Trying to import london (basefee) block on top of block that is not 1559.
"""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POW = auto()
"""Trying to import paris(merge) block with PoW enabled."""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POS = auto()
"""
Trying to import paris(merge) block with PoS enabled before TTD is reached.
"""
IMPORT_IMPOSSIBLE_LONDON_OVER_PARIS = auto()
"""Trying to import london looking block over paris network (POS)."""
IMPORT_IMPOSSIBLE_PARIS_OVER_SHANGHAI = auto()
"""Trying to import paris block on top of shanghai block."""
IMPORT_IMPOSSIBLE_SHANGHAI = auto()
"""Shanghai block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_UNCLES_OVER_PARIS = auto()
"""
Trying to import a block after paris fork that has not empty uncles hash.
"""
IMPORT_IMPOSSIBLE_DIFFICULTY_OVER_PARIS = auto()
"""Trying to import a block after paris fork that has difficulty != 0."""
SYSTEM_CONTRACT_EMPTY = auto()
"""
A system contract address contains no code at the end of fork activation
block.
"""
SYSTEM_CONTRACT_CALL_FAILED = auto()
"""
A system contract call at the end of block execution (from the system
address) fails.
"""
INVALID_BLOCK_HASH = auto()
"""
Block header's hash does not match the actually computed hash of the block.
"""
INVALID_DEPOSIT_EVENT_LAYOUT = auto()
"""
Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but
the layout of the event does not match the required layout.
"""
@unique
class EOFException(ExceptionBase):
"""Exception raised when an EOF container is invalid."""
DEFAULT_EXCEPTION = auto()
"""Expect some exception, not yet known."""
UNDEFINED_EXCEPTION = auto()
"""Indicates that exception string is not mapped to an exception enum."""
UNDEFINED_INSTRUCTION = auto()
"""EOF container has undefined instruction in it's body code."""
UNKNOWN_VERSION = auto()
"""EOF container has an unknown version."""
INCOMPLETE_MAGIC = auto()
"""EOF container has not enough bytes to read magic."""
INVALID_MAGIC = auto()
"""EOF container has not allowed magic version byte."""
INVALID_VERSION = auto()
"""EOF container version bytes mismatch."""
INVALID_NON_RETURNING_FLAG = auto()
"""EOF container's section has non-returning flag set incorrectly."""
INVALID_RJUMP_DESTINATION = auto()
"""Code has RJUMP instruction with invalid parameters."""
MISSING_TYPE_HEADER = auto()
"""EOF container missing types section."""
INVALID_TYPE_SECTION_SIZE = auto()
"""EOF container types section has wrong size."""
INVALID_TYPE_BODY = auto()
"""EOF container types body section bytes are wrong."""
MISSING_CODE_HEADER = auto()
"""EOF container missing code section."""
INVALID_CODE_SECTION = auto()
"""EOF container code section bytes are incorrect."""
INCOMPLETE_CODE_HEADER = auto()
"""EOF container code header missing bytes."""
INCOMPLETE_DATA_HEADER = auto()
"""EOF container data header missing bytes."""
ZERO_SECTION_SIZE = auto()
"""EOF container data header construction is wrong."""
MISSING_DATA_SECTION = auto()
"""EOF container missing data section"""
INCOMPLETE_CONTAINER = auto()
"""EOF container bytes are incomplete."""
INVALID_SECTION_BODIES_SIZE = auto()
"""Sections bodies does not match sections headers."""
TRAILING_BYTES = auto()
"""EOF container has bytes beyond data section."""
MISSING_TERMINATOR = auto()
"""EOF container missing terminator bytes between header and body."""
MISSING_HEADERS_TERMINATOR = auto()
"""Some type of another exception about missing headers terminator."""
INVALID_FIRST_SECTION_TYPE = auto()
"""EOF container header does not have types section first."""
INCOMPLETE_SECTION_NUMBER = auto()
"""EOF container header has section that is missing declaration bytes."""
INCOMPLETE_SECTION_SIZE = auto()
"""EOF container header has section that is defined incorrectly."""
TOO_MANY_CODE_SECTIONS = auto()
"""EOF container header has too many code sections."""
MISSING_STOP_OPCODE = auto()
"""EOF container's code missing STOP bytecode at it's end."""
INPUTS_OUTPUTS_NUM_ABOVE_LIMIT = auto()
"""EOF container code section inputs/outputs number is above the limit"""
UNREACHABLE_INSTRUCTIONS = auto()
"""EOF container's code have instructions that are unreachable."""
UNREACHABLE_CODE_SECTIONS = auto()
"""EOF container's body have code sections that are unreachable."""
STACK_UNDERFLOW = auto()
"""EOF container's code produces an stack underflow."""
STACK_OVERFLOW = auto()
"""EOF container's code produces an stack overflow."""
STACK_HEIGHT_MISMATCH = auto()
"""EOF container section stack height mismatch."""
MAX_STACK_INCREASE_ABOVE_LIMIT = auto()
"""EOF container's specified max stack increase is above the limit."""
STACK_HIGHER_THAN_OUTPUTS = auto()
"""
EOF container section stack height is higher than the outputs. when
returning
"""
JUMPF_DESTINATION_INCOMPATIBLE_OUTPUTS = auto()
"""
EOF container section JUMPF's to a destination section with incompatible
outputs.
"""
INVALID_MAX_STACK_INCREASE = auto()
"""
EOF container section's specified max stack increase does not match the
actual stack height.
"""
INVALID_DATALOADN_INDEX = auto()
"""A DATALOADN instruction has out-of-bounds index for the data section."""
TRUNCATED_INSTRUCTION = auto()
"""EOF container's code section has truncated instruction."""
TOPLEVEL_CONTAINER_TRUNCATED = auto()
"""Top-level EOF container has data section truncated"""
ORPHAN_SUBCONTAINER = auto()
"""EOF container has an unreferenced subcontainer. '"""
CONTAINER_SIZE_ABOVE_LIMIT = auto()
"""EOF container is above size limit"""
INVALID_CONTAINER_SECTION_INDEX = auto()
"""Instruction references container section that does not exist."""
INCOMPATIBLE_CONTAINER_KIND = auto()
"""Incompatible instruction found in a container of a specific kind."""
AMBIGUOUS_CONTAINER_KIND = auto()
"""The kind of a sub-container cannot be uniquely deduced."""
TOO_MANY_CONTAINERS = auto()
"""EOF container header has too many sub-containers."""
INVALID_CODE_SECTION_INDEX = auto()
"""CALLF Operation refers to a non-existent code section"""
UNEXPECTED_HEADER_KIND = auto()
"""Header parsing encountered a section kind it wasn't expecting"""
CALLF_TO_NON_RETURNING = auto()
"""CALLF instruction targeting a non-returning code section"""
EOFCREATE_WITH_TRUNCATED_CONTAINER = auto()
"""EOFCREATE with truncated container"""
"""Pydantic Annotated Types"""
ExceptionInstanceOrList = Annotated[
List[TransactionException | BlockException] | TransactionException | BlockException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
TransactionExceptionInstanceOrList = Annotated[
List[TransactionException] | TransactionException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
BlockExceptionInstanceOrList = Annotated[
List[BlockException] | BlockException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
EOFExceptionInstanceOrList = Annotated[
List[EOFException] | EOFException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
ExceptionBoundTypeVar = TypeVar(
"ExceptionBoundTypeVar", TransactionException, BlockException, EOFException
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/__init__.py | src/ethereum_test_exceptions/__init__.py | """Exceptions for invalid execution."""
from .engine_api import EngineAPIError
from .exception_mapper import (
ExceptionMapper,
ExceptionMapperValidator,
ExceptionWithMessage,
)
from .exceptions import (
BlockException,
BlockExceptionInstanceOrList,
EOFException,
EOFExceptionInstanceOrList,
ExceptionBase,
ExceptionInstanceOrList,
TransactionException,
TransactionExceptionInstanceOrList,
UndefinedException,
)
__all__ = [
"BlockException",
"BlockExceptionInstanceOrList",
"EOFException",
"EOFExceptionInstanceOrList",
"ExceptionBase",
"EngineAPIError",
"ExceptionMapper",
"ExceptionInstanceOrList",
"ExceptionWithMessage",
"ExceptionMapperValidator",
"TransactionException",
"UndefinedException",
"TransactionExceptionInstanceOrList",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/engine_api.py | src/ethereum_test_exceptions/engine_api.py | """Engine API error defniitions."""
from enum import IntEnum
class EngineAPIError(IntEnum):
"""List of Engine API errors."""
ParseError = -32700
InvalidRequest = -32600
MethodNotFound = -32601
InvalidParams = -32602
InternalError = -32603
ServerError = -32000
UnknownPayload = -38001
InvalidForkchoiceState = -38002
InvalidPayloadAttributes = -38003
TooLargeRequest = -38004
UnsupportedFork = -38005
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/block.py | src/ethereum_test_exceptions/exceptions/block.py | """Block Exceptions."""
from enum import auto, unique
from .base import ExceptionBase
@unique
class BlockException(ExceptionBase):
"""
Exception raised when a block is invalid, but not due to a transaction.
E.g. all transactions in the block are valid, and can be applied to the
state, but the block header contains an invalid field.
"""
TOO_MANY_UNCLES = auto()
"""Block declares too many uncles over the allowed limit."""
UNCLE_IN_CHAIN = auto()
"""Block declares uncle header that is already imported into chain."""
UNCLE_IS_ANCESTOR = auto()
"""Block declares uncle header that is directly a parent of this block."""
UNCLE_IS_BROTHER = auto()
"""Block declares two similar uncle headers."""
UNCLE_PARENT_INCORRECT = auto()
"""Block declares uncle header that is an outdated block to be an uncle."""
EXTRA_DATA_TOO_BIG = auto()
"""Block header's extra data >32 bytes."""
EXTRA_DATA_INVALID_DAO = auto()
"""
Block header's extra data after dao fork must be a fixed pre defined hash.
"""
UNKNOWN_PARENT = auto()
"""
Block header's parent hash does not correspond to any of existing blocks on
chain.
"""
UNCLE_UNKNOWN_PARENT = auto()
"""
Uncle header's parent hash does not correspond to any of existing blocks on
chain.
"""
UNKNOWN_PARENT_ZERO = auto()
"""Block header's parent hash is zero hash."""
GASLIMIT_TOO_BIG = auto()
"""Block header's gas limit > 0x7fffffffffffffff."""
INVALID_BLOCK_NUMBER = auto()
"""Block header's number != parent header's number + 1."""
INVALID_BLOCK_TIMESTAMP_OLDER_THAN_PARENT = auto()
"""Block header's timestamp <= parent header's timestamp."""
INVALID_DIFFICULTY = auto()
"""
Block header's difficulty does not match the difficulty formula calculated
from previous block.
"""
INVALID_LOG_BLOOM = auto()
"""
Block header's logs bloom hash does not match the actually computed log
bloom.
"""
INVALID_STATE_ROOT = auto()
"""
Block header's state root hash does not match the actually computed hash of
the state.
"""
INVALID_RECEIPTS_ROOT = auto()
"""
Block header's receipts root hash does not match the actually computed hash
of receipts.
"""
INVALID_TRANSACTIONS_ROOT = auto()
"""
Block header's transactions root hash does not match the actually computed
hash of tx tree.
"""
INVALID_UNCLES_HASH = auto()
"""
Block header's uncle hash does not match the actually computed hash of
block's uncles.
"""
GAS_USED_OVERFLOW = auto()
"""Block transactions consume more gas than block header allow."""
INVALID_GASLIMIT = auto()
"""
Block header's gas limit does not match the gas limit formula calculated
from previous block.
"""
INVALID_BASEFEE_PER_GAS = auto()
"""Block header's base_fee_per_gas field is calculated incorrect."""
INVALID_GAS_USED = auto()
"""
Block header's actual gas used does not match the provided header's value
"""
INVALID_GAS_USED_ABOVE_LIMIT = auto()
"""Block header's gas used value is above the gas limit field's value."""
INVALID_WITHDRAWALS_ROOT = auto()
"""
Block header's withdrawals root does not match calculated withdrawals root.
"""
INCORRECT_BLOCK_FORMAT = auto()
"""
Block's format is incorrect, contains invalid fields, is missing fields, or
contains fields of a fork that is not active yet.
"""
BLOB_GAS_USED_ABOVE_LIMIT = auto()
"""Block's blob gas used in header is above the limit."""
INCORRECT_BLOB_GAS_USED = auto()
"""Block's blob gas used in header is incorrect."""
INCORRECT_EXCESS_BLOB_GAS = auto()
"""Block's excess blob gas in header is incorrect."""
INVALID_VERSIONED_HASHES = auto()
"""Incorrect number of versioned hashes in a payload."""
RLP_STRUCTURES_ENCODING = auto()
"""
Block's rlp encoding is valid but ethereum structures in it are invalid.
"""
RLP_WITHDRAWALS_NOT_READ = auto()
"""Block's rlp encoding is missing withdrawals."""
RLP_INVALID_FIELD_OVERFLOW_64 = auto()
"""One of block's fields rlp is overflow 2**64 value."""
RLP_INVALID_ADDRESS = auto()
"""Block withdrawals address is rlp of invalid address != 20 bytes."""
RLP_BLOCK_LIMIT_EXCEEDED = auto()
"""Block's rlp encoding is larger than the allowed limit."""
INVALID_REQUESTS = auto()
"""Block's requests are invalid."""
IMPORT_IMPOSSIBLE_LEGACY = auto()
"""Legacy block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_LEGACY_WRONG_PARENT = auto()
"""
Legacy block import is impossible, trying to import on top of a block that
is not legacy.
"""
IMPORT_IMPOSSIBLE_LONDON_WRONG_PARENT = auto()
"""
Trying to import london (basefee) block on top of block that is not 1559.
"""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POW = auto()
"""Trying to import paris(merge) block with PoW enabled."""
IMPORT_IMPOSSIBLE_PARIS_WRONG_POS = auto()
"""
Trying to import paris(merge) block with PoS enabled before TTD is reached.
"""
IMPORT_IMPOSSIBLE_LONDON_OVER_PARIS = auto()
"""Trying to import london looking block over paris network (POS)."""
IMPORT_IMPOSSIBLE_PARIS_OVER_SHANGHAI = auto()
"""Trying to import paris block on top of shanghai block."""
IMPORT_IMPOSSIBLE_SHANGHAI = auto()
"""Shanghai block import is impossible in this chain configuration."""
IMPORT_IMPOSSIBLE_UNCLES_OVER_PARIS = auto()
"""
Trying to import a block after paris fork that has not empty uncles hash.
"""
IMPORT_IMPOSSIBLE_DIFFICULTY_OVER_PARIS = auto()
"""Trying to import a block after paris fork that has difficulty != 0."""
SYSTEM_CONTRACT_EMPTY = auto()
"""
A system contract address contains no code at the end of fork activation
block.
"""
SYSTEM_CONTRACT_CALL_FAILED = auto()
"""
A system contract call at the end of block execution (from the system
address) fails.
"""
INVALID_BLOCK_HASH = auto()
"""
Block header's hash does not match the actually computed hash of the block.
"""
INVALID_DEPOSIT_EVENT_LAYOUT = auto()
"""
Transaction emits a `DepositEvent` in the deposit contract (EIP-6110), but
the layout of the event does not match the required layout.
"""
# --- Block-Level Access Lists (EIP-7928) --- #
INVALID_BLOCK_ACCESS_LIST = auto()
"""Block's access list is invalid."""
INVALID_BAL_HASH = auto()
"""Block header's BAL hash does not match the computed BAL hash."""
INVALID_BAL_EXTRA_ACCOUNT = auto()
"""
Block BAL contains an account change that is not present in the computed
BAL.
"""
INVALID_BAL_MISSING_ACCOUNT = auto()
"""
Block BAL is missing an account change that is present in the computed BAL.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/exceptions_types.py | src/ethereum_test_exceptions/exceptions/exceptions_types.py | """Pydantic annotated types for exceptions."""
from typing import Annotated, List, TypeVar
from pydantic import BeforeValidator, PlainSerializer
from .base import from_pipe_str, to_pipe_str
from .block import BlockException
from .eof import EOFException
from .transaction import TransactionException
"""
Pydantic Annotated Types
"""
ExceptionInstanceOrList = Annotated[
List[TransactionException | BlockException] | TransactionException | BlockException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
TransactionExceptionInstanceOrList = Annotated[
List[TransactionException] | TransactionException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
BlockExceptionInstanceOrList = Annotated[
List[BlockException] | BlockException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
EOFExceptionInstanceOrList = Annotated[
List[EOFException] | EOFException,
BeforeValidator(from_pipe_str),
PlainSerializer(to_pipe_str),
]
ExceptionBoundTypeVar = TypeVar(
"ExceptionBoundTypeVar", TransactionException, BlockException, EOFException
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/transaction.py | src/ethereum_test_exceptions/exceptions/transaction.py | """Transaction Exceptions."""
from enum import auto, unique
from .base import ExceptionBase
@unique
class TransactionException(ExceptionBase):
"""
Exception raised when a transaction is invalid, and thus cannot be
executed.
If a transaction with any of these exceptions is included in a block, the
block is invalid.
"""
TYPE_NOT_SUPPORTED = auto()
"""Transaction type is not supported on this chain configuration."""
SENDER_NOT_EOA = auto()
"""Transaction is coming from address that is not exist anymore."""
ADDRESS_TOO_SHORT = auto()
"""Transaction `to` is not allowed to be less than 20 bytes."""
ADDRESS_TOO_LONG = auto()
"""Transaction `to` is not allowed to be more than 20 bytes."""
NONCE_MISMATCH_TOO_HIGH = auto()
"""Transaction nonce > sender.nonce."""
NONCE_MISMATCH_TOO_LOW = auto()
"""Transaction nonce < sender.nonce."""
NONCE_TOO_BIG = auto()
"""
Transaction `nonce` is not allowed to be max_uint64 - 1 (this is probably
TransactionTest).
"""
NONCE_IS_MAX = auto()
"""
Transaction `nonce` is not allowed to be max_uint64 - 1 (this is
StateTests).
"""
NONCE_OVERFLOW = auto()
"""Transaction `nonce` is not allowed to be more than uint64."""
GASLIMIT_OVERFLOW = auto()
"""Transaction gaslimit exceeds 2^64-1 maximum value."""
VALUE_OVERFLOW = auto()
"""Transaction value exceeds 2^256-1 maximum value."""
GASPRICE_OVERFLOW = auto()
"""Transaction gasPrice exceeds 2^256-1 maximum value."""
GASLIMIT_PRICE_PRODUCT_OVERFLOW = auto()
"""Transaction gasPrice * gasLimit exceeds 2^256-1 maximum value."""
INVALID_SIGNATURE_VRS = auto()
"""Invalid transaction v, r, s values."""
RLP_INVALID_SIGNATURE_R = auto()
"""Error reading transaction signature R value."""
RLP_INVALID_SIGNATURE_S = auto()
"""Error reading transaction signature S value."""
RLP_LEADING_ZEROS_GASLIMIT = auto()
"""Error reading transaction gaslimit field RLP."""
RLP_LEADING_ZEROS_GASPRICE = auto()
"""Error reading transaction gasprice field RLP."""
RLP_LEADING_ZEROS_VALUE = auto()
"""Error reading transaction value field RLP."""
RLP_LEADING_ZEROS_NONCE = auto()
"""Error reading transaction nonce field RLP."""
RLP_LEADING_ZEROS_R = auto()
"""Error reading transaction signature R field RLP."""
RLP_LEADING_ZEROS_S = auto()
"""Error reading transaction signature S field RLP."""
RLP_LEADING_ZEROS_V = auto()
"""Error reading transaction signature V field RLP."""
RLP_LEADING_ZEROS_BASEFEE = auto()
"""Error reading transaction basefee field RLP."""
RLP_LEADING_ZEROS_PRIORITY_FEE = auto()
"""Error reading transaction priority fee field RLP."""
RLP_LEADING_ZEROS_DATA_SIZE = auto()
"""
Error reading transaction data field RLP, (rlp field length has leading
zeros).
"""
RLP_LEADING_ZEROS_NONCE_SIZE = auto()
"""
Error reading transaction nonce field RLP, (rlp field length has leading
zeros).
"""
RLP_TOO_FEW_ELEMENTS = auto()
"""
Error reading transaction RLP, structure has too few elements than
expected.
"""
RLP_TOO_MANY_ELEMENTS = auto()
"""
Error reading transaction RLP, structure has too many elements than
expected.
"""
RLP_ERROR_EOF = auto()
"""Error reading transaction RLP, rlp stream unexpectedly finished."""
RLP_ERROR_SIZE = auto()
"""Error reading transaction RLP, rlp size is invalid."""
RLP_ERROR_SIZE_LEADING_ZEROS = auto()
"""Error reading transaction RLP, field size has leading zeros."""
INVALID_CHAINID = auto()
"""Transaction chain id encoding is incorrect."""
RLP_INVALID_DATA = auto()
"""Transaction data field is invalid rlp."""
RLP_INVALID_GASLIMIT = auto()
"""Transaction gaslimit field is invalid rlp."""
RLP_INVALID_NONCE = auto()
"""Transaction nonce field is invalid rlp."""
RLP_INVALID_TO = auto()
"""Transaction to field is invalid rlp."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_LONG = auto()
"""Transaction access list address is > 20 bytes."""
RLP_INVALID_ACCESS_LIST_ADDRESS_TOO_SHORT = auto()
"""Transaction access list address is < 20 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_LONG = auto()
"""Transaction access list storage hash > 32 bytes."""
RLP_INVALID_ACCESS_LIST_STORAGE_TOO_SHORT = auto()
"""Transaction access list storage hash < 32 bytes."""
RLP_INVALID_HEADER = auto()
"""Transaction failed to read from RLP as rlp header is invalid."""
RLP_INVALID_VALUE = auto()
"""Transaction value field is invalid rlp/structure."""
EC_RECOVERY_FAIL = auto()
"""Transaction has correct signature, but ec recovery failed."""
INSUFFICIENT_ACCOUNT_FUNDS = auto()
"""
Transaction's sender does not have enough funds to pay for the transaction.
"""
INSUFFICIENT_MAX_FEE_PER_GAS = auto()
"""Transaction's max-fee-per-gas is lower than the block base-fee."""
PRIORITY_OVERFLOW = auto()
"""
Transaction's max-priority-fee-per-gas is exceeds 2^256-1 maximum value.
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS = auto()
"""
Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas.
"""
PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS_2 = auto()
"""
Transaction's max-priority-fee-per-gas is greater than the max-fee-per-gas
(TransactionTests).
"""
INSUFFICIENT_MAX_FEE_PER_BLOB_GAS = auto()
"""
Transaction's max-fee-per-blob-gas is lower than the block's blob-gas
price.
"""
INTRINSIC_GAS_TOO_LOW = auto()
"""Transaction's gas limit is too low."""
INTRINSIC_GAS_BELOW_FLOOR_GAS_COST = auto()
"""Transaction's gas limit is below the floor gas cost."""
INITCODE_SIZE_EXCEEDED = auto()
"""
Transaction's initcode for a contract-creating transaction is too large.
"""
TYPE_3_TX_PRE_FORK = auto()
"""Transaction type 3 included before activation fork."""
TYPE_3_TX_ZERO_BLOBS_PRE_FORK = auto()
"""Transaction type 3, with zero blobs, included before activation fork."""
TYPE_3_TX_INVALID_BLOB_VERSIONED_HASH = auto()
"""Transaction contains a blob versioned hash with an invalid version."""
TYPE_3_TX_WITH_FULL_BLOBS = auto()
"""Transaction contains full blobs (network-version of the transaction)."""
TYPE_3_TX_BLOB_COUNT_EXCEEDED = auto()
"""Transaction contains too many blob versioned hashes."""
TYPE_3_TX_CONTRACT_CREATION = auto()
"""Transaction is a type 3 transaction and has an empty `to`."""
TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED = auto()
"""Transaction causes block to go over blob gas limit."""
GAS_ALLOWANCE_EXCEEDED = auto()
"""Transaction causes block to go over blob gas limit."""
GAS_LIMIT_EXCEEDS_MAXIMUM = auto()
"""
Transaction gas limit exceeds the maximum allowed limit of 30 million.
"""
TYPE_3_TX_ZERO_BLOBS = auto()
"""Transaction is type 3, but has no blobs."""
TYPE_4_EMPTY_AUTHORIZATION_LIST = auto()
"""Transaction is type 4, but has an empty authorization list."""
TYPE_4_INVALID_AUTHORITY_SIGNATURE = auto()
"""Transaction authority signature is invalid"""
TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH = auto()
"""Transaction authority signature is invalid"""
TYPE_4_TX_CONTRACT_CREATION = auto()
"""Transaction is a type 4 transaction and has an empty `to`."""
TYPE_4_INVALID_AUTHORIZATION_FORMAT = auto()
"""
Transaction is type 4, but contains an authorization that has an invalid
format.
"""
TYPE_4_TX_PRE_FORK = auto()
"""Transaction type 4 included before activation fork."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/__init__.py | src/ethereum_test_exceptions/exceptions/__init__.py | """Exceptions for invalid execution."""
from .base import ExceptionBase, UndefinedException, from_pipe_str, to_pipe_str
from .block import BlockException
from .eof import EOFException
from .exceptions_types import (
BlockExceptionInstanceOrList,
EOFExceptionInstanceOrList,
ExceptionBoundTypeVar,
ExceptionInstanceOrList,
TransactionExceptionInstanceOrList,
)
from .transaction import TransactionException
__all__ = [
"ExceptionBase",
"UndefinedException",
"from_pipe_str",
"to_pipe_str",
"TransactionException",
"BlockException",
"EOFException",
"ExceptionInstanceOrList",
"TransactionExceptionInstanceOrList",
"BlockExceptionInstanceOrList",
"EOFExceptionInstanceOrList",
"ExceptionBoundTypeVar",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/base.py | src/ethereum_test_exceptions/exceptions/base.py | """Base classes and infrastructure for exceptions."""
from enum import Enum
from typing import Any, Dict
from pydantic import GetCoreSchemaHandler
from pydantic_core.core_schema import (
PlainValidatorFunctionSchema,
no_info_plain_validator_function,
to_string_ser_schema,
)
_exception_classes: Dict[str, type] = {}
class ExceptionBase(Enum):
"""Base class for exceptions."""
def __init_subclass__(cls) -> None:
"""Register the exception class."""
super().__init_subclass__()
_exception_classes[cls.__name__] = cls
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
cls.from_str,
serialization=to_string_ser_schema(),
)
@classmethod
def from_str(cls, value: "str | ExceptionBase") -> "ExceptionBase":
"""Return ContainerKind enum value from a string."""
if isinstance(value, ExceptionBase):
return value
class_name, enum_name = value.split(".")
if cls == ExceptionBase:
# Exception base automatically resolves the class
assert class_name in _exception_classes, f"No such exception class: {class_name}"
exception_class = _exception_classes[class_name]
else:
# Otherwise, use the class that the method is called on
assert cls.__name__ == class_name, (
f"Unexpected exception type: {class_name}, expected {cls.__name__}"
)
exception_class = cls
exception = getattr(exception_class, enum_name, None)
if exception is not None:
return exception
raise ValueError(f"No such exception in {class_name}: {value}")
def __contains__(self, exception: "ExceptionBase") -> bool:
"""Check if provided exception is equal to this."""
return self == exception
def __str__(self) -> str:
"""Return string representation of the exception."""
return f"{self.__class__.__name__}.{self.name}"
class UndefinedException(str):
"""Undefined Exception."""
mapper_name: str | None
def __new__(cls, value: str, *, mapper_name: str | None = None) -> "UndefinedException":
"""Create a new UndefinedException instance."""
if isinstance(value, UndefinedException):
return value
assert isinstance(value, str)
instance = super().__new__(cls, value)
instance.mapper_name = mapper_name
return instance
@classmethod
def __get_pydantic_core_schema__(
cls, source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
cls,
serialization=to_string_ser_schema(),
)
def to_pipe_str(value: Any) -> str:
"""
Single pipe-separated string representation of an exception list.
Obtain a deterministic ordering by ordering using the exception string
representations.
"""
if isinstance(value, list):
return "|".join(str(exception) for exception in value)
return str(value)
def from_pipe_str(value: Any) -> str | list[str]:
"""Parse a single string as a pipe separated list into enum exceptions."""
if isinstance(value, str):
exception_list = value.split("|")
if len(exception_list) == 1:
return exception_list[0]
return exception_list
return value
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/exceptions/eof.py | src/ethereum_test_exceptions/exceptions/eof.py | """EOF Exceptions."""
from enum import auto, unique
from .base import ExceptionBase
@unique
class EOFException(ExceptionBase):
"""Exception raised when an EOF container is invalid."""
DEFAULT_EXCEPTION = auto()
"""Expect some exception, not yet known."""
UNDEFINED_EXCEPTION = auto()
"""Indicates that exception string is not mapped to an exception enum."""
UNDEFINED_INSTRUCTION = auto()
"""EOF container has undefined instruction in it's body code."""
UNKNOWN_VERSION = auto()
"""EOF container has an unknown version."""
INCOMPLETE_MAGIC = auto()
"""EOF container has not enough bytes to read magic."""
INVALID_MAGIC = auto()
"""EOF container has not allowed magic version byte."""
INVALID_VERSION = auto()
"""EOF container version bytes mismatch."""
INVALID_NON_RETURNING_FLAG = auto()
"""EOF container's section has non-returning flag set incorrectly."""
INVALID_RJUMP_DESTINATION = auto()
"""Code has RJUMP instruction with invalid parameters."""
MISSING_TYPE_HEADER = auto()
"""EOF container missing types section."""
INVALID_TYPE_SECTION_SIZE = auto()
"""EOF container types section has wrong size."""
INVALID_TYPE_BODY = auto()
"""EOF container types body section bytes are wrong."""
MISSING_CODE_HEADER = auto()
"""EOF container missing code section."""
INVALID_CODE_SECTION = auto()
"""EOF container code section bytes are incorrect."""
INCOMPLETE_CODE_HEADER = auto()
"""EOF container code header missing bytes."""
INCOMPLETE_DATA_HEADER = auto()
"""EOF container data header missing bytes."""
ZERO_SECTION_SIZE = auto()
"""EOF container data header construction is wrong."""
MISSING_DATA_SECTION = auto()
"""EOF container missing data section"""
INCOMPLETE_CONTAINER = auto()
"""EOF container bytes are incomplete."""
INVALID_SECTION_BODIES_SIZE = auto()
"""Sections bodies does not match sections headers."""
TRAILING_BYTES = auto()
"""EOF container has bytes beyond data section."""
MISSING_TERMINATOR = auto()
"""EOF container missing terminator bytes between header and body."""
MISSING_HEADERS_TERMINATOR = auto()
"""Some type of another exception about missing headers terminator."""
INVALID_FIRST_SECTION_TYPE = auto()
"""EOF container header does not have types section first."""
INCOMPLETE_SECTION_NUMBER = auto()
"""EOF container header has section that is missing declaration bytes."""
INCOMPLETE_SECTION_SIZE = auto()
"""EOF container header has section that is defined incorrectly."""
TOO_MANY_CODE_SECTIONS = auto()
"""EOF container header has too many code sections."""
MISSING_STOP_OPCODE = auto()
"""EOF container's code missing STOP bytecode at it's end."""
INPUTS_OUTPUTS_NUM_ABOVE_LIMIT = auto()
"""EOF container code section inputs/outputs number is above the limit"""
UNREACHABLE_INSTRUCTIONS = auto()
"""EOF container's code have instructions that are unreachable."""
UNREACHABLE_CODE_SECTIONS = auto()
"""EOF container's body have code sections that are unreachable."""
STACK_UNDERFLOW = auto()
"""EOF container's code produces an stack underflow."""
STACK_OVERFLOW = auto()
"""EOF container's code produces an stack overflow."""
STACK_HEIGHT_MISMATCH = auto()
"""EOF container section stack height mismatch."""
MAX_STACK_INCREASE_ABOVE_LIMIT = auto()
"""EOF container's specified max stack increase is above the limit."""
STACK_HIGHER_THAN_OUTPUTS = auto()
"""
EOF container section stack height is higher than the outputs. when
returning
"""
JUMPF_DESTINATION_INCOMPATIBLE_OUTPUTS = auto()
"""
EOF container section JUMPF's to a destination section with incompatible
outputs.
"""
INVALID_MAX_STACK_INCREASE = auto()
"""
EOF container section's specified max stack increase does not match the
actual stack height.
"""
INVALID_DATALOADN_INDEX = auto()
"""A DATALOADN instruction has out-of-bounds index for the data section."""
TRUNCATED_INSTRUCTION = auto()
"""EOF container's code section has truncated instruction."""
TOPLEVEL_CONTAINER_TRUNCATED = auto()
"""Top-level EOF container has data section truncated"""
ORPHAN_SUBCONTAINER = auto()
"""EOF container has an unreferenced subcontainer. '"""
CONTAINER_SIZE_ABOVE_LIMIT = auto()
"""EOF container is above size limit"""
INVALID_CONTAINER_SECTION_INDEX = auto()
"""Instruction references container section that does not exist."""
INCOMPATIBLE_CONTAINER_KIND = auto()
"""Incompatible instruction found in a container of a specific kind."""
AMBIGUOUS_CONTAINER_KIND = auto()
"""The kind of a sub-container cannot be uniquely deduced."""
TOO_MANY_CONTAINERS = auto()
"""EOF container header has too many sub-containers."""
INVALID_CODE_SECTION_INDEX = auto()
"""CALLF Operation refers to a non-existent code section"""
UNEXPECTED_HEADER_KIND = auto()
"""Header parsing encountered a section kind it wasn't expecting"""
CALLF_TO_NON_RETURNING = auto()
"""CALLF instruction targeting a non-returning code section"""
EOFCREATE_WITH_TRUNCATED_CONTAINER = auto()
"""EOFCREATE with truncated container"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/tests/__init__.py | src/ethereum_test_exceptions/tests/__init__.py | """Tests for the ethereum_test_exceptions package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_exceptions/tests/test_exceptions.py | src/ethereum_test_exceptions/tests/test_exceptions.py | """Test suite for ethereum_test_exceptions module."""
import pytest
from pydantic import TypeAdapter
from ..exceptions import (
BlockException,
BlockExceptionInstanceOrList,
ExceptionInstanceOrList,
TransactionException,
TransactionExceptionInstanceOrList,
)
GenericExceptionListAdapter: TypeAdapter = TypeAdapter(ExceptionInstanceOrList)
TransactionExceptionListAdapter: TypeAdapter = TypeAdapter(TransactionExceptionInstanceOrList)
BlockExceptionListAdapter: TypeAdapter = TypeAdapter(BlockExceptionInstanceOrList)
@pytest.mark.parametrize(
"exception, expected",
[
(
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS",
),
(
TransactionException.INITCODE_SIZE_EXCEEDED,
"TransactionException.INITCODE_SIZE_EXCEEDED",
),
(BlockException.INCORRECT_BLOB_GAS_USED, "BlockException.INCORRECT_BLOB_GAS_USED"),
(BlockException.INCORRECT_BLOCK_FORMAT, "BlockException.INCORRECT_BLOCK_FORMAT"),
],
)
def test_exceptions_string_conversion(
exception: BlockException | TransactionException, expected: str
) -> None:
"""
Test that the exceptions are unique and have the correct string
representation.
"""
assert str(exception) == expected
@pytest.mark.parametrize(
"type_adapter,exception,expected",
[
(
GenericExceptionListAdapter,
[
BlockException.INCORRECT_BLOB_GAS_USED,
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
],
"BlockException.INCORRECT_BLOB_GAS_USED|"
"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS",
),
(
GenericExceptionListAdapter,
[
BlockException.INCORRECT_BLOB_GAS_USED,
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
TransactionException.INITCODE_SIZE_EXCEEDED,
],
"BlockException.INCORRECT_BLOB_GAS_USED"
"|TransactionException.INSUFFICIENT_ACCOUNT_FUNDS"
"|TransactionException.INITCODE_SIZE_EXCEEDED",
),
(
GenericExceptionListAdapter,
[
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
BlockException.INCORRECT_BLOB_GAS_USED,
],
"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS"
"|BlockException.INCORRECT_BLOB_GAS_USED",
),
(
TransactionExceptionListAdapter,
[
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
TransactionException.INITCODE_SIZE_EXCEEDED,
],
"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS"
"|TransactionException.INITCODE_SIZE_EXCEEDED",
),
(
BlockExceptionListAdapter,
[
BlockException.INCORRECT_BLOB_GAS_USED,
BlockException.INCORRECT_BLOCK_FORMAT,
],
"BlockException.INCORRECT_BLOB_GAS_USED|BlockException.INCORRECT_BLOCK_FORMAT",
),
],
)
def test_exceptions_or(type_adapter: TypeAdapter, exception: list, expected: str) -> None:
"""Test that the exceptions can be combined using the | operator."""
assert type_adapter.dump_python(type_adapter.validate_python(exception)) == expected
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/blockchain.py | src/ethereum_test_fixtures/blockchain.py | """BlockchainTest types."""
import json
from functools import cached_property
from typing import (
Annotated,
Any,
ClassVar,
List,
Literal,
Self,
Set,
Tuple,
Type,
Union,
cast,
get_args,
get_type_hints,
)
import ethereum_rlp as eth_rlp
import pytest
from ethereum_types.numeric import Uint
from pydantic import AliasChoices, Field, PlainSerializer, computed_field, model_validator
from ethereum_test_base_types import (
Address,
Alloc,
Bloom,
Bytes,
CamelModel,
EmptyOmmersRoot,
EmptyTrieRoot,
Hash,
HeaderNonce,
HexNumber,
Number,
ZeroPaddedHexNumber,
)
from ethereum_test_exceptions import EngineAPIError, ExceptionInstanceOrList
from ethereum_test_forks import Fork, Paris
from ethereum_test_types import (
BlockAccessList,
Environment,
Requests,
Transaction,
Withdrawal,
)
from ethereum_test_types.block_types import WithdrawalGeneric
from ethereum_test_types.transaction_types import TransactionFixtureConverter, TransactionGeneric
from .base import BaseFixture, FixtureFillingPhase
from .common import FixtureAuthorizationTuple, FixtureBlobSchedule
def post_state_validator(alternate_field: str | None = None, mode: str = "after") -> Any:
"""
Create a validator to ensure exactly one post-state field is provided.
Args: alternate_field: Alternative field name to post_state_hash (e.g.,
'post_state_diff'). mode: Pydantic validation mode.
"""
def decorator(cls: Type[Any]) -> Type[Any]:
@model_validator(mode=mode) # type: ignore
def validate_post_state_fields(self: Any) -> Any:
"""Ensure exactly one post-state field is provided."""
if mode == "after":
# Determine which fields to check
if alternate_field:
# For engine x fixtures: check post_state vs
# post_state_diff
field1_name, field2_name = "post_state", alternate_field
else:
# For standard fixtures: check post_state vs
# post_state_hash
field1_name, field2_name = "post_state", "post_state_hash"
field1_value = getattr(self, field1_name, None)
field2_value = getattr(self, field2_name, None)
if field1_value is None and field2_value is None:
raise ValueError(f"Either {field1_name} or {field2_name} must be provided.")
if field1_value is not None and field2_value is not None:
raise ValueError(
f"Only one of {field1_name} or {field2_name} must be provided."
)
return self
# Apply the validator to the class
return cls
return decorator
class HeaderForkRequirement(str):
"""
Fork requirement class that specifies the name of the method that should be
called to check if the field is required.
"""
def __new__(cls, value: str) -> "HeaderForkRequirement":
"""Create a new instance of the class."""
return super().__new__(cls, value)
def required(self, fork: Fork, block_number: int, timestamp: int) -> bool:
"""Check if the field is required for the given fork."""
return getattr(fork, f"header_{self}_required")(
block_number=block_number, timestamp=timestamp
)
@classmethod
def get_from_annotation(cls, field_hints: Any) -> Self | None:
"""Find the annotation in the field args."""
if isinstance(field_hints, cls):
return field_hints
for hint in get_args(field_hints):
if res := cls.get_from_annotation(hint):
return res
return None
class FixtureHeader(CamelModel):
"""
Representation of an Ethereum header within a test Fixture.
We combine the `Environment` and `Result` contents to create this model.
"""
parent_hash: Hash = Hash(0)
ommers_hash: Hash = Field(Hash(EmptyOmmersRoot), alias="uncleHash")
fee_recipient: Address = Field(
..., alias="coinbase", validation_alias=AliasChoices("coinbase", "miner")
)
state_root: Hash
transactions_trie: Hash = Field(
Hash(EmptyTrieRoot), validation_alias=AliasChoices("transactionsTrie", "transactionsRoot")
)
receipts_root: Hash = Field(
Hash(EmptyTrieRoot),
alias="receiptTrie",
validation_alias=AliasChoices("receiptTrie", "receiptsRoot"),
)
logs_bloom: Bloom = Field(
Bloom(0), alias="bloom", validation_alias=AliasChoices("bloom", "logsBloom")
)
difficulty: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
number: ZeroPaddedHexNumber
gas_limit: ZeroPaddedHexNumber
gas_used: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
timestamp: ZeroPaddedHexNumber
extra_data: Bytes
prev_randao: Hash = Field(Hash(0), alias="mixHash")
nonce: HeaderNonce = Field(HeaderNonce(0), validate_default=True)
base_fee_per_gas: (
Annotated[
ZeroPaddedHexNumber,
HeaderForkRequirement("base_fee"),
]
| None
) = Field(None)
withdrawals_root: Annotated[Hash, HeaderForkRequirement("withdrawals")] | None = Field(None)
blob_gas_used: (
Annotated[ZeroPaddedHexNumber, HeaderForkRequirement("blob_gas_used")] | None
) = Field(None)
excess_blob_gas: (
Annotated[ZeroPaddedHexNumber, HeaderForkRequirement("excess_blob_gas")] | None
) = Field(None)
parent_beacon_block_root: Annotated[Hash, HeaderForkRequirement("beacon_root")] | None = Field(
None
)
requests_hash: Annotated[Hash, HeaderForkRequirement("requests")] | None = Field(None)
block_access_list_hash: Annotated[Hash, HeaderForkRequirement("bal_hash")] | None = Field(
None, alias="blockAccessListHash"
)
fork: Fork | None = Field(None, exclude=True)
def model_post_init(self, __context: Any) -> None:
"""
Model post init method used to check for required fields of a given
fork.
"""
super().model_post_init(__context)
if self.fork is None:
# No validation done when we are importing the fixture from file
return
# Get the timestamp and block number
block_number = self.number
timestamp = self.timestamp
# For each field, check if any of the annotations are of type
# HeaderForkRequirement and if so, check if the field is required for
# the given fork.
annotated_hints = get_type_hints(self, include_extras=True)
for field in self.__class__.model_fields:
if field == "fork":
continue
header_fork_requirement = HeaderForkRequirement.get_from_annotation(
annotated_hints[field]
)
if header_fork_requirement is not None:
if (
header_fork_requirement.required(self.fork, block_number, timestamp)
and getattr(self, field) is None
):
raise ValueError(f"Field {field} is required for fork {self.fork}")
@cached_property
def rlp_encode_list(self) -> List:
"""Compute the RLP of the header."""
header_list = []
for field in self.__class__.model_fields:
if field == "fork":
continue
value = getattr(self, field)
if value is not None:
header_list.append(value if isinstance(value, bytes) else Uint(value))
return header_list
@cached_property
def rlp(self) -> Bytes:
"""Compute the RLP of the header."""
return Bytes(eth_rlp.encode(self.rlp_encode_list))
@computed_field(alias="hash") # type: ignore[prop-decorator]
@cached_property
def block_hash(self) -> Hash:
"""Compute the RLP of the header."""
return self.rlp.keccak256()
@classmethod
def genesis(cls, fork: Fork, env: Environment, state_root: Hash) -> Self:
"""Get the genesis header for the given fork."""
environment_values = env.model_dump(exclude_none=True, exclude={"withdrawals"})
if env.withdrawals is not None:
environment_values["withdrawals_root"] = Withdrawal.list_root(env.withdrawals)
environment_values["extra_data"] = env.extra_data
extras = {
"state_root": state_root,
"requests_hash": Requests()
if fork.header_requests_required(block_number=0, timestamp=0)
else None,
"block_access_list_hash": (
BlockAccessList().rlp_hash
if fork.header_bal_hash_required(block_number=0, timestamp=0)
else None
),
"fork": fork,
}
return cls(**environment_values, **extras)
class FixtureExecutionPayload(CamelModel):
"""
Representation of an Ethereum execution payload within a test Fixture.
"""
parent_hash: Hash
fee_recipient: Address
state_root: Hash
receipts_root: Hash
logs_bloom: Bloom
number: HexNumber = Field(..., alias="blockNumber")
gas_limit: HexNumber
gas_used: HexNumber
timestamp: HexNumber
extra_data: Bytes
prev_randao: Hash
base_fee_per_gas: HexNumber
blob_gas_used: HexNumber | None = Field(None)
excess_blob_gas: HexNumber | None = Field(None)
block_hash: Hash
transactions: List[Bytes]
withdrawals: List[Withdrawal] | None = None
block_access_list: Bytes | None = Field(
None, description="RLP-serialized EIP-7928 Block Access List"
)
@classmethod
def from_fixture_header(
cls,
header: FixtureHeader,
transactions: List[Transaction],
withdrawals: List[Withdrawal] | None,
block_access_list: Bytes | None = None,
) -> Self:
"""
Return FixtureExecutionPayload from a FixtureHeader, a list of
transactions, a list of withdrawals, and an optional block access list.
"""
return cls(
**header.model_dump(exclude={"rlp"}, exclude_none=True),
transactions=[tx.rlp() for tx in transactions],
withdrawals=withdrawals,
block_access_list=block_access_list,
)
EngineNewPayloadV1Parameters = Tuple[FixtureExecutionPayload]
EngineNewPayloadV3Parameters = Tuple[FixtureExecutionPayload, List[Hash], Hash]
EngineNewPayloadV4Parameters = Tuple[
FixtureExecutionPayload,
List[Hash],
Hash,
List[Bytes],
]
EngineNewPayloadV5Parameters = EngineNewPayloadV4Parameters
# Important: We check EngineNewPayloadV3Parameters first as it has more fields,
# and pydantic has a weird behavior when the smaller tuple is checked first.
EngineNewPayloadParameters = Union[
EngineNewPayloadV5Parameters,
EngineNewPayloadV4Parameters,
EngineNewPayloadV3Parameters,
EngineNewPayloadV1Parameters,
]
class FixtureEngineNewPayload(CamelModel):
"""
Representation of the `engine_newPayloadVX` information to be sent using
the block information.
"""
params: EngineNewPayloadParameters
new_payload_version: Number
forkchoice_updated_version: Number
validation_error: ExceptionInstanceOrList | None = None
error_code: (
Annotated[
EngineAPIError,
PlainSerializer(
lambda x: str(x.value),
return_type=str,
),
]
| None
) = None
def valid(self) -> bool:
"""Return whether the payload is valid."""
return self.validation_error is None
@classmethod
def from_fixture_header(
cls,
fork: Fork,
header: FixtureHeader,
transactions: List[Transaction],
withdrawals: List[Withdrawal] | None,
requests: List[Bytes] | None,
block_access_list: Bytes | None = None,
**kwargs: Any,
) -> Self:
"""Create `FixtureEngineNewPayload` from a `FixtureHeader`."""
new_payload_version = fork.engine_new_payload_version(
block_number=header.number, timestamp=header.timestamp
)
forkchoice_updated_version = fork.engine_forkchoice_updated_version(
block_number=header.number, timestamp=header.timestamp
)
assert new_payload_version is not None, "Invalid header for engine_newPayload"
if fork.engine_execution_payload_block_access_list(
block_number=header.number, timestamp=header.timestamp
):
if block_access_list is None:
raise ValueError(
f"`block_access_list` is required in engine `ExecutionPayload` for >={fork}."
)
execution_payload = FixtureExecutionPayload.from_fixture_header(
header=header,
transactions=transactions,
withdrawals=withdrawals,
block_access_list=block_access_list,
)
params: List[Any] = [execution_payload]
if fork.engine_new_payload_blob_hashes(
block_number=header.number, timestamp=header.timestamp
):
blob_hashes = Transaction.list_blob_versioned_hashes(transactions)
if blob_hashes is None:
raise ValueError(f"Blob hashes are required for ${fork}.")
params.append(blob_hashes)
if fork.engine_new_payload_beacon_root(
block_number=header.number, timestamp=header.timestamp
):
parent_beacon_block_root = header.parent_beacon_block_root
if parent_beacon_block_root is None:
raise ValueError(f"Parent beacon block root is required for ${fork}.")
params.append(parent_beacon_block_root)
if fork.engine_new_payload_requests(
block_number=header.number, timestamp=header.timestamp
):
if requests is None:
raise ValueError(f"Requests are required for ${fork}.")
params.append(requests)
payload_params: EngineNewPayloadParameters = cast(
EngineNewPayloadParameters,
tuple(params),
)
new_payload = cls(
params=payload_params,
new_payload_version=new_payload_version,
forkchoice_updated_version=forkchoice_updated_version,
**kwargs,
)
return new_payload
class FixtureTransaction(TransactionFixtureConverter, TransactionGeneric[ZeroPaddedHexNumber]):
"""Representation of an Ethereum transaction within a test Fixture."""
authorization_list: List[FixtureAuthorizationTuple] | None = None
initcodes: List[Bytes] | None = None
@classmethod
def from_transaction(cls, tx: Transaction) -> Self:
"""Return FixtureTransaction from a Transaction."""
return cls(**tx.model_dump())
class FixtureWithdrawal(WithdrawalGeneric[ZeroPaddedHexNumber]):
"""
Structure to represent a single withdrawal of a validator's balance from
the beacon chain in the output fixture.
"""
@classmethod
def from_withdrawal(cls, w: WithdrawalGeneric) -> Self:
"""Return FixtureWithdrawal from a Withdrawal."""
return cls(**w.model_dump())
class WitnessChunk(CamelModel):
"""Represents execution witness data for a block."""
state: List[str]
codes: List[str]
keys: List[str]
headers: List[str]
@classmethod
def parse_witness_chunks(cls, s: str) -> List[Self]:
"""
Parse multiple witness chunks from JSON string.
Returns a list of WitnessChunk instances parsed from the JSON array.
"""
return [cls(**obj) for obj in json.loads(s)]
class FixtureBlockBase(CamelModel):
"""
Representation of an Ethereum block within a test Fixture without RLP
bytes.
"""
header: FixtureHeader = Field(..., alias="blockHeader")
txs: List[FixtureTransaction] = Field(default_factory=list, alias="transactions")
ommers: List[FixtureHeader] = Field(default_factory=list, alias="uncleHeaders")
withdrawals: List[FixtureWithdrawal] | None = None
execution_witness: WitnessChunk | None = None
block_access_list: BlockAccessList | None = Field(
None, description="EIP-7928 Block Access List"
)
@computed_field(alias="blocknumber") # type: ignore[prop-decorator]
@cached_property
def block_number(self) -> Number:
"""Get the block number from the header."""
return Number(self.header.number)
def with_rlp(self, txs: List[Transaction]) -> "FixtureBlock":
"""Return FixtureBlock with the RLP bytes set."""
block = [
self.header.rlp_encode_list,
[tx.serializable_list for tx in txs],
# TODO: This is incorrect, and we probably
# need to serialize the ommers
self.ommers,
]
if self.withdrawals is not None:
block.append([w.to_serializable_list() for w in self.withdrawals])
if self.block_access_list is not None:
block.append(self.block_access_list.to_list())
return FixtureBlock(
**self.model_dump(),
rlp=eth_rlp.encode(block),
)
class FixtureBlock(FixtureBlockBase):
"""Representation of an Ethereum block within a test Fixture."""
rlp: Bytes
def without_rlp(self) -> FixtureBlockBase:
"""Return FixtureBlockBase without the RLP bytes set."""
return FixtureBlockBase(
**self.model_dump(exclude={"rlp"}),
)
class FixtureConfig(CamelModel):
"""Chain configuration for a fixture."""
fork: Fork = Field(..., alias="network")
chain_id: ZeroPaddedHexNumber = Field(ZeroPaddedHexNumber(1), alias="chainid")
blob_schedule: FixtureBlobSchedule | None = None
class InvalidFixtureBlock(CamelModel):
"""Representation of an invalid Ethereum block within a test Fixture."""
rlp: Bytes
expect_exception: ExceptionInstanceOrList
rlp_decoded: FixtureBlockBase | None = Field(None, alias="rlp_decoded")
@post_state_validator()
class BlockchainFixtureCommon(BaseFixture):
"""Base blockchain test fixture model."""
fork: Fork = Field(..., alias="network")
genesis: FixtureHeader = Field(..., alias="genesisBlockHeader")
pre: Alloc
post_state: Alloc | None = Field(None)
post_state_hash: Hash | None = Field(None)
# FIXME: lastBlockHash
last_block_hash: Hash = Field(..., alias="lastblockhash")
config: FixtureConfig
@model_validator(mode="before")
@classmethod
def config_defaults_for_backwards_compatibility(cls, data: Any) -> Any:
"""
Check if the config field is populated, otherwise use the root-level
field values for backwards compatibility.
"""
if isinstance(data, dict):
if "config" not in data:
data["config"] = {}
if isinstance(data["config"], dict):
if "network" not in data["config"]:
data["config"]["network"] = data["network"]
if "chainid" not in data["config"]:
data["config"]["chainid"] = "0x01"
return data
def get_fork(self) -> Fork | None:
"""Return fork of the fixture as a string."""
return self.fork
class BlockchainFixture(BlockchainFixtureCommon):
"""Cross-client specific blockchain test model use in JSON fixtures."""
format_name: ClassVar[str] = "blockchain_test"
description: ClassVar[str] = "Tests that generate a blockchain test fixture."
genesis_rlp: Bytes = Field(..., alias="genesisRLP")
blocks: List[FixtureBlock | InvalidFixtureBlock]
seal_engine: Literal["NoProof"] = Field("NoProof")
@post_state_validator()
class BlockchainEngineFixtureCommon(BaseFixture):
"""
Base blockchain test fixture model for Engine API based execution.
Similar to BlockchainFixtureCommon but excludes the 'pre' field to avoid
duplicating large pre-allocations.
"""
fork: Fork = Field(..., alias="network")
post_state_hash: Hash | None = Field(None)
# FIXME: lastBlockHash
last_block_hash: Hash = Field(..., alias="lastblockhash")
config: FixtureConfig
def get_fork(self) -> Fork | None:
"""Return fixture's `Fork`."""
return self.fork
@classmethod
def supports_fork(cls, fork: Fork) -> bool:
"""
Return whether the fixture can be generated for the given fork.
The Engine API is available only on Paris and afterwards.
"""
return fork >= Paris
class BlockchainEngineFixture(BlockchainEngineFixtureCommon):
"""Engine specific test fixture information."""
format_name: ClassVar[str] = "blockchain_test_engine"
description: ClassVar[str] = (
"Tests that generate a blockchain test fixture in Engine API format."
)
pre: Alloc
genesis: FixtureHeader = Field(..., alias="genesisBlockHeader")
post_state: Alloc | None = Field(None)
payloads: List[FixtureEngineNewPayload] = Field(..., alias="engineNewPayloads")
@post_state_validator(alternate_field="post_state_diff")
class BlockchainEngineXFixture(BlockchainEngineFixtureCommon):
"""
Engine X specific test fixture information.
Uses pre-allocation groups (and a single client instance) for efficient
test execution without client restarts.
"""
format_name: ClassVar[str] = "blockchain_test_engine_x"
description: ClassVar[str] = "Tests that generate a Blockchain Test Engine X fixture."
format_phases: ClassVar[Set[FixtureFillingPhase]] = {
FixtureFillingPhase.FILL,
FixtureFillingPhase.PRE_ALLOC_GENERATION,
}
pre_hash: str
"""Hash of the pre-allocation group this test belongs to."""
post_state_diff: Alloc | None = None
"""
State difference from genesis after test execution (efficiency
optimization).
"""
payloads: List[FixtureEngineNewPayload] = Field(..., alias="engineNewPayloads")
"""Engine API payloads for blockchain execution."""
class BlockchainEngineSyncFixture(BlockchainEngineFixture):
"""
Engine Sync specific test fixture information.
This fixture format is specifically designed for sync testing where:
- The client under test receives all payloads
- A sync client attempts to sync from the client under test
- Both client types are parametrized from hive client config
"""
format_name: ClassVar[str] = "blockchain_test_sync"
description: ClassVar[str] = (
"Tests that generate a blockchain test fixture for Engine API testing with client sync."
)
sync_payload: FixtureEngineNewPayload | None = None
@classmethod
def discard_fixture_format_by_marks(
cls,
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
"""Discard the fixture format based on the provided markers."""
del fork
marker_names = [m.name for m in markers]
return "verify_sync" not in marker_names
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/file.py | src/ethereum_test_fixtures/file.py | """Defines models for interacting with JSON fixture files."""
import json
from pathlib import Path
from typing import Any, Dict, ItemsView, Iterator, KeysView, ValuesView
from filelock import FileLock
from pydantic import SerializeAsAny
from ethereum_test_base_types import EthereumTestRootModel
from .base import BaseFixture
class Fixtures(EthereumTestRootModel):
"""
A base class for defining top-level models that encapsulate multiple test
fixtures. Each fixture is stored in a dictionary, where each key is a
string (typically the fixture name) and its corresponding value is a
fixture object. This is the structure used for blockchain and state JSON
fixture files.
This class implements dunder methods and other common functionality to
allow interaction with the model's fixtures as if they were being accessed
directly from a dictionary.
"""
root: Dict[str, SerializeAsAny[BaseFixture]]
def __setitem__(self, key: str, value: BaseFixture) -> None: # noqa: D105
self.root[key] = value
def __getitem__(self, item: str) -> SerializeAsAny[BaseFixture]: # noqa: D105
return self.root[item]
def __iter__(self) -> Iterator[str]: # type: ignore [override] # noqa: D105
return iter(self.root)
def __contains__(self, item: str) -> bool: # noqa: D105
return item in self.root
def __len__(self) -> int: # noqa: D105
return len(self.root)
def keys(self) -> KeysView[str]: # noqa: D102
return self.root.keys()
def values(self) -> ValuesView[SerializeAsAny[BaseFixture]]: # noqa: D102
return self.root.values()
def items(self) -> ItemsView[str, SerializeAsAny[BaseFixture]]: # noqa: D102
return self.root.items()
def collect_into_file(self, file_path: Path) -> None:
"""
For all formats, we join the fixtures as json into a single file.
Note: We don't use pydantic model_dump_json() on the Fixtures object as
we add the hash to the info field on per-fixture basis.
"""
json_fixtures: Dict[str, Dict[str, Any]] = {}
lock_file_path = file_path.with_suffix(".lock")
with FileLock(lock_file_path):
if file_path.exists():
with open(file_path, "r") as f:
json_fixtures = json.load(f)
for name, fixture in self.items():
json_fixtures[name] = fixture.json_dict_with_info()
with open(file_path, "w") as f:
json.dump(dict(sorted(json_fixtures.items())), f, indent=4)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/common.py | src/ethereum_test_fixtures/common.py | """Common types used to define multiple fixture types."""
from typing import Dict
from pydantic import AliasChoices, Field
from ethereum_test_base_types import (
BlobSchedule,
CamelModel,
EthereumTestRootModel,
SignableRLPSerializable,
ZeroPaddedHexNumber,
)
from ethereum_test_types.account_types import Address
from ethereum_test_types.transaction_types import AuthorizationTupleGeneric
class FixtureForkBlobSchedule(CamelModel):
"""Representation of the blob schedule of a given fork."""
target_blobs_per_block: ZeroPaddedHexNumber = Field(..., alias="target")
max_blobs_per_block: ZeroPaddedHexNumber = Field(..., alias="max")
base_fee_update_fraction: ZeroPaddedHexNumber = Field(...)
class FixtureBlobSchedule(EthereumTestRootModel[Dict[str, FixtureForkBlobSchedule]]):
"""Blob schedule configuration dictionary."""
root: Dict[str, FixtureForkBlobSchedule] = Field(default_factory=dict, validate_default=True)
@classmethod
def from_blob_schedule(
cls, blob_schedule: BlobSchedule | None
) -> "FixtureBlobSchedule | None":
"""Return a FixtureBlobSchedule from a BlobSchedule."""
if blob_schedule is None:
return None
return cls(
root=blob_schedule.model_dump(),
)
class FixtureAuthorizationTuple(
AuthorizationTupleGeneric[ZeroPaddedHexNumber], SignableRLPSerializable
):
"""Authorization tuple for fixture transactions."""
v: ZeroPaddedHexNumber = Field(validation_alias=AliasChoices("v", "yParity"))
r: ZeroPaddedHexNumber
s: ZeroPaddedHexNumber
signer: Address | None = None
@classmethod
def from_authorization_tuple(
cls, auth_tuple: AuthorizationTupleGeneric
) -> "FixtureAuthorizationTuple":
"""Return FixtureAuthorizationTuple from an AuthorizationTuple."""
return cls(**auth_tuple.model_dump())
def sign(self) -> None:
"""Sign the current object for further serialization."""
# No-op, as the object is always already signed
return
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/state.py | src/ethereum_test_fixtures/state.py | """StateTest types."""
from typing import ClassVar, List, Mapping, Sequence
from pydantic import BaseModel, Field
from ethereum_test_base_types import (
AccessList,
Address,
Alloc,
Bytes,
CamelModel,
Hash,
ZeroPaddedHexNumber,
)
from ethereum_test_exceptions import TransactionExceptionInstanceOrList
from ethereum_test_forks import Fork
from ethereum_test_types.block_types import EnvironmentGeneric
from ethereum_test_types.transaction_types import (
Transaction,
TransactionFixtureConverter,
)
from .base import BaseFixture
from .common import FixtureAuthorizationTuple, FixtureBlobSchedule
class FixtureEnvironment(EnvironmentGeneric[ZeroPaddedHexNumber]):
"""Type used to describe the environment of a state test."""
prev_randao: Hash | None = Field(None, alias="currentRandom") # type: ignore
class FixtureTransaction(TransactionFixtureConverter):
"""Type used to describe a transaction in a state test."""
nonce: ZeroPaddedHexNumber
gas_price: ZeroPaddedHexNumber | None = None
max_priority_fee_per_gas: ZeroPaddedHexNumber | None = None
max_fee_per_gas: ZeroPaddedHexNumber | None = None
gas_limit: List[ZeroPaddedHexNumber]
to: Address | None = None
value: List[ZeroPaddedHexNumber]
data: List[Bytes]
access_lists: List[List[AccessList] | None] | None = None
authorization_list: List[FixtureAuthorizationTuple] | None = None
initcodes: List[Bytes] | None = None
max_fee_per_blob_gas: ZeroPaddedHexNumber | None = None
blob_versioned_hashes: Sequence[Hash] | None = None
sender: Address | None = None
secret_key: Hash | None = None
@classmethod
def from_transaction(cls, tx: Transaction) -> "FixtureTransaction":
"""Return FixtureTransaction from a Transaction."""
model_as_dict = tx.model_dump(
exclude={"gas_limit", "value", "data", "access_list"}, exclude_none=True
)
model_as_dict["gas_limit"] = [tx.gas_limit]
model_as_dict["value"] = [tx.value]
model_as_dict["data"] = [tx.data]
model_as_dict["access_lists"] = [tx.access_list] if tx.access_list is not None else None
return cls(**model_as_dict)
class FixtureForkPostIndexes(BaseModel):
"""
Type used to describe the indexes of a single post state of a single Fork.
"""
data: int = 0
gas: int = 0
value: int = 0
class FixtureForkPost(CamelModel):
"""Type used to describe the post state of a single Fork."""
state_root: Hash = Field(..., alias="hash")
logs_hash: Hash = Field(..., alias="logs")
tx_bytes: Bytes = Field(..., alias="txbytes")
indexes: FixtureForkPostIndexes = Field(default_factory=FixtureForkPostIndexes)
state: Alloc
expect_exception: TransactionExceptionInstanceOrList | None = None
class FixtureConfig(CamelModel):
"""Chain configuration for a fixture."""
blob_schedule: FixtureBlobSchedule | None = None
chain_id: ZeroPaddedHexNumber = Field(ZeroPaddedHexNumber(1), alias="chainid")
class StateFixture(BaseFixture):
"""Fixture for a single StateTest."""
format_name: ClassVar[str] = "state_test"
description: ClassVar[str] = "Tests that generate a state test fixture."
env: FixtureEnvironment
pre: Alloc
transaction: FixtureTransaction
post: Mapping[Fork, List[FixtureForkPost]]
config: FixtureConfig
def get_fork(self) -> Fork | None:
"""Return fork of the fixture as a string."""
forks = list(self.post.keys())
assert len(forks) == 1, "Expected state test fixture with single fork"
return forks[0]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/consume.py | src/ethereum_test_fixtures/consume.py | """Defines models for index files and consume test cases."""
import datetime
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Iterator, List, Optional, TextIO
from pydantic import BaseModel, RootModel
from ethereum_test_base_types import HexNumber
from ethereum_test_forks import Fork
from .base import BaseFixture, FixtureFormat
from .file import Fixtures
class FixtureConsumer(ABC):
"""Abstract class for verifying Ethereum test fixtures."""
fixture_formats: List[FixtureFormat]
def can_consume(
self,
fixture_format: FixtureFormat,
) -> bool:
"""Return whether the fixture format is consumable by this consumer."""
return fixture_format in self.fixture_formats
@abstractmethod
def consume_fixture(
self,
fixture_format: FixtureFormat,
fixture_path: Path,
fixture_name: str | None = None,
debug_output_path: Path | None = None,
) -> None:
"""
Test the client with the specified fixture using its direct consumer
interface.
"""
raise NotImplementedError(
"The `consume_fixture()` function is not supported by this tool."
)
class TestCaseBase(BaseModel):
"""Base model for a test case used in EEST consume commands."""
id: str
fixture_hash: HexNumber | None
fork: Fork | None
format: FixtureFormat
pre_hash: str | None = None
__test__ = False # stop pytest from collecting this class as a test
class TestCaseStream(TestCaseBase):
"""The test case model used to load test cases from a stream (stdin)."""
fixture: BaseFixture
__test__ = False # stop pytest from collecting this class as a test
class TestCaseIndexFile(TestCaseBase):
"""
The test case model used to save/load test cases to/from an index file.
"""
json_path: Path
__test__ = False # stop pytest from collecting this class as a test
# TODO: add pytest marks
"""
ConsumerTypes = Literal["all", "direct", "rlp", "engine"]
@classmethod
def _marks_default(cls):
return {consumer_type: [] for consumer_type in get_args(ConsumerTypes)}
marks: Mapping[ConsumerTypes, List[pytest.MarkDecorator]] = field(
default_factory=lambda: TestCase._marks_default()
)
"""
class IndexFile(BaseModel):
"""The model definition used for fixture index files."""
root_hash: HexNumber | None
created_at: datetime.datetime
test_count: int
forks: Optional[List[Fork]] = []
fixture_formats: Optional[List[str]] = []
test_cases: List[TestCaseIndexFile]
class TestCases(RootModel):
"""Root model defining a list test cases used in consume commands."""
root: List[TestCaseIndexFile] | List[TestCaseStream]
__test__ = False # stop pytest from collecting this class as a test
def __len__(self) -> int:
"""Return the number of test cases in the root list."""
return len(self.root)
def __getitem__(self, position: int) -> TestCaseIndexFile | TestCaseStream:
"""Retrieve a test case by its index."""
return self.root[position]
def __setitem__(self, position: int, value: TestCaseIndexFile | TestCaseStream) -> None:
"""Set a test case at a particular index."""
self.root[position] = value # type: ignore
def __delitem__(self, position: int) -> None:
"""Remove a test case at a particular index."""
del self.root[position]
def append(self, item: TestCaseIndexFile | TestCaseStream) -> None:
"""Append a test case to the root list."""
self.root.append(item) # type: ignore
def insert(self, position: int, value: TestCaseIndexFile | TestCaseStream) -> None:
"""Insert a test case at a given position."""
self.root.insert(position, value) # type: ignore
def remove(self, value: TestCaseIndexFile | TestCaseStream) -> None:
"""Remove a test case from the root list."""
self.root.remove(value) # type: ignore
def pop(self, position: int = -1) -> TestCaseIndexFile | TestCaseStream:
"""Remove and return a test case at the given position."""
return self.root.pop(position)
def clear(self) -> None:
"""Remove all items from the root list."""
self.root.clear()
def __iter__(self) -> Iterator[TestCaseIndexFile | TestCaseStream]: # type: ignore [override]
"""Return an iterator for the root list."""
return iter(self.root)
def __repr__(self) -> str:
"""Return a string representation of the TestCases object."""
return f"{self.__class__.__name__}(root={self.root})"
@classmethod
def from_stream(cls, fd: TextIO) -> "TestCases":
"""Create a TestCases object from a stream."""
fixtures: Fixtures = Fixtures.model_validate_json(fd.read())
test_cases = [
TestCaseStream(
id=fixture_name,
fixture_hash=fixture.hash,
fork=fixture.get_fork(),
format=fixture.__class__,
fixture=fixture,
)
for fixture_name, fixture in fixtures.items()
]
return cls(root=test_cases)
@classmethod
def from_index_file(cls, index_file: Path) -> "TestCases":
"""Create a TestCases object from an index file."""
index: IndexFile = IndexFile.model_validate_json(index_file.read_text())
return cls(root=index.test_cases)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/pre_alloc_groups.py | src/ethereum_test_fixtures/pre_alloc_groups.py | """Pre-allocation group models for test fixture generation."""
import json
from pathlib import Path
from typing import Any, Dict, Generator, Iterator, KeysView, List, Tuple
from filelock import FileLock
from pydantic import Field, PrivateAttr, computed_field
from ethereum_test_base_types import CamelModel, EthereumTestRootModel
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc, Environment
from .blockchain import FixtureHeader
class PreAllocGroup(CamelModel):
"""
Pre-allocation group for tests with identical Environment and fork values.
Groups tests by a hash of their fixture Environment and fork to enable
pre-allocation group optimization.
"""
# Allow both field names and aliases
model_config = {"populate_by_name": True}
test_ids: List[str] = Field(default_factory=list)
environment: Environment = Field(..., description="Grouping environment for this test group")
fork: Fork = Field(..., alias="network")
pre: Alloc
@computed_field(description="Number of accounts in the pre-allocation") # type: ignore[prop-decorator]
@property
def pre_account_count(self) -> int:
"""Return the amount of accounts the pre-allocation group holds."""
return len(self.pre.root)
@computed_field(description="Number of tests in this group") # type: ignore[prop-decorator]
@property
def test_count(self) -> int:
"""Return the amount of tests that use this pre-allocation group."""
return len(self.test_ids)
@computed_field # type: ignore[prop-decorator]
@property
def genesis(self) -> FixtureHeader:
"""Get the genesis header for this group."""
return FixtureHeader.genesis(
self.fork,
self.environment,
self.pre.state_root(),
)
def to_file(self, file: Path) -> None:
"""Save PreAllocGroup to a file."""
lock_file_path = file.with_suffix(".lock")
with FileLock(lock_file_path):
if file.exists():
with open(file, "r") as f:
previous_pre_alloc_group = PreAllocGroup.model_validate_json(f.read())
for account in previous_pre_alloc_group.pre:
existing_account = previous_pre_alloc_group.pre[account]
if account not in self.pre:
self.pre[account] = existing_account
else:
new_account = self.pre[account]
if new_account != existing_account:
# This procedure fails during xdist worker's
# pytest_sessionfinish and is not reported to the
# master thread. We signal here that the groups
# created contain a collision.
collision_file_path = file.with_suffix(".fail")
collision_exception = Alloc.CollisionError(
address=account,
account_1=existing_account,
account_2=new_account,
)
with open(collision_file_path, "w") as f:
f.write(json.dumps(collision_exception.to_json()))
raise collision_exception
self.test_ids.extend(previous_pre_alloc_group.test_ids)
with open(file, "w") as f:
f.write(self.model_dump_json(by_alias=True, exclude_none=True, indent=2))
class PreAllocGroups(EthereumTestRootModel):
"""
Root model mapping pre-allocation group hashes to test groups.
If lazy_load is True, the groups are not loaded from the folder until they
are accessed.
Iterating will fail if lazy_load is True.
"""
root: Dict[str, PreAllocGroup | None]
_folder_source: Path | None = PrivateAttr(None)
def __setitem__(self, key: str, value: Any) -> None:
"""Set item in root dict."""
assert self._folder_source is None, (
"Cannot set item in root dict after folder source is set"
)
self.root[key] = value
@classmethod
def from_folder(cls, folder: Path, *, lazy_load: bool = False) -> "PreAllocGroups":
"""Create PreAllocGroups from a folder of pre-allocation files."""
# First check for collision failures
for fail_file in folder.glob("*.fail"):
with open(fail_file) as f:
raise Alloc.CollisionError.from_json(json.loads(f.read()))
data: Dict[str, PreAllocGroup | None] = {}
for file in folder.glob("*.json"):
if lazy_load:
data[file.stem] = None
else:
with open(file) as f:
data[file.stem] = PreAllocGroup.model_validate_json(f.read())
instance = cls(root=data)
if lazy_load:
instance._folder_source = folder
return instance
def to_folder(self, folder: Path) -> None:
"""Save PreAllocGroups to a folder of pre-allocation files."""
for key, value in self.root.items():
assert value is not None, f"Value for key {key} is None"
value.to_file(folder / f"{key}.json")
def __getitem__(self, item: str) -> PreAllocGroup:
"""Get item from root dict."""
if self._folder_source is None:
value = self.root[item]
assert value is not None, f"Item {item} is None"
return value
else:
if self.root[item] is None:
with open(self._folder_source / f"{item}.json") as f:
self.root[item] = PreAllocGroup.model_validate_json(f.read())
result = self.root[item]
assert result is not None
return result
def __iter__(self) -> Iterator[str]: # type: ignore [override]
"""Iterate over root dict."""
return iter(self.root)
def __contains__(self, item: str) -> bool:
"""Check if item in root dict."""
return item in self.root
def __len__(self) -> int:
"""Get length of root dict."""
return len(self.root)
def keys(self) -> KeysView[str]:
"""Get keys from root dict."""
return self.root.keys()
def values(self) -> Generator[PreAllocGroup, None, None]:
"""Get values from root dict."""
for value in self.root.values():
assert value is not None, "Value is None"
yield value
def items(self) -> Generator[Tuple[str, PreAllocGroup], None, None]:
"""Get items from root dict."""
for key, value in self.root.items():
assert value is not None, f"Value for key {key} is None"
yield key, value
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/collector.py | src/ethereum_test_fixtures/collector.py | """
Fixture collector class used to collect, sort and combine the different types
of generated fixtures.
"""
import json
import os
import re
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import ClassVar, Dict, Literal, Optional, Tuple
from ethereum_test_base_types import to_json
from .base import BaseFixture
from .consume import FixtureConsumer
from .file import Fixtures
@dataclass(kw_only=True, slots=True)
class TestInfo:
"""Contains test information from the current node."""
name: str # pytest: Item.name, e.g. test_paris_one[fork_Paris-state_test]
id: str # pytest: Item.nodeid, e.g.
# tests/paris/test_module_paris.py::test_paris_one[...]
original_name: str # pytest: Item.originalname, e.g. test_paris_one
module_path: Path # pytest: Item.path, e.g.
# .../tests/paris/test_module_paris.py
test_prefix: ClassVar[str] = "test_" # Python test prefix
filler_suffix: ClassVar[str] = "Filler" # Static test suffix
@classmethod
def strip_test_name(cls, name: str) -> str:
"""Remove test prefix from a python test case name."""
if name.startswith(cls.test_prefix):
return name.removeprefix(cls.test_prefix)
if name.endswith(cls.filler_suffix):
return name.removesuffix(cls.filler_suffix)
return name
def get_name_and_parameters(self) -> Tuple[str, str]:
"""
Convert test name to a tuple containing the test name and test
parameters.
Example: test_push0_key_sstore[fork_Shanghai] -> test_push0_key_sstore,
fork_Shanghai
"""
test_name, parameters = self.name.split("[")
return test_name, re.sub(r"[\[\-]", "_", parameters).replace("]", "")
def get_single_test_name(self, mode: Literal["module", "test"] = "module") -> str:
"""Convert test name to a single test name."""
if mode == "module":
# Use the module name as the test name
return self.strip_test_name(self.original_name)
elif mode == "test":
# Mix the module name and the test name/arguments
test_name, test_parameters = self.get_name_and_parameters()
test_name = self.strip_test_name(test_name)
return f"{test_name}__{test_parameters}"
def get_dump_dir_path(
self,
base_dump_dir: Optional[Path],
filler_path: Path,
level: Literal["test_module", "test_function", "test_parameter"] = "test_parameter",
) -> Optional[Path]:
"""Path to dump the debug output as defined by the level to dump at."""
if not base_dump_dir:
return None
test_module_relative_dir = self.get_module_relative_output_dir(filler_path)
if level == "test_module":
return Path(base_dump_dir) / Path(str(test_module_relative_dir).replace(os.sep, "__"))
test_name, test_parameter_string = self.get_name_and_parameters()
flat_path = f"{str(test_module_relative_dir).replace(os.sep, '__')}__{test_name}"
if level == "test_function":
return Path(base_dump_dir) / flat_path
elif level == "test_parameter":
return Path(base_dump_dir) / flat_path / test_parameter_string
raise Exception("Unexpected level.")
def get_id(self) -> str:
"""Return the test id."""
return self.id
def get_module_relative_output_dir(self, filler_path: Path) -> Path:
"""
Return a directory name for the provided test_module (relative to the
base ./tests directory) that can be used for output (within the
configured fixtures output path or the base_dump_dir directory).
Example: tests/shanghai/eip3855_push0/test_push0.py ->
shanghai/eip3855_push0/test_push0
"""
basename = self.module_path.with_suffix("").absolute()
basename_relative = basename.relative_to(
os.path.commonpath([filler_path.absolute(), basename])
)
module_path = basename_relative.parent / self.strip_test_name(basename_relative.stem)
return module_path
@dataclass(kw_only=True)
class FixtureCollector:
"""Collects all fixtures generated by the test cases."""
output_dir: Path
fill_static_tests: bool
single_fixture_per_file: bool
filler_path: Path
base_dump_dir: Optional[Path] = None
flush_interval: int = 1000
# Internal state
all_fixtures: Dict[Path, Fixtures] = field(default_factory=dict)
json_path_to_test_item: Dict[Path, TestInfo] = field(default_factory=dict)
def get_fixture_basename(self, info: TestInfo) -> Path:
"""Return basename of the fixture file for a given test case."""
module_relative_output_dir = info.get_module_relative_output_dir(self.filler_path)
# Each legacy test filler has only 1 test per file if it's a !state
# test! So no need to create directory Add11/add11.json it can be plain
# add11.json
if self.fill_static_tests:
return module_relative_output_dir.parent / info.original_name
if self.single_fixture_per_file:
return module_relative_output_dir / info.get_single_test_name(mode="test")
return module_relative_output_dir / info.get_single_test_name(mode="module")
def add_fixture(self, info: TestInfo, fixture: BaseFixture) -> Path:
"""Add fixture to the list of fixtures of a given test case."""
fixture_basename = self.get_fixture_basename(info)
fixture_path = (
self.output_dir
/ fixture.output_base_dir_name()
/ fixture_basename.with_suffix(fixture.output_file_extension)
)
# relevant when we group by test function
if fixture_path not in self.all_fixtures.keys():
self.all_fixtures[fixture_path] = Fixtures(root={})
self.json_path_to_test_item[fixture_path] = info
self.all_fixtures[fixture_path][info.get_id()] = fixture
if self.flush_interval > 0 and len(self.all_fixtures) >= self.flush_interval:
self.dump_fixtures()
return fixture_path
def dump_fixtures(self) -> None:
"""Dump all collected fixtures to their respective files."""
if self.output_dir.name == "stdout":
combined_fixtures = {
k: to_json(v) for fixture in self.all_fixtures.values() for k, v in fixture.items()
}
json.dump(combined_fixtures, sys.stdout, indent=4)
return
os.makedirs(self.output_dir, exist_ok=True)
for fixture_path, fixtures in self.all_fixtures.items():
os.makedirs(fixture_path.parent, exist_ok=True)
if len({fixture.__class__ for fixture in fixtures.values()}) != 1:
raise TypeError("All fixtures in a single file must have the same format.")
fixtures.collect_into_file(fixture_path)
self.all_fixtures.clear()
def verify_fixture_files(self, evm_fixture_verification: FixtureConsumer) -> None:
"""Run `evm [state|block]test` on each fixture."""
for fixture_path, name_fixture_dict in self.all_fixtures.items():
for _fixture_name, fixture in name_fixture_dict.items():
if evm_fixture_verification.can_consume(fixture.__class__):
info = self.json_path_to_test_item[fixture_path]
consume_direct_dump_dir = self._get_consume_direct_dump_dir(info)
evm_fixture_verification.consume_fixture(
fixture.__class__,
fixture_path,
fixture_name=None,
debug_output_path=consume_direct_dump_dir,
)
def _get_consume_direct_dump_dir(
self,
info: TestInfo,
) -> Path | None:
"""
Directory to dump the current test function's fixture.json and fixture
verification debug output.
"""
if not self.base_dump_dir:
return None
if self.single_fixture_per_file:
return info.get_dump_dir_path(
self.base_dump_dir, self.filler_path, level="test_parameter"
)
else:
return info.get_dump_dir_path(
self.base_dump_dir, self.filler_path, level="test_function"
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/transaction.py | src/ethereum_test_fixtures/transaction.py | """TransactionTest types."""
from typing import ClassVar, Mapping
from pydantic import Field
from ethereum_test_base_types import Address, Bytes, CamelModel, Hash, ZeroPaddedHexNumber
from ethereum_test_exceptions import TransactionExceptionInstanceOrList
from ethereum_test_forks import Fork
from .base import BaseFixture
class FixtureResult(CamelModel):
"""The per-network (fork) result structure."""
hash: Hash | None = None
intrinsic_gas: ZeroPaddedHexNumber
sender: Address | None = None
exception: TransactionExceptionInstanceOrList | None = None
class TransactionFixture(BaseFixture):
"""Fixture for a single TransactionTest."""
format_name: ClassVar[str] = "transaction_test"
description: ClassVar[str] = "Tests that generate a transaction test fixture."
result: Mapping[Fork, FixtureResult]
transaction: Bytes = Field(..., alias="txbytes")
def get_fork(self) -> Fork | None:
"""Return the fork of the fixture as a string."""
forks = list(self.result.keys())
assert len(forks) == 1, "Expected transaction test fixture with single fork"
return forks[0]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/__init__.py | src/ethereum_test_fixtures/__init__.py | """Ethereum test fixture format definitions."""
from .base import BaseFixture, FixtureFillingPhase, FixtureFormat, LabeledFixtureFormat
from .blockchain import (
BlockchainEngineFixture,
BlockchainEngineFixtureCommon,
BlockchainEngineSyncFixture,
BlockchainEngineXFixture,
BlockchainFixture,
BlockchainFixtureCommon,
)
from .collector import FixtureCollector, TestInfo
from .consume import FixtureConsumer
from .eof import EOFFixture
from .pre_alloc_groups import PreAllocGroup, PreAllocGroups
from .state import StateFixture
from .transaction import TransactionFixture
__all__ = [
"BaseFixture",
"BlockchainEngineFixture",
"BlockchainEngineFixtureCommon",
"BlockchainEngineSyncFixture",
"BlockchainEngineXFixture",
"BlockchainFixture",
"BlockchainFixtureCommon",
"EOFFixture",
"FixtureCollector",
"FixtureConsumer",
"FixtureFillingPhase",
"FixtureFormat",
"LabeledFixtureFormat",
"PreAllocGroups",
"PreAllocGroup",
"StateFixture",
"TestInfo",
"TransactionFixture",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/base.py | src/ethereum_test_fixtures/base.py | """Base fixture definitions used to define all fixture types."""
import hashlib
import json
from enum import Enum, auto
from functools import cached_property
from typing import Annotated, Any, ClassVar, Dict, List, Set, Type, Union
import pytest
from pydantic import (
Discriminator,
Field,
PlainSerializer,
PlainValidator,
Tag,
TypeAdapter,
model_validator,
)
from pydantic_core.core_schema import ValidatorFunctionWrapHandler
from ethereum_test_base_types import CamelModel, ReferenceSpec
from ethereum_test_forks import Fork
def fixture_format_discriminator(v: Any) -> str | None:
"""Discriminator function that returns the model type as a string."""
if v is None:
return None
if isinstance(v, dict):
info_dict = v.get("_info")
elif hasattr(v, "info"):
info_dict = v.info
assert info_dict is not None, (
f"Fixture does not have an info field, cannot determine fixture format: {v}"
)
fixture_format = info_dict.get("fixture-format")
if not fixture_format:
fixture_format = info_dict.get("fixture_format")
assert fixture_format is not None, f"Fixture format not found in info field: {info_dict}"
return fixture_format
class FixtureFillingPhase(Enum):
"""Execution phase for fixture generation."""
PRE_ALLOC_GENERATION = auto()
FILL = auto()
class BaseFixture(CamelModel):
"""Represents a base Ethereum test fixture of any type."""
# Base Fixture class properties
formats: ClassVar[Dict[str, Type["BaseFixture"]]] = {}
formats_type_adapter: ClassVar[TypeAdapter]
info: Dict[str, Dict[str, Any] | str] = Field(default_factory=dict, alias="_info")
# Fixture format properties
format_name: ClassVar[str] = ""
output_file_extension: ClassVar[str] = ".json"
description: ClassVar[str] = "Unknown fixture format; it has not been set."
format_phases: ClassVar[Set[FixtureFillingPhase]] = {FixtureFillingPhase.FILL}
@classmethod
def output_base_dir_name(cls) -> str:
"""
Return name of the subdirectory where this type of fixture should be
dumped to.
"""
return cls.format_name.replace("test", "tests")
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
"""
Register all subclasses of BaseFixture with a fixture format name set
as possible fixture formats.
"""
if cls.format_name:
# Register the new fixture format
BaseFixture.formats[cls.format_name] = cls
if len(BaseFixture.formats) > 1:
BaseFixture.formats_type_adapter = TypeAdapter(
Annotated[
Union[
tuple(
Annotated[fixture_format, Tag(format_name)]
for (
format_name,
fixture_format,
) in BaseFixture.formats.items()
)
],
Discriminator(fixture_format_discriminator),
]
)
else:
BaseFixture.formats_type_adapter = TypeAdapter(cls)
@model_validator(mode="wrap")
@classmethod
def _parse_into_subclass(cls, v: Any, handler: ValidatorFunctionWrapHandler) -> "BaseFixture":
"""Parse the fixture into the correct subclass."""
if cls is BaseFixture:
return BaseFixture.formats_type_adapter.validate_python(v)
return handler(v)
@cached_property
def json_dict(self) -> Dict[str, Any]:
"""Returns the JSON representation of the fixture."""
return self.model_dump(mode="json", by_alias=True, exclude_none=True, exclude={"info"})
@cached_property
def hash(self) -> str:
"""Returns the hash of the fixture."""
json_str = json.dumps(self.json_dict, sort_keys=True, separators=(",", ":"))
h = hashlib.sha256(json_str.encode("utf-8")).hexdigest()
return f"0x{h}"
def json_dict_with_info(self, hash_only: bool = False) -> Dict[str, Any]:
"""Return JSON representation of the fixture with the info field."""
dict_with_info = self.json_dict.copy()
dict_with_info["_info"] = {"hash": self.hash}
if not hash_only:
dict_with_info["_info"].update(self.info)
return dict_with_info
def fill_info(
self,
t8n_version: str,
test_case_description: str,
fixture_source_url: str,
ref_spec: ReferenceSpec | None,
_info_metadata: Dict[str, Any],
) -> None:
"""Fill the info field for this fixture."""
if "comment" not in self.info:
self.info["comment"] = "`execution-spec-tests` generated test"
self.info["filling-transition-tool"] = t8n_version
self.info["description"] = test_case_description
self.info["url"] = fixture_source_url
self.info["fixture-format"] = self.format_name
if ref_spec is not None:
ref_spec.write_info(self.info)
if _info_metadata:
self.info.update(_info_metadata)
def get_fork(self) -> Fork | None:
"""Return fork of the fixture as a string."""
raise NotImplementedError
@classmethod
def supports_fork(cls, fork: Fork) -> bool:
"""
Return whether the fixture can be generated for the given fork.
By default, all fixtures support all forks.
"""
del fork
return True
@classmethod
def discard_fixture_format_by_marks(
cls,
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
"""
Discard a fixture format from filling if the appropriate marker is
used.
"""
del fork, markers
return False
class LabeledFixtureFormat:
"""
Represents a fixture format with a custom label.
This label will be used in the test id and also will be added as a marker
to the generated test case when filling the test.
"""
format: Type[BaseFixture]
label: str
description: str
registered_labels: ClassVar[Dict[str, "LabeledFixtureFormat"]] = {}
def __init__(
self,
fixture_format: "Type[BaseFixture] | LabeledFixtureFormat",
label: str,
description: str,
):
"""Initialize the fixture format with a custom label."""
self.format = (
fixture_format.format
if isinstance(fixture_format, LabeledFixtureFormat)
else fixture_format
)
self.label = label
self.description = description
if label not in LabeledFixtureFormat.registered_labels:
LabeledFixtureFormat.registered_labels[label] = self
@property
def format_name(self) -> str:
"""Get the filling format name."""
return self.format.format_name
@property
def format_phases(self) -> Set[FixtureFillingPhase]:
"""Get the filling format phases where it should be included."""
return self.format.format_phases
def __eq__(self, other: Any) -> bool:
"""
Check if two labeled fixture formats are equal.
If the other object is a FixtureFormat type, the format of the labeled
fixture format will be compared with the format of the other object.
"""
if isinstance(other, LabeledFixtureFormat):
return self.format == other.format
if isinstance(other, type) and issubclass(other, BaseFixture):
return self.format == other
return False
# Annotated type alias for a base fixture class
FixtureFormat = Annotated[
Type[BaseFixture],
PlainSerializer(lambda f: f.format_name),
PlainValidator(lambda f: BaseFixture.formats[f] if f in BaseFixture.formats else f),
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/eof.py | src/ethereum_test_fixtures/eof.py | """EOFTest Type Definitions."""
from typing import Any, ClassVar, Mapping
from pydantic import Field
from ethereum_test_base_types import Bytes, CamelModel, Number
from ethereum_test_exceptions.exceptions import EOFExceptionInstanceOrList
from ethereum_test_forks import Fork
from ethereum_test_types.eof.v1 import ContainerKind
from .base import BaseFixture
class Result(CamelModel):
"""Result for a single fork in a fixture."""
exception: EOFExceptionInstanceOrList | None = None
valid: bool = Field(..., alias="result")
def model_post_init(self, __context: Any) -> None:
"""
Cross-field validation that a test cannot have an empty exception if
the valid is False.
"""
if not self.valid and self.exception is None:
raise ValueError("Invalid test: invalid but exception is not set")
elif self.valid and self.exception is not None:
raise ValueError("Invalid test: valid but exception is set")
super().model_post_init(__context)
class Vector(CamelModel):
"""Single test vector in a fixture."""
code: Bytes
container_kind: ContainerKind = ContainerKind.RUNTIME
results: Mapping[Fork, Result]
class EOFFixture(BaseFixture):
"""Fixture for a single EOFTest."""
format_name: ClassVar[str] = "eof_test"
description: ClassVar[str] = "Tests that generate an EOF test fixture."
vectors: Mapping[Number, Vector]
def get_fork(self) -> Fork | None:
"""Return fork of the fixture as a string."""
return None
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/tests/test_blockchain.py | src/ethereum_test_fixtures/tests/test_blockchain.py | """Test the blockchain test types."""
from typing import Any, Dict
import pytest
from pydantic import TypeAdapter
from ethereum_test_base_types import (
AccessList,
Address,
Bloom,
BLSPublicKey,
BLSSignature,
Bytes,
Hash,
HeaderNonce,
TestPrivateKey,
ZeroPaddedHexNumber,
to_json,
)
from ethereum_test_exceptions import BlockException, EngineAPIError, TransactionException
from ethereum_test_forks import Prague
from ethereum_test_types import (
EOA,
AuthorizationTuple,
ConsolidationRequest,
DepositRequest,
Requests,
Transaction,
Withdrawal,
WithdrawalRequest,
)
from ..blockchain import (
EngineNewPayloadParameters,
FixtureBlockBase,
FixtureEngineNewPayload,
FixtureExecutionPayload,
FixtureHeader,
FixtureTransaction,
InvalidFixtureBlock,
)
fixture_header_ones = FixtureHeader(
parent_hash=Hash(1),
ommers_hash=Hash(1),
fee_recipient=Address(1),
state_root=Hash(1),
transactions_trie=Hash(1),
receipts_root=Hash(1),
logs_bloom=Bloom(1),
difficulty=1,
number=1,
gas_limit=1,
gas_used=1,
timestamp=1,
extra_data=Bytes([1]),
prev_randao=Hash(1),
nonce=HeaderNonce(1),
base_fee_per_gas=1,
withdrawals_root=Hash(1),
blob_gas_used=1,
excess_blob_gas=1,
# hash=Hash(1),
)
@pytest.mark.parametrize(
["can_be_deserialized", "model_instance", "json_repr"],
[
pytest.param(
True,
FixtureTransaction.from_transaction(Transaction().with_signature_and_sender()),
{
"type": "0x00",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"gasPrice": "0x0a",
"v": "0x26",
"r": "0xcc61d852649c34cc0b71803115f38036ace257d2914f087bf885e6806a664fbd",
"s": "0x2020cb35f5d7731ab540d62614503a7f2344301a86342f67daf011c1341551ff",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_0_default_values",
),
pytest.param(
True,
FixtureTransaction.from_transaction(Transaction(to=None).with_signature_and_sender()),
{
"type": "0x00",
"chainId": "0x01",
"to": "",
"nonce": "0x00",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"gasPrice": "0x0a",
"v": "0x25",
"r": "0x1cfe2cbb0c3577f74d9ae192a7f1ee2d670fe806a040f427af9cb768be3d07ce",
"s": "0x0cbe2d029f52dbf93ade486625bed0603945d2c7358b31de99fe8786c00f13da",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_0_contract_creation",
),
pytest.param(
True,
FixtureTransaction.from_transaction(Transaction(ty=1).with_signature_and_sender()),
{
"type": "0x01",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"gasPrice": "0x0a",
"accessList": [],
"v": "0x01",
"r": "0x58b4ddaa529492d32b6bc8327eb8ee0bc8b535c3bfc0f4f1db3d7c16b51d1851",
"s": "0x5ef19167661b14d06dfc785bf62693e6f9e5a44e7c11e0320efed27b27294970",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_1_default_values",
),
pytest.param(
True,
FixtureTransaction.from_transaction(
Transaction(ty=2, max_fee_per_gas=7).with_signature_and_sender()
),
{
"type": "0x02",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"maxPriorityFeePerGas": "0x00",
"maxFeePerGas": "0x07",
"accessList": [],
"v": "0x00",
"r": "0x33fc39081d01f8e7f0ce5426d4a00a7b07c2edea064d24a8cac8e4b1f0c08298",
"s": "0x4635e1c45238697db38e37070d4fce27fb5684f9dec4046466ea42a9834bad0a",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_2_default_values",
),
pytest.param(
True,
FixtureTransaction.from_transaction(
Transaction(
ty=3,
max_fee_per_gas=7,
max_fee_per_blob_gas=1,
blob_versioned_hashes=[],
).with_signature_and_sender()
),
{
"type": "0x03",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"maxPriorityFeePerGas": "0x00",
"maxFeePerGas": "0x07",
"maxFeePerBlobGas": "0x01",
"accessList": [],
"blobVersionedHashes": [],
"v": "0x01",
"r": "0x8978475a00bf155bf5687dfda89c2df55ef6c341cdfd689aeaa6c519569a530a",
"s": "0x66fc34935cdd191441a12a2e7b1f224cb40b928afb9bc89c8ddb2b78c19342cc",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_3_default_values",
),
pytest.param(
True,
FixtureTransaction.from_transaction(
Transaction(
ty=4,
max_fee_per_gas=7,
authorization_list=[
AuthorizationTuple(
chain_id=1,
address=2,
nonce=3,
signer=EOA(key=TestPrivateKey),
)
],
).with_signature_and_sender()
),
{
"type": "0x04",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"maxPriorityFeePerGas": "0x00",
"maxFeePerGas": "0x07",
"accessList": [],
"authorizationList": [
{
"chainId": "0x01",
"address": Address(2).hex(),
"nonce": "0x03",
"v": "0x00",
"r": "0xda29c3bd0304ae475b06d1a11344e0b6d75590f2c23138c9507f4b5bedde3c79",
"s": "0x3e1fb143ae0460373d567cf901645757b321e42c423a53b2d46ed13c9ef0a9ab",
"signer": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"yParity": "0x00",
}
],
"v": "0x01",
"r": "0xe7da7f244c95cea73ac6316971139ac0eb8fad455d9a25e1c134d7a157c38ff9",
"s": "0x1939185d2e2a2b3375183e42b5755d695efbd72e186cf9a3e6958a3fb84cc709",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_type_4",
),
pytest.param(
True,
FixtureTransaction.from_transaction(
Transaction(
to=0x1234,
data=b"\x01\x00",
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
max_priority_fee_per_gas=10,
max_fee_per_gas=20,
max_fee_per_blob_gas=30,
blob_versioned_hashes=[0, 1],
).with_signature_and_sender()
),
{
"type": "0x03",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x0000000000000000000000000000000000001234",
"accessList": [
{
"address": "0x0000000000000000000000000000000000001234",
"storageKeys": [
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000001",
],
}
],
"value": "0x00",
"data": "0x0100",
"gasLimit": "0x5208",
"maxPriorityFeePerGas": "0x0a",
"maxFeePerGas": "0x14",
"maxFeePerBlobGas": "0x1e",
"blobVersionedHashes": [
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000001",
],
"v": "0x00",
"r": "0x418bb557c43262375f80556cb09dac5e67396acf0eaaf2c2540523d1ce54b280",
"s": "0x4fa36090ea68a1138043d943ced123c0b0807d82ff3342a6977cbc09230e927c",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
},
id="fixture_transaction_3",
),
pytest.param(
True,
FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
),
{
"parentHash": Hash(0).hex(),
"uncleHash": Hash(1).hex(),
"coinbase": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"transactionsTrie": Hash(4).hex(),
"receiptTrie": Hash(5).hex(),
"bloom": Bloom(6).hex(),
"difficulty": ZeroPaddedHexNumber(7).hex(),
"number": ZeroPaddedHexNumber(8).hex(),
"gasLimit": ZeroPaddedHexNumber(9).hex(),
"gasUsed": ZeroPaddedHexNumber(10).hex(),
"timestamp": ZeroPaddedHexNumber(11).hex(),
"extraData": Bytes([12]).hex(),
"mixHash": Hash(13).hex(),
"nonce": HeaderNonce(14).hex(),
"hash": "0x1dc087517148c2d6a1dd1ea5de107bc5f728414f9d210ed18286d305abe6ba5e",
},
id="fixture_header_1",
),
pytest.param(
True,
FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
),
{
"parentHash": Hash(0).hex(),
"uncleHash": Hash(1).hex(),
"coinbase": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"transactionsTrie": Hash(4).hex(),
"receiptTrie": Hash(5).hex(),
"bloom": Bloom(6).hex(),
"difficulty": ZeroPaddedHexNumber(7).hex(),
"number": ZeroPaddedHexNumber(8).hex(),
"gasLimit": ZeroPaddedHexNumber(9).hex(),
"gasUsed": ZeroPaddedHexNumber(10).hex(),
"timestamp": ZeroPaddedHexNumber(11).hex(),
"extraData": Bytes([12]).hex(),
"mixHash": Hash(13).hex(),
"nonce": HeaderNonce(14).hex(),
"baseFeePerGas": ZeroPaddedHexNumber(15).hex(),
"withdrawalsRoot": Hash(16).hex(),
"blobGasUsed": ZeroPaddedHexNumber(17).hex(),
"excessBlobGas": ZeroPaddedHexNumber(18).hex(),
"hash": "0xd90115b7fde329f64335763a446af150ab67e639281dccdb07a007d18bb80211",
},
id="fixture_header_2",
),
pytest.param(
True,
FixtureBlockBase(
header=FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
),
transactions=[
FixtureTransaction.from_transaction(Transaction().with_signature_and_sender())
],
),
{
"blockHeader": {
"parentHash": Hash(0).hex(),
"uncleHash": Hash(1).hex(),
"coinbase": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"transactionsTrie": Hash(4).hex(),
"receiptTrie": Hash(5).hex(),
"bloom": Bloom(6).hex(),
"difficulty": ZeroPaddedHexNumber(7).hex(),
"number": ZeroPaddedHexNumber(8).hex(),
"gasLimit": ZeroPaddedHexNumber(9).hex(),
"gasUsed": ZeroPaddedHexNumber(10).hex(),
"timestamp": ZeroPaddedHexNumber(11).hex(),
"extraData": Bytes([12]).hex(),
"mixHash": Hash(13).hex(),
"nonce": HeaderNonce(14).hex(),
"baseFeePerGas": ZeroPaddedHexNumber(15).hex(),
"withdrawalsRoot": Hash(16).hex(),
"blobGasUsed": ZeroPaddedHexNumber(17).hex(),
"excessBlobGas": ZeroPaddedHexNumber(18).hex(),
"hash": "0xd90115b7fde329f64335763a446af150ab67e639281dccdb07a007d18bb80211",
},
"blocknumber": "8",
"uncleHeaders": [],
"transactions": [
{
"type": "0x00",
"chainId": "0x01",
"nonce": "0x00",
"to": "0x00000000000000000000000000000000000000aa",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"gasPrice": "0x0a",
"v": "0x26",
"r": "0xcc61d852649c34cc0b71803115f38036ace257d2914f087bf885e6806a664fbd",
"s": "0x2020cb35f5d7731ab540d62614503a7f2344301a86342f67daf011c1341551ff",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
}
],
},
id="fixture_block_1",
),
pytest.param(
True,
FixtureBlockBase(
header=FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
),
transactions=[
FixtureTransaction.from_transaction(
Transaction(to=None).with_signature_and_sender()
)
],
),
{
"blockHeader": {
"parentHash": Hash(0).hex(),
"uncleHash": Hash(1).hex(),
"coinbase": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"transactionsTrie": Hash(4).hex(),
"receiptTrie": Hash(5).hex(),
"bloom": Bloom(6).hex(),
"difficulty": ZeroPaddedHexNumber(7).hex(),
"number": ZeroPaddedHexNumber(8).hex(),
"gasLimit": ZeroPaddedHexNumber(9).hex(),
"gasUsed": ZeroPaddedHexNumber(10).hex(),
"timestamp": ZeroPaddedHexNumber(11).hex(),
"extraData": Bytes([12]).hex(),
"mixHash": Hash(13).hex(),
"nonce": HeaderNonce(14).hex(),
"baseFeePerGas": ZeroPaddedHexNumber(15).hex(),
"withdrawalsRoot": Hash(16).hex(),
"blobGasUsed": ZeroPaddedHexNumber(17).hex(),
"excessBlobGas": ZeroPaddedHexNumber(18).hex(),
"hash": "0xd90115b7fde329f64335763a446af150ab67e639281dccdb07a007d18bb80211",
},
"blocknumber": "8",
"uncleHeaders": [],
"transactions": [
{
"type": "0x00",
"chainId": "0x01",
"to": "",
"nonce": "0x00",
"value": "0x00",
"data": "0x",
"gasLimit": "0x5208",
"gasPrice": "0x0a",
"v": "0x25",
"r": "0x1cfe2cbb0c3577f74d9ae192a7f1ee2d670fe806a040f427af9cb768be3d07ce",
"s": "0x0cbe2d029f52dbf93ade486625bed0603945d2c7358b31de99fe8786c00f13da",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
}
],
},
id="fixture_block_2",
),
pytest.param(
True,
InvalidFixtureBlock(
rlp="0x00",
expect_exception=BlockException.RLP_STRUCTURES_ENCODING,
),
{
"rlp": "0x00",
"expectException": "BlockException.RLP_STRUCTURES_ENCODING",
},
id="invalid_fixture_block_1",
),
pytest.param(
True,
InvalidFixtureBlock(
rlp="0x00",
expect_exception=TransactionException.INTRINSIC_GAS_TOO_LOW,
),
{
"rlp": "0x00",
"expectException": "TransactionException.INTRINSIC_GAS_TOO_LOW",
},
id="invalid_fixture_block_2",
),
pytest.param(
False, # Can not be deserialized: A single expect_exception str
# will not be deserialized as a list and therefore will not
# match the model_instance definition.
InvalidFixtureBlock(
rlp="0x00",
expect_exception=[TransactionException.INTRINSIC_GAS_TOO_LOW],
),
{
"rlp": "0x00",
"expectException": "TransactionException.INTRINSIC_GAS_TOO_LOW",
},
id="invalid_fixture_block_3",
),
pytest.param(
True,
InvalidFixtureBlock(
rlp="0x00",
expect_exception=[
BlockException.RLP_STRUCTURES_ENCODING,
TransactionException.INTRINSIC_GAS_TOO_LOW,
],
),
{
"rlp": "0x00",
"expectException": "BlockException.RLP_STRUCTURES_ENCODING|"
"TransactionException.INTRINSIC_GAS_TOO_LOW",
},
id="invalid_fixture_block_4",
),
pytest.param(
True,
FixtureExecutionPayload.from_fixture_header(
header=FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
),
transactions=[
Transaction(
to=0x1234,
data=b"\x01\x00",
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
max_priority_fee_per_gas=10,
max_fee_per_gas=20,
max_fee_per_blob_gas=30,
blob_versioned_hashes=[0, 1],
).with_signature_and_sender(),
],
withdrawals=[Withdrawal(index=0, validator_index=1, address=0x1234, amount=2)],
),
{
"parentHash": Hash(0).hex(),
"feeRecipient": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"receiptsRoot": Hash(5).hex(),
"logsBloom": Bloom(6).hex(),
"blockNumber": hex(8),
"gasLimit": hex(9),
"gasUsed": hex(10),
"timestamp": hex(11),
"extraData": Bytes([12]).hex(),
"prevRandao": Hash(13).hex(),
"baseFeePerGas": hex(15),
"blobGasUsed": hex(17),
"excessBlobGas": hex(18),
"blockHash": "0xd90115b7fde329f64335763a446af150ab67e639281dccdb07a007d18bb80211",
"transactions": [
Transaction(
to=0x1234,
data=b"\x01\x00",
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
max_priority_fee_per_gas=10,
max_fee_per_gas=20,
max_fee_per_blob_gas=30,
blob_versioned_hashes=[0, 1],
)
.with_signature_and_sender()
.rlp()
.hex()
],
"withdrawals": [
to_json(Withdrawal(index=0, validator_index=1, address=0x1234, amount=2))
],
},
id="fixture_execution_payload_1",
),
pytest.param(
True,
FixtureEngineNewPayload.from_fixture_header(
fork=Prague,
header=FixtureHeader(
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
parent_beacon_block_root=19,
requests_hash=20,
),
transactions=[
Transaction(
to=0x1234,
data=b"\x01\x00",
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
max_priority_fee_per_gas=10,
max_fee_per_gas=20,
max_fee_per_blob_gas=30,
blob_versioned_hashes=[0, 1],
).with_signature_and_sender(),
],
withdrawals=[Withdrawal(index=0, validator_index=1, address=0x1234, amount=2)],
requests=Requests(
DepositRequest(
pubkey=BLSPublicKey(0),
withdrawal_credentials=Hash(1),
amount=2,
signature=BLSSignature(3),
index=4,
),
WithdrawalRequest(
source_address=Address(0),
validator_pubkey=BLSPublicKey(1),
amount=2,
),
ConsolidationRequest(
source_address=Address(0),
source_pubkey=BLSPublicKey(1),
target_pubkey=BLSPublicKey(2),
),
).requests_list,
validation_error=[
BlockException.INCORRECT_BLOCK_FORMAT,
TransactionException.INTRINSIC_GAS_TOO_LOW,
],
error_code=EngineAPIError.InvalidRequest,
),
{
"params": [
{
"parentHash": Hash(0).hex(),
"feeRecipient": Address(2).hex(),
"stateRoot": Hash(3).hex(),
"receiptsRoot": Hash(5).hex(),
"logsBloom": Bloom(6).hex(),
"blockNumber": hex(8),
"gasLimit": hex(9),
"gasUsed": hex(10),
"timestamp": hex(11),
"extraData": Bytes([12]).hex(),
"prevRandao": Hash(13).hex(),
"baseFeePerGas": hex(15),
"blobGasUsed": hex(17),
"excessBlobGas": hex(18),
"blockHash": (
"0x93bd662d8a80a1f54bffc6d140b83d6cda233209998809f9540be51178b4d0b6"
),
"transactions": [
Transaction(
to=0x1234,
data=b"\x01\x00",
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
max_priority_fee_per_gas=10,
max_fee_per_gas=20,
max_fee_per_blob_gas=30,
blob_versioned_hashes=[0, 1],
)
.with_signature_and_sender()
.rlp()
.hex()
],
"withdrawals": [
to_json(
Withdrawal(
index=0,
validator_index=1,
address=0x1234,
amount=2,
)
)
],
},
[
str(Hash(0)),
str(Hash(1)),
],
str(Hash(19)),
[
Bytes(r).hex()
for r in Requests(
DepositRequest(
pubkey=BLSPublicKey(0),
withdrawal_credentials=Hash(1),
amount=2,
signature=BLSSignature(3),
index=4,
),
WithdrawalRequest(
source_address=Address(0),
validator_pubkey=BLSPublicKey(1),
amount=2,
),
ConsolidationRequest(
source_address=Address(0),
source_pubkey=BLSPublicKey(1),
target_pubkey=BLSPublicKey(2),
),
).requests_list
],
],
"forkchoiceUpdatedVersion": "3",
"newPayloadVersion": "4",
"validationError": "BlockException.INCORRECT_BLOCK_FORMAT"
"|TransactionException.INTRINSIC_GAS_TOO_LOW",
"errorCode": "-32600",
},
id="fixture_engine_new_payload_1",
),
pytest.param(
True,
FixtureEngineNewPayload.from_fixture_header(
fork=Prague,
header=FixtureHeader(
fork=Prague,
parent_hash=Hash(0),
ommers_hash=Hash(1),
fee_recipient=Address(2),
state_root=Hash(3),
transactions_trie=Hash(4),
receipts_root=Hash(5),
logs_bloom=Bloom(6),
difficulty=7,
number=8,
gas_limit=9,
gas_used=10,
timestamp=11,
extra_data=Bytes([12]),
prev_randao=Hash(13),
nonce=HeaderNonce(14),
base_fee_per_gas=15,
withdrawals_root=Hash(16),
blob_gas_used=17,
excess_blob_gas=18,
parent_beacon_block_root=19,
requests_hash=20,
),
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/tests/test_eof.py | src/ethereum_test_fixtures/tests/test_eof.py | """Test the EOF fixture types."""
from typing import Any, Dict
import pytest
from ethereum_test_base_types import Bytes, to_json
from ethereum_test_exceptions import EOFException
from ..eof import ContainerKind, EOFFixture, Result, Vector
@pytest.mark.parametrize(
["can_be_deserialized", "model_instance", "json_repr"],
[
pytest.param(
True,
EOFFixture(
vectors={
1: Vector(
code=Bytes(b"\x00"),
container_kind=ContainerKind.INITCODE,
results={
"Paris": Result(
exception=None,
valid=True,
),
},
),
}
),
{
"vectors": {
"1": {
"code": "0x00",
"containerKind": "INITCODE",
"results": {
"Paris": {
"result": True,
},
},
},
},
},
id="eof_fixture",
),
pytest.param(
True,
EOFFixture(
vectors={
1: Vector(
code=Bytes(b"\x00"),
container_kind=ContainerKind.RUNTIME,
results={
"Paris": Result(
exception=EOFException.INVALID_MAGIC,
valid=False,
),
},
),
}
),
{
"vectors": {
"1": {
"code": "0x00",
"containerKind": "RUNTIME",
"results": {
"Paris": {
"exception": "EOFException.INVALID_MAGIC",
"result": False,
},
},
},
},
},
id="eof_fixture_with_exception",
),
],
)
class TestPydanticModelConversion:
"""Test that Pydantic models are converted to and from JSON correctly."""
def test_json_serialization(
self, can_be_deserialized: bool, model_instance: Any, json_repr: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
del can_be_deserialized
serialized = to_json(model_instance)
serialized.pop("_info")
assert serialized == json_repr
def test_json_deserialization(
self, can_be_deserialized: bool, model_instance: Any, json_repr: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
if not can_be_deserialized:
pytest.skip(reason="The model instance in this case can not be deserialized")
model_type = type(model_instance)
assert model_type(**json_repr) == model_instance
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/tests/test_base.py | src/ethereum_test_fixtures/tests/test_base.py | """Test cases for the ethereum_test_fixtures.base module."""
import pytest
from ..base import BaseFixture
from ..file import Fixtures
from ..state import FixtureEnvironment, FixtureTransaction, StateFixture
from ..transaction import FixtureResult, TransactionFixture
def test_json_dict() -> None:
"""Test that the json_dict property does not include the info field."""
fixture = TransactionFixture(
txbytes="0x1234",
result={"Paris": FixtureResult(intrinsic_gas=0)},
)
assert "_info" not in fixture.json_dict, "json_dict should exclude the 'info' field"
@pytest.mark.parametrize(
"fixture",
[
pytest.param(
StateFixture(
env=FixtureEnvironment(),
transaction=FixtureTransaction(
nonce=0,
gas_limit=[0],
value=[0],
data=[b""],
),
pre={},
post={},
config={},
),
id="StateFixture",
),
pytest.param(
TransactionFixture(
transaction="0x1234",
result={"Paris": FixtureResult(intrinsic_gas=0)},
),
id="TransactionFixture",
),
],
)
def test_base_fixtures_parsing(fixture: BaseFixture) -> None:
"""Test that the Fixtures generic model can validate any fixture format."""
fixture.fill_info(
"t8n-version",
"test_case_description",
fixture_source_url="fixture_source_url",
ref_spec=None,
_info_metadata={},
)
json_dump = fixture.json_dict_with_info()
assert json_dump is not None
Fixtures.model_validate({"fixture": json_dump})
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/tests/test_state.py | src/ethereum_test_fixtures/tests/test_state.py | """Test state spec types."""
from typing import Any, Dict
import pytest
from ethereum_test_base_types import Bytes, Hash, to_json
from ethereum_test_exceptions import TransactionException
from ..state import FixtureForkPost
@pytest.mark.parametrize(
["can_be_deserialized", "model_instance", "json"],
[
pytest.param(
True,
FixtureForkPost(
state_root=0,
logs_hash=1,
tx_bytes="0x02",
state={},
),
{
"hash": Hash(0).hex(),
"logs": Hash(1).hex(),
"txbytes": Bytes(b"\x02").hex(),
"indexes": {"data": 0, "gas": 0, "value": 0},
"state": {},
},
id="state_fixture_fork_post",
),
pytest.param(
True,
FixtureForkPost(
state_root=0,
logs_hash=1,
tx_bytes="0x02",
expect_exception=TransactionException.INITCODE_SIZE_EXCEEDED,
state={},
),
{
"hash": Hash(0).hex(),
"logs": Hash(1).hex(),
"txbytes": Bytes(b"\x02").hex(),
"expectException": "TransactionException.INITCODE_SIZE_EXCEEDED",
"indexes": {"data": 0, "gas": 0, "value": 0},
"state": {},
},
id="state_fixture_fork_post_exception",
),
pytest.param(
False, # Can not be deserialized: A single expect_exception str
# will not be deserialized as a list and therefore will not
# match the model_instance definition.
FixtureForkPost(
state_root=0,
logs_hash=1,
tx_bytes="0x02",
expect_exception=[TransactionException.INITCODE_SIZE_EXCEEDED],
state={},
),
{
"hash": Hash(0).hex(),
"logs": Hash(1).hex(),
"txbytes": Bytes(b"\x02").hex(),
"expectException": "TransactionException.INITCODE_SIZE_EXCEEDED",
"indexes": {"data": 0, "gas": 0, "value": 0},
"state": {},
},
id="state_fixture_fork_post_exception_list_1",
),
pytest.param(
True,
FixtureForkPost(
state_root=0,
logs_hash=1,
tx_bytes="0x02",
expect_exception=[
TransactionException.INITCODE_SIZE_EXCEEDED,
TransactionException.INSUFFICIENT_ACCOUNT_FUNDS,
],
state={},
),
{
"hash": Hash(0).hex(),
"logs": Hash(1).hex(),
"txbytes": Bytes(b"\x02").hex(),
"expectException": "TransactionException.INITCODE_SIZE_EXCEEDED|"
"TransactionException.INSUFFICIENT_ACCOUNT_FUNDS",
"indexes": {"data": 0, "gas": 0, "value": 0},
"state": {},
},
id="state_fixture_fork_post_exception_list_2",
),
],
)
class TestPydanticModelConversion:
"""Test that Pydantic models are converted to and from JSON correctly."""
def test_json_serialization(
self, can_be_deserialized: bool, model_instance: Any, json: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
del can_be_deserialized
assert to_json(model_instance) == json
def test_json_deserialization(
self, can_be_deserialized: bool, model_instance: Any, json: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
if not can_be_deserialized:
pytest.skip(reason="The model instance in this case can not be deserialized")
model_type = type(model_instance)
assert model_type(**json) == model_instance
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_fixtures/tests/__init__.py | src/ethereum_test_fixtures/tests/__init__.py | """Tests for the ethereum_test_fixtures package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/serialization.py | src/ethereum_test_base_types/serialization.py | """Ethereum test types for serialization and encoding."""
from typing import Any, ClassVar, List
import ethereum_rlp as eth_rlp
from ethereum_types.numeric import Uint
from ethereum_test_base_types import Bytes
def to_serializable_element(v: Any) -> Any:
"""Return a serializable element that can be passed to `eth_rlp.encode`."""
if isinstance(v, int):
return Uint(v)
elif isinstance(v, bytes):
return v
elif isinstance(v, list):
return [to_serializable_element(v) for v in v]
elif isinstance(v, RLPSerializable):
if v.signable:
v.sign()
return v.to_list(signing=False)
elif v is None:
return b""
raise Exception(f"Unable to serialize element {v} of type {type(v)}.")
class RLPSerializable:
"""Class that adds RLP serialization to another class."""
rlp_override: Bytes | None = None
signable: ClassVar[bool] = False
rlp_fields: ClassVar[List[str]]
rlp_signing_fields: ClassVar[List[str]]
def get_rlp_fields(self) -> List[str]:
"""
Return an ordered list of field names to be included in RLP
serialization.
Function can be overridden to customize the logic to return the fields.
By default, rlp_fields class variable is used.
The list can be nested list up to one extra level to represent nested
fields.
"""
return self.rlp_fields
def get_rlp_signing_fields(self) -> List[str]:
"""
Return an ordered list of field names to be included in the RLP
serialization of the object signature.
Function can be overridden to customize the logic to return the fields.
By default, rlp_signing_fields class variable is used.
The list can be nested list up to one extra level to represent nested
fields.
"""
return self.rlp_signing_fields
def get_rlp_prefix(self) -> bytes:
"""
Return a prefix that has to be appended to the serialized object.
By default, an empty string is returned.
"""
return b""
def get_rlp_signing_prefix(self) -> bytes:
"""
Return a prefix that has to be appended to the serialized signing
object.
By default, an empty string is returned.
"""
return b""
def sign(self) -> None:
"""Sign the current object for further serialization."""
raise NotImplementedError(f'Object "{self.__class__.__name__}" cannot be signed.')
def to_list_from_fields(self, fields: List[str]) -> List[Any]:
"""
Return an RLP serializable list that can be passed to `eth_rlp.encode`.
Can be for signing purposes or the entire object.
"""
values_list: List[Any] = []
for field in fields:
assert isinstance(field, str), (
f'Unable to rlp serialize field "{field}" '
f'in object type "{self.__class__.__name__}"'
)
assert hasattr(self, field), (
f'Unable to rlp serialize field "{field}" '
f'in object type "{self.__class__.__name__}"'
)
try:
values_list.append(to_serializable_element(getattr(self, field)))
except Exception as e:
raise Exception(
f'Unable to rlp serialize field "{field}" '
f'in object type "{self.__class__.__name__}"'
) from e
return values_list
def to_list(self, signing: bool = False) -> List[Any]:
"""
Return an RLP serializable list that can be passed to `eth_rlp.encode`.
Can be for signing purposes or the entire object.
"""
field_list: List[str]
if signing:
if not self.signable:
raise Exception(f'Object "{self.__class__.__name__}" does not support signing')
field_list = self.get_rlp_signing_fields()
else:
if self.signable:
# Automatically sign signable objects during full
# serialization: Ensures nested objects have valid signatures
# in the final RLP.
self.sign()
field_list = self.get_rlp_fields()
return self.to_list_from_fields(field_list)
def rlp_signing_bytes(self) -> Bytes:
"""Return the signing serialized envelope used for signing."""
return Bytes(self.get_rlp_signing_prefix() + eth_rlp.encode(self.to_list(signing=True)))
def rlp(self) -> Bytes:
"""Return the serialized object."""
if self.rlp_override is not None:
return self.rlp_override
return Bytes(self.get_rlp_prefix() + eth_rlp.encode(self.to_list(signing=False)))
class SignableRLPSerializable(RLPSerializable):
"""
Class that adds RLP serialization to another class with signing support.
"""
signable: ClassVar[bool] = True
def sign(self) -> None:
"""Sign the current object for further serialization."""
raise NotImplementedError(f'Object "{self.__class__.__name__}" needs to implement `sign`.')
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/constants.py | src/ethereum_test_base_types/constants.py | """Common values used in Ethereum tests."""
from .base_types import Address
TestAddress = Address("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b")
TestAddress2 = Address("0x8a0a19589531694250d570040a0c4b74576919b8")
TestPrivateKey = 0x45A915E4D060149EB4365960E6A7A45F334393093061116B197E3240065FF2D8
TestPrivateKey2 = 0x9E7645D0CFD9C3A04EB7A9DB59A4EB7D359F2E75C9164A9D6B9A7D54E1B6A36F
AddrAA = Address(0xAA)
AddrBB = Address(0xBB)
EmptyBloom = bytes([0] * 256)
EmptyOmmersRoot = bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
EmptyTrieRoot = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
EmptyHash = bytes([0] * 32)
EmptyNonce = bytes([0] * 8)
ZeroAddress = Address(0x00)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/mixins.py | src/ethereum_test_base_types/mixins.py | """Provides various mixins for Pydantic models."""
from typing import Any, List, Literal, Tuple
from pydantic import BaseModel
class ModelCustomizationsMixin:
"""
A mixin that customizes the behavior of pydantic models. Any pydantic
configuration override that must apply to all models should be placed here.
This mixin is applied to both `EthereumTestBaseModel` and
`EthereumTestRootModel`.
"""
def serialize(
self,
mode: Literal["json", "python"],
by_alias: bool,
exclude_none: bool = True,
) -> dict[str, Any]:
"""
Serialize the model to the specified format with the given parameters.
Args:
mode: The mode of serialization. If mode is 'json', the output
will only contain JSON serializable types. If mode is
'python', the output may contain non-JSON-serializable
Python objects.
by_alias: Whether to use aliases for field names.
exclude_none: Whether to exclude fields with None values,
default is True.
Returns:
dict[str, Any]: The serialized representation of the model.
"""
if not hasattr(self, "model_dump"):
raise NotImplementedError(
f"{self.__class__.__name__} does not have 'model_dump' method."
"Are you sure you are using a Pydantic model?"
)
return self.model_dump(mode=mode, by_alias=by_alias, exclude_none=exclude_none)
def __repr_args__(self) -> Any:
"""
Generate a list of attribute-value pairs for the object representation.
This method serializes the model, retrieves the attribute names, and
constructs a list of tuples containing attribute names and their
corresponding values. Only attributes with non-None values are included
in the list.
This method is used by the __repr__ method to generate the object
representation, and is used by `gentest` module to generate the test
cases.
See:
https://pydantic-docs.helpmanual.io/usage/models/
#custom-repr
and
https://github.com/ethereum/execution-spec-tests/pull/
901#issuecomment-24432968 35
Returns:
List[Tuple[str, Any]]: A list of tuples where each tuple
contains an attribute name and its
corresponding non-None value.
"""
attrs_names = self.serialize(mode="python", by_alias=False).keys()
attrs = ((s, getattr(self, s)) for s in attrs_names)
# Convert field values based on their type. This ensures consistency
# between JSON and Python object representations. Should a custom
# `__repr__` be needed for a specific type, it can added in the match
# statement below. Otherwise, the default string representation is
# used.
repr_attrs: List[Tuple[str, Any]] = []
for a, v in attrs:
match v:
# Note: The `None` case handles an edge case with transactions
# see: https://github.com/ethereum/execution-spec-tests/pull/
# 901#discussion_r1828491918
case list() | dict() | BaseModel() | None:
repr_attrs.append((a, v))
case _:
repr_attrs.append((a, str(v)))
return repr_attrs
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/composite_types.py | src/ethereum_test_base_types/composite_types.py | """Base composite types for Ethereum test cases."""
from dataclasses import dataclass
from typing import Any, ClassVar, Dict, ItemsView, Iterator, List, SupportsBytes, Type, TypeAlias
from pydantic import Field, PrivateAttr, TypeAdapter
from .base_types import Address, Bytes, Hash, HashInt, HexNumber, ZeroPaddedHexNumber
from .conversions import BytesConvertible, NumberConvertible
from .pydantic import CamelModel, EthereumTestRootModel
from .serialization import RLPSerializable
StorageKeyValueTypeConvertible = NumberConvertible
StorageKeyValueType = HashInt
StorageKeyValueTypeAdapter = TypeAdapter(StorageKeyValueType)
StorageRootType = Dict[NumberConvertible, NumberConvertible]
class Storage(EthereumTestRootModel[Dict[StorageKeyValueType, StorageKeyValueType]]):
"""
Definition of contract storage in the `pre` or `post` state of a test.
This model accepts a dictionary with keys and values as any of: str, int,
bytes, or any type that supports conversion to bytes, and automatically
casts them to `HashInt`.
"""
# internal storage is maintained as a dict with HashInt keys and values.
root: Dict[StorageKeyValueType, StorageKeyValueType] = Field(default_factory=dict)
_current_slot: int = PrivateAttr(0)
_hint_map: Dict[StorageKeyValueType, str] = PrivateAttr(default_factory=dict)
_any_map: Dict[StorageKeyValueType, bool] = PrivateAttr(default_factory=dict)
StorageDictType: ClassVar[TypeAlias] = Dict[
str | int | bytes | SupportsBytes, str | int | bytes | SupportsBytes
]
"""
Dictionary type to be used when defining an input to initialize a storage.
"""
@dataclass(kw_only=True)
class InvalidTypeError(Exception):
"""
Invalid type used when describing test's expected storage key or value.
"""
key_or_value: Any
def __init__(self, key_or_value: Any, *args: Any) -> None:
"""Initialize the exception with the invalid type."""
super().__init__(args)
self.key_or_value = key_or_value
def __str__(self) -> str:
"""Print exception string."""
return f"invalid type for key/value: {self.key_or_value}"
@dataclass(kw_only=True)
class InvalidValueError(Exception):
"""
Invalid value used when describing test's expected storage key or
value.
"""
key_or_value: Any
def __init__(self, key_or_value: Any, *args: Any) -> None:
"""Initialize the exception with the invalid value."""
super().__init__(args)
self.key_or_value = key_or_value
def __str__(self) -> str:
"""Print exception string."""
return f"invalid value for key/value: {self.key_or_value}"
@dataclass(kw_only=True)
class MissingKeyError(Exception):
"""Test expected to find a storage key set but key was missing."""
key: int
def __init__(self, key: int, *args: Any) -> None:
"""Initialize the exception with the missing key."""
super().__init__(args)
self.key = key
def __str__(self) -> str:
"""Print exception string."""
return "key {0} not found in storage".format(Hash(self.key))
@dataclass(kw_only=True)
class KeyValueMismatchError(Exception):
"""
Test expected a certain value in a storage key but value found was
different.
"""
address: Address
key: int
want: int
got: int
hint: str
def __init__(
self, address: Address, key: int, want: int, got: int, hint: str = "", *args: Any
) -> None:
"""
Initialize the exception with the address, key, wanted and got
values.
"""
super().__init__(args)
self.address = address
self.key = key
self.want = want
self.got = got
self.hint = hint
def __str__(self) -> str:
"""Print exception string."""
label_str = ""
if self.address.label is not None:
label_str = f" ({self.address.label})"
return (
f"incorrect value in address {self.address}{label_str} for "
+ f"key {Hash(self.key)}{f' ({self.hint})' if self.hint else ''}:"
+ f" want {HexNumber(self.want)} (dec:{int(self.want)}),"
+ f" got {HexNumber(self.got)} (dec:{int(self.got)})"
)
def __contains__(self, key: StorageKeyValueTypeConvertible | StorageKeyValueType) -> bool:
"""Check for an item in the storage."""
return StorageKeyValueTypeAdapter.validate_python(key) in self.root
def __getitem__(
self, key: StorageKeyValueTypeConvertible | StorageKeyValueType
) -> StorageKeyValueType:
"""Return an item from the storage."""
return self.root[StorageKeyValueTypeAdapter.validate_python(key)]
def __setitem__(
self,
key: StorageKeyValueTypeConvertible | StorageKeyValueType,
value: StorageKeyValueTypeConvertible | StorageKeyValueType,
) -> None:
"""Set an item in the storage."""
self.root[StorageKeyValueTypeAdapter.validate_python(key)] = (
StorageKeyValueTypeAdapter.validate_python(value)
)
def __delitem__(self, key: StorageKeyValueTypeConvertible | StorageKeyValueType) -> None:
"""Delete an item from the storage."""
del self.root[StorageKeyValueTypeAdapter.validate_python(key)]
def __iter__(self) -> Iterator[StorageKeyValueType]: # type: ignore [override]
"""Return an iterator over the storage."""
return iter(self.root)
def __eq__(self, other: object) -> bool:
"""Return True if both storages are equal."""
if not isinstance(other, Storage):
return False
return self.root == other.root
def __ne__(self, other: object) -> bool:
"""Return True if both storages are not equal."""
if not isinstance(other, Storage):
return False
return self.root != other.root
def __bool__(self) -> bool:
"""Return True if the storage is not empty."""
return any(v for v in self.root.values())
def __add__(self, other: "Storage") -> "Storage":
"""Return a new storage that is the sum of two storages."""
return Storage({**self.root, **other.root})
def keys(self) -> set[StorageKeyValueType]:
"""Return the keys of the storage."""
return set(self.root.keys())
def set_next_slot(self, slot: int) -> "Storage":
"""Set the next slot to be used by `store_next`."""
self._current_slot = slot
return self
def items(self) -> ItemsView[StorageKeyValueType, StorageKeyValueType]:
"""Return the items of the storage."""
return self.root.items()
def set_expect_any(self, key: StorageKeyValueTypeConvertible | StorageKeyValueType) -> None:
"""
Mark key to be able to have any expected value when comparing storages.
"""
self._any_map[StorageKeyValueTypeAdapter.validate_python(key)] = True
def store_next(
self, value: StorageKeyValueTypeConvertible | StorageKeyValueType | bool, hint: str = ""
) -> StorageKeyValueType:
"""
Store a value in the storage and returns the key where the value is
stored.
Increments the key counter so the next time this function is called,
the next key is used.
"""
slot = StorageKeyValueTypeAdapter.validate_python(self._current_slot)
self._current_slot += 1
if hint:
self._hint_map[slot] = hint
self[slot] = StorageKeyValueTypeAdapter.validate_python(value)
return slot
def peek_slot(self) -> int:
"""Peek the next slot that will be used by `store_next`."""
return self._current_slot
def contains(self, other: "Storage") -> bool:
"""
Return True if self contains all keys with equal value as contained by
second storage. Used for comparison with test expected post state and
alloc returned by the transition tool.
"""
for key in other.keys():
if key not in self:
return False
if self[key] != other[key]:
return False
return True
def must_contain(self, address: Address, other: "Storage") -> None:
"""
Succeeds only if self contains all keys with equal value as contained
by second storage. Used for comparison with test expected post state
and alloc returned by the transition tool. Raises detailed exception
when a difference is found.
"""
for key in other.keys():
if key not in self:
# storage[key]==0 is equal to missing storage
if other[key] != 0:
raise Storage.MissingKeyError(key=key)
elif self[key] != other[key]:
raise Storage.KeyValueMismatchError(
address=address,
key=key,
want=self[key],
got=other[key],
hint=self._hint_map.get(key, ""),
)
def must_be_equal(self, address: Address, other: "Storage | None") -> None:
"""Succeed only if "self" is equal to "other" storage."""
# Test keys contained in both storage objects
if other is None:
other = Storage({})
for key in self.keys() & other.keys():
if self[key] != other[key]:
raise Storage.KeyValueMismatchError(
address=address,
key=key,
want=self[key],
got=other[key],
hint=self._hint_map.get(key, ""),
)
# Test keys contained in either one of the storage objects
for key in self.keys() ^ other.keys():
if key in self:
if self[key] != 0:
raise Storage.KeyValueMismatchError(
address=address,
key=key,
want=self[key],
got=0,
hint=self._hint_map.get(key, ""),
)
elif other[key] != 0:
# Skip key verification if we allow this key to be ANY
if self._any_map.get(key) is True:
continue
raise Storage.KeyValueMismatchError(
address=address,
key=key,
want=0,
got=other[key],
hint=self._hint_map.get(key, ""),
)
def canary(self) -> "Storage":
"""
Return a canary storage filled with non-zero values where the current
storage expects zero values, to guarantee that the test overwrites the
storage.
"""
return Storage({key: HashInt(0xBA5E) for key in self.keys() if self[key] == 0})
class Account(CamelModel):
"""State associated with an address."""
nonce: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
"""
The scalar value equal to a) the number of transactions sent by an
Externally Owned Account, b) the amount of contracts created by a contract.
"""
balance: ZeroPaddedHexNumber = ZeroPaddedHexNumber(0)
"""The amount of Wei (10<sup>-18</sup> Eth) the account has."""
code: Bytes = Bytes(b"")
"""Bytecode contained by the account."""
storage: Storage = Field(default_factory=Storage)
"""Storage within a contract."""
NONEXISTENT: ClassVar[None] = None
"""
Sentinel object used to specify when an account should not exist in the
state.
"""
@dataclass(kw_only=True)
class NonceMismatchError(Exception):
"""
Test expected a certain nonce value for an account but a different
value was found.
"""
address: Address
want: int | None
got: int | None
def __init__(
self, address: Address, want: int | None, got: int | None, *args: Any
) -> None:
"""
Initialize the exception with the address, wanted and got values.
"""
super().__init__(args)
self.address = address
self.want = want
self.got = got
def __str__(self) -> str:
"""Print exception string."""
label_str = ""
if self.address.label is not None:
label_str = f" ({self.address.label})"
return (
f"unexpected nonce for account {self.address}{label_str}: "
+ f"want {self.want}, got {self.got}"
)
@dataclass(kw_only=True)
class BalanceMismatchError(Exception):
"""
Test expected a certain balance for an account but a different value
was found.
"""
address: Address
want: int | None
got: int | None
def __init__(
self, address: Address, want: int | None, got: int | None, *args: Any
) -> None:
"""
Initialize the exception with the address, wanted and got values.
"""
super().__init__(args)
self.address = address
self.want = want
self.got = got
def __str__(self) -> str:
"""Print exception string."""
label_str = ""
if self.address.label is not None:
label_str = f" ({self.address.label})"
return (
f"unexpected balance for account {self.address}{label_str}: "
+ f"want {self.want}, got {self.got}"
)
@dataclass(kw_only=True)
class CodeMismatchError(Exception):
"""
Test expected a certain bytecode for an account but a different one was
found.
"""
address: Address
want: bytes | None
got: bytes | None
def __init__(
self, address: Address, want: bytes | None, got: bytes | None, *args: Any
) -> None:
"""
Initialize the exception with the address, wanted and got values.
"""
super().__init__(args)
self.address = address
self.want = want
self.got = got
def __str__(self) -> str:
"""Print exception string."""
label_str = ""
if self.address.label is not None:
label_str = f" ({self.address.label})"
return (
f"unexpected code for account {self.address}{label_str}: "
f"want {self.want.hex() if self.want else self.want}, "
f"got {self.got.hex() if self.got else self.got}"
)
def check_alloc(self: "Account", address: Address, account: "Account") -> None:
"""
Check the returned alloc against an expected account in post state.
Raises exception on failure.
"""
if "nonce" in self.model_fields_set:
if self.nonce != account.nonce:
raise Account.NonceMismatchError(
address=address,
want=self.nonce,
got=account.nonce,
)
if "balance" in self.model_fields_set:
if self.balance != account.balance:
raise Account.BalanceMismatchError(
address=address,
want=self.balance,
got=account.balance,
)
if "code" in self.model_fields_set:
if self.code != account.code:
raise Account.CodeMismatchError(
address=address,
want=self.code,
got=account.code,
)
if "storage" in self.model_fields_set:
self.storage.must_be_equal(address=address, other=account.storage)
def __bool__(self: "Account") -> bool:
"""Return True on a non-empty account."""
return any((self.nonce, self.balance, self.code, self.storage))
@classmethod
def with_code(cls: Type, code: BytesConvertible) -> "Account":
"""Create account with provided `code` and nonce of `1`."""
return Account(nonce=HexNumber(1), code=Bytes(code))
@classmethod
def merge(
cls: Type, account_1: "Dict | Account | None", account_2: "Dict | Account | None"
) -> "Account":
"""Create a merged account from two sources."""
def to_kwargs_dict(account: "Dict | Account | None") -> Dict:
if account is None:
return {}
if isinstance(account, dict):
return account
elif isinstance(account, cls):
return account.model_dump(exclude_unset=True)
raise TypeError(f"Unexpected type for account merge: {type(account)}")
kwargs = to_kwargs_dict(account_1)
kwargs.update(to_kwargs_dict(account_2))
return cls(**kwargs)
class Alloc(EthereumTestRootModel[Dict[Address, Account | None]]):
"""Allocation of accounts in the state, pre and post test execution."""
root: Dict[Address, Account | None] = Field(default_factory=dict, validate_default=True)
class AccessList(CamelModel, RLPSerializable):
"""Access List for transactions."""
address: Address
storage_keys: List[Hash]
rlp_fields: ClassVar[List[str]] = ["address", "storage_keys"]
class ForkBlobSchedule(CamelModel):
"""Representation of the blob schedule of a given fork."""
target_blobs_per_block: HexNumber = Field(..., alias="target")
max_blobs_per_block: HexNumber = Field(..., alias="max")
base_fee_update_fraction: HexNumber = Field(...)
class BlobSchedule(EthereumTestRootModel[Dict[str, ForkBlobSchedule]]):
"""Blob schedule configuration dictionary."""
root: Dict[str, ForkBlobSchedule] = Field(default_factory=dict, validate_default=True)
def append(self, *, fork: str, schedule: Any) -> None:
"""Append a new fork schedule."""
if not isinstance(schedule, ForkBlobSchedule):
schedule = ForkBlobSchedule(**schedule)
self.root[fork] = schedule
def last(self) -> ForkBlobSchedule | None:
"""Return the last schedule."""
if len(self.root) == 0:
return None
return list(self.root.values())[-1]
def __getitem__(self, key: str) -> ForkBlobSchedule:
"""Return the schedule for a given fork."""
return self.root[key]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/base_types.py | src/ethereum_test_base_types/base_types.py | """Basic type primitives used to define other types."""
from hashlib import sha256
from typing import Annotated, Any, ClassVar, SupportsBytes, Type, TypeVar
from Crypto.Hash import keccak
from pydantic import GetCoreSchemaHandler, StringConstraints
from pydantic_core.core_schema import (
PlainValidatorFunctionSchema,
no_info_plain_validator_function,
to_string_ser_schema,
)
from typing_extensions import Self
from .conversions import (
BytesConvertible,
FixedSizeBytesConvertible,
NumberConvertible,
to_bytes,
to_fixed_size_bytes,
to_number,
)
class ToStringSchema:
"""
Type converter to add a simple pydantic schema that correctly parses and
serializes the type.
"""
@staticmethod
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
)
class Number(int, ToStringSchema):
"""Class that helps represent numbers in tests."""
def __new__(cls, input_number: NumberConvertible | Self) -> Self:
"""Create a new Number object."""
return super(Number, cls).__new__(cls, to_number(input_number))
def __str__(self) -> str:
"""Return the string representation of the number."""
return str(int(self))
def hex(self) -> str:
"""Return the hexadecimal representation of the number."""
return hex(self)
@classmethod
def or_none(cls: Type[Self], input_number: Self | NumberConvertible | None) -> Self | None:
"""Convert the input to a Number while accepting None."""
if input_number is None:
return input_number
return cls(input_number)
class Wei(Number):
"""Class that helps represent wei that can be parsed from strings."""
def __new__(cls, input_number: NumberConvertible | Self) -> Self:
"""Create a new Number object."""
if isinstance(input_number, str):
words = input_number.split()
multiplier = 1
assert len(words) <= 2
value_str = words[0]
if len(words) > 1:
unit = words[1].lower()
multiplier = cls._get_multiplier(unit)
value: float
if "**" in value_str:
base, exp = value_str.split("**")
value = float(base) ** int(exp)
else:
value = int(value_str) if value_str.isdecimal() else float(value_str)
return super(Number, cls).__new__(cls, value * multiplier)
return super(Number, cls).__new__(cls, to_number(input_number))
@staticmethod
def _get_multiplier(unit: str) -> int:
"""
Return the multiplier for the given unit of wei, handling synonyms.
"""
match unit:
case "wei":
return 1
case "kwei" | "babbage" | "femtoether":
return 10**3
case "mwei" | "lovelace" | "picoether":
return 10**6
case "gwei" | "shannon" | "nanoether" | "nano":
return 10**9
case "szabo" | "microether" | "micro":
return 10**12
case "finney" | "milliether" | "milli":
return 10**15
case "ether" | "eth":
return 10**18
case _:
raise ValueError(f"Invalid unit {unit}")
class HexNumber(Number):
"""Class that helps represent an hexadecimal numbers in tests."""
def __str__(self) -> str:
"""Return the string representation of the number."""
return self.hex()
@staticmethod
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
json_schema_input_schema=handler(
Annotated[str, StringConstraints(pattern=r"^0x[0-9a-fA-F]*$")]
),
)
class ZeroPaddedHexNumber(HexNumber):
"""Class that helps represent zero padded hexadecimal numbers in tests."""
def hex(self) -> str:
"""Return the hexadecimal representation of the number."""
if self == 0:
return "0x00"
hex_str = hex(self)[2:]
if len(hex_str) % 2 == 1:
return "0x0" + hex_str
return "0x" + hex_str
@staticmethod
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
json_schema_input_schema=handler(
Annotated[str, StringConstraints(pattern=r"^0x([0-9a-fA-F]{2})*$")]
),
)
NumberBoundTypeVar = TypeVar("NumberBoundTypeVar", Number, HexNumber, ZeroPaddedHexNumber)
class Bytes(bytes, ToStringSchema):
"""Class that helps represent bytes of variable length in tests."""
def __new__(cls, input_bytes: BytesConvertible = b"") -> Self:
"""Create a new Bytes object."""
if type(input_bytes) is cls:
return input_bytes
return super(Bytes, cls).__new__(cls, to_bytes(input_bytes))
def __hash__(self) -> int:
"""Return the hash of the bytes."""
return super(Bytes, self).__hash__()
def __str__(self) -> str:
"""Return the hexadecimal representation of the bytes."""
return self.hex()
def hex(self, *args: Any, **kwargs: Any) -> str:
"""Return the hexadecimal representation of the bytes."""
return "0x" + super().hex(*args, **kwargs)
@classmethod
def or_none(cls, input_bytes: "Bytes | BytesConvertible | None") -> "Bytes | None":
"""Convert the input to a Bytes while accepting None."""
if input_bytes is None:
return input_bytes
return cls(input_bytes)
def keccak256(self) -> "Hash":
"""Return the keccak256 hash of the opcode byte representation."""
k = keccak.new(digest_bits=256)
return Hash(k.update(bytes(self)).digest())
def sha256(self) -> "Hash":
"""Return the sha256 hash of the opcode byte representation."""
return Hash(sha256(self).digest())
@staticmethod
def __get_pydantic_core_schema__(
source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
json_schema_input_schema=handler(
Annotated[str, StringConstraints(pattern=r"^0x([0-9a-fA-F]{2})*$")]
),
)
class FixedSizeHexNumber(int, ToStringSchema):
"""
A base class that helps represent an integer as a fixed byte-length
hexadecimal number.
This class is used to dynamically generate subclasses of a specific byte
length.
"""
byte_length: ClassVar[int]
max_value: ClassVar[int]
def __class_getitem__(cls, length: int) -> Type["FixedSizeHexNumber"]:
"""Create a new FixedSizeHexNumber class with the given length."""
class Sized(cls): # type: ignore
byte_length = length
max_value = 2 ** (8 * length) - 1
return Sized
def __new__(cls, input_number: NumberConvertible | Self) -> Self:
"""Create a new Number object."""
i = to_number(input_number)
if i > cls.max_value:
raise ValueError(f"Value {i} is too large for {cls.byte_length} bytes")
if i < 0:
i += cls.max_value + 1
if i <= 0:
raise ValueError(f"Value {i} is too small for {cls.byte_length} bytes")
return super(FixedSizeHexNumber, cls).__new__(cls, i)
def __str__(self) -> str:
"""Return the string representation of the number."""
return self.hex()
def hex(self) -> str:
"""Return the hexadecimal representation of the number."""
if self == 0:
return "0x00"
hex_str = hex(self)[2:]
if len(hex_str) % 2 == 1:
return "0x0" + hex_str
return "0x" + hex_str
@classmethod
def __get_pydantic_core_schema__(
cls: Type[Self], source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
pattern = f"^0x([0-9a-fA-F]{{{cls.byte_length * 2}}})*$"
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
json_schema_input_schema=handler(Annotated[str, StringConstraints(pattern=pattern)]),
)
class HashInt(FixedSizeHexNumber[32]): # type: ignore
"""Class that helps represent hashes in tests."""
pass
class FixedSizeBytes(Bytes):
"""Class that helps represent bytes of fixed length in tests."""
byte_length: ClassVar[int]
_sized_: ClassVar[Type["FixedSizeBytes"]]
def __class_getitem__(cls, length: int) -> Type["FixedSizeBytes"]:
"""Create a new FixedSizeBytes class with the given length."""
class Sized(cls): # type: ignore
byte_length = length
Sized._sized_ = Sized
return Sized
def __new__(
cls,
input_bytes: FixedSizeBytesConvertible | Self,
*,
left_padding: bool = False,
right_padding: bool = False,
) -> Self:
"""Create a new FixedSizeBytes object."""
if type(input_bytes) is cls:
return input_bytes
return super(FixedSizeBytes, cls).__new__(
cls,
to_fixed_size_bytes(
input_bytes,
cls.byte_length,
left_padding=left_padding,
right_padding=right_padding,
),
)
def __hash__(self) -> int:
"""Return the hash of the bytes."""
return super(FixedSizeBytes, self).__hash__()
@classmethod
def or_none(
cls: Type[Self], input_bytes: Self | FixedSizeBytesConvertible | None
) -> Self | None:
"""Convert the input to a Fixed Size Bytes while accepting None."""
if input_bytes is None:
return input_bytes
return cls(input_bytes)
def __eq__(self, other: object) -> bool:
"""Compare two FixedSizeBytes objects to be equal."""
if other is None:
return False
if not isinstance(other, FixedSizeBytes):
assert (
isinstance(other, str)
or isinstance(other, int)
or isinstance(other, bytes)
or isinstance(other, SupportsBytes)
)
other = self._sized_(other)
return super().__eq__(other)
def __ne__(self, other: object) -> bool:
"""Compare two FixedSizeBytes objects to be not equal."""
return not self.__eq__(other)
@classmethod
def __get_pydantic_core_schema__(
cls: Type[Self], source_type: Any, handler: GetCoreSchemaHandler
) -> PlainValidatorFunctionSchema:
"""
Call the class constructor without info and appends the serialization
schema.
"""
pattern = f"^0x([0-9a-fA-F]{{{cls.byte_length * 2}}})*$"
return no_info_plain_validator_function(
source_type,
serialization=to_string_ser_schema(),
json_schema_input_schema=handler(Annotated[str, StringConstraints(pattern=pattern)]),
)
class ForkHash(FixedSizeBytes[4]): # type: ignore
"""
Class that helps represent the CRC config hashes and identifiers of a fork.
"""
pass
class Address(FixedSizeBytes[20]): # type: ignore
"""Class that helps represent Ethereum addresses in tests."""
label: str | None = None
def __new__(
cls,
input_bytes: "FixedSizeBytesConvertible | Address",
*args: Any,
label: str | None = None,
**kwargs: Any,
) -> Self:
"""Create a new Address object with an optional label."""
instance = super(Address, cls).__new__(cls, input_bytes, *args, **kwargs)
if isinstance(input_bytes, Address) and label is None:
instance.label = input_bytes.label
else:
instance.label = label
return instance
class Hash(FixedSizeBytes[32]): # type: ignore
"""Class that helps represent hashes in tests."""
pass
class StorageKey(FixedSizeBytes[32]): # type: ignore
"""
Storage key type that automatically applies left padding for values shorter
than 32 bytes.
"""
def __new__(
cls, input_bytes: FixedSizeBytesConvertible | FixedSizeBytes, **kwargs: Any
) -> Self:
"""Create a new StorageKey with automatic left padding."""
# Always apply left_padding for storage keys unless explicitly set to
# False
if "left_padding" not in kwargs:
kwargs["left_padding"] = True
return super().__new__(cls, input_bytes, **kwargs)
class Bloom(FixedSizeBytes[256]): # type: ignore
"""Class that helps represent blooms in tests."""
pass
class HeaderNonce(FixedSizeBytes[8]): # type: ignore
"""Class that helps represent the header nonce in tests."""
pass
class BLSPublicKey(FixedSizeBytes[48]): # type: ignore
"""Class that helps represent BLS public keys in tests."""
pass
class BLSSignature(FixedSizeBytes[96]): # type: ignore
"""Class that helps represent BLS signatures in tests."""
pass
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/__init__.py | src/ethereum_test_base_types/__init__.py | """Common definitions and types."""
from .base_types import (
Address,
Bloom,
BLSPublicKey,
BLSSignature,
Bytes,
FixedSizeBytes,
ForkHash,
Hash,
HashInt,
HeaderNonce,
HexNumber,
Number,
NumberBoundTypeVar,
StorageKey,
Wei,
ZeroPaddedHexNumber,
)
from .base_types_json import to_json
from .composite_types import (
AccessList,
Account,
Alloc,
BlobSchedule,
ForkBlobSchedule,
Storage,
StorageRootType,
)
from .constants import (
AddrAA,
AddrBB,
EmptyOmmersRoot,
EmptyTrieRoot,
TestAddress,
TestAddress2,
TestPrivateKey,
TestPrivateKey2,
)
from .conversions import to_bytes, to_hex
from .pydantic import CamelModel, EthereumTestBaseModel, EthereumTestRootModel
from .reference_spec import ReferenceSpec
from .serialization import RLPSerializable, SignableRLPSerializable
__all__ = (
"AccessList",
"Account",
"AddrAA",
"AddrBB",
"Address",
"Alloc",
"BlobSchedule",
"Bloom",
"BLSPublicKey",
"BLSSignature",
"Bytes",
"CamelModel",
"EmptyOmmersRoot",
"EmptyTrieRoot",
"EthereumTestBaseModel",
"EthereumTestRootModel",
"FixedSizeBytes",
"ForkBlobSchedule",
"ForkHash",
"Hash",
"HashInt",
"HeaderNonce",
"HexNumber",
"Number",
"NumberBoundTypeVar",
"ReferenceSpec",
"RLPSerializable",
"SignableRLPSerializable",
"Storage",
"StorageKey",
"StorageRootType",
"TestAddress",
"TestAddress2",
"TestPrivateKey",
"TestPrivateKey2",
"Wei",
"ZeroPaddedHexNumber",
"to_bytes",
"to_hex",
"to_json",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/conversions.py | src/ethereum_test_base_types/conversions.py | """Common conversion methods."""
from re import sub
from typing import Any, List, Optional, SupportsBytes, TypeAlias
BytesConvertible: TypeAlias = str | bytes | SupportsBytes | List[int]
FixedSizeBytesConvertible: TypeAlias = str | bytes | SupportsBytes | List[int] | int
NumberConvertible: TypeAlias = str | bytes | SupportsBytes | int
def int_or_none(input_value: Any, default: Optional[int] = None) -> int | None:
"""Convert a value to int or returns a default (None)."""
if input_value is None:
return default
if isinstance(input_value, int):
return input_value
return int(input_value, 0)
def str_or_none(input_value: Any, default: Optional[str] = None) -> str | None:
"""Convert a value to string or returns a default (None)."""
if input_value is None:
return default
if isinstance(input_value, str):
return input_value
return str(input_value)
def to_bytes(input_bytes: BytesConvertible) -> bytes:
"""Convert multiple types into bytes."""
if input_bytes is None:
raise Exception("Cannot convert `None` input to bytes")
if (
isinstance(input_bytes, SupportsBytes)
or isinstance(input_bytes, bytes)
or isinstance(input_bytes, list)
):
return bytes(input_bytes)
if isinstance(input_bytes, str):
# We can have a hex representation of bytes with spaces for readability
input_bytes = sub(r"\s+", "", input_bytes)
if input_bytes.startswith("0x"):
input_bytes = input_bytes[2:]
if len(input_bytes) % 2 == 1:
input_bytes = "0" + input_bytes
return bytes.fromhex(input_bytes)
raise Exception("invalid type for `bytes`")
def to_fixed_size_bytes(
input_bytes: FixedSizeBytesConvertible,
size: int,
*,
left_padding: bool = False,
right_padding: bool = False,
) -> bytes:
"""
Convert multiple types into fixed-size bytes.
Args:
input_bytes: The input data to convert.
size: The size of the output bytes.
left_padding: Whether to allow left-padding of the input data bytes
using zeros. If the input data is an integer, padding is
always performed.
right_padding: Whether to allow right-padding of the input data bytes
using zeros. If the input data is an integer, padding
is always performed.
"""
if isinstance(input_bytes, int):
return int.to_bytes(input_bytes, length=size, byteorder="big", signed=input_bytes < 0)
input_bytes = to_bytes(input_bytes)
if len(input_bytes) > size:
raise Exception(
f"input is too large for fixed size bytes: {input_bytes.hex()}, "
f" {len(input_bytes)} > {size}"
)
if len(input_bytes) < size:
if left_padding:
return bytes(input_bytes).rjust(size, b"\x00")
if right_padding:
return bytes(input_bytes).ljust(size, b"\x00")
raise Exception(
f"input is too small for fixed size bytes: {len(input_bytes)} < {size}\n"
"Use `left_padding=True` or `right_padding=True` to allow padding."
)
return input_bytes
def to_hex(input_bytes: BytesConvertible) -> str:
"""Convert multiple types into a bytes hex string."""
return "0x" + to_bytes(input_bytes).hex()
def to_number(input_number: NumberConvertible) -> int:
"""Convert multiple types into a number."""
if isinstance(input_number, int):
return input_number
if isinstance(input_number, str):
return int(input_number, 0)
if isinstance(input_number, bytes) or isinstance(input_number, SupportsBytes):
return int.from_bytes(input_number, byteorder="big")
raise Exception("invalid type for `number`")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/base_types_json.py | src/ethereum_test_base_types/base_types_json.py | """JSON encoding and decoding for Ethereum types."""
from typing import Any, AnyStr, List
from .pydantic import EthereumTestBaseModel, EthereumTestRootModel
def to_json(
input_model: (
EthereumTestBaseModel
| EthereumTestRootModel
| AnyStr
| List[EthereumTestBaseModel | EthereumTestRootModel | AnyStr]
),
) -> Any:
"""Convert a model to its json data representation."""
if isinstance(input_model, list):
return [to_json(item) for item in input_model]
elif isinstance(input_model, (EthereumTestBaseModel, EthereumTestRootModel)):
return input_model.model_dump(mode="json", by_alias=True, exclude_none=True)
else:
return str(input_model)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/pydantic.py | src/ethereum_test_base_types/pydantic.py | """Base pydantic classes used to define the models for Ethereum tests."""
from typing import Any, TypeVar
from pydantic import BaseModel, ConfigDict, RootModel
from pydantic.alias_generators import to_camel
from typing_extensions import Self
from .mixins import ModelCustomizationsMixin
RootModelRootType = TypeVar("RootModelRootType")
class EthereumTestBaseModel(BaseModel, ModelCustomizationsMixin):
"""Base model for all models for Ethereum tests."""
pass
class EthereumTestRootModel(RootModel[RootModelRootType], ModelCustomizationsMixin):
"""Base model for all models for Ethereum tests."""
root: Any
class CopyValidateModel(EthereumTestBaseModel):
"""Model that supports copying with validation."""
def copy(self: Self, **kwargs: Any) -> Self:
"""
Create a copy of the model with the updated fields that are validated.
"""
return self.__class__(**(self.model_dump(exclude_unset=True) | kwargs))
class CamelModel(CopyValidateModel):
"""
A base model that converts field names to camel case when serializing.
For example, the field name `current_timestamp` in a Python model will be
represented as `currentTimestamp` when it is serialized to json.
"""
model_config = ConfigDict(
alias_generator=to_camel,
populate_by_name=True,
validate_default=True,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/tests/test_base_types.py | src/ethereum_test_base_types/tests/test_base_types.py | """Test suite for `ethereum_test` module base types."""
from typing import Any, Dict
import pytest
from ..base_types import Address, Hash, Wei
from ..base_types_json import to_json
from ..composite_types import AccessList
@pytest.mark.parametrize(
"a, b, equal",
[
(Address(0), Address(0), True),
(
Address("0x0000000000000000000000000000000000000000"),
Address("0x0000000000000000000000000000000000000000"),
True,
),
(
Address("0x0000000000000000000000000000000000000000"),
Address("0x0000000000000000000000000000000000000001"),
False,
),
(
Address("0x0000000000000000000000000000000000000001"),
Address("0x0000000000000000000000000000000000000000"),
False,
),
(
Address("0x0000000000000000000000000000000000000001"),
"0x0000000000000000000000000000000000000001",
True,
),
(
Address("0x0000000000000000000000000000000000000001"),
"0x0000000000000000000000000000000000000002",
False,
),
(Address("0x0000000000000000000000000000000000000001"), 1, True),
(Address("0x0000000000000000000000000000000000000001"), 2, False),
(
Address("0x0000000000000000000000000000000000000001"),
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
True,
),
(
Address("0x0000000000000000000000000000000000000001"),
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02",
False,
),
(
"0x0000000000000000000000000000000000000001",
Address("0x0000000000000000000000000000000000000001"),
True,
),
(
"0x0000000000000000000000000000000000000002",
Address("0x0000000000000000000000000000000000000001"),
False,
),
(1, Address("0x0000000000000000000000000000000000000001"), True),
(2, Address("0x0000000000000000000000000000000000000001"), False),
(
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
Address("0x0000000000000000000000000000000000000001"),
True,
),
(
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02",
Address("0x0000000000000000000000000000000000000001"),
False,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000000"),
Hash("0x0000000000000000000000000000000000000000000000000000000000000000"),
True,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000000"),
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
False,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
Hash("0x0000000000000000000000000000000000000000000000000000000000000000"),
False,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
"0x0000000000000000000000000000000000000000000000000000000000000001",
True,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
"0x0000000000000000000000000000000000000000000000000000000000000002",
False,
),
(Hash("0x0000000000000000000000000000000000000000000000000000000000000001"), 1, True),
(Hash("0x0000000000000000000000000000000000000000000000000000000000000001"), 2, False),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
True,
),
(
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02",
False,
),
(
"0x0000000000000000000000000000000000000000000000000000000000000001",
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
True,
),
(
"0x0000000000000000000000000000000000000000000000000000000000000002",
Hash("0x0000000000000000000000000000000000000000000000000000000000000001"),
False,
),
(1, Hash("0x0000000000000000000000000000000000000000000000000000000000000001"), True),
(2, Hash("0x0000000000000000000000000000000000000000000000000000000000000001"), False),
],
)
def test_comparisons(a: Any, b: Any, equal: bool) -> None:
"""Test the comparison methods of the base types."""
if equal:
assert a == b
assert not a != b
else:
assert a != b
assert not a == b
def test_hash_padding() -> None:
"""Test Hash objects are padded correctly."""
assert Hash(b"\x01", left_padding=True) == (
"0x0000000000000000000000000000000000000000000000000000000000000001"
)
assert Hash(b"\x02", right_padding=True) == (
"0x0200000000000000000000000000000000000000000000000000000000000000"
)
def test_address_padding() -> None:
"""Test that addresses are padded correctly."""
assert Address(b"\x01", left_padding=True) == Address(
"0x0000000000000000000000000000000000000001"
)
assert Address(b"\x80", right_padding=True) == Address(
"0x8000000000000000000000000000000000000000"
)
@pytest.mark.parametrize(
"s, expected",
[
("0", 0),
("10**18", 10**18),
("1e18", 10**18),
("1 ether", 10**18),
("2 ether", 2 * 10**18),
("70000 ether", 70000 * 10**18),
("123456 ether", 123456 * 10**18),
("123456.789 ether", 123456.789 * 10**18),
("2.1 ether", 2.1 * 10**18),
("2.1 Ether", 2.1 * 10**18),
("2.1 ETHER", 2.1 * 10**18),
("1 wei", 1),
("10**9 wei", 10**9),
("1 gwei", 10**9),
("1 szabo", 10**12),
("1 finney", 10**15),
("1 kwei", 10**3),
("1 mwei", 10**6),
("1 babbage", 10**3),
("1 femtoether", 10**3),
("1 Lovelace", 10**6),
("1 Picoether", 10**6),
("1 gwei", 10**9),
("1 shannon", 10**9),
("1 nanoether", 10**9),
("1 nano", 10**9),
("1 microether", 10**12),
("1 micro", 10**12),
("1 milliether", 10**15),
("1 milli", 10**15),
],
)
def test_wei_parsing(s: str, expected: int) -> None:
"""Test the parsing of wei values."""
assert Wei(s) == expected
@pytest.mark.parametrize(
["can_be_deserialized", "model_instance", "json"],
[
pytest.param(
True,
AccessList(
address=0x1234,
storage_keys=[0, 1],
),
{
"address": "0x0000000000000000000000000000000000001234",
"storageKeys": [
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000001",
],
},
id="access_list",
),
],
)
class TestPydanticModelConversion:
"""Test that Pydantic models are converted to and from JSON correctly."""
def test_json_serialization(
self, can_be_deserialized: bool, model_instance: Any, json: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
del can_be_deserialized
assert to_json(model_instance) == json
def test_json_deserialization(
self, can_be_deserialized: bool, model_instance: Any, json: str | Dict[str, Any]
) -> None:
"""Test that to_json returns the expected JSON for the given object."""
if not can_be_deserialized:
pytest.skip(reason="The model instance in this case can not be deserialized")
model_type = type(model_instance)
assert model_type(**json) == model_instance
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/tests/test_reference_spec.py | src/ethereum_test_base_types/tests/test_reference_spec.py | """Test suite for `ethereum_test_base_types.reference_spec` module."""
import re
from typing import Any
import pytest
import requests
from ..reference_spec.git_reference_spec import GitReferenceSpec
from ..reference_spec.reference_spec import NoLatestKnownVersionError
# the content field from
# https://api.github.com/repos/ethereum/EIPs/contents/EIPS/eip-100.md
# as of 2023-08-29
response_content = "LS0tCmVpcDogMTAwCnRpdGxlOiBDaGFuZ2UgZGlmZmljdWx0eSBhZGp1c3Rt\
ZW50IHRvIHRhcmdldCBtZWFuIGJsb2NrIHRpbWUgaW5jbHVkaW5nIHVuY2xl\
cwphdXRob3I6IFZpdGFsaWsgQnV0ZXJpbiAoQHZidXRlcmluKQp0eXBlOiBT\
dGFuZGFyZHMgVHJhY2sKY2F0ZWdvcnk6IENvcmUKc3RhdHVzOiBGaW5hbApj\
cmVhdGVkOiAyMDE2LTA0LTI4Ci0tLQoKIyMjIFNwZWNpZmljYXRpb24KCkN1\
cnJlbnRseSwgdGhlIGZvcm11bGEgdG8gY29tcHV0ZSB0aGUgZGlmZmljdWx0\
eSBvZiBhIGJsb2NrIGluY2x1ZGVzIHRoZSBmb2xsb3dpbmcgbG9naWM6Cgpg\
YGAgcHl0aG9uCmFkal9mYWN0b3IgPSBtYXgoMSAtICgodGltZXN0YW1wIC0g\
cGFyZW50LnRpbWVzdGFtcCkgLy8gMTApLCAtOTkpCmNoaWxkX2RpZmYgPSBp\
bnQobWF4KHBhcmVudC5kaWZmaWN1bHR5ICsgKHBhcmVudC5kaWZmaWN1bHR5\
IC8vIEJMT0NLX0RJRkZfRkFDVE9SKSAqIGFkal9mYWN0b3IsIG1pbihwYXJl\
bnQuZGlmZmljdWx0eSwgTUlOX0RJRkYpKSkKLi4uCmBgYAoKSWYgYGJsb2Nr\
Lm51bWJlciA+PSBCWVpBTlRJVU1fRk9SS19CTEtOVU1gLCB3ZSBjaGFuZ2Ug\
dGhlIGZpcnN0IGxpbmUgdG8gdGhlIGZvbGxvd2luZzoKCmBgYCBweXRob24K\
YWRqX2ZhY3RvciA9IG1heCgoMiBpZiBsZW4ocGFyZW50LnVuY2xlcykgZWxz\
ZSAxKSAtICgodGltZXN0YW1wIC0gcGFyZW50LnRpbWVzdGFtcCkgLy8gOSks\
IC05OSkKYGBgCiMjIyBSYXRpb25hbGUKClRoaXMgbmV3IGZvcm11bGEgZW5z\
dXJlcyB0aGF0IHRoZSBkaWZmaWN1bHR5IGFkanVzdG1lbnQgYWxnb3JpdGht\
IHRhcmdldHMgYSBjb25zdGFudCBhdmVyYWdlIHJhdGUgb2YgYmxvY2tzIHBy\
b2R1Y2VkIGluY2x1ZGluZyB1bmNsZXMsIGFuZCBzbyBlbnN1cmVzIGEgaGln\
aGx5IHByZWRpY3RhYmxlIGlzc3VhbmNlIHJhdGUgdGhhdCBjYW5ub3QgYmUg\
bWFuaXB1bGF0ZWQgdXB3YXJkIGJ5IG1hbmlwdWxhdGluZyB0aGUgdW5jbGUg\
cmF0ZS4gQSBmb3JtdWxhIHRoYXQgYWNjb3VudHMgZm9yIHRoZSBleGFjdCBu\
dW1iZXIgb2YgaW5jbHVkZWQgdW5jbGVzOgpgYGAgcHl0aG9uCmFkal9mYWN0\
b3IgPSBtYXgoMSArIGxlbihwYXJlbnQudW5jbGVzKSAtICgodGltZXN0YW1w\
IC0gcGFyZW50LnRpbWVzdGFtcCkgLy8gOSksIC05OSkKYGBgCmNhbiBiZSBm\
YWlybHkgZWFzaWx5IHNlZW4gdG8gYmUgKHRvIHdpdGhpbiBhIHRvbGVyYW5j\
ZSBvZiB+My80MTk0MzA0KSBtYXRoZW1hdGljYWxseSBlcXVpdmFsZW50IHRv\
IGFzc3VtaW5nIHRoYXQgYSBibG9jayB3aXRoIGBrYCB1bmNsZXMgaXMgZXF1\
aXZhbGVudCB0byBhIHNlcXVlbmNlIG9mIGBrKzFgIGJsb2NrcyB0aGF0IGFs\
bCBhcHBlYXIgd2l0aCB0aGUgZXhhY3Qgc2FtZSB0aW1lc3RhbXAsIGFuZCB0\
aGlzIGlzIGxpa2VseSB0aGUgc2ltcGxlc3QgcG9zc2libGUgd2F5IHRvIGFj\
Y29tcGxpc2ggdGhlIGRlc2lyZWQgZWZmZWN0LiBCdXQgc2luY2UgdGhlIGV4\
YWN0IGZvcm11bGEgZGVwZW5kcyBvbiB0aGUgZnVsbCBibG9jayBhbmQgbm90\
IGp1c3QgdGhlIGhlYWRlciwgd2UgYXJlIGluc3RlYWQgdXNpbmcgYW4gYXBw\
cm94aW1hdGUgZm9ybXVsYSB0aGF0IGFjY29tcGxpc2hlcyBhbG1vc3QgdGhl\
IHNhbWUgZWZmZWN0IGJ1dCBoYXMgdGhlIGJlbmVmaXQgdGhhdCBpdCBkZXBl\
bmRzIG9ubHkgb24gdGhlIGJsb2NrIGhlYWRlciAoYXMgeW91IGNhbiBjaGVj\
ayB0aGUgdW5jbGUgaGFzaCBhZ2FpbnN0IHRoZSBibGFuayBoYXNoKS4KCkNo\
YW5naW5nIHRoZSBkZW5vbWluYXRvciBmcm9tIDEwIHRvIDkgZW5zdXJlcyB0\
aGF0IHRoZSBibG9jayB0aW1lIHJlbWFpbnMgcm91Z2hseSB0aGUgc2FtZSAo\
aW4gZmFjdCwgaXQgc2hvdWxkIGRlY3JlYXNlIGJ5IH4zJSBnaXZlbiB0aGUg\
Y3VycmVudCB1bmNsZSByYXRlIG9mIDclKS4KCiMjIyBSZWZlcmVuY2VzCgox\
LiBFSVAgMTAwIGlzc3VlIGFuZCBkaXNjdXNzaW9uOiBodHRwczovL2dpdGh1\
Yi5jb20vZXRoZXJldW0vRUlQcy9pc3N1ZXMvMTAwCjIuIGh0dHBzOi8vYml0\
c2xvZy53b3JkcHJlc3MuY29tLzIwMTYvMDQvMjgvdW5jbGUtbWluaW5nLWFu\
LWV0aGVyZXVtLWNvbnNlbnN1cy1wcm90b2NvbC1mbGF3Lwo="
def test_git_reference_spec(monkeypatch: pytest.MonkeyPatch) -> None:
"""Test Git reference spec."""
def mock_get(self: Any, headers: Any | None = None) -> object:
del self, headers
class Response:
content = (
'{"content": "'
+ response_content
+ '", "sha":"78b94002190eb71cb04b8757629397f9418e8cce"}'
)
status_code = 200
return Response()
monkeypatch.setattr(requests, "get", mock_get)
ref_spec = GitReferenceSpec(
SpecPath="EIPS/eip-100.md",
)
latest_spec = ref_spec._get_latest_spec()
assert latest_spec is not None
assert "sha" in latest_spec
assert re.match(r"^[0-9a-f]{40}$", latest_spec["sha"])
with pytest.raises(NoLatestKnownVersionError):
# `is_outdated` method raises here because known version is unset
ref_spec.is_outdated()
ref_spec.SpecVersion = "0000000000000000000000000000000000000000"
assert ref_spec.is_outdated()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/tests/__init__.py | src/ethereum_test_base_types/tests/__init__.py | """Tests for the ethereum_test_base_types package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/reference_spec/reference_spec.py | src/ethereum_test_base_types/reference_spec/reference_spec.py | """
Types used to describe a reference specification and versioning used to write
Ethereum tests.
"""
from abc import abstractmethod
from typing import Any, Dict, Optional
# Exceptions
class NoLatestKnownVersionError(Exception):
"""
Exception used to signal that the reference specification does not have a
latest known version.
"""
pass
class ParseModuleError(Exception):
"""
Exception used to signal that module's reference spec could not be parsed
using the given class.
"""
pass
class ReferenceSpec:
"""Reference Specification Description Abstract Class."""
@abstractmethod
def name(self) -> str:
"""Return the name of the spec."""
pass
@abstractmethod
def has_known_version(self) -> bool:
"""
Return true if the reference spec object is hard-coded with a latest
known version.
"""
pass
@abstractmethod
def known_version(self) -> str:
"""Return the latest known version in the reference."""
pass
@abstractmethod
def api_url(self) -> str:
"""
Return the URL required to poll the version from an API, if needed.
"""
pass
@abstractmethod
def latest_version(self) -> str:
"""Return a digest that points to the latest version of the spec."""
pass
@abstractmethod
def is_outdated(self) -> bool:
"""
Check whether the reference specification has been updated since the
test was last updated.
"""
pass
@abstractmethod
def write_info(self, info: Dict[str, Dict[str, Any] | str]) -> None:
"""
Write info about the reference specification used into the output
fixture.
"""
pass
@staticmethod
@abstractmethod
def parseable_from_module(module_dict: Dict[str, Any]) -> bool:
"""
Check whether the module's dict contains required reference spec
information.
"""
pass
@staticmethod
@abstractmethod
def parse_from_module(
module_dict: Dict[str, Any], github_token: Optional[str] = None
) -> "ReferenceSpec":
"""
Parse the module's dict into a reference spec.
Args:
module_dict: Dictionary containing module information
github_token: Optional GitHub token for API authentication
"""
pass
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/reference_spec/git_reference_spec.py | src/ethereum_test_base_types/reference_spec/git_reference_spec.py | """Reference Specification file located in a github repository."""
import base64
import json
import os
import warnings
from dataclasses import dataclass
from typing import Any, Dict, Optional
import requests
from .reference_spec import NoLatestKnownVersionError, ParseModuleError, ReferenceSpec
def _decode_base64_content(encoded_data: str) -> str:
return base64.b64decode(encoded_data).decode("utf-8")
@dataclass(kw_only=True)
class GitReferenceSpec(ReferenceSpec):
"""Git Reference Specification Description Class."""
SpecPath: str
RepositoryOwner: str = "ethereum"
RepositoryName: str = "EIPs"
BranchName: str = "master"
SpecVersion: str = ""
_latest_spec: Dict | None = None
_github_token: Optional[str] = None
def name(self) -> str:
"""Return the name of the spec."""
return (
f"https://github.com/{self.RepositoryOwner}/"
+ f"{self.RepositoryName}/blob/{self.BranchName}/{self.SpecPath}"
)
def known_version(self) -> str:
"""Return the latest known version in the reference."""
return self.SpecVersion
def api_url(self) -> str:
"""URL used to retrieve the version via the Github API."""
return (
f"https://api.github.com/repos/{self.RepositoryOwner}/"
f"{self.RepositoryName}/contents/{self.SpecPath}"
)
def _get_request_headers(self) -> Dict[str, str]:
"""Get headers for GitHub API request, including token if available."""
headers = {}
token = self._github_token or os.environ.get("GITHUB_TOKEN")
if token:
headers["Authorization"] = f"token {token}"
return headers
def _get_latest_known_spec(self) -> Dict | None:
headers = self._get_request_headers()
response = requests.get(self.api_url(), headers=headers)
if response.status_code != 200:
return None
content = json.loads(response.content)
content["content"] = _decode_base64_content(content["content"])
return content
def _get_latest_spec(self) -> Dict | None:
if self._latest_spec is not None:
return self._latest_spec
headers = self._get_request_headers()
response = requests.get(self.api_url(), headers=headers)
if response.status_code != 200:
warnings.warn(
f"Unable to get latest version, status code: {response.status_code} - "
f"text: {response.text}",
stacklevel=2,
)
return None
content = json.loads(response.content)
content["content"] = _decode_base64_content(content["content"])
self._latest_spec = content
return content
def is_outdated(self) -> bool:
"""
Check whether the reference specification has been updated since the
test was last updated, by comparing the latest known `sha` value of
the file in the repository.
"""
if self.SpecVersion == "":
raise NoLatestKnownVersionError
# Fetch the latest spec
latest = self._get_latest_spec()
if latest is None:
raise Exception("unable to get latest version")
return latest["sha"].strip() != self.SpecVersion.strip()
def latest_version(self) -> str:
"""Return the sha digest of the latest version of the spec."""
latest = self._get_latest_spec()
if latest is None or "sha" not in latest:
return ""
return latest["sha"]
def has_known_version(self) -> bool:
"""
Return true if the reference spec object is hard-coded with a latest
known version.
"""
return self.SpecVersion != ""
def write_info(self, info: Dict[str, Dict[str, Any] | str]) -> None:
"""
Write info about the reference specification used into the output
fixture.
"""
info["reference-spec"] = self.name()
info["reference-spec-version"] = self.SpecVersion
@staticmethod
def parseable_from_module(module_dict: Dict[str, Any]) -> bool:
"""Check whether the module contains a git reference spec."""
return "REFERENCE_SPEC_GIT_PATH" in module_dict
@staticmethod
def parse_from_module(
module_dict: Dict[str, Any], github_token: Optional[str] = None
) -> "ReferenceSpec":
"""
Parse the module's dict into a reference spec.
Args:
module_dict: Dictionary containing module information
github_token: Optional GitHub token for API authentication
"""
if "REFERENCE_SPEC_GIT_PATH" not in module_dict:
raise ParseModuleError
spec_path = module_dict["REFERENCE_SPEC_GIT_PATH"]
spec = GitReferenceSpec(SpecPath=spec_path, _github_token=github_token)
if "REFERENCE_SPEC_VERSION" in module_dict:
spec.SpecVersion = module_dict["REFERENCE_SPEC_VERSION"]
return spec
_ = GitReferenceSpec(SpecPath="")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_base_types/reference_spec/__init__.py | src/ethereum_test_base_types/reference_spec/__init__.py | """Reference Spec Classes."""
from typing import Sequence, Type
from .git_reference_spec import GitReferenceSpec
from .reference_spec import ReferenceSpec
ReferenceSpecTypes: Sequence[Type[ReferenceSpec]] = [
GitReferenceSpec,
]
__all__ = ("ReferenceSpec", "ReferenceSpecTypes")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/base_static.py | src/ethereum_test_specs/base_static.py | """
Base class to parse test cases written in static formats.
"""
import re
from abc import abstractmethod
from typing import Any, Callable, ClassVar, Dict, List, Tuple, Type, Union
from pydantic import (
BaseModel,
TypeAdapter,
ValidatorFunctionWrapHandler,
model_validator,
)
from ethereum_test_base_types import Bytes
class BaseStaticTest(BaseModel):
"""Represents a base class that reads cases from static files."""
formats: ClassVar[List[Type["BaseStaticTest"]]] = []
formats_type_adapter: ClassVar[TypeAdapter]
format_name: ClassVar[str] = ""
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
"""
Register all subclasses of BaseStaticTest with a static test format
name set as possible static test format.
"""
if cls.format_name:
# Register the new fixture format
BaseStaticTest.formats.append(cls)
if len(BaseStaticTest.formats) > 1:
BaseStaticTest.formats_type_adapter = TypeAdapter(
Union[tuple(BaseStaticTest.formats)],
)
else:
BaseStaticTest.formats_type_adapter = TypeAdapter(cls)
@model_validator(mode="wrap")
@classmethod
def _parse_into_subclass(
cls, v: Any, handler: ValidatorFunctionWrapHandler
) -> "BaseStaticTest":
"""Parse the static test into the correct subclass."""
if cls is BaseStaticTest:
return BaseStaticTest.formats_type_adapter.validate_python(v)
return handler(v)
@abstractmethod
def fill_function(self) -> Callable:
"""
Return the test function that can be used to fill the test.
This method should be implemented by the subclasses.
The function returned can be optionally decorated with the
`@pytest.mark.parametrize` decorator to parametrize the test with the
number of sub test cases.
Example:
```
@pytest.mark.parametrize("n", [1])
@pytest.mark.parametrize("m", [1, 2])
@pytest.mark.valid_from("Homestead")
def test_state_filler(
state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
n: int,
m: int
):
\"\"\"Generate a test from a static state filler.\"\"\"
assert n == 1
assert m in [1, 2]
env = Environment(**self.env.model_dump())
sender = pre.fund_eoa()
tx = Transaction(
ty=0x0,
nonce=0,
to=Address(0x1000),
gas_limit=500000,
protected=False if fork in [Frontier, Homestead] else True,
data="",
sender=sender,
)
state_test(env=env, pre=pre, post={}, tx=tx)
```
To aid the generation of the test, the function can be defined and then
the decorator be applied after defining the function:
```
def test_state_filler(
state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
n: int,
m: int,
):
...
test_state_filler = pytest.mark.parametrize("n",
[1])(test_state_filler
)
test_state_filler = pytest.mark.parametrize("m",
[1, 2])(test_state_filler
)
if self.valid_from:
test_state_filler = pytest.mark.valid_from(
self.valid_from
)(test_state_filler)
if self.valid_until:
test_state_filler = pytest.mark.valid_until(
self.valid_until
)(test_state_filler)
return test_state_filler
```
The function can contain the following parameters on top of the spec
type parameter (`state_test` in the example above): - `fork`: The fork
for which the test is currently being filled. - `pre`: The pre-state of
the test.
"""
raise NotImplementedError
@staticmethod
def remove_comments(data: Dict) -> Dict:
"""Remove comments from a dictionary."""
result = {}
for k, v in data.items():
if isinstance(k, str) and k.startswith("//"):
continue
if isinstance(v, dict):
v = BaseStaticTest.remove_comments(v)
elif isinstance(v, list):
v = [BaseStaticTest.remove_comments(i) if isinstance(i, dict) else i for i in v]
result[k] = v
return result
@model_validator(mode="before")
@classmethod
def remove_comments_from_model(cls, data: Any) -> Any:
"""Remove comments from the static file loaded, if any."""
if isinstance(data, dict):
return BaseStaticTest.remove_comments(data)
return data
def remove_comments(v: str) -> str:
"""
Split by line and then remove the comments (starting with #) at the end of
each line if any.
"""
return "\n".join([line.split("#")[0].strip() for line in v.splitlines()])
label_matcher = re.compile(r"^:label\s+(\S+)\s*", re.MULTILINE)
raw_matcher = re.compile(r":raw\s+(.*)", re.MULTILINE)
def labeled_bytes_from_string(v: str) -> Tuple[str | None, Bytes]:
"""Parse `:label` and `:raw` from a string."""
v = remove_comments(v)
label: str | None = None
if m := label_matcher.search(v):
label = m.group(1)
v = label_matcher.sub("", v)
m = raw_matcher.match(v.replace("\n", " "))
if not m:
raise Exception(f"Unable to parse container from string: {v}")
strip_string = m.group(1).strip()
return label, Bytes(strip_string)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/blockchain.py | src/ethereum_test_specs/blockchain.py | """Ethereum blockchain test spec definition and filler."""
from pprint import pprint
from typing import Any, Callable, ClassVar, Dict, Generator, List, Sequence, Tuple, Type
import pytest
from pydantic import ConfigDict, Field, field_validator, model_serializer
from ethereum_clis import BlockExceptionWithMessage, Result, TransitionTool
from ethereum_test_base_types import (
Address,
Bloom,
Bytes,
CamelModel,
Hash,
HeaderNonce,
HexNumber,
Number,
)
from ethereum_test_exceptions import (
BlockException,
EngineAPIError,
ExceptionWithMessage,
TransactionException,
UndefinedException,
)
from ethereum_test_execution import (
BaseExecute,
ExecuteFormat,
LabeledExecuteFormat,
TransactionPost,
)
from ethereum_test_fixtures import (
BaseFixture,
BlockchainEngineFixture,
BlockchainEngineSyncFixture,
BlockchainEngineXFixture,
BlockchainFixture,
FixtureFormat,
LabeledFixtureFormat,
)
from ethereum_test_fixtures.blockchain import (
FixtureBlock,
FixtureBlockBase,
FixtureConfig,
FixtureEngineNewPayload,
FixtureHeader,
FixtureTransaction,
FixtureWithdrawal,
InvalidFixtureBlock,
)
from ethereum_test_fixtures.common import FixtureBlobSchedule
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc, Environment, Removable, Requests, Transaction, Withdrawal
from ethereum_test_types.block_access_list import BlockAccessList, BlockAccessListExpectation
from .base import BaseTest, OpMode, verify_result
from .debugging import print_traces
from .helpers import verify_block, verify_transactions
def environment_from_parent_header(parent: "FixtureHeader") -> "Environment":
"""Instantiate new environment with the provided header as parent."""
return Environment(
parent_difficulty=parent.difficulty,
parent_timestamp=parent.timestamp,
parent_base_fee_per_gas=parent.base_fee_per_gas,
parent_blob_gas_used=parent.blob_gas_used,
parent_excess_blob_gas=parent.excess_blob_gas,
parent_gas_used=parent.gas_used,
parent_gas_limit=parent.gas_limit,
parent_ommers_hash=parent.ommers_hash,
block_hashes={parent.number: parent.block_hash},
)
def apply_new_parent(env: Environment, new_parent: FixtureHeader) -> "Environment":
"""Apply header as parent to a copy of this environment."""
updated: Dict[str, Any] = {}
updated["parent_difficulty"] = new_parent.difficulty
updated["parent_timestamp"] = new_parent.timestamp
updated["parent_base_fee_per_gas"] = new_parent.base_fee_per_gas
updated["parent_blob_gas_used"] = new_parent.blob_gas_used
updated["parent_excess_blob_gas"] = new_parent.excess_blob_gas
updated["parent_gas_used"] = new_parent.gas_used
updated["parent_gas_limit"] = new_parent.gas_limit
updated["parent_ommers_hash"] = new_parent.ommers_hash
block_hashes = env.block_hashes.copy()
block_hashes[new_parent.number] = new_parent.block_hash
updated["block_hashes"] = block_hashes
return env.copy(**updated)
def count_blobs(txs: List[Transaction]) -> int:
"""Return number of blobs in a list of transactions."""
return sum(
[len(tx.blob_versioned_hashes) for tx in txs if tx.blob_versioned_hashes is not None]
)
class Header(CamelModel):
"""Header type used to describe block header properties in test specs."""
parent_hash: Hash | None = None
ommers_hash: Hash | None = None
fee_recipient: Address | None = None
state_root: Hash | None = None
transactions_trie: Hash | None = None
receipts_root: Hash | None = None
logs_bloom: Bloom | None = None
difficulty: HexNumber | None = None
number: HexNumber | None = None
gas_limit: HexNumber | None = None
gas_used: HexNumber | None = None
timestamp: HexNumber | None = None
extra_data: Bytes | None = None
prev_randao: Hash | None = None
nonce: HeaderNonce | None = None
base_fee_per_gas: Removable | HexNumber | None = None
withdrawals_root: Removable | Hash | None = None
blob_gas_used: Removable | HexNumber | None = None
excess_blob_gas: Removable | HexNumber | None = None
parent_beacon_block_root: Removable | Hash | None = None
requests_hash: Removable | Hash | None = None
bal_hash: Removable | Hash | None = None
REMOVE_FIELD: ClassVar[Removable] = Removable()
"""
Sentinel object used to specify that a header field should be removed.
"""
EMPTY_FIELD: ClassVar[Removable] = Removable()
"""
Sentinel object used to specify that a header field must be empty during
verification.
This can be used in a test to explicitly skip a field in a block's RLP
encoding. included in the (json) output when the model is serialized. For
example: ``` header_modifier = Header( excess_blob_gas=Header.REMOVE_FIELD,
) block = Block( timestamp=TIMESTAMP, rlp_modifier=header_modifier,
exception=BlockException.INCORRECT_BLOCK_FORMAT,
engine_api_error_code=EngineAPIError.InvalidParams, ) ```
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
@model_serializer(mode="wrap", when_used="json")
def _serialize_model(self, serializer: Any, info: Any) -> Dict[str, Any]:
"""Exclude Removable fields from serialization."""
del info
data = serializer(self)
return {k: v for k, v in data.items() if not isinstance(v, Removable)}
@field_validator("withdrawals_root", mode="before")
@classmethod
def validate_withdrawals_root(cls, value: Any) -> Any:
"""Convert a list of withdrawals into the withdrawals root hash."""
if isinstance(value, list):
return Withdrawal.list_root(value)
return value
def apply(self, target: FixtureHeader) -> FixtureHeader:
"""
Produce a fixture header copy with the set values from the modifier.
"""
return target.copy(
**{
k: (v if v is not Header.REMOVE_FIELD else None)
for k, v in self.model_dump(exclude_none=True).items()
}
)
def verify(self, target: FixtureHeader) -> None:
"""Verify that the header fields from self are as expected."""
for field_name in self.__class__.model_fields:
baseline_value = getattr(self, field_name)
if baseline_value is not None:
assert baseline_value is not Header.REMOVE_FIELD, "invalid header"
value = getattr(target, field_name)
if baseline_value is Header.EMPTY_FIELD:
assert value is None, (
f"invalid header field {field_name}, got {value}, want None"
)
continue
assert value == baseline_value, (
f"invalid header field ({field_name}) value, "
+ f"got {value}, want {baseline_value}"
)
BLOCK_EXCEPTION_TYPE = (
List[TransactionException | BlockException] | TransactionException | BlockException | None
)
class Block(Header):
"""Block type used to describe block properties in test specs."""
header_verify: Header | None = None
# If set, the block header will be verified against the specified values.
rlp_modifier: Header | None = None
"""
An RLP modifying header which values would be used to override the ones
returned by the `ethereum_clis.TransitionTool`.
"""
expected_block_access_list: BlockAccessListExpectation | None = None
"""
If set, the block access list will be verified and potentially corrupted
for invalid tests.
"""
exception: BLOCK_EXCEPTION_TYPE = None
# If set, the block is expected to be rejected by the client.
skip_exception_verification: bool = False
"""
Skip verifying that the exception is returned by the transition tool. This
could be because the exception is inserted in the block after the
transition tool evaluates it.
"""
engine_api_error_code: EngineAPIError | None = None
"""
If set, the block is expected to produce an error response from the Engine
API.
"""
txs: List[Transaction] = Field(default_factory=list)
"""List of transactions included in the block."""
ommers: List[Header] | None = None
"""List of ommer headers included in the block."""
withdrawals: List[Withdrawal] | None = None
"""List of withdrawals to perform for this block."""
requests: List[Bytes] | None = None
"""Custom list of requests to embed in this block."""
expected_post_state: Alloc | None = None
"""Post state for verification after block execution in BlockchainTest"""
block_access_list: Bytes | None = Field(None)
"""EIP-7928: Block-level access lists (serialized)."""
def set_environment(self, env: Environment) -> Environment:
"""
Create copy of the environment with the characteristics of this
specific block.
"""
new_env_values: Dict[str, Any] = {}
"""
Values that need to be set in the environment and are `None` for this
block need to be set to their defaults.
"""
new_env_values["difficulty"] = self.difficulty
new_env_values["prev_randao"] = self.prev_randao
new_env_values["fee_recipient"] = (
self.fee_recipient if self.fee_recipient is not None else Environment().fee_recipient
)
new_env_values["gas_limit"] = (
self.gas_limit or env.parent_gas_limit or Environment().gas_limit
)
if not isinstance(self.base_fee_per_gas, Removable):
new_env_values["base_fee_per_gas"] = self.base_fee_per_gas
new_env_values["withdrawals"] = self.withdrawals
if not isinstance(self.excess_blob_gas, Removable):
new_env_values["excess_blob_gas"] = self.excess_blob_gas
if not isinstance(self.blob_gas_used, Removable):
new_env_values["blob_gas_used"] = self.blob_gas_used
if not isinstance(self.parent_beacon_block_root, Removable):
new_env_values["parent_beacon_block_root"] = self.parent_beacon_block_root
if not isinstance(self.requests_hash, Removable) and self.block_access_list is not None:
new_env_values["bal_hash"] = self.block_access_list.keccak256()
new_env_values["block_access_list"] = self.block_access_list
if (
not isinstance(self.block_access_list, Removable)
and self.block_access_list is not None
):
new_env_values["block_access_list"] = self.block_access_list
"""
These values are required, but they depend on the previous environment,
so they can be calculated here.
"""
if self.number is not None:
new_env_values["number"] = self.number
else:
# calculate the next block number for the environment
if len(env.block_hashes) == 0:
new_env_values["number"] = 0
else:
new_env_values["number"] = max([Number(n) for n in env.block_hashes.keys()]) + 1
if self.timestamp is not None:
new_env_values["timestamp"] = self.timestamp
else:
assert env.parent_timestamp is not None
new_env_values["timestamp"] = int(Number(env.parent_timestamp) + 12)
return env.copy(**new_env_values)
class BuiltBlock(CamelModel):
"""Model that contains all properties to build a full block or payload."""
header: FixtureHeader
env: Environment
alloc: Alloc
txs: List[Transaction]
ommers: List[FixtureHeader]
withdrawals: List[Withdrawal] | None
requests: List[Bytes] | None
result: Result
expected_exception: BLOCK_EXCEPTION_TYPE = None
engine_api_error_code: EngineAPIError | None = None
fork: Fork
block_access_list: BlockAccessList | None
def get_fixture_block(self) -> FixtureBlock | InvalidFixtureBlock:
"""Get a FixtureBlockBase from the built block."""
fixture_block = FixtureBlockBase(
header=self.header,
txs=[FixtureTransaction.from_transaction(tx) for tx in self.txs],
withdrawals=(
[FixtureWithdrawal.from_withdrawal(w) for w in self.withdrawals]
if self.withdrawals is not None
else None
),
block_access_list=self.block_access_list if self.block_access_list else None,
fork=self.fork,
).with_rlp(txs=self.txs)
if self.expected_exception is not None:
return InvalidFixtureBlock(
rlp=fixture_block.rlp,
expect_exception=self.expected_exception,
rlp_decoded=(
None
if BlockException.RLP_STRUCTURES_ENCODING in self.expected_exception
else fixture_block.without_rlp()
),
)
return fixture_block
def get_block_rlp(self) -> Bytes:
"""Get the RLP of the block."""
return self.get_fixture_block().rlp
def get_fixture_engine_new_payload(self) -> FixtureEngineNewPayload:
"""Get a FixtureEngineNewPayload from the built block."""
return FixtureEngineNewPayload.from_fixture_header(
fork=self.fork,
header=self.header,
transactions=self.txs,
withdrawals=self.withdrawals,
requests=self.requests,
block_access_list=self.block_access_list.rlp if self.block_access_list else None,
validation_error=self.expected_exception,
error_code=self.engine_api_error_code,
)
def verify_transactions(self, transition_tool_exceptions_reliable: bool) -> List[int]:
"""Verify the transactions."""
return verify_transactions(
txs=self.txs,
result=self.result,
transition_tool_exceptions_reliable=transition_tool_exceptions_reliable,
)
def verify_block_exception(self, transition_tool_exceptions_reliable: bool) -> None:
"""Verify the block exception."""
got_exception: ExceptionWithMessage | UndefinedException | None = (
self.result.block_exception
)
# Verify exceptions that are not caught by the transition tool.
fork_block_rlp_size_limit = self.fork.block_rlp_size_limit(
block_number=self.env.number,
timestamp=self.env.timestamp,
)
if fork_block_rlp_size_limit is not None:
rlp_size = len(self.get_block_rlp())
if rlp_size > fork_block_rlp_size_limit:
got_exception = BlockExceptionWithMessage(
exceptions=[BlockException.RLP_BLOCK_LIMIT_EXCEEDED],
message=f"Block RLP size limit exceeded: {rlp_size} > "
f"{fork_block_rlp_size_limit}",
)
verify_block(
block_number=self.env.number,
want_exception=self.expected_exception,
got_exception=got_exception,
transition_tool_exceptions_reliable=transition_tool_exceptions_reliable,
)
GENESIS_ENVIRONMENT_DEFAULTS: Dict[str, Any] = {
"fee_recipient": 0,
"number": 0,
"timestamp": 0,
"extra_data": b"\x00",
"prev_randao": 0,
}
"""
Default values for the genesis environment that are used to create all genesis
headers.
"""
class BlockchainTest(BaseTest):
"""Filler type that tests multiple blocks (valid or invalid) in a chain."""
pre: Alloc
post: Alloc
blocks: List[Block]
genesis_environment: Environment = Field(default_factory=Environment)
chain_id: int = 1
exclude_full_post_state_in_output: bool = False
"""
Exclude the post state from the fixture output. In this case, the state
verification is only performed based on the state root.
"""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
BlockchainFixture,
BlockchainEngineFixture,
BlockchainEngineXFixture,
BlockchainEngineSyncFixture,
]
supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
LabeledExecuteFormat(
TransactionPost,
"blockchain_test",
"An execute test derived from a blockchain test",
),
]
supported_markers: ClassVar[Dict[str, str]] = {
"blockchain_test_engine_only": "Only generate a blockchain test engine fixture",
"blockchain_test_only": "Only generate a blockchain test fixture",
}
@classmethod
def discard_fixture_format_by_marks(
cls,
fixture_format: FixtureFormat,
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
"""
Discard a fixture format from filling if the appropriate marker is
used.
"""
del fork
marker_names = [m.name for m in markers]
if fixture_format != BlockchainFixture and "blockchain_test_only" in marker_names:
return True
if (
fixture_format not in [BlockchainEngineFixture, BlockchainEngineXFixture]
and "blockchain_test_engine_only" in marker_names
):
return True
return False
def get_genesis_environment(self, fork: Fork) -> Environment:
"""Get the genesis environment for pre-allocation groups."""
modified_values = self.genesis_environment.set_fork_requirements(fork).model_dump(
exclude_unset=True
)
return Environment(**(GENESIS_ENVIRONMENT_DEFAULTS | modified_values))
def make_genesis(
self, *, fork: Fork, apply_pre_allocation_blockchain: bool
) -> Tuple[Alloc, FixtureBlock]:
"""Create a genesis block from the blockchain test definition."""
env = self.get_genesis_environment(fork)
assert env.withdrawals is None or len(env.withdrawals) == 0, (
"withdrawals must be empty at genesis"
)
assert env.parent_beacon_block_root is None or env.parent_beacon_block_root == Hash(0), (
"parent_beacon_block_root must be empty at genesis"
)
pre_alloc = self.pre
if apply_pre_allocation_blockchain:
pre_alloc = Alloc.merge(
Alloc.model_validate(fork.pre_allocation_blockchain()),
pre_alloc,
)
if empty_accounts := pre_alloc.empty_accounts():
raise Exception(f"Empty accounts in pre state: {empty_accounts}")
state_root = pre_alloc.state_root()
genesis = FixtureHeader.genesis(fork, env, state_root)
return (
pre_alloc,
FixtureBlockBase(
header=genesis,
withdrawals=None if env.withdrawals is None else [],
).with_rlp(txs=[]),
)
def generate_block_data(
self,
t8n: TransitionTool,
fork: Fork,
block: Block,
previous_env: Environment,
previous_alloc: Alloc,
last_block: bool,
) -> BuiltBlock:
"""
Generate common block data for both make_fixture and make_hive_fixture.
"""
env = block.set_environment(previous_env)
env = env.set_fork_requirements(fork)
txs = [tx.with_signature_and_sender() for tx in block.txs]
if failing_tx_count := len([tx for tx in txs if tx.error]) > 0:
if failing_tx_count > 1:
raise Exception(
"test correctness: only one transaction can produce an exception in a block"
)
if not txs[-1].error:
raise Exception(
"test correctness: the transaction that produces an exception "
+ "must be the last transaction in the block"
)
transition_tool_output = t8n.evaluate(
transition_tool_data=TransitionTool.TransitionToolData(
alloc=previous_alloc,
txs=txs,
env=env,
fork=fork,
chain_id=self.chain_id,
reward=fork.get_reward(block_number=env.number, timestamp=env.timestamp),
blob_schedule=fork.blob_schedule(),
),
debug_output_path=self.get_next_transition_tool_output_path(),
slow_request=self.is_tx_gas_heavy_test(),
)
if transition_tool_output.result.opcode_count is not None:
if self._opcode_count is None:
self._opcode_count = transition_tool_output.result.opcode_count
else:
self._opcode_count += transition_tool_output.result.opcode_count
# One special case of the invalid transactions is the blob gas used,
# since this value is not included in the transition tool result, but
# it is included in the block header, and some clients check it before
# executing the block by simply counting the type-3 txs, we need to set
# the correct value by default.
blob_gas_used: int | None = None
if (
blob_gas_per_blob := fork.blob_gas_per_blob(
block_number=env.number, timestamp=env.timestamp
)
) > 0:
blob_gas_used = blob_gas_per_blob * count_blobs(txs)
header = FixtureHeader(
**(
transition_tool_output.result.model_dump(
exclude_none=True, exclude={"blob_gas_used", "transactions_trie"}
)
| env.model_dump(exclude_none=True, exclude={"blob_gas_used"})
),
blob_gas_used=blob_gas_used,
transactions_trie=Transaction.list_root(txs),
extra_data=block.extra_data if block.extra_data is not None else b"",
fork=fork,
)
if block.header_verify is not None:
# Verify the header after transition tool processing.
try:
block.header_verify.verify(header)
except Exception as e:
raise Exception(f"Verification of block {int(env.number)} failed") from e
if last_block and self._operation_mode == OpMode.BENCHMARKING:
expected_benchmark_gas_used = self.expected_benchmark_gas_used
assert expected_benchmark_gas_used is not None, (
"expected_benchmark_gas_used is not set"
)
gas_used = int(transition_tool_output.result.gas_used)
if not self.skip_gas_used_validation:
assert gas_used == expected_benchmark_gas_used, (
f"gas_used ({gas_used}) does not match expected_benchmark_gas_used "
f"({expected_benchmark_gas_used})"
f", difference: {gas_used - expected_benchmark_gas_used}"
)
requests_list: List[Bytes] | None = None
if fork.header_requests_required(block_number=header.number, timestamp=header.timestamp):
assert transition_tool_output.result.requests is not None, (
"Requests are required for this block"
)
requests = Requests(requests_lists=list(transition_tool_output.result.requests))
if Hash(requests) != header.requests_hash:
raise Exception(
"Requests root in header does not match the requests root in the transition "
"tool output: "
f"{header.requests_hash} != {Hash(requests)}"
)
requests_list = requests.requests_list
if block.requests is not None:
header.requests_hash = Hash(Requests(requests_lists=list(block.requests)))
requests_list = block.requests
if fork.header_bal_hash_required(block_number=header.number, timestamp=header.timestamp):
assert transition_tool_output.result.block_access_list is not None, (
"Block access list is required for this block but was not provided "
"by the transition tool"
)
rlp = transition_tool_output.result.block_access_list.rlp
computed_bal_hash = Hash(rlp.keccak256())
assert computed_bal_hash == header.block_access_list_hash, (
"Block access list hash in header does not match the "
f"computed hash from BAL: {header.block_access_list_hash} "
f"!= {computed_bal_hash}"
)
if block.rlp_modifier is not None:
# Modify any parameter specified in the `rlp_modifier` after
# transition tool processing.
header = block.rlp_modifier.apply(header)
header.fork = fork # Deleted during `apply` because `exclude=True`
# Process block access list - apply transformer if present for invalid
# tests
t8n_bal = transition_tool_output.result.block_access_list
bal = t8n_bal
if block.expected_block_access_list is not None and t8n_bal is not None:
block.expected_block_access_list.verify_against(t8n_bal)
bal = block.expected_block_access_list.modify_if_invalid_test(t8n_bal)
if bal != t8n_bal:
# If the BAL was modified, update the header hash
header.block_access_list_hash = Hash(bal.rlp.keccak256())
built_block = BuiltBlock(
header=header,
alloc=transition_tool_output.alloc,
env=env,
txs=txs,
ommers=[],
withdrawals=env.withdrawals,
requests=requests_list,
result=transition_tool_output.result,
expected_exception=block.exception,
engine_api_error_code=block.engine_api_error_code,
fork=fork,
block_access_list=bal,
)
try:
rejected_txs = built_block.verify_transactions(
transition_tool_exceptions_reliable=t8n.exception_mapper.reliable,
)
if (
not rejected_txs
and block.rlp_modifier is None
and block.requests is None
and not block.skip_exception_verification
and not (
block.expected_block_access_list is not None
and block.expected_block_access_list._modifier is not None
)
):
# Only verify block level exception if: - No transaction
# exception was raised, because these are not reported as block
# exceptions. - No RLP modifier was specified, because the
# modifier is what normally produces the block exception. - No
# requests were specified, because modified requests are also
# what normally produces the block exception. - No BAL modifier
# was specified, because modified BAL also produces block
# exceptions.
built_block.verify_block_exception(
transition_tool_exceptions_reliable=t8n.exception_mapper.reliable,
)
verify_result(transition_tool_output.result, env)
except Exception as e:
print_traces(t8n.get_traces())
pprint(transition_tool_output.result)
pprint(previous_alloc)
pprint(transition_tool_output.alloc)
raise e
if len(rejected_txs) > 0 and block.exception is None:
print_traces(t8n.get_traces())
raise Exception(
"one or more transactions in `BlockchainTest` are "
+ "intrinsically invalid, but the block was not expected "
+ "to be invalid. Please verify whether the transaction "
+ "was indeed expected to fail and add the proper "
+ "`block.exception`"
)
return built_block
def verify_post_state(
self, t8n: TransitionTool, t8n_state: Alloc, expected_state: Alloc | None = None
) -> None:
"""Verify post alloc after all block/s or payload/s are generated."""
try:
if expected_state:
expected_state.verify_post_alloc(t8n_state)
else:
self.post.verify_post_alloc(t8n_state)
except Exception as e:
print_traces(t8n.get_traces())
raise e
def make_fixture(
self,
t8n: TransitionTool,
fork: Fork,
) -> BlockchainFixture:
"""Create a fixture from the blockchain test definition."""
fixture_blocks: List[FixtureBlock | InvalidFixtureBlock] = []
pre, genesis = self.make_genesis(fork=fork, apply_pre_allocation_blockchain=True)
alloc = pre
env = environment_from_parent_header(genesis.header)
head = genesis.header.block_hash
invalid_blocks = 0
for i, block in enumerate(self.blocks):
# This is the most common case, the RLP needs to be constructed
# based on the transactions to be included in the block.
# Set the environment according to the block to execute.
built_block = self.generate_block_data(
t8n=t8n,
fork=fork,
block=block,
previous_env=env,
previous_alloc=alloc,
last_block=i == len(self.blocks) - 1,
)
fixture_blocks.append(built_block.get_fixture_block())
# BAL verification already done in to_fixture_bal() if
# expected_block_access_list set
if block.exception is None:
# Update env, alloc and last block hash for the next block.
alloc = built_block.alloc
env = apply_new_parent(built_block.env, built_block.header)
head = built_block.header.block_hash
else:
invalid_blocks += 1
if block.expected_post_state:
self.verify_post_state(
t8n, t8n_state=alloc, expected_state=block.expected_post_state
)
self.check_exception_test(exception=invalid_blocks > 0)
self.verify_post_state(t8n, t8n_state=alloc)
info = {}
if self._opcode_count is not None:
info["opcode_count"] = self._opcode_count.model_dump()
return BlockchainFixture(
fork=fork,
genesis=genesis.header,
genesis_rlp=genesis.rlp,
blocks=fixture_blocks,
last_block_hash=head,
pre=pre,
post_state=alloc if not self.exclude_full_post_state_in_output else None,
post_state_hash=alloc.state_root() if self.exclude_full_post_state_in_output else None,
config=FixtureConfig(
fork=fork,
blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork.blob_schedule()),
chain_id=self.chain_id,
),
info=info,
)
def make_hive_fixture(
self,
t8n: TransitionTool,
fork: Fork,
fixture_format: FixtureFormat = BlockchainEngineFixture,
) -> BlockchainEngineFixture | BlockchainEngineXFixture | BlockchainEngineSyncFixture:
"""Create a hive fixture from the blocktest definition."""
fixture_payloads: List[FixtureEngineNewPayload] = []
pre, genesis = self.make_genesis(
fork=fork,
apply_pre_allocation_blockchain=fixture_format != BlockchainEngineXFixture,
)
alloc = pre
env = environment_from_parent_header(genesis.header)
head_hash = genesis.header.block_hash
invalid_blocks = 0
for i, block in enumerate(self.blocks):
built_block = self.generate_block_data(
t8n=t8n,
fork=fork,
block=block,
previous_env=env,
previous_alloc=alloc,
last_block=i == len(self.blocks) - 1,
)
fixture_payloads.append(built_block.get_fixture_engine_new_payload())
if block.exception is None:
alloc = built_block.alloc
env = apply_new_parent(built_block.env, built_block.header)
head_hash = built_block.header.block_hash
else:
invalid_blocks += 1
if block.expected_post_state:
self.verify_post_state(
t8n, t8n_state=alloc, expected_state=block.expected_post_state
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/state.py | src/ethereum_test_specs/state.py | """Ethereum state test spec definition and filler."""
from pprint import pprint
from typing import Any, Callable, ClassVar, Dict, Generator, List, Optional, Sequence, Type
import pytest
from pydantic import Field
from ethereum_clis import TransitionTool, TransitionToolOutput
from ethereum_test_base_types import HexNumber
from ethereum_test_exceptions import BlockException, EngineAPIError, TransactionException
from ethereum_test_execution import (
BaseExecute,
ExecuteFormat,
LabeledExecuteFormat,
TransactionPost,
)
from ethereum_test_fixtures import (
BaseFixture,
FixtureFormat,
LabeledFixtureFormat,
StateFixture,
)
from ethereum_test_fixtures.common import FixtureBlobSchedule
from ethereum_test_fixtures.state import (
FixtureConfig,
FixtureEnvironment,
FixtureForkPost,
FixtureTransaction,
)
from ethereum_test_forks import Fork
from ethereum_test_types import (
Alloc,
BlockAccessListExpectation,
Environment,
Transaction,
)
from pytest_plugins.custom_logging import get_logger
from .base import BaseTest, OpMode
from .blockchain import Block, BlockchainTest, Header
from .debugging import print_traces
from .helpers import verify_transactions
logger = get_logger(__name__)
class StateTest(BaseTest):
"""
Filler type that tests transactions over the period of a single block.
"""
env: Environment = Field(default_factory=Environment)
pre: Alloc
post: Alloc
tx: Transaction
block_exception: (
List[TransactionException | BlockException] | TransactionException | BlockException | None
) = None
engine_api_error_code: Optional[EngineAPIError] = None
blockchain_test_header_verify: Optional[Header] = None
blockchain_test_rlp_modifier: Optional[Header] = None
expected_block_access_list: Optional[BlockAccessListExpectation] = None
chain_id: int = 1
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
StateFixture,
] + [
LabeledFixtureFormat(
fixture_format,
f"{fixture_format.format_name}_from_state_test",
f"A {fixture_format.format_name} generated from a state_test",
)
for fixture_format in BlockchainTest.supported_fixture_formats
# Exclude sync fixtures from state tests - they don't make sense for
# state tests
if not (
(hasattr(fixture_format, "__name__") and "Sync" in fixture_format.__name__)
or (hasattr(fixture_format, "format") and "Sync" in fixture_format.format.__name__)
)
]
supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
LabeledExecuteFormat(
TransactionPost,
"state_test",
"An execute test derived from a state test",
),
]
supported_markers: ClassVar[Dict[str, str]] = {
"state_test_only": "Only generate a state test fixture",
}
def verify_modified_gas_limit(
self,
*,
t8n: TransitionTool,
base_tool_output: TransitionToolOutput,
fork: Fork,
current_gas_limit: int,
pre_alloc: Alloc,
env: Environment,
enable_post_processing: bool,
) -> bool:
"""Verify a new lower gas limit yields the same transaction outcome."""
base_traces = base_tool_output.result.traces
assert base_traces is not None, "Traces not collected for gas optimization"
new_tx = self.tx.copy(gas_limit=current_gas_limit).with_signature_and_sender()
modified_tool_output = t8n.evaluate(
transition_tool_data=TransitionTool.TransitionToolData(
alloc=pre_alloc,
txs=[new_tx],
env=env,
fork=fork,
chain_id=self.chain_id,
reward=0, # Reward on state tests is always zero
blob_schedule=fork.blob_schedule(),
state_test=True,
),
debug_output_path=self.get_next_transition_tool_output_path(),
slow_request=self.is_tx_gas_heavy_test(),
)
modified_traces = modified_tool_output.result.traces
assert modified_traces is not None, "Traces not collected for gas optimization"
if not base_traces.are_equivalent(
modified_tool_output.result.traces,
enable_post_processing,
):
logger.debug(f"Traces are not equivalent (gas_limit={current_gas_limit})")
return False
try:
self.post.verify_post_alloc(modified_tool_output.alloc)
except Exception as e:
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
logger.debug(e)
return False
try:
verify_transactions(
txs=[new_tx],
result=modified_tool_output.result,
transition_tool_exceptions_reliable=t8n.exception_mapper.reliable,
)
except Exception as e:
logger.debug(f"Transactions are not equivalent (gas_limit={current_gas_limit})")
logger.debug(e)
return False
if len(base_tool_output.alloc.root) != len(modified_tool_output.alloc.root):
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
return False
if modified_tool_output.alloc.root.keys() != modified_tool_output.alloc.root.keys():
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
return False
for k in base_tool_output.alloc.root.keys():
if k not in modified_tool_output.alloc:
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
return False
base_account = base_tool_output.alloc[k]
modified_account = modified_tool_output.alloc[k]
if (modified_account is None) != (base_account is None):
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
return False
if (
modified_account is not None
and base_account is not None
and base_account.nonce != modified_account.nonce
):
logger.debug(f"Post alloc is not equivalent (gas_limit={current_gas_limit})")
return False
logger.debug(f"Gas limit is equivalent (gas_limit={current_gas_limit})")
return True
@classmethod
def discard_fixture_format_by_marks(
cls,
fixture_format: FixtureFormat,
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
"""
Discard a fixture format from filling if the appropriate marker is
used.
"""
del fork
if "state_test_only" in [m.name for m in markers]:
return fixture_format != StateFixture
return False
def _generate_blockchain_genesis_environment(self, *, fork: Fork) -> Environment:
"""
Generate the genesis environment for the BlockchainTest formatted test.
"""
assert self.env.number >= 1, (
"genesis block number cannot be negative, set state test env.number to at least 1"
)
assert self.env.timestamp >= 1, (
"genesis timestamp cannot be negative, set state test env.timestamp to at least 1"
)
# There's only a handful of values that we need to set in the genesis
# for the environment values at block 1 to make sense:
# - Number: Needs to be N minus 1
# - Timestamp: Needs to be zero, because the subsequent
# block can come at any time.
# - Gas Limit: Changes from parent to child, needs to be set in genesis
# - Base Fee Per Gas: Block's base fee depends on the parent's value
# - Excess Blob Gas: Block's excess blob gas value depends on
# the parent's value
kwargs: Dict[str, Any] = {
"number": self.env.number - 1,
"timestamp": 0,
}
if "gas_limit" in self.env.model_fields_set:
kwargs["gas_limit"] = self.env.gas_limit
if self.env.base_fee_per_gas:
# Calculate genesis base fee per gas from state test's block#1 env
kwargs["base_fee_per_gas"] = HexNumber(
int(int(str(self.env.base_fee_per_gas), 0) * 8 / 7)
)
if self.env.excess_blob_gas:
# The excess blob gas environment value means the value of the
# context (block header) where the transaction is executed. In a
# blockchain test, we need to indirectly set the excess blob gas by
# setting the excess blob gas of the genesis block to the expected
# value plus the TARGET_BLOB_GAS_PER_BLOCK, which is the value that
# will be subtracted from the excess blob gas when the first block
# is mined.
kwargs["excess_blob_gas"] = self.env.excess_blob_gas + (
fork.target_blobs_per_block() * fork.blob_gas_per_blob()
)
return Environment(**kwargs)
    def _generate_blockchain_blocks(self, *, fork: Fork) -> List[Block]:
        """
        Generate the single block that represents this state test in a
        BlockchainTest format.
        """
        # Carry the state test's environment values into the test block.
        kwargs = {
            "number": self.env.number,
            "timestamp": self.env.timestamp,
            "prev_randao": self.env.prev_randao,
            "fee_recipient": self.env.fee_recipient,
            "gas_limit": self.env.gas_limit,
            "extra_data": self.env.extra_data,
            "withdrawals": self.env.withdrawals,
            "parent_beacon_block_root": self.env.parent_beacon_block_root,
            "txs": [self.tx],
            "ommers": [],
            "header_verify": self.blockchain_test_header_verify,
            "rlp_modifier": self.blockchain_test_rlp_modifier,
            "expected_block_access_list": self.expected_block_access_list,
        }
        # Difficulty is only set on forks that do not require `prev_randao`
        # in the header (presumably pre-merge forks — TODO confirm).
        if not fork.header_prev_randao_required():
            kwargs["difficulty"] = self.env.difficulty
        # An explicitly-set block exception takes precedence over the
        # transaction's own expected error.
        if "block_exception" in self.model_fields_set:
            kwargs["exception"] = self.block_exception  # type: ignore
        elif "error" in self.tx.model_fields_set:
            kwargs["exception"] = self.tx.error  # type: ignore
        return [Block(**kwargs)]
def generate_blockchain_test(self, *, fork: Fork) -> BlockchainTest:
"""Generate a BlockchainTest fixture from this StateTest fixture."""
return BlockchainTest.from_test(
base_test=self,
genesis_environment=self._generate_blockchain_genesis_environment(fork=fork),
pre=self.pre,
post=self.post,
blocks=self._generate_blockchain_blocks(fork=fork),
)
def make_state_test_fixture(
self,
t8n: TransitionTool,
fork: Fork,
) -> StateFixture:
"""Create a fixture from the state test definition."""
# We can't generate a state test fixture that names a transition fork,
# so we get the fork at the block number and timestamp of the state
# test
fork = fork.fork_at(block_number=self.env.number, timestamp=self.env.timestamp)
env = self.env.set_fork_requirements(fork)
tx = self.tx.with_signature_and_sender(keep_secret_key=True)
pre_alloc = Alloc.merge(
Alloc.model_validate(fork.pre_allocation()),
self.pre,
)
if empty_accounts := pre_alloc.empty_accounts():
raise Exception(f"Empty accounts in pre state: {empty_accounts}")
transition_tool_output = t8n.evaluate(
transition_tool_data=TransitionTool.TransitionToolData(
alloc=pre_alloc,
txs=[tx],
env=env,
fork=fork,
chain_id=self.chain_id,
reward=0, # Reward on state tests is always zero
blob_schedule=fork.blob_schedule(),
state_test=True,
),
debug_output_path=self.get_next_transition_tool_output_path(),
slow_request=self.is_tx_gas_heavy_test(),
)
try:
self.post.verify_post_alloc(transition_tool_output.alloc)
except Exception as e:
print_traces(t8n.get_traces())
raise e
try:
verify_transactions(
txs=[tx],
result=transition_tool_output.result,
transition_tool_exceptions_reliable=t8n.exception_mapper.reliable,
)
except Exception as e:
print_traces(t8n.get_traces())
pprint(transition_tool_output.result)
pprint(transition_tool_output.alloc)
raise e
if (
self._operation_mode == OpMode.OPTIMIZE_GAS
or self._operation_mode == OpMode.OPTIMIZE_GAS_POST_PROCESSING
):
enable_post_processing = self._operation_mode == OpMode.OPTIMIZE_GAS_POST_PROCESSING
base_tool_output = transition_tool_output
assert base_tool_output.result.traces is not None, "Traces not found."
# First try reducing the gas limit only by one, if the validation
# fails, it means that the traces change even with the slightest
# modification to the gas.
if self.verify_modified_gas_limit(
t8n=t8n,
base_tool_output=base_tool_output,
fork=fork,
current_gas_limit=self.tx.gas_limit - 1,
pre_alloc=pre_alloc,
env=env,
enable_post_processing=enable_post_processing,
):
minimum_gas_limit = 0
maximum_gas_limit = int(self.tx.gas_limit)
while minimum_gas_limit < maximum_gas_limit:
current_gas_limit = (maximum_gas_limit + minimum_gas_limit) // 2
if self.verify_modified_gas_limit(
t8n=t8n,
base_tool_output=base_tool_output,
fork=fork,
current_gas_limit=current_gas_limit,
pre_alloc=pre_alloc,
env=env,
enable_post_processing=enable_post_processing,
):
maximum_gas_limit = current_gas_limit
else:
minimum_gas_limit = current_gas_limit + 1
if (
self._gas_optimization_max_gas_limit is not None
and minimum_gas_limit > self._gas_optimization_max_gas_limit
):
raise Exception(
"Requires more than the minimum "
f"{self._gas_optimization_max_gas_limit} wanted."
)
assert self.verify_modified_gas_limit(
t8n=t8n,
base_tool_output=base_tool_output,
fork=fork,
current_gas_limit=minimum_gas_limit,
pre_alloc=pre_alloc,
env=env,
enable_post_processing=enable_post_processing,
)
self._gas_optimization = current_gas_limit
else:
raise Exception("Impossible to compare.")
if self._operation_mode == OpMode.BENCHMARKING:
expected_benchmark_gas_used = self.expected_benchmark_gas_used
assert expected_benchmark_gas_used is not None, (
"expected_benchmark_gas_used is not set"
)
gas_used = int(transition_tool_output.result.gas_used)
if not self.skip_gas_used_validation:
assert gas_used == expected_benchmark_gas_used, (
f"gas_used ({gas_used}) does not match expected_benchmark_gas_used "
f"({expected_benchmark_gas_used})"
f", difference: {gas_used - expected_benchmark_gas_used}"
)
return StateFixture(
env=FixtureEnvironment(**env.model_dump(exclude_none=True)),
pre=pre_alloc,
post={
fork: [
FixtureForkPost(
state_root=transition_tool_output.result.state_root,
logs_hash=transition_tool_output.result.logs_hash,
tx_bytes=tx.rlp(),
expect_exception=tx.error,
state=transition_tool_output.alloc,
)
]
},
transaction=FixtureTransaction.from_transaction(tx),
config=FixtureConfig(
blob_schedule=FixtureBlobSchedule.from_blob_schedule(fork.blob_schedule()),
chain_id=self.chain_id,
),
)
def get_genesis_environment(self, fork: Fork) -> Environment:
"""Get the genesis environment for pre-allocation groups."""
return self.generate_blockchain_test(fork=fork).get_genesis_environment(fork=fork)
def generate(
self,
t8n: TransitionTool,
fork: Fork,
fixture_format: FixtureFormat,
) -> BaseFixture:
"""Generate the BlockchainTest fixture."""
self.check_exception_test(exception=self.tx.error is not None)
if fixture_format in BlockchainTest.supported_fixture_formats:
return self.generate_blockchain_test(fork=fork).generate(
t8n=t8n, fork=fork, fixture_format=fixture_format
)
elif fixture_format == StateFixture:
return self.make_state_test_fixture(t8n, fork)
raise Exception(f"Unknown fixture format: {fixture_format}")
def execute(
self,
*,
fork: Fork,
execute_format: ExecuteFormat,
) -> BaseExecute:
"""Generate the list of test fixtures."""
del fork
if execute_format == TransactionPost:
# Pass gas validation params for benchmark tests
# If not benchmark mode, skip gas used validation
if self._operation_mode != OpMode.BENCHMARKING:
self.skip_gas_used_validation = True
return TransactionPost(
blocks=[[self.tx]],
post=self.post,
expected_benchmark_gas_used=self.expected_benchmark_gas_used,
skip_gas_used_validation=self.skip_gas_used_validation,
)
raise Exception(f"Unsupported execute format: {execute_format}")
# Callable that yields `StateTest` instances for a given fork name string.
StateTestSpec = Callable[[str], Generator[StateTest, None, None]]
# Type alias used by test functions as the state-test filler fixture type.
StateTestFiller = Type[StateTest]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/debugging.py | src/ethereum_test_specs/debugging.py | """Test spec debugging tools."""
from typing import List
from ethereum_clis import Traces
def print_traces(traces: List[Traces] | None) -> None:
    """Print the traces from the transition tool for debugging."""
    if traces is None:
        print("Traces not collected. Use `--traces` to see detailed execution information.")
        return
    print("Printing traces for debugging purposes:")
    block_number = 0
    for block in traces:
        print(f"Block {block_number}:")
        block.print()
        block_number += 1
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/blobs.py | src/ethereum_test_specs/blobs.py | """Test specification for blob tests."""
from typing import Callable, ClassVar, Generator, List, Sequence, Type
from ethereum_clis import TransitionTool
from ethereum_test_base_types import Alloc
from ethereum_test_base_types.base_types import Hash
from ethereum_test_execution import BaseExecute, BlobTransaction
from ethereum_test_fixtures import (
BaseFixture,
FixtureFormat,
)
from ethereum_test_forks import Fork
from ethereum_test_types import NetworkWrappedTransaction, Transaction
from .base import BaseTest, ExecuteFormat, LabeledExecuteFormat
class BlobsTest(BaseTest):
    """Test specification for blob tests."""

    # Pre-allocation state required by the test.
    pre: Alloc
    # Transactions to execute; network-wrapped transactions carry blob data.
    txs: List[NetworkWrappedTransaction | Transaction]
    # Blob hashes referenced by the test that presumably have no
    # corresponding blob on the network — TODO confirm semantics with
    # `BlobTransaction` consumers.
    nonexisting_blob_hashes: List[Hash] | None = None

    supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
        LabeledExecuteFormat(
            BlobTransaction,
            "blob_transaction_test",
            "A test that executes a blob transaction",
        ),
    ]

    def generate(
        self,
        *,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
    ) -> BaseFixture:
        """Generate the list of test fixtures."""
        # No fixture formats are supported for blob tests: this class only
        # declares execute formats, so any fixture format is rejected.
        del t8n, fork
        raise Exception(f"Unknown fixture format: {fixture_format}")

    def execute(
        self,
        *,
        fork: Fork,
        execute_format: ExecuteFormat,
    ) -> BaseExecute:
        """Generate the list of test fixtures."""
        del fork
        if execute_format == BlobTransaction:
            return BlobTransaction(
                txs=self.txs, nonexisting_blob_hashes=self.nonexisting_blob_hashes
            )
        raise Exception(f"Unsupported execute format: {execute_format}")
# Callable that yields `BlobsTest` instances for a given fork name string.
BlobsTestSpec = Callable[[str], Generator[BlobsTest, None, None]]
# Type alias used by test functions as the blobs-test filler fixture type.
BlobsTestFiller = Type[BlobsTest]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/helpers.py | src/ethereum_test_specs/helpers.py | """Helper functions."""
from dataclasses import dataclass
from enum import StrEnum
from typing import Any, Dict, List
from ethereum_clis import Result
from ethereum_test_exceptions import (
BlockException,
ExceptionBase,
ExceptionWithMessage,
TransactionException,
UndefinedException,
)
from ethereum_test_types import Transaction, TransactionReceipt
class ExecutionContext(StrEnum):
    """The execution context in which a test case can fail."""

    # Failure attributed to block-level validation.
    BLOCK = "Block"
    # Failure attributed to an individual transaction.
    TRANSACTION = "Transaction"
class UnexpectedExecutionSuccessError(Exception):
    """
    Exception used when the transaction expected to fail succeeded instead.
    """

    def __init__(self, execution_context: ExecutionContext, **kwargs: Any) -> None:
        """
        Initialize the unexpected success exception.

        `kwargs` carries arbitrary context (e.g. tx index, nonce) that is
        embedded verbatim in the rendered message.
        """
        message = (
            f"\nUnexpected success for {execution_context.value} ({kwargs}):"
            f"\n What: {execution_context.value} unexpectedly succeeded!"
        )
        super().__init__(message)
class UnexpectedExecutionFailError(Exception):
    """
    Exception used when a transaction/block expected to succeed failed instead.
    """

    def __init__(
        self,
        execution_context: ExecutionContext,
        message: str,
        exception: ExceptionWithMessage | UndefinedException,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the exception.

        `message` is the raw client error string and `exception` the mapped
        exception object; both are embedded in the rendered report.
        """
        message = (
            f"Unexpected fail for {execution_context.value} ({kwargs}):"
            f"\n What: {execution_context.value} unexpectedly failed!"
            f'\n Error: "{message}" ({exception})'
        )
        super().__init__(message)
class UndefinedExecutionExceptionError(Exception):
    """
    Exception used when a client's exception message isn't present in its
    `ExceptionMapper`.
    """

    def __init__(
        self,
        execution_context: ExecutionContext,
        want_exception: ExceptionBase | List[ExceptionBase],
        got_exception: UndefinedException,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the exception.

        The rendered message tells the maintainer which mapper
        (`got_exception.mapper_name`) is missing the observed error string.
        """
        message = (
            f"Exception mismatch on {execution_context.value} ({kwargs}):"
            f"\n What: {execution_context.value} exception mismatch!"
            f"\n Want: {want_exception}"
            f'\n Got: "{got_exception}"'
            "\n No exception defined for error message got, please add it to "
            f"{got_exception.mapper_name}"
        )
        super().__init__(message)
class ExecutionExceptionMismatchError(Exception):
    """
    Exception used when the actual block/transaction error string differs from
    the expected one.
    """

    def __init__(
        self,
        execution_context: ExecutionContext,
        want_exception: ExceptionBase | List[ExceptionBase],
        got_exception: ExceptionWithMessage,
        got_message: str,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the exception.

        Both the mapped exception and the raw client message are reported so
        mismatches can be traced back to the originating error string.
        """
        message = (
            f"Exception mismatch on {execution_context.value} ({kwargs}):"
            f"\n What: {execution_context.value} exception mismatch!"
            f"\n Want: {want_exception}"
            f'\n Got: "{got_exception}" ("{got_message}")'
        )
        super().__init__(message)
class TransactionReceiptMismatchError(Exception):
    """
    Exception used when the actual transaction receipt differs from the
    expected one.
    """

    def __init__(
        self,
        index: int,
        field_name: str,
        expected_value: Any,
        actual_value: Any,
    ):
        """Build the mismatch report for the given receipt field."""
        report_lines = [
            f"TransactionReceiptMismatch (pos={index}):",
            f" What: {field_name} mismatch!",
            f" Want: {expected_value}",
            f" Got: {actual_value}",
        ]
        super().__init__("\n" + "\n".join(report_lines))
@dataclass
class ExceptionInfo:
    """Info to print transaction exception error messages."""

    # Whether the failure belongs to a block or a transaction.
    execution_context: ExecutionContext
    # Exception(s) the test expects, if any.
    want_exception: List[ExceptionBase] | ExceptionBase | None
    # Exception actually reported by the tool/client, if any.
    got_exception: ExceptionWithMessage | UndefinedException | None
    # Human-readable message derived from `got_exception`.
    got_message: str | None
    # Extra key/value context (e.g. tx index, nonce) included in reports.
    context: Dict[str, Any]

    def __init__(
        self,
        *,
        execution_context: ExecutionContext,
        want_exception: List[ExceptionBase] | ExceptionBase | None,
        got_exception: ExceptionWithMessage | UndefinedException | None,
        context: Dict[str, Any],
    ):
        """Initialize the exception."""
        self.execution_context = execution_context
        self.want_exception = want_exception
        self.got_exception = got_exception
        # Pre-compute the printable message: mapped exceptions carry an
        # explicit `.message`; undefined ones are stringified as-is.
        if self.got_exception is None:
            self.got_message = None
        else:
            self.got_message = (
                got_exception.message
                if isinstance(got_exception, ExceptionWithMessage)
                else str(got_exception)
            )
        self.context = context

    def verify(self: "ExceptionInfo", *, strict_match: bool) -> None:
        """
        Verify the exception.

        Raises a specific error for each mismatch case: expected-but-absent,
        absent-but-got, unmapped client message, or (when `strict_match`)
        wrong exception type.
        """
        want_exception, got_exception = (
            self.want_exception,
            self.got_exception,
        )
        if want_exception and not got_exception:
            # Expected a failure but execution succeeded.
            raise UnexpectedExecutionSuccessError(
                execution_context=self.execution_context, **self.context
            )
        elif not want_exception and got_exception:
            # Expected success but execution failed.
            assert self.got_message is not None
            raise UnexpectedExecutionFailError(
                execution_context=self.execution_context,
                message=self.got_message,
                exception=got_exception,
                **self.context,
            )
        elif want_exception and got_exception:
            # Both sides failed; check that the failure kinds agree.
            if isinstance(got_exception, UndefinedException):
                # Client message not present in its ExceptionMapper.
                raise UndefinedExecutionExceptionError(
                    execution_context=self.execution_context,
                    want_exception=want_exception,
                    got_exception=got_exception,
                    **self.context,
                )
            if strict_match:
                # Membership test presumably delegates to the mapped
                # exception's `__contains__` — TODO confirm against
                # `ExceptionWithMessage`.
                if want_exception not in got_exception:
                    got_message = self.got_message
                    assert got_message is not None
                    raise ExecutionExceptionMismatchError(
                        execution_context=self.execution_context,
                        want_exception=want_exception,
                        got_exception=got_exception,
                        got_message=got_message,
                        **self.context,
                    )
class TransactionExceptionInfo(ExceptionInfo):
    """Info to print transaction exception error messages."""

    def __init__(
        self,
        tx: Transaction,
        tx_index: int,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the exception info for a single transaction.

        The transaction's index and nonce are recorded as report context.
        """
        super().__init__(
            execution_context=ExecutionContext.TRANSACTION,
            want_exception=tx.error,  # type: ignore
            context={"index": tx_index, "nonce": tx.nonce},
            **kwargs,
        )
class BlockExceptionInfo(ExceptionInfo):
    """Info to print block exception error messages."""

    def __init__(
        self,
        block_number: int,
        **kwargs: Any,
    ) -> None:
        """
        Initialize the exception info for a block.

        The block number is recorded as report context; `want_exception` and
        `got_exception` are forwarded via `kwargs`.
        """
        super().__init__(
            execution_context=ExecutionContext.BLOCK,
            context={"number": block_number},
            **kwargs,
        )
def verify_transaction_receipt(
    transaction_index: int,
    expected_receipt: TransactionReceipt | None,
    actual_receipt: TransactionReceipt | None,
) -> None:
    """
    Verify the actual receipt against the expected one.

    If the expected receipt is None, validation is skipped.
    Only verifies non-None values in the expected receipt if any.
    """
    if expected_receipt is None:
        # Nothing to validate against.
        return
    assert actual_receipt is not None
    expected_gas = expected_receipt.gas_used
    if expected_gas is not None and actual_receipt.gas_used != expected_gas:
        raise TransactionReceiptMismatchError(
            index=transaction_index,
            field_name="gas_used",
            expected_value=expected_gas,
            actual_value=actual_receipt.gas_used,
        )
    # TODO: Add more fields as needed
def verify_transactions(
    *,
    txs: List[Transaction],
    result: Result,
    transition_tool_exceptions_reliable: bool,
) -> List[int]:
    """
    Verify accepted and rejected (if any) transactions against the expected
    outcome. Raises exception on unexpected rejections, unexpected successful
    txs, or successful txs with unexpected receipt values.

    Returns the list of rejected transaction indices.
    """
    rejected_txs: Dict[int, ExceptionWithMessage | UndefinedException] = {
        rejected_tx.index: rejected_tx.error for rejected_tx in result.rejected_transactions
    }
    # Receipts are only emitted for accepted transactions, so they are
    # consumed with an index independent of the transaction position.
    receipt_index = 0
    for i, tx in enumerate(txs):
        # Single-lookup idiom instead of `rejected_txs[i] if i in rejected_txs
        # else None` (two hash lookups for the same key).
        error_message = rejected_txs.get(i)
        info = TransactionExceptionInfo(
            tx=tx,
            tx_index=i,
            got_exception=error_message,
        )
        info.verify(strict_match=transition_tool_exceptions_reliable)
        if error_message is None:
            verify_transaction_receipt(i, tx.expected_receipt, result.receipts[receipt_index])
            receipt_index += 1
    return list(rejected_txs.keys())
def verify_block(
    *,
    block_number: int,
    want_exception: List[TransactionException | BlockException]
    | TransactionException
    | BlockException
    | None,
    got_exception: ExceptionWithMessage | UndefinedException | None,
    transition_tool_exceptions_reliable: bool,
) -> None:
    """Verify the block exception against the expected one."""
    # Build the report helper and run the verification in one expression.
    BlockExceptionInfo(
        block_number=block_number,
        want_exception=want_exception,
        got_exception=got_exception,
    ).verify(strict_match=transition_tool_exceptions_reliable)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/transaction.py | src/ethereum_test_specs/transaction.py | """Ethereum transaction test spec definition and filler."""
from typing import Callable, ClassVar, Generator, Sequence, Type
from ethereum_clis import TransitionTool
from ethereum_test_execution import (
BaseExecute,
ExecuteFormat,
LabeledExecuteFormat,
TransactionPost,
)
from ethereum_test_fixtures import (
BaseFixture,
FixtureFormat,
LabeledFixtureFormat,
TransactionFixture,
)
from ethereum_test_fixtures.transaction import FixtureResult
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc, Transaction
from .base import BaseTest
class TransactionTest(BaseTest):
    """
    Filler type that tests the transaction over the period of a single block.
    """

    # The transaction under test.
    tx: Transaction
    # Optional pre-allocation; unused by the fixture generation below.
    pre: Alloc | None = None

    supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
        TransactionFixture,
    ]
    supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
        LabeledExecuteFormat(
            TransactionPost,
            "transaction_test",
            "An execute test derived from a transaction test",
        ),
    ]

    def make_transaction_test_fixture(
        self,
        fork: Fork,
    ) -> TransactionFixture:
        """
        Create a fixture from the transaction test definition.

        An expected transaction error produces a failure result with no
        hash/sender; otherwise the fork's intrinsic-gas calculator is used to
        fill in the expected intrinsic gas cost.
        """
        if self.tx.error is not None:
            # Invalid transaction: only the exception is recorded.
            result = FixtureResult(
                exception=self.tx.error,
                hash=None,
                intrinsic_gas=0,
                sender=None,
            )
        else:
            # Valid transaction: compute the fork-specific intrinsic gas.
            intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
            intrinsic_gas = intrinsic_gas_cost_calculator(
                calldata=self.tx.data,
                contract_creation=self.tx.to is None,
                access_list=self.tx.access_list,
                authorization_list_or_count=self.tx.authorization_list,
            )
            result = FixtureResult(
                exception=None,
                hash=self.tx.hash,
                intrinsic_gas=intrinsic_gas,
                sender=self.tx.sender,
            )
        return TransactionFixture(
            result={
                fork: result,
            },
            transaction=self.tx.with_signature_and_sender().rlp(),
        )

    def generate(
        self,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
    ) -> BaseFixture:
        """Generate the TransactionTest fixture."""
        # The transition tool is not needed for transaction fixtures.
        del t8n
        self.check_exception_test(exception=self.tx.error is not None)
        if fixture_format == TransactionFixture:
            return self.make_transaction_test_fixture(fork)
        raise Exception(f"Unknown fixture format: {fixture_format}")

    def execute(
        self,
        *,
        fork: Fork,
        execute_format: ExecuteFormat,
    ) -> BaseExecute:
        """Execute the transaction test by sending it to the live network."""
        del fork
        if execute_format == TransactionPost:
            # No post-state expectations: only delivery of the tx is tested.
            return TransactionPost(
                blocks=[[self.tx]],
                post={},
            )
        raise Exception(f"Unsupported execute format: {execute_format}")
# Callable that yields `TransactionTest` instances for a given fork name.
TransactionTestSpec = Callable[[str], Generator[TransactionTest, None, None]]
# Type alias used by test functions as the transaction-test filler type.
TransactionTestFiller = Type[TransactionTest]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/benchmark.py | src/ethereum_test_specs/benchmark.py | """Ethereum benchmark test spec definition and filler."""
import math
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Callable, ClassVar, Dict, Generator, List, Sequence, Type
import pytest
from pydantic import ConfigDict, Field
from ethereum_clis import TransitionTool
from ethereum_test_base_types import Address, HexNumber
from ethereum_test_exceptions import BlockException, TransactionException
from ethereum_test_execution import (
BaseExecute,
ExecuteFormat,
LabeledExecuteFormat,
TransactionPost,
)
from ethereum_test_fixtures import (
BaseFixture,
BlockchainEngineFixture,
BlockchainEngineXFixture,
BlockchainFixture,
FixtureFormat,
LabeledFixtureFormat,
)
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc, Environment, Transaction
from ethereum_test_vm import Bytecode
from ethereum_test_vm.opcodes import Opcodes as Op
from .base import BaseTest
from .blockchain import Block, BlockchainTest
@dataclass(kw_only=True)
class BenchmarkCodeGenerator(ABC):
    """Abstract base class for generating benchmark bytecode."""

    # Bytecode repeated to fill the benchmark contract.
    attack_block: Bytecode
    # Bytecode executed once before the repeated section.
    setup: Bytecode = field(default_factory=Bytecode)
    # Bytecode executed after the repeated section.
    cleanup: Bytecode = field(default_factory=Bytecode)
    # Extra keyword arguments forwarded to the generated Transaction.
    tx_kwargs: Dict[str, Any] = field(default_factory=dict)
    # Address of the deployed benchmark contract; set by `deploy_contracts`.
    _contract_address: Address | None = None

    @abstractmethod
    def deploy_contracts(self, *, pre: Alloc, fork: Fork) -> Address:
        """Deploy any contracts needed for the benchmark."""
        ...

    def generate_transaction(self, *, pre: Alloc, gas_benchmark_value: int) -> Transaction:
        """
        Generate transaction that executes the looping contract.

        `deploy_contracts` must have been called first so that
        `_contract_address` is populated.
        """
        assert self._contract_address is not None
        # Default the gas limit to the benchmark value unless the caller
        # explicitly provided one via `tx_kwargs`.
        if "gas_limit" not in self.tx_kwargs:
            self.tx_kwargs["gas_limit"] = gas_benchmark_value

        return Transaction(
            to=self._contract_address,
            sender=pre.fund_eoa(),
            **self.tx_kwargs,
        )

    def generate_repeated_code(
        self,
        *,
        repeated_code: Bytecode,
        setup: Bytecode | None = None,
        cleanup: Bytecode | None = None,
        fork: Fork,
    ) -> Bytecode:
        """
        Calculate the maximum number of iterations that
        can fit in the code size limit.

        Lays out: setup + JUMPDEST + repeated_code * N + cleanup + JUMP back
        to the JUMPDEST, where N is the largest count fitting in the fork's
        max code size.
        """
        assert len(repeated_code) > 0, "repeated_code cannot be empty"
        max_code_size = fork.max_code_size()
        if setup is None:
            setup = Bytecode()
        if cleanup is None:
            cleanup = Bytecode()
        # Fixed bytes not occupied by the repeated section.
        overhead = len(setup) + len(Op.JUMPDEST) + len(cleanup) + len(Op.JUMP(len(setup)))
        available_space = max_code_size - overhead
        max_iterations = available_space // len(repeated_code)

        # TODO: Unify the PUSH0 and PUSH1 usage.
        code = setup + Op.JUMPDEST + repeated_code * max_iterations + cleanup
        # With no setup the JUMPDEST sits at offset 0, so PUSH0 suffices.
        code += Op.JUMP(len(setup)) if len(setup) > 0 else Op.PUSH0 + Op.JUMP

        self._validate_code_size(code, fork)
        return code

    def _validate_code_size(self, code: Bytecode, fork: Fork) -> None:
        """Validate that the generated code fits within size limits."""
        if len(code) > fork.max_code_size():
            raise ValueError(
                f"Generated code size {len(code)} exceeds maximum allowed size "
                f"{fork.max_code_size()}"
            )
class BenchmarkTest(BaseTest):
    """Test type designed specifically for benchmark test cases."""

    model_config = ConfigDict(extra="forbid")

    # Pre/post allocation state for the test.
    pre: Alloc = Field(default_factory=Alloc)
    post: Alloc = Field(default_factory=Alloc)
    # Exactly one of `tx`, `blocks`, or `code_generator` must be provided.
    tx: Transaction | None = None
    # Blocks executed before the benchmark payload.
    setup_blocks: List[Block] = Field(default_factory=list)
    blocks: List[Block] | None = None
    block_exception: (
        List[TransactionException | BlockException] | TransactionException | BlockException | None
    ) = None
    env: Environment = Field(default_factory=Environment)
    expected_benchmark_gas_used: int | None = None
    # Total gas budget for the benchmark payload.
    gas_benchmark_value: int = Field(default_factory=lambda: int(Environment().gas_limit))
    code_generator: BenchmarkCodeGenerator | None = None

    supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
        BlockchainFixture,
        BlockchainEngineFixture,
        BlockchainEngineXFixture,
    ]
    supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
        LabeledExecuteFormat(
            TransactionPost,
            "benchmark_test",
            "An execute test derived from a benchmark test",
        ),
    ]

    supported_markers: ClassVar[Dict[str, str]] = {
        "blockchain_test_engine_only": "Only generate a blockchain test engine fixture",
        "blockchain_test_only": "Only generate a blockchain test fixture",
    }

    def model_post_init(self, __context: Any, /) -> None:
        """
        Model post-init to assert that the custom pre-allocation was
        provided and the default was not used.
        """
        super().model_post_init(__context)
        assert "pre" in self.model_fields_set, "pre allocation was not provided"

    @classmethod
    def pytest_parameter_name(cls) -> str:
        """
        Return the parameter name used in pytest
        to select this spec type.
        """
        return "benchmark_test"

    @classmethod
    def discard_fixture_format_by_marks(
        cls,
        fixture_format: FixtureFormat,
        fork: Fork,
        markers: List[pytest.Mark],
    ) -> bool:
        """
        Discard a fixture format from filling if the
        appropriate marker is used.
        """
        del fork
        if "blockchain_test_only" in [m.name for m in markers]:
            return fixture_format != BlockchainFixture
        if "blockchain_test_engine_only" in [m.name for m in markers]:
            return fixture_format != BlockchainEngineFixture
        return False

    def get_genesis_environment(self, fork: Fork) -> Environment:
        """Get the genesis environment for this benchmark test."""
        del fork
        return self.env

    def split_transaction(self, tx: Transaction, gas_limit_cap: int | None) -> List[Transaction]:
        """
        Split a transaction that exceeds the gas
        limit cap into multiple transactions.

        When no cap applies, the transaction is returned as-is with its gas
        limit set to the full benchmark value. Otherwise, copies with
        consecutive nonces are produced; the last copy carries the remainder.
        """
        if gas_limit_cap is None:
            tx.gas_limit = HexNumber(self.gas_benchmark_value)
            return [tx]
        if gas_limit_cap >= self.gas_benchmark_value:
            tx.gas_limit = HexNumber(self.gas_benchmark_value)
            return [tx]

        num_splits = math.ceil(self.gas_benchmark_value / gas_limit_cap)
        remaining_gas = self.gas_benchmark_value
        split_transactions = []
        for i in range(num_splits):
            split_tx = tx.model_copy()
            split_tx.gas_limit = HexNumber(remaining_gas if i == num_splits - 1 else gas_limit_cap)
            remaining_gas -= gas_limit_cap
            split_tx.nonce = HexNumber(tx.nonce + i)
            split_transactions.append(split_tx)
        return split_transactions

    def generate_blocks_from_code_generator(self, fork: Fork) -> List[Block]:
        """Generate blocks using the code generator."""
        if self.code_generator is None:
            raise Exception("Code generator is not set")
        self.code_generator.deploy_contracts(pre=self.pre, fork=fork)
        gas_limit = fork.transaction_gas_limit_cap() or self.gas_benchmark_value
        benchmark_tx = self.code_generator.generate_transaction(
            pre=self.pre, gas_benchmark_value=gas_limit
        )
        execution_txs = self.split_transaction(benchmark_tx, gas_limit)
        execution_block = Block(txs=execution_txs)
        return [execution_block]

    def generate_blockchain_test(self, fork: Fork) -> BlockchainTest:
        """Create a BlockchainTest from this BenchmarkTest."""
        set_props = [
            name
            for name, val in [
                ("code_generator", self.code_generator),
                ("blocks", self.blocks),
                ("tx", self.tx),
            ]
            if val is not None
        ]
        if len(set_props) != 1:
            raise ValueError(
                f"Exactly one must be set, but got {len(set_props)}: {', '.join(set_props)}"
            )

        # Bug fix: copy `setup_blocks` instead of aliasing it. The previous
        # `blocks = self.setup_blocks` followed by `+=`/`append` extended the
        # field in place, so generating more than one fixture format from the
        # same test instance accumulated duplicate blocks.
        blocks: List[Block] = list(self.setup_blocks)

        if self.code_generator is not None:
            blocks += self.generate_blocks_from_code_generator(fork)
        elif self.blocks is not None:
            blocks += self.blocks
        elif self.tx is not None:
            gas_limit = fork.transaction_gas_limit_cap() or self.gas_benchmark_value
            transactions = self.split_transaction(self.tx, gas_limit)
            blocks.append(Block(txs=transactions))
        else:
            raise ValueError(
                "Cannot create BlockchainTest without a code generator, transactions, or blocks"
            )
        return BlockchainTest.from_test(
            base_test=self,
            genesis_environment=self.env,
            pre=self.pre,
            post=self.post,
            blocks=blocks,
        )

    def generate(
        self,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
    ) -> BaseFixture:
        """Generate the blockchain test fixture."""
        self.check_exception_test(exception=self.tx.error is not None if self.tx else False)
        if fixture_format in BlockchainTest.supported_fixture_formats:
            return self.generate_blockchain_test(fork=fork).generate(
                t8n=t8n, fork=fork, fixture_format=fixture_format
            )
        else:
            raise Exception(f"Unsupported fixture format: {fixture_format}")

    def execute(
        self,
        *,
        fork: Fork,
        execute_format: ExecuteFormat,
    ) -> BaseExecute:
        """Execute the benchmark test by sending it to the live network."""
        del fork
        if execute_format == TransactionPost:
            return TransactionPost(
                blocks=[[self.tx]],
                post=self.post,
            )
        raise Exception(f"Unsupported execute format: {execute_format}")
# Callable that yields `BenchmarkTest` instances for a given fork name.
BenchmarkTestSpec = Callable[[str], Generator[BenchmarkTest, None, None]]
# Type alias used by test functions as the benchmark-test filler type.
BenchmarkTestFiller = Type[BenchmarkTest]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/__init__.py | src/ethereum_test_specs/__init__.py | """Test spec definitions and utilities."""
from .base import BaseTest, TestSpec
from .base_static import BaseStaticTest
from .benchmark import BenchmarkTest, BenchmarkTestFiller, BenchmarkTestSpec
from .blobs import BlobsTest, BlobsTestFiller, BlobsTestSpec
from .blockchain import (
BlockchainTest,
BlockchainTestFiller,
BlockchainTestSpec,
)
from .eof import (
EOFStateTest,
EOFStateTestFiller,
EOFStateTestSpec,
EOFTest,
EOFTestFiller,
EOFTestSpec,
)
from .state import StateTest, StateTestFiller, StateTestSpec
from .static_state.state_static import StateStaticTest
from .transaction import TransactionTest, TransactionTestFiller, TransactionTestSpec
__all__ = (
"BaseStaticTest",
"BaseTest",
"BenchmarkTest",
"BenchmarkTestFiller",
"BenchmarkTestSpec",
"BlobsTest",
"BlobsTestFiller",
"BlobsTestSpec",
"BlockchainTest",
"BlockchainTestEngineFiller",
"BlockchainTestEngineSpec",
"BlockchainTestFiller",
"BlockchainTestSpec",
"EOFStateTest",
"EOFStateTestFiller",
"EOFStateTestSpec",
"EOFTest",
"EOFTestFiller",
"EOFTestSpec",
"StateStaticTest",
"StateTest",
"StateTestFiller",
"StateTestSpec",
"TestSpec",
"TransactionTest",
"TransactionTestFiller",
"TransactionTestSpec",
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/base.py | src/ethereum_test_specs/base.py | """
Base test class and helper functions for Ethereum state and blockchain tests.
"""
import hashlib
from abc import abstractmethod
from enum import StrEnum, unique
from functools import reduce
from os import path
from pathlib import Path
from typing import Any, Callable, ClassVar, Dict, Generator, List, Sequence, Type
import pytest
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from typing_extensions import Self
from ethereum_clis import Result, TransitionTool
from ethereum_clis.cli_types import OpcodeCount
from ethereum_test_base_types import to_hex
from ethereum_test_execution import BaseExecute, ExecuteFormat, LabeledExecuteFormat
from ethereum_test_fixtures import (
BaseFixture,
FixtureFormat,
LabeledFixtureFormat,
PreAllocGroup,
PreAllocGroups,
)
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc, Environment, Withdrawal
class HashMismatchExceptionError(Exception):
    """Raised when an expected hash and the actually computed hash differ."""

    def __init__(
        self, expected_hash: str, actual_hash: str, message: str = "Hashes do not match"
    ) -> None:
        """Store both hashes plus a human-readable message on the instance."""
        super().__init__(message)
        self.expected_hash = expected_hash
        self.actual_hash = actual_hash
        self.message = message

    def __str__(self) -> str:
        """Format the error as `<message>: Expected <hash>, got <hash>`."""
        return "{}: Expected {}, got {}".format(
            self.message, self.expected_hash, self.actual_hash
        )
def verify_result(result: Result, env: Environment) -> None:
    """
    Check the t8n `result` against expectations derived from `env`.

    Only the withdrawals root is currently validated: when the environment
    declares withdrawals, the result's `withdrawals_root` must equal the root
    of that withdrawal list. Raises `AssertionError` on mismatch.
    """
    if env.withdrawals is None:
        return
    expected_root = to_hex(Withdrawal.list_root(env.withdrawals))
    assert result.withdrawals_root == expected_root
@unique
class OpMode(StrEnum):
    """Operation mode for the fill and execute."""

    # Standard consensus-test filling/execution.
    CONSENSUS = "consensus"
    # Benchmarking workflow (see `expected_benchmark_gas_used` on `BaseTest`).
    BENCHMARKING = "benchmarking"
    # Gas-optimization workflow modes (see the `_gas_optimization*` private
    # attributes on `BaseTest`).
    OPTIMIZE_GAS = "optimize-gas"
    OPTIMIZE_GAS_POST_PROCESSING = "optimize-gas-post-processing"
class BaseTest(BaseModel):
"""
Represents a base Ethereum test which must return a single test fixture.
"""
model_config = ConfigDict(extra="forbid")
tag: str = ""
_request: pytest.FixtureRequest | None = PrivateAttr(None)
_operation_mode: OpMode | None = PrivateAttr(None)
_gas_optimization: int | None = PrivateAttr(None)
_gas_optimization_max_gas_limit: int | None = PrivateAttr(None)
_opcode_count: OpcodeCount | None = PrivateAttr(None)
expected_benchmark_gas_used: int | None = None
skip_gas_used_validation: bool = False
spec_types: ClassVar[Dict[str, Type["BaseTest"]]] = {}
# Transition tool specific fields
t8n_dump_dir: Path | None = Field(None, exclude=True)
t8n_call_counter: int = Field(0, exclude=True)
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = []
supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = []
supported_markers: ClassVar[Dict[str, str]] = {}
    @classmethod
    def discard_fixture_format_by_marks(
        cls,
        fixture_format: FixtureFormat,
        fork: Fork,
        markers: List[pytest.Mark],
    ) -> bool:
        """
        Discard a fixture format from filling if the appropriate marker is
        used.

        The base implementation never discards anything; spec subclasses
        override this to react to their own markers (e.g. `eof_test_only`).
        """
        del fork, fixture_format, markers
        return False
@classmethod
def __pydantic_init_subclass__(cls, **kwargs: Any) -> None:
"""
Register all subclasses of BaseFixture with a fixture format name set
as possible fixture formats.
"""
if cls.pytest_parameter_name():
# Register the new fixture format
BaseTest.spec_types[cls.pytest_parameter_name()] = cls
    @classmethod
    def from_test(
        cls: Type[Self],
        *,
        base_test: "BaseTest",
        **kwargs: Any,
    ) -> Self:
        """
        Create a test in a different format from a base test.

        Copies the shared public fields (`tag`, `t8n_dump_dir`,
        `expected_benchmark_gas_used`, `skip_gas_used_validation`) plus the
        `_request`, `_operation_mode` and `_opcode_count` private attributes.

        NOTE(review): `_gas_optimization` and
        `_gas_optimization_max_gas_limit` are NOT carried over — confirm this
        is intentional for the gas-optimization operation modes.
        """
        new_instance = cls(
            tag=base_test.tag,
            t8n_dump_dir=base_test.t8n_dump_dir,
            expected_benchmark_gas_used=base_test.expected_benchmark_gas_used,
            skip_gas_used_validation=base_test.skip_gas_used_validation,
            **kwargs,
        )
        # Private attributes are not pydantic fields, so they must be copied
        # explicitly after construction.
        new_instance._request = base_test._request
        new_instance._operation_mode = base_test._operation_mode
        new_instance._opcode_count = base_test._opcode_count
        return new_instance
    @classmethod
    def discard_execute_format_by_marks(
        cls,
        execute_format: ExecuteFormat,
        fork: Fork,
        markers: List[pytest.Mark],
    ) -> bool:
        """
        Discard an execute format from executing if the appropriate marker is
        used.

        The base implementation never discards anything; subclasses may
        override to react to markers (mirrors
        `discard_fixture_format_by_marks`).
        """
        del execute_format, fork, markers
        return False
    @abstractmethod
    def generate(
        self,
        *,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
    ) -> BaseFixture:
        """Generate the single test fixture for the given fork and format."""
        pass
    def execute(
        self,
        *,
        fork: Fork,
        execute_format: ExecuteFormat,
    ) -> BaseExecute:
        """
        Execute the test in the given execute format.

        The base implementation supports no formats and always raises;
        subclasses override for the formats they support.
        """
        del fork
        raise Exception(f"Unsupported execute format: {execute_format}")
@classmethod
def pytest_parameter_name(cls) -> str:
"""
Must return the name of the parameter used in pytest to select this
spec type as filler for the test.
By default, it returns the underscore separated name of the class.
"""
if cls == BaseTest:
return ""
return reduce(lambda x, y: x + ("_" if y.isupper() else "") + y, cls.__name__).lower()
def get_next_transition_tool_output_path(self) -> str:
"""Return path to the next transition tool output file."""
if not self.t8n_dump_dir:
return ""
current_value = self.t8n_call_counter
self.t8n_call_counter += 1
return path.join(
self.t8n_dump_dir,
str(current_value),
)
def is_tx_gas_heavy_test(self) -> bool:
"""Check if the test is gas-heavy for transaction execution."""
if self._request is not None and hasattr(self._request, "node"):
node = self._request.node
has_slow_marker = node.get_closest_marker("slow") is not None
has_benchmark_marker = node.get_closest_marker("benchmark") is not None
return has_slow_marker or has_benchmark_marker
return False
def is_exception_test(self) -> bool | None:
"""
Check if the test is an exception test (invalid block, invalid
transaction).
`None` is returned if it's not possible to determine if the test is
negative or not. This is the case when the test is not run in pytest.
"""
if self._request is not None and hasattr(self._request, "node"):
return self._request.node.get_closest_marker("exception_test") is not None
return None
def node_id(self) -> str:
"""Return the node ID of the test."""
if self._request is not None and hasattr(self._request, "node"):
return self._request.node.nodeid
return ""
def check_exception_test(
self,
*,
exception: bool,
) -> None:
"""Compare the test marker against the outcome of the test."""
negative_test_marker = self.is_exception_test()
if negative_test_marker is None:
return
if negative_test_marker != exception:
if exception:
raise Exception(
"Test produced an invalid block or transaction but was not marked with the "
"`exception_test` marker. Add the `@pytest.mark.exception_test` decorator "
"to the test."
)
else:
raise Exception(
"Test didn't produce an invalid block or transaction but was marked with the "
"`exception_test` marker. Remove the `@pytest.mark.exception_test` decorator "
"from the test."
)
def get_genesis_environment(self, fork: Fork) -> Environment:
"""
Get the genesis environment for pre-allocation groups.
Must be implemented by subclasses to provide the appropriate
environment.
"""
raise NotImplementedError(
f"{self.__class__.__name__} must implement genesis environment access for use with "
"pre-allocation groups."
)
    def update_pre_alloc_groups(
        self, pre_alloc_groups: PreAllocGroups, fork: Fork, test_id: str
    ) -> PreAllocGroups:
        """
        Create or update the pre-allocation group with the pre from the current
        spec.

        Groups are keyed by `compute_pre_alloc_group_hash`, so tests that share
        a (fork, genesis environment) pair end up in the same group. Returns
        the (mutated) `pre_alloc_groups` mapping.

        Raises `AttributeError` for spec types that do not define a `pre`
        field.
        """
        if not hasattr(self, "pre"):
            raise AttributeError(
                f"{self.__class__.__name__} does not have a 'pre' field. Pre-allocation groups "
                "are only supported for test types that define pre-allocation."
            )
        pre_alloc_hash = self.compute_pre_alloc_group_hash(fork=fork)
        if pre_alloc_hash in pre_alloc_groups:
            # Update existing group - just merge pre-allocations; identical
            # accounts between tests in the same group are allowed.
            group = pre_alloc_groups[pre_alloc_hash]
            group.pre = Alloc.merge(
                group.pre,
                self.pre,
                key_collision_mode=Alloc.KeyCollisionMode.ALLOW_IDENTICAL_ACCOUNTS,
            )
            group.fork = fork
            group.test_ids.append(str(test_id))
            pre_alloc_groups[pre_alloc_hash] = group
        else:
            # Create new group - use Environment instead of expensive genesis
            # generation
            genesis_env = self.get_genesis_environment(fork)
            # Seed the group's pre-state with the fork's default blockchain
            # pre-allocation, then overlay this test's accounts.
            pre_alloc = Alloc.merge(
                Alloc.model_validate(fork.pre_allocation_blockchain()),
                self.pre,
            )
            group = PreAllocGroup(
                test_ids=[str(test_id)],
                fork=fork,
                environment=genesis_env,
                pre=pre_alloc,
            )
            pre_alloc_groups[pre_alloc_hash] = group
        return pre_alloc_groups
    def compute_pre_alloc_group_hash(self, fork: Fork) -> str:
        """
        Hash (fork, env) in order to group tests by genesis config.

        The result is a 16-hex-digit `0x…` string built by XOR-ing a SHA-256
        digest of the fork name with `hash(genesis_env)`, optionally salted by
        the `pre_alloc_group` pytest marker ("separate" uses the test's nodeid
        so it gets its own group).

        NOTE(review): this relies on `hash(genesis_env)` — confirm that
        `Environment.__hash__` is deterministic across processes and
        non-negative; the builtin object/str hash is neither, and a negative
        value would format as `0x-…`.
        """
        if not hasattr(self, "pre"):
            raise AttributeError(
                f"{self.__class__.__name__} does not have a 'pre' field. Pre-allocation group "
                "usage is only supported for test types that define pre-allocs."
            )
        # Fold the fork name into 64 bits via SHA-256 (stable across runs).
        fork_digest = hashlib.sha256(fork.name().encode("utf-8")).digest()
        fork_hash = int.from_bytes(fork_digest[:8], byteorder="big")
        genesis_env = self.get_genesis_environment(fork)
        combined_hash = fork_hash ^ hash(genesis_env)
        # Check if test has pre_alloc_group marker
        if self._request is not None and hasattr(self._request, "node"):
            pre_alloc_group_marker = self._request.node.get_closest_marker("pre_alloc_group")
            if pre_alloc_group_marker:
                # Get the group name/salt from marker args
                if pre_alloc_group_marker.args:
                    group_salt = str(pre_alloc_group_marker.args[0])
                    if group_salt == "separate":
                        # Use nodeid for unique group per test
                        group_salt = self._request.node.nodeid
                    # Add custom salt to hash
                    salt_hash = hashlib.sha256(group_salt.encode("utf-8")).digest()
                    salt_int = int.from_bytes(salt_hash[:8], byteorder="big")
                    combined_hash = combined_hash ^ salt_int
        return f"0x{combined_hash:016x}"
TestSpec = Callable[[Fork], Generator[BaseTest, None, None]]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/eof.py | src/ethereum_test_specs/eof.py | """Ethereum EOF test spec definition and filler."""
import subprocess
import warnings
from pathlib import Path
from shutil import which
from subprocess import CompletedProcess
from typing import (
Annotated,
Any,
Callable,
ClassVar,
Dict,
Generator,
List,
Optional,
Sequence,
Type,
)
import pytest
from pydantic import Field, TypeAdapter
from ethereum_clis import EvmoneExceptionMapper, TransitionTool
from ethereum_test_base_types import Account, Bytes, HexNumber
from ethereum_test_exceptions import (
EOFException,
ExceptionMapperValidator,
ExceptionWithMessage,
UndefinedException,
)
from ethereum_test_exceptions.exceptions import EOFExceptionInstanceOrList, to_pipe_str
from ethereum_test_execution import (
BaseExecute,
ExecuteFormat,
LabeledExecuteFormat,
TransactionPost,
)
from ethereum_test_fixtures import (
BaseFixture,
EOFFixture,
FixtureFormat,
LabeledFixtureFormat,
)
from ethereum_test_fixtures.eof import Result, Vector
from ethereum_test_forks import Fork
from ethereum_test_types import EOA, Alloc, Environment, Transaction
from ethereum_test_types.eof.v1 import Container, ContainerKind, Section, SectionKind
from ethereum_test_types.helpers import compute_eofcreate_address
from ethereum_test_vm import Opcodes as Op
from .base import BaseTest
from .state import StateTest
existing_tests: Dict[Bytes, str] = {}
class EOFBaseExceptionError(Exception):
    """Base exception class for exceptions raised when verifying EOF code."""

    def __init__(self, message: str) -> None:
        """Initialize the exception with the message."""
        super().__init__(message)

    @staticmethod
    def format_code(code: Bytes, max_length: int = 60) -> str:
        """
        Render `code` as hex, eliding the middle of long bytecode so that
        test-failure output stays readable in the terminal.
        """
        if len(code) <= max_length:
            return code.hex()
        # Keep an equally-sized prefix and suffix; subtract a little extra to
        # leave room for the "..." ellipsis.
        keep = max_length // 2 - 5
        return f"{code[:keep].hex()}...{code[-keep:].hex()}"
class UnexpectedEOFExceptionError(EOFBaseExceptionError):
    """
    Exception used when valid EOF code unexpectedly raises an exception in
    eofparse.
    """

    def __init__(self, *, code: Bytes, got: str):
        """Initialize the exception with the code and the exception message."""
        # Columns are padded so "Code:", "Expected:" and "Got:" line up.
        message = (
            "Expected EOF code to be valid, but an exception occurred:\n"
            f"    Code: {self.format_code(code)}\n"
            f"Expected: No Exception\n"
            f"     Got: {got}"
        )
        super().__init__(message)
class ExpectedEOFExceptionError(EOFBaseExceptionError):
    """
    Exception used when EOF code is expected to raise an exception, but
    eofparse did not raise an exception.
    """

    def __init__(self, *, code: Bytes, expected: str):
        """
        Initialize the exception with the code and the expected exception
        message.
        """
        # Columns are padded so "Code:", "Expected:" and "Got:" line up.
        message = (
            "Expected EOF code to be invalid, but no exception was raised:\n"
            f"    Code: {self.format_code(code)}\n"
            f"Expected: {expected}\n"
            f"     Got: No Exception"
        )
        super().__init__(message)
class EOFExceptionMismatchError(EOFBaseExceptionError):
    """
    Exception used when the actual EOF exception differs from the expected one.
    """

    # NOTE: unlike the sibling error classes, arguments here are positional
    # (no keyword-only `*`).
    def __init__(self, code: Bytes, expected: str, got: str):
        """
        Initialize the exception with the code, the expected/actual exception
        message.
        """
        # Columns are padded so "Code:", "Expected:" and "Got:" line up.
        message = (
            "EOF code raised a different exception than expected:\n"
            f"    Code: {self.format_code(code)}\n"
            f"Expected: {expected}\n"
            f"     Got: {got}"
        )
        super().__init__(message)
class EOFExceptionWithMessage(ExceptionWithMessage[EOFException]):
"""Exception returned from the eof validator with a message."""
pass
eof_exception_type_adapter: TypeAdapter[EOFExceptionWithMessage | UndefinedException] = (
TypeAdapter(Annotated[EOFExceptionWithMessage | UndefinedException, ExceptionMapperValidator])
)
class EOFParse:
"""evmone-eofparse binary."""
binary: Path
def __new__(cls) -> "EOFParse":
"""Make EOF binary a singleton."""
if not hasattr(cls, "instance"):
cls.instance = super(EOFParse, cls).__new__(cls)
return cls.instance
def __init__(
self,
binary: Optional[Path | str] = None,
):
"""Initialize the EOF binary."""
if binary is None:
which_path = which("evmone-eofparse")
if which_path is not None:
binary = Path(which_path)
if binary is None or not Path(binary).exists():
raise FileNotFoundError(
"`evmone-eofparse` binary executable not found/not executable."
)
self.binary = Path(binary)
def run(self, *args: str, input_value: str | None = None) -> CompletedProcess:
"""Run evmone with the given arguments."""
result = subprocess.run(
[self.binary, *args],
capture_output=True,
text=True,
input=input_value,
)
if result.returncode not in [0, 1]:
raise Exception(
f"`{self.binary.name}` call failed with return code {result.returncode}."
)
return result
class EOFTest(BaseTest):
"""
Filler type that generates a test for EOF container validation.
A state test is also automatically generated where the container is wrapped
in a contract-creating transaction to test deployment/validation on the
instantiated blockchain.
"""
container: Container
"""
EOF container that will be tested for validity.
The only supported type at the moment is
`ethereum_test_types.eof.v1.Container`.
If an invalid container needs to be tested, and it cannot be generated
using the Container class features, the `raw_bytes` field can be used to
provide the raw container bytes.
"""
expect_exception: EOFExceptionInstanceOrList | None = None
"""
Expected exception that the container should raise when parsed by an EOF
parser.
Can be a single exception or a list of exceptions that the container is
expected to raise, in which case the test will pass if any of the
exceptions are raised.
The list of supported exceptions can be found in the
`ethereum_test_exceptions.EOFException` class.
"""
container_kind: ContainerKind = ContainerKind.RUNTIME
"""
Container kind type that the container should be treated as.
The container kind can be one of the following: - `ContainerKind.INITCODE`:
The container is an initcode container. - `ContainerKind.RUNTIME`: The
container is a runtime container.
The default value is `ContainerKind.RUNTIME`.
"""
deployed_container: Container | None = None
"""
To be used when the container is an initcode container and the expected
deployed container is known.
The value is only used when a State Test is generated from this EOF test to
set the expected deployed container that should be found in the post state.
If this field is not set, and the container is valid: - If the container
kind is `ContainerKind.RUNTIME`, the deployed container is assumed to be
the container itself, and an initcode container that wraps the container is
generated automatically. - If the container kind is
`ContainerKind.INITCODE`, `model_post_init` will attempt to infer the
deployed container from the sections of the init-container, and the first
container-type section will be used. An error will be raised if the
deployed container cannot be inferred.
If the value is set to `None`, it is assumed that the container is invalid
and the test will expect that no contract is created.
It is considered an error if: - The `deployed_container` field is set and
the `container_kind` field is not set to `ContainerKind.INITCODE`. - The
`deployed_container` field is set and the `expect_exception` is not `None`.
The deployed container is **not** executed at any point during the EOF
validation test nor the generated State Test. For container runtime testing
use the `EOFStateTest` class.
"""
pre: Alloc | None = None
"""
Pre alloc object that is used during State Test generation.
This field is automatically set by the test filler when generating a State
Test from this EOF test and should otherwise be left unset.
"""
post: Alloc | None = None
"""
Post alloc object that is used during State Test generation.
This field is automatically set by the test filler when generating a State
Test from this EOF test and is normally not set by the user.
"""
sender: EOA | None = None
"""
Sender EOA object that is used during State Test generation.
This field is automatically set by the `model_post_init` method and should
otherwise be left unset.
"""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
EOFFixture
] + [
LabeledFixtureFormat(
fixture_format,
f"{fixture_format.format_name}_from_eof_test",
f"A {fixture_format.format_name} generated from an eof_test.",
)
for fixture_format in StateTest.supported_fixture_formats
]
supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
LabeledExecuteFormat(
execute_format,
f"{execute_format.label}_from_eof_test",
f"A {execute_format.label} generated from an eof_test.",
)
for execute_format in StateTest.supported_execute_formats
]
supported_markers: ClassVar[Dict[str, str]] = {
"eof_test_only": "Only generate an EOF test fixture",
}
@classmethod
def discard_fixture_format_by_marks(
cls,
fixture_format: FixtureFormat,
fork: Fork,
markers: List[pytest.Mark],
) -> bool:
"""
Discard a fixture format from filling if the appropriate marker is
used.
"""
del fork
if "eof_test_only" in [m.name for m in markers]:
return fixture_format != EOFFixture
return False
    @classmethod
    def pytest_parameter_name(cls) -> str:
        """Return the fixed pytest parameter name for this spec ("eof_test")."""
        return "eof_test"
    def model_post_init(self, __context: Any) -> None:
        """
        Prepare the test exception based on the container.

        Reconciles `expect_exception` with the container's own
        `validity_error`, reconciles the `container_kind` field with the
        container's `kind`, and funds the sender EOA used by the generated
        state test.
        """
        if self.container.validity_error is not None:
            # The container carries its own validity error; it must agree
            # with any explicitly expected exception, and then becomes the
            # expected exception itself.
            if self.expect_exception is not None:
                assert self.expect_exception == self.container.validity_error, (
                    f"Container validity error {self.container.validity_error} "
                    f"does not match expected exception {self.expect_exception}."
                )
            self.expect_exception = self.container.validity_error  # type: ignore[assignment]
            assert self.deployed_container is None, (
                "deployed_container must be None for invalid containers."
            )
        # Keep `container.kind` and `container_kind` in sync: if both were
        # set explicitly they must match; otherwise copy whichever was set.
        if "kind" in self.container.model_fields_set or "container_kind" in self.model_fields_set:
            if (
                "kind" in self.container.model_fields_set
                and "container_kind" in self.model_fields_set
            ):
                assert self.container.kind == self.container_kind, (
                    f"Container kind type {str(self.container.kind)} "
                    f"does not match test {self.container_kind}."
                )
            elif "kind" in self.container.model_fields_set:
                self.container_kind = self.container.kind
            elif "container_kind" in self.model_fields_set:
                self.container.kind = self.container_kind
        assert self.pre is not None, "pre must be set to generate a StateTest."
        self.sender = self.pre.fund_eoa()
        if self.post is None:
            self.post = Alloc()
    def make_eof_test_fixture(
        self,
        *,
        fork: Fork,
    ) -> EOFFixture:
        """
        Generate the EOF test fixture.

        Fails the test on a duplicate container (tracked in the module-level
        `existing_tests` registry). When `evmone-eofparse` is available, each
        generated vector is cross-checked against it; otherwise a warning is
        emitted and the unverified fixture is returned.
        """
        container_bytes = Bytes(self.container)
        if container_bytes in existing_tests:
            pytest.fail(
                f"Duplicate EOF test: {container_bytes}, "
                f"existing test: {existing_tests[container_bytes]}"
            )
        existing_tests[container_bytes] = self.node_id()
        vectors = [
            Vector(
                code=container_bytes,
                container_kind=self.container_kind,
                results={
                    fork: Result(
                        exception=self.expect_exception,
                        valid=self.expect_exception is None,
                    ),
                },
            )
        ]
        fixture = EOFFixture(vectors=dict(enumerate(vectors)))
        try:
            eof_parse = EOFParse()
        except FileNotFoundError as e:
            # Verification is best-effort: without the parser binary the
            # fixture is returned unchecked.
            warnings.warn(
                f"{e} Skipping EOF fixture verification. Fixtures may be invalid!", stacklevel=2
            )
            return fixture
        for _, vector in fixture.vectors.items():
            expected_result = vector.results.get(fork)
            if expected_result is None:
                raise Exception(f"EOF Fixture missing vector result for fork: {fork}")
            args = []
            if vector.container_kind == ContainerKind.INITCODE:
                args.append("--initcode")
            result = eof_parse.run(*args, input_value=str(vector.code))
            self.verify_result(result, expected_result, vector.code)
        return fixture
    def verify_result(
        self, result: CompletedProcess, expected_result: Result, code: Bytes
    ) -> None:
        """
        Check that the reported exception string matches the expected error.

        `result` is the completed `evmone-eofparse` process; a stdout line
        starting with "OK" means the container parsed successfully, anything
        else is parsed into an exception via the evmone exception mapper.

        Raises `UnexpectedEOFExceptionError`, `ExpectedEOFExceptionError` or
        `EOFExceptionMismatchError` accordingly.
        """
        evmone_exception_mapper = EvmoneExceptionMapper()
        actual_exception_str = result.stdout.strip()
        actual_exception: EOFExceptionWithMessage | UndefinedException | None = None
        if not actual_exception_str.startswith("OK"):
            actual_exception = eof_exception_type_adapter.validate_python(
                actual_exception_str, context={"exception_mapper": evmone_exception_mapper}
            )
        if expected_result.exception is None:
            # Expected valid: any reported exception is a failure.
            if actual_exception is not None:
                raise UnexpectedEOFExceptionError(code=code, got=f"{actual_exception}")
        else:
            expected_string = to_pipe_str(expected_result.exception)
            if actual_exception is None:
                raise ExpectedEOFExceptionError(
                    code=code,
                    expected=f"{expected_string}",
                )
            # An unmapped (Undefined) exception, or a mapped exception that is
            # not among the expected ones, is a mismatch.
            if (
                not isinstance(actual_exception, EOFExceptionWithMessage)
                or expected_result.exception not in actual_exception
            ):
                raise EOFExceptionMismatchError(
                    code=code,
                    expected=f"{expected_string}",
                    got=f"{actual_exception}",
                )
    def generate_eof_contract_create_transaction(self) -> Transaction:
        """
        Generate a transaction that creates a contract.

        For `INITCODE` containers, the container itself is the initcode and
        the deployed container is either given explicitly or inferred from the
        first container-type section. For `RUNTIME` containers, a wrapping
        init-container is synthesized. The post-state is updated with the
        expected deployment result (or `None` when failure is expected).
        """
        assert self.sender is not None, "sender must be set to generate a StateTest."
        assert self.post is not None, "post must be set to generate a StateTest."
        assert self.pre is not None, "pre must be set to generate a StateTest."
        initcode: Container
        deployed_container: Container | Bytes | None = None
        if self.container_kind == ContainerKind.INITCODE:
            initcode = self.container
            if "deployed_container" in self.model_fields_set:
                # In the case of an initcontainer where we know the deployed
                # container, we can use the initcontainer as-is.
                deployed_container = self.deployed_container
            elif self.expect_exception is None:
                # We have a valid init-container, but we don't know the
                # deployed container. Try to infer the deployed container from
                # the sections of the init-container.
                assert self.container.raw_bytes is None, (
                    "deployed_container must be set for initcode containers with raw_bytes."
                )
                for section in self.container.sections:
                    if section.kind == SectionKind.CONTAINER:
                        deployed_container = section.data
                        break
                assert deployed_container is not None, (
                    "Unable to infer deployed container for init-container. "
                    "Use field `deployed_container` to set the expected deployed container."
                )
        else:
            assert self.deployed_container is None, (
                "deployed_container must be None for runtime containers."
            )
            # Synthesize an init-container that simply returns the runtime
            # container under test.
            initcode = Container(
                sections=[
                    Section.Code(Op.RETURNCODE[0](0, 0)),
                    Section.Container(self.container),
                ]
            )
            deployed_container = self.container
        # Factory contract that TXCREATEs the initcode sent with the tx.
        factory_address = self.pre.deploy_contract(
            Op.TXCREATE(tx_initcode_hash=initcode.hash) + Op.STOP
        )
        tx = Transaction(
            sender=self.sender,
            to=factory_address,
            gas_limit=10_000_000,
            max_priority_fee_per_gas=10,
            max_fee_per_gas=10,
            initcodes=[initcode],
        )
        # Expected deployment address: no account on expected failure,
        # otherwise an account holding the deployed container's code.
        if self.expect_exception is not None or deployed_container is None:
            self.post[compute_eofcreate_address(factory_address, 0)] = None
        else:
            self.post[compute_eofcreate_address(factory_address, 0)] = Account(
                code=deployed_container,
            )
        return tx
    def generate_state_test(self, fork: Fork) -> StateTest:
        """
        Wrap this EOF validation test into a `StateTest` filler that deploys
        the container via a contract-creating transaction.

        `fork` is unused here; fork selection happens when the resulting
        `StateTest` is generated.
        """
        del fork
        return StateTest.from_test(
            base_test=self,
            pre=self.pre,
            tx=self.generate_eof_contract_create_transaction(),
            env=Environment(),
            post=self.post,
        )
    def generate(
        self,
        *,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
        **_: Any,
    ) -> BaseFixture:
        """
        Generate the fixture in the requested format.

        `EOFFixture` requests produce a pure EOF-validation fixture; any
        format supported by `StateTest` is produced by first converting this
        test into a `StateTest`.
        """
        if fixture_format == EOFFixture:
            return self.make_eof_test_fixture(fork=fork)
        elif fixture_format in StateTest.supported_fixture_formats:
            return self.generate_state_test(fork).generate(
                t8n=t8n, fork=fork, fixture_format=fixture_format
            )
        raise Exception(f"Unknown fixture format: {fixture_format}")
    def execute(
        self,
        *,
        fork: Fork,
        execute_format: ExecuteFormat,
    ) -> BaseExecute:
        """
        Execute the test by converting it into a `StateTest`; only the
        `TransactionPost` execute format is supported.
        """
        if execute_format == TransactionPost:
            return self.generate_state_test(fork).execute(fork=fork, execute_format=execute_format)
        raise Exception(f"Unsupported execute format: {execute_format}")
EOFTestSpec = Callable[[str], Generator[EOFTest, None, None]]
EOFTestFiller = Type[EOFTest]
class EOFStateTest(EOFTest, Transaction):
"""
Filler type that generates an EOF test for container validation, and also
tests the container during runtime using a state test (and blockchain
test).
In the state or blockchain test, the container is first deployed to the
pre-allocation and then a transaction is sent to the deployed container.
Container deployment/validation is **not** tested like in the `EOFTest`
unless the container under test is an initcode container.
All fields from `ethereum_test_types.Transaction` are available for use in
the test.
"""
gas_limit: HexNumber = Field(HexNumber(10_000_000), serialization_alias="gas")
"""Gas limit for the transaction that deploys the container."""
tx_sender_funding_amount: int = 1_000_000_000_000_000_000_000
"""Amount of funds to send to the sender EOA before the transaction."""
env: Environment = Field(default_factory=Environment)
"""Environment object that is used during State Test generation."""
container_post: Account = Field(default_factory=Account)
"""Account object used to verify the container post state."""
supported_fixture_formats: ClassVar[Sequence[FixtureFormat | LabeledFixtureFormat]] = [
EOFFixture
] + [
LabeledFixtureFormat(
fixture_format,
f"eof_{fixture_format.format_name}",
f"Tests that generate an EOF {fixture_format.format_name}.",
)
for fixture_format in StateTest.supported_fixture_formats
]
supported_execute_formats: ClassVar[Sequence[LabeledExecuteFormat]] = [
LabeledExecuteFormat(
execute_format,
f"eof_{execute_format.label}",
f"Tests that generate an EOF {execute_format.label}.",
)
for execute_format in StateTest.supported_execute_formats
]
    @classmethod
    def pytest_parameter_name(cls) -> str:
        """Return the fixed pytest parameter name for this spec ("eof_state_test")."""
        return "eof_state_test"
    def model_post_init(self, __context: Any) -> None:
        """
        Prepare the transaction parameters required to fill the test.

        Runs the `EOFTest` reconciliation first, then deploys the contract the
        transaction will target and records the expected post-state for the
        four (exception × container-kind) combinations.
        """
        assert self.pre is not None, "pre must be set to generate a StateTest."
        EOFTest.model_post_init(self, __context)
        self.sender = self.pre.fund_eoa(amount=self.tx_sender_funding_amount)
        if self.post is None:
            self.post = Alloc()
        if self.expect_exception is not None and self.container_kind == ContainerKind.RUNTIME:
            # Invalid EOF runtime code: wrap it in an init-container and
            # attempt deployment via a TXCREATE factory.
            initcode = Container.Init(deploy_container=self.container)
            self.to = self.pre.deploy_contract(
                Op.TXCREATE(tx_initcode_hash=initcode.hash) + Op.STOP
            )
            self.initcodes = [initcode]  # type: ignore[list-item]
            # Run transaction model validation
            Transaction.model_post_init(self, __context)
            # Expect deployment failure: no account at the create address.
            self.post[compute_eofcreate_address(self.to, 0)] = None
        elif self.expect_exception is not None and self.container_kind == ContainerKind.INITCODE:
            # Invalid EOF initcode
            self.to = self.pre.deploy_contract(
                Op.TXCREATE(tx_initcode_hash=self.container.hash) + Op.STOP
            )
            self.initcodes = [self.container]  # type: ignore[list-item]
            # Run transaction model validation
            Transaction.model_post_init(self, __context)
            # Expect deployment failure: no account at the create address.
            self.post[compute_eofcreate_address(self.to, 0)] = None
        elif self.container_kind == ContainerKind.INITCODE:
            # Valid initcode: deployment is expected to succeed.
            self.to = self.pre.deploy_contract(
                Op.TXCREATE(tx_initcode_hash=self.container.hash) + Op.STOP
            )
            self.initcodes = [self.container]  # type: ignore[list-item]
            # Run transaction model validation
            Transaction.model_post_init(self, __context)
            self.post[compute_eofcreate_address(self.to, 0)] = self.container_post
        else:
            # Valid runtime container: deploy it directly and call it.
            self.to = self.pre.deploy_contract(code=self.container)
            # Run transaction model validation
            Transaction.model_post_init(self, __context)
            self.post[self.to] = self.container_post
    def generate_state_test(self, fork: Fork) -> StateTest:
        """
        Wrap this EOF state test into a `StateTest` filler; the test instance
        itself acts as the transaction (it subclasses `Transaction`).

        `fork` is unused here; fork selection happens when the resulting
        `StateTest` is generated.
        """
        del fork
        assert self.pre is not None, "pre must be set to generate a StateTest."
        assert self.post is not None, "post must be set to generate a StateTest."
        return StateTest.from_test(
            base_test=self,
            pre=self.pre,
            tx=self,
            env=self.env,
            post=self.post,
        )
    def generate(
        self,
        *,
        t8n: TransitionTool,
        fork: Fork,
        fixture_format: FixtureFormat,
        **_: Any,
    ) -> BaseFixture:
        """
        Generate the fixture in the requested format (EOF fixture or any
        state-test-derived fixture format).
        """
        if fixture_format == EOFFixture:
            if Bytes(self.container) in existing_tests:
                # Gracefully skip duplicate tests because one EOFStateTest can
                # generate multiple state fixtures with the same data.
                pytest.skip(f"Duplicate EOF container on EOFStateTest: {self.node_id()}")
            return self.make_eof_test_fixture(fork=fork)
        elif fixture_format in StateTest.supported_fixture_formats:
            return self.generate_state_test(fork).generate(
                t8n=t8n, fork=fork, fixture_format=fixture_format
            )
        raise Exception(f"Unknown fixture format: {fixture_format}")
EOFStateTestSpec = Callable[[str], Generator[EOFStateTest, None, None]]
EOFStateTestFiller = Type[EOFStateTest]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/test_transaction.py | src/ethereum_test_specs/tests/test_transaction.py | """Test suite for the transaction spec test generation."""
import json
import os
import pytest
from ethereum_test_fixtures import TransactionFixture
from ethereum_test_forks import Fork, Shanghai
from ethereum_test_types import Transaction
from ..transaction import TransactionTest
from .helpers import remove_info_metadata
@pytest.mark.parametrize(
"name, tx, fork",
[
pytest.param("simple_type_0", Transaction(), Shanghai),
],
)
def test_transaction_test_filling(name: str, tx: Transaction, fork: Fork) -> None:
    """Fill a transaction test and compare it against the stored fixture file."""
    generated_fixture = TransactionTest(tx=tx.with_signature_and_sender()).generate(
        t8n=None,  # type: ignore
        fork=fork,
        fixture_format=TransactionFixture,
    )
    assert generated_fixture.__class__ == TransactionFixture
    fixture = {"fixture": generated_fixture.json_dict_with_info()}
    expected_path = os.path.join(
        "src",
        "ethereum_test_specs",
        "tests",
        "fixtures",
        f"tx_{name}_{fork.name().lower()}.json",
    )
    with open(expected_path) as json_file:
        expected = json.load(json_file)
    # Tool-version metadata differs between runs; strip it on both sides.
    remove_info_metadata(expected)
    remove_info_metadata(fixture)
    assert fixture == expected
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/test_expect.py | src/ethereum_test_specs/tests/test_expect.py | """Test fixture post state (expect section) during state fixture generation."""
from typing import Any, Mapping, Type
import pytest
from ethereum_clis import TransitionTool
from ethereum_test_base_types import Account, Address, Storage, TestAddress, TestPrivateKey
from ethereum_test_exceptions import TransactionException
from ethereum_test_fixtures import BlockchainFixture, FixtureFormat, StateFixture
from ethereum_test_forks import Fork, get_deployed_forks
from ethereum_test_tools import Block
from ethereum_test_types import Alloc, Environment, Transaction, TransactionReceipt
from ..blockchain import BlockchainEngineFixture, BlockchainTest
from ..helpers import (
ExecutionExceptionMismatchError,
TransactionReceiptMismatchError,
UnexpectedExecutionFailError,
UnexpectedExecutionSuccessError,
)
from ..state import StateTest
ADDRESS_UNDER_TEST = Address(0x01)
@pytest.fixture
def tx() -> Transaction:
"""Fixture set from the test's indirectly parametrized `tx` parameter."""
return Transaction(secret_key=TestPrivateKey)
@pytest.fixture
def pre(request: Any) -> Alloc:
"""Fixture set from the test's indirectly parametrized `pre` parameter."""
extra_accounts = {}
if hasattr(request, "param"):
extra_accounts = request.param
return Alloc(extra_accounts | {TestAddress: Account(balance=(10**18))})
@pytest.fixture
def post(request: Any) -> Alloc: # noqa: D103
"""Fixture set from the test's indirectly parametrized `post` parameter."""
extra_accounts = {}
if hasattr(request, "param"):
extra_accounts = request.param
return Alloc(extra_accounts)
@pytest.fixture
def fork() -> Fork: # noqa: D103
return get_deployed_forks()[-1]
@pytest.fixture
def state_test( # noqa: D103
pre: Mapping[Any, Any], post: Mapping[Any, Any], tx: Transaction
) -> StateTest:
return StateTest(
env=Environment(),
pre=pre,
post=post,
tx=tx,
)
# Storage value mismatch tests
@pytest.mark.parametrize(
"pre,post,expected_exception",
[
( # mismatch_1: 1:1 vs 1:2
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=2, got=1),
),
( # mismatch_2: 1:1 vs 2:1
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x02": "0x01"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1),
),
( # mismatch_2_a: 1:1 vs 0:0
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x00"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1),
),
( # mismatch_2_b: 1:1 vs empty
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x01"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=0, got=1),
),
( # mismatch_3: 0:0 vs 1:2
{ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x00"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=2, got=0),
),
( # mismatch_3_a: empty vs 1:2
{ADDRESS_UNDER_TEST: Account(storage={}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=1, want=2, got=0),
),
( # mismatch_4: 0:3, 1:2 vs 1:2
{ADDRESS_UNDER_TEST: Account(storage={"0x00": "0x03", "0x01": "0x02"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=0, want=0, got=3),
),
( # mismatch_5: 1:2, 2:3 vs 1:2
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02", "0x02": "0x03"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=2, want=0, got=3),
),
( # mismatch_6: 1:2 vs 1:2, 2:3
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02"}, nonce=1)},
{ADDRESS_UNDER_TEST: Account(storage={"0x01": "0x02", "0x02": "0x03"})},
Storage.KeyValueMismatchError(address=ADDRESS_UNDER_TEST, key=2, want=3, got=0),
),
],
indirect=["pre", "post"],
)
def test_post_storage_value_mismatch(
expected_exception: Storage.KeyValueMismatchError,
state_test: StateTest,
default_t8n: TransitionTool,
fork: Fork,
) -> None:
"""
Test post state `Account.storage` exceptions during state test fixture
generation.
"""
with pytest.raises(Storage.KeyValueMismatchError) as e_info:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
assert e_info.value == expected_exception
# Nonce value mismatch tests
@pytest.mark.parametrize(
"pre,post",
[
({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account(nonce=2)}),
({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account(nonce=0)}),
({ADDRESS_UNDER_TEST: Account(nonce=1)}, {ADDRESS_UNDER_TEST: Account()}),
],
indirect=["pre", "post"],
)
def test_post_nonce_value_mismatch(
pre: Alloc, post: Alloc, state_test: StateTest, default_t8n: TransitionTool, fork: Fork
) -> None:
"""
Test post state `Account.nonce` verification and exceptions during state
test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
assert pre_account is not None
assert post_account is not None
pre_nonce = pre_account.nonce
post_nonce = post_account.nonce
if "nonce" not in post_account.model_fields_set: # no exception
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
return
with pytest.raises(Account.NonceMismatchError) as e_info:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
assert e_info.value == Account.NonceMismatchError(
address=ADDRESS_UNDER_TEST, want=post_nonce, got=pre_nonce
)
# Code value mismatch tests
@pytest.mark.parametrize(
"pre,post",
[
({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account(code="0x01")}),
({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account(code="0x")}),
({ADDRESS_UNDER_TEST: Account(code="0x02")}, {ADDRESS_UNDER_TEST: Account()}),
],
indirect=["pre", "post"],
)
def test_post_code_value_mismatch(
pre: Alloc, post: Alloc, state_test: StateTest, default_t8n: TransitionTool, fork: Fork
) -> None:
"""
Test post state `Account.code` verification and exceptions during state
test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
assert pre_account is not None
assert post_account is not None
pre_code = pre_account.code
post_code = post_account.code
if "code" not in post_account.model_fields_set: # no exception
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
return
with pytest.raises(Account.CodeMismatchError) as e_info:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
assert e_info.value == Account.CodeMismatchError(
address=ADDRESS_UNDER_TEST, want=post_code, got=pre_code
)
# Balance value mismatch tests
@pytest.mark.parametrize(
"pre,post",
[
({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account(balance=2)}),
({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account(balance=0)}),
({ADDRESS_UNDER_TEST: Account(balance=1)}, {ADDRESS_UNDER_TEST: Account()}),
],
indirect=["pre", "post"],
)
def test_post_balance_value_mismatch(
pre: Alloc, post: Alloc, state_test: StateTest, default_t8n: TransitionTool, fork: Fork
) -> None:
"""
Test post state `Account.balance` verification and exceptions during state
test fixture generation.
"""
pre_account = pre[ADDRESS_UNDER_TEST]
post_account = post[ADDRESS_UNDER_TEST]
assert pre_account is not None
assert post_account is not None
pre_balance = pre_account.balance
post_balance = post_account.balance
if "balance" not in post_account.model_fields_set: # no exception
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
return
with pytest.raises(Account.BalanceMismatchError) as e_info:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
assert e_info.value == Account.BalanceMismatchError(
address=ADDRESS_UNDER_TEST, want=post_balance, got=pre_balance
)
# Account mismatch tests
@pytest.mark.parametrize(
"pre,post,exception_type",
[
(
{ADDRESS_UNDER_TEST: Account(balance=1)},
{ADDRESS_UNDER_TEST: Account()},
None,
),
(
{ADDRESS_UNDER_TEST: Account(balance=1)},
{ADDRESS_UNDER_TEST: Account(balance=1), Address(0x02): Account(balance=1)},
Alloc.MissingAccountError,
),
(
{ADDRESS_UNDER_TEST: Account(balance=1)},
{},
None,
),
(
{ADDRESS_UNDER_TEST: Account(balance=1)},
{ADDRESS_UNDER_TEST: Account.NONEXISTENT},
Alloc.UnexpectedAccountError,
),
],
indirect=["pre", "post"],
)
def test_post_account_mismatch(
state_test: StateTest,
default_t8n: TransitionTool,
fork: Fork,
exception_type: Type[Exception] | None,
) -> None:
"""
Test post state `Account` verification and exceptions during state test
fixture generation.
"""
if exception_type is None:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
return
with pytest.raises(exception_type) as _:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=StateFixture)
# Transaction result mismatch tests
@pytest.mark.parametrize(
"tx,exception_type",
[
pytest.param(
Transaction(
secret_key=TestPrivateKey,
gas_limit=20_999,
error=TransactionException.SENDER_NOT_EOA,
),
ExecutionExceptionMismatchError,
id="TransactionExecutionExceptionMismatchError",
),
pytest.param(
Transaction(
secret_key=TestPrivateKey,
error=TransactionException.INTRINSIC_GAS_TOO_LOW,
expected_receipt=TransactionReceipt(gas_used=21_000),
),
UnexpectedExecutionSuccessError,
id="TransactionUnexpectedExecutionSuccessError",
),
pytest.param(
Transaction(
secret_key=TestPrivateKey,
gas_limit=20_999,
expected_receipt=TransactionReceipt(gas_used=21_000),
),
UnexpectedExecutionFailError,
id="TransactionUnexpectedExecutionFailError",
),
pytest.param(
Transaction(
secret_key=TestPrivateKey,
expected_receipt=TransactionReceipt(gas_used=21_001),
),
TransactionReceiptMismatchError,
id="TransactionReceiptMismatchError",
),
pytest.param(
Transaction(
secret_key=TestPrivateKey,
gas_limit=20_999,
expected_receipt=TransactionReceipt(gas_used=21_001),
),
UnexpectedExecutionFailError,
id="TransactionUnexpectedExecutionFailError+TransactionReceiptMismatchError",
),
pytest.param(
Transaction(
secret_key=TestPrivateKey,
error=TransactionException.INTRINSIC_GAS_TOO_LOW,
expected_receipt=TransactionReceipt(gas_used=21_001),
),
UnexpectedExecutionSuccessError,
id="TransactionUnexpectedExecutionSuccessError+TransactionReceiptMismatchError",
),
],
)
@pytest.mark.parametrize(
"fixture_format",
[
StateFixture,
BlockchainFixture,
],
)
def test_transaction_expectation(
state_test: StateTest,
default_t8n: TransitionTool,
fork: Fork,
exception_type: Type[Exception] | None,
fixture_format: FixtureFormat,
) -> None:
"""
Test a transaction that has an unexpected error, expected error, or
expected a specific value in its receipt.
"""
if (
exception_type == ExecutionExceptionMismatchError
and not default_t8n.exception_mapper.reliable
):
pytest.xfail(
reason="Exceptions need to be better described in the t8n tool "
f"({default_t8n.__class__.__name__})."
)
if exception_type is None:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
else:
with pytest.raises(exception_type) as _:
state_test.generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
@pytest.mark.parametrize(
"intermediate_state,expected_exception",
[
pytest.param(
{
TestAddress: Account(nonce=1),
Address(0x01): Account(balance=1),
},
None,
id="NoException",
),
pytest.param(
{
TestAddress: Account(nonce=2),
Address(0x01): Account(balance=1),
},
Account.NonceMismatchError,
id="NonceMismatchError",
),
pytest.param(
{
TestAddress: Account(nonce=1),
Address(0x01): Account(balance=2),
},
Account.BalanceMismatchError,
id="BalanceMismatchError",
),
],
)
@pytest.mark.parametrize(
"fixture_format",
[
BlockchainFixture,
BlockchainEngineFixture,
],
)
def test_block_intermediate_state(
pre: Alloc,
default_t8n: TransitionTool,
fork: Fork,
fixture_format: FixtureFormat,
intermediate_state: Mapping[Any, Any],
expected_exception: Type[Exception] | None,
) -> None:
"""Validate the state when building blockchain."""
env = Environment()
to = Address(0x01)
tx = Transaction(gas_limit=100_000, to=to, value=1, nonce=0, secret_key=TestPrivateKey)
tx_2 = Transaction(gas_limit=100_000, to=to, value=1, nonce=1, secret_key=TestPrivateKey)
block_1 = Block(
txs=[tx],
expected_post_state={
TestAddress: Account(nonce=1),
to: Account(balance=1),
},
)
block_2 = Block(txs=[], expected_post_state=intermediate_state)
block_3 = Block(
txs=[tx_2],
expected_post_state={
TestAddress: Account(nonce=2),
to: Account(balance=2),
},
)
if expected_exception:
with pytest.raises(expected_exception) as _:
BlockchainTest(
genesis_environment=env,
pre=pre,
post=block_3.expected_post_state,
blocks=[block_1, block_2, block_3],
).generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
return
else:
BlockchainTest(
genesis_environment=env,
pre=pre,
post=block_3.expected_post_state,
blocks=[block_1, block_2, block_3],
).generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/test_types.py | src/ethereum_test_specs/tests/test_types.py | """Test types from ethereum_test_specs."""
import pytest
from ethereum_test_base_types import Address, Bloom, Bytes, Hash, HeaderNonce
from ethereum_test_fixtures.blockchain import FixtureHeader
from ..blockchain import Header
fixture_header_ones = FixtureHeader(
parent_hash=Hash(1),
ommers_hash=Hash(1),
fee_recipient=Address(1),
state_root=Hash(1),
transactions_trie=Hash(1),
receipts_root=Hash(1),
logs_bloom=Bloom(1),
difficulty=1,
number=1,
gas_limit=1,
gas_used=1,
timestamp=1,
extra_data=Bytes([1]),
prev_randao=Hash(1),
nonce=HeaderNonce(1),
base_fee_per_gas=1,
withdrawals_root=Hash(1),
blob_gas_used=1,
excess_blob_gas=1,
# hash=Hash(1),
)
@pytest.mark.parametrize(
"fixture_header,modifier,fixture_header_expected",
[
pytest.param(
fixture_header_ones,
Header(),
fixture_header_ones,
id="default_header",
),
pytest.param(
fixture_header_ones,
Header(
state_root="0x0000000000000000000000000000000000000000000000000000000000000100"
),
fixture_header_ones.copy(
state_root="0x0000000000000000000000000000000000000000000000000000000000000100"
),
id="state_root_as_str",
),
pytest.param(
fixture_header_ones,
Header(state_root=100),
fixture_header_ones.copy(state_root=100),
id="state_root_as_int",
),
pytest.param(
fixture_header_ones,
Header(state_root=Hash(100)),
fixture_header_ones.copy(state_root=100),
id="state_root_as_hash",
),
pytest.param(
fixture_header_ones,
Header(withdrawals_root=Header.REMOVE_FIELD), # state_root is not removable
fixture_header_ones.copy(withdrawals_root=None),
id="state_root_as_header_remove_field",
),
pytest.param(
fixture_header_ones,
Header(state_root=None),
fixture_header_ones,
id="state_root_as_none",
),
pytest.param(
fixture_header_ones,
Header(
logs_bloom="0x00000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000100"
),
fixture_header_ones.copy(
logs_bloom="0x00000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000000000000000000"
"000000000000000000000000000000000000100"
),
id="bloom_as_str",
),
pytest.param(
fixture_header_ones,
Header(logs_bloom=100),
fixture_header_ones.copy(logs_bloom=100),
id="bloom_as_int",
),
pytest.param(
fixture_header_ones,
Header(logs_bloom=Bloom(100)),
fixture_header_ones.copy(logs_bloom=100),
id="bloom_as_hash",
),
pytest.param(
fixture_header_ones,
Header(
state_root="0x0000000000000000000000000000000000000000000000000000000000000100",
logs_bloom=Bloom(200),
difficulty=300,
),
fixture_header_ones.copy(
state_root=0x100,
logs_bloom=200,
difficulty=300,
),
id="multiple_fields",
),
],
)
def test_fixture_header_join(
fixture_header: FixtureHeader, modifier: Header, fixture_header_expected: FixtureHeader
) -> None:
"""Test that the join method works as expected."""
assert modifier.apply(fixture_header) == fixture_header_expected
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/helpers.py | src/ethereum_test_specs/tests/helpers.py | """Helper methods used in the spec tests."""
from typing import Any, Dict
def remove_info_metadata(fixture_json: Dict[str, Any]) -> None: # noqa: D103
for t in fixture_json:
if "_info" in fixture_json[t]:
info_keys = list(fixture_json[t]["_info"].keys())
for key in info_keys:
if key != "hash": # remove keys that are not 'hash'
del fixture_json[t]["_info"][key]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/__init__.py | src/ethereum_test_specs/tests/__init__.py | """Tests for the ethereum_test_specs package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/test_fixtures.py | src/ethereum_test_specs/tests/test_fixtures.py | """Test suite for `ethereum_test_specs` fixture generation."""
import json
import os
from typing import Any, List, Mapping
import pytest
from click.testing import CliRunner
import cli.check_fixtures
from ethereum_clis import TransitionTool
from ethereum_test_base_types import AccessList, Account, Address, Hash
from ethereum_test_exceptions import TransactionException
from ethereum_test_fixtures import (
BaseFixture,
BlockchainEngineFixture,
BlockchainFixture,
BlockchainFixtureCommon,
FixtureFormat,
StateFixture,
)
from ethereum_test_forks import Berlin, Cancun, Fork, Istanbul, London, Paris, Shanghai
from ethereum_test_types import Alloc, Environment, Transaction, TransactionType
from ethereum_test_vm import Opcodes as Op
from ..blockchain import Block, BlockchainTest, Header
from ..state import StateTest
from .helpers import remove_info_metadata
@pytest.fixture()
def fixture_hash(fork: Fork) -> bytes:
"""Set the fixture hash based on the fork."""
if fork == Berlin:
return bytes.fromhex("e57ad774ca")
elif fork == London:
return bytes.fromhex("3714102a4c")
elif fork == Cancun:
return bytes.fromhex("2885c707e3")
raise ValueError(f"Unexpected fork: {fork}")
def test_check_helper_fixtures() -> None:
"""
Test that the framework's pydantic models serialization and deserialization
work correctly and that they are compatible with the helper fixtures
defined in ./fixtures/ by using the check_fixtures.py script.
"""
runner = CliRunner()
args = [
"--input",
"src/ethereum_test_specs/tests/fixtures",
"--quiet",
"--stop-on-error",
]
result = runner.invoke(cli.check_fixtures.check_fixtures, args)
assert result.exit_code == 0, (
"check_fixtures detected errors in the json fixtures:" + f"\n{result}"
)
@pytest.mark.parametrize(
"fork",
[
Berlin,
London,
Cancun,
],
)
def test_make_genesis(fork: Fork, fixture_hash: bytes, default_t8n: TransitionTool) -> None: # noqa: D103
env = Environment(gas_limit=100_000_000_000_000_000)
pre = Alloc(
{
Address(0x0BA1A9CE0BA1A9CE): Account(balance=0x0BA1A9CE0BA1A9CE),
Address(0xC0DE): Account(
code=Op.SSTORE(0, Op.ADD(1, 2)) + Op.RETURN(0, 32),
balance=0x0BA1A9CE0BA1A9CE,
nonce=1,
),
}
)
fixture = BlockchainTest(
genesis_environment=env,
pre=pre,
post={},
blocks=[],
tag="some_state_test",
).generate(t8n=default_t8n, fork=fork, fixture_format=BlockchainFixture)
assert isinstance(fixture, BlockchainFixture)
assert fixture.genesis is not None
assert fixture.genesis.block_hash is not None
assert fixture.genesis.block_hash.startswith(fixture_hash)
@pytest.mark.parametrize(
"fork,fixture_format,tx_type",
[
(Istanbul, BlockchainFixture, TransactionType.LEGACY),
(London, BlockchainFixture, TransactionType.LEGACY),
(Cancun, BlockchainFixture, TransactionType.LEGACY),
(Paris, BlockchainEngineFixture, TransactionType.LEGACY),
(Shanghai, BlockchainEngineFixture, TransactionType.LEGACY),
(Cancun, BlockchainEngineFixture, TransactionType.LEGACY),
(Paris, StateFixture, TransactionType.LEGACY),
(Shanghai, StateFixture, TransactionType.LEGACY),
(Cancun, StateFixture, TransactionType.LEGACY),
(Cancun, StateFixture, TransactionType.ACCESS_LIST),
],
)
def test_fill_state_test(
fork: Fork,
fixture_format: FixtureFormat,
tx_type: TransactionType,
default_t8n: TransitionTool,
) -> None:
"""Test `ethereum_test.filler.fill_fixtures` with `StateTest`."""
env = Environment(
fee_recipient="0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
difficulty=0x20000,
gas_limit=10000000000,
number=1,
timestamp=1000,
)
pre = {
0x1000000000000000000000000000000000000000: Account(code="0x4660015500"),
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=1000000000000000000000),
}
if tx_type == TransactionType.LEGACY:
tx = Transaction(
chain_id=0x0,
nonce=0,
to="0x1000000000000000000000000000000000000000",
gas_limit=100000000,
gas_price=10,
protected=False,
)
elif tx_type == TransactionType.ACCESS_LIST:
tx = Transaction(
ty=0x1,
chain_id=0x1,
nonce=0,
to="0x1000000000000000000000000000000000000000",
gas_limit=100000000,
gas_price=10,
access_list=[
AccessList(
address=0x1234,
storage_keys=[0, 1],
)
],
)
post = {
"0x1000000000000000000000000000000000000000": Account(
code="0x4660015500", storage={"0x01": "0x01"}
),
}
generated_fixture = StateTest(
env=env,
pre=pre,
post=post,
tx=tx,
tag="my_chain_id_test",
).generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
assert generated_fixture.__class__ == fixture_format
fixture = {
f"000/my_chain_id_test/{fork}/tx_type_{tx_type}": generated_fixture.json_dict_with_info(
hash_only=True
),
}
format_name = fixture_format.format_name
expected_json_file = f"chainid_{fork.name().lower()}_{format_name}_tx_type_{tx_type}.json"
with open(
os.path.join(
"src",
"ethereum_test_specs",
"tests",
"fixtures",
expected_json_file,
)
) as f:
expected = json.load(f)
remove_info_metadata(expected)
remove_info_metadata(fixture)
assert fixture == expected
class TestFillBlockchainValidTxs:
"""Test `BlockchainTest.generate()` and blockchain fixtures."""
@pytest.fixture
def fork(self, request: Any) -> Fork: # noqa: D102
return request.param
@pytest.fixture
def check_hive(self, fork: Fork) -> bool: # noqa: D102
return fork == Shanghai
@pytest.fixture
def expected_json_file(self, fork: Fork, check_hive: bool) -> str: # noqa: D102
if fork == London and not check_hive:
return "blockchain_london_valid_filled.json"
elif fork == Shanghai and check_hive:
return "blockchain_shanghai_valid_filled_engine.json"
raise ValueError(f"Unexpected fork/check_hive combination: {fork}/{check_hive}")
@pytest.fixture
def pre(self, fork: Fork) -> Mapping[Any, Any]: # noqa: D102
pre = {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000),
"0xd02d72E067e77158444ef2020Ff2d325f929B363": Account(
balance=0x1000000000000000000, nonce=1
),
"0xcccccccccccccccccccccccccccccccccccccccc": Account(
balance=0x10000000000,
nonce=1,
code=(
Op.SSTORE(Op.NUMBER(), Op.BASEFEE())
+ Op.SSTORE(Op.ADD(Op.NUMBER(), 0x1000), Op.SUB(Op.GASPRICE(), Op.BASEFEE()))
+ Op.SSTORE(Op.ADD(Op.NUMBER(), 0x2000), Op.SELFBALANCE())
+ Op.STOP()
),
),
"0xcccccccccccccccccccccccccccccccccccccccd": Account(
balance=0x20000000000,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.POP
),
),
0xC0DE: Account(
balance=0,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.POP
),
),
"0xccccccccccccccccccccccccccccccccccccccce": Account(
balance=0x20000000000,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH2("0x1000")
+ Op.PUSH2("0xc0de")
+ Op.GAS
+ Op.CALL
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.SWAP1
+ Op.POP
+ Op.POP
),
),
}
return pre
@pytest.fixture
def blocks(self) -> List[Block]: # noqa: D102
blocks: List[Block] = [
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x01",
nonce=0,
gas_limit=1000000,
max_priority_fee_per_gas=1,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0201",
nonce=1,
gas_limit=1000000,
max_priority_fee_per_gas=10,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0202",
nonce=2,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
Transaction(
data="0x0203",
nonce=3,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0301",
nonce=4,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0303",
nonce=5,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
Transaction(
data="0x0304",
nonce=6,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=100000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0401",
nonce=7,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0403",
nonce=8,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
Transaction(
data="0x0404",
nonce=9,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=100000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
],
),
]
return blocks
@pytest.fixture
def post(self) -> Mapping[Any, Any]: # noqa: D102
post = {
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account(
storage={
# BASEFEE and the tip in block 1
0x0001: 875, # BASEFEE
0x1001: 1, # tip
# Block 2
0x0002: 766, # BASEFEE
0x1002: 10, # tip
# Block 3
0x0003: 671,
0x1003: 329,
# Block 4
0x0004: 588,
0x1004: 412,
# SELFBALANCE, always the same
0x2001: 0x010000000000,
0x2002: 0x010000000000,
0x2003: 0x010000000000,
0x2004: 0x010000000000,
}
),
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account(
storage={
# Block 2
0x0002: 766, # BASEFEE
0x1002: 100, # tip
# Block 3
0x0003: 671,
0x1003: 99329,
# Block 4
0x0004: 588,
0x1004: 99412,
# SELFBALANCE, always the same
0x2002: 0x020000000000,
0x2003: 0x020000000000,
0x2004: 0x020000000000,
}
),
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account(
storage={
# Block 2
0x0002: 766, # BASEFEE
0x1002: 100, # tip
0x0003: 671,
0x1003: 100,
0x0004: 588,
0x1004: 100,
# SELFBALANCE
0x2002: 0x01FFFFFFF000,
0x2003: 0x01FFFFFFE000,
0x2004: 0x01FFFFFFD000,
}
),
0xC0DE: Account(
storage={
# Block 2
0x0002: 766,
0x1002: 100,
# Block 3
0x0003: 671,
0x1003: 100,
# Block 4
0x0004: 588,
0x1004: 100,
# SELFBALANCE
0x2002: 0x1000,
0x2003: 0x2000,
0x2004: 0x3000,
}
),
}
return post
@pytest.fixture
def genesis_environment(self) -> Environment: # noqa: D102
return Environment(
gas_limit=100_000_000_000_000_000,
base_fee_per_gas=1000,
fee_recipient="0x0000000000000000000000000000000000000000",
)
@pytest.fixture
def fixture_format(self, check_hive: bool) -> FixtureFormat: # noqa: D102
return BlockchainEngineFixture if check_hive else BlockchainFixture
@pytest.fixture
def blockchain_test_fixture( # noqa: D102
self,
fork: Fork,
pre: Mapping[Any, Any],
post: Mapping[Any, Any],
blocks: List[Block],
genesis_environment: Environment,
fixture_format: FixtureFormat,
default_t8n: TransitionTool,
) -> BaseFixture:
return BlockchainTest(
pre=pre,
post=post,
blocks=blocks,
genesis_environment=genesis_environment,
tag="my_blockchain_test_valid_txs",
).generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
@pytest.mark.parametrize("fork", [London, Shanghai], indirect=True)
def test_fill_blockchain_valid_txs( # noqa: D102
self,
fork: Fork,
fixture_format: FixtureFormat,
expected_json_file: str,
blockchain_test_fixture: BlockchainFixture,
) -> None:
assert blockchain_test_fixture.__class__ == fixture_format
# BlockchainEngineFixture inherits from BlockchainEngineFixtureCommon
# (not BlockchainFixtureCommon)
from ethereum_test_fixtures.blockchain import BlockchainEngineFixtureCommon
assert isinstance(
blockchain_test_fixture, (BlockchainFixtureCommon, BlockchainEngineFixtureCommon)
)
fixture_name = f"000/my_blockchain_test/{fork.name()}"
fixture = {
fixture_name: blockchain_test_fixture.json_dict_with_info(hash_only=True),
}
with open(
os.path.join(
"src",
"ethereum_test_specs",
"tests",
"fixtures",
expected_json_file,
)
) as f:
expected = json.load(f)
remove_info_metadata(expected)
remove_info_metadata(fixture)
assert fixture_name in fixture
assert fixture_name in expected
assert fixture[fixture_name] == expected[fixture_name]
@pytest.mark.parametrize("fork", [London], indirect=True)
def test_fixture_header_join(self, blockchain_test_fixture: BlockchainFixture) -> None:
"""Test `FixtureHeader.join()`."""
block = blockchain_test_fixture.blocks[0]
new_difficulty = block.header.difficulty - 1 # type: ignore
new_state_root = Hash(12345)
# See description of https://github.com/ethereum/execution-spec-tests/pull/398
new_transactions_root = 0x100
header_new_fields = Header(
difficulty=new_difficulty,
state_root=new_state_root,
transactions_trie=new_transactions_root,
)
updated_block_header = header_new_fields.apply(block.header) # type: ignore
assert updated_block_header.difficulty == new_difficulty
assert updated_block_header.state_root == new_state_root
assert updated_block_header.transactions_trie == Hash(new_transactions_root)
assert updated_block_header.block_hash != block.header.block_hash # type: ignore
assert isinstance(updated_block_header.transactions_trie, Hash)
@pytest.mark.parametrize(
"fork,check_hive,expected_json_file",
[
(London, False, "blockchain_london_invalid_filled.json"),
(Shanghai, True, "blockchain_shanghai_invalid_filled_engine.json"),
],
)
def test_fill_blockchain_invalid_txs(
fork: Fork, check_hive: bool, expected_json_file: str, default_t8n: TransitionTool
) -> None:
"""Test `ethereum_test.filler.fill_fixtures` with `BlockchainTest`."""
pre = {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(balance=0x1000000000000000000),
"0xd02d72E067e77158444ef2020Ff2d325f929B363": Account(
balance=0x1000000000000000000, nonce=1
),
"0xcccccccccccccccccccccccccccccccccccccccc": Account(
balance=0x10000000000,
nonce=1,
code=(
Op.SSTORE(Op.NUMBER(), Op.BASEFEE())
+ Op.SSTORE(Op.ADD(Op.NUMBER(), 0x1000), Op.SUB(Op.GASPRICE(), Op.BASEFEE()))
+ Op.SSTORE(Op.ADD(Op.NUMBER(), 0x2000), Op.SELFBALANCE())
+ Op.STOP()
),
),
"0xcccccccccccccccccccccccccccccccccccccccd": Account(
balance=0x20000000000,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.POP
),
),
0xC0DE: Account(
balance=0,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.POP
),
),
"0xccccccccccccccccccccccccccccccccccccccce": Account(
balance=0x20000000000,
nonce=1,
code=(
(Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH2("0x1000")
+ Op.PUSH2("0xc0de")
+ Op.GAS
+ Op.CALL
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ (Op.PUSH1(0) if fork < Shanghai else Op.PUSH0)
+ Op.DUP1
+ Op.PUSH20("0xcccccccccccccccccccccccccccccccccccccccc")
+ Op.GAS
+ Op.DELEGATECALL
+ Op.SWAP1
+ Op.POP
+ Op.POP
),
),
}
blocks: List[Block] = [
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x01",
nonce=0,
gas_limit=1000000,
max_priority_fee_per_gas=1,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0201",
nonce=1,
gas_limit=1000000,
max_priority_fee_per_gas=10,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0202",
nonce=2,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
Transaction(
data="0x0203",
nonce=3,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0301",
nonce=4,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0302",
nonce=5,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
error=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS,
),
],
exception=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS,
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0301",
nonce=4,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0303",
nonce=5,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
Transaction(
data="0x0304",
nonce=6,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=100000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
],
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0401",
nonce=7,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0402",
nonce=8,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
error=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS,
),
],
exception=TransactionException.PRIORITY_GREATER_THAN_MAX_FEE_PER_GAS,
),
Block(
fee_recipient="0xba5e000000000000000000000000000000000000",
txs=[
Transaction(
data="0x0401",
nonce=7,
gas_limit=1000000,
max_priority_fee_per_gas=1000,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
),
Transaction(
data="0x0403",
nonce=8,
gas_limit=1000000,
max_priority_fee_per_gas=100,
max_fee_per_gas=1000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE",
),
Transaction(
data="0x0404",
nonce=9,
gas_limit=1000000,
max_priority_fee_per_gas=100000,
max_fee_per_gas=100000,
to="0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD",
),
],
),
]
post = {
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC": Account(
storage={
# BASEFEE and the tip in block 1
0x0001: 875, # BASEFEE
0x1001: 1, # tip
# Block 2
0x0002: 766, # BASEFEE
0x1002: 10, # tip
# Block 3
0x0003: 671,
0x1003: 329,
# Block 4
0x0004: 588,
0x1004: 412,
# SELFBALANCE, always the same
0x2001: 0x010000000000,
0x2002: 0x010000000000,
0x2003: 0x010000000000,
0x2004: 0x010000000000,
}
),
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCD": Account(
storage={
# Block 2
0x0002: 766, # BASEFEE
0x1002: 100, # tip
# Block 3
0x0003: 671,
0x1003: 99329,
# Block 4
0x0004: 588,
0x1004: 99412,
# SELFBALANCE, always the same
0x2002: 0x020000000000,
0x2003: 0x020000000000,
0x2004: 0x020000000000,
}
),
"0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCE": Account(
storage={
# Block 2
0x0002: 766, # BASEFEE
0x1002: 100, # tip
0x0003: 671,
0x1003: 100,
0x0004: 588,
0x1004: 100,
# SELFBALANCE
0x2002: 0x01FFFFFFF000,
0x2003: 0x01FFFFFFE000,
0x2004: 0x01FFFFFFD000,
}
),
0xC0DE: Account(
storage={
# Block 2
0x0002: 766,
0x1002: 100,
# Block 3
0x0003: 671,
0x1003: 100,
# Block 4
0x0004: 588,
0x1004: 100,
# SELFBALANCE
0x2002: 0x1000,
0x2003: 0x2000,
0x2004: 0x3000,
}
),
}
# We start genesis with a baseFee of 1000
genesis_environment = Environment(
gas_limit=100_000_000_000_000_000,
base_fee_per_gas=1000,
fee_recipient="0x0000000000000000000000000000000000000000",
)
fixture_format: FixtureFormat = BlockchainEngineFixture if check_hive else BlockchainFixture
generated_fixture = BlockchainTest(
pre=pre,
post=post,
blocks=blocks,
genesis_environment=genesis_environment,
).generate(t8n=default_t8n, fork=fork, fixture_format=fixture_format)
assert generated_fixture.__class__ == fixture_format
# BlockchainEngineFixture inherits from BlockchainEngineFixtureCommon
# (not BlockchainFixtureCommon)
from ethereum_test_fixtures.blockchain import BlockchainEngineFixtureCommon
assert isinstance(generated_fixture, (BlockchainFixtureCommon, BlockchainEngineFixtureCommon))
fixture_name = f"000/my_blockchain_test/{fork.name()}"
fixture = {
fixture_name: generated_fixture.json_dict_with_info(hash_only=True),
}
with open(
os.path.join(
"src",
"ethereum_test_specs",
"tests",
"fixtures",
expected_json_file,
)
) as f:
expected = json.load(f)
remove_info_metadata(expected)
remove_info_metadata(fixture)
assert fixture_name in fixture
assert fixture_name in expected
assert fixture[fixture_name] == expected[fixture_name]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/tests/test_benchmark.py | src/ethereum_test_specs/tests/test_benchmark.py | """
Tests for the BenchmarkTest class and its
transaction splitting functionality.
"""
import pytest
from ethereum_test_base_types import HexNumber
from ethereum_test_specs.benchmark import BenchmarkTest
from ethereum_test_types import Alloc, Environment, Transaction
@pytest.mark.parametrize(
"gas_benchmark_value_millions,expected_splits",
[
(1, 1), # 1M / 16M = 1 transaction
(10, 1), # 10M / 16M = 1 transaction
(30, 2), # 30M / 16M = 2 transactions (16M + 14M)
(45, 3), # 45M / 16M = 3 transactions (16M + 16M + 13M)
(60, 4), # 60M / 16M = 4 transactions (16M + 16M + 16M + 12M)
(100, 7), # 100M / 16M = 7 transactions (6x16M + 4M)
(150, 10), # 150M / 16M = 10 transactions (9x16M + 6M)
],
)
def test_split_transaction(gas_benchmark_value_millions: int, expected_splits: int) -> None:
"""
Test that transaction splitting works
correctly for Osaka fork gas cap.
"""
gas_benchmark_value = gas_benchmark_value_millions * 1_000_000
gas_limit_cap = 16_000_000 # Osaka's transaction gas limit cap
# Create a minimal BenchmarkTest instance
benchmark_test = BenchmarkTest(
pre=Alloc(),
post=Alloc(),
tx=Transaction(sender=HexNumber(0), to=HexNumber(0), nonce=0),
env=Environment(),
gas_benchmark_value=gas_benchmark_value,
)
# Test the split_transaction method
assert benchmark_test.tx is not None, "Transaction should not be None"
split_txs = benchmark_test.split_transaction(benchmark_test.tx, gas_limit_cap)
# Verify the number of transactions
assert len(split_txs) == expected_splits, (
f"Expected {expected_splits} transactions for {gas_benchmark_value_millions}M gas, "
f"got {len(split_txs)}"
)
# Verify total gas equals the benchmark value
total_gas = sum(tx.gas_limit for tx in split_txs)
assert total_gas == gas_benchmark_value, (
f"Total gas {total_gas} doesn't match benchmark value {gas_benchmark_value}"
)
# Verify no transaction exceeds the cap
for i, tx in enumerate(split_txs):
assert tx.gas_limit <= gas_limit_cap, (
f"Transaction {i} gas limit {tx.gas_limit} exceeds cap {gas_limit_cap}"
)
# Verify nonces increment correctly
for i, tx in enumerate(split_txs):
assert tx.nonce == i, f"Transaction {i} has incorrect nonce {tx.nonce}"
# Verify gas distribution
for i, tx in enumerate(split_txs[:-1]): # All but last should be at cap
assert tx.gas_limit == gas_limit_cap, (
f"Transaction {i} should have gas limit {gas_limit_cap}, got {tx.gas_limit}"
)
# Last transaction should have the remainder
if expected_splits > 1:
expected_last_gas = gas_benchmark_value - (gas_limit_cap * (expected_splits - 1))
assert split_txs[-1].gas_limit == expected_last_gas, (
f"Last transaction should have {expected_last_gas} gas, got {split_txs[-1].gas_limit}"
)
@pytest.mark.parametrize(
"gas_benchmark_value,gas_limit_cap",
[
(50_000_000, None), # No cap - should return single transaction
(50_000_000, 100_000_000), # Cap higher than benchmark value
],
)
def test_split_transaction_edge_cases(gas_benchmark_value: int, gas_limit_cap: int | None) -> None:
"""Test edge cases for transaction splitting."""
benchmark_test = BenchmarkTest(
pre=Alloc(),
post=Alloc(),
tx=Transaction(sender=HexNumber(0), to=HexNumber(0), nonce=0, gas_limit=1_000_000_000),
env=Environment(),
gas_benchmark_value=gas_benchmark_value,
)
assert benchmark_test.tx is not None, "Transaction should not be None"
split_txs = benchmark_test.split_transaction(benchmark_test.tx, gas_limit_cap)
# Should return single transaction in both cases
assert len(split_txs) == 1, f"Expected 1 transaction, got {len(split_txs)}"
if gas_limit_cap is None:
# When no cap, gas_limit should be benchmark value
assert split_txs[0].gas_limit == gas_benchmark_value
else:
# When cap > benchmark, gas_limit should be
# min of tx.gas_limit and benchmark
assert benchmark_test.tx is not None, "Transaction should not be None"
assert split_txs[0].gas_limit == min(benchmark_test.tx.gas_limit, gas_benchmark_value)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/state_static.py | src/ethereum_test_specs/static_state/state_static.py | """Ethereum General State Test filler static test spec parser."""
from typing import Any, Callable, ClassVar, List, Self, Set, Union
import pytest
from _pytest.mark.structures import ParameterSet
from pydantic import BaseModel, ConfigDict, Field, model_validator
from ethereum_test_forks import Fork
from ethereum_test_types import Alloc
from ..base_static import BaseStaticTest
from ..state import StateTestFiller
from .account import PreInFiller
from .common import Tag
from .environment import EnvironmentInStateTestFiller
from .expect_section import ExpectSectionInStateTestFiller
from .general_transaction import GeneralTransactionInFiller
class Info(BaseModel):
"""Class that represents an info filler."""
comment: str | None = Field(None)
pytest_marks: List[str] = Field(default_factory=list)
class StateStaticTest(BaseStaticTest):
"""General State Test static filler from ethereum/tests."""
test_name: str = ""
format_name: ClassVar[str] = "state_test"
info: Info | None = Field(None, alias="_info")
env: EnvironmentInStateTestFiller
pre: PreInFiller
transaction: GeneralTransactionInFiller
expect: List[ExpectSectionInStateTestFiller]
model_config = ConfigDict(extra="forbid")
def model_post_init(self, context: Any) -> None:
"""Initialize StateStaticTest."""
super().model_post_init(context)
@model_validator(mode="after")
def match_labels(self) -> Self:
"""Replace labels in expect section with corresponding tx.d indexes."""
def parse_string_indexes(indexes: str) -> List[int]:
"""Parse index that are string in to list of int."""
if ":label" in indexes:
# Parse labels in data
indexes = indexes.replace(":label ", "")
tx_matches: List[int] = []
for idx in self.transaction.data:
if indexes == idx.label:
tx_matches.append(idx.index)
return tx_matches
else:
# Parse ranges in data
start, end = map(int, indexes.lstrip().split("-"))
return list(range(start, end + 1))
def parse_indexes(
indexes: Union[int, str, list[Union[int, str]], list[str], list[int]],
do_hint: bool = False,
) -> List[int] | int:
"""
Parse indexes and replace all ranges and labels into tx indexes.
"""
result: List[int] | int = []
if do_hint:
print("Before: " + str(indexes))
if isinstance(indexes, int):
result = indexes
if isinstance(indexes, str):
result = parse_string_indexes(indexes)
if isinstance(indexes, list):
result = []
for element in indexes:
parsed = parse_indexes(element)
if isinstance(parsed, int):
result.append(parsed)
else:
result.extend(parsed)
result = list(set(result))
if do_hint:
print("After: " + str(result))
return result
for expect_section in self.expect:
expect_section.indexes.data = parse_indexes(expect_section.indexes.data)
expect_section.indexes.gas = parse_indexes(expect_section.indexes.gas)
expect_section.indexes.value = parse_indexes(expect_section.indexes.value)
return self
def fill_function(self) -> Callable:
"""Return a StateTest spec from a static file."""
# Check if this test uses tags
has_tags = False
tx_tag_dependencies = self.transaction.tag_dependencies()
if tx_tag_dependencies:
has_tags = True
else:
# Check expect sections for tags
for expect in self.expect:
result_tag_dependencies = expect.result.tag_dependencies()
if result_tag_dependencies:
has_tags = True
break
fully_tagged = True
for address in self.pre.root:
if not isinstance(address, Tag):
fully_tagged = False
break
d_g_v_parameters: List[ParameterSet] = []
for d in self.transaction.data:
for g in range(len(self.transaction.gas_limit)):
for v in range(len(self.transaction.value)):
exception_test = False
for expect in self.expect:
if expect.has_index(d.index, g, v) and expect.expect_exception is not None:
exception_test = True
# TODO: This does not take into account exceptions that
# only happen on specific forks, but this requires a
# covariant parametrize
marks = [pytest.mark.exception_test] if exception_test else []
id_label = ""
if len(self.transaction.data) > 1 or d.label is not None:
if d.label is not None:
id_label = f"{d}"
else:
id_label = f"d{d}"
if len(self.transaction.gas_limit) > 1:
id_label += f"-g{g}"
if len(self.transaction.value) > 1:
id_label += f"-v{v}"
d_g_v_parameters.append(pytest.param(d.index, g, v, marks=marks, id=id_label))
@pytest.mark.valid_at(*self.get_valid_at_forks())
@pytest.mark.parametrize("d,g,v", d_g_v_parameters)
def test_state_vectors(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
d: int,
g: int,
v: int,
) -> None:
for expect in self.expect:
if expect.has_index(d, g, v):
if fork in expect.network:
tx_tag_dependencies = self.transaction.tag_dependencies()
result_tag_dependencies = expect.result.tag_dependencies()
all_dependencies = {**tx_tag_dependencies, **result_tag_dependencies}
tags = self.pre.setup(pre, all_dependencies)
env = self.env.get_environment(tags)
exception = (
None
if expect.expect_exception is None
else expect.expect_exception[fork]
)
tx = self.transaction.get_transaction(tags, d, g, v, exception)
post = expect.result.resolve(tags)
state_test(
env=env,
pre=pre,
post=post,
tx=tx,
)
return
pytest.fail(f"Expectation not found for d={d}, g={g}, v={v}, fork={fork}")
if self.info and self.info.pytest_marks:
for mark in self.info.pytest_marks:
if mark == "pre_alloc_group":
test_state_vectors = pytest.mark.pre_alloc_group(
"separate", reason="Requires separate pre-alloc grouping"
)(test_state_vectors)
else:
apply_mark = getattr(pytest.mark, mark)
test_state_vectors = apply_mark(test_state_vectors)
if has_tags:
test_state_vectors = pytest.mark.tagged(test_state_vectors)
if fully_tagged:
test_state_vectors = pytest.mark.fully_tagged(test_state_vectors)
else:
test_state_vectors = pytest.mark.untagged(test_state_vectors)
test_state_vectors = pytest.mark.pre_alloc_group(
"separate", reason="Uses hard-coded addresses"
)(test_state_vectors)
if not fully_tagged:
test_state_vectors = pytest.mark.pre_alloc_modify(test_state_vectors)
return test_state_vectors
def get_valid_at_forks(self) -> List[str]:
"""Return list of forks that are valid for this test."""
fork_set: Set[Fork] = set()
for expect in self.expect:
fork_set.update(expect.network)
return sorted([str(f) for f in fork_set])
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/expect_section.py | src/ethereum_test_specs/static_state/expect_section.py | """Expect section structure of ethereum/tests fillers."""
import re
from enum import StrEnum
from typing import Annotated, Any, Dict, Iterator, List, Mapping, Set, Union
from pydantic import (
BaseModel,
BeforeValidator,
Field,
ValidatorFunctionWrapHandler,
field_validator,
model_validator,
)
from ethereum_test_base_types import (
Account,
Address,
CamelModel,
EthereumTestRootModel,
HexNumber,
Storage,
)
from ethereum_test_exceptions import TransactionExceptionInstanceOrList
from ethereum_test_forks import Fork, get_forks
from ethereum_test_types import Alloc
from .common import (
AddressOrCreateTagInFiller,
CodeInFiller,
Tag,
TagDependentData,
TagDict,
ValueInFiller,
ValueOrCreateTagInFiller,
)
class Indexes(BaseModel):
"""Class that represents an index filler."""
data: int | List[Union[int, str]] | List[int] | str = Field(-1)
gas: int | List[Union[int, str]] | List[int] | str = Field(-1)
value: int | List[Union[int, str]] | List[int] | str = Field(-1)
def validate_any_string_as_none(v: Any) -> Any:
"""Validate "ANY" as None."""
if type(v) is str and v == "ANY":
return None
return v
class StorageInExpectSection(EthereumTestRootModel, TagDependentData):
"""Class that represents a storage in expect section filler."""
root: Dict[
ValueOrCreateTagInFiller,
Annotated[ValueOrCreateTagInFiller | None, BeforeValidator(validate_any_string_as_none)],
]
def tag_dependencies(self) -> Mapping[str, Tag]:
"""Get storage dependencies."""
tag_dependencies = {}
for key, value in self.root.items():
if isinstance(key, Tag):
tag_dependencies[key.name] = key
if isinstance(value, Tag):
tag_dependencies[value.name] = value
return tag_dependencies
def resolve(self, tags: TagDict) -> Storage:
"""Resolve the account with the given tags."""
storage = Storage()
for key, value in self.root.items():
resolved_key: HexNumber | Address
if isinstance(key, Tag):
resolved_key = key.resolve(tags)
else:
resolved_key = key
if value is None:
storage.set_expect_any(resolved_key)
elif isinstance(value, Tag):
storage[resolved_key] = value.resolve(tags)
else:
storage[resolved_key] = value
return storage
def __contains__(self, key: Address) -> bool:
"""Check if the storage contains a key."""
return key in self.root
def __iter__(self) -> Iterator[ValueOrCreateTagInFiller]: # type: ignore[override]
"""Iterate over the storage."""
return iter(self.root)
class AccountInExpectSection(BaseModel, TagDependentData):
"""Class that represents an account in expect section filler."""
balance: ValueInFiller | None = None
code: CodeInFiller | None = None
nonce: ValueInFiller | None = None
storage: StorageInExpectSection | None = None
@model_validator(mode="wrap") # type: ignore[misc]
@classmethod
def validate_should_not_exist(
cls, v: Any, handler: ValidatorFunctionWrapHandler
) -> "AccountInExpectSection | None":
"""
Validate the "shouldnotexist" field, which makes this validator return
`None`.
"""
if isinstance(v, dict):
if "shouldnotexist" in v:
return None
return handler(v)
def tag_dependencies(self) -> Mapping[str, Tag]:
"""Get tag dependencies."""
tag_dependencies: Dict[str, Tag] = {}
if self.code is not None:
tag_dependencies.update(self.code.tag_dependencies())
if self.storage is not None:
tag_dependencies.update(self.storage.tag_dependencies())
return tag_dependencies
def resolve(self, tags: TagDict) -> Account:
"""Resolve the account with the given tags."""
account_kwargs: Dict[str, Any] = {}
if self.storage is not None:
account_kwargs["storage"] = self.storage.resolve(tags)
if self.code is not None:
account_kwargs["code"] = self.code.compiled(tags)
if self.balance is not None:
account_kwargs["balance"] = self.balance
if self.nonce is not None:
account_kwargs["nonce"] = self.nonce
return Account(**account_kwargs)
class CMP(StrEnum):
"""Comparison action."""
LE = "<="
GE = ">="
LT = "<"
GT = ">"
EQ = "="
class ForkConstraint(BaseModel):
"""Single fork with an operand."""
operand: CMP
fork: Fork
@field_validator("fork", mode="before")
@classmethod
def parse_fork_synonyms(cls, value: Any) -> Any:
"""Resolve fork synonyms."""
if value == "EIP158":
value = "Byzantium"
return value
@model_validator(mode="before")
@classmethod
def parse_from_string(cls, data: Any) -> Any:
"""Parse a fork with operand from a string."""
if isinstance(data, str):
for cmp in CMP:
if data.startswith(cmp):
fork = data.removeprefix(cmp)
return {
"operand": cmp,
"fork": fork,
}
return {
"operand": CMP.EQ,
"fork": data,
}
return data
def match(self, fork: Fork) -> bool:
"""Return whether the fork satisfies the operand evaluation."""
match self.operand:
case CMP.LE:
return fork <= self.fork
case CMP.GE:
return fork >= self.fork
case CMP.LT:
return fork < self.fork
case CMP.GT:
return fork > self.fork
case CMP.EQ:
return fork == self.fork
case _:
raise ValueError(f"Invalid operand: {self.operand}")
class ForkSet(EthereumTestRootModel):
"""Set of forks."""
root: Set[Fork]
@model_validator(mode="before")
@classmethod
def parse_from_list_or_string(cls, value: Any) -> Set[Fork]:
"""Parse fork_with_operand `>=Cancun` into {Cancun, Prague, ...}."""
fork_set: Set[Fork] = set()
if not isinstance(value, list):
value = [value]
for fork_with_operand in value:
matches = re.findall(r"(<=|<|>=|>|=)([^<>=]+)", fork_with_operand)
if matches:
all_fork_constraints = [
ForkConstraint.model_validate(f"{op}{fork.strip()}") for op, fork in matches
]
else:
all_fork_constraints = [ForkConstraint.model_validate(fork_with_operand.strip())]
for fork in get_forks():
for f in all_fork_constraints:
if not f.match(fork):
# If any constraint does not match, skip adding
break
else:
# All constraints match, add the fork to the set
fork_set.add(fork)
return fork_set
def __hash__(self) -> int:
"""Return the hash of the fork set."""
h = hash(None)
for fork in sorted([str(f) for f in self]):
h ^= hash(fork)
return h
def __contains__(self, fork: Fork) -> bool:
"""Check if the fork set contains a fork."""
return fork in self.root
def __iter__(self) -> Iterator[Fork]: # type: ignore[override]
"""Iterate over the fork set."""
return iter(self.root)
def __len__(self) -> int:
"""Return the length of the fork set."""
return len(self.root)
class ResultInFiller(EthereumTestRootModel, TagDependentData):
"""
Post section in state test filler.
A value of `None` for an address means that the account should not be in
the state trie at the end of the test.
"""
root: Dict[AddressOrCreateTagInFiller, AccountInExpectSection | None]
def tag_dependencies(self) -> Mapping[str, Tag]:
"""Return all tags used in the result."""
tag_dependencies: Dict[str, Tag] = {}
for address, account in self.root.items():
if isinstance(address, Tag):
tag_dependencies[address.name] = address
if account is None:
continue
tag_dependencies.update(account.tag_dependencies())
return tag_dependencies
def resolve(self, tags: TagDict) -> Alloc:
"""Resolve the post section."""
post = Alloc()
for address, account in self.root.items():
if isinstance(address, Tag):
resolved_address = address.resolve(tags)
else:
resolved_address = Address(address)
if account is None:
continue
post[resolved_address] = account.resolve(tags)
return post
def __contains__(self, address: Address) -> bool:
"""Check if the result contains an address."""
return address in self.root
def __iter__(self) -> Iterator[AddressOrCreateTagInFiller]: # type: ignore[override]
"""Iterate over the result."""
return iter(self.root)
def __len__(self) -> int:
"""Return the length of the result."""
return len(self.root)
class ExpectException(EthereumTestRootModel):
"""Expect exception model."""
root: Dict[ForkSet, TransactionExceptionInstanceOrList]
def __getitem__(self, fork: Fork) -> TransactionExceptionInstanceOrList:
"""Get an expectation for a given fork."""
for k in self.root:
if fork in k:
return self.root[k]
raise KeyError(f"Fork {fork} not found in expectations.")
def __contains__(self, fork: Fork) -> bool:
"""Check if the expect exception contains a fork."""
return fork in self.root
def __iter__(self) -> Iterator[ForkSet]: # type: ignore[override]
"""Iterate over the expect exception."""
return iter(self.root)
def __len__(self) -> int:
"""Return the length of the expect exception."""
return len(self.root)
class ExpectSectionInStateTestFiller(CamelModel):
"""Expect section in state test filler."""
indexes: Indexes = Field(default_factory=Indexes)
network: ForkSet
result: ResultInFiller
expect_exception: ExpectException | None = None
def model_post_init(self, __context: Any) -> None:
"""Validate that the expectation is coherent."""
if self.expect_exception is None:
return
all_forks: Set[Fork] = set()
for current_fork_set in self.expect_exception:
for fork in current_fork_set:
assert fork not in all_forks
all_forks.add(fork)
def has_index(self, d: int, g: int, v: int) -> bool:
"""Check if there is index set in indexes."""
d_match: bool = False
g_match: bool = False
v_match: bool = False
# Check if data index match
if isinstance(self.indexes.data, int):
d_match = True if self.indexes.data == -1 or self.indexes.data == d else False
elif isinstance(self.indexes.data, list):
d_match = True if self.indexes.data.count(d) else False
# Check if gas index match
if isinstance(self.indexes.gas, int):
g_match = True if self.indexes.gas == -1 or self.indexes.gas == g else False
elif isinstance(self.indexes.gas, list):
g_match = True if self.indexes.gas.count(g) else False
# Check if value index match
if isinstance(self.indexes.value, int):
v_match = True if self.indexes.value == -1 or self.indexes.value == v else False
elif isinstance(self.indexes.value, list):
v_match = True if self.indexes.value.count(v) else False
return d_match and g_match and v_match
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/account.py | src/ethereum_test_specs/static_state/account.py | """Account structure of ethereum/tests fillers."""
from typing import Any, Dict, List, Mapping, Set, Tuple
from pydantic import BaseModel, ConfigDict
from ethereum_test_base_types import Bytes, EthereumTestRootModel, HexNumber, Storage
from ethereum_test_types import Alloc
from .common import (
AddressOrTagInFiller,
CodeInFiller,
ContractTag,
SenderTag,
Tag,
TagDependentData,
TagDict,
ValueInFiller,
ValueOrTagInFiller,
)
class StorageInPre(EthereumTestRootModel):
"""Class that represents a storage in pre-state."""
root: Dict[ValueInFiller, ValueOrTagInFiller]
def tag_dependencies(self) -> Mapping[str, Tag]:
"""Get tag dependencies."""
tag_dependencies: Dict[str, Tag] = {}
for k, v in self.root.items():
if isinstance(k, Tag):
tag_dependencies[k.name] = k
if isinstance(v, Tag):
tag_dependencies[v.name] = v
return tag_dependencies
def resolve(self, tags: TagDict) -> Dict[ValueInFiller, ValueInFiller]:
"""Resolve the storage."""
resolved_storage: Dict[ValueInFiller, ValueInFiller] = {}
for key, value in self.root.items():
if isinstance(value, Tag):
resolved_storage[key] = HexNumber(int.from_bytes(value.resolve(tags), "big"))
else:
resolved_storage[key] = value
return resolved_storage
class AccountInFiller(BaseModel, TagDependentData):
"""Class that represents an account in filler."""
balance: ValueInFiller | None = None
code: CodeInFiller | None = None
nonce: ValueInFiller | None = None
storage: StorageInPre | None = None
model_config = ConfigDict(arbitrary_types_allowed=True, extra="forbid")
def tag_dependencies(self) -> Mapping[str, Tag]:
"""Get tag dependencies."""
tag_dependencies: Dict[str, Tag] = {}
if self.storage is not None:
tag_dependencies.update(self.storage.tag_dependencies())
if self.code is not None and isinstance(self.code, CodeInFiller):
tag_dependencies.update(self.code.tag_dependencies())
return tag_dependencies
def resolve(self, tags: TagDict) -> Dict[str, Any]:
"""Resolve the account."""
account_properties: Dict[str, Any] = {}
if self.balance is not None:
account_properties["balance"] = self.balance
if self.code is not None:
if compiled_code := self.code.compiled(tags):
account_properties["code"] = compiled_code
if self.nonce is not None:
account_properties["nonce"] = self.nonce
if self.storage is not None:
if resolved_storage := self.storage.resolve(tags):
account_properties["storage"] = resolved_storage
return account_properties
class PreInFiller(EthereumTestRootModel):
    """Class that represents a pre-state in filler."""

    # Mapping from a plain address or a tag (<contract:...> / <eoa:...>) to
    # the account description given in the filler's "pre" section.
    root: Dict[AddressOrTagInFiller, AccountInFiller]

    def _build_dependency_graph(
        self,
    ) -> Tuple[Dict[str, Set[str]], Dict[str, AddressOrTagInFiller]]:
        """Build a dependency graph for all tags.

        Returns a tuple of:
            - tag name -> set of tag names it depends on (self-references excluded),
            - tag name -> the original tag object.
        """
        dep_graph: Dict[str, Set[str]] = {}
        tag_to_address: Dict[str, AddressOrTagInFiller] = {}
        # First pass: identify all tags and their dependencies
        for address_or_tag, account in self.root.items():
            if isinstance(address_or_tag, Tag):
                tag_name = address_or_tag.name
                tag_to_address[tag_name] = address_or_tag
                dep_graph[tag_name] = set()
                # Get dependencies from account properties
                dependencies = account.tag_dependencies()
                for dep_name in dependencies:
                    if dep_name != tag_name:  # Ignore self-references
                        dep_graph[tag_name].add(dep_name)
        return dep_graph, tag_to_address

    def _topological_sort(self, dep_graph: Dict[str, Set[str]]) -> List[str]:
        """Perform topological sort on dependency graph (Kahn's algorithm).

        Cycles do not raise: the remaining nodes are appended in arbitrary
        order, since tag resolution can tolerate them (addresses are
        pre-allocated before properties are resolved).
        """
        # Create a copy to modify
        graph = {node: deps.copy() for node, deps in dep_graph.items()}
        # Find nodes with no dependencies
        no_deps = [node for node, deps in graph.items() if not deps]
        sorted_nodes = []
        while no_deps:
            # Process a node with no dependencies
            node = no_deps.pop()
            sorted_nodes.append(node)
            # Remove this node from other nodes' dependencies
            for other_node, deps in graph.items():
                if node in deps:
                    deps.remove(node)
                    if not deps and other_node not in sorted_nodes:
                        no_deps.append(other_node)
        # Check for cycles
        remaining = [node for node in graph if node not in sorted_nodes]
        if remaining:
            # Handle cycles by processing remaining nodes in any order.
            # This works because self-references are allowed.
            sorted_nodes.extend(remaining)
        return sorted_nodes

    def setup(self, pre: Alloc, all_dependencies: Dict[str, Tag]) -> TagDict:
        """Resolve the pre-state with improved tag resolution.

        Args:
            pre: Test allocation used to deploy contracts and fund EOAs.
            all_dependencies: Every tag referenced anywhere in the test,
                including tags not present in the pre-state itself.

        Returns:
            Mapping from tag name (or address hex string for untagged
            accounts) to the resolved Address/EOA object.
        """
        resolved_accounts: TagDict = {}
        # Separate tagged and non-tagged accounts
        tagged_accounts = {}
        non_tagged_accounts = {}
        for address_or_tag, account in self.root.items():
            if isinstance(address_or_tag, Tag):
                tagged_accounts[address_or_tag] = account
            else:
                non_tagged_accounts[address_or_tag] = account
        # Step 1: Process non-tagged accounts but don't compile code yet.
        # We'll compile code later after all tags are resolved.
        non_tagged_to_process = []
        for address, account in non_tagged_accounts.items():
            non_tagged_to_process.append((address, account))
            resolved_accounts[address.hex()] = address
        # Step 2: Build dependency graph for tagged accounts
        dep_graph, tag_to_address = self._build_dependency_graph()
        # Step 3: Get topological order
        resolution_order = self._topological_sort(dep_graph)
        # Step 4: Pre-deploy all contract tags and pre-fund EOAs to get
        # addresses before any property (code/storage) is resolved.
        for tag_name in resolution_order:
            if tag_name in tag_to_address:
                tag = tag_to_address[tag_name]
                if isinstance(tag, ContractTag):
                    # Deploy with placeholder to get address
                    deployed_address = pre.deploy_contract(
                        code=b"",  # Temporary placeholder
                        label=tag_name,
                    )
                    resolved_accounts[tag_name] = deployed_address
                elif isinstance(tag, SenderTag):
                    # Create EOA to get address - use amount=1 to ensure
                    # account is created
                    eoa = pre.fund_eoa(amount=1, label=tag_name)
                    # Store the EOA object for SenderKeyTag resolution
                    resolved_accounts[tag_name] = eoa
        # Step 5: Now resolve all properties with all addresses available
        for tag_name in resolution_order:
            if tag_name in tag_to_address:
                tag = tag_to_address[tag_name]
                assert isinstance(tag, (ContractTag, SenderTag)), (
                    f"Tag {tag_name} is not a contract or sender"
                )
                account = tagged_accounts[tag]
                # All addresses are now available, so resolve properties
                account_properties = account.resolve(resolved_accounts)
                if isinstance(tag, ContractTag):
                    # Update the already-deployed contract in place
                    deployed_address = resolved_accounts[tag_name]
                    deployed_account = pre[deployed_address]
                    if deployed_account is not None:
                        if "code" in account_properties:
                            deployed_account.code = Bytes(account_properties["code"])
                        if "balance" in account_properties:
                            deployed_account.balance = account_properties["balance"]
                        if "nonce" in account_properties:
                            deployed_account.nonce = account_properties["nonce"]
                        if "storage" in account_properties:
                            deployed_account.storage = Storage(root=account_properties["storage"])
                elif isinstance(tag, SenderTag):
                    eoa_account = pre[resolved_accounts[tag_name]]
                    if eoa_account is not None:
                        if "balance" in account_properties:
                            eoa_account.balance = account_properties["balance"]
                        if "nonce" in account_properties:
                            eoa_account.nonce = account_properties["nonce"]
                        if "code" in account_properties:
                            eoa_account.code = Bytes(account_properties["code"])
                        if "storage" in account_properties:
                            eoa_account.storage = Storage(root=account_properties["storage"])
        # Step 6: Now process non-tagged accounts (including code compilation)
        for address, account in non_tagged_to_process:
            account_properties = account.resolve(resolved_accounts)
            if "balance" in account_properties:
                pre.fund_address(address, account_properties["balance"])
            existing_account = pre[address]
            if existing_account is not None:
                if "code" in account_properties:
                    existing_account.code = Bytes(account_properties["code"])
                if "nonce" in account_properties:
                    existing_account.nonce = account_properties["nonce"]
                if "storage" in account_properties:
                    existing_account.storage = Storage(root=account_properties["storage"])
        # Step 7: Handle any extra dependencies not in pre
        for extra_dependency in all_dependencies:
            if extra_dependency not in resolved_accounts:
                # Only EOAs may be created on the fly; a missing contract
                # dependency is a filler authoring error.
                if all_dependencies[extra_dependency].type != "eoa":
                    raise ValueError(f"Contract dependency {extra_dependency} not found in pre")
                # Create new EOA - this will have a dynamically generated key
                # and address
                eoa = pre.fund_eoa(amount=0, label=extra_dependency)
                resolved_accounts[extra_dependency] = eoa
        return resolved_accounts
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/general_transaction.py | src/ethereum_test_specs/static_state/general_transaction.py | """General transaction structure of ethereum/tests fillers."""
from typing import Any, Dict, Generator, List, Mapping
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from ethereum_test_base_types import Address, CamelModel, EthereumTestRootModel, Hash
from ethereum_test_exceptions import TransactionExceptionInstanceOrList
from ethereum_test_types import Transaction
from .common import (
AccessListInFiller,
AddressOrTagInFiller,
CodeInFiller,
HashOrTagInFiller,
Tag,
TagDependentData,
TagDict,
ValueInFiller,
)
class DataWithAccessList(CamelModel, TagDependentData):
    """Class that represents data with access list."""

    # Calldata source; may contain tags and a ":label" prefix.
    data: CodeInFiller
    # Optional EIP-2930 access list entries attached to this data variant.
    access_list: List[AccessListInFiller] | None = None

    @field_validator("access_list", mode="before")
    @classmethod
    def convert_keys_to_hash(
        cls, access_list: List[Dict[str, Any]] | None
    ) -> List[Dict[str, Any]] | None:  # noqa: N805
        """Normalize raw storage keys into left-padded 32-byte hashes."""
        if access_list is None:
            return None
        for entry in access_list:
            if "storageKeys" in entry:
                entry["storageKeys"] = [
                    Hash(key, left_padding=True) for key in entry["storageKeys"]
                ]
        return access_list

    def tag_dependencies(self) -> Mapping[str, Tag]:
        """Get tag dependencies from both the data source and the access list."""
        tag_dependencies: Dict[str, Tag] = {}
        if self.access_list is not None:
            for entry in self.access_list:
                tag_dependencies.update(entry.tag_dependencies())
        # NOTE(review): `data` is typed as CodeInFiller, so the isinstance
        # check below is expected to always hold — confirm before removing.
        if self.data is not None and isinstance(self.data, CodeInFiller):
            tag_dependencies.update(self.data.tag_dependencies())
        return tag_dependencies

    @model_validator(mode="wrap")
    @classmethod
    def wrap_data_only(cls, data: Any, handler: Any) -> "DataWithAccessList":
        """Wrap data only if it is not a dictionary.

        Allows bare values (e.g. a code string) to be given without the
        surrounding ``{"data": ...}`` mapping.
        """
        if not isinstance(data, dict) and not isinstance(data, DataWithAccessList):
            data = {"data": data}
        return handler(data)
class LabeledDataIndex(BaseModel):
    """Represents an index with a label if any."""

    # Position of the data entry within the filler's data list.
    index: int
    # Optional human-readable label attached via ":label".
    label: str | None = None

    def __str__(self) -> str:
        """Transform into a string that can be part of a test name."""
        # Prefer the explicit label; fall back to the numeric index.
        return self.label if self.label is not None else str(self.index)
class LabeledDataList(EthereumTestRootModel):
    """Class that represents a list of labeled data."""

    root: List[DataWithAccessList]

    def __getitem__(self, label_or_index: int | str) -> DataWithAccessList:
        """Get an item by label or index.

        Raises:
            KeyError: If a string label (or unsupported key type) is not found.
            IndexError: If an integer index is out of range (from the list).
        """
        if isinstance(label_or_index, int):
            return self.root[label_or_index]
        if isinstance(label_or_index, str):
            for item in self.root:
                if item.data.label == label_or_index:
                    return item
        raise KeyError(f"Label/index {label_or_index} not found in data indexes")

    def __contains__(self, label_or_index: int | str) -> bool:
        """
        Return True if the LabeledDataList contains the given label/index.
        """
        if isinstance(label_or_index, int):
            # NOTE(review): negative indexes are not range-checked here and
            # always report membership — confirm whether that is intended.
            return label_or_index < len(self.root)
        if isinstance(label_or_index, str):
            for item in self.root:
                if item.data.label == label_or_index:
                    return True
        return False

    def __len__(self) -> int:
        """Return the length of the list."""
        return len(self.root)

    def __iter__(self) -> Generator[LabeledDataIndex, None, None]:  # type: ignore
        """Iterate as LabeledDataIndex handles (index + optional label),
        not as the underlying data entries."""
        for i, item in enumerate(self.root):
            labeled_data_index = LabeledDataIndex(index=i)
            if item.data.label is not None:
                labeled_data_index.label = item.data.label
            yield labeled_data_index
class GeneralTransactionInFiller(BaseModel, TagDependentData):
    """Class that represents general transaction in filler.

    The ``data``, ``gas_limit`` and ``value`` fields are lists; a state test
    is expanded over the cartesian product of their indexes (d, g, v).
    """

    data: LabeledDataList
    gas_limit: List[ValueInFiller] = Field(..., alias="gasLimit")
    gas_price: ValueInFiller | None = Field(None, alias="gasPrice")
    nonce: ValueInFiller | None
    # None denotes a contract-creation transaction (see check_single_key).
    to: AddressOrTagInFiller | None
    value: List[ValueInFiller]
    secret_key: HashOrTagInFiller = Field(..., alias="secretKey")
    max_fee_per_gas: ValueInFiller | None = Field(None, alias="maxFeePerGas")
    max_priority_fee_per_gas: ValueInFiller | None = Field(None, alias="maxPriorityFeePerGas")
    max_fee_per_blob_gas: ValueInFiller | None = Field(None, alias="maxFeePerBlobGas")
    blob_versioned_hashes: List[Hash] | None = Field(None, alias="blobVersionedHashes")

    model_config = ConfigDict(extra="forbid")

    def tag_dependencies(self) -> Mapping[str, Tag]:
        """Get tag dependencies from data entries, `to` and `secretKey`."""
        tag_dependencies: Dict[str, Tag] = {}
        if self.data:
            for idx in self.data:
                data = self.data[idx.index]
                tag_dependencies.update(data.tag_dependencies())
        if self.to is not None and isinstance(self.to, Tag):
            tag_dependencies[self.to.name] = self.to
        if self.secret_key is not None and isinstance(self.secret_key, Tag):
            tag_dependencies[self.secret_key.name] = self.secret_key
        return tag_dependencies

    @field_validator("to", mode="before")
    def check_single_key(cls, to: Any) -> Any:  # noqa: N805
        """Map an empty `to` string to None (creation transaction)."""
        if to == "":
            to = None
        return to

    @model_validator(mode="after")
    def check_fields(self) -> "GeneralTransactionInFiller":
        """Validate all fields are set: either legacy gasPrice or both
        EIP-1559 fee fields must be present."""
        if self.gas_price is None:
            if self.max_fee_per_gas is None or self.max_priority_fee_per_gas is None:
                raise ValueError(
                    "If `gasPrice` is not set,"
                    " `maxFeePerGas` and `maxPriorityFeePerGas` must be set!"
                )
        return self

    def get_transaction(
        self,
        tags: TagDict,
        d: int,
        g: int,
        v: int,
        exception: TransactionExceptionInstanceOrList | None,
    ) -> Transaction:
        """Get the transaction.

        Args:
            tags: Resolved tag mapping from the pre-state.
            d: Index into the `data` list.
            g: Index into the `gasLimit` list.
            v: Index into the `value` list.
            exception: Expected transaction exception for this case, if any.
        """
        data_box = self.data[d]
        kwargs: Dict[str, Any] = {}
        if self.to is None:
            kwargs["to"] = None
        elif isinstance(self.to, Tag):
            kwargs["to"] = self.to.resolve(tags)
        else:
            kwargs["to"] = Address(self.to)
        # Compilation happens here, after all tags have been resolved.
        kwargs["data"] = data_box.data.compiled(tags)
        if data_box.access_list is not None:
            kwargs["access_list"] = [entry.resolve(tags) for entry in data_box.access_list]
        kwargs["gas_limit"] = self.gas_limit[g]
        if isinstance(self.secret_key, Tag):
            # Tagged key: look up the pre-funded EOA and use its private key.
            sender = self.secret_key.resolve(tags)
            kwargs["secret_key"] = sender.key
        else:
            kwargs["secret_key"] = self.secret_key
        # NOTE(review): zero values are omitted and fall back to the
        # Transaction model defaults — confirm the defaults match.
        if self.value[v] > 0:
            kwargs["value"] = self.value[v]
        if self.gas_price is not None:
            kwargs["gas_price"] = self.gas_price
        if self.nonce is not None:
            kwargs["nonce"] = self.nonce
        if self.max_fee_per_gas is not None:
            kwargs["max_fee_per_gas"] = self.max_fee_per_gas
        if self.max_priority_fee_per_gas is not None:
            kwargs["max_priority_fee_per_gas"] = self.max_priority_fee_per_gas
        if self.max_fee_per_blob_gas is not None:
            kwargs["max_fee_per_blob_gas"] = self.max_fee_per_blob_gas
        if self.blob_versioned_hashes is not None:
            kwargs["blob_versioned_hashes"] = self.blob_versioned_hashes
        if exception is not None:
            kwargs["error"] = exception
        return Transaction(**kwargs)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/__init__.py | src/ethereum_test_specs/static_state/__init__.py | """Ethereum/tests structures."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/environment.py | src/ethereum_test_specs/static_state/environment.py | """Environment structure of ethereum/tests fillers."""
from typing import Any, Dict
from pydantic import BaseModel, ConfigDict, Field, model_validator
from ethereum_test_base_types import Address
from ethereum_test_types import Environment
from .common import AddressOrTagInFiller, Tag, TagDict, ValueInFiller
class EnvironmentInStateTestFiller(BaseModel):
    """Class that represents an environment filler."""

    current_coinbase: AddressOrTagInFiller = Field(..., alias="currentCoinbase")
    current_gas_limit: ValueInFiller = Field(..., alias="currentGasLimit")
    current_number: ValueInFiller = Field(..., alias="currentNumber")
    current_timestamp: ValueInFiller = Field(..., alias="currentTimestamp")
    # Pre-merge difficulty; defaults to 0x020000 when the filler omits it.
    current_difficulty: ValueInFiller | None = Field(
        ValueInFiller("0x020000"), alias="currentDifficulty"
    )
    # Post-merge prevRandao; shares the 0x020000 default with difficulty.
    current_random: ValueInFiller | None = Field(ValueInFiller("0x020000"), alias="currentRandom")
    current_base_fee: ValueInFiller | None = Field(ValueInFiller("0x0a"), alias="currentBaseFee")
    current_excess_blob_gas: ValueInFiller | None = Field(None, alias="currentExcessBlobGas")

    model_config = ConfigDict(extra="forbid")

    @model_validator(mode="after")
    def check_fields(self) -> "EnvironmentInStateTestFiller":
        """Validate all fields are set: at least one of currentDifficulty /
        currentRandom must be present."""
        if self.current_difficulty is None:
            if self.current_random is None:
                raise ValueError("If `currentDifficulty` is not set, `currentRandom` must be set!")
        return self

    def get_environment(self, tags: TagDict) -> Environment:
        """Build an Environment from this filler, resolving any coinbase tag."""
        kwargs: Dict[str, Any] = {}
        if isinstance(self.current_coinbase, Tag):
            assert self.current_coinbase.name in tags, (
                f"Tag {self.current_coinbase.name} to resolve coinbase not found in tags"
            )
            kwargs["fee_recipient"] = self.current_coinbase.resolve(tags)
        else:
            kwargs["fee_recipient"] = Address(self.current_coinbase)
        # Only forward fields that are present so Environment defaults apply.
        if self.current_difficulty is not None:
            kwargs["difficulty"] = self.current_difficulty
        if self.current_random is not None:
            kwargs["prev_randao"] = self.current_random
        if self.current_gas_limit is not None:
            kwargs["gas_limit"] = self.current_gas_limit
        if self.current_number is not None:
            kwargs["number"] = self.current_number
        if self.current_timestamp is not None:
            kwargs["timestamp"] = self.current_timestamp
        if self.current_base_fee is not None:
            kwargs["base_fee_per_gas"] = self.current_base_fee
        if self.current_excess_blob_gas is not None:
            kwargs["excess_blob_gas"] = self.current_excess_blob_gas
        return Environment(**kwargs)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/common/tags.py | src/ethereum_test_specs/static_state/common/tags.py | """Classes to manage tags in static state tests."""
import re
from abc import ABC, abstractmethod
from typing import Any, ClassVar, Dict, Generic, Mapping, TypeVar
from pydantic import BaseModel, model_validator
from ethereum_test_base_types import Address, Bytes, Hash, HexNumber
from ethereum_test_types import EOA, compute_create2_address, compute_create_address
TagDict = Dict[str, Address | EOA]
T = TypeVar("T", bound=Address | Hash)
class Tag(BaseModel, Generic[T]):
    """Base class for a tag placeholder (e.g. ``<eoa:name>``) that resolves
    to a value of type ``T`` (an Address or Hash) at fill time."""

    name: str
    type: ClassVar[str] = ""
    # Generic fallback pattern; subclasses override with stricter patterns.
    regex_pattern: ClassVar[re.Pattern] = re.compile(r"<\w+:(\w+)(:[^>]+)?")
    # Store the original tag string for replacement
    original_string: str | None = None

    def __hash__(self) -> int:
        """Hash based on tag class name and tag name for use as dict key."""
        return hash(f"{self.__class__.__name__}:{self.name}")

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, data: Any) -> Any:
        """Validate the generic tag from string: <tag_kind:name:0x...>."""
        if isinstance(data, str):
            if m := cls.regex_pattern.match(data):
                name = m.group(1)
                return {"name": name, "original_string": data}
        return data

    def resolve(self, tags: TagDict) -> T:
        """Resolve the tag to its concrete value; implemented by subclasses."""
        raise NotImplementedError("Subclasses must implement this method")
class TagDependentData(ABC):
    """Interface for filler data that may reference tags."""

    @abstractmethod
    def tag_dependencies(self) -> Mapping[str, Tag]:
        """Return the tags this object depends on, keyed by tag name."""
        pass
class AddressTag(Tag[Address]):
    """Tag that resolves to an Address."""

    def resolve(self, tags: TagDict) -> Address:
        """Resolve the tag to an Address.

        Raises:
            AssertionError: If the tag name is not present in ``tags``.
        """
        assert self.name in tags, f"Tag {self.name} not found in tags"
        return Address(tags[self.name])
class ContractTag(AddressTag):
    """Contract tag."""

    type: ClassVar[str] = "contract"
    regex_pattern: ClassVar[re.Pattern] = re.compile(r"<contract:([^:>]+)(?::(0x[a-fA-F0-9]+))?>")
    # Optional hard-coded address for debugging
    debug_address: Address | None = None

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, data: Any) -> Any:
        """
        Validate the contract tag from string:
        <contract:name:0x...>
        or
        <contract:0x...>.
        """
        if isinstance(data, str):
            if m := cls.regex_pattern.match(data):
                name_or_addr = m.group(1)
                debug_addr = m.group(2) if m.lastindex and m.lastindex >= 2 else None
                # Check if it's a 2-part format with an address
                if name_or_addr.startswith("0x") and len(name_or_addr) == 42:
                    # For 2-part format, use the full address as the name.
                    # This ensures all references to the same address get the
                    # same tag name.
                    return {
                        "name": name_or_addr,
                        "debug_address": Address(name_or_addr),
                        "original_string": data,
                    }
                else:
                    # Normal 3-part format - use the name as-is
                    result = {"name": name_or_addr, "original_string": data}
                    if debug_addr:
                        result["debug_address"] = Address(debug_addr)
                    return result
        return data
class CreateTag(AddressTag):
    """Contract derived from a another contract via CREATE/CREATE2."""

    # "create" or "create2".
    create_type: str
    # Deployer nonce (CREATE only).
    nonce: HexNumber | None = None
    # Salt (CREATE2 only).
    salt: HexNumber | None = None
    # Init code (CREATE2 only).
    initcode: Bytes | None = None
    type: ClassVar[str] = "contract"
    regex_pattern: ClassVar[re.Pattern] = re.compile(r"<(create|create2):(\w+):(\w+):?(\w+)?>")

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, data: Any) -> Any:
        """Validate the create tag from string:
        <create:name:nonce> or <create2:name:salt:initcode>."""
        if isinstance(data, str):
            if m := cls.regex_pattern.match(data):
                create_type = m.group(1)
                name = m.group(2)
                kwargs = {
                    "create_type": create_type,
                    "name": name,
                    "original_string": data,
                }
                if create_type == "create":
                    kwargs["nonce"] = m.group(3)
                elif create_type == "create2":
                    kwargs["salt"] = m.group(3)
                    kwargs["initcode"] = m.group(4)
                return kwargs
        return data

    def resolve(self, tags: TagDict) -> Address:
        """Compute the derived contract address from the deployer tag.

        Raises:
            AssertionError: If the deployer tag or a required field is missing.
            ValueError: If create_type is neither "create" nor "create2".
        """
        assert self.name in tags, f"Tag {self.name} not found in tags"
        if self.create_type == "create":
            assert self.nonce is not None, "Nonce is required for create"
            return compute_create_address(address=tags[self.name], nonce=self.nonce)
        elif self.create_type == "create2":
            assert self.salt is not None, "Salt is required for create2"
            assert self.initcode is not None, "Init code is required for create2"
            return compute_create2_address(
                address=tags[self.name], salt=self.salt, initcode=self.initcode
            )
        else:
            raise ValueError(f"Invalid create type: {self.create_type}")
class SenderTag(AddressTag):
    """EOA tag that resolves to the account's address."""

    type: ClassVar[str] = "eoa"
    regex_pattern: ClassVar[re.Pattern] = re.compile(r"<eoa:(\w+)(?::(0x[a-fA-F0-9]+))?>")
    # Optional hard-coded address for debugging
    debug_address: Address | None = None

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, data: Any) -> Any:
        """Validate the sender tag from string: <eoa:name:0x...>."""
        if isinstance(data, str):
            if m := cls.regex_pattern.match(data):
                name = m.group(1)
                debug_addr = m.group(2) if m.lastindex and m.lastindex >= 2 else None
                result = {"name": name, "original_string": data}
                if debug_addr:
                    result["debug_address"] = Address(debug_addr)
                return result
        return data
class SenderKeyTag(Tag[EOA]):
    """EOA tag that resolves to the full EOA object (address + private key),
    used where the transaction needs a signing key."""

    type: ClassVar[str] = "eoa"
    regex_pattern: ClassVar[re.Pattern] = re.compile(r"<eoa:(\w+)(?::(0x[a-fA-F0-9]+))?>")
    debug_key: str | None = None  # Optional hard-coded key for debugging

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, data: Any) -> Any:
        """Validate the sender key tag from string: <eoa:name:0xkey...>."""
        if isinstance(data, str):
            if m := cls.regex_pattern.match(data):
                name = m.group(1)
                debug_key = m.group(2) if m.lastindex and m.lastindex >= 2 else None
                result = {"name": name, "original_string": data}
                if debug_key:
                    result["debug_key"] = debug_key
                return result
        return data

    def resolve(self, tags: TagDict) -> EOA:
        """Resolve the tag to an EOA.

        Raises:
            AssertionError: If the tag is missing or resolves to a non-EOA.
        """
        assert self.name in tags, f"Tag {self.name} not found in tags"
        result = tags[self.name]
        assert isinstance(result, EOA), f"Expected EOA but got {type(result)} for tag {self.name}"
        return result
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/common/common.py | src/ethereum_test_specs/static_state/common/common.py | """Common field types from ethereum/tests."""
import re
import subprocess
import tempfile
from typing import Any, Dict, List, Mapping, Tuple, Union
from eth_abi import encode
from eth_utils import function_signature_to_4byte_selector
from pydantic import BaseModel, BeforeValidator, Field, PrivateAttr, model_validator
from pydantic_core import core_schema
from typing_extensions import Annotated
from ethereum_test_base_types import AccessList, Address, CamelModel, Hash, HexNumber
from .compile_yul import compile_yul
from .tags import (
ContractTag,
CreateTag,
SenderKeyTag,
SenderTag,
Tag,
TagDependentData,
TagDict,
)
def parse_hex_number(i: str | int) -> int:
"""Check if the given string is a valid hex number."""
if i == "" or i == "0x":
return 0
if isinstance(i, int):
return i
if i.startswith("0x:bigint "):
i = i[10:]
return int(i, 16)
if i.startswith("0x") or any(char in "abcdef" for char in i.lower()):
return int(i, 16)
return int(i, 10)
def parse_args_from_string_into_array(
    stream: str, pos: int, delim: str = " "
) -> Tuple[List[str], int]:
    """Split options out of *stream* starting at *pos*.

    Scanning stops at end of stream, a newline, or a '{' (which marks the
    start of a code body). Returns the collected arguments and the position
    where scanning stopped.
    """
    collected: List[str] = []
    current = ""
    while pos < len(stream):
        ch = stream[pos]
        if ch in ("\n", "{"):
            break
        if ch == delim:
            # Delimiter closes the current argument (possibly empty).
            collected.append(current)
            current = ""
        else:
            current += ch
        pos += 1
    # Flush a trailing non-empty argument.
    if current:
        collected.append(current)
    return collected, pos
class CodeInFiller(BaseModel, TagDependentData):
    """Not compiled code source in test filler.

    The source may be raw hex (":raw 0x.../0x..."), ABI call data (":abi"),
    Yul (":yul"), or LLL (anything starting with "{" or "(asm"), optionally
    preceded by a ":label name" prefix and containing address tags.
    """

    # Optional ":label" name used to identify this data variant.
    label: str | None
    # Uncompiled code source (tags not yet substituted).
    source: str
    # Tags found in `source`, keyed by tag name (filled in model_post_init).
    _dependencies: Dict[str, Tag] = PrivateAttr(default_factory=dict)

    @model_validator(mode="before")
    @classmethod
    def validate_from_string(cls, code: Any) -> Any:
        """Validate from string, separating label from code source."""
        if isinstance(code, str):
            label_marker = ":label"
            # Only look for label at the beginning of the string (possibly
            # after whitespace)
            stripped_code = code.lstrip()
            # Parse :label into code options
            label = None
            source = code
            # Check if the code starts with :label
            if stripped_code.startswith(label_marker):
                # Calculate the position in the original string
                label_index = code.find(label_marker)
                space_index = code.find(" ", label_index + len(label_marker) + 1)
                if space_index == -1:
                    label = code[label_index + len(label_marker) + 1 :]
                    source = ""  # No source after label
                else:
                    label = code[label_index + len(label_marker) + 1 : space_index]
                    source = code[space_index + 1 :].strip()
            return {"label": label, "source": source}
        return code

    def model_post_init(self, context: Any) -> None:
        """Scan the source for contract/eoa tags and record them as
        dependencies."""
        super().model_post_init(context)
        tag_dependencies: Dict[str, Tag] = {}
        for tag_type in {ContractTag, SenderTag}:
            for m in tag_type.regex_pattern.finditer(self.source):
                new_tag = tag_type.model_validate(m.group(0))
                tag_dependencies[new_tag.name] = new_tag
        self._dependencies = tag_dependencies

    def compiled(self, tags: TagDict) -> bytes:
        """Compile the code from source to bytes, substituting tags with
        their resolved addresses first.

        Raises:
            ValueError: If a dependency tag is missing from ``tags`` or the
                source is of an unexpected type.
            Exception: If the source format cannot be recognized or the
                compiled output is not valid hex.
        """
        raw_code = self.source
        if isinstance(raw_code, int):
            # Users pass code as int (very bad)
            hex_str = format(raw_code, "02x")
            return bytes.fromhex(hex_str)
        if not isinstance(raw_code, str):
            raise ValueError(f"code is of type {type(raw_code)} but expected a string: {raw_code}")
        if len(raw_code) == 0:
            return b""
        compiled_code = ""

        def replace_tags(raw_code: str, keep_prefix: bool) -> str:
            # Substitute every known tag occurrence with its resolved
            # address; `keep_prefix` controls whether the "0x" prefix is
            # kept (needed for source-level languages, dropped for raw hex).
            for tag in self._dependencies.values():
                if tag.name not in tags:
                    raise ValueError(f"Tag {tag} not found in tags")
                substitution_address = f"{tag.resolve(tags)}"
                if not keep_prefix and substitution_address.startswith("0x"):
                    substitution_address = substitution_address[2:]
                # Use the original string if available, otherwise construct a
                # pattern
                if hasattr(tag, "original_string") and tag.original_string:
                    raw_code = raw_code.replace(tag.original_string, substitution_address)
                else:
                    raw_code = re.sub(f"<\\w+:{tag.name}(:0x.+)?>", substitution_address, raw_code)
            return raw_code

        raw_marker = ":raw 0x"
        raw_index = raw_code.find(raw_marker)
        if raw_index == -1:
            # NOTE(review): this index is computed on the tag-substituted
            # text but later applied to differently-substituted text whose
            # length may differ — confirm intended behavior.
            raw_index = replace_tags(raw_code, True).find(raw_marker)
        abi_marker = ":abi"
        abi_index = raw_code.find(abi_marker)
        yul_marker = ":yul"
        yul_index = raw_code.find(yul_marker)
        # Parse :raw or 0x
        if raw_index != -1 or raw_code.lstrip().startswith("0x"):
            raw_code = replace_tags(raw_code, False)
            # Parse :raw
            if raw_index != -1:
                compiled_code = raw_code[raw_index + len(raw_marker) :]
            # Parse plain code 0x
            elif raw_code.lstrip().startswith("0x"):
                compiled_code = raw_code[2:].lower()
        else:
            raw_code = replace_tags(raw_code, True)
            # Parse :yul
            if yul_index != -1:
                option_start = yul_index + len(yul_marker)
                options: list[str] = []
                native_yul_options: str = ""
                if raw_code[option_start:].lstrip().startswith("{"):
                    # No yul options, proceed to code parsing
                    source_start = option_start
                else:
                    # Split the options that precede the code body; "object"
                    # and '"C"' are passed through to the Yul source itself.
                    opt, source_start = parse_args_from_string_into_array(
                        raw_code, option_start + 1
                    )
                    for arg in opt:
                        if arg == "object" or arg == '"C"':
                            native_yul_options += arg + " "
                        else:
                            options.append(arg)
                # NOTE(review): temp file is created with delete=False and
                # never removed — confirm whether cleanup is expected.
                with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".yul") as tmp:
                    tmp.write(native_yul_options + raw_code[source_start:])
                    tmp_path = tmp.name
                compiled_code = compile_yul(
                    source_file=tmp_path,
                    evm_version=options[0] if len(options) >= 1 else None,
                    optimize=options[1] if len(options) >= 2 else None,
                )[2:]
            # Parse :abi
            elif abi_index != -1:
                abi_encoding = raw_code[abi_index + len(abi_marker) + 1 :]
                tokens = abi_encoding.strip().split()
                abi = tokens[0]
                function_signature = function_signature_to_4byte_selector(abi)
                parameter_str = re.sub(r"^\w+", "", abi).strip()
                parameter_types = parameter_str.strip("()").split(",")
                if len(tokens) > 1:
                    function_parameters = encode(
                        [parameter_str],
                        [
                            [
                                # treat big ints as 256bits
                                int(t.lower(), 0) & ((1 << 256) - 1)
                                if parameter_types[t_index] == "uint"
                                # treat positive values as True
                                else int(t.lower(), 0) > 0
                                if parameter_types[t_index] == "bool"
                                # NOTE(review): "False and ValueError(...)"
                                # never raises; unhandled parameter types
                                # silently encode as False — confirm.
                                else False and ValueError("unhandled parameter_types")
                                for t_index, t in enumerate(tokens[1:])
                            ]
                        ],
                    )
                    return function_signature + function_parameters
                return function_signature
            # Parse lllc code
            elif (
                raw_code.lstrip().startswith("{")
                or raw_code.lstrip().startswith("(asm")
                or raw_code.lstrip().startswith(":raw 0x")
            ):
                with tempfile.NamedTemporaryFile(mode="w+", delete=False) as tmp:
                    tmp.write(raw_code)
                    tmp_path = tmp.name
                # - using lllc
                result = subprocess.run(["lllc", tmp_path], capture_output=True, text=True)
                # - using docker: If the running machine does not have lllc
                # installed, we can use docker to run lllc, but we need to
                # start a container first, and the process is generally slower.
                #
                # from .docker import get_lllc_container_id
                # result = subprocess.run( ["docker",
                # "exec",
                # get_lllc_container_id(),
                # "lllc",
                # tmp_path[5:]],
                # capture_output=True,
                # text=True
                # )
                compiled_code = "".join(result.stdout.splitlines())
            else:
                raise Exception(f'Error parsing code: "{raw_code}"')
        try:
            return bytes.fromhex(compiled_code)
        except ValueError as e:
            raise Exception(f'Error parsing compile code: "{raw_code}"') from e

    def tag_dependencies(self) -> Mapping[str, Tag]:
        """Get tag dependencies found in the source."""
        return self._dependencies
class AddressTag:
    """
    Represents an address tag like:
    - <eoa:sender:0x...>.
    - <contract:target:0x...>.
    - <coinbase:0x...>.
    """

    def __init__(self, tag_type: str, tag_name: str, original_string: str):
        """Initialize address tag."""
        self.tag_type = tag_type  # "eoa", "contract", or "coinbase"
        # e.g., "sender", "target", or address for 2-part tags
        self.tag_name = tag_name
        self.original_string = original_string

    def __str__(self) -> str:
        """Return original tag string."""
        return self.original_string

    def __repr__(self) -> str:
        """Return debug representation."""
        return f"AddressTag(type={self.tag_type}, name={self.tag_name})"

    def __eq__(self, other: object) -> bool:
        """Check equality based on original string."""
        if isinstance(other, AddressTag):
            return self.original_string == other.original_string
        return False

    def __hash__(self) -> int:
        """Hash based on original string for use as dict key."""
        return hash(self.original_string)

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Any, handler: Any
    ) -> core_schema.CoreSchema:
        """Pydantic core schema for AddressTag."""
        # Validated/serialized as a plain string when used in pydantic models.
        return core_schema.str_schema()
def parse_address_or_tag(value: Any) -> Union[Address, AddressTag]:
    """Parse either a regular address or an address tag.

    Recognized tag formats:
        - 3-part: ``<eoa|contract|coinbase:name:address>``
        - 2-part: ``<eoa|contract|coinbase:address>``

    Anything that does not match a tag pattern is treated as a plain address
    and left-padded to address width.

    Args:
        value: Raw filler value (string, int, bytes, ...).

    Returns:
        An ``AddressTag`` for tag strings, otherwise an ``Address``.
    """
    if not isinstance(value, str):
        # Non-string values should be converted to Address normally
        return Address(value, left_padding=True)
    # Strip once; the original computed value.strip() on every branch.
    stripped = value.strip()
    # Try 3-part pattern first (type:name:address)
    match = re.match(r"^<(eoa|contract|coinbase):([^:]+):(.+)>$", stripped)
    if match:
        # For 3-part tags, the tag_name is the middle part; the trailing
        # address portion is kept only inside the original string.
        # (Fixed: previous version bound the address to an unused local.)
        return AddressTag(match.group(1), match.group(2), stripped)
    # Try 2-part pattern (type:address)
    match = re.match(r"^<(eoa|contract|coinbase):(.+)>$", stripped)
    if match:
        # For 2-part tags, use the address as the tag_name
        return AddressTag(match.group(1), match.group(2), stripped)
    # Regular address string
    return Address(value, left_padding=True)
def parse_address_or_tag_for_access_list(value: Any) -> Union[Address, str]:
    """
    Parse either a regular address or an address tag, keeping tags as strings
    for later resolution.

    Args:
        value: Raw filler value (string, int, bytes, ...).

    Returns:
        The stripped tag string when *value* matches a tag pattern,
        otherwise a left-padded ``Address``.
    """
    if not isinstance(value, str):
        # Non-string values should be converted to Address normally
        return Address(value, left_padding=True)
    # Check if it matches a tag pattern
    tag_pattern = r"^<(eoa|contract|coinbase):.+>$"
    if re.match(tag_pattern, value.strip()):
        # Return the tag string as-is for later resolution
        return value.strip()
    else:
        # Regular address string
        return Address(value, left_padding=True)
# Address in a filler: always left-padded to full address width.
AddressInFiller = Annotated[Address, BeforeValidator(lambda a: Address(a, left_padding=True))]
# Either a resolvable tag or a concrete address.
AddressOrTagInFiller = ContractTag | SenderTag | Address
AddressOrCreateTagInFiller = ContractTag | SenderTag | CreateTag | Address
# Numeric filler value: accepts hex/decimal strings and ints (parse_hex_number).
ValueInFiller = Annotated[HexNumber, BeforeValidator(parse_hex_number)]
ValueOrTagInFiller = ContractTag | SenderTag | ValueInFiller
ValueOrCreateTagInFiller = ContractTag | SenderTag | CreateTag | ValueInFiller
# Transaction secret key: either an EOA key tag or a raw 32-byte hash.
HashOrTagInFiller = SenderKeyTag | Hash
class AccessListInFiller(CamelModel, TagDependentData):
    """
    Access List for transactions in fillers that can contain address tags.
    """

    # Account to warm; may be a tag resolved at fill time.
    address: AddressOrTagInFiller
    # Storage slots to warm for that account.
    storage_keys: List[Hash] = Field(default_factory=list)

    def tag_dependencies(self) -> Mapping[str, Tag]:
        """Get tag dependencies (only the address can be tagged)."""
        if isinstance(self.address, Tag):
            return {
                self.address.name: self.address,
            }
        return {}

    def resolve(self, tags: TagDict) -> AccessList:
        """Resolve the access list entry into a concrete AccessList."""
        kwargs: Dict[str, Address | List[Hash]] = {}
        if isinstance(self.address, Tag):
            kwargs["address"] = self.address.resolve(tags)
        else:
            kwargs["address"] = self.address
        # Keys are left-padded to 32 bytes; the camelCase key matches the
        # model's field alias.
        kwargs["storageKeys"] = [Hash(key, left_padding=True) for key in self.storage_keys]
        return AccessList(**kwargs)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/common/__init__.py | src/ethereum_test_specs/static_state/common/__init__.py | """Ethereum/tests structures."""
from .common import (
AccessListInFiller,
AddressInFiller,
AddressOrCreateTagInFiller,
AddressOrTagInFiller,
AddressTag,
CodeInFiller,
ContractTag,
HashOrTagInFiller,
SenderTag,
Tag,
TagDependentData,
TagDict,
ValueInFiller,
ValueOrCreateTagInFiller,
ValueOrTagInFiller,
parse_address_or_tag,
)
# Public API of the common static-state filler structures, kept alphabetized
# to stay consistent with the import list above.
__all__ = [
    "AccessListInFiller",
    "AddressInFiller",
    "AddressOrCreateTagInFiller",
    "AddressOrTagInFiller",
    "AddressTag",
    "CodeInFiller",
    "ContractTag",
    "HashOrTagInFiller",
    "SenderTag",
    "Tag",
    "TagDependentData",
    "TagDict",
    "ValueInFiller",
    "ValueOrCreateTagInFiller",
    "ValueOrTagInFiller",
    "parse_address_or_tag",
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_specs/static_state/common/compile_yul.py | src/ethereum_test_specs/static_state/common/compile_yul.py | """compile yul with arguments."""
import subprocess
from pathlib import Path
from typing import LiteralString
def safe_solc_command(
source_file: Path | str, evm_version: str | None = None, optimize: str | None = None
) -> list[str]:
"""Safely construct solc command with validated inputs."""
# Validate source file path
source_path = Path(source_file)
if not source_path.exists():
raise FileNotFoundError(f"Source file not found: {source_file}")
cmd: list[str] = ["solc"]
# Add EVM version if provided (validate against known versions)
if evm_version:
valid_versions = {
"homestead",
"tangerineWhistle",
"spuriousDragon",
"byzantium",
"constantinople",
"petersburg",
"istanbul",
"berlin",
"london",
"paris",
"shanghai",
"cancun",
}
if evm_version not in valid_versions:
raise ValueError(f"Invalid EVM version: {evm_version}")
cmd.extend(["--evm-version", evm_version])
# Add compilation flags (using literal strings)
strict_assembly: LiteralString = "--strict-assembly"
cmd.append(strict_assembly)
if optimize is None:
optimize_flag: LiteralString = "--optimize"
yul_opts: LiteralString = "--yul-optimizations=:"
cmd.extend([optimize_flag, yul_opts])
cmd.append(str(source_path))
return cmd
def compile_yul(
    source_file: str, evm_version: str | None = None, optimize: str | None = None
) -> str:
    """
    Compiles a Yul source file using solc and returns the binary
    representation.

    Arguments:
        source_file (str): Path to the Yul source file.
        evm_version (str, optional): The EVM version to use (e.g., 'istanbul').
            Defaults to None.
        optimize (any, optional): If provided (non-None), optimization flags
            are not added. If None, additional optimization flags will be
            included.

    Returns: str: The binary representation prefixed with "0x".

    Raises: Exception: If the solc output contains an error message.
    """
    command = safe_solc_command(source_file, evm_version, optimize)

    # Merge stderr into stdout so compiler errors are visible in one stream.
    completed = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, check=False
    )
    output = completed.stdout

    if "Error" in output:
        raise Exception(f"Yul compilation error:\n{output}")

    # The bytecode is printed on the line right after "Binary representation:".
    output_lines = output.splitlines()
    binary = ""
    for current_line, following_line in zip(output_lines, output_lines[1:]):
        if "Binary representation:" in current_line:
            binary = following_line.strip()
            break

    return f"0x{binary}"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/gas_costs.py | src/ethereum_test_forks/gas_costs.py | """Defines the data class that will contain gas cost constants on each fork."""
from dataclasses import dataclass
@dataclass(kw_only=True, frozen=True)
class GasCosts:
    """Class that contains the gas cost constants for any fork.

    Fields prefixed ``G_`` are gas charges; ``R_``-prefixed fields appear to
    be refunds (e.g. ``R_STORAGE_CLEAR``) — confirm against the per-fork
    definitions that populate this dataclass.
    """

    # Cheap/static opcode tiers.
    G_JUMPDEST: int
    G_BASE: int
    G_VERY_LOW: int
    G_LOW: int
    G_MID: int
    G_HIGH: int

    # Warm/cold account and storage access, and access-list charges.
    G_WARM_ACCOUNT_ACCESS: int
    G_COLD_ACCOUNT_ACCESS: int
    G_ACCESS_LIST_ADDRESS: int
    G_ACCESS_LIST_STORAGE: int
    G_WARM_SLOAD: int
    G_COLD_SLOAD: int

    # Storage writes.
    G_STORAGE_SET: int
    G_STORAGE_RESET: int
    R_STORAGE_CLEAR: int

    # Contract lifecycle.
    G_SELF_DESTRUCT: int
    G_CREATE: int
    G_CODE_DEPOSIT_BYTE: int
    G_INITCODE_WORD: int

    # Call-related charges.
    G_CALL_VALUE: int
    G_CALL_STIPEND: int
    G_NEW_ACCOUNT: int

    # Arithmetic and memory expansion.
    G_EXP: int
    G_EXP_BYTE: int
    G_MEMORY: int

    # Transaction-level charges (calldata pricing and base costs).
    G_TX_DATA_ZERO: int
    G_TX_DATA_NON_ZERO: int
    G_TX_DATA_STANDARD_TOKEN_COST: int
    G_TX_DATA_FLOOR_TOKEN_COST: int
    G_TRANSACTION: int
    G_TRANSACTION_CREATE: int

    # Logging.
    G_LOG: int
    G_LOG_DATA: int
    G_LOG_TOPIC: int

    # Hashing, copying and block hash access.
    G_KECCAK_256: int
    G_KECCAK_256_WORD: int
    G_COPY: int
    G_BLOCKHASH: int

    # Authorization charge and its refund counterpart.
    G_AUTHORIZATION: int
    R_AUTHORIZATION_EXISTING_AUTHORITY: int
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/base_decorators.py | src/ethereum_test_forks/base_decorators.py | """Decorators for the fork methods."""
from typing import Callable, TypeVar
F = TypeVar("F", bound=Callable)


def prefer_transition_to_method(method: F) -> F:
    """Tag *method* so transition forks always call the `fork_to` implementation."""
    setattr(method, "__prefer_transition_to_method__", True)
    return method
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/transition_base_fork.py | src/ethereum_test_forks/transition_base_fork.py | """Base objects used to define transition forks."""
from inspect import signature
from typing import Any, Callable, List, Type
from .base_fork import BaseFork
# Defaults used by the generated transition methods: values intended to be at or
# past any configured transition point, so the destination fork's behavior is
# selected when no explicit block number/timestamp is given (see the dispatch in
# `transition_fork` below — confirm they exceed every `at_block`/`at_timestamp`
# in use).
ALWAYS_TRANSITIONED_BLOCK_NUMBER = 10_000
ALWAYS_TRANSITIONED_BLOCK_TIMESTAMP = 10_000_000
class TransitionBaseClass:
    """Base class for transition forks.

    The `transition_fork` decorator overrides both methods on the generated
    class; the bases below only raise if called on an unconfigured class.
    """

    @classmethod
    def transitions_to(cls) -> "Type[BaseFork]":
        """Return fork where the transition ends.

        Raises:
            NotImplementedError: Always, unless overridden by the
                `transition_fork` decorator.
        """
        # NotImplementedError (a subclass of Exception) replaces the former
        # bare `Exception`, so existing `except Exception` handlers still work.
        raise NotImplementedError("Not implemented")

    @classmethod
    def transitions_from(cls) -> "Type[BaseFork]":
        """Return fork where the transition starts.

        Raises:
            NotImplementedError: Always, unless overridden by the
                `transition_fork` decorator.
        """
        raise NotImplementedError("Not implemented")
def base_fork_abstract_methods() -> List[str]:
    """
    Return list of all abstract methods that must be implemented by a fork.
    """
    abstract_names = BaseFork.__abstractmethods__
    return [*abstract_names]
def transition_fork(
    to_fork: Type[BaseFork], at_block: int = 0, at_timestamp: int = 0
) -> Callable[[Type[BaseFork]], Type[TransitionBaseClass]]:
    """Mark a class as a transition fork.

    The decorated class must directly subclass the fork the transition starts
    from; `to_fork` is the fork the transition ends at.  At or after
    `at_block`/`at_timestamp` the generated class behaves as `to_fork`,
    before that point as the parent fork.
    """

    def decorator(cls: Type[Any]) -> Type[TransitionBaseClass]:
        transition_name = cls.__name__
        # The fork transitioned *from* is the decorated class's direct base.
        from_fork = cls.__bases__[0]
        assert issubclass(from_fork, BaseFork)

        # Build the concrete transition class, forwarding the per-fork keyword
        # configuration consumed by BaseFork.__init_subclass__.
        class NewTransitionClass(
            cls,
            TransitionBaseClass,
            BaseFork,
            transition_tool_name=cls._transition_tool_name,
            solc_name=cls._solc_name,
            ignore=cls._ignore,
        ):
            @classmethod
            def transitions_to(cls) -> Type[BaseFork]:
                return to_fork

            @classmethod
            def transitions_from(cls) -> Type[BaseFork]:
                return from_fork

        # Keep the decorated class's original name for display purposes.
        NewTransitionClass.name = lambda: transition_name  # type: ignore

        def make_transition_method(
            base_method: Callable[..., Any],
            from_fork_method: Callable[..., Any],
            to_fork_method: Callable[..., Any],
        ) -> classmethod:
            # Inspect the base method so only the block_number/timestamp
            # parameters it actually declares are forwarded.
            base_method_parameters = signature(base_method).parameters

            def transition_method(
                cls: Type[Any],
                block_number: int = ALWAYS_TRANSITIONED_BLOCK_NUMBER,
                timestamp: int = ALWAYS_TRANSITIONED_BLOCK_TIMESTAMP,
            ) -> Any:
                del cls
                kwargs = {}
                if "block_number" in base_method_parameters:
                    kwargs["block_number"] = block_number
                if "timestamp" in base_method_parameters:
                    kwargs["timestamp"] = timestamp

                # Methods tagged by @prefer_transition_to_method always
                # delegate to the destination fork.
                if getattr(base_method, "__prefer_transition_to_method__", False):
                    return to_fork_method(**kwargs)

                # Otherwise dispatch on whether the transition point has been
                # reached.
                return (
                    to_fork_method(**kwargs)
                    if block_number >= at_block and timestamp >= at_timestamp
                    else from_fork_method(**kwargs)
                )

            return classmethod(transition_method)

        # Wrap every abstract fork method (plus bpo_fork) so calls dispatch
        # between the source and destination forks.
        for method_name in base_fork_abstract_methods() + ["bpo_fork"]:
            setattr(
                NewTransitionClass,
                method_name,
                make_transition_method(
                    getattr(BaseFork, method_name),
                    getattr(from_fork, method_name),
                    getattr(to_fork, method_name),
                ),
            )

        NewTransitionClass.transitions_to = lambda: to_fork  # type: ignore
        NewTransitionClass.transitions_from = lambda: from_fork  # type: ignore
        NewTransitionClass.fork_at = lambda block_number=0, timestamp=0: (  # type: ignore
            to_fork if block_number >= at_block and timestamp >= at_timestamp else from_fork
        )
        return NewTransitionClass

    return decorator
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/helpers.py | src/ethereum_test_forks/helpers.py | """Helper methods to resolve forks during test filling."""
import re
from typing import Annotated, Any, Callable, FrozenSet, List, Optional, Set, Type
from pydantic import (
BaseModel,
BeforeValidator,
ConfigDict,
PlainSerializer,
PlainValidator,
TypeAdapter,
ValidatorFunctionWrapHandler,
model_validator,
)
from .base_fork import BaseFork
from .forks import forks, transition
from .transition_base_fork import TransitionBaseClass
class InvalidForkError(Exception):
    """Raised when a specified fork is not found or is incompatible."""

    def __init__(self, message: str) -> None:
        """Create the error carrying a human-readable *message*."""
        super().__init__(message)
# Collect every fork class defined in the `forks` module (excluding the
# abstract BaseFork itself), in definition order.
all_forks: List[Type[BaseFork]] = []
for fork_name in forks.__dict__:
    fork = forks.__dict__[fork_name]
    if not isinstance(fork, type):
        continue
    if issubclass(fork, BaseFork) and fork is not BaseFork:
        all_forks.append(fork)

# Collect every transition fork class defined in the `transition` module.
transition_forks: List[Type[BaseFork]] = []
for fork_name in transition.__dict__:
    fork = transition.__dict__[fork_name]
    if not isinstance(fork, type):
        continue
    if issubclass(fork, TransitionBaseClass) and issubclass(fork, BaseFork):
        transition_forks.append(fork)

# Frozen views: ALL_FORKS and ALL_FORKS_WITH_TRANSITIONS exclude ignored forks;
# ALL_TRANSITION_FORKS is the unfiltered transition-fork set.
ALL_FORKS = frozenset(fork for fork in all_forks if not fork.ignore())
ALL_TRANSITION_FORKS = frozenset(transition_forks)
ALL_FORKS_WITH_TRANSITIONS = frozenset(
    fork for fork in ALL_FORKS | ALL_TRANSITION_FORKS if not fork.ignore()
)
def get_forks() -> List[Type[BaseFork]]:
    """
    Return list of all the fork classes implemented by `ethereum_test_forks`
    ordered chronologically by deployment.
    """
    # Hand back a copy so callers cannot mutate the module-level registry.
    return list(all_forks)
def get_deployed_forks() -> List[Type[BaseFork]]:
    """
    Return list of all the fork classes implemented by `ethereum_test_forks`
    that have been deployed to mainnet, chronologically ordered by deployment.
    """
    deployed: List[Type[BaseFork]] = []
    for candidate in get_forks():
        if candidate.is_deployed() and not candidate.ignore():
            deployed.append(candidate)
    return deployed
def get_development_forks() -> List[Type[BaseFork]]:
    """
    Return list of all the fork classes implemented by `ethereum_test_forks`
    that have been not yet deployed to mainnet and are currently under
    development. The list is ordered by their planned deployment date.
    """
    in_development: List[Type[BaseFork]] = []
    for candidate in get_forks():
        if not candidate.is_deployed():
            in_development.append(candidate)
    return in_development
def get_parent_fork(fork: Type[BaseFork]) -> Type[BaseFork]:
    """Return parent fork of the specified fork."""
    parent = fork.__base__
    if parent:
        return parent
    raise InvalidForkError(f"Parent fork of {fork} not found.")
def get_closest_fork(fork: Type[BaseFork]) -> Optional[Type[BaseFork]]:
    """Return None if BaseFork is passed, otherwise return the fork itself."""
    return None if fork is BaseFork else fork
def get_transition_forks() -> Set[Type[BaseFork]]:
    """Return all the transition forks."""
    # Materialize a fresh mutable set from the frozen registry.
    return {transition_fork for transition_fork in ALL_TRANSITION_FORKS}
def get_transition_fork_predecessor(transition_fork: Type[BaseFork]) -> Type[BaseFork]:
    """Return the fork from which the transition fork transitions."""
    if issubclass(transition_fork, TransitionBaseClass):
        return transition_fork.transitions_from()
    raise InvalidForkError(f"{transition_fork} is not a transition fork.")
def get_transition_fork_successor(transition_fork: Type[BaseFork]) -> Type[BaseFork]:
    """Return the fork to which the transition fork transitions."""
    if issubclass(transition_fork, TransitionBaseClass):
        return transition_fork.transitions_to()
    raise InvalidForkError(f"{transition_fork} is not a transition fork.")
def get_from_until_fork_set(
    forks: Set[Type[BaseFork]] | FrozenSet[Type[BaseFork]],
    forks_from: Set[Type[BaseFork]],
    forks_until: Set[Type[BaseFork]],
) -> Set[Type[BaseFork]]:
    """Get fork range from forks_from to forks_until."""
    # A fork is selected if it lies between any (from, until) boundary pair.
    return {
        fork
        for fork_from in forks_from
        for fork_until in forks_until
        for fork in forks
        if fork >= fork_from and fork <= fork_until
    }
def get_forks_with_no_parents(
    forks: Set[Type[BaseFork]] | FrozenSet[Type[BaseFork]],
) -> Set[Type[BaseFork]]:
    """Get forks with no parents in the inheritance hierarchy."""
    # A fork is a root of the given set if no other member is older than it.
    return {
        fork
        for fork in forks
        if not any(other < fork for other in forks - {fork})
    }
def get_forks_with_no_descendants(forks: Set[Type[BaseFork]]) -> Set[Type[BaseFork]]:
    """Get forks with no descendants in the inheritance hierarchy."""
    # A fork is a leaf of the given set if no other member is newer than it.
    return {
        fork
        for fork in forks
        if not any(other > fork for other in forks - {fork})
    }
def get_last_descendants(
    forks: Set[Type[BaseFork]], forks_from: Set[Type[BaseFork]]
) -> Set[Type[BaseFork]]:
    """Get last descendant of a class in the inheritance hierarchy."""
    # Only leaves of `forks` that are at or after some `forks_from` member.
    leaves = get_forks_with_no_descendants(forks)
    return {
        leaf
        for fork_from in forks_from
        for leaf in leaves
        if leaf >= fork_from
    }
def get_selected_fork_set(
    *,
    single_fork: Set[Type[BaseFork]],
    forks_from: Set[Type[BaseFork]],
    forks_until: Set[Type[BaseFork]],
    transition_forks: bool = True,
) -> Set[Type[BaseFork]]:
    """
    Process sets derived from `--fork`, `--until` and `--from` to return an
    unified fork set.
    """
    if single_fork:
        selected: Set[Type[BaseFork]] = set(single_fork)
    else:
        # Default the range boundaries when they are not explicitly provided.
        from_set = forks_from or get_forks_with_no_parents(ALL_FORKS)
        until_set = forks_until or get_last_descendants(set(get_deployed_forks()), from_set)
        selected = get_from_until_fork_set(ALL_FORKS, from_set, until_set)
    if transition_forks:
        # Also include every transition fork that ends at a selected fork.
        for selected_fork in list(selected):
            selected |= transition_fork_to(selected_fork)
    return selected
def transition_fork_from_to(
    fork_from: Type[BaseFork], fork_to: Type[BaseFork]
) -> Type[BaseFork] | None:
    """
    Return transition fork that transitions to and from the specified forks.
    """
    for candidate in get_transition_forks():
        if not issubclass(candidate, TransitionBaseClass):
            continue
        matches = (
            candidate.transitions_to() == fork_to
            and candidate.transitions_from() == fork_from
        )
        if matches:
            return candidate
    return None
def transition_fork_to(fork_to: Type[BaseFork]) -> Set[Type[BaseFork]]:
    """Return transition fork that transitions to the specified fork."""
    return {
        candidate
        for candidate in get_transition_forks()
        if issubclass(candidate, TransitionBaseClass)
        and candidate.transitions_to() == fork_to
    }
def forks_from_until(
    fork_from: Type[BaseFork], fork_until: Type[BaseFork]
) -> List[Type[BaseFork]]:
    """
    Return specified fork and all forks after it until and including the second
    specified fork.
    """
    chain: List[Type[BaseFork]] = []
    cursor = fork_until
    # Walk the parent chain backwards from `fork_until` towards `fork_from`.
    while cursor != BaseFork and cursor != fork_from:
        chain.append(cursor)
        cursor = get_parent_fork(cursor)
    if cursor == BaseFork:
        # `fork_from` is not an ancestor of `fork_until`.
        return []
    chain.append(fork_from)
    chain.reverse()
    return chain
def forks_from(fork: Type[BaseFork], deployed_only: bool = True) -> List[Type[BaseFork]]:
    """Return specified fork and all forks after it."""
    pool = get_deployed_forks() if deployed_only else get_forks()
    return forks_from_until(fork, pool[-1])
def get_relative_fork_markers(
    fork_identifier: Type[BaseFork] | str, strict_mode: bool = True
) -> list[str]:
    """
    Return a list of marker names for a given fork.

    For a base fork (e.g. `Shanghai`), return [ `Shanghai` ]. For a transition
    fork (e.g. `ShanghaiToCancunAtTime15k` which transitions to `Cancun`),
    return [ `ShanghaiToCancunAtTime15k`, `Cancun` ].

    If `strict_mode` is set to `True`, raise an `InvalidForkError` if the fork
    is not found, otherwise, simply return the provided (str) `fork_identifier`
    (this is required to run `consume` with forks that are unknown to EEST).
    """
    known_forks = set(get_forks()) | set(get_transition_forks())
    if isinstance(fork_identifier, str):
        matched = next(
            (candidate for candidate in known_forks if candidate.name() == fork_identifier),
            None,
        )
        if strict_mode and matched is None:
            raise InvalidForkError(f"Unknown fork: {fork_identifier}")
        # String identifiers are returned verbatim even when a class matches.
        return [fork_identifier]
    fork_class = fork_identifier
    if issubclass(fork_class, TransitionBaseClass):
        return [fork_class.name(), fork_class.transitions_to().name()]
    return [fork_class.name()]
def get_fork_by_name(fork_name: str) -> Type[BaseFork] | None:
    """Get a fork by name."""
    return next((fork for fork in get_forks() if fork.name() == fork_name), None)
class ForkRangeDescriptor(BaseModel):
    """
    Fork descriptor parsed from string normally contained in ethereum/tests
    fillers.
    """

    # Inclusive lower bound of the range (None means unbounded below).
    greater_equal: Type[BaseFork] | None = None
    # Exclusive upper bound of the range (None means unbounded above).
    less_than: Type[BaseFork] | None = None

    model_config = ConfigDict(frozen=True)

    def fork_in_range(self, fork: Type[BaseFork]) -> bool:
        """Return whether the given fork is within range."""
        if self.greater_equal is not None and fork < self.greater_equal:
            return False
        if self.less_than is not None and fork >= self.less_than:
            return False
        return True

    @model_validator(mode="wrap")
    @classmethod
    def validate_fork_range_descriptor(
        cls, v: Any, handler: ValidatorFunctionWrapHandler
    ) -> "ForkRangeDescriptor":
        """
        Validate the fork range descriptor from a string.

        Examples:
            - ">=Osaka" validates to {greater_equal=Osaka, less_than=None}
            - ">=Prague<Osaka" validates to {greater_equal=Prague,
              less_than=Osaka}
        """
        if isinstance(v, str):
            # Decompose the string into its parts: strip all whitespace, then
            # peel off the ">=Fork" and "<Fork" components in turn.
            descriptor_string = re.sub(r"\s+", "", v.strip())
            v = {}
            if m := re.search(r">=(\w+)", descriptor_string):
                fork: Type[BaseFork] | None = get_fork_by_name(m.group(1))
                if fork is None:
                    raise Exception(f"Unable to parse fork name: {m.group(1)}")
                v["greater_equal"] = fork
                descriptor_string = re.sub(r">=(\w+)", "", descriptor_string)
            if m := re.search(r"<(\w+)", descriptor_string):
                fork = get_fork_by_name(m.group(1))
                if fork is None:
                    raise Exception(f"Unable to parse fork name: {m.group(1)}")
                v["less_than"] = fork
                descriptor_string = re.sub(r"<(\w+)", "", descriptor_string)
            # Anything left over means the descriptor was not fully parseable.
            if descriptor_string:
                raise Exception(
                    "Unable to completely parse fork range descriptor. "
                    + f'Remaining string: "{descriptor_string}"'
                )
        return handler(v)
def fork_validator_generator(
    cls_name: str, forks: List[Type[BaseFork]]
) -> Callable[[Any], Type[BaseFork]]:
    """Generate a fork validator function."""
    # Case-insensitive lookup table of the accepted forks.
    lookup = {fork.name().lower(): fork for fork in forks}

    def fork_validator(obj: Any) -> Type[BaseFork]:
        """Get a fork by name or raise an error."""
        if obj is None:
            raise InvalidForkError("Fork cannot be None")
        if isinstance(obj, type) and issubclass(obj, BaseFork):
            return obj
        if isinstance(obj, str):
            key = obj.lower()
            if key in lookup:
                return lookup[key]
            raise InvalidForkError(f"Invalid fork '{obj}' specified")
        raise InvalidForkError(f"Invalid {cls_name}: {obj} (type: {type(obj)})")

    return fork_validator
def set_before_validator(value: Any) -> Any:
    """Convert a comma-separated string to a validation input for a set."""
    if not isinstance(value, str):
        # Non-string inputs are passed through untouched.
        return value
    if not value.strip():
        return set()
    return {part.strip() for part in value.split(",")}
# Annotated Pydantic-Friendly Fork Types

# A fork class serialized as its string name; accepts any base or transition fork.
Fork = Annotated[
    Type[BaseFork],
    PlainSerializer(str),
    PlainValidator(fork_validator_generator("Fork", all_forks + transition_forks)),
]
ForkAdapter: TypeAdapter = TypeAdapter(Fork)
ForkOrNoneAdapter: TypeAdapter = TypeAdapter(Fork | None)
# A set of forks; also accepts a comma-separated string of fork names.
ForkSet = Annotated[
    Set[Fork],
    BeforeValidator(set_before_validator),
]
ForkSetAdapter: TypeAdapter = TypeAdapter(ForkSet)
# Same as `Fork`, but restricted to transition forks only.
TransitionFork = Annotated[
    Type[BaseFork],
    PlainSerializer(str),
    PlainValidator(fork_validator_generator("TransitionFork", transition_forks)),
]
TransitionForkAdapter: TypeAdapter = TypeAdapter(TransitionFork)
TransitionForkOrNoneAdapter: TypeAdapter = TypeAdapter(TransitionFork | None)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/base_fork.py | src/ethereum_test_forks/base_fork.py | """Abstract base class for Ethereum forks."""
from abc import ABC, ABCMeta, abstractmethod
from typing import (
Any,
ClassVar,
Dict,
List,
Literal,
Mapping,
Optional,
Protocol,
Set,
Sized,
Tuple,
Type,
Union,
)
from ethereum_test_base_types import AccessList, Address, BlobSchedule
from ethereum_test_base_types.conversions import BytesConvertible
from ethereum_test_vm import EVMCodeType, Opcodes
from .base_decorators import prefer_transition_to_method
from .gas_costs import GasCosts
class ForkAttribute(Protocol):
    """
    A protocol to get the attribute of a fork at a given block number and
    timestamp.
    """

    def __call__(self, block_number: int = 0, timestamp: int = 0) -> Any:
        """
        Return value of the attribute at the given block number and timestamp.

        Args:
            block_number: Block number at which to evaluate the attribute.
            timestamp: Block timestamp at which to evaluate the attribute.
        """
        pass
class MemoryExpansionGasCalculator(Protocol):
    """
    A protocol to calculate the gas cost of memory expansion at a given fork.
    """

    def __call__(self, *, new_bytes: int, previous_bytes: int = 0) -> int:
        """
        Return gas cost of expanding the memory by the given length.

        Args:
            new_bytes: Size in bytes of the newly required memory (exact
                semantics — total vs. delta — are defined by the fork
                implementation; confirm there).
            previous_bytes: Size in bytes of the memory before the expansion.
        """
        pass
class CalldataGasCalculator(Protocol):
    """
    A protocol to calculate the transaction gas cost of calldata at a given
    fork.
    """

    def __call__(self, *, data: BytesConvertible, floor: bool = False) -> int:
        """
        Return the transaction gas cost of calldata given its contents.

        Args:
            data: The calldata to be priced.
            floor: When True, presumably price using the floor data cost —
                confirm against the fork implementations.
        """
        pass
class TransactionDataFloorCostCalculator(Protocol):
    """
    Calculate the transaction floor cost due to its calldata for a given fork.
    """

    def __call__(self, *, data: BytesConvertible) -> int:
        """
        Return transaction gas cost of calldata given its contents.

        Args:
            data: The calldata whose floor cost is computed.
        """
        pass
class BaseFeePerGasCalculator(Protocol):
    """A protocol to calculate the base fee per gas at a given fork."""

    def __call__(
        self, *, parent_base_fee_per_gas: int, parent_gas_used: int, parent_gas_limit: int
    ) -> int:
        """
        Return the base fee per gas at a given fork.

        Args:
            parent_base_fee_per_gas: Base fee per gas of the parent block.
            parent_gas_used: Gas used by the parent block.
            parent_gas_limit: Gas limit of the parent block.
        """
        pass
class BaseFeeChangeCalculator(Protocol):
    """
    A protocol to calculate the gas that needs to be used to change the base
    fee.
    """

    def __call__(
        self,
        *,
        parent_base_fee_per_gas: int,
        parent_gas_limit: int,
        required_base_fee_per_gas: int,
    ) -> int:
        """
        Return the gas that needs to be used to change the base fee.

        Args:
            parent_base_fee_per_gas: Base fee per gas of the parent block.
            parent_gas_limit: Gas limit of the parent block.
            required_base_fee_per_gas: Base fee per gas desired for the next
                block.
        """
        pass
class TransactionIntrinsicCostCalculator(Protocol):
    """
    A protocol to calculate the intrinsic gas cost of a transaction at a given
    fork.
    """

    def __call__(
        self,
        *,
        calldata: BytesConvertible = b"",
        contract_creation: bool = False,
        access_list: List[AccessList] | None = None,
        authorization_list_or_count: Sized | int | None = None,
        return_cost_deducted_prior_execution: bool = False,
    ) -> int:
        """
        Return the intrinsic gas cost of a transaction given its properties.

        Args:
            calldata: The data of the transaction.
            contract_creation: Whether the transaction creates a contract.
            access_list: The list of access lists for the transaction.
            authorization_list_or_count: The list of authorizations or the
                count of authorizations for the transaction.
            return_cost_deducted_prior_execution: If set to False, the
                returned value is equal to the minimum gas required for the
                transaction to be valid. If set to True, the returned value is
                equal to the cost that is deducted from the gas limit before
                the transaction starts execution.

        Returns:
            Gas cost of a transaction.
        """
        pass
class BlobGasPriceCalculator(Protocol):
    """
    A protocol to calculate the blob gas price given the excess blob gas at a
    given fork.
    """

    def __call__(self, *, excess_blob_gas: int) -> int:
        """
        Return the blob gas price given the excess blob gas.

        Args:
            excess_blob_gas: The excess blob gas of the block being priced.
        """
        pass
class ExcessBlobGasCalculator(Protocol):
    """
    A protocol to calculate the excess blob gas for a block at a given fork.
    """

    def __call__(
        self,
        *,
        parent_excess_blob_gas: int | None = None,
        parent_excess_blobs: int | None = None,
        parent_blob_gas_used: int | None = None,
        parent_blob_count: int | None = None,
        parent_base_fee_per_gas: int,
    ) -> int:
        """
        Return the excess blob gas given the parent's excess blob gas and blob
        gas used.

        The parent's excess may be supplied either as blob gas
        (`parent_excess_blob_gas`) or as a blob count (`parent_excess_blobs`);
        likewise the parent's usage may be supplied as blob gas or as a blob
        count — which combinations are accepted is defined by the fork
        implementations (confirm there).
        """
        pass
class BaseForkMeta(ABCMeta):
    """Metaclass for BaseFork."""

    @abstractmethod
    def name(cls) -> str:
        """
        Return the name of the fork (e.g., Berlin), must be implemented by
        subclasses.
        """
        pass

    def __repr__(cls) -> str:
        """Use the fork's name as the class representation."""
        return cls.name()

    @staticmethod
    def _maybe_transitioned(fork_cls: "BaseForkMeta") -> "BaseForkMeta":
        """Map a transition fork to its destination fork; pass others through."""
        if hasattr(fork_cls, "transitions_to"):
            return fork_cls.transitions_to()
        return fork_cls

    @staticmethod
    def _is_subclass_of(a: "BaseForkMeta", b: "BaseForkMeta") -> bool:
        """Subclass check that first resolves transition forks to their targets."""
        return issubclass(
            BaseForkMeta._maybe_transitioned(a),
            BaseForkMeta._maybe_transitioned(b),
        )

    def __gt__(cls, other: "BaseForkMeta") -> bool:
        """`cls` is strictly newer than `other`."""
        if cls is other:
            return False
        return BaseForkMeta._is_subclass_of(cls, other)

    def __ge__(cls, other: "BaseForkMeta") -> bool:
        """`cls` is `other` itself or newer."""
        if cls is other:
            return True
        return BaseForkMeta._is_subclass_of(cls, other)

    def __lt__(cls, other: "BaseForkMeta") -> bool:
        """`cls` is strictly older than `other` (i.e. `other` derives from `cls`)."""
        if cls is other:
            return False
        return BaseForkMeta._is_subclass_of(other, cls)

    def __le__(cls, other: "BaseForkMeta") -> bool:
        """`cls` is `other` itself or older."""
        if cls is other:
            return True
        return BaseForkMeta._is_subclass_of(other, cls)
class BaseFork(ABC, metaclass=BaseForkMeta):
"""
An abstract class representing an Ethereum fork.
Must contain all the methods used by every fork.
"""
_transition_tool_name: ClassVar[Optional[str]] = None
_solc_name: ClassVar[Optional[str]] = None
_ignore: ClassVar[bool] = False
_bpo_fork: ClassVar[bool] = False
_children: ClassVar[Set[Type["BaseFork"]]] = set()
# make mypy happy
BLOB_CONSTANTS: ClassVar[Dict[str, Union[int, Literal["big"]]]] = {}
@classmethod
def get_blob_constant(cls, name: str) -> int | Literal["big"]:
"""Return value of requested blob constant."""
raise NotImplementedError
def __init_subclass__(
cls,
*,
transition_tool_name: Optional[str] = None,
solc_name: Optional[str] = None,
ignore: bool = False,
bpo_fork: bool = False,
) -> None:
"""
Initialize new fork with values that don't carry over to subclass
forks.
"""
cls._transition_tool_name = transition_tool_name
cls._solc_name = solc_name
cls._ignore = ignore
cls._bpo_fork = bpo_fork
cls._children = set()
base_class = cls.__bases__[0]
assert issubclass(base_class, BaseFork)
if base_class != BaseFork:
base_class._children.add(cls)
# Header information abstract methods
@classmethod
@abstractmethod
def header_base_fee_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain base fee."""
pass
@classmethod
@abstractmethod
def header_prev_randao_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain Prev Randao value."""
pass
@classmethod
@abstractmethod
def header_zero_difficulty_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must have difficulty zero."""
pass
@classmethod
@abstractmethod
def header_withdrawals_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain withdrawals."""
pass
@classmethod
@abstractmethod
def header_excess_blob_gas_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain excess blob gas."""
pass
@classmethod
@abstractmethod
def header_blob_gas_used_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain blob gas used."""
pass
@classmethod
@abstractmethod
def header_beacon_root_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain parent beacon block root."""
pass
@classmethod
@abstractmethod
def header_requests_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain beacon chain requests."""
pass
@classmethod
@abstractmethod
def header_bal_hash_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
"""Return true if the header must contain block access list hash."""
pass
# Gas related abstract methods
@classmethod
@abstractmethod
def gas_costs(cls, *, block_number: int = 0, timestamp: int = 0) -> GasCosts:
"""Return dataclass with the gas costs constants for the fork."""
pass
@classmethod
@abstractmethod
def memory_expansion_gas_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> MemoryExpansionGasCalculator:
"""
Return a callable that calculates the gas cost of memory expansion for
the fork.
"""
pass
@classmethod
@abstractmethod
def calldata_gas_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> CalldataGasCalculator:
"""
Return callable that calculates the transaction gas cost for its
calldata depending on its contents.
"""
pass
@classmethod
@abstractmethod
def base_fee_per_gas_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> BaseFeePerGasCalculator:
"""
Return a callable that calculates the base fee per gas at a given fork.
"""
pass
@classmethod
@abstractmethod
def base_fee_change_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> BaseFeeChangeCalculator:
"""
Return a callable that calculates the gas that needs to be used to
change the base fee.
"""
pass
@classmethod
@abstractmethod
def base_fee_max_change_denominator(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
"""Return the base fee max change denominator at a given fork."""
pass
@classmethod
@abstractmethod
def base_fee_elasticity_multiplier(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
"""Return the base fee elasticity multiplier at a given fork."""
pass
@classmethod
@abstractmethod
def max_refund_quotient(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
"""Return the max refund quotient at a given fork."""
pass
@classmethod
@abstractmethod
def transaction_data_floor_cost_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> TransactionDataFloorCostCalculator:
"""
Return a callable that calculates the transaction floor cost due to its
calldata.
"""
pass
@classmethod
@abstractmethod
def transaction_intrinsic_cost_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> TransactionIntrinsicCostCalculator:
"""
Return callable that calculates the intrinsic gas cost of a transaction
for the fork.
"""
pass
    # -- Blob (EIP-4844 family) and reward interface (implemented per fork) --
    @classmethod
    @abstractmethod
    def blob_gas_price_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> BlobGasPriceCalculator:
        """
        Return a callable that calculates the blob gas price at a given fork.
        """
        pass
    @classmethod
    @abstractmethod
    def excess_blob_gas_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> ExcessBlobGasCalculator:
        """
        Return a callable that calculates the excess blob gas for a block at a
        given fork.
        """
        pass
    @classmethod
    @abstractmethod
    def min_base_fee_per_blob_gas(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the minimum base fee per blob gas at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def blob_gas_per_blob(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the amount of blob gas used per blob at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def blob_base_fee_update_fraction(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the blob base fee update fraction at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def supports_blobs(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """Return whether the given fork supports blobs or not."""
        pass
    @classmethod
    @abstractmethod
    def target_blobs_per_block(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the target blobs per block at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def max_blobs_per_tx(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the max blobs per transaction at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def max_blobs_per_block(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the max blobs per block at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def blob_reserve_price_active(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """
        Return whether the fork uses a reserve price mechanism for blobs or
        not.
        """
        pass
    @classmethod
    @abstractmethod
    def blob_base_cost(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the base cost of a blob at a given fork."""
        pass
    @classmethod
    @abstractmethod
    def full_blob_tx_wrapper_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> int | None:
        """
        Return the version of the full blob transaction wrapper at a given
        fork.
        """
        pass
    # NOTE: `prefer_transition_to_method` routes transition forks to the
    # post-transition fork's implementation (see decorator definition above
    # this chunk — TODO confirm against base_fork.py).
    @classmethod
    @prefer_transition_to_method
    @abstractmethod
    def blob_schedule(cls, *, block_number: int = 0, timestamp: int = 0) -> BlobSchedule | None:
        """Return the blob schedule up until the given fork."""
        pass
    @classmethod
    @abstractmethod
    def get_reward(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return expected reward amount in wei of a given fork."""
        pass
    # Transaction related abstract methods
    @classmethod
    @abstractmethod
    def tx_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[int]:
        """Return list of the transaction types supported by the fork."""
        pass
    @classmethod
    @abstractmethod
    def contract_creating_tx_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[int]:
        """
        Return list of the transaction types supported by the fork that can
        create contracts.
        """
        pass
    @classmethod
    @abstractmethod
    def transaction_gas_limit_cap(cls, *, block_number: int = 0, timestamp: int = 0) -> int | None:
        """
        Return the transaction gas limit cap, or None if no limit is imposed.
        """
        pass
    @classmethod
    @abstractmethod
    def block_rlp_size_limit(cls, *, block_number: int = 0, timestamp: int = 0) -> int | None:
        """
        Return the maximum RLP size of a block in bytes, or None if no limit is
        imposed.
        """
        pass
    @classmethod
    @abstractmethod
    def precompiles(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """Return list of pre-compiles supported by the fork."""
        pass
    @classmethod
    @abstractmethod
    def system_contracts(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """Return list of system-contracts supported by the fork."""
        pass
    @classmethod
    @prefer_transition_to_method
    @abstractmethod
    def pre_allocation(cls, *, block_number: int = 0, timestamp: int = 0) -> Mapping:
        """
        Return required pre-allocation of accounts for any kind of test.
        This method must always call the `fork_to` method when transitioning,
        because the allocation can only be set at genesis, and thus cannot be
        changed at transition time.
        """
        pass
    @classmethod
    @prefer_transition_to_method
    @abstractmethod
    def pre_allocation_blockchain(cls, *, block_number: int = 0, timestamp: int = 0) -> Mapping:
        """
        Return required pre-allocation of accounts for any blockchain tests.
        This method must always call the `fork_to` method when transitioning,
        because the allocation can only be set at genesis, and thus cannot be
        changed at transition time.
        """
        pass
    # Engine API information abstract methods
    @classmethod
    @abstractmethod
    def engine_new_payload_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """
        Return `None` if this fork's payloads cannot be sent over the engine
        API, or the payload version if it can.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_new_payload_blob_hashes(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """
        Return true if the engine api version requires new payload calls to
        include blob hashes.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_new_payload_beacon_root(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """
        Return true if the engine api version requires new payload calls to
        include a parent beacon block root.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_new_payload_requests(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """
        Return true if the engine api version requires new payload calls to
        include requests.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_new_payload_target_blobs_per_block(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        Return true if the engine api version requires new payload calls to
        include target blobs per block.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_execution_payload_block_access_list(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        Return `True` if the engine api version requires execution payload to
        include a `block_access_list`.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_payload_attribute_target_blobs_per_block(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        Return true if the payload attributes include the target blobs per
        block.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_payload_attribute_max_blobs_per_block(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        Return true if the payload attributes include the max blobs per block.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_forkchoice_updated_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """
        Return `None` if the forks canonical chain cannot be set using the
        forkchoice method.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_get_payload_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """
        Return `None` if the forks canonical chain cannot build a payload using
        the engine API.
        """
        pass
    @classmethod
    @abstractmethod
    def engine_get_blobs_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """
        Return `None` if the fork does not support the engine get blobs
        version.
        """
        pass
    # EVM information abstract methods
    @classmethod
    @abstractmethod
    def evm_code_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[EVMCodeType]:
        """Return list of EVM code types supported by the fork."""
        pass
    @classmethod
    @abstractmethod
    def max_code_size(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        Return the maximum code size allowed to be deployed in a contract
        creation.
        """
        pass
    @classmethod
    @abstractmethod
    def max_stack_height(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the maximum stack height allowed in the EVM stack."""
        pass
    @classmethod
    @abstractmethod
    def max_initcode_size(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        Return the maximum initcode size allowed to be used in a contract
        creation.
        """
        pass
    @classmethod
    @abstractmethod
    def call_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """
        Return list of tuples with the call opcodes and its corresponding EVM
        code type.
        """
        pass
    @classmethod
    @abstractmethod
    def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
        """Return list of Opcodes that are valid to work on this fork."""
        pass
    @classmethod
    @abstractmethod
    def create_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """
        Return list of tuples with the create opcodes and its corresponding EVM
        code type.
        """
        pass
    @classmethod
    @abstractmethod
    def max_request_type(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return max request type supported by the fork."""
        pass
    # Meta information about the fork
    @classmethod
    def name(cls) -> str:
        """Return name of the fork (the class name itself)."""
        return cls.__name__
@classmethod
def fork_at(cls, *, block_number: int = 0, timestamp: int = 0) -> Type["BaseFork"]:
"""
Return fork at the given block number and timestamp. Useful only for
transition forks, and it's a no-op for normal forks.
"""
del block_number, timestamp
return cls
    @classmethod
    @abstractmethod
    def transition_tool_name(cls, *, block_number: int = 0, timestamp: int = 0) -> str:
        """
        Return fork name as it's meant to be passed to the transition tool for
        execution.
        """
        pass
    @classmethod
    @abstractmethod
    def solc_name(cls) -> str:
        """Return fork name as it's meant to be passed to the solc compiler."""
        pass
    @classmethod
    def is_deployed(cls) -> bool:
        """
        Return whether the fork has been deployed to mainnet, or not.
        Must be overridden and return False for forks that are still under
        development.
        """
        return True
    @classmethod
    def ignore(cls) -> bool:
        """Return whether the fork should be ignored during test generation."""
        # Backed by the `_ignore` class attribute (set elsewhere in the class).
        return cls._ignore
    @classmethod
    @prefer_transition_to_method
    def bpo_fork(cls) -> bool:
        """Return whether the fork is a BPO (blob-parameter-only) fork."""
        # Backed by the `_bpo_fork` class attribute (set elsewhere in the class).
        return cls._bpo_fork
@classmethod
def parent(cls) -> Type["BaseFork"] | None:
"""Return the parent fork."""
base_class = cls.__bases__[0]
assert issubclass(base_class, BaseFork)
if base_class == BaseFork:
return None
return base_class
@classmethod
def non_bpo_ancestor(cls) -> Type["BaseFork"]:
"""Return the nearest non-BPO ancestor fork."""
ancestor = cls
while ancestor.bpo_fork():
parent = ancestor.parent()
if parent is None:
break
ancestor = parent
return ancestor
@classmethod
def children(cls) -> Set[Type["BaseFork"]]:
"""Return the children forks."""
return set(cls._children)
# --- dataset row metadata: language=python, license=MIT, commit=88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 ---
# --- file: src/ethereum_test_forks/__init__.py (ethereum/execution-spec-tests) ---
"""Ethereum test fork definitions."""
from .base_fork import ForkAttribute
from .forks.forks import (
BPO1,
BPO2,
BPO3,
BPO4,
BPO5,
Amsterdam,
ArrowGlacier,
Berlin,
Byzantium,
Cancun,
Constantinople,
ConstantinopleFix,
EOFv1,
Frontier,
GrayGlacier,
Homestead,
Istanbul,
London,
MuirGlacier,
Osaka,
Paris,
Prague,
Shanghai,
)
from .forks.transition import (
BerlinToLondonAt5,
BPO1ToBPO2AtTime15k,
BPO2ToBPO3AtTime15k,
BPO3ToBPO4AtTime15k,
CancunToPragueAtTime15k,
OsakaToBPO1AtTime15k,
ParisToShanghaiAtTime15k,
PragueToOsakaAtTime15k,
ShanghaiToCancunAtTime15k,
)
from .gas_costs import GasCosts
from .helpers import (
ALL_FORKS,
ALL_FORKS_WITH_TRANSITIONS,
ALL_TRANSITION_FORKS,
Fork,
ForkAdapter,
ForkOrNoneAdapter,
ForkRangeDescriptor,
ForkSet,
ForkSetAdapter,
InvalidForkError,
TransitionFork,
TransitionForkAdapter,
TransitionForkOrNoneAdapter,
forks_from,
forks_from_until,
get_closest_fork,
get_deployed_forks,
get_development_forks,
get_fork_by_name,
get_forks,
get_forks_with_no_descendants,
get_forks_with_no_parents,
get_from_until_fork_set,
get_last_descendants,
get_relative_fork_markers,
get_selected_fork_set,
get_transition_fork_predecessor,
get_transition_fork_successor,
get_transition_forks,
transition_fork_from_to,
transition_fork_to,
)
# Explicit public API of the package; mirrors the names imported above.
__all__ = [
    "ALL_FORKS_WITH_TRANSITIONS",
    "ALL_FORKS",
    "ALL_TRANSITION_FORKS",
    "Fork",
    "ForkAdapter",
    "ForkOrNoneAdapter",
    "ForkSet",
    "ForkSetAdapter",
    "TransitionFork",
    "TransitionForkAdapter",
    "TransitionForkOrNoneAdapter",
    "ForkAttribute",
    "Amsterdam",
    "ArrowGlacier",
    "Berlin",
    "BerlinToLondonAt5",
    "Byzantium",
    "Constantinople",
    "ConstantinopleFix",
    "EOFv1",
    "ForkRangeDescriptor",
    "Frontier",
    "GrayGlacier",
    "Homestead",
    "InvalidForkError",
    "Istanbul",
    "London",
    "Paris",
    "ParisToShanghaiAtTime15k",
    "MuirGlacier",
    "Shanghai",
    "ShanghaiToCancunAtTime15k",
    "Cancun",
    "CancunToPragueAtTime15k",
    "Prague",
    "PragueToOsakaAtTime15k",
    "Osaka",
    "OsakaToBPO1AtTime15k",
    "BPO1",
    "BPO1ToBPO2AtTime15k",
    "BPO2",
    "BPO2ToBPO3AtTime15k",
    "BPO3",
    "BPO3ToBPO4AtTime15k",
    "BPO4",
    "BPO5",
    "get_transition_forks",
    "forks_from",
    "forks_from_until",
    "get_closest_fork",
    "get_deployed_forks",
    "get_development_forks",
    "get_transition_fork_predecessor",
    "get_transition_fork_successor",
    "get_fork_by_name",
    "get_forks_with_no_descendants",
    "get_forks_with_no_parents",
    "get_relative_fork_markers",
    "get_forks",
    "get_from_until_fork_set",
    "get_last_descendants",
    "get_selected_fork_set",
    "transition_fork_from_to",
    "transition_fork_to",
    "GasCosts",
]
# --- dataset row metadata: language=python, license=MIT, commit=88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 ---
# --- file: src/ethereum_test_forks/forks/forks.py (ethereum/execution-spec-tests) ---
"""All Ethereum fork class definitions."""
from dataclasses import replace
from hashlib import sha256
from os.path import realpath
from pathlib import Path
from typing import List, Literal, Mapping, Optional, Sized, Tuple
from ethereum_test_base_types import AccessList, Address, BlobSchedule, Bytes, ForkBlobSchedule
from ethereum_test_base_types.conversions import BytesConvertible
from ethereum_test_vm import EVMCodeType, Opcodes
from ..base_fork import (
BaseFeeChangeCalculator,
BaseFeePerGasCalculator,
BaseFork,
BlobGasPriceCalculator,
CalldataGasCalculator,
ExcessBlobGasCalculator,
MemoryExpansionGasCalculator,
TransactionDataFloorCostCalculator,
TransactionIntrinsicCostCalculator,
)
from ..gas_costs import GasCosts
from .helpers import ceiling_division, fake_exponential
# Absolute path of this module (symlinks resolved) and its directory;
# used below when locating data files shipped next to the module.
CURRENT_FILE = Path(realpath(__file__))
CURRENT_FOLDER = CURRENT_FILE.parent
# All forks must be listed here !!! in the order they were introduced !!!
class Frontier(BaseFork, solc_name="homestead"):
"""Frontier fork."""
@classmethod
def transition_tool_name(cls, *, block_number: int = 0, timestamp: int = 0) -> str:
"""
Return fork name as it's meant to be passed to the transition tool for
execution.
"""
del block_number, timestamp
if cls._transition_tool_name is not None:
return cls._transition_tool_name
return cls.name()
@classmethod
def solc_name(cls) -> str:
"""Return fork name as it's meant to be passed to the solc compiler."""
if cls._solc_name is not None:
return cls._solc_name
return cls.name().lower()
    # -- Header field requirements: none of the post-genesis fields exist yet --
    @classmethod
    def header_base_fee_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain base fee."""
        del block_number, timestamp
        return False
    @classmethod
    def header_prev_randao_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain Prev Randao value."""
        del block_number, timestamp
        return False
    @classmethod
    def header_zero_difficulty_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not have difficulty zero."""
        del block_number, timestamp
        return False
    @classmethod
    def header_withdrawals_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain withdrawals."""
        del block_number, timestamp
        return False
    @classmethod
    def header_excess_blob_gas_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain excess blob gas."""
        del block_number, timestamp
        return False
    @classmethod
    def header_blob_gas_used_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain blob gas used."""
        del block_number, timestamp
        return False
    @classmethod
    def gas_costs(cls, *, block_number: int = 0, timestamp: int = 0) -> GasCosts:
        """
        Return dataclass with the defined gas costs constants for genesis.

        Later forks override individual fields via `dataclasses.replace`
        (imported at the top of this module) rather than re-declaring the
        whole table.
        """
        del block_number, timestamp
        return GasCosts(
            G_JUMPDEST=1,
            G_BASE=2,
            G_VERY_LOW=3,
            G_LOW=5,
            G_MID=8,
            G_HIGH=10,
            G_WARM_ACCOUNT_ACCESS=100,
            G_COLD_ACCOUNT_ACCESS=2_600,
            G_ACCESS_LIST_ADDRESS=2_400,
            G_ACCESS_LIST_STORAGE=1_900,
            G_WARM_SLOAD=100,
            G_COLD_SLOAD=2_100,
            G_STORAGE_SET=20_000,
            G_STORAGE_RESET=2_900,
            R_STORAGE_CLEAR=4_800,
            G_SELF_DESTRUCT=5_000,
            G_CREATE=32_000,
            G_CODE_DEPOSIT_BYTE=200,
            G_INITCODE_WORD=2,
            G_CALL_VALUE=9_000,
            G_CALL_STIPEND=2_300,
            G_NEW_ACCOUNT=25_000,
            G_EXP=10,
            G_EXP_BYTE=50,
            G_MEMORY=3,
            G_TX_DATA_ZERO=4,
            G_TX_DATA_NON_ZERO=68,
            G_TX_DATA_STANDARD_TOKEN_COST=0,
            G_TX_DATA_FLOOR_TOKEN_COST=0,
            G_TRANSACTION=21_000,
            G_TRANSACTION_CREATE=32_000,
            G_LOG=375,
            G_LOG_DATA=8,
            G_LOG_TOPIC=375,
            G_KECCAK_256=30,
            G_KECCAK_256_WORD=6,
            G_COPY=3,
            G_BLOCKHASH=20,
            G_AUTHORIZATION=0,
            R_AUTHORIZATION_EXISTING_AUTHORITY=0,
        )
@classmethod
def memory_expansion_gas_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> MemoryExpansionGasCalculator:
"""
Return callable that calculates the gas cost of memory expansion for
the fork.
"""
gas_costs = cls.gas_costs(block_number=block_number, timestamp=timestamp)
def fn(*, new_bytes: int, previous_bytes: int = 0) -> int:
if new_bytes <= previous_bytes:
return 0
new_words = ceiling_division(new_bytes, 32)
previous_words = ceiling_division(previous_bytes, 32)
def c(w: int) -> int:
return (gas_costs.G_MEMORY * w) + ((w * w) // 512)
return c(new_words) - c(previous_words)
return fn
@classmethod
def calldata_gas_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> CalldataGasCalculator:
"""
Return callable that calculates the transaction gas cost for its
calldata depending on its contents.
"""
gas_costs = cls.gas_costs(block_number=block_number, timestamp=timestamp)
def fn(*, data: BytesConvertible, floor: bool = False) -> int:
del floor
cost = 0
for b in Bytes(data):
if b == 0:
cost += gas_costs.G_TX_DATA_ZERO
else:
cost += gas_costs.G_TX_DATA_NON_ZERO
return cost
return fn
    # -- EIP-1559-style base fee machinery does not exist at Frontier --
    @classmethod
    def base_fee_per_gas_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> BaseFeePerGasCalculator:
        """
        Return a callable that calculates the base fee per gas at a given fork.

        Raises:
            NotImplementedError: always; no base fee exists at this fork.
        """
        raise NotImplementedError(f"Base fee per gas calculator is not supported in {cls.name()}")
    @classmethod
    def base_fee_change_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> BaseFeeChangeCalculator:
        """
        Return a callable that calculates the gas that needs to be used to
        change the base fee.

        Raises:
            NotImplementedError: always; no base fee exists at this fork.
        """
        raise NotImplementedError(f"Base fee change calculator is not supported in {cls.name()}")
    @classmethod
    def base_fee_max_change_denominator(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the base fee max change denominator at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(
            f"Base fee max change denominator is not supported in {cls.name()}"
        )
    @classmethod
    def base_fee_elasticity_multiplier(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the base fee elasticity multiplier at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(
            f"Base fee elasticity multiplier is not supported in {cls.name()}"
        )
@classmethod
def transaction_data_floor_cost_calculator(
cls, *, block_number: int = 0, timestamp: int = 0
) -> TransactionDataFloorCostCalculator:
"""At frontier, the transaction data floor cost is a constant zero."""
del block_number, timestamp
def fn(*, data: BytesConvertible) -> int:
del data
return 0
return fn
    @classmethod
    def transaction_intrinsic_cost_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> TransactionIntrinsicCostCalculator:
        """
        Return callable that calculates the intrinsic gas cost of a transaction
        for the fork: base transaction cost, plus per-word initcode cost for
        contract creations, plus the calldata cost.
        """
        gas_costs = cls.gas_costs(block_number=block_number, timestamp=timestamp)
        calldata_gas_calculator = cls.calldata_gas_calculator(
            block_number=block_number, timestamp=timestamp
        )
        def fn(
            *,
            calldata: BytesConvertible = b"",
            contract_creation: bool = False,
            access_list: List[AccessList] | None = None,
            authorization_list_or_count: Sized | int | None = None,
            return_cost_deducted_prior_execution: bool = False,
        ) -> int:
            # Accepted for signature compatibility with later forks; no
            # separate pre-execution deduction exists here.
            del return_cost_deducted_prior_execution
            # Access lists and authorizations are features of later forks;
            # reject them loudly rather than silently ignoring them.
            assert access_list is None, f"Access list is not supported in {cls.name()}"
            assert authorization_list_or_count is None, (
                f"Authorizations are not supported in {cls.name()}"
            )
            intrinsic_cost: int = gas_costs.G_TRANSACTION
            if contract_creation:
                # Per-32-byte-word initcode cost (G_INITCODE_WORD).
                intrinsic_cost += gas_costs.G_INITCODE_WORD * ceiling_division(
                    len(Bytes(calldata)), 32
                )
            return intrinsic_cost + calldata_gas_calculator(data=calldata)
        return fn
    # -- Blob machinery does not exist at Frontier; most accessors raise --
    @classmethod
    def blob_gas_price_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> BlobGasPriceCalculator:
        """
        Return a callable that calculates the blob gas price at a given fork.
        """
        raise NotImplementedError(f"Blob gas price calculator is not supported in {cls.name()}")
    @classmethod
    def excess_blob_gas_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> ExcessBlobGasCalculator:
        """
        Return a callable that calculates the excess blob gas for a block at a
        given fork.
        """
        raise NotImplementedError(f"Excess blob gas calculator is not supported in {cls.name()}")
    @classmethod
    def min_base_fee_per_blob_gas(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the minimum base fee per blob gas at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(f"Base fee per blob gas is not supported in {cls.name()}")
    @classmethod
    def blob_base_fee_update_fraction(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the blob base fee update fraction at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(
            f"Blob base fee update fraction is not supported in {cls.name()}"
        )
    @classmethod
    def blob_gas_per_blob(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the amount of blob gas used per blob at a given fork."""
        del block_number, timestamp
        # Returns 0 (instead of raising) so callers can sum blob gas safely.
        return 0
    @classmethod
    def supports_blobs(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """Blobs are not supported at Frontier."""
        del block_number, timestamp
        return False
    @classmethod
    def target_blobs_per_block(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the target number of blobs per block at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(f"Target blobs per block is not supported in {cls.name()}")
    @classmethod
    def max_blobs_per_block(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the max number of blobs per block at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(f"Max blobs per block is not supported in {cls.name()}")
    @classmethod
    def blob_reserve_price_active(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """
        Return whether the fork uses a reserve price mechanism for blobs or
        not.
        """
        del block_number, timestamp
        raise NotImplementedError(f"Blob reserve price is not supported in {cls.name()}")
    @classmethod
    def blob_base_cost(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the base cost of a blob at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(f"Blob base cost is not supported in {cls.name()}")
    @classmethod
    def full_blob_tx_wrapper_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> int | None:
        """Return the version of the full blob transaction wrapper."""
        raise NotImplementedError(
            f"Full blob transaction wrapper version is not supported in {cls.name()}"
        )
    @classmethod
    def max_blobs_per_tx(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """Return the max number of blobs per tx at a given fork."""
        del block_number, timestamp
        raise NotImplementedError(f"Max blobs per tx is not supported in {cls.name()}")
    @classmethod
    def blob_schedule(cls, *, block_number: int = 0, timestamp: int = 0) -> BlobSchedule | None:
        """At genesis, no blob schedule is used."""
        del block_number, timestamp
        return None
    @classmethod
    def header_requests_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain beacon chain requests."""
        del block_number, timestamp
        return False
    @classmethod
    def header_bal_hash_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain block access list hash."""
        del block_number, timestamp
        return False
    # -- Engine API: not available at Frontier; version accessors return None --
    @classmethod
    def engine_new_payload_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """At genesis, payloads cannot be sent through the engine API."""
        del block_number, timestamp
        return None
    @classmethod
    def header_beacon_root_required(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, header must not contain parent beacon block root."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_new_payload_blob_hashes(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, payloads do not have blob hashes."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_new_payload_beacon_root(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, payloads do not have a parent beacon block root."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_new_payload_requests(cls, *, block_number: int = 0, timestamp: int = 0) -> bool:
        """At genesis, payloads do not have requests."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_execution_payload_block_access_list(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """At genesis, payloads do not have block access list."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_new_payload_target_blobs_per_block(
        cls,
        *,
        block_number: int = 0,
        timestamp: int = 0,
    ) -> bool:
        """At genesis, payloads do not have target blobs per block."""
        del block_number, timestamp
        return False
    @classmethod
    def engine_payload_attribute_target_blobs_per_block(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        At genesis, payload attributes do not include the target blobs per
        block.
        """
        del block_number, timestamp
        return False
    @classmethod
    def engine_payload_attribute_max_blobs_per_block(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> bool:
        """
        At genesis, payload attributes do not include the max blobs per block.
        """
        del block_number, timestamp
        return False
    @classmethod
    def engine_forkchoice_updated_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """
        At genesis, forkchoice updates cannot be sent through the engine API.
        Delegates to `engine_new_payload_version` so both stay in sync.
        """
        return cls.engine_new_payload_version(block_number=block_number, timestamp=timestamp)
    @classmethod
    def engine_get_payload_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """At genesis, payloads cannot be retrieved through the engine API."""
        return cls.engine_new_payload_version(block_number=block_number, timestamp=timestamp)
    @classmethod
    def engine_get_blobs_version(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> Optional[int]:
        """At genesis, blobs cannot be retrieved through the engine API."""
        del block_number, timestamp
        return None
    @classmethod
    def get_reward(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        At Genesis the expected reward amount in wei is
        5_000_000_000_000_000_000.
        """
        del block_number, timestamp
        return 5_000_000_000_000_000_000
    @classmethod
    def tx_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[int]:
        """At Genesis, only legacy transactions (type 0) are allowed."""
        del block_number, timestamp
        return [0]
    @classmethod
    def contract_creating_tx_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[int]:
        """At Genesis, only legacy transactions (type 0) are allowed."""
        del block_number, timestamp
        return [0]
    @classmethod
    def transaction_gas_limit_cap(cls, *, block_number: int = 0, timestamp: int = 0) -> int | None:
        """At Genesis, no transaction gas limit cap is imposed."""
        del block_number, timestamp
        return None
    @classmethod
    def block_rlp_size_limit(cls, *, block_number: int = 0, timestamp: int = 0) -> int | None:
        """At Genesis, no RLP block size limit is imposed."""
        del block_number, timestamp
        return None
    @classmethod
    def precompiles(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """At Genesis, no pre-compiles are present."""
        del block_number, timestamp
        return []
    @classmethod
    def system_contracts(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """At Genesis, no system-contracts are present."""
        del block_number, timestamp
        return []
    @classmethod
    def evm_code_types(cls, *, block_number: int = 0, timestamp: int = 0) -> List[EVMCodeType]:
        """At Genesis, only legacy EVM code is supported."""
        del block_number, timestamp
        return [EVMCodeType.LEGACY]
    @classmethod
    def max_code_size(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        At genesis, there is no upper bound for code size (bounded by block gas
        limit).
        However, the default is set to the limit of EIP-170 (Spurious Dragon)
        """
        del block_number, timestamp
        return 0x6000
    @classmethod
    def max_stack_height(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """At genesis, the maximum stack height is 1024."""
        del block_number, timestamp
        return 1024
@classmethod
def max_initcode_size(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
"""At genesis, there is no upper bound for initcode size."""
del block_number, timestamp
"""However, the default is set to the limit of EIP-3860 (Shanghai)"""
return 0xC000
    @classmethod
    def call_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """Return list of call opcodes supported by the fork."""
        del block_number, timestamp
        # Only the original CALL/CALLCODE pair exists at genesis.
        return [
            (Opcodes.CALL, EVMCodeType.LEGACY),
            (Opcodes.CALLCODE, EVMCodeType.LEGACY),
        ]
@classmethod
def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
"""Return list of Opcodes that are valid to work on this fork."""
del block_number, timestamp
return [
Opcodes.STOP,
Opcodes.ADD,
Opcodes.MUL,
Opcodes.SUB,
Opcodes.DIV,
Opcodes.SDIV,
Opcodes.MOD,
Opcodes.SMOD,
Opcodes.ADDMOD,
Opcodes.MULMOD,
Opcodes.EXP,
Opcodes.SIGNEXTEND,
Opcodes.LT,
Opcodes.GT,
Opcodes.SLT,
Opcodes.SGT,
Opcodes.EQ,
Opcodes.ISZERO,
Opcodes.AND,
Opcodes.OR,
Opcodes.XOR,
Opcodes.NOT,
Opcodes.BYTE,
Opcodes.SHA3,
Opcodes.ADDRESS,
Opcodes.BALANCE,
Opcodes.ORIGIN,
Opcodes.CALLER,
Opcodes.CALLVALUE,
Opcodes.CALLDATALOAD,
Opcodes.CALLDATASIZE,
Opcodes.CALLDATACOPY,
Opcodes.CODESIZE,
Opcodes.CODECOPY,
Opcodes.GASPRICE,
Opcodes.EXTCODESIZE,
Opcodes.EXTCODECOPY,
Opcodes.BLOCKHASH,
Opcodes.COINBASE,
Opcodes.TIMESTAMP,
Opcodes.NUMBER,
Opcodes.PREVRANDAO,
Opcodes.GASLIMIT,
Opcodes.POP,
Opcodes.MLOAD,
Opcodes.MSTORE,
Opcodes.MSTORE8,
Opcodes.SLOAD,
Opcodes.SSTORE,
Opcodes.PC,
Opcodes.MSIZE,
Opcodes.GAS,
Opcodes.JUMP,
Opcodes.JUMPI,
Opcodes.JUMPDEST,
Opcodes.PUSH1,
Opcodes.PUSH2,
Opcodes.PUSH3,
Opcodes.PUSH4,
Opcodes.PUSH5,
Opcodes.PUSH6,
Opcodes.PUSH7,
Opcodes.PUSH8,
Opcodes.PUSH9,
Opcodes.PUSH10,
Opcodes.PUSH11,
Opcodes.PUSH12,
Opcodes.PUSH13,
Opcodes.PUSH14,
Opcodes.PUSH15,
Opcodes.PUSH16,
Opcodes.PUSH17,
Opcodes.PUSH18,
Opcodes.PUSH19,
Opcodes.PUSH20,
Opcodes.PUSH21,
Opcodes.PUSH22,
Opcodes.PUSH23,
Opcodes.PUSH24,
Opcodes.PUSH25,
Opcodes.PUSH26,
Opcodes.PUSH27,
Opcodes.PUSH28,
Opcodes.PUSH29,
Opcodes.PUSH30,
Opcodes.PUSH31,
Opcodes.PUSH32,
Opcodes.DUP1,
Opcodes.DUP2,
Opcodes.DUP3,
Opcodes.DUP4,
Opcodes.DUP5,
Opcodes.DUP6,
Opcodes.DUP7,
Opcodes.DUP8,
Opcodes.DUP9,
Opcodes.DUP10,
Opcodes.DUP11,
Opcodes.DUP12,
Opcodes.DUP13,
Opcodes.DUP14,
Opcodes.DUP15,
Opcodes.DUP16,
Opcodes.SWAP1,
Opcodes.SWAP2,
Opcodes.SWAP3,
Opcodes.SWAP4,
Opcodes.SWAP5,
Opcodes.SWAP6,
Opcodes.SWAP7,
Opcodes.SWAP8,
Opcodes.SWAP9,
Opcodes.SWAP10,
Opcodes.SWAP11,
Opcodes.SWAP12,
Opcodes.SWAP13,
Opcodes.SWAP14,
Opcodes.SWAP15,
Opcodes.SWAP16,
Opcodes.LOG0,
Opcodes.LOG1,
Opcodes.LOG2,
Opcodes.LOG3,
Opcodes.LOG4,
Opcodes.CREATE,
Opcodes.CALL,
Opcodes.CALLCODE,
Opcodes.RETURN,
Opcodes.SELFDESTRUCT,
]
    @classmethod
    def create_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """
        Return the contract-creation opcodes available at this fork.

        At Genesis, only `CREATE` opcode is supported.
        """
        # Arguments are part of the common fork interface (transition forks
        # vary by block/time); they are irrelevant for this fork.
        del block_number, timestamp
        return [
            (Opcodes.CREATE, EVMCodeType.LEGACY),
        ]
    @classmethod
    def max_refund_quotient(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        Return the max refund quotient at Genesis.

        Gas refunds are capped using this divisor.
        """
        del block_number, timestamp  # constant for this fork
        return 2
    @classmethod
    def max_request_type(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """At genesis, no request type is supported, signaled by -1."""
        del block_number, timestamp  # constant for this fork
        # -1 is the sentinel meaning "no request types supported".
        return -1
    @classmethod
    def pre_allocation(cls, *, block_number: int = 0, timestamp: int = 0) -> Mapping:
        """
        Return whether the fork expects pre-allocation of accounts.

        Frontier does not require pre-allocated accounts
        """
        del block_number, timestamp
        # Empty mapping: no accounts need to exist in the state beforehand.
        return {}
    @classmethod
    def pre_allocation_blockchain(cls, *, block_number: int = 0, timestamp: int = 0) -> Mapping:
        """
        Return whether the fork expects pre-allocation of accounts.

        Frontier does not require pre-allocated accounts
        """
        del block_number, timestamp
        # Blockchain-test variant; also empty at this fork.
        return {}
class Homestead(Frontier):
    """Homestead fork."""

    @classmethod
    def precompiles(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """
        At Homestead, EC-recover, SHA256, RIPEMD160, and Identity pre-compiles
        are introduced.
        """
        # New precompiles are prepended to whatever the parent fork defines.
        return [
            Address(1, label="ECREC"),
            Address(2, label="SHA256"),
            Address(3, label="RIPEMD160"),
            Address(4, label="ID"),
        ] + super(Homestead, cls).precompiles(block_number=block_number, timestamp=timestamp)

    @classmethod
    def call_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """At Homestead, DELEGATECALL opcode was introduced."""
        return [(Opcodes.DELEGATECALL, EVMCodeType.LEGACY)] + super(Homestead, cls).call_opcodes(
            block_number=block_number, timestamp=timestamp
        )

    @classmethod
    def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
        """Return the list of Opcodes that are valid to work on this fork."""
        del block_number, timestamp
        return [Opcodes.DELEGATECALL] + super(Homestead, cls).valid_opcodes()

    @classmethod
    def transaction_intrinsic_cost_calculator(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> TransactionIntrinsicCostCalculator:
        """
        At Homestead, the transaction intrinsic cost needs to take contract
        creation into account.
        """
        # Wrap the parent fork's calculator and add the contract-creation
        # surcharge on top of its result.
        super_fn = super(Homestead, cls).transaction_intrinsic_cost_calculator(
            block_number=block_number, timestamp=timestamp
        )
        gas_costs = cls.gas_costs(block_number=block_number, timestamp=timestamp)

        def fn(
            *,
            calldata: BytesConvertible = b"",
            contract_creation: bool = False,
            access_list: List[AccessList] | None = None,
            authorization_list_or_count: Sized | int | None = None,
            return_cost_deducted_prior_execution: bool = False,
        ) -> int:
            # Not used at this fork; accepted only for signature
            # compatibility with later forks' calculators.
            del return_cost_deducted_prior_execution
            intrinsic_cost: int = super_fn(
                calldata=calldata,
                contract_creation=contract_creation,
                access_list=access_list,
                authorization_list_or_count=authorization_list_or_count,
            )
            if contract_creation:
                intrinsic_cost += gas_costs.G_TRANSACTION_CREATE
            return intrinsic_cost

        return fn
class DAOFork(Homestead, ignore=True):
    """DAO fork."""

    # `ignore=True` — presumably excludes this fork from the regular fork
    # listings; confirm against the base fork metaclass.
    pass
class Tangerine(DAOFork, ignore=True):
    """Tangerine fork (EIP-150)."""

    # Body intentionally empty: inherits everything from DAOFork.
    pass
class SpuriousDragon(Tangerine, ignore=True):
    """SpuriousDragon fork (EIP-155, EIP-158)."""

    # Body intentionally empty: inherits everything from Tangerine.
    pass
class Byzantium(Homestead):
    """Byzantium fork."""

    @classmethod
    def get_reward(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        At Byzantium, the block reward is reduced to 3_000_000_000_000_000_000
        wei.
        """
        del block_number, timestamp
        return 3_000_000_000_000_000_000

    @classmethod
    def precompiles(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """
        At Byzantium, pre-compiles for bigint modular exponentiation, addition
        and scalar multiplication on elliptic curve alt_bn128, and optimal ate
        pairing check on elliptic curve alt_bn128 are introduced.
        """
        return [
            Address(5, label="MODEXP"),
            Address(6, label="BN254_ADD"),
            Address(7, label="BN254_MUL"),
            Address(8, label="BN254_PAIRING"),
        ] + super(Byzantium, cls).precompiles(block_number=block_number, timestamp=timestamp)

    @classmethod
    def max_code_size(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        # NOTE: Move this to Spurious Dragon once this fork is introduced. See
        # EIP-170.
        """
        At Spurious Dragon, an upper bound was introduced for max contract code
        size.
        """
        del block_number, timestamp
        # 0x6000 == 24576 bytes.
        return 0x6000

    @classmethod
    def call_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """At Byzantium, STATICCALL opcode was introduced."""
        return [(Opcodes.STATICCALL, EVMCodeType.LEGACY)] + super(Byzantium, cls).call_opcodes(
            block_number=block_number, timestamp=timestamp
        )

    @classmethod
    def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
        """Return list of Opcodes that are valid to work on this fork."""
        del block_number, timestamp
        # Opcodes introduced at Byzantium, prepended to the inherited list.
        return [
            Opcodes.REVERT,
            Opcodes.RETURNDATASIZE,
            Opcodes.RETURNDATACOPY,
            Opcodes.STATICCALL,
        ] + super(Byzantium, cls).valid_opcodes()
class Constantinople(Byzantium):
    """Constantinople fork."""

    @classmethod
    def get_reward(cls, *, block_number: int = 0, timestamp: int = 0) -> int:
        """
        At Constantinople, the block reward is reduced to
        2_000_000_000_000_000_000 wei.
        """
        del block_number, timestamp
        return 2_000_000_000_000_000_000

    @classmethod
    def create_opcodes(
        cls, *, block_number: int = 0, timestamp: int = 0
    ) -> List[Tuple[Opcodes, EVMCodeType]]:
        """At Constantinople, `CREATE2` opcode is added."""
        return [(Opcodes.CREATE2, EVMCodeType.LEGACY)] + super(Constantinople, cls).create_opcodes(
            block_number=block_number, timestamp=timestamp
        )

    @classmethod
    def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
        """Return list of Opcodes that are valid to work on this fork."""
        del block_number, timestamp
        # Opcodes introduced at Constantinople, prepended to the inherited
        # list.
        return [
            Opcodes.SHL,
            Opcodes.SHR,
            Opcodes.SAR,
            Opcodes.EXTCODEHASH,
            Opcodes.CREATE2,
        ] + super(Constantinople, cls).valid_opcodes()
class ConstantinopleFix(Constantinople, solc_name="constantinople"):
    """Constantinople Fix fork."""

    # Body intentionally empty: inherits everything from Constantinople;
    # only the solc fork name mapping differs (via `solc_name`).
    pass
class Istanbul(ConstantinopleFix):
"""Istanbul fork."""
    @classmethod
    def precompiles(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Address]:
        """At Istanbul, pre-compile for blake2 compression is introduced."""
        # New precompile is prepended to the parent fork's list.
        return [
            Address(9, label="BLAKE2F"),
        ] + super(Istanbul, cls).precompiles(block_number=block_number, timestamp=timestamp)
    @classmethod
    def valid_opcodes(cls, *, block_number: int = 0, timestamp: int = 0) -> List[Opcodes]:
        """Return list of Opcodes that are valid to work on this fork."""
        del block_number, timestamp
        # CHAINID and SELFBALANCE are introduced at Istanbul.
        return [Opcodes.CHAINID, Opcodes.SELFBALANCE] + super(Istanbul, cls).valid_opcodes()
@classmethod
def gas_costs(cls, *, block_number: int = 0, timestamp: int = 0) -> GasCosts:
"""
On Istanbul, the non-zero transaction data byte cost is reduced to 16
due to EIP-2028.
"""
return replace(
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/forks/helpers.py | src/ethereum_test_forks/forks/helpers.py | """Helpers used to return fork-specific values."""
def ceiling_division(a: int, b: int) -> int:
    """
    Compute ceil(a / b) using integer arithmetic only.

    Floating point is deliberately avoided: many EVM formulas require
    exact integer results.
    """
    quotient, remainder = divmod(a, b)
    # Python's floor division rounds toward negative infinity, so any
    # non-zero remainder means the true quotient must be bumped up by one.
    return quotient + 1 if remainder else quotient
def fake_exponential(factor: int, numerator: int, denominator: int) -> int:
    """
    Approximate factor * e**(numerator / denominator) in pure integers.

    Accumulates the Taylor series of the exponential entirely with integer
    arithmetic; used for the blob gas cost calculation.
    """
    total = 0
    term = factor * denominator
    iteration = 1
    # Each step produces the next Taylor term; once integer division
    # truncates a term to zero the series has converged.
    while term > 0:
        total += term
        term = term * numerator // (denominator * iteration)
        iteration += 1
    return total // denominator
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/forks/transition.py | src/ethereum_test_forks/forks/transition.py | """List of all transition fork definitions."""
from ..transition_base_fork import transition_fork
from .forks import BPO1, BPO2, BPO3, BPO4, Berlin, Cancun, London, Osaka, Paris, Prague, Shanghai
# Transition Forks
@transition_fork(to_fork=London, at_block=5)
class BerlinToLondonAt5(Berlin):
    """Berlin to London transition, activating at block number 5."""
@transition_fork(to_fork=Shanghai, at_timestamp=15_000)
class ParisToShanghaiAtTime15k(Paris):
    """Paris to Shanghai transition, activating at timestamp 15_000."""
@transition_fork(to_fork=Cancun, at_timestamp=15_000)
class ShanghaiToCancunAtTime15k(Shanghai):
    """Shanghai to Cancun transition, activating at timestamp 15_000."""
@transition_fork(to_fork=Prague, at_timestamp=15_000)
class CancunToPragueAtTime15k(Cancun):
    """Cancun to Prague transition, activating at timestamp 15_000."""
@transition_fork(to_fork=Osaka, at_timestamp=15_000)
class PragueToOsakaAtTime15k(Prague):
    """Prague to Osaka transition, activating at timestamp 15_000."""
@transition_fork(to_fork=BPO1, at_timestamp=15_000)
class OsakaToBPO1AtTime15k(Osaka):
    """Osaka to BPO1 transition, activating at timestamp 15_000."""
@transition_fork(to_fork=BPO2, at_timestamp=15_000)
class BPO1ToBPO2AtTime15k(BPO1):
    """BPO1 to BPO2 transition, activating at timestamp 15_000."""
@transition_fork(to_fork=BPO3, at_timestamp=15_000)
class BPO2ToBPO3AtTime15k(BPO2):
    """BPO2 to BPO3 transition, activating at timestamp 15_000."""
@transition_fork(to_fork=BPO4, at_timestamp=15_000)
class BPO3ToBPO4AtTime15k(BPO3):
    """BPO3 to BPO4 transition, activating at timestamp 15_000."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/forks/__init__.py | src/ethereum_test_forks/forks/__init__.py | """Listings of all forks, current and upcoming."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/tests/test_forks.py | src/ethereum_test_forks/tests/test_forks.py | """Test fork utilities."""
from typing import Dict, cast
import pytest
from pydantic import BaseModel
from ethereum_test_base_types import BlobSchedule
from ..forks.forks import (
BPO1,
BPO2,
BPO3,
BPO4,
Berlin,
Cancun,
Frontier,
Homestead,
Istanbul,
London,
Osaka,
Paris,
Prague,
Shanghai,
)
from ..forks.transition import (
BerlinToLondonAt5,
BPO1ToBPO2AtTime15k,
BPO2ToBPO3AtTime15k,
BPO3ToBPO4AtTime15k,
CancunToPragueAtTime15k,
OsakaToBPO1AtTime15k,
ParisToShanghaiAtTime15k,
PragueToOsakaAtTime15k,
ShanghaiToCancunAtTime15k,
)
from ..helpers import (
Fork,
ForkAdapter,
ForkOrNoneAdapter,
ForkSetAdapter,
forks_from,
forks_from_until,
get_deployed_forks,
get_forks,
transition_fork_from_to,
transition_fork_to,
)
from ..transition_base_fork import transition_fork
# Bounds used by the fork-listing tests in this module.
FIRST_DEPLOYED = Frontier
LAST_DEPLOYED = Prague
LAST_DEVELOPMENT = Osaka
DEVELOPMENT_FORKS = [Osaka]
def test_transition_forks() -> None:
"""Test transition fork utilities."""
assert transition_fork_from_to(Berlin, London) == BerlinToLondonAt5
assert transition_fork_from_to(Berlin, Paris) is None
assert transition_fork_to(Shanghai) == {ParisToShanghaiAtTime15k}
# Test forks transitioned to and from
assert BerlinToLondonAt5.transitions_to() == London # type: ignore
assert BerlinToLondonAt5.transitions_from() == Berlin # type: ignore
assert BerlinToLondonAt5.transition_tool_name(block_number=4, timestamp=0) == "Berlin"
assert BerlinToLondonAt5.transition_tool_name(block_number=5, timestamp=0) == "London"
# Default values of transition forks is the transition block
assert BerlinToLondonAt5.transition_tool_name() == "London"
assert (
ParisToShanghaiAtTime15k.transition_tool_name(block_number=0, timestamp=14_999) == "Merge"
)
assert (
ParisToShanghaiAtTime15k.transition_tool_name(block_number=0, timestamp=15_000)
== "Shanghai"
)
assert ParisToShanghaiAtTime15k.transition_tool_name() == "Shanghai"
assert BerlinToLondonAt5.header_base_fee_required(block_number=4, timestamp=0) is False
assert BerlinToLondonAt5.header_base_fee_required(block_number=5, timestamp=0) is True
assert (
ParisToShanghaiAtTime15k.header_withdrawals_required(block_number=0, timestamp=14_999)
is False
)
assert (
ParisToShanghaiAtTime15k.header_withdrawals_required(block_number=0, timestamp=15_000)
is True
)
assert (
ParisToShanghaiAtTime15k.engine_new_payload_version(block_number=0, timestamp=14_999) == 1
)
assert (
ParisToShanghaiAtTime15k.engine_new_payload_version(block_number=0, timestamp=15_000) == 2
)
assert BerlinToLondonAt5.fork_at(block_number=4, timestamp=0) == Berlin
assert BerlinToLondonAt5.fork_at(block_number=5, timestamp=0) == London
assert ParisToShanghaiAtTime15k.fork_at(block_number=0, timestamp=14_999) == Paris
assert ParisToShanghaiAtTime15k.fork_at(block_number=0, timestamp=15_000) == Shanghai
assert ParisToShanghaiAtTime15k.fork_at() == Paris
assert ParisToShanghaiAtTime15k.fork_at(block_number=10_000_000, timestamp=14_999) == Paris
def test_forks_from() -> None:  # noqa: D103
    # forks_from(X) should start at X and, by default, end at the last
    # deployed fork.
    assert forks_from(Paris)[0] == Paris
    assert forks_from(Paris)[-1] == LAST_DEPLOYED
    assert forks_from(Paris, deployed_only=True)[0] == Paris
    assert forks_from(Paris, deployed_only=True)[-1] == LAST_DEPLOYED
    assert forks_from(Paris, deployed_only=False)[0] == Paris
    # Too flaky
    # assert forks_from(Paris, deployed_only=False)[-1] == LAST_DEVELOPMENT
def test_forks() -> None:
"""Test fork utilities."""
assert forks_from_until(Berlin, Berlin) == [Berlin]
assert forks_from_until(Berlin, London) == [Berlin, London]
assert forks_from_until(Berlin, Paris) == [
Berlin,
London,
Paris,
]
# Test fork names
assert London.name() == "London"
assert ParisToShanghaiAtTime15k.name() == "ParisToShanghaiAtTime15k"
assert f"{London}" == "London"
assert f"{ParisToShanghaiAtTime15k}" == "ParisToShanghaiAtTime15k"
# Merge name will be changed to paris, but we need to check the inheriting
# fork name is still the default
assert Paris.transition_tool_name() == "Merge"
assert Shanghai.transition_tool_name() == "Shanghai"
assert f"{Paris}" == "Paris"
assert f"{Shanghai}" == "Shanghai"
assert f"{ParisToShanghaiAtTime15k}" == "ParisToShanghaiAtTime15k"
# Test some fork properties
assert Berlin.header_base_fee_required(block_number=0, timestamp=0) is False
assert London.header_base_fee_required(block_number=0, timestamp=0) is True
assert Paris.header_base_fee_required(block_number=0, timestamp=0) is True
# Default values of normal forks if the genesis block
assert Paris.header_base_fee_required() is True
# Transition forks too
assert (
cast(Fork, BerlinToLondonAt5).header_base_fee_required(block_number=4, timestamp=0)
is False
)
assert (
cast(Fork, BerlinToLondonAt5).header_base_fee_required(block_number=5, timestamp=0) is True
)
assert (
cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required(
block_number=0, timestamp=14_999
)
is False
)
assert (
cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required(
block_number=0, timestamp=15_000
)
is True
)
assert cast(Fork, ParisToShanghaiAtTime15k).header_withdrawals_required() is True
class ForkInPydanticModel(BaseModel):
    """Fork in pydantic model."""

    # Fields exercise serialization/deserialization of Fork (and optional
    # Fork) annotations in pydantic models.
    fork_1: Fork
    fork_2: Fork
    fork_3: Fork | None
def test_fork_in_pydantic_model() -> None:
    """Test fork in pydantic model."""
    # Forks serialize to their names and parse back from name strings.
    model = ForkInPydanticModel(fork_1=Paris, fork_2=ParisToShanghaiAtTime15k, fork_3=None)
    assert model.model_dump() == {
        "fork_1": "Paris",
        "fork_2": "ParisToShanghaiAtTime15k",
        "fork_3": None,
    }
    assert (
        model.model_dump_json()
        == '{"fork_1":"Paris","fork_2":"ParisToShanghaiAtTime15k","fork_3":null}'
    )
    model = ForkInPydanticModel.model_validate_json(
        '{"fork_1": "Paris", "fork_2": "ParisToShanghaiAtTime15k", "fork_3": null}'
    )
    assert model.fork_1 == Paris
    assert model.fork_2 == ParisToShanghaiAtTime15k
    assert model.fork_3 is None
def test_fork_comparison() -> None:
"""Test fork comparison operators."""
# Test fork comparison
assert Paris > Berlin
assert not Berlin > Paris
assert Berlin < Paris
assert not Paris < Berlin
assert Paris >= Berlin
assert not Berlin >= Paris
assert Berlin <= Paris
assert not Paris <= Berlin
assert London > Berlin
assert not Berlin > London
assert Berlin < London
assert not London < Berlin
assert London >= Berlin
assert not Berlin >= London
assert Berlin <= London
assert not London <= Berlin
assert Berlin >= Berlin
assert Berlin <= Berlin
assert not Berlin > Berlin
assert not Berlin < Berlin
fork = Berlin
assert fork >= Berlin
assert fork <= Berlin
assert not fork > Berlin
assert not fork < Berlin
assert fork == Berlin
def test_transition_fork_comparison() -> None:
"""
Test comparing to a transition fork.
The comparison logic is based on the logic we use to generate the tests.
E.g. given transition fork A->B, when filling, and given the from/until
markers, we expect the following logic:
Marker Comparison A->B Included
--------- ------------ ---------------
From A fork >= A True
Until A fork <= A False
From B fork >= B True
Until B fork <= B True
"""
assert BerlinToLondonAt5 >= Berlin
assert not BerlinToLondonAt5 <= Berlin
assert BerlinToLondonAt5 >= London
assert BerlinToLondonAt5 <= London
# Comparisons between transition forks is done against the `transitions_to`
# fork
assert BerlinToLondonAt5 < ParisToShanghaiAtTime15k
assert ParisToShanghaiAtTime15k > BerlinToLondonAt5
assert BerlinToLondonAt5 == BerlinToLondonAt5
assert BerlinToLondonAt5 != ParisToShanghaiAtTime15k
assert BerlinToLondonAt5 <= ParisToShanghaiAtTime15k
assert ParisToShanghaiAtTime15k >= BerlinToLondonAt5
assert sorted(
{
PragueToOsakaAtTime15k,
CancunToPragueAtTime15k,
ParisToShanghaiAtTime15k,
ShanghaiToCancunAtTime15k,
BerlinToLondonAt5,
}
) == [
BerlinToLondonAt5,
ParisToShanghaiAtTime15k,
ShanghaiToCancunAtTime15k,
CancunToPragueAtTime15k,
PragueToOsakaAtTime15k,
]
def test_get_forks() -> None:  # noqa: D103
    # The full fork list starts at the first deployed fork.
    all_forks = get_forks()
    assert all_forks[0] == FIRST_DEPLOYED
    # assert all_forks[-1] == LAST_DEVELOPMENT # Too flaky
def test_deployed_forks() -> None:  # noqa: D103
    # Deployed forks span from the first to the last deployed fork.
    deployed_forks = get_deployed_forks()
    assert deployed_forks[0] == FIRST_DEPLOYED
    assert deployed_forks[-1] == LAST_DEPLOYED
class PrePreAllocFork(Shanghai):
    """Dummy fork used for testing."""

    @classmethod
    def pre_allocation(cls, *, block_number: int = 0, timestamp: int = 0) -> Dict:
        """Return some starting point for allocation."""
        del block_number, timestamp
        return {"test": "test"}
class PreAllocFork(PrePreAllocFork):
    """Dummy fork used for testing."""

    @classmethod
    def pre_allocation(cls, *, block_number: int = 0, timestamp: int = 0) -> Dict:
        """Add allocation to the pre-existing one from previous fork."""
        del block_number, timestamp
        # Dict-union merges this fork's entries with the parent fork's.
        return {"test2": "test2"} | super(PreAllocFork, cls).pre_allocation()
@transition_fork(to_fork=PreAllocFork, at_timestamp=15_000)
class PreAllocTransitionFork(PrePreAllocFork):
    """PrePreAllocFork to PreAllocFork transition at Timestamp 15k."""

    pass
def test_pre_alloc() -> None:  # noqa: D103
    assert PrePreAllocFork.pre_allocation() == {"test": "test"}
    assert PreAllocFork.pre_allocation() == {"test": "test", "test2": "test2"}
    assert PreAllocTransitionFork.pre_allocation() == {
        "test": "test",
        "test2": "test2",
    }
    # NOTE(review): the assertion below repeats the previous one —
    # presumably to check that a second call yields the same result;
    # confirm this is intentional and not a copy-paste duplicate.
    assert PreAllocTransitionFork.pre_allocation() == {
        "test": "test",
        "test2": "test2",
    }
def test_precompiles() -> None:  # noqa: D103
    # NOTE(review): bare comparison (B015 deliberately suppressed) — this
    # asserts nothing; it only checks the call does not raise.
    Cancun.precompiles() == list(range(11))[1:]  # noqa: B015
def test_tx_types() -> None:  # noqa: D103
    # NOTE(review): bare comparison (B015 deliberately suppressed) — this
    # asserts nothing; it only checks the call does not raise.
    Cancun.tx_types() == list(range(4))  # noqa: B015
@pytest.mark.parametrize(
"fork",
[
pytest.param(Berlin, id="Berlin"),
pytest.param(Istanbul, id="Istanbul"),
pytest.param(Homestead, id="Homestead"),
pytest.param(Frontier, id="Frontier"),
],
)
@pytest.mark.parametrize(
"calldata",
[
pytest.param(b"\0", id="zero-data"),
pytest.param(b"\1", id="non-zero-data"),
],
)
@pytest.mark.parametrize(
"create_tx",
[False, True],
)
def test_tx_intrinsic_gas_functions(fork: Fork, calldata: bytes, create_tx: bool) -> None: # noqa: D103
intrinsic_gas = 21_000
if calldata == b"\0":
intrinsic_gas += 4
else:
if fork >= Istanbul:
intrinsic_gas += 16
else:
intrinsic_gas += 68
if create_tx:
if fork >= Homestead:
intrinsic_gas += 32000
intrinsic_gas += 2
assert (
fork.transaction_intrinsic_cost_calculator()(
calldata=calldata,
contract_creation=create_tx,
)
== intrinsic_gas
)
class FutureFork(Osaka):
    """
    Dummy fork used for testing.

    Contains no changes to the blob parameters from the parent fork in order to
    confirm that it's added to the blob schedule even if it doesn't have any
    changes.
    """

    pass
@pytest.mark.parametrize(
"fork,expected_schedule",
[
pytest.param(Frontier, None, id="Frontier"),
pytest.param(
Cancun,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
},
id="Cancun",
),
pytest.param(
Prague,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
"Prague": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
},
id="Prague",
),
pytest.param(
Osaka,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
"Prague": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
"Osaka": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
},
id="Osaka",
),
pytest.param(
CancunToPragueAtTime15k,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
"Prague": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
},
id="CancunToPragueAtTime15k",
),
pytest.param(
PragueToOsakaAtTime15k,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
"Prague": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
"Osaka": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
},
id="PragueToOsakaAtTime15k",
),
pytest.param(
FutureFork,
{
"Cancun": {
"target_blobs_per_block": 3,
"max_blobs_per_block": 6,
"baseFeeUpdateFraction": 3338477,
},
"Prague": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
"Osaka": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
"FutureFork": {
"target_blobs_per_block": 6,
"max_blobs_per_block": 9,
"baseFeeUpdateFraction": 5007716,
},
},
id="FutureFork",
),
],
)
def test_blob_schedules(fork: Fork, expected_schedule: Dict | None) -> None:
"""Test blob schedules for different forks."""
if expected_schedule is None:
assert fork.blob_schedule() is None
else:
assert fork.blob_schedule() == BlobSchedule(**expected_schedule)
def test_bpo_fork() -> None:  # noqa: D103
    # Osaka itself is not a BPO fork; all BPO forks and the transitions
    # leading into them report True.
    assert Osaka.bpo_fork() is False
    assert BPO1.bpo_fork() is True
    assert BPO2.bpo_fork() is True
    assert BPO3.bpo_fork() is True
    assert BPO4.bpo_fork() is True
    assert OsakaToBPO1AtTime15k.bpo_fork() is True
    assert BPO1ToBPO2AtTime15k.bpo_fork() is True
    assert BPO2ToBPO3AtTime15k.bpo_fork() is True
    assert BPO3ToBPO4AtTime15k.bpo_fork() is True
def test_fork_adapters() -> None:  # noqa: D103
    assert Osaka == ForkAdapter.validate_python("Osaka")
    assert Osaka == ForkOrNoneAdapter.validate_python("Osaka")
    assert ForkOrNoneAdapter.validate_python(None) is None
    # Set adapter accepts comma-separated strings (case-insensitive, per the
    # lowercase "osaka" cases below) as well as actual sets.
    assert {Osaka, Prague} == ForkSetAdapter.validate_python("Osaka, Prague")
    assert {Osaka, Prague} == ForkSetAdapter.validate_python("osaka, Prague")
    assert {Osaka, Prague} == ForkSetAdapter.validate_python({"osaka", "Prague"})
    assert {Osaka} == ForkSetAdapter.validate_python("Osaka")
    assert {Osaka} == ForkSetAdapter.validate_python({Osaka})
    # The empty string parses to an empty set.
    assert set() == ForkSetAdapter.validate_python("")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/tests/__init__.py | src/ethereum_test_forks/tests/__init__.py | """`ethereum_test_forks` verification tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/ethereum_test_forks/tests/test_fork_range_descriptor.py | src/ethereum_test_forks/tests/test_fork_range_descriptor.py | """Test fork range descriptor parsing from string."""
import pytest
from ..forks.forks import Osaka, Prague
from ..helpers import ForkRangeDescriptor
@pytest.mark.parametrize(
"fork_range_descriptor_string,expected_fork_range_descriptor",
[
(
">=Osaka",
ForkRangeDescriptor(
greater_equal=Osaka,
less_than=None,
),
),
(
">= Prague < Osaka",
ForkRangeDescriptor(
greater_equal=Prague,
less_than=Osaka,
),
),
],
)
def test_parsing_fork_range_descriptor_from_string(
fork_range_descriptor_string: str,
expected_fork_range_descriptor: ForkRangeDescriptor,
) -> None:
"""
Test multiple strings used as fork range descriptors in ethereum/tests.
"""
assert (
ForkRangeDescriptor.model_validate(fork_range_descriptor_string)
== expected_fork_range_descriptor
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/config/check_eip_versions.py | src/config/check_eip_versions.py | """A module for managing configuration for the `check_eip_version` utility."""
from pydantic import BaseModel
class CheckEipVersionsConfig(BaseModel):
    """A class for accessing configurations for `check_eip_version`."""

    # Name of the last fork (inclusive) up to which EIP versions are checked.
    UNTIL_FORK: str = "Prague"
    """The target fork to check eip versions until."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/config/__init__.py | src/config/__init__.py | """
Initializes the config package.
The config package is responsible for loading and managing application-wide
environment configurations, making them accessible throughout the application.
"""
# This import is done to facilitate cleaner imports in the project
# `from config import AppConfig` instead of `from config.app import AppConfig`
from .app import AppConfig
from .docs import DocsConfig
from .env import EnvConfig
__all__ = ["AppConfig", "DocsConfig", "EnvConfig"]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/config/docs.py | src/config/docs.py | """
A module for managing documentation-related configurations.
Classes:
DocsConfig: Holds configurations for documentation generation.
"""
from pydantic import BaseModel
class DocsConfig(BaseModel):
    """A class for accessing documentation-related configurations."""

    TARGET_FORK: str = "Osaka"
    """The target fork for the documentation."""

    GENERATE_UNTIL_FORK: str = "Osaka"
    """The fork until which documentation should be generated."""

    # Base URL for the published documentation site; referenced by the
    # f-string below at class-body evaluation time.
    DOCS_BASE_URL: str = "https://eest.ethereum.org"

    # Documentation URLs prefixed with `DOCS_URL__` to avoid conflicts with
    # other URLs
    DOCS_URL__WRITING_TESTS: str = f"{DOCS_BASE_URL}/main/writing_tests/"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/config/app.py | src/config/app.py | """
A module for managing application configurations.
Classes:
- AppConfig: Holds configurations for the application framework.
"""
from pathlib import Path
from pydantic import BaseModel
import pytest_plugins.consume.releases as releases
class AppConfig(BaseModel):
    """A class for accessing documentation-related configurations."""

    @property
    def version(self) -> str:
        """Get the current version from releases."""
        # Resolve the latest stable release URL and extract the version
        # number from its ".../vX.Y.Z/..." path segment.
        spec = "stable@latest"
        release_url = releases.get_release_url(spec)
        return release_url.split("/v")[-1].split("/")[0]

    DEFAULT_LOGS_DIR: Path = Path(__file__).resolve().parent.parent.parent / "logs"
    """The default directory where log files are stored."""

    ROOT_DIR: Path = Path(__file__).resolve().parents[2]
    """The root directory of the project."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/config/env.py | src/config/env.py | """
A module for exposing application-wide environment variables.
This module is responsible for loading, parsing, and validating the
application's environment configuration from the `env.yaml` file. It uses
Pydantic to ensure that the configuration adheres to expected formats and
types.
Functions:
- create_default_config: Creates a default configuration file if it
doesn't exist.
Classes:
- EnvConfig: Loads the configuration and exposes it as Python objects.
- RemoteNode: Represents a remote node configuration with validation.
- Config: Represents the overall configuration structure with validation.
Usage:
- Initialize an instance of EnvConfig to load the configuration.
- Access configuration values via properties (e.g., EnvConfig().remote_nodes).
"""
from pathlib import Path
from typing import Dict, List
import yaml
from pydantic import BaseModel, HttpUrl, ValidationError
ENV_PATH = Path(__file__).resolve().parent.parent.parent / "env.yaml"
class RemoteNode(BaseModel):
    """
    Represents a configuration for a remote node.

    Attributes:
        name (str): The name of the remote node.
        node_url (HttpUrl): The URL for the remote node, validated as a
            proper URL.
        rpc_headers (Dict[str, str]): A dictionary of optional RPC headers,
            defaults to empty dict.

    Note: the defaults below are placeholder/example values written out when
    a default configuration file is generated.
    """

    name: str = "mainnet_archive"
    node_url: HttpUrl = HttpUrl("http://example.com")
    rpc_headers: Dict[str, str] = {"client-secret": "<secret>"}
class Config(BaseModel):
    """
    Represents the overall environment configuration.

    Attributes:
        remote_nodes (List[RemoteNode]): A list of remote node configurations.
    """

    # Defaults to a single placeholder node so a generated config is valid.
    remote_nodes: List[RemoteNode] = [RemoteNode()]
class EnvConfig(Config):
    """
    Loads and validates environment configuration from `env.yaml`.

    This is a wrapper class for the Config model. It reads a config file from
    disk into a Config model and then exposes it.
    """

    def __init__(self) -> None:
        """
        Init for the EnvConfig class.

        Raises:
            FileNotFoundError: If `env.yaml` does not exist.
            ValueError: If the file content fails Pydantic validation.
        """
        if not ENV_PATH.exists():
            raise FileNotFoundError(
                f"The configuration file '{ENV_PATH}' does not exist. "
                "Run `uv run eest make env` to create it."
            )
        with ENV_PATH.open("r") as file:
            # `yaml.safe_load` returns None for an empty file; fall back to
            # an empty mapping so validation proceeds (using field defaults)
            # instead of crashing on `**None`.
            config_data = yaml.safe_load(file) or {}
        try:
            # Validate and parse with Pydantic
            super().__init__(**config_data)
        except ValidationError as e:
            raise ValueError(f"Invalid configuration: {e}") from e
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/gen_index.py | src/cli/gen_index.py | """
Generate an index file of all the json fixtures in the specified directory.
"""
import datetime
import json
import os
from pathlib import Path
from typing import List
import click
import rich
from rich.progress import (
BarColumn,
Column,
Progress,
TaskProgressColumn,
TextColumn,
TimeElapsedColumn,
)
from ethereum_test_base_types import HexNumber
from ethereum_test_fixtures.consume import IndexFile, TestCaseIndexFile
from ethereum_test_fixtures.file import Fixtures
from .hasher import HashableItem
# Files and directories to exclude from index generation
INDEX_EXCLUDED_FILES = frozenset({"index.json"})
INDEX_EXCLUDED_PATH_PARTS = frozenset({".meta", "pre_alloc"})


def count_json_files_exclude_index(start_path: Path) -> int:
    """Return the number of fixture json files in the specified directory.

    Files named in ``INDEX_EXCLUDED_FILES`` and files under any directory
    listed in ``INDEX_EXCLUDED_PATH_PARTS`` are not counted.
    """
    total = 0
    for candidate in start_path.rglob("*.json"):
        if candidate.name in INDEX_EXCLUDED_FILES:
            continue
        if any(part in INDEX_EXCLUDED_PATH_PARTS for part in candidate.parts):
            continue
        total += 1
    return total
@click.command(
    help=(
        "Generate an index file of all the json fixtures in the specified directory."
        "The index file is saved as 'index.json' in the specified directory."
    )
)
@click.option(
    "--input",
    "-i",
    "input_dir",
    type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),
    required=True,
    help="The input directory",
)
@click.option(
    "--quiet",
    "-q",
    "quiet_mode",
    is_flag=True,
    default=False,
    expose_value=True,
    help="Don't show the progress bar while processing fixture files.",
)
@click.option(
    "--force",
    "-f",
    "force_flag",
    is_flag=True,
    default=False,
    expose_value=True,
    help="Force re-generation of the index file, even if it already exists.",
)
def generate_fixtures_index_cli(input_dir: str, quiet_mode: bool, force_flag: bool) -> None:
    """
    CLI wrapper to an index of all the fixtures in the specified directory.
    """
    # Thin click wrapper: all real work happens in generate_fixtures_index().
    generate_fixtures_index(
        Path(input_dir),
        quiet_mode=quiet_mode,
        force_flag=force_flag,
    )
def generate_fixtures_index(
    input_path: Path,
    quiet_mode: bool = False,
    force_flag: bool = False,
) -> None:
    """
    Generate an index file (index.json) of all the fixtures in specified dir.

    Args:
        input_path: Directory containing fixture json files.
        quiet_mode: Suppress the progress bar (also skips pre-counting files).
        force_flag: Regenerate the index even when the stored root hash
            matches the current folder hash.

    Raises:
        FileNotFoundError: If `input_path` is not a directory.
    """
    total_files = 0
    if not os.path.isdir(input_path):  # caught by click if using via cli
        raise FileNotFoundError(f"The directory {input_path} does not exist.")
    if not quiet_mode:
        # Pre-count only when the progress bar will actually be shown.
        total_files = count_json_files_exclude_index(input_path)

    output_file = Path(f"{input_path}/.meta/index.json")
    output_file.parent.mkdir(parents=True, exist_ok=True)  # no meta dir in <=v3.0.0

    try:
        root_hash = HashableItem.from_folder(folder_path=input_path).hash()
    except (KeyError, TypeError):
        root_hash = b""  # just regenerate a new index file

    # Fast path: an existing index whose recorded root hash still matches the
    # folder contents is considered up-to-date (unless --force).
    if not force_flag and output_file.exists():
        index_data: IndexFile
        try:
            with open(output_file, "r") as f:
                index_data = IndexFile(**json.load(f))
            if index_data.root_hash and index_data.root_hash == HexNumber(root_hash):
                if not quiet_mode:
                    rich.print(f"Index file [bold cyan]{output_file}[/] is up-to-date.")
                return
        except Exception as e:
            # A corrupt/stale index is not fatal; fall through and rebuild it.
            rich.print(f"Ignoring exception {e}")
            rich.print(f"...generating a new index file [bold cyan]{output_file}[/]")

    filename_display_width = 25
    with Progress(
        TextColumn(
            f"[bold cyan]{{task.fields[filename]:<{filename_display_width}}}[/]",
            justify="left",
            table_column=Column(ratio=1),
        ),
        BarColumn(
            complete_style="green3",
            finished_style="bold green3",
            table_column=Column(ratio=2),
        ),
        TaskProgressColumn(),
        TimeElapsedColumn(),
        expand=False,
        disable=quiet_mode,
    ) as progress:  # type: Progress
        task_id = progress.add_task("[cyan]Processing files...", total=total_files, filename="...")
        forks = set()
        fixture_formats = set()
        test_cases: List[TestCaseIndexFile] = []
        for file in input_path.rglob("*.json"):
            # Same exclusion rules as count_json_files_exclude_index().
            if file.name in INDEX_EXCLUDED_FILES or any(
                part in INDEX_EXCLUDED_PATH_PARTS for part in file.parts
            ):
                continue
            try:
                fixtures: Fixtures = Fixtures.model_validate_json(file.read_text())
            except Exception as e:
                rich.print(f"[red]Error loading fixtures from {file}[/red]")
                raise e
            relative_file_path = Path(file).absolute().relative_to(Path(input_path).absolute())
            for fixture_name, fixture in fixtures.items():
                fixture_fork = fixture.get_fork()
                test_cases.append(
                    TestCaseIndexFile(
                        id=fixture_name,
                        json_path=relative_file_path,
                        # eest uses hash; ethereum/tests uses generatedTestHash
                        fixture_hash=fixture.info.get("hash")
                        or f"0x{fixture.info.get('generatedTestHash')}",
                        fork=fixture_fork,
                        format=fixture.__class__,
                        pre_hash=getattr(fixture, "pre_hash", None),
                    )
                )
                if fixture_fork:
                    forks.add(fixture_fork)
                fixture_formats.add(fixture.format_name)
            # Truncate/pad the displayed filename to a fixed column width.
            display_filename = file.name
            if len(display_filename) > filename_display_width:
                display_filename = display_filename[: filename_display_width - 3] + "..."
            else:
                display_filename = display_filename.ljust(filename_display_width)
            progress.update(task_id, advance=1, filename=display_filename)
        progress.update(
            task_id,
            completed=total_files,
            filename="Indexing complete π¦".ljust(filename_display_width),
        )
    index = IndexFile(
        test_cases=test_cases,
        root_hash=root_hash,
        created_at=datetime.datetime.now(),
        test_count=len(test_cases),
        forks=list(forks),
        fixture_formats=list(fixture_formats),
    )
    with open(output_file, "w") as f:
        f.write(index.model_dump_json(exclude_none=False, indent=2))
# Allow running this module directly as a script.
if __name__ == "__main__":
    generate_fixtures_index_cli()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/generate_checklist_stubs.py | src/cli/generate_checklist_stubs.py | """CLI tool to generate mypy stub files for EIPChecklist classes."""
import sys
from pathlib import Path
import click
def has_nested_classes(obj: type) -> bool:
    """Check if an object has nested classes with a ``_path`` attribute.

    Only public attributes (no leading underscore) are inspected.
    """
    public_members = (
        getattr(obj, name) for name in dir(obj) if not name.startswith("_")
    )
    return any(
        isinstance(member, type) and hasattr(member, "_path")
        for member in public_members
    )
def generate_class_stub(obj: type, class_name: str, indent: int = 0) -> list[str]:
    """Generate stub for a class and its nested classes.

    Returns the stub as a list of source lines (no trailing newlines).
    """
    lines = []
    # NOTE(review): the indent unit and the in-string padding below look
    # collapsed (single space) — likely four spaces originally; confirm
    # against the upstream repository.
    spaces = " " * indent
    # Get all attributes that are classes with _path
    nested_classes = []
    leaf_classes = []
    for attr_name in dir(obj):
        if attr_name.startswith("_"):
            continue
        attr = getattr(obj, attr_name)
        if isinstance(attr, type) and hasattr(attr, "_path"):
            if has_nested_classes(attr):
                nested_classes.append((attr_name, attr))
            else:
                leaf_classes.append(attr_name)
    # Determine if this class itself should be callable
    is_callable = class_name != "EIPChecklist"  # All classes except the root are callable
    # Generate class declaration
    if is_callable:
        lines.append(f"{spaces}class {class_name}(_CallableChecklistItem):")
    else:
        lines.append(f"{spaces}class {class_name}:")
    # If no nested content, add pass
    if not nested_classes and not leaf_classes:
        lines.append(f"{spaces} pass")
        return lines
    # Generate leaf classes (final callable items)
    for attr_name in sorted(leaf_classes):
        lines.append(f"{spaces} {attr_name}: _CallableChecklistItem")
    # Add blank line if we have both leaf classes and nested classes
    if leaf_classes and nested_classes:
        lines.append("")
    # Generate nested classes (recursively, one indent level deeper)
    for i, (attr_name, attr_obj) in enumerate(sorted(nested_classes)):
        if i > 0:  # Add blank line between nested classes
            lines.append("")
        nested_lines = generate_class_stub(attr_obj, attr_name, indent + 1)
        lines.extend(nested_lines)
    return lines
@click.command()
@click.option(
    "--output",
    "-o",
    type=click.Path(),
    help="Output path for the stub file (default: auto-detect)",
)
@click.option(
    "--dry-run",
    is_flag=True,
    help="Print the generated stub content instead of writing to file",
)
def generate_checklist_stubs(output: str | None, dry_run: bool) -> None:
    """
    Generate mypy stub files for EIPChecklist classes.

    This is a development tool that generates .pyi stub files to help mypy
    understand that EIPChecklist classes are callable, fixing type checking
    issues.

    Examples:
        Generate stub files (auto-detect location):
        uv run generate_checklist_stubs

        Generate to specific location:
        uv run generate_checklist_stubs --output /path/to/stubs.pyi

        Preview content without writing:
        uv run generate_checklist_stubs --dry-run
    """
    try:
        # Add src to path so we can import the module
        src_path = Path(__file__).parent.parent
        sys.path.insert(0, str(src_path))
        from ethereum_test_checklists.eip_checklist import EIPChecklist

        # Fixed header of the generated .pyi file.
        # NOTE(review): the interior formatting of this literal appears
        # whitespace-mangled in this copy; indentation below is reconstructed.
        stub_content = '''"""
Type stubs for EIP checklist - auto-generated.
DO NOT EDIT MANUALLY - This file is generated by running
`uv run generate_checklist_stubs`
"""
from typing import Any, Callable, TypeVar, overload
import pytest
F = TypeVar("F", bound=Callable[..., Any])
class _CallableChecklistItem:
    """Base type for all callable checklist items."""
    @overload
    def __call__(self, func: F) -> F: ...
    @overload
    def __call__(self, *, eip: Any = ..., **kwargs: Any) -> pytest.MarkDecorator: ...
    def __str__(self) -> str: ...
'''
        # Generate the main EIPChecklist structure
        lines = generate_class_stub(EIPChecklist, "EIPChecklist", indent=0)
        stub_content += "\n".join(lines) + "\n"
        if dry_run:
            click.echo("Generated stub content:")
            click.echo("=" * 50)
            click.echo(stub_content)
            return
        # Determine output path
        if output:
            stub_path = Path(output)
        else:
            stub_path = src_path / "ethereum_test_checklists" / "eip_checklist.pyi"
        # Write to the stub file
        stub_path.parent.mkdir(parents=True, exist_ok=True)
        stub_path.write_text(stub_content)
        click.echo(f"β Generated stub file: {stub_path}")
        click.echo(f"π Total lines: {len(stub_content.splitlines())}")
        # Count classes for verification
        class_count = stub_content.count("class ")
        callable_count = stub_content.count(": _CallableChecklistItem")
        click.echo(f"ποΈ Classes: {class_count}, Callable items: {callable_count}")
        click.echo(
            "\nπ‘ This stub file helps mypy understand that EIPChecklist classes are callable."
        )
        click.echo(
            " You can now use @EIPChecklist.Opcode.Test.StackComplexOperations() "
            "without type errors!"
        )
    except ImportError as e:
        click.echo(f"β Error importing EIPChecklist: {e}", err=True)
        click.echo("π‘ Make sure you're running this from the project root", err=True)
        sys.exit(1)
    except Exception as e:
        click.echo(f"β Error generating stubs: {e}", err=True)
        sys.exit(1)
# Allow running this module directly as a script.
if __name__ == "__main__":
    generate_checklist_stubs()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/eofwrap.py | src/cli/eofwrap.py | """
Generate a JSON blockchain test from an existing JSON blockchain test by
wrapping its pre-state code in EOF wherever possible.
Example Usage:
1. Wrap tests
```console
eofwrap <input_dir/file_path> <output_dir_path>
```
"""
import json
import os
import sys
from pathlib import Path
from typing import Any, Dict, cast, no_type_check
import click
from ethereum_clis import CLINotFoundInPathError
from ethereum_clis.clis.evmone import EvmOneTransitionTool
from ethereum_test_base_types import Bytes, EthereumTestRootModel
from ethereum_test_base_types.conversions import to_hex
from ethereum_test_fixtures.blockchain import FixtureBlock, InvalidFixtureBlock
from ethereum_test_fixtures.file import Fixtures
from ethereum_test_forks.forks.forks import EOFv1
from ethereum_test_specs.blockchain import Block, BlockchainFixture, BlockchainTest
from ethereum_test_specs.debugging import print_traces
from ethereum_test_specs.eof import EOFParse
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import Transaction
from ethereum_test_types.block_types import Environment
from ethereum_test_types.eof.v1 import Container
from ethereum_test_vm import Bytecode
from .evm_bytes import OpcodeWithOperands, process_evm_bytes
@click.command()
@click.argument("input_path", type=click.Path(exists=True, dir_okay=True, file_okay=True))
@click.argument("output_dir", type=click.Path(dir_okay=True, file_okay=False))
@click.option("--traces", is_flag=True, type=bool)
def eof_wrap(input_path: str, output_dir: str, traces: bool) -> None:
    """
    Wrap JSON blockchain test file(s) found at `input_path`, output to
    `output_dir`.

    A `metrics.json` summary is always written into `output_dir`.
    """
    eof_wrapper = EofWrapper()
    # Fail fast if the evmone transition tool binary is not available.
    try:
        EvmOneTransitionTool()
    except CLINotFoundInPathError:
        print(f"Error: {EvmOneTransitionTool.default_binary} must be in the PATH.")
        sys.exit(1)
    except Exception as e:
        raise Exception(f"Unexpected exception: {e}") from e
    if os.path.isfile(input_path):
        # Single file: output goes directly into output_dir.
        file = os.path.basename(input_path)
        out_file = "eof_wrapped_" + file
        out_path = os.path.join(output_dir, out_file)
        eof_wrapper.wrap_file(input_path, out_path, traces)
    else:
        # Directory: mirror the input tree under output_dir.
        for subdir, _, files in os.walk(input_path):
            for file in files:
                rel_dir = Path(subdir).relative_to(input_path)
                out_file = "eof_wrapped_" + file
                out_path = os.path.join(output_dir, rel_dir, out_file)
                in_path = os.path.join(subdir, file)
                eof_wrapper.wrap_file(in_path, out_path, traces)
    os.makedirs(output_dir, exist_ok=True)
    with open(os.path.join(output_dir, "metrics.json"), "w") as f:
        json.dump(eof_wrapper.metrics, f, indent=4)
class BlockchainFixtures(EthereumTestRootModel):
    """
    Class needed due to some of the `ethereum/tests` fixtures not having the
    `_info.fixture_format` field in the JSON files.
    """

    # Maps fixture id -> parsed blockchain fixture.
    root: Dict[str, BlockchainFixture]
class EofWrapper:
    """EOF wrapping of blockchain tests with some simple metrics tracking."""

    # --- Keys of `self.metrics` (counters unless noted as dict) ---
    # JSON files had at least one fixture generated successfully with EOF
    FILES_GENERATED = "files_generated"
    # JSON files skipped explicitly or didn't have a fixture with EOF
    FILES_SKIPPED = "files_skipped"
    # Test fixtures with at least one EOF code and generated successfully
    FIXTURES_GENERATED = "fixtures_generated"
    # Test fixtures with no code able to be EOF-wrapped
    FIXTURES_CANT_WRAP = "fixtures_cant_wrap"
    # Test fixtures with EOF code but test doesn't pass and generation fails
    FIXTURES_CANT_GENERATE = "fixtures_cant_generate"
    # Invalid blocks in fixtures skipped
    INVALID_BLOCKS_SKIPPED = "invalid_blocks_skipped"
    # State accounts with code wrapped into valid EOF
    ACCOUNTS_WRAPPED = "accounts_wrapped"
    # State accounts with code wrapped into valid unique EOF
    UNIQUE_ACCOUNTS_WRAPPED = "unique_accounts_wrapped"
    # State accounts wrapped but the code is not valid EOF
    ACCOUNTS_INVALID_EOF = "accounts_invalid_eof"
    # State accounts wrapped into valid EOF but in a fixture of a failing test
    ACCOUNTS_CANT_GENERATE = "accounts_cant_generate"
    # Breakdown of EOF validation errors summing up to `accounts_invalid_eof` (dict)
    VALIDATION_ERRORS = "validation_errors"
    # Breakdown of runtime test failures summing up to `fixtures_cant_generate` (dict)
    GENERATION_ERRORS = "generation_errors"

    def __init__(self) -> None:
        """
        Initialize EofWrapper with metrics tracking and unique EOF set.
        """
        self.metrics = {
            self.FILES_GENERATED: 0,
            self.FILES_SKIPPED: 0,
            self.FIXTURES_GENERATED: 0,
            self.FIXTURES_CANT_WRAP: 0,
            self.FIXTURES_CANT_GENERATE: 0,
            self.INVALID_BLOCKS_SKIPPED: 0,
            self.ACCOUNTS_WRAPPED: 0,
            self.UNIQUE_ACCOUNTS_WRAPPED: 0,
            self.ACCOUNTS_INVALID_EOF: 0,
            self.ACCOUNTS_CANT_GENERATE: 0,
            self.VALIDATION_ERRORS: {},
            self.GENERATION_ERRORS: {},
        }
        # Hex strings of every distinct valid EOF container produced so far.
        self.unique_eof: set[str] = set()

    # Input paths containing any of these substrings are skipped wholesale.
    file_skip_list = [
        "Pyspecs",
        # EXTCODE* opcodes return different results for EOF targets and that is
        # tested elsewhere
        "stExtCodeHash",
        # bigint syntax
        "ValueOverflowParis",
        "bc4895-withdrawals",
        # EOF opcodes at diff places - tests obsolete
        "opcD0DiffPlaces",
        "opcD1DiffPlaces",
        "opcD2DiffPlaces",
        "opcD3DiffPlaces",
        "opcE0DiffPlaces",
        "opcE1DiffPlaces",
        "opcE2DiffPlaces",
        "opcE3DiffPlaces",
        "opcE4DiffPlaces",
        "opcE5DiffPlaces",
        "opcE6DiffPlaces",
        "opcE7DiffPlaces",
        "opcE8DiffPlaces",
        "opcECDiffPlaces",
        "opcEEDiffPlaces",
        "opcF7DiffPlaces",
        "opcF8DiffPlaces",
        "opcF9DiffPlaces",
        "opcFBDiffPlaces",
        # stack overflow always (limit of `max_stack_height` is 1023!)
        "push0_fill_stack",
        "push0_stack_overflow",
        "blobbasefee_stack_overflow",
    ]

    def wrap_file(self, in_path: str, out_path: str, traces: bool) -> None:
        """
        Wrap code from blockchain test JSON file from `in_path` into
        EOF containers. Tracks in metrics.
        """
        # Honor the explicit skip list before doing any parsing.
        for skip in self.file_skip_list:
            if skip in in_path:
                self.metrics[self.FILES_SKIPPED] = cast(int, self.metrics[self.FILES_SKIPPED]) + 1
                return
        fixtures: BlockchainFixtures = BlockchainFixtures.model_validate_json(
            Path(in_path).read_text()
        )
        out_fixtures = Fixtures({})
        fixture: BlockchainFixture
        for fixture_id, fixture in fixtures.root.items():
            fixture_eof_codes = []
            wrapped_at_least_one_account = False
            if fixture.pre:
                for address, account in fixture.pre.root.items():
                    # Nothing to wrap for absent/empty code.
                    if account is None or account.code is None or len(account.code) == 0:
                        continue
                    try:
                        wrapped = wrap_code(account.code)
                    except ValueError as e:
                        # Wrapping itself failed; record the error breakdown.
                        self.metrics[self.ACCOUNTS_INVALID_EOF] = (
                            cast(int, self.metrics[self.ACCOUNTS_INVALID_EOF]) + 1
                        )
                        _inc_counter(
                            cast(dict[Any, Any], self.metrics[self.VALIDATION_ERRORS]),
                            self._short_exception_msg(e),
                        )
                        continue
                    if self._validate_eof(wrapped):
                        account.code = Bytes(wrapped)
                        wrapped_at_least_one_account = True
                        self.metrics[self.ACCOUNTS_WRAPPED] = (
                            cast(int, self.metrics[self.ACCOUNTS_WRAPPED]) + 1
                        )
                        fixture_eof_codes.append(to_hex(account.code))
                        # wrap the same account in post state the same way
                        if fixture.post_state and fixture.post_state.root[address]:
                            fixture.post_state.root[address].code = Bytes(wrapped)  # type: ignore
                    else:
                        self.metrics[self.ACCOUNTS_INVALID_EOF] = (
                            cast(int, self.metrics[self.ACCOUNTS_INVALID_EOF]) + 1
                        )
            if not wrapped_at_least_one_account:
                self.metrics[self.FIXTURES_CANT_WRAP] = (
                    cast(int, self.metrics[self.FIXTURES_CANT_WRAP]) + 1
                )
                continue
            try:
                out_fixture = self._wrap_fixture(fixture, traces)
                out_fixtures[fixture_id] = out_fixture
                self.metrics[self.FIXTURES_GENERATED] = (
                    cast(int, self.metrics[self.FIXTURES_GENERATED]) + 1
                )
                self.unique_eof.update(fixture_eof_codes)
                self.metrics[self.UNIQUE_ACCOUNTS_WRAPPED] = len(self.unique_eof)
            except Exception as e:
                # Re-generation with wrapped code failed; record and move on.
                _inc_counter(
                    cast(dict[Any, Any], self.metrics[self.GENERATION_ERRORS]),
                    self._short_exception_msg(e),
                )
                self.metrics[self.FIXTURES_CANT_GENERATE] = (
                    cast(int, self.metrics[self.FIXTURES_CANT_GENERATE]) + 1
                )
                self.metrics[self.ACCOUNTS_CANT_GENERATE] = cast(
                    int, self.metrics[self.ACCOUNTS_CANT_GENERATE]
                ) + len(fixture_eof_codes)
                print(f"Exception {e} occurred during generation of {in_path}: {fixture_id}")
        if len(out_fixtures) == 0:
            self.metrics[self.FILES_SKIPPED] = cast(int, self.metrics[self.FILES_SKIPPED]) + 1
            return
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        out_fixtures.collect_into_file(Path(out_path))
        self.metrics[self.FILES_GENERATED] = cast(int, self.metrics[self.FILES_GENERATED]) + 1

    def _short_exception_msg(self, e: Exception) -> str:
        """Shorten exception message for display."""
        threshold = 30
        short = str(e)
        if len(short) > threshold:
            short = short[:threshold] + "..."
        return short

    def _wrap_fixture(self, fixture: BlockchainFixture, traces: bool) -> BlockchainFixture:
        """Re-generate `fixture` with evmone-t8n after its code was EOF-wrapped."""
        env = Environment(
            difficulty=fixture.genesis.difficulty,
            gas_limit=fixture.genesis.gas_limit,
            base_fee_per_gas=fixture.genesis.base_fee_per_gas,
            blob_gas_used=fixture.genesis.blob_gas_used,
            excess_blob_gas=fixture.genesis.excess_blob_gas,
            parent_beacon_block_root=fixture.genesis.parent_beacon_block_root,
        )
        pre = fixture.pre
        t8n = EvmOneTransitionTool(trace=traces)
        test = BlockchainTest(
            genesis_environment=env,
            pre=pre.root,
            post=fixture.post_state.root if fixture.post_state else {},
            blocks=[],
            tag="wrapped test",
        )
        for fixture_block in fixture.blocks:
            if isinstance(fixture_block, FixtureBlock):
                header = fixture_block.header
                block = Block(
                    ommers_hash=header.ommers_hash,
                    fee_recipient=header.fee_recipient,
                    difficulty=header.difficulty,
                    number=header.number,
                    gas_limit=header.gas_limit,
                    timestamp=header.timestamp,
                    extra_data=header.extra_data,
                    prev_randao=header.prev_randao,
                    nonce=header.nonce,
                    base_fee_per_gas=header.base_fee_per_gas,
                    withdrawals_root=header.withdrawals_root,
                    parent_beacon_block_root=header.parent_beacon_block_root,
                )
                assert not fixture_block.ommers
                assert not fixture_block.withdrawals
                for fixture_tx in fixture_block.txs:
                    # `ty`/`data` are spelled `type`/`input` on Transaction, so
                    # pop them from the dump and pass explicitly.
                    fixture_tx_dump = fixture_tx.model_dump()
                    fixture_tx_dump.pop("ty")
                    fixture_tx_dump.pop("data")
                    tx = Transaction(
                        type=fixture_tx.ty,
                        input=fixture_tx.data,
                        **fixture_tx_dump,
                    )
                    block.txs.append(tx)
                test.blocks.append(block)
            elif isinstance(fixture_block, InvalidFixtureBlock):
                # Skip - invalid blocks are not supported. Reason:
                # FixtureTransaction doesn't support expected exception. But we
                # can continue and test the remaining blocks.
                self.metrics[self.INVALID_BLOCKS_SKIPPED] = (
                    cast(int, self.metrics[self.INVALID_BLOCKS_SKIPPED]) + 1
                )
            else:
                raise TypeError("not a FixtureBlock")
        result = test.generate(
            t8n=t8n,
            fork=EOFv1,
            fixture_format=BlockchainFixture,
        )
        assert isinstance(result, BlockchainFixture)
        result.info["fixture-format"] = "blockchain_test"
        if traces:
            print_traces(t8n.get_traces())
        return result

    def _validate_eof(self, container: Container, metrics: bool = True) -> bool:
        """Return True when `container` passes `evmone-eofparse` validation."""
        eof_parse = EOFParse()
        result = eof_parse.run(input_value=to_hex(container))
        actual_message = result.stdout.strip()
        if "OK" not in actual_message:
            if metrics:
                # Track the validator's error message for the breakdown dict.
                _inc_counter(
                    cast(dict[Any, Any], self.metrics[self.VALIDATION_ERRORS]),
                    actual_message,
                )
            return False
        return True
# `no_type_check` required because OpcodeWithOperand.opcode can be `None` when
# formatting as a string, but here it can never be `None`.
@no_type_check
def wrap_code(account_code: Bytes) -> Container:
    """
    Wrap `account_code` into a simplest EOF container, applying some simple
    heuristics in order to obtain a valid code section termination.

    Raises:
        ValueError: Propagated from `process_evm_bytes` for undecodable code.
    """
    assert len(account_code) > 0
    opcodes = process_evm_bytes(account_code)
    # Ensure the section ends with a terminating instruction.
    if not opcodes[-1].terminating:
        opcodes.append(OpcodeWithOperands(opcode=Op.STOP))
    # Drop redundant trailing terminators (keep exactly one).
    while len(opcodes) > 1 and opcodes[-2].terminating and opcodes[-1].terminating:
        opcodes.pop()
    bytecode = Bytecode()
    for opcode in opcodes:
        bytecode += opcode.bytecode
    return Container.Code(bytecode)
def _inc_counter(d: dict, key: Any) -> None:
if key in d:
d[key] += 1
else:
d[key] = 1
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/hasher.py | src/cli/hasher.py | """Simple CLI tool to hash a directory of JSON fixtures."""
import hashlib
import json
from dataclasses import dataclass, field
from enum import IntEnum, auto
from pathlib import Path
from typing import Dict, List, Optional
import click
class HashableItemType(IntEnum):
    """Represents the type of a hashable item."""

    # Ordering matters: printing filters with `self.type >= print_type`.
    FOLDER = 0
    FILE = auto()
    TEST = auto()
@dataclass(kw_only=True)
class HashableItem:
    """
    Represents an item that can be hashed containing other items that can be
    hashed as well.
    """

    # Kind of node this item represents (folder / file / test).
    type: HashableItemType
    # Names of the enclosing folders/files, outermost first.
    parents: List[str] = field(default_factory=list)
    # Pre-computed hash for leaf (test) nodes; None for aggregates.
    root: Optional[bytes] = None
    # Child items keyed by name; None for leaf nodes.
    items: Optional[Dict[str, "HashableItem"]] = None

    def hash(self) -> bytes:
        """Return the hash of the item.

        Leaf nodes return their stored hash; aggregates return sha256 over
        their children's hashes concatenated in sorted-name order.

        Raises:
            ValueError: If the node has neither a stored hash nor children.
        """
        if self.root is not None:
            return self.root
        if self.items is None:
            raise ValueError("No items to hash")
        all_hash_bytes = b""
        for _, item in sorted(self.items.items()):
            item_hash_bytes = item.hash()
            all_hash_bytes += item_hash_bytes
        return hashlib.sha256(all_hash_bytes).digest()

    def print(
        self, *, name: str, level: int = 0, print_type: Optional[HashableItemType] = None
    ) -> None:
        """Print the hash of the item and sub-items.

        `print_type` filters output: only nodes whose type is >= the given
        type are printed (None prints everything).
        """
        next_level = level
        print_name = name
        if level == 0 and self.parents:
            # Tests are shown as file::test, folders/files as a path.
            separator = "::" if self.type == HashableItemType.TEST else "/"
            print_name = f"{'/'.join(self.parents)}{separator}{name}"
        if print_type is None or self.type >= print_type:
            next_level += 1
            print(f"{' ' * level}{print_name}: 0x{self.hash().hex()}")
        if self.items is not None:
            for key, item in sorted(self.items.items()):
                item.print(name=key, level=next_level, print_type=print_type)

    @classmethod
    def from_json_file(cls, *, file_path: Path, parents: List[str]) -> "HashableItem":
        """Create a hashable item from a JSON file.

        Raises:
            TypeError: If a fixture entry is not a dict or its hash not a str.
            KeyError: If `_info` or the hash field is missing.
        """
        items = {}
        with file_path.open("r") as f:
            data = json.load(f)
        for key, item in sorted(data.items()):
            if not isinstance(item, dict):
                raise TypeError(f"Expected dict, got {type(item)} for {key}")
            if "_info" not in item:
                raise KeyError(f"Expected '_info' in {key}, json file: {file_path.name}")
            # EEST uses 'hash'; ethereum/tests use 'generatedTestHash'
            hash_value = item["_info"].get("hash") or item["_info"].get("generatedTestHash")
            if hash_value is None:
                raise KeyError(f"Expected 'hash' or 'generatedTestHash' in {key}")
            if not isinstance(hash_value, str):
                raise TypeError(f"Expected hash to be a string in {key}, got {type(hash_value)}")
            # Strip the leading '0x' before decoding the hex digest.
            item_hash_bytes = bytes.fromhex(hash_value[2:])
            items[key] = cls(
                type=HashableItemType.TEST,
                root=item_hash_bytes,
                parents=parents + [file_path.name],
            )
        return cls(type=HashableItemType.FILE, items=items, parents=parents)

    @classmethod
    def from_folder(
        cls, *, folder_path: Path, parents: Optional[List[str]] = None
    ) -> "HashableItem":
        """Create a hashable item from a folder (recursively)."""
        if parents is None:
            parents = []
        items = {}
        for file_path in sorted(folder_path.iterdir()):
            # The .meta directory holds the index itself and is never hashed.
            if ".meta" in file_path.parts:
                continue
            if file_path.is_file() and file_path.suffix == ".json":
                item = cls.from_json_file(
                    file_path=file_path, parents=parents + [folder_path.name]
                )
                items[file_path.name] = item
            elif file_path.is_dir():
                item = cls.from_folder(folder_path=file_path, parents=parents + [folder_path.name])
                items[file_path.name] = item
        return cls(type=HashableItemType.FOLDER, items=items, parents=parents)
@click.command()
@click.argument(
    "folder_path_str", type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True)
)
@click.option("--files", "-f", is_flag=True, help="Print hash of files")
@click.option("--tests", "-t", is_flag=True, help="Print hash of tests")
@click.option("--root", "-r", is_flag=True, help="Only print hash of root folder")
def main(folder_path_str: str, files: bool, tests: bool, root: bool) -> None:
    """Hash folders of JSON fixtures and print their hashes."""
    folder_path: Path = Path(folder_path_str)
    item = HashableItem.from_folder(folder_path=folder_path)
    if root:
        # Root-only mode: print a single aggregate hash and stop.
        print(f"0x{item.hash().hex()}")
        return
    # --files takes precedence over --tests when both flags are given.
    print_type: Optional[HashableItemType] = None
    if files:
        print_type = HashableItemType.FILE
    elif tests:
        print_type = HashableItemType.TEST
    item.print(name=folder_path.name, print_type=print_type)


# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/src/cli/tox_helpers.py | src/cli/tox_helpers.py | """
CLI commands used by tox.ini.
Contains wrappers to the external commands markdownlint-cli2 and pyspelling
(requires aspell) that fail silently if the command is not available. The aim
is to avoid disruption to external contributors.
"""
import os
import re
import shutil
import subprocess
import sys
from pathlib import Path
import click
from pyspelling import __main__ as pyspelling_main # type: ignore
from rich.console import Console
def write_github_summary(
    title: str, tox_env: str, error_message: str, fix_commands: list[str]
) -> None:
    """Append a failure summary to the GitHub Actions step summary, if any.

    No-op outside GitHub Actions or when no summary file is configured.

    Args:
        title: Heading of the check that failed.
        tox_env: The tox environment name (e.g., "spellcheck") used in the
            reproduction command.
        error_message: Description of what went wrong.
        fix_commands: Commands to fix the issue locally (may be empty).
    """
    if not os.environ.get("GITHUB_ACTIONS"):
        return
    summary_file = os.environ.get("GITHUB_STEP_SUMMARY")
    if not summary_file:
        return
    # Assemble the whole markdown snippet, then append it in one write.
    chunks = [
        f"## β {title}\n\n",
        f"{error_message}\n\n",
        "### To reproduce this check locally:\n",
        "```bash\n",
        f"uvx --with=tox-uv tox -e {tox_env}\n",
        "```\n\n",
    ]
    if fix_commands:
        chunks.append("### To verify and fix the issues:\n")
        chunks.append("```bash\n")
        chunks.extend(f"{cmd}\n" for cmd in fix_commands)
        chunks.append("```\n")
    with open(summary_file, "a") as f:
        f.writelines(chunks)
@click.command(
    context_settings={
        "ignore_unknown_options": True,
        "allow_extra_args": True,
    }
)
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
def markdownlint(args: tuple[str, ...]) -> None:
    """
    Lint the markdown in ./README.md and ./docs/ using the external command
    markdownlint-cli2.

    Silently fail if markdownlint-cli2 is not installed.
    Allows argument forwarding to markdownlint-cli2.
    """
    markdownlint = shutil.which("markdownlint-cli2")
    if not markdownlint:
        # Note: There's an additional step in test.yaml to run markdownlint-
        # cli2 in GitHub Actions
        click.echo("********* Install 'markdownlint-cli2' to enable markdown linting *********")
        sys.exit(0)
    # Default targets when no args are forwarded.
    args_list: list[str] = list(args) if len(args) > 0 else ["./docs/**/*.md", "./README.md"]
    command = ["node", markdownlint] + args_list
    # Propagate the linter's exit code.
    sys.exit(subprocess.run(command).returncode)
@click.command()
def pyspelling() -> None:
    """
    Spellcheck the markdown in ./README.md and ./docs/ using the pyspelling
    package.

    Silently fails if aspell is not installed (required by pyspelling).
    Command-line arguments are not forwarded to pyspelling.
    """
    if not shutil.which("aspell"):
        click.echo("aspell not installed, skipping spellcheck.")
        if os.environ.get("GITHUB_ACTIONS"):
            # In CI a missing aspell is a hard failure with a step summary.
            write_github_summary(
                title="Pyspelling Check Failed",
                tox_env="spellcheck",
                error_message=(
                    "aspell is not installed. This tool is required for spell checking "
                    " documentation."
                ),
                fix_commands=[
                    "# Install aspell on Ubuntu/Debian",
                    "sudo apt-get install aspell aspell-en",
                    "",
                    "# Install aspell on macOS",
                    "brew install aspell",
                ],
            )
            sys.exit(1)
        else:
            # Locally this is only a soft skip so contributors aren't blocked.
            click.echo(
                "********* Install 'aspell' and 'aspell-en' to enable spellcheck *********"
            )
            sys.exit(0)
    result = pyspelling_main.main()
    if result != 0:
        write_github_summary(
            title="Pyspelling Check Failed",
            tox_env="spellcheck",
            error_message="Pyspelling found spelling errors in the documentation.",
            fix_commands=[
                "# Check the pyspelling configuration",
                "cat .pyspelling.yml",
                "",
                "# Review and fix spelling errors manually",
                "# Pyspelling doesn't have an auto-fix option",
            ],
        )
        sys.exit(result)
@click.command()
def codespell() -> None:
    """
    Run codespell on the codebase and provide helpful error messages.

    Checks spelling in top-level *.md and *.ini files and the .github/, src/,
    tests/, and docs/ directories. Exits with status 1 when spelling errors
    are found, 0 otherwise.
    """
    console = Console()
    # Define the paths to check
    paths_to_check = ["*.md", "*.ini", ".github/", "src/", "tests/", "docs/"]
    paths_str = " ".join(paths_to_check)
    # Run codespell
    result = subprocess.run(
        ["codespell"] + paths_to_check,
        capture_output=True,
        text=True,
    )
    # Print the output
    if result.stdout:
        console.print(result.stdout)
    if result.stderr:
        console.print(result.stderr, style="red")
    # If there were spelling errors, show a helpful message
    if result.returncode != 0:
        console.print("\n[bold red]β Spellcheck Failed[/bold red]")
        console.print(
            "[yellow]Please review the errors above. For single-suggestion fixes, you can "
            "automatically apply them with:[/yellow]"
        )
        console.print(f"[cyan]uv run codespell {paths_str} --write-changes[/cyan]\n")
        # Write to GitHub Actions summary
        write_github_summary(
            title="Spellcheck Failed",
            tox_env="spellcheck",
            error_message="Codespell found spelling errors in the code.",
            fix_commands=[
                "# Ensure codespell is installed (part of docs extras)",
                "uv sync --all-extras",
                "",
                "# Check for spelling errors",
                f"uv run codespell {paths_str}",
                "",
                "# Automatically fix single-suggestion errors",
                f"uv run codespell {paths_str} --write-changes",
            ],
        )
        sys.exit(1)
    sys.exit(0)
    # Fix: removed unreachable `sys.exit(pyspelling_main.main())` that
    # followed the unconditional sys.exit(0) above — dead code copy-pasted
    # from the `pyspelling` command.
@click.command()
def validate_changelog() -> None:
    """
    Validate changelog formatting to ensure bullet points end with proper
    punctuation.

    Checks that all bullet points (including nested ones) end with either:
    - A period (.) for regular entries
    - A colon (:) for section headers that introduce lists
    """
    changelog_path = Path("docs/CHANGELOG.md")
    if not changelog_path.exists():
        click.echo(f"β Changelog file not found: {changelog_path}")
        sys.exit(1)
    try:
        with open(changelog_path, "r", encoding="utf-8") as f:
            content = f.read()
    except Exception as e:
        click.echo(f"β Error reading changelog: {e}.")
        sys.exit(1)
    # Find bullet points that don't end with period or colon
    invalid_lines = []
    for line_num, line in enumerate(content.splitlines(), 1):
        # A bullet is "- " (any indent); last char must be '.' or ':'.
        if re.match(r"^\s*-\s+", line) and re.search(r"[^\.:]$", line.rstrip()):
            invalid_lines.append((line_num, line.strip()))
    if invalid_lines:
        click.echo(f"β Found bullet points in {changelog_path} without proper punctuation:")
        click.echo()
        for line_num, line in invalid_lines:
            click.echo(f"Line {line_num}: {line}")
        click.echo()
        click.echo("π‘ All bullet points should end with:")
        click.echo(" - A period (.) for regular entries.")
        click.echo(" - A colon (:) for paragraphs that introduce lists.")
        sys.exit(1)
    else:
        click.echo("β All bullet points have proper punctuation!")
        sys.exit(0)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.