repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/programs/context_calls.py | tests/frontier/scenarios/programs/context_calls.py | """Define programs that will run all context opcodes for test scenarios."""
from functools import cached_property
from ethereum_test_forks import Byzantium, Cancun, Constantinople, Fork, Istanbul, London, Shanghai
from ethereum_test_tools import Alloc, Bytecode
from ethereum_test_types import ChainConfigDefaults
from ethereum_test_vm import Opcodes as Op
from ..common import (
ProgramResult,
ScenarioExpectOpcode,
ScenarioTestProgram,
make_gas_hash_contract,
)
class ProgramAddress(ScenarioTestProgram):
"""
Check that ADDRESS is really the code execution address in all scenarios.
"""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.ADDRESS) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_ADDRESS"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CODE_ADDRESS)
class ProgramBalance(ScenarioTestProgram):
"""Check the BALANCE in all execution contexts."""
external_balance: int = 123
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
external_address = pre.deploy_contract(code=Op.ADD(1, 1), balance=self.external_balance)
return Op.MSTORE(0, Op.BALANCE(external_address)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_BALANCE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=self.external_balance)
class ProgramOrigin(ScenarioTestProgram):
"""Check that ORIGIN stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.ORIGIN) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_ORIGIN"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.TX_ORIGIN)
class ProgramCaller(ScenarioTestProgram):
"""Check the CALLER in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CALLER) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CALLER"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CODE_CALLER)
class ProgramCallValue(ScenarioTestProgram):
"""Check the CALLVALUE in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CALLVALUE) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CALLVALUE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CALL_VALUE)
class ProgramCallDataLoad(ScenarioTestProgram):
"""Check the CALLDATALOAD in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CALLDATALOAD(0)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CALLDATALOAD"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CALL_DATALOAD_0)
class ProgramCallDataSize(ScenarioTestProgram):
"""Check the CALLDATASIZE in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CALLDATASIZE) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CALLDATASIZE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CALL_DATASIZE)
class ProgramCallDataCopy(ScenarioTestProgram):
"""Check the CALLDATACOPY in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CALLDATACOPY"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.CALL_DATALOAD_0)
class ProgramCodeCopyCodeSize(ScenarioTestProgram):
"""Check that codecopy and codesize stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CODESIZE) + Op.CODECOPY(0, 0, 30) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CODECOPY_CODESIZE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(
result=0x38600052601E600060003960206000F300000000000000000000000000000010
)
class ProgramGasPrice(ScenarioTestProgram):
"""Check that gasprice stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(64, Op.GASPRICE)
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_GASPRICE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
class ProgramExtCodeCopyExtCodeSize(ScenarioTestProgram):
"""Check that gasprice stays the same in all contexts."""
external_balance: int = 123
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
external_address = pre.deploy_contract(code=Op.ADD(1, 1), balance=self.external_balance)
return (
Op.MSTORE(0, Op.EXTCODESIZE(external_address))
+ Op.EXTCODECOPY(external_address, 0, 0, 30)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_EXTCODECOPY_EXTCODESIZE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(
result=0x6001600101000000000000000000000000000000000000000000000000000005
)
class ProgramReturnDataSize(ScenarioTestProgram):
"""Check that returndatasize stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return (
Op.MSTORE(0, Op.RETURNDATASIZE)
+ Op.CALL(100000, 2, 0, 0, 10, 32, 20)
+ Op.MSTORE(0, Op.ADD(Op.MLOAD(0), Op.RETURNDATASIZE))
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_RETURNDATASIZE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=32, from_fork=Byzantium)
class ProgramReturnDataCopy(ScenarioTestProgram):
"""Check that returndatacopy stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return (
Op.CALL(100000, 2, 0, 0, 10, 32, 20) + Op.RETURNDATACOPY(0, 0, 32) + Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_RETURNDATACOPY"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(
result=0x1D448AFD928065458CF670B60F5A594D735AF0172C8D67F22A81680132681CA,
from_fork=Byzantium,
)
class ProgramExtCodehash(ScenarioTestProgram):
"""Check that extcodehash stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
external_address = pre.deploy_contract(code=Op.ADD(1, 1), balance=123)
return Op.MSTORE(0, Op.EXTCODEHASH(external_address)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_EXTCODEHASH"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(
result=0x8C634A8B28DD46F5DCB9A9F5DA1FAED26D0FB5ED98F3873A29AD27AAAFFDE0E4,
from_fork=Constantinople,
)
class ProgramBlockhash(ScenarioTestProgram):
"""Check that blockhash stays the same in all contexts."""
# Need a way to pre calculate at least hash of block 0
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
# Calculate gas hash of Op.BLOCKHASH(0) value
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(64, Op.BLOCKHASH(0))
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_BLOCKHASH"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
class ProgramCoinbase(ScenarioTestProgram):
"""Check that coinbase stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.COINBASE) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_COINBASE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.COINBASE)
class ProgramTimestamp(ScenarioTestProgram):
"""Check that timestamp stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(64, Op.TIMESTAMP)
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_TIMESTAMP"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
class ProgramNumber(ScenarioTestProgram):
"""Check that block number stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.NUMBER) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_NUMBER"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.NUMBER)
class ProgramDifficultyRandao(ScenarioTestProgram):
"""Check that difficulty/randao stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
# Calculate gas hash of DIFFICULTY value
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(0, Op.PREVRANDAO)
+ Op.MSTORE(64, Op.PREVRANDAO)
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_DIFFICULTY"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
class ProgramGasLimit(ScenarioTestProgram):
"""Check that gaslimit stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.GASLIMIT) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_GASLIMIT"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.GASLIMIT)
class ProgramChainid(ScenarioTestProgram):
"""Check that chainid stays the same in all contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.CHAINID) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_CHAINID"
def result(self) -> ProgramResult:
"""Test result."""
# TODO: use `chain_config` fixture instead.
chain_id = ChainConfigDefaults.chain_id
return ProgramResult(result=chain_id, from_fork=Istanbul)
class ProgramSelfbalance(ScenarioTestProgram):
"""Check the SELFBALANCE in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.SELFBALANCE) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_SELFBALANCE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=ScenarioExpectOpcode.SELFBALANCE, from_fork=Istanbul)
class ProgramBasefee(ScenarioTestProgram):
"""Check the BASEFEE in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(64, Op.BASEFEE)
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_BASEFEE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1, from_fork=London)
class ProgramBlobhash(ScenarioTestProgram):
"""Check the blobhash in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.BLOBHASH(0)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_BLOBHASH"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=0, from_fork=Cancun)
class ProgramBlobBaseFee(ScenarioTestProgram):
"""Check the blob basefee in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del fork
gas_hash = make_gas_hash_contract(pre)
return (
Op.MSTORE(64, Op.BLOBBASEFEE)
+ Op.CALL(Op.SUB(Op.GAS, 200000), gas_hash, 0, 64, 32, 0, 0)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_BLOBBASEFEE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1, from_fork=Cancun)
class ProgramTload(ScenarioTestProgram):
"""Check the tload in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, Op.TLOAD(0)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_TLOAD"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=0, from_fork=Cancun)
class ProgramMcopy(ScenarioTestProgram):
"""Check the mcopy in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return (
Op.MSTORE(0, 0)
+ Op.MSTORE(32, 0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F)
+ Op.MCOPY(0, 32, 32)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_MCOPY"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(
result=0x000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F,
from_fork=Cancun,
)
class ProgramPush0(ScenarioTestProgram):
"""Check the push0 in all execution contexts."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.PUSH1(10) + Op.PUSH0 + Op.MSTORE + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_PUSH0"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=10, from_fork=Shanghai)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/programs/all_frontier_opcodes.py | tests/frontier/scenarios/programs/all_frontier_opcodes.py | """
Define a program for scenario test that executes all frontier opcodes and
entangles it's result.
"""
from functools import cached_property
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Bytecode, Conditional
from ethereum_test_vm import Opcodes as Op
from ..common import ProgramResult, ScenarioTestProgram
# Opcodes that are not in Frontier
# 1b - SHL
# 1c - SHR
# 1d - SAR
def make_all_opcode_program() -> Bytecode:
"""
Make a program that call each Frontier opcode and verifies it's result.
"""
code: Bytecode = (
# Test opcode 01 - ADD
Conditional(
condition=Op.EQ(Op.ADD(1, 1), 2),
if_true=Op.MSTORE(0, 2),
if_false=Op.MSTORE(0, 0) + Op.RETURN(0, 32),
)
# Test opcode 02 - MUL
+ Conditional(
condition=Op.EQ(
Op.MUL(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 2),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE,
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 2) + Op.RETURN(0, 32),
)
# Test 03 - SUB
+ Conditional(
condition=Op.EQ(
Op.SUB(0, 1),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 3) + Op.RETURN(0, 32),
)
# Test 04 - DIV
+ Conditional(
condition=Op.AND(Op.EQ(Op.DIV(1, 2), 0), Op.EQ(Op.DIV(10, 2), 5)),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 4) + Op.RETURN(0, 32),
)
# Test 05 - SDIV
+ Conditional(
condition=Op.EQ(
Op.SDIV(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
),
2,
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 5) + Op.RETURN(0, 32),
)
# Test 06 - MOD
+ Conditional(
condition=Op.AND(
Op.EQ(Op.MOD(10, 3), 1),
Op.EQ(
Op.MOD(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF8,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD,
),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF8,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 6) + Op.RETURN(0, 32),
)
# Test 07 - SMOD
+ Conditional(
condition=Op.AND(
Op.EQ(Op.SMOD(10, 3), 1),
Op.EQ(
Op.SMOD(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF8,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD,
),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 7) + Op.RETURN(0, 32),
)
# Test 08 - ADDMOD
+ Conditional(
condition=Op.AND(
Op.EQ(Op.ADDMOD(10, 10, 8), 4),
Op.EQ(
Op.ADDMOD(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
2,
2,
),
1,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 8) + Op.RETURN(0, 32),
)
# Test 09 - MULMOD
+ Conditional(
condition=Op.AND(
Op.EQ(Op.MULMOD(10, 10, 8), 4),
Op.EQ(
Op.MULMOD(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
12,
),
9,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 9) + Op.RETURN(0, 32),
)
# Test 0A - EXP
+ Conditional(
condition=Op.AND(
Op.EQ(Op.EXP(10, 2), 100),
Op.EQ(
Op.EXP(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD, 2),
9,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 10) + Op.RETURN(0, 32),
)
# Test 0B - SIGNEXTEND
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.SIGNEXTEND(0, 0xFF),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
),
Op.EQ(
Op.SIGNEXTEND(0, 0x7F),
0x7F,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 11) + Op.RETURN(0, 32),
)
# Test 10 - LT
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.LT(9, 10),
1,
),
Op.EQ(
Op.LT(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 0),
0,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x10) + Op.RETURN(0, 32),
)
# Test 11 - GT
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.GT(9, 10),
0,
),
Op.EQ(
Op.GT(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 0),
1,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x11) + Op.RETURN(0, 32),
)
# Test 12 - SLT
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.SLT(9, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF),
0,
),
Op.EQ(
Op.SLT(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 0),
1,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x12) + Op.RETURN(0, 32),
)
# Test 13 - SGT
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.SGT(10, 10),
0,
),
Op.EQ(
Op.SGT(0, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF),
1,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x13) + Op.RETURN(0, 32),
)
# Test 14 - EQ Skip
# Test 15 - ISZero
+ Conditional(
condition=Op.AND(
Op.EQ(Op.ISZERO(10), 0),
Op.EQ(Op.ISZERO(0), 1),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x15) + Op.RETURN(0, 32),
)
# Test 16 - AND Skip
# Test 17 - OR
+ Conditional(
condition=Op.AND(
Op.EQ(Op.OR(0xF0, 0xF), 0xFF),
Op.EQ(Op.OR(0xFF, 0xFF), 0xFF),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x17) + Op.RETURN(0, 32),
)
# Test 18 - XOR
+ Conditional(
condition=Op.AND(
Op.EQ(Op.XOR(0xF0, 0xF), 0xFF),
Op.EQ(Op.XOR(0xFF, 0xFF), 0),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x18) + Op.RETURN(0, 32),
)
# Test 19 - NOT
+ Conditional(
condition=Op.AND(
Op.EQ(
Op.NOT(0), 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
),
Op.EQ(
Op.NOT(0xFFFFFFFFFFFF),
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000,
),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x19) + Op.RETURN(0, 32),
)
# Test 1A - BYTE
+ Conditional(
condition=Op.AND(
Op.EQ(Op.BYTE(31, 0xFF), 0xFF),
Op.EQ(Op.BYTE(30, 0xFF00), 0xFF),
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x1A) + Op.RETURN(0, 32),
)
# Test 20 - SHA3
+ Op.MSTORE(0, 0xFFFFFFFF)
+ Conditional(
condition=Op.EQ(
Op.SHA3(28, 4),
0x29045A592007D0C246EF02C2223570DA9522D0CF0F73282C79A1BC8F0BB2C238,
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x20) + Op.RETURN(0, 32),
)
# 50 POP
# 51 MLOAD
# 52 MSTORE
# 53 MSTORE8
+ Op.MSTORE(0, 0)
+ Op.MSTORE8(0, 0xFFFF)
+ Conditional(
condition=Op.EQ(
Op.MLOAD(0),
0xFF00000000000000000000000000000000000000000000000000000000000000,
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x53) + Op.RETURN(0, 32),
)
# 54 SLOAD
+ Conditional(
condition=Op.EQ(Op.SLOAD(0), 0),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x54) + Op.RETURN(0, 32),
)
# 55 SSTORE # can't use because of static contexts
# 56 JUMP
# 57 JUMPI
# 58 PC
+ Conditional(
condition=Op.EQ(Op.PC, 1660),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x58) + Op.RETURN(0, 32),
)
# 59 MSIZE
+ Op.MSTORE(64, 123)
+ Conditional(
condition=Op.EQ(Op.MSIZE, 96),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x59) + Op.RETURN(0, 32),
)
# 5A GAS
# 5B JUMPDEST
# 5C TLOAD
# 5D TSTORE # can't use because of static contexts
# 5E MCOPY
# 5F PUSH0
# 60 - 7F PUSH X
+ Op.PUSH1(0xFF)
+ Op.PUSH2(0xFFFF)
+ Op.ADD
+ Op.PUSH3(0xFFFFFF)
+ Op.ADD
+ Op.PUSH4(0xFFFFFFFF)
+ Op.ADD
+ Op.PUSH5(0xFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH6(0xFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH7(0xFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH8(0xFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH9(0xFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH10(0xFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH11(0xFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH12(0xFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH13(0xFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH14(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH15(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH16(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH17(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH18(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH19(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH20(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH21(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH22(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH23(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH24(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH25(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH26(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH27(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH28(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH29(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH30(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH31(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH32(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)
+ Op.ADD
+ Op.PUSH1(0)
+ Op.MSTORE
+ Conditional(
condition=Op.EQ(
Op.MLOAD(0), 0x1010101010101010101010101010101010101010101010101010101010100E0
),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 60) + Op.RETURN(0, 32),
)
# 80 - 8F DUP X
+ Op.PUSH1(1)
+ Op.DUP1
+ Op.DUP2
+ Op.DUP3
+ Op.DUP4
+ Op.DUP5
+ Op.DUP6
+ Op.DUP7
+ Op.DUP8
+ Op.DUP9
+ Op.DUP10
+ Op.DUP11
+ Op.DUP12
+ Op.DUP13
+ Op.DUP14
+ Op.DUP15
+ Op.DUP16
+ Op.ADD * 16
+ Op.PUSH1(0)
+ Op.MSTORE
+ Conditional(
condition=Op.EQ(Op.MLOAD(0), 17),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x80) + Op.RETURN(0, 32),
)
# 90 - 9F SWAP X
+ Op.PUSH1(3)
+ Op.PUSH1(5)
+ Op.SWAP1
+ Op.PUSH1(7)
+ Op.SWAP2
+ Op.PUSH1(11)
+ Op.SWAP3
+ Op.PUSH1(13)
+ Op.SWAP4
+ Op.PUSH1(17)
+ Op.SWAP5
+ Op.PUSH1(19)
+ Op.SWAP6
+ Op.PUSH1(23)
+ Op.SWAP7
+ Op.PUSH1(29)
+ Op.SWAP8
+ Op.PUSH1(31)
+ Op.SWAP9
+ Op.PUSH1(37)
+ Op.SWAP10
+ Op.PUSH1(41)
+ Op.SWAP11
+ Op.PUSH1(43)
+ Op.SWAP12
+ Op.PUSH1(47)
+ Op.SWAP13
+ Op.PUSH1(53)
+ Op.SWAP14
+ Op.PUSH1(59)
+ Op.SWAP15
+ Op.PUSH1(61)
+ Op.SWAP16
+ Op.PUSH1(0)
+ Op.MSTORE
+ Conditional(
condition=Op.EQ(Op.MLOAD(0), 59),
if_true=Op.JUMPDEST,
if_false=Op.MSTORE(0, 0x90) + Op.RETURN(0, 32),
)
# A0 - A4 LOG X - can't use because non static
# F0 CREATE
# F1 CALL
# F2 CALLCODE
# F3 RETURN
# F4 DELEGATECALL
# F5 CREATE2
# FA STATICCALL
# FD REVERT
# FE INVALID
# FF SELFDESTRUCT
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
return code
class ProgramAllFrontierOpcodes(ScenarioTestProgram):
"""Check every frontier opcode functions."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return make_all_opcode_program()
@cached_property
def id(self) -> str:
"""Id."""
return "program_ALL_FRONTIER_OPCODES"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/programs/__init__.py | tests/frontier/scenarios/programs/__init__.py | """Scenarios common import."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/programs/static_violation.py | tests/frontier/scenarios/programs/static_violation.py | """Define programs that can not be run in static context."""
from functools import cached_property
from ethereum_test_forks import Cancun, Fork
from ethereum_test_tools import Alloc, Bytecode
from ethereum_test_vm import Opcodes as Op
from ..common import ProgramResult, ScenarioTestProgram
class ProgramSstoreSload(ScenarioTestProgram):
"""Test sstore, sload working."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return (
Op.SSTORE(0, 11)
+ Op.MSTORE(0, Op.ADD(Op.MLOAD(0), Op.SLOAD(0)))
+ Op.SSTORE(0, 5)
+ Op.MSTORE(0, Op.ADD(Op.MLOAD(0), Op.SLOAD(0)))
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_SSTORE_SLOAD"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=16, static_support=False)
class ProgramTstoreTload(ScenarioTestProgram):
"""Test sstore, sload working."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.TSTORE(0, 11) + Op.MSTORE(0, Op.TLOAD(0)) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_TSTORE_TLOAD"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=11, static_support=False, from_fork=Cancun)
class ProgramLogs(ScenarioTestProgram):
"""Test Logs."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return (
Op.MSTORE(0, 0x1122334455667788991011121314151617181920212223242526272829303132)
+ Op.LOG0(0, 1)
+ Op.LOG1(1, 1, 0x1000)
+ Op.LOG2(2, 1, 0x2000, 0x2001)
+ Op.LOG3(3, 1, 0x3000, 0x3001, 0x3002)
+ Op.LOG4(4, 1, 0x4000, 0x4001, 0x4002, 0x4003)
+ Op.MSTORE(0, 1)
+ Op.RETURN(0, 32)
)
@cached_property
def id(self) -> str:
"""Id."""
return "program_LOGS"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1, static_support=False)
class ProgramSuicide(ScenarioTestProgram):
"""Test Suicide."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
del pre, fork
return Op.MSTORE(0, 1) + Op.SELFDESTRUCT(0) + Op.RETURN(0, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_SUICIDE"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=0, static_support=False)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/programs/invalid_opcodes.py | tests/frontier/scenarios/programs/invalid_opcodes.py | """Define programs that can not be run in static context."""
from functools import cached_property
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Bytecode
from ethereum_test_vm import Opcodes as Op
from ..common import ProgramResult, ScenarioTestProgram, make_invalid_opcode_contract
class ProgramInvalidOpcode(ScenarioTestProgram):
"""Test each invalid opcode."""
def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
"""Test code."""
contract = make_invalid_opcode_contract(pre, fork)
return Op.MSTORE(
0,
Op.CALL(Op.SUB(Op.GAS, 200000), contract, 0, 64, 32, 100, 32),
) + Op.RETURN(100, 32)
@cached_property
def id(self) -> str:
"""Id."""
return "program_INVALID"
def result(self) -> ProgramResult:
"""Test result."""
return ProgramResult(result=1)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/precompiles/test_precompile_absence.py | tests/frontier/precompiles/test_precompile_absence.py | """Test Calling Precompile Range (close to zero)."""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Bytecode,
StateTestFiller,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
UPPER_BOUND = 0x101
RETURNDATASIZE_OFFSET = 0x10000000000000000 # Must be greater than UPPER_BOUND
@pytest.mark.parametrize(
"calldata_size",
[
pytest.param(0, id="empty_calldata"),
pytest.param(31, id="31_bytes"),
pytest.param(32, id="32_bytes"),
],
)
@pytest.mark.valid_from("Byzantium")
def test_precompile_absence(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
calldata_size: int,
) -> None:
"""
Test that addresses close to zero are not precompiles unless active in the
fork.
"""
active_precompiles = fork.precompiles()
storage = Storage()
call_code = Bytecode()
for address in range(1, UPPER_BOUND + 1):
if Address(address) in active_precompiles:
continue
call_code += Op.SSTORE(
address,
Op.CALL(gas=0, address=address, args_size=calldata_size),
)
storage[address] = 1
if Op.RETURNDATASIZE in fork.valid_opcodes():
call_code += Op.SSTORE(
address + RETURNDATASIZE_OFFSET,
Op.RETURNDATASIZE,
)
storage[address + RETURNDATASIZE_OFFSET] = 0
call_code += Op.STOP
entry_point_address = pre.deploy_contract(call_code, storage=storage.canary())
tx = Transaction(
to=entry_point_address,
gas_limit=10_000_000,
sender=pre.fund_eoa(),
protected=True,
)
state_test(
pre=pre,
tx=tx,
post={
entry_point_address: Account(
storage=storage,
)
},
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/precompiles/__init__.py | tests/frontier/precompiles/__init__.py | """Test for precompiles that apply for all forks starting from Frontier."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/precompiles/test_precompiles.py | tests/frontier/precompiles/test_precompiles.py | """Tests supported precompiled contracts."""
from typing import Iterator, Tuple
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
def precompile_addresses(fork: Fork) -> Iterator[Tuple[Address, bool]]:
    """
    Yield the addresses of precompiled contracts and their support status for
    a given fork.

    Every active precompile address is yielded together with ``True``; each of
    its immediate neighbors (one address below and one above) is yielded with
    ``False`` whenever that neighbor is not itself an active precompile.
    """
    existing = fork.precompiles()
    for precompile in existing:
        as_int = int.from_bytes(precompile, byteorder="big")
        yield (precompile, True)
        below, above = as_int - 1, as_int + 1
        # Guard against the (invalid) address below zero.
        if as_int > 0 and below not in existing:
            yield (Address(below), False)
        if above not in existing:
            yield (Address(above), False)
@pytest.mark.ported_from(
    [
        "https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stPreCompiledContracts/idPrecompsFiller.yml"
    ],
    pr=["https://github.com/ethereum/execution-spec-tests/pull/1120"],
    coverage_missed_reason=(
        "Original test saves variables to memory, loads from storage, uses calldataload to get "
        "the precompile address to call, uses lt and gt to compare the gas differences, "
        "sends non-zero data and value with the transaction, uses conditional jumps to save "
        "different values to storage."
    ),
)
@pytest.mark.valid_from("Berlin")
@pytest.mark.parametrize_by_fork("address,precompile_exists", precompile_addresses)
def test_precompiles(
    state_test: StateTestFiller, address: Address, precompile_exists: bool, pre: Alloc
) -> None:
    """
    Tests the behavior of precompiled contracts in the Ethereum state test.
    Args:
        state_test (StateTestFiller): The state test filler object used to
        run the test.
        address (Address): The address of the precompiled contract to test.
        precompile_exists (bool): A flag indicating whether the precompiled
        contract exists at the given address.
        pre (Alloc): The allocation object used to deploy the contract and
        set up the initial state.
    This test deploys a contract that performs two CALL operations: one to the
    specified address and one to an empty reference account, measuring the gas
    used for each call. It then stores the difference in gas usage in storage
    slot 0. The test verifies the expected storage value based on whether the
    precompiled contract exists at the given address.
    """
    env = Environment()
    # Empty account to serve as reference
    empty_account = pre.fund_eoa(amount=0)
    # Memory
    args_offset = 0
    ret_offset = 32
    length = 32
    account = pre.deploy_contract(
        Op.MSTORE(args_offset, 0xFF)  # Pre-expand the memory and setup inputs for pre-compiles
        + Op.MSTORE(ret_offset, 0xFF)
        + Op.MSTORE8(args_offset, 0xFF)
        + Op.MSTORE8(ret_offset, 0xFF)
        + Op.POP(Op.BALANCE(empty_account))  # Warm the accounts
        + Op.POP(Op.BALANCE(address))
        # First measurement: gas spent by a CALL to the target address.
        + Op.GAS  # gas counter before the CALL, left on the stack
        + Op.CALL(
            gas=50_000,
            address=address,
            args_offset=args_offset,
            args_size=length,
            ret_offset=ret_offset,
            ret_size=length,
        )
        + Op.POP  # discard the CALL status code
        + Op.SUB(Op.SWAP1, Op.GAS)  # gas_before - gas_after = gas used by CALL
        # Second measurement: identical CALL to the empty reference account.
        + Op.GAS
        + Op.CALL(
            gas=50_000,
            address=empty_account,
            args_offset=args_offset,
            args_size=length,
            ret_offset=ret_offset,
            ret_size=length,
        )
        + Op.POP  # discard the CALL status code
        + Op.SUB(Op.SWAP1, Op.GAS)  # gas used by the reference CALL
        + Op.SWAP1
        + Op.SUB  # difference between the two gas measurements
        + Op.SSTORE(0, Op.ISZERO)  # store 1 when both calls cost the same
        + Op.STOP,
        storage={0: 0xDEADBEEF},  # canary value, must be overwritten
    )
    tx = Transaction(
        to=account,
        sender=pre.fund_eoa(),
        gas_limit=1_000_000,
        protected=True,
    )
    # A high gas cost will result from calling a precompile
    # Expect 0x00 when a precompile exists at the address, 0x01 otherwise
    post = {account: Account(storage={0: 0 if precompile_exists else 1})}
    state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_swap.py | tests/frontier/opcodes/test_swap.py | """
A State test for the set of `SWAP*` opcodes.
Ported from: https://github.com/ethereum/tests/
blob/develop/src/GeneralStateTestsFiller/VMTests/vmTests/swapFiller.yml.
"""
import pytest # noqa: I001
from ethereum_test_forks import Fork, Frontier, Homestead
from ethereum_test_tools import Account, Alloc, Bytecode, Environment
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools import StateTestFiller, Storage, Transaction
@pytest.mark.ported_from(
    [
        "https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/swapFiller.yml"
    ],
    pr=["https://github.com/ethereum/execution-spec-tests/pull/1163"],
    coverage_missed_reason=(
        "Test isolation (1 contract per execution) reduces evmone state "
        "comparisons vs old dispatcher pattern (16 contracts per execution)"
    ),
)
@pytest.mark.parametrize(
    "swap_opcode",
    [getattr(Op, f"SWAP{i}") for i in range(1, 17)],
    ids=lambda op: str(op),
)
@pytest.mark.valid_from("Frontier")
def test_swap(state_test: StateTestFiller, fork: Fork, pre: Alloc, swap_opcode: Op) -> None:
    """
    The set of `SWAP*` opcodes swaps the top of the stack with a specific
    element.
    Ensure each `SWAP*` opcode exchanges the top element with the nth element
    and that the resulting stack contents land in storage as expected.
    """
    env = Environment()
    # SWAPn exchanges the top of the stack with the element n positions below
    # it (1-based).
    depth = swap_opcode.int() - 0x90 + 1
    values = list(range(depth + 16))
    # Push the values in reverse so `values[0]` ends up on top of the stack.
    code = Bytecode()
    for v in reversed(values):
        code += Op.PUSH1(v)
    # Perform the swap, then drain the top 16 stack items into slots 0..15.
    code += swap_opcode
    for slot in range(16):
        code += Op.PUSH1(slot) + Op.SSTORE
    contract = pre.deploy_contract(code)
    tx = Transaction(
        sender=pre.fund_eoa(),
        to=contract,
        gas_limit=500_000,
        protected=fork not in [Frontier, Homestead],
    )
    # Model the post-swap stack: only the top and the element at `depth`
    # change places; the SSTORE loop then records the first 16 entries.
    swapped = values.copy()
    swapped[0], swapped[depth] = swapped[depth], swapped[0]
    storage = Storage()
    for expected in swapped[:16]:
        storage.store_next(expected)
    post = {contract: Account(storage=storage)}
    state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
    "swap_opcode",
    [getattr(Op, f"SWAP{i}") for i in range(1, 17)],
    ids=lambda op: str(op),
)
@pytest.mark.valid_from("Frontier")
def test_stack_underflow(
    state_test: StateTestFiller,
    fork: Fork,
    pre: Alloc,
    swap_opcode: Op,
) -> None:
    """
    A test to ensure the stack underflows when there are not enough elements
    for the `SWAP*` opcode to operate.
    For each SWAPn operation, exactly (n-1) elements are pushed, so swapping
    with the nth element must underflow and revert the frame.
    """
    env = Environment()
    # 1-based position the opcode swaps with.
    depth = swap_opcode.int() - 0x90 + 1
    # Provide one element fewer than SWAPn can reach.
    code = Bytecode()
    for i in range(depth - 1):
        code += Op.PUSH1(i % 256)
    # The swap must fail; the trailing SSTORE to slot 0 never executes.
    code += swap_opcode + Op.PUSH1(0) + Op.SSTORE
    contract = pre.deploy_contract(code)
    tx = Transaction(
        sender=pre.fund_eoa(),
        to=contract,
        gas_limit=500_000,
        protected=fork not in [Frontier, Homestead],
    )
    # Expect slot 0 to remain zero, as the frame reverted on underflow.
    storage = Storage()
    storage.store_next(0, f"SWAP{depth} failed due to stack underflow")
    post = {contract: Account(storage=storage)}
    state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_calldataload.py | tests/frontier/opcodes/test_calldataload.py | """test `CALLDATALOAD` opcode."""
import pytest
from ethereum_test_forks import Byzantium, Fork
from ethereum_test_tools import Account, Alloc, StateTestFiller, Transaction
from ethereum_test_tools import Macros as Om
from ethereum_test_vm import Opcodes as Op
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/calldataloadFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1236"],
)
@pytest.mark.parametrize(
"calldata,calldata_offset,expected_storage",
[
(
b"\x25\x60",
0x0,
0x2560000000000000000000000000000000000000000000000000000000000000,
),
(
b"\xff" * 32 + b"\x23",
0x1,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF23,
),
(
bytes.fromhex("123456789ABCDEF00000000000000000000000000000000000000000000000000024"),
0x5,
0xBCDEF00000000000000000000000000000000000000000000000000024000000,
),
],
ids=[
"two_bytes",
"word_n_byte",
"34_bytes",
],
)
@pytest.mark.parametrize("calldata_source", ["contract", "tx"])
def test_calldataload(
    state_test: StateTestFiller,
    calldata: bytes,
    calldata_offset: int,
    fork: Fork,
    pre: Alloc,
    expected_storage: Account,
    calldata_source: str,
) -> None:
    """
    Test `CALLDATALOAD` opcode.
    Two scenarios are covered:
    - calldata_source is "contract": CALLDATALOAD reads calldata forwarded
      by an intermediate caller contract
    - calldata_source is "tx": CALLDATALOAD reads the transaction calldata
      directly
    Based on
    https://github.com/ethereum/tests/blob/
    ae4791077e8fcf716136e70fe8392f1a1f1495fb/src/
    GeneralStateTestsFiller/VMTests/vmTests/calldatacopyFiller.yml
    """
    # Contract under test: loads a word at the fixed offset and stores it.
    reader = pre.deploy_contract(
        Op.SSTORE(0, Op.CALLDATALOAD(offset=calldata_offset)) + Op.STOP,
    )
    if calldata_source == "contract":
        # Intermediate contract copies the calldata to memory and forwards
        # it to the reader via CALL.
        target = pre.deploy_contract(
            Om.MSTORE(calldata, 0x0)
            + Op.CALL(
                gas=Op.SUB(Op.GAS(), 0x100),
                address=reader,
                value=0x0,
                args_offset=0x0,
                args_size=len(calldata),
                ret_offset=0x0,
                ret_size=0x0,
            )
            + Op.STOP
        )
    else:
        # Call the reader directly with the transaction calldata.
        target = reader
    tx = Transaction(
        data=calldata,
        gas_limit=100_000,
        protected=fork >= Byzantium,
        sender=pre.fund_eoa(),
        to=target,
    )
    post = {reader: Account(storage={0x00: expected_storage})}
    state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_calldatacopy.py | tests/frontier/opcodes/test_calldatacopy.py | """test `CALLDATACOPY` opcode."""
import pytest
from ethereum_test_forks import Byzantium, Fork
from ethereum_test_tools import Account, Alloc, Bytecode, StateTestFiller, Transaction
from ethereum_test_vm import Opcodes as Op
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/calldatacopyFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1056"],
)
@pytest.mark.parametrize(
"code,tx_data,code_address_storage,to_address_storage",
[
(
(
Op.CALLDATACOPY(dest_offset=0, offset=1, size=2)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x00",
Account(
storage={0x00: 0x3456000000000000000000000000000000000000000000000000000000000000}
),
Account(
storage={0x00: 0x3456000000000000000000000000000000000000000000000000000000000000}
),
),
(
(
Op.CALLDATACOPY(dest_offset=0, offset=1, size=1)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x01",
Account(
storage={0x00: 0x3400000000000000000000000000000000000000000000000000000000000000},
),
Account(
storage={0x00: 0x3400000000000000000000000000000000000000000000000000000000000000},
),
),
(
(
Op.CALLDATACOPY(dest_offset=0, offset=1, size=0)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x02",
Account(
storage={0x00: 0x00},
),
Account(
storage={0x00: 0x00},
),
),
(
(
Op.CALLDATACOPY(dest_offset=0, offset=0, size=0)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x03",
Account(
storage={0x00: 0x00},
),
Account(
storage={0x00: 0x00},
),
),
(
(
Op.CALLDATACOPY(
dest_offset=0,
offset=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA,
size=0xFF,
)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x04",
Account(storage={0x00: 0x00}),
Account(storage={0x00: 0x00}),
),
(
(
Op.CALLDATACOPY(
dest_offset=0,
offset=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA,
size=0x9,
)
+ Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0))
+ Op.RETURN(offset=0, size=Op.MSIZE)
),
b"\x05",
Account(storage={0x00: 0x00}),
Account(storage={0x00: 0x00}),
),
(
(Op.SSTORE(key=0x1, value=0x1) + Op.PUSH1[0x1] + Op.PUSH1[0x2] + Op.CALLDATACOPY),
b"\x10",
Account(storage={0x01: 0x00}),
None,
),
(
(
Op.JUMP(pc=0x5)
+ Op.JUMPDEST
+ Op.STOP
+ Op.JUMPDEST
+ Op.MSTORE8(offset=0x1F, value=0x42)
+ Op.CALLDATACOPY(dest_offset=0x1F, offset=0x0, size=0x103)
+ Op.MLOAD(offset=0x0)
+ Op.DUP1
+ Op.PUSH1[0x60]
+ Op.JUMPI(pc=0x3, condition=Op.EQ)
+ Op.SSTORE(key=0xFF, value=0xBADC0FFEE)
),
b"\x11",
Account(storage={0xFF: 0xBADC0FFEE}),
None,
),
],
ids=[
"cdc 0 1 2",
"cdc 0 1 1",
"cdc 0 1 0",
"cdc 0 0 0",
"cdc 0 neg6 ff",
"cdc 0 neg6 9",
"underflow",
"sec",
],
)
def test_calldatacopy(
    state_test: StateTestFiller,
    code: Bytecode,
    fork: Fork,
    tx_data: bytes,
    pre: Alloc,
    code_address_storage: Account,
    to_address_storage: Account | None,
) -> None:
    """
    Test `CALLDATACOPY` opcode.
    Based on https://github.com/ethereum/tests/blob/ae4791077e8fcf716136e70fe8392f1a1f1495fb/src/GeneralStateTestsFiller/VMTests/vmTests/calldatacopyFiller.yml
    """
    callee = pre.deploy_contract(code)
    # Caller: seed memory with a known pattern, forward a 16-byte slice of it
    # as calldata to the callee, then record the callee's return data in
    # storage slots 0 and 1.
    caller = pre.deploy_contract(
        code=(
            Op.MSTORE(offset=0x0, value=0x1234567890ABCDEF01234567890ABCDEF0)
            + Op.CALL(
                gas=Op.SUB(Op.GAS(), 0x100),
                address=callee,
                value=0x0,
                args_offset=0xF,
                args_size=0x10,
                ret_offset=0x20,
                ret_size=0x40,
            )
            + Op.POP
            + Op.SSTORE(key=0x0, value=Op.MLOAD(offset=0x20))
            + Op.SSTORE(key=0x1, value=Op.MLOAD(offset=0x40))
            + Op.STOP
        ),
    )
    tx = Transaction(
        data=tx_data,
        gas_limit=100_000,
        gas_price=0x0A,
        protected=fork >= Byzantium,
        sender=pre.fund_eoa(),
        to=caller,
        value=0x01,
    )
    # The caller's storage is only checked for cases that expect it.
    post = {callee: code_address_storage}
    if to_address_storage:
        post[caller] = to_address_storage
    state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_push.py | tests/frontier/opcodes/test_push.py | """
A State test for the set of `PUSH*` opcodes.
Ported from:
https://github.com/ethereum/tests/blob/
4f65a0a7cbecf4442415c226c65e089acaaf6a8b/src/
GeneralStateTestsFiller/VMTests/vmTests/pushFiller.yml.
"""
import pytest
from ethereum_test_forks import Fork, Frontier, Homestead
from ethereum_test_tools import Account, Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from ethereum_test_vm import Bytecode
def get_input_for_push_opcode(opcode: Op) -> bytes:
    """
    Get a sample input for the `PUSH*` opcode.
    The input is a prefix (sized to the opcode's immediate data width) of an
    excerpt from the Ethereum yellow paper.
    """
    sample = b"Ethereum is a transaction-based state machine."
    return sample[: opcode.data_portion_length]
@pytest.mark.ported_from(
    [
        "https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/pushFiller.yml",
    ],
    pr=["https://github.com/ethereum/execution-spec-tests/pull/975"],
)
@pytest.mark.parametrize(
    "push_opcode",
    # Dynamically parametrize PUSH opcodes
    [getattr(Op, f"PUSH{i}") for i in range(1, 33)],
    ids=lambda op: str(op),
)
@pytest.mark.valid_from("Frontier")
def test_push(state_test: StateTestFiller, fork: Fork, pre: Alloc, push_opcode: Op) -> None:
    """
    The set of `PUSH*` opcodes pushes data onto the stack.
    Ensure each `PUSH*` opcode writes a portion of an excerpt from the
    Ethereum yellow paper to storage slot 0.
    """
    excerpt = get_input_for_push_opcode(push_opcode)
    env = Environment()
    # Program: PUSH* excerpt ; PUSH1 0 ; SSTORE
    # i.e. push the excerpt, then store it under storage key 0.
    code = push_opcode(excerpt) + Op.PUSH1(0) + Op.SSTORE
    contract = pre.deploy_contract(code)
    tx = Transaction(
        sender=pre.fund_eoa(),
        to=contract,
        gas_limit=500_000,
        protected=fork not in [Frontier, Homestead],
    )
    expected = int.from_bytes(excerpt, byteorder="big")
    post = {contract: Account(storage={0: expected})}
    state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.ported_from(
    [
        "https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/pushFiller.yml",
    ],
    pr=["https://github.com/ethereum/execution-spec-tests/pull/975"],
)
@pytest.mark.parametrize(
    "push_opcode",
    [getattr(Op, f"PUSH{i}") for i in range(1, 33)],
    ids=lambda op: str(op),
)
@pytest.mark.parametrize("stack_height", range(1024, 1026))
@pytest.mark.valid_from("Frontier")
@pytest.mark.slow()
def test_stack_overflow(
    state_test: StateTestFiller, fork: Fork, pre: Alloc, push_opcode: Op, stack_height: int
) -> None:
    """
    A test that the stack overflows when the stack limit of 1024 is exceeded.
    """
    env = Environment()
    # Input used to test the `PUSH*` opcode.
    excerpt = get_input_for_push_opcode(push_opcode)
    # Fill the stack with (stack_height - 2) zeros — PUSH1(0) keeps the code
    # size small — then attempt two more pushes of the excerpt followed by
    # SSTORE.  Within the 1024-item limit this stores the excerpt under its
    # own value as key; one item past the limit the frame fails and the slot
    # stays empty.
    code: Bytecode = Bytecode()
    for _ in range(stack_height - 2):
        code += Op.PUSH1(0)
    code += push_opcode(excerpt) + push_opcode(excerpt) + Op.SSTORE
    contract = pre.deploy_contract(code)
    tx = Transaction(
        sender=pre.fund_eoa(),
        to=contract,
        gas_limit=500_000,
        protected=fork not in [Frontier, Homestead],
    )
    slot = int.from_bytes(excerpt, "big")
    # Storage should ONLY hold the message if the stack did not overflow.
    expected = slot if stack_height <= 1024 else 0
    post = {contract: Account(storage={slot: expected})}
    state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_calldatasize.py | tests/frontier/opcodes/test_calldatasize.py | """test `CALLDATASIZE` opcode."""
import pytest
from ethereum_test_forks import Byzantium, Fork
from ethereum_test_tools import Account, Alloc, StateTestFiller, Transaction
from ethereum_test_tools import Macros as Om
from ethereum_test_vm import Opcodes as Op
@pytest.mark.ported_from(
    [
        "https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/VMTests/vmTests/calldatasizeFiller.yml",
    ],
    pr=["https://github.com/ethereum/execution-spec-tests/pull/1236"],
)
@pytest.mark.parametrize(
    "args_size",
    [0, 2, 16, 33, 257],
)
@pytest.mark.parametrize("calldata_source", ["contract", "tx"])
@pytest.mark.slow()
def test_calldatasize(
    state_test: StateTestFiller,
    fork: Fork,
    args_size: int,
    pre: Alloc,
    calldata_source: str,
) -> None:
    """
    Test `CALLDATASIZE` opcode.
    Two scenarios are covered:
    - calldata_source is "contract": CALLDATASIZE sees calldata forwarded by
      an intermediate caller contract
    - calldata_source is "tx": CALLDATASIZE sees the transaction calldata
      directly
    Based on
    https://github.com/ethereum/tests/blob/
    81862e4848585a438d64f911a19b3825f0f4cd95/src/
    GeneralStateTestsFiller/VMTests/vmTests/calldatasizeFiller.yml
    """
    # Contract under test: stores its calldata size in slot 0.
    reader = pre.deploy_contract(Op.SSTORE(key=0x0, value=Op.CALLDATASIZE))
    calldata = b"\x01" * args_size
    # Fields shared by both transaction variants.
    tx_kwargs = dict(
        gas_limit=100_000,
        protected=fork >= Byzantium,
        sender=pre.fund_eoa(),
    )
    if calldata_source == "contract":
        # Intermediate contract copies the payload to memory and forwards it
        # to the reader via CALL.
        caller = pre.deploy_contract(
            code=(
                Om.MSTORE(calldata, 0x0)
                + Op.CALL(
                    gas=Op.SUB(Op.GAS(), 0x100),
                    address=reader,
                    value=0x0,
                    args_offset=0x0,
                    args_size=args_size,
                    ret_offset=0x0,
                    ret_size=0x0,
                )
            )
        )
        tx = Transaction(to=caller, **tx_kwargs)
    else:
        tx = Transaction(to=reader, data=calldata, **tx_kwargs)
    post = {reader: Account(storage={0x00: args_size})}
    state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py | tests/frontier/opcodes/test_call_and_callcode_gas_calculation.py | """
Tests nested CALL/CALLCODE gas usage with positive value transfer.
This test investigates an issue identified in EthereumJS, as reported in:
https://github.com/ethereumjs/ethereumjs-monorepo/issues/3194.
The issue pertains to the incorrect gas calculation for CALL/CALLCODE
operations with a positive value transfer, due to the pre-addition of the
gas stipend (2300) to the currently available gas instead of adding it to
the new call frame. This bug was specific to the case where insufficient
gas was provided for the CALL/CALLCODE operation. Due to the pre-addition
of the stipend to the currently available gas, the case for insufficient
gas was not properly failing with an out-of-gas error.
Test setup:
Given two smart contract accounts, 0x0A (caller) and 0x0B (callee):
1. An arbitrary transaction calls into the contract 0x0A.
2. Contract 0x0A executes a CALL to contract 0x0B with a specific gas limit
(X).
3. Contract 0x0B then attempts a CALL/CALLCODE to a non-existent contract
0x0C, with a positive value transfer (activating the gas stipend).
4. If the gas X provided by contract 0x0A to 0x0B is sufficient, contract
0x0B will push 0x01 onto the stack after returning to the call frame in
0x0A. Otherwise, it should push 0x00, indicating the insufficiency of
gas X (for the bug in EthereumJS, the CALL/CALLCODE operation would
return 0x01 due to the pre-addition of the gas stipend).
5. The resulting stack value is saved into contract 0x0A's storage,
allowing us to verify whether the provided gas was sufficient or
insufficient.
"""
from typing import Dict
import pytest
from ethereum_test_tools import (
EOA,
Account,
Address,
Alloc,
Bytecode,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
"""
PUSH opcode cost is 3, GAS opcode cost is 2.
We need 6 PUSH's and one GAS to fill the stack for both CALL & CALLCODE,
in the callee contract.
"""
CALLEE_INIT_STACK_GAS = 6 * 3 + 2
"""
CALL gas breakdowns: (https://www.evm.codes/#f1)
memory_exp_cost + code_exec_cost + address_access_cost +
positive_value_cost + empty_account_cost
= 0 + 0 + 2600 + 9000 + 25000 = 36600
"""
CALL_GAS = 36600
CALL_SUFFICIENT_GAS = CALL_GAS + CALLEE_INIT_STACK_GAS
"""
CALLCODE gas breakdowns: (https://www.evm.codes/#f2)
memory_exp_cost + code_exec_cost + address_access_cost +
positive_value_cost = 0 + 0 + 2600 + 9000 = 11600
"""
CALLCODE_GAS = 11600
CALLCODE_SUFFICIENT_GAS = CALLCODE_GAS + CALLEE_INIT_STACK_GAS
@pytest.fixture
def callee_code(pre: Alloc, callee_opcode: Op) -> Bytecode:
    """
    Code called by the caller contract: a CALL/CALLCODE to an empty account
    with a value of 1 wei, which activates the gas stipend.
    """
    # The target must be empty and fresh for each execution of the fixture;
    # otherwise the gas calculations (empty_account_cost) are incorrect.
    target = pre.empty_account()
    # Gas argument comes from Op.GAS — its exact value does not matter.
    return callee_opcode(Op.GAS(), target, 1, 0, 0, 0, 0)
@pytest.fixture
def sender(pre: Alloc) -> EOA:
    """Funded EOA used as the sender for all transactions."""
    funded_account = pre.fund_eoa()
    return funded_account
@pytest.fixture
def callee_address(pre: Alloc, callee_code: Bytecode) -> Address:
    """Address of the deployed callee contract (deployed with balance 0x03)."""
    deployed = pre.deploy_contract(callee_code, balance=0x03)
    return deployed
@pytest.fixture
def caller_code(caller_gas_limit: int, callee_address: Address) -> Bytecode:
    """
    Code that CALLs the callee contract with the parametrized gas limit and
    stores the CALL's success flag in storage slot 0.
    """
    inner_call = Op.CALL(caller_gas_limit, callee_address, 0, 0, 0, 0, 0)
    return Op.SSTORE(0, inner_call)
@pytest.fixture
def caller_address(pre: Alloc, caller_code: Bytecode) -> Address:
    """
    Address of the deployed caller contract.

    The contract is deployed with the `caller_code` fixture's bytecode and a
    balance of 0x03.
    """
    return pre.deploy_contract(caller_code, balance=0x03)
@pytest.fixture
def caller_tx(sender: EOA, caller_address: Address) -> Transaction:
    """Transaction that performs the call to the caller contract."""
    return Transaction(
        sender=sender,
        to=caller_address,
        value=1,
        gas_limit=500_000,
    )
@pytest.fixture
def post(caller_address: Address, is_sufficient_gas: bool) -> Dict[Address, Account]:
    """Expected post-state: slot 0 holds the CALL's success flag."""
    expected_flag = 0x01 if is_sufficient_gas else 0x00
    return {caller_address: Account(storage={0x00: expected_flag})}
@pytest.mark.parametrize(
    "callee_opcode, caller_gas_limit, is_sufficient_gas",
    [
        (Op.CALL, CALL_SUFFICIENT_GAS, True),
        (Op.CALL, CALL_SUFFICIENT_GAS - 1, False),
        (Op.CALLCODE, CALLCODE_SUFFICIENT_GAS, True),
        (Op.CALLCODE, CALLCODE_SUFFICIENT_GAS - 1, False),
    ],
)
@pytest.mark.valid_from("London")
def test_value_transfer_gas_calculation(
    state_test: StateTestFiller,
    pre: Alloc,
    caller_tx: Transaction,
    post: Dict[str, Account],
) -> None:
    """
    Test the nested CALL/CALLCODE opcode gas consumption with a positive
    value transfer.
    """
    env = Environment()
    state_test(env=env, pre=pre, post=post, tx=caller_tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_blockhash.py | tests/frontier/opcodes/test_blockhash.py | """Tests for BLOCKHASH opcode."""
import pytest
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
@pytest.mark.valid_from("Frontier")
def test_genesis_hash_available(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
    """
    Verify BLOCKHASH returns genesis and block 1 hashes.
    Regression test: blockchain test infrastructure must populate block
    hashes before execution. Without this, BLOCKHASH returns 0, breaking
    dynamic address computations like BLOCKHASH(0) | TIMESTAMP.
    Both genesis (block 0) and the first executed block (block 1) are
    checked by calling the contract again in block 2.
    Bug context: the revm blockchaintest runner wasn't inserting
    block_hashes, causing failures in tests with BLOCKHASH-derived addresses.
    """
    storage = Storage()
    # Record ISZERO(BLOCKHASH(0)) and ISZERO(BLOCKHASH(1)); both must be 0
    # (false) when the corresponding hashes are available.
    code = Op.SSTORE(storage.store_next(0), Op.ISZERO(Op.BLOCKHASH(0))) + Op.SSTORE(
        storage.store_next(0), Op.ISZERO(Op.BLOCKHASH(1))
    )
    contract = pre.deploy_contract(code=code)
    sender = pre.fund_eoa()

    def call_contract_block() -> Block:
        """Build one block holding a single unprotected call to the contract."""
        return Block(
            txs=[
                Transaction(
                    sender=sender,
                    to=contract,
                    gas_limit=100_000,
                    protected=False,
                )
            ]
        )

    # Block 1 runs the contract once; block 2 runs it again so that the hash
    # of block 1 is also observable.
    blocks = [call_contract_block(), call_contract_block()]
    post = {
        contract: Account(
            storage={
                0: 0,  # ISZERO(BLOCKHASH(0)) = 0 (genesis hash exists)
                1: 0,  # ISZERO(BLOCKHASH(1)) = 0 (block 1 hash exists)
            }
        )
    }
    blockchain_test(pre=pre, post=post, blocks=blocks)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/__init__.py | tests/frontier/opcodes/__init__.py | """Test for opcodes introduced in Frontier."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_call.py | tests/frontier/opcodes/test_call.py | """test `CALL` opcode."""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
CodeGasMeasure,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
# TODO: There's an issue with gas definitions on forks previous to Berlin,
# remove this when fixed. https://github.com/ethereum/execution-spec-
# tests/pull/1952#discussion_r2237634275
@pytest.mark.valid_from("Berlin")
def test_call_large_offset_mstore(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """
    CALL with ret_offset larger than memory size and ret_size zero, followed
    by an MSTORE at that offset, to check whether the CALL expanded memory.
    Guards against a faulty EVM implementation where memory is expanded when
    it shouldn't be.
    """
    gas_costs = fork.gas_costs()
    probe_offset = 128  # arbitrary offset beyond the current memory size
    call_measure = CodeGasMeasure(
        code=Op.CALL(gas=0, ret_offset=probe_offset, ret_size=0),
        # Cost of pushing the CALL arguments.
        overhead_cost=gas_costs.G_VERY_LOW * len(Op.CALL.kwargs),
        extra_stack_items=1,  # CALL leaves its status flag on the stack
        sstore_key=0,
        stop=False,  # first CodeGasMeasure in the sequence must not STOP
    )
    mstore_measure = CodeGasMeasure(
        code=Op.MSTORE(offset=probe_offset, value=1),
        # Cost of pushing the MSTORE arguments.
        overhead_cost=gas_costs.G_VERY_LOW * len(Op.MSTORE.kwargs),
        extra_stack_items=0,
        sstore_key=1,
    )
    contract = pre.deploy_contract(call_measure + mstore_measure)
    tx = Transaction(
        gas_limit=500_000,
        to=contract,
        value=0,
        sender=pre.fund_eoa(),
    )
    # The CALL should only pay the (cold) account access cost — no memory
    # expansion, since ret_size is zero.
    expected_call_cost = gas_costs.G_COLD_ACCOUNT_ACCESS
    # The MSTORE pays its base cost plus the full expansion up to the offset.
    expansion_gas = fork.memory_expansion_gas_calculator()
    expected_mstore_cost = gas_costs.G_MEMORY + expansion_gas(new_bytes=probe_offset + 1)
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            contract: Account(
                storage={
                    0: expected_call_cost,
                    1: expected_mstore_cost,
                },
            )
        },
    )
# TODO: There's an issue with gas definitions on forks previous to Berlin,
# remove this when fixed. https://github.com/ethereum/execution-spec-
# tests/pull/1952#discussion_r2237634275
@pytest.mark.valid_from("Berlin")
def test_call_memory_expands_on_early_revert(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """
    When CALL fails early (e.g. not enough balance for the value transfer),
    memory must still be expanded for the return buffer; this is verified via
    the gas cost of a follow-up MSTORE inside that buffer.
    Guards against an EVM implementation bug where memory is expanded after a
    successfully executed CALL, but not when an early revert happens.
    """
    gas_costs = fork.gas_costs()
    # Larger than the current memory size, so the CALL triggers an expansion.
    ret_size = 128
    call_measure = CodeGasMeasure(
        # CALL with a positive value transfer.
        code=Op.CALL(gas=0, value=100, ret_size=ret_size),
        # Cost of pushing the CALL arguments.
        overhead_cost=gas_costs.G_VERY_LOW * len(Op.CALL.kwargs),
        extra_stack_items=1,  # CALL leaves its status flag on the stack
        sstore_key=0,
        stop=False,  # first CodeGasMeasure in the sequence must not STOP
    )
    mstore_measure = CodeGasMeasure(
        # Offset inside the already-expanded area: no further expansion.
        code=Op.MSTORE(offset=ret_size // 2, value=1),
        # Cost of pushing the MSTORE arguments.
        overhead_cost=gas_costs.G_VERY_LOW * len(Op.MSTORE.kwargs),
        extra_stack_items=0,
        sstore_key=1,
    )
    # Zero balance guarantees the CALL's value transfer fails early.
    contract = pre.deploy_contract(code=call_measure + mstore_measure, balance=0)
    tx = Transaction(
        gas_limit=500_000,
        to=contract,
        value=0,
        sender=pre.fund_eoa(),
    )
    expansion_gas = fork.memory_expansion_gas_calculator()
    # CALL cost: account access + new-account + memory expansion + value
    # transfer, minus the stipend handed to the callee frame.
    expected_call_cost = (
        gas_costs.G_COLD_ACCOUNT_ACCESS
        + gas_costs.G_NEW_ACCOUNT
        + expansion_gas(new_bytes=ret_size)
        + gas_costs.G_CALL_VALUE
        - gas_costs.G_CALL_STIPEND
    )
    # MSTORE cost: base cost only — memory was already expanded by the CALL.
    expected_mstore_cost = gas_costs.G_MEMORY
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            contract: Account(
                storage={
                    0: expected_call_cost,
                    1: expected_mstore_cost,
                },
            )
        },
    )
# TODO: There's an issue with gas definitions on forks previous to Berlin,
# remove this when fixed. https://github.com/ethereum/execution-spec-
# tests/pull/1952#discussion_r2237634275
@pytest.mark.with_all_call_opcodes
@pytest.mark.valid_from("Berlin")
def test_call_large_args_offset_size_zero(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
call_opcode: Op,
) -> None:
"""
Test xCALL with an extremely large args_offset and args_size set to zero.
Since the size is zero, the large offset should not cause a revert.
"""
sender = pre.fund_eoa()
gsc = fork.gas_costs()
very_large_offset = 2**100
call_measure = CodeGasMeasure(
code=call_opcode(gas=0, args_offset=very_large_offset, args_size=0),
# Cost of pushing xCALL args
overhead_cost=gsc.G_VERY_LOW * len(call_opcode.kwargs),
extra_stack_items=1, # Because xCALL pushes 1 item to the stack
sstore_key=0,
)
contract = pre.deploy_contract(call_measure)
tx = Transaction(
gas_limit=500_000,
to=contract,
value=0,
sender=sender,
)
# this call cost is just the address_access_cost
call_cost = gsc.G_COLD_ACCOUNT_ACCESS
state_test(
env=Environment(),
pre=pre,
tx=tx,
post={
contract: Account(
storage={
0: call_cost,
},
)
},
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_all_opcodes.py | tests/frontier/opcodes/test_all_opcodes.py | """
Call every possible opcode and test that the subcall is successful if the
opcode is supported by the fork supports and fails otherwise.
"""
from typing import Dict
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Bytecode,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcode, UndefinedOpcodes
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "N/A"
REFERENCE_SPEC_VERSION = "N/A"
def prepare_stack(opcode: Opcode) -> Bytecode:
"""Prepare valid stack for opcode."""
if opcode == Op.CREATE:
return Op.MSTORE(0, 0x6001600155) + Op.PUSH1(5) + Op.PUSH1(27) + Op.PUSH1(5)
if opcode == Op.CREATE2:
return Op.MSTORE(0, 0x6001600155) + Op.PUSH1(1) + Op.PUSH1(5) + Op.PUSH1(27) + Op.PUSH1(5)
if opcode == Op.JUMPI:
return Op.PUSH1(1) + Op.PUSH1(5)
if opcode == Op.JUMP:
return Op.PUSH1(3)
if opcode == Op.RETURNDATACOPY:
return Op.PUSH1(0) * 3
return Op.PUSH1(0x01) * 32
def prepare_suffix(opcode: Opcode) -> Bytecode:
"""Prepare after opcode instructions."""
if opcode == Op.JUMPI or opcode == Op.JUMP:
return Op.JUMPDEST
return Op.STOP
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stBadOpcode/badOpcodesFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stBugs/evmBytecodeFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/748"],
)
@pytest.mark.valid_from("Frontier")
def test_all_opcodes(state_test: StateTestFiller, pre: Alloc, fork: Fork) -> None:
"""
Test each possible opcode on the fork with a single contract that calls
each opcode in succession. Check that each subcall passes if the opcode is
supported and fails otherwise.
"""
code_worked = 1000
code_contract: Dict[Opcode, Address] = {}
for opcode in sorted(set(Op) | set(UndefinedOpcodes)):
code_contract[opcode] = pre.deploy_contract(
balance=10,
code=prepare_stack(opcode) + opcode + prepare_suffix(opcode),
storage={},
)
# EVM code to make the call and store the result
contract_address = pre.deploy_contract(
code=sum(
Op.SSTORE(
Op.PUSH1(opcode.int()),
# Limit gas to limit the gas consumed by the exceptional aborts
# in each subcall that uses an undefined opcode.
Op.CALL(35_000, opcode_address, 0, 0, 0, 0, 0),
)
for opcode, opcode_address in code_contract.items()
)
+ Op.SSTORE(code_worked, 1)
+ Op.STOP,
)
post = {
contract_address: Account(
storage={
**{
opcode.int(): 1 if opcode != Op.REVERT else 0
for opcode in fork.valid_opcodes()
},
code_worked: 1,
}
),
}
tx = Transaction(
sender=pre.fund_eoa(),
gas_limit=9_000_000,
to=contract_address,
data=b"",
value=0,
protected=False,
)
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Cancun")
def test_cover_revert(state_test: StateTestFiller, pre: Alloc) -> None:
"""Cover state revert from original tests for the coverage script."""
tx = Transaction(
sender=pre.fund_eoa(),
gas_limit=1_000_000,
data=Op.SSTORE(1, 1) + Op.REVERT(0, 0),
to=None,
value=0,
protected=False,
)
state_test(env=Environment(), pre=pre, post={}, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_selfdestruct.py | tests/frontier/opcodes/test_selfdestruct.py | """Test the SELFDESTRUCT opcode."""
import pytest
from ethereum_test_tools import Account, Alloc, Block, BlockchainTestFiller, Initcode, Transaction
from ethereum_test_tools import Opcodes as Op
@pytest.mark.valid_from("Frontier")
@pytest.mark.valid_until("Homestead")
def test_double_kill(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
"""
Test that when two transactions attempt to destruct a contract, the second
transaction actually resurrects the contract as an empty account (prior to
Spurious Dragon).
"""
sender = pre.fund_eoa()
deploy_code = Op.SELFDESTRUCT(Op.ADDRESS)
initcode = Initcode(deploy_code=deploy_code)
create_tx = Transaction(
gas_limit=100_000,
protected=False,
to=None,
data=initcode,
sender=sender,
)
block_1 = Block(txs=[create_tx])
first_kill = Transaction(
gas_limit=100_000,
protected=False,
to=create_tx.created_contract,
sender=sender,
)
second_kill = Transaction(
gas_limit=100_000,
protected=False,
to=create_tx.created_contract,
sender=sender,
)
block_2 = Block(txs=[first_kill, second_kill])
post = {
create_tx.created_contract: Account(
nonce=0,
balance=0,
code=b"",
storage={},
),
}
blockchain_test(pre=pre, post=post, blocks=[block_1, block_2])
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/opcodes/test_dup.py | tests/frontier/opcodes/test_dup.py | """Test DUP Test the DUP opcodes."""
import pytest
from ethereum_test_forks import Frontier, Homestead
from ethereum_test_tools import Account, Alloc, Environment, StateTestFiller, Storage, Transaction
from ethereum_test_tools import Opcodes as Op
@pytest.mark.parametrize(
"dup_opcode",
[
Op.DUP1,
Op.DUP2,
Op.DUP3,
Op.DUP4,
Op.DUP5,
Op.DUP6,
Op.DUP7,
Op.DUP8,
Op.DUP9,
Op.DUP10,
Op.DUP11,
Op.DUP12,
Op.DUP13,
Op.DUP14,
Op.DUP15,
Op.DUP16,
],
ids=lambda op: str(op),
)
@pytest.mark.with_all_evm_code_types
def test_dup(
state_test: StateTestFiller,
fork: str,
dup_opcode: Op,
pre: Alloc,
) -> None:
"""
Test the DUP1-DUP16 opcodes.
Note: Test case ported from
[ethereum/tests](https://github.com/ethereum/tests).
Test ported from [ethereum/tests/GeneralStateTests/VMTests/
vmTests/dup.json](https://github.com/ethereum/tests/blob/
v14.0/GeneralStateTests/VMTests/vmTests/dup.json) by Ori Pomerantz.
"""
env = Environment()
sender = pre.fund_eoa()
post = {}
# Push 0x00 - 0x10 onto the stack
account_code = sum(Op.PUSH1(i) for i in range(0x11))
# Use the DUP opcode
account_code += dup_opcode
# Save each stack value into different keys in storage
account_code += sum(Op.PUSH1(i) + Op.SSTORE for i in range(0x11))
account = pre.deploy_contract(account_code)
tx = Transaction(
ty=0x0,
nonce=0,
to=account,
gas_limit=500000,
gas_price=10,
protected=False if fork in [Frontier, Homestead] else True,
data="",
sender=sender,
)
"""
Storage will be structured as follows:
0x00: 0x10-0x01 (Depending on DUP opcode)
0x01: 0x10
0x02: 0x0F
0x03: 0x0E
0x04: 0x0D
0x05: 0x0C
0x06: 0x0B
0x07: 0x0A
0x08: 0x09
0x09: 0x08
0x0A: 0x07
0x0B: 0x06
0x0C: 0x05
0x0D: 0x04
0x0E: 0x03
0x0F: 0x02
0x10: 0x01
DUP1 copies the first element of the stack (0x10).
DUP16 copies the 16th element of the stack (0x01).
"""
s: Storage.StorageDictType = dict(zip(range(1, 17), range(16, 0, -1), strict=False))
s[0] = 16 - (dup_opcode.int() - 0x80)
post[account] = Account(storage=s)
state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/examples/__init__.py | tests/frontier/examples/__init__.py | """Test examples, patterns, templates to use in .py tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/examples/test_block_intermediate_state.py | tests/frontier/examples/test_block_intermediate_state.py | """Test the SELFDESTRUCT opcode."""
import pytest
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Transaction,
)
@pytest.mark.valid_from("Frontier")
@pytest.mark.valid_until("Homestead")
def test_block_intermediate_state(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
"""Verify intermediate block states."""
env = Environment()
sender = pre.fund_eoa()
tx = Transaction(gas_limit=100_000, to=None, data=b"", sender=sender, protected=False)
tx_2 = Transaction(gas_limit=100_000, to=None, data=b"", sender=sender, protected=False)
block_1 = Block(
txs=[tx],
expected_post_state={
sender: Account(
nonce=1,
),
},
)
block_2 = Block(txs=[])
block_3 = Block(
txs=[tx_2],
expected_post_state={
sender: Account(
nonce=2,
),
},
)
blockchain_test(
genesis_environment=env,
pre=pre,
post=block_3.expected_post_state,
blocks=[block_1, block_2, block_3],
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/create/test_create_suicide_during_init.py | tests/frontier/create/test_create_suicide_during_init.py | """Deploy contract that calls selfdestruct in it's initcode."""
from enum import Enum
import pytest
from ethereum_test_forks import Byzantium, Fork
from ethereum_test_tools import (
Account,
Alloc,
Environment,
Initcode,
StateTestFiller,
Transaction,
compute_create_address,
)
from ethereum_test_tools import Opcodes as Op
class Operation(Enum):
"""Enum for created contract actions."""
SUICIDE = 1
SUICIDE_TO_ITSELF = 2
def __int__(self) -> int:
"""Convert to int."""
return int(self.value)
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_ContractSuicideDuringInit_ThenStoreThenReturnFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_ContractSuicideDuringInit_WithValueFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_ContractSuicideDuringInit_WithValueToItselfFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_ContractSuicideDuringInitFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1871"],
coverage_missed_reason="Tip to coinbase, original test contains empty account.",
)
@pytest.mark.valid_from("Frontier")
@pytest.mark.with_all_create_opcodes
@pytest.mark.parametrize("transaction_create", [False, True])
@pytest.mark.parametrize(
"operation",
[Operation.SUICIDE, Operation.SUICIDE_TO_ITSELF],
)
def test_create_suicide_during_transaction_create(
state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
create_opcode: Op,
operation: Operation,
transaction_create: bool,
) -> None:
"""Contract init code calls suicide then measures different metrics."""
if create_opcode != Op.CREATE and transaction_create:
pytest.skip(f"Excluded: {create_opcode} with transaction_create=True")
sender = pre.fund_eoa()
contract_deploy = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
+ create_opcode(size=Op.CALLDATASIZE(), value=Op.CALLVALUE())
)
contract_success = pre.deploy_contract(code=Op.SSTORE(1, 1))
self_destruct_destination = pre.deploy_contract(code=Op.STOP)
contract_after_suicide = pre.deploy_contract(code=Op.SSTORE(1, 1))
contract_initcode = Initcode(
initcode_prefix=Op.CALL(address=contract_success, gas=Op.SUB(Op.GAS, 100_000))
+ Op.SELFDESTRUCT(
Op.ADDRESS if operation == Operation.SUICIDE_TO_ITSELF else self_destruct_destination
)
+ Op.CALL(address=contract_after_suicide, gas=Op.SUB(Op.GAS, 100_000)),
deploy_code=Op.SSTORE(0, 1),
)
expected_create_address = compute_create_address(
address=sender if transaction_create else contract_deploy,
nonce=1 if transaction_create else 0,
initcode=contract_initcode,
opcode=create_opcode,
)
tx_value = 100
tx = Transaction(
gas_limit=1_000_000,
to=None if transaction_create else contract_deploy,
data=contract_initcode,
value=tx_value,
sender=sender,
protected=fork >= Byzantium,
)
post = {
contract_success: Account(storage={1: 1}),
self_destruct_destination: Account(
balance=0 if operation == Operation.SUICIDE_TO_ITSELF else tx_value
),
contract_deploy: Account(storage={0: 0}),
contract_after_suicide: Account(storage={1: 0}), # suicide eats all gas
expected_create_address: Account.NONEXISTENT,
}
state_test(env=Environment(), pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/create/test_create_one_byte.py | tests/frontier/create/test_create_one_byte.py | """
The test calls CREATE in a loop deploying 1-byte contracts with all possible
byte values, records in storage the values that failed to deploy.
"""
import pytest
from ethereum_test_forks import Byzantium, Fork, London
from ethereum_test_tools import (
Account,
Address,
Alloc,
Bytecode,
Environment,
StateTestFiller,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import compute_create_address
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_FirstByte_loopFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1615"],
coverage_missed_reason=(
"coinbase is deleted in original test (tx.gas_price==env.base_fee), "
"opcodes lt, iszero, jump are no longer used"
),
)
@pytest.mark.valid_from("Frontier")
@pytest.mark.with_all_create_opcodes
def test_create_one_byte(
state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
create_opcode: Op,
) -> None:
"""Run create deploys with single bytes for each byte."""
initcode: dict[int, Bytecode] = {}
for byte in range(256):
initcode[byte] = Op.MSTORE8(0, byte) + Op.RETURN(0, 1)
initcode_length = 10
sender = pre.fund_eoa()
expect_post = Storage()
# make a subcontract that deploys code, because deploy 0xef eats ALL gas
create_contract = pre.deploy_contract(
code=Op.MSTORE(0, Op.CALLDATALOAD(0))
+ Op.MSTORE(32, create_opcode(offset=32 - initcode_length, size=initcode_length))
+ Op.RETURN(32, 32)
)
code = pre.deploy_contract(
nonce=1,
code=Op.MSTORE(0, Op.PUSH32(bytes(initcode[0])))
+ sum(
[
Op.MSTORE8(23, opcode) # correct the deploy byte
+ Op.CALL(
gas=50_000,
address=create_contract,
args_size=32,
ret_offset=32,
ret_size=32,
)
+ Op.POP # remove call result from stack for vm trace files
+ Op.SSTORE(
opcode,
Op.MLOAD(32),
)
for opcode, _ in initcode.items()
],
)
+ Op.SSTORE(256, 1),
)
created_accounts: dict[int, Address] = {}
for opcode, opcode_init in initcode.items():
ef_exception = opcode == 239 and fork >= London
created_accounts[opcode] = compute_create_address(
address=create_contract,
salt=0,
nonce=opcode + 1,
initcode=opcode_init,
opcode=create_opcode,
)
if not ef_exception:
expect_post[opcode] = created_accounts[opcode]
expect_post[256] = 1
tx = Transaction(
gas_limit=14_000_000,
to=code,
data=b"",
nonce=0,
sender=sender,
protected=fork >= Byzantium,
)
post = {
code: Account(storage=expect_post),
}
for opcode, _ in initcode.items():
ef_exception = opcode == 239 and fork >= London
if not ef_exception:
post[created_accounts[opcode]] = Account(code=bytes.fromhex(f"{opcode:02x}"))
state_test(env=Environment(), pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/create/test_create_suicide_store.py | tests/frontier/create/test_create_suicide_store.py | """
Test dynamically created address is still callable and perform storage
operations after being called for self destruct in a call.
"""
from enum import IntEnum
import pytest
from ethereum_test_forks import Byzantium, Fork
from ethereum_test_tools import (
Account,
Alloc,
CalldataCase,
Initcode,
StateTestFiller,
Storage,
Switch,
Transaction,
compute_create_address,
)
from ethereum_test_tools import Opcodes as Op
class Operation(IntEnum):
"""Enum for created contract actions."""
SUICIDE = 1
ADD_STORAGE = 2
GET_STORAGE = 3
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreateTest/CREATE_AcreateB_BSuicide_BStoreFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1867"],
coverage_missed_reason="Converting solidity code result in following opcode not being used:"
"PUSH29, DUP4, DUP8, SWAP2, ISZERO, AND, MUL, DIV, CALLVALUE, EXTCODESIZE."
"Changed 0x11 address to new address (no check for precompile).",
)
@pytest.mark.valid_from("Frontier")
@pytest.mark.with_all_create_opcodes
def test_create_suicide_store(
state_test: StateTestFiller,
fork: Fork,
pre: Alloc,
create_opcode: Op,
) -> None:
"""
Create dynamic contract that suicides, then called to push some storage
and then called to return that storage value.
"""
tload_support = fork.valid_opcodes().count(Op.TLOAD)
subcall_storage = 0x12
suicide_initcode: Initcode = Initcode(
deploy_code=Switch(
cases=[
CalldataCase(
value=Operation.SUICIDE,
action=Op.SELFDESTRUCT(pre.empty_account()),
),
CalldataCase(
value=Operation.ADD_STORAGE,
action=Op.SSTORE(1, Op.ADD(Op.SLOAD(1), subcall_storage))
+ (
Op.TSTORE(1, Op.ADD(Op.TLOAD(1), subcall_storage))
if tload_support
else Op.STOP
),
),
CalldataCase(
value=Operation.GET_STORAGE,
action=(
Op.MSTORE(0, Op.ADD(Op.SLOAD(1), Op.TLOAD(1)))
if tload_support
else Op.MSTORE(0, Op.SLOAD(1))
)
+ Op.RETURN(0, 32),
),
],
default_action=None,
)
)
sender = pre.fund_eoa()
expect_post = Storage()
slot_create_result = 0
slot_after_suicide_sstore_return = 1
slot_program_success = 2
create_contract = pre.deploy_contract(
code=Op.CALLDATACOPY(size=Op.CALLDATASIZE())
+ Op.SSTORE(slot_create_result, create_opcode(size=Op.CALLDATASIZE()))
# Put some storage before suicide
+ Op.MSTORE(64, Operation.ADD_STORAGE)
+ Op.CALL(
gas=Op.SUB(Op.GAS, 300_000),
address=Op.SLOAD(slot_create_result),
args_offset=64,
args_size=32,
)
+ Op.MSTORE(64, Operation.SUICIDE)
+ Op.CALL(
gas=Op.SUB(Op.GAS, 300_000),
address=Op.SLOAD(slot_create_result),
args_offset=64,
args_size=32,
)
# Put some storage after suicide
+ Op.MSTORE(64, Operation.ADD_STORAGE)
+ Op.CALL(
gas=Op.SUB(Op.GAS, 300_000),
address=Op.SLOAD(slot_create_result),
args_offset=64,
args_size=32,
)
+ Op.MSTORE(64, Operation.GET_STORAGE)
+ Op.CALL(
gas=Op.SUB(Op.GAS, 300_000),
address=Op.SLOAD(0),
args_offset=64,
args_size=32,
ret_offset=100,
ret_size=32,
)
+ Op.SSTORE(slot_after_suicide_sstore_return, Op.MLOAD(100))
+ Op.SSTORE(slot_program_success, 1)
)
expected_create_address = compute_create_address(
address=create_contract, nonce=1, initcode=suicide_initcode, opcode=create_opcode
)
expect_post[slot_create_result] = expected_create_address
expect_post[slot_after_suicide_sstore_return] = (
subcall_storage * 2 # added value before and after suicide
+ (subcall_storage * 2 if tload_support else 0) # tload value added
)
expect_post[slot_program_success] = 1
tx = Transaction(
gas_limit=1_000_000,
to=create_contract,
data=suicide_initcode,
sender=sender,
protected=fork >= Byzantium,
)
post = {
create_contract: Account(storage=expect_post),
expected_create_address: Account.NONEXISTENT,
}
state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/create/__init__.py | tests/frontier/create/__init__.py | """Test examples, patterns, templates to use in .py tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/__init__.py | tests/prague/__init__.py | """
Test cases for EVM functionality introduced in Prague, [EIP-7600: Hardfork Meta
- Pectra](https://eip.directory/eips/eip-7600).
Devnet Specifications:
- [ethpandaops/pectra-devnet-5](https://notes.ethereum.org/@ethpandaops/pectra-devnet-5).
- [ethpandaops/pectra-devnet-4](https://notes.ethereum.org/@ethpandaops/pectra-devnet-4).
- [ethpandaops/pectra-devnet-3](https://notes.ethereum.org/@ethpandaops/pectra-devnet-3).
- [ethpandaops/pectra-devnet-2](https://notes.ethereum.org/@ethpandaops/pectra-devnet-2).
- [ethpandaops/pectra-devnet-1](https://notes.ethereum.org/@ethpandaops/pectra-devnet-1).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py | tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py | """
Tests [EIP-7251: Execution layer triggerable consolidation](https://eips.ethereum.org/EIPS/eip-7251).
"""
from typing import List
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Bytecode,
Transaction,
generate_system_contract_error_test,
)
from ethereum_test_tools import Macros as Om
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import Requests
from .helpers import (
ConsolidationRequest,
ConsolidationRequestTransaction,
)
from .spec import Spec as Spec_EIP7251
from .spec import ref_spec_7251
REFERENCE_SPEC_GIT_PATH: str = ref_spec_7251.git_path
REFERENCE_SPEC_VERSION: str = ref_spec_7251.version
pytestmark: pytest.MarkDecorator = pytest.mark.valid_from("Prague")
def consolidation_list_with_custom_fee(n: int) -> List[ConsolidationRequest]: # noqa: D103
return [
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec_EIP7251.get_fee(10),
)
for i in range(n)
]
@pytest.mark.parametrize(
"requests_list",
[
pytest.param(
[],
id="empty_request_list",
),
pytest.param(
[
*consolidation_list_with_custom_fee(1),
],
id="1_consolidation_request",
),
pytest.param(
[
*consolidation_list_with_custom_fee(2),
],
id="2_consolidation_requests",
),
pytest.param(
[
*consolidation_list_with_custom_fee(3),
],
id="3_consolidation_requests",
),
pytest.param(
[
*consolidation_list_with_custom_fee(4),
],
id="4_consolidation_requests",
),
pytest.param(
[
*consolidation_list_with_custom_fee(5),
],
id="5_consolidation_requests",
),
],
)
@pytest.mark.pre_alloc_group("separate", reason="Deploys custom consolidation contract bytecode")
def test_extra_consolidations(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
requests_list: List[ConsolidationRequest],
) -> None:
"""
Test how clients were to behave with more than 2 consolidations per block.
"""
modified_code: Bytecode = Bytecode()
memory_offset: int = 0
amount_of_requests: int = 0
for consolidation_request in requests_list:
# update memory_offset with the correct value
consolidation_request_bytes_amount: int = len(bytes(consolidation_request))
assert consolidation_request_bytes_amount == 116, (
"Expected consolidation request to be of size 116 but got size "
f"{consolidation_request_bytes_amount}"
)
memory_offset += consolidation_request_bytes_amount
modified_code += Om.MSTORE(bytes(consolidation_request), memory_offset)
amount_of_requests += 1
modified_code += Op.RETURN(0, Op.MSIZE())
pre[Spec_EIP7251.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS] = Account(
code=modified_code,
nonce=1,
balance=0,
)
# given a list of consolidation requests construct a consolidation request
# transaction
consolidation_request_transaction = ConsolidationRequestTransaction(requests=requests_list)
# prepare consolidation senders
consolidation_request_transaction.update_pre(pre=pre)
# get transaction list
txs: List[Transaction] = consolidation_request_transaction.transactions()
blockchain_test(
pre=pre,
blocks=[
Block(
txs=txs,
requests_hash=Requests(*requests_list),
),
],
post={},
)
@pytest.mark.parametrize(
"system_contract", [Address(Spec_EIP7251.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS)]
)
@pytest.mark.pre_alloc_group("separate", reason="Deploys custom consolidation contract bytecode")
@generate_system_contract_error_test( # type: ignore[arg-type]
max_gas_limit=Spec_EIP7251.SYSTEM_CALL_GAS_LIMIT,
)
def test_system_contract_errors() -> None:
"""
Test system contract raising different errors when called by the system
account at the end of the block execution.
To see the list of generated tests, please refer to the
`generate_system_contract_error_test` decorator definition.
"""
pass
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/spec.py | tests/prague/eip7251_consolidations/spec.py | """Defines EIP-7251 specification constants and functions."""
from dataclasses import dataclass
from ethereum_test_tools import Address
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7251 = ReferenceSpec("EIPS/eip-7251.md", "f29c0eda1e7495c071ef5b25fbd850dc3ef6bfdf")
# Constants
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-7251 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7251#execution-layer.
"""
CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS = 0x0000BBDDC7CE488642FB579F8B00F3A590007251
CONSOLIDATION_REQUEST_PREDEPLOY_SENDER = Address(0x13D1913D623E6A9D8811736359E50FD31FE54FCA)
SYSTEM_ADDRESS = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE
SYSTEM_CALL_GAS_LIMIT = 30_000_000
EXCESS_CONSOLIDATION_REQUESTS_STORAGE_SLOT = 0
CONSOLIDATION_REQUEST_COUNT_STORAGE_SLOT = 1
CONSOLIDATION_REQUEST_QUEUE_HEAD_STORAGE_SLOT = (
2 # Pointer to head of the consolidation request message queue
)
CONSOLIDATION_REQUEST_QUEUE_TAIL_STORAGE_SLOT = (
3 # Pointer to the tail of the consolidation request message queue
)
CONSOLIDATION_REQUEST_QUEUE_STORAGE_OFFSET = (
4 # The start memory slot of the in-state consolidation request message queue
)
MAX_CONSOLIDATION_REQUESTS_PER_BLOCK = (
2 # Maximum number of consolidation requests that can be de-queued into a block
)
TARGET_CONSOLIDATION_REQUESTS_PER_BLOCK = 1
MIN_CONSOLIDATION_REQUEST_FEE = 1
CONSOLIDATION_REQUEST_FEE_UPDATE_FRACTION = 17
EXCESS_INHIBITOR = 1181
@staticmethod
def fake_exponential(factor: int, numerator: int, denominator: int) -> int:
"""Calculate the consolidation request fee."""
i = 1
output = 0
numerator_accumulator = factor * denominator
while numerator_accumulator > 0:
output += numerator_accumulator
numerator_accumulator = (numerator_accumulator * numerator) // (denominator * i)
i += 1
return output // denominator
@staticmethod
def get_fee(excess_consolidation_requests: int) -> int:
"""Calculate the fee for the excess consolidation requests."""
return Spec.fake_exponential(
Spec.MIN_CONSOLIDATION_REQUEST_FEE,
excess_consolidation_requests,
Spec.CONSOLIDATION_REQUEST_FEE_UPDATE_FRACTION,
)
@staticmethod
def get_excess_consolidation_requests(previous_excess: int, count: int) -> int:
"""Calculate the new excess consolidation requests."""
if previous_excess + count > Spec.TARGET_CONSOLIDATION_REQUESTS_PER_BLOCK:
return previous_excess + count - Spec.TARGET_CONSOLIDATION_REQUESTS_PER_BLOCK
return 0
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/test_consolidations.py | tests/prague/eip7251_consolidations/test_consolidations.py | """
Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
"""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Address,
Alloc,
Block,
BlockchainTestFiller,
BlockException,
Environment,
Header,
Macros,
Requests,
TestAddress,
TestAddress2,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import (
ConsolidationRequest,
ConsolidationRequestContract,
ConsolidationRequestInteractionBase,
ConsolidationRequestTransaction,
get_n_fee_increment_blocks,
)
from .spec import Spec, ref_spec_7251
REFERENCE_SPEC_GIT_PATH = ref_spec_7251.git_path
REFERENCE_SPEC_VERSION = ref_spec_7251.version
pytestmark = pytest.mark.valid_from("Prague")
@pytest.mark.parametrize(
"blocks_consolidation_requests",
[
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x01,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa_equal_pubkeys",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=-1,
target_pubkey=-2,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa_max_pubkeys",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=0,
valid=False,
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa_insufficient_fee",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
calldata_modifier=lambda x: x[:-1],
valid=False,
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa_input_too_short",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
calldata_modifier=lambda x: x + b"\x00",
valid=False,
)
],
),
],
],
id="single_block_single_consolidation_request_from_eoa_input_too_long",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
),
],
),
],
],
id="single_block_multiple_consolidation_request_from_same_eoa",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
)
],
),
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_multiple_consolidation_request_from_different_eoa",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK)
],
)
],
],
id="single_block_max_consolidation_requests_from_eoa",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=0,
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_multiple_consolidation_request_first_reverts",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=0,
),
]
),
],
],
id="single_block_multiple_consolidation_request_last_reverts",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
gas_limit=136_534 - 1,
valid=False,
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_multiple_consolidation_request_first_oog",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
gas_limit=102_334 - 1,
valid=False,
),
]
),
],
],
id="single_block_multiple_consolidation_request_last_oog",
),
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
]
)
],
],
id="multiple_block_above_max_consolidation_requests_from_eoa",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_single_consolidation_request_from_contract",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
),
],
],
id="single_block_multiple_consolidation_requests_from_contract",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
call_depth=3,
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_call_depth_3",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
gas_limit=6_000_000,
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
call_depth=100,
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_call_depth_high",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x00,
target_pubkey=0x01,
fee=0,
)
]
+ [
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(1, Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_first_reverts",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
]
+ [
ConsolidationRequest(
source_pubkey=-1,
target_pubkey=-2,
fee=0,
)
],
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_last_reverts",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=-1,
target_pubkey=-2,
gas_limit=100,
fee=Spec.get_fee(0),
valid=False,
)
]
+ [
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
gas_limit=1_000_000,
fee=Spec.get_fee(0),
valid=True,
)
for i in range(1, Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_first_oog",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
gas_limit=1_000_000,
valid=True,
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
]
+ [
ConsolidationRequest(
source_pubkey=-1,
target_pubkey=-2,
gas_limit=100,
fee=Spec.get_fee(0),
valid=False,
)
],
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_last_oog",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
valid=False,
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
extra_code=Op.REVERT(0, 0),
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_caller_reverts",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=i * 2,
target_pubkey=i * 2 + 1,
fee=Spec.get_fee(0),
valid=False,
)
for i in range(Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK * 5)
],
extra_code=Macros.OOG(),
),
],
],
id="single_block_multiple_consolidation_requests_from_contract_caller_oog",
),
pytest.param(
# Test the first 50 fee increments
get_n_fee_increment_blocks(50),
id="multiple_block_fee_increments",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
),
],
],
id="single_block_single_consolidation_request_delegatecall_staticcall_callcode",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
call_depth=3,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
call_depth=3,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
call_depth=3,
),
],
],
id="single_block_single_consolidation_request_delegatecall_staticcall_callcode_call_depth_3",
),
pytest.param(
[
[
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
call_depth=1024,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
call_depth=1024,
),
ConsolidationRequestContract(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
call_depth=1024,
),
],
],
id="single_block_single_consolidation_request_delegatecall_staticcall_callcode_call_depth_high",
),
],
)
@pytest.mark.pre_alloc_group(
    "consolidation_requests", reason="Tests standard consolidation request functionality"
)
def test_consolidation_requests(
    blockchain_test: BlockchainTestFiller,
    blocks: List[Block],
    pre: Alloc,
) -> None:
    """
    Test making a consolidation request to the beacon chain.

    The `blocks` fixture (conftest) builds the block list from the
    parametrized `blocks_consolidation_requests`, attaching the expected
    per-block `requests_hash` to each header; `pre` is populated as a side
    effect of the same fixture chain.
    """
    # No explicit post-state: correctness is asserted through the per-block
    # header `requests_hash` verification carried by the `blocks` fixture.
    blockchain_test(
        genesis_environment=Environment(),
        pre=pre,
        post={},
        blocks=blocks,
    )
@pytest.mark.parametrize(
"requests,block_body_override_requests,exception",
[
pytest.param(
[],
[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
source_address=Address(0),
),
],
BlockException.INVALID_REQUESTS,
id="no_consolidations_non_empty_requests_list",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
]
),
],
[],
BlockException.INVALID_REQUESTS,
id="single_consolidation_request_empty_requests_list",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
]
),
],
[
ConsolidationRequest(
source_pubkey=0x00,
target_pubkey=0x02,
source_address=TestAddress,
)
],
BlockException.INVALID_REQUESTS,
id="single_consolidation_request_source_public_key_mismatch",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
]
),
],
[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x00,
source_address=TestAddress,
)
],
BlockException.INVALID_REQUESTS,
id="single_consolidation_request_target_public_key_mismatch",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
]
),
],
[
ConsolidationRequest(
source_pubkey=0x02,
target_pubkey=0x01,
source_address=TestAddress,
)
],
BlockException.INVALID_REQUESTS,
id="single_consolidation_request_pubkeys_swapped",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
)
],
),
],
[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
source_address=TestAddress2,
)
],
BlockException.INVALID_REQUESTS,
id="single_consolidation_request_source_address_mismatch",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
),
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(0),
),
],
),
],
[
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
source_address=TestAddress,
),
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
source_address=TestAddress,
),
],
BlockException.INVALID_REQUESTS,
id="two_consolidation_requests_out_of_order",
),
pytest.param(
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
)
],
),
],
[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
source_address=TestAddress,
),
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
source_address=TestAddress,
),
],
BlockException.INVALID_REQUESTS,
id="single_consolidation_requests_duplicate_in_requests_list",
),
],
)
@pytest.mark.exception_test
@pytest.mark.pre_alloc_group(
    "consolidation_requests", reason="Tests standard consolidation request functionality"
)
def test_consolidation_requests_negative(
    pre: Alloc,
    fork: Fork,
    blockchain_test: BlockchainTestFiller,
    requests: List[ConsolidationRequestInteractionBase],
    block_body_override_requests: List[ConsolidationRequest],
    exception: BlockException,
) -> None:
    """
    Test blocks where the requests list and the actual consolidation requests
    that happened in the block's transactions do not match.
    """
    for interaction in requests:
        interaction.update_pre(pre)

    # No previous block so fee is the base
    base_fee = 1
    all_valid_requests: List[ConsolidationRequest] = []
    for interaction in requests:
        all_valid_requests.extend(interaction.valid_requests(base_fee))
    included_requests = all_valid_requests[: Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK]

    blockchain_test(
        genesis_environment=Environment(),
        pre=pre,
        post={},
        blocks=[
            Block(
                txs=sum((r.transactions() for r in requests), []),
                header_verify=Header(
                    requests_hash=Requests(*included_requests),
                ),
                requests=(
                    Requests(*block_body_override_requests).requests_list
                    if block_body_override_requests is not None
                    else None
                ),
                exception=exception,
            )
        ],
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/test_consolidations_during_fork.py | tests/prague/eip7251_consolidations/test_consolidations_during_fork.py | """
Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
"""
from os.path import realpath
from pathlib import Path
from typing import List
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Transaction,
)
from .helpers import ConsolidationRequest, ConsolidationRequestTransaction
from .spec import Spec, ref_spec_7251
REFERENCE_SPEC_GIT_PATH = ref_spec_7251.git_path
REFERENCE_SPEC_VERSION = ref_spec_7251.version
pytestmark = pytest.mark.valid_at_transition_to("Prague")
BLOCKS_BEFORE_FORK = 2
@pytest.mark.parametrize(
"blocks_consolidation_requests",
[
pytest.param(
[
[], # No consolidation requests, but we deploy the contract
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(10),
# Pre-fork consolidation request
valid=False,
)
],
),
],
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x03,
target_pubkey=0x04,
fee=Spec.get_fee(10),
# First post-fork consolidation request, will
# not be included because the inhibitor is
# cleared at the end of the block
valid=False,
)
],
),
],
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x05,
target_pubkey=0x06,
fee=Spec.get_fee(0),
# First consolidation that is valid
valid=True,
)
],
),
],
],
id="one_valid_request_second_block_after_fork",
),
],
)
@pytest.mark.parametrize("timestamp", [15_000 - BLOCKS_BEFORE_FORK], ids=[""])
@pytest.mark.pre_alloc_group(
    "separate", reason="Deploys consolidation system contract at fork transition"
)
def test_consolidation_requests_during_fork(
    blockchain_test: BlockchainTestFiller,
    blocks: List[Block],
    pre: Alloc,
) -> None:
    """
    Test making a consolidation request to the beacon chain at the time of the
    fork.

    The first block's timestamp is offset by ``BLOCKS_BEFORE_FORK`` from
    15_000 so that the system contract can be deployed (via transaction in
    the first block) before the fork activates.
    """
    # We need to delete the deployed contract that comes by default in the pre
    # state.
    pre[Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS] = Account(
        balance=0,
        code=bytes(),
        nonce=0,
        storage={},
    )
    # Load the canonical deployment transaction shipped next to this test
    # module and recover its sender from the signature.
    with open(Path(realpath(__file__)).parent / "contract_deploy_tx.json", mode="r") as f:
        deploy_tx = Transaction.model_validate_json(f.read()).with_signature_and_sender()
    deployer_address = deploy_tx.sender
    assert deployer_address is not None
    assert Address(deployer_address) == Spec.CONSOLIDATION_REQUEST_PREDEPLOY_SENDER
    tx_gas_price = deploy_tx.gas_price
    assert tx_gas_price is not None
    # Fund the deployer with exactly the worst-case gas cost of the tx.
    deployer_required_balance = deploy_tx.gas_limit * tx_gas_price
    pre.fund_address(Spec.CONSOLIDATION_REQUEST_PREDEPLOY_SENDER, deployer_required_balance)
    # Append the deployment transaction to the first block
    blocks[0].txs.append(deploy_tx)
    blockchain_test(
        genesis_environment=Environment(),
        pre=pre,
        post={},
        blocks=blocks,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/conftest.py | tests/prague/eip7251_consolidations/conftest.py | """Fixtures for the EIP-7251 consolidations tests."""
from itertools import zip_longest
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Block, Header, Requests
from .helpers import ConsolidationRequest, ConsolidationRequestInteractionBase
from .spec import Spec
@pytest.fixture
def update_pre(
    pre: Alloc,
    blocks_consolidation_requests: List[List[ConsolidationRequestInteractionBase]],
) -> None:
    """
    Initialize the account state.

    Each consolidation interaction declares its own pre-state requirements;
    this fixture applies all of them to ``pre`` as a side effect.
    """
    for interaction in (
        interaction
        for block_interactions in blocks_consolidation_requests
        for interaction in block_interactions
    ):
        interaction.update_pre(pre)
@pytest.fixture
def included_requests(
    update_pre: None,  # Fixture is used for its side effects
    blocks_consolidation_requests: List[List[ConsolidationRequestInteractionBase]],
) -> List[List[ConsolidationRequest]]:
    """
    Return the list of consolidation requests that should be included in each
    block.

    Mirrors the system-contract behavior: at most
    ``MAX_CONSOLIDATION_REQUESTS_PER_BLOCK`` requests are dequeued per block,
    the rest carry over, and the excess counter (which drives the fee) is
    updated from the number of requests *made* in the block, not the number
    dequeued.
    """
    excess_consolidation_requests = 0
    carry_over_requests: List[ConsolidationRequest] = []
    per_block_included_requests: List[List[ConsolidationRequest]] = []
    for block_consolidation_requests in blocks_consolidation_requests:
        # Get fee for the current block
        current_minimum_fee = Spec.get_fee(excess_consolidation_requests)
        # With the fee, get the valid consolidation requests for the current
        # block
        current_block_requests = []
        for w in block_consolidation_requests:
            current_block_requests += w.valid_requests(current_minimum_fee)
        # Get the consolidation requests that should be included in the block
        pending_requests = carry_over_requests + current_block_requests
        per_block_included_requests.append(
            pending_requests[: Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK]
        )
        carry_over_requests = pending_requests[Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK :]
        # Update the excess consolidation requests
        excess_consolidation_requests = Spec.get_excess_consolidation_requests(
            excess_consolidation_requests,
            len(current_block_requests),
        )
    while carry_over_requests:
        # Keep adding blocks until all consolidation requests are included
        # (no new requests are made here, so the excess is not updated).
        per_block_included_requests.append(
            carry_over_requests[: Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK]
        )
        carry_over_requests = carry_over_requests[Spec.MAX_CONSOLIDATION_REQUESTS_PER_BLOCK :]
    return per_block_included_requests
@pytest.fixture
def timestamp() -> int:
    """Timestamp assigned to the first generated block."""
    first_block_timestamp = 1
    return first_block_timestamp
@pytest.fixture
def blocks(
    fork: Fork,
    update_pre: None,  # Fixture is used for its side effects
    blocks_consolidation_requests: List[List[ConsolidationRequestInteractionBase]],
    included_requests: List[List[ConsolidationRequest]],
    timestamp: int,
) -> List[Block]:
    """Return the list of blocks that should be included in the test."""
    blocks: List[Block] = []
    # `included_requests` may be longer than `blocks_consolidation_requests`
    # when requests carry over past the last request-producing block, hence
    # zip_longest with an empty fill value on both sides.
    for block_requests, block_included_requests in zip_longest(  # type: ignore
        blocks_consolidation_requests,
        included_requests,
        fillvalue=[],
    ):
        header_verify: Header | None = None
        if fork.header_requests_required(
            block_number=len(blocks) + 1,
            timestamp=timestamp,
        ):
            # Post-activation blocks must commit to the expected requests.
            header_verify = Header(requests_hash=Requests(*block_included_requests))
        else:
            # Pre-activation blocks must not have produced any requests.
            assert not block_included_requests
        blocks.append(
            Block(
                txs=sum((r.transactions() for r in block_requests), []),
                header_verify=header_verify,
                timestamp=timestamp,
            )
        )
        timestamp += 1  # One-second cadence between generated blocks.
    return blocks + [
        Block(
            header_verify=Header(requests_hash=Requests()),
            timestamp=timestamp,
        )
    ]  # Add an empty block at the end to verify that no more consolidation
    # requests are included
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/helpers.py | tests/prague/eip7251_consolidations/helpers.py | """Helpers for the EIP-7251 consolidation tests."""
from dataclasses import dataclass, field
from functools import cached_property
from itertools import count
from typing import Callable, ClassVar, List
from ethereum_test_tools import EOA, Address, Alloc, Bytecode, Transaction
from ethereum_test_tools import ConsolidationRequest as ConsolidationRequestBase
from ethereum_test_tools import Opcodes as Op
from .spec import Spec
class ConsolidationRequest(ConsolidationRequestBase):
    """Class used to describe a consolidation request in a test."""

    fee: int = 0
    """Fee to be paid to the system contract for the consolidation request."""

    valid: bool = True
    """Whether the consolidation request is valid or not."""

    gas_limit: int = 1_000_000
    """Gas limit for the call."""

    calldata_modifier: Callable[[bytes], bytes] = lambda x: x
    """Calldata modifier function."""

    # All request interactions target the EIP-7251 pre-deploy address.
    interaction_contract_address: ClassVar[Address] = Address(
        Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS
    )

    @property
    def value(self) -> int:
        """
        Return the value of the call to the consolidation request contract,
        equal to the fee to be paid.
        """
        return self.fee

    @cached_property
    def calldata(self) -> bytes:
        """
        Return the calldata needed to call the consolidation request contract
        and make the consolidation.

        The payload is the concatenation of the source and target public
        keys; `calldata_modifier` lets tests corrupt it (truncate/extend).
        """
        return self.calldata_modifier(self.source_pubkey + self.target_pubkey)

    def with_source_address(self, source_address: Address) -> "ConsolidationRequest":
        """
        Return a new instance of the consolidation request with the source
        address set.
        """
        return self.copy(source_address=source_address)
@dataclass(kw_only=True)
class ConsolidationRequestInteractionBase:
    """
    Base class for all types of consolidation transactions we want to test.

    Subclasses define how the requests reach the pre-deploy contract
    (directly from an EOA, or relayed through a contract).
    """

    sender_balance: int = 1_000_000_000_000_000_000
    """Balance of the account that sends the transaction."""

    sender_account: EOA | None = None
    """Account that will send the transaction."""

    requests: List[ConsolidationRequest]
    """Consolidation requests to be included in the block."""

    def transactions(self) -> List[Transaction]:
        """Return the transactions that perform the consolidation requests."""
        raise NotImplementedError

    def update_pre(self, pre: Alloc) -> None:
        """Set up the pre-state required by this interaction (mutates `pre`)."""
        raise NotImplementedError

    def valid_requests(self, current_minimum_fee: int) -> List[ConsolidationRequest]:
        """
        Return the list of consolidation requests that should be valid in the
        block.
        """
        raise NotImplementedError
@dataclass(kw_only=True)
class ConsolidationRequestTransaction(ConsolidationRequestInteractionBase):
    """
    Consolidation request(s) sent straight from an externally owned account
    to the pre-deploy system contract, one transaction per request.
    """

    def update_pre(self, pre: Alloc) -> None:
        """Fund a fresh EOA used as sender of every request transaction."""
        self.sender_account = pre.fund_eoa(self.sender_balance)

    def transactions(self) -> List[Transaction]:
        """Build one transaction per consolidation request."""
        assert self.sender_account is not None, "Sender account not initialized"
        txs: List[Transaction] = []
        for consolidation in self.requests:
            txs.append(
                Transaction(
                    gas_limit=consolidation.gas_limit,
                    gas_price=1_000_000_000,
                    to=consolidation.interaction_contract_address,
                    value=consolidation.value,
                    data=consolidation.calldata,
                    sender=self.sender_account,
                )
            )
        return txs

    def valid_requests(self, current_minimum_fee: int) -> List[ConsolidationRequest]:
        """Requests expected to be accepted at the given minimum fee."""
        assert self.sender_account is not None, "Sender account not initialized"
        accepted: List[ConsolidationRequest] = []
        for consolidation in self.requests:
            if consolidation.valid and consolidation.fee >= current_minimum_fee:
                accepted.append(consolidation.with_source_address(self.sender_account))
        return accepted
@dataclass(kw_only=True)
class ConsolidationRequestContract(ConsolidationRequestInteractionBase):
    """Class used to describe a consolidation originated from a contract."""

    tx_gas_limit: int = 10_000_000
    """Gas limit for the transaction."""

    contract_balance: int = 1_000_000_000_000_000_000
    """
    Balance of the contract that will make the call to the pre-deploy contract.
    """

    contract_address: Address | None = None
    """
    Address of the contract that will make the call to the pre-deploy contract.
    """

    entry_address: Address | None = None
    """Address to send the transaction to."""

    call_type: Op = field(default_factory=lambda: Op.CALL)
    """Type of call to be used to make the consolidation request."""

    call_depth: int = 2
    """Frame depth of the pre-deploy contract when it executes the call."""

    extra_code: Bytecode = field(default_factory=Bytecode)
    """Extra code to be added to the contract code."""

    @property
    def contract_code(self) -> Bytecode:
        """Contract code used by the relay contract."""
        code = Bytecode()
        current_offset = 0
        for r in self.requests:
            # Only CALL/CALLCODE take a value argument; DELEGATECALL and
            # STATICCALL cannot forward the fee.
            value_arg = [r.value] if self.call_type in (Op.CALL, Op.CALLCODE) else []
            # Copy this request's slice of the transaction calldata to memory
            # offset 0, then call the pre-deploy contract with it, discarding
            # the call's success flag (POP).
            code += Op.CALLDATACOPY(0, current_offset, len(r.calldata)) + Op.POP(
                self.call_type(
                    # gas_limit == -1 means forward all available gas.
                    Op.GAS if r.gas_limit == -1 else r.gas_limit,
                    r.interaction_contract_address,
                    *value_arg,
                    0,
                    len(r.calldata),
                    0,
                    0,
                )
            )
            current_offset += len(r.calldata)
        return code + self.extra_code

    def transactions(self) -> List[Transaction]:
        """Return a transaction for the consolidation request."""
        assert self.entry_address is not None, "Entry address not initialized"
        return [
            Transaction(
                gas_limit=self.tx_gas_limit,
                gas_price=1_000_000_000,
                to=self.entry_address,
                value=0,
                # Concatenate all requests' calldata; the relay contract
                # slices it back out per request (see `contract_code`).
                data=b"".join(r.calldata for r in self.requests),
                sender=self.sender_account,
            )
        ]

    def update_pre(self, pre: Alloc) -> None:
        """Deploy the relay contract(s) and fund the sender (mutates `pre`)."""
        self.sender_account = pre.fund_eoa(self.sender_balance)
        self.contract_address = pre.deploy_contract(
            code=self.contract_code, balance=self.contract_balance
        )
        self.entry_address = self.contract_address
        if self.call_depth > 2:
            # Deploy a chain of forwarder contracts so the pre-deploy contract
            # is reached at the requested call depth; each forwarder relays
            # the full calldata to the previous entry point via CALL.
            for _ in range(1, self.call_depth - 1):
                self.entry_address = pre.deploy_contract(
                    code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
                    + Op.POP(
                        Op.CALL(
                            Op.GAS,
                            self.entry_address,
                            0,
                            0,
                            Op.CALLDATASIZE,
                            0,
                            0,
                        )
                    )
                )

    def valid_requests(self, current_minimum_fee: int) -> List[ConsolidationRequest]:
        """Return the list of consolidation requests that are valid."""
        assert self.contract_address is not None, "Contract address not initialized"
        return [
            r.with_source_address(self.contract_address)
            for r in self.requests
            if r.valid and r.value >= current_minimum_fee
        ]
def get_n_fee_increments(n: int) -> List[int]:
    """
    Return the first ``n`` excess-request counts at which the consolidation
    fee increases over the previous increment.
    """
    increments: List[int] = []
    previous_fee = 1  # Fee charged at zero excess requests.
    for excess in count(0):
        current_fee = Spec.get_fee(excess)
        if current_fee > previous_fee:
            increments.append(excess)
            previous_fee = current_fee
            if len(increments) == n:
                break
    return increments
def get_n_fee_increment_blocks(n: int) -> List[List[ConsolidationRequestContract]]:
    """
    Return N blocks that should be included in the test such that each
    subsequent block has an increasing fee for the consolidation requests.

    This is done by calculating the number of consolidations required to reach
    the next fee increment and creating a block with that number of
    consolidation requests plus the number of consolidations required to reach
    the target.
    """
    blocks = []
    previous_excess = 0
    consolidation_index = 0
    previous_fee = 0
    for required_excess_consolidations in get_n_fee_increments(n):
        # Requests needed this block so the excess afterwards lands exactly on
        # the next increment boundary (the per-block target is absorbed).
        consolidations_required = (
            required_excess_consolidations
            + Spec.TARGET_CONSOLIDATION_REQUESTS_PER_BLOCK
            - previous_excess
        )
        fee = Spec.get_fee(previous_excess)
        # Sanity check: each generated block must pay a strictly higher fee.
        assert fee > previous_fee
        blocks.append(
            [
                ConsolidationRequestContract(
                    requests=[
                        ConsolidationRequest(
                            # Unique pubkeys, monotonically increasing across
                            # all generated blocks.
                            source_pubkey=i * 2,
                            target_pubkey=i * 2 + 1,
                            fee=fee,
                        )
                        for i in range(
                            consolidation_index, consolidation_index + consolidations_required
                        )
                    ],
                )
            ],
        )
        previous_fee = fee
        consolidation_index += consolidations_required
        previous_excess = required_excess_consolidations
    return blocks
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/__init__.py | tests/prague/eip7251_consolidations/__init__.py | """Cross-client EIP-7251 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/test_contract_deployment.py | tests/prague/eip7251_consolidations/test_contract_deployment.py | """
Tests [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
"""
from os.path import realpath
from pathlib import Path
from typing import Any, Generator
import pytest
from ethereum_test_forks import Fork, Prague
from ethereum_test_tools import (
Address,
Alloc,
Block,
Header,
Requests,
Transaction,
generate_system_contract_deploy_test,
)
from .helpers import ConsolidationRequest
from .spec import Spec, ref_spec_7251
REFERENCE_SPEC_GIT_PATH = ref_spec_7251.git_path
REFERENCE_SPEC_VERSION = ref_spec_7251.version
@pytest.mark.pre_alloc_group(
    "separate", reason="Deploys consolidation system contract at hardcoded predeploy address"
)
@generate_system_contract_deploy_test(
    fork=Prague,
    tx_json_path=Path(realpath(__file__)).parent / "contract_deploy_tx.json",
    expected_deploy_address=Address(Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS),
    fail_on_empty_code=True,
)
def test_system_contract_deployment(
    *,
    fork: Fork,
    pre: Alloc,
    **kwargs: Any,
) -> Generator[Block, None, None]:
    """
    Verify calling the consolidation system contract after deployment.

    The decorator handles the deployment itself; this generator yields the
    follow-up block(s) that exercise the freshly deployed contract.
    """
    sender = pre.fund_eoa()
    consolidation_request = ConsolidationRequest(
        source_pubkey=0x01,
        target_pubkey=0x02,
        fee=Spec.get_fee(0),
        source_address=sender,
    )
    # Extra funding to cover the call value (the fee) on top of gas costs.
    pre.fund_address(sender, consolidation_request.value)
    intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
    test_transaction_gas = intrinsic_gas_calculator(calldata=consolidation_request.calldata)
    test_transaction = Transaction(
        data=consolidation_request.calldata,
        # Generous headroom over the intrinsic cost for contract execution.
        gas_limit=test_transaction_gas * 10,
        to=Spec.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS,
        sender=sender,
        value=consolidation_request.value,
    )
    yield Block(
        txs=[test_transaction],
        header=Header(
            # The block must commit to the single consolidation request.
            requests_hash=Requests(consolidation_request),
        ),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7251_consolidations/test_eip_mainnet.py | tests/prague/eip7251_consolidations/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of [EIP-7251: Increase the MAX_EFFECTIVE_BALANCE](https://eips.ethereum.org/EIPS/eip-7251).
""" # noqa: E501
from typing import List
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
)
from .helpers import ConsolidationRequest, ConsolidationRequestTransaction
from .spec import Spec, ref_spec_7251
REFERENCE_SPEC_GIT_PATH = ref_spec_7251.git_path
REFERENCE_SPEC_VERSION = ref_spec_7251.version
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
@pytest.mark.parametrize(
"blocks_consolidation_requests",
[
pytest.param(
[
[
ConsolidationRequestTransaction(
requests=[
ConsolidationRequest(
source_pubkey=0x01,
target_pubkey=0x02,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_consolidation_request",
),
],
)
def test_eip_7251(
blockchain_test: BlockchainTestFiller,
blocks: List[Block],
pre: Alloc,
) -> None:
"""Test making a consolidation request."""
blockchain_test(
pre=pre,
post={},
blocks=blocks,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7685_general_purpose_el_requests/spec.py | tests/prague/eip7685_general_purpose_el_requests/spec.py | """
Common procedures to test
[EIP-7685: General purpose execution
layer requests](https://eips.ethereum.org/EIPS/eip-7685).
"""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7685 = ReferenceSpec("EIPS/eip-7685.md", "67ecb425d78f1d40c4f1cb957f3214afd0ece945")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7685_general_purpose_el_requests/conftest.py | tests/prague/eip7685_general_purpose_el_requests/conftest.py | """Fixtures for the EIP-7685 deposit tests."""
from typing import List, SupportsBytes
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockException,
Bytes,
EngineAPIError,
Header,
Requests,
)
from ..eip6110_deposits.helpers import DepositInteractionBase, DepositRequest
from ..eip7002_el_triggerable_withdrawals.helpers import (
WithdrawalRequest,
WithdrawalRequestInteractionBase,
)
from ..eip7251_consolidations.helpers import (
ConsolidationRequest,
ConsolidationRequestInteractionBase,
)
@pytest.fixture
def block_body_override_requests(
request: pytest.FixtureRequest,
) -> List[DepositRequest | WithdrawalRequest | ConsolidationRequest] | None:
"""
List of requests that overwrite the requests in the header. None by
default.
"""
if hasattr(request, "param"):
return request.param
return None
@pytest.fixture
def correct_requests_hash_in_header() -> bool:
"""
Whether to include the correct requests hash in the header so the
calculated block hash is correct, even though the requests in the new
payload parameters might be wrong.
"""
return False
@pytest.fixture
def exception() -> BlockException | None:
"""Block exception expected by the tests. None by default."""
return None
@pytest.fixture
def engine_api_error_code(
block_body_override_requests: List[Bytes | SupportsBytes] | None,
) -> EngineAPIError | None:
"""Engine API error code if any."""
if block_body_override_requests is None:
return None
block_body_override_requests_bytes = [bytes(r) for r in block_body_override_requests]
if any(len(r) <= 1 for r in block_body_override_requests_bytes):
return EngineAPIError.InvalidParams
def is_monotonically_increasing(requests: List[bytes]) -> bool:
return all(x[0] < y[0] for x, y in zip(requests, requests[1:], strict=False))
if not is_monotonically_increasing(block_body_override_requests_bytes):
return EngineAPIError.InvalidParams
return None
@pytest.fixture
def blocks(
pre: Alloc,
requests: List[
DepositInteractionBase
| WithdrawalRequestInteractionBase
| ConsolidationRequestInteractionBase
],
block_body_override_requests: List[Bytes | SupportsBytes] | None,
correct_requests_hash_in_header: bool,
exception: BlockException | None,
engine_api_error_code: EngineAPIError | None,
) -> List[Block]:
"""List of blocks that comprise the test."""
valid_requests_list: List[DepositRequest | WithdrawalRequest | ConsolidationRequest] = []
# Single block therefore base fee
withdrawal_request_fee = 1
consolidation_request_fee = 1
for r in requests:
r.update_pre(pre)
if isinstance(r, DepositInteractionBase):
valid_requests_list += r.valid_requests(10**18)
elif isinstance(r, WithdrawalRequestInteractionBase):
valid_requests_list += r.valid_requests(withdrawal_request_fee)
elif isinstance(r, ConsolidationRequestInteractionBase):
valid_requests_list += r.valid_requests(consolidation_request_fee)
valid_requests = Requests(*valid_requests_list)
rlp_modifier: Header | None = None
if correct_requests_hash_in_header:
rlp_modifier = Header(
requests_hash=valid_requests,
)
return [
Block(
txs=sum((r.transactions() for r in requests), []),
header_verify=Header(requests_hash=valid_requests),
requests=block_body_override_requests,
exception=exception,
rlp_modifier=rlp_modifier,
engine_api_error_code=engine_api_error_code,
)
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py | tests/prague/eip7685_general_purpose_el_requests/test_multi_type_requests.py | """
Tests EIP-7685 General purpose execution layer requests.
Cross testing for withdrawal and deposit request for
[EIP-7685: General purpose execution layer requests](https://eips.ethereum.org/EIPS/eip-7685).
"""
from itertools import permutations
from typing import Callable, Dict, Generator, List, Tuple
import pytest
from ethereum_test_base_types.base_types import Address
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
BlockException,
Bytecode,
Bytes,
Environment,
Header,
Requests,
Storage,
TestAddress,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools.utility.pytest import ParameterSet
from ethereum_test_types import EOA
from ..eip6110_deposits.helpers import DepositContract, DepositRequest, DepositTransaction
from ..eip6110_deposits.spec import Spec as Spec_EIP6110
from ..eip7002_el_triggerable_withdrawals.helpers import (
WithdrawalRequest,
WithdrawalRequestContract,
WithdrawalRequestTransaction,
)
from ..eip7002_el_triggerable_withdrawals.spec import Spec as Spec_EIP7002
from ..eip7251_consolidations.helpers import (
ConsolidationRequest,
ConsolidationRequestContract,
ConsolidationRequestTransaction,
)
from ..eip7251_consolidations.spec import Spec as Spec_EIP7251
from .spec import ref_spec_7685
REFERENCE_SPEC_GIT_PATH: str = ref_spec_7685.git_path
REFERENCE_SPEC_VERSION: str = ref_spec_7685.version
pytestmark: pytest.MarkDecorator = pytest.mark.valid_from("Prague")
def single_deposit(i: int) -> DepositRequest: # noqa: D103
return DepositRequest(
pubkey=(i * 3),
withdrawal_credentials=(i * 3) + 1,
amount=32_000_000_000,
signature=(i * 3) + 2,
index=i,
)
def single_deposit_from_eoa(i: int) -> DepositTransaction: # noqa: D103
return DepositTransaction(requests=[single_deposit(i)])
def single_deposit_from_contract(i: int) -> DepositContract: # noqa: D103
return DepositContract(requests=[single_deposit(i)])
def single_withdrawal(i: int) -> WithdrawalRequest: # noqa: D103
return WithdrawalRequest(
validator_pubkey=i + 1,
amount=0,
fee=1,
)
def single_withdrawal_from_eoa(i: int) -> WithdrawalRequestTransaction: # noqa: D103
return WithdrawalRequestTransaction(requests=[single_withdrawal(i)])
def single_withdrawal_from_contract(i: int) -> WithdrawalRequestContract: # noqa: D103
return WithdrawalRequestContract(requests=[single_withdrawal(i)])
def single_consolidation(i: int) -> ConsolidationRequest: # noqa: D103
return ConsolidationRequest(
source_pubkey=(i * 2),
target_pubkey=(i * 2) + 1,
fee=1,
)
def single_consolidation_from_eoa(i: int) -> ConsolidationRequestTransaction: # noqa: D103
return ConsolidationRequestTransaction(requests=[single_consolidation(i)])
def single_consolidation_from_contract(i: int) -> ConsolidationRequestContract: # noqa: D103
return ConsolidationRequestContract(requests=[single_consolidation(i)])
def get_permutations(n: int = 3) -> Generator[ParameterSet, None, None]:
"""Return possible permutations of the requests from an EOA."""
requests: list = [
(
"deposit",
single_deposit(0),
),
(
"withdrawal",
single_withdrawal(0),
),
(
"consolidation",
single_consolidation(0),
),
]
for perm in permutations(requests, n):
yield pytest.param([p[1] for p in perm], id="+".join([p[0] for p in perm]))
def get_eoa_permutations(n: int = 3) -> Generator[ParameterSet, None, None]:
"""Return possible permutations of the requests from an EOA."""
requests: list = [
(
"deposit_from_eoa",
single_deposit_from_eoa(0),
),
(
"withdrawal_from_eoa",
single_withdrawal_from_eoa(0),
),
(
"consolidation_from_eoa",
single_consolidation_from_eoa(0),
),
]
for perm in permutations(requests, n):
yield pytest.param([p[1] for p in perm], id="+".join([p[0] for p in perm]))
def get_contract_permutations(n: int = 3) -> Generator[ParameterSet, None, None]:
"""Return possible permutations of the requests from a contract."""
requests: list = [
(
"deposit_from_contract",
single_deposit_from_contract(0),
),
(
"withdrawal_from_contract",
single_withdrawal_from_contract(0),
),
(
"consolidation_from_contract",
single_consolidation_from_contract(0),
),
]
for perm in permutations(requests, n):
yield pytest.param([p[1] for p in perm], id="+".join([p[0] for p in perm]))
@pytest.mark.parametrize(
"requests",
[
*get_eoa_permutations(),
*get_contract_permutations(),
pytest.param(
[
single_deposit_from_eoa(0),
single_withdrawal_from_eoa(0),
single_deposit_from_contract(1),
],
id="deposit_from_eoa+withdrawal_from_eoa+deposit_from_contract",
),
pytest.param(
[
single_deposit_from_eoa(0),
single_consolidation_from_eoa(0),
single_deposit_from_contract(1),
],
id="deposit_from_eoa+consolidation_from_eoa+deposit_from_contract",
),
pytest.param(
[
single_consolidation_from_eoa(0),
single_deposit_from_eoa(0),
single_consolidation_from_contract(1),
],
id="consolidation_from_eoa+deposit_from_eoa+consolidation_from_contract",
),
pytest.param(
[
single_consolidation_from_eoa(0),
single_withdrawal_from_eoa(0),
single_consolidation_from_contract(1),
],
id="consolidation_from_eoa+withdrawal_from_eoa+consolidation_from_contract",
),
pytest.param(
[
single_withdrawal_from_eoa(0),
single_consolidation_from_eoa(0),
single_withdrawal_from_contract(1),
],
id="withdrawal_from_eoa+consolidation_from_eoa+withdrawal_from_contract",
),
pytest.param(
[
single_withdrawal_from_eoa(0),
single_deposit_from_eoa(0),
single_withdrawal_from_contract(1),
],
id="withdrawal_from_eoa+deposit_from_eoa+withdrawal_from_contract",
),
pytest.param(
[],
id="empty_requests",
),
# contract: consolidation + withdrawal
pytest.param(
[
single_withdrawal_from_eoa(0),
single_consolidation_from_contract(0),
single_withdrawal_from_contract(1),
],
id="withdrawal_from_eoa+consolidation_from_contract+withdrawal_from_contract",
),
pytest.param(
[
single_deposit_from_eoa(0),
single_consolidation_from_contract(0),
single_withdrawal_from_contract(0),
],
id="deposit_from_eoa+consolidation_from_contract+withdrawal_from_contract",
),
pytest.param(
[
single_consolidation_from_eoa(0),
single_consolidation_from_contract(1),
single_withdrawal_from_contract(0),
],
id="consolidation_from_eoa+consolidation_from_contract+withdrawal_from_contract",
),
# contract: consolidation + deposit
pytest.param(
[
single_withdrawal_from_eoa(0),
single_consolidation_from_contract(0),
single_deposit_from_contract(0),
],
id="withdrawal_from_eoa+consolidation_from_contract+deposit_from_contract",
),
pytest.param(
[
single_deposit_from_eoa(0),
single_consolidation_from_contract(0),
single_deposit_from_contract(1),
],
id="deposit_from_eoa+consolidation_from_contract+deposit_from_contract",
),
pytest.param(
[
single_consolidation_from_eoa(0),
single_consolidation_from_contract(1),
single_deposit_from_contract(0),
],
id="consolidation_from_eoa+consolidation_from_contract+deposit_from_contract",
),
# contract: withdrawal + deposit
pytest.param(
[
single_withdrawal_from_eoa(0),
single_withdrawal_from_contract(1),
single_deposit_from_contract(0),
],
id="withdrawal_from_eoa+withdrawal_from_contract+deposit_from_contract",
),
pytest.param(
[
single_deposit_from_eoa(0),
single_withdrawal_from_contract(0),
single_deposit_from_contract(1),
],
id="deposit_from_eoa+withdrawal_from_contract+deposit_from_contract",
),
pytest.param(
[
single_consolidation_from_eoa(0),
single_withdrawal_from_contract(0),
single_deposit_from_contract(0),
],
id="consolidation_from_eoa+withdrawal_from_contract+deposit_from_contract",
),
# testing upper limits of each request type per slot if it exists
pytest.param(
[
single_consolidation_from_contract(0),
single_consolidation_from_contract(1),
# the following performs single_withdrawal_from_contract(0) to
# (16)
*[
single_withdrawal_from_contract(i)
for i in range(
0,
16,
)
],
# single_withdrawal_from_contract(16) not allowed cuz only 16
# MAX WITHDRAWALS PER BLOCK (EIP-7002)
# the following performs single_deposit_from_contract(0) to
# (18)
*[
single_deposit_from_contract(i)
for i in range(
0,
18,
)
],
],
id="max_withdrawals_per_slot+max_consolidations_per_slot+unlimited_deposits_per_slot",
),
],
)
@pytest.mark.pre_alloc_group(
"multi_type_requests", reason="Tests combinations of multiple request types"
)
def test_valid_multi_type_requests(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
blocks: List[Block],
) -> None:
"""
Test making a deposit to the beacon chain deposit contract and a withdrawal
in the same block.
"""
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=blocks,
)
@pytest.mark.parametrize("requests", [*get_permutations()])
@pytest.mark.pre_alloc_group(
"multi_type_requests", reason="Tests combinations of multiple request types"
)
def test_valid_multi_type_request_from_same_tx(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
requests: List[DepositRequest | WithdrawalRequest | ConsolidationRequest],
fork: Fork,
) -> None:
"""
Test making a deposit to the beacon chain deposit contract and a withdrawal
in the same tx.
"""
withdrawal_request_fee: int = 1
consolidation_request_fee: int = 1
calldata: bytes = b""
contract_code: Bytecode = Bytecode()
total_value: int = 0
storage: Storage = Storage()
for request in requests:
calldata_start: int = len(calldata)
current_calldata: bytes = request.calldata
calldata += current_calldata
contract_code += Op.CALLDATACOPY(0, calldata_start, len(current_calldata))
call_contract_address: int = 0
value: int = 0
if isinstance(request, DepositRequest):
call_contract_address = Spec_EIP6110.DEPOSIT_CONTRACT_ADDRESS
value = request.value
elif isinstance(request, WithdrawalRequest):
call_contract_address = Spec_EIP7002.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS
value = withdrawal_request_fee
elif isinstance(request, ConsolidationRequest):
call_contract_address = Spec_EIP7251.CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS
value = consolidation_request_fee
total_value += value
contract_code += Op.SSTORE(
storage.store_next(1),
Op.CALL(
address=call_contract_address,
value=value,
args_offset=0,
args_size=len(current_calldata),
),
)
sender: EOA = pre.fund_eoa()
contract_address: Address = pre.deploy_contract(
code=contract_code,
)
tx: Transaction = Transaction(
gas_limit=10_000_000,
to=contract_address,
value=total_value,
data=calldata,
sender=sender,
)
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={
contract_address: Account(
storage=storage,
)
},
blocks=[
Block(
txs=[tx],
header_verify=Header(
requests_hash=Requests(
*[
request.with_source_address(contract_address)
for request in sorted(requests, key=lambda r: r.type)
],
)
),
)
],
)
def invalid_requests_block_combinations(
*,
correct_requests_hash_in_header: bool,
) -> Callable[[Fork], List[ParameterSet]]:
"""
Return a list of invalid request combinations for the given fork.
In the event of a new request type, the `all_request_types` dictionary
should be updated with the new request type and its corresponding
request-generating transaction.
Returned parameters are: requests, block_body_override_requests, exception
"""
def func(fork: Fork) -> List[ParameterSet]:
assert fork.max_request_type() == 2, "Test update is needed for new request types"
all_request_types: Dict[
str,
Tuple[
DepositTransaction
| WithdrawalRequestTransaction
| ConsolidationRequestTransaction,
DepositRequest | WithdrawalRequest | ConsolidationRequest,
],
] = {
"deposit": (
single_deposit_from_eoa(0), # eoa_request
single_deposit(0), # block_request
),
"withdrawal": (
single_withdrawal_from_eoa(0), # eoa_request
single_withdrawal(0).with_source_address(TestAddress), # block_request
),
"consolidation": (
single_consolidation_from_eoa(0), # eoa_request
single_consolidation(0).with_source_address(TestAddress), # block_request
),
}
expected_exceptions: List[BlockException] = [BlockException.INVALID_REQUESTS]
if correct_requests_hash_in_header:
# The client also might reject the block with an invalid-block-hash
# error because it might convert the requests in the new payload
# parameters to the requests hash in the header and compare it with
# the block hash.
expected_exceptions.append(BlockException.INVALID_BLOCK_HASH)
# - Empty requests list with invalid hash
combinations: List[ParameterSet] = [
pytest.param(
[],
[
bytes([i]) for i in range(fork.max_request_type() + 1)
], # Using empty requests, calculate the hash using an invalid
# calculation method:
# sha256(sha256(b"\0") ++ sha256(b"\1") ++ sha256(b"\2") ++
# ...)
expected_exceptions,
id="no_requests_and_invalid_hash_calculation_method",
),
pytest.param(
[],
[
bytes([]) for _ in range(fork.max_request_type() + 1)
], # Using empty requests, calculate the hash using an invalid
# calculation method:
# sha256(sha256(b"") ++ sha256(b"") ++ sha256(b"") ++ ...)
expected_exceptions,
id="no_requests_and_invalid_hash_calculation_method_2",
),
]
# - Missing request or request type byte tests
for request_type, (eoa_request, block_request) in all_request_types.items():
combinations.extend(
[
pytest.param(
[eoa_request],
[block_request], # The request type byte missing because we need to
# use `Requests`
expected_exceptions,
id=f"single_{request_type}_missing_type_byte",
),
pytest.param(
[eoa_request],
[],
expected_exceptions,
id=f"single_{request_type}_empty_requests_list",
),
]
)
# - Incorrect order tests
correct_order: List[Bytes] = Requests(
*[r[1] for r in all_request_types.values()]
).requests_list # Requests automatically adds the type byte
correct_order_transactions: List[
DepositTransaction | WithdrawalRequestTransaction | ConsolidationRequestTransaction
] = [r[0] for r in all_request_types.values()]
# Send first element to the end
combinations.append(
pytest.param(
correct_order_transactions[1:] + [correct_order_transactions[0]],
correct_order[1:] + [correct_order[0]],
expected_exceptions,
id="incorrect_order_first_request_at_end",
),
)
# Send second element to the end
combinations.append(
pytest.param(
[correct_order_transactions[0]]
+ correct_order_transactions[2:]
+ [correct_order_transactions[1]],
[correct_order[0]] + correct_order[2:] + [correct_order[1]],
expected_exceptions,
id="incorrect_order_second_request_at_end",
),
)
# Bring last element to the beginning
combinations.append(
pytest.param(
[correct_order_transactions[-1]] + correct_order_transactions[:-1],
[correct_order[-1]] + correct_order[:-1],
expected_exceptions,
id="incorrect_order_last_request_at_beginning",
),
)
# - Duplicate request tests
for request_type, (eoa_request, block_request) in all_request_types.items():
combinations.append(
pytest.param(
[eoa_request],
Requests(block_request).requests_list * 2,
expected_exceptions,
id=f"duplicate_{request_type}_request",
),
)
# - Extra invalid request tests
combinations.append(
pytest.param(
correct_order_transactions,
correct_order + [b""],
expected_exceptions,
id="extra_empty_request",
),
)
combinations.append(
pytest.param(
correct_order_transactions,
correct_order + [bytes([fork.max_request_type() + 1])],
expected_exceptions,
id="extra_invalid_type_request_with_no_data",
),
)
combinations.append(
pytest.param(
correct_order_transactions,
correct_order + [bytes([fork.max_request_type() + 1, 0x00])],
expected_exceptions,
id="extra_invalid_type_request_with_data_0x00",
),
)
combinations.append(
pytest.param(
correct_order_transactions,
correct_order + [bytes([fork.max_request_type() + 1, 0x01])],
expected_exceptions,
id="extra_invalid_type_request_with_data_0x01",
),
)
combinations.append(
pytest.param(
correct_order_transactions,
correct_order + [bytes([fork.max_request_type() + 1, 0xFF])],
expected_exceptions,
id="extra_invalid_type_request_with_data_0xff",
),
)
return combinations
return func
@pytest.mark.parametrize_by_fork(
"requests,block_body_override_requests,exception",
invalid_requests_block_combinations(correct_requests_hash_in_header=False),
)
@pytest.mark.exception_test
@pytest.mark.pre_alloc_group(
"multi_type_requests", reason="Tests combinations of multiple request types"
)
def test_invalid_multi_type_requests(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
blocks: List[Block],
) -> None:
"""
Negative testing for all request types in the same block.
In these tests, the requests hash in the header reflects what's received in
the parameters portion of the `engine_newPayloadVX` call, so the block hash
calculation might pass if a client copies the info received verbatim, but
block validation must fail after the block is executed (via RLP or Engine
API).
"""
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=blocks,
)
@pytest.mark.parametrize_by_fork(
"requests,block_body_override_requests,exception",
invalid_requests_block_combinations(correct_requests_hash_in_header=True),
)
@pytest.mark.parametrize("correct_requests_hash_in_header", [True])
@pytest.mark.blockchain_test_engine_only
@pytest.mark.exception_test
@pytest.mark.pre_alloc_group(
"multi_type_requests", reason="Tests combinations of multiple request types"
)
def test_invalid_multi_type_requests_engine(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
blocks: List[Block],
) -> None:
"""
Negative testing for all request types in the same block with incorrect
parameters in the Engine API new payload parameters, but with the correct
requests hash in the header so the block hash is correct.
In these tests, the requests hash in the header reflects what's actually in
the executed block, so the block might execute properly if the client
ignores the requests in the new payload parameters.
Note that the only difference between the engine version produced by this
test and the ones produced by `test_invalid_multi_type_requests` is the
`blockHash` value in the new payloads, which is calculated using different
request hashes for each test, but since the request hash is not a value
that is included in the payload, it might not be immediately apparent.
Also these tests would not fail if the block is imported via RLP (syncing
from a peer), so we only generate the BlockchainTestEngine for them.
The client also might reject the block with an invalid-block-hash error
because it might convert the requests in the new payload parameters to the
requests hash in the header and compare it with the block hash.
"""
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=blocks,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7685_general_purpose_el_requests/__init__.py | tests/prague/eip7685_general_purpose_el_requests/__init__.py | """Cross-client EIP-7685 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/spec.py | tests/prague/eip6110_deposits/spec.py | """Defines EIP-6110 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_6110 = ReferenceSpec("EIPS/eip-6110.md", "cbe8bf6a28fa1d096f9756af3513675849c4158e")
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-6110 specifications as defined at
https://eips.ethereum.org/EIPS/eip-6110.
"""
DEPOSIT_CONTRACT_ADDRESS = 0x00000000219AB540356CBB839CBE05303D7705FA # Mainnet
DEPOSIT_EVENT_SIGNATURE_HASH = (
0x649BBC62D0E31342AFEA4E5CD82D4049E7E1EE912FC0889AA790803BE39038C5
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/conftest.py | tests/prague/eip6110_deposits/conftest.py | """Fixtures for the EIP-6110 deposit tests."""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Block, BlockException, Header, Requests, Transaction
from .helpers import DepositInteractionBase, DepositRequest
@pytest.fixture
def update_pre(pre: Alloc, requests: List[DepositInteractionBase]) -> None:
"""
Init state of the accounts. Every deposit transaction defines their own
pre-state requirements, and this fixture aggregates them all.
"""
for d in requests:
d.update_pre(pre)
@pytest.fixture
def txs(
requests: List[DepositInteractionBase],
update_pre: None, # Fixture is used for its side effects
) -> List[Transaction]:
"""List of transactions to include in the block."""
txs = []
for r in requests:
txs += r.transactions()
return txs
@pytest.fixture
def block_body_override_requests() -> List[DepositRequest] | None:
"""
List of requests that overwrite the requests in the header. None by
default.
"""
return None
@pytest.fixture
def exception() -> BlockException | None:
"""Block exception expected by the tests. None by default."""
return None
@pytest.fixture
def included_requests(
requests: List[DepositInteractionBase],
) -> List[DepositRequest]:
"""
Return the list of deposit requests that should be included in each block.
"""
valid_requests: List[DepositRequest] = []
for d in requests:
valid_requests += d.valid_requests(10**18)
return valid_requests
@pytest.fixture
def blocks(
fork: Fork,
included_requests: List[DepositRequest],
block_body_override_requests: List[DepositRequest] | None,
txs: List[Transaction],
exception: BlockException | None,
) -> List[Block]:
"""List of blocks that comprise the test."""
return [
Block(
txs=txs,
header_verify=Header(
requests_hash=Requests(
*included_requests,
),
),
requests=Requests(
*block_body_override_requests,
).requests_list
if block_body_override_requests is not None
else None,
exception=exception,
)
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/helpers.py | tests/prague/eip6110_deposits/helpers.py | """Helpers for the EIP-6110 deposit tests."""
from dataclasses import dataclass, field
from functools import cached_property
from hashlib import sha256 as sha256_hashlib
from typing import Callable, ClassVar, List
from ethereum_test_tools import EOA, Address, Alloc, Bytecode, Hash, Transaction
from ethereum_test_tools import DepositRequest as DepositRequestBase
from ethereum_test_tools import Opcodes as Op
from .spec import Spec
def sha256(*args: bytes) -> bytes:
"""Return sha256 hash of the input."""
return sha256_hashlib(b"".join(args)).digest()
def create_deposit_log_bytes(
pubkey_size: int = 48,
pubkey_data: bytes = b"",
pubkey_offset: int = 160,
withdrawal_credentials_size: int = 32,
withdrawal_credentials_data: bytes = b"",
withdrawal_credentials_offset: int = 256,
amount_size: int = 8,
amount_data: bytes = b"",
amount_offset: int = 320,
signature_size: int = 96,
signature_data: bytes = b"",
signature_offset: int = 384,
index_size: int = 8,
index_data: bytes = b"",
index_offset: int = 512,
) -> bytes:
"""Create the deposit log bytes."""
result = bytearray(576)
offset = 0
def write_uint256(value: int) -> None:
nonlocal offset
result[offset : offset + 32] = value.to_bytes(32, byteorder="big")
offset += 32
def write_bytes(data: bytes, size: int) -> None:
nonlocal offset
padded = data.ljust(size, b"\x00")
result[offset : offset + size] = padded
offset += size
write_uint256(pubkey_offset)
write_uint256(withdrawal_credentials_offset)
write_uint256(amount_offset)
write_uint256(signature_offset)
write_uint256(index_offset)
write_uint256(pubkey_size)
write_bytes(pubkey_data, 64)
write_uint256(withdrawal_credentials_size)
write_bytes(withdrawal_credentials_data, 32)
write_uint256(amount_size)
write_bytes(amount_data, 32)
write_uint256(signature_size)
write_bytes(signature_data, 96)
write_uint256(index_size)
write_bytes(index_data, 32)
return bytes(result)
class DepositRequest(DepositRequestBase):
"""Deposit request descriptor."""
valid: bool = True
"""Whether the deposit request is valid or not."""
gas_limit: int = 1_000_000
"""Gas limit for the call."""
calldata_modifier: Callable[[bytes], bytes] = lambda x: x
"""Calldata modifier function."""
extra_wei: int = 0
"""
Extra amount in wei to be sent with the deposit. If this value modulo 10**9
is not zero, the deposit will be invalid. The value can be negative but if
the total value is negative, an exception will be raised.
"""
interaction_contract_address: ClassVar[Address] = Address(Spec.DEPOSIT_CONTRACT_ADDRESS)
@cached_property
def value(self) -> int:
"""
Return the value of the deposit transaction, equal to the amount in
gwei plus the extra amount in wei.
"""
value = (self.amount * 10**9) + self.extra_wei
if value < 0:
raise ValueError("Value cannot be negative")
return value
@cached_property
def deposit_data_root(self) -> Hash:
"""Return the deposit data root of the deposit."""
pubkey_root = sha256(self.pubkey, b"\x00" * 16)
signature_root = sha256(
sha256(self.signature[:64]), sha256(self.signature[64:], b"\x00" * 32)
)
pubkey_withdrawal_root = sha256(pubkey_root, self.withdrawal_credentials)
amount_bytes = (self.amount).to_bytes(32, byteorder="little")
amount_signature_root = sha256(amount_bytes, signature_root)
return Hash(sha256(pubkey_withdrawal_root, amount_signature_root))
@cached_property
def calldata(self) -> bytes:
"""
Return the calldata needed to call the beacon chain deposit contract
and make the deposit.
deposit(
bytes calldata pubkey,
bytes calldata withdrawal_credentials,
bytes calldata signature,
bytes32 deposit_data_root
)
"""
offset_length = 32
pubkey_offset = offset_length * 3 + len(self.deposit_data_root)
withdrawal_offset = pubkey_offset + offset_length + len(self.pubkey)
signature_offset = withdrawal_offset + offset_length + len(self.withdrawal_credentials)
return self.calldata_modifier(
b"\x22\x89\x51\x18"
+ pubkey_offset.to_bytes(offset_length, byteorder="big")
+ withdrawal_offset.to_bytes(offset_length, byteorder="big")
+ signature_offset.to_bytes(offset_length, byteorder="big")
+ self.deposit_data_root
+ len(self.pubkey).to_bytes(offset_length, byteorder="big")
+ self.pubkey
+ len(self.withdrawal_credentials).to_bytes(offset_length, byteorder="big")
+ self.withdrawal_credentials
+ len(self.signature).to_bytes(offset_length, byteorder="big")
+ self.signature
)
def log(self, *, include_abi_encoding: bool = True) -> bytes:
    """
    Build the 576-byte data payload of the deposit event.

    event DepositEvent(
        bytes pubkey,
        bytes withdrawal_credentials,
        bytes amount,
        bytes signature,
        bytes index
    );

    Args:
        include_abi_encoding: When False, the ABI head (offset) and size
            words are left zeroed and only the raw field payloads are
            written, producing a deliberately malformed log.
    """
    buf = bytearray(576)
    if include_abi_encoding:
        # Full 32-byte big-endian words at fixed positions: five tail
        # offsets, then the five field sizes. Writing whole words is
        # equivalent to setting the two low bytes, since the buffer is
        # zero-initialized.
        abi_words = (
            (0, 0xA0),  # offset: pubkey (160)
            (32, 0x100),  # offset: withdrawal_credentials (256)
            (64, 0x140),  # offset: amount (320)
            (96, 0x180),  # offset: signature (384)
            (128, 0x200),  # offset: index (512)
            (160, 0x30),  # size: pubkey (48)
            (256, 0x20),  # size: withdrawal_credentials (32)
            (320, 0x08),  # size: amount (8)
            (384, 0x60),  # size: signature (96)
            (512, 0x08),  # size: index (8)
        )
        for word_start, word_value in abi_words:
            buf[word_start : word_start + 32] = word_value.to_bytes(32, byteorder="big")
    # Field payloads; each tail is padded to a 32-byte boundary, hence the
    # extra padding added to the cursor after every write.
    cursor = 192
    buf[cursor : cursor + len(self.pubkey)] = self.pubkey  # [192:240]
    cursor += 48 + len(self.pubkey)
    buf[cursor : cursor + len(self.withdrawal_credentials)] = (
        self.withdrawal_credentials
    )  # [288:320]
    cursor += 32 + len(self.withdrawal_credentials)
    buf[cursor : cursor + 8] = self.amount.to_bytes(8, byteorder="little")  # [352:360]
    cursor += 56 + 8
    buf[cursor : cursor + len(self.signature)] = self.signature  # [416:512]
    cursor += 32 + len(self.signature)
    buf[cursor : cursor + 8] = self.index.to_bytes(8, byteorder="little")  # [544:552]
    return bytes(buf)
def with_source_address(self, source_address: Address) -> "DepositRequest":
    """
    Return an unchanged copy of this request.

    Deposit requests do not commit to the transaction source address, so
    the argument is intentionally ignored; the parameter exists for
    interface parity with other request types.
    """
    del source_address  # unused by deposit requests
    return self.copy()
@dataclass(kw_only=True)
class DepositInteractionBase:
    """
    Base class for all types of deposit transactions we want to test.

    Subclasses describe how the deposit is performed (directly from an
    EOA, or relayed through a contract) and must implement all three
    abstract methods below.
    """

    sender_balance: int = 32_000_000_000_000_000_000 * 100
    """Balance of the account that sends the transaction."""
    sender_account: EOA | None = None
    """Account that sends the transaction."""
    requests: List[DepositRequest]
    """Deposit request to be included in the block."""

    def transactions(self) -> List[Transaction]:
        """Return the transactions that perform the deposit requests."""
        raise NotImplementedError

    def update_pre(self, pre: Alloc) -> None:
        """Set up the pre-state required by this interaction (fund/deploy accounts)."""
        raise NotImplementedError

    def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
        """
        Return the list of deposit requests that should be included in the
        block.
        """
        raise NotImplementedError
@dataclass(kw_only=True)
class DepositTransaction(DepositInteractionBase):
    """
    Deposit interaction performed directly from an externally owned
    account: one transaction per deposit request, sent straight to the
    beacon chain deposit contract.
    """

    def transactions(self) -> List[Transaction]:
        """Return one deposit-contract call transaction per request."""
        assert self.sender_account is not None, "Sender account not initialized"
        txs: List[Transaction] = []
        for deposit in self.requests:
            txs.append(
                Transaction(
                    sender=self.sender_account,
                    to=deposit.interaction_contract_address,
                    gas_limit=deposit.gas_limit,
                    gas_price=0x07,
                    value=deposit.value,
                    data=deposit.calldata,
                )
            )
        return txs

    def update_pre(self, pre: Alloc) -> None:
        """Fund the sending EOA in the pre-state."""
        self.sender_account = pre.fund_eoa(self.sender_balance)

    def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
        """
        Return the list of deposit requests that should be included in the
        block (well-formed and meeting the minimum deposit value).
        """
        accepted: List[DepositRequest] = []
        for deposit in self.requests:
            if deposit.valid and deposit.value >= current_minimum_fee:
                accepted.append(deposit)
        return accepted
@dataclass(kw_only=True)
class DepositContract(DepositInteractionBase):
    """
    Class used to describe a deposit originated from a contract.

    A relay contract is deployed that forwards all deposit calldata to the
    beacon chain deposit contract; optionally the call is routed through a
    chain of proxy contracts to increase the call depth.
    """

    tx_gas_limit: int = 1_000_000
    """Gas limit for the transaction."""
    tx_value: int = 0
    """Value to send with the transaction."""
    contract_balance: int = 32_000_000_000_000_000_000 * 100
    """Balance of the contract that sends the deposit requests."""
    contract_address: Address | None = None
    """Address of the contract that sends the deposit requests."""
    entry_address: Address | None = None
    """Address to send the transaction to."""
    call_type: Op = field(default_factory=lambda: Op.CALL)
    """Type of call to be made to the deposit contract."""
    call_depth: int = 2
    """
    Frame depth of the beacon chain deposit contract when it executes the
    deposit requests.
    """
    extra_code: Bytecode = field(default_factory=Bytecode)
    """
    Extra code to be included in the contract that sends the deposit requests.
    """

    @property
    def contract_code(self) -> Bytecode:
        """Contract code used by the relay contract."""
        code = Bytecode()
        current_offset = 0
        for r in self.requests:
            # CALL and CALLCODE take a `value` stack argument;
            # DELEGATECALL and STATICCALL do not, so the argument list
            # differs per call type.
            value_arg = [r.value] if self.call_type in (Op.CALL, Op.CALLCODE) else []
            # Copy this request's calldata slice to memory, then forward
            # it to the deposit contract, discarding the call's result.
            code += Op.CALLDATACOPY(0, current_offset, len(r.calldata)) + Op.POP(
                self.call_type(
                    # Gas limit of -1 is a sentinel for "forward all gas".
                    Op.GAS if r.gas_limit == -1 else r.gas_limit,
                    r.interaction_contract_address,
                    *value_arg,
                    0,
                    len(r.calldata),
                    0,
                    0,
                )
            )
            current_offset += len(r.calldata)
        return code + self.extra_code

    def transactions(self) -> List[Transaction]:
        """Return a single transaction that triggers the relay contract."""
        return [
            Transaction(
                gas_limit=self.tx_gas_limit,
                gas_price=0x07,
                to=self.entry_address,
                value=self.tx_value,
                # All requests' calldata is concatenated; the relay code
                # slices it back apart with CALLDATACOPY.
                data=b"".join(r.calldata for r in self.requests),
                sender=self.sender_account,
            )
        ]

    def update_pre(self, pre: Alloc) -> None:
        """Fund the sender and deploy the relay (and any proxy) contracts."""
        required_balance = self.sender_balance
        if self.tx_value > 0:
            # Ensure the sender can cover the value plus the maximum fee
            # (gas_price is fixed at 7 in `transactions`).
            required_balance = max(required_balance, self.tx_value + self.tx_gas_limit * 7)
        self.sender_account = pre.fund_eoa(required_balance)
        self.contract_address = pre.deploy_contract(
            code=self.contract_code, balance=self.contract_balance
        )
        self.entry_address = self.contract_address
        if self.call_depth > 2:
            # Chain pass-through proxies in front of the relay so the
            # deposit contract executes at the requested frame depth.
            for _ in range(1, self.call_depth - 1):
                self.entry_address = pre.deploy_contract(
                    code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
                    + Op.POP(
                        Op.CALL(
                            Op.GAS,
                            self.entry_address,
                            0,
                            0,
                            Op.CALLDATASIZE,
                            0,
                            0,
                        )
                    ),
                )

    def valid_requests(self, current_minimum_fee: int) -> List[DepositRequest]:
        """
        Return the list of deposit requests that should be included in the
        block.
        """
        return [d for d in self.requests if d.valid and d.value >= current_minimum_fee]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/test_modified_contract.py | tests/prague/eip6110_deposits/test_modified_contract.py | """
Test variants of the deposit contract which adheres the log-style as described
in EIP-6110.
"""
import pytest
from ethereum_test_exceptions.exceptions import BlockException
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Header,
Requests,
Transaction,
)
from ethereum_test_tools import Macros as Om
from ethereum_test_tools import Opcodes as Op
from .helpers import DepositRequest, create_deposit_log_bytes
from .spec import Spec, ref_spec_6110
pytestmark = [
    pytest.mark.valid_from("Prague"),
    pytest.mark.execute(pytest.mark.skip(reason="modifies pre-alloc")),
]

REFERENCE_SPEC_GIT_PATH = ref_spec_6110.git_path
REFERENCE_SPEC_VERSION = ref_spec_6110.version

# All (argument, layout-field) combinations of the DepositEvent ABI layout;
# `test_invalid_layout` corrupts each one in turn.
EVENT_ARGUMENTS_NAMES = ["pubkey", "withdrawal_credentials", "amount", "signature", "index"]
EVENT_ARGUMENTS_LAYOUT_TYPE = ["size", "offset"]
EVENT_ARGUMENTS = [
    f"{name}_{layout}" for name in EVENT_ARGUMENTS_NAMES for layout in EVENT_ARGUMENTS_LAYOUT_TYPE
]
EVENT_ARGUMENT_VALUES = ["zero", "max_uint256"]

DEFAULT_DEPOSIT_REQUEST = DepositRequest(
    pubkey=0x01,
    withdrawal_credentials=0x02,
    amount=120_000_000_000_000_000,
    signature=0x03,
    index=0x0,
)

DEFAULT_DEPOSIT_REQUEST_LOG_DATA_DICT = {
    "pubkey_data": bytes(DEFAULT_DEPOSIT_REQUEST.pubkey),
    "withdrawal_credentials_data": bytes(DEFAULT_DEPOSIT_REQUEST.withdrawal_credentials),
    # Note: after converting to bytes, the amount is converted to
    # little-endian by `[::-1]` (this happens on-chain also, but there it is
    # done by the solidity contract). The leading "0" pads the hex string to
    # an even number of digits so `bytes.fromhex` accepts it.
    "amount_data": bytes.fromhex("0" + DEFAULT_DEPOSIT_REQUEST.amount.hex()[2:])[::-1],
    "signature_data": bytes(DEFAULT_DEPOSIT_REQUEST.signature),
    "index_data": bytes(DEFAULT_DEPOSIT_REQUEST.index),
}
DEFAULT_REQUEST_LOG = create_deposit_log_bytes(**DEFAULT_DEPOSIT_REQUEST_LOG_DATA_DICT)  # type: ignore
@pytest.mark.parametrize(
    "include_deposit_event",
    [
        pytest.param(
            True,
            marks=pytest.mark.pre_alloc_group(
                "deposit_extra_logs_with_event",
                reason="Deposit contract with Transfer log AND deposit event",
            ),
        ),
        pytest.param(
            False,
            marks=pytest.mark.pre_alloc_group(
                "deposit_extra_logs_no_event",
                reason="Deposit contract with Transfer log but NO deposit event",
            ),
        ),
    ],
)
def test_extra_logs(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    include_deposit_event: bool,
) -> None:
    """
    Test deposit contract emitting more log event types than the ones in
    mainnet.
    """
    # Supplant the mainnet contract with a variant that emits a `Transfer`
    # log. If `include_deposit_event` is `True`, it will also emit a
    # `DepositEvent` log.
    #
    # The Transfer log mirrors a real ERC20 token transfer log (Sepolia):
    # https://sepolia.etherscan.io/tx/
    # 0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f
    # JSON RPC: curl https://sepolia.infura.io/v3/APIKEY \ -X POST \ -H
    # "Content-Type: application/json" \ -d '{"jsonrpc": "2.0", "method":
    # "eth_getLogs", "params": [{"address":
    # "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D", "blockHash":
    # "0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53"}],
    # "id": 1}'
    # {"jsonrpc":"2.0","id":1,"result":
    # [{"removed":false,"logIndex":"0x80","transactionIndex":"0x56",
    # "transactionHash":
    # "0x2d71f3085a796a0539c9cc28acd9073a67cf862260a41475f000dd101279f94f",
    # "blockHash":
    # "0x8062a17fa791f5dbd59ea68891422e3299ca4e80885a89acf3fc706c8bceef53",
    # "blockNumber":"0x794fb5",
    # "address":"0x7f02c3e3c98b133055b8b348b2ac625669ed295d",
    # "data":
    # "0x0000000000000000000000000000000000000000000000000000000000000001",
    # "topics":
    # ["0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef",
    # "0x0000000000000000000000006885e36bfcb68cb383dfe90023a462c03bcb2ae5",
    # "0x00000000000000000000000080b5dc88c98e528bf9cb4b7f0f076ac41da24651"]
    bytecode = Op.LOG3(
        # ERC-20 token transfers are LOG3, since the event topic, the
        # sender, and the receiver are all topics (the sender and receiver
        # are `indexed` in the solidity event).
        0,
        32,
        0xDDF252AD1BE2C89B69C2B068FC378DAA952BA7F163C4A11628F55A4DF523B3EF,
        0x000000000000000000000000AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,
        0x000000000000000000000000BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB,
    )
    requests = Requests()
    if include_deposit_event:
        bytecode += Om.MSTORE(DEFAULT_REQUEST_LOG) + Op.LOG1(
            0,
            len(DEFAULT_REQUEST_LOG),
            Spec.DEPOSIT_EVENT_SIGNATURE_HASH,
        )
        # Only when the DepositEvent log is emitted should the block
        # derive a deposit request.
        requests = Requests(DEFAULT_DEPOSIT_REQUEST)
    bytecode += Op.STOP
    pre[Spec.DEPOSIT_CONTRACT_ADDRESS] = Account(
        code=bytecode,
        nonce=1,
        balance=0,
    )
    sender = pre.fund_eoa()
    tx = Transaction(
        to=Spec.DEPOSIT_CONTRACT_ADDRESS,
        sender=sender,
        gas_limit=100_000,
    )
    blockchain_test(
        pre=pre,
        blocks=[
            Block(
                txs=[tx],
                header_verify=Header(
                    requests_hash=requests,
                ),
            ),
        ],
        post={},
    )
@pytest.mark.parametrize(
    "log_argument,value",
    [
        pytest.param(
            arg,
            val,
            marks=pytest.mark.pre_alloc_group(
                f"deposit_layout_{arg}_{val}",
                reason=f"Deposit contract with invalid {arg} set to {val}",
            ),
        )
        for arg in EVENT_ARGUMENTS
        for val in EVENT_ARGUMENT_VALUES
    ],
)
@pytest.mark.exception_test
def test_invalid_layout(
    blockchain_test: BlockchainTestFiller, pre: Alloc, log_argument: str, value: str
) -> None:
    """
    Test deposit contract emitting logs with invalid layouts (sizes/offsets).
    """
    # Corrupt a single layout field of the otherwise-valid deposit log.
    corrupted_value = 0 if value == "zero" else 2**256 - 1
    log_fields = {**DEFAULT_DEPOSIT_REQUEST_LOG_DATA_DICT}
    log_fields[log_argument] = corrupted_value  # type: ignore
    malformed_log = create_deposit_log_bytes(**log_fields)  # type: ignore

    # Deposit-contract stand-in: emit the malformed DepositEvent and stop.
    contract_code = (
        Om.MSTORE(malformed_log)
        + Op.LOG1(
            0,
            len(malformed_log),
            Spec.DEPOSIT_EVENT_SIGNATURE_HASH,
        )
        + Op.STOP
    )
    pre[Spec.DEPOSIT_CONTRACT_ADDRESS] = Account(
        code=contract_code,
        nonce=1,
        balance=0,
    )
    tx = Transaction(
        to=Spec.DEPOSIT_CONTRACT_ADDRESS,
        sender=pre.fund_eoa(),
        gas_limit=100_000,
    )
    # The block must be rejected: the event cannot be parsed as a deposit.
    blockchain_test(
        pre=pre,
        blocks=[
            Block(
                txs=[tx],
                exception=[
                    BlockException.INVALID_DEPOSIT_EVENT_LAYOUT,
                ],
            ),
        ],
        post={},
    )
@pytest.mark.parametrize(
    "slice_bytes",
    [
        pytest.param(
            True,
            marks=pytest.mark.pre_alloc_group(
                "deposit_log_length_short", reason="Deposit contract with shortened log data"
            ),
        ),
        pytest.param(
            False,
            marks=pytest.mark.pre_alloc_group(
                "deposit_log_length_long", reason="Deposit contract with lengthened log data"
            ),
        ),
    ],
)
@pytest.mark.exception_test
def test_invalid_log_length(
    blockchain_test: BlockchainTestFiller, pre: Alloc, slice_bytes: bool
) -> None:
    """
    Test deposit contract emitting logs with invalid log length (one byte more
    or less).
    """
    if slice_bytes:
        mutated_log = DEFAULT_REQUEST_LOG[:-1]  # one byte short
    else:
        mutated_log = DEFAULT_REQUEST_LOG + b"\x00"  # one byte long

    # Deposit-contract stand-in: emit the wrong-length DepositEvent.
    contract_code = (
        Om.MSTORE(mutated_log)
        + Op.LOG1(
            0,
            len(mutated_log),
            Spec.DEPOSIT_EVENT_SIGNATURE_HASH,
        )
        + Op.STOP
    )
    pre[Spec.DEPOSIT_CONTRACT_ADDRESS] = Account(
        code=contract_code,
        nonce=1,
        balance=0,
    )
    tx = Transaction(
        to=Spec.DEPOSIT_CONTRACT_ADDRESS,
        sender=pre.fund_eoa(),
        gas_limit=100_000,
    )
    # The block must be rejected: the event data has the wrong length.
    blockchain_test(
        pre=pre,
        blocks=[
            Block(
                txs=[tx],
                exception=[
                    BlockException.INVALID_DEPOSIT_EVENT_LAYOUT,
                ],
            ),
        ],
        post={},
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/__init__.py | tests/prague/eip6110_deposits/__init__.py | """Cross-client EIP-6110 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/test_eip_mainnet.py | tests/prague/eip6110_deposits/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of [EIP-6110: Supply validator deposits on chain](https://eips.ethereum.org/EIPS/eip-6110).
""" # noqa: E501
from typing import List
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
)
from .helpers import DepositRequest, DepositTransaction
from .spec import ref_spec_6110
REFERENCE_SPEC_GIT_PATH = ref_spec_6110.git_path
REFERENCE_SPEC_VERSION = ref_spec_6110.version

# Run only at the Prague fork; included in the mainnet test suite.
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
@pytest.mark.parametrize(
    "requests",
    [
        pytest.param(
            [
                DepositTransaction(
                    # TODO: Use a real public key to allow recovery of
                    # the funds.
                    requests=[
                        DepositRequest(
                            pubkey=0x01,
                            withdrawal_credentials=0x02,
                            amount=1_000_000_000,
                            signature=0x03,
                            index=0x0,
                        )
                    ],
                ),
            ],
            id="single_deposit_from_eoa_minimum",
        ),
    ],
)
def test_eip_6110(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    blocks: List[Block],
) -> None:
    """
    Test making a deposit to the beacon chain deposit contract.

    The `blocks` fixture is derived from the parametrized `requests` above.
    """
    blockchain_test(
        pre=pre,
        blocks=blocks,
        post={},
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip6110_deposits/test_deposits.py | tests/prague/eip6110_deposits/test_deposits.py | """
Tests validator deposit functionality.
Tests the validator deposit functionality implementation from
[EIP-6110: Supply validator deposits on chain](https://eips.ethereum.org/EIPS/eip-6110).
"""
from typing import List
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
BlockException,
Environment,
Macros,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import DepositContract, DepositRequest, DepositTransaction
from .spec import ref_spec_6110
REFERENCE_SPEC_GIT_PATH = ref_spec_6110.git_path
REFERENCE_SPEC_VERSION = ref_spec_6110.version
pytestmark = pytest.mark.valid_from("Prague")
@pytest.mark.parametrize(
"requests",
[
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
),
],
id="single_deposit_from_eoa",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=120_000_000_000_000_000,
signature=0x03,
index=0x0,
)
],
sender_balance=120_000_001_000_000_000 * 10**9,
),
],
id="single_deposit_from_eoa_huge_amount",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
),
],
),
],
id="multiple_deposit_from_same_eoa",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=i,
)
for i in range(200)
],
)
],
id="multiple_deposit_from_same_eoa_high_count",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
),
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
)
],
),
],
id="multiple_deposit_from_different_eoa",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
],
),
],
id="multiple_deposit_from_same_eoa_first_reverts",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x0,
),
],
),
],
id="multiple_deposit_from_same_eoa_last_reverts",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
# From traces, gas used by the first tx is 82,718
# so reduce by one here
gas_limit=0x1431D,
valid=False,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
],
),
],
id="multiple_deposit_from_same_eoa_first_oog",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
# From traces, gas used by the second tx is 68,594,
# reduce by one here
gas_limit=0x10BF1,
valid=False,
),
],
),
],
id="multiple_deposit_from_same_eoa_last_oog",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
calldata_modifier=lambda _: b"",
valid=False,
)
],
),
],
id="send_eth_from_eoa",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
calldata_modifier=lambda _: b"",
)
],
),
],
# TODO: EIP-5920: Send using PAY opcode
id="send_eth_to_contract_no_deposit_data",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
),
],
id="single_deposit_from_contract",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x1,
),
],
),
],
id="multiple_deposits_from_contract",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=i,
)
for i in range(450)
],
tx_gas_limit=16_777_216,
),
],
id="many_deposits_from_contract",
marks=pytest.mark.slow,
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x0,
valid=False,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
),
],
),
],
id="multiple_deposits_from_contract_first_reverts",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x1,
valid=False,
),
],
),
],
id="multiple_deposits_from_contract_last_reverts",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
gas_limit=100,
index=0x0,
valid=False,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
gas_limit=1_000_000,
index=0x0,
),
],
),
],
id="multiple_deposits_from_contract_first_oog",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
gas_limit=1_000_000,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
gas_limit=100,
valid=False,
),
],
),
],
id="multiple_deposits_from_contract_last_oog",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x1,
valid=False,
),
],
extra_code=Op.REVERT(0, 0),
),
],
id="multiple_deposits_from_contract_caller_reverts",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x1,
valid=False,
),
],
extra_code=Macros.OOG(),
),
],
id="multiple_deposits_from_contract_caller_oog",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=i,
valid=False,
)
for i in range(450)
],
tx_gas_limit=10_000_000,
),
],
id="many_deposits_from_contract_oog",
marks=pytest.mark.slow,
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
],
),
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
)
],
),
],
id="single_deposit_from_contract_single_deposit_from_eoa",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
),
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
),
],
),
],
id="single_deposit_from_eoa_single_deposit_from_contract",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
),
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
),
],
),
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x2,
)
],
),
],
id="single_deposit_from_contract_between_eoa_deposits",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
],
),
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
)
],
),
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x2,
),
],
),
],
id="single_deposit_from_eoa_between_contract_deposits",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
)
],
call_type=Op.DELEGATECALL,
),
],
id="single_deposit_from_contract_delegatecall",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
)
],
call_type=Op.STATICCALL,
),
],
id="single_deposit_from_contract_staticcall",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
)
],
call_type=Op.CALLCODE,
),
],
id="single_deposit_from_contract_callcode",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
call_depth=3,
),
],
id="single_deposit_from_contract_call_depth_3",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
call_depth=271,
tx_gas_limit=16_777_216,
),
],
id="single_deposit_from_contract_call_depth_high",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_001,
signature=0x03,
index=0x0,
)
],
),
],
id="single_deposit_from_eoa_minimum_plus_one",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
extra_wei=1,
valid=False,
)
],
),
],
id="single_deposit_from_eoa_minimum_plus_one_wei",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
extra_wei=-1,
valid=False,
)
],
),
],
id="single_deposit_from_eoa_minimum_minus_one_wei",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
extra_wei=1,
valid=False,
)
],
),
],
id="single_deposit_from_contract_minimum_plus_one_wei",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_000,
signature=0x03,
index=0x0,
extra_wei=-1,
valid=False,
)
],
),
],
id="single_deposit_from_contract_minimum_minus_one_wei",
),
pytest.param(
[
DepositTransaction(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=1_000_000_001,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x1,
valid=False,
),
],
),
],
id="multiple_deposits_from_eoa_minimum_plus_one_minimum_minus_one",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
valid=False,
)
],
# Send 32 ETH minus 1 wei to the contract, note
# `DepositRequest.amount` is in gwei
tx_value=32_000_000_000 * 10**9 - 1,
contract_balance=0,
),
],
id="send_not_enough_eth_to_contract_with_zero_balance",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=999_999_999,
signature=0x03,
index=0x0,
valid=False,
)
],
tx_value=1_000_000_000 * 10**9,
),
],
id="send_eth_to_contract_insufficient_deposit",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
)
],
# Send 32 ETH (in wei) to the contract
tx_value=32_000_000_000 * 10**9,
contract_balance=0,
),
],
id="send_exact_eth_amount_for_deposit",
),
pytest.param(
[
DepositContract(
requests=[
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x0,
),
DepositRequest(
pubkey=0x01,
withdrawal_credentials=0x02,
amount=32_000_000_000,
signature=0x03,
index=0x1,
),
],
# Send 64 ETH (in wei) to the contract
tx_value=64_000_000_000 * 10**9,
contract_balance=0,
),
],
id="send_exact_eth_amount_for_multiple_deposits",
),
],
)
@pytest.mark.pre_alloc_group(
    "deposit_requests", reason="Tests standard deposit request functionality using system contract"
)
def test_deposit(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    blocks: List[Block],
) -> None:
    """
    Test making a deposit to the beacon chain deposit contract.

    The genesis gas limit is raised when the parametrized transactions of
    the first block require more gas than the default environment allows.
    """
    env = Environment()
    required_gas = sum(tx.gas_limit for tx in blocks[0].txs)
    if required_gas > env.gas_limit:
        env = Environment(gas_limit=required_gas)
    blockchain_test(
        genesis_environment=env,
        pre=pre,
        post={},
        blocks=blocks,
    )
@pytest.mark.parametrize(
"requests,block_body_override_requests,exception",
[
pytest.param(
[],
[
DepositRequest(
pubkey=0x01,
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2935_historical_block_hashes_from_state/spec.py | tests/prague/eip2935_historical_block_hashes_from_state/spec.py | """Defines EIP-2935 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
    """
    Immutable pointer to the EIP reference specification: its path within
    the EIPs repository and the pinned spec version (commit hash).
    """

    git_path: str
    version: str
# Pinned version of the EIP-2935 markdown spec these tests were written against.
ref_spec_2935 = ReferenceSpec("EIPS/eip-2935.md", "06aadd458ee04ede80498db55927b052eb5bef38")
@dataclass(frozen=True)
class Spec:
    """
    Parameters from the EIP-2935 specifications as defined at
    https://eips.ethereum.org/EIPS/eip-2935.
    """

    # Timestamp at which the fork activates in these tests.
    FORK_TIMESTAMP = 15_000
    # Predeploy address of the history storage system contract.
    HISTORY_STORAGE_ADDRESS = 0x0000F90827F1C53A10CB7A02335B175320002935
    # Number of ancestor block hashes served from state.
    HISTORY_SERVE_WINDOW = 8191
    # Legacy BLOCKHASH opcode lookback window.
    BLOCKHASH_OLD_WINDOW = 256
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2935_historical_block_hashes_from_state/__init__.py | tests/prague/eip2935_historical_block_hashes_from_state/__init__.py | """Cross-client EIP-2935 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py | tests/prague/eip2935_historical_block_hashes_from_state/test_contract_deployment.py | """
Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
"""
from os.path import realpath
from pathlib import Path
from typing import Any, Dict, Generator
import pytest
from ethereum_test_forks import Prague
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
DeploymentTestType,
Transaction,
generate_system_contract_deploy_test,
)
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_2935
REFERENCE_SPEC_GIT_PATH = ref_spec_2935.git_path
REFERENCE_SPEC_VERSION = ref_spec_2935.version
@pytest.mark.pre_alloc_group(
    "separate", reason="Deploys history storage system contract at hardcoded predeploy address"
)
@generate_system_contract_deploy_test(
    fork=Prague,
    tx_json_path=Path(realpath(__file__)).parent / "contract_deploy_tx.json",
    expected_deploy_address=Address(Spec.HISTORY_STORAGE_ADDRESS),
    fail_on_empty_code=False,
)
def test_system_contract_deployment(
    *,
    pre: Alloc,
    post: Alloc,
    test_type: DeploymentTestType,
    **kwargs: Any,
) -> Generator[Block, None, None]:
    """Verify deployment of the block hashes system contract."""
    # Extra empty block so the chain has more history to query; some of the
    # blocks queried below deliberately predate the contract deployment.
    yield Block()

    # Build a checker contract that asks the history contract for the hashes
    # of blocks 1..3 and records, per block number, whether a non-zero hash
    # was returned.
    checker_code = None
    for queried_block in range(1, 4):
        segment = (
            Op.MSTORE(0, queried_block)
            + Op.POP(
                Op.CALL(
                    address=Spec.HISTORY_STORAGE_ADDRESS,
                    args_offset=0,
                    args_size=32,
                    ret_offset=32,
                    ret_size=32,
                ),
            )
            + Op.SSTORE(queried_block, Op.ISZERO(Op.ISZERO(Op.MLOAD(32))))
        )
        checker_code = segment if checker_code is None else checker_code + segment
    checker_code += Op.STOP

    checker_address = pre.deploy_contract(checker_code)
    trigger_tx = Transaction(
        to=checker_address,
        gas_limit=10_000_000,
        sender=pre.fund_eoa(),
    )
    yield Block(txs=[trigger_tx])

    expected_storage: Dict
    if test_type == DeploymentTestType.DEPLOY_BEFORE_FORK:
        # The contract already exists at the fork block (block 2), so all
        # queried blocks have their hash recorded, starting at block 1.
        expected_storage = {1: 1, 2: 1, 3: 1}
    elif test_type == DeploymentTestType.DEPLOY_ON_FORK_BLOCK:
        # Deployment coincides with the fork block; hashes are recorded from
        # block 1 onwards as well.
        expected_storage = {1: 1, 2: 1, 3: 1}
    elif test_type == DeploymentTestType.DEPLOY_AFTER_FORK:
        # The contract only appears at block 2, so block 1's hash was never
        # recorded; recording starts with block 2 (written on the next
        # block).
        expected_storage = {1: 0, 2: 1, 3: 1}

    post[checker_address] = Account(
        balance=0,
        storage=expected_storage,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2935_historical_block_hashes_from_state/test_eip_mainnet.py | tests/prague/eip2935_historical_block_hashes_from_state/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of
[EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
""" # noqa: E501
import pytest
from ethereum_test_tools import Account, Alloc, Block, BlockchainTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_2935
REFERENCE_SPEC_GIT_PATH = ref_spec_2935.git_path
REFERENCE_SPEC_VERSION = ref_spec_2935.version
# Crafted-for-mainnet tests: valid at Prague and tagged for mainnet runs.
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
def test_eip_2935(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
) -> None:
    """Test a simple block hash request from EIP-2935 system contract."""
    # The contract queries the hash of its own parent block.
    parent_number = Op.SUB(Op.NUMBER, 1)
    verifier_code = (
        Op.MSTORE(0, parent_number)
        + Op.POP(
            Op.CALL(
                address=Spec.HISTORY_STORAGE_ADDRESS,
                args_offset=0,
                args_size=32,
                ret_offset=32,
                ret_size=32,
            )
        )
        # Slot 0 records whether the contract's answer matches BLOCKHASH.
        + Op.SSTORE(0, Op.EQ(Op.MLOAD(32), Op.BLOCKHASH(parent_number)))
    )
    verifier_address = pre.deploy_contract(verifier_code)
    query_tx = Transaction(
        to=verifier_address,
        gas_limit=50_000,
        sender=pre.fund_eoa(),
    )
    blockchain_test(
        pre=pre,
        blocks=[Block(txs=[query_tx])],
        post={verifier_address: Account(storage={0: 1})},
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py | tests/prague/eip2935_historical_block_hashes_from_state/test_block_hashes.py | """
Tests [EIP-2935: Serve historical block hashes from state](https://eips.ethereum.org/EIPS/eip-2935).
"""
from typing import Dict, List
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Bytecode,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_2935
REFERENCE_SPEC_GIT_PATH = ref_spec_2935.git_path
REFERENCE_SPEC_VERSION = ref_spec_2935.version
def generate_block_check_code(
    check_block_number: int,
    current_block_number: int,
    fork_block_number: int,
    storage: Storage,
    check_contract_first: bool = False,
) -> Bytecode:
    """
    Build EVM code that asserts the availability (or absence) of a block hash
    through both the `BLOCKHASH` opcode and the EIP-2935 history contract.

    Args:
        check_block_number (int): The block number to check.
        current_block_number (int): The current block number where the check is
            taking place.
        fork_block_number (int): The block number of the fork transition.
        storage (Storage): The storage object to use.
        check_contract_first (bool): Whether to check the contract first,
            for slot warming checks.
    """
    ret_offset = 32
    if check_block_number < 0:
        # Negative block numbers are out of range; emit no checks.
        return Bytecode()

    # `BLOCKHASH` only serves the most recent 256 ancestors.
    blockhash_available = (
        check_block_number < current_block_number
        and current_block_number - check_block_number <= Spec.BLOCKHASH_OLD_WINDOW
    )
    # The history contract serves hashes starting one block before the fork,
    # limited to its ring-buffer window.
    contract_available = (
        check_block_number >= fork_block_number - 1
        and check_block_number < current_block_number
        and current_block_number - check_block_number <= Spec.HISTORY_SERVE_WINDOW
    )

    # Slot allocation order matters: the BLOCKHASH expectation is stored in
    # the first slot, the contract expectation in the second.
    blockhash_slot = storage.store_next(not blockhash_available)
    contract_slot = storage.store_next(not contract_available)

    blockhash_check = Op.SSTORE(blockhash_slot, Op.ISZERO(Op.BLOCKHASH(check_block_number)))
    contract_check = (
        Op.MSTORE(0, check_block_number)
        + Op.POP(Op.CALL(Op.GAS, Spec.HISTORY_STORAGE_ADDRESS, 0, 0, 32, ret_offset, 32))
        + Op.SSTORE(contract_slot, Op.ISZERO(Op.MLOAD(ret_offset)))
    )

    if check_contract_first:
        first, second = contract_check, blockhash_check
    else:
        first, second = blockhash_check, contract_check
    code = first + second

    if blockhash_available and contract_available:
        # When both sources serve the hash, they must agree.
        equality_slot = storage.store_next(True)
        code += Op.SSTORE(
            equality_slot, Op.EQ(Op.MLOAD(ret_offset), Op.BLOCKHASH(check_block_number))
        )

    # Clear the scratch return slot for the next check.
    code += Op.MSTORE(ret_offset, 0)
    return code
# TODO: Test at transition: `BLOCKHASH_OLD_WINDOW + 1` blocks before transition
# TODO: Test post fork: `HISTORY_SERVE_WINDOW` + 1 blocks after transition
@pytest.mark.parametrize(
    "blocks_before_fork, blocks_after_fork",
    [
        [1, 2],
        [Spec.BLOCKHASH_OLD_WINDOW + 1, 10],
        [1, Spec.BLOCKHASH_OLD_WINDOW + 1],
    ],
)
@pytest.mark.slow()
@pytest.mark.valid_at_transition_to("Prague")
def test_block_hashes_history_at_transition(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    blocks_before_fork: int,
    blocks_after_fork: int,
) -> None:
    """
    Tests that block hashes are stored correctly at the system contract address
    after the fork transition. Block hashes are stored incrementally at the
    transition until the `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards
    the oldest block hash is replaced by the new one.
    Note: The block hashes before the fork are no longer stored in the contract
    at the moment of the transition.
    """
    blocks: List[Block] = []
    # Pre-fork blocks use their block number as timestamp, so the assert
    # guarantees all of them stay below the fork activation timestamp.
    assert blocks_before_fork >= 1 and blocks_before_fork < Spec.FORK_TIMESTAMP
    sender = pre.fund_eoa(10_000_000_000)
    post: Dict[Address, Account] = {}
    current_block_number = 1
    fork_block_number = current_block_number + blocks_before_fork
    for i in range(blocks_before_fork):
        txs: List[Transaction] = []
        if i == blocks_before_fork - 1:
            # On the last block before the fork, `BLOCKHASH` must return values
            # for the last 256 blocks but not for the blocks before that. And
            # `HISTORY_STORAGE_ADDRESS` should be empty.
            code = Bytecode()
            storage = Storage()
            # Check the last block before the BLOCKHASH window (expected to be
            # unavailable from both sources).
            code += generate_block_check_code(
                check_block_number=current_block_number - Spec.BLOCKHASH_OLD_WINDOW - 1,
                current_block_number=current_block_number,
                fork_block_number=fork_block_number,
                storage=storage,
            )
            # Check the first block inside the BLOCKHASH window.
            code += generate_block_check_code(
                check_block_number=(
                    current_block_number - Spec.BLOCKHASH_OLD_WINDOW
                    if current_block_number > Spec.BLOCKHASH_OLD_WINDOW
                    else 0  # Entire chain is inside the window, check genesis
                ),
                current_block_number=current_block_number,
                fork_block_number=fork_block_number,
                storage=storage,
            )
            check_blocks_before_fork_address = pre.deploy_contract(code)
            txs.append(
                Transaction(
                    to=check_blocks_before_fork_address,
                    gas_limit=10_000_000,
                    sender=sender,
                )
            )
            post[check_blocks_before_fork_address] = Account(storage=storage)
        blocks.append(Block(timestamp=current_block_number, txs=txs))
        current_block_number += 1
    # Add blocks after the fork transition to gradually fill up the
    # `HISTORY_SERVE_WINDOW`
    for i in range(blocks_after_fork):
        txs = []
        # On these blocks, `BLOCKHASH` will still return values for the last
        # 256 blocks, and `HISTORY_STORAGE_ADDRESS` should now serve values for
        # the previous blocks in the new fork.
        code = Bytecode()
        storage = Storage()
        # Check that each block can return previous blockhashes when inside
        # the `BLOCKHASH_OLD_WINDOW` and/or the `HISTORY_SERVE_WINDOW`.
        for j in range(current_block_number):
            code += generate_block_check_code(
                check_block_number=j,
                current_block_number=current_block_number,
                fork_block_number=fork_block_number,
                storage=storage,
            )
        check_blocks_after_fork_address = pre.deploy_contract(code)
        txs.append(
            Transaction(
                to=check_blocks_after_fork_address,
                gas_limit=10_000_000,
                sender=sender,
            )
        )
        post[check_blocks_after_fork_address] = Account(storage=storage)
        # Post-fork timestamps start at the fork activation timestamp.
        blocks.append(Block(timestamp=Spec.FORK_TIMESTAMP + i, txs=txs))
        current_block_number += 1
    blockchain_test(
        pre=pre,
        blocks=blocks,
        post=post,
    )
@pytest.mark.parametrize(
    "block_count,check_contract_first",
    [
        pytest.param(1, False, id="single_block_check_blockhash_first"),
        pytest.param(1, True, id="single_block_check_contract_first"),
        pytest.param(2, False, id="two_blocks_check_blockhash_first"),
        pytest.param(2, True, id="two_blocks_check_contract_first"),
        pytest.param(
            Spec.HISTORY_SERVE_WINDOW + 1,
            False,
            marks=[pytest.mark.skip("Slow test not relevant anymore"), pytest.mark.slow],
            id="full_history_plus_one_check_blockhash_first",
        ),
    ],
)
@pytest.mark.valid_from("Prague")
def test_block_hashes_history(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    block_count: int,
    check_contract_first: bool,
) -> None:
    """
    Tests that block hashes are stored correctly at the system contract address
    after the fork transition. Block hashes are stored incrementally at the
    transition until the `HISTORY_SERVE_WINDOW` ring buffer is full. Afterwards
    the oldest block hash is replaced by the new one.
    """
    blocks: List[Block] = []
    sender = pre.fund_eoa(10_000_000_000)
    post: Dict[Address, Account] = {}
    current_block_number = 1
    fork_block_number = 0  # We fork at genesis

    # Pad the chain with empty blocks so the check executes at the desired
    # height.
    for _ in range(block_count - 1):
        blocks.append(Block())
        current_block_number += 1

    code = Bytecode()
    storage = Storage()
    # Probe both edges of each history window plus the parent block. The order
    # of the probes fixes the storage slot layout, so it must not change.
    probe_block_numbers = (
        # First block outside the contract's serve window, if any.
        current_block_number - Spec.HISTORY_SERVE_WINDOW - 1,
        # Oldest block inside the contract's serve window.
        current_block_number - Spec.HISTORY_SERVE_WINDOW,
        # First block outside the BLOCKHASH window.
        current_block_number - Spec.BLOCKHASH_OLD_WINDOW - 1,
        # Oldest block inside the BLOCKHASH window.
        current_block_number - Spec.BLOCKHASH_OLD_WINDOW,
        # Parent block.
        current_block_number - 1,
    )
    for probe in probe_block_numbers:
        code += generate_block_check_code(
            check_block_number=probe,
            current_block_number=current_block_number,
            fork_block_number=fork_block_number,
            storage=storage,
            check_contract_first=check_contract_first,
        )

    checker_address = pre.deploy_contract(code)
    txs = [
        Transaction(
            to=checker_address,
            gas_limit=10_000_000,
            sender=sender,
        )
    ]
    post[checker_address] = Account(storage=storage)
    blocks.append(Block(txs=txs))
    current_block_number += 1
    blockchain_test(
        pre=pre,
        blocks=blocks,
        post=post,
    )
@pytest.mark.valid_from("Prague")
@pytest.mark.with_all_call_opcodes
def test_block_hashes_call_opcodes(
blockchain_test: BlockchainTestFiller, pre: Alloc, call_opcode: Op
) -> None:
"""
Test that the call opcodes can be used to call the history contract and get
the block hashes.
"""
blocks = []
blocks.append(Block())
storage = Storage()
return_code_slot = storage.store_next(0x1)
blockhash_value_slot = storage.store_next(
True if call_opcode in [Op.DELEGATECALL, Op.CALLCODE] else False
)
code = (
Op.MSTORE(0, 1)
+ Op.SSTORE(
return_code_slot,
call_opcode(
address=Spec.HISTORY_STORAGE_ADDRESS,
args_offset=0,
args_size=32,
ret_offset=32,
ret_size=32,
),
)
+ Op.SSTORE(blockhash_value_slot, Op.ISZERO(Op.MLOAD(32)))
)
contract_address = pre.deploy_contract(code, storage=storage.canary())
blocks.append(
Block(
txs=[
Transaction(
to=contract_address,
gas_limit=10_000_000,
sender=pre.fund_eoa(),
)
]
)
)
blockchain_test(
pre=pre,
blocks=blocks,
post={contract_address: Account(storage=storage)},
)
@pytest.mark.parametrize(
    "block_number,reverts",
    [
        pytest.param(1, True, id="current_block"),
        pytest.param(2, True, id="future_block"),
        pytest.param(2**64 - 1, True, id="2**64-1"),
        pytest.param(2**64, True, id="2**64"),
    ],
)
@pytest.mark.valid_from("Prague")
def test_invalid_history_contract_calls(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    block_number: int,
    reverts: bool,
) -> None:
    """
    Test calling the history contract with invalid block numbers, such as
    blocks from the future or overflowing block numbers.
    Also test the BLOCKHASH opcode with the same block numbers, which should
    not affect the behavior of the opcode, even after verkle.
    """
    expected = Storage()
    # Slot order fixes the storage layout: call result first, then the value
    # returned by the contract, then the BLOCKHASH result.
    call_result_slot = expected.store_next(not reverts)
    contract_hash_slot = expected.store_next(0)
    opcode_hash_slot = expected.store_next(0)
    ret_offset = 64
    ret_size = 32
    input_size = 32
    probe_code = (
        Op.MSTORE(0, block_number)
        + Op.SSTORE(
            call_result_slot,
            Op.CALL(
                address=Spec.HISTORY_STORAGE_ADDRESS,
                args_offset=0,
                args_size=input_size,
                ret_offset=ret_offset,
                ret_size=ret_size,
            ),
        )
        + Op.SSTORE(contract_hash_slot, Op.MLOAD(ret_offset))
        + Op.SSTORE(opcode_hash_slot, Op.BLOCKHASH(block_number))
    )
    probe_address = pre.deploy_contract(probe_code, storage=expected.canary())
    blockchain_test(
        pre=pre,
        blocks=[
            Block(
                txs=[
                    Transaction(
                        to=probe_address,
                        gas_limit=10_000_000,
                        sender=pre.fund_eoa(),
                    )
                ]
            )
        ],
        post={probe_address: Account(storage=expected)},
    )
@pytest.mark.parametrize(
    "args_size,reverts",
    [
        pytest.param(0, True, id="zero_size"),
        pytest.param(33, True, id="too_large"),
        pytest.param(31, True, id="too_small"),
    ],
)
@pytest.mark.valid_from("Prague")
def test_invalid_history_contract_calls_input_size(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    reverts: bool,
    args_size: int,
) -> None:
    """Test calling the history contract with invalid input sizes."""
    expected = Storage()
    call_result_slot = expected.store_next(not reverts, "history storage call result")
    contract_hash_slot = expected.store_next(0)
    ret_offset = 64
    ret_size = 32
    queried_block = 0
    # Only the call data size varies; the queried block number is always 0.
    probe_code = (
        Op.MSTORE(0, queried_block)
        + Op.SSTORE(
            call_result_slot,
            Op.CALL(
                address=Spec.HISTORY_STORAGE_ADDRESS,
                args_offset=0,
                args_size=args_size,
                ret_offset=ret_offset,
                ret_size=ret_size,
            ),
        )
        + Op.SSTORE(contract_hash_slot, Op.MLOAD(ret_offset))
    )
    probe_address = pre.deploy_contract(probe_code, storage=expected.canary())
    blockchain_test(
        pre=pre,
        blocks=[
            Block(
                txs=[
                    Transaction(
                        to=probe_address,
                        gas_limit=10_000_000,
                        sender=pre.fund_eoa(),
                    )
                ]
            )
        ],
        post={probe_address: Account(storage=expected)},
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1mul.py | """
Tests BLS12_G1MUL precompile.
Tests the BLS12_G1MUL precompile implementation from [EIP-2537:
Precompile for BLS12-381 curve operations]
(https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G1_POINTS_NOT_IN_SUBGROUP, G1_POINTS_NOT_ON_CURVE
from .helpers import vectors_from_file
from .spec import PointG1, Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
    pytest.mark.valid_from("Prague"),
    # G1 scalar multiplication vectors are sent to the G1MSM precompile
    # address — no standalone G1MUL address is defined in the visible `Spec`.
    pytest.mark.parametrize("precompile_address", [Spec.G1MSM], ids=[""]),
]
@pytest.mark.parametrize(
    "input_data,expected_output,vector_gas_value",
    # Test vectors from the reference spec (from the cryptography team)
    vectors_from_file("mul_G1_bls.json")
    + [
        # Basic multiplication test cases.
        pytest.param(
            Spec.INF_G1 + Scalar(0),
            Spec.INF_G1,
            None,
            id="zero_times_inf",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(1),
            Spec.INF_G1,
            None,
            id="one_times_inf",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(2),
            Spec.INF_G1,
            None,
            id="two_times_inf",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(Spec.Q),
            Spec.INF_G1,
            None,
            id="q_times_inf",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(2**256 - 1),
            Spec.INF_G1,
            None,
            id="max_scalar_times_inf",
        ),
        pytest.param(
            Spec.G1 + Scalar(0),
            Spec.INF_G1,
            None,
            id="zero_times_generator",
        ),
        pytest.param(
            Spec.P1 + Scalar(0),
            Spec.INF_G1,
            None,
            id="zero_times_point",
        ),
        pytest.param(
            Spec.G1 + Scalar(1),
            Spec.G1,
            None,
            id="one_times_generator",
        ),
        pytest.param(
            Spec.P1 + Scalar(1),
            Spec.P1,
            None,
            id="one_times_point",
        ),
        pytest.param(
            Spec.P1 + Scalar(2**256 - 1),
            PointG1(
                0x3DA1F13DDEF2B8B5A46CD543CE56C0A90B8B3B0D6D43DEC95836A5FD2BACD6AA8F692601F870CF22E05DDA5E83F460B,
                0x18D64F3C0E9785365CBDB375795454A8A4FA26F30B9C4F6E33CA078EB5C29B7AEA478B076C619BC1ED22B14C95569B2D,
            ),
            None,
            id="max_scalar_times_point",
        ),
        # Subgroup related test cases: multiplication by (multiples of) the
        # subgroup order Q must wrap around.
        pytest.param(
            Spec.P1 + Scalar(Spec.Q - 1),
            -Spec.P1,  # negated P1
            None,
            id="q_minus_1_times_point",
        ),
        pytest.param(
            Spec.P1 + Scalar(Spec.Q),
            Spec.INF_G1,
            None,
            id="q_times_point",
        ),
        pytest.param(
            Spec.G1 + Scalar(Spec.Q),
            Spec.INF_G1,
            None,
            id="q_times_generator",
        ),
        pytest.param(
            Spec.P1 + Scalar(Spec.Q + 1),
            Spec.P1,
            None,
            id="q_plus_1_times_point",
        ),
        pytest.param(
            Spec.P1 + Scalar(2 * Spec.Q),
            Spec.INF_G1,
            None,
            id="2q_times_point",
        ),
        pytest.param(
            Spec.P1 + Scalar((2**256 // Spec.Q) * Spec.Q),
            Spec.INF_G1,
            None,
            id="large_multiple_of_q_times_point",
        ),
    ],
)
def test_valid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Test the BLS12_G1MUL operation with valid inputs.

    The vectors are sent to the G1MSM precompile address (see the module
    `pytestmark`).
    """
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
    "input_data",
    # Test vectors from the reference spec (from the cryptography team)
    vectors_from_file("fail-mul_G1_bls.json")
    + [
        # Points that are not on the curve.
        pytest.param(
            PointG1(0, 1) + Scalar(0),
            id="invalid_point_1",
        ),
        pytest.param(
            PointG1(Spec.P1.x, Spec.P1.y - 1) + Scalar(0),
            id="invalid_point_2",
        ),
        pytest.param(
            PointG1(Spec.P1.x, Spec.P1.y + 1) + Scalar(0),
            id="invalid_point_3",
        ),
        pytest.param(
            PointG1(Spec.P1.x, Spec.P1.x) + Scalar(0),
            id="invalid_point_4",
        ),
        # Coordinates at or above the field modulus P.
        pytest.param(
            PointG1(Spec.P, 0) + Scalar(0),
            id="x_equal_to_p_times_0",
        ),
        pytest.param(
            PointG1(0, Spec.P) + Scalar(0),
            id="y_equal_to_p_times_0",
        ),
        pytest.param(
            PointG1(Spec.P + 1, 0) + Scalar(0),
            id="x_above_modulus_times_0",
        ),
        pytest.param(
            PointG1(Spec.P1.y, Spec.P1.x) + Scalar(0),
            id="swapped_coordinates_times_0",
        ),
        pytest.param(
            # Point on wrong curve y^2 = x^3 + 5
            PointG1(0x01, 0x07) + Scalar(0),
            id="point_on_wrong_curve_times_0",
        ),
        # Malformed encodings and input-length violations.
        pytest.param(
            b"\x80" + bytes(Spec.INF_G1)[1:] + Scalar(0),
            id="invalid_encoding",
        ),
        pytest.param(
            (Spec.INF_G1 + Scalar(0))[:-1],
            id="input_too_short",
        ),
        pytest.param(
            b"\x00" + (Spec.INF_G1 + Scalar(0)),
            id="input_too_long",
        ),
        pytest.param(
            b"",
            id="zero_length_input",
        ),
        pytest.param(
            b"\x00" * 96,
            id="all_zero_96_bytes",
        ),
        pytest.param(
            b"\xff" + b"\x00" * 47 + b"\xff" + b"\x00" * 31,
            id="bad_inf_flag_with_scalar",
        ),
        pytest.param(
            b"\xc0" + b"\x00" * 47 + b"\x00" * 32,
            id="comp_instead_of_uncomp_with_scalar",
        ),
        pytest.param(
            Spec.G1 + Spec.G1,
            id="g1_input_invalid_length",
        ),
        pytest.param(
            Spec.G2 + Spec.G2,
            id="g2_input_invalid_length",
        ),
        pytest.param(
            Spec.G1,
            id="g1_truncated_input",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(0).x.to_bytes(30, byteorder="big"),
            id="inf_with_short_scalar",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(0).x.to_bytes(34, byteorder="big"),
            id="inf_with_long_scalar",
        ),
        pytest.param(
            Spec.INF_G1 + (b"\x01" + b"\x00" * 32),
            id="scalar_too_large_bytes",
        ),
        pytest.param(
            Spec.P1 + (b"\x01" + b"\x00" * 32),
            id="scalar_too_large_bytes_with_point",
        ),
        pytest.param(
            Spec.G1 + (b"\x01\x23\x45"),
            id="scalar_too_small_bytes",
        ),
        pytest.param(
            Scalar(1) + Scalar(1),
            id="two_scalars",
        ),
        pytest.param(
            PointG1(Spec.P1.x | Spec.MAX_FP_BIT_SET, Spec.P1.y) + Scalar(1),
            id="non_zero_byte_16_boundary_violation_x",
        ),
        pytest.param(
            PointG1(Spec.P1.x, Spec.P1.y | Spec.MAX_FP_BIT_SET) + Scalar(1),
            id="non_zero_byte_16_boundary_violation_y",
        ),
        # Not in the r-order subgroup test cases.
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(1),
            id="not_in_subgroup_times_1",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(2),
            id="not_in_subgroup_times_2",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(3),
            id="not_in_subgroup_times_3",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(0),
            id="not_in_subgroup_times_0",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + Scalar(1),
            id="doubled_not_in_subgroup_times_1",
        ),
        pytest.param(
            Scalar(Spec.Q) + Spec.P1_NOT_IN_SUBGROUP,
            id="q_times_not_in_subgroup",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(Spec.Q - 1),
            id="not_in_subgroup_times_q_minus_1",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + Scalar(Spec.Q),
            id="doubled_not_in_subgroup_times_q",
        ),
        pytest.param(
            Spec.P1_NOT_IN_SUBGROUP + Scalar(Spec.Q + 1),
            id="not_in_subgroup_times_q_plus_1",
        ),
        # More not in the r-order subgroup test cases, but using random
        # generated points.
        pytest.param(
            G1_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
            id="rand_not_in_subgroup_0_times_1",
        ),
        pytest.param(
            Scalar(2) + G1_POINTS_NOT_IN_SUBGROUP[1],
            id="2_times_rand_not_in_subgroup_1",
        ),
        pytest.param(
            G1_POINTS_NOT_IN_SUBGROUP[2] + Scalar(Spec.Q),
            id="rand_not_in_subgroup_2_times_q",
        ),
        pytest.param(
            Scalar(0) + G1_POINTS_NOT_IN_SUBGROUP[3],
            id="0_times_rand_not_in_subgroup_3",
        ),
        pytest.param(
            G1_POINTS_NOT_IN_SUBGROUP[4] + Scalar(2**255 - 1),
            id="rand_not_in_subgroup_4_times_large_scalar",
        ),
        # Not on the curve cases using random generated points.
        pytest.param(
            G1_POINTS_NOT_ON_CURVE[0] + Scalar(1),
            id="rand_not_on_curve_0_times_1",
        ),
        pytest.param(
            G1_POINTS_NOT_ON_CURVE[1] + Scalar(2),
            id="rand_not_on_curve_1_times_2",
        ),
        pytest.param(
            G1_POINTS_NOT_ON_CURVE[2] + Scalar(Spec.Q),
            id="rand_not_on_curve_2_times_q",
        ),
        pytest.param(
            G1_POINTS_NOT_ON_CURVE[3] + Scalar(0),
            id="rand_not_on_curve_3_times_0",
        ),
        pytest.param(
            G1_POINTS_NOT_ON_CURVE[4] + Scalar(Spec.Q - 1),
            id="rand_not_on_curve_4_times_q_minus_1",
        ),
    ],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Negative tests for the BLS12_G1MUL operation.

    Every case is expected to produce `Spec.INVALID` output.
    """
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
    "input_data,expected_output,precompile_gas_modifier",
    [
        # One extra unit of gas must not change the result.
        pytest.param(
            Spec.INF_G1 + Scalar(0),
            Spec.INF_G1,
            1,
            id="extra_gas",
        ),
        # One unit of gas below the cost must make the call fail.
        pytest.param(
            Spec.INF_G1 + Scalar(0),
            Spec.INVALID,
            -1,
            id="insufficient_gas",
        ),
    ],
)
def test_gas(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Test the BLS12_G1MUL precompile gas requirements."""
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
    "call_opcode",  # Note `Op.CALL` is used for all the `test_valid` cases.
    [
        Op.STATICCALL,
        Op.DELEGATECALL,
        Op.CALLCODE,
    ],
)
@pytest.mark.parametrize(
    "input_data,expected_output",
    [
        pytest.param(
            Spec.INF_G1 + Scalar(0),
            Spec.INF_G1,
            id="zero_times_inf",
        ),
        pytest.param(
            Spec.INF_G1 + Scalar(2),
            Spec.INF_G1,
            id="two_times_inf",
        ),
    ],
)
def test_call_types(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Test the BLS12_G1MUL precompile using different call types."""
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_precompiles_before_fork.py | """
Tests BLS12 precompiles before fork activation.
Tests the BLS12 precompiles behavior before fork activation from
[EIP-2537: Precompile for BLS12-381 curve operations]
(https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from .spec import FP, FP2, Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
# All tests in this module run at the transition boundary into Prague.
pytestmark = pytest.mark.valid_at_transition_to("Prague")
@pytest.mark.parametrize(
    "precompile_address,input_data",
    [
        pytest.param(
            Spec.G1ADD,
            Spec.INF_G1 + Spec.INF_G1,
            id="G1ADD",
        ),
        pytest.param(
            Spec.G1MSM,
            Spec.INF_G1 + Scalar(0),
            id="G1MSM",
        ),
        pytest.param(
            Spec.G2ADD,
            Spec.INF_G2 + Spec.INF_G2,
            id="G2ADD",
        ),
        pytest.param(
            Spec.G2MSM,
            Spec.INF_G2 + Scalar(0),
            id="G2MSM",
        ),
        pytest.param(
            Spec.PAIRING,
            Spec.INF_G1 + Spec.INF_G2,
            id="PAIRING",
        ),
        pytest.param(
            Spec.MAP_FP_TO_G1,
            FP(0),
            id="MAP_FP_TO_G1",
        ),
        pytest.param(
            Spec.MAP_FP2_TO_G2,
            FP2((0, 0)),
            id="MAP_FP2_TO_G2",
        ),
    ],
)
@pytest.mark.parametrize("expected_output,call_succeeds", [pytest.param(b"", True, id="")])
def test_precompile_before_fork(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Test all BLS12 precompiles before the Prague hard fork is active.
    The call must succeed but the output must be empty.

    NOTE(review): presumably the precompile addresses behave as accounts with
    no code before activation, hence success with empty output — confirm
    against the fork definition.
    """
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/spec.py | tests/prague/eip2537_bls_12_381_precompiles/spec.py | """Defines EIP-2537 specification constants and functions."""
from dataclasses import dataclass
from enum import Enum, auto
from typing import Callable, Sized, SupportsBytes, Tuple
@dataclass(frozen=True)
class ReferenceSpec:
    """Defines the reference spec version and git path."""
    # Path of the spec markdown file within the reference repository.
    git_path: str
    # Version identifier of the reference spec (looks like a commit hash —
    # TODO confirm).
    version: str
# Reference spec instance used by the EIP-2537 test modules.
ref_spec_2537 = ReferenceSpec("EIPS/eip-2537.md", "c561ec1426fe5ec470eade499a0bd4174f270583")
class BytesConcatenation(SupportsBytes, Sized):
"""A class that can be concatenated with bytes."""
def __len__(self) -> int:
"""Return length of the object when converted to bytes."""
return len(bytes(self))
def __add__(self, other: bytes | SupportsBytes) -> bytes:
"""Concatenates the object with another bytes object."""
return bytes(self) + bytes(other)
def __radd__(self, other: bytes | SupportsBytes) -> bytes:
"""Concatenates the object with another bytes object."""
return bytes(other) + bytes(self)
@dataclass(frozen=True)
class FP(BytesConcatenation):
    """A single element of the base field Fp."""

    x: int = 0  # Integer value of the field element.

    def __bytes__(self) -> bytes:
        """Encode the element as a 64-byte big-endian integer."""
        return self.x.to_bytes(64, "big")
@dataclass(frozen=True)
class PointG1(BytesConcatenation):
    """A single point in G1 with affine integer coordinates."""

    x: int = 0
    y: int = 0

    def __bytes__(self) -> bytes:
        """Encode the point as x || y, 64 big-endian bytes each."""
        return b"".join(coord.to_bytes(64, "big") for coord in (self.x, self.y))

    def __neg__(self) -> "PointG1":
        """Return the additive inverse: same x, y negated modulo P."""
        return PointG1(self.x, Spec.P - self.y)
@dataclass(frozen=True)
class FP2(BytesConcatenation):
    """A single element of the quadratic extension field Fp2."""

    x: Tuple[int, int] = (0, 0)  # The two integer components of the element.

    def __bytes__(self) -> bytes:
        """Encode both components as consecutive 64-byte big-endian values."""
        return b"".join(component.to_bytes(64, "big") for component in self.x)
@dataclass(frozen=True)
class PointG2(BytesConcatenation):
    """A single point in G2, with coordinates in Fp2 (pairs of integers)."""

    x: Tuple[int, int] = (0, 0)
    y: Tuple[int, int] = (0, 0)

    def __bytes__(self) -> bytes:
        """Encode as x[0] || x[1] || y[0] || y[1], 64 big-endian bytes each."""
        components = (*self.x, *self.y)
        return b"".join(value.to_bytes(64, "big") for value in components)

    def __neg__(self) -> "PointG2":
        """Return the additive inverse: both y components negated modulo P."""
        return PointG2(self.x, (Spec.P - self.y[0], Spec.P - self.y[1]))
@dataclass(frozen=True)
class Scalar(BytesConcatenation):
    """A single scalar value."""

    x: int = 0  # Integer value of the scalar.

    def __bytes__(self) -> bytes:
        """Encode the scalar as a 32-byte big-endian integer."""
        return self.x.to_bytes(32, "big")
@dataclass(frozen=True)
class Spec:
    """
    Parameters from the EIP-2537 specifications as defined at
    https://eips.ethereum.org/EIPS/eip-2537.
    """

    # Addresses
    G1ADD = 0x0B
    G1MSM = 0x0C
    G2ADD = 0x0D
    G2MSM = 0x0E
    PAIRING = 0x0F
    MAP_FP_TO_G1 = 0x10
    MAP_FP2_TO_G2 = 0x11
    # Gas constants
    G1ADD_GAS = 375
    G1MUL_GAS = 12_000
    G2ADD_GAS = 600
    G2MUL_GAS = 22_500
    MAP_FP_TO_G1_GAS = 5_500
    MAP_FP2_TO_G2_GAS = 23_800
    PAIRING_BASE_GAS = 37_700
    PAIRING_PER_PAIR_GAS = 32_600
    # Other constants
    B_COEFFICIENT = 0x04
    # BLS12-381 curve parameter; Q and P below are derived from it.
    X = -0xD201000000010000
    Q = X**4 - X**2 + 1
    P = (X - 1) ** 2 * Q // 3 + X
    # Byte length of one (G1 point, G2 point) pair in the PAIRING input.
    LEN_PER_PAIR = len(PointG1() + PointG2())
    # Divisor applied to the discounted MSM gas product.
    MSM_MULTIPLIER = 1_000
    # Bit 384: the lowest bit above a 48-byte field element. OR-ing it into
    # an encoding makes one of the 16 mandatory zero-padding bytes non-zero
    # (see the "non_zero_byte_16_boundary_violation" negative tests).
    MAX_FP_BIT_SET = 1 << (48 * 8)
    # Per-pair-count MSM gas discounts; index 0 is unused, lookups clamp k
    # to at most 128.
    # fmt: off
    G1MSM_DISCOUNT_TABLE = [
        0,
        1000, 949, 848, 797, 764, 750, 738, 728, 719, 712, 705, 698, 692, 687, 682, 677, 673, 669,
        665, 661, 658, 654, 651, 648, 645, 642, 640, 637, 635, 632, 630, 627, 625, 623, 621, 619,
        617, 615, 613, 611, 609, 608, 606, 604, 603, 601, 599, 598, 596, 595, 593, 592, 591, 589,
        588, 586, 585, 584, 582, 581, 580, 579, 577, 576, 575, 574, 573, 572, 570, 569, 568, 567,
        566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549,
        548, 547, 547, 546, 545, 544, 543, 542, 541, 540, 540, 539, 538, 537, 536, 536, 535, 534,
        533, 532, 532, 531, 530, 529, 528, 528, 527, 526, 525, 525, 524, 523, 522, 522, 521, 520,
        520, 519
    ]
    G2MSM_DISCOUNT_TABLE = [
        0,
        1000, 1000, 923, 884, 855, 832, 812, 796, 782, 770, 759, 749, 740, 732, 724, 717, 711, 704,
        699, 693, 688, 683, 679, 674, 670, 666, 663, 659, 655, 652, 649, 646, 643, 640, 637, 634,
        632, 629, 627, 624, 622, 620, 618, 615, 613, 611, 609, 607, 606, 604, 602, 600, 598, 597,
        595, 593, 592, 590, 589, 587, 586, 584, 583, 582, 580, 579, 578, 576, 575, 574, 573, 571,
        570, 569, 568, 567, 566, 565, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552,
        552, 551, 550, 549, 548, 547, 546, 545, 545, 544, 543, 542, 541, 541, 540, 539, 538, 537,
        537, 536, 535, 535, 534, 533, 532, 532, 531, 530, 530, 529, 528, 528, 527, 526, 526, 525,
        524, 524
    ]
    # fmt: on
    # Test constants from
    # https://github.com/ethereum/bls12-381-tests/tree/eip-2537
    P1 = PointG1(  # random point in G1
        0x112B98340EEE2777CC3C14163DEA3EC97977AC3DC5C70DA32E6E87578F44912E902CCEF9EFE28D4A78B8999DFBCA9426,
        0x186B28D92356C4DFEC4B5201AD099DBDEDE3781F8998DDF929B4CD7756192185CA7B8F4EF7088F813270AC3D48868A21,
    )
    # Canonical G1 generator point.
    G1 = PointG1(
        0x17F1D3A73197D7942695638C4FA9AC0FC3688C4F9774B905A14E3A3F171BAC586C55E83FF97A1AEFFB3AF00ADB22C6BB,
        0x8B3F481E3AAA0F1A09E30ED741D8AE4FCF5E095D5D00AF600DB18CB2C04B3EDD03CC744A2888AE40CAA232946C5E7E1,
    )
    # point at infinity in G1
    INF_G1 = PointG1(0, 0)
    # random point in G2
    P2 = PointG2(
        (
            0x103121A2CEAAE586D240843A398967325F8EB5A93E8FEA99B62B9F88D8556C80DD726A4B30E84A36EEABAF3592937F27,
            0x86B990F3DA2AEAC0A36143B7D7C824428215140DB1BB859338764CB58458F081D92664F9053B50B3FBD2E4723121B68,
        ),
        (
            0xF9E7BA9A86A8F7624AA2B42DCC8772E1AF4AE115685E60ABC2C9B90242167ACEF3D0BE4050BF935EED7C3B6FC7BA77E,
            0xD22C3652D0DC6F0FC9316E14268477C2049EF772E852108D269D9C38DBA1D4802E8DAE479818184C08F9A569D878451,
        ),
    )
    # Canonical G2 generator point.
    G2 = PointG2(
        (
            0x24AA2B2F08F0A91260805272DC51051C6E47AD4FA403B02B4510B647AE3D1770BAC0326A805BBEFD48056C8C121BDB8,
            0x13E02B6052719F607DACD3A088274F65596BD0D09920B61AB5DA61BBDC7F5049334CF11213945D57E5AC7D055D042B7E,
        ),
        (
            0xCE5D527727D6E118CC9CDC6DA2E351AADFD9BAA8CBDD3A76D429A695160D12C923AC9CC3BACA289E193548608B82801,
            0x606C4A02EA734CC32ACD2B02BC28B99CB3E287E85A763AF267492AB572E99AB3F370D275CEC1DA1AAA9075FF05F79BE,
        ),
    )
    # point at infinity in G2
    INF_G2 = PointG2((0, 0), (0, 0))
    # Other test constants
    # point not in subgroup in curve Fp
    P1_NOT_IN_SUBGROUP = PointG1(0, 2)
    P1_NOT_IN_SUBGROUP_TIMES_2 = PointG1(0, P - 2)
    # point not in subgroup in curve Fp2
    P2_NOT_IN_SUBGROUP = PointG2(
        (1, 1),
        (
            0x17FAA6201231304F270B858DAD9462089F2A5B83388E4B10773ABC1EEF6D193B9FCE4E8EA2D9D28E3C3A315AA7DE14CA,
            0xCC12449BE6AC4E7F367E7242250427C4FB4C39325D3164AD397C1837A90F0EA1A534757DF374DD6569345EB41ED76E,
        ),
    )
    P2_NOT_IN_SUBGROUP_TIMES_2 = PointG2(
        (
            0x919F97860ECC3E933E3477FCAC0E2E4FCC35A6E886E935C97511685232456263DEF6665F143CCCCB44C733333331553,
            0x18B4376B50398178FA8D78ED2654B0FFD2A487BE4DBE6B69086E61B283F4E9D58389CCCB8EDC99995718A66666661555,
        ),
        (
            0x26898F699C4B07A405AB4183A10B47F923D1C0FDA1018682DD2CCC88968C1B90D44534D6B9270CF57F8DC6D4891678A,
            0x3270414330EAD5EC92219A03A24DFA059DBCBE610868BE1851CC13DAC447F60B40D41113FD007D3307B19ADD4B0F061,
        ),
    )
    # Pairing precompile results
    PAIRING_TRUE = int.to_bytes(1, length=32, byteorder="big")
    PAIRING_FALSE = int.to_bytes(0, length=32, byteorder="big")
    # Returned on precompile failure
    INVALID = b""
class BLS12Group(Enum):
    """Helper enum to specify the BLS12 group in discount table helpers."""

    # Values are auto-assigned; only member identity/equality is used.
    G1 = auto()
    G2 = auto()
def msm_discount(group: BLS12Group, k: int) -> int:
    """Return the discount for the G1MSM and G2MSM precompiles."""
    assert k >= 1, "k must be greater than or equal to 1"
    # Discount tables have 128 usable entries; larger k reuses the last one.
    index = min(k, 128)
    if group == BLS12Group.G1:
        return Spec.G1MSM_DISCOUNT_TABLE[index]
    if group == BLS12Group.G2:
        return Spec.G2MSM_DISCOUNT_TABLE[index]
    raise ValueError(f"Unsupported group: {group}")
def msm_gas_func_gen(
    group: BLS12Group, len_per_pair: int, multiplication_cost: int
) -> Callable[[int], int]:
    """
    Generate a function that calculates the gas cost for the G1MSM and G2MSM
    precompiles.
    """

    def msm_gas(input_length: int) -> int:
        """Calculate gas cost for the G1MSM and G2MSM precompiles."""
        pair_count = input_length // len_per_pair
        if pair_count == 0:
            return 0
        discount = msm_discount(group, pair_count)
        return pair_count * multiplication_cost * discount // Spec.MSM_MULTIPLIER

    return msm_gas
def pairing_gas(input_length: int) -> int:
    """Calculate gas cost for the PAIRING precompile."""
    pair_count = input_length // Spec.LEN_PER_PAIR
    return Spec.PAIRING_BASE_GAS + Spec.PAIRING_PER_PAIR_GAS * pair_count
# Maps precompile address -> function(input_length_in_bytes) -> gas charged.
GAS_CALCULATION_FUNCTION_MAP = {
    Spec.G1ADD: lambda _: Spec.G1ADD_GAS,
    Spec.G1MSM: msm_gas_func_gen(BLS12Group.G1, len(PointG1() + Scalar()), Spec.G1MUL_GAS),
    Spec.G2ADD: lambda _: Spec.G2ADD_GAS,
    Spec.G2MSM: msm_gas_func_gen(BLS12Group.G2, len(PointG2() + Scalar()), Spec.G2MUL_GAS),
    Spec.PAIRING: pairing_gas,
    Spec.MAP_FP_TO_G1: lambda _: Spec.MAP_FP_TO_G1_GAS,
    Spec.MAP_FP2_TO_G2: lambda _: Spec.MAP_FP2_TO_G2_GAS,
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp2_to_g2.py | """
Test the BLS12_MAP_FP2_TO_G2 precompile.
Test the BLS12_MAP_FP2_TO_G2 precompile introduced in
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G2_FIELD_POINTS_MAP_TO_IDENTITY
from .helpers import vectors_from_file
from .spec import FP2, PointG2, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version

# Every test in this module is valid from Prague and targets the
# MAP_FP2_TO_G2 precompile address.
pytestmark = [
    pytest.mark.valid_from("Prague"),
    pytest.mark.parametrize("precompile_address", [Spec.MAP_FP2_TO_G2], ids=[""]),
]

# Expected G2 output of mapping the zero Fp2 element (see the `fp_0` case).
G2_POINT_ZERO_FP = PointG2(
    (
        0x18320896EC9EEF9D5E619848DC29CE266F413D02DD31D9B9D44EC0C79CD61F18B075DDBA6D7BD20B7FF27A4B324BFCE,
        0xA67D12118B5A35BB02D2E86B3EBFA7E23410DB93DE39FB06D7025FA95E96FFA428A7A27C3AE4DD4B40BD251AC658892,
    ),
    (
        0x260E03644D1A2C321256B3246BAD2B895CAD13890CBE6F85DF55106A0D334604FB143C7A042D878006271865BC35941,
        0x4C69777A43F0BDA07679D5805E63F18CF4E0E7C6112AC7F70266D199B4F76AE27C6269A3CEEBDAE30806E9A76AADF5C,
    ),
)
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("map_fp2_to_G2_bls.json")
+ [
pytest.param(
FP2((0, 0)),
G2_POINT_ZERO_FP,
None,
id="fp_0",
),
pytest.param(
FP2((Spec.P - 1, Spec.P - 1)),
PointG2(
(
0x9BF1B857D8C15F317F649ACCFA7023EF21CFC03059936B83B487DB476FF9D2FE64C6147140A5F0A436B875F51FFDF07,
0xBB10E09BDF236CB2951BD7BCC044E1B9A6BB5FD4B2019DCC20FFDE851D52D4F0D1A32382AF9D7DA2C5BA27E0F1C69E6,
),
(
0xDD416A927AB1C15490AB753C973FD377387B12EFCBE6BED2BF768B9DC95A0CA04D1A8F0F30DBC078A2350A1F823CFD3,
0x171565CE4FCD047B35EA6BCEE4EF6FDBFEC8CC73B7ACDB3A1EC97A776E13ACDFEFFC21ED6648E3F0EEC53DDB6C20FB61,
),
),
None,
id="fp_p_minus_1",
),
pytest.param(
FP2(
(
3510328712861478240121438855244276237335901234329585006107499559909114695366216070652508985150831181717984778988906,
2924545590598115509050131525615277284817672420174395176262156166974132393611647670391999011900253695923948997972401,
)
),
Spec.INF_G2,
None,
id="fp_map_to_inf",
),
],
)
def test_valid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Run the valid-input vectors against the BLS12_MAP_FP2_TO_G2 precompile."""
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize("expected_output", [Spec.INF_G2], ids=[""])
@pytest.mark.parametrize(
"input_data,vector_gas_value",
[
pytest.param(t, None, id=f"isogeny_kernel_{i}")
for i, t in enumerate(G2_FIELD_POINTS_MAP_TO_IDENTITY)
],
)
def test_isogeny_kernel_values(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Exercise the BLS12_MAP_FP2_TO_G2 precompile with isogeny kernel values.

    The isogeny kernel is the set of special field values that, after the
    two-step mapping (SWU onto an auxiliary curve, then a 3-degree isogeny
    back to G2), collapse exactly to the identity point. For G2 the only
    kernel element is the point at infinity, and SWU never produces the
    identity from a finite input t, so `G2_FIELD_POINTS_MAP_TO_IDENTITY` is
    empty and this test has no cases; it exists only to mirror the G1 test.
    See the generator in `helpers.py` for more details.
    """
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-map_fp2_to_G2_bls.json")
+ [
pytest.param(b"\x80" + bytes(FP2((0, 0)))[1:], id="invalid_encoding"),
pytest.param(bytes(FP2((0, 0)))[1:], id="input_too_short"),
pytest.param(b"\x00" + FP2((0, 0)), id="input_too_long"),
pytest.param(b"", id="zero_length_input"),
pytest.param(FP2((Spec.P, 0)), id="fq_eq_q"),
pytest.param(FP2((0, Spec.P)), id="fq_eq_q_2"),
pytest.param(FP2((2**512 - 1, 0)), id="fq_eq_2_512_minus_1"),
pytest.param(FP2((0, 2**512 - 1)), id="fq_eq_2_512_minus_1_2"),
pytest.param(Spec.G2, id="g2_input"),
pytest.param(FP2((Spec.P + 1, 0)), id="fp2_above_modulus_c0"),
pytest.param(FP2((0, Spec.P + 1)), id="fp2_above_modulus_c1"),
pytest.param(FP2((2**384, 0)), id="fp2_large_power_of_2_c0"),
pytest.param(FP2((0, 2**384)), id="fp2_large_power_of_2_c1"),
pytest.param(bytes(FP2((0, 0))) + bytes([0x00]), id="fp2_with_extra_byte"),
pytest.param(bytes(FP2((0, 0)))[:95], id="fp2_one_byte_short"),
pytest.param(bytes([0xFF]) + bytes(FP2((0, 0)))[1:], id="fp2_invalid_first_byte"),
pytest.param(Spec.INF_G2, id="g2_inf_input"),
pytest.param(
FP2(((Spec.P - 1) | Spec.MAX_FP_BIT_SET, Spec.P - 1)),
id="non_zero_byte_16_boundary_violation_c0",
),
pytest.param(
FP2((Spec.P - 1, (Spec.P - 1) | Spec.MAX_FP_BIT_SET)),
id="non_zero_byte_16_boundary_violation_c1",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Negative tests for the BLS12_MAP_FP2_TO_G2 precompile."""
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
FP2((0, 0)),
G2_POINT_ZERO_FP,
1,
id="extra_gas",
),
pytest.param(
FP2((0, 0)),
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Test the BLS12_MAP_FP2_TO_G2 precompile gas requirements."""
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
FP2((0, 0)),
G2_POINT_ZERO_FP,
id="fp_0",
),
],
)
def test_call_types(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Test the BLS12_MAP_FP2_TO_G2 precompile using different call types."""
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_map_fp_to_g1.py | """
Tests BLS12_MAP_FP_TO_G1 precompile.
Tests the BLS12_MAP_FP_TO_G1 precompile implementation from
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G1_FIELD_POINTS_MAP_TO_IDENTITY
from .helpers import vectors_from_file
from .spec import FP, PointG1, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version

# Every test in this module is valid from Prague and targets the
# MAP_FP_TO_G1 precompile address.
pytestmark = [
    pytest.mark.valid_from("Prague"),
    pytest.mark.parametrize("precompile_address", [Spec.MAP_FP_TO_G1], ids=[""]),
]

# Expected G1 output of mapping the zero Fp element (see the `fp_0` case).
G1_POINT_ZERO_FP = PointG1(
    0x11A9A0372B8F332D5C30DE9AD14E50372A73FA4C45D5F2FA5097F2D6FB93BCAC592F2E1711AC43DB0519870C7D0EA415,
    0x92C0F994164A0719F51C24BA3788DE240FF926B55F58C445116E8BC6A47CD63392FD4E8E22BDF9FEAA96EE773222133,
)
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("map_fp_to_G1_bls.json")
+ [
pytest.param(
FP(0),
G1_POINT_ZERO_FP,
None,
id="fp_0",
),
pytest.param(
FP(Spec.P - 1),
PointG1(
0x1073311196F8EF19477219CCEE3A48035FF432295AA9419EED45D186027D88B90832E14C4F0E2AA4D15F54D1C3ED0F93,
0x16B3A3B2E3DDDF6A11459DDAF657FDE21C4F10282A56029D9B55AB3CE1F41E1CF39AD27E0EA35823C7D3250E81FF3D66,
),
None,
id="fp_p_minus_1",
),
pytest.param(
FP(
799950832265136997107648781861994410980648980263584507133499364313075404851459407870655748616451882783569609925573
),
Spec.INF_G1,
None,
id="fp_map_to_inf",
),
],
)
def test_valid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Run the valid-input vectors against the BLS12_MAP_FP_TO_G1 precompile."""
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize("expected_output", [Spec.INF_G1], ids=[""])
@pytest.mark.parametrize(
"input_data,vector_gas_value",
[
pytest.param(t, None, id=f"isogeny_kernel_{i}")
for i, t in enumerate(G1_FIELD_POINTS_MAP_TO_IDENTITY)
],
)
def test_isogeny_kernel_values(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Exercise the BLS12_MAP_FP_TO_G1 precompile with isogeny kernel inputs.

    The isogeny kernel is the set of special field values that, after the
    two-step mapping (SWU onto an auxiliary curve, then an 11-degree isogeny
    back to G1), collapse exactly to the identity point. See the generator in
    `helpers.py` for how the kernel values are produced.
    """
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-map_fp_to_G1_bls.json")
+ [
pytest.param(b"\x80" + bytes(FP(0))[1:], id="invalid_encoding"),
pytest.param(bytes(FP(0))[1:], id="input_too_short"),
pytest.param(b"\x00" + FP(0), id="input_too_long"),
pytest.param(b"", id="zero_length_input"),
pytest.param(FP(Spec.P), id="fq_eq_q"),
pytest.param(FP(2**512 - 1), id="fq_eq_2_512_minus_1"),
pytest.param(Spec.G1, id="g1_input"),
pytest.param(FP(Spec.P + 1), id="fp_above_modulus"),
pytest.param(FP(2**384), id="fp_large_power_of_2"),
pytest.param(bytes(FP(0)) + bytes([0x00]), id="fp_with_extra_byte"),
pytest.param(bytes(FP(0))[:47], id="fp_one_byte_short"),
pytest.param(bytes([0xFF]) + bytes(FP(0))[1:], id="fp_invalid_first_byte"),
pytest.param(Spec.INF_G1, id="g1_inf_input"),
pytest.param(
FP((Spec.P - 1) | Spec.MAX_FP_BIT_SET), id="non_zero_byte_16_boundary_violation"
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Negative tests for the BLS12_MAP_FP_TO_G1 precompile."""
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
FP(0),
G1_POINT_ZERO_FP,
1,
id="extra_gas",
),
pytest.param(
FP(0),
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Check the BLS12_MAP_FP_TO_G1 precompile gas requirements."""
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
FP(0),
G1_POINT_ZERO_FP,
id="fp_0",
),
],
)
def test_call_types(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Check the BLS12_MAP_FP_TO_G1 precompile under different call types."""
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_variable_length_input_contracts.py | """
Tests minimum gas and input length for BLS12 precompiles.
Tests minimum gas and input length requirements for BLS12_G1MSM,
BLS12_G2MSM, and BLS12_PAIRING precompiles from [EIP-2537: Precompile
for BLS12-381 curve operations]
(https://eips.ethereum.org/EIPS/eip-2537).
"""
from typing import Callable, List, SupportsBytes
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Bytecode, Environment, StateTestFiller, Storage, Transaction
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools.utility.pytest import ParameterSet
from .spec import GAS_CALCULATION_FUNCTION_MAP, PointG1, PointG2, Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version

pytestmark = pytest.mark.valid_from("Prague")

# Input length of a single (point, scalar) pair for each MSM precompile.
G1_MSM_K_INPUT_LENGTH = len(PointG1() + Scalar())
G2_MSM_K_INPUT_LENGTH = len(PointG2() + Scalar())
# Gas calculators for the variable-length precompiles, keyed by address.
G1_GAS = GAS_CALCULATION_FUNCTION_MAP[Spec.G1MSM]
G2_GAS = GAS_CALCULATION_FUNCTION_MAP[Spec.G2MSM]
PAIRING_GAS = GAS_CALCULATION_FUNCTION_MAP[Spec.PAIRING]
# Maximum number of pairs exercised by the PAIRING tests.
PAIRINGS_TO_TEST = 20
@pytest.fixture
def input_data() -> bytes:
    """Empty transaction calldata; every input in these tests is all zeros."""
    return b""
@pytest.fixture
def gas_modifier() -> int:
    """Amount added to every entry of the precompile_gas_list."""
    return 0
@pytest.fixture
def input_length_modifier() -> int:
    """Amount added to every entry of the precompile_data_length_list."""
    return 0
@pytest.fixture
def env(fork: Fork, tx: Transaction) -> Environment:
    """Environment used for all tests."""
    tx_gas_limit_cap = fork.transaction_gas_limit_cap()
    if tx_gas_limit_cap is not None:
        assert tx.gas_limit <= tx_gas_limit_cap, (
            f"tx exceeds gas limit cap: {int(tx.gas_limit)} > {tx_gas_limit_cap}"
        )
    # Raise the block gas limit when the transaction needs more than default.
    env = Environment()
    if tx.gas_limit > env.gas_limit:
        env = Environment(gas_limit=tx.gas_limit)
    return env
@pytest.fixture
def call_contract_code(
    precompile_address: int,
    precompile_gas_list: List[int],
    precompile_data_length_list: List[int],
    gas_modifier: int,
    input_length_modifier: int,
    expected_output: bytes | SupportsBytes,
    call_opcode: Op,
    call_contract_post_storage: Storage,
) -> Bytecode:
    """
    Code of the test contract to validate minimum expected gas in precompiles,
    as well as expected input lengths on all variable-length input precompiles.

    Code differs from the one used in all other tests in this file, because it
    accepts a list of precompile gas values and a list of precompile data
    lengths, and for each pair of values, it calls the precompile with the
    given gas and data length, data being passed to the precompile is all
    zeros.

    Args:
        precompile_address: Address of the precompile to call.
        precompile_gas_list: List of gas values to be used to call the
            precompile, one for each call.
        precompile_data_length_list: List of data lengths to be used to call
            the precompile, one for each call.
        gas_modifier: Integer to add to the gas passed to the precompile.
        input_length_modifier: Integer to add to the length of the input
            passed to the precompile.
        expected_output: Expected output of the contract, it is only used to
            determine if the call is expected to succeed or fail.
        call_opcode: Type of call used to call the precompile (Op.CALL,
            Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL).
        call_contract_post_storage: Storage of the test contract after the
            transaction is executed.
    """
    expected_output = bytes(expected_output)
    # Depending on the expected output, we can deduce if the call is expected
    # to succeed or fail.
    call_succeeds = len(expected_output) > 0
    assert len(precompile_gas_list) == len(precompile_data_length_list)
    assert call_opcode in [Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL]
    # Only CALL and CALLCODE take a value argument.
    value = [0] if call_opcode in [Op.CALL, Op.CALLCODE] else []
    code = Bytecode()
    for precompile_gas, precompile_args_length in zip(
        precompile_gas_list, precompile_data_length_list, strict=False
    ):
        # For each given precompile gas value, and given arguments length,
        # call the precompile with the given gas and call data (all zeros)
        # and compare the result.
        code += Op.SSTORE(
            call_contract_post_storage.store_next(1 if call_succeeds else 0),
            # BUGFIX: use the parametrized `call_opcode` instead of a
            # hard-coded Op.CALL; `value` is only included for CALL/CALLCODE,
            # so Op.CALL would receive the wrong number of arguments for
            # DELEGATECALL and STATICCALL.
            call_opcode(
                precompile_gas + gas_modifier,
                precompile_address,
                *value,  # Optional, only used for CALL and CALLCODE.
                0,
                precompile_args_length
                + input_length_modifier,  # Memory is empty, so we pass zeros.
                0,
                0,
            ),
        )
    return code
def tx_gas_limit_calculator(
    fork: Fork, precompile_gas_list: List[int], max_precompile_input_length: int
) -> int:
    """
    Calculate the gas used to execute the transaction with the given
    precompile gas list.
    """
    intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
    memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
    # Flat per-call allowance on top of the precompile gas itself
    # (presumably SSTORE plus call scaffolding — not derived here).
    extra_gas = 22_500 * len(precompile_gas_list)
    total = extra_gas + intrinsic_gas_cost_calculator()
    total += memory_expansion_gas_calculator(new_bytes=max_precompile_input_length)
    return total + sum(precompile_gas_list)
@pytest.fixture
def tx_gas_limit(
    fork: Fork,
    input_data: bytes,
    precompile_gas_list: List[int],
    precompile_data_length_list: List[int],
) -> int:
    """Transaction gas limit used for the test (can be overridden in the test)."""
    assert len(input_data) == 0, "Expected empty data in the transaction."
    largest_input = max(precompile_data_length_list)
    return tx_gas_limit_calculator(fork, precompile_gas_list, largest_input)
def get_split_discount_table_by_fork(
    gas_fn: Callable, discount_table_length: int, element_length: int
) -> Callable[[Fork], List[ParameterSet]]:
    """
    Get the number of test cases needed to cover the given discount table
    adjusted for the fork transaction gas limit cap.

    The function will return the full discount table as a single test case if
    the fork has no transaction gas limit cap, otherwise it will iterate to
    determine the splits required to fit the full discount table across
    multiple test cases.

    Args:
        gas_fn: Callable mapping an input length in bytes to precompile gas.
        discount_table_length: Number of discount table entries to cover.
        element_length: Byte length of a single input element (pair).
    """

    def parametrize_by_fork(fork: Fork) -> List[ParameterSet]:
        tx_gas_limit_cap = fork.transaction_gas_limit_cap()
        if tx_gas_limit_cap is None:
            # No cap: one test case covers the whole table.
            return [
                pytest.param(
                    [gas_fn(i * element_length) for i in range(1, discount_table_length + 1)],
                    [i * element_length for i in range(1, discount_table_length + 1)],
                    id="full_discount_table",
                )
            ]
        else:

            def gas_list_from_range(min_index: int, max_index: int) -> List[int]:
                # Gas values for each pair count k in [min_index, max_index).
                return [gas_fn(i * element_length) for i in range(min_index, max_index)]

            def get_range_cost(min_index: int, max_index: int) -> int:
                # Total transaction gas for a case covering [min_index, max_index).
                return tx_gas_limit_calculator(
                    fork,
                    gas_list_from_range(min_index, max_index),
                    max_index * element_length,
                )

            # Greedily grow each half-open range until adding one more entry
            # would exceed the cap. (Despite the name, this path is also used
            # for the G2 MSM and pairing tables.)
            g1_msm_discount_table_ranges = []
            current_min = 1
            for current_max in range(2, discount_table_length + 1):
                range_cost = get_range_cost(current_min, current_max + 1)
                if range_cost > tx_gas_limit_cap:
                    new_range = (current_min, current_max)
                    g1_msm_discount_table_ranges.append(new_range)
                    current_min = current_max
                elif current_max == discount_table_length:
                    new_range = (current_min, current_max + 1)
                    g1_msm_discount_table_ranges.append(new_range)
            # NOTE(review): if a split lands exactly on the final entry (the
            # `>` branch fires when current_max == discount_table_length),
            # the last element is never appended and the asserts below fail
            # at collection time — confirm this cannot occur for configured
            # forks.
            g1_msm_discount_table_splits = [
                [
                    [gas_fn(i * element_length) for i in range(r[0], r[1])],
                    [i * element_length for i in range(r[0], r[1])],
                ]
                for r in g1_msm_discount_table_ranges
            ]
            # Sanity check: the splits must exactly cover the table.
            assert (
                sum(len(split[0]) for split in g1_msm_discount_table_splits)
                == discount_table_length
            )
            assert (
                sum(len(split[1]) for split in g1_msm_discount_table_splits)
                == discount_table_length
            )
            return [
                pytest.param(
                    *split,
                    id=f"discount_table_{idx + 1}_of_{len(g1_msm_discount_table_splits)}",
                )
                for idx, split in enumerate(g1_msm_discount_table_splits)
            ]

    return parametrize_by_fork
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G1_GAS, len(Spec.G1MSM_DISCOUNT_TABLE), G1_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(0, id="exact_gas")])
@pytest.mark.parametrize("expected_output", [PointG1()], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G1MSM])
@pytest.mark.slow()
def test_valid_gas_g1msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Cover the whole BLS12_G1MSM discount gas table, expecting every call to
    succeed because exactly the required gas is supplied; any failing call
    fails the test.
    """
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[0],
[G1_MSM_K_INPUT_LENGTH],
id="zero_gas_passed",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G1MSM])
def test_invalid_zero_gas_g1msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Call the BLS12_G1MSM precompile with zero gas."""
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G1_GAS, len(Spec.G1MSM_DISCOUNT_TABLE), G1_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(-1, id="insufficient_gas")])
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G1MSM])
def test_invalid_gas_g1msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Cover the whole BLS12_G1MSM discount gas table, expecting every call to
    fail because one gas unit less than required is supplied; any succeeding
    call fails the test.
    """
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[G1_GAS(G1_MSM_K_INPUT_LENGTH)],
[0],
id="zero_length_input",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G1MSM])
def test_invalid_zero_length_g1msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Call the BLS12_G1MSM precompile with a zero-length input."""
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G1_GAS, len(Spec.G1MSM_DISCOUNT_TABLE), G1_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize(
"input_length_modifier",
[
pytest.param(-1, id="input_one_byte_too_short"),
pytest.param(1, id="input_one_byte_too_long"),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G1MSM])
def test_invalid_length_g1msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Cover the whole BLS12_G1MSM discount gas table, expecting every call to
    fail because the supplied input is one byte too long or one byte too
    short; any succeeding call fails the test.
    """
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G2_GAS, len(Spec.G2MSM_DISCOUNT_TABLE), G2_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(0, id="exact_gas")])
@pytest.mark.parametrize("expected_output", [PointG2()], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G2MSM])
@pytest.mark.slow()
def test_valid_gas_g2msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Cover the whole BLS12_G2MSM discount gas table, expecting every call to
    succeed because exactly the required gas is supplied; any failing call
    fails the test.
    """
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[0],
[G2_MSM_K_INPUT_LENGTH],
id="zero_gas_passed",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G2MSM])
def test_invalid_zero_gas_g2msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Call the BLS12_G2MSM precompile with zero gas."""
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G2_GAS, len(Spec.G2MSM_DISCOUNT_TABLE), G2_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(-1, id="insufficient_gas")])
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G2MSM])
def test_invalid_gas_g2msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Cover the whole BLS12_G2MSM discount gas table, expecting every call to
    fail because one gas unit less than required is supplied; any succeeding
    call fails the test.
    """
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[G2_GAS(G2_MSM_K_INPUT_LENGTH)],
[0],
id="zero_length_input",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G2MSM])
def test_invalid_zero_length_g2msm(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """Call the BLS12_G2MSM precompile with a zero-length input."""
    state_test(env=env, pre=pre, tx=tx, post=post)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(
G2_GAS, len(Spec.G2MSM_DISCOUNT_TABLE), G2_MSM_K_INPUT_LENGTH
),
)
@pytest.mark.parametrize(
"input_length_modifier",
[
pytest.param(-1, id="input_one_byte_too_short"),
pytest.param(1, id="input_one_byte_too_long"),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.G2MSM])
def test_invalid_length_g2msm(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test the BLS12_G2MSM discount gas table in full, by expecting the call to
fail for all possible input lengths provided because they are too long or
short, or zero length.
If any of the calls succeeds, the test will fail.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(PAIRING_GAS, PAIRINGS_TO_TEST, Spec.LEN_PER_PAIR),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(0, id="exact_gas")])
@pytest.mark.parametrize("expected_output", [Spec.PAIRING_TRUE], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.PAIRING])
@pytest.mark.slow()
def test_valid_gas_pairing(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test the BLS12_PAIRING precompile, by expecting the call to succeed for all
possible input lengths (up to k == PAIRINGS_TO_TEST).
If any of the calls fails, the test will fail.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[0],
[Spec.LEN_PER_PAIR],
id="zero_gas_passed",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.PAIRING])
def test_invalid_zero_gas_pairing(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_PAIRING precompile calling it with zero gas."""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(PAIRING_GAS, PAIRINGS_TO_TEST, Spec.LEN_PER_PAIR),
)
@pytest.mark.parametrize("gas_modifier", [pytest.param(-1, id="insufficient_gas")])
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.PAIRING])
def test_invalid_gas_pairing(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test the BLS12_PAIRING precompile, by expecting the call to fail for all
possible input lengths (up to k == PAIRINGS_TO_TEST) because the
appropriate amount of gas is not provided.
If any of the calls succeeds, the test will fail.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"precompile_gas_list,precompile_data_length_list",
[
pytest.param(
[PAIRING_GAS(Spec.LEN_PER_PAIR)],
[0],
id="zero_length",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.PAIRING])
def test_invalid_zero_length_pairing(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test the BLS12_PAIRING precompile by passing an input with zero length.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize_by_fork(
"precompile_gas_list,precompile_data_length_list",
get_split_discount_table_by_fork(PAIRING_GAS, PAIRINGS_TO_TEST, Spec.LEN_PER_PAIR),
)
@pytest.mark.parametrize(
"input_length_modifier",
[
pytest.param(-1, id="input_one_byte_too_short"),
pytest.param(1, id="input_one_byte_too_long"),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.PAIRING])
def test_invalid_length_pairing(
state_test: StateTestFiller,
env: Environment,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test the BLS12_PAIRING precompile, by expecting the call to fail for all
possible input lengths (up to k == PAIRINGS_TO_TEST) because the incorrect
input length was used.
If any of the calls succeeds, the test will fail.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_pairing.py | """
Tests BLS12_PAIRING precompile.
Tests the BLS12_PAIRING precompile implementation from
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
EOA,
Address,
Alloc,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .conftest import (
G1_POINTS_NOT_IN_SUBGROUP,
G1_POINTS_NOT_ON_CURVE,
G2_POINTS_NOT_IN_SUBGROUP,
G2_POINTS_NOT_ON_CURVE,
)
from .helpers import vectors_from_file
from .spec import PointG1, PointG2, Spec, pairing_gas, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.PAIRING], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("pairing_check_bls.json")
+ [
pytest.param(
Spec.G1 + Spec.INF_G2,
Spec.PAIRING_TRUE,
None,
id="generator_with_inf_g2",
),
pytest.param(
Spec.INF_G1 + Spec.G2,
Spec.PAIRING_TRUE,
None,
id="inf_g1_with_generator",
),
pytest.param( # e(inf, inf) == 1
Spec.INF_G1 + Spec.INF_G2,
Spec.PAIRING_TRUE,
None,
id="inf_pair",
),
pytest.param( # e(P, Q) . e(P, −Q) == 1 (inverse pair, factors cancel)
Spec.G1 + Spec.G2 + Spec.G1 + (-Spec.G2),
Spec.PAIRING_TRUE,
None,
id="g1_g2_and_inverse",
),
pytest.param( # e(P,Q) · e(P,−Q) · e(−P,Q) · e(−P,−Q) == 1 (full sign cancellation)
Spec.G1
+ Spec.G2
+ Spec.G1
+ (-Spec.G2)
+ (-Spec.G1)
+ Spec.G2
+ (-Spec.G1)
+ (-Spec.G2),
Spec.PAIRING_TRUE,
None,
id="full_sign_cancellation",
),
pytest.param( # 127 × e(inf, inf) . e(P, Q) + e(P, −Q) == 1
(Spec.INF_G1 + Spec.INF_G2) * 127 + Spec.G1 + Spec.G2 + Spec.G1 + (-Spec.G2),
Spec.PAIRING_TRUE,
None,
id="large_input_with_cancellation",
),
pytest.param( # e(P, Q) . e(−P, −Q) = e(P, Q)^2
Spec.G1 + Spec.G2 + (-Spec.G1) + (-Spec.G2),
Spec.PAIRING_FALSE,
None,
id="negated_both_pairs",
),
pytest.param( # e(inf, inf) . e(P, −Q)
(Spec.INF_G1 + Spec.INF_G2) + (Spec.G1 + (-Spec.G2)),
Spec.PAIRING_FALSE,
None,
id="multi_inf_g1_neg_g2",
),
pytest.param(
(Spec.G1 + (-Spec.G2)) + (Spec.INF_G1 + Spec.INF_G2),
Spec.PAIRING_FALSE,
None,
id="g1_neg_g2_multi_inf",
),
pytest.param(
Spec.G1 + Spec.G2,
Spec.PAIRING_FALSE,
None,
id="single_generator_pair",
),
pytest.param(
(Spec.INF_G1 + Spec.INF_G2) + (Spec.G1 + Spec.G2),
Spec.PAIRING_FALSE,
None,
id="inf_plus_generator_pair",
),
pytest.param( # e(P, Q) . e(P, −Q) . e(−P, Q)
Spec.G1 + Spec.G2 + Spec.G1 + (-Spec.G2) + (-Spec.G1) + Spec.G2,
Spec.PAIRING_FALSE,
None,
id="partial_sign_cancellation",
),
],
)
def test_valid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_PAIRING precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.slow
@pytest.mark.parametrize("precompile_gas", [None], ids=[""])
@pytest.mark.parametrize("expected_output", [Spec.PAIRING_TRUE], ids=[""])
def test_valid_multi_inf(
state_test: StateTestFiller,
pre: Alloc,
call_contract_address: Address,
sender: EOA,
fork: Fork,
post: dict,
) -> None:
"""
Test maximum input given the current environment gas limit for the
BLS12_PAIRING precompile.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
max_gas_limit = Environment().gas_limit if tx_gas_limit_cap is None else tx_gas_limit_cap
inf_data = Spec.INF_G1 + Spec.INF_G2
input_data = inf_data
while True:
precompile_gas = pairing_gas(len(input_data + inf_data))
new_tx_gas_limit = (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=input_data + inf_data)
+ memory_expansion_gas_calculator(new_bytes=len(input_data + inf_data))
+ precompile_gas
)
if new_tx_gas_limit > max_gas_limit:
break
tx_gas_limit = new_tx_gas_limit
input_data += inf_data
tx = Transaction(
gas_limit=tx_gas_limit,
data=input_data,
to=call_contract_address,
sender=sender,
)
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-pairing_check_bls.json")
+ [
# Coordinate equal to p (modulus) in G1
pytest.param(
PointG1(Spec.P, 0) + Spec.INF_G2,
id="g1_p_g2_inf_1",
),
pytest.param(
PointG1(0, Spec.P) + Spec.INF_G2,
id="g1_p_g2_inf_2",
),
# Coordinate equal to p (modulus) in G2
pytest.param(
Spec.INF_G1 + PointG2((Spec.P, 0), (0, 0)),
id="g1_inf_g2_p_1",
),
pytest.param(
Spec.INF_G1 + PointG2((0, Spec.P), (0, 0)),
id="g1_inf_g2_p_2",
),
pytest.param(
Spec.INF_G1 + PointG2((0, 0), (Spec.P, 0)),
id="g1_inf_g2_p_3",
),
pytest.param(
Spec.INF_G1 + PointG2((0, 0), (0, Spec.P)),
id="g1_inf_g2_p_4",
),
pytest.param(
b"\x80" + bytes(Spec.INF_G1)[1:] + Spec.INF_G2,
id="invalid_encoding_g1",
),
pytest.param(
Spec.INF_G1 + b"\x80" + bytes(Spec.INF_G2)[1:],
id="invalid_encoding_g2",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + Spec.INF_G2,
id="p1_not_in_subgroup",
),
pytest.param(
Spec.INF_G1 + Spec.P2_NOT_IN_SUBGROUP,
id="p2_not_in_subgroup",
),
# Points not in the subgroup or not on the curve randomly generated.
pytest.param(
G1_POINTS_NOT_ON_CURVE[0] + Spec.INF_G2,
id="rand_not_on_curve_g1_0_plus_inf",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[1] + Spec.G2,
id="rand_not_on_curve_g1_1_plus_g2",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[0] + Spec.G2,
id="rand_not_in_subgroup_g1_0_plus_g2",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[1] + Spec.INF_G2,
id="rand_not_in_subgroup_g1_1_plus_inf",
),
pytest.param(
Spec.INF_G1 + G2_POINTS_NOT_ON_CURVE[0],
id="inf_plus_rand_not_on_curve_g2_0",
),
pytest.param(
Spec.G1 + G2_POINTS_NOT_ON_CURVE[1],
id="g1_plus_rand_not_on_curve_g2_1",
),
pytest.param(
Spec.INF_G1 + G2_POINTS_NOT_IN_SUBGROUP[0],
id="inf_plus_rand_not_in_subgroup_g2_0",
),
pytest.param(
Spec.G1 + G2_POINTS_NOT_IN_SUBGROUP[1],
id="g1_plus_rand_not_in_subgroup_g2_1",
),
# Coordinates above modulus p cases.
pytest.param(
PointG1(Spec.P1.x + Spec.P, Spec.P1.y) + Spec.INF_G2,
id="g1_x_above_p_with_inf_g2",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y + Spec.P) + Spec.INF_G2,
id="g1_y_above_p_with_inf_g2",
),
pytest.param(
Spec.INF_G1 + PointG2((Spec.P2.x[0] + Spec.P, Spec.P2.x[1]), Spec.P2.y),
id="inf_g1_with_g2_x_c0_above_p",
),
pytest.param(
Spec.INF_G1 + PointG2((Spec.P2.x[0], Spec.P2.x[1] + Spec.P), Spec.P2.y),
id="inf_g1_with_g2_x_c1_above_p",
),
pytest.param(
Spec.INF_G1 + PointG2(Spec.P2.x, (Spec.P2.y[0] + Spec.P, Spec.P2.y[1])),
id="inf_g1_with_g2_y_c0_above_p",
),
pytest.param(
Spec.INF_G1 + PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] + Spec.P)),
id="inf_g1_with_g2_y_c1_above_p",
),
# Non-zero byte 16 boundary violation test cases.
pytest.param(
PointG1(Spec.G1.x | Spec.MAX_FP_BIT_SET, Spec.G1.y) + Spec.INF_G2,
id="non_zero_byte_16_boundary_violation_g1_x",
),
pytest.param(
PointG1(Spec.G1.x, Spec.G1.y | Spec.MAX_FP_BIT_SET) + Spec.INF_G2,
id="non_zero_byte_16_boundary_violation_g1_y",
),
pytest.param(
Spec.INF_G1 + PointG2((Spec.G2.x[0] | Spec.MAX_FP_BIT_SET, Spec.G2.x[1]), Spec.G2.y),
id="non_zero_byte_16_boundary_violation_g1_x1",
),
pytest.param(
Spec.INF_G1 + PointG2((Spec.G2.x[0], Spec.G2.x[1] | Spec.MAX_FP_BIT_SET), Spec.G2.y),
id="non_zero_byte_16_boundary_violation_g1_x2",
),
pytest.param(
Spec.INF_G1 + PointG2(Spec.G2.x, (Spec.G2.y[0] | Spec.MAX_FP_BIT_SET, Spec.G2.y[1])),
id="non_zero_byte_16_boundary_violation_g1_y1",
),
pytest.param(
Spec.INF_G1 + PointG2(Spec.G2.x, (Spec.G2.y[0], Spec.G2.y[1] | Spec.MAX_FP_BIT_SET)),
id="non_zero_byte_16_boundary_violation_g1_y2",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Negative tests for the BLS12_PAIRING precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.slow
@pytest.mark.parametrize("precompile_gas", [None], ids=[""])
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid_multi_inf(
state_test: StateTestFiller,
pre: Alloc,
call_contract_address: Address,
sender: EOA,
fork: Fork,
post: dict,
) -> None:
"""
Test maximum input given the current environment gas limit for the
BLS12_PAIRING precompile and an invalid tail.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
max_gas_limit = Environment().gas_limit if tx_gas_limit_cap is None else tx_gas_limit_cap
inf_data = Spec.INF_G1 + Spec.INF_G2
input_data = PointG1(Spec.P, 0) + Spec.INF_G2
while True:
precompile_gas = pairing_gas(len(input_data + inf_data))
new_tx_gas_limit = (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=input_data + inf_data)
+ memory_expansion_gas_calculator(new_bytes=len(input_data + inf_data))
+ precompile_gas
)
if new_tx_gas_limit > max_gas_limit:
break
tx_gas_limit = new_tx_gas_limit
input_data = inf_data + input_data
tx = Transaction(
gas_limit=tx_gas_limit,
data=input_data,
to=call_contract_address,
sender=sender,
)
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
Spec.INF_G1 + Spec.INF_G2,
Spec.PAIRING_TRUE,
1,
id="extra_gas",
),
pytest.param(
Spec.INF_G1 + Spec.INF_G2,
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_PAIRING precompile gas requirements."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G1 + Spec.INF_G2,
Spec.PAIRING_TRUE,
id="inf_pair",
),
],
)
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_PAIRING precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1msm.py | """
Test the BLS12_G1MSM precompile.
Test the BLS12_G1MSM precompile introduced in
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G1_POINTS_NOT_IN_SUBGROUP, G1_POINTS_NOT_ON_CURVE
from .helpers import PointG1, vectors_from_file
from .spec import Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.G1MSM], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("msm_G1_bls.json")
+ [
# Multiple pair scalar multiplication cases.
pytest.param(
Spec.G1 + Scalar(1) + Spec.INF_G1 + Scalar(1),
Spec.G1,
None,
id="g1_plus_inf",
),
pytest.param(
Spec.G1 + Scalar(0) + Spec.P1 + Scalar(0) + Spec.INF_G1 + Scalar(0),
Spec.INF_G1,
None,
id="all_zero_scalars",
),
pytest.param(
Spec.G1 + Scalar(1) + (-Spec.G1) + Scalar(1),
Spec.INF_G1,
None,
id="sum_to_identity_opposite",
),
pytest.param(
Spec.G1 + Scalar(Spec.Q - 1) + Spec.G1 + Scalar(1),
Spec.INF_G1,
None,
id="scalars_sum_to_q",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.G1 + Scalar(0) + Spec.INF_G1 + Scalar(5),
Spec.G1,
None,
id="combined_basic_cases",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.INF_G1 + Scalar(500),
Spec.G1,
None,
id="identity_with_large_scalar",
),
pytest.param(
Spec.G1 + Scalar(0) + Spec.P1 + Scalar(0) + (-Spec.G1) + Scalar(0),
Spec.INF_G1,
None,
id="multiple_points_zero_scalar",
),
# Cases with maximum discount table (test vector for gas cost
# calculation)
pytest.param(
(Spec.P1 + Scalar(Spec.Q)) * (len(Spec.G1MSM_DISCOUNT_TABLE) - 1),
Spec.INF_G1,
None,
id="max_discount",
),
pytest.param(
(Spec.P1 + Scalar(Spec.Q)) * len(Spec.G1MSM_DISCOUNT_TABLE),
Spec.INF_G1,
None,
id="max_discount_plus_1",
),
],
)
@pytest.mark.slow()
def test_valid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test valid calls to the BLS12_G1MSM precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-msm_G1_bls.json")
+ [
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + Scalar(Spec.Q),
id="not_in_subgroup_1_pos_0",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + Scalar(Spec.Q),
id="not_in_subgroup_2_pos_0",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.P1_NOT_IN_SUBGROUP + Scalar(Spec.Q),
id="not_in_subgroup_1_pos_1",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + Scalar(Spec.Q),
id="not_in_subgroup_2_pos_1",
),
pytest.param(
Spec.G1,
id="bls_g1_truncated_input",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[0] + Scalar(0),
id="rand_not_in_subgroup_0_pos_0",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[1] + Scalar(1),
id="rand_not_in_subgroup_1_pos_0",
),
pytest.param(
Spec.G1 + Scalar(1) + G1_POINTS_NOT_IN_SUBGROUP[0] + Scalar(0),
id="rand_not_in_subgroup_0_pos_1",
),
pytest.param(
Spec.G1 + Scalar(1) + G1_POINTS_NOT_IN_SUBGROUP[1] + Scalar(1),
id="rand_not_in_subgroup_1_pos_1",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[0] + Scalar(0),
id="not_on_curve_0_pos_0",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[1] + Scalar(1),
id="not_on_curve_1_pos_0",
),
pytest.param(
Spec.G1 + Scalar(1) + G1_POINTS_NOT_ON_CURVE[0] + Scalar(0),
id="not_on_curve_0_pos_1",
),
pytest.param(
Spec.G1 + Scalar(1) + G1_POINTS_NOT_ON_CURVE[1] + Scalar(1),
id="not_on_curve_1_pos_1",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.G1,
id="incomplete_input_missing_scalar",
),
pytest.param(
Spec.G1 + Scalar(1) + bytes([0]),
id="incomplete_input_extra_byte",
),
pytest.param(
Spec.G1 + Scalar(1) + Spec.G2 + Scalar(1),
id="mixing_g1_with_g2",
),
pytest.param(
Spec.G1 + (b"\x01" + b"\x00" * 32), # Scalar > UINT256_MAX
id="scalar_too_large",
),
pytest.param(
# Invalid scalar length
Spec.G1 + Scalar(1).x.to_bytes(16, byteorder="big"),
id="scalar_too_short",
),
pytest.param(
bytes([0]) * 159, # Just under minimum valid length
id="input_too_short_by_1",
),
# Coordinates above modulus p cases.
pytest.param(
PointG1(Spec.P1.x + Spec.P, Spec.P1.y) + Scalar(1),
id="x_above_p_pos_0",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y + Spec.P) + Scalar(1),
id="y_above_p_pos_0",
),
pytest.param(
Spec.G1 + Scalar(1) + PointG1(Spec.P1.x + Spec.P, Spec.P1.y) + Scalar(0),
id="x_above_p_pos_1",
),
pytest.param(
Spec.G1 + Scalar(1) + PointG1(Spec.P1.x, Spec.P1.y + Spec.P) + Scalar(0),
id="y_above_p_pos_1",
),
],
# Input length tests can be found in
# ./test_bls12_variable_length_input_contracts.py
)
@pytest.mark.parametrize(
"precompile_gas_modifier", [100_000], ids=[""]
) # Add gas so that won't be the cause of failure
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test invalid calls to the BLS12_G1MSM precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G1 + Scalar(0),
Spec.INF_G1,
id="single_inf_times_zero",
),
pytest.param(
(Spec.P1 + Scalar(Spec.Q)) * (len(Spec.G1MSM_DISCOUNT_TABLE) - 1),
Spec.INF_G1,
id="max_discount",
),
],
)
@pytest.mark.slow()
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G1MSM precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/conftest.py | tests/prague/eip2537_bls_12_381_precompiles/conftest.py | """Shared pytest definitions local to EIP-2537 tests."""
from typing import SupportsBytes
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import EOA, Address, Alloc, Bytecode, Storage, Transaction, keccak256
from ethereum_test_tools import Opcodes as Op
from .helpers import BLSPointGenerator
from .spec import GAS_CALCULATION_FUNCTION_MAP
@pytest.fixture
def vector_gas_value() -> int | None:
"""
Gas value from the test vector if any.
If `None` it means that the test scenario did not come from a file, so no
comparison is needed.
The `vectors_from_file` function reads the gas value from the file and
overwrites this fixture.
"""
return None
@pytest.fixture
def precompile_gas(
precompile_address: int, input_data: bytes, vector_gas_value: int | None
) -> int:
"""Gas cost for the precompile."""
calculated_gas = GAS_CALCULATION_FUNCTION_MAP[precompile_address](len(input_data))
if vector_gas_value is not None:
assert calculated_gas == vector_gas_value, (
f"Calculated gas {calculated_gas} != Vector gas {vector_gas_value}"
)
return calculated_gas
@pytest.fixture
def precompile_gas_modifier() -> int:
"""
Modify the gas passed to the precompile, for testing purposes.
By default the call is made with the exact gas amount required for the
given opcode, but when this fixture is overridden, the gas amount can be
modified to, e.g., test a lower amount and test if the precompile call
fails.
"""
return 0
@pytest.fixture
def call_opcode() -> Op:
"""
Type of call used to call the precompile.
By default it is Op.CALL, but it can be overridden in the test.
"""
return Op.CALL
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed.
Note:
Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@pytest.fixture
def call_succeeds(
expected_output: bytes | SupportsBytes,
) -> bool:
"""
By default, depending on the expected output, we can deduce if the call is
expected to succeed or fail.
"""
return len(bytes(expected_output)) > 0
@pytest.fixture
def call_contract_code(
precompile_address: int,
precompile_gas: int | None,
precompile_gas_modifier: int,
expected_output: bytes | SupportsBytes,
call_succeeds: bool,
call_opcode: Op,
call_contract_post_storage: Storage,
) -> Bytecode:
"""
Code of the test contract.
Args:
precompile_address: Address of the precompile to call.
precompile_gas: Gas cost for the precompile, which is automatically
calculated by the `precompile_gas` fixture, but can
be overridden in the test.
precompile_gas_modifier: Gas cost modifier for the precompile, which
is automatically set to zero by the
`precompile_gas_modifier` fixture, but
can be overridden in the test.
expected_output: Expected output of the precompile call.
This value is used to determine if the call is
expected to succeed or fail.
call_succeeds: Boolean that indicates if the call is expected to
succeed or fail.
call_opcode: Type of call used to call the precompile (Op.CALL,
Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL).
call_contract_post_storage: Storage of the test contract after the
transaction is executed.
"""
expected_output = bytes(expected_output)
assert call_opcode in [Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL]
value = [0] if call_opcode in [Op.CALL, Op.CALLCODE] else []
precompile_gas_value_opcode: int | Op
if precompile_gas is None:
precompile_gas_value_opcode = Op.GAS
else:
precompile_gas_value_opcode = precompile_gas + precompile_gas_modifier
code = (
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.SSTORE(
call_contract_post_storage.store_next(call_succeeds),
call_opcode(
precompile_gas_value_opcode,
precompile_address,
*value, # Optional, only used for CALL and CALLCODE.
0,
Op.CALLDATASIZE(),
0,
0,
),
)
+ Op.SSTORE(
call_contract_post_storage.store_next(len(expected_output)),
Op.RETURNDATASIZE(),
)
)
if call_succeeds:
# Add integrity check only if the call is expected to succeed.
code += Op.RETURNDATACOPY(0, 0, Op.RETURNDATASIZE()) + Op.SSTORE(
call_contract_post_storage.store_next(keccak256(expected_output)),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
return code
@pytest.fixture
def call_contract_address(pre: Alloc, call_contract_code: Bytecode) -> Address:
"""Address where the test contract will be deployed."""
return pre.deploy_contract(call_contract_code)
@pytest.fixture
def sender(pre: Alloc) -> EOA:
"""Sender of the transaction."""
return pre.fund_eoa()
@pytest.fixture
def post(call_contract_address: Address, call_contract_post_storage: Storage) -> dict:
"""Test expected post outcome."""
return {
call_contract_address: {
"storage": call_contract_post_storage,
},
}
@pytest.fixture
def tx_gas_limit(fork: Fork, input_data: bytes, precompile_gas: int) -> int:
"""
Transaction gas limit used for the test (Can be overridden in the test).
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
return (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=input_data)
+ memory_expansion_gas_calculator(new_bytes=len(input_data))
+ precompile_gas
)
@pytest.fixture
def tx(
input_data: bytes,
tx_gas_limit: int,
call_contract_address: Address,
sender: EOA,
) -> Transaction:
"""Transaction for the test."""
return Transaction(
gas_limit=tx_gas_limit,
data=input_data,
to=call_contract_address,
sender=sender,
)
NUM_TEST_POINTS = 5
# Random points not in the subgroup (fast to generate)
G1_POINTS_NOT_IN_SUBGROUP = [
BLSPointGenerator.generate_random_g1_point_not_in_subgroup(seed=i)
for i in range(NUM_TEST_POINTS)
]
G2_POINTS_NOT_IN_SUBGROUP = [
BLSPointGenerator.generate_random_g2_point_not_in_subgroup(seed=i)
for i in range(NUM_TEST_POINTS)
]
# Field points that maps to the identity point using `BLS12_MAP_FP_TO_G1`
G1_FIELD_POINTS_MAP_TO_IDENTITY = BLSPointGenerator.generate_g1_map_isogeny_kernel_points()
# Random points not on the curve (fast to generate)
G1_POINTS_NOT_ON_CURVE = [
BLSPointGenerator.generate_random_g1_point_not_on_curve(seed=i) for i in range(NUM_TEST_POINTS)
]
G2_POINTS_NOT_ON_CURVE = [
BLSPointGenerator.generate_random_g2_point_not_on_curve(seed=i) for i in range(NUM_TEST_POINTS)
]
# Field points that maps to the identity point using `BLS12_MAP_FP_TO_G2`
G2_FIELD_POINTS_MAP_TO_IDENTITY = BLSPointGenerator.generate_g2_map_isogeny_kernel_points()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/helpers.py | tests/prague/eip2537_bls_12_381_precompiles/helpers.py | """Helper functions for the EIP-2537 BLS12-381 precompiles tests."""
import hashlib
import os
from typing import Annotated, Any, List, Optional
import pytest
from joblib import Memory
from py_ecc.bls12_381 import FQ, FQ2, add, field_modulus, multiply
from pydantic import BaseModel, BeforeValidator, ConfigDict, RootModel, TypeAdapter
from pydantic.alias_generators import to_pascal
from .spec import FP, FP2, PointG1, PointG2, Spec
def current_python_script_directory(*args: str) -> str:
"""
Get the current Python script directory, optionally appending additional
path components.
"""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), *args)
class Vector(BaseModel):
"""Test vector for the BLS12-381 precompiles."""
input: Annotated[bytes, BeforeValidator(bytes.fromhex)]
expected: Annotated[bytes, BeforeValidator(bytes.fromhex)]
gas: int
name: str
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self) -> Any:
"""
Convert the test vector to a tuple that can be used as a parameter in a
pytest test.
"""
return pytest.param(self.input, self.expected, self.gas, id=self.name)
class FailVector(BaseModel):
"""Test vector for the BLS12-381 precompiles."""
input: Annotated[bytes, BeforeValidator(bytes.fromhex)]
expected_error: str
name: str
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self) -> Any:
"""
Convert the test vector to a tuple that can be used as a parameter in a
pytest test.
"""
return pytest.param(self.input, id=self.name)
class VectorList(RootModel):
"""List of test vectors for the BLS12-381 precompiles."""
root: List[Vector | FailVector]
VectorListAdapter = TypeAdapter(VectorList)
def vectors_from_file(filename: str) -> List:
"""Load test vectors from a file."""
with open(
current_python_script_directory(
"vectors",
filename,
),
"rb",
) as f:
return [v.to_pytest_param() for v in VectorListAdapter.validate_json(f.read()).root]
def add_points_g1(point_a: PointG1, point_b: PointG1) -> PointG1:
"""
Add two points in G1 using standard formulas.
For points P = (x, y) and Q = (u, v), compute R = P + Q.
"""
if point_a.x == 0 and point_a.y == 0:
return point_b
if point_b.x == 0 and point_b.y == 0:
return point_a
py_ecc_point_a = (FQ(point_a.x), FQ(point_a.y))
py_ecc_point_b = (FQ(point_b.x), FQ(point_b.y))
result = add(py_ecc_point_a, py_ecc_point_b)
if result is None:
return Spec.INF_G1
return PointG1(int(result[0]), int(result[1]))
def add_points_g2(point_a: PointG2, point_b: PointG2) -> PointG2:
"""
Add two points in G2 using standard formulas.
For points P = ((x_0, x_1), (y_0, y_1)) and
Q = ((u_0, u_1), (v_0, v_1)), compute R = P + Q.
"""
if point_a.x == (0, 0) and point_a.y == (0, 0):
return point_b
if point_b.x == (0, 0) and point_b.y == (0, 0):
return point_a
py_ecc_point_a = (FQ2(point_a.x), FQ2(point_a.y))
py_ecc_point_b = (FQ2(point_b.x), FQ2(point_b.y))
result = add(py_ecc_point_a, py_ecc_point_b)
if result is None:
return Spec.INF_G2
new_x = (int(result[0].coeffs[0]), int(result[0].coeffs[1]))
new_y = (int(result[1].coeffs[0]), int(result[1].coeffs[1]))
return PointG2(new_x, new_y)
class BLSPointGenerator:
"""
Generator for points on the BLS12-381 curve with various properties.
Provides methods to generate points with specific properties:
- on the standard curve
- in the correct r-order subgroup or not
- on the curve or not
- on an isomorphic curve (not standard curve) but in the correct
r-order subgroup
Additional resource that helped the class implementation:
https://hackmd.io/@benjaminion/bls12-381
"""
# Constants for G1 curve equations
# The b-coefficient in the elliptic curve equation y^2 = x^3 + b
# Standard BLS12-381 G1 curve uses b=4
# This is a known parameter of the BLS12-381 curve specification
STANDARD_B_G1 = Spec.B_COEFFICIENT
# Isomorphic G1 curve uses b=24 (can be any b value for an isomorphic
# curve)
ISOMORPHIC_B_G1 = 24 # Isomorphic curve: y^2 = x^3 + 24
# Constants for G2 curve equations
# Standard BLS12-381 G2 curve uses b=(4,4)
STANDARD_B_G2 = (Spec.B_COEFFICIENT, Spec.B_COEFFICIENT)
# Isomorphic G2 curve uses b=(24,24)
ISOMORPHIC_B_G2 = (24, 24)
# Cofactors for G1 and G2
# These are known constants for the BLS12-381 curve.
# G1 cofactor h₁: (x-1)²/3 where x is the BLS parameter
G1_COFACTOR = 0x396C8C005555E1568C00AAAB0000AAAB
# G2 cofactor h₂: (x⁸ - 4x⁷ + 5x⁶ - 4x⁴ + 6x³ - 4x² - 4x + 13)/9
G2_COFACTOR = 0x5D543A95414E7F1091D50792876A202CD91DE4547085ABAA68A205B2E5A7DDFA628F1CB4D9E82EF21537E293A6691AE1616EC6E786F0C70CF1C38E31C7238E5 # noqa: E501
# Memory cache for expensive functions
memory = Memory(location=".cache", verbose=0)
@staticmethod
def is_on_curve_g1(x: int, y: int) -> bool:
"""Check if point (x,y) is on the BLS12-381 G1 curve."""
x_fq = FQ(x)
y_fq = FQ(y)
return y_fq * y_fq == x_fq * x_fq * x_fq + FQ(Spec.B_COEFFICIENT)
@staticmethod
def is_on_curve_g2(x: tuple, y: tuple) -> bool:
"""Check if point (x,y) is on the BLS12-381 G2 curve."""
x_fq2 = FQ2(x)
y_fq2 = FQ2(y)
return y_fq2 * y_fq2 == x_fq2 * x_fq2 * x_fq2 + FQ2(
(Spec.B_COEFFICIENT, Spec.B_COEFFICIENT)
)
@staticmethod
def check_in_g1_subgroup(point: PointG1) -> bool:
"""Check if a G1 point is in the correct r-order subgroup."""
try:
# Check q*P = O where q is the subgroup order
x = FQ(point.x)
y = FQ(point.y)
result = multiply((x, y), Spec.Q)
# If point is in the subgroup, q*P should be infinity
return result is None
except Exception:
return False
@staticmethod
def check_in_g2_subgroup(point: PointG2) -> bool:
"""Check if a G2 point is in the correct r-order subgroup."""
try:
# Check q*P = O where q is the subgroup order
x = FQ2(point.x)
y = FQ2(point.y)
result = multiply((x, y), Spec.Q)
# If point is in the subgroup, q*P should be infinity
return result is None
except Exception:
return False
@staticmethod
def sqrt_fq(a: FQ) -> Optional[FQ]:
"""
Compute smallest square root of FQ element (if it exists). Used when
finding valid y-coordinates for a given x-coordinate on the G1 curve.
"""
assert field_modulus % 4 == 3, "This sqrt method requires p % 4 == 3"
candidate = a ** ((field_modulus + 1) // 4)
if candidate * candidate == a:
if int(candidate) > field_modulus // 2:
return -candidate
return candidate
return None
@staticmethod
def sqrt_fq2(a: FQ2) -> Optional[FQ2]:
"""
Compute square root of FQ2 element (if it exists). Used when finding
valid y-coordinates for a given x-coordinate on the G2 curve.
"""
if a == FQ2([0, 0]):
return FQ2([0, 0])
candidate = a ** ((field_modulus**2 + 7) // 16)
if candidate * candidate == a:
int_c0, int_c1 = int(candidate.coeffs[0]), int(candidate.coeffs[1])
if int_c1 > 0 or (int_c1 == 0 and int_c0 > field_modulus // 2):
return -candidate
return candidate
return None
@classmethod
def multiply_by_cofactor(cls, point: Any, is_g2: bool = False) -> Any:
"""
Multiply a point by the cofactor to ensure it's in the correct r-order
subgroup. Used for creating points in the correct r-order subgroup when
using isomorphic curves.
"""
cofactor = cls.G2_COFACTOR if is_g2 else cls.G1_COFACTOR
try:
if is_g2:
# For G2, the point is given in this form: ((x0, x1), (y0, y1))
x = FQ2([point[0][0], point[0][1]])
y = FQ2([point[1][0], point[1][1]])
base_point = (x, y)
result = multiply(base_point, cofactor)
return (
(int(result[0].coeffs[0]), int(result[0].coeffs[1])), # type: ignore
(int(result[1].coeffs[0]), int(result[1].coeffs[1])), # type: ignore
)
else:
# For G1, the point is given as (x, y).
x = FQ(point[0]) # type: ignore
y = FQ(point[1]) # type: ignore
base_point = (x, y)
result = multiply(base_point, cofactor)
return (int(result[0]), int(result[1])) # type: ignore
except Exception as e:
raise ValueError("Failed to multiply point by cofactor") from e
@classmethod
@memory.cache
def find_g1_point_by_x(cls, x_value: int, in_subgroup: bool, on_curve: bool = True) -> PointG1:
"""
Find a G1 point with x-coordinate at or near the given value, with the
specified subgroup membership and curve membership.
"""
max_offset = 5000
isomorphic_b = cls.ISOMORPHIC_B_G1
for offset in range(max_offset + 1):
for direction in [1, -1]:
if offset == 0 and direction == -1:
continue
try_x = (x_value + direction * offset) % Spec.P
try:
x = FQ(try_x)
# Calculate y² = x³ + b (standard curve or isomorphic
# curve)
b_value = cls.STANDARD_B_G1 if on_curve else isomorphic_b
y_squared = x**3 + FQ(b_value)
# Try to find y such that y² = x³ + b
y = cls.sqrt_fq(y_squared)
if y is None:
continue # No valid y exists for this x
# Create the initial points on either curve
raw_point = (int(x), int(y))
raw_point2 = (int(x), Spec.P - int(y))
# For isomorphic curve points in subgroup, apply cofactor
# multiplication
if not on_curve and in_subgroup:
try:
subgroup_point = cls.multiply_by_cofactor(raw_point, is_g2=False)
point1 = PointG1(subgroup_point[0], subgroup_point[1])
except ValueError:
continue # Skip if fails
else:
point1 = PointG1(int(x), int(y))
if not on_curve and in_subgroup:
try:
subgroup_point2 = cls.multiply_by_cofactor(raw_point2, is_g2=False)
point2 = PointG1(subgroup_point2[0], subgroup_point2[1])
except ValueError:
continue # Skip if fails
else:
point2 = PointG1(int(x), Spec.P - int(y))
# Verify points have the required properties
point1_on_curve = cls.is_on_curve_g1(point1.x, point1.y)
point2_on_curve = cls.is_on_curve_g1(point2.x, point2.y)
point1_in_subgroup = cls.check_in_g1_subgroup(point1)
point2_in_subgroup = cls.check_in_g1_subgroup(point2)
# Return required point if found based on properties
if on_curve == point1_on_curve and in_subgroup == point1_in_subgroup:
return point1
if on_curve == point2_on_curve and in_subgroup == point2_in_subgroup:
return point2
except Exception:
continue
raise ValueError(
(
f"Failed to find G1 point by x={x_value},",
"in_subgroup={in_subgroup},",
"on_curve={on_curve}",
)
)
@classmethod
@memory.cache
def find_g2_point_by_x(
cls, x_value: tuple, in_subgroup: bool, on_curve: bool = True
) -> PointG2:
"""
Find a G2 point with x-coordinate at or near the given value, with the
specified subgroup membership and curve membership.
"""
max_offset = 5000
isomorphic_b = cls.ISOMORPHIC_B_G2
for offset in range(max_offset + 1):
for direction in [1, -1]:
if offset == 0 and direction == -1:
continue
try_x0 = (x_value[0] + direction * offset) % Spec.P
try_x = (try_x0, x_value[1]) # Keep x1 the same
try:
x = FQ2(try_x)
# Calculate y² = x³ + b (standard curve or isomorphic
# curve)
b_value = cls.STANDARD_B_G2 if on_curve else isomorphic_b
y_squared = x**3 + FQ2(b_value)
# Try to find y such that y² = x³ + b
y = cls.sqrt_fq2(y_squared)
if y is None:
continue # No valid y exists for this x
# Create the initial points on either curve
raw_point = (
(int(x.coeffs[0]), int(x.coeffs[1])),
(int(y.coeffs[0]), int(y.coeffs[1])),
)
raw_point2 = (
(int(x.coeffs[0]), int(x.coeffs[1])),
(Spec.P - int(y.coeffs[0]), Spec.P - int(y.coeffs[1])),
)
# For isomorphic curve points in subgroup, apply cofactor
# multiplication
if not on_curve and in_subgroup:
try:
subgroup_point = cls.multiply_by_cofactor(raw_point, is_g2=True)
point1 = PointG2(subgroup_point[0], subgroup_point[1])
except ValueError:
continue # Skip if fails
else:
point1 = PointG2(
(int(x.coeffs[0]), int(x.coeffs[1])),
(int(y.coeffs[0]), int(y.coeffs[1])),
)
if not on_curve and in_subgroup:
try:
subgroup_point2 = cls.multiply_by_cofactor(raw_point2, is_g2=True)
point2 = PointG2(subgroup_point2[0], subgroup_point2[1])
except ValueError:
continue # Skip if fails
else:
point2 = PointG2(
(int(x.coeffs[0]), int(x.coeffs[1])),
(Spec.P - int(y.coeffs[0]), Spec.P - int(y.coeffs[1])),
)
# Verify points have the required properties
point1_on_curve = cls.is_on_curve_g2(point1.x, point1.y)
point2_on_curve = cls.is_on_curve_g2(point2.x, point2.y)
point1_in_subgroup = cls.check_in_g2_subgroup(point1)
point2_in_subgroup = cls.check_in_g2_subgroup(point2)
# Return required point if found based on properties
if on_curve == point1_on_curve and in_subgroup == point1_in_subgroup:
return point1
if on_curve == point2_on_curve and in_subgroup == point2_in_subgroup:
return point2
except Exception:
continue
raise ValueError(
(
f"Failed to find G2 point by x={x_value},",
"in_subgroup={in_subgroup},",
"on_curve={on_curve}",
)
)
    # G1 points by x coordinate (near or on the x value)
    @classmethod
    def generate_g1_point_in_subgroup_by_x(cls, x_value: int) -> PointG1:
        """
        G1 point that is in the r-order subgroup with x-coordinate by/on the
        given value.

        Delegates to `find_g1_point_by_x`, which may settle on a nearby
        x-coordinate if the exact value yields no suitable point.
        """
        return cls.find_g1_point_by_x(x_value, in_subgroup=True, on_curve=True)
    @classmethod
    def generate_g1_point_not_in_subgroup_by_x(cls, x_value: int) -> PointG1:
        """
        G1 point that is NOT in the r-order subgroup with x-coordinate by/on
        the given value.

        Delegates to `find_g1_point_by_x`, which may settle on a nearby
        x-coordinate if the exact value yields no suitable point.
        """
        return cls.find_g1_point_by_x(x_value, in_subgroup=False, on_curve=True)
    @classmethod
    def generate_g1_point_not_on_curve_by_x(cls, x_value: int) -> PointG1:
        """
        G1 point that is NOT on the curve with x-coordinate by/on the given
        value.

        Delegates to `find_g1_point_by_x`, which may settle on a nearby
        x-coordinate if the exact value yields no suitable point.
        """
        return cls.find_g1_point_by_x(x_value, in_subgroup=False, on_curve=False)
    @classmethod
    def generate_g1_point_on_isomorphic_curve_by_x(cls, x_value: int) -> PointG1:
        """
        G1 point that is on an isomorphic curve (not standard curve) but in the
        r-order subgroup with x-coordinate by/on the given value.

        Uses cofactor multiplication to ensure the point is in the correct
        subgroup. Delegates to `find_g1_point_by_x`, which may settle on a
        nearby x-coordinate if the exact value yields no suitable point.
        """
        return cls.find_g1_point_by_x(x_value, in_subgroup=True, on_curve=False)
# G1 random points required to be generated with a seed
@classmethod
def generate_random_g1_point_in_subgroup(cls, seed: int) -> PointG1:
"""Generate a random G1 point that is in the r-order subgroup."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"in_subgroup").digest()
x_value = int.from_bytes(hash_output, "big") % Spec.P
return cls.generate_g1_point_in_subgroup_by_x(x_value)
@classmethod
def generate_random_g1_point_not_in_subgroup(cls, seed: int) -> PointG1:
"""Generate a random G1 point that is NOT in the r-order subgroup."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"not_in_subgroup").digest()
x_value = int.from_bytes(hash_output, "big") % Spec.P
return cls.generate_g1_point_not_in_subgroup_by_x(x_value)
@classmethod
def generate_random_g1_point_not_on_curve(cls, seed: int) -> PointG1:
"""Generate a random G1 point that is NOT on the curve."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"not_on_curve").digest()
x_value = int.from_bytes(hash_output, "big") % Spec.P
return cls.generate_g1_point_not_on_curve_by_x(x_value)
@classmethod
def generate_random_g1_point_on_isomorphic_curve(cls, seed: int) -> PointG1:
"""
Generate a random G1 point that is on an isomorphic curve (not standard
curve) but in the r-order subgroup.
Uses cofactor multiplication to ensure the point is in the correct
subgroup.
"""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"on_isomorphic_curve").digest()
x_value = int.from_bytes(hash_output, "big") % Spec.P
return cls.generate_g1_point_on_isomorphic_curve_by_x(x_value)
    # G2 point generators - by x coordinate (near or on the x value)
    @classmethod
    def generate_g2_point_in_subgroup_by_x(cls, x_value: tuple) -> PointG2:
        """
        G2 point that is in the r-order subgroup with x-coordinate by/on the
        given value.

        Delegates to `find_g2_point_by_x`, which may settle on a nearby
        x-coordinate (first component only) if needed.
        """
        return cls.find_g2_point_by_x(x_value, in_subgroup=True, on_curve=True)
    @classmethod
    def generate_g2_point_not_in_subgroup_by_x(cls, x_value: tuple) -> PointG2:
        """
        G2 point that is NOT in the r-order subgroup with x-coordinate by/on
        the given value.

        Delegates to `find_g2_point_by_x`, which may settle on a nearby
        x-coordinate (first component only) if needed.
        """
        return cls.find_g2_point_by_x(x_value, in_subgroup=False, on_curve=True)
    @classmethod
    def generate_g2_point_not_on_curve_by_x(cls, x_value: tuple) -> PointG2:
        """
        G2 point that is NOT on the curve with x-coordinate by/on the given
        value.

        Delegates to `find_g2_point_by_x`, which may settle on a nearby
        x-coordinate (first component only) if needed.
        """
        return cls.find_g2_point_by_x(x_value, in_subgroup=False, on_curve=False)
    @classmethod
    def generate_g2_point_on_isomorphic_curve_by_x(cls, x_value: tuple) -> PointG2:
        """
        G2 point that is on an isomorphic curve (not standard curve) but in the
        r-order subgroup with x-coordinate near the given value.

        Uses cofactor multiplication to ensure the point is in the correct
        subgroup. Delegates to `find_g2_point_by_x`, which may settle on a
        nearby x-coordinate (first component only) if needed.
        """
        return cls.find_g2_point_by_x(x_value, in_subgroup=True, on_curve=False)
# G2 random points required to be generated with a seed
@classmethod
def generate_random_g2_point_in_subgroup(cls, seed: int) -> PointG2:
"""Generate a random G2 point that is in the r-order subgroup."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"g2_in_subgroup").digest()
hash_len = len(hash_output)
half_len = hash_len // 2
x0 = int.from_bytes(hash_output[:half_len], "big") % Spec.P
x1 = int.from_bytes(hash_output[half_len:], "big") % Spec.P
return cls.generate_g2_point_in_subgroup_by_x((x0, x1))
@classmethod
def generate_random_g2_point_not_in_subgroup(cls, seed: int) -> PointG2:
"""Generate a random G2 point that is NOT in the r-order subgroup."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"g2_not_in_subgroup").digest()
hash_len = len(hash_output)
half_len = hash_len // 2
x0 = int.from_bytes(hash_output[:half_len], "big") % Spec.P
x1 = int.from_bytes(hash_output[half_len:], "big") % Spec.P
return cls.generate_g2_point_not_in_subgroup_by_x((x0, x1))
@classmethod
def generate_random_g2_point_not_on_curve(cls, seed: int) -> PointG2:
"""Generate a random G2 point that is NOT on the curve."""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"g2_not_on_curve").digest()
hash_len = len(hash_output)
half_len = hash_len // 2
x0 = int.from_bytes(hash_output[:half_len], "big") % Spec.P
x1 = int.from_bytes(hash_output[half_len:], "big") % Spec.P
return cls.generate_g2_point_not_on_curve_by_x((x0, x1))
@classmethod
def generate_random_g2_point_on_isomorphic_curve(cls, seed: int) -> PointG2:
"""
Generate a random G2 point that is on an isomorphic curve (not standard
curve) but in the r-order subgroup. Uses cofactor multiplication to
ensure the point is in the correct subgroup.
"""
seed_bytes = seed.to_bytes(32, "big")
hash_output = hashlib.sha384(seed_bytes + b"g2_on_isomorphic_curve").digest()
hash_len = len(hash_output)
half_len = hash_len // 2
x0 = int.from_bytes(hash_output[:half_len], "big") % Spec.P
x1 = int.from_bytes(hash_output[half_len:], "big") % Spec.P
return cls.generate_g2_point_on_isomorphic_curve_by_x((x0, x1))
# G1 map to curve 11-isogeny kernel point generator
@classmethod
def generate_g1_map_isogeny_kernel_points(cls) -> List[FP]:
"""
Return precomputed kernel points for the BLS12-381 G1 map to curve
function. These map to the G1 identity point `Spec.INF_G1`. They are
generated using sage math externally with the following script as its
significantly faster than using `py_ecc` (200-1000x faster).
For reference we can imagine the map to curve function as a simple 2
step process, where an input t value is mapped to a point on the
auxiliary curve via a SWU map, and then that point is mapped to the BLS
curve via an 11-isogeny. For reference:
https://eips.ethereum.org/assets/eip-2537/field_to_curve
Note we cannot use sage math directly within EEST as it is not a pure
python library and requires an external dependency to be installed on
the system machine.
Thanks to @petertdavies (Peter Miller) for the sage math script to
generate these points: ```sage
q = 0x1A0111EA397FE69A4B1BA7B6434BACD764774B84F3
8512BF6730D2A0F6B0F6241EABFFFEB153FFFFB9FEFFFFFFFFAAAB
Fq = GF(q)
E1 = EllipticCurve(Fq, (0, 4)) # BLS12-381 curve
ISO_11_A = Fq(0x144698A3B8E9433D693A02C96D4982B0EA985383EE66A8D8E
8981AEFD881AC98936F8DA0E0F97F5CF428082D584C1D)
ISO_11_B = Fq(0x12E2908D11688030018B12E8753EEE3B2016C1F0F24F4070A0B
9C14FCEF35EF55A23215A316CEAA5D1CC48E98E172BE0)
ISO_11_Z = Fq(11)
Ei = EllipticCurve(Fq, (ISO_11_A, ISO_11_B))
iso = EllipticCurveIsogeny(
E=E1,
kernel=None,
codomain=Ei,
degree=11).dual()
for (x, _) in iso.kernel_polynomial().roots():
discriminant = 1 - 4 / (ISO_11_A / ISO_11_B * x + 1)
if not discriminant.is_square():
continue
for sign in [1, -1]:
zt2 = (-1 + sign * discriminant.sqrt()) / 2
t2 = zt2 / ISO_11_Z
if t2.is_square():
t = t2.sqrt()
assert x == -ISO_11_B / ISO_11_A * (1 + 1 / (ISO_11_Z**2 *
t**4 + ISO_11_Z * t**2))
print(t)
```
To reproduce, add the script contents to a file called `points.sage`,
then run `sage points.sage`!
Please see the sage math installation guide to replicate: -
https://doc.sagemath.org/html/en/installation/index.html
As G1 uses an 11-degree isogeny, its kernel contains exactly 11 points
on the auxiliary curve that maps to the point at infinity on the BLS
curve. This includes the point at infinity (doesn't concern us as the
initial SWU map can never output infinity from any int t) and 10 other
unique kernel points.
These 10 other kernel points correspond to 5 x-coords on the curve
(since each x-coord yields two points with y and -y). However, not all
of these kernel points can be reached by the SWU map, which is why we
only have 4 unique t values below.
The kernel polynomial has 5 roots (x-coords), and each root can
potentially yield two t values that map to kernel points via the SWU
function. Analysis shows that only 2 of these roots yield valid t
values because the other 3 roots fail either the discriminant square
check or the t^2 square check in the SWU inverse calculation. From
these 2 valid roots, we get the 4 unique t values listed below.
The roots and their properties are as follows:
- Root 1
(x=3447232547282837364692125741875673748077489238391001187748258
124039623697289612052402753422028380156396811587142615):
Fails because its discriminant is not a square.
- Root 2
(x=3086251397349454634226049654186198282625136597600255705376316
455943570106637401671127489553534256598630507009270951):
Fails because its discriminant is not a square.
- Root 3
(x=2512099095366387796245759085729510986367032014959769672734622
752070562589059815523018960565849753051338812932816014):
Has a square discriminant, but both sign options yield t^2 values
that are not squares.
- Root 4
(x=2077344747421819657086473418925078480898358265217674456436079
722467637536216749299440611432676849905020722484031356):
Yields two valid t values:
- 173108157456581746932131744927527835530698278615407257619875
8675751495027640363897075486577327802192163339186341827
and
- 861410691052762088300790587394810074303505896628048305535645
284922135116676755956131724844456716837983264353875219
- Root 5
(x=1629023065307570116876483814580399609058797608540074345321518
03806422383239905014872915974221245198317567396330740):
Yields two valid t values:
- 100604475543156059528179355793117172998496451568296191191139
8807521437683216171091013202870577238485832047490326971
- 15620013383368772677174003254551890147802280979855962
77514975439801739125527323838522116502949589758528550231396418
Additionally we also have the additive inverses of these t values,
which are also valid kernel (non-unique) points. These are generated
using the relationship: `(-t) mod p === (p - t) mod p`
"""
unique_kernel_ts = [
1731081574565817469321317449275278355306982786154072576198758675751495027640363897075486577327802192163339186341827,
861410691052762088300790587394810074303505896628048305535645284922135116676755956131724844456716837983264353875219,
1006044755431560595281793557931171729984964515682961911911398807521437683216171091013202870577238485832047490326971,
1562001338336877267717400325455189014780228097985596277514975439801739125527323838522116502949589758528550231396418,
]
additive_inverses = [(Spec.P - t) % Spec.P for t in unique_kernel_ts]
return [FP(t) for t in (unique_kernel_ts + additive_inverses)]
# G2 map to curve 3-isogeny kernel point generator
@classmethod
def generate_g2_map_isogeny_kernel_points(cls) -> List[FP2]:
"""
Return precomputed kernel points for the BLS12-381 G2 map to curve
function. These map to the G2 identity point `Spec.INF_G2`. They are
generated using sage math externally with the following script as its
significantly faster than using `py_ecc` (200-1000x faster).
For reference we can imagine the map to curve function as a simple 2
step process, where an input t value is mapped to a point on the
auxiliary curve via a SWU map, and then that point is mapped to the
BLS curve via a 3-isogeny. For reference:
- https://eips.ethereum.org/assets/eip-2537/field_to_curve
Note we cannot use sage math directly within EEST as it is not a pure
python library and requires an external dependency to be installed on
the system machine.
```sage
q = 0x1A0111EA397FE69A4B1BA7B6434BACD764774B84F38512BF6730D2A0F6B0F6241EABFFFEB153FFFFB9FEFFFFFFFFAAAB
Fp = GF(q)
R.<x> = PolynomialRing(Fp)
Fp2.<u> = Fp.extension(x^2 + 1)
E2 = EllipticCurve(Fp2, [0, 4*(1+u)])
ISO_3_A = 240 * u
ISO_3_B = 1012 * (1 + u)
ISO_3_Z = -(2 + u)
Ei = EllipticCurve(Fp2, [ISO_3_A, ISO_3_B])
iso = EllipticCurveIsogeny(E=E2, kernel=None, codomain=Ei, degree=3).dual()
x_den_k0 = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa63 * u
x_den_k1 = 0xc + 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaa9f * u
for (x, _) in iso.kernel_polynomial().roots():
y_squared = x^3 + ISO_3_A * x + ISO_3_B
is_on_curve = y_squared.is_square()
print("Root is on curve:" if is_on_curve else "Warning: Root is not on the curve")
inv_factor = (x * ISO_3_A / -ISO_3_B) - 1
if inv_factor == 0:
continue
discriminant = 1 + 4 / inv_factor
if not discriminant.is_square():
continue
for sign in [1, -1]:
zt2 = (-1 + sign * discriminant.sqrt()) / 2
t2 = zt2 / ISO_3_Z
if t2.is_square():
t = t2.sqrt()
# Perform the proper SWU mapping
tv1_num = ISO_3_Z^2 * t^4 + ISO_3_Z * t^2
tv1 = 1 / tv1_num
x1 = (-ISO_3_B / ISO_3_A) * (1 + tv1)
gx1 = x1^3 + ISO_3_A * x1 + ISO_3_B
x2 = ISO_3_Z * t^2 * x1
swu_x = x1 if gx1.is_square() else x2
x_den_value = swu_x^2 + x_den_k1 * swu_x + x_den_k0
is_kernel_point = (x_den_value == 0)
print("Is a kernel point:", is_kernel_point)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g1add.py | """
Tests BLS12_G1ADD precompile.
Tests the BLS12_G1ADD precompile implementation from [EIP-2537:
Precompile for BLS12-381 curve operations]
(https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G1_POINTS_NOT_IN_SUBGROUP, G1_POINTS_NOT_ON_CURVE
from .helpers import add_points_g1, vectors_from_file
from .spec import PointG1, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.G1ADD], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("add_G1_bls.json")
+ [
# Identity (infinity) element test cases. Checks that any point added
# to the identity element (INF) equals itself.
pytest.param(
Spec.G1 + Spec.INF_G1,
Spec.G1,
None,
id="generator_plus_inf",
),
pytest.param(
Spec.INF_G1 + Spec.G1,
Spec.G1,
None,
id="inf_plus_generator",
),
pytest.param(
Spec.INF_G1 + Spec.INF_G1,
Spec.INF_G1,
None,
id="inf_plus_inf",
),
pytest.param(
Spec.INF_G1 + Spec.P1,
Spec.P1,
None,
id="inf_plus_point",
),
# Basic arithmetic properties test cases.
# Checks fundamental properties of the BLS12-381 curve.
pytest.param(
Spec.P1 + (-Spec.P1),
Spec.INF_G1,
None,
id="point_plus_neg_point",
),
pytest.param(
Spec.G1 + (-Spec.G1),
Spec.INF_G1,
None,
id="generator_plus_neg_point",
),
pytest.param(
Spec.P1 + Spec.G1,
add_points_g1(Spec.G1, Spec.P1),
None,
id="commutative_check_a",
),
pytest.param(
Spec.G1 + Spec.P1,
add_points_g1(Spec.P1, Spec.G1),
None,
id="commutative_check_b",
),
pytest.param(
Spec.P1 + Spec.P1,
add_points_g1(Spec.P1, Spec.P1),
None,
id="point_doubling",
),
pytest.param( # (P + G) + P = P + (G + P)
add_points_g1(Spec.P1, Spec.G1) + Spec.P1,
add_points_g1(Spec.P1, add_points_g1(Spec.G1, Spec.P1)),
None,
id="associativity_check",
),
pytest.param( # -(P+G) = (-P)+(-G)
(-(add_points_g1(Spec.P1, Spec.G1))) + Spec.INF_G1,
add_points_g1((-Spec.P1), (-Spec.G1)),
None,
id="negation_of_sum",
),
pytest.param(
add_points_g1(Spec.G1, Spec.G1) + add_points_g1(Spec.P1, Spec.P1),
add_points_g1(add_points_g1(Spec.G1, Spec.G1), add_points_g1(Spec.P1, Spec.P1)),
None,
id="double_generator_plus_double_point",
),
pytest.param(
add_points_g1(Spec.G1, Spec.G1) + add_points_g1(Spec.G1, Spec.G1),
add_points_g1(add_points_g1(Spec.G1, Spec.G1), add_points_g1(Spec.G1, Spec.G1)),
None,
id="double_generator_plus_double_generator",
),
pytest.param( # (x,y) + (x,-y) = INF
PointG1(Spec.P1.x, Spec.P1.y) + PointG1(Spec.P1.x, Spec.P - Spec.P1.y),
Spec.INF_G1,
None,
id="point_plus_reflected_point",
),
# Not in the r-order subgroup test cases. Checks that any point on the
# curve but not in the subgroup is used for operations.
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + Spec.P1_NOT_IN_SUBGROUP,
Spec.P1_NOT_IN_SUBGROUP_TIMES_2,
None,
id="non_sub_plus_non_sub",
),
pytest.param( # `P1_NOT_IN_SUBGROUP` has an small order subgroup of 3:
# 3P = INF.
Spec.P1_NOT_IN_SUBGROUP + Spec.P1_NOT_IN_SUBGROUP_TIMES_2,
Spec.INF_G1,
None,
id="non_sub_order_3_to_inf",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + Spec.INF_G1,
Spec.P1_NOT_IN_SUBGROUP,
None,
id="non_sub_plus_inf",
),
pytest.param(
Spec.G1 + Spec.P1_NOT_IN_SUBGROUP,
add_points_g1(Spec.G1, Spec.P1_NOT_IN_SUBGROUP),
None,
id="generator_plus_non_sub",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP + (-Spec.P1_NOT_IN_SUBGROUP),
Spec.INF_G1,
None,
id="non_sub_plus_neg_non_sub",
),
pytest.param(
Spec.P1 + Spec.P1_NOT_IN_SUBGROUP,
add_points_g1(Spec.P1, Spec.P1_NOT_IN_SUBGROUP),
None,
id="in_sub_plus_non_sub",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + Spec.P1,
add_points_g1(Spec.P1_NOT_IN_SUBGROUP_TIMES_2, Spec.P1),
None,
id="doubled_non_sub_plus_in_sub",
),
pytest.param(
Spec.P1_NOT_IN_SUBGROUP_TIMES_2 + (-Spec.P1_NOT_IN_SUBGROUP),
Spec.P1_NOT_IN_SUBGROUP,
None,
id="doubled_non_sub_plus_neg",
),
# More not in the r-order subgroup test cases, but using random
# generated points.
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[0] + Spec.P1,
add_points_g1(G1_POINTS_NOT_IN_SUBGROUP[0], Spec.P1),
None,
id="rand_not_in_subgroup_0_plus_point",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[1] + Spec.G1,
add_points_g1(G1_POINTS_NOT_IN_SUBGROUP[1], Spec.G1),
None,
id="rand_not_in_subgroup_1_plus_generator",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[2] + Spec.INF_G1,
G1_POINTS_NOT_IN_SUBGROUP[2],
None,
id="rand_not_in_subgroup_2_plus_inf",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[3] + (-G1_POINTS_NOT_IN_SUBGROUP[3]),
Spec.INF_G1,
None,
id="rand_not_in_subgroup_3_plus_neg_self",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[4] + G1_POINTS_NOT_IN_SUBGROUP[0],
add_points_g1(G1_POINTS_NOT_IN_SUBGROUP[4], G1_POINTS_NOT_IN_SUBGROUP[0]),
None,
id="rand_not_in_subgroup_4_plus_0",
),
],
)
def test_valid(
    state_test: StateTestFiller,
    pre: Alloc,
    post: dict,
    tx: Transaction,
) -> None:
    """
    Test the BLS12_G1ADD precompile with valid inputs.

    The `input_data`/`expected_output` pairs come from the parametrization
    above (reference vectors plus identity, arithmetic-property and
    non-subgroup cases); the `pre`/`tx`/`post` fixtures presumably wire them
    into an actual precompile call — see the shared conftest.
    """
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-add_G1_bls.json")
+ [
pytest.param(
PointG1(0, 1) + Spec.INF_G1,
id="invalid_point_a_1",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y - 1) + Spec.INF_G1,
id="invalid_point_a_2",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y + 1) + Spec.INF_G1,
id="invalid_point_a_3",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.x) + Spec.INF_G1,
id="invalid_point_a_4",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y - 1) + Spec.P1,
id="invalid_point_a_5",
),
pytest.param(
Spec.INF_G1 + PointG1(0, 1),
id="invalid_point_b_1",
),
pytest.param(
Spec.INF_G1 + PointG1(Spec.P1.x, Spec.P1.y - 1),
id="invalid_point_b_2",
),
pytest.param(
Spec.INF_G1 + PointG1(Spec.P1.x, Spec.P1.y + 1),
id="invalid_point_b_3",
),
pytest.param(
Spec.INF_G1 + PointG1(Spec.P1.x, Spec.P1.x),
id="invalid_point_b_4",
),
pytest.param(
Spec.P1 + PointG1(Spec.P1.x, Spec.P1.y - 1),
id="invalid_point_b_5",
),
pytest.param(
PointG1(Spec.P, 0) + Spec.INF_G1,
id="a_x_equal_to_p",
),
pytest.param(
Spec.INF_G1 + PointG1(Spec.P, 0),
id="b_x_equal_to_p",
),
pytest.param(
PointG1(0, Spec.P) + Spec.INF_G1,
id="a_y_equal_to_p",
),
pytest.param(
Spec.INF_G1 + PointG1(0, Spec.P),
id="b_y_equal_to_p",
),
pytest.param(
PointG1(Spec.P1.x + Spec.P, Spec.P1.y) + Spec.G1,
id="a_x_above_p",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y + Spec.P) + Spec.G1,
id="a_y_above_p",
),
pytest.param(
Spec.P1 + PointG1(Spec.G1.x + Spec.P, Spec.G1.y),
id="b_x_above_p",
),
pytest.param(
Spec.P1 + PointG1(Spec.G1.x, Spec.G1.y + Spec.P),
id="b_y_above_p",
),
pytest.param(
b"\x80" + bytes(Spec.INF_G1)[1:] + Spec.INF_G1,
id="invalid_encoding_a",
),
pytest.param(
Spec.INF_G1 + b"\x80" + bytes(Spec.INF_G1)[1:],
id="invalid_encoding_b",
),
pytest.param(
(Spec.INF_G1 + PointG1(Spec.P1.x, Spec.P1.x))[:-1],
id="input_too_short",
),
pytest.param(
b"\x00" + (Spec.INF_G1 + PointG1(Spec.P1.x, Spec.P1.x)),
id="input_too_long",
),
pytest.param(
b"",
id="zero_length_input",
),
pytest.param(
Spec.G1,
id="only_one_point",
),
pytest.param(
Spec.G2 + Spec.G2,
id="g2_points",
),
pytest.param(
PointG1(Spec.P + 1, 0) + Spec.INF_G1,
id="x_above_modulus",
),
pytest.param( # Point on curve y^2 = x^3 + 5.
PointG1(0x01, 0x07) + Spec.INF_G1,
id="point_on_wrong_curve_b=5",
),
pytest.param(
PointG1(Spec.P1.y, Spec.P1.x) + Spec.INF_G1,
id="swapped_coordinates",
),
pytest.param(
b"\x00" * 96,
id="all_zero_96_bytes",
),
pytest.param(
b"\xff" + b"\x00" * 47 + b"\xff" + b"\x00" * 47,
id="bad_inf_flag",
),
pytest.param(
b"\xc0" + b"\x00" * 47 + b"\xc0" + b"\x00" * 47,
id="comp_instead_of_uncomp",
),
pytest.param(
PointG1(Spec.P1.x | Spec.MAX_FP_BIT_SET, Spec.P1.y) + Spec.P1,
id="non_zero_byte_16_boundary_violation_x",
),
pytest.param(
PointG1(Spec.P1.x, Spec.P1.y | Spec.MAX_FP_BIT_SET) + Spec.P1,
id="non_zero_byte_16_boundary_violation_y",
),
# Not on the curve cases using random generated points.
pytest.param(
G1_POINTS_NOT_ON_CURVE[0] + Spec.INF_G1,
id="rand_not_on_curve_0_plus_inf",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[1] + Spec.P1,
id="rand_not_on_curve_1_plus_point",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[2] + G1_POINTS_NOT_IN_SUBGROUP[0],
id="rand_not_on_curve_2_plus_not_in_subgroup_0",
),
pytest.param(
G1_POINTS_NOT_ON_CURVE[3] + G1_POINTS_NOT_ON_CURVE[4],
id="rand_not_on_curve_3_plus_4",
),
pytest.param(
Spec.INF_G1 + G1_POINTS_NOT_ON_CURVE[0],
id="inf_plus_rand_not_on_curve_0",
),
pytest.param(
Spec.P1 + G1_POINTS_NOT_ON_CURVE[1],
id="point_plus_rand_not_on_curve_1",
),
pytest.param(
G1_POINTS_NOT_IN_SUBGROUP[2] + G1_POINTS_NOT_ON_CURVE[2],
id="rand_not_in_subgroup_2_plus_rand_not_on_curve_2",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
@pytest.mark.slow()
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Negative tests for the BLS12_G1ADD precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
Spec.INF_G1 + Spec.INF_G1,
Spec.INF_G1,
1,
id="extra_gas",
),
pytest.param(
Spec.INF_G1 + Spec.INF_G1,
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G1ADD precompile gas requirements."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G1 + Spec.INF_G1,
Spec.INF_G1,
id="inf_plus_inf",
),
pytest.param(
Spec.INF_G1 + Spec.G1,
Spec.G1,
id="inf_plus_generator",
),
],
)
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G1ADD precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2mul.py | """
Test the BLS12_G2MUL precompile.
Test the BLS12_G2MUL precompile introduced in
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G2_POINTS_NOT_IN_SUBGROUP, G2_POINTS_NOT_ON_CURVE
from .helpers import vectors_from_file
from .spec import PointG2, Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.G2MSM], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("mul_G2_bls.json")
+ [
# Basic multiplication test cases.
pytest.param(
Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
None,
id="zero_times_inf",
),
pytest.param(
Spec.INF_G2 + Scalar(1),
Spec.INF_G2,
None,
id="one_times_inf",
),
pytest.param(
Spec.INF_G2 + Scalar(2),
Spec.INF_G2,
None,
id="two_times_inf",
),
pytest.param(
Spec.INF_G2 + Scalar(Spec.Q),
Spec.INF_G2,
None,
id="q_times_inf",
),
pytest.param(
Spec.INF_G2 + Scalar(2**256 - 1),
Spec.INF_G2,
None,
id="max_scalar_times_inf",
),
pytest.param(
Spec.G2 + Scalar(0),
Spec.INF_G2,
None,
id="zero_times_generator",
),
pytest.param(
Spec.P2 + Scalar(0),
Spec.INF_G2,
None,
id="zero_times_point",
),
pytest.param(
Spec.G2 + Scalar(1),
Spec.G2,
None,
id="one_times_generator",
),
pytest.param(
Spec.P2 + Scalar(1),
Spec.P2,
None,
id="one_times_point",
),
pytest.param(
Spec.P2 + Scalar(2**256 - 1),
PointG2(
(
0x2663E1C3431E174CA80E5A84489569462E13B52DA27E7720AF5567941603475F1F9BC0102E13B92A0A21D96B94E9B22,
0x6A80D056486365020A6B53E2680B2D72D8A93561FC2F72B960936BB16F509C1A39C4E4174A7C9219E3D7EF130317C05,
),
(
0xC49EAD39E9EB7E36E8BC25824299661D5B6D0E200BBC527ECCB946134726BF5DBD861E8E6EC946260B82ED26AFE15FB,
0x5397DAD1357CF8333189821B737172B18099ECF7EE8BDB4B3F05EBCCDF40E1782A6C71436D5ACE0843D7F361CBC6DB2,
),
),
None,
id="max_scalar_times_point",
),
# Subgroup related test cases.
pytest.param(
Spec.P2 + Scalar(Spec.Q - 1),
-Spec.P2, # negated P2
None,
id="q_minus_1_times_point",
),
pytest.param(
Spec.P2 + Scalar(Spec.Q),
Spec.INF_G2,
None,
id="q_times_point",
),
pytest.param(
Spec.G2 + Scalar(Spec.Q),
Spec.INF_G2,
None,
id="q_times_generator",
),
pytest.param(
Spec.P2 + Scalar(Spec.Q + 1),
Spec.P2,
None,
id="q_plus_1_times_point",
),
pytest.param(
Spec.P2 + Scalar(2 * Spec.Q),
Spec.INF_G2,
None,
id="2q_times_point",
),
pytest.param(
Spec.P2 + Scalar((2**256 // Spec.Q) * Spec.Q),
Spec.INF_G2,
None,
id="large_multiple_of_q_times_point",
),
],
)
def test_valid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2MUL precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-mul_G2_bls.json")
+ [
pytest.param(
PointG2((1, 0), (0, 0)) + Scalar(0),
id="invalid_point_a_1",
),
pytest.param(
PointG2((0, 1), (0, 0)) + Scalar(0),
id="invalid_point_a_2",
),
pytest.param(
PointG2((0, 0), (1, 0)) + Scalar(0),
id="invalid_point_a_3",
),
pytest.param(
PointG2((0, 0), (0, 1)) + Scalar(0),
id="invalid_point_a_4",
),
pytest.param(
PointG2((Spec.P, 0), (0, 0)) + Scalar(0),
id="x_1_equal_to_p_times_0",
),
pytest.param(
PointG2((0, Spec.P), (0, 0)) + Scalar(0),
id="x_2_equal_to_p_times_0",
),
pytest.param(
PointG2((0, 0), (Spec.P, 0)) + Scalar(0),
id="y_1_equal_to_p_times_0",
),
pytest.param(
PointG2((0, 0), (0, Spec.P)) + Scalar(0),
id="y_2_equal_to_p_times_0",
),
pytest.param(
PointG2((Spec.P + 1, 0), (0, 0)) + Scalar(0),
id="x1_above_modulus_times_0",
),
pytest.param(
PointG2(
(0x01, 0), # x coordinate in Fp2 (1 + 0i)
(0x07, 0), # y coordinate satisfying y^2 = x^3 + 5 in Fp2
)
+ Scalar(0),
id="point_on_wrong_curve_times_0",
),
pytest.param(
b"\x80" + bytes(Spec.INF_G2)[1:] + Scalar(0),
id="invalid_encoding",
),
pytest.param(
(Spec.INF_G2 + Scalar(0))[:-1],
id="input_too_short",
),
pytest.param(
b"\x00" + (Spec.INF_G2 + Scalar(0)),
id="input_too_long",
),
pytest.param(
b"",
id="zero_length_input",
),
pytest.param(
b"\x00" * 160,
id="all_zero_160_bytes",
),
pytest.param(
b"\xff" + b"\x00" * 127 + b"\xff" + b"\x00" * 31,
id="bad_inf_flag_with_scalar",
),
pytest.param(
b"\xc0" + b"\x00" * 127 + b"\x00" * 32,
id="comp_instead_of_uncomp_with_scalar",
),
pytest.param(
Spec.G1 + Spec.G1,
id="g1_input_invalid_length",
),
pytest.param(
Spec.G2 + Spec.G2,
id="g2_input_invalid_length",
),
pytest.param(
Spec.G2,
id="g2_truncated_input",
),
pytest.param(
Spec.INF_G2 + Scalar(0).x.to_bytes(30, byteorder="big"),
id="inf_with_short_scalar",
),
pytest.param(
Spec.INF_G2 + Scalar(0).x.to_bytes(34, byteorder="big"),
id="inf_with_long_scalar",
),
pytest.param(
Spec.INF_G2 + (b"\x01" + b"\x00" * 32),
id="scalar_too_large_bytes",
),
pytest.param(
Spec.P2 + (b"\x01" + b"\x00" * 32),
id="scalar_too_large_bytes_with_point",
),
pytest.param(
Spec.G2 + (b"\x01\x23\x45"),
id="scalar_too_small_bytes",
),
pytest.param(
Scalar(1) + Scalar(1),
id="two_scalars",
),
pytest.param(
bytes(Spec.G2) + bytes(Scalar(0))[128:],
id="mixed_g2_scalar_truncated",
),
pytest.param(
PointG2((Spec.P2.x[0] | Spec.MAX_FP_BIT_SET, Spec.P2.x[1]), Spec.P2.y) + Scalar(1),
id="non_zero_byte_16_boundary_violation_x1",
),
pytest.param(
PointG2((Spec.P2.x[0], Spec.P2.x[1] | Spec.MAX_FP_BIT_SET), Spec.P2.y) + Scalar(1),
id="non_zero_byte_16_boundary_violation_x2",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0] | Spec.MAX_FP_BIT_SET, Spec.P2.y[1])) + Scalar(1),
id="non_zero_byte_16_boundary_violation_y1",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] | Spec.MAX_FP_BIT_SET)) + Scalar(1),
id="non_zero_byte_16_boundary_violation_y2",
),
# Not in the r-order subgroup test cases.
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(0),
id="not_in_subgroup_times_0",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(1),
id="not_in_subgroup_times_1",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(2),
id="not_in_subgroup_times_2",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(3),
id="not_in_subgroup_times_3",
),
pytest.param(
Scalar(Spec.Q) + Spec.P2_NOT_IN_SUBGROUP,
id="q_times_not_in_subgroup",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(Spec.Q - 1),
id="not_in_subgroup_times_q_minus_1",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(Spec.Q + 1),
id="not_in_subgroup_times_q_plus_1",
),
# More not in the r-order subgroup test cases, but using random
# generated points.
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
id="rand_not_in_subgroup_0_times_1",
),
pytest.param(
Scalar(2) + G2_POINTS_NOT_IN_SUBGROUP[1],
id="2_times_rand_not_in_subgroup_1",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[2] + Scalar(Spec.Q),
id="rand_not_in_subgroup_2_times_q",
),
pytest.param(
Scalar(0) + G2_POINTS_NOT_IN_SUBGROUP[3],
id="0_times_rand_not_in_subgroup_3",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[4] + Scalar(2**255 - 1),
id="rand_not_in_subgroup_4_times_large_scalar",
),
# Not on the curve cases using random generated points.
pytest.param(
G2_POINTS_NOT_ON_CURVE[0] + Scalar(1),
id="rand_not_on_curve_0_times_1",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[1] + Scalar(2),
id="rand_not_on_curve_1_times_2",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[2] + Scalar(Spec.Q),
id="rand_not_on_curve_2_times_q",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[3] + Scalar(0),
id="rand_not_on_curve_3_times_0",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[4] + Scalar(Spec.Q - 1),
id="rand_not_on_curve_4_times_q_minus_1",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Negative tests for the BLS12_G2MUL precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
1,
id="extra_gas",
),
pytest.param(
Spec.INF_G2 + Scalar(0),
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2MUL precompile gas requirements."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
id="zero_times_inf",
),
pytest.param(
Spec.INF_G2 + Scalar(2),
Spec.INF_G2,
id="two_times_inf",
),
],
)
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2MUL precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2msm.py | """
Test the BLS12_G2MSM precompile.
Test the BLS12_G2MSM precompile introduced in
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G2_POINTS_NOT_IN_SUBGROUP, G2_POINTS_NOT_ON_CURVE
from .helpers import PointG2, vectors_from_file
from .spec import Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.G2MSM], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("msm_G2_bls.json")
+ [
# Multiple pair scalar multiplication cases.
pytest.param(
Spec.G2 + Scalar(1) + Spec.INF_G2 + Scalar(1),
Spec.G2,
None,
id="g2_plus_inf",
),
pytest.param(
Spec.G2 + Scalar(0) + Spec.P2 + Scalar(0) + Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
None,
id="all_zero_scalars",
),
pytest.param(
Spec.G2 + Scalar(1) + (-Spec.G2) + Scalar(1),
Spec.INF_G2,
None,
id="sum_to_identity_opposite",
),
pytest.param(
Spec.G2 + Scalar(Spec.Q - 1) + Spec.G2 + Scalar(1),
Spec.INF_G2,
None,
id="scalars_sum_to_q",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.G2 + Scalar(0) + Spec.INF_G2 + Scalar(5),
Spec.G2,
None,
id="combined_basic_cases",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.INF_G2 + Scalar(500),
Spec.G2,
None,
id="identity_with_large_scalar",
),
pytest.param(
Spec.G2 + Scalar(0) + Spec.P2 + Scalar(0) + (-Spec.G2) + Scalar(0),
Spec.INF_G2,
None,
id="multiple_points_zero_scalar",
),
# Cases with maximum discount table (test vector for gas cost
# calculation)
pytest.param(
(Spec.P2 + Scalar(Spec.Q)) * (len(Spec.G2MSM_DISCOUNT_TABLE) - 1),
Spec.INF_G2,
None,
id="max_discount",
marks=pytest.mark.slow,
),
pytest.param(
(Spec.P2 + Scalar(Spec.Q)) * len(Spec.G2MSM_DISCOUNT_TABLE),
Spec.INF_G2,
None,
id="max_discount_plus_1",
marks=pytest.mark.slow,
),
],
)
@pytest.mark.slow()
def test_valid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test valid calls to the BLS12_G2MSM precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("fail-msm_G2_bls.json")
+ [
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Scalar(1),
id="not_in_subgroup_1_pos_0",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP_TIMES_2 + Scalar(1),
id="not_in_subgroup_2_pos_0",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.P2_NOT_IN_SUBGROUP + Scalar(1),
id="not_in_subgroup_1_pos_1",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.P2_NOT_IN_SUBGROUP_TIMES_2 + Scalar(1),
id="not_in_subgroup_2_pos_1",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
id="rand_not_in_subgroup_0_pos_0",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[1] + Scalar(1),
id="rand_not_in_subgroup_1_pos_0",
),
pytest.param(
Spec.G2 + Scalar(1) + G2_POINTS_NOT_IN_SUBGROUP[0] + Scalar(1),
id="rand_not_in_subgroup_0_pos_1",
),
pytest.param(
Spec.G2 + Scalar(1) + G2_POINTS_NOT_IN_SUBGROUP[1] + Scalar(1),
id="rand_not_in_subgroup_1_pos_1",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[0] + Scalar(0),
id="not_on_curve_0_pos_0",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[1] + Scalar(1),
id="not_on_curve_1_pos_0",
),
pytest.param(
Spec.G2 + Scalar(1) + G2_POINTS_NOT_ON_CURVE[0] + Scalar(0),
id="not_on_curve_0_pos_1",
),
pytest.param(
Spec.G2 + Scalar(1) + G2_POINTS_NOT_ON_CURVE[1] + Scalar(1),
id="not_on_curve_1_pos_1",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.G2,
id="incomplete_input_missing_scalar",
),
pytest.param(
Spec.G2 + Scalar(1) + bytes([0]),
id="incomplete_input_extra_byte",
),
pytest.param(
Spec.G2 + Scalar(1) + Spec.G1 + Scalar(1),
id="mixing_g2_with_g1",
),
pytest.param(
Spec.G2 + (b"\x01" + b"\x00" * 32), # Scalar > UINT256_MAX
id="scalar_too_large",
),
pytest.param(
# Invalid scalar length
Spec.G2 + Scalar(1).x.to_bytes(16, byteorder="big"),
id="scalar_too_short",
),
pytest.param(
bytes([0]) * 287, # Just under minimum valid length
id="input_too_short_by_1",
),
# Coordinates above modulus p cases.
pytest.param(
PointG2((Spec.P2.x[0] + Spec.P, Spec.P2.x[1]), Spec.P2.y) + Scalar(1),
id="x_c0_above_p_pos_0",
),
pytest.param(
PointG2((Spec.P2.x[0], Spec.P2.x[1] + Spec.P), Spec.P2.y) + Scalar(1),
id="x_c1_above_p_pos_0",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0] + Spec.P, Spec.P2.y[1])) + Scalar(1),
id="y_c0_above_p_pos_0",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] + Spec.P)) + Scalar(1),
id="y_c1_above_p_pos_0",
),
pytest.param(
Spec.G2
+ Scalar(1)
+ PointG2((Spec.P2.x[0] + Spec.P, Spec.P2.x[1]), Spec.P2.y)
+ Scalar(0),
id="x_c0_above_p_pos_1",
),
pytest.param(
Spec.G2
+ Scalar(1)
+ PointG2((Spec.P2.x[0], Spec.P2.x[1] + Spec.P), Spec.P2.y)
+ Scalar(0),
id="x_c1_above_p_pos_1",
),
pytest.param(
Spec.G2
+ Scalar(1)
+ PointG2(Spec.P2.x, (Spec.P2.y[0] + Spec.P, Spec.P2.y[1]))
+ Scalar(0),
id="y_c0_above_p_pos_1",
),
pytest.param(
Spec.G2
+ Scalar(1)
+ PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] + Spec.P))
+ Scalar(0),
id="y_c1_above_p_pos_1",
),
],
# Input length tests can be found in
# ./test_bls12_variable_length_input_contracts.py
)
@pytest.mark.parametrize(
"precompile_gas_modifier", [100_000], ids=[""]
) # Add gas so that won't be the cause of failure
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test invalid calls to the BLS12_G2MSM precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
id="single_inf_times_zero",
),
pytest.param(
Spec.G2 + Scalar(0) + Spec.INF_G2 + Scalar(0),
Spec.INF_G2,
id="msm_all_zeros_different_call_types",
),
],
)
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2MSM precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/__init__.py | tests/prague/eip2537_bls_12_381_precompiles/__init__.py | """
Tests [EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_eip_mainnet.py | tests/prague/eip2537_bls_12_381_precompiles/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of
[EIP-2537: Precompile for BLS12-381 curve operations](https://eips.ethereum.org/EIPS/eip-2537).
""" # noqa: E501
import pytest
from ethereum_test_tools import Alloc, StateTestFiller, Transaction
from .spec import FP, FP2, Scalar, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
@pytest.mark.parametrize(
"precompile_address,input_data,expected_output,vector_gas_value",
[
pytest.param(
Spec.G1ADD,
Spec.G1 + Spec.INF_G1,
Spec.G1,
None,
id="G1ADD",
),
pytest.param(
Spec.G1MSM,
Spec.G1 + Scalar(1) + Spec.INF_G1 + Scalar(1),
Spec.G1,
None,
id="G1MSM",
),
pytest.param(
Spec.G2ADD,
Spec.G2 + Spec.INF_G2,
Spec.G2,
None,
id="G2ADD",
),
pytest.param(
Spec.G2MSM,
Spec.G2 + Scalar(1) + Spec.INF_G2 + Scalar(1),
Spec.G2,
None,
id="G2MSM",
),
pytest.param(
Spec.PAIRING,
Spec.G1 + Spec.INF_G2,
Spec.PAIRING_TRUE,
None,
id="PAIRING",
),
pytest.param(
Spec.MAP_FP_TO_G1,
FP(
799950832265136997107648781861994410980648980263584507133499364313075404851459407870655748616451882783569609925573 # noqa: E501
),
Spec.INF_G1,
None,
id="fp_map_to_inf",
),
pytest.param(
Spec.MAP_FP2_TO_G2,
FP2(
(
3510328712861478240121438855244276237335901234329585006107499559909114695366216070652508985150831181717984778988906, # noqa: E501
2924545590598115509050131525615277284817672420174395176262156166974132393611647670391999011900253695923948997972401, # noqa: E501
)
),
Spec.INF_G2,
None,
id="fp_map_to_inf",
),
],
)
def test_eip_2537(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the all precompiles of EIP-2537."""
state_test(
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py | tests/prague/eip2537_bls_12_381_precompiles/test_bls12_g2add.py | """
Tests BLS12_G2ADD precompile.
Tests the BLS12_G2ADD precompile implementation from [EIP-2537:
Precompile for BLS12-381 curve operations]
(https://eips.ethereum.org/EIPS/eip-2537).
"""
import pytest
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_tools import Opcodes as Op
from .conftest import G2_POINTS_NOT_IN_SUBGROUP, G2_POINTS_NOT_ON_CURVE
from .helpers import add_points_g2, vectors_from_file
from .spec import PointG2, Spec, ref_spec_2537
REFERENCE_SPEC_GIT_PATH = ref_spec_2537.git_path
REFERENCE_SPEC_VERSION = ref_spec_2537.version
pytestmark = [
pytest.mark.valid_from("Prague"),
pytest.mark.parametrize("precompile_address", [Spec.G2ADD], ids=[""]),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
# Test vectors from the reference spec (from the cryptography team)
vectors_from_file("add_G2_bls.json")
+ [
# Identity (infinity) element test cases. Checks that any point added
# to the identity element (INF) equals itself.
pytest.param(
Spec.G2 + Spec.INF_G2,
Spec.G2,
None,
id="generator_plus_inf",
),
pytest.param(
Spec.INF_G2 + Spec.G2,
Spec.G2,
None,
id="inf_plus_generator",
),
pytest.param(
Spec.INF_G2 + Spec.INF_G2,
Spec.INF_G2,
None,
id="inf_plus_inf",
),
pytest.param(
Spec.INF_G2 + Spec.P2,
Spec.P2,
None,
id="inf_plus_point",
),
# Basic arithmetic properties test cases.
# Checks fundamental properties of the BLS12-381 curve.
pytest.param(
Spec.P2 + (-Spec.P2),
Spec.INF_G2,
None,
id="point_plus_neg_point",
),
pytest.param(
Spec.G2 + (-Spec.G2),
Spec.INF_G2,
None,
id="generator_plus_neg_point",
),
pytest.param(
Spec.P2 + Spec.G2,
add_points_g2(Spec.G2, Spec.P2),
None,
id="commutative_check_a",
),
pytest.param(
Spec.G2 + Spec.P2,
add_points_g2(Spec.P2, Spec.G2),
None,
id="commutative_check_b",
),
pytest.param(
Spec.P2 + Spec.P2,
add_points_g2(Spec.P2, Spec.P2),
None,
id="point_doubling",
),
pytest.param( # (P + G) + P = P + (G + P)
add_points_g2(Spec.P2, Spec.G2) + Spec.P2,
add_points_g2(Spec.P2, add_points_g2(Spec.G2, Spec.P2)),
None,
id="associativity_check",
),
pytest.param( # -(P+G) = (-P)+(-G)
(-(add_points_g2(Spec.P2, Spec.G2))) + Spec.INF_G2,
add_points_g2((-Spec.P2), (-Spec.G2)),
None,
id="negation_of_sum",
),
pytest.param(
add_points_g2(Spec.G2, Spec.G2) + add_points_g2(Spec.P2, Spec.P2),
add_points_g2(add_points_g2(Spec.G2, Spec.G2), add_points_g2(Spec.P2, Spec.P2)),
None,
id="double_generator_plus_double_point",
),
pytest.param(
add_points_g2(Spec.G2, Spec.G2) + add_points_g2(Spec.G2, Spec.G2),
add_points_g2(add_points_g2(Spec.G2, Spec.G2), add_points_g2(Spec.G2, Spec.G2)),
None,
id="double_generator_plus_double_generator",
),
pytest.param( # (x,y) + (x,-y) = INF
PointG2(Spec.P2.x, Spec.P2.y)
+ PointG2(Spec.P2.x, (-Spec.P2.y[0] % Spec.P, -Spec.P2.y[1] % Spec.P)),
Spec.INF_G2,
None,
id="point_plus_reflected_point",
),
# Not in the r-order subgroup test cases. Checks that any point on the
# curve but not in the subgroup is used for operations.
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Spec.P2_NOT_IN_SUBGROUP,
Spec.P2_NOT_IN_SUBGROUP_TIMES_2,
None,
id="non_sub_plus_non_sub",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Spec.P2_NOT_IN_SUBGROUP_TIMES_2,
add_points_g2(Spec.P2_NOT_IN_SUBGROUP, Spec.P2_NOT_IN_SUBGROUP_TIMES_2),
None,
id="non_sub_plus_doubled_non_sub",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + Spec.INF_G2,
Spec.P2_NOT_IN_SUBGROUP,
None,
id="non_sub_plus_inf",
),
pytest.param(
Spec.G2 + Spec.P2_NOT_IN_SUBGROUP,
add_points_g2(Spec.G2, Spec.P2_NOT_IN_SUBGROUP),
None,
id="generator_plus_non_sub",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP + (-Spec.P2_NOT_IN_SUBGROUP),
Spec.INF_G2,
None,
id="non_sub_plus_neg_non_sub",
),
pytest.param(
Spec.P2 + Spec.P2_NOT_IN_SUBGROUP,
add_points_g2(Spec.P2, Spec.P2_NOT_IN_SUBGROUP),
None,
id="in_sub_plus_non_sub",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP_TIMES_2 + Spec.P2,
add_points_g2(Spec.P2_NOT_IN_SUBGROUP_TIMES_2, Spec.P2),
None,
id="doubled_non_sub_plus_in_sub",
),
pytest.param(
Spec.P2_NOT_IN_SUBGROUP_TIMES_2 + (-Spec.P2_NOT_IN_SUBGROUP),
Spec.P2_NOT_IN_SUBGROUP,
None,
id="doubled_non_sub_plus_neg",
),
# More not in the r-order subgroup test cases, but using random
# generated points.
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[0] + Spec.P2,
add_points_g2(G2_POINTS_NOT_IN_SUBGROUP[0], Spec.P2),
None,
id="rand_not_in_subgroup_0_plus_point",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[1] + Spec.G2,
add_points_g2(G2_POINTS_NOT_IN_SUBGROUP[1], Spec.G2),
None,
id="rand_not_in_subgroup_1_plus_generator",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[2] + Spec.INF_G2,
G2_POINTS_NOT_IN_SUBGROUP[2],
None,
id="rand_not_in_subgroup_2_plus_inf",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[3] + (-G2_POINTS_NOT_IN_SUBGROUP[3]),
Spec.INF_G2,
None,
id="rand_not_in_subgroup_3_plus_neg_self",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[4] + G2_POINTS_NOT_IN_SUBGROUP[0],
add_points_g2(G2_POINTS_NOT_IN_SUBGROUP[4], G2_POINTS_NOT_IN_SUBGROUP[0]),
None,
id="rand_not_in_subgroup_4_plus_0",
),
],
)
def test_valid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2ADD precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data",
vectors_from_file("fail-add_G2_bls.json")
+ [
pytest.param(
PointG2((1, 0), (0, 0)) + Spec.INF_G2,
id="invalid_point_a_1",
),
pytest.param(
PointG2((0, 0), (1, 0)) + Spec.INF_G2,
id="invalid_point_a_2",
),
pytest.param(
PointG2((0, 1), (0, 0)) + Spec.INF_G2,
id="invalid_point_a_3",
),
pytest.param(
PointG2((0, 0), (0, 1)) + Spec.INF_G2,
id="invalid_point_a_4",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] - 1)) + Spec.P2,
id="invalid_point_a_5",
),
pytest.param(
Spec.INF_G2 + PointG2((1, 0), (0, 0)),
id="invalid_point_b_1",
),
pytest.param(
Spec.INF_G2 + PointG2((0, 0), (1, 0)),
id="invalid_point_b_2",
),
pytest.param(
Spec.INF_G2 + PointG2((0, 1), (0, 0)),
id="invalid_point_b_3",
),
pytest.param(
Spec.INF_G2 + PointG2((0, 0), (0, 1)),
id="invalid_point_b_4",
),
pytest.param(
Spec.P2 + PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] - 1)),
id="invalid_point_b_5",
),
pytest.param(
PointG2((Spec.P, 0), (0, 0)) + Spec.INF_G2,
id="a_x_1_equal_to_p",
),
pytest.param(
PointG2((0, Spec.P), (0, 0)) + Spec.INF_G2,
id="a_x_2_equal_to_p",
),
pytest.param(
PointG2((0, 0), (Spec.P, 0)) + Spec.INF_G2,
id="a_y_1_equal_to_p",
),
pytest.param(
PointG2((0, 0), (0, Spec.P)) + Spec.INF_G2,
id="a_y_2_equal_to_p",
),
pytest.param(
Spec.INF_G2 + PointG2((Spec.P, 0), (0, 0)),
id="b_x_1_equal_to_p",
),
pytest.param(
Spec.INF_G2 + PointG2((0, Spec.P), (0, 0)),
id="b_x_2_equal_to_p",
),
pytest.param(
Spec.INF_G2 + PointG2((0, 0), (Spec.P, 0)),
id="b_y_1_equal_to_p",
),
pytest.param(
Spec.INF_G2 + PointG2((0, 0), (0, Spec.P)),
id="b_y_2_equal_to_p",
),
pytest.param(
PointG2((Spec.P2.x[0] + Spec.P, Spec.P2.x[1]), Spec.P2.y) + Spec.G2,
id="a_x_1_above_p",
),
pytest.param(
PointG2((Spec.P2.x[0], Spec.P2.x[1] + Spec.P), Spec.P2.y) + Spec.G2,
id="a_x_2_above_p",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0] + Spec.P, Spec.P2.y[1])) + Spec.G2,
id="a_y_1_above_p",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] + Spec.P)) + Spec.G2,
id="a_y_2_above_p",
),
pytest.param(
Spec.P2 + PointG2((Spec.G2.x[0] + Spec.P, Spec.G2.x[1]), Spec.G2.y),
id="b_x_1_above_p",
),
pytest.param(
Spec.P2 + PointG2((Spec.G2.x[0], Spec.G2.x[1] + Spec.P), Spec.G2.y),
id="b_x_2_above_p",
),
pytest.param(
Spec.P2 + PointG2(Spec.G2.x, (Spec.G2.y[0] + Spec.P, Spec.G2.y[1])),
id="b_y_1_above_p",
),
pytest.param(
Spec.P2 + PointG2(Spec.G2.x, (Spec.G2.y[0], Spec.G2.y[1] + Spec.P)),
id="b_y_2_above_p",
),
pytest.param(
b"\x80" + bytes(Spec.INF_G2)[1:] + Spec.INF_G2,
id="invalid_encoding_a",
),
pytest.param(
Spec.INF_G2 + b"\x80" + bytes(Spec.INF_G2)[1:],
id="invalid_encoding_b",
),
pytest.param(
(Spec.INF_G2 + Spec.INF_G2)[:-1],
id="input_too_short",
),
pytest.param(
b"\x00" + (Spec.INF_G2 + Spec.INF_G2),
id="input_too_long",
),
pytest.param(
b"",
id="zero_length_input",
),
pytest.param(
Spec.G2,
id="only_one_point",
),
pytest.param(
Spec.G1 + Spec.G1,
id="g1_points",
),
pytest.param(
PointG2((Spec.P + 1, 0), (0, 0)) + Spec.INF_G2,
id="x1_above_modulus",
),
pytest.param(
PointG2(
(0x01, 0), # x coordinate in Fp2 (1 + 0i)
(0x07, 0), # y coordinate satisfying y^2 = x^3 + 5 in Fp2
)
+ Spec.INF_G2,
id="point_on_wrong_curve_b=5",
),
pytest.param(
bytes(Spec.G2) + bytes(Spec.G2)[128:],
id="mixed_g1_g2_points",
),
pytest.param(
PointG2((Spec.P2.x[0] | Spec.MAX_FP_BIT_SET, Spec.P2.x[1]), Spec.P2.y) + Spec.P2,
id="non_zero_byte_16_boundary_violation_x1",
),
pytest.param(
PointG2((Spec.P2.x[0], Spec.P2.x[1] | Spec.MAX_FP_BIT_SET), Spec.P2.y) + Spec.P2,
id="non_zero_byte_16_boundary_violation_x2",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0] | Spec.MAX_FP_BIT_SET, Spec.P2.y[1])) + Spec.P2,
id="non_zero_byte_16_boundary_violation_y1",
),
pytest.param(
PointG2(Spec.P2.x, (Spec.P2.y[0], Spec.P2.y[1] | Spec.MAX_FP_BIT_SET)) + Spec.P2,
id="non_zero_byte_16_boundary_violation_y2",
),
# Not on the curve cases using random generated points.
pytest.param(
G2_POINTS_NOT_ON_CURVE[0] + Spec.INF_G2,
id="rand_not_on_curve_0_plus_inf",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[1] + Spec.P2,
id="rand_not_on_curve_1_plus_point",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[2] + G2_POINTS_NOT_IN_SUBGROUP[0],
id="rand_not_on_curve_2_plus_not_in_subgroup_0",
),
pytest.param(
G2_POINTS_NOT_ON_CURVE[3] + G2_POINTS_NOT_ON_CURVE[4],
id="rand_not_on_curve_3_plus_4",
),
pytest.param(
Spec.INF_G2 + G2_POINTS_NOT_ON_CURVE[0],
id="inf_plus_rand_not_on_curve_0",
),
pytest.param(
Spec.P2 + G2_POINTS_NOT_ON_CURVE[1],
id="point_plus_rand_not_on_curve_1",
),
pytest.param(
G2_POINTS_NOT_IN_SUBGROUP[2] + G2_POINTS_NOT_ON_CURVE[2],
id="rand_not_in_subgroup_2_plus_rand_not_on_curve_2",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID], ids=[""])
def test_invalid(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Negative tests for the BLS12_G2ADD precompile."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier",
[
pytest.param(
Spec.INF_G2 + Spec.INF_G2,
Spec.INF_G2,
1,
id="extra_gas",
),
pytest.param(
Spec.INF_G2 + Spec.INF_G2,
Spec.INVALID,
-1,
id="insufficient_gas",
),
],
)
def test_gas(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2ADD precompile gas requirements."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode", # Note `Op.CALL` is used for all the `test_valid` cases.
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.INF_G2 + Spec.INF_G2,
Spec.INF_G2,
id="inf_plus_inf",
),
pytest.param(
Spec.INF_G2 + Spec.G2,
Spec.G2,
id="inf_plus_generator",
),
],
)
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test the BLS12_G2ADD precompile using different call types."""
state_test(
env=Environment(),
pre=pre,
tx=tx,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/test_refunds.py | tests/prague/eip7623_increase_calldata_cost/test_refunds.py | """
Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
"""
from enum import Enum, Flag, auto
from typing import Dict, List
import pytest
from ethereum_test_forks import Fork, Prague
from ethereum_test_tools import (
Address,
Alloc,
AuthorizationTuple,
Bytecode,
StateTestFiller,
Transaction,
TransactionReceipt,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import DataTestType
from .spec import ref_spec_7623
REFERENCE_SPEC_GIT_PATH = ref_spec_7623.git_path
REFERENCE_SPEC_VERSION = ref_spec_7623.version
ENABLE_FORK = Prague
pytestmark = [pytest.mark.valid_from(str(ENABLE_FORK))]
class RefundTestType(Enum):
    """
    Refund test type.

    Classifies where the post-refund execution gas cost lands relative to the
    EIP-7623 floor data cost (the floor is charged whenever it is higher).
    """

    EXECUTION_GAS_MINUS_REFUND_GREATER_THAN_DATA_FLOOR = 0
    """
    The execution gas minus the refund is greater than the data floor, hence
    the execution gas cost is charged.
    """
    EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR = 1
    """
    The execution gas minus the refund is less than the data floor, hence the
    data floor cost is charged.
    """
    EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR = 2
    """The execution gas minus the refund is equal to the data floor."""
class RefundType(Flag):
    """
    Refund type.

    A `Flag`, so members can be OR-combined to trigger multiple refund
    sources in a single transaction.
    """

    STORAGE_CLEAR = auto()
    """The storage is cleared from a non-zero value."""

    AUTHORIZATION_EXISTING_AUTHORITY = auto()
    """
    The authorization list contains an authorization where the authority exists
    in the state.
    """
@pytest.fixture
def data_test_type() -> DataTestType:
    """Return data test type."""
    # All refund tests in this file exercise the case where the floor data
    # cost exceeds the intrinsic gas cost.
    return DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS
@pytest.fixture
def authorization_list(pre: Alloc, refund_type: RefundType) -> List[AuthorizationTuple] | None:
    """
    Override the conftest fixture so the authorization list is derived
    directly from the `refund_type` flag.
    """
    if RefundType.AUTHORIZATION_EXISTING_AUTHORITY in refund_type:
        # The signer is funded with 1 wei so the authority account already
        # exists in the state.
        return [AuthorizationTuple(signer=pre.fund_eoa(1), address=Address(1))]
    return None
@pytest.fixture
def ty(refund_type: RefundType) -> int:
    """
    Override the conftest fixture so the transaction type is derived directly
    from the `refund_type` flag.
    """
    uses_authorization = RefundType.AUTHORIZATION_EXISTING_AUTHORITY in refund_type
    # An authorization list requires a type-4 transaction; otherwise a plain
    # type-2 transaction is used.
    return 4 if uses_authorization else 2
@pytest.fixture
def max_refund(fork: Fork, refund_type: RefundType) -> int:
    """Return the max refund gas of the transaction."""
    gas_costs = fork.gas_costs()
    total = 0
    if RefundType.STORAGE_CLEAR in refund_type:
        total += gas_costs.R_STORAGE_CLEAR
    if RefundType.AUTHORIZATION_EXISTING_AUTHORITY in refund_type:
        total += gas_costs.R_AUTHORIZATION_EXISTING_AUTHORITY
    return total
@pytest.fixture
def prefix_code_gas(fork: Fork, refund_type: RefundType) -> int:
    """Return the execution gas consumed by the refund-triggering prefix code."""
    if RefundType.STORAGE_CLEAR not in refund_type:
        return 0
    # Cost of Op.SSTORE(0, 0): cold slot access + storage reset + two PUSH
    # operations for the arguments.
    gas_costs = fork.gas_costs()
    return gas_costs.G_COLD_SLOAD + gas_costs.G_STORAGE_RESET + (gas_costs.G_VERY_LOW * 2)
@pytest.fixture
def prefix_code(refund_type: RefundType) -> Bytecode:
    """Return the code prefix that triggers the refund, if any."""
    if RefundType.STORAGE_CLEAR not in refund_type:
        return Bytecode()
    # Clearing the pre-set non-zero slot produces the storage-clear refund.
    return Op.SSTORE(0, 0)
@pytest.fixture
def code_storage(refund_type: RefundType) -> Dict:
    """Return the pre-set storage for the contract under test."""
    # A non-zero slot is required so that the SSTORE(0, 0) prefix clears it.
    return {0: 1} if RefundType.STORAGE_CLEAR in refund_type else {}
@pytest.fixture
def contract_creating_tx() -> bool:
    """
    Override fixture in order to avoid a circular fixture dependency since none
    of these tests are contract creating transactions.
    """
    # The conftest version derives this from `to`, which here depends on gas
    # fixtures that in turn depend on this value.
    return False
@pytest.fixture
def intrinsic_gas_data_floor_minimum_delta() -> int:
    """
    Induce a minimum delta between the transaction intrinsic gas cost and the
    floor data gas cost.
    Since at least one of the cases requires some execution gas expenditure
    (SSTORE clearing), we need to introduce an increment of the floor data cost
    above the transaction intrinsic gas cost, otherwise the floor data cost
    would always be the below the execution gas cost even after the refund is
    applied.
    This value has been set as of Prague and should be adjusted if the gas
    costs change.
    """
    # Empirically chosen for the Prague gas schedule (see docstring).
    return 250
@pytest.fixture
def execution_gas_used(
    tx_intrinsic_gas_cost_before_execution: int,
    tx_floor_data_cost: int,
    max_refund: int,
    prefix_code_gas: int,
    refund_test_type: RefundTestType,
) -> int:
    """
    Return the amount of gas that needs to be consumed by the execution.
    This gas amount is on top of the transaction intrinsic gas cost.
    If this value were zero it would result in the refund being applied to the
    execution gas cost and the resulting amount being always below the floor
    data cost, hence we need to find a higher value in this function to ensure
    we get both scenarios where the refund drives the execution cost below the
    floor data cost and above the floor data cost.
    """
    def execution_gas_cost(execution_gas: int) -> int:
        # Effective gas charged after the (capped) refund is applied.
        total_gas_used = tx_intrinsic_gas_cost_before_execution + execution_gas
        return total_gas_used - min(max_refund, total_gas_used // 5)
    # The prefix code (e.g. the SSTORE clear) is the minimum execution gas.
    execution_gas = prefix_code_gas
    assert execution_gas_cost(execution_gas) < tx_floor_data_cost, (
        "tx_floor_data_cost is too low, there might have been a gas cost change that caused this "
        "test to fail. Try increasing the intrinsic_gas_data_floor_minimum_delta fixture."
    )
    # Dumb for-loop to find the execution gas cost that will result in the
    # expected refund.
    while execution_gas_cost(execution_gas) < tx_floor_data_cost:
        execution_gas += 1
    # `execution_gas` now produces a post-refund cost exactly at the floor;
    # offset by +/-1 for the greater/less-than scenarios.
    if refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR:
        return execution_gas
    elif refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_GREATER_THAN_DATA_FLOOR:
        return execution_gas + 1
    elif refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR:
        return execution_gas - 1
    raise ValueError("Invalid refund test type")
@pytest.fixture
def refund(
    tx_intrinsic_gas_cost_before_execution: int,
    execution_gas_used: int,
    max_refund: int,
) -> int:
    """Return the refund gas of the transaction."""
    # The refund is capped at one fifth of the total gas used.
    gas_used = execution_gas_used + tx_intrinsic_gas_cost_before_execution
    refund_cap = gas_used // 5
    return min(max_refund, refund_cap)
@pytest.fixture
def to(
    pre: Alloc,
    execution_gas_used: int,
    prefix_code: Bytecode,
    prefix_code_gas: int,
    code_storage: Dict,
) -> Address | None:
    """
    Return a contract that consumes the expected execution gas.
    At the moment we naively use JUMPDEST to consume the gas, which can yield
    very big contracts.
    Ideally, we can use memory expansion to consume gas.
    """
    # One JUMPDEST per remaining unit of gas to be burned after the
    # refund-triggering prefix code has run.
    extra_gas = execution_gas_used - prefix_code_gas
    return pre.deploy_contract(
        prefix_code + (Op.JUMPDEST * extra_gas) + Op.STOP,
        storage=code_storage,
    )
@pytest.fixture
def tx_gas_limit(
    tx_intrinsic_gas_cost_including_floor_data_cost: int,
    tx_intrinsic_gas_cost_before_execution: int,
    execution_gas_used: int,
) -> int:
    """
    Gas limit for the transaction: the pre-execution intrinsic cost plus the
    gas the execution is expected to consume.
    """
    limit = tx_intrinsic_gas_cost_before_execution + execution_gas_used
    # The limit must also cover the floor data cost, otherwise the transaction
    # would be invalid.
    assert limit >= tx_intrinsic_gas_cost_including_floor_data_cost
    return limit
@pytest.mark.parametrize(
    "refund_test_type",
    [
        RefundTestType.EXECUTION_GAS_MINUS_REFUND_GREATER_THAN_DATA_FLOOR,
        RefundTestType.EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR,
        RefundTestType.EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR,
    ],
)
@pytest.mark.parametrize(
    "refund_type",
    [
        RefundType.STORAGE_CLEAR,
        RefundType.STORAGE_CLEAR | RefundType.AUTHORIZATION_EXISTING_AUTHORITY,
        RefundType.AUTHORIZATION_EXISTING_AUTHORITY,
    ],
)
def test_gas_refunds_from_data_floor(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
    tx_floor_data_cost: int,
    tx_intrinsic_gas_cost_before_execution: int,
    execution_gas_used: int,
    refund: int,
    refund_test_type: RefundTestType,
) -> None:
    """
    Test gas refunds deducted from the execution gas cost and not the data
    floor.
    """
    # Effective execution gas charged after the refund is applied.
    gas_used = tx_intrinsic_gas_cost_before_execution + execution_gas_used - refund
    # Sanity-check that the fixtures produced the intended scenario.
    if refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_LESS_THAN_DATA_FLOOR:
        assert gas_used < tx_floor_data_cost
    elif refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_GREATER_THAN_DATA_FLOOR:
        assert gas_used > tx_floor_data_cost
    elif refund_test_type == RefundTestType.EXECUTION_GAS_MINUS_REFUND_EQUAL_TO_DATA_FLOOR:
        assert gas_used == tx_floor_data_cost
    else:
        raise ValueError("Invalid refund test type")
    # EIP-7623: the transaction is charged at least the floor data cost.
    if gas_used < tx_floor_data_cost:
        gas_used = tx_floor_data_cost
    # This is the actual test verification:
    # - During test filling, the receipt returned by the transition tool
    #   (t8n) is verified against the expected receipt.
    # - During test consumption, this is reflected in the balance difference
    #   and the state root.
    tx.expected_receipt = TransactionReceipt(gas_used=gas_used)
    state_test(
        pre=pre,
        post={
            tx.to: {
                # Verify that the storage was cleared (for storage clear
                # refund). See `code_storage` fixture for more details.
                "storage": {0: 0},
            }
        },
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/spec.py | tests/prague/eip7623_increase_calldata_cost/spec.py | """Defines EIP-7623 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
    """Defines the reference spec version and git path."""

    # Path of the EIP markdown file inside the `ethereum/EIPs` repository.
    git_path: str
    # Commit hash of the spec version the tests were written against.
    version: str
# Reference spec pinned to the commit the tests were written against.
ref_spec_7623 = ReferenceSpec("EIPS/eip-7623.md", "744f2075ba5deee9c1040eb089104d55bd89960d")

# Constants
@dataclass(frozen=True)
class Spec:
    """
    Parameters from the EIP-7623 specifications as defined at
    https://eips.ethereum.org/EIPS/eip-7623.
    """

    # NOTE: these are deliberately unannotated so they remain plain class
    # attributes (annotating them would turn them into dataclass fields).
    # Per-token cost used for the regular intrinsic gas calculation.
    STANDARD_TOKEN_COST = 4
    # Per-token cost used for the floor data gas cost.
    TOTAL_COST_FLOOR_PER_TOKEN = 10
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/conftest.py | tests/prague/eip7623_increase_calldata_cost/conftest.py | """Fixtures for the EIP-7623 tests."""
from typing import List, Sequence
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
EOA,
AccessList,
Address,
Alloc,
AuthorizationTuple,
Bytecode,
Bytes,
Hash,
Transaction,
TransactionException,
add_kzg_version,
)
from ethereum_test_tools import Opcodes as Op
from ...cancun.eip4844_blobs.spec import Spec as EIP_4844_Spec
from .helpers import DataTestType, find_floor_cost_threshold
@pytest.fixture
def to(
    request: pytest.FixtureRequest,
    pre: Alloc,
) -> Address | None:
    """Return the transaction destination derived from the (indirect) parameter."""
    param = getattr(request, "param", Op.STOP)
    if param is None:
        # Contract-creating transaction.
        return None
    if isinstance(param, str) and param == "eoa":
        return pre.fund_eoa(amount=0)
    if isinstance(param, Bytecode):
        return pre.deploy_contract(param)
    raise ValueError(f"Invalid value for `to` fixture: {param}")
@pytest.fixture
def protected() -> bool:
    """
    Return whether the transaction is protected or not. Only valid for type-0
    transactions.
    """
    # Default to replay-protected transactions unless a test parametrizes
    # otherwise.
    return True
@pytest.fixture
def access_list() -> List[AccessList] | None:
    """Access list for the transaction."""
    # No access list by default; tests parametrize this when needed.
    return None
@pytest.fixture
def authorization_refund() -> bool:
    """
    Return whether the transaction has an existing authority in the
    authorization list.
    """
    # Default: authorities do not pre-exist in the state (no refund).
    return False
@pytest.fixture
def authorization_list(
    request: pytest.FixtureRequest,
    pre: Alloc,
    authorization_refund: bool,
) -> List[AuthorizationTuple] | None:
    """
    Authorization-list for the transaction.
    This fixture must be parametrized indirectly with a list of addresses so
    that the authorizations can be signed by valid signers created via `pre`.
    """
    addresses = getattr(request, "param", None)
    if addresses is None:
        return None
    # Funding the signer with 1 wei makes the authority exist in the state,
    # which is the condition for the authorization refund.
    signer_balance = 1 if authorization_refund else 0
    return [
        AuthorizationTuple(signer=pre.fund_eoa(signer_balance), address=address)
        for address in addresses
    ]
@pytest.fixture
def blob_versioned_hashes(ty: int) -> Sequence[Hash] | None:
    """Versioned hashes for the transaction (only set for type-3 transactions)."""
    if ty != 3:
        return None
    return add_kzg_version(
        [Hash(1)],
        EIP_4844_Spec.BLOB_COMMITMENT_VERSION_KZG,
    )
@pytest.fixture
def contract_creating_tx(to: Address | None) -> bool:
    """Return whether the transaction creates a contract or not."""
    # A `None` destination marks a contract-creating transaction.
    return to is None
@pytest.fixture
def intrinsic_gas_data_floor_minimum_delta() -> int:
    """
    Induce a minimum delta between the transaction intrinsic gas cost and the
    floor data gas cost.
    """
    # Zero by default; overridden by tests that need execution headroom.
    return 0
@pytest.fixture
def tx_data(
    fork: Fork,
    data_test_type: DataTestType,
    access_list: List[AccessList] | None,
    authorization_list: List[AuthorizationTuple] | None,
    contract_creating_tx: bool,
    intrinsic_gas_data_floor_minimum_delta: int,
) -> Bytes:
    """
    All tests in this file use data that is generated dynamically depending on
    the case and the attributes of the transaction in order to reach the edge
    cases where the floor gas cost is equal or barely greater than the
    intrinsic gas cost.
    We have two different types of tests:
    - FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS: The floor gas cost is
      less than or equal to the intrinsic gas cost, which means that the size
      of the tokens in the data are not enough to trigger the floor gas cost.
    - FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS: The floor gas cost is greater
      than the intrinsic gas cost, which means that the size of the tokens in
      the data are enough to trigger the floor gas cost.
    E.g. Given a transaction with a single access list and a single storage
    key, its intrinsic gas cost (as of Prague fork) can be calculated as:
    - 21,000 gas for the transaction
    - 2,400 gas for the access list
    - 1,900 gas for the storage key
    - 16 gas for each non-zero byte in the data
    - 4 gas for each zero byte in the data
    Its floor data gas cost can be calculated as:
    - 21,000 gas for the transaction
    - 40 gas for each non-zero byte in the data
    - 10 gas for each zero byte in the data
    Notice that the data included in the transaction affects both the intrinsic
    gas cost and the floor data cost, but at different rates.
    The purpose of this function is to find the exact amount of data where the
    floor data gas cost starts exceeding the intrinsic gas cost.
    After a binary search we find that adding 717 tokens of data (179 non-zero
    bytes + 1 zero byte) triggers the floor gas cost.
    Therefore, this function will return a Bytes object with 179 non-zero bytes
    and 1 zero byte for `FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS` and a Bytes
    object with 179 non-zero bytes and no zero bytes for
    `FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS`
    """
    def tokens_to_data(tokens: int) -> Bytes:
        # EIP-7623 token accounting: a non-zero byte is 4 tokens, a zero byte
        # is 1 token; emit whole non-zero bytes first, zero bytes for the rest.
        return Bytes(b"\x01" * (tokens // 4) + b"\x00" * (tokens % 4))
    fork_intrinsic_cost_calculator = fork.transaction_intrinsic_cost_calculator()
    def transaction_intrinsic_cost_calculator(tokens: int) -> int:
        # Pre-execution intrinsic cost (excluding the floor data cost), plus
        # the optional minimum delta requested by the test.
        return (
            fork_intrinsic_cost_calculator(
                calldata=tokens_to_data(tokens),
                contract_creation=contract_creating_tx,
                access_list=access_list,
                authorization_list_or_count=authorization_list,
                return_cost_deducted_prior_execution=True,
            )
            + intrinsic_gas_data_floor_minimum_delta
        )
    fork_data_floor_cost_calculator = fork.transaction_data_floor_cost_calculator()
    def transaction_data_floor_cost_calculator(tokens: int) -> int:
        return fork_data_floor_cost_calculator(data=tokens_to_data(tokens))
    # Start with zero data and check the difference in the gas calculator
    # between the intrinsic gas cost and the floor gas cost.
    if transaction_data_floor_cost_calculator(0) >= transaction_intrinsic_cost_calculator(0):
        # Special case which is a transaction with no extra intrinsic gas costs
        # other than the data cost, any data will trigger the floor gas cost.
        if data_test_type == DataTestType.FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS:
            return Bytes(b"")
        else:
            return Bytes(b"\0")
    # `tokens` is the largest amount of data tokens that does NOT trigger the
    # floor gas cost; one more token tips the balance.
    tokens = find_floor_cost_threshold(
        floor_data_gas_cost_calculator=transaction_data_floor_cost_calculator,
        intrinsic_gas_cost_calculator=transaction_intrinsic_cost_calculator,
    )
    if data_test_type == DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS:
        return tokens_to_data(tokens + 1)
    return tokens_to_data(tokens)
@pytest.fixture
def tx_gas_delta() -> int:
    """
    Gas delta to modify the gas amount included with the transaction.
    If negative, the transaction will be invalid because the intrinsic gas cost
    is greater than the gas limit.
    This value operates regardless of whether the floor data gas cost is
    reached or not.
    If the value is greater than zero, the transaction will also be valid and
    the test will check that transaction processing does not consume more gas
    than it should.
    """
    # Default: the gas limit exactly matches the intrinsic gas cost.
    return 0
@pytest.fixture
def tx_intrinsic_gas_cost_before_execution(
    fork: Fork,
    tx_data: Bytes,
    access_list: List[AccessList] | None,
    authorization_list: List[AuthorizationTuple] | None,
    contract_creating_tx: bool,
) -> int:
    """
    Return the intrinsic gas cost that is applied before the execution start.
    This value never includes the floor data gas cost.
    """
    intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
    # `return_cost_deducted_prior_execution=True` excludes the floor data
    # cost, which is only compared against total gas used after execution.
    return intrinsic_gas_cost_calculator(
        calldata=tx_data,
        contract_creation=contract_creating_tx,
        access_list=access_list,
        authorization_list_or_count=authorization_list,
        return_cost_deducted_prior_execution=True,
    )
@pytest.fixture
def tx_intrinsic_gas_cost_including_floor_data_cost(
    fork: Fork,
    tx_data: Bytes,
    access_list: List[AccessList] | None,
    authorization_list: List[AuthorizationTuple] | None,
    contract_creating_tx: bool,
) -> int:
    """
    Transaction intrinsic gas cost.
    The calculated value takes into account the normal intrinsic gas cost and
    the floor data gas cost if it is greater than the intrinsic gas cost.
    In other words, this is the value that is required for the transaction to
    be valid.
    """
    intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
    # Without `return_cost_deducted_prior_execution`, the calculator returns
    # the full validity threshold (including the floor data cost).
    return intrinsic_gas_cost_calculator(
        calldata=tx_data,
        contract_creation=contract_creating_tx,
        access_list=access_list,
        authorization_list_or_count=authorization_list,
    )
@pytest.fixture
def tx_floor_data_cost(
    fork: Fork,
    tx_data: Bytes,
) -> int:
    """Floor data cost for the given transaction data (EIP-7623)."""
    fork_data_floor_cost_calculator = fork.transaction_data_floor_cost_calculator()
    return fork_data_floor_cost_calculator(data=tx_data)
@pytest.fixture
def tx_gas_limit(
    tx_intrinsic_gas_cost_including_floor_data_cost: int,
    tx_gas_delta: int,
) -> int:
    """
    Gas limit for the transaction.
    The gas delta is added to the intrinsic gas cost to generate different test
    scenarios.
    """
    # A negative delta produces an intrinsically invalid transaction.
    return tx_intrinsic_gas_cost_including_floor_data_cost + tx_gas_delta
@pytest.fixture
def tx_error(tx_gas_delta: int, data_test_type: DataTestType) -> TransactionException | None:
    """Transaction error, only expected if the gas delta is negative."""
    if tx_gas_delta >= 0:
        return None
    # When the floor cost is the binding constraint, the insufficient-gas
    # error is the floor-specific one.
    if data_test_type == DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS:
        return TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST
    return TransactionException.INTRINSIC_GAS_TOO_LOW
@pytest.fixture
def tx(
    sender: EOA,
    ty: int,
    tx_data: Bytes,
    to: Address | None,
    protected: bool,
    access_list: List[AccessList] | None,
    authorization_list: List[AuthorizationTuple] | None,
    blob_versioned_hashes: Sequence[Hash] | None,
    tx_gas_limit: int,
    tx_error: TransactionException | None,
) -> Transaction:
    """Create the transaction used in each test."""
    # The expected `error` (if any) is embedded in the transaction so the
    # filler can assert the validity outcome.
    return Transaction(
        ty=ty,
        sender=sender,
        data=tx_data,
        to=to,
        protected=protected,
        access_list=access_list,
        authorization_list=authorization_list,
        gas_limit=tx_gas_limit,
        blob_versioned_hashes=blob_versioned_hashes,
        error=tx_error,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py | tests/prague/eip7623_increase_calldata_cost/test_transaction_validity.py | """
Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
"""
import pytest
from ethereum_test_forks import Prague
from ethereum_test_tools import (
AccessList,
Address,
Alloc,
Hash,
StateTestFiller,
Transaction,
add_kzg_version,
)
from ethereum_test_tools import Opcodes as Op
from ...cancun.eip4844_blobs.spec import Spec as EIP_4844_Spec
from .helpers import DataTestType
from .spec import ref_spec_7623
REFERENCE_SPEC_GIT_PATH = ref_spec_7623.git_path
REFERENCE_SPEC_VERSION = ref_spec_7623.version

# Fork at which EIP-7623 activates.
ENABLE_FORK = Prague
pytestmark = [pytest.mark.valid_from(str(ENABLE_FORK))]

# All tests in this file are parametrized with the following parameters:
pytestmark += [
    pytest.mark.parametrize(
        "tx_gas_delta",
        [
            # Test the case where the included gas is greater than the
            # intrinsic gas to verify that the data floor does not consume more
            # gas than it should.
            pytest.param(1, id="extra_gas"),
            pytest.param(0, id="exact_gas"),
            pytest.param(-1, id="insufficient_gas", marks=pytest.mark.exception_test),
        ],
    ),
    pytest.mark.parametrize(
        "data_test_type",
        [
            pytest.param(
                DataTestType.FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS,
                id="floor_gas_less_than_or_equal_to_intrinsic_gas",
            ),
            pytest.param(
                DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS,
                id="floor_gas_greater_than_intrinsic_gas",
            ),
        ],
    ),
]
@pytest.mark.parametrize(
    "protected",
    [
        pytest.param(True, id="protected"),
        pytest.param(False, id="unprotected"),
    ],
)
@pytest.mark.parametrize(
    "ty",
    [pytest.param(0, id="type_0")],
)
@pytest.mark.parametrize(
    "to",
    [
        pytest.param("eoa", id="to_eoa"),
        pytest.param(None, id="contract_creating"),
        pytest.param(Op.STOP, id=""),
    ],
    indirect=True,
)
def test_transaction_validity_type_0(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
) -> None:
    """
    Test transaction validity for type-0 transactions (no access lists),
    including contract-creating transactions.
    """
    # The validity outcome (and any expected error) is carried by the `tx`
    # fixture's `error` field; no post-state assertions are needed.
    state_test(
        pre=pre,
        post={},
        tx=tx,
    )
@pytest.mark.parametrize(
    "to",
    [
        pytest.param("eoa", id="to_eoa"),
        pytest.param(None, id="contract_creating"),
        pytest.param(Op.STOP, id=""),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "access_list",
    [
        pytest.param(
            None,
            id="no_access_list",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[])],
            id="single_access_list_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            id="single_access_list_single_storage_key",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(k) for k in range(10)])],
            id="single_access_list_multiple_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[]) for a in range(10)],
            id="multiple_access_lists_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[Hash(0)]) for a in range(10)],
            id="multiple_access_lists_single_storage_key",
        ),
        pytest.param(
            [
                AccessList(address=Address(a), storage_keys=[Hash(k) for k in range(10)])
                for a in range(10)
            ],
            id="multiple_access_lists_multiple_storage_keys",
        ),
    ],
)
@pytest.mark.parametrize(
    "ty",
    [pytest.param(1, id="type_1"), pytest.param(2, id="type_2")],
)
def test_transaction_validity_type_1_type_2(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
) -> None:
    """
    Test transaction validity for transactions with access lists and contract
    creation.
    """
    # Access lists change the intrinsic gas cost (but not the floor data
    # cost), so each shape probes a different threshold.
    state_test(
        pre=pre,
        post={},
        tx=tx,
    )
@pytest.mark.parametrize(
    "access_list",
    [
        pytest.param(
            None,
            id="no_access_list",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[])],
            id="single_access_list_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            id="single_access_list_single_storage_key",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(k) for k in range(10)])],
            id="single_access_list_multiple_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[]) for a in range(10)],
            id="multiple_access_lists_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[Hash(0)]) for a in range(10)],
            id="multiple_access_lists_single_storage_key",
        ),
        pytest.param(
            [
                AccessList(address=Address(a), storage_keys=[Hash(k) for k in range(10)])
                for a in range(10)
            ],
            id="multiple_access_lists_multiple_storage_keys",
        ),
    ],
)
@pytest.mark.parametrize(
    # Blobs don't really have an effect because the blob gas is not
    # considered in the intrinsic gas calculation, but we still test it to make
    # sure that the transaction is correctly processed.
    "blob_versioned_hashes",
    [
        pytest.param(
            add_kzg_version(
                [Hash(x) for x in range(1)],
                EIP_4844_Spec.BLOB_COMMITMENT_VERSION_KZG,
            ),
            id="single_blob",
        ),
        pytest.param(
            add_kzg_version(
                [Hash(x) for x in range(6)],
                EIP_4844_Spec.BLOB_COMMITMENT_VERSION_KZG,
            ),
            id="multiple_blobs",
        ),
    ],
)
@pytest.mark.parametrize(
    "ty",
    [pytest.param(3, id="type_3")],
)
def test_transaction_validity_type_3(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
) -> None:
    """
    Test transaction validity for transactions with access lists, blobs, but no
    contract creation.
    """
    state_test(
        pre=pre,
        post={},
        tx=tx,
    )
@pytest.mark.parametrize(
    "access_list",
    [
        pytest.param(
            None,
            id="no_access_list",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[])],
            id="single_access_list_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            id="single_access_list_single_storage_key",
        ),
        pytest.param(
            [AccessList(address=Address(1), storage_keys=[Hash(k) for k in range(10)])],
            id="single_access_list_multiple_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[]) for a in range(10)],
            id="multiple_access_lists_no_storage_keys",
        ),
        pytest.param(
            [AccessList(address=Address(a), storage_keys=[Hash(0)]) for a in range(10)],
            id="multiple_access_lists_single_storage_key",
        ),
        pytest.param(
            [
                AccessList(address=Address(a), storage_keys=[Hash(k) for k in range(10)])
                for a in range(10)
            ],
            id="multiple_access_lists_multiple_storage_keys",
        ),
    ],
)
@pytest.mark.parametrize(
    # Indirect: the `authorization_list` fixture converts these addresses into
    # signed authorization tuples.
    "authorization_list",
    [
        pytest.param(
            [Address(1)],
            id="single_authorization",
        ),
        pytest.param(
            [Address(i + 1) for i in range(10)],
            id="multiple_authorizations",
        ),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "ty",
    [pytest.param(4, id="type_4")],
)
def test_transaction_validity_type_4(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
) -> None:
    """
    Test transaction validity for transactions with access lists, authorization
    lists, but no contract creation.
    """
    state_test(
        pre=pre,
        post={},
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/helpers.py | tests/prague/eip7623_increase_calldata_cost/helpers.py | """Helpers for testing EIP-7623."""
from enum import Enum, auto
from typing import Callable
class DataTestType(Enum):
    """Enum for the different types of data tests."""

    # Data is small enough that the regular intrinsic gas cost dominates.
    FLOOR_GAS_COST_LESS_THAN_OR_EQUAL_TO_INTRINSIC_GAS = auto()
    # Data carries enough tokens that the floor data gas cost dominates.
    FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS = auto()
def find_floor_cost_threshold(
    floor_data_gas_cost_calculator: Callable[[int], int],
    intrinsic_gas_cost_calculator: Callable[[int], int],
) -> int:
    """
    Return the largest token count for which the floor data gas cost does not
    exceed the intrinsic gas cost, found via exponential growth followed by a
    binary search over the two calculators.
    """
    # Grow an upper bound exponentially until the floor cost catches up with
    # the intrinsic cost.
    upper = 1000
    while floor_data_gas_cost_calculator(upper) < intrinsic_gas_cost_calculator(upper):
        upper *= 2
    # Binary search for the smallest token count where the floor cost is at
    # least the intrinsic cost.
    lower = 0
    while lower < upper:
        midpoint = (lower + upper) // 2
        if floor_data_gas_cost_calculator(midpoint) < intrinsic_gas_cost_calculator(midpoint):
            lower = midpoint + 1
        else:
            upper = midpoint
    threshold = lower
    # Step back one token if the floor strictly exceeds the intrinsic cost at
    # the found point, so the returned value never triggers the floor.
    if floor_data_gas_cost_calculator(threshold) > intrinsic_gas_cost_calculator(threshold):
        threshold -= 1
    # Verify that increasing the tokens by one would always trigger the floor
    # gas cost.
    assert (
        floor_data_gas_cost_calculator(threshold) <= intrinsic_gas_cost_calculator(threshold)
    ) and floor_data_gas_cost_calculator(threshold + 1) > intrinsic_gas_cost_calculator(
        threshold + 1
    ), "invalid case"
    return threshold
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/__init__.py | tests/prague/eip7623_increase_calldata_cost/__init__.py | """
Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/test_eip_mainnet.py | tests/prague/eip7623_increase_calldata_cost/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
""" # noqa: E501
import pytest
from ethereum_test_tools import (
AccessList,
Address,
Alloc,
Hash,
StateTestFiller,
Transaction,
add_kzg_version,
)
from ...cancun.eip4844_blobs.spec import Spec as EIP_4844_Spec
from .helpers import DataTestType
from .spec import ref_spec_7623
REFERENCE_SPEC_GIT_PATH = ref_spec_7623.git_path
REFERENCE_SPEC_VERSION = ref_spec_7623.version
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
@pytest.mark.parametrize(
    "ty,protected,access_list,blob_versioned_hashes,authorization_list",
    [
        # One representative configuration per transaction type.
        pytest.param(0, True, None, None, None, id="type_0_protected"),
        pytest.param(0, False, None, None, None, id="type_0_unprotected"),
        pytest.param(
            1,
            True,
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            None,
            None,
            id="type_1",
        ),
        pytest.param(
            2,
            True,
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            None,
            None,
            id="type_2",
        ),
        pytest.param(
            3,
            True,
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            add_kzg_version(
                [Hash(x) for x in range(1)],
                EIP_4844_Spec.BLOB_COMMITMENT_VERSION_KZG,
            ),
            None,
            id="type_3",
            marks=pytest.mark.execute(
                pytest.mark.skip(reason="Blob txs not supported by execute")
            ),
        ),
        pytest.param(
            4,
            True,
            [AccessList(address=Address(1), storage_keys=[Hash(0)])],
            None,
            [Address(1)],
            id="type_4",
        ),
    ],
    indirect=["authorization_list"],
)
@pytest.mark.parametrize(
    "tx_gas_delta",
    [
        pytest.param(0, id=""),
    ],
)
@pytest.mark.parametrize(
    "to",
    [
        pytest.param("eoa", id=""),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "data_test_type",
    [
        pytest.param(
            DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS,
            id="",
        ),
    ],
)
def test_eip_7623(
    state_test: StateTestFiller,
    pre: Alloc,
    tx: Transaction,
) -> None:
    """
    Test transaction validity for transactions without access lists
    and contract creation.
    """
    state_test(
        pre=pre,
        post={},
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py | tests/prague/eip7623_increase_calldata_cost/test_execution_gas.py | """
Test [EIP-7623: Increase calldata cost](https://eips.ethereum.org/EIPS/eip-7623).
"""
from typing import List
import pytest
from ethereum_test_forks import Fork, Prague
from ethereum_test_tools import (
AccessList,
Address,
Alloc,
AuthorizationTuple,
Bytes,
StateTestFiller,
Transaction,
TransactionReceipt,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import DataTestType
from .spec import ref_spec_7623
REFERENCE_SPEC_GIT_PATH = ref_spec_7623.git_path
REFERENCE_SPEC_VERSION = ref_spec_7623.version
ENABLE_FORK = Prague
pytestmark = [pytest.mark.valid_from(str(ENABLE_FORK))]
@pytest.fixture
def data_test_type() -> DataTestType:
"""Return data test type."""
return DataTestType.FLOOR_GAS_COST_GREATER_THAN_INTRINSIC_GAS
class TestGasConsumption:
"""Test gas consumption with EIP-7623 active."""
@pytest.fixture
def intrinsic_gas_data_floor_minimum_delta(self) -> int:
"""
Force a minimum delta in order to have some gas to execute the invalid
opcode.
"""
return 50_000
@pytest.fixture
def to(
self,
pre: Alloc,
) -> Address | None:
"""
Return a contract that consumes all gas when executed by calling an
invalid opcode.
"""
return pre.deploy_contract(Op.INVALID)
@pytest.mark.parametrize(
"ty,protected,authorization_list",
[
pytest.param(0, False, None, id="type_0_unprotected"),
pytest.param(0, True, None, id="type_0_protected"),
pytest.param(1, True, None, id="type_1"),
pytest.param(2, True, None, id="type_2"),
pytest.param(3, True, None, id="type_3"),
pytest.param(4, True, [Address(1)], id="type_4"),
],
indirect=["authorization_list"],
)
@pytest.mark.parametrize(
"tx_gas_delta",
[
# Test with exact gas and extra gas.
pytest.param(1, id="extra_gas"),
pytest.param(0, id="exact_gas"),
],
)
def test_full_gas_consumption(
self,
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
) -> None:
"""
Test executing a transaction that fully consumes its execution gas
allocation.
"""
tx.expected_receipt = TransactionReceipt(gas_used=tx.gas_limit)
state_test(
pre=pre,
post={},
tx=tx,
)
class TestGasConsumptionBelowDataFloor:
"""Test gas consumption barely below the floor data cost (1 gas below)."""
@pytest.fixture
def contract_creating_tx(self) -> bool:
"""Use a constant in order to avoid circular fixture dependencies."""
return False
@pytest.fixture
def to(
self,
pre: Alloc,
fork: Fork,
tx_data: Bytes,
access_list: List[AccessList] | None,
authorization_list: List[AuthorizationTuple] | None,
tx_floor_data_cost: int,
) -> Address | None:
"""
Return a contract that consumes almost all the gas before reaching the
floor data cost.
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
execution_gas = tx_floor_data_cost - intrinsic_gas_cost_calculator(
calldata=tx_data,
contract_creation=False,
access_list=access_list,
authorization_list_or_count=authorization_list,
return_cost_deducted_prior_execution=True,
)
assert execution_gas > 0
return pre.deploy_contract((Op.JUMPDEST * (execution_gas - 1)) + Op.STOP)
@pytest.mark.parametrize(
"ty,protected,authorization_list",
[
pytest.param(0, False, None, id="type_0_unprotected"),
pytest.param(0, True, None, id="type_0_protected"),
pytest.param(1, True, None, id="type_1"),
pytest.param(2, True, None, id="type_2"),
pytest.param(3, True, None, id="type_3"),
pytest.param(4, True, [Address(1)], id="type_4"),
],
indirect=["authorization_list"],
)
@pytest.mark.parametrize(
"tx_gas_delta",
[
# Test with exact gas and extra gas, to verify that the refund is
# correctly applied to the full consumed execution gas.
pytest.param(0, id="exact_gas"),
],
)
def test_gas_consumption_below_data_floor(
self,
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
tx_floor_data_cost: int,
) -> None:
"""
Test executing a transaction that almost consumes the floor data cost.
"""
tx.expected_receipt = TransactionReceipt(gas_used=tx_floor_data_cost)
state_test(
pre=pre,
post={},
tx=tx,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py | tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests.py | """
Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
"""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Address,
Alloc,
Block,
BlockchainTestFiller,
BlockException,
Environment,
Header,
Macros,
Requests,
TestAddress,
TestAddress2,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import (
WithdrawalRequest,
WithdrawalRequestContract,
WithdrawalRequestInteractionBase,
WithdrawalRequestTransaction,
get_n_fee_increment_blocks,
)
from .spec import Spec, ref_spec_7002
REFERENCE_SPEC_GIT_PATH = ref_spec_7002.git_path
REFERENCE_SPEC_VERSION = ref_spec_7002.version
pytestmark = pytest.mark.valid_from("Prague")
@pytest.mark.parametrize(
"blocks_withdrawal_requests",
[
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_single_withdrawal_request_from_eoa",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=0,
valid=False,
)
],
),
],
],
id="single_block_single_withdrawal_request_from_eoa_insufficient_fee",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
calldata_modifier=lambda x: x[:-1],
valid=False,
)
],
),
],
],
id="single_block_single_withdrawal_request_from_eoa_input_too_short",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
calldata_modifier=lambda x: x + b"\x00",
valid=False,
)
],
),
],
],
id="single_block_single_withdrawal_request_from_eoa_input_too_long",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=Spec.MAX_AMOUNT - 1,
fee=Spec.get_fee(0),
),
],
),
],
],
id="single_block_multiple_withdrawal_request_from_same_eoa",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
)
],
),
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x02,
amount=Spec.MAX_AMOUNT - 1,
fee=Spec.get_fee(0),
)
],
),
],
],
id="single_block_multiple_withdrawal_request_from_different_eoa",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=0 if i % 2 == 0 else Spec.MAX_AMOUNT,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
)
],
],
id="single_block_max_withdrawal_requests_from_eoa",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=0,
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=Spec.MAX_AMOUNT - 1,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_multiple_withdrawal_request_first_reverts",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=Spec.MAX_AMOUNT - 1,
fee=0,
),
]
),
],
],
id="single_block_multiple_withdrawal_request_last_reverts",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
# Value obtained from trace minus one
gas_limit=114_247 - 1,
valid=False,
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_multiple_withdrawal_request_first_oog",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
fee=Spec.get_fee(0),
# Value obtained from trace minus one
gas_limit=80_047 - 1,
valid=False,
),
]
),
],
],
id="single_block_multiple_withdrawal_request_last_oog",
),
pytest.param(
[
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=0 if i % 2 == 0 else Spec.MAX_AMOUNT,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK * 2)
]
)
],
],
id="multiple_block_above_max_withdrawal_requests_from_eoa",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
]
),
],
],
id="single_block_single_withdrawal_request_from_contract",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
],
call_depth=3,
),
],
],
id="single_block_single_withdrawal_request_from_contract_call_depth_3",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
],
call_depth=264,
tx_gas_limit=16_777_216,
),
],
],
id="single_block_single_withdrawal_request_from_contract_call_depth_high",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=1,
amount=Spec.MAX_AMOUNT,
fee=0,
)
]
+ [
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
)
for i in range(1, Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_first_reverts",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK - 1)
]
+ [
WithdrawalRequest(
validator_pubkey=Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK,
amount=(
Spec.MAX_AMOUNT - 1
if (Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK - 1) % 2 == 0
else 0
),
fee=0,
)
],
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_last_reverts",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=1,
amount=Spec.MAX_AMOUNT - 1,
gas_limit=100,
fee=Spec.get_fee(0),
valid=False,
)
]
+ [
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
gas_limit=1_000_000,
fee=Spec.get_fee(0),
valid=True,
)
for i in range(1, Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_first_oog",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
gas_limit=1_000_000,
valid=True,
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
]
+ [
WithdrawalRequest(
validator_pubkey=Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK,
amount=Spec.MAX_AMOUNT - 1,
gas_limit=100,
fee=Spec.get_fee(0),
valid=False,
)
],
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_last_oog",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
valid=False,
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
extra_code=Op.REVERT(0, 0),
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_caller_reverts",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=i + 1,
amount=Spec.MAX_AMOUNT - 1 if i % 2 == 0 else 0,
fee=Spec.get_fee(0),
valid=False,
)
for i in range(Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK)
],
extra_code=Macros.OOG(),
),
],
],
id="single_block_multiple_withdrawal_requests_from_contract_caller_oog",
),
pytest.param(
# Test the first 50 fee increments
get_n_fee_increment_blocks(50),
id="multiple_block_fee_increments",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
),
],
],
id="single_block_single_withdrawal_request_delegatecall_staticcall_callcode",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
call_depth=3,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
call_depth=3,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
call_depth=3,
),
],
],
id="single_block_single_withdrawal_request_delegatecall_staticcall_callcode_call_depth_3",
),
pytest.param(
[
[
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.DELEGATECALL,
call_depth=1024,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.STATICCALL,
call_depth=1024,
),
WithdrawalRequestContract(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
valid=False,
)
],
call_type=Op.CALLCODE,
call_depth=1024,
),
],
],
id="single_block_single_withdrawal_request_delegatecall_staticcall_callcode_call_depth_high",
),
],
)
@pytest.mark.pre_alloc_group(
"withdrawal_requests", reason="Tests standard withdrawal request functionality"
)
def test_withdrawal_requests(
blockchain_test: BlockchainTestFiller,
blocks: List[Block],
pre: Alloc,
) -> None:
"""Test making a withdrawal request to the beacon chain."""
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=blocks,
)
@pytest.mark.parametrize(
"requests,block_body_override_requests,exception",
[
pytest.param(
[],
[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
source_address=Address(0),
),
],
BlockException.INVALID_REQUESTS,
id="no_withdrawals_non_empty_requests_list",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
]
),
],
[],
BlockException.INVALID_REQUESTS,
id="single_withdrawal_request_empty_requests_list",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
]
),
],
[
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
source_address=TestAddress,
)
],
BlockException.INVALID_REQUESTS,
id="single_withdrawal_request_public_key_mismatch",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
)
],
),
],
[
WithdrawalRequest(
validator_pubkey=0x01,
amount=1,
source_address=TestAddress,
)
],
BlockException.INVALID_REQUESTS,
id="single_withdrawal_request_amount_mismatch",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
)
],
),
],
[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
source_address=TestAddress2,
)
],
BlockException.INVALID_REQUESTS,
id="single_withdrawal_request_source_address_mismatch",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
),
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
fee=Spec.get_fee(0),
),
],
),
],
[
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
source_address=TestAddress,
),
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
source_address=TestAddress,
),
],
BlockException.INVALID_REQUESTS,
id="two_withdrawal_requests_out_of_order",
),
pytest.param(
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(0),
)
],
),
],
[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
source_address=TestAddress,
),
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
source_address=TestAddress,
),
],
BlockException.INVALID_REQUESTS,
id="single_withdrawal_requests_duplicate_in_requests_list",
),
],
)
@pytest.mark.exception_test
@pytest.mark.pre_alloc_group(
"withdrawal_requests", reason="Tests standard withdrawal request functionality"
)
def test_withdrawal_requests_negative(
pre: Alloc,
fork: Fork,
blockchain_test: BlockchainTestFiller,
requests: List[WithdrawalRequestInteractionBase],
block_body_override_requests: List[WithdrawalRequest],
exception: BlockException,
) -> None:
"""
Test blocks where the requests list and the actual withdrawal requests that
happened in the block's transactions do not match.
"""
for d in requests:
d.update_pre(pre)
# No previous block so fee is the base
fee = 1
current_block_requests = []
for w in requests:
current_block_requests += w.valid_requests(fee)
included_requests = current_block_requests[: Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK]
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=[
Block(
txs=sum((r.transactions() for r in requests), []),
header_verify=Header(
requests_hash=Requests(
*included_requests,
),
),
requests=(
Requests(
*block_body_override_requests,
).requests_list
if block_body_override_requests is not None
else None
),
exception=exception,
)
],
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/spec.py | tests/prague/eip7002_el_triggerable_withdrawals/spec.py | """
Common procedures to test
[EIP-7002: Execution layer triggerable
withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
"""
from dataclasses import dataclass
from ethereum_test_tools import Address
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7002 = ReferenceSpec("EIPS/eip-7002.md", "695ac757472b9bbbdcbc88a020ba15c1ac782869")
# Constants
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-7002 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7002#configuration.
If the parameter is not currently used within the tests, it is commented
out.
"""
WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS = 0x00000961EF480EB55E80D19AD83579A64C007002
WITHDRAWAL_REQUEST_PREDEPLOY_SENDER = Address(0x8646861A7CF453DDD086874D622B0696DE5B9674)
SYSTEM_ADDRESS = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE
SYSTEM_CALL_GAS_LIMIT = 30_000_000
EXCESS_WITHDRAWAL_REQUESTS_STORAGE_SLOT = 0
WITHDRAWAL_REQUEST_COUNT_STORAGE_SLOT = 1
WITHDRAWAL_REQUEST_QUEUE_HEAD_STORAGE_SLOT = (
2 # Pointer to head of the withdrawal request message queue
)
WITHDRAWAL_REQUEST_QUEUE_TAIL_STORAGE_SLOT = (
3 # Pointer to the tail of the withdrawal request message queue
)
WITHDRAWAL_REQUEST_QUEUE_STORAGE_OFFSET = (
4 # The start memory slot of the in-state withdrawal request message queue
)
MAX_WITHDRAWAL_REQUESTS_PER_BLOCK = (
16 # Maximum number of withdrawal requests that can be de-queued into a block
)
TARGET_WITHDRAWAL_REQUESTS_PER_BLOCK = 2
MIN_WITHDRAWAL_REQUEST_FEE = 1
WITHDRAWAL_REQUEST_FEE_UPDATE_FRACTION = 17
EXCESS_RETURN_GAS_STIPEND = 2300
MAX_AMOUNT = 2**64 - 1
@staticmethod
def fake_exponential(factor: int, numerator: int, denominator: int) -> int:
"""Calculate the withdrawal request fee."""
i = 1
output = 0
numerator_accumulator = factor * denominator
while numerator_accumulator > 0:
output += numerator_accumulator
numerator_accumulator = (numerator_accumulator * numerator) // (denominator * i)
i += 1
return output // denominator
@staticmethod
def get_fee(excess_withdrawal_requests: int) -> int:
"""Calculate the fee for the excess withdrawal requests."""
return Spec.fake_exponential(
Spec.MIN_WITHDRAWAL_REQUEST_FEE,
excess_withdrawal_requests,
Spec.WITHDRAWAL_REQUEST_FEE_UPDATE_FRACTION,
)
@staticmethod
def get_excess_withdrawal_requests(previous_excess: int, count: int) -> int:
"""Calculate the new excess withdrawal requests."""
if previous_excess + count > Spec.TARGET_WITHDRAWAL_REQUESTS_PER_BLOCK:
return previous_excess + count - Spec.TARGET_WITHDRAWAL_REQUESTS_PER_BLOCK
return 0
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py | tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py | """
Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
"""
from typing import List
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Bytecode,
Transaction,
generate_system_contract_error_test,
)
from ethereum_test_tools import Macros as Om
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import Requests
from .helpers import (
WithdrawalRequest,
WithdrawalRequestTransaction,
)
from .spec import Spec as Spec_EIP7002
from .spec import ref_spec_7002
REFERENCE_SPEC_GIT_PATH: str = ref_spec_7002.git_path
REFERENCE_SPEC_VERSION: str = ref_spec_7002.version
pytestmark: pytest.MarkDecorator = pytest.mark.valid_from("Prague")
def withdrawal_list_with_custom_fee(n: int) -> List[WithdrawalRequest]: # noqa: D103
return [
WithdrawalRequest(
validator_pubkey=i + 1,
amount=0,
fee=Spec_EIP7002.get_fee(0),
)
for i in range(n)
]
@pytest.mark.parametrize(
"requests_list",
[
pytest.param(
[],
id="empty_request_list",
),
pytest.param(
[
*withdrawal_list_with_custom_fee(1),
],
id="1_withdrawal_request",
),
pytest.param(
[
*withdrawal_list_with_custom_fee(15),
],
id="15_withdrawal_requests",
),
pytest.param(
[
*withdrawal_list_with_custom_fee(16),
],
id="16_withdrawal_requests",
),
pytest.param(
[
*withdrawal_list_with_custom_fee(17),
],
id="17_withdrawal_requests",
),
pytest.param(
[
*withdrawal_list_with_custom_fee(18),
],
id="18_withdrawal_requests",
),
],
)
@pytest.mark.pre_alloc_group("separate", reason="Deploys custom withdrawal contract bytecode")
def test_extra_withdrawals(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
requests_list: List[WithdrawalRequest],
) -> None:
"""
Test how clients were to behave when more than 16 withdrawals would be
allowed per block.
"""
modified_code: Bytecode = Bytecode()
memory_offset: int = 0
amount_of_requests: int = 0
for withdrawal_request in requests_list:
# update memory_offset with the correct value
withdrawal_request_bytes_amount: int = len(bytes(withdrawal_request))
assert withdrawal_request_bytes_amount == 76, (
"Expected withdrawal request to be of size 76 but got size "
f"{withdrawal_request_bytes_amount}"
)
memory_offset += withdrawal_request_bytes_amount
modified_code += Om.MSTORE(bytes(withdrawal_request), memory_offset)
amount_of_requests += 1
modified_code += Op.RETURN(0, Op.MSIZE())
pre[Spec_EIP7002.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS] = Account(
code=modified_code,
nonce=1,
balance=0,
)
# given a list of withdrawal requests construct a withdrawal request
# transaction
withdrawal_request_transaction = WithdrawalRequestTransaction(requests=requests_list)
# prepare withdrawal senders
withdrawal_request_transaction.update_pre(pre=pre)
# get transaction list
txs: List[Transaction] = withdrawal_request_transaction.transactions()
blockchain_test(
pre=pre,
blocks=[
Block(
txs=txs,
requests_hash=Requests(*requests_list),
),
],
post={},
)
@pytest.mark.parametrize(
"system_contract", [Address(Spec_EIP7002.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS)]
)
@pytest.mark.pre_alloc_group("separate", reason="Deploys custom withdrawal contract bytecode")
@generate_system_contract_error_test( # type: ignore[arg-type]
max_gas_limit=Spec_EIP7002.SYSTEM_CALL_GAS_LIMIT,
)
def test_system_contract_errors() -> None:
"""
Test system contract raising different errors when called by the system
account at the end of the block execution.
To see the list of generated tests, please refer to the
`generate_system_contract_error_test` decorator definition.
"""
pass
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py | tests/prague/eip7002_el_triggerable_withdrawals/test_withdrawal_requests_during_fork.py | """
Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
"""
from os.path import realpath
from pathlib import Path
from typing import List
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Transaction,
)
from .helpers import WithdrawalRequest, WithdrawalRequestTransaction
from .spec import Spec, ref_spec_7002
REFERENCE_SPEC_GIT_PATH = ref_spec_7002.git_path
REFERENCE_SPEC_VERSION = ref_spec_7002.version
pytestmark = pytest.mark.valid_at_transition_to("Prague")
BLOCKS_BEFORE_FORK = 2
@pytest.mark.parametrize(
"blocks_withdrawal_requests",
[
pytest.param(
[
[], # No withdrawal requests, but we deploy the contract
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x01,
amount=0,
fee=Spec.get_fee(10),
# Pre-fork withdrawal request
valid=False,
)
],
),
],
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x02,
amount=0,
fee=Spec.get_fee(10),
# First post-fork withdrawal request, will not
# be included because the inhibitor is cleared
# at the end of the block
valid=False,
)
],
),
],
[
WithdrawalRequestTransaction(
requests=[
WithdrawalRequest(
validator_pubkey=0x03,
amount=0,
fee=Spec.get_fee(0),
# First withdrawal that is valid
valid=True,
)
],
),
],
],
id="one_valid_request_second_block_after_fork",
),
],
)
@pytest.mark.parametrize("timestamp", [15_000 - BLOCKS_BEFORE_FORK], ids=[""])
@pytest.mark.pre_alloc_group(
"separate", reason="Deploys withdrawal system contract at fork transition"
)
def test_withdrawal_requests_during_fork(
blockchain_test: BlockchainTestFiller,
blocks: List[Block],
pre: Alloc,
) -> None:
"""
Test making a withdrawal request to the beacon chain at the time of the
fork.
"""
# We need to delete the deployed contract that comes by default in the pre
# state.
pre[Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS] = Account(
balance=0,
code=bytes(),
nonce=0,
storage={},
)
with open(Path(realpath(__file__)).parent / "contract_deploy_tx.json", mode="r") as f:
deploy_tx = Transaction.model_validate_json(f.read()).with_signature_and_sender()
deployer_address = deploy_tx.sender
assert deployer_address is not None
assert Address(deployer_address) == Spec.WITHDRAWAL_REQUEST_PREDEPLOY_SENDER
tx_gas_price = deploy_tx.gas_price
assert tx_gas_price is not None
deployer_required_balance = deploy_tx.gas_limit * tx_gas_price
pre.fund_address(Spec.WITHDRAWAL_REQUEST_PREDEPLOY_SENDER, deployer_required_balance)
# Append the deployment transaction to the first block
blocks[0].txs.append(deploy_tx)
blockchain_test(
genesis_environment=Environment(),
pre=pre,
post={},
blocks=blocks,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/conftest.py | tests/prague/eip7002_el_triggerable_withdrawals/conftest.py | """Fixtures for the EIP-7002 deposit tests."""
from itertools import zip_longest
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import Alloc, Block, Header, Requests
from .helpers import WithdrawalRequest, WithdrawalRequestInteractionBase
from .spec import Spec
@pytest.fixture
def update_pre(
pre: Alloc,
blocks_withdrawal_requests: List[List[WithdrawalRequestInteractionBase]],
) -> None:
"""
Init state of the accounts. Every deposit transaction defines their own
pre-state requirements, and this fixture aggregates them all.
"""
for requests in blocks_withdrawal_requests:
for r in requests:
r.update_pre(pre)
@pytest.fixture
def included_requests(
update_pre: None, # Fixture is used for its side effects
blocks_withdrawal_requests: List[List[WithdrawalRequestInteractionBase]],
) -> List[List[WithdrawalRequest]]:
"""
Return the list of withdrawal requests that should be included in each
block.
"""
excess_withdrawal_requests = 0
carry_over_requests: List[WithdrawalRequest] = []
per_block_included_requests: List[List[WithdrawalRequest]] = []
for block_withdrawal_requests in blocks_withdrawal_requests:
# Get fee for the current block
current_minimum_fee = Spec.get_fee(excess_withdrawal_requests)
# With the fee, get the valid withdrawal requests for the current block
current_block_requests = []
for w in block_withdrawal_requests:
current_block_requests += w.valid_requests(current_minimum_fee)
# Get the withdrawal requests that should be included in the block
pending_requests = carry_over_requests + current_block_requests
per_block_included_requests.append(
pending_requests[: Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK]
)
carry_over_requests = pending_requests[Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK :]
# Update the excess withdrawal requests
excess_withdrawal_requests = Spec.get_excess_withdrawal_requests(
excess_withdrawal_requests,
len(current_block_requests),
)
while carry_over_requests:
# Keep adding blocks until all withdrawal requests are included
per_block_included_requests.append(
carry_over_requests[: Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK]
)
carry_over_requests = carry_over_requests[Spec.MAX_WITHDRAWAL_REQUESTS_PER_BLOCK :]
return per_block_included_requests
@pytest.fixture
def timestamp() -> int:
"""Return the timestamp for the first block."""
return 1
@pytest.fixture
def blocks(
fork: Fork,
update_pre: None, # Fixture is used for its side effects
blocks_withdrawal_requests: List[List[WithdrawalRequestInteractionBase]],
included_requests: List[List[WithdrawalRequest]],
timestamp: int,
) -> List[Block]:
"""Return the list of blocks that should be included in the test."""
blocks: List[Block] = []
for block_requests, block_included_requests in zip_longest( # type: ignore
blocks_withdrawal_requests,
included_requests,
fillvalue=[],
):
header_verify: Header | None = None
if fork.header_requests_required(
block_number=len(blocks) + 1,
timestamp=timestamp,
):
header_verify = Header(
requests_hash=Requests(
*block_included_requests,
)
)
else:
assert not block_included_requests
blocks.append(
Block(
txs=sum((r.transactions() for r in block_requests), []),
header_verify=header_verify,
timestamp=timestamp,
)
)
timestamp += 1
return blocks + [
# Add an empty block at the end to verify that no more withdrawal
# requests are included
Block(
header_verify=Header(requests_hash=Requests()),
timestamp=timestamp,
)
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/helpers.py | tests/prague/eip7002_el_triggerable_withdrawals/helpers.py | """Helpers for the EIP-7002 deposit tests."""
from dataclasses import dataclass, field
from functools import cached_property
from itertools import count
from typing import Callable, ClassVar, List
from ethereum_test_tools import EOA, Address, Alloc, Bytecode, Transaction
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools import WithdrawalRequest as WithdrawalRequestBase
from .spec import Spec
class WithdrawalRequest(WithdrawalRequestBase):
    """Class used to describe a withdrawal request in a test."""
    fee: int = 0
    """
    Fee to be paid to the system contract for the withdrawal request. This is
    different from `amount` which is the amount of gwei to be withdrawn on the
    beacon chain.
    """
    valid: bool = True
    """Whether the withdrawal request is valid or not."""
    gas_limit: int = 1_000_000
    """Gas limit for the call."""
    calldata_modifier: Callable[[bytes], bytes] = lambda x: x
    """Calldata modifier function."""
    interaction_contract_address: ClassVar[Address] = Address(
        Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS
    )
    @property
    def value(self) -> int:
        """
        Return the value of the call to the withdrawal request contract, equal
        to the fee to be paid.
        """
        return self.fee
    @cached_property
    def calldata(self) -> bytes:
        """
        Return the calldata needed to call the withdrawal request contract and
        make the withdrawal.

        Layout: validator public key followed by the amount as an 8-byte
        big-endian integer; the configured modifier (identity by default) is
        applied last.
        """
        return self.calldata_modifier(
            self.validator_pubkey + self.amount.to_bytes(8, byteorder="big")
        )
    def with_source_address(self, source_address: Address) -> "WithdrawalRequest":
        """
        Return a new instance of the withdrawal request with the source address
        set.
        """
        return self.copy(source_address=source_address)
@dataclass(kw_only=True)
class WithdrawalRequestInteractionBase:
    """Base class for all types of withdrawal transactions we want to test."""
    sender_balance: int = 1_000_000_000_000_000_000
    """Balance of the account that sends the transaction."""
    sender_account: EOA | None = None
    """Account that will send the transaction."""
    requests: List[WithdrawalRequest]
    """Withdrawal request to be included in the block."""
    def transactions(self) -> List[Transaction]:
        """Return the transactions for the withdrawal requests (subclass responsibility)."""
        raise NotImplementedError
    def update_pre(self, pre: Alloc) -> None:
        """Set up the pre-state required by this interaction (subclass responsibility)."""
        raise NotImplementedError
    def valid_requests(self, current_minimum_fee: int) -> List[WithdrawalRequest]:
        """
        Return the list of withdrawal requests that should be valid in the
        block.
        """
        raise NotImplementedError
@dataclass(kw_only=True)
class WithdrawalRequestTransaction(WithdrawalRequestInteractionBase):
    """
    Class used to describe a withdrawal request originated from an externally
    owned account.
    """
    def transactions(self) -> List[Transaction]:
        """Return one transaction per request, sent directly to the system contract."""
        assert self.sender_account is not None, "Sender account not initialized"
        return [
            Transaction(
                gas_limit=request.gas_limit,
                gas_price=1_000_000_000,
                to=request.interaction_contract_address,
                value=request.value,
                data=request.calldata,
                sender=self.sender_account,
            )
            for request in self.requests
        ]
    def update_pre(self, pre: Alloc) -> None:
        """Set up the pre-state: fund the EOA that sends the requests."""
        self.sender_account = pre.fund_eoa(self.sender_balance)
    def valid_requests(self, current_minimum_fee: int) -> List[WithdrawalRequest]:
        """Return the list of withdrawal requests that are valid."""
        assert self.sender_account is not None, "Sender account not initialized"
        # A request counts only if flagged valid and its fee covers the
        # current minimum fee.
        return [
            request.with_source_address(self.sender_account)
            for request in self.requests
            if request.valid and request.fee >= current_minimum_fee
        ]
@dataclass(kw_only=True)
class WithdrawalRequestContract(WithdrawalRequestInteractionBase):
    """Class used to describe a withdrawal originated from a contract."""
    tx_gas_limit: int = 1_000_000
    """Gas limit for the transaction."""
    contract_balance: int = 1_000_000_000_000_000_000
    """
    Balance of the contract that will make the call to the pre-deploy contract.
    """
    contract_address: Address | None = None
    """
    Address of the contract that will make the call to the pre-deploy contract.
    """
    entry_address: Address | None = None
    """Address to send the transaction to."""
    call_type: Op = field(default_factory=lambda: Op.CALL)
    """Type of call to be used to make the withdrawal request."""
    call_depth: int = 2
    """Frame depth of the pre-deploy contract when it executes the call."""
    extra_code: Bytecode = field(default_factory=Bytecode)
    """Extra code to be added to the contract code."""
    @property
    def contract_code(self) -> Bytecode:
        """Contract code used by the relay contract."""
        code = Bytecode()
        current_offset = 0
        for r in self.requests:
            # CALL and CALLCODE take a value argument; the other call types
            # do not.
            value_arg = [r.value] if self.call_type in (Op.CALL, Op.CALLCODE) else []
            code += Op.CALLDATACOPY(0, current_offset, len(r.calldata)) + Op.POP(
                self.call_type(
                    # A gas limit of -1 means forward all remaining gas.
                    Op.GAS if r.gas_limit == -1 else r.gas_limit,
                    r.interaction_contract_address,
                    *value_arg,
                    0,
                    len(r.calldata),
                    0,
                    0,
                )
            )
            current_offset += len(r.calldata)
        return code + self.extra_code
    def transactions(self) -> List[Transaction]:
        """Return a single transaction carrying the concatenated request calldata."""
        assert self.entry_address is not None, "Entry address not initialized"
        return [
            Transaction(
                gas_limit=self.tx_gas_limit,
                gas_price=1_000_000_000,
                to=self.entry_address,
                value=0,
                data=b"".join(r.calldata for r in self.requests),
                sender=self.sender_account,
            )
        ]
    def update_pre(self, pre: Alloc) -> None:
        """Set up the pre-state: fund the sender and deploy the relay contract(s)."""
        self.sender_account = pre.fund_eoa(self.sender_balance)
        self.contract_address = pre.deploy_contract(
            code=self.contract_code, balance=self.contract_balance
        )
        self.entry_address = self.contract_address
        if self.call_depth > 2:
            # Insert call_depth - 2 forwarding proxies in front of the relay
            # contract so it executes at the requested frame depth.
            for _ in range(1, self.call_depth - 1):
                self.entry_address = pre.deploy_contract(
                    code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE)
                    + Op.POP(
                        Op.CALL(
                            Op.GAS,
                            self.entry_address,
                            0,
                            0,
                            Op.CALLDATASIZE,
                            0,
                            0,
                        )
                    )
                )
    def valid_requests(self, current_minimum_fee: int) -> List[WithdrawalRequest]:
        """Return the list of withdrawal requests that are valid."""
        assert self.contract_address is not None, "Contract address not initialized"
        # `r.value` equals `r.fee` (see WithdrawalRequest.value).
        return [
            r.with_source_address(self.contract_address)
            for r in self.requests
            if r.valid and r.value >= current_minimum_fee
        ]
def get_n_fee_increments(n: int) -> List[int]:
    """
    Get the first N excess withdrawal request counts that increase the fee.

    Iterates over increasing excess-request counts and records each count at
    which `Spec.get_fee` returns a strictly higher fee than any previously
    seen one, stopping once `n` increment points have been collected.

    Args:
        n: Number of fee-increment points to collect.

    Returns:
        The list of excess withdrawal request counts, one per fee increment.

    """
    excess_withdrawal_requests_counts = []
    last_fee = 1
    for i in count(0):
        # Compute the fee once per iteration (the original computed it twice).
        fee = Spec.get_fee(i)
        if fee > last_fee:
            excess_withdrawal_requests_counts.append(i)
            last_fee = fee
        # Check outside the branch so n == 0 still terminates immediately.
        if len(excess_withdrawal_requests_counts) == n:
            break
    return excess_withdrawal_requests_counts
def get_n_fee_increment_blocks(n: int) -> List[List[WithdrawalRequestContract]]:
    """
    Return N blocks that should be included in the test such that each
    subsequent block has an increasing fee for the withdrawal requests.
    This is done by calculating the number of withdrawals required to reach the
    next fee increment and creating a block with that number of withdrawal
    requests plus the number of withdrawals required to reach the target.
    """
    blocks = []
    previous_excess = 0
    withdrawal_index = 0
    previous_fee = 0
    for required_excess_withdrawals in get_n_fee_increments(n):
        # Requests needed this block: the per-block target plus however many
        # are required to move the excess from its previous value up to the
        # next fee-increment threshold.
        withdrawals_required = (
            required_excess_withdrawals
            + Spec.TARGET_WITHDRAWAL_REQUESTS_PER_BLOCK
            - previous_excess
        )
        fee = Spec.get_fee(previous_excess)
        # The fee must be strictly increasing block over block.
        assert fee > previous_fee
        blocks.append(
            [
                WithdrawalRequestContract(
                    requests=[
                        WithdrawalRequest(
                            # Globally unique pubkey per request across blocks.
                            validator_pubkey=i,
                            amount=0,
                            fee=fee,
                        )
                        for i in range(withdrawal_index, withdrawal_index + withdrawals_required)
                    ],
                )
            ],
        )
        previous_fee = fee
        withdrawal_index += withdrawals_required
        previous_excess = required_excess_withdrawals
    return blocks
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/__init__.py | tests/prague/eip7002_el_triggerable_withdrawals/__init__.py | """Cross-client EIP-7002 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py | tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py | """
Tests [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
"""
from os.path import realpath
from pathlib import Path
from typing import Any, Generator
import pytest
from ethereum_test_forks import Fork, Prague
from ethereum_test_tools import (
Address,
Alloc,
Block,
Header,
Requests,
Transaction,
generate_system_contract_deploy_test,
)
from .helpers import WithdrawalRequest
from .spec import Spec, ref_spec_7002
REFERENCE_SPEC_GIT_PATH = ref_spec_7002.git_path
REFERENCE_SPEC_VERSION = ref_spec_7002.version
@pytest.mark.pre_alloc_group(
    "separate", reason="Deploys withdrawal system contract at hardcoded predeploy address"
)
@generate_system_contract_deploy_test(
    fork=Prague,
    tx_json_path=Path(realpath(__file__)).parent / "contract_deploy_tx.json",
    expected_deploy_address=Address(Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS),
    fail_on_empty_code=True,
)
def test_system_contract_deployment(
    *,
    fork: Fork,
    pre: Alloc,
    **kwargs: Any,
) -> Generator[Block, None, None]:
    """Verify calling the withdrawals system contract after deployment."""
    sender = pre.fund_eoa()
    withdrawal_request = WithdrawalRequest(
        validator_pubkey=0x01,
        amount=1,
        fee=Spec.get_fee(0),
        source_address=sender,
    )
    # Top up the sender so it can also pay the request fee.
    pre.fund_address(sender, withdrawal_request.value)
    intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
    test_transaction_gas = intrinsic_gas_calculator(calldata=withdrawal_request.calldata)
    test_transaction = Transaction(
        data=withdrawal_request.calldata,
        gas_limit=test_transaction_gas * 10,  # generous headroom over the intrinsic cost
        to=Spec.WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS,
        sender=sender,
        value=withdrawal_request.value,
    )
    # The block header must commit to the single withdrawal request.
    yield Block(
        txs=[test_transaction],
        header=Header(
            requests_hash=Requests(withdrawal_request),
        ),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7002_el_triggerable_withdrawals/test_eip_mainnet.py | tests/prague/eip7002_el_triggerable_withdrawals/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of [EIP-7002: Execution layer triggerable withdrawals](https://eips.ethereum.org/EIPS/eip-7002).
""" # noqa: E501
from typing import List
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
)
from .helpers import WithdrawalRequest, WithdrawalRequestTransaction
from .spec import Spec, ref_spec_7002
REFERENCE_SPEC_GIT_PATH = ref_spec_7002.git_path
REFERENCE_SPEC_VERSION = ref_spec_7002.version
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
@pytest.mark.parametrize(
    "blocks_withdrawal_requests",
    [
        pytest.param(
            [
                [
                    WithdrawalRequestTransaction(
                        requests=[
                            WithdrawalRequest(
                                validator_pubkey=0x01,
                                amount=0,
                                fee=Spec.get_fee(0),
                            )
                        ],
                    ),
                ],
            ],
            id="single_withdrawal_request",
        ),
    ],
)
def test_eip_7002(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    blocks: List[Block],
) -> None:
    """Test making a withdrawal request."""
    # The `blocks` fixture builds the blocks (including header request-hash
    # verification) from the parametrized withdrawal requests.
    blockchain_test(
        pre=pre,
        post={},
        blocks=blocks,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_invalid_tx.py | tests/prague/eip7702_set_code_tx/test_invalid_tx.py | """
Tests invalid set-code transactions from EIP-7702.
Tests invalid set-code transactions from
[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
"""
from enum import Enum, auto
from typing import List, Type
import pytest
from ethereum_test_base_types import Bytes, FixedSizeBytes, HexNumber
from ethereum_test_tools import (
Address,
Alloc,
AuthorizationTuple,
ChainConfig,
Transaction,
TransactionException,
TransactionTestFiller,
)
from .spec import Spec, ref_spec_7702
REFERENCE_SPEC_GIT_PATH = ref_spec_7702.git_path
REFERENCE_SPEC_VERSION = ref_spec_7702.version
pytestmark = [pytest.mark.valid_from("Prague"), pytest.mark.exception_test]
auth_account_start_balance = 0
class OversizedInt(FixedSizeBytes[2]):  # type: ignore
    """
    Oversized 2-byte int.
    Will only fail if the int value is less than 2**8 (i.e. when the value
    would normally fit in a single byte).
    """
    pass
class OversizedAddress(FixedSizeBytes[21]):  # type: ignore
    """Oversized Address Type: serializes to 21 bytes instead of 20."""
    pass
class UndersizedAddress(FixedSizeBytes[19]):  # type: ignore
    """Undersized Address Type: serializes to 19 bytes instead of 20."""
    pass
class InvalidRLPMode(Enum):
    """Ways in which a transaction's RLP encoding is corrupted for testing."""

    TRUNCATED_RLP = 1  # Drop the final byte of the encoding.
    EXTRA_BYTES = 2  # Append a spurious byte to the encoding.
def test_empty_authorization_list(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
) -> None:
    """Test sending a transaction with an empty authorization list."""
    # Type-4 transactions must carry at least one authorization tuple.
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[],
        error=TransactionException.TYPE_4_EMPTY_AUTHORIZATION_LIST,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "v,r,s",
    [
        pytest.param(2**8, 1, 1, id="v=2**8"),
        pytest.param(1, 2**256, 1, id="r=2**256"),
        pytest.param(1, 1, 2**256, id="s=2**256"),
        pytest.param(2**8, 2**256, 2**256, id="v=2**8,r=s=2**256"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_auth_signature(
    chain_config: ChainConfig,
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    v: int,
    r: int,
    s: int,
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where one of the signature elements is out of
    range.
    """
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=delegate_address,
                nonce=0,
                chain_id=chain_config.chain_id,
                v=v,
                r=r,
                s=s,
            ),
        ],
        # Either the generic invalid-signature error or the more specific
        # s-too-high error is acceptable.
        error=[
            TransactionException.TYPE_4_INVALID_AUTHORITY_SIGNATURE,
            TransactionException.TYPE_4_INVALID_AUTHORITY_SIGNATURE_S_TOO_HIGH,
        ],
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "auth_chain_id",
    [
        pytest.param(Spec.MAX_AUTH_CHAIN_ID + 1, id="auth_chain_id=2**256"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_auth_chain_id(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    auth_chain_id: int,
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where the chain id field of an authorization
    overflows the maximum value.
    """
    # MAX_AUTH_CHAIN_ID + 1 exceeds the field's maximum representable value.
    authorization = AuthorizationTuple(
        address=delegate_address,
        nonce=0,
        chain_id=auth_chain_id,
        signer=pre.fund_eoa(auth_account_start_balance),
    )
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[authorization],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "auth_chain_id",
    [pytest.param(0), pytest.param(1)],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_auth_chain_id_encoding(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
    auth_chain_id: int,
) -> None:
    """
    Test sending a transaction where the chain id field of an authorization has
    an incorrect encoding.
    """
    # Force the chain id to serialize as a fixed 2-byte field; per
    # `OversizedInt`, this is only invalid for values below 2**8 (hence the
    # parametrized values 0 and 1).
    class ModifiedAuthorizationTuple(AuthorizationTuple):
        chain_id: OversizedInt  # type: ignore
    authorization = ModifiedAuthorizationTuple(
        address=delegate_address,
        nonce=0,
        chain_id=auth_chain_id,
        signer=pre.fund_eoa(auth_account_start_balance),
    )
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[authorization],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "nonce",
    [
        pytest.param(Spec.MAX_NONCE + 1, id="nonce=2**64"),
        pytest.param(2**256, id="nonce=2**256"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_nonce(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    nonce: int,
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where the nonce field of an authorization
    overflows the maximum value.
    """
    auth_signer = pre.fund_eoa()
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=delegate_address,
                nonce=nonce,
                signer=auth_signer,
            ),
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "nonce",
    [
        pytest.param([], id="nonce=empty-list"),
        pytest.param([0], id="nonce=non-empty-list"),
        pytest.param([0, 0], id="nonce=multi-element-list"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_nonce_as_list(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    nonce: List[int],
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where the nonce field of an authorization is
    serialized as a list instead of a scalar value.
    """
    auth_signer = pre.fund_eoa()
    # Override the nonce type so it RLP-encodes as a list, which is invalid.
    class AuthorizationTupleWithNonceAsList(AuthorizationTuple):
        nonce: List[HexNumber]  # type: ignore
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            AuthorizationTupleWithNonceAsList(
                address=delegate_address,
                nonce=nonce,
                signer=auth_signer,
            ),
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_nonce_encoding(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where the nonce field of an authorization has
    an incorrect encoding.
    """
    # Force the nonce (value 0) to serialize as a fixed 2-byte field, which
    # is an invalid encoding for such a small value (see `OversizedInt`).
    class ModifiedAuthorizationTuple(AuthorizationTuple):
        nonce: OversizedInt  # type: ignore
    authorization = ModifiedAuthorizationTuple(
        address=delegate_address,
        nonce=0,
        signer=pre.fund_eoa(auth_account_start_balance),
    )
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[authorization],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "address_type",
    [
        pytest.param(
            OversizedAddress,
            id="oversized",
        ),
        pytest.param(
            UndersizedAddress,
            id="undersized",
        ),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(
            int.from_bytes(Spec.RESET_DELEGATION_ADDRESS, byteorder="big"),
            id="reset_delegation_address",
        ),
        pytest.param(1, id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_address(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: int,
    address_type: Type[FixedSizeBytes],
) -> None:
    """
    Test sending a transaction where the address field of an authorization is
    incorrectly serialized.
    """
    auth_signer = pre.fund_eoa()
    # Serialize the delegate address at the wrong width (21 or 19 bytes
    # instead of 20).
    class ModifiedAuthorizationTuple(AuthorizationTuple):
        address: address_type  # type: ignore
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            ModifiedAuthorizationTuple(
                address=delegate_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize("extra_element_value", [0, 1])
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_authorization_tuple_extra_element(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
    extra_element_value: int,
) -> None:
    """
    Test sending a transaction where the authorization tuple field of the
    type-4 transaction is serialized to contain an extra element.
    """
    auth_signer = pre.fund_eoa()
    # Extend the tuple with a seventh element so the serialized form has one
    # field too many.
    class ExtraElementAuthorizationTuple(AuthorizationTuple):
        extra_element: HexNumber
        def get_rlp_fields(self) -> List[str]:
            """
            Append the extra field to the list of fields to be encoded in RLP.
            """
            rlp_fields = super().get_rlp_fields()[:]
            rlp_fields.append("extra_element")
            return rlp_fields
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            ExtraElementAuthorizationTuple(
                address=delegate_address,
                nonce=0,
                signer=auth_signer,
                extra_element=extra_element_value,
            ),
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "missing_index",
    [
        pytest.param(0, id="missing_chain_id"),
        pytest.param(1, id="missing_address"),
        pytest.param(2, id="missing_nonce"),
        pytest.param(3, id="missing_signature_y_parity"),
        pytest.param(4, id="missing_signature_r"),
        pytest.param(5, id="missing_signature_s"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_authorization_tuple_missing_element(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
    missing_index: int,
) -> None:
    """
    Test sending a transaction where the authorization tuple field of the
    type-4 transaction is serialized to miss one element.
    """
    auth_signer = pre.fund_eoa()
    # Drop one field from the tuple's serialization, selected by index.
    class MissingElementAuthorizationTuple(AuthorizationTuple):
        missing_element_index: int
        def get_rlp_fields(self) -> List[str]:
            """
            Remove the field that is specified by the missing element index.
            """
            rlp_fields = super().get_rlp_fields()[:]
            rlp_fields.pop(self.missing_element_index)
            return rlp_fields
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            MissingElementAuthorizationTuple(
                address=delegate_address,
                nonce=0,
                signer=auth_signer,
                missing_element_index=missing_index,
            ),
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_authorization_tuple_encoded_as_bytes(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
) -> None:
    """
    Test sending a transaction where the authorization tuple field of the
    type-4 transaction is encoded in the outer element as bytes instead of a
    list of elements.
    """
    # Override the list element type so the tuple's own RLP bytes are placed
    # in the authorization list as an opaque byte string.
    class ModifiedTransaction(Transaction):
        authorization_list: List[Bytes] | None  # type: ignore
    auth_signer = pre.fund_eoa()
    authorization_list = AuthorizationTuple(
        address=delegate_address,
        nonce=0,
        signer=auth_signer,
    )
    tx = ModifiedTransaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[authorization_list.rlp()],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    transaction_test(
        pre=pre,
        tx=tx,
    )
@pytest.mark.parametrize(
    "invalid_rlp_mode",
    [
        pytest.param(InvalidRLPMode.TRUNCATED_RLP, id="truncated_rlp"),
        pytest.param(InvalidRLPMode.EXTRA_BYTES, id="extra_bytes"),
    ],
)
@pytest.mark.parametrize(
    "delegate_address",
    [
        pytest.param(Spec.RESET_DELEGATION_ADDRESS, id="reset_delegation_address"),
        pytest.param(Address(1), id="non_zero_address"),
    ],
)
def test_invalid_tx_invalid_rlp_encoding(
    transaction_test: TransactionTestFiller,
    pre: Alloc,
    delegate_address: Address,
    invalid_rlp_mode: InvalidRLPMode,
) -> None:
    """
    Test sending a transaction type-4 where the RLP encoding of the transaction
    is invalid.
    """
    auth_signer = pre.fund_eoa()
    # Build a well-formed transaction first, then corrupt its raw bytes.
    tx = Transaction(
        gas_limit=100_000,
        to=0,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=delegate_address,
                nonce=0,
                signer=auth_signer,
            )
        ],
        error=TransactionException.TYPE_4_INVALID_AUTHORIZATION_FORMAT,
        sender=pre.fund_eoa(),
    )
    if invalid_rlp_mode == InvalidRLPMode.TRUNCATED_RLP:
        # Truncate the last byte of the RLP encoding
        tx.rlp_override = Bytes(tx.rlp()[:-1])
    elif invalid_rlp_mode == InvalidRLPMode.EXTRA_BYTES:
        # Add an extra byte to the end of the RLP encoding
        tx.rlp_override = Bytes(tx.rlp() + b"\x00")
    transaction_test(
        pre=pre,
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py | tests/prague/eip7702_set_code_tx/test_set_code_txs_2.py | """
A state test for [EIP-7702 SetCodeTX](https://eips.ethereum.org/EIPS/eip-7702).
"""
from enum import Enum, IntEnum
import pytest
from ethereum_test_forks import Fork, GasCosts
from ethereum_test_tools import (
AccessList,
Account,
Alloc,
AuthorizationTuple,
Block,
BlockchainTestFiller,
Bytes,
Case,
CodeGasMeasure,
Conditional,
Environment,
Hash,
StateTestFiller,
Storage,
Switch,
Transaction,
TransactionException,
compute_create_address,
)
from ethereum_test_types.eof.v1 import Container, Section
from ethereum_test_vm import Macros
from ethereum_test_vm import Opcodes as Op
from .spec import Spec, ref_spec_7702
REFERENCE_SPEC_GIT_PATH = ref_spec_7702.git_path
REFERENCE_SPEC_VERSION = ref_spec_7702.version
@pytest.mark.valid_from("Prague")
def test_pointer_contract_pointer_loop(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx -> call -> pointer A -> contract A -> pointer B -> contract loop C.
    Call pointer that goes more level of depth to call a contract loop.
    Loop is created only if pointers are set with auth tuples.
    """
    env = Environment()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    pointer_b = pre.fund_eoa()
    storage: Storage = Storage()
    contract_a = pre.deploy_contract(
        code=Op.SSTORE(storage.store_next(1, "contract_a_worked"), 0x1)
        + Op.CALL(gas=1_000_000, address=pointer_b)
        + Op.STOP,
    )
    storage_loop: Storage = Storage()
    # The loop counter in slot 0 is expected to reach 112 before gas runs out.
    contract_worked = storage_loop.store_next(112, "contract_loop_worked")
    contract_loop = pre.deploy_contract(
        code=Op.SSTORE(contract_worked, Op.ADD(1, Op.SLOAD(0)))
        + Op.CALL(gas=1_000_000, address=pointer_a)
        + Op.STOP,
    )
    tx = Transaction(
        to=pointer_a,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_a,
            ),
            AuthorizationTuple(
                address=contract_loop,
                nonce=0,
                signer=pointer_b,
            ),
        ],
    )
    # Pointer calls execute in the pointer's own storage context.
    post = {
        pointer_a: Account(storage=storage),
        pointer_b: Account(storage=storage_loop),
    }
    state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Prague")
def test_pointer_to_pointer(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx -> call -> pointer A -> pointer B.
    Direct call from pointer to pointer is OOG
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    # pointer_a delegates to pointer_b; pointer_b delegates to contract_a.
    pointer_a = pre.fund_eoa()
    pointer_b = pre.fund_eoa()
    contract_a = pre.deploy_contract(
        # Expected 0: the call through the pointer chain does not execute, so
        # the store of 0x1 never lands.
        code=Op.SSTORE(storage.store_next(0, "contract_a_worked"), 0x1)
        + Op.CALL(gas=1_000_000, address=pointer_b)
        + Op.STOP,
    )
    tx = Transaction(
        to=pointer_a,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=pointer_b,
                nonce=0,
                signer=pointer_a,
            ),
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_b,
            ),
        ],
    )
    post = {pointer_a: Account(storage=storage)}
    state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Prague")
def test_pointer_normal(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
    """
    Tx -> call -> pointer A -> contract.
    Other normal tx can interact with
    previously assigned pointers.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # Expected 3: one increment per transaction across both blocks.
    slot_worked = storage.store_next(3, "contract_a_worked")
    contract_a = pre.deploy_contract(
        code=Op.SSTORE(slot_worked, Op.ADD(1, Op.SLOAD(slot_worked))) + Op.STOP,
    )
    tx = Transaction(
        to=pointer_a,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    # Other normal tx can interact with previously assigned pointers
    tx_2 = Transaction(
        to=pointer_a,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
    )
    # Event from another block
    tx_3 = Transaction(
        to=pointer_a,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
    )
    post = {pointer_a: Account(storage=storage)}
    blockchain_test(
        genesis_environment=env,
        pre=pre,
        post=post,
        blocks=[Block(txs=[tx, tx_2]), Block(txs=[tx_3])],
    )
@pytest.mark.valid_from("Prague")
def test_pointer_measurements(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
    """
    Check extcode* operations on pointer before and after pointer is set.
    Check context opcode results when called under pointer call.
    Opcodes have context of an original pointer account (balance, storage).
    """
    env = Environment()
    sender = pre.fund_eoa()
    pointer = pre.fund_eoa(amount=100)
    storage_normal: Storage = Storage()
    storage_pointer: Storage = Storage()
    # This storage will be applied to the pointer address (delegated code
    # writes into the pointer's storage, not its own).
    storage_pointer_code: Storage = Storage()
    pointer_code = pre.deploy_contract(
        balance=200,
        code=Op.SSTORE(storage_pointer_code.store_next(pointer, "address"), Op.ADDRESS())
        + Op.SSTORE(storage_pointer_code.store_next(3, "callvalue"), Op.CALLVALUE())
        + Op.CALL(gas=1000, address=0, value=3)
        # SELFBALANCE reflects the pointer account's balance (100), not the
        # delegated contract's balance (200).
        + Op.SSTORE(storage_pointer_code.store_next(100, "selfbalance"), Op.SELFBALANCE())
        + Op.SSTORE(storage_pointer_code.store_next(sender, "origin"), Op.ORIGIN())
        + Op.SSTORE(
            storage_pointer_code.store_next(
                "0x1122334400000000000000000000000000000000000000000000000000000000",
                "calldataload",
            ),
            Op.CALLDATALOAD(0),
        )
        + Op.SSTORE(storage_pointer_code.store_next(4, "calldatasize"), Op.CALLDATASIZE())
        + Op.CALLDATACOPY(0, 0, 32)
        + Op.SSTORE(
            storage_pointer_code.store_next(
                "0x1122334400000000000000000000000000000000000000000000000000000000",
                "calldatacopy",
            ),
            Op.MLOAD(0),
        )
        + Op.MSTORE(0, 0)
        # CODESIZE/CODECOPY see the delegated contract's code.
        + Op.SSTORE(storage_pointer_code.store_next(83, "codesize"), Op.CODESIZE())
        + Op.CODECOPY(0, 0, 32)
        + Op.SSTORE(
            storage_pointer_code.store_next(
                "0x30600055346001556000600060006000600360006103e8f14760025532600355", "codecopy"
            ),
            Op.MLOAD(0),
        )
        # SLOAD reads the pointer's (empty) storage, not the contract's
        # pre-set {15: 25}.
        + Op.SSTORE(storage_pointer_code.store_next(0, "sload"), Op.SLOAD(15)),
        storage={15: 25},
    )
    # Measurements BEFORE the delegation is set: pointer is a plain EOA.
    contract_measurements = pre.deploy_contract(
        code=Op.EXTCODECOPY(pointer, 0, 0, 32)
        + Op.SSTORE(
            storage_normal.store_next(Bytes().keccak256(), "extcodehash"),
            Op.EXTCODEHASH(pointer),
        )
        + Op.SSTORE(storage_normal.store_next(0, "extcodesize"), Op.EXTCODESIZE(pointer))
        + Op.SSTORE(storage_normal.store_next(0, "extcodecopy"), Op.MLOAD(0))
        + Op.SSTORE(storage_normal.store_next(100, "balance"), Op.BALANCE(pointer))
        + Op.STOP,
    )
    # Measurements AFTER the delegation is set: extcode* see the delegation
    # designation bytes, not the delegated contract's code.
    delegation_designation = Spec.delegation_designation(pointer_code)
    contract_measurements_pointer = pre.deploy_contract(
        code=Op.EXTCODECOPY(pointer, 0, 0, 32)
        + Op.SSTORE(
            storage_pointer.store_next(delegation_designation.keccak256(), "extcodehash"),
            Op.EXTCODEHASH(pointer),
        )
        + Op.SSTORE(
            storage_pointer.store_next(len(delegation_designation), "extcodesize"),
            Op.EXTCODESIZE(pointer),
        )
        + Op.SSTORE(
            storage_pointer.store_next(
                Hash(delegation_designation, right_padding=True), "extcodecopy"
            ),
            Op.MLOAD(0),
        )
        + Op.SSTORE(storage_pointer.store_next(100, "balance"), Op.BALANCE(pointer))
        + Op.STOP,
    )
    tx = Transaction(
        to=contract_measurements,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
    )
    tx_pointer = Transaction(
        to=contract_measurements_pointer,
        gas_limit=1_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=pointer_code,
                nonce=0,
                signer=pointer,
            )
        ],
    )
    tx_pointer_call = Transaction(
        to=pointer,
        gas_limit=1_000_000,
        data=bytes.fromhex("11223344"),
        value=3,
        sender=sender,
    )
    post = {
        contract_measurements: Account(storage=storage_normal),
        contract_measurements_pointer: Account(storage=storage_pointer),
        pointer: Account(storage=storage_pointer_code),
    }
    blockchain_test(
        genesis_environment=env,
        pre=pre,
        post=post,
        blocks=[Block(txs=[tx]), Block(txs=[tx_pointer, tx_pointer_call])],
    )
@pytest.mark.with_all_precompiles
@pytest.mark.valid_from("Prague")
def test_call_to_precompile_in_pointer_context(
    state_test: StateTestFiller, pre: Alloc, precompile: int
) -> None:
    """
    Tx -> call -> pointer A -> precompile contract.
    Make sure that the gas consumed when calling precompiles in a normal call
    is the same as from inside the pointer context call.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # Measures the gas consumed by a call to the precompile:
    # GAS before, CALL, then (before - after) is returned in the first word.
    contract_test = pre.deploy_contract(
        code=Op.MSTORE(1000, Op.GAS())
        + Op.CALL(gas=100_000, address=precompile, args_size=Op.CALLDATASIZE())
        + Op.MSTORE(0, Op.SUB(Op.MLOAD(1000), Op.GAS()))
        + Op.RETURN(0, 32)
    )
    # NOTE: despite the names, these are *memory offsets* where the two gas
    # measurements are stored, not gas amounts.
    normal_call_gas = 2000
    pointer_call_gas = 3000
    # Calls contract_test directly and then through the pointer, storing the
    # difference of the two measurements; it must be zero.
    contract_a = pre.deploy_contract(
        code=Op.CALL(
            gas=1_000_000,
            address=contract_test,
            args_size=Op.CALLDATASIZE(),
            ret_offset=1000,
            ret_size=32,
        )
        + Op.MSTORE(normal_call_gas, Op.MLOAD(1000))
        + Op.CALL(
            gas=1_000_000,
            address=pointer_a,
            args_size=Op.CALLDATASIZE(),
            ret_offset=1000,
            ret_size=32,
        )
        + Op.MSTORE(pointer_call_gas, Op.MLOAD(1000))
        + Op.SSTORE(
            storage.store_next(0, "call_gas_diff"),
            Op.SUB(Op.MLOAD(normal_call_gas), Op.MLOAD(pointer_call_gas)),
        )
        + Op.SSTORE(storage.store_next(1, "tx_worked"), 1)
    )
    # The authorization delegates pointer_a to the measuring contract.
    tx = Transaction(
        to=contract_a,
        gas_limit=3_000_000,
        data=[0x11] * 256,
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_test,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {contract_a: Account(storage=storage)}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.with_all_precompiles
@pytest.mark.valid_from("Prague")
def test_pointer_to_precompile(state_test: StateTestFiller, pre: Alloc, precompile: int) -> None:
    """
    Tx -> call -> pointer A -> precompile contract.
    In case a delegation designator points to a precompile address, retrieved
    code is considered empty and CALL, CALLCODE, STATICCALL, DELEGATECALL
    instructions targeting this account will execute empty code, i.e. succeed
    with no execution given enough gas.
    So a call to a pointer that points to a precompile is like a call to an
    empty account.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # Calls the precompile directly with 0 gas and returns the CALL result.
    contract_test_normal = pre.deploy_contract(
        code=Op.MSTORE(0, Op.CALL(gas=0, address=precompile, args_size=Op.CALLDATASIZE()))
        + Op.RETURN(0, 32)
    )
    # Same, but through the pointer delegated to the precompile.
    contract_test_pointer = pre.deploy_contract(
        code=Op.MSTORE(0, Op.CALL(gas=0, address=pointer_a, args_size=Op.CALLDATASIZE()))
        + Op.RETURN(0, 32)
    )
    contract_a = pre.deploy_contract(
        code=Op.CALL(
            gas=1_000_000,
            address=contract_test_normal,
            args_size=Op.CALLDATASIZE(),
            ret_offset=1000,
            ret_size=32,
        )
        # direct call to a precompile with 0 gas always return 0
        + Op.SSTORE(storage.store_next(0, "direct_call_result"), Op.MLOAD(1000))
        + Op.CALL(
            gas=1_000_000,
            address=contract_test_pointer,
            args_size=Op.CALLDATASIZE(),
            ret_offset=1000,
            ret_size=32,
        )
        # pointer call to a precompile with 0 gas always return 1 as if calling
        # empty address
        + Op.SSTORE(storage.store_next(1, "pointer_call_result"), Op.MLOAD(1000))
    )
    tx = Transaction(
        to=contract_a,
        gas_limit=3_000_000,
        data=[0x11] * 256,
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=precompile,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {contract_a: Account(storage=storage)}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
class AccessListCall(Enum):
    """Whether, and in which transaction, an access list is attached."""
    NONE = 1
    IN_NORMAL_TX_ONLY = 2
    IN_POINTER_TX_ONLY = 3
    IN_BOTH_TX = 4
class PointerDefinition(Enum):
    """In which transaction the pointer delegation is set (via auth tuple)."""
    SEPARATE = 1
    IN_NORMAL_TX_ONLY = 2
    IN_POINTER_TX_ONLY = 3
    IN_BOTH_TX = 4
class AccessListTo(Enum):
    """Which address the access list entry targets."""
    POINTER_ADDRESS = 1
    CONTRACT_ADDRESS = 2
@pytest.mark.parametrize(
    "access_list_rule",
    [
        AccessListCall.NONE,
        AccessListCall.IN_BOTH_TX,
        AccessListCall.IN_NORMAL_TX_ONLY,
        AccessListCall.IN_POINTER_TX_ONLY,
    ],
)
@pytest.mark.parametrize(
    "pointer_definition",
    [
        PointerDefinition.SEPARATE,
        PointerDefinition.IN_BOTH_TX,
        PointerDefinition.IN_NORMAL_TX_ONLY,
        PointerDefinition.IN_POINTER_TX_ONLY,
    ],
)
@pytest.mark.parametrize(
    "access_list_to",
    [AccessListTo.POINTER_ADDRESS, AccessListTo.CONTRACT_ADDRESS],
)
@pytest.mark.valid_from("Prague")
def test_gas_diff_pointer_vs_direct_call(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    fork: Fork,
    access_list_rule: AccessListCall,
    pointer_definition: PointerDefinition,
    access_list_to: AccessListTo,
) -> None:
    """
    Check the gas difference when calling the contract directly vs as a pointer
    Combine with AccessList and AuthTuple gas reductions scenarios.
    """
    env = Environment()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    call_worked = 1
    gas_costs: GasCosts = fork.gas_costs()
    # Fixed cost of the non-access opcodes executed by the measured contract.
    opcodes_price = 37
    # Expected gas for the direct call, depending on whether the contract and
    # its storage slot were pre-warmed by the access list of the normal tx.
    direct_call_gas: int = (
        # 20_000 + 2_600 + 2_100 + 37 = 24737
        gas_costs.G_STORAGE_SET
        + (
            # access account price
            # If storage and account is declared in access list then discount
            gas_costs.G_WARM_ACCOUNT_ACCESS + gas_costs.G_WARM_SLOAD
            if access_list_rule in [AccessListCall.IN_NORMAL_TX_ONLY, AccessListCall.IN_BOTH_TX]
            else gas_costs.G_COLD_ACCOUNT_ACCESS + gas_costs.G_COLD_SLOAD
        )
        + opcodes_price
    )
    # Expected gas for the call through the pointer; each of the three
    # accesses (pointer account, storage slot, contract account) may be warm
    # or cold independently depending on the parametrized scenario.
    pointer_call_gas: int = (
        # sstore + addr + addr + sload + op
        # no access list, no pointer, all accesses are hot
        # 20_000 + 2_600 * 2 + 2_100 + 37 = 27_337
        #
        # access list for pointer, pointer is set
        # additional 2_600 charged for access of contract
        # 20_000 + 100 + 2_600 + 100 + 37 = 22_837
        #
        # no access list, pointer is set
        # pointer access is hot, sload and contract are hot
        # 20_000 + 100 + 2_600 + 2_100 + 37 = 24_837
        #
        # access list for contract, pointer is set
        # contract call is hot, pointer call is call because pointer is set
        # only sload is hot because access list is for contract
        # 20_000 + 100 + 100 + 2100 + 37 = 22_337
        gas_costs.G_STORAGE_SET
        # pointer address access
        + (
            gas_costs.G_WARM_ACCOUNT_ACCESS
            if (
                pointer_definition
                in [PointerDefinition.IN_BOTH_TX, PointerDefinition.IN_POINTER_TX_ONLY]
                or access_list_rule
                in [AccessListCall.IN_BOTH_TX, AccessListCall.IN_POINTER_TX_ONLY]
                and access_list_to == AccessListTo.POINTER_ADDRESS
            )
            else gas_costs.G_COLD_ACCOUNT_ACCESS
        )
        # storage access
        + (
            gas_costs.G_WARM_SLOAD
            if (
                access_list_rule in [AccessListCall.IN_BOTH_TX, AccessListCall.IN_POINTER_TX_ONLY]
                and access_list_to == AccessListTo.POINTER_ADDRESS
            )
            else gas_costs.G_COLD_SLOAD
        )
        # contract address access
        + (
            gas_costs.G_WARM_ACCOUNT_ACCESS
            if (
                access_list_rule in [AccessListCall.IN_BOTH_TX, AccessListCall.IN_POINTER_TX_ONLY]
                and access_list_to == AccessListTo.CONTRACT_ADDRESS
            )
            else gas_costs.G_COLD_ACCOUNT_ACCESS
        )
        + opcodes_price
    )
    contract = pre.deploy_contract(code=Op.SSTORE(call_worked, Op.ADD(Op.SLOAD(call_worked), 1)))
    # Op.CALLDATASIZE() does not work with kwargs
    storage_normal: Storage = Storage()
    # Measures the gas used by a direct call to the contract.
    contract_test_normal = pre.deploy_contract(
        code=Op.GAS()
        + Op.POP(Op.CALL(gas=100_000, address=contract))
        + Op.SSTORE(
            storage_normal.store_next(direct_call_gas, "normal_call_price"),
            Op.SUB(Op.SWAP1(), Op.GAS()),
        )
    )
    storage_pointer: Storage = Storage()
    # Measures the gas used by a call through the pointer.
    contract_test_pointer = pre.deploy_contract(
        code=Op.GAS()
        + Op.POP(Op.CALL(gas=100_000, address=pointer_a))
        + Op.SSTORE(
            storage_pointer.store_next(pointer_call_gas, "pointer_call_price"),
            Op.SUB(Op.SWAP1(), Op.GAS()),
        )
    )
    # tx_0 only exists to set the delegation ahead of time in the SEPARATE
    # scenario; it is sent to a dummy address.
    tx_0 = Transaction(
        to=1,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=(
            [
                AuthorizationTuple(
                    address=contract,
                    nonce=0,
                    signer=pointer_a,
                )
            ]
            if pointer_definition == PointerDefinition.SEPARATE
            else None
        ),
    )
    # tx performs the direct-call measurement.
    tx = Transaction(
        to=contract_test_normal,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=(
            [
                AuthorizationTuple(
                    address=contract,
                    nonce=0,
                    signer=pointer_a,
                )
            ]
            if pointer_definition == PointerDefinition.IN_BOTH_TX
            or pointer_definition == PointerDefinition.IN_NORMAL_TX_ONLY
            else None
        ),
        access_list=(
            [
                AccessList(
                    address=contract,
                    storage_keys=[call_worked],
                )
            ]
            if access_list_rule == AccessListCall.IN_BOTH_TX
            or access_list_rule == AccessListCall.IN_NORMAL_TX_ONLY
            else None
        ),
    )
    # tx2 performs the pointer-call measurement.
    tx2 = Transaction(
        to=contract_test_pointer,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=(
            [
                AuthorizationTuple(
                    address=contract,
                    nonce=0,
                    signer=pointer_a,
                )
            ]
            if pointer_definition == PointerDefinition.IN_BOTH_TX
            or pointer_definition == PointerDefinition.IN_POINTER_TX_ONLY
            else None
        ),
        access_list=(
            [
                AccessList(
                    address=(
                        pointer_a if access_list_to == AccessListTo.POINTER_ADDRESS else contract
                    ),
                    storage_keys=[call_worked],
                )
            ]
            if access_list_rule == AccessListCall.IN_BOTH_TX
            or access_list_rule == AccessListCall.IN_POINTER_TX_ONLY
            else None
        ),
    )
    post = {
        contract: Account(storage={call_worked: 1}),
        pointer_a: Account(storage={call_worked: 1}),
        contract_test_normal: Account(storage=storage_normal),
        contract_test_pointer: Account(storage=storage_pointer),
    }
    blockchain_test(
        genesis_environment=env,
        pre=pre,
        post=post,
        blocks=[Block(txs=[tx_0]), Block(txs=[tx]), Block(txs=[tx2])],
    )
@pytest.mark.valid_from("Prague")
def test_pointer_call_followed_by_direct_call(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """
    If we first call through the pointer and then call directly, are the
    call/sload warm?
    The direct call is warm because the pointer call already touched the
    contract address, but the SLOAD is still cold because the pointer call
    warmed the storage of the pointer's account, not the contract's.
    """
    env = Environment()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    gas_costs: GasCosts = fork.gas_costs()
    call_worked = 1
    # Fixed cost of the non-access opcodes executed by the called contract.
    opcodes_price: int = 37
    pointer_call_gas = (
        gas_costs.G_STORAGE_SET
        + gas_costs.G_WARM_ACCOUNT_ACCESS  # pointer is warm
        + gas_costs.G_COLD_ACCOUNT_ACCESS  # contract is cold
        + gas_costs.G_COLD_SLOAD  # storage access under pointer call is cold
        + opcodes_price
    )
    direct_call_gas = (
        gas_costs.G_STORAGE_SET
        + gas_costs.G_WARM_ACCOUNT_ACCESS  # since previous pointer call,
        # contract is now warm
        + gas_costs.G_COLD_SLOAD  # but storage is cold, because it's
        # contract's direct
        + opcodes_price
    )
    contract = pre.deploy_contract(code=Op.SSTORE(call_worked, Op.ADD(Op.SLOAD(call_worked), 1)))
    storage_test_gas: Storage = Storage()
    # Measures the pointer call first, then the direct call, storing the gas
    # used by each so the warm/cold expectations above can be checked.
    contract_test_gas = pre.deploy_contract(
        code=Op.GAS()
        + Op.POP(Op.CALL(gas=100_000, address=pointer_a))
        + Op.SSTORE(
            storage_test_gas.store_next(pointer_call_gas, "pointer_call_price"),
            Op.SUB(Op.SWAP1(), Op.GAS()),
        )
        + Op.GAS()
        + Op.POP(Op.CALL(gas=100_000, address=contract))
        + Op.SSTORE(
            storage_test_gas.store_next(direct_call_gas, "direct_call_price"),
            Op.SUB(Op.SWAP1(), Op.GAS()),
        )
    )
    tx = Transaction(
        to=contract_test_gas,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=(
            [
                AuthorizationTuple(
                    address=contract,
                    nonce=0,
                    signer=pointer_a,
                )
            ]
        ),
    )
    post = {
        contract: Account(storage={call_worked: 1}),
        pointer_a: Account(storage={call_worked: 1}),
        contract_test_gas: Account(storage=storage_test_gas),
    }
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.valid_from("Prague")
def test_pointer_to_static(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx -> call -> pointer A -> static -> static violation.
    Verify that the static context is active when called under a pointer.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # SSTORE inside a static context must fail the STATICCALL.
    contract_b = pre.deploy_contract(code=Op.SSTORE(0, 5))
    contract_a = pre.deploy_contract(
        code=Op.SSTORE(
            storage.store_next(0, "static_call"),
            Op.STATICCALL(
                gas=1_000_000, address=contract_b, args_size=32, ret_offset=1000, ret_size=32
            ),
        )
        + Op.SSTORE(storage.store_next(1, "call_worked"), 1)
    )
    # The tx targets the pointer, so contract_a's code runs in pointer_a's
    # context, and pointer_a's storage records the results.
    tx = Transaction(
        to=pointer_a,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {pointer_a: Account(storage=storage), contract_b: Account(storage={0: 0})}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.valid_from("Prague")
def test_static_to_pointer(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx -> staticcall -> pointer A -> static violation.
    Verify that the static context is active when making a sub-call to a
    pointer.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # SSTORE behind the pointer must fail under the STATICCALL.
    contract_b = pre.deploy_contract(code=Op.SSTORE(0, 5))
    contract_a = pre.deploy_contract(
        code=Op.SSTORE(
            storage.store_next(0, "static_call"),
            Op.STATICCALL(
                gas=1_000_000, address=pointer_a, args_size=32, ret_offset=1000, ret_size=32
            ),
        )
        + Op.SSTORE(storage.store_next(1, "call_worked"), 1)
    )
    tx = Transaction(
        to=contract_a,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_b,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {contract_a: Account(storage=storage), pointer_a: Account(storage={0: 0})}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.valid_from("EOFv1")
def test_pointer_to_eof(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx -> call -> pointer A -> EOF.
    Pointer to an EOF contract works.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # Delegation target is an EOF (Container) contract; its SSTORE lands in
    # the pointer's storage.
    contract_a = pre.deploy_contract(
        code=Container(
            sections=[
                Section.Code(
                    code=Op.SSTORE(storage.store_next(5, "eof_call_result"), 5) + Op.STOP,
                )
            ]
        )
    )
    tx = Transaction(
        to=pointer_a,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {pointer_a: Account(storage=storage)}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.valid_from("Prague")
def test_pointer_to_static_reentry(state_test: StateTestFiller, pre: Alloc) -> None:
    """
    Tx call -> pointer A -> static -> code -> pointer A -> static violation
    Verify that the static context is active when called under a pointer,
    including on re-entry into the same pointer.
    """
    env = Environment()
    storage: Storage = Storage()
    sender = pre.fund_eoa()
    pointer_a = pre.fund_eoa()
    # contract_b increments the calldata counter and re-enters the pointer;
    # on the second entry (counter == 2) it attempts an SSTORE, which must
    # fail because the whole chain runs under the STATICCALL below.
    contract_b = pre.deploy_contract(
        code=Op.MSTORE(0, Op.ADD(1, Op.CALLDATALOAD(0)))
        + Conditional(
            condition=Op.EQ(Op.MLOAD(0), 2), if_true=Op.SSTORE(5, 5), if_false=Op.JUMPDEST()
        )
        + Op.CALL(gas=100_000, address=pointer_a, args_offset=0, args_size=Op.CALLDATASIZE())
    )
    # contract_a (running as the pointer's code): on the first entry
    # (calldata == 0) it STATICCALLs contract_b; on re-entry it plain-CALLs,
    # but the static flag from the outer STATICCALL still applies.
    contract_a = pre.deploy_contract(
        code=Op.MSTORE(0, Op.CALLDATALOAD(0))
        + Conditional(
            condition=Op.EQ(Op.MLOAD(0), 0),
            if_true=Op.SSTORE(
                storage.store_next(1, "static_call"),
                Op.STATICCALL(
                    gas=1_000_000,
                    address=contract_b,
                    args_size=Op.CALLDATASIZE(),
                    ret_offset=1000,
                    ret_size=32,
                ),
            )
            + Op.SSTORE(storage.store_next(1, "call_worked"), 1),
            if_false=Op.CALL(
                gas=1_000_000,
                address=contract_b,
                args_size=Op.CALLDATASIZE(),
                ret_offset=1000,
                ret_size=32,
            ),
        )
    )
    tx = Transaction(
        to=pointer_a,
        gas_limit=3_000_000,
        data=[0x00] * 32,
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_a,
                nonce=0,
                signer=pointer_a,
            )
        ],
    )
    post = {pointer_a: Account(storage=storage)}
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
@pytest.mark.valid_from("Prague")
@pytest.mark.parametrize(
    "call_type",
    [Op.CALL, Op.DELEGATECALL, Op.CALLCODE],
)
def test_contract_storage_to_pointer_with_storage(
    state_test: StateTestFiller, pre: Alloc, call_type: Op
) -> None:
    """
    Tx call -> contract with storage -> pointer A with storage ->
    storage/tstorage modify.
    Check storage/tstorage modifications when interacting with pointers:
    CALL writes into the pointer's own storage, while DELEGATECALL/CALLCODE
    write into the caller's storage instead.
    """
    env = Environment()
    sender = pre.fund_eoa()
    # Pointer B
    storage_pointer_b: Storage = Storage()
    storage_pointer_b.store_next(
        0 if call_type in [Op.DELEGATECALL, Op.CALLCODE] else 1, "first_slot"
    )
    storage_pointer_b.store_next(0, "second_slot")
    storage_pointer_b.store_next(0, "third_slot")
    pointer_b = pre.fund_eoa()
    # Contract B
    storage_b: Storage = Storage()
    first_slot = storage_b.store_next(10, "first_slot")
    second_slot = storage_b.store_next(20, "second_slot")
    third_slot = storage_b.store_next(30, "third_slot")
    fourth_slot = storage_b.store_next(0, "fourth_slot")
    # contract_b: when calldata[0] == 1, persist its TLOAD of third_slot
    # (expected 0: transient storage does not leak between call frames);
    # otherwise increment its first slot and its transient third slot.
    contract_b = pre.deploy_contract(
        code=Conditional(
            condition=Op.EQ(Op.CALLDATALOAD(0), 1),
            if_true=Op.SSTORE(fourth_slot, Op.TLOAD(third_slot)),
            if_false=Op.SSTORE(first_slot, Op.ADD(Op.SLOAD(first_slot), 1))
            + Op.TSTORE(third_slot, Op.ADD(Op.TLOAD(third_slot), 1)),
        ),
        storage={
            # Original contract storage is untouched
            first_slot: 10,
            second_slot: 20,
            third_slot: 30,
        },
    )
    # Contract A
    storage_a: Storage = Storage()
    contract_a = pre.deploy_contract(
        code=Op.TSTORE(third_slot, 1)
        + call_type(address=pointer_b, gas=500_000)
        + Op.SSTORE(third_slot, Op.TLOAD(third_slot))
        # Verify tstorage in contract after interacting with pointer, it must
        # be 0
        + Op.MSTORE(0, 1)
        + Op.CALL(address=contract_b, gas=500_000, args_offset=0, args_size=32),
        storage={
            storage_a.store_next(
                # caller storage is modified when calling pointer with delegate
                # or callcode
                6 if call_type in [Op.DELEGATECALL, Op.CALLCODE] else 5,
                "first_slot",
            ): 5,
            storage_a.store_next(2, "second_slot"): 2,
            storage_a.store_next(
                # TSTORE is modified when calling pointer with delegate or
                # callcode
                2 if call_type in [Op.DELEGATECALL, Op.CALLCODE] else 1,
                "third_slot",
            ): 3,
        },
    )
    tx = Transaction(
        to=contract_a,
        gas_limit=3_000_000,
        data=b"",
        value=0,
        sender=sender,
        authorization_list=[
            AuthorizationTuple(
                address=contract_b,
                nonce=0,
                signer=pointer_b,
            )
        ],
    )
    post = {
        contract_a: Account(storage=storage_a),
        contract_b: Account(storage=storage_b),
        pointer_b: Account(storage=storage_pointer_b),
    }
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
class ReentryAction(IntEnum):
"""Reentry logic action."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/spec.py | tests/prague/eip7702_set_code_tx/spec.py | """Defines EIP-7702 specification constants and functions."""
from dataclasses import dataclass
from ethereum_test_base_types import Address, Bytes
@dataclass(frozen=True)
class ReferenceSpec:
    """Defines the reference spec version and git path."""
    # Path of the spec document within the ethereum/EIPs repository.
    git_path: str
    # Commit hash of the spec version these tests were written against.
    version: str
ref_spec_7702 = ReferenceSpec("EIPS/eip-7702.md", "99f1be49f37c034bdd5c082946f5968710dbfc87")
class Spec:
"""
Parameters from the EIP-7702 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7702.
"""
SET_CODE_TX_TYPE = 0x04
MAGIC = 0x05
PER_AUTH_BASE_COST = 12_500
PER_EMPTY_ACCOUNT_COST = 25_000
DELEGATION_DESIGNATION = Bytes("ef0100")
RESET_DELEGATION_ADDRESS = Address(0)
MAX_AUTH_CHAIN_ID = 2**256 - 1
MAX_NONCE = 2**64 - 1
@staticmethod
def delegation_designation(address: Address) -> Bytes:
"""Return delegation designation for the given address."""
return Bytes(Spec.DELEGATION_DESIGNATION + bytes(address))
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_set_code_txs.py | tests/prague/eip7702_set_code_tx/test_set_code_txs.py | """
Tests use of set-code transactions from EIP-7702.
Tests use of set-code transactions from
[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
"""
from enum import StrEnum
from hashlib import sha256
from itertools import count
from typing import List
import pytest
from ethereum_test_base_types import HexNumber
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork
from ethereum_test_tools import (
AccessList,
Account,
Address,
Alloc,
AuthorizationTuple,
Block,
BlockchainTestFiller,
Bytecode,
Bytes,
ChainConfig,
CodeGasMeasure,
Conditional,
Environment,
EVMCodeType,
Hash,
Initcode,
Requests,
StateTestFiller,
Storage,
Transaction,
TransactionException,
add_kzg_version,
call_return_code,
compute_create_address,
)
from ethereum_test_tools import Macros as Om
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import TransactionReceipt
from ethereum_test_types.eof.v1 import Container, Section
from ...cancun.eip4844_blobs.spec import Spec as Spec4844
from ..eip6110_deposits.helpers import DepositRequest
from ..eip7002_el_triggerable_withdrawals.helpers import WithdrawalRequest
from ..eip7251_consolidations.helpers import ConsolidationRequest
from .helpers import AddressType
from .spec import Spec, ref_spec_7702
REFERENCE_SPEC_GIT_PATH = ref_spec_7702.git_path
REFERENCE_SPEC_VERSION = ref_spec_7702.version
# Marks applied to every test in this module.
pytestmark = [
    pytest.mark.valid_from("Prague"),
    pytest.mark.pre_alloc_group(
        "set_code_tests", reason="Tests EIP-7702 set code transactions with system contracts"
    ),
]
# Balance given to authority EOAs unless a test specifies otherwise.
auth_account_start_balance = 0
@pytest.mark.parametrize(
    "tx_value",
    [0, 1],
)
@pytest.mark.parametrize(
    "suffix,succeeds",
    [
        pytest.param(Op.STOP, True, id="stop"),
        pytest.param(Op.RETURN(0, 0), True, id="return"),
        pytest.param(Op.REVERT, False, id="revert"),
        pytest.param(Op.INVALID, False, id="invalid"),
        pytest.param(Om.OOG, False, id="out-of-gas"),
    ],
)
def test_self_sponsored_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
    suffix: Bytecode,
    succeeds: bool,
    tx_value: int,
) -> None:
    """
    Test the executing a self-sponsored set-code transaction.
    The transaction is sent to the sender, and the sender is the signer of the
    only authorization tuple in the authorization list.
    The authorization tuple has a nonce of 1 because the self-sponsored
    transaction increases the nonce of the sender from zero to one first.
    The expected nonce at the end of the transaction is 2.
    """
    storage = Storage()
    sender = pre.fund_eoa()
    # The delegated code records ORIGIN, CALLER and CALLVALUE, then runs the
    # parametrized suffix that decides whether the execution succeeds.
    set_code = (
        Op.SSTORE(storage.store_next(sender), Op.ORIGIN)
        + Op.SSTORE(storage.store_next(sender), Op.CALLER)
        + Op.SSTORE(storage.store_next(tx_value), Op.CALLVALUE)
        + suffix
    )
    set_code_to_address = pre.deploy_contract(
        set_code,
    )
    tx = Transaction(
        gas_limit=10_000_000,
        to=sender,
        value=tx_value,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=1,
                signer=sender,
            ),
        ],
        sender=sender,
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            # The delegate contract's own storage must stay untouched; writes
            # land in the sender's (delegating EOA's) storage.
            set_code_to_address: Account(storage=dict.fromkeys(storage, 0)),
            sender: Account(
                nonce=2,
                code=Spec.delegation_designation(set_code_to_address),
                storage=storage if succeeds else {},
            ),
        },
    )
@pytest.mark.parametrize(
    "eoa_balance,self_sponsored",
    [
        pytest.param(0, False, id="zero_balance_authority"),
        pytest.param(1, False, id="one_wei_balance_authority"),
        pytest.param(None, True, id="self_sponsored_tx"),
    ],
)
@pytest.mark.parametrize(
    "tx_value",
    [0, 1],
)
@pytest.mark.parametrize(
    "suffix,succeeds",
    [
        pytest.param(Op.STOP, True, id="stop"),
        pytest.param(Op.RETURN(0, 0), True, id="return"),
        pytest.param(Op.REVERT(0, 0), False, id="revert"),
        pytest.param(Op.INVALID, False, id="invalid"),
        pytest.param(Om.OOG + Op.STOP, False, id="out-of-gas"),
    ],
)
def test_set_code_to_sstore(
    state_test: StateTestFiller,
    pre: Alloc,
    suffix: Bytecode,
    succeeds: bool,
    tx_value: int,
    eoa_balance: int,
    self_sponsored: bool,
) -> None:
    """Test the executing a simple SSTORE in a set-code transaction."""
    storage = Storage()
    if self_sponsored:
        sender = pre.fund_eoa()
        auth_signer = sender
    else:
        auth_signer = pre.fund_eoa(eoa_balance)
        sender = pre.fund_eoa()
    # Record ORIGIN, CALLER and CALLVALUE, then execute the parametrized
    # suffix that decides whether the storage writes are kept.
    set_code = (
        Op.SSTORE(storage.store_next(sender), Op.ORIGIN)
        + Op.SSTORE(storage.store_next(sender), Op.CALLER)
        + Op.SSTORE(storage.store_next(tx_value), Op.CALLVALUE)
        + suffix
    )
    set_code_to_address = pre.deploy_contract(
        set_code,
    )
    tx = Transaction(
        gas_limit=500_000,
        to=auth_signer,
        value=tx_value,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                # Self-sponsored: the tx itself bumps the sender's nonce 0->1
                # before the authorization is processed.
                nonce=1 if self_sponsored else 0,
                signer=auth_signer,
            ),
        ],
        sender=sender,
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            set_code_to_address: Account(
                storage=dict.fromkeys(storage, 0),
            ),
            auth_signer: Account(
                nonce=2 if self_sponsored else 1,
                code=Spec.delegation_designation(set_code_to_address),
                storage=storage if succeeds else {},
            ),
        },
    )
def test_set_code_to_non_empty_storage_non_zero_nonce(
    state_test: StateTestFiller,
    pre: Alloc,
) -> None:
    """Test the setting the code to an account that has non-empty storage."""
    # The authority EOA starts with pre-existing storage {0: 1}, which the
    # delegated code must be able to read and update.
    auth_signer = pre.fund_eoa(
        amount=0,
        storage=Storage({0: 1}),  # type: ignore
    )
    sender = pre.fund_eoa()
    set_code = Op.SSTORE(0, Op.ADD(Op.SLOAD(0), 1)) + Op.STOP
    set_code_to_address = pre.deploy_contract(
        set_code,
    )
    tx = Transaction(
        gas_limit=500_000,
        to=auth_signer,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=auth_signer.nonce,
                signer=auth_signer,
            ),
        ],
        sender=sender,
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            set_code_to_address: Account(
                storage={},
            ),
            # 1 (pre-existing) + 1 (incremented by the delegated code).
            auth_signer: Account(
                storage={0: 2},
            ),
        },
    )
@pytest.mark.parametrize(
    "access_list_in_tx",
    [
        pytest.param(None, id=""),
        pytest.param("sender", id="sender_in_access_list"),
        pytest.param("auth_signer", id="auth_signer_in_access_list"),
    ],
)
def test_set_code_to_sstore_then_sload(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    access_list_in_tx: str | None,
) -> None:
    """
    Test the executing a simple SSTORE then SLOAD in two separate set-code
    transactions.
    """
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    sender = pre.fund_eoa()
    storage_key_1 = 0x1
    storage_key_2 = 0x2
    storage_value = 0x1234
    # First delegation writes storage_key_1; the second delegation (a
    # different contract) reads it back and writes storage_key_2, proving
    # the EOA's storage persists across delegation changes.
    set_code_1 = Op.SSTORE(storage_key_1, storage_value) + Op.STOP
    set_code_1_address = pre.deploy_contract(set_code_1)
    set_code_2 = Op.SSTORE(storage_key_2, Op.ADD(Op.SLOAD(storage_key_1), 1)) + Op.STOP
    set_code_2_address = pre.deploy_contract(set_code_2)
    tx_1 = Transaction(
        gas_limit=100_000,
        to=auth_signer,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_1_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=sender,
    )
    access_list = (
        [
            AccessList(
                address=sender if access_list_in_tx == "sender" else auth_signer,
                storage_keys=[Hash(storage_key_1)],
            )
        ]
        if access_list_in_tx
        else []
    )
    tx_2 = Transaction(
        gas_limit=100_000,
        to=auth_signer,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_2_address,
                nonce=1,
                signer=auth_signer,
            ),
        ],
        access_list=access_list,
        sender=sender,
    )
    block = Block(
        txs=[tx_1, tx_2],
    )
    blockchain_test(
        pre=pre,
        post={
            auth_signer: Account(
                nonce=2,
                code=Spec.delegation_designation(set_code_2_address),
                storage={
                    storage_key_1: storage_value,
                    storage_key_2: storage_value + 1,
                },
            ),
        },
        blocks=[block],
    )
@pytest.mark.parametrize(
    "return_opcode",
    [
        Op.RETURN,
        Op.REVERT,
    ],
)
@pytest.mark.with_all_call_opcodes
def test_set_code_to_tstore_reentry(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
    return_opcode: Op,
    evm_code_type: EVMCodeType,
) -> None:
    """
    Test the executing a simple TSTORE in a set-code transaction, which also
    performs a re-entry to TLOAD the value.
    """
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    tload_value = 0x1234
    # First entry (TLOAD(1) == 0): TSTORE the value and re-enter self; the
    # re-entry returns (or reverts with) the TLOADed value, which is then
    # persisted to storage slot 2.
    set_code = Conditional(
        condition=Op.ISZERO(Op.TLOAD(1)),
        if_true=Op.TSTORE(1, tload_value)
        + call_opcode(address=Op.ADDRESS)
        + Op.RETURNDATACOPY(0, 0, 32)
        + Op.SSTORE(2, Op.MLOAD(0)),
        if_false=Op.MSTORE(0, Op.TLOAD(1)) + return_opcode(size=32),
        evm_code_type=evm_code_type,
    )
    set_code_to_address = pre.deploy_contract(set_code)
    tx = Transaction(
        gas_limit=100_000,
        to=auth_signer,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            auth_signer: Account(
                nonce=1,
                code=Spec.delegation_designation(set_code_to_address),
                storage={2: tload_value},
            ),
        },
    )
@pytest.mark.with_all_call_opcodes(
    selector=lambda call_opcode: call_opcode
    not in [Op.DELEGATECALL, Op.CALLCODE, Op.STATICCALL, Op.EXTDELEGATECALL, Op.EXTSTATICCALL]
)
@pytest.mark.parametrize("call_eoa_first", [True, False])
def test_set_code_to_tstore_available_at_correct_address(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
    call_eoa_first: bool,
) -> None:
    """
    Test TLOADing from slot 2 and then SSTORE this in slot 1, then TSTORE 3 in
    slot 2. This is done both from the EOA which is delegated to account A, and
    then A is called. The storage should stay empty on both the EOA and the
    delegated account.
    """
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    storage_slot = 1
    tload_slot = 2
    tstore_value = 3
    # Persist whatever is currently in transient slot 2, then TSTORE into it;
    # if transient storage leaked between addresses, the second callee would
    # SSTORE a non-zero value.
    tstore_check_code = Op.SSTORE(storage_slot, Op.TLOAD(tload_slot)) + Op.TSTORE(
        tload_slot, tstore_value
    )
    set_code_to_address = pre.deploy_contract(tstore_check_code)
    def make_call(call_type: Op, call_eoa: bool) -> Bytecode:
        # Build a call either to the delegating EOA or to the delegate itself.
        call_target = auth_signer if call_eoa else set_code_to_address
        return call_type(address=call_target)
    # Call EOA and contract back-to-back, in the parametrized order.
    chain_code = make_call(call_type=call_opcode, call_eoa=call_eoa_first) + make_call(
        call_type=call_opcode, call_eoa=not call_eoa_first
    )
    target_call_chain_address = pre.deploy_contract(chain_code)
    tx = Transaction(
        gas_limit=100_000,
        to=target_call_chain_address,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            auth_signer: Account(
                storage={storage_slot: 0},
            ),
            set_code_to_address: Account(
                storage={storage_slot: 0},
            ),
        },
    )
@pytest.mark.parametrize(
    "external_sendall_recipient",
    [False, True],
)
@pytest.mark.parametrize(
    "balance",
    [0, 1],
)
def test_set_code_to_self_destruct(
    state_test: StateTestFiller,
    pre: Alloc,
    external_sendall_recipient: bool,
    balance: int,
) -> None:
    """Test the executing self-destruct opcode in a set-code transaction."""
    auth_signer = pre.fund_eoa(balance)
    if external_sendall_recipient:
        recipient = pre.fund_eoa(0)
    else:
        # Self-destruct sends the balance to the account itself.
        recipient = auth_signer
    set_code_to_address = pre.deploy_contract(Op.SSTORE(1, 1) + Op.SELFDESTRUCT(recipient))
    tx = Transaction(
        gas_limit=10_000_000,
        to=auth_signer,
        value=0,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    # The delegating account is not deleted: the SSTORE before SELFDESTRUCT
    # persists, only the balance moves (post-Cancun same-tx semantics).
    post = {
        auth_signer: Account(
            nonce=1,
            code=Spec.delegation_designation(set_code_to_address),
            storage={1: 1},
            balance=balance if not external_sendall_recipient else 0,
        ),
    }
    if external_sendall_recipient and balance > 0:
        post[recipient] = Account(balance=balance)
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post=post,
    )
@pytest.mark.with_all_create_opcodes
@pytest.mark.slow()
def test_set_code_to_contract_creator(
    state_test: StateTestFiller,
    pre: Alloc,
    create_opcode: Op,
    evm_code_type: EVMCodeType,
) -> None:
    """
    Test the executing a contract-creating opcode in a set-code transaction.
    """
    storage = Storage()
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    deployed_code: Bytecode | Container = Op.STOP
    initcode: Bytecode | Container
    if evm_code_type == EVMCodeType.LEGACY:
        initcode = Initcode(deploy_code=deployed_code)
    elif evm_code_type == EVMCodeType.EOF_V1:
        deployed_code = Container.Code(deployed_code)
        initcode = Container.Init(deploy_container=deployed_code)
    else:
        raise ValueError(f"Unsupported EVM code type: {evm_code_type}")
    # The create happens in the EOA's context, so the created address derives
    # from the EOA's address and its nonce (1 after the authorization).
    deployed_contract_address = compute_create_address(
        address=auth_signer,
        nonce=1,
        initcode=initcode,
        opcode=create_opcode,
    )
    creator_code: Bytecode | Container
    if evm_code_type == EVMCodeType.LEGACY:
        # Legacy path reads initcode from calldata and runs the create opcode.
        creator_code = Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE) + Op.SSTORE(
            storage.store_next(deployed_contract_address),
            create_opcode(value=0, offset=0, size=Op.CALLDATASIZE),
        )
    elif evm_code_type == EVMCodeType.EOF_V1:
        # EOF path embeds the init container as a subcontainer section.
        creator_code = Container(
            sections=[
                Section.Code(
                    code=Op.EOFCREATE[0](0, 0, 0, 0) + Op.STOP(),
                ),
                Section.Container(
                    container=initcode,
                ),
            ]
        )
    else:
        raise ValueError(f"Unsupported EVM code type: {evm_code_type}")
    creator_code_address = pre.deploy_contract(creator_code)
    tx = Transaction(
        gas_limit=10_000_000,
        to=auth_signer,
        value=0,
        data=initcode if evm_code_type == EVMCodeType.LEGACY else b"",
        authorization_list=[
            AuthorizationTuple(
                address=creator_code_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            creator_code_address: Account(storage={}),
            auth_signer: Account(
                # Nonce 2: one from the authorization, one from the create.
                nonce=2,
                code=Spec.delegation_designation(creator_code_address),
                storage=storage,
            ),
            deployed_contract_address: Account(
                code=deployed_code,
                storage={},
            ),
        },
    )
@pytest.mark.parametrize(
    "value",
    [0, 1],
)
@pytest.mark.with_all_call_opcodes
def test_set_code_to_self_caller(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
    value: int,
    evm_code_type: EVMCodeType,
) -> None:
    """
    Test executing a self-call in a set-code transaction.

    The delegated code calls its own (delegating) address; the re-entered
    code writes a success marker, which must fail for static call variants.
    """
    if "value" not in call_opcode.kwargs and value != 0:
        pytest.skip(f"Call opcode {call_opcode} does not support value")
    storage = Storage()
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    static_call = call_opcode in [Op.STATICCALL, Op.EXTSTATICCALL]
    # Slot order matters: Storage.store_next assigns consecutive slots.
    first_entry_slot = storage.store_next(True)
    # The re-entry SSTORE succeeds only for non-static calls.
    re_entry_success_slot = storage.store_next(not static_call)
    re_entry_call_return_code_slot = storage.store_next(
        call_return_code(opcode=call_opcode, success=not static_call)
    )
    if "value" in call_opcode.kwargs:
        call_bytecode = call_opcode(address=auth_signer, value=value)
    else:
        call_bytecode = call_opcode(address=auth_signer)
    # First entry: mark the flag and self-call; second entry (re-entry):
    # record success. The flag slot distinguishes the two passes.
    set_code = Conditional(
        condition=Op.ISZERO(Op.SLOAD(first_entry_slot)),
        if_true=Op.SSTORE(first_entry_slot, 1)
        + Op.SSTORE(re_entry_call_return_code_slot, call_bytecode)
        + Op.STOP,
        if_false=Op.SSTORE(re_entry_success_slot, 1) + Op.STOP,
        evm_code_type=evm_code_type,
    )
    set_code_to_address = pre.deploy_contract(set_code)
    tx = Transaction(
        gas_limit=10_000_000,
        to=auth_signer,
        value=value,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            set_code_to_address: Account(storage={}),
            auth_signer: Account(
                nonce=1,
                code=Spec.delegation_designation(set_code_to_address),
                storage=storage,
                # The self-call moves value back to the same account, so the
                # signer keeps the transaction value.
                balance=auth_account_start_balance + value,
            ),
        },
    )
@pytest.mark.execute(pytest.mark.skip(reason="excessive gas"))
def test_set_code_max_depth_call_stack(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """
    Test re-entry to delegated account until the max call stack depth possible
    in a transaction is reached.

    A transient-storage counter tracks the recursion depth; the outermost
    frame persists the final depth to storage after the recursion unwinds.
    """
    storage = Storage()
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    tx_gas_limit_cap = fork.transaction_gas_limit_cap()
    # The reachable depth depends on the available gas; when a fork caps the
    # transaction gas limit, fewer nested calls fit before running out.
    match tx_gas_limit_cap:
        case None:
            gas_limit = 100_000_000_000_000
            max_depth = 1025
        case 16_777_216:
            gas_limit = tx_gas_limit_cap
            # NOTE(review): 389 is the empirically reachable depth under the
            # 2^24 gas cap — re-derive if call costs change.
            max_depth = 389
        case _:
            raise NotImplementedError(f"Unexpected transaction gas limit cap: {tx_gas_limit_cap}")
    # TLOAD(0)==0 only in the outermost frame: it seeds the counter, recurses,
    # then stores the final counter value; inner frames just increment and
    # recurse further.
    set_code = Conditional(
        condition=Op.ISZERO(Op.TLOAD(0)),
        if_true=Op.TSTORE(0, 1)
        + Op.CALL(address=auth_signer)
        + Op.SSTORE(storage.store_next(max_depth), Op.TLOAD(0)),
        if_false=Op.TSTORE(0, Op.ADD(1, Op.TLOAD(0))) + Op.CALL(address=auth_signer),
    )
    set_code_to_address = pre.deploy_contract(set_code)
    tx = Transaction(
        gas_limit=gas_limit,
        to=auth_signer,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(gas_limit=tx.gas_limit),
        pre=pre,
        tx=tx,
        post={
            set_code_to_address: Account(storage={}),
            auth_signer: Account(
                nonce=1,
                code=Spec.delegation_designation(set_code_to_address),
                storage=storage,
                balance=auth_account_start_balance,
            ),
        },
    )
@pytest.mark.with_all_call_opcodes
@pytest.mark.parametrize(
    "value",
    [0, 1],
)
def test_set_code_call_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
    value: int,
) -> None:
    """
    Test calling a set-code account from another set-code account.

    Depending on the call opcode, the second delegate's storage write lands
    either on the second signer (regular calls) or on the first signer
    (delegatecall variants), or is rejected entirely (static variants).
    """
    if "value" not in call_opcode.kwargs and value != 0:
        pytest.skip(f"Call opcode {call_opcode} does not support value")
    auth_signer_1 = pre.fund_eoa(auth_account_start_balance)
    storage_1 = Storage()
    static_call = call_opcode in [Op.STATICCALL, Op.EXTSTATICCALL]
    set_code_1_call_result_slot = storage_1.store_next(
        call_return_code(opcode=call_opcode, success=not static_call)
    )
    set_code_1_success = storage_1.store_next(True)
    auth_signer_2 = pre.fund_eoa(auth_account_start_balance)
    # Keep storage_2's slots disjoint from storage_1's so the two accounts'
    # expected writes never collide when merged below.
    storage_2 = Storage().set_next_slot(storage_1.peek_slot())
    set_code_2_success = storage_2.store_next(not static_call)
    if "value" in call_opcode.kwargs:
        call_bytecode = call_opcode(address=auth_signer_2, value=value)
    else:
        call_bytecode = call_opcode(address=auth_signer_2)
    set_code_1 = (
        Op.SSTORE(set_code_1_call_result_slot, call_bytecode)
        + Op.SSTORE(set_code_1_success, 1)
        + Op.STOP
    )
    set_code_to_address_1 = pre.deploy_contract(set_code_1)
    set_code_2 = Op.SSTORE(set_code_2_success, 1) + Op.STOP
    set_code_to_address_2 = pre.deploy_contract(set_code_2)
    tx = Transaction(
        gas_limit=10_000_000,
        to=auth_signer_1,
        value=value,
        authorization_list=[
            AuthorizationTuple(
                address=set_code_to_address_1,
                nonce=0,
                signer=auth_signer_1,
            ),
            AuthorizationTuple(
                address=set_code_to_address_2,
                nonce=0,
                signer=auth_signer_2,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        # Delegate contracts themselves stay untouched: all writes go to the
        # delegating EOAs.
        post={
            set_code_to_address_1: Account(storage=dict.fromkeys(storage_1, 0)),
            set_code_to_address_2: Account(storage=dict.fromkeys(storage_2, 0)),
            auth_signer_1: Account(
                nonce=1,
                code=Spec.delegation_designation(set_code_to_address_1),
                # Delegatecall variants execute set_code_2 in signer 1's
                # context, so its write also lands on signer 1.
                storage=(
                    storage_1
                    if call_opcode in [Op.CALL, Op.STATICCALL, Op.EXTCALL, Op.EXTSTATICCALL]
                    else storage_1 + storage_2
                ),
                balance=(0 if call_opcode in [Op.CALL, Op.EXTCALL] else value)
                + auth_account_start_balance,
            ),
            auth_signer_2: Account(
                nonce=1,
                code=Spec.delegation_designation(set_code_to_address_2),
                storage=storage_2 if call_opcode in [Op.CALL, Op.EXTCALL] else {},
                # Only value-bearing regular calls transfer the value onward.
                balance=(value if call_opcode in [Op.CALL, Op.EXTCALL] else 0)
                + auth_account_start_balance,
            ),
        },
    )
def test_address_from_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
) -> None:
    """
    Test the ADDRESS opcode in a set-code transaction: delegated code must
    observe the delegating EOA's own address, not the delegate's.
    """
    signer = pre.fund_eoa(auth_account_start_balance)
    expected_storage = Storage()
    # Delegate code stores ADDRESS; the expected value is the EOA itself.
    delegate_code = Op.SSTORE(expected_storage.store_next(signer), Op.ADDRESS) + Op.STOP
    delegate_address = pre.deploy_contract(delegate_code)
    authorization = AuthorizationTuple(
        address=delegate_address,
        nonce=0,
        signer=signer,
    )
    tx = Transaction(
        gas_limit=10_000_000,
        to=signer,
        value=0,
        authorization_list=[authorization],
        sender=pre.fund_eoa(),
    )
    post = {
        # The delegate contract's own storage stays empty.
        delegate_address: Account(storage={}),
        signer: Account(
            nonce=1,
            code=Spec.delegation_designation(delegate_address),
            storage=expected_storage,
        ),
    }
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
def test_tx_into_self_delegating_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
) -> None:
    """
    Test a transaction whose entry-point is a set-code account that
    delegates to itself.
    """
    signer = pre.fund_eoa(auth_account_start_balance)
    # The EOA authorizes delegation to its own address.
    self_delegation = AuthorizationTuple(
        address=signer,
        nonce=0,
        signer=signer,
    )
    tx = Transaction(
        gas_limit=10_000_000,
        to=signer,
        value=0,
        authorization_list=[self_delegation],
        sender=pre.fund_eoa(),
    )
    post = {
        signer: Account(
            nonce=1,
            code=Spec.delegation_designation(signer),
        ),
    }
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
def test_tx_into_chain_delegating_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
) -> None:
    """
    Test a transaction whose entry-point is a set-code account that
    delegates to another set-code account (a two-account delegation cycle).
    """
    signer_a = pre.fund_eoa(auth_account_start_balance)
    signer_b = pre.fund_eoa(auth_account_start_balance)
    # Each EOA delegates to the other, forming a cycle.
    cycle_authorizations = [
        AuthorizationTuple(
            address=signer_b,
            nonce=0,
            signer=signer_a,
        ),
        AuthorizationTuple(
            address=signer_a,
            nonce=0,
            signer=signer_b,
        ),
    ]
    tx = Transaction(
        gas_limit=10_000_000,
        to=signer_a,
        value=0,
        authorization_list=cycle_authorizations,
        sender=pre.fund_eoa(),
    )
    post = {
        signer_a: Account(nonce=1, code=Spec.delegation_designation(signer_b)),
        signer_b: Account(nonce=1, code=Spec.delegation_designation(signer_a)),
    }
    state_test(env=Environment(), pre=pre, tx=tx, post=post)
@pytest.mark.with_all_call_opcodes
def test_call_into_self_delegating_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
) -> None:
    """
    Test call into a set-code account that delegates to itself.

    The call must fail (delegation to a delegated account does not resolve
    further); EXTDELEGATECALL reports this as a revert-style failure.
    """
    auth_signer = pre.fund_eoa(auth_account_start_balance)
    storage = Storage()
    entry_code = (
        Op.SSTORE(
            storage.store_next(
                call_return_code(
                    opcode=call_opcode,
                    success=False,
                    revert=(call_opcode == Op.EXTDELEGATECALL),
                )
            ),
            call_opcode(address=auth_signer),
        )
        + Op.STOP
    )
    entry_address = pre.deploy_contract(entry_code)
    tx = Transaction(
        gas_limit=10_000_000,
        to=entry_address,
        value=0,
        authorization_list=[
            # Self-delegation: the EOA points its code at its own address.
            AuthorizationTuple(
                address=auth_signer,
                nonce=0,
                signer=auth_signer,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            entry_address: Account(storage=storage),
            auth_signer: Account(nonce=1, code=Spec.delegation_designation(auth_signer)),
        },
    )
@pytest.mark.with_all_call_opcodes
def test_call_into_chain_delegating_set_code(
    state_test: StateTestFiller,
    pre: Alloc,
    call_opcode: Op,
) -> None:
    """
    Test call into a set-code account that delegates to another set-code
    account.

    Like the self-delegation case, the nested delegation does not resolve,
    so the call must fail (revert-style for EXTDELEGATECALL).
    """
    auth_signer_1 = pre.fund_eoa(auth_account_start_balance)
    auth_signer_2 = pre.fund_eoa(auth_account_start_balance)
    storage = Storage()
    entry_code = (
        Op.SSTORE(
            storage.store_next(
                call_return_code(
                    opcode=call_opcode,
                    success=False,
                    revert=(call_opcode == Op.EXTDELEGATECALL),
                )
            ),
            call_opcode(address=auth_signer_1),
        )
        + Op.STOP
    )
    entry_address = pre.deploy_contract(entry_code)
    tx = Transaction(
        gas_limit=10_000_000,
        to=entry_address,
        value=0,
        # The two EOAs delegate to each other, forming a cycle.
        authorization_list=[
            AuthorizationTuple(
                address=auth_signer_2,
                nonce=0,
                signer=auth_signer_1,
            ),
            AuthorizationTuple(
                address=auth_signer_1,
                nonce=0,
                signer=auth_signer_2,
            ),
        ],
        sender=pre.fund_eoa(),
    )
    state_test(
        env=Environment(),
        pre=pre,
        tx=tx,
        post={
            entry_address: Account(storage=storage),
            auth_signer_1: Account(nonce=1, code=Spec.delegation_designation(auth_signer_2)),
            auth_signer_2: Account(nonce=1, code=Spec.delegation_designation(auth_signer_1)),
        },
    )
@pytest.mark.parametrize(
"balance",
[0, 1],
)
@pytest.mark.parametrize(
"set_code_type",
list(AddressType),
ids=lambda address_type: address_type.name,
)
def test_ext_code_on_set_code(
state_test: StateTestFiller,
pre: Alloc,
balance: int,
set_code_type: AddressType,
) -> None:
"""Test different ext*code operations on a set-code address."""
auth_signer = pre.fund_eoa(balance)
slot = count(1)
slot_ext_code_size_result = next(slot)
slot_ext_code_hash_result = next(slot)
slot_ext_code_copy_result = next(slot)
slot_ext_balance_result = next(slot)
callee_code = (
Op.SSTORE(slot_ext_code_size_result, Op.EXTCODESIZE(auth_signer))
+ Op.SSTORE(slot_ext_code_hash_result, Op.EXTCODEHASH(auth_signer))
+ Op.EXTCODECOPY(auth_signer, 0, 0, Op.EXTCODESIZE(auth_signer))
+ Op.SSTORE(slot_ext_code_copy_result, Op.MLOAD(0))
+ Op.SSTORE(slot_ext_balance_result, Op.BALANCE(auth_signer))
+ Op.STOP
)
callee_address = pre.deploy_contract(callee_code)
set_code_to_address: Address
set_code: Bytecode | Bytes
match set_code_type:
case AddressType.EMPTY_ACCOUNT:
set_code = Bytecode()
set_code_to_address = pre.fund_eoa(0)
case AddressType.EOA:
set_code = Bytecode()
set_code_to_address = pre.fund_eoa(1)
case AddressType.EOA_WITH_SET_CODE:
set_code_account = pre.fund_eoa(0)
set_code = Spec.delegation_designation(set_code_account)
set_code_to_address = pre.fund_eoa(1, delegation=set_code_account)
case AddressType.CONTRACT:
set_code = Op.STOP
set_code_to_address = pre.deploy_contract(set_code)
case _:
raise ValueError(f"Unsupported set code type: {set_code_type}")
callee_storage = Storage()
callee_storage[slot_ext_code_size_result] = len(
Spec.delegation_designation(set_code_to_address)
)
callee_storage[slot_ext_code_hash_result] = Spec.delegation_designation(
set_code_to_address
).keccak256()
callee_storage[slot_ext_code_copy_result] = Hash(
Spec.delegation_designation(set_code_to_address), right_padding=True
)
callee_storage[slot_ext_balance_result] = balance
tx = Transaction(
gas_limit=10_000_000,
to=callee_address,
authorization_list=[
AuthorizationTuple(
address=set_code_to_address,
nonce=0,
signer=auth_signer,
),
],
sender=pre.fund_eoa(),
)
state_test(
env=Environment(),
pre=pre,
tx=tx,
post={
set_code_to_address: (
Account.NONEXISTENT
if set_code_type == AddressType.EMPTY_ACCOUNT
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_calls.py | tests/prague/eip7702_set_code_tx/test_calls.py | """Test related to making calls to accounts having a delegation set on them."""
import itertools
from enum import Enum, auto, unique
import pytest
from ethereum_test_tools import (
Account,
Address,
Alloc,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
# All tests in this module are valid from the Prague fork onward.
pytestmark = pytest.mark.valid_from("Prague")
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-7702.md"
REFERENCE_SPEC_VERSION = "99f1be49f37c034bdd5c082946f5968710dbfc87"
# Expected stack result of a legacy DELEGATECALL: 0 on failure, 1 on success
# (compared against slot_call_result in test_delegate_call_targets).
LEGACY_CALL_FAILURE = 0
LEGACY_CALL_SUCCESS = 1
"""Storage addresses for common testing fields"""
# Consecutive storage slots allocated from 1 upward.
_slot = itertools.count(1)
slot_code_worked = next(_slot)
slot_call_result = next(_slot)
slot_returndata = next(_slot)
slot_last_slot = next(_slot)
"""Storage values for common testing fields"""
# Sentinel value proving the test code path executed to completion.
value_code_worked = 0x2015
# Address of the identity precompile (0x04), used as a call target.
identity = Address(0x04)
@unique
class TargetAccountType(Enum):
    """
    The kind of account a call is directed at (directly or via 7702
    delegation).
    """

    EMPTY = auto()  # account with no code and no balance
    EOA = auto()  # funded account with no code
    LEGACY_CONTRACT = auto()  # contract that simply stops
    LEGACY_CONTRACT_INVALID = auto()  # contract that executes INVALID
    LEGACY_CONTRACT_REVERT = auto()  # contract that reverts
    IDENTITY_PRECOMPILE = auto()  # the identity precompile at 0x04

    def __str__(self) -> str:
        """Return the member name as the test-id string."""
        return self.name
@pytest.fixture
def target_address(pre: Alloc, target_account_type: TargetAccountType) -> Address:
    """
    Target address of the call depending on required type of account.

    Funds/deploys a fresh account of the requested kind, except for the
    identity precompile which is a fixed address.
    """
    match target_account_type:
        case TargetAccountType.EMPTY:
            return pre.fund_eoa(amount=0)
        case TargetAccountType.EOA:
            return pre.fund_eoa()
        case TargetAccountType.LEGACY_CONTRACT:
            return pre.deploy_contract(
                code=Op.STOP,
            )
        case TargetAccountType.LEGACY_CONTRACT_INVALID:
            return pre.deploy_contract(
                code=Op.INVALID,
            )
        case TargetAccountType.LEGACY_CONTRACT_REVERT:
            return pre.deploy_contract(
                code=Op.REVERT(0, 0),
            )
        case TargetAccountType.IDENTITY_PRECOMPILE:
            # NOTE(review): no catch-all case — an unhandled enum member
            # would silently return None; TargetAccountType is currently
            # exhaustive here.
            return identity
@pytest.mark.parametrize("target_account_type", TargetAccountType)
@pytest.mark.parametrize("delegate", [True, False])
@pytest.mark.parametrize("call_from_initcode", [True, False])
def test_delegate_call_targets(
    state_test: StateTestFiller,
    pre: Alloc,
    target_account_type: TargetAccountType,
    target_address: Address,
    delegate: bool,
    call_from_initcode: bool,
) -> None:
    """
    Test contracts doing delegatecall to various targets resolved via 7702
    delegation.

    The DELEGATECALL result must be identical whether the target is reached
    directly or through a delegation indirection, and whether the caller is
    a deployed contract or initcode.
    """
    env = Environment()
    if delegate:
        # Interpose a delegating EOA so the call resolves through 7702.
        target_address = pre.fund_eoa(0, delegation=target_address)
    delegate_call_code = Op.SSTORE(
        slot_call_result, Op.DELEGATECALL(address=target_address)
    ) + Op.SSTORE(slot_code_worked, value_code_worked)
    if call_from_initcode:
        # Call from initcode
        caller_contract = delegate_call_code + Op.RETURN(0, 0)
        tx = Transaction(
            sender=pre.fund_eoa(),
            to=None,
            data=caller_contract,
            gas_limit=4_000_000,
        )
        calling_contract_address = tx.created_contract
    else:
        # Normal call from existing contract
        caller_contract = delegate_call_code + Op.STOP
        calling_contract_address = pre.deploy_contract(caller_contract)
        tx = Transaction(
            sender=pre.fund_eoa(),
            to=calling_contract_address,
            gas_limit=4_000_000,
        )
    # Only targets that abort (INVALID) or revert make DELEGATECALL fail.
    calling_storage = {
        slot_code_worked: value_code_worked,
        slot_call_result: LEGACY_CALL_FAILURE
        if target_account_type
        in [TargetAccountType.LEGACY_CONTRACT_INVALID, TargetAccountType.LEGACY_CONTRACT_REVERT]
        else LEGACY_CALL_SUCCESS,
    }
    post = {
        calling_contract_address: Account(storage=calling_storage),
    }
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/helpers.py | tests/prague/eip7702_set_code_tx/helpers.py | """
Helper types, functions and classes for testing EIP-7702 Set Code Transaction.
"""
from enum import Enum, auto
class AddressType(Enum):
    """
    Kind of account used in an authorization: either the authority that
    signs the tuple, or the address the authority's code is set to.
    """

    EMPTY_ACCOUNT = auto()  # no balance, no nonce, no code
    EOA = auto()  # funded externally-owned account
    EOA_WITH_SET_CODE = auto()  # EOA that already has a delegation set
    CONTRACT = auto()  # account with deployed code
class ChainIDType(Enum):
    """
    Whether an authorization carries the generic chain ID (zero, valid on
    any chain) or the specific ID of the executing chain.
    """

    GENERIC = auto()
    CHAIN_SPECIFIC = auto()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/__init__.py | tests/prague/eip7702_set_code_tx/__init__.py | """Cross-client EIP-7702 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_gas.py | tests/prague/eip7702_set_code_tx/test_gas.py | """
Tests related to gas of set-code transactions from EIP-7702.
Tests related to gas of set-code transactions from
[EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
"""
from dataclasses import dataclass
from enum import Enum, auto
from itertools import cycle
from typing import Dict, Generator, Iterator, List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
EOA,
AccessList,
Account,
Address,
Alloc,
AuthorizationTuple,
Bytecode,
Bytes,
ChainConfig,
CodeGasMeasure,
StateTestFiller,
Storage,
Transaction,
TransactionException,
TransactionReceipt,
extend_with_defaults,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import AddressType, ChainIDType
from .spec import Spec, ref_spec_7702
# Reference spec metadata consumed by the test framework.
REFERENCE_SPEC_GIT_PATH = ref_spec_7702.git_path
REFERENCE_SPEC_VERSION = ref_spec_7702.version
# All tests in this module are valid from the Prague fork onward.
pytestmark = pytest.mark.valid_from("Prague")
# Enum classes used to parametrize the tests
class SignerType(Enum):
    """
    Whether every authorization tuple in the list is signed by a single
    authority or by distinct authorities (gas-cost test parametrization).
    """

    SINGLE_SIGNER = auto()
    MULTIPLE_SIGNERS = auto()
class AuthorizationInvalidityType(Enum):
    """Reasons an authorization tuple can fail validation."""

    INVALID_NONCE = auto()  # tuple nonce does not match the account nonce
    REPEATED_NONCE = auto()  # same nonce reused by a later tuple
    INVALID_CHAIN_ID = auto()  # chain ID matches neither 0 nor this chain
    AUTHORITY_IS_CONTRACT = auto()  # signer account already has code
class AccessListType(Enum):
    """
    How the transaction access list overlaps with the addresses appearing
    in the authorization list (gas-cost test parametrization).
    """

    EMPTY = auto()
    CONTAINS_AUTHORITY = auto()
    CONTAINS_SET_CODE_ADDRESS = auto()
    CONTAINS_AUTHORITY_AND_SET_CODE_ADDRESS = auto()

    def contains_authority(self) -> bool:
        """Return True if the access list contains the authority address."""
        return (
            self is AccessListType.CONTAINS_AUTHORITY
            or self is AccessListType.CONTAINS_AUTHORITY_AND_SET_CODE_ADDRESS
        )

    def contains_set_code_address(self) -> bool:
        """
        Return True if the access list contains the address to which the
        authority authorizes to set the code to.
        """
        return (
            self is AccessListType.CONTAINS_SET_CODE_ADDRESS
            or self is AccessListType.CONTAINS_AUTHORITY_AND_SET_CODE_ADDRESS
        )
# Fixtures used to parametrize the tests
@dataclass(kw_only=True)
class AuthorityWithProperties:
    """
    Pairs an authority EOA with the kind of account it was before the
    authorization and the expected validity of its authorization.
    """
    authority: EOA
    """The address of the authority to be used in the transaction."""
    address_type: AddressType
    """The type of the address the authority was before the authorization."""
    invalidity_type: AuthorizationInvalidityType | None
    """
    Whether the authorization will be invalid and if so, which type of
    invalidity it is.
    """
    @property
    def empty(self) -> bool:
        """
        Return True if the authority address is an empty account before the
        authorization.
        """
        return self.address_type == AddressType.EMPTY_ACCOUNT
@pytest.fixture()
def authority_iterator(
    pre: Alloc,
    sender: EOA,
    authority_type: AddressType | List[AddressType],
    authorize_to_address: Address,
    self_sponsored: bool,
) -> Iterator[AuthorityWithProperties]:
    """
    Fixture to return the generator for the authority addresses.

    A single `authority_type` repeats indefinitely; a list of types is
    cycled, producing one authority per requested type.
    """
    authority_type_iterator = (
        cycle([authority_type])
        if isinstance(authority_type, AddressType)
        else cycle(authority_type)
    )
    def generator(
        authority_type_iterator: Iterator[AddressType],
    ) -> Generator[AuthorityWithProperties, None, None]:
        for i, current_authority_type in enumerate(authority_type_iterator):
            match current_authority_type:
                case AddressType.EMPTY_ACCOUNT:
                    assert not self_sponsored, (
                        "Self-sponsored empty-account authority is not supported"
                    )
                    yield AuthorityWithProperties(
                        authority=pre.fund_eoa(0),
                        address_type=current_authority_type,
                        invalidity_type=None,
                    )
                case AddressType.EOA:
                    # In the self-sponsored case the first authority is the
                    # transaction sender itself.
                    if i == 0 and self_sponsored:
                        yield AuthorityWithProperties(
                            authority=sender,
                            address_type=current_authority_type,
                            invalidity_type=None,
                        )
                    else:
                        yield AuthorityWithProperties(
                            authority=pre.fund_eoa(),
                            address_type=current_authority_type,
                            invalidity_type=None,
                        )
                case AddressType.EOA_WITH_SET_CODE:
                    if i == 0 and self_sponsored:
                        yield AuthorityWithProperties(
                            authority=sender,
                            address_type=current_authority_type,
                            invalidity_type=None,
                        )
                    else:
                        # Authority is already delegated in the pre-state.
                        yield AuthorityWithProperties(
                            authority=pre.fund_eoa(0, delegation=authorize_to_address),
                            address_type=current_authority_type,
                            invalidity_type=None,
                        )
                case AddressType.CONTRACT:
                    assert not self_sponsored or i > 0, (
                        "Self-sponsored contract authority is not supported"
                    )
                    # Give the signing key's account code, making its
                    # authorization invalid by definition.
                    authority = pre.fund_eoa()
                    authority_account = pre[authority]
                    assert authority_account is not None
                    authority_account.code = Bytes(Op.STOP)
                    yield AuthorityWithProperties(
                        authority=authority,
                        address_type=current_authority_type,
                        invalidity_type=AuthorizationInvalidityType.AUTHORITY_IS_CONTRACT,
                    )
                case _:
                    raise ValueError(f"Unsupported authority type: {current_authority_type}")
    return generator(authority_type_iterator)
@dataclass(kw_only=True)
class AuthorizationWithProperties:
    """
    An authorization tuple bundled with its expected validity and the kind
    of account its authority was before the authorization.
    """
    tuple: AuthorizationTuple
    """The authorization tuple to be used in the transaction."""
    invalidity_type: AuthorizationInvalidityType | None
    """
    Whether the authorization is invalid and if so, which type of invalidity it
    is.
    """
    authority_type: AddressType
    """The type of the address the authority was before the authorization."""
    skip: bool
    """
    Whether the authorization should be skipped and therefore not included in
    the transaction.
    Used for tests where the authorization was already in the state before the
    transaction was created.
    """
    @property
    def empty(self) -> bool:
        """
        Return True if the authority address is an empty account before the
        authorization.
        """
        return self.authority_type == AddressType.EMPTY_ACCOUNT
@pytest.fixture
def authorization_list_with_properties(
    chain_config: ChainConfig,
    signer_type: SignerType,
    authorization_invalidity_type: AuthorizationInvalidityType | None,
    authorizations_count: int,
    invalid_authorization_index: int,
    chain_id_type: ChainIDType,
    authority_iterator: Iterator[AuthorityWithProperties],
    authorize_to_address: Address,
    self_sponsored: bool,
    re_authorize: bool,
) -> List[AuthorizationWithProperties]:
    """
    Fixture to return the authorization-list-with-properties for the given
    case.

    Builds `authorizations_count` tuples, deriving each tuple's nonce and
    chain ID from the requested invalidity type and from whether the
    authority's account nonce was already increased (self-sponsorship or a
    pre-existing delegation).  `invalid_authorization_index == -1` marks
    every authorization as invalid.
    """
    authorization_list: List[AuthorizationWithProperties] = []
    environment_chain_id = chain_config.chain_id
    match signer_type:
        case SignerType.SINGLE_SIGNER:
            authority_with_properties = next(authority_iterator)
            # We have to take into account the cases where the nonce has
            # already been increased before the authorization is processed.
            increased_nonce = (
                self_sponsored
                or authority_with_properties.address_type == AddressType.EOA_WITH_SET_CODE
            )
            for i in range(authorizations_count):
                # Get the validity of this authorization
                invalidity_type: AuthorizationInvalidityType | None
                # For REPEATED_NONCE, the first tuple is the valid one and
                # the repeats that follow are the invalid ones.
                if authorization_invalidity_type is None or (
                    authorization_invalidity_type == AuthorizationInvalidityType.REPEATED_NONCE
                    and i == 0
                ):
                    invalidity_type = authority_with_properties.invalidity_type
                else:
                    if i == invalid_authorization_index or invalid_authorization_index == -1:
                        invalidity_type = authorization_invalidity_type
                    else:
                        invalidity_type = authority_with_properties.invalidity_type
                # Get the nonce of this authorization
                match invalidity_type:
                    case AuthorizationInvalidityType.INVALID_NONCE:
                        # Deliberately off-by-one relative to the account's
                        # actual nonce at validation time.
                        nonce = 0 if increased_nonce else 1
                    case AuthorizationInvalidityType.REPEATED_NONCE:
                        nonce = 1 if increased_nonce else 0
                    case _:
                        # Valid tuples: each successful authorization bumps
                        # the authority nonce, so tuple i expects nonce i
                        # (offset by one if the nonce started at 1).
                        nonce = i if not increased_nonce else i + 1
                # chain_id 0 is the generic value accepted on any chain.
                chain_id = 0 if chain_id_type == ChainIDType.GENERIC else environment_chain_id
                if invalidity_type == AuthorizationInvalidityType.INVALID_CHAIN_ID:
                    chain_id = environment_chain_id + 1
                # Skip tuples whose delegation already exists in the
                # pre-state and should not be re-sent.
                skip = (
                    authority_with_properties.address_type == AddressType.EOA_WITH_SET_CODE
                    and not re_authorize
                )
                authorization_list.append(
                    AuthorizationWithProperties(
                        tuple=AuthorizationTuple(
                            chain_id=chain_id,
                            address=authorize_to_address,
                            nonce=nonce,
                            signer=authority_with_properties.authority,
                        ),
                        invalidity_type=invalidity_type,
                        authority_type=authority_with_properties.address_type,
                        skip=skip,
                    )
                )
            return authorization_list
        case SignerType.MULTIPLE_SIGNERS:
            if authorization_invalidity_type == AuthorizationInvalidityType.REPEATED_NONCE:
                # Reuse the first two authorities for the repeated nonce case
                authority_iterator = cycle([next(authority_iterator), next(authority_iterator)])
            for i in range(authorizations_count):
                authority_with_properties = next(authority_iterator)
                # Get the validity of this authorization
                # With distinct signers the first *two* tuples are the valid
                # ones in the REPEATED_NONCE case (one per reused authority).
                if authorization_invalidity_type is None or (
                    authorization_invalidity_type == AuthorizationInvalidityType.REPEATED_NONCE
                    and i <= 1
                ):
                    invalidity_type = authority_with_properties.invalidity_type
                else:
                    if i == invalid_authorization_index or invalid_authorization_index == -1:
                        invalidity_type = authorization_invalidity_type
                    else:
                        invalidity_type = authority_with_properties.invalidity_type
                # Get the nonce of this authorization
                # Only the first signer can be the self-sponsoring sender;
                # pre-delegated authorities always start at nonce 1.
                increased_nonce = (
                    self_sponsored and i == 0
                ) or authority_with_properties.address_type == AddressType.EOA_WITH_SET_CODE
                if increased_nonce:
                    if invalidity_type == AuthorizationInvalidityType.INVALID_NONCE:
                        nonce = 0
                    else:
                        nonce = 1
                else:
                    if invalidity_type == AuthorizationInvalidityType.INVALID_NONCE:
                        nonce = 1
                    else:
                        nonce = 0
                chain_id = 0 if chain_id_type == ChainIDType.GENERIC else environment_chain_id
                if invalidity_type == AuthorizationInvalidityType.INVALID_CHAIN_ID:
                    chain_id = environment_chain_id + 1
                skip = False
                if (
                    authority_with_properties.address_type == AddressType.EOA_WITH_SET_CODE
                    and not re_authorize
                ):
                    skip = True
                authorization_list.append(
                    AuthorizationWithProperties(
                        tuple=AuthorizationTuple(
                            chain_id=chain_id,
                            address=authorize_to_address,
                            nonce=nonce,
                            signer=authority_with_properties.authority,
                        ),
                        invalidity_type=invalidity_type,
                        authority_type=authority_with_properties.address_type,
                        skip=skip,
                    )
                )
            return authorization_list
        case _:
            raise ValueError(f"Unsupported authorization list case: {signer_type}")
@pytest.fixture
def authorization_list(
    authorization_list_with_properties: List[AuthorizationWithProperties],
) -> List[AuthorizationTuple]:
    """Fixture to return the authorization list for the given case."""
    tuples: List[AuthorizationTuple] = []
    for entry in authorization_list_with_properties:
        # Skipped entries were already delegated in the pre-state and must
        # not be re-included in the transaction.
        if entry.skip:
            continue
        tuples.append(entry.tuple)
    return tuples
@pytest.fixture()
def authorize_to_address(request: pytest.FixtureRequest, pre: Alloc) -> Address:
    """
    Fixture to return the address to which the authority authorizes to set the
    code to.
    """
    requested_type = request.param
    if requested_type == AddressType.EMPTY_ACCOUNT:
        return pre.fund_eoa(0)
    if requested_type == AddressType.EOA:
        return pre.fund_eoa(1)
    if requested_type == AddressType.CONTRACT:
        return pre.deploy_contract(Op.STOP)
    raise ValueError(f"Unsupported authorization address case: {request.param}")
@pytest.fixture()
def access_list(
    access_list_case: AccessListType,
    authorization_list: List[AuthorizationTuple],
) -> List[AccessList]:
    """Fixture to return the access list for the given case."""
    entries: List[AccessList] = []
    if access_list_case == AccessListType.EMPTY:
        return entries
    if access_list_case.contains_authority():
        # Deduplicate signers before adding them to the access list.
        for authority in {a.signer for a in authorization_list}:
            entries.append(AccessList(address=authority, storage_keys=[0]))
    if access_list_case.contains_set_code_address():
        # Deduplicate delegation target addresses likewise.
        for delegated_address in {a.address for a in authorization_list}:
            entries.append(AccessList(address=delegated_address, storage_keys=[0]))
    return entries
@pytest.fixture()
def sender(
    pre: Alloc,
    authority_type: AddressType | List[AddressType],
    authorize_to_address: Address,
    self_sponsored: bool,
) -> EOA:
    """Fixture to return the sender address."""
    if self_sponsored:
        # A self-sponsored EOA_WITH_SET_CODE authority must already carry
        # the delegation when it acts as the transaction sender.
        if isinstance(authority_type, list):
            needs_delegation = AddressType.EOA_WITH_SET_CODE in authority_type
        else:
            needs_delegation = authority_type == AddressType.EOA_WITH_SET_CODE
        if needs_delegation:
            return pre.fund_eoa(delegation=authorize_to_address)
    return pre.fund_eoa()
# Helper functions to parametrize the tests
def gas_test_parameter_args(
    include_many: bool = True,
    include_data: bool = True,
    include_pre_authorized: bool = True,
    execution_gas_allowance: bool = False,
) -> dict:
    """
    Return the parametrize decorator arguments that can be used in all gas
    test functions.

    Args:
        include_many: Include cases that pack as many authorizations as fit
            within the transaction gas limit.
        include_data: Include cases that send non-empty calldata.
        include_pre_authorized: Include cases where the authority already has
            a delegation set in the pre-state.
        execution_gas_allowance: Reserve extra gas for the execution of the
            test code when computing the "many authorizations" count.
    """
    multiple_authorizations_count = 2
    # Defaults applied to every case below; each case only overrides the
    # fields that differ.
    defaults = {
        "signer_type": SignerType.SINGLE_SIGNER,
        "authorization_invalidity_type": None,
        "authorizations_count": 1,
        # -1 means all authorizations are equally invalid (no single index
        # singled out).
        "invalid_authorization_index": -1,
        "chain_id_type": ChainIDType.GENERIC,
        "authorize_to_address": AddressType.EMPTY_ACCOUNT,
        "access_list_case": AccessListType.EMPTY,
        "self_sponsored": False,
        "re_authorize": False,
        "authority_type": AddressType.EMPTY_ACCOUNT,
        "data": b"",
    }
    cases = [
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorizations_count": 1,
            },
            id="single_valid_authorization_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorizations_count": 1,
                "chain_id_type": ChainIDType.CHAIN_SPECIFIC,
            },
            id="single_valid_chain_specific_authorization_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_valid_authorizations_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "authorizations_count": 1,
            },
            id="single_invalid_nonce_authorization_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_CHAIN_ID,
                "authorizations_count": 1,
            },
            id="single_invalid_authorization_invalid_chain_id_single_signer",
        ),
        # The next two cases vary only the position of the single invalid
        # authorization (first vs. last).
        pytest.param(
            {
                "authority_type": AddressType.EOA_WITH_SET_CODE,
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "re_authorize": True,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "authorizations_count": multiple_authorizations_count,
                "invalid_authorization_index": 0,
            },
            id="single_invalid_authorization_eoa_authority_multiple_signers_1",
        ),
        pytest.param(
            {
                "authority_type": AddressType.EOA_WITH_SET_CODE,
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "re_authorize": True,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "authorizations_count": multiple_authorizations_count,
                "invalid_authorization_index": multiple_authorizations_count - 1,
            },
            id="single_invalid_authorization_eoa_authority_multiple_signers_2",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_invalid_nonce_authorizations_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_invalid_nonce_authorizations_multiple_signers",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authority_type": AddressType.EOA,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_NONCE,
                "self_sponsored": True,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_invalid_nonce_authorizations_self_sponsored_multiple_signers",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorization_invalidity_type": AuthorizationInvalidityType.INVALID_CHAIN_ID,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_invalid_chain_id_authorizations_single_signer",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_valid_authorizations_multiple_signers",
        ),
        pytest.param(
            {
                "signer_type": SignerType.SINGLE_SIGNER,
                "authorization_invalidity_type": AuthorizationInvalidityType.REPEATED_NONCE,
                "authorizations_count": multiple_authorizations_count,
            },
            id="first_valid_then_single_repeated_nonce_authorization",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authorization_invalidity_type": AuthorizationInvalidityType.REPEATED_NONCE,
                "authorizations_count": multiple_authorizations_count * 2,
            },
            id="first_valid_then_single_repeated_nonce_authorizations_multiple_signers",
        ),
        pytest.param(
            {
                "authorize_to_address": AddressType.EOA,
            },
            id="single_valid_authorization_to_eoa",
        ),
        pytest.param(
            {
                "authorize_to_address": AddressType.CONTRACT,
            },
            id="single_valid_authorization_to_contract",
        ),
        # Access-list interaction cases: the authority and/or the delegation
        # target are pre-warmed via the transaction access list.
        pytest.param(
            {
                "access_list_case": AccessListType.CONTAINS_AUTHORITY,
            },
            id="single_valid_authorization_with_authority_in_access_list",
        ),
        pytest.param(
            {
                "access_list_case": AccessListType.CONTAINS_SET_CODE_ADDRESS,
            },
            id="single_valid_authorization_with_set_code_address_in_access_list",
        ),
        pytest.param(
            {
                "access_list_case": AccessListType.CONTAINS_AUTHORITY_AND_SET_CODE_ADDRESS,
            },
            id="single_valid_authorization_with_authority_and_set_code_address_in_access_list",
        ),
        pytest.param(
            {
                "authority_type": AddressType.EOA,
            },
            id="single_valid_authorization_eoa_authority",
        ),
        pytest.param(
            {
                "authority_type": AddressType.EOA_WITH_SET_CODE,
                "re_authorize": True,
            },
            id="single_valid_re_authorization_eoa_authority",
        ),
        pytest.param(
            {
                "authority_type": AddressType.EOA,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_valid_authorizations_eoa_authority",
        ),
        pytest.param(
            {
                "self_sponsored": True,
                "authority_type": AddressType.EOA,
            },
            id="single_valid_authorization_eoa_self_sponsored_authority",
        ),
        pytest.param(
            {
                "self_sponsored": True,
                "authority_type": AddressType.EOA,
                "authorizations_count": multiple_authorizations_count,
            },
            id="multiple_valid_authorizations_eoa_self_sponsored_authority",
        ),
        # Cases with a contract authority require modifying the pre-alloc to
        # place code at an account that also signs an authorization.
        pytest.param(
            {
                "authority_type": AddressType.CONTRACT,
            },
            marks=pytest.mark.pre_alloc_modify,
            id="single_valid_authorization_invalid_contract_authority",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authority_type": [AddressType.EMPTY_ACCOUNT, AddressType.CONTRACT],
                "authorizations_count": multiple_authorizations_count,
            },
            marks=pytest.mark.pre_alloc_modify,
            id="multiple_authorizations_empty_account_then_contract_authority",
        ),
        pytest.param(
            {
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authority_type": [AddressType.EOA, AddressType.CONTRACT],
                "authorizations_count": multiple_authorizations_count,
            },
            marks=pytest.mark.pre_alloc_modify,
            id="multiple_authorizations_eoa_then_contract_authority",
        ),
        pytest.param(
            {
                "self_sponsored": True,
                "signer_type": SignerType.MULTIPLE_SIGNERS,
                "authority_type": [AddressType.EOA, AddressType.CONTRACT],
                "authorizations_count": multiple_authorizations_count,
            },
            marks=pytest.mark.pre_alloc_modify,
            id="multiple_authorizations_eoa_self_sponsored_then_contract_authority",
        ),
    ]
    if include_pre_authorized:
        cases += [
            pytest.param(
                {
                    "authority_type": AddressType.EOA_WITH_SET_CODE,
                    "re_authorize": False,
                },
                id="pre_authorized_eoa_authority_no_re_authorization",
            ),
            pytest.param(
                {
                    "authority_type": AddressType.EOA_WITH_SET_CODE,
                    "re_authorize": False,
                    "self_sponsored": True,
                },
                id="pre_authorized_eoa_authority_no_re_authorization_self_sponsored",
            ),
        ]
    if include_data:
        # Calldata cases exercise the zero/non-zero byte intrinsic gas costs.
        cases += [
            pytest.param(
                {
                    "data": b"\x01",
                },
                id="single_valid_authorization_with_single_non_zero_byte_data",
            ),
            pytest.param(
                {
                    "data": b"\x00",
                },
                id="single_valid_authorization_with_single_zero_byte_data",
            ),
        ]
    if include_many:
        # Fit as many authorizations as possible within the transaction gas
        # limit.
        # NOTE(review): 16_777_216 is 2**24, presumably the transaction gas
        # cap, minus the 21,000 base transaction cost — confirm.
        max_gas = 16_777_216 - 21_000
        if execution_gas_allowance:
            # Leave some gas for the execution of the test code.
            max_gas -= 1_000_000
        many_authorizations_count = max_gas // Spec.PER_EMPTY_ACCOUNT_COST
        cases += [
            pytest.param(
                {
                    "signer_type": SignerType.SINGLE_SIGNER,
                    "authorizations_count": many_authorizations_count,
                },
                id="many_valid_authorizations_single_signer",
            ),
            pytest.param(
                {
                    "signer_type": SignerType.MULTIPLE_SIGNERS,
                    "authorizations_count": many_authorizations_count,
                },
                id="many_valid_authorizations_multiple_signers",
            ),
            pytest.param(
                {
                    "signer_type": SignerType.SINGLE_SIGNER,
                    "authorization_invalidity_type": AuthorizationInvalidityType.REPEATED_NONCE,
                    "authorizations_count": many_authorizations_count,
                },
                id="first_valid_then_many_duplicate_authorizations",
            ),
        ]
    return extend_with_defaults(cases=cases, defaults=defaults, indirect=["authorize_to_address"])
# Tests
@pytest.mark.parametrize(
    **gas_test_parameter_args(include_pre_authorized=False, execution_gas_allowance=True)
)
@pytest.mark.slow()
def test_gas_cost(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
    authorization_list_with_properties: List[AuthorizationWithProperties],
    authorization_list: List[AuthorizationTuple],
    data: bytes,
    access_list: List[AccessList],
    sender: EOA,
) -> None:
    """
    Test gas at the execution start of a set-code transaction in multiple
    scenarios.

    The transaction gas limit is set to exactly intrinsic + execution gas,
    and the test asserts both the GAS value observed at the start of
    execution and the receipt's total gas used (including the EIP-7702
    existing-authority discount).
    """
    # Calculate the intrinsic gas cost of the authorizations, by default the
    # full empty account cost is charged for each authorization.
    intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(
        calldata=data,
        access_list=access_list,
        authorization_list_or_count=authorization_list,
    )
    # Count the valid authorizations that are eligible for the EIP-7702
    # refund: those whose authority account is non-empty, or whose authority
    # was already seen earlier in the same transaction (and so is no longer
    # empty when this authorization is processed).
    discounted_authorizations = 0
    seen_authority = set()
    for authorization_with_properties in authorization_list_with_properties:
        if authorization_with_properties.invalidity_type is None:
            authority = authorization_with_properties.tuple.signer
            if not authorization_with_properties.empty:
                # Non-empty account: adding before the membership check makes
                # this authorization always count as discounted.
                seen_authority.add(authority)
            if authority in seen_authority:
                discounted_authorizations += 1
            else:
                seen_authority.add(authority)
    discount_gas = (
        Spec.PER_EMPTY_ACCOUNT_COST - Spec.PER_AUTH_BASE_COST
    ) * discounted_authorizations
    # We calculate the exact gas required to execute the test code. We add
    # SSTORE opcodes in order to make sure that the refund is less than one
    # fifth (EIP-3529) of the total gas used, so we can see the full discount
    # being reflected in most of the tests.
    gas_costs = fork.gas_costs()
    gas_opcode_cost = gas_costs.G_BASE
    sstore_opcode_count = 10
    # Each SSTORE needs two pushes (key, value), except the first which
    # takes its value from the GAS opcode.
    push_opcode_count = (2 * (sstore_opcode_count)) - 1
    push_opcode_cost = gas_costs.G_VERY_LOW * push_opcode_count
    sstore_opcode_cost = gas_costs.G_STORAGE_SET * sstore_opcode_count
    cold_storage_cost = gas_costs.G_COLD_SLOAD * sstore_opcode_count
    execution_gas = gas_opcode_cost + push_opcode_cost + sstore_opcode_cost + cold_storage_cost
    # The first opcode that executes in the code is the GAS opcode, which costs
    # 2 gas, so we subtract that from the expected gas measure.
    expected_gas_measure = execution_gas - gas_opcode_cost
    test_code_storage = Storage()
    test_code = (
        Op.SSTORE(test_code_storage.store_next(expected_gas_measure), Op.GAS)
        + sum(
            Op.SSTORE(test_code_storage.store_next(1), 1) for _ in range(sstore_opcode_count - 1)
        )
        + Op.STOP
    )
    test_code_address = pre.deploy_contract(test_code)
    tx_gas_limit = intrinsic_gas + execution_gas
    # EIP-3529: refunds are capped at one fifth of the gas used.
    max_discount = tx_gas_limit // 5
    if discount_gas > max_discount:
        # Only one test hits this condition, but it's ok to also test this
        # case.
        discount_gas = max_discount
    gas_used = tx_gas_limit - discount_gas
    sender_account = pre[sender]
    assert sender_account is not None
    tx = Transaction(
        gas_limit=tx_gas_limit,
        to=test_code_address,
        value=0,
        data=data,
        authorization_list=authorization_list,
        access_list=access_list,
        sender=sender,
        expected_receipt=TransactionReceipt(gas_used=gas_used),
    )
    state_test(
        pre=pre,
        tx=tx,
        post={
            test_code_address: Account(storage=test_code_storage),
        },
    )
@pytest.mark.parametrize("check_delegated_account_first", [True, False])
@pytest.mark.parametrize(**gas_test_parameter_args(include_many=False, include_data=False))
def test_account_warming(
state_test: StateTestFiller,
pre: Alloc,
authorization_list_with_properties: List[AuthorizationWithProperties],
authorization_list: List[AuthorizationTuple],
access_list: List[AccessList],
data: bytes,
sender: EOA,
check_delegated_account_first: bool,
) -> None:
"""
Test warming of the authority and authorized accounts for set-code
transactions.
"""
# Overhead cost is the single push operation required for the address to
# check.
overhead_cost = 3 * len(Op.CALL.kwargs)
cold_account_cost = 2600
warm_account_cost = 100
access_list_addresses = {access_list.address for access_list in access_list}
# Dictionary to keep track of the addresses to check for warming, and the
# expected cost of accessing such account.
addresses_to_check: Dict[Address, int] = {}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/prague/eip7702_set_code_tx/test_eip_mainnet.py | tests/prague/eip7702_set_code_tx/test_eip_mainnet.py | """
abstract: Crafted tests for mainnet of [EIP-7702: Set EOA account code for one transaction](https://eips.ethereum.org/EIPS/eip-7702).
""" # noqa: E501
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
AuthorizationTuple,
StateTestFiller,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_7702
REFERENCE_SPEC_GIT_PATH = ref_spec_7702.git_path
REFERENCE_SPEC_VERSION = ref_spec_7702.version
pytestmark = [pytest.mark.valid_at("Prague"), pytest.mark.mainnet]
def test_eip_7702(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """Test the executing a simple SSTORE in a set-code transaction."""
    storage = Storage()
    # Self-sponsored: the transaction sender is also the authority signing
    # the delegation.
    sender = pre.fund_eoa()
    auth_signer = sender
    tx_value = 1
    # The delegated code stores ORIGIN, CALLER, and CALLVALUE; when executed
    # via the delegation, all three SSTOREs hit the EOA's own storage.
    set_code = (
        Op.SSTORE(storage.store_next(sender), Op.ORIGIN)
        + Op.SSTORE(storage.store_next(sender), Op.CALLER)
        + Op.SSTORE(storage.store_next(tx_value), Op.CALLVALUE)
        + Op.STOP
    )
    set_code_to_address = pre.deploy_contract(
        set_code,
    )
    authorization_list = [
        AuthorizationTuple(
            address=set_code_to_address,
            # nonce=1: the transaction itself bumps the self-sponsored
            # signer's nonce from 0 to 1 before the authorization is
            # validated.
            nonce=1,
            signer=auth_signer,
        ),
    ]
    gas_costs = fork.gas_costs()
    intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
    intrinsic_gas_cost = intrinsic_gas_cost_calc(
        access_list=[],
        authorization_list_or_count=authorization_list,
    )
    # Exact execution cost: 3 SSTOREs to cold slots, 3 PUSHes for the keys,
    # and 3 base-cost opcodes (ORIGIN, CALLER, CALLVALUE).
    execution_cost = (
        (gas_costs.G_COLD_SLOAD + gas_costs.G_STORAGE_SET) * 3
        + (gas_costs.G_VERY_LOW * 3)
        + (gas_costs.G_BASE * 3)
    )
    tx = Transaction(
        gas_limit=intrinsic_gas_cost + execution_cost,
        to=auth_signer,
        value=tx_value,
        authorization_list=authorization_list,
        sender=sender,
    )
    state_test(
        pre=pre,
        tx=tx,
        post={
            # The delegate contract's own storage stays untouched (all
            # writes landed on the EOA).
            set_code_to_address: Account(
                storage=dict.fromkeys(storage, 0),
            ),
            # nonce=2: one bump from the transaction, one from processing
            # the authorization.
            auth_signer: Account(
                nonce=2,
                code=Spec.delegation_designation(set_code_to_address),
                storage=storage,
            ),
        },
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/__init__.py | tests/unscheduled/__init__.py | """
Test cases for unscheduled EVM functionality. A temporary home for features
that are not yet CFI'd for inclusion in the next hardfork.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/eip7692_eof_v1/gas_test.py | tests/unscheduled/eip7692_eof_v1/gas_test.py | """Utility to generate gas usage related state tests automatically."""
import itertools
from ethereum_test_base_types.base_types import Address
from ethereum_test_tools import Account, Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_types.eof.v1 import Container, Section
from ethereum_test_vm import Bytecode, EVMCodeType
from ethereum_test_vm import Opcodes as Op
from .eip7069_extcall.spec import (
LEGACY_CALL_FAILURE,
LEGACY_CALL_SUCCESS,
)
# Gas charged for accessing an already-warm account (EIP-2929).
WARM_ACCOUNT_ACCESS_GAS = 100
"""Storage addresses for common testing fields"""
# Sequential storage-slot allocator shared by the gas harness below.
_slot = itertools.count()
slot_cold_gas = next(_slot)  # gas measured by the cold call to the subject
slot_warm_gas = next(_slot)  # gas measured by the warm call to the subject
slot_oog_call_result = next(_slot)  # result of the deliberately out-of-gas call
slot_sanity_call_result = next(_slot)  # result of the exactly-enough-gas call
def gas_test(
    state_test: StateTestFiller,
    env: Environment,
    pre: Alloc,
    setup_code: Bytecode,
    subject_code: Bytecode,
    tear_down_code: Bytecode,
    cold_gas: int,
    warm_gas: int | None = None,
    subject_subcontainer: Container | None = None,
    subject_address: Address | None = None,
    subject_balance: int = 0,
    oog_difference: int = 1,
    out_of_gas_testing: bool = True,
    *,
    prelude_code: Bytecode | None = None,
) -> None:
    """
    Create State Test to check the gas cost of a sequence of EOF code.

    `setup_code` and `tear_down_code` are called multiple times during the
    test, and MUST NOT have any side-effects which persist across message
    calls, and in particular, any effects on the gas usage of `subject_code`.

    The harness measures the gas of three runs (baseline without the subject,
    cold subject, warm subject), subtracts the baseline, and asserts the
    differences equal `cold_gas` and `warm_gas`. When `out_of_gas_testing` is
    enabled, it additionally calls the subject with `oog_difference` less gas
    than required (expected to fail) and with exactly enough gas (expected to
    succeed).
    """
    if cold_gas <= 0:
        raise ValueError(f"Target gas allocations (cold_gas) must be > 0, got {cold_gas}")
    if warm_gas is None:
        warm_gas = cold_gas
    sender = pre.fund_eoa()
    # Baseline contract runs only setup + teardown, so its gas can be
    # subtracted out of the subject runs.
    address_baseline = pre.deploy_contract(Container.Code(setup_code + tear_down_code))
    code_subject = setup_code + subject_code + tear_down_code
    address_subject = pre.deploy_contract(
        Container.Code(code_subject)
        if not subject_subcontainer
        else Container(
            sections=[
                Section.Code(code_subject),
                Section.Container(subject_subcontainer),
            ]
        ),
        balance=subject_balance,
        address=subject_address,
    )
    # 2 times GAS, POP, CALL, 6 times PUSH1 - instructions charged for at every
    # gas run
    gas_single_gas_run = 2 * 2 + 2 + WARM_ACCOUNT_ACCESS_GAS + 6 * 3
    address_legacy_harness = pre.deploy_contract(
        code=(
            # warm subject and baseline without executing
            (Op.BALANCE(address_subject) + Op.POP + Op.BALANCE(address_baseline) + Op.POP)
            # run any "prelude" code that may have universal side effects
            # NOTE(review): relies on `Bytecode + None` being a no-op when
            # prelude_code is None — confirm against Bytecode.__add__.
            + prelude_code
            # Baseline gas run
            + (
                Op.GAS
                + Op.CALL(address=address_baseline, gas=Op.GAS)
                + Op.POP
                + Op.GAS
                + Op.SWAP1
                + Op.SUB
            )
            # cold gas run
            + (
                Op.GAS
                + Op.CALL(address=address_subject, gas=Op.GAS)
                + Op.POP
                + Op.GAS
                + Op.SWAP1
                + Op.SUB
            )
            # warm gas run
            + (
                Op.GAS
                + Op.CALL(address=address_subject, gas=Op.GAS)
                + Op.POP
                + Op.GAS
                + Op.SWAP1
                + Op.SUB
            )
            # Store warm gas: DUP3 is the gas of the baseline gas run
            + (Op.DUP3 + Op.SWAP1 + Op.SUB + Op.PUSH2(slot_warm_gas) + Op.SSTORE)
            # store cold gas: DUP2 is the gas of the baseline gas run
            + (Op.DUP2 + Op.SWAP1 + Op.SUB + Op.PUSH2(slot_cold_gas) + Op.SSTORE)
            + (
                (
                    # do an oog gas run, unless skipped with
                    # `out_of_gas_testing=False`:
                    #
                    # - DUP7 is the gas of the baseline gas run, after other
                    #   CALL args were pushed
                    # - subtract the gas charged by the harness
                    # - add warm gas charged by the subject
                    # - subtract `oog_difference` to cause OOG exception
                    #   (1 by default)
                    Op.SSTORE(
                        slot_oog_call_result,
                        Op.CALL(
                            gas=Op.ADD(warm_gas - gas_single_gas_run - oog_difference, Op.DUP7),
                            address=address_subject,
                        ),
                    )
                    # sanity gas run: not subtracting 1 to see if enough gas
                    # makes the call succeed
                    + Op.SSTORE(
                        slot_sanity_call_result,
                        Op.CALL(
                            gas=Op.ADD(warm_gas - gas_single_gas_run, Op.DUP7),
                            address=address_subject,
                        ),
                    )
                    + Op.STOP
                )
                if out_of_gas_testing
                else Op.STOP
            )
        ),
        # Needs to be legacy to use the GAS opcode (removed in EOF).
        evm_code_type=EVMCodeType.LEGACY,
    )
    post = {
        address_legacy_harness: Account(
            storage={
                slot_warm_gas: warm_gas,
                slot_cold_gas: cold_gas,
            },
        ),
    }
    if out_of_gas_testing:
        post[address_legacy_harness].storage[slot_oog_call_result] = LEGACY_CALL_FAILURE
        post[address_legacy_harness].storage[slot_sanity_call_result] = LEGACY_CALL_SUCCESS
    tx = Transaction(to=address_legacy_harness, gas_limit=env.gas_limit, sender=sender)
    state_test(env=env, pre=pre, tx=tx, post=post)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/eip7692_eof_v1/__init__.py | tests/unscheduled/eip7692_eof_v1/__init__.py | """
Test cases for [EIP-7692: EVM Object Format (EOFv1) Meta](https://eips.ethereum.org/EIPS/eip-7692).
Test cases for the EIPs included in EIP-7692 EOFv1 Meta.
* [EIP-663: SWAPN, DUPN and EXCHANGE instructions](https://eips.ethereum.org/EIPS/eip-663).
* [EIP-3540: EOF - EVM Object Format v1](https://eips.ethereum.org/EIPS/eip-3540).
* [EIP-3670: EOF - Code Validation](https://eips.ethereum.org/EIPS/eip-3670).
* [EIP-4200: EOF - Static relative jumps](https://eips.ethereum.org/EIPS/eip-4200).
* [EIP-4750: EOF - Functions](https://eips.ethereum.org/EIPS/eip-4750).
* [EIP-5450: EOF - Stack Validation](https://eips.ethereum.org/EIPS/eip-5450).
* [EIP-6206: EOF - JUMPF and non-returning functions](https://eips.ethereum.org/EIPS/eip-6206).
* [EIP-7069: Revamped CALL instructions](https://eips.ethereum.org/EIPS/eip-7069).
* [EIP-7480: EOF - Data section access instructions](https://eips.ethereum.org/EIPS/eip-7480).
* [EIP-7620: EOF Contract Creation](https://eips.ethereum.org/EIPS/eip-7620).
* [EIP-7873: EOF - TXCREATE and InitcodeTransaction type](https://eips.ethereum.org/EIPS/eip-7873).
## Devnet Specifications
- [ethpandaops/eof-devnet-0](https://notes.ethereum.org/@ethpandaops/eof-devnet-0).
"""
# Fork name used by the valid_from markers of all tests in this meta-EIP suite.
EOF_FORK_NAME = "EOFv1"
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_execution_function.py | tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/test_execution_function.py | """Execution of CALLF, RETF opcodes within EOF V1 containers tests."""
from typing import List
import pytest
from ethereum_test_tools import Account, Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_types.eof.v1 import Container, Section
from ethereum_test_types.eof.v1.constants import MAX_CODE_SECTIONS, MAX_RETURN_STACK_HEIGHT
from ethereum_test_vm import Opcodes as Op
from .. import EOF_FORK_NAME
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-4750.md"
REFERENCE_SPEC_VERSION = "90f716078d0b08ce508a1e57803f885cc2f2e15e"
# List all containers used within execution tests, since they will need to be
# valid EOF V1 containers too
pytestmark = pytest.mark.valid_from(EOF_FORK_NAME)
# Container whose sections CALLF each other to the maximum depth; the last
# section performs an external EXTCALL, and each section records a 1 at its
# own depth key on the way back up the return stack.
contract_call_within_deep_nested_callf = Container(
    name="contract_call_within_deep_nested_callf",
    sections=[
        Section.Code(
            code=Op.CALLF[1] + Op.SSTORE(0, 1) + Op.STOP,
        )
    ]
    + [
        # All sections call next section and on return, store a 1
        # to their call stack height key
        Section.Code(
            code=(Op.CALLF[i] + Op.SSTORE(i - 1, 1) + Op.RETF),
            code_inputs=0,
            code_outputs=0,
        )
        for i in range(2, MAX_CODE_SECTIONS)
    ]
    + [
        # Last section makes external contract call
        Section.Code(
            code=(
                Op.PUSH0
                + Op.PUSH0
                + Op.PUSH0
                + Op.PUSH2(0x200)
                + Op.EXTCALL
                + Op.ISZERO
                + Op.PUSH2(MAX_CODE_SECTIONS - 1)
                + Op.SSTORE
                + Op.RETF
            ),
            code_inputs=0,
            code_outputs=0,
        )
    ],
)
# Variant of the deep-nested container whose last section uses the legacy
# GAS + CALL sequence instead of EXTCALL.
# NOTE(review): GAS and CALL are not valid inside EOF containers; this
# container does not appear to be referenced by any test in this file —
# confirm whether it is intentionally unused.
recursive_contract_call_within_deep_nested_callf = Container(
    name="recursive_contract_call_within_deep_nested_callf",
    sections=[
        # All sections call next section and on return, store a 1
        # to their call stack height key
        Section.Code(
            code=Op.CALLF[i + 1] + Op.SSTORE(i, 1) + Op.STOP,
        )
        for i in range(MAX_CODE_SECTIONS - 1)
    ]
    + [
        # Last section makes external contract call
        Section.Code(
            code=(
                Op.PUSH0
                + Op.PUSH0
                + Op.PUSH0
                + Op.PUSH0
                + Op.PUSH0
                + Op.PUSH2(0x200)
                + Op.GAS
                + Op.CALL
                + Op.PUSH2(MAX_CODE_SECTIONS - 1)
                + Op.SSTORE
                + Op.RETF
            ),
            code_inputs=0,
            code_outputs=0,
        )
    ],
)
CALL_SUCCEED_CONTRACTS: List[Container] = [
    # Minimal case: a single CALLF/RETF round trip, then STOP.
    Container(
        name="function_finishes_contract_execution",
        sections=[
            Section.Code(
                code=(Op.CALLF[1] + Op.STOP),
            ),
            Section.Code(
                code=(Op.RETF),
                code_inputs=0,
                code_outputs=0,
            ),
        ],
    ),
    # Recurse via CALLF exactly up to MAX_RETURN_STACK_HEIGHT, tracking the
    # depth counter on the operand stack.
    Container(
        name="max_recursive_callf",
        sections=[
            Section.Code(
                code=(Op.PUSH1(1) + Op.CALLF[1] + Op.STOP),
            ),
            Section.Code(
                code=(
                    Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT)
                    + Op.SUB
                    + Op.RJUMPI[len(Op.POP) + len(Op.RETF)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=1,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
    # Same recursion bound, but the depth counter lives in storage slot 0.
    Container(
        name="max_recursive_callf_sstore",
        sections=[
            Section.Code(
                code=Op.SSTORE(0, 1) + Op.CALLF[1] + Op.STOP,
                max_stack_height=2,
            ),
            Section.Code(
                code=(
                    Op.PUSH0
                    + Op.SLOAD
                    + Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT)
                    + Op.SUB
                    # NOTE(review): offset uses len(Op.STOP) where the sibling
                    # containers use len(Op.RETF); both are 1 byte, so the
                    # jump target is the same.
                    + Op.RJUMPI[len(Op.POP) + len(Op.STOP)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.PUSH0
                    + Op.SSTORE
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=0,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
    # Same recursion bound, but the depth counter lives in memory at offset 0.
    Container(
        name="max_recursive_callf_memory",
        sections=[
            Section.Code(
                code=(Op.PUSH1(1) + Op.PUSH0 + Op.MSTORE + Op.CALLF[1] + Op.STOP),
            ),
            Section.Code(
                code=(
                    Op.PUSH0
                    + Op.MLOAD
                    + Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT)
                    + Op.SUB
                    + Op.RJUMPI[len(Op.POP) + len(Op.RETF)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.PUSH0
                    + Op.MSTORE
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=0,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
]
"""
List of all EOF V1 Containers that simply need to succeed on execution.
"""
CALL_FAIL_CONTRACTS: List[Container] = [
    Container(
        name="invalid_opcode",
        sections=[
            Section.Code(
                code=(Op.INVALID),
            ),
        ],
    ),
    # The next three mirror the "max_recursive_callf*" success containers,
    # but recurse one level past MAX_RETURN_STACK_HEIGHT, overflowing the
    # return stack and causing an exceptional halt.
    Container(
        name="overflow_recursive_callf",
        sections=[
            Section.Code(
                code=(Op.PUSH1(1) + Op.CALLF[1] + Op.STOP),
            ),
            Section.Code(
                code=(
                    Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT + 1)
                    + Op.SUB
                    + Op.RJUMPI[len(Op.POP) + len(Op.RETF)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=1,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
    Container(
        name="overflow_recursive_callf_sstore",
        sections=[
            Section.Code(
                code=Op.SSTORE(0, 1) + Op.CALLF[1] + Op.STOP,
                max_stack_height=2,
            ),
            Section.Code(
                code=(
                    Op.PUSH0
                    + Op.SLOAD
                    + Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT + 1)
                    + Op.SUB
                    + Op.RJUMPI[len(Op.POP) + len(Op.RETF)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.PUSH0
                    + Op.SSTORE
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=0,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
    Container(
        name="overflow_recursive_callf_memory",
        sections=[
            Section.Code(
                code=Op.MSTORE(0, 1) + Op.CALLF[1] + Op.STOP,
                max_stack_height=2,
            ),
            Section.Code(
                code=(
                    Op.PUSH0
                    + Op.MLOAD
                    + Op.DUP1
                    + Op.PUSH2(MAX_RETURN_STACK_HEIGHT + 1)
                    + Op.SUB
                    + Op.RJUMPI[len(Op.POP) + len(Op.RETF)]
                    + Op.POP
                    + Op.RETF
                    + Op.PUSH1(1)
                    + Op.ADD
                    + Op.PUSH0
                    + Op.MSTORE
                    + Op.CALLF[1]
                    + Op.RETF
                ),
                code_inputs=0,
                code_outputs=0,
                max_stack_height=3,
            ),
        ],
    ),
]
"""
List of all EOF V1 Containers that simply need to fail (exceptional halt) on
execution.
These contracts have a valid EOF V1 container format but fail when executed.
"""
# Both lists are format-valid containers; only their runtime outcome differs.
VALID: List[Container] = CALL_SUCCEED_CONTRACTS + CALL_FAIL_CONTRACTS
"""
List of all EOF V1 Containers used during execution tests.
"""
@pytest.mark.parametrize("container", CALL_SUCCEED_CONTRACTS, ids=lambda x: x.name)
def test_eof_functions_contract_call_succeed(
    state_test: StateTestFiller,
    pre: Alloc,
    container: Container,
) -> None:
    """Test simple contracts that are simply expected to succeed on call."""
    sender = pre.fund_eoa()
    eof_address = pre.deploy_contract(container)
    # Legacy caller stores the CALL result at slot 0; success is recorded
    # as 1 in the post state.
    caller_address = pre.deploy_contract(
        Op.SSTORE(0, Op.CALL(Op.GAS, eof_address, 0, 0, 0, 0, 0)) + Op.STOP()
    )
    tx = Transaction(
        to=caller_address,
        gas_limit=50000000,
        gas_price=10,
        protected=False,
        data="",
        sender=sender,
    )
    state_test(
        env=Environment(),
        pre=pre,
        post={caller_address: Account(storage={0: 1})},
        tx=tx,
    )
@pytest.mark.parametrize("container", CALL_FAIL_CONTRACTS, ids=lambda x: x.name)
def test_eof_functions_contract_call_fail(
    state_test: StateTestFiller,
    pre: Alloc,
    container: Container,
) -> None:
    """Test simple contracts that are simply expected to fail on call."""
    sender = pre.fund_eoa()
    eof_address = pre.deploy_contract(container)
    # Legacy caller stores 1 at the key given by the CALL result; a failing
    # call returns 0, so slot 0 holds 1 in the post state.
    caller_address = pre.deploy_contract(
        Op.SSTORE(Op.CALL(Op.GAS, eof_address, 0, 0, 0, 0, 0), 1) + Op.STOP()
    )
    tx = Transaction(
        to=caller_address,
        gas_limit=50000000,
        gas_price=10,
        protected=False,
        data="",
        sender=sender,
    )
    state_test(
        env=Environment(),
        pre=pre,
        post={caller_address: Account(storage={0: 1})},
        tx=tx,
    )
def test_eof_functions_contract_call_within_deep_nested(
    state_test: StateTestFiller,
    pre: Alloc,
) -> None:
    """
    Test performing a call within a nested callf and verify correct behavior of
    return stack in calling contract.
    TODO: This test belongs in EIP-7069 test folder, not code validation.
    """
    env = Environment()
    nested_callee_address = pre.deploy_contract(code=Op.SSTORE(0, 1) + Op.STOP())
    # NOTE: this local container shadows the module-level constant of the
    # same name; it differs by embedding the deployed callee address in the
    # last section's EXTCALL.
    contract_call_within_deep_nested_callf = Container(
        name="contract_call_within_deep_nested_callf",
        sections=[
            Section.Code(
                code=Op.CALLF[1] + Op.SSTORE(0, 1) + Op.STOP,
            )
        ]
        + [
            # All sections call next section and on return, store a 1
            # to their call stack height key
            Section.Code(
                code=(Op.CALLF[i] + Op.SSTORE(i - 1, 1) + Op.RETF),
                code_outputs=0,
            )
            for i in range(2, MAX_CODE_SECTIONS)
        ]
        + [
            # Last section makes external contract call
            Section.Code(
                code=(
                    Op.EXTCALL(nested_callee_address, 0, 0, 0)
                    + Op.ISZERO
                    + Op.PUSH2(MAX_CODE_SECTIONS - 1)
                    + Op.SSTORE
                    + Op.RETF
                ),
                code_outputs=0,
            )
        ],
    )
    callee_address = pre.deploy_contract(contract_call_within_deep_nested_callf)
    sender = pre.fund_eoa()
    tx = Transaction(
        to=callee_address,
        gas_limit=50000000,
        gas_price=10,
        protected=False,
        data="",
        sender=sender,
    )
    post = {
        # Every section (depth key 0..MAX_CODE_SECTIONS-1) must have stored
        # a 1 on the way back up the return stack.
        callee_address: Account(storage=dict.fromkeys(range(MAX_CODE_SECTIONS), 1)),
        nested_callee_address: Account(
            storage={
                0: 1,
            }
        ),
    }
    state_test(
        env=env,
        pre=pre,
        post=post,
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/opcodes.py | tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/opcodes.py | """EOF Valid Opcodes."""
from typing import List
from ethereum_test_vm import Opcodes as Op
V1_EOF_OPCODES: List[Op] = [
# new eof ops
# EIP-663 Swap and Dup
Op.DUPN,
Op.SWAPN,
Op.EXCHANGE,
# EIP-4200 Relative Jumps
Op.RJUMP,
Op.RJUMPI,
Op.RJUMPV,
# EIP-4750 functions
Op.CALLF,
Op.RETF,
# EIP-6209 JUMPF Instruction
Op.JUMPF,
# EIP-7069 Revamped EOF Call
Op.EXTCALL,
Op.EXTDELEGATECALL,
Op.EXTSTATICCALL,
Op.RETURNDATALOAD,
# EIP-7480 EOF Data Section Access
Op.DATALOAD,
Op.DATALOADN,
Op.DATASIZE,
Op.DATACOPY,
# EIP-7620 EOF Create and Return Contract operation
Op.EOFCREATE,
# EIP-7873 TXCREATE and InitcodeTransaction
Op.TXCREATE,
# EIP-7620 EOF Create and Return Contract operation
Op.RETURNCODE,
# Non-deprecated Legacy Opcodes
Op.STOP,
Op.ADD,
Op.MUL,
Op.SUB,
Op.DIV,
Op.SDIV,
Op.MOD,
Op.SMOD,
Op.ADDMOD,
Op.MULMOD,
Op.EXP,
Op.SIGNEXTEND,
Op.LT,
Op.GT,
Op.SLT,
Op.SGT,
Op.EQ,
Op.ISZERO,
Op.AND,
Op.OR,
Op.XOR,
Op.NOT,
Op.BYTE,
Op.SHL,
Op.SHR,
Op.SAR,
Op.SHA3,
Op.ADDRESS,
Op.BALANCE,
Op.ORIGIN,
Op.CALLER,
Op.CALLVALUE,
Op.CALLDATALOAD,
Op.CALLDATASIZE,
Op.CALLDATACOPY,
Op.CODESIZE,
Op.CODECOPY,
Op.GASPRICE,
Op.EXTCODESIZE,
Op.EXTCODECOPY,
Op.RETURNDATASIZE,
Op.RETURNDATACOPY,
Op.EXTCODEHASH,
Op.BLOCKHASH,
Op.COINBASE,
Op.TIMESTAMP,
Op.NUMBER,
Op.PREVRANDAO,
Op.GASLIMIT,
Op.CHAINID,
Op.SELFBALANCE,
Op.BASEFEE,
Op.POP,
Op.MLOAD,
Op.MSTORE,
Op.MSTORE8,
Op.SLOAD,
Op.SSTORE,
Op.MSIZE,
Op.GAS,
Op.JUMPDEST,
Op.PUSH1,
Op.PUSH2,
Op.PUSH3,
Op.PUSH4,
Op.PUSH5,
Op.PUSH6,
Op.PUSH7,
Op.PUSH8,
Op.PUSH9,
Op.PUSH10,
Op.PUSH11,
Op.PUSH12,
Op.PUSH13,
Op.PUSH14,
Op.PUSH15,
Op.PUSH16,
Op.PUSH17,
Op.PUSH18,
Op.PUSH19,
Op.PUSH20,
Op.PUSH21,
Op.PUSH22,
Op.PUSH23,
Op.PUSH24,
Op.PUSH25,
Op.PUSH26,
Op.PUSH27,
Op.PUSH28,
Op.PUSH29,
Op.PUSH30,
Op.PUSH31,
Op.PUSH32,
Op.DUP1,
Op.DUP2,
Op.DUP3,
Op.DUP4,
Op.DUP5,
Op.DUP6,
Op.DUP7,
Op.DUP8,
Op.DUP9,
Op.DUP10,
Op.DUP11,
Op.DUP12,
Op.DUP13,
Op.DUP14,
Op.DUP15,
Op.DUP16,
Op.SWAP1,
Op.SWAP2,
Op.SWAP3,
Op.SWAP4,
Op.SWAP5,
Op.SWAP6,
Op.SWAP7,
Op.SWAP8,
Op.SWAP9,
Op.SWAP10,
Op.SWAP11,
Op.SWAP12,
Op.SWAP13,
Op.SWAP14,
Op.SWAP15,
Op.SWAP16,
Op.LOG0,
Op.LOG1,
Op.LOG2,
Op.LOG3,
Op.LOG4,
Op.CREATE,
Op.CALL,
# Op.CALLCODE,
Op.RETURN,
Op.DELEGATECALL,
Op.CREATE2,
Op.STATICCALL,
Op.REVERT,
Op.INVALID,
# Op.SELFDESTRUCT,
]
"""
List of all valid EOF V1 opcodes for Shanghai.
"""
V1_EOF_DEPRECATED_OPCODES = [
Op.SELFDESTRUCT,
Op.CALLCODE,
Op.JUMP,
Op.JUMPI,
Op.PC,
]
"""
List of opcodes that will be deprecated for EOF V1.
For these opcodes we will also add the correct expected amount of stack items
so the container is not considered invalid due to buffer underflow.
"""
V1_EOF_ONLY_OPCODES = [
Op.DUPN,
Op.SWAPN,
Op.EXCHANGE,
# EIP-4200 Relative Jumps
Op.RJUMP,
Op.RJUMPI,
Op.RJUMPV,
# EIP-4750 functions
Op.CALLF,
Op.RETF,
# EIP-6209 JUMPF Instruction
Op.JUMPF,
# EIP-7069 Revamped EOF Call
Op.EXTCALL,
Op.EXTDELEGATECALL,
Op.EXTSTATICCALL,
# EIP-7480 EOF Data Section Access
Op.DATALOAD,
Op.DATALOADN,
Op.DATASIZE,
Op.DATACOPY,
# EIP-7620 EOF Create and Return Contract operation
Op.EOFCREATE,
Op.RETURNCODE,
]
"""
List of valid EOF V1 opcodes that are disabled in legacy bytecode.
"""
VALID_TERMINATING_OPCODES = [
Op.STOP,
Op.RETURN,
Op.REVERT,
Op.INVALID,
Op.RETF,
Op.JUMPF,
]
INVALID_TERMINATING_OPCODES = [op for op in V1_EOF_OPCODES if op not in VALID_TERMINATING_OPCODES]
INVALID_OPCODES = [
bytes([i])
for i in range(256)
if i not in [x.int() for x in V1_EOF_OPCODES] + [x.int() for x in V1_EOF_DEPRECATED_OPCODES]
]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/spec.py | tests/unscheduled/eip7692_eof_v1/eip3540_eof_v1/spec.py | """EOF V1 Constants used throughout all tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.