repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py | tests/cancun/eip6780_selfdestruct/test_dynamic_create2_selfdestruct_collision.py | """
Suicide scenario requested test
https://github.com/ethereum/execution-spec-tests/issues/381.
"""
from typing import Dict, Union
import pytest
from ethereum_test_forks import Cancun, Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Bytecode,
Conditional,
Initcode,
StateTestFiller,
Transaction,
compute_create2_address,
)
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md"
REFERENCE_SPEC_VERSION = "1b6a0e94cc47e859b9866e570391cf37dc55059a"
@pytest.mark.valid_from("Paris")
@pytest.mark.parametrize(
"create2_dest_already_in_state",
(pytest.param(True, marks=pytest.mark.execute(pytest.mark.skip("Modifies pre"))), False),
)
@pytest.mark.parametrize(
"call_create2_contract_in_between,call_create2_contract_at_the_end",
[
(True, True),
(True, False),
(False, True),
],
)
def test_dynamic_create2_selfdestruct_collision(
fork: Fork,
create2_dest_already_in_state: bool,
call_create2_contract_in_between: bool,
call_create2_contract_at_the_end: bool,
pre: Alloc,
state_test: StateTestFiller,
) -> None:
"""
Dynamic Create2->Suicide->Create2 collision scenario.
Perform a CREATE2, make sure that the initcode sets at least a couple of
storage keys, then on a different call, in the same tx, perform a
self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
-> Covered in this test
1) and create2 contract already in the state
2) and create2 contract is not in the state
b) on a different tx, attempt to recreate the contract
Perform a CREATE2, make sure that the initcode sets at least a couple
of storage keys, then in a different tx, perform a self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
b) on a different tx, attempt to recreate the contract
Check the test case described in
https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/
22156575/2024-01-06+Mainnet+Halting+Event
"""
assert call_create2_contract_in_between or call_create2_contract_at_the_end, "invalid test"
# Storage locations
create2_constructor_worked = 1
first_create2_result = 2
second_create2_result = 3
code_worked = 4
# Constants
address_zero = Address(0x00)
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct
# calls
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
code=Op.SSTORE(1, 1),
balance=7000000000000000000,
)
# CREATE2 Initcode
deploy_code = Op.SELFDESTRUCT(sendall_destination)
initcode = Initcode(
deploy_code=deploy_code,
initcode_prefix=Op.SSTORE(create2_constructor_worked, 1)
+ Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0),
)
# Create the contract that performs CREATE2 operations
address_code = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.MSTORE(
0,
Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt),
)
+ Op.RETURN(0, 32),
)
# Created addresses - now we can compute create2_address
create2_address = compute_create2_address(address_code, create2_salt, initcode)
call_address_in_between = create2_address if call_create2_contract_in_between else address_zero
call_address_in_the_end = create2_address if call_create2_contract_at_the_end else address_zero
# Values
pre_existing_create2_balance = 1
first_create2_value = 10
first_call_value = 100
second_create2_value = 1000
second_call_value = 10000
# Create the main contract that orchestrates the test
address_to = pre.deploy_contract(
code=Op.JUMPDEST()
# Make a subcall that do CREATE2 and returns its the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
first_create2_result,
Op.MLOAD(0),
)
# In case the create2 didn't work, flush account balance
+ Op.CALL(100000, address_code, 0, 0, 0, 0, 0)
# Call to the created account to trigger selfdestruct
+ Op.CALL(100000, call_address_in_between, first_call_value, 0, 0, 0, 0)
# Make a subcall that do CREATE2 collision and returns its address as
# the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
second_create2_result,
Op.MLOAD(0),
)
# Call to the created account to trigger selfdestruct
+ Op.CALL(100000, call_address_in_the_end, second_call_value, 0, 0, 0, 0)
+ Op.SSTORE(code_worked, 1),
balance=100000000,
storage={first_create2_result: 0xFF, second_create2_result: 0xFF},
)
# Create the sender EOA
sender = pre.fund_eoa(7000000000000000000)
if create2_dest_already_in_state:
# Create2 address already in the state, e.g. deployed in a previous
# block
pre[create2_address] = Account(
balance=pre_existing_create2_balance,
nonce=1,
code=deploy_code,
storage={},
)
post: Dict[Address, Union[Account, object]] = {}
# Create2 address only exists if it was pre-existing and after cancun
post[create2_address] = (
Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x00})
if create2_dest_already_in_state and fork >= Cancun
else Account.NONEXISTENT
)
# Create2 initcode is only executed if the contract did not already exist
post[address_create2_storage] = Account(
storage={create2_constructor_worked: int(not create2_dest_already_in_state)}
)
# Entry code that makes the calls to the create2 contract creator
post[address_to] = Account(
storage={
code_worked: 0x01,
# First create2 only works if the contract was not preexisting
first_create2_result: 0x00 if create2_dest_already_in_state else create2_address,
# Second create2 must never work
second_create2_result: 0x00,
}
)
# Calculate the destination account expected balance for the
# selfdestruct/sendall calls
sendall_destination_balance = (
pre_existing_create2_balance if create2_dest_already_in_state else first_create2_value
)
if call_create2_contract_in_between:
sendall_destination_balance += first_call_value
if call_create2_contract_at_the_end:
sendall_destination_balance += second_call_value
post[sendall_destination] = Account(balance=sendall_destination_balance)
tx = Transaction(
to=address_to,
data=initcode,
gas_limit=5_000_000,
sender=sender,
)
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Paris")
@pytest.mark.parametrize(
"create2_dest_already_in_state",
(pytest.param(True, marks=pytest.mark.execute(pytest.mark.skip("Modifies pre"))), False),
)
@pytest.mark.parametrize(
"call_create2_contract_at_the_end",
(True, False),
)
def test_dynamic_create2_selfdestruct_collision_two_different_transactions(
fork: Fork,
create2_dest_already_in_state: bool,
call_create2_contract_at_the_end: bool,
pre: Alloc,
blockchain_test: BlockchainTestFiller,
) -> None:
"""
Dynamic Create2->Suicide->Create2 collision scenario.
Perform a CREATE2, make sure that the initcode sets at least a couple of
storage keys, then on a different call, in the same tx, perform a
self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
1) and create2 contract already in the state
2) and create2 contract is not in the state
b) on a different tx, attempt to recreate the contract
-> Covered in this test
Perform a CREATE2, make sure that the initcode sets at
least a couple of storage keys, then in a different tx, perform a
self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
b) on a different tx, attempt to recreate the contract
Check the test case described in
https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/22156575/2024-01-06
+Mainnet+Halting+Event
"""
# assert call_create2_contract_at_the_end, "invalid test"
# Storage locations
create2_constructor_worked = 1
first_create2_result = 2
second_create2_result = 3
code_worked = 4
# Constants
address_zero = Address(0x00)
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct
# calls
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
code=Op.SSTORE(1, 1),
balance=7000000000000000000,
)
# CREATE2 Initcode
deploy_code = Op.SELFDESTRUCT(sendall_destination)
initcode = Initcode(
deploy_code=deploy_code,
initcode_prefix=Op.SSTORE(create2_constructor_worked, 1)
+ Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0),
)
# Create the contract that performs CREATE2 operations
address_code = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.MSTORE(
0,
Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt),
)
+ Op.RETURN(0, 32),
)
# Created addresses
create2_address = compute_create2_address(address_code, create2_salt, initcode)
call_address_in_the_end = create2_address if call_create2_contract_at_the_end else address_zero
# Values
pre_existing_create2_balance = 1
first_create2_value = 10
first_call_value = 100
second_create2_value = 1000
second_call_value = 10000
# Create the first contract that performs the first transaction
address_to = pre.deploy_contract(
code=Op.JUMPDEST()
# Make a subcall that do CREATE2 and returns its the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
first_create2_result,
Op.MLOAD(0),
)
# In case the create2 didn't work, flush account balance
+ Op.CALL(100000, address_code, 0, 0, 0, 0, 0)
# Call to the created account to trigger selfdestruct
+ Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0)
+ Op.SSTORE(code_worked, 1),
balance=100000000,
storage={first_create2_result: 0xFF},
)
# Create the second contract that performs the second transaction
address_to_second = pre.deploy_contract(
code=Op.JUMPDEST()
# Make a subcall that do CREATE2 collision and returns its address as
# the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
second_create2_result,
Op.MLOAD(0),
)
# Call to the created account to trigger selfdestruct
+ Op.CALL(200000, call_address_in_the_end, second_call_value, 0, 0, 0, 0)
+ Op.SSTORE(code_worked, 1),
balance=100000000,
storage={second_create2_result: 0xFF},
)
# Create the sender EOA
sender = pre.fund_eoa(7000000000000000000)
if create2_dest_already_in_state:
# Create2 address already in the state, e.g. deployed in a previous
# block
pre[create2_address] = Account(
balance=pre_existing_create2_balance,
nonce=1,
code=deploy_code,
storage={},
)
post: Dict[Address, Union[Account, object]] = {}
# Create2 address only exists if it was pre-existing and after cancun
post[create2_address] = (
Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x00})
if create2_dest_already_in_state and fork >= Cancun
else (
Account.NONEXISTENT
if call_create2_contract_at_the_end
else Account(balance=1000, nonce=1, code=deploy_code)
)
)
# after Cancun Create2 initcode is only executed if the contract did not
# already exist and before it will always be executed as the first tx
# deletes the account
post[address_create2_storage] = Account(
storage={
create2_constructor_worked: int(fork < Cancun or not create2_dest_already_in_state)
}
)
# Entry code that makes the calls to the create2 contract creator
post[address_to] = Account(
storage={
code_worked: 0x01,
# First create2 only works if the contract was not preexisting
first_create2_result: 0x00 if create2_dest_already_in_state else create2_address,
}
)
post[address_to_second] = Account(
storage={
code_worked: 0x01,
# Second create2 will not collide before Cancun as the first tx
# calls selfdestruct
#
# After cancun it will collide only if
# create2_dest_already_in_state otherwise the first tx creates and
# deletes it
second_create2_result: (
(0x00 if create2_dest_already_in_state else create2_address)
if fork >= Cancun
else create2_address
),
}
)
# Calculate the destination account expected balance for the
# selfdestruct/sendall calls
sendall_destination_balance = 0
if create2_dest_already_in_state:
sendall_destination_balance += pre_existing_create2_balance
if fork >= Cancun:
# first create2 fails, but first calls ok. the account is not
# removed on cancun therefore with the second create2 it is not
# successful
sendall_destination_balance += first_call_value
else:
# first create2 fails, first calls totally removes the account
# in the second transaction second create2 is successful
sendall_destination_balance += first_call_value
if call_create2_contract_at_the_end:
sendall_destination_balance += second_create2_value
else:
# if no account in the state, first create2 successful, first call
# successful and removes because it is removed in the next transaction
# second create2 successful
sendall_destination_balance = first_create2_value + first_call_value
if call_create2_contract_at_the_end:
sendall_destination_balance += second_create2_value
if call_create2_contract_at_the_end:
sendall_destination_balance += second_call_value
post[sendall_destination] = Account(balance=sendall_destination_balance)
blockchain_test(
pre=pre,
post=post,
blocks=[
Block(
txs=[
Transaction(
to=address_to,
data=initcode,
gas_limit=5_000_000,
sender=sender,
),
Transaction(
to=address_to_second,
data=initcode,
gas_limit=5_000_000,
sender=sender,
),
]
)
],
)
@pytest.mark.valid_from("Paris")
@pytest.mark.parametrize(
"selfdestruct_on_first_tx,recreate_on_first_tx",
[
(False, False),
(True, False),
(True, True),
],
)
def test_dynamic_create2_selfdestruct_collision_multi_tx(
fork: Fork,
selfdestruct_on_first_tx: bool,
recreate_on_first_tx: bool,
pre: Alloc,
blockchain_test: BlockchainTestFiller,
) -> None:
"""
Dynamic Create2->Suicide->Create2 collision scenario over multiple
transactions.
Perform a CREATE2, make sure that the initcode sets at least a couple of
storage keys, then on a different call, in the same or different tx but
same block, perform a self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
b) on a different tx, attempt to recreate the contract
Perform a CREATE2, make sure that the initcode sets at least a
couple of storage keys, then in a different tx, perform a self-destruct.
Then:
a) on the same tx, attempt to recreate the contract
-> Covered in this test
b) on a different tx, attempt to recreate the contract
-> Covered in this test
Check the test case described in
https://lf-hyperledger.atlassian.net/wiki/spaces/BESU/pages/22156575/2024-01-06
+Mainnet+Halting+Event
"""
if recreate_on_first_tx:
assert selfdestruct_on_first_tx, "invalid test"
# Storage locations
create2_constructor_worked = 1
first_create2_result = 2
second_create2_result = 3
part_1_worked = 4
part_2_worked = 5
# Constants
create2_salt = 1
# Create EOA for sendall destination (receives selfdestruct funds)
sendall_destination = pre.fund_eoa(0) # Will be funded by selfdestruct
# calls
# Create storage contract that will be called during initialization
address_create2_storage = pre.deploy_contract(
code=Op.SSTORE(1, 1),
balance=7000000000000000000,
)
# CREATE2 Initcode
deploy_code = Op.SELFDESTRUCT(sendall_destination)
initcode = Initcode(
deploy_code=deploy_code,
initcode_prefix=Op.SSTORE(create2_constructor_worked, 1)
+ Op.CALL(Op.GAS(), address_create2_storage, 0, 0, 0, 0, 0),
)
# Create the contract that performs CREATE2 operations
address_code = pre.deploy_contract(
code=Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.MSTORE(
0,
Op.CREATE2(Op.SELFBALANCE(), 0, Op.CALLDATASIZE(), create2_salt),
)
+ Op.RETURN(0, 32),
)
# Created addresses
create2_address = compute_create2_address(address_code, create2_salt, initcode)
# Values
first_create2_value = 3
first_call_value = 5
second_create2_value = 7
second_call_value = 11
# Code is divided in two transactions part of the same block
first_tx_code = Bytecode()
second_tx_code = Bytecode()
first_tx_code += (
Op.JUMPDEST()
# Make a subcall that do CREATE2 and returns its the result
+ Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, first_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
first_create2_result,
Op.MLOAD(0),
)
)
if selfdestruct_on_first_tx:
first_tx_code += (
# Call to the created account to trigger selfdestruct
Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0)
)
else:
second_tx_code += (
# Call to the created account to trigger selfdestruct
Op.CALL(100000, create2_address, first_call_value, 0, 0, 0, 0)
)
if recreate_on_first_tx:
first_tx_code += (
# Make a subcall that do CREATE2 collision and returns its address
# as the result
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
second_create2_result,
Op.MLOAD(0),
)
)
else:
second_tx_code += (
# Make a subcall that do CREATE2 collision and returns its address
# as the result
Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE())
+ Op.CALL(100000, address_code, second_create2_value, 0, Op.CALLDATASIZE(), 0, 32)
+ Op.SSTORE(
second_create2_result,
Op.MLOAD(0),
)
)
# Second tx code always calls the create2 contract at the end
second_tx_code += Op.CALL(100000, create2_address, second_call_value, 0, 0, 0, 0)
first_tx_code += Op.SSTORE(part_1_worked, 1)
second_tx_code += Op.SSTORE(part_2_worked, 1)
# Create the main contract that uses conditional logic to handle both
# transactions
address_to = pre.deploy_contract(
code=Conditional(
# Depending on the tx, execute the first or second tx code
condition=Op.EQ(Op.SLOAD(part_1_worked), 0),
if_true=first_tx_code,
if_false=second_tx_code,
),
balance=100000000,
storage={first_create2_result: 0xFF, second_create2_result: 0xFF},
)
# Create the sender EOA
sender = pre.fund_eoa(7000000000000000000)
post: Dict[Address, Union[Account, object]] = {}
# Create2 address only exists if it was pre-existing and after cancun
account_will_exist_with_code = not selfdestruct_on_first_tx and fork >= Cancun
# If the contract is self-destructed and we also attempt to recreate it on
# the first tx, the second call on the second tx will only place balance in
# the account
account_will_exist_with_balance = selfdestruct_on_first_tx and recreate_on_first_tx
post[create2_address] = (
Account(balance=0, nonce=1, code=deploy_code, storage={create2_constructor_worked: 0x01})
if account_will_exist_with_code
else (
Account(balance=second_call_value, nonce=0)
if account_will_exist_with_balance
else Account.NONEXISTENT
)
)
# Create2 initcode saves storage unconditionally
post[address_create2_storage] = Account(storage={create2_constructor_worked: 0x01})
# Entry code that makes the calls to the create2 contract creator
post[address_to] = Account(
storage={
part_1_worked: 0x01,
part_2_worked: 0x01,
# First create2 always works
first_create2_result: create2_address,
# Second create2 only works if we successfully self-destructed on
# the first tx
second_create2_result: (
create2_address if selfdestruct_on_first_tx and not recreate_on_first_tx else 0x00
),
}
)
# Calculate the destination account expected balance for the
# selfdestruct/sendall calls
sendall_destination_balance = first_create2_value + first_call_value
if not account_will_exist_with_balance:
sendall_destination_balance += second_call_value
if selfdestruct_on_first_tx and not recreate_on_first_tx:
sendall_destination_balance += second_create2_value
post[sendall_destination] = Account(balance=sendall_destination_balance)
blockchain_test(
pre=pre,
post=post,
blocks=[
Block(
txs=[
Transaction(
to=address_to,
data=initcode,
gas_limit=5_000_000,
sender=sender,
),
Transaction(
to=address_to,
data=initcode,
gas_limit=5_000_000,
sender=sender,
),
]
)
],
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py | tests/cancun/eip6780_selfdestruct/test_selfdestruct_revert.py | """Tests for selfdestruct interaction with revert."""
from typing import Dict
import pytest
from ethereum_test_forks import Cancun
from ethereum_test_tools import (
EOA,
Account,
Address,
Alloc,
Bytecode,
Environment,
Initcode,
StateTestFiller,
Storage,
Transaction,
compute_create_address,
)
from ethereum_test_tools import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md"
REFERENCE_SPEC_VERSION = "1b6a0e94cc47e859b9866e570391cf37dc55059a"
SELFDESTRUCT_ENABLE_FORK = Cancun
@pytest.fixture
def entry_code_address(sender: EOA) -> Address:
"""Address where the entry code will run."""
return compute_create_address(address=sender, nonce=0)
@pytest.fixture
def recursive_revert_contract_address_init_balance() -> int:
"""Return initial balance for recursive_revert_contract_address."""
return 3
@pytest.fixture
def recursive_revert_contract_address(
pre: Alloc,
recursive_revert_contract_code: Bytecode,
recursive_revert_contract_address_init_balance: int,
) -> Address:
"""Address where the recursive revert contract address exists."""
return pre.deploy_contract(
code=recursive_revert_contract_code,
balance=recursive_revert_contract_address_init_balance,
)
@pytest.fixture
def selfdestruct_on_outer_call() -> int:
"""Whether to selfdestruct the target contract in the outer call scope."""
return 0
@pytest.fixture
def recursive_revert_contract_code(
selfdestruct_on_outer_call: int,
selfdestruct_with_transfer_contract_address: Address,
) -> Bytecode:
"""
Contract code that:
Given selfdestructable contract A, transfer value to A
and call A.selfdestruct.
Then, recurse into a new call which transfers value to A,
call A.selfdestruct, and reverts.
"""
# Common prefix for all three cases:
# case 1: selfdestruct_on_outer_call=1
# case 2: selfdestruct_on_outer_call=2
# case 3: selfdestruct_on_outer_call has a different value
common_prefix = (
Op.PUSH0
+ Op.CALLDATALOAD
+ Op.PUSH1(0x1)
+ Op.PUSH20(selfdestruct_with_transfer_contract_address)
+ Op.SWAP2
+ Op.SWAP1
+ Op.DUP2
+ Op.PUSH0
+ Op.EQ
+ Op.PUSH1(0x3A)
+ Op.JUMPI
+ Op.POP
+ Op.PUSH1(0x1)
+ Op.EQ
+ Op.PUSH1(0x29)
+ Op.JUMPI
+ Op.STOP
+ Op.JUMPDEST
+ Op.PUSH0
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.PUSH1(0x1)
+ Op.DUP2
+ Op.SWAP5
+ Op.DUP2
+ Op.DUP4
+ Op.MSTORE
+ Op.GASLIMIT
+ Op.CALL
+ Op.PUSH0
+ Op.DUP1
+ Op.REVERT
+ Op.JUMPDEST
)
if selfdestruct_on_outer_call == 1:
suffix = (
Op.SWAP1
+ Op.POP
+ Op.PUSH1(0x1)
+ Op.PUSH0
+ Op.MSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.PUSH1(0x1)
+ Op.DUP7
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.PUSH0
+ Op.DUP1
+ Op.MSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.PUSH1(0x1)
+ Op.DUP7
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.PUSH0
+ Op.MSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.DUP1
+ Op.ADDRESS
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.BALANCE
+ Op.PUSH1(0x1)
+ Op.SSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.RETURN
)
elif selfdestruct_on_outer_call == 2:
suffix = (
Op.PUSH0
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.PUSH1(0x1)
+ Op.DUP7
+ Op.DUP3
+ Op.SWAP6
+ Op.DUP4
+ Op.DUP1
+ Op.MSTORE
+ Op.DUP4
+ Op.DUP1
+ Op.DUP7
+ Op.DUP2
+ Op.DUP7
+ Op.DUP7
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.DUP4
+ Op.MSTORE
+ Op.DUP3
+ Op.DUP1
+ Op.DUP6
+ Op.DUP2
+ Op.DUP1
+ Op.ADDRESS
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.DUP1
+ Op.BALANCE
+ Op.DUP3
+ Op.SSTORE
+ Op.DUP2
+ Op.DUP4
+ Op.MSTORE
+ Op.GASLIMIT
+ Op.CALL
+ Op.PUSH0
+ Op.DUP1
+ Op.RETURN
)
else: # selfdestruct_on_outer_call is neither 1 nor 2
suffix = (
Op.SWAP1
+ Op.POP
+ Op.PUSH0
+ Op.DUP1
+ Op.MSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.PUSH1(0x1)
+ Op.DUP7
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.PUSH0
+ Op.MSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.PUSH1(0x20)
+ Op.DUP2
+ Op.DUP1
+ Op.ADDRESS
+ Op.GASLIMIT
+ Op.CALL
+ Op.POP
+ Op.BALANCE
+ Op.PUSH1(0x1)
+ Op.SSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.RETURN
)
return common_prefix + suffix
@pytest.fixture
def selfdestruct_with_transfer_contract_address(
pre: Alloc,
entry_code_address: Address,
selfdestruct_with_transfer_contract_code: Bytecode,
same_tx: bool,
) -> Address:
"""
Contract address for contract that can selfdestruct and receive value.
"""
if same_tx:
return compute_create_address(address=entry_code_address, nonce=1)
# We need to deploy the contract before.
return pre.deploy_contract(selfdestruct_with_transfer_contract_code)
@pytest.fixture
def selfdestruct_with_transfer_contract_code(selfdestruct_recipient_address: Address) -> Bytecode:
"""Contract that can selfdestruct and receive value."""
code: Bytecode = (
Op.PUSH0
+ Op.CALLDATALOAD
+ Op.PUSH20(selfdestruct_recipient_address)
+ Op.SWAP1
+ Op.EQ(Op.PUSH0, Op.DUP1)
+ Op.PUSH1(0x2F)
+ Op.JUMPI
+ Op.PUSH1(0x1)
+ Op.EQ
+ Op.PUSH1(0x25)
+ Op.JUMPI
+ Op.STOP
+ Op.JUMPDEST
+ Op.PUSH1(0x1)
+ Op.DUP1
+ Op.SLOAD
+ Op.ADD
+ Op.PUSH1(0x1)
+ Op.SSTORE
+ Op.SELFDESTRUCT
+ Op.JUMPDEST
+ Op.PUSH1(0x1)
+ Op.PUSH0
+ Op.SLOAD
+ Op.ADD
+ Op.PUSH0
+ Op.SSTORE
+ Op.PUSH0
+ Op.DUP1
+ Op.RETURN
)
return code
@pytest.fixture
def selfdestruct_with_transfer_contract_initcode(
selfdestruct_with_transfer_contract_code: Bytecode,
) -> Bytecode:
"""Initcode for selfdestruct_with_transfer_contract_code."""
return Initcode(deploy_code=selfdestruct_with_transfer_contract_code)
@pytest.fixture
def selfdestruct_with_transfer_initcode_copy_from_address(
pre: Alloc,
selfdestruct_with_transfer_contract_initcode: Bytecode,
) -> Address:
"""
Address of a pre-existing contract we use to simply copy initcode from.
"""
addr = pre.deploy_contract(selfdestruct_with_transfer_contract_initcode)
return addr
@pytest.mark.parametrize(
"same_tx",
[True],
ids=["same_tx"],
)
@pytest.mark.parametrize(
"selfdestruct_on_outer_call",
[0, 1, 2],
ids=[
"no_outer_selfdestruct",
"outer_selfdestruct_before_inner_call",
"outer_selfdestruct_after_inner_call",
],
)
@pytest.mark.valid_from("Cancun")
def test_selfdestruct_created_in_same_tx_with_revert( # noqa SC200
state_test: StateTestFiller,
sender: EOA,
env: Environment,
pre: Alloc,
entry_code_address: Address,
selfdestruct_on_outer_call: int,
selfdestruct_with_transfer_contract_code: Bytecode,
selfdestruct_with_transfer_contract_initcode: Bytecode,
selfdestruct_with_transfer_contract_address: Address,
selfdestruct_recipient_address: Address,
selfdestruct_with_transfer_initcode_copy_from_address: Address,
recursive_revert_contract_address: Address,
recursive_revert_contract_code: Bytecode,
) -> None:
"""
Given:
Contract A which has methods to receive balance and selfdestruct,
and was created in current tx.
Test the following call sequence:
Transfer value to A and call A.selfdestruct. Recurse into a new call
from transfers value to A, calls A.selfdestruct, and reverts.
"""
entry_code = Op.EXTCODECOPY(
selfdestruct_with_transfer_initcode_copy_from_address,
0,
0,
len(bytes(selfdestruct_with_transfer_contract_initcode)),
)
entry_code += Op.SSTORE(
0,
Op.CREATE(
0,
0,
# Value Offset
len(bytes(selfdestruct_with_transfer_contract_initcode)),
),
)
entry_code += Op.CALL(
Op.GASLIMIT(),
recursive_revert_contract_address,
0, # value
0, # arg offset
0, # arg length
0, # ret offset
0, # ret length
)
post: Dict[Address, Account] = {
entry_code_address: Account(
code="0x",
storage=Storage(
{
0: selfdestruct_with_transfer_contract_address, # type: ignore
}
),
),
selfdestruct_with_transfer_initcode_copy_from_address: Account(
code=selfdestruct_with_transfer_contract_initcode,
),
recursive_revert_contract_address: Account(
code=recursive_revert_contract_code,
storage=Storage({1: 1}), # type: ignore
),
}
if selfdestruct_on_outer_call > 0:
post[selfdestruct_with_transfer_contract_address] = Account.NONEXISTENT # type: ignore
post[selfdestruct_recipient_address] = Account(
balance=1 if selfdestruct_on_outer_call == 1 else 2,
)
else:
post[selfdestruct_with_transfer_contract_address] = Account(
balance=1,
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
# 2 value transfers (1 in outer call, 1 in reverted inner
# call)
0: 1, # type: ignore
# 1 selfdestruct in reverted inner call
1: 0, # type: ignore
}
),
)
post[selfdestruct_recipient_address] = Account.NONEXISTENT # type: ignore
tx = Transaction(
value=0,
data=entry_code,
sender=sender,
to=None,
gas_limit=500_000,
)
state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"recursive_revert_contract_address_init_balance",
[2],
ids=["init_balance_2"],
)
@pytest.mark.parametrize(
"same_tx",
[False],
ids=["not_same_tx"],
)
@pytest.mark.parametrize(
"selfdestruct_on_outer_call",
[0, 1, 2],
ids=[
"no_outer_selfdestruct",
"outer_selfdestruct_before_inner_call",
"outer_selfdestruct_after_inner_call",
],
)
@pytest.mark.valid_from("Cancun")
def test_selfdestruct_not_created_in_same_tx_with_revert(
state_test: StateTestFiller,
sender: EOA,
env: Environment,
entry_code_address: Address,
pre: Alloc,
selfdestruct_on_outer_call: int,
selfdestruct_with_transfer_contract_code: Bytecode,
selfdestruct_with_transfer_contract_address: Address,
selfdestruct_recipient_address: Address,
recursive_revert_contract_address: Address,
recursive_revert_contract_code: Bytecode,
) -> None:
"""
Same test as selfdestruct_created_in_same_tx_with_revert except
selfdestructable contract is pre-existing.
"""
entry_code = Op.CALL(
Op.GASLIMIT(),
recursive_revert_contract_address,
0, # value
0, # arg offset
0, # arg length
0, # ret offset
0, # ret length
)
post: Dict[Address, Account] = {
entry_code_address: Account(code="0x"),
}
if selfdestruct_on_outer_call > 0:
post[selfdestruct_with_transfer_contract_address] = Account(
balance=1 if selfdestruct_on_outer_call == 1 else 0,
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
# 2 value transfers: 1 in outer call, 1 in reverted inner
# call
0: 1, # type: ignore
# 1 selfdestruct in reverted inner call
1: 1, # type: ignore
}
),
)
post[selfdestruct_recipient_address] = Account(
balance=1 if selfdestruct_on_outer_call == 1 else 2
)
else:
post[selfdestruct_with_transfer_contract_address] = Account(
balance=1,
code=selfdestruct_with_transfer_contract_code,
storage=Storage(
{
# 2 value transfers:
# 1 in outer call, 1 in reverted inner call
0: 1, # type: ignore
# 2 selfdestructs:
# 1 in outer call, 1 in reverted inner call
1: 0, # type: ignore
}
),
)
post[selfdestruct_recipient_address] = Account.NONEXISTENT # type: ignore
tx = Transaction(
value=0,
data=entry_code,
sender=sender,
to=None,
gas_limit=500_000,
)
state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py | tests/cancun/eip6780_selfdestruct/test_reentrancy_selfdestruct_revert.py | """Suicide scenario requested test https://github.com/ethereum/tests/issues/1325."""
from typing import SupportsBytes
import pytest
from ethereum_test_forks import Cancun, Fork
from ethereum_test_tools import (
EOA,
Account,
Address,
Alloc,
Bytecode,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md"
REFERENCE_SPEC_VERSION = "1b6a0e94cc47e859b9866e570391cf37dc55059a"
@pytest.fixture
def selfdestruct_contract_bytecode(selfdestruct_recipient_address: Address) -> Bytecode:
"""Contract code that performs a SELFDESTRUCT operation."""
return Op.SELFDESTRUCT(selfdestruct_recipient_address)
@pytest.fixture
def selfdestruct_contract_init_balance() -> int:
    """Initial balance funded to the self-destructing contract."""
    return 300_000
@pytest.fixture
def selfdestruct_contract_address(
    pre: Alloc, selfdestruct_contract_bytecode: Bytecode, selfdestruct_contract_init_balance: int
) -> Address:
    """Deploy the self-destructing contract and return its address."""
    deployed_address = pre.deploy_contract(
        code=selfdestruct_contract_bytecode,
        balance=selfdestruct_contract_init_balance,
    )
    return deployed_address
@pytest.fixture
def executor_contract_bytecode(
    first_suicide: Op,
    revert_contract_address: Address,
    selfdestruct_contract_address: Address,
) -> Bytecode:
    """
    Contract code that triggers the first selfdestruct, then calls the
    reverting proxy and records the reverted return data.
    """
    # CALL/CALLCODE take an explicit value argument; DELEGATECALL does not.
    if first_suicide in [Op.CALL, Op.CALLCODE]:
        first_call = first_suicide(address=selfdestruct_contract_address, value=0)
    else:
        first_call = first_suicide(address=selfdestruct_contract_address)
    # Slot 1: result of the first selfdestruct call.
    bytecode = Op.SSTORE(1, first_call)
    # Slot 2: result of the call to the reverting proxy.
    bytecode += Op.SSTORE(2, Op.CALL(address=revert_contract_address))
    # Slot 3: first word of the reverted return data.
    bytecode += Op.RETURNDATACOPY(0, 0, Op.RETURNDATASIZE())
    bytecode += Op.SSTORE(3, Op.MLOAD(0))
    return bytecode
@pytest.fixture
def executor_contract_init_storage() -> (
    dict[str | bytes | SupportsBytes | int, str | bytes | SupportsBytes | int]
):
    """Sentinel (non-zero) initial storage for the executor contract."""
    return {slot: 0x0100 for slot in (0x01, 0x02, 0x03)}
@pytest.fixture
def executor_contract_init_balance() -> int:
    """Initial balance funded to the executor contract."""
    return 100_000
@pytest.fixture
def executor_contract_address(
    pre: Alloc,
    executor_contract_bytecode: Bytecode,
    executor_contract_init_balance: int,
    executor_contract_init_storage: dict[
        str | bytes | SupportsBytes | int, str | bytes | SupportsBytes | int
    ],
) -> Address:
    """Deploy the executor contract and return its address."""
    deployed_address = pre.deploy_contract(
        executor_contract_bytecode,
        balance=executor_contract_init_balance,
        storage=executor_contract_init_storage,
    )
    return deployed_address
@pytest.fixture
def revert_contract_bytecode(
    second_suicide: Op,
    selfdestruct_contract_address: Address,
) -> Bytecode:
    """Contract code that calls the self-destructing contract, then reverts."""
    # CALL/CALLCODE take an explicit value argument; DELEGATECALL does not.
    if second_suicide in [Op.CALL, Op.CALLCODE]:
        inner_call = second_suicide(address=selfdestruct_contract_address, value=100)
    else:
        inner_call = second_suicide(address=selfdestruct_contract_address)
    # Store (15 + call result) in memory and revert with that 32-byte word as
    # the return data.
    return Op.MSTORE(0, Op.ADD(15, inner_call)) + Op.REVERT(0, 32)
@pytest.fixture
def revert_contract_init_balance() -> int:
    """Initial balance funded to the reverting proxy contract."""
    return 500_000
@pytest.fixture
def revert_contract_address(
    pre: Alloc,
    revert_contract_bytecode: Bytecode,
    revert_contract_init_balance: int,
) -> Address:
    """Deploy the reverting proxy contract and return its address."""
    deployed_address = pre.deploy_contract(
        revert_contract_bytecode,
        balance=revert_contract_init_balance,
    )
    return deployed_address
@pytest.mark.valid_from("Paris")
@pytest.mark.parametrize("first_suicide", [Op.CALL, Op.CALLCODE, Op.DELEGATECALL])
@pytest.mark.parametrize("second_suicide", [Op.CALL, Op.CALLCODE, Op.DELEGATECALL])
def test_reentrancy_selfdestruct_revert(
    pre: Alloc,
    env: Environment,
    sender: EOA,
    fork: Fork,
    first_suicide: Op,
    second_suicide: Op,
    state_test: StateTestFiller,
    selfdestruct_contract_bytecode: Bytecode,
    selfdestruct_contract_address: Address,
    selfdestruct_contract_init_balance: int,
    revert_contract_address: Address,
    revert_contract_init_balance: int,
    executor_contract_address: Address,
    executor_contract_init_balance: int,
    selfdestruct_recipient_address: Address,
) -> None:
    """
    Suicide reentrancy scenario.
    Call|Callcode|Delegatecall the contract S.
    S self destructs.
    Call the revert proxy contract R.
    R Calls|Callcode|Delegatecall S.
    S self destructs (for the second time).
    R reverts (including the effects of the second selfdestruct).
    It is expected the S is self destructed after the transaction.
    """
    post = {
        # Second caller unchanged as call gets reverted
        revert_contract_address: Account(balance=revert_contract_init_balance, storage={}),
    }
    # CALLCODE/DELEGATECALL run S's code in the executor's own context, so the
    # selfdestruct applies to the executor contract itself.
    if first_suicide in [Op.CALLCODE, Op.DELEGATECALL]:
        if fork >= Cancun:
            # On Cancun even callcode/delegatecall does not remove the account,
            # so the value remain
            post[executor_contract_address] = Account(
                storage={
                    0x01: 0x01,  # First call to contract S->suicide success
                    0x02: 0x00,  # Second call to contract S->suicide reverted
                    0x03: 16,  # Reverted value to check that revert really
                    # worked
                },
            )
        else:
            # Callcode executed first suicide from sender. sender is deleted
            post[executor_contract_address] = Account.NONEXISTENT  # type: ignore
        # Original suicide account remains in state
        post[selfdestruct_contract_address] = Account(
            balance=selfdestruct_contract_init_balance, storage={}
        )
        # Suicide destination
        post[selfdestruct_recipient_address] = Account(
            balance=executor_contract_init_balance,
        )
    # On Cancun suicide no longer destroys the account from state, just cleans
    # the balance
    if first_suicide in [Op.CALL]:
        post[executor_contract_address] = Account(
            storage={
                0x01: 0x01,  # First call to contract S->suicide success
                0x02: 0x00,  # Second call to contract S->suicide reverted
                0x03: 16,  # Reverted value to check that revert really worked
            },
        )
        if fork >= Cancun:
            # On Cancun suicide does not remove the account, just sends the
            # balance
            post[selfdestruct_contract_address] = Account(
                balance=0, code=selfdestruct_contract_bytecode, storage={}
            )
        else:
            post[selfdestruct_contract_address] = Account.NONEXISTENT  # type: ignore
        # Suicide destination
        post[selfdestruct_recipient_address] = Account(
            balance=selfdestruct_contract_init_balance,
        )
    # The transaction enters through the executor contract, which drives the
    # whole scenario.
    tx = Transaction(
        sender=sender,
        to=executor_contract_address,
        gas_limit=500_000,
        value=0,
    )
    state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/conftest.py | tests/cancun/eip6780_selfdestruct/conftest.py | """Pytest (plugin) definitions local to EIP-6780 tests."""
import pytest
from ethereum_test_tools import Address, Alloc, Environment
@pytest.fixture
def env() -> Environment:
    """Shared default execution environment for all tests in this package."""
    environment = Environment()
    return environment
@pytest.fixture
def selfdestruct_recipient_address(pre: Alloc) -> Address:
    """Fund an empty EOA to act as the SELFDESTRUCT beneficiary."""
    recipient = pre.fund_eoa(amount=0)
    return recipient
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/__init__.py | tests/cancun/eip6780_selfdestruct/__init__.py | """Tests for EIP-6780: SELFDESTRUCT only in same transaction."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/cancun/eip6780_selfdestruct/test_selfdestruct.py | tests/cancun/eip6780_selfdestruct/test_selfdestruct.py | """
SELFDESTRUCT only in same transaction tests.
Tests for [EIP-6780: SELFDESTRUCT only in same transaction](https://eips.ethereum.org/EIPS/eip-6780).
"""
from itertools import cycle
from typing import Dict, List
import pytest
from ethereum_test_forks import Cancun, Fork
from ethereum_test_tools import (
EOA,
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Bytecode,
Conditional,
Hash,
Initcode,
StateTestFiller,
Storage,
Transaction,
compute_create_address,
)
from ethereum_test_vm import Opcodes as Op
# Reference spec metadata consumed by the test filler tooling.
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-6780.md"
REFERENCE_SPEC_VERSION = "1b6a0e94cc47e859b9866e570391cf37dc55059a"
# Fork starting at which SELFDESTRUCT only deletes accounts created in the
# same transaction (EIP-6780).
SELFDESTRUCT_DISABLE_FORK = Cancun
# Sentinel value to indicate that the self-destructing contract address should
# be used, only for use in `pytest.mark.parametrize`, not for use within the
# test method itself.
SELF_ADDRESS = Address(0x01)
# Sentinel value to indicate that the contract should not self-destruct.
NO_SELFDESTRUCT = Address(0x00)
# Symbolic names for pre-deployed send-all recipient contracts; resolved into
# concrete addresses by the `sendall_recipient_addresses` fixture.
PRE_DEPLOY_CONTRACT_1 = "pre_deploy_contract_1"
PRE_DEPLOY_CONTRACT_2 = "pre_deploy_contract_2"
PRE_DEPLOY_CONTRACT_3 = "pre_deploy_contract_3"
@pytest.fixture
def eip_enabled(fork: Fork) -> bool:
    """True when EIP-6780 is active on the fork under test."""
    is_enabled = fork >= SELFDESTRUCT_DISABLE_FORK
    return is_enabled
@pytest.fixture
def sendall_recipient_addresses(request: pytest.FixtureRequest, pre: Alloc) -> List[Address]:
    """
    List of addresses that receive the SENDALL operation in any test.
    If the test case requires a pre-existing contract, it will be deployed
    here.
    By default the list is a single pre-deployed contract that unconditionally
    sets storage.
    """
    requested = getattr(request, "param", [PRE_DEPLOY_CONTRACT_1])
    # Cache deployments so a repeated symbolic name maps to a single contract.
    deployed: Dict[str, Address] = {}

    def resolve(entry):
        # Symbolic string names (PRE_DEPLOY_CONTRACT_*) become freshly
        # deployed contracts; address sentinels pass through unchanged.
        if type(entry) is str:
            if entry not in deployed:
                deployed[entry] = pre.deploy_contract(
                    code=Op.SSTORE(0, 0),
                    storage={0: 1},
                )
            return deployed[entry]
        return entry

    return [resolve(entry) for entry in requested]
def selfdestruct_code_preset(
    *,
    sendall_recipient_addresses: List[Address],
) -> Bytecode:
    """
    Return a bytecode that self-destructs.

    The recipient is hard-coded when there is a single candidate; otherwise it
    is read from calldata on every call.
    """
    # First we register entry into the contract
    bytecode = Op.SSTORE(0, Op.ADD(Op.SLOAD(0), 1))
    if len(sendall_recipient_addresses) != 1:
        # Load the recipient address from calldata, each test case needs to
        # pass the addresses as calldata
        bytecode += Conditional(
            # We avoid having the caller to give us our own address by checking
            # against a constant that is a magic number
            condition=Op.EQ(Op.CALLDATALOAD(0), SELF_ADDRESS),
            if_true=Op.MSTORE(0, Op.ADDRESS()),
            if_false=Op.MSTORE(0, Op.CALLDATALOAD(0)),
        )
        bytecode += Conditional(
            condition=Op.EQ(Op.MLOAD(0), NO_SELFDESTRUCT),
            if_true=Op.STOP,
            if_false=Op.SELFDESTRUCT(Op.MLOAD(0)),
        )
    else:
        # Hard-code the single only possible recipient address
        sendall_recipient = sendall_recipient_addresses[0]
        assert sendall_recipient != NO_SELFDESTRUCT, "test error"
        if sendall_recipient == SELF_ADDRESS:
            bytecode += Op.SELFDESTRUCT(Op.ADDRESS)
        else:
            bytecode += Op.SELFDESTRUCT(sendall_recipient_addresses[0])
        # NOTE(review): SELFDESTRUCT halts execution, so this SSTORE appears
        # unreachable and only affects the code bytes — confirm intent.
        bytecode += Op.SSTORE(0, 0)
    return bytecode + Op.STOP
@pytest.fixture
def selfdestruct_code(
    sendall_recipient_addresses: List[Address],
) -> Bytecode:
    """
    Default self-destructing bytecode; individual tests may adapt it as
    needed.
    """
    code = selfdestruct_code_preset(
        sendall_recipient_addresses=sendall_recipient_addresses,
    )
    return code
@pytest.mark.parametrize("create_opcode", [Op.CREATE, Op.CREATE2])
@pytest.mark.parametrize(
    "call_times,sendall_recipient_addresses",
    [
        pytest.param(
            1,
            [PRE_DEPLOY_CONTRACT_1],
            id="single_call",
        ),
        pytest.param(
            1,
            [SELF_ADDRESS],
            id="single_call_self",
        ),
        pytest.param(
            2,
            [PRE_DEPLOY_CONTRACT_1],
            id="multiple_calls_single_sendall_recipient",
        ),
        pytest.param(
            2,
            [SELF_ADDRESS],
            id="multiple_calls_single_self_recipient",
        ),
        pytest.param(
            3,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_sendall_recipients",
        ),
        pytest.param(
            3,
            [SELF_ADDRESS, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_sendall_recipients_including_self",
        ),
        pytest.param(
            3,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, SELF_ADDRESS],
            id="multiple_calls_multiple_sendall_recipients_including_self_last",
        ),
        pytest.param(
            6,
            [SELF_ADDRESS, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_repeating_sendall_recipients_including_self",
        ),
        pytest.param(
            6,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, SELF_ADDRESS],
            id="multiple_calls_multiple_repeating_sendall_recipients_including_self_last",
        ),
    ],
    indirect=["sendall_recipient_addresses"],
)
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000])
@pytest.mark.valid_from("Shanghai")
def test_create_selfdestruct_same_tx(
    state_test: StateTestFiller,
    pre: Alloc,
    sender: EOA,
    selfdestruct_code: Bytecode,
    sendall_recipient_addresses: List[Address],
    create_opcode: Op,
    call_times: int,
    selfdestruct_contract_initial_balance: int,
) -> None:
    """
    Use CREATE or CREATE2 to create a self-destructing contract, and call it in
    the same transaction.
    Behavior should be the same before and after EIP-6780.
    Test using:
    - Different send-all recipient addresses: single, multiple,
    including self
    - Different initial balances for the self-destructing contract
    - Different opcodes: CREATE, CREATE2
    """
    selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
    initcode_copy_from_address = pre.deploy_contract(selfdestruct_contract_initcode)
    # Our entry point is an initcode that in turn creates a self-destructing
    # contract
    entry_code_storage = Storage()
    # Bytecode used to create the contract, can be CREATE or CREATE2
    create_bytecode = create_opcode(size=len(selfdestruct_contract_initcode))
    # The entry contract is created by the sender (nonce 0); it in turn
    # creates the self-destructing contract with its own nonce 1.
    selfdestruct_contract_address = compute_create_address(
        address=compute_create_address(address=sender, nonce=0),
        nonce=1,
        initcode=selfdestruct_contract_initcode,
        opcode=create_opcode,
    )
    # Resolve the SELF_ADDRESS sentinel into the actual created address.
    for i in range(len(sendall_recipient_addresses)):
        if sendall_recipient_addresses[i] == SELF_ADDRESS:
            sendall_recipient_addresses[i] = selfdestruct_contract_address
    if selfdestruct_contract_initial_balance > 0:
        pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
    # Create a dict to record the expected final balances
    sendall_final_balances = dict(
        zip(sendall_recipient_addresses, [0] * len(sendall_recipient_addresses), strict=False)
    )
    selfdestruct_contract_current_balance = selfdestruct_contract_initial_balance
    # Entry code that will be executed, creates the contract and then calls it
    # in the same tx
    entry_code = (
        # Initcode is already deployed at `initcode_copy_from_address`, so just
        # copy it
        Op.EXTCODECOPY(
            initcode_copy_from_address,
            0,
            0,
            len(selfdestruct_contract_initcode),
        )
        # And we store the created address for verification purposes
        + Op.SSTORE(
            entry_code_storage.store_next(selfdestruct_contract_address),
            create_bytecode,
        )
    )
    # Store the EXTCODE* properties of the created address
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(len(selfdestruct_code)),
        Op.EXTCODESIZE(selfdestruct_contract_address),
    )
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(selfdestruct_code.keccak256()),
        Op.EXTCODEHASH(selfdestruct_contract_address),
    )
    # Call the self-destructing contract multiple times as required, increasing
    # the wei sent each time
    entry_code_balance = 0
    for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)):
        entry_code += Op.MSTORE(0, sendall_recipient)
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(1),
            Op.CALL(
                Op.GASLIMIT,  # Gas
                selfdestruct_contract_address,  # Address
                i,  # Value
                0,
                32,
                0,
                0,
            ),
        )
        entry_code_balance += i
        selfdestruct_contract_current_balance += i
        # Balance is always sent to other contracts
        if sendall_recipient != selfdestruct_contract_address:
            sendall_final_balances[sendall_recipient] += selfdestruct_contract_current_balance
        # Self-destructing contract must always have zero balance after the
        # call because the self-destruct always happens in the same transaction
        # in this test
        selfdestruct_contract_current_balance = 0
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(0),
            Op.BALANCE(selfdestruct_contract_address),
        )
    # Check the EXTCODE* properties of the self-destructing contract again
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(len(selfdestruct_code)),
        Op.EXTCODESIZE(selfdestruct_contract_address),
    )
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(selfdestruct_code.keccak256()),
        Op.EXTCODEHASH(selfdestruct_contract_address),
    )
    # Lastly return zero so the entry point contract is created and we can
    # retain the stored values for verification.
    entry_code += Op.RETURN(max(len(selfdestruct_contract_initcode), 32), 1)
    tx = Transaction(
        value=entry_code_balance,
        data=entry_code,
        sender=sender,
        to=None,
        gas_limit=500_000,
    )
    entry_code_address = tx.created_contract
    post: Dict[Address, Account] = {
        entry_code_address: Account(
            storage=entry_code_storage,
        ),
    }
    # Check the balances of the sendall recipients
    for address, balance in sendall_final_balances.items():
        post[address] = Account(balance=balance, storage={0: 1})
    # Created and self-destructed in the same tx: the account is removed both
    # before and after EIP-6780.
    post[selfdestruct_contract_address] = Account.NONEXISTENT  # type: ignore
    state_test(pre=pre, post=post, tx=tx)
@pytest.mark.parametrize("create_opcode", [Op.CREATE, Op.CREATE2])
@pytest.mark.parametrize("call_times", [0, 1])
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000])
@pytest.mark.valid_from("Shanghai")
def test_self_destructing_initcode(
    state_test: StateTestFiller,
    pre: Alloc,
    sender: EOA,
    selfdestruct_code: Bytecode,
    sendall_recipient_addresses: List[Address],
    create_opcode: Op,
    call_times: int,  # Number of times to call the self-destructing contract
    # in the same tx
    selfdestruct_contract_initial_balance: int,
) -> None:
    """
    Test that a contract can self-destruct in its initcode.
    Behavior is the same before and after EIP-6780.
    Test using:
    - Different initial balances for the self-destructing contract
    - Different opcodes: CREATE, CREATE2
    - Different number of calls to the self-destructing contract in
    the same tx
    """
    # Note: the self-destructing code is used directly as initcode here, so it
    # runs (and self-destructs) during contract creation.
    initcode_copy_from_address = pre.deploy_contract(selfdestruct_code)
    # Our entry point is an initcode that in turn creates a self-destructing
    # contract
    entry_code_storage = Storage()
    sendall_amount = 0
    # Bytecode used to create the contract, can be CREATE or CREATE2
    create_bytecode = create_opcode(size=len(selfdestruct_code))
    selfdestruct_contract_address = compute_create_address(
        address=compute_create_address(address=sender, nonce=0),
        nonce=1,
        initcode=selfdestruct_code,
        opcode=create_opcode,
    )
    # Entry code that will be executed, creates the contract and then calls it
    # in the same tx
    entry_code = (
        # Initcode is already deployed at `initcode_copy_from_address`, so just
        # copy it
        Op.EXTCODECOPY(
            initcode_copy_from_address,
            0,
            0,
            len(selfdestruct_code),
        )
        # And we store the created address for verification purposes
        + Op.SSTORE(
            entry_code_storage.store_next(selfdestruct_contract_address),
            create_bytecode,
        )
    )
    # Store the EXTCODE* properties of the created address; the contract
    # self-destructed during creation, so no code is expected at the address.
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(0),
        Op.EXTCODESIZE(selfdestruct_contract_address),
    )
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(Bytecode().keccak256()),
        Op.EXTCODEHASH(selfdestruct_contract_address),
    )
    # Call the self-destructing contract multiple times as required, increasing
    # the wei sent each time
    entry_code_balance = 0
    for i in range(call_times):
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(1),
            Op.CALL(
                Op.GASLIMIT,  # Gas
                selfdestruct_contract_address,  # Address
                i,  # Value
                0,
                0,
                0,
                0,
            ),
        )
        entry_code_balance += i
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(0),
            Op.BALANCE(selfdestruct_contract_address),
        )
    # Lastly return zero so the entry point contract is created and we can
    # retain the stored values for verification.
    entry_code += Op.RETURN(max(len(selfdestruct_code), 32), 1)
    if selfdestruct_contract_initial_balance > 0:
        # Address where the contract is created already had some balance,
        # which must be included in the send-all operation
        sendall_amount += selfdestruct_contract_initial_balance
        pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
    tx = Transaction(
        value=entry_code_balance,
        data=entry_code,
        sender=sender,
        to=None,
        gas_limit=500_000,
    )
    entry_code_address = tx.created_contract
    post: Dict[Address, Account] = {
        entry_code_address: Account(
            storage=entry_code_storage,
        ),
        selfdestruct_contract_address: Account.NONEXISTENT,  # type: ignore
        sendall_recipient_addresses[0]: Account(balance=sendall_amount, storage={0: 1}),
    }
    state_test(pre=pre, post=post, tx=tx)
@pytest.mark.parametrize("tx_value", [0, 100_000])
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000])
@pytest.mark.valid_from("Shanghai")
def test_self_destructing_initcode_create_tx(
    state_test: StateTestFiller,
    pre: Alloc,
    sender: EOA,
    tx_value: int,
    selfdestruct_code: Bytecode,
    sendall_recipient_addresses: List[Address],
    selfdestruct_contract_initial_balance: int,
) -> None:
    """
    Use a Create Transaction to execute a self-destructing initcode.
    Behavior should be the same before and after EIP-6780.
    Test using:
    - Different initial balances for the self-destructing contract
    - Different transaction value amounts
    """
    # The transaction itself is the contract creation: its data is the
    # self-destructing code used directly as initcode.
    tx = Transaction(
        sender=sender,
        value=tx_value,
        data=selfdestruct_code,
        to=None,
        gas_limit=500_000,
    )
    selfdestruct_contract_address = tx.created_contract
    pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
    # Our entry point is an initcode that in turn creates a self-destructing
    # contract
    # Both the pre-funded balance and the tx value are swept by the send-all.
    sendall_amount = selfdestruct_contract_initial_balance + tx_value
    post: Dict[Address, Account] = {
        selfdestruct_contract_address: Account.NONEXISTENT,  # type: ignore
        sendall_recipient_addresses[0]: Account(balance=sendall_amount, storage={0: 1}),
    }
    state_test(pre=pre, post=post, tx=tx)
# Can only recreate using CREATE2
@pytest.mark.parametrize("create_opcode", [Op.CREATE2])
@pytest.mark.parametrize(
    "sendall_recipient_addresses",
    [
        pytest.param(
            [PRE_DEPLOY_CONTRACT_1],
            id="selfdestruct_other_address",
        ),
        pytest.param(
            [SELF_ADDRESS],
            id="selfdestruct_to_self",
        ),
    ],
    indirect=["sendall_recipient_addresses"],
)
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000])
@pytest.mark.parametrize("recreate_times", [1])
@pytest.mark.parametrize("call_times", [1])
@pytest.mark.valid_from("Shanghai")
def test_recreate_self_destructed_contract_different_txs(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    sender: EOA,
    selfdestruct_code: Bytecode,
    selfdestruct_contract_initial_balance: int,
    sendall_recipient_addresses: List[Address],
    create_opcode: Op,
    # Number of times to recreate the contract in different transactions
    recreate_times: int,
    # Number of times to call the self-destructing contract in the same tx
    call_times: int,
) -> None:
    """
    Test that a contract can be recreated after it has self-destructed, over
    the lapse of multiple transactions.
    Behavior should be the same before and after EIP-6780.
    Test using:
    - Different initial balances for the self-destructing contract
    - Contract creating opcodes that are not CREATE
    """
    selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
    initcode_copy_from_address = pre.deploy_contract(selfdestruct_contract_initcode)
    entry_code_storage = Storage()
    sendall_amount = selfdestruct_contract_initial_balance
    # Bytecode used to create the contract
    assert create_opcode != Op.CREATE, "cannot recreate contract using CREATE opcode"
    create_bytecode = create_opcode(size=len(selfdestruct_contract_initcode))
    # Entry code that will be executed, creates the contract and then calls it
    entry_code = (
        # Initcode is already deployed at initcode_copy_from_address, so just
        # copy it
        Op.EXTCODECOPY(
            initcode_copy_from_address,
            0,
            0,
            len(selfdestruct_contract_initcode),
        )
        + Op.MSTORE(0, create_bytecode)
        # Each tx passes its index as calldata; the entry code records the
        # created address under that storage key for later verification.
        + Op.SSTORE(
            Op.CALLDATALOAD(0),
            Op.MLOAD(0),
        )
    )
    for i in range(call_times):
        entry_code += Op.CALL(
            Op.GASLIMIT,
            Op.MLOAD(0),
            i,
            0,
            0,
            0,
            0,
        )
        sendall_amount += i
    entry_code += Op.STOP
    entry_code_address = pre.deploy_contract(code=entry_code)
    selfdestruct_contract_address = compute_create_address(
        address=entry_code_address, initcode=selfdestruct_contract_initcode, opcode=create_opcode
    )
    pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
    # Resolve the SELF_ADDRESS sentinel into the actual created address.
    for i in range(len(sendall_recipient_addresses)):
        if sendall_recipient_addresses[i] == SELF_ADDRESS:
            sendall_recipient_addresses[i] = selfdestruct_contract_address
    # All create/destroy transactions are included in a single block.
    txs: List[Transaction] = []
    for i in range(recreate_times + 1):
        txs.append(
            Transaction(
                data=Hash(i),
                sender=sender,
                to=entry_code_address,
                gas_limit=500_000,
            )
        )
        entry_code_storage[i] = selfdestruct_contract_address
    post: Dict[Address, Account] = {
        entry_code_address: Account(
            storage=entry_code_storage,
        ),
        selfdestruct_contract_address: Account.NONEXISTENT,  # type: ignore
    }
    if sendall_recipient_addresses[0] != selfdestruct_contract_address:
        post[sendall_recipient_addresses[0]] = Account(balance=sendall_amount, storage={0: 1})
    blockchain_test(pre=pre, post=post, blocks=[Block(txs=txs)])
@pytest.mark.parametrize(
    "call_times,sendall_recipient_addresses",
    [
        pytest.param(
            1,
            [PRE_DEPLOY_CONTRACT_1],
            id="single_call",
        ),
        pytest.param(
            1,
            [SELF_ADDRESS],
            id="single_call_self",
        ),
        pytest.param(
            2,
            [PRE_DEPLOY_CONTRACT_1],
            id="multiple_calls_single_sendall_recipient",
        ),
        pytest.param(
            2,
            [SELF_ADDRESS],
            id="multiple_calls_single_self_recipient",
        ),
        pytest.param(
            3,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_sendall_recipients",
        ),
        pytest.param(
            3,
            [SELF_ADDRESS, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_sendall_recipients_including_self",
        ),
        pytest.param(
            3,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, SELF_ADDRESS],
            id="multiple_calls_multiple_sendall_recipients_including_self_last",
        ),
        pytest.param(
            6,
            [SELF_ADDRESS, PRE_DEPLOY_CONTRACT_2, PRE_DEPLOY_CONTRACT_3],
            id="multiple_calls_multiple_repeating_sendall_recipients_including_self",
        ),
        pytest.param(
            6,
            [PRE_DEPLOY_CONTRACT_1, PRE_DEPLOY_CONTRACT_2, SELF_ADDRESS],
            id="multiple_calls_multiple_repeating_sendall_recipients_including_self_last",
        ),
    ],
    indirect=["sendall_recipient_addresses"],
)
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 100_000])
@pytest.mark.valid_from("Shanghai")
def test_selfdestruct_pre_existing(
    state_test: StateTestFiller,
    eip_enabled: bool,
    pre: Alloc,
    sender: EOA,
    selfdestruct_code: Bytecode,
    selfdestruct_contract_initial_balance: int,
    sendall_recipient_addresses: List[Address],
    call_times: int,
) -> None:
    """
    Test calling a previously created account that contains a selfdestruct, and
    verify its balance is sent to the destination address.
    After EIP-6780, the balance should be sent to the send-all recipient
    address, similar to the behavior before the EIP, but the account is not
    deleted.
    Test using:
    - Different send-all recipient addresses: single, multiple,
    including self
    - Different initial balances for the self-destructing contract
    """
    selfdestruct_contract_address = pre.deploy_contract(
        selfdestruct_code, balance=selfdestruct_contract_initial_balance
    )
    entry_code_storage = Storage()
    # Resolve the SELF_ADDRESS sentinel into the actual deployed address.
    for i in range(len(sendall_recipient_addresses)):
        if sendall_recipient_addresses[i] == SELF_ADDRESS:
            sendall_recipient_addresses[i] = selfdestruct_contract_address
    # Create a dict to record the expected final balances
    sendall_final_balances = dict(
        zip(sendall_recipient_addresses, [0] * len(sendall_recipient_addresses), strict=False)
    )
    selfdestruct_contract_current_balance = selfdestruct_contract_initial_balance
    # Entry code in this case will simply call the pre-existing self-
    # destructing contract, as many times as required
    entry_code = Bytecode()
    # Call the self-destructing contract multiple times as required, increasing
    # the wei sent each time
    entry_code_balance = 0
    for i, sendall_recipient in zip(range(call_times), cycle(sendall_recipient_addresses)):
        entry_code += Op.MSTORE(0, sendall_recipient)
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(1),
            Op.CALL(
                Op.GASLIMIT,  # Gas
                selfdestruct_contract_address,  # Address
                i,  # Value
                0,
                32,
                0,
                0,
            ),
        )
        entry_code_balance += i
        selfdestruct_contract_current_balance += i
        # Balance is always sent to other contracts
        if sendall_recipient != selfdestruct_contract_address:
            sendall_final_balances[sendall_recipient] += selfdestruct_contract_current_balance
        # Balance is only kept by the self-destructing contract if we are
        # sending to self and the EIP is activated, otherwise the balance is
        # destroyed
        if sendall_recipient != selfdestruct_contract_address or not eip_enabled:
            selfdestruct_contract_current_balance = 0
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(selfdestruct_contract_current_balance),
            Op.BALANCE(selfdestruct_contract_address),
        )
    # Check the EXTCODE* properties of the self-destructing contract
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(len(selfdestruct_code)),
        Op.EXTCODESIZE(selfdestruct_contract_address),
    )
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(selfdestruct_code.keccak256()),
        Op.EXTCODEHASH(selfdestruct_contract_address),
    )
    # Lastly return zero so the entry point contract is created and we can
    # retain the stored values for verification.
    entry_code += Op.RETURN(32, 1)
    tx = Transaction(
        value=entry_code_balance,
        data=entry_code,
        sender=sender,
        to=None,
        gas_limit=500_000,
    )
    entry_code_address = tx.created_contract
    post: Dict[Address, Account] = {
        entry_code_address: Account(
            storage=entry_code_storage,
        ),
    }
    # Check the balances of the sendall recipients
    for address, balance in sendall_final_balances.items():
        if address != selfdestruct_contract_address:
            post[address] = Account(balance=balance, storage={0: 1})
    if eip_enabled:
        # Pre-existing account survives EIP-6780 selfdestruct; slot 0 counts
        # the number of successful entries into the contract.
        balance = selfdestruct_contract_current_balance
        post[selfdestruct_contract_address] = Account(
            balance=balance,
            storage={0: call_times},
        )
    else:
        post[selfdestruct_contract_address] = Account.NONEXISTENT  # type: ignore
    state_test(pre=pre, post=post, tx=tx)
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 1])
@pytest.mark.parametrize("call_times", [1, 10])
@pytest.mark.valid_from("Shanghai")
def test_selfdestruct_created_same_block_different_tx(
    blockchain_test: BlockchainTestFiller,
    eip_enabled: bool,
    pre: Alloc,
    sender: EOA,
    selfdestruct_contract_initial_balance: int,
    sendall_recipient_addresses: List[Address],
    call_times: int,
) -> None:
    """
    Test that if an account created in the same block that contains a
    selfdestruct is called, its balance is sent to the send-all address, but
    the account is not deleted.
    """
    selfdestruct_code = selfdestruct_code_preset(
        sendall_recipient_addresses=sendall_recipient_addresses,
    )
    selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
    # Sender nonce 0 creates the self-destructing contract (first tx), nonce 1
    # creates the entry contract (second tx, same block).
    selfdestruct_contract_address = compute_create_address(address=sender, nonce=0)
    entry_code_address = compute_create_address(address=sender, nonce=1)
    entry_code_storage = Storage()
    sendall_amount = selfdestruct_contract_initial_balance
    entry_code = Bytecode()
    # Entry code in this case will simply call the pre-existing self-
    # destructing contract, as many times as required
    # Call the self-destructing contract multiple times as required, increasing
    # the wei sent each time
    entry_code_balance = 0
    for i in range(call_times):
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(1),
            Op.CALL(
                Op.GASLIMIT,  # Gas
                selfdestruct_contract_address,  # Address
                i,  # Value
                0,
                0,
                0,
                0,
            ),
        )
        entry_code_balance += i
        sendall_amount += i
        entry_code += Op.SSTORE(
            entry_code_storage.store_next(0),
            Op.BALANCE(selfdestruct_contract_address),
        )
    # Check the EXTCODE* properties of the self-destructing contract
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(len(selfdestruct_code)),
        Op.EXTCODESIZE(selfdestruct_contract_address),
    )
    entry_code += Op.SSTORE(
        entry_code_storage.store_next(selfdestruct_code.keccak256()),
        Op.EXTCODEHASH(selfdestruct_contract_address),
    )
    # Lastly return zero so the entry point contract is created and we can
    # retain the stored values for verification.
    entry_code += Op.RETURN(32, 1)
    post: Dict[Address, Account] = {
        entry_code_address: Account(
            storage=entry_code_storage,
        ),
        sendall_recipient_addresses[0]: Account(balance=sendall_amount, storage={0: 1}),
    }
    if eip_enabled:
        # Created in a *different* tx of the same block: post-EIP-6780 the
        # account is not deleted; slot 0 counts the entries into the contract.
        post[selfdestruct_contract_address] = Account(balance=0, storage={0: call_times})
    else:
        post[selfdestruct_contract_address] = Account.NONEXISTENT  # type: ignore
    txs = [
        Transaction(
            value=selfdestruct_contract_initial_balance,
            data=selfdestruct_contract_initcode,
            sender=sender,
            to=None,
            gas_limit=500_000,
        ),
        Transaction(
            value=entry_code_balance,
            data=entry_code,
            sender=sender,
            to=None,
            gas_limit=500_000,
        ),
    ]
    blockchain_test(pre=pre, post=post, blocks=[Block(txs=txs)])
@pytest.mark.parametrize("call_times", [1])
@pytest.mark.parametrize("selfdestruct_contract_initial_balance", [0, 1])
@pytest.mark.parametrize("call_opcode", [Op.DELEGATECALL, Op.CALLCODE])
@pytest.mark.parametrize("create_opcode", [Op.CREATE])
@pytest.mark.valid_from("Shanghai")
def test_calling_from_new_contract_to_pre_existing_contract(
state_test: StateTestFiller,
pre: Alloc,
sender: EOA,
sendall_recipient_addresses: List[Address],
create_opcode: Op,
call_opcode: Op,
call_times: int,
selfdestruct_contract_initial_balance: int,
) -> None:
"""
Test that if an account created in the current transaction delegate-call a
previously created account that executes self-destruct, the calling account
is deleted.
"""
pre_existing_selfdestruct_address = pre.deploy_contract(
selfdestruct_code_preset(
sendall_recipient_addresses=sendall_recipient_addresses,
),
)
# Our entry point is an initcode that in turn creates a self-destructing
# contract
entry_code_storage = Storage()
sendall_amount = 0
entry_code_address = compute_create_address(address=sender, nonce=0)
selfdestruct_contract_address = compute_create_address(address=entry_code_address, nonce=1)
pre.fund_address(selfdestruct_contract_address, selfdestruct_contract_initial_balance)
# self-destructing call
selfdestruct_code = call_opcode(address=pre_existing_selfdestruct_address)
selfdestruct_contract_initcode = Initcode(deploy_code=selfdestruct_code)
initcode_copy_from_address = pre.deploy_contract(selfdestruct_contract_initcode)
# Bytecode used to create the contract, can be CREATE or CREATE2
create_bytecode = create_opcode(size=len(selfdestruct_contract_initcode))
# Entry code that will be executed, creates the contract and then calls it
# in the same tx
entry_code = (
# Initcode is already deployed at `initcode_copy_from_address`, so just
# copy it
Op.EXTCODECOPY(
initcode_copy_from_address,
0,
0,
len(selfdestruct_contract_initcode),
)
# And we store the created address for verification purposes
+ Op.SSTORE(
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_memory.py | tests/benchmark/test_worst_memory.py | """
Tests that benchmark EVMs in the worst-case memory opcodes.
"""
from enum import auto
import pytest
from ethereum_test_base_types.base_types import Bytes
from ethereum_test_benchmark.benchmark_code_generator import JumpLoopGenerator
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Alloc,
BenchmarkTestFiller,
Bytecode,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "TODO"
REFERENCE_SPEC_VERSION = "TODO"
class CallDataOrigin:
    """Enum for calldata origins."""

    # NOTE(review): this is a plain class, not an `enum.Enum` subclass, so
    # `auto()` yields `enum.auto` sentinel instances rather than enum members.
    # Comparisons such as `origin == CallDataOrigin.CALL` still work because
    # pytest's `parametrize` passes these exact objects through to the test —
    # confirm this is intentional before converting to a real Enum.
    TRANSACTION = auto()
    CALL = auto()
@pytest.mark.parametrize(
    "origin",
    [
        pytest.param(CallDataOrigin.TRANSACTION, id="transaction"),
        pytest.param(CallDataOrigin.CALL, id="call"),
    ],
)
@pytest.mark.parametrize(
    "size",
    [
        pytest.param(0, id="0 bytes"),
        pytest.param(100, id="100 bytes"),
        pytest.param(10 * 1024, id="10KiB"),
        pytest.param(1024 * 1024, id="1MiB"),
    ],
)
@pytest.mark.parametrize(
    "fixed_src_dst",
    [
        True,
        False,
    ],
)
@pytest.mark.parametrize(
    "non_zero_data",
    [
        True,
        False,
    ],
)
def test_worst_calldatacopy(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    origin: CallDataOrigin,
    size: int,
    fixed_src_dst: bool,
    non_zero_data: bool,
    gas_benchmark_value: int,
) -> None:
    """
    Test running a block filled with CALLDATACOPY executions.

    The copied calldata either comes directly from the transaction
    (`origin == TRANSACTION`) or is forwarded through an intermediate
    contract via STATICCALL (`origin == CALL`).
    """
    if size == 0 and non_zero_data:
        pytest.skip("Non-zero data with size 0 is not applicable.")

    # If `non_zero_data` is True, we fill the calldata with deterministic
    # pseudo-random data. The `size == 0 and non_zero_data` combination is
    # skipped above.
    data = Bytes([i % 256 for i in range(size)]) if non_zero_data else Bytes()

    intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
    min_gas = intrinsic_gas_calculator(calldata=data)
    if min_gas > gas_benchmark_value:
        # Bug fix: this message was a plain string, so "{min_gas}" was printed
        # literally instead of the actual value. Use an f-string.
        pytest.skip(
            f"Minimum gas required for calldata ({min_gas}) is greater than the gas limit"
        )

    # We create the contract that will be doing the CALLDATACOPY multiple
    # times.
    #
    # If `non_zero_data` is True, we leverage CALLDATASIZE for the copy
    # length. Otherwise, since we don't send zero data explicitly via
    # calldata, PUSH the target size and use DUP1 to copy it.
    setup = Bytecode() if non_zero_data or size == 0 else Op.PUSH3(size)
    # Fixed offset 0, or a small pseudo-random offset derived from remaining
    # gas so src/dst vary between iterations.
    src_dst = 0 if fixed_src_dst else Op.MOD(Op.GAS, 7)
    attack_block = Op.CALLDATACOPY(
        src_dst, src_dst, Op.CALLDATASIZE if non_zero_data or size == 0 else Op.DUP1
    )
    code_address = JumpLoopGenerator(setup=setup, attack_block=attack_block).deploy_contracts(
        pre=pre, fork=fork
    )
    tx_target = code_address

    # If the origin is CALL, we need to create a contract that will call the
    # target contract with the calldata.
    if origin == CallDataOrigin.CALL:
        # If `non_zero_data` is False we leverage just using zeroed memory.
        # Otherwise, we copy the calldata received from the transaction.
        setup = (
            Op.CALLDATACOPY(Op.PUSH0, Op.PUSH0, Op.CALLDATASIZE) if non_zero_data else Bytecode()
        ) + Op.JUMPDEST
        arg_size = Op.CALLDATASIZE if non_zero_data else size
        attack_block = Op.STATICCALL(
            address=code_address, args_offset=Op.PUSH0, args_size=arg_size
        )
        tx_target = JumpLoopGenerator(setup=setup, attack_block=attack_block).deploy_contracts(
            pre=pre, fork=fork
        )

    tx = Transaction(
        to=tx_target,
        gas_limit=gas_benchmark_value,
        data=data,
        sender=pre.fund_eoa(),
    )

    benchmark_test(tx=tx)
@pytest.mark.parametrize(
    "max_code_size_ratio",
    [
        pytest.param(0, id="0 bytes"),
        pytest.param(0.25, id="0.25x max code size"),
        pytest.param(0.50, id="0.50x max code size"),
        pytest.param(0.75, id="0.75x max code size"),
        pytest.param(1.00, id="max code size"),
    ],
)
@pytest.mark.parametrize(
    "fixed_src_dst",
    [
        True,
        False,
    ],
)
def test_worst_codecopy(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    max_code_size_ratio: float,
    fixed_src_dst: bool,
) -> None:
    """Test running a block filled with CODECOPY executions."""
    max_code_size = fork.max_code_size()
    # Copy length as a fraction of the maximum contract size.
    size = int(max_code_size * max_code_size_ratio)

    setup = Op.PUSH32(size)
    # Fixed offset 0, or a small pseudo-random offset derived from the
    # remaining gas so src/dst vary between loop iterations.
    src_dst = 0 if fixed_src_dst else Op.MOD(Op.GAS, 7)
    attack_block = Op.CODECOPY(src_dst, src_dst, Op.DUP1)  # DUP1 copies size.

    # NOTE(review): `setup`/`attack_block` are passed both to the generator
    # constructor and to `generate_repeated_code` — confirm this duplication
    # is what the generator API expects.
    code = JumpLoopGenerator(setup=setup, attack_block=attack_block).generate_repeated_code(
        repeated_code=attack_block, setup=setup, fork=fork
    )

    # The code generated above is not guaranteed to be of max_code_size, so
    # we pad it since a test parameter targets CODECOPYing a contract with
    # max code size. Padded bytecode values are not relevant.
    code += Op.INVALID * (max_code_size - len(code))
    assert len(code) == max_code_size, (
        f"Code size {len(code)} is not equal to max code size {max_code_size}."
    )

    tx = Transaction(
        to=pre.deploy_contract(code=code),
        sender=pre.fund_eoa(),
    )

    benchmark_test(tx=tx)
@pytest.mark.parametrize(
    "size",
    [
        pytest.param(0, id="0 bytes"),
        pytest.param(100, id="100 bytes"),
        pytest.param(10 * 1024, id="10KiB"),
        pytest.param(1024 * 1024, id="1MiB"),
    ],
)
@pytest.mark.parametrize(
    "fixed_dst",
    [
        True,
        False,
    ],
)
def test_worst_returndatacopy(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    size: int,
    fixed_dst: bool,
) -> None:
    """Test running a block filled with RETURNDATACOPY executions."""
    # Create the contract that will RETURN the data that will be used for
    # RETURNDATACOPY. Random-ish data is injected at different points in
    # memory to avoid making the content predictable. If `size` is 0, this
    # helper contract won't be used.
    code = (
        Op.MSTORE8(0, Op.GAS)
        + Op.MSTORE8(size // 2, Op.GAS)
        + Op.MSTORE8(size - 1, Op.GAS)
        + Op.RETURN(0, size)
    )
    helper_contract = pre.deploy_contract(code=code)

    # We create the contract that will be doing the RETURNDATACOPY multiple
    # times. (Fix: this assignment was previously duplicated; the second,
    # identical assignment was dead code and has been removed.)
    returndata_gen = Op.STATICCALL(address=helper_contract) if size > 0 else Bytecode()
    dst = 0 if fixed_dst else Op.MOD(Op.GAS, 7)
    attack_block = Op.RETURNDATACOPY(dst, Op.PUSH0, Op.RETURNDATASIZE)

    # The attack loop is constructed as:
    # ```
    # JUMPDEST(#)
    # RETURNDATACOPY(...)
    # RETURNDATACOPY(...)
    # ...
    # STATICCALL(address=helper_contract)
    # JUMP(#)
    # ```
    # The goal is that once per (big) loop iteration, the helper contract is
    # called to generate fresh returndata to continue calling RETURNDATACOPY.
    benchmark_test(
        code_generator=JumpLoopGenerator(
            setup=returndata_gen, attack_block=attack_block, cleanup=returndata_gen
        ),
    )
@pytest.mark.parametrize(
    "size",
    [
        pytest.param(0, id="0 bytes"),
        pytest.param(100, id="100 bytes"),
        pytest.param(10 * 1024, id="10KiB"),
        pytest.param(1024 * 1024, id="1MiB"),
    ],
)
@pytest.mark.parametrize(
    "fixed_src_dst",
    [
        True,
        False,
    ],
)
def test_worst_mcopy(
    benchmark_test: BenchmarkTestFiller,
    size: int,
    fixed_src_dst: bool,
) -> None:
    """Test running a block filled with MCOPY executions."""
    # Copy in place at offset 0, or derive a small pseudo-random offset from
    # the remaining gas so src/dst vary across iterations.
    if fixed_src_dst:
        copy_offset = 0
    else:
        copy_offset = Op.MOD(Op.GAS, 7)
    repeated_op = Op.MCOPY(copy_offset, copy_offset, size)

    # Touch the start, middle and end of the copied region so its contents
    # are not trivially zero; skipped entirely for the empty-copy case.
    scramble = Bytecode()
    if size > 0:
        scramble = (
            Op.MSTORE8(0, Op.GAS)
            + Op.MSTORE8(size // 2, Op.GAS)
            + Op.MSTORE8(size - 1, Op.GAS)
        )

    benchmark_test(
        code_generator=JumpLoopGenerator(
            setup=scramble, attack_block=repeated_op, cleanup=scramble
        ),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_bytecode.py | tests/benchmark/test_worst_bytecode.py | """
Tests that benchmark EVMs in worst-case opcode scenarios.
"""
import math
import pytest
from ethereum_test_benchmark.benchmark_code_generator import JumpLoopGenerator
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
BenchmarkTestFiller,
Block,
BlockchainTestFiller,
Bytecode,
Environment,
Hash,
Transaction,
While,
compute_create2_address,
)
from ethereum_test_types.helpers import compute_create_address
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "TODO"
REFERENCE_SPEC_VERSION = "TODO"
# Table of deterministic pseudo-random 32-byte values used below to scramble
# memory cheaply with XOR instead of invoking SHA3 for every word.
XOR_TABLE_SIZE = 256
XOR_TABLE = [Hash(i).sha256() for i in range(XOR_TABLE_SIZE)]
@pytest.mark.parametrize(
    "opcode",
    [
        Op.EXTCODESIZE,
        Op.EXTCODEHASH,
        Op.CALL,
        Op.CALLCODE,
        Op.DELEGATECALL,
        Op.STATICCALL,
        Op.EXTCODECOPY,
    ],
)
def test_worst_bytecode_single_opcode(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc,
    fork: Fork,
    opcode: Op,
    env: Environment,
    gas_benchmark_value: int,
) -> None:
    """
    Test a block execution where a single opcode execution maxes out the gas
    limit, and the opcodes access a huge amount of contract code.

    We first use a single block to deploy a factory contract that will be used
    to deploy a large number of contracts.

    This is done to avoid having a big pre-allocation size for the test.

    The test is performed in the last block of the test, and the entire block
    gas limit is consumed by repeated opcode executions.
    """
    # The attack gas limit is the gas limit which the target tx will use. The
    # test will scale the block gas limit to setup the contracts accordingly
    # to be able to pay for the contract deposit. This has to take into
    # account the 200 gas per byte, but also the quadratic memory expansion
    # costs which have to be paid each time the memory is being setup.
    attack_gas_limit = gas_benchmark_value
    max_contract_size = fork.max_code_size()

    gas_costs = fork.gas_costs()

    # Calculate the absolute minimum gas costs to deploy the contract. This
    # does not take into account setting up the actual memory (using KECCAK256
    # and XOR) so the actual costs of deploying the contract is higher.
    memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
    memory_gas_minimum = memory_expansion_gas_calculator(new_bytes=len(bytes(max_contract_size)))
    code_deposit_gas_minimum = (
        fork.gas_costs().G_CODE_DEPOSIT_BYTE * max_contract_size + memory_gas_minimum
    )

    intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
    # Calculate the loop cost of the attacker to query one address.
    loop_cost = (
        gas_costs.G_KECCAK_256  # KECCAK static cost
        + math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD  # KECCAK dynamic
        # cost for CREATE2
        + gas_costs.G_VERY_LOW * 3  # ~MSTOREs+ADDs
        + gas_costs.G_COLD_ACCOUNT_ACCESS  # Opcode cost
        + 30  # ~Gluing opcodes (rough estimate, not an exact gas figure)
    )
    # Calculate the number of contracts to be targeted.
    num_contracts = (
        # Base available gas = GAS_LIMIT - intrinsic - (out of loop MSTOREs)
        attack_gas_limit - intrinsic_gas_cost_calc() - gas_costs.G_VERY_LOW * 4
    ) // loop_cost

    # Set the block gas limit to a relative high value to ensure the code
    # deposit tx fits in the block (there is enough gas available in the block
    # to execute this).
    minimum_gas_limit = code_deposit_gas_minimum * 2 * num_contracts
    if env.gas_limit < minimum_gas_limit:
        raise Exception(
            f"`BENCHMARKING_MAX_GAS` ({env.gas_limit}) is no longer enough to support this test, "
            f"which requires {minimum_gas_limit} gas for its setup. Update the value or consider "
            "optimizing gas usage during the setup phase of this test."
        )

    # The initcode will take its address as a starting point to the input to
    # the keccak hash function. It will reuse the output of the hash function
    # in a loop to create a large amount of seemingly random code, until it
    # reaches the maximum contract size.
    initcode = (
        Op.MSTORE(0, Op.ADDRESS)
        + While(
            body=(
                Op.SHA3(Op.SUB(Op.MSIZE, 32), 32)
                # Use a xor table to avoid having to call the "expensive" sha3
                # opcode as much
                + sum(
                    (Op.PUSH32[xor_value] + Op.XOR + Op.DUP1 + Op.MSIZE + Op.MSTORE)
                    for xor_value in XOR_TABLE
                )
                + Op.POP
            ),
            condition=Op.LT(Op.MSIZE, max_contract_size),
        )
        # Despite the whole contract has random bytecode, we make the first
        # opcode be a STOP so CALL-like attacks return as soon as possible,
        # while EXTCODE(HASH|SIZE) work as intended.
        + Op.MSTORE8(0, 0x00)
        + Op.RETURN(0, max_contract_size)
    )
    initcode_address = pre.deploy_contract(code=initcode)

    # The factory contract will simply use the initcode that is already
    # deployed, and create a new contract and return its address if
    # successful.
    factory_code = (
        Op.EXTCODECOPY(
            address=initcode_address,
            dest_offset=0,
            offset=0,
            size=Op.EXTCODESIZE(initcode_address),
        )
        + Op.MSTORE(
            0,
            Op.CREATE2(
                value=0,
                offset=0,
                size=Op.EXTCODESIZE(initcode_address),
                salt=Op.SLOAD(0),
            ),
        )
        # Slot 0 doubles as an ever-incrementing CREATE2 salt.
        + Op.SSTORE(0, Op.ADD(Op.SLOAD(0), 1))
        + Op.RETURN(0, 32)
    )
    factory_address = pre.deploy_contract(code=factory_code)

    # The factory caller will call the factory contract N times, creating N
    # new contracts. Calldata should contain the N value.
    factory_caller_code = Op.CALLDATALOAD(0) + While(
        body=Op.POP(Op.CALL(address=factory_address)),
        condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
    )
    factory_caller_address = pre.deploy_contract(code=factory_caller_code)

    contracts_deployment_tx = Transaction(
        to=factory_caller_address,
        gas_limit=env.gas_limit,
        gas_price=10**6,
        data=Hash(num_contracts),
        sender=pre.fund_eoa(),
    )

    # Expected post-state: every CREATE2-deployed contract exists with nonce 1.
    post = {}
    deployed_contract_addresses = []
    for i in range(num_contracts):
        deployed_contract_address = compute_create2_address(
            address=factory_address,
            salt=i,
            initcode=initcode,
        )
        post[deployed_contract_address] = Account(nonce=1)
        deployed_contract_addresses.append(deployed_contract_address)

    attack_call = Bytecode()
    if opcode == Op.EXTCODECOPY:
        attack_call = Op.EXTCODECOPY(address=Op.SHA3(32 - 20 - 1, 85), dest_offset=96, size=1000)
    else:
        # For the rest of the opcodes, we can use the same generic attack call
        # since all only minimally need the `address` of the target.
        attack_call = Op.POP(opcode(address=Op.SHA3(32 - 20 - 1, 85)))
    attack_code = (
        # Setup memory for later CREATE2 address generation loop.
        # 0xFF+[Address(20bytes)]+[seed(32bytes)]+[initcode keccak(32bytes)]
        Op.MSTORE(0, factory_address)
        + Op.MSTORE8(32 - 20 - 1, 0xFF)
        + Op.MSTORE(32, 0)
        + Op.MSTORE(64, initcode.keccak256())
        # Main loop: hash the CREATE2 preimage to derive each target address,
        # then bump the seed at offset 32 for the next iteration.
        + While(
            body=attack_call + Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1)),
        )
    )

    if len(attack_code) > max_contract_size:
        # TODO: A workaround could be to split the opcode code into multiple
        # contracts and call them in sequence.
        raise ValueError(
            f"Code size {len(attack_code)} exceeds maximum code size {max_contract_size}"
        )
    opcode_address = pre.deploy_contract(code=attack_code)
    opcode_tx = Transaction(
        to=opcode_address,
        gas_limit=attack_gas_limit,
        gas_price=10**9,
        sender=pre.fund_eoa(),
    )

    blockchain_test(
        pre=pre,
        post=post,
        blocks=[
            Block(txs=[contracts_deployment_tx]),
            Block(txs=[opcode_tx]),
        ],
        exclude_full_post_state_in_output=True,
    )
@pytest.mark.parametrize(
    "pattern",
    [
        Op.STOP,
        Op.JUMPDEST,
        Op.PUSH1[bytes(Op.JUMPDEST)],
        Op.PUSH2[bytes(Op.JUMPDEST + Op.JUMPDEST)],
        Op.PUSH1[bytes(Op.JUMPDEST)] + Op.JUMPDEST,
        Op.PUSH2[bytes(Op.JUMPDEST + Op.JUMPDEST)] + Op.JUMPDEST,
    ],
    ids=lambda x: x.hex(),
)
def test_worst_initcode_jumpdest_analysis(
    benchmark_test: BenchmarkTestFiller,
    fork: Fork,
    pattern: Bytecode,
) -> None:
    """
    Test the jumpdest analysis performance of the initcode.

    This benchmark places a very long initcode in the memory and then invoke
    CREATE instructions with this initcode up to the block gas limit. The
    initcode itself has minimal execution time but forces the EVM to perform
    the full jumpdest analysis on the parametrized byte pattern. The initicode
    is modified by mixing-in the returned create address between CREATE
    invocations to prevent caching.
    """
    initcode_size = fork.max_initcode_size()

    # Expand the initcode pattern to the transaction data so it can be used in
    # CALLDATACOPY in the main contract. TODO: tune the tx_data_len param.
    tx_data_len = 1024
    tx_data = pattern * (tx_data_len // len(pattern))
    # Pad the remainder (when the pattern length doesn't divide 1024 evenly)
    # with JUMPDEST bytes.
    tx_data += (tx_data_len - len(tx_data)) * bytes(Op.JUMPDEST)
    assert len(tx_data) == tx_data_len
    assert initcode_size % len(tx_data) == 0

    # Prepare the initcode in memory by tiling the 1 KiB calldata chunk.
    code_prepare_initcode = sum(
        (
            Op.CALLDATACOPY(dest_offset=i * len(tx_data), offset=0, size=Op.CALLDATASIZE)
            for i in range(initcode_size // len(tx_data))
        ),
        Bytecode(),
    )

    # At the start of the initcode execution, jump to the last opcode.
    # This forces EVM to do the full jumpdest analysis.
    initcode_prefix = Op.JUMP(initcode_size - 1)
    code_prepare_initcode += Op.MSTORE(
        0, Op.PUSH32[bytes(initcode_prefix).ljust(32, bytes(Op.JUMPDEST))]
    )

    # Make sure the last opcode in the initcode is JUMPDEST.
    code_prepare_initcode += Op.MSTORE(initcode_size - 32, Op.PUSH32[bytes(Op.JUMPDEST) * 32])

    # Each iteration stores the previous CREATE's return value into the
    # initcode (at the offset right after the jump prefix) so the initcode
    # differs between invocations and cannot be cached.
    attack_block = (
        Op.PUSH1[len(initcode_prefix)]
        + Op.MSTORE
        + Op.CREATE(value=Op.PUSH0, offset=Op.PUSH0, size=Op.MSIZE)
    )
    # The trailing PUSH0 seeds the first MSTORE value for the loop above.
    setup = code_prepare_initcode + Op.PUSH0
    benchmark_test(
        code_generator=JumpLoopGenerator(
            setup=setup,
            attack_block=attack_block,
            tx_kwargs={"data": tx_data},
        ),
    )
@pytest.mark.parametrize(
    "opcode",
    [
        Op.CREATE,
        Op.CREATE2,
    ],
)
@pytest.mark.parametrize(
    "max_code_size_ratio, non_zero_data, value",
    [
        # To avoid a blowup of combinations, the value dimension is only
        # explored for the non-zero data case, so isn't affected by code size
        # influence.
        pytest.param(0, False, 0, id="0 bytes without value"),
        pytest.param(0, False, 1, id="0 bytes with value"),
        pytest.param(0.25, True, 0, id="0.25x max code size with non-zero data"),
        pytest.param(0.25, False, 0, id="0.25x max code size with zero data"),
        pytest.param(0.50, True, 0, id="0.50x max code size with non-zero data"),
        pytest.param(0.50, False, 0, id="0.50x max code size with zero data"),
        pytest.param(0.75, True, 0, id="0.75x max code size with non-zero data"),
        pytest.param(0.75, False, 0, id="0.75x max code size with zero data"),
        pytest.param(1.00, True, 0, id="max code size with non-zero data"),
        pytest.param(1.00, False, 0, id="max code size with zero data"),
    ],
)
def test_worst_create(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    opcode: Op,
    max_code_size_ratio: float,
    non_zero_data: bool,
    value: int,
) -> None:
    """
    Test the CREATE and CREATE2 performance with different configurations.
    """
    max_code_size = fork.max_code_size()
    code_size = int(max_code_size * max_code_size_ratio)

    # Deploy the initcode template which has following design:
    # ```
    # PUSH3(code_size)
    # [CODECOPY(DUP1) -- Conditional that non_zero_data is True]
    # RETURN(0, DUP1)
    # [<pad to code_size>] -- Conditional that non_zero_data is True]
    # ```
    code = (
        Op.PUSH3(code_size)
        + (Op.CODECOPY(size=Op.DUP1) if non_zero_data else Bytecode())
        + Op.RETURN(0, Op.DUP1)
    )
    if non_zero_data:  # Pad to code_size with deterministic non-zero bytes.
        code += bytes([i % 256 for i in range(code_size - len(code))])
    initcode_template_contract = pre.deploy_contract(code=code)

    # Create the benchmark contract which has the following design:
    # ```
    # PUSH(value)
    # [EXTCODECOPY(full initcode_template_contract)
    #  -> Conditional that non_zero_data is True]
    #
    # JUMPDEST (#)
    # (CREATE|CREATE2)
    # (CREATE|CREATE2)
    # ...
    # JUMP(#)
    # ```
    setup = (
        Op.PUSH3(code_size)
        + Op.PUSH1(value)
        + Op.EXTCODECOPY(
            address=initcode_template_contract,
            size=Op.DUP2,  # DUP2 refers to the EXTCODESIZE value above.
        )
    )
    if opcode == Op.CREATE2:
        # For CREATE2, we provide an initial salt.
        setup += Op.PUSH1(42)

    attack_block = (
        # For CREATE:
        # - DUP2 refers to the EXTOCODESIZE value pushed in code_prefix.
        # - DUP3 refers to PUSH1(value) above.
        Op.POP(Op.CREATE(value=Op.DUP3, offset=0, size=Op.DUP2))
        if opcode == Op.CREATE
        # For CREATE2: we manually push the arguments because we leverage the
        # return value of previous CREATE2 calls as salt for the next CREATE2
        # call.
        # - DUP4 is targeting the PUSH1(value) from the code_prefix.
        # - DUP3 is targeting the EXTCODESIZE value pushed in code_prefix.
        else Op.DUP3 + Op.PUSH0 + Op.DUP4 + Op.CREATE2
    )

    code = JumpLoopGenerator(setup=setup, attack_block=attack_block).generate_repeated_code(
        repeated_code=attack_block, setup=setup, fork=fork
    )

    tx = Transaction(
        # Set enough balance in the pre-alloc for `value > 0` configurations.
        to=pre.deploy_contract(code=code, balance=1_000_000_000 if value > 0 else 0),
        sender=pre.fund_eoa(),
    )

    benchmark_test(tx=tx)
@pytest.mark.parametrize(
    "opcode",
    [
        Op.CREATE,
        Op.CREATE2,
    ],
)
def test_worst_creates_collisions(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    opcode: Op,
    gas_benchmark_value: int,
) -> None:
    """Test the CREATE and CREATE2 collisions performance."""
    # We deploy a "proxy contract" which is the contract that will be called
    # in a loop using all the gas in the block. This "proxy contract" is the
    # one executing CREATE2 failing with a collision. The reason why we need a
    # "proxy contract" is that CREATE(2) failing with a collision will consume
    # all the available gas. If we try to execute the CREATE(2) directly
    # without being wrapped **and capped in gas** in a previous CALL, we would
    # run out of gas very fast!
    # The proxy contract calls CREATE(2) with empty initcode. The current call
    # frame gas will be exhausted because of the collision. For this reason
    # the caller will carefully give us the minimal gas necessary to execute
    # the CREATE(2) and not waste any extra gas in the CREATE(2)-failure.
    # Note that these CREATE(2) calls will fail because in (**) below we pre-
    # alloc contracts with the same address as the ones that CREATE(2) will
    # try to create.
    proxy_contract = pre.deploy_contract(
        code=Op.CREATE2(value=Op.PUSH0, salt=Op.PUSH0, offset=Op.PUSH0, size=Op.PUSH0)
        if opcode == Op.CREATE2
        else Op.CREATE(value=Op.PUSH0, offset=Op.PUSH0, size=Op.PUSH0)
    )

    gas_costs = fork.gas_costs()
    # The CALL to the proxy contract needs at a minimum gas corresponding to
    # the CREATE(2) plus extra required PUSH0s for arguments (3 for CREATE,
    # 4 for CREATE2 because of the extra salt argument).
    min_gas_required = gas_costs.G_CREATE + gas_costs.G_BASE * (3 if opcode == Op.CREATE else 4)
    setup = Op.PUSH20(proxy_contract) + Op.PUSH3(min_gas_required)
    attack_block = Op.POP(
        # DUP7 refers to the PUSH3 above.
        # DUP7 refers to the proxy contract address.
        Op.CALL(gas=Op.DUP7, address=Op.DUP7)
    )

    # (**) We deploy the contract that CREATE(2) will attempt to create so any
    # attempt will fail.
    if opcode == Op.CREATE2:
        # CREATE2 with a fixed salt and empty initcode always targets the same
        # address, so a single colliding contract suffices.
        addr = compute_create2_address(address=proxy_contract, salt=0, initcode=[])
        pre.deploy_contract(address=addr, code=Op.INVALID)
    else:
        # CREATE derives the address from the proxy's nonce, which would
        # advance on success; pre-deploy at every nonce that could be reached.
        # Heuristic to have an upper bound.
        max_contract_count = 2 * gas_benchmark_value // gas_costs.G_CREATE
        for nonce in range(max_contract_count):
            addr = compute_create_address(address=proxy_contract, nonce=nonce)
            pre.deploy_contract(address=addr, code=Op.INVALID)

    benchmark_test(
        code_generator=JumpLoopGenerator(setup=setup, attack_block=attack_block),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/conftest.py | tests/benchmark/conftest.py | """Pytest configuration for benchmark tests."""
from pathlib import Path
from typing import Any
import pytest
from ethereum_test_forks import Fork
# Fork applied (via `valid_from`) to benchmark tests that don't declare one.
DEFAULT_BENCHMARK_FORK = "Prague"
def pytest_generate_tests(metafunc: Any) -> None:
    """
    Modify test generation to enforce default benchmark fork for benchmark
    tests.

    Tests located under this directory that carry no explicit `valid_from`
    marker get `valid_from(DEFAULT_BENCHMARK_FORK)` attached.
    """
    this_dir = Path(__file__).parent

    # Only act on tests that live somewhere below the benchmark directory.
    if this_dir not in Path(metafunc.definition.fspath).parents:
        return

    # An explicit `valid_from` marker always wins over the default.
    for marker in metafunc.definition.iter_markers():
        if marker.name == "valid_from":
            return

    metafunc.definition.add_marker(pytest.mark.valid_from(DEFAULT_BENCHMARK_FORK))
def pytest_collection_modifyitems(config: Any, items: Any) -> None:
    """Add the `benchmark` marker to all tests under `./tests/benchmark`.

    Additionally, unless docs are being generated, deselect items so that the
    `-m` expression decides whether benchmark tests, stateful tests, or
    regular tests run.
    """
    benchmark_dir = Path(__file__).parent
    benchmark_marker = pytest.mark.benchmark
    gen_docs = config.getoption("--gen-docs", default=False)

    # Docs generation: only tag benchmark-dir tests, never remove anything.
    if gen_docs:
        for item in items:
            if (
                benchmark_dir in Path(item.fspath).parents
                and not item.get_closest_marker("benchmark")
                and not item.get_closest_marker("stateful")
            ):
                item.add_marker(benchmark_marker)
        return

    # Inspect the raw `-m` expression to decide which groups were requested.
    # NOTE(review): this is a substring check, so complex `-m` expressions
    # (e.g. parenthesized combinations) may be misclassified — confirm.
    marker_expr = config.getoption("-m", default="")
    run_benchmarks = (
        marker_expr and "benchmark" in marker_expr and "not benchmark" not in marker_expr
    )
    run_stateful_tests = (
        marker_expr and "stateful" in marker_expr and "not stateful" not in marker_expr
    )

    items_for_removal = []
    for i, item in enumerate(items):
        is_in_benchmark_dir = benchmark_dir in Path(item.fspath).parents
        has_stateful_marker = item.get_closest_marker("stateful")
        # A "benchmark test" is either a non-stateful test under this
        # directory or any test explicitly marked `benchmark`.
        is_benchmark_test = (
            is_in_benchmark_dir and not has_stateful_marker
        ) or item.get_closest_marker("benchmark")
        if is_benchmark_test:
            # Tag in-directory tests that lack the explicit marker.
            if is_in_benchmark_dir and not item.get_closest_marker("benchmark"):
                item.add_marker(benchmark_marker)
            # Benchmark tests run only when benchmarks were requested.
            if not run_benchmarks:
                items_for_removal.append(i)
        elif run_benchmarks:
            # Benchmarks-only run: drop every non-benchmark test.
            items_for_removal.append(i)
        elif is_in_benchmark_dir and has_stateful_marker and not run_stateful_tests:
            # Stateful tests in this directory need an explicit opt-in.
            items_for_removal.append(i)

    # Pop from the end so earlier indices stay valid.
    for i in reversed(items_for_removal):
        items.pop(i)
@pytest.fixture
def tx_gas_limit_cap(fork: Fork, gas_benchmark_value: int) -> int:
    """Return the transaction gas limit cap."""
    # Fall back to the benchmark gas value when the fork reports no cap
    # (a falsy `transaction_gas_limit_cap()` result, i.e. None).
    cap = fork.transaction_gas_limit_cap()
    return cap if cap else gas_benchmark_value
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/helpers.py | tests/benchmark/helpers.py | """Helper functions for the EVM benchmark worst-case tests."""
from ethereum_test_forks import Fork
from ethereum_test_tools import Bytecode
from ethereum_test_vm import Opcodes as Op
def code_loop_precompile_call(calldata: Bytecode, attack_block: Bytecode, fork: Fork) -> Bytecode:
    """Create a code loop that calls a precompile with the given calldata.

    Layout: CALLDATA_PREP + JUMPDEST + [attack_block] * N + JUMP(#), where N
    is the largest repetition count that still fits the fork's maximum
    contract code size.
    """
    size_limit = fork.max_code_size()
    loop_head = Op.JUMPDEST
    # Jump back to the JUMPDEST placed right after the calldata preparation.
    loop_tail = Op.JUMP(len(calldata))

    # Bytes not available for attack-block repetitions.
    overhead = len(calldata) + len(loop_head) + len(loop_tail)
    repetitions = (size_limit - overhead) // len(attack_block)

    code = calldata + loop_head + sum([attack_block] * repetitions) + loop_tail
    if len(code) > size_limit:
        # Must never happen, but keep it as a sanity check.
        raise ValueError(f"Code size {len(code)} exceeds maximum code size {size_limit}")
    return code
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_blocks.py | tests/benchmark/test_worst_blocks.py | """
Tests that benchmark EVMs in worst-case block scenarios.
"""
import math
import random
from typing import Generator, Tuple
import pytest
from ethereum_test_base_types import Account
from ethereum_test_forks import Fork
from ethereum_test_tools import (
AccessList,
Address,
Alloc,
AuthorizationTuple,
BenchmarkTestFiller,
Block,
BlockchainTestFiller,
Hash,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
@pytest.fixture
def iteration_count(intrinsic_cost: int, gas_benchmark_value: int) -> int:
    """
    Calculate the number of iterations based on the gas limit and intrinsic
    cost.
    """
    # Floor division: only whole transactions fit in the gas budget.
    return gas_benchmark_value // intrinsic_cost
@pytest.fixture
def transfer_amount() -> int:
    """Ether to transfer in each transaction."""
    # Smallest non-zero value: the benchmark targets transfer overhead,
    # not the amount moved.
    return 1
@pytest.fixture
def intrinsic_cost(fork: Fork) -> int:
    """Transaction intrinsic cost."""
    # Called with no arguments: cost of a plain value transfer (no calldata,
    # no access list, no contract creation).
    intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
    return intrinsic_cost()
def get_distinct_sender_list(pre: Alloc) -> Generator[Address, None, None]:
    """Get a list of distinct sender accounts."""
    # Infinite stream: every `next()` funds and yields a brand-new EOA.
    while True:
        yield pre.fund_eoa()
def get_distinct_receiver_list(pre: Alloc) -> Generator[Address, None, None]:
    """Get a list of distinct receiver accounts."""
    # Infinite stream of fresh zero-balance EOAs (receivers need no funds).
    while True:
        yield pre.fund_eoa(0)
def get_single_sender_list(pre: Alloc) -> Generator[Address, None, None]:
    """Get a list of single sender accounts."""
    # Fund one EOA up front and yield that same account forever.
    sender = pre.fund_eoa()
    while True:
        yield sender
def get_single_receiver_list(pre: Alloc) -> Generator[Address, None, None]:
    """Get a list of single receiver accounts."""
    # One zero-balance EOA, yielded forever.
    receiver = pre.fund_eoa(0)
    while True:
        yield receiver
@pytest.fixture
def ether_transfer_case(
    case_id: str,
    pre: Alloc,
) -> Tuple[Generator[Address, None, None], Generator[Address, None, None]]:
    """Generate the test parameters based on the case ID.

    Returns a `(senders, receivers)` pair of infinite address generators.
    """
    if case_id == "a_to_a":
        # Sending to self: receivers is the very same generator object, so
        # sender and receiver are always the same account.
        senders = get_single_sender_list(pre)
        return senders, senders
    if case_id == "a_to_b":
        # One sender → one receiver.
        return get_single_sender_list(pre), get_single_receiver_list(pre)
    if case_id == "diff_acc_to_b":
        # Multiple senders → one receiver.
        return get_distinct_sender_list(pre), get_single_receiver_list(pre)
    if case_id == "a_to_diff_acc":
        # One sender → multiple receivers.
        return get_single_sender_list(pre), get_distinct_receiver_list(pre)
    if case_id == "diff_acc_to_diff_acc":
        # Multiple senders → multiple receivers.
        return get_distinct_sender_list(pre), get_distinct_receiver_list(pre)
    raise ValueError(f"Unknown case: {case_id}")
@pytest.mark.parametrize(
    "case_id",
    ["a_to_a", "a_to_b", "diff_acc_to_b", "a_to_diff_acc", "diff_acc_to_diff_acc"],
)
def test_block_full_of_ether_transfers(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    case_id: str,
    ether_transfer_case: Tuple[Generator[Address, None, None], Generator[Address, None, None]],
    iteration_count: int,
    transfer_amount: int,
    intrinsic_cost: int,
) -> None:
    """
    Single test for ether transfer scenarios.

    Scenarios:
    - a_to_a: one sender → one sender
    - a_to_b: one sender → one receiver
    - diff_acc_to_b: multiple senders → one receiver
    - a_to_diff_acc: one sender → multiple receivers
    - diff_acc_to_diff_acc: multiple senders → multiple receivers
    """
    senders, receivers = ether_transfer_case

    # Create a single block with all transactions, tracking the total value
    # credited to each receiver for the post-state check.
    txs = []
    balances: dict[Address, int] = {}
    for _ in range(iteration_count):
        receiver = next(receivers)
        balances[receiver] = balances.get(receiver, 0) + transfer_amount
        txs.append(
            Transaction(
                to=receiver,
                value=transfer_amount,
                # Exactly the intrinsic cost: a bare value transfer.
                gas_limit=intrinsic_cost,
                sender=next(senders),
            )
        )

    # Only include post state for non a_to_a cases.
    # NOTE(review): for a_to_a the receiver is also the fee-paying sender, so
    # its final balance is not simply the summed transfer amounts — presumably
    # why the post-state check is skipped here.
    post_state = (
        {}
        if case_id == "a_to_a"
        else {receiver: Account(balance=balance) for receiver, balance in balances.items()}
    )

    benchmark_test(
        pre=pre,
        post=post_state,
        blocks=[Block(txs=txs)],
        expected_benchmark_gas_used=iteration_count * intrinsic_cost,
    )
@pytest.fixture
def total_cost_floor_per_token() -> int:
    """Total cost floor per token."""
    # 10 gas per calldata token — the TOTAL_COST_FLOOR_PER_TOKEN constant
    # referenced in the gas formula documented in `calldata_generator`.
    return 10
@pytest.fixture
def total_cost_standard_per_token() -> int:
    """Total cost standard per token."""
    # 4 gas per calldata token (STANDARD_TOKEN_COST). The previous docstring
    # was a copy-paste of the floor fixture; this is the standard cost.
    return 4
def calldata_generator(
    gas_amount: int,
    zero_byte: int,
    total_cost_floor_per_token: int,
) -> bytes:
    """
    Build calldata that consumes (up to) `gas_amount` gas of floor data cost.

    Gas cost calculation based on EIP-7623
    (https://eips.ethereum.org/EIPS/eip-7623):

        tx.gasUsed = 21000 + max(
            STANDARD_TOKEN_COST * tokens_in_calldata
            + execution_gas_used
            + isContractCreation * (32000 + INITCODE_WORD_COST * words(calldata)),
            TOTAL_COST_FLOOR_PER_TOKEN * tokens_in_calldata)

    Simplified for this test: no opcodes execute and no contract is created,
    so the floor term dominates and

        tx.gasUsed = 21000 + tokens_in_calldata * TOTAL_COST_FLOOR_PER_TOKEN

    Token accounting: tokens_in_calldata = zero_bytes + 4 * non_zero_bytes,
    i.e. one token per zero byte and four tokens per non-zero byte.
    """
    budget_tokens = gas_amount // total_cost_floor_per_token
    if zero_byte:
        # Each zero byte costs exactly one token.
        return b"\x00" * budget_tokens
    # Each non-zero byte costs four tokens.
    return b"\xff" * (budget_tokens // 4)
@pytest.mark.parametrize("zero_byte", [True, False])
def test_block_full_data(
benchmark_test: BenchmarkTestFiller,
pre: Alloc,
zero_byte: bool,
intrinsic_cost: int,
total_cost_floor_per_token: int,
gas_benchmark_value: int,
tx_gas_limit_cap: int,
total_cost_standard_per_token: int,
fork: Fork,
) -> None:
"""Test a block with empty payload."""
iteration_count = math.ceil(gas_benchmark_value / tx_gas_limit_cap)
gas_remaining = gas_benchmark_value
total_gas_used = 0
txs = []
for _ in range(iteration_count):
gas_available = min(tx_gas_limit_cap, gas_remaining) - intrinsic_cost
data = calldata_generator(
gas_available,
zero_byte,
total_cost_floor_per_token,
)
total_gas_used += fork.transaction_intrinsic_cost_calculator()(calldata=data)
gas_remaining -= gas_available + intrinsic_cost
txs.append(
Transaction(
to=pre.fund_eoa(),
data=data,
gas_limit=gas_available + intrinsic_cost,
sender=pre.fund_eoa(),
)
)
benchmark_test(
blocks=[Block(txs=txs)],
expected_benchmark_gas_used=total_gas_used,
)
def test_block_full_access_list_and_data(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    intrinsic_cost: int,
    total_cost_standard_per_token: int,
    fork: Fork,
    gas_benchmark_value: int,
    tx_gas_limit_cap: int,
) -> None:
    """
    Test a block with access lists (60% gas) and calldata (40% gas) using
    random mixed bytes.
    """
    # One transaction per tx-gas-cap slice of the benchmark budget.
    iteration_count = math.ceil(gas_benchmark_value / tx_gas_limit_cap)
    gas_remaining = gas_benchmark_value
    total_gas_used = 0
    txs = []
    for _ in range(iteration_count):
        gas_available = min(tx_gas_limit_cap, gas_remaining) - intrinsic_cost
        # Split available gas: 60% for access lists, 40% for calldata
        gas_for_access_list = int(gas_available * 0.6)
        gas_for_calldata = int(gas_available * 0.4)
        # Access list gas costs from fork's gas_costs
        gas_costs = fork.gas_costs()
        gas_per_address = gas_costs.G_ACCESS_LIST_ADDRESS
        gas_per_storage_key = gas_costs.G_ACCESS_LIST_STORAGE
        # Calculate number of storage keys we can fit
        gas_after_address = gas_for_access_list - gas_per_address
        num_storage_keys = gas_after_address // gas_per_storage_key
        # Create access list with 1 address and many storage keys
        access_address = Address("0x1234567890123456789012345678901234567890")
        storage_keys = []
        for i in range(num_storage_keys):
            # Generate random-looking storage keys
            storage_keys.append(Hash(i))
        access_list = [
            AccessList(
                address=access_address,
                storage_keys=storage_keys,
            )
        ]
        # Calculate calldata with 29% of gas for zero bytes and 71% for
        # non-zero bytes
        # Token accounting: tokens_in_calldata = zero_bytes + 4 *
        # non_zero_bytes
        # We want to split the gas budget:
        # - 29% of gas_for_calldata for zero bytes
        # - 71% of gas_for_calldata for non-zero bytes
        max_tokens_in_calldata = gas_for_calldata // total_cost_standard_per_token
        # Calculate how many tokens to allocate to each type
        tokens_for_zero_bytes = int(max_tokens_in_calldata * 0.29)
        tokens_for_non_zero_bytes = max_tokens_in_calldata - tokens_for_zero_bytes
        # Convert tokens to actual byte counts
        # Zero bytes: 1 token per byte
        # Non-zero bytes: 4 tokens per byte
        num_zero_bytes = tokens_for_zero_bytes  # 1 token = 1 zero byte
        num_non_zero_bytes = tokens_for_non_zero_bytes // 4  # 4 tokens = 1 non-zero byte
        # Create calldata with mixed bytes
        calldata = bytearray()
        # Add zero bytes
        calldata.extend(b"\x00" * num_zero_bytes)
        # Add non-zero bytes (random values from 0x01 to 0xff)
        rng = random.Random(42)  # For reproducibility
        for _ in range(num_non_zero_bytes):
            calldata.append(rng.randint(1, 255))
        # Shuffle the bytes to mix zero and non-zero bytes
        calldata_list = list(calldata)
        rng.shuffle(calldata_list)
        shuffled_calldata = bytes(calldata_list)
        txs.append(
            Transaction(
                to=pre.fund_eoa(amount=0),
                data=shuffled_calldata,
                gas_limit=gas_available + intrinsic_cost,
                sender=pre.fund_eoa(),
                access_list=access_list,
            )
        )
        # NOTE(review): this subtracts only the access-list share of the gas,
        # while each transaction's gas_limit above reserves the full
        # gas_available + intrinsic_cost — confirm this asymmetry is intended.
        gas_remaining -= gas_for_access_list + intrinsic_cost
        total_gas_used += fork.transaction_intrinsic_cost_calculator()(
            calldata=shuffled_calldata,
            access_list=access_list,
        )
    benchmark_test(
        blocks=[Block(txs=txs)],
        expected_benchmark_gas_used=total_gas_used,
    )
@pytest.mark.parametrize("empty_authority", [True, False])
@pytest.mark.parametrize("zero_delegation", [True, False])
def test_worst_case_auth_block(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
intrinsic_cost: int,
gas_benchmark_value: int,
fork: Fork,
empty_authority: bool,
zero_delegation: bool,
) -> None:
"""Test an auth block."""
gas_costs = fork.gas_costs()
iteration_count = (gas_benchmark_value - intrinsic_cost) // gas_costs.G_AUTHORIZATION
code = Op.STOP * fork.max_code_size()
auth_target = Address(0) if zero_delegation else pre.deploy_contract(code=code)
auth_tuples = []
for _ in range(iteration_count):
signer = (
pre.fund_eoa(amount=0, delegation=None)
if empty_authority
else pre.fund_eoa(amount=0, delegation=auth_target)
)
auth_tuple = AuthorizationTuple(address=auth_target, nonce=signer.nonce, signer=signer)
auth_tuples.append(auth_tuple)
tx = Transaction(
to=pre.empty_account(),
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
authorization_list=auth_tuples,
)
gas_used = fork.transaction_intrinsic_cost_calculator()(
authorization_list_or_count=auth_tuples
)
refund = 0
if not empty_authority:
refund = min(
gas_used // 5,
(gas_costs.G_AUTHORIZATION - gas_costs.R_AUTHORIZATION_EXISTING_AUTHORITY)
* iteration_count,
)
blockchain_test(
pre=pre,
post={},
blocks=[Block(txs=[tx])],
expected_benchmark_gas_used=gas_used - refund,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_opcode.py | tests/benchmark/test_worst_opcode.py | """
Tests benchmark worst-case opcode scenarios.
"""
import pytest
from ethereum_test_benchmark.benchmark_code_generator import JumpLoopGenerator
from ethereum_test_tools import (
BenchmarkTestFiller,
Bytecode,
)
from ethereum_test_vm import Opcode
from ethereum_test_vm import Opcodes as Op
@pytest.mark.parametrize(
    "opcode",
    [
        pytest.param(Op.LOG0, id="log0"),
        pytest.param(Op.LOG1, id="log1"),
        pytest.param(Op.LOG2, id="log2"),
        pytest.param(Op.LOG3, id="log3"),
        pytest.param(Op.LOG4, id="log4"),
    ],
)
@pytest.mark.parametrize(
    "size,non_zero_data",
    [
        pytest.param(0, False, id="0_bytes_data"),
        pytest.param(1024 * 1024, False, id="1_MiB_zeros_data"),  # 1 MiB
        pytest.param(1024 * 1024, True, id="1_MiB_non_zero_data"),  # 1 MiB
    ],
)
@pytest.mark.parametrize(
    "zeros_topic", [pytest.param(True, id="zeros_topic"), pytest.param(False, id="non_zero_topic")]
)
@pytest.mark.parametrize("fixed_offset", [True, False])
def test_worst_log_opcodes(
    benchmark_test: BenchmarkTestFiller,
    opcode: Opcode,
    zeros_topic: bool,
    size: int,
    fixed_offset: bool,
    non_zero_data: bool,
) -> None:
    """Test running a block with as many LOG opcodes as possible."""
    setup = Bytecode()
    # For non-zero data, load into memory.
    if non_zero_data:
        setup += Op.CODECOPY(dest_offset=0, offset=0, size=Op.CODESIZE)
    # Push the size value onto the stack and access it using the DUP opcode.
    setup += Op.PUSH3(size)
    # For non-zeros topic, push a non-zero value for topic.
    setup += Op.PUSH0 if zeros_topic else Op.PUSH32(2**256 - 1)
    topic_count = len(opcode.kwargs or []) - 2
    offset = Op.PUSH0 if fixed_offset else Op.MOD(Op.GAS, 7)
    # Pick the DUP opcode that copies the size to the top of the stack.
    # After `DUP1 * topic_count` the stack (top on the right) is:
    #   [size, topic, topic_copy * topic_count]
    # so the size sits at depth topic_count + 2:
    # 0 topics -> DUP2, 1 topic -> DUP3, N topics -> DUP(N+2).
    size_op = getattr(Op, f"DUP{topic_count + 2}")
    attack_block = Op.DUP1 * topic_count + size_op + offset + opcode
    benchmark_test(
        code_generator=JumpLoopGenerator(setup=setup, attack_block=attack_block),
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/__init__.py | tests/benchmark/__init__.py | """
Benchmark tests for EVMs.
Benchmark tests aim to maximize the usage of a specific opcode, precompile,
or operation within a transaction or block. These can be executed against
EVM implementations to ensure they handle pathological cases efficiently
and correctly, allowing Ethereum to safely
[Scale the L1](https://protocol.ethereum.foundation/).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_stateful_opcodes.py | tests/benchmark/test_worst_stateful_opcodes.py | """
Tests that benchmark EVMs for worst-case stateful opcodes.
"""
import math
from enum import auto
import pytest
from ethereum_test_benchmark.benchmark_code_generator import ExtCallGenerator, JumpLoopGenerator
from ethereum_test_forks import Fork
from ethereum_test_specs import StateTestFiller
from ethereum_test_specs.benchmark import BenchmarkTestFiller
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
Bytecode,
Environment,
Hash,
Transaction,
While,
compute_create2_address,
compute_create_address,
)
from ethereum_test_types import TestPhaseManager
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "TODO"
REFERENCE_SPEC_VERSION = "TODO"
@pytest.mark.parametrize(
    "opcode",
    [
        Op.BALANCE,
    ],
)
@pytest.mark.parametrize(
    "absent_accounts",
    [
        True,
        False,
    ],
)
def test_worst_address_state_cold(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    opcode: Op,
    absent_accounts: bool,
    env: Environment,
    gas_benchmark_value: int,
) -> None:
    """
    Test running a block with as many stateful opcodes accessing cold accounts.
    """
    attack_gas_limit = gas_benchmark_value
    gas_costs = fork.gas_costs()
    intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
    # For calculation robustness, the calculation below ignores "glue" opcodes
    # like PUSH and POP. It should be considered a worst-case number of
    # accounts, and a few of them might not be targeted before the attacking
    # transaction runs out of gas.
    num_target_accounts = (
        attack_gas_limit - intrinsic_gas_cost_calc()
    ) // gas_costs.G_COLD_ACCOUNT_ACCESS
    blocks = []
    post = {}
    # Setup: the target addresses are going to be constructed (in the case of
    # absent=False) and called as addr_offset + i, where i is the index of the
    # account. This is to avoid collisions with the addresses indirectly
    # created by the testing framework.
    addr_offset = int.from_bytes(pre.fund_eoa(amount=0))
    if not absent_accounts:
        # Fund each target account with 10 wei via a CALL loop so that the
        # cold accesses in the attack hit existing accounts.
        factory_code = Op.PUSH4(num_target_accounts) + While(
            body=Op.POP(Op.CALL(address=Op.ADD(addr_offset, Op.DUP6), value=10)),
            condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
        )
        factory_address = pre.deploy_contract(code=factory_code, balance=10**18)
        setup_tx = Transaction(
            to=factory_address,
            gas_limit=env.gas_limit,
            sender=pre.fund_eoa(),
        )
        blocks.append(Block(txs=[setup_tx]))
        for i in range(num_target_accounts):
            addr = Address(i + addr_offset + 1)
            post[addr] = Account(balance=10)
    # Execution: probe each target address once with the opcode under test;
    # every access is cold because each address is distinct.
    op_code = Op.PUSH4(num_target_accounts) + While(
        body=Op.POP(opcode(Op.ADD(addr_offset, Op.DUP1))),
        condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
    )
    op_address = pre.deploy_contract(code=op_code)
    op_tx = Transaction(
        to=op_address,
        gas_limit=attack_gas_limit,
        sender=pre.fund_eoa(),
    )
    blocks.append(Block(txs=[op_tx]))
    benchmark_test(
        post=post,
        blocks=blocks,
    )
@pytest.mark.parametrize(
    "opcode",
    [
        Op.BALANCE,
        Op.EXTCODESIZE,
        Op.EXTCODEHASH,
        Op.CALL,
        Op.CALLCODE,
        Op.DELEGATECALL,
        Op.STATICCALL,
    ],
)
@pytest.mark.parametrize(
    "absent_target",
    [
        True,
        False,
    ],
)
def test_worst_address_state_warm(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    opcode: Op,
    absent_target: bool,
) -> None:
    """
    Test running a block with as many stateful opcodes doing warm access
    for an account.
    """
    post = {}
    if absent_target:
        # Probe a fixed address that is never deployed.
        target_addr = Address(100_000)
    else:
        # Deploy a small contract so the warm accesses hit existing state.
        code = Op.STOP + Op.JUMPDEST * 100
        target_addr = pre.deploy_contract(balance=100, code=code)
        post[target_addr] = Account(balance=100, code=code)

    # Cache the target address in memory once, then hit it repeatedly in a
    # loop: every access after the first one is warm.
    setup = Op.MSTORE(0, target_addr)
    attack_block = Op.POP(opcode(address=Op.MLOAD(0)))
    benchmark_test(
        post=post,
        code_generator=JumpLoopGenerator(setup=setup, attack_block=attack_block),
    )
class StorageAction:
    """Enum for storage actions."""

    # NOTE(review): this class does not derive from enum.Enum, so each
    # `auto()` call simply yields a distinct `enum.auto` instance; the members
    # only work as identity-compared sentinels (which is how they are used in
    # this module). Consider subclassing Enum to make this explicit.
    READ = auto()
    WRITE_SAME_VALUE = auto()
    WRITE_NEW_VALUE = auto()
class TransactionResult:
    """Enum for the possible transaction outcomes."""

    # NOTE(review): like StorageAction, not an enum.Enum subclass — members
    # are distinct `enum.auto` sentinels compared by identity only.
    SUCCESS = auto()
    OUT_OF_GAS = auto()
    REVERT = auto()
@pytest.mark.parametrize(
    "storage_action,tx_result",
    [
        pytest.param(
            StorageAction.READ,
            TransactionResult.SUCCESS,
            id="SSLOAD",
        ),
        pytest.param(
            StorageAction.WRITE_SAME_VALUE,
            TransactionResult.SUCCESS,
            id="SSTORE same value",
        ),
        pytest.param(
            StorageAction.WRITE_SAME_VALUE,
            TransactionResult.REVERT,
            id="SSTORE same value, revert",
        ),
        pytest.param(
            StorageAction.WRITE_SAME_VALUE,
            TransactionResult.OUT_OF_GAS,
            id="SSTORE same value, out of gas",
        ),
        pytest.param(
            StorageAction.WRITE_NEW_VALUE,
            TransactionResult.SUCCESS,
            id="SSTORE new value",
        ),
        pytest.param(
            StorageAction.WRITE_NEW_VALUE,
            TransactionResult.REVERT,
            id="SSTORE new value, revert",
        ),
        pytest.param(
            StorageAction.WRITE_NEW_VALUE,
            TransactionResult.OUT_OF_GAS,
            id="SSTORE new value, out of gas",
        ),
    ],
)
@pytest.mark.parametrize(
    "absent_slots",
    [
        True,
        False,
    ],
)
def test_worst_storage_access_cold(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    storage_action: StorageAction,
    absent_slots: bool,
    env: Environment,
    gas_benchmark_value: int,
    tx_result: TransactionResult,
) -> None:
    """
    Test running a block with as many cold storage slot accesses as possible.
    """
    gas_costs = fork.gas_costs()
    intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
    # Per-iteration gas: every slot is touched exactly once, so the access is
    # always cold; the write/read surcharge depends on the scenario.
    loop_cost = gas_costs.G_COLD_SLOAD  # All accesses are always cold
    if storage_action == StorageAction.WRITE_NEW_VALUE:
        if not absent_slots:
            loop_cost += gas_costs.G_STORAGE_RESET
        else:
            loop_cost += gas_costs.G_STORAGE_SET
    elif storage_action == StorageAction.WRITE_SAME_VALUE:
        if absent_slots:
            loop_cost += gas_costs.G_STORAGE_SET
        else:
            loop_cost += gas_costs.G_WARM_SLOAD
    elif storage_action == StorageAction.READ:
        loop_cost += 0  # Only G_COLD_SLOAD is charged
    # Contract code
    execution_code_body = Bytecode()
    if storage_action == StorageAction.WRITE_SAME_VALUE:
        # All the storage slots in the contract are initialized to their index.
        # That is, storage slot `i` is initialized to `i`.
        execution_code_body = Op.SSTORE(Op.DUP1, Op.DUP1)
        loop_cost += gas_costs.G_VERY_LOW * 2
    elif storage_action == StorageAction.WRITE_NEW_VALUE:
        # The new value 2^256-1 is guaranteed to be different from the initial
        # value.
        execution_code_body = Op.SSTORE(Op.DUP2, Op.NOT(0))
        loop_cost += gas_costs.G_VERY_LOW * 3
    elif storage_action == StorageAction.READ:
        execution_code_body = Op.POP(Op.SLOAD(Op.DUP1))
        loop_cost += gas_costs.G_VERY_LOW + gas_costs.G_BASE
    # Add the per-iteration jump-logic costs
    loop_cost += (
        gas_costs.G_JUMPDEST  # Prefix Jumpdest
        + gas_costs.G_VERY_LOW * 7  # ISZEROs, PUSHs, SWAPs, SUB, DUP
        + gas_costs.G_HIGH  # JUMPI
    )
    prefix_cost = (
        gas_costs.G_VERY_LOW  # Target slots push
    )
    suffix_cost = 0
    if tx_result == TransactionResult.REVERT:
        suffix_cost = (
            gas_costs.G_VERY_LOW * 2  # Revert PUSHs
        )
    num_target_slots = (
        gas_benchmark_value - intrinsic_gas_cost_calc() - prefix_cost - suffix_cost
    ) // loop_cost
    if tx_result == TransactionResult.OUT_OF_GAS:
        # Add an extra slot to make it run out-of-gas
        num_target_slots += 1
    # Countdown loop over every target slot index.
    code_prefix = Op.PUSH4(num_target_slots) + Op.JUMPDEST
    code_loop = execution_code_body + Op.JUMPI(
        len(code_prefix) - 1, Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO
    )
    execution_code = code_prefix + code_loop
    if tx_result == TransactionResult.REVERT:
        execution_code += Op.REVERT(0, 0)
    else:
        execution_code += Op.STOP
    execution_code_address = pre.deploy_contract(code=execution_code)
    total_gas_used = (
        num_target_slots * loop_cost + intrinsic_gas_cost_calc() + prefix_cost + suffix_cost
    )
    # Contract creation
    slots_init = Bytecode()
    if not absent_slots:
        slots_init = Op.PUSH4(num_target_slots) + While(
            body=Op.SSTORE(Op.DUP1, Op.DUP1),
            condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
        )
    # To create the contract, we apply the slots_init code to initialize the
    # storage slots (in the case of absent_slots=False) and then copy the
    # execution code to the contract.
    creation_code = (
        slots_init
        + Op.EXTCODECOPY(
            address=execution_code_address,
            dest_offset=0,
            offset=0,
            size=Op.EXTCODESIZE(execution_code_address),
        )
        + Op.RETURN(0, Op.MSIZE)
    )
    sender_addr = pre.fund_eoa()
    setup_tx = Transaction(
        to=None,
        gas_limit=env.gas_limit,
        data=creation_code,
        sender=sender_addr,
    )
    blocks = [Block(txs=[setup_tx])]
    # First nonce of the fresh sender deploys the contract.
    contract_address = compute_create_address(address=sender_addr, nonce=0)
    op_tx = Transaction(
        to=contract_address,
        gas_limit=gas_benchmark_value,
        sender=pre.fund_eoa(),
    )
    blocks.append(Block(txs=[op_tx]))
    benchmark_test(
        blocks=blocks,
        expected_benchmark_gas_used=(
            total_gas_used if tx_result != TransactionResult.OUT_OF_GAS else gas_benchmark_value
        ),
    )
@pytest.mark.parametrize(
    "storage_action",
    [
        pytest.param(StorageAction.READ, id="SLOAD"),
        pytest.param(StorageAction.WRITE_SAME_VALUE, id="SSTORE same value"),
        pytest.param(StorageAction.WRITE_NEW_VALUE, id="SSTORE new value"),
    ],
)
def test_worst_storage_access_warm(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    storage_action: StorageAction,
    gas_benchmark_value: int,
    env: Environment,
) -> None:
    """
    Test running a block with as many warm storage slot accesses as
    possible.
    """
    blocks = []
    # The target storage slot for the warm access is storage slot 0.
    storage_slot_initial_value = 10
    # Contract code
    execution_code_body = Bytecode()
    if storage_action == StorageAction.WRITE_SAME_VALUE:
        execution_code_body = Op.SSTORE(0, Op.DUP1)
    elif storage_action == StorageAction.WRITE_NEW_VALUE:
        # Increment the stacked value each pass so every SSTORE writes a
        # different value.
        execution_code_body = Op.PUSH1(1) + Op.ADD + Op.SSTORE(0, Op.DUP1)
    elif storage_action == StorageAction.READ:
        execution_code_body = Op.POP(Op.SLOAD(0))
    execution_code = Op.PUSH1(storage_slot_initial_value) + While(
        body=execution_code_body,
    )
    execution_code_address = pre.deploy_contract(code=execution_code)
    # Deploy a copy of the execution code, initializing slot 0 first so the
    # slot exists before the warm-access loop runs.
    creation_code = (
        Op.SSTORE(0, storage_slot_initial_value)
        + Op.EXTCODECOPY(
            address=execution_code_address,
            dest_offset=0,
            offset=0,
            size=Op.EXTCODESIZE(execution_code_address),
        )
        + Op.RETURN(0, Op.MSIZE)
    )
    with TestPhaseManager.setup():
        sender_addr = pre.fund_eoa()
        setup_tx = Transaction(
            to=None,
            gas_limit=env.gas_limit,
            data=creation_code,
            sender=sender_addr,
        )
        blocks.append(Block(txs=[setup_tx]))
        contract_address = compute_create_address(address=sender_addr, nonce=0)
    with TestPhaseManager.execution():
        op_tx = Transaction(
            to=contract_address,
            gas_limit=gas_benchmark_value,
            sender=pre.fund_eoa(),
        )
        blocks.append(Block(txs=[op_tx]))
    benchmark_test(blocks=blocks)
def test_worst_blockhash(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    fork: Fork,
    gas_benchmark_value: int,
    tx_gas_limit_cap: int,
) -> None:
    """
    Test running a block with as many blockhash accessing oldest allowed block
    as possible.
    """
    # NOTE(review): `pre`, `fork` and `tx_gas_limit_cap` are unused in this
    # body — presumably kept as fixtures for parity with sibling tests.
    # Create 256 dummy blocks to fill the blockhash window.
    blocks = [Block()] * 256
    # Block 1 is the oldest block still inside the 256-block BLOCKHASH window.
    benchmark_test(
        setup_blocks=blocks,
        code_generator=ExtCallGenerator(attack_block=Op.BLOCKHASH(1)),
        expected_benchmark_gas_used=gas_benchmark_value,
    )
@pytest.mark.parametrize("contract_balance", [0, 1])
def test_worst_selfbalance(
benchmark_test: BenchmarkTestFiller,
contract_balance: int,
) -> None:
"""Test running a block with as many SELFBALANCE opcodes as possible."""
benchmark_test(
code_generator=ExtCallGenerator(
attack_block=Op.SELFBALANCE,
contract_balance=contract_balance,
),
)
@pytest.mark.parametrize(
    "copied_size",
    [
        pytest.param(512, id="512"),
        pytest.param(1024, id="1KiB"),
        pytest.param(5 * 1024, id="5KiB"),
    ],
)
def test_worst_extcodecopy_warm(
    benchmark_test: BenchmarkTestFiller,
    pre: Alloc,
    copied_size: int,
    gas_benchmark_value: int,
) -> None:
    """Test running a block with as much warm EXTCODECOPY work as possible."""
    # Source contract whose code is copied over and over.
    copied_contract_address = pre.deploy_contract(
        code=Op.JUMPDEST * copied_size,
    )
    # Push size and address once, then loop: only the first EXTCODECOPY is a
    # cold account access; every following one is warm.
    execution_code = (
        Op.PUSH10(copied_size)
        + Op.PUSH20(copied_contract_address)
        + While(
            body=Op.EXTCODECOPY(Op.DUP4, 0, 0, Op.DUP2),
        )
    )
    execution_code_address = pre.deploy_contract(code=execution_code)
    tx = Transaction(
        to=execution_code_address,
        gas_limit=gas_benchmark_value,
        sender=pre.fund_eoa(),
    )
    benchmark_test(tx=tx)
@pytest.mark.parametrize("value_bearing", [True, False])
def test_worst_selfdestruct_existing(
benchmark_test: BenchmarkTestFiller,
fork: Fork,
pre: Alloc,
value_bearing: bool,
env: Environment,
gas_benchmark_value: int,
) -> None:
"""
Test running a block with as many SELFDESTRUCTs as possible for existing
contracts.
"""
attack_gas_limit = gas_benchmark_value
fee_recipient = pre.fund_eoa(amount=1)
# Template code that will be used to deploy a large number of contracts.
selfdestructable_contract_addr = pre.deploy_contract(code=Op.SELFDESTRUCT(Op.COINBASE))
initcode = Op.EXTCODECOPY(
address=selfdestructable_contract_addr,
dest_offset=0,
offset=0,
size=Op.EXTCODESIZE(selfdestructable_contract_addr),
) + Op.RETURN(0, Op.EXTCODESIZE(selfdestructable_contract_addr))
initcode_address = pre.deploy_contract(code=initcode)
# Calculate the number of contracts that can be deployed with the available
# gas.
gas_costs = fork.gas_costs()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
loop_cost = (
gas_costs.G_KECCAK_256 # KECCAK static cost
+ math.ceil(85 / 32) * gas_costs.G_KECCAK_256_WORD # KECCAK dynamic
# cost for CREATE2
+ gas_costs.G_VERY_LOW * 3 # ~MSTOREs+ADDs
+ gas_costs.G_COLD_ACCOUNT_ACCESS # CALL to self-destructing contract
+ gas_costs.G_SELF_DESTRUCT
+ 63 # ~Gluing opcodes
)
final_storage_gas = (
gas_costs.G_STORAGE_RESET + gas_costs.G_COLD_SLOAD + (gas_costs.G_VERY_LOW * 2)
)
memory_expansion_cost = fork().memory_expansion_gas_calculator()(new_bytes=96)
base_costs = (
intrinsic_gas_cost_calc()
+ (gas_costs.G_VERY_LOW * 12) # 8 PUSHs + 4 MSTOREs
+ final_storage_gas
+ memory_expansion_cost
)
num_contracts = (attack_gas_limit - base_costs) // loop_cost
expected_benchmark_gas_used = num_contracts * loop_cost + base_costs
# Create a factory that deployes a new SELFDESTRUCT contract instance pre-
# funded depending on the value_bearing parameter. We use CREATE2 so the
# caller contract can easily reproduce the addresses in a loop for CALLs.
factory_code = (
Op.EXTCODECOPY(
address=initcode_address,
dest_offset=0,
offset=0,
size=Op.EXTCODESIZE(initcode_address),
)
+ Op.MSTORE(
0,
Op.CREATE2(
value=1 if value_bearing else 0,
offset=0,
size=Op.EXTCODESIZE(initcode_address),
salt=Op.SLOAD(0),
),
)
+ Op.SSTORE(0, Op.ADD(Op.SLOAD(0), 1))
+ Op.RETURN(0, 32)
)
required_balance = num_contracts if value_bearing else 0 # 1 wei per
# contract
factory_address = pre.deploy_contract(code=factory_code, balance=required_balance)
factory_caller_code = Op.CALLDATALOAD(0) + While(
body=Op.POP(Op.CALL(address=factory_address)),
condition=Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
)
factory_caller_address = pre.deploy_contract(code=factory_caller_code)
contracts_deployment_tx = Transaction(
to=factory_caller_address,
gas_limit=env.gas_limit,
data=Hash(num_contracts),
sender=pre.fund_eoa(),
)
code = (
# Setup memory for later CREATE2 address generation loop.
# 0xFF+[Address(20bytes)]+[seed(32bytes)]+[initcode keccak(32bytes)]
Op.MSTORE(0, factory_address)
+ Op.MSTORE8(32 - 20 - 1, 0xFF)
+ Op.MSTORE(32, 0)
+ Op.MSTORE(64, initcode.keccak256())
# Main loop
+ While(
body=Op.POP(Op.CALL(address=Op.SHA3(32 - 20 - 1, 85)))
+ Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1)),
# Only loop if we have enough gas to cover another iteration plus
# the final storage gas.
condition=Op.GT(Op.GAS, final_storage_gas + loop_cost),
)
+ Op.SSTORE(0, 42) # Done for successful tx execution assertion below.
)
assert len(code) <= fork.max_code_size()
# The 0 storage slot is initialize to avoid creation costs in SSTORE above.
code_addr = pre.deploy_contract(code=code, storage={0: 1})
opcode_tx = Transaction(
to=code_addr,
gas_limit=attack_gas_limit,
sender=pre.fund_eoa(),
)
post = {
factory_address: Account(storage={0: num_contracts}),
code_addr: Account(storage={0: 42}), # Check for successful execution.
}
deployed_contract_addresses = []
for i in range(num_contracts):
deployed_contract_address = compute_create2_address(
address=factory_address,
salt=i,
initcode=initcode,
)
post[deployed_contract_address] = Account(nonce=1)
deployed_contract_addresses.append(deployed_contract_address)
benchmark_test(
post=post,
blocks=[
Block(txs=[contracts_deployment_tx]),
Block(txs=[opcode_tx], fee_recipient=fee_recipient),
],
expected_benchmark_gas_used=expected_benchmark_gas_used,
)
@pytest.mark.parametrize("value_bearing", [True, False])
def test_worst_selfdestruct_created(
state_test: StateTestFiller,
pre: Alloc,
value_bearing: bool,
fork: Fork,
env: Environment,
gas_benchmark_value: int,
) -> None:
"""
Test running a block with as many SELFDESTRUCTs as possible for deployed
contracts in the same transaction.
"""
fee_recipient = pre.fund_eoa(amount=1)
env.fee_recipient = fee_recipient
# SELFDESTRUCT(COINBASE) contract deployment
initcode = (
Op.MSTORE8(0, Op.COINBASE.int()) + Op.MSTORE8(1, Op.SELFDESTRUCT.int()) + Op.RETURN(0, 2)
)
gas_costs = fork.gas_costs()
memory_expansion_calc = fork().memory_expansion_gas_calculator()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
initcode_costs = (
gas_costs.G_VERY_LOW * 8 # MSTOREs, PUSHs
+ memory_expansion_calc(new_bytes=2) # return into memory
)
create_costs = (
initcode_costs
+ gas_costs.G_CREATE
+ gas_costs.G_VERY_LOW * 3 # Create Parameter PUSHs
+ gas_costs.G_CODE_DEPOSIT_BYTE * 2
+ gas_costs.G_INITCODE_WORD
)
call_costs = (
gas_costs.G_WARM_ACCOUNT_ACCESS
+ gas_costs.G_BASE # COINBASE
+ gas_costs.G_SELF_DESTRUCT
+ gas_costs.G_VERY_LOW * 5 # CALL Parameter PUSHs
+ gas_costs.G_BASE # Parameter GAS
)
extra_costs = (
gas_costs.G_BASE # POP
+ gas_costs.G_VERY_LOW * 6 # PUSHs, ADD, DUP, GT
+ gas_costs.G_HIGH # JUMPI
+ gas_costs.G_JUMPDEST
)
loop_cost = create_costs + call_costs + extra_costs
prefix_cost = gas_costs.G_VERY_LOW * 3 + gas_costs.G_BASE + memory_expansion_calc(new_bytes=32)
suffix_cost = gas_costs.G_COLD_SLOAD + gas_costs.G_STORAGE_RESET + (gas_costs.G_VERY_LOW * 2)
base_costs = prefix_cost + suffix_cost + intrinsic_gas_cost_calc()
iterations = (gas_benchmark_value - base_costs) // loop_cost
code_prefix = Op.MSTORE(0, initcode.hex()) + Op.PUSH0 + Op.JUMPDEST
code_suffix = (
Op.SSTORE(0, 42) # Done for successful tx execution assertion below.
+ Op.STOP
)
loop_body = (
Op.POP(
Op.CALL(
address=Op.CREATE(
value=1 if value_bearing else 0,
offset=32 - len(initcode),
size=len(initcode),
)
)
)
+ Op.PUSH1[1]
+ Op.ADD
+ Op.JUMPI(len(code_prefix) - 1, Op.GT(iterations, Op.DUP1))
)
code = code_prefix + loop_body + code_suffix
# The 0 storage slot is initialize to avoid creation costs in SSTORE above.
code_addr = pre.deploy_contract(
code=code,
balance=iterations if value_bearing else 0,
storage={0: 1},
)
code_tx = Transaction(
to=code_addr,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
post = {code_addr: Account(storage={0: 42})} # Check for successful
# execution.
state_test(
pre=pre,
post=post,
tx=code_tx,
expected_benchmark_gas_used=iterations * loop_cost + base_costs,
)
@pytest.mark.parametrize("value_bearing", [True, False])
def test_worst_selfdestruct_initcode(
state_test: StateTestFiller,
pre: Alloc,
value_bearing: bool,
fork: Fork,
env: Environment,
gas_benchmark_value: int,
) -> None:
"""
Test running a block with as many SELFDESTRUCTs as possible executed in
initcode.
"""
fee_recipient = pre.fund_eoa(amount=1)
env.fee_recipient = fee_recipient
gas_costs = fork.gas_costs()
memory_expansion_calc = fork().memory_expansion_gas_calculator()
intrinsic_gas_cost_calc = fork.transaction_intrinsic_cost_calculator()
initcode_costs = (
gas_costs.G_BASE # COINBASE
+ gas_costs.G_SELF_DESTRUCT
)
create_costs = (
initcode_costs
+ gas_costs.G_CREATE
+ gas_costs.G_VERY_LOW * 3 # Create Parameter PUSHs
+ gas_costs.G_INITCODE_WORD
)
extra_costs = (
gas_costs.G_BASE # POP
+ gas_costs.G_VERY_LOW * 6 # PUSHs, ADD, DUP, GT
+ gas_costs.G_HIGH # JUMPI
+ gas_costs.G_JUMPDEST
)
loop_cost = create_costs + extra_costs
prefix_cost = gas_costs.G_VERY_LOW * 3 + gas_costs.G_BASE + memory_expansion_calc(new_bytes=32)
suffix_cost = gas_costs.G_COLD_SLOAD + gas_costs.G_STORAGE_RESET + (gas_costs.G_VERY_LOW * 2)
base_costs = prefix_cost + suffix_cost + intrinsic_gas_cost_calc()
iterations = (gas_benchmark_value - base_costs) // loop_cost
initcode = Op.SELFDESTRUCT(Op.COINBASE)
code_prefix = Op.MSTORE(0, initcode.hex()) + Op.PUSH0 + Op.JUMPDEST
code_suffix = (
Op.SSTORE(0, 42) # Done for successful tx execution assertion below.
+ Op.STOP
)
loop_body = (
Op.POP(
Op.CREATE(
value=1 if value_bearing else 0,
offset=32 - len(initcode),
size=len(initcode),
)
)
+ Op.PUSH1[1]
+ Op.ADD
+ Op.JUMPI(len(code_prefix) - 1, Op.GT(iterations, Op.DUP1))
)
code = code_prefix + loop_body + code_suffix
# The 0 storage slot is initialize to avoid creation costs in SSTORE above.
code_addr = pre.deploy_contract(code=code, balance=100_000, storage={0: 1})
code_tx = Transaction(
to=code_addr,
gas_limit=gas_benchmark_value,
gas_price=10,
sender=pre.fund_eoa(),
)
post = {code_addr: Account(storage={0: 42})} # Check for successful
# execution.
state_test(
pre=pre,
post=post,
tx=code_tx,
expected_benchmark_gas_used=iterations * loop_cost + base_costs,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/test_worst_compute.py | tests/benchmark/test_worst_compute.py | """
Tests that benchmark EVMs in worst-case compute scenarios.
"""
import math
import operator
import random
from enum import Enum, auto
from typing import Any, Dict, cast
import pytest
from _pytest.mark import ParameterSet
from py_ecc.bn128 import G1, G2, multiply
from ethereum_test_base_types.base_types import Bytes
from ethereum_test_benchmark import ExtCallGenerator, JumpLoopGenerator
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Address,
Alloc,
BenchmarkTestFiller,
Block,
Bytecode,
Transaction,
add_kzg_version,
)
from ethereum_test_types import TransactionType
from ethereum_test_vm import Opcode
from ethereum_test_vm import Opcodes as Op
from ..byzantium.eip198_modexp_precompile.test_modexp import ModExpInput
from ..cancun.eip4844_blobs.spec import Spec as BlobsSpec
from ..istanbul.eip152_blake2.common import Blake2bInput
from ..istanbul.eip152_blake2.spec import Spec as Blake2bSpec
from ..osaka.eip7951_p256verify_precompiles import spec as p256verify_spec
from ..osaka.eip7951_p256verify_precompiles.spec import FieldElement
from ..prague.eip2537_bls_12_381_precompiles import spec as bls12381_spec
from ..prague.eip2537_bls_12_381_precompiles.spec import BytesConcatenation
REFERENCE_SPEC_GIT_PATH = "TODO"
REFERENCE_SPEC_VERSION = "TODO"
KECCAK_RATE = 136
def neg(x: int) -> int:
"""Negate the given integer in the two's complement 256-bit range."""
assert 0 <= x < 2**256
return 2**256 - x
def make_dup(index: int) -> Opcode:
"""
Create a DUP instruction which duplicates the index-th (counting from 0)
element from the top of the stack. E.g. make_dup(0) → DUP1.
"""
assert 0 <= index < 16
return Opcode(0x80 + index, pushed_stack_items=1, min_stack_height=index + 1)
@pytest.mark.parametrize(
"opcode",
[
Op.ADDRESS,
Op.ORIGIN,
Op.CALLER,
Op.CODESIZE,
Op.GASPRICE,
Op.COINBASE,
Op.TIMESTAMP,
Op.NUMBER,
Op.PREVRANDAO,
Op.GASLIMIT,
Op.CHAINID,
Op.BASEFEE,
Op.BLOBBASEFEE,
Op.GAS,
# Note that other 0-param opcodes are covered in separate tests.
],
)
def test_worst_zero_param(
benchmark_test: BenchmarkTestFiller,
pre: Alloc,
opcode: Op,
) -> None:
"""Test running a block with as many zero-parameter opcodes as possible."""
benchmark_test(
pre=pre,
post={},
code_generator=ExtCallGenerator(attack_block=opcode),
)
@pytest.mark.parametrize("calldata_length", [0, 1_000, 10_000])
def test_worst_calldatasize(
benchmark_test: BenchmarkTestFiller,
calldata_length: int,
) -> None:
"""Test running a block with as many CALLDATASIZE as possible."""
benchmark_test(
code_generator=JumpLoopGenerator(
attack_block=Op.POP(Op.CALLDATASIZE),
tx_kwargs={"data": b"\x00" * calldata_length},
),
)
@pytest.mark.parametrize("non_zero_value", [True, False])
@pytest.mark.parametrize("from_origin", [True, False])
def test_worst_callvalue(
benchmark_test: BenchmarkTestFiller,
pre: Alloc,
fork: Fork,
non_zero_value: bool,
from_origin: bool,
) -> None:
"""
Test running a block with as many CALLVALUE opcodes as possible.
The `non_zero_value` parameter controls whether opcode must return non-zero
value. The `from_origin` parameter controls whether the call frame is the
immediate from the transaction or a previous CALL.
"""
code_address = JumpLoopGenerator(attack_block=Op.POP(Op.CALLVALUE)).deploy_contracts(
pre=pre, fork=fork
)
if from_origin:
tx_to = code_address
else:
entry_code = (
Op.JUMPDEST
+ Op.CALL(address=code_address, value=1 if non_zero_value else 0)
+ Op.JUMP(Op.PUSH0)
)
tx_to = pre.deploy_contract(code=entry_code, balance=1_000_000)
tx = Transaction(
to=tx_to,
value=1 if non_zero_value and from_origin else 0,
sender=pre.fund_eoa(),
)
benchmark_test(tx=tx)
class ReturnDataStyle(Enum):
"""Helper enum to specify return data is returned to the caller."""
RETURN = auto()
REVERT = auto()
IDENTITY = auto()
@pytest.mark.parametrize(
"return_data_style",
[
ReturnDataStyle.RETURN,
ReturnDataStyle.REVERT,
ReturnDataStyle.IDENTITY,
],
)
@pytest.mark.parametrize("returned_size", [1, 0])
def test_worst_returndatasize_nonzero(
benchmark_test: BenchmarkTestFiller,
pre: Alloc,
returned_size: int,
return_data_style: ReturnDataStyle,
) -> None:
"""
Test running a block which execute as many RETURNDATASIZE opcodes which
return a non-zero buffer as possible.
The `returned_size` parameter indicates the size of the returned data
buffer. The `return_data_style` indicates how returned data is produced for
the opcode caller.
"""
setup = Bytecode()
if return_data_style != ReturnDataStyle.IDENTITY:
setup += Op.STATICCALL(
address=pre.deploy_contract(
code=Op.REVERT(0, returned_size)
if return_data_style == ReturnDataStyle.REVERT
else Op.RETURN(0, returned_size)
)
)
else:
setup += Op.MSTORE8(0, 1) + Op.STATICCALL(
address=0x04, # Identity precompile
args_size=returned_size,
)
benchmark_test(
code_generator=JumpLoopGenerator(setup=setup, attack_block=Op.POP(Op.RETURNDATASIZE)),
)
def test_worst_returndatasize_zero(benchmark_test: BenchmarkTestFiller) -> None:
"""
Test running a block with as many RETURNDATASIZE opcodes as possible with
a zero buffer.
"""
benchmark_test(
code_generator=ExtCallGenerator(attack_block=Op.RETURNDATASIZE),
)
@pytest.mark.parametrize("mem_size", [0, 1, 1_000, 100_000, 1_000_000])
def test_worst_msize(
benchmark_test: BenchmarkTestFiller,
mem_size: int,
) -> None:
"""
Test running a block with as many MSIZE opcodes as possible.
The `mem_size` parameter indicates by how much the memory is expanded.
"""
benchmark_test(
code_generator=ExtCallGenerator(
setup=Op.MLOAD(Op.SELFBALANCE) + Op.POP,
attack_block=Op.MSIZE,
contract_balance=mem_size,
),
)
def test_worst_keccak(
benchmark_test: BenchmarkTestFiller,
fork: Fork,
gas_benchmark_value: int,
) -> None:
"""Test running a block with as many KECCAK256 permutations as possible."""
# Intrinsic gas cost is paid once.
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
available_gas = gas_benchmark_value - intrinsic_gas_calculator()
gsc = fork.gas_costs()
mem_exp_gas_calculator = fork.memory_expansion_gas_calculator()
# Discover the optimal input size to maximize keccak-permutations,
# not to maximize keccak calls.
# The complication of the discovery arises from
# the non-linear gas cost of memory expansion.
max_keccak_perm_per_block = 0
optimal_input_length = 0
for i in range(1, 1_000_000, 32):
iteration_gas_cost = (
2 * gsc.G_VERY_LOW # PUSHN + PUSH1
+ gsc.G_KECCAK_256 # KECCAK256 static cost
+ math.ceil(i / 32) * gsc.G_KECCAK_256_WORD # KECCAK256 dynamic
# cost
+ gsc.G_BASE # POP
)
# From the available gas, we subtract the mem expansion costs
# considering we know the current input size length i.
available_gas_after_expansion = max(0, available_gas - mem_exp_gas_calculator(new_bytes=i))
# Calculate how many calls we can do.
num_keccak_calls = available_gas_after_expansion // iteration_gas_cost
# KECCAK does 1 permutation every 136 bytes.
num_keccak_permutations = num_keccak_calls * math.ceil(i / KECCAK_RATE)
# If we found an input size that is better (reg permutations/gas), then
# save it.
if num_keccak_permutations > max_keccak_perm_per_block:
max_keccak_perm_per_block = num_keccak_permutations
optimal_input_length = i
# max_iters_loop contains how many keccak calls can be done per loop. The
# loop is as big as possible bounded by the maximum code size.
#
# The loop structure is: JUMPDEST + [attack iteration] + PUSH0 + JUMP
#
# Now calculate available gas for [attack iteration]:
# Numerator = max_code_size-3. (JUMPDEST, PUSH0 and JUMP)
# Denominator = (PUSHN + PUSH1 + KECCAK256 + POP) + PUSH1_DATA +
# PUSHN_DATA
# TODO: the testing framework uses PUSH1(0) instead of PUSH0 which is
# suboptimal for the
# attack, whenever this is fixed adjust accordingly.
benchmark_test(
code_generator=JumpLoopGenerator(
setup=Op.PUSH20[optimal_input_length],
attack_block=Op.POP(Op.SHA3(Op.PUSH0, Op.DUP1)),
),
)
@pytest.mark.parametrize(
"address,static_cost,per_word_dynamic_cost,bytes_per_unit_of_work",
[
pytest.param(0x02, 60, 12, 64, id="SHA2-256"),
pytest.param(0x03, 600, 120, 64, id="RIPEMD-160"),
pytest.param(0x04, 15, 3, 1, id="IDENTITY"),
],
)
def test_worst_precompile_only_data_input(
benchmark_test: BenchmarkTestFiller,
fork: Fork,
address: Address,
static_cost: int,
per_word_dynamic_cost: int,
bytes_per_unit_of_work: int,
gas_benchmark_value: int,
) -> None:
"""
Test running a block with as many precompile calls which have a single
`data` input.
"""
# Intrinsic gas cost is paid once.
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
available_gas = gas_benchmark_value - intrinsic_gas_calculator()
gsc = fork.gas_costs()
mem_exp_gas_calculator = fork.memory_expansion_gas_calculator()
# Discover the optimal input size to maximize precompile work, not
# precompile calls.
max_work = 0
optimal_input_length = 0
for input_length in range(1, 1_000_000, 32):
parameters_gas = (
gsc.G_BASE # PUSH0 = arg offset
+ gsc.G_BASE # PUSH0 = arg size
+ gsc.G_BASE # PUSH0 = arg size
+ gsc.G_VERY_LOW # PUSH0 = arg offset
+ gsc.G_VERY_LOW # PUSHN = address
+ gsc.G_BASE # GAS
)
iteration_gas_cost = (
parameters_gas
+ +static_cost # Precompile static cost
+ math.ceil(input_length / 32) * per_word_dynamic_cost
# Precompile dynamic cost
+ gsc.G_BASE # POP
)
# From the available gas, we subtract the mem expansion costs
# considering we know the current input size length.
available_gas_after_expansion = max(
0, available_gas - mem_exp_gas_calculator(new_bytes=input_length)
)
# Calculate how many calls we can do.
num_calls = available_gas_after_expansion // iteration_gas_cost
total_work = num_calls * math.ceil(input_length / bytes_per_unit_of_work)
# If we found an input size that is better (reg permutations/gas), then
# save it.
if total_work > max_work:
max_work = total_work
optimal_input_length = input_length
attack_block = Op.POP(Op.STATICCALL(Op.GAS, address, 0, optimal_input_length, 0, 0))
benchmark_test(
code_generator=JumpLoopGenerator(
setup=Op.CODECOPY(0, 0, optimal_input_length), attack_block=attack_block
),
)
def create_modexp_test_cases() -> list[ParameterSet]:
"""Create test cases for the MODEXP precompile."""
test_cases = [
# (base, exponent, modulus, test_id)
(8 * "ff", 112 * "ff", 7 * "ff" + "00", "mod_even_8b_exp_896"),
(16 * "ff", 40 * "ff", 15 * "ff" + "00", "mod_even_16b_exp_320"),
(24 * "ff", 21 * "ff", 23 * "ff" + "00", "mod_even_24b_exp_168"),
(32 * "ff", 5 * "ff", 31 * "ff" + "00", "mod_even_32b_exp_40"),
(32 * "ff", 12 * "ff", 31 * "ff" + "00", "mod_even_32b_exp_96"),
(32 * "ff", 32 * "ff", 31 * "ff" + "00", "mod_even_32b_exp_256"),
(64 * "ff", 64 * "ff", 63 * "ff" + "00", "mod_even_64b_exp_512"),
(128 * "ff", 128 * "ff", 127 * "ff" + "00", "mod_even_128b_exp_1024"),
(256 * "ff", 128 * "ff", 255 * "ff" + "00", "mod_even_256b_exp_1024"),
(512 * "ff", 128 * "ff", 511 * "ff" + "00", "mod_even_512b_exp_1024"),
(1024 * "ff", 128 * "ff", 1023 * "ff" + "00", "mod_even_1024b_exp_1024"),
(32 * "ff", 12 * "ff", 31 * "ff" + "01", "mod_odd_32b_exp_96"),
(32 * "ff", 32 * "ff", 31 * "ff" + "01", "mod_odd_32b_exp_256"),
(64 * "ff", 64 * "ff", 63 * "ff" + "01", "mod_odd_64b_exp_512"),
(128 * "ff", 128 * "ff", 127 * "ff" + "01", "mod_odd_128b_exp_1024"),
(256 * "ff", 128 * "ff", 255 * "ff" + "01", "mod_odd_256b_exp_1024"),
(512 * "ff", 128 * "ff", 511 * "ff" + "01", "mod_odd_512b_exp_1024"),
(1024 * "ff", 128 * "ff", 1023 * "ff" + "01", "mod_odd_1024b_exp_1024"),
(32 * "ff", 8 * "12345670", 31 * "ff" + "01", "mod_odd_32b_exp_cover_windows"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L38
(192 * "FF", "03", 6 * ("00" + 31 * "FF"), "mod_min_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L40
(8 * "FF", "07" + 75 * "FF", 7 * "FF", "mod_min_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L42
(40 * "FF", "01" + 3 * "FF", "00" + 38 * "FF", "mod_min_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L44
(32 * "FF", 5 * "FF", ("00" + 31 * "FF"), "mod_exp_208_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L46
(8 * "FF", 81 * "FF", 7 * "FF", "mod_exp_215_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L48
(8 * "FF", 112 * "FF", 7 * "FF", "mod_exp_298_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L50
(16 * "FF", 40 * "FF", 15 * "FF", "mod_pawel_2"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L52
(24 * "FF", 21 * "FF", 23 * "FF", "mod_pawel_3"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L54
(32 * "FF", 12 * "FF", "00" + 31 * "FF", "mod_pawel_4"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L56
(280 * "FF", "03", 8 * ("00" + 31 * "FF") + 23 * "FF", "mod_408_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L58
(16 * "FF", "15" + 37 * "FF", 15 * "FF", "mod_400_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L60
(48 * "FF", "07" + 4 * "FF", "00" + 46 * "FF", "mod_408_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L62
(344 * "FF", "03", 10 * ("00" + 31 * "FF") + 23 * "FF", "mod_616_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L64
(16 * "FF", "07" + 56 * "FF", 15 * "FF", "mod_600_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L66
(48 * "FF", "07" + 6 * "FF", "00" + 46 * "FF", "mod_600_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L68
(392 * "FF", "03", 12 * ("00" + 31 * "FF") + 7 * "FF", "mod_800_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L70
(16 * "FF", "01" + 75 * "FF", 15 * "FF", "mod_800_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L72
(56 * "FF", 6 * "FF", "00" + 54 * "FF", "mod_767_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L74
(16 * "FF", 80 * "FF", 15 * "FF", "mod_852_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L76
(408 * "FF", "03", 12 * ("00" + 31 * "FF") + 23 * "FF", "mod_867_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L78
(56 * "FF", "2b" + 7 * "FF", "00" + 54 * "FF", "mod_996_gas_balanced"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L80
(448 * "FF", "03", 14 * ("00" + 31 * "FF"), "mod_1045_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L82
(32 * "FF", 16 * "FF", "00" + 31 * "FF", "mod_677_gas_base_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L84
(24 * "FF", 32 * "FF", 23 * "FF", "mod_765_gas_exp_heavy"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/Modexp.cs#L86
(32 * "FF", 32 * "FF", "00" + 31 * "FF", "mod_1360_gas_balanced"),
(8 * "FF", 81 * "FF", 7 * "FF", "mod_8_exp_648"),
(8 * "FF", "FF" + 111 * "FF", 7 * "FF", "mod_8_exp_896"),
(32 * "FF", 4 * "FF", "00" + 31 * "FF", "mod_32_exp_32"),
(32 * "FF", "0D" + 4 * "FF", "00" + 31 * "FF", "mod_32_exp_36"),
(32 * "FF", 5 * "FF", "00" + 31 * "FF", "mod_32_exp_40"),
(32 * "FF", 8 * "FF", "00" + 31 * "FF", "mod_32_exp_64"),
(32 * "FF", "01" + 8 * "FF", "00" + 31 * "FF", "mod_32_exp_65"),
(32 * "FF", 16 * "FF", "00" + 31 * "FF", "mod_32_exp_128"),
(256 * "FF", "03" + 0 * "FF", 8 * ("00" + 31 * "FF"), "mod_256_exp_2"),
(264 * "FF", "03" + 0 * "FF", 8 * ("00" + 31 * "FF") + 7 * "FF", "mod_264_exp_2"),
(1024 * "FF", "03", 32 * ("00" + 31 * "FF"), "mod_1024_exp_2"),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L122
(
"03",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
"mod_vul_example_1",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L124
(
"",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
"mod_vul_example_2",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L126
(
"e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
"02",
"fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
"mod_vul_nagydani_1_square",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L128
(
"e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
"03",
"fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
"mod_vul_nagydani_1_qube",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L130
(
"e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5",
"010001",
"fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b",
"mod_vul_nagydani_1_pow_0x10001",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L132
(
"cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
"02",
"e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
"mod_vul_nagydani_2_square",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L134
(
"cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
"03",
"e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
"mod_vul_nagydani_2_qube",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L136
(
"cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51",
"010001",
"e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087",
"mod_vul_nagydani_2_pow_0x10001",
),
(
"c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
"02",
"d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
"mod_vul_nagydani_3_square",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L140
(
"c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
"03",
"d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
"mod_vul_nagydani_3_qube",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L142
(
"c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb",
"010001",
"d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d",
"mod_vul_nagydani_3_pow_0x10001",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L144
(
"db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81",
"02",
"df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f",
"mod_vul_nagydani_4_square",
),
# Ported from https://github.com/NethermindEth/nethermind/blob/ceb8d57b8530ce8181d7427c115ca593386909d6/tools/EngineRequestsGenerator/TestCases/ModexpVulnerability.cs#L146
(
"db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81",
"03",
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/conftest.py | tests/benchmark/stateful/conftest.py | """Pytest configuration for state tests."""
from pathlib import Path
from typing import Any
import pytest
DEFAULT_BENCHMARK_FORK = "Prague"
def pytest_generate_tests(metafunc: Any) -> None:
"""
Add default valid_from marker to state tests without explicit fork
specification.
"""
state_dir = Path(__file__).parent
test_file_path = Path(metafunc.definition.fspath)
if state_dir in test_file_path.parents:
has_valid_from = any(
marker.name == "valid_from" for marker in metafunc.definition.iter_markers()
)
if not has_valid_from:
metafunc.definition.add_marker(pytest.mark.valid_from(DEFAULT_BENCHMARK_FORK))
def pytest_collection_modifyitems(config: Any, items: Any) -> None:
"""Manage stateful test markers and filtering."""
state_dir = Path(__file__).parent
gen_docs = config.getoption("--gen-docs", default=False)
if gen_docs:
_add_stateful_markers_for_docs(items, state_dir)
return
marker_expr = config.getoption("-m", default="")
items_to_remove = []
for i, item in enumerate(items):
item_path = Path(item.fspath)
is_in_state_dir = state_dir in item_path.parents
# Add stateful marker to tests in state directory that don't have it
if is_in_state_dir and not item.get_closest_marker("stateful"):
item.add_marker(pytest.mark.stateful)
has_stateful_marker = item.get_closest_marker("stateful")
run_stateful = (
marker_expr and ("stateful" in marker_expr) and ("not stateful" not in marker_expr)
)
# When not running stateful tests, remove all stateful tests
if not run_stateful and has_stateful_marker:
items_to_remove.append(i)
for i in reversed(items_to_remove):
items.pop(i)
def _add_stateful_markers_for_docs(items: Any, state_dir: Any) -> None:
"""Add stateful markers for documentation generation."""
for item in items:
item_path = Path(item.fspath)
if state_dir in item_path.parents and not item.get_closest_marker("stateful"):
item.add_marker(pytest.mark.stateful)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/__init__.py | tests/benchmark/stateful/__init__.py | """Benchmark state tests package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/bloatnet/test_single_opcode.py | tests/benchmark/stateful/bloatnet/test_single_opcode.py | """
abstract: BloatNet single-opcode benchmark cases for state-related operations.
These tests focus on individual EVM opcodes (SLOAD, SSTORE) to measure
their performance when accessing many storage slots across pre-deployed
contracts. Unlike multi-opcode tests, these isolate single operations
to benchmark specific state-handling bottlenecks.
"""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Transaction,
While,
)
from ethereum_test_vm import Bytecode
from ethereum_test_vm import Opcodes as Op
from pytest_plugins.execute.pre_alloc import AddressStubs
REFERENCE_SPEC_GIT_PATH = "DUMMY/bloatnet.md"
REFERENCE_SPEC_VERSION = "1.0"
# ERC20 function selectors
BALANCEOF_SELECTOR = 0x70A08231 # balanceOf(address)
APPROVE_SELECTOR = 0x095EA7B3 # approve(address,uint256)
ALLOWANCE_SELECTOR = 0xDD62ED3E # allowance(address,address)
# SLOAD BENCHMARK ARCHITECTURE:
#
# [Pre-deployed ERC20 Contract] ──── Storage slots for balances
# │
# ��� balanceOf(address) → SLOAD(keccak256(address || slot))
# │
# [Attack Contract] ──CALL──► ERC20.balanceOf(random_address)
# │
# └─► Loop(i=0 to N):
# 1. Generate random address from counter
# 2. CALL balanceOf(random_address) → forces cold SLOAD
# 3. Most addresses have zero balance → empty storage slots
#
# WHY IT STRESSES CLIENTS:
# - Each balanceOf() call forces a cold SLOAD on a likely-empty slot
# - Storage slot = keccak256(address || balances_slot)
# - Random addresses ensure maximum cache misses
# - Tests client's sparse storage handling efficiency
# SSTORE BENCHMARK ARCHITECTURE:
#
# [Pre-deployed ERC20 Contract] ──── Storage slots for allowances
# │
# │ approve(spender, amount)
# │ → SSTORE(keccak256(spender || slot), amount)
# │
# [Attack Contract]
# ──CALL──► ERC20.approve(counter_as_spender, counter_as_amount)
# │
# └─► Loop(i=0 to N):
# 1. Use counter as both spender address and amount
# 2. CALL approve(counter, counter) → forces cold SSTORE
# 3. Writes to new allowance slots in sparse storage
#
# WHY IT STRESSES CLIENTS:
# - Each approve() call forces an SSTORE to a new storage slot
# - Storage slot = keccak256(
# msg.sender || keccak256(spender || allowances_slot)
# )
# - Sequential counter ensures unique storage locations
# - Tests client's ability to handle many storage writes
# - Simulates real-world contract state accumulation over time
@pytest.mark.valid_from("Prague")
@pytest.mark.parametrize("num_contracts", [1, 5, 10, 20, 100])
def test_sload_empty_erc20_balanceof(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
address_stubs: AddressStubs,
num_contracts: int,
request: pytest.FixtureRequest,
) -> None:
"""
BloatNet SLOAD benchmark using ERC20 balanceOf queries on random
addresses.
This test:
1. Filters stubs matching test name prefix
(e.g., test_sload_empty_erc20_balanceof_*)
2. Uses first N contracts based on num_contracts parameter
3. Splits gas budget evenly across the selected contracts
4. Queries balanceOf() incrementally starting by 0 and increasing by 1
(thus forcing SLOADs to non-existing addresses)
"""
# Extract test function name for stub filtering
test_name = request.node.name.split("[")[0] # Remove parametrization suffix
# Filter stubs that match the test name prefix
matching_stubs = [
stub_name for stub_name in address_stubs.root.keys() if stub_name.startswith(test_name)
]
# Validate we have enough stubs
if len(matching_stubs) < num_contracts:
pytest.fail(
f"Not enough matching stubs for test '{test_name}'. "
f"Required: {num_contracts}, Found: {len(matching_stubs)}. "
f"Matching stubs: {matching_stubs}"
)
# Select first N stubs
selected_stubs = matching_stubs[:num_contracts]
gas_costs = fork.gas_costs()
# Calculate gas costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Fixed overhead per iteration (loop mechanics, independent of warm/cold)
loop_overhead = (
# Attack contract loop overhead
gas_costs.G_VERY_LOW * 2 # MLOAD counter (3*2)
+ gas_costs.G_VERY_LOW * 2 # MSTORE selector (3*2)
+ gas_costs.G_VERY_LOW * 3 # MLOAD + MSTORE address (3*3)
+ gas_costs.G_BASE # POP (2)
+ gas_costs.G_BASE * 3 # SUB + MLOAD + MSTORE for counter decrement (2*3)
+ gas_costs.G_BASE * 2 # ISZERO * 2 for loop condition (2*2)
+ gas_costs.G_MID # JUMPI (8)
)
# ERC20 internal gas (same for all calls)
erc20_internal_gas = (
gas_costs.G_VERY_LOW # PUSH4 selector (3)
+ gas_costs.G_BASE # EQ selector match (2)
+ gas_costs.G_MID # JUMPI to function (8)
+ gas_costs.G_JUMPDEST # JUMPDEST at function start (1)
+ gas_costs.G_VERY_LOW * 2 # CALLDATALOAD arg (3*2)
+ gas_costs.G_KECCAK_256 # keccak256 static (30)
+ gas_costs.G_KECCAK_256_WORD * 2 # keccak256 dynamic for 64 bytes (2*6)
+ gas_costs.G_COLD_SLOAD # Cold SLOAD - always cold for random addresses (2100)
+ gas_costs.G_VERY_LOW * 3 # MSTORE result + RETURN setup (3*3)
# RETURN costs 0 gas
)
# Calculate gas budget per contract
available_gas = gas_benchmark_value - intrinsic_gas
gas_per_contract = available_gas // num_contracts
# For each contract: first call is COLD (2600), subsequent are WARM (100)
# Solve for calls_per_contract:
# gas_per_contract = cold_call + (calls-1) * warm_call
# Simplifies to: gas = cold_warm_diff + calls * warm_call_cost
warm_call_cost = loop_overhead + gas_costs.G_WARM_ACCOUNT_ACCESS + erc20_internal_gas
cold_warm_diff = gas_costs.G_COLD_ACCOUNT_ACCESS - gas_costs.G_WARM_ACCOUNT_ACCESS
calls_per_contract = int((gas_per_contract - cold_warm_diff) // warm_call_cost)
# Deploy selected ERC20 contracts using stubs
# In execute mode: stubs point to already-deployed contracts on chain
# In fill mode: empty bytecode is deployed as placeholder
erc20_addresses = []
for stub_name in selected_stubs:
addr = pre.deploy_contract(
code=Bytecode(), # Required parameter, ignored for stubs in execute mode
stub=stub_name,
)
erc20_addresses.append(addr)
# Log test requirements
print(
f"Total gas budget: {gas_benchmark_value / 1_000_000:.1f}M gas. "
f"~{gas_per_contract / 1_000_000:.1f}M gas per contract, "
f"{calls_per_contract} balanceOf calls per contract."
)
# Build attack code that loops through each contract
attack_code: Bytecode = (
Op.JUMPDEST # Entry point
+ Op.MSTORE(offset=0, value=BALANCEOF_SELECTOR) # Store selector once for all contracts
)
for erc20_address in erc20_addresses:
# For each contract, initialize counter and loop
attack_code += (
# Initialize counter in memory[32] = number of calls
Op.MSTORE(offset=32, value=calls_per_contract)
# Loop for this specific contract
+ While(
condition=Op.MLOAD(32) + Op.ISZERO + Op.ISZERO, # Continue while counter > 0
body=(
# Call balanceOf(address) on ERC20 contract
# args_offset=28 reads: selector from MEM[28:32] + address
# from MEM[32:64]
Op.CALL(
address=erc20_address,
value=0,
args_offset=28,
args_size=36,
ret_offset=0,
ret_size=0,
)
+ Op.POP # Discard CALL success status
# Decrement counter: counter - 1
+ Op.MSTORE(offset=32, value=Op.SUB(Op.MLOAD(32), 1))
),
)
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
@pytest.mark.valid_from("Prague")
@pytest.mark.parametrize("num_contracts", [1, 5, 10, 20, 100])
def test_sstore_erc20_approve(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
address_stubs: AddressStubs,
num_contracts: int,
request: pytest.FixtureRequest,
) -> None:
"""
BloatNet SSTORE benchmark using ERC20 approve to write to storage.
This test:
1. Filters stubs matching test name prefix
(e.g., test_sstore_erc20_approve_*)
2. Uses first N contracts based on num_contracts parameter
3. Splits gas budget evenly across the selected contracts
4. Calls approve(spender, amount) incrementally (counter as spender)
5. Forces SSTOREs to allowance mapping storage slots
"""
# Extract test function name for stub filtering
test_name = request.node.name.split("[")[0] # Remove parametrization suffix
# Filter stubs that match the test name prefix
matching_stubs = [
stub_name for stub_name in address_stubs.root.keys() if stub_name.startswith(test_name)
]
# Validate we have enough stubs
if len(matching_stubs) < num_contracts:
pytest.fail(
f"Not enough matching stubs for test '{test_name}'. "
f"Required: {num_contracts}, Found: {len(matching_stubs)}. "
f"Matching stubs: {matching_stubs}"
)
# Select first N stubs
selected_stubs = matching_stubs[:num_contracts]
gas_costs = fork.gas_costs()
# Calculate gas costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Per-contract fixed overhead (setup + teardown)
memory_expansion_cost = 15 # Memory expansion to 160 bytes (5 words)
overhead_per_contract = (
gas_costs.G_VERY_LOW # MSTORE to initialize counter (3)
+ memory_expansion_cost # Memory expansion (15)
+ gas_costs.G_JUMPDEST # JUMPDEST at loop start (1)
+ gas_costs.G_LOW # MLOAD for While condition check (5)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_MID # JUMPI (8)
+ gas_costs.G_BASE # POP to clean up counter at end (2)
) # = 38
# Fixed overhead per iteration (loop mechanics, independent of warm/cold)
loop_overhead = (
# Attack contract loop body operations
gas_costs.G_VERY_LOW # MSTORE selector at memory[32] (3)
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_VERY_LOW # MSTORE spender at memory[64] (3)
+ gas_costs.G_BASE # POP call result (2)
# Counter decrement: MSTORE(0, SUB(MLOAD(0), 1))
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_VERY_LOW # PUSH1 1 (3)
+ gas_costs.G_VERY_LOW # SUB (3)
+ gas_costs.G_VERY_LOW # MSTORE counter back (3)
# While loop condition check
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_MID # JUMPI back to loop start (8)
)
# ERC20 internal gas (same for all calls)
# Note: SSTORE cost is 22100 for cold slot, zero-to-non-zero
# (20000 base + 2100 cold access)
erc20_internal_gas = (
gas_costs.G_VERY_LOW # PUSH4 selector (3)
+ gas_costs.G_BASE # EQ selector match (2)
+ gas_costs.G_MID # JUMPI to function (8)
+ gas_costs.G_JUMPDEST # JUMPDEST at function start (1)
+ gas_costs.G_VERY_LOW # CALLDATALOAD spender (3)
+ gas_costs.G_VERY_LOW # CALLDATALOAD amount (3)
+ gas_costs.G_KECCAK_256 # keccak256 static (30)
+ gas_costs.G_KECCAK_256_WORD * 2 # keccak256 dynamic for 64 bytes (12)
+ gas_costs.G_COLD_SLOAD # Cold SLOAD for allowance check (2100)
+ gas_costs.G_STORAGE_SET # SSTORE base cost (20000)
+ gas_costs.G_COLD_SLOAD # Additional cold storage access (2100)
+ gas_costs.G_VERY_LOW # PUSH1 1 for return value (3)
+ gas_costs.G_VERY_LOW # MSTORE return value (3)
+ gas_costs.G_VERY_LOW # PUSH1 32 for return size (3)
+ gas_costs.G_VERY_LOW # PUSH1 0 for return offset (3)
# RETURN costs 0 gas
)
# Calculate total gas needed
total_overhead = intrinsic_gas + (overhead_per_contract * num_contracts)
available_gas_for_iterations = gas_benchmark_value - total_overhead
# For each contract: first call is COLD (2600), subsequent are WARM (100)
# Solve for calls per contract accounting for cold/warm transition
warm_call_cost = loop_overhead + gas_costs.G_WARM_ACCOUNT_ACCESS + erc20_internal_gas
cold_warm_diff = gas_costs.G_COLD_ACCOUNT_ACCESS - gas_costs.G_WARM_ACCOUNT_ACCESS
# Per contract: gas_available = cold_warm_diff + calls * warm_call_cost
gas_per_contract = available_gas_for_iterations // num_contracts
calls_per_contract = int((gas_per_contract - cold_warm_diff) // warm_call_cost)
# Deploy selected ERC20 contracts using stubs
erc20_addresses = []
for stub_name in selected_stubs:
addr = pre.deploy_contract(
code=Bytecode(),
stub=stub_name,
)
erc20_addresses.append(addr)
# Log test requirements
print(
f"Total gas budget: {gas_benchmark_value / 1_000_000:.1f}M gas. "
f"Intrinsic: {intrinsic_gas}, Overhead per contract: {overhead_per_contract}, "
f"Warm call cost: {warm_call_cost}. "
f"{calls_per_contract} approve calls per contract ({num_contracts} contracts)."
)
# Build attack code that loops through each contract
attack_code: Bytecode = (
Op.JUMPDEST # Entry point
+ Op.MSTORE(offset=0, value=APPROVE_SELECTOR) # Store selector once for all contracts
)
for erc20_address in erc20_addresses:
# For each contract, initialize counter and loop
attack_code += (
# Initialize counter in memory[32] = number of calls
Op.MSTORE(offset=32, value=calls_per_contract)
# Loop for this specific contract
+ While(
condition=Op.MLOAD(32) + Op.ISZERO + Op.ISZERO, # Continue while counter > 0
body=(
# Store spender at memory[64] (counter as spender/amount)
Op.MSTORE(offset=64, value=Op.MLOAD(32))
# Call approve(spender, amount) on ERC20 contract
# args_offset=28 reads: selector from MEM[28:32] +
# spender from MEM[32:64] + amount from MEM[64:96]
# Note: counter at MEM[32:64] is reused as spender,
# and value at MEM[64:96] serves as the amount
+ Op.CALL(
address=erc20_address,
value=0,
args_offset=28,
args_size=68, # 4 bytes selector + 32 bytes spender + 32 bytes amount
ret_offset=0,
ret_size=0,
)
+ Op.POP # Discard CALL success status
# Decrement counter: counter - 1
+ Op.MSTORE(offset=32, value=Op.SUB(Op.MLOAD(32), 1))
),
)
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/bloatnet/test_bloatnet.py | tests/benchmark/stateful/bloatnet/test_bloatnet.py | """
abstract: Tests benchmark worst-case bloatnet scenarios.
Tests benchmark worst-case bloatnet scenarios.
Tests running worst-case bloatnet scenarios for benchmarking purposes.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/bloatnet/__init__.py | tests/benchmark/stateful/bloatnet/__init__.py | """Bloatnet benchmark tests package."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/benchmark/stateful/bloatnet/test_multi_opcode.py | tests/benchmark/stateful/bloatnet/test_multi_opcode.py | """
abstract: BloatNet bench cases extracted from https://hackmd.io/9icZeLN7R0Sk5mIjKlZAHQ.
The idea of all these tests is to stress client implementations to find out
where the limits of processing are focusing specifically on state-related
operations.
"""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Transaction,
While,
)
from ethereum_test_vm import Bytecode
from ethereum_test_vm import Opcodes as Op
from pytest_plugins.execute.pre_alloc import AddressStubs
REFERENCE_SPEC_GIT_PATH = "DUMMY/bloatnet.md"
REFERENCE_SPEC_VERSION = "1.0"
# BLOATNET ARCHITECTURE:
#
# [Initcode Contract] [Factory Contract] [24KB Contracts]
# (9.5KB) (116B) (N x 24KB each)
# │ │ │
# │ EXTCODECOPY │ CREATE2(salt++) │
# └──────────────► ├──────────────────► Contract_0
# ├──────────────────► Contract_1
# ├──────────────────► Contract_2
# └──────────────────► Contract_N
#
# [Attack Contract] ──STATICCALL──► [Factory.getConfig()]
# │ returns: (N, hash)
# └─► Loop(i=0 to N):
# 1. Generate CREATE2 addr: keccak256(0xFF|factory|i|hash)[12:]
# 2. BALANCE(addr) → 2600 gas (cold access)
# 3. EXTCODESIZE(addr) → 100 gas (warm access)
#
# HOW IT WORKS:
# 1. Factory uses EXTCODECOPY to load initcode, avoiding PC-relative jumps
# 2. Each CREATE2 deployment produces unique 24KB bytecode (via ADDRESS)
# 3. All contracts share same initcode hash for deterministic addresses
# 4. Attack rapidly accesses all contracts, stressing client's state handling
@pytest.mark.parametrize(
"balance_first",
[True, False],
ids=["balance_extcodesize", "extcodesize_balance"],
)
@pytest.mark.valid_from("Prague")
def test_bloatnet_balance_extcodesize(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
balance_first: bool,
) -> None:
"""
BloatNet test using BALANCE + EXTCODESIZE with "on-the-fly" CREATE2
address generation.
This test:
1. Assumes contracts are already deployed via the factory (salt 0 to N-1)
2. Generates CREATE2 addresses dynamically during execution
3. Calls BALANCE and EXTCODESIZE (order controlled by balance_first param)
4. Maximizes cache eviction by accessing many contracts
"""
gas_costs = fork.gas_costs()
# Calculate gas costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Cost per contract access with CREATE2 address generation
cost_per_contract = (
gas_costs.G_KECCAK_256 # SHA3 static cost for address generation (30)
+ gas_costs.G_KECCAK_256_WORD * 3 # SHA3 dynamic cost (85 bytes = 3 words * 6)
+ gas_costs.G_COLD_ACCOUNT_ACCESS # Cold access (2600)
+ gas_costs.G_BASE # POP first result (2)
+ gas_costs.G_WARM_ACCOUNT_ACCESS # Warm access (100)
+ gas_costs.G_BASE # POP second result (2)
+ gas_costs.G_BASE # DUP1 before first op (3)
+ gas_costs.G_VERY_LOW * 4 # PUSH1 operations (4 * 3)
+ gas_costs.G_LOW # MLOAD for salt (3)
+ gas_costs.G_VERY_LOW # ADD for increment (3)
+ gas_costs.G_LOW # MSTORE salt back (3)
+ 10 # While loop overhead
)
# Calculate how many contracts to access based on available gas
available_gas = gas_benchmark_value - intrinsic_gas - 1000 # Reserve for cleanup
contracts_needed = int(available_gas // cost_per_contract)
# Deploy factory using stub contract - NO HARDCODED VALUES
# The stub "bloatnet_factory" must be provided via --address-stubs flag
# The factory at that address MUST have:
# - Slot 0: Number of deployed contracts
# - Slot 1: Init code hash for CREATE2 address calculation
factory_address = pre.deploy_contract(
code=Bytecode(), # Required parameter, but will be ignored for stubs
stub="bloatnet_factory",
)
# Log test requirements - deployed count read from factory storage
print(
f"Test needs {contracts_needed} contracts for "
f"{gas_benchmark_value / 1_000_000:.1f}M gas. "
f"Factory storage will be checked during execution."
)
# Define operations that differ based on parameter
balance_op = Op.POP(Op.BALANCE)
extcodesize_op = Op.POP(Op.EXTCODESIZE)
benchmark_ops = (
(balance_op + extcodesize_op) if balance_first else (extcodesize_op + balance_op)
)
# Build attack contract that reads config from factory and performs attack
attack_code = (
# Call getConfig() on factory to get num_deployed and init_code_hash
Op.STATICCALL(
gas=Op.GAS,
address=factory_address,
args_offset=0,
args_size=0,
ret_offset=96,
ret_size=64,
)
# Check if call succeeded
+ Op.ISZERO
+ Op.PUSH2(0x1000) # Jump to error handler if failed (far jump)
+ Op.JUMPI
# Load results from memory
# Memory[96:128] = num_deployed_contracts
# Memory[128:160] = init_code_hash
+ Op.MLOAD(96) # Load num_deployed_contracts
+ Op.MLOAD(128) # Load init_code_hash
# Setup memory for CREATE2 address generation
# Memory layout at 0: 0xFF + factory_addr(20) + salt(32) + hash(32)
+ Op.MSTORE(0, factory_address) # Store factory address at memory position 0
+ Op.MSTORE8(11, 0xFF) # Store 0xFF prefix at position (32 - 20 - 1)
+ Op.MSTORE(32, 0) # Store salt at position 32
# Stack now has: [num_contracts, init_code_hash]
+ Op.PUSH1(64) # Push memory position
+ Op.MSTORE # Store init_code_hash at memory[64]
# Stack now has: [num_contracts]
# Main attack loop - iterate through all deployed contracts
+ While(
body=(
# Generate CREATE2 addr: keccak256(0xFF+factory+salt+hash)
Op.SHA3(11, 85) # Generate CREATE2 address from memory[11:96]
# The address is now on the stack
+ Op.DUP1 # Duplicate for second operation
+ benchmark_ops # Execute operations in specified order
# Increment salt for next iteration
+ Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1)) # Increment and store salt
),
# Continue while we haven't reached the limit
condition=Op.DUP1 + Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
)
+ Op.POP # Clean up counter
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state: just verify attack contract exists
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
@pytest.mark.parametrize(
"balance_first",
[True, False],
ids=["balance_extcodecopy", "extcodecopy_balance"],
)
@pytest.mark.valid_from("Prague")
def test_bloatnet_balance_extcodecopy(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
balance_first: bool,
) -> None:
"""
BloatNet test using BALANCE + EXTCODECOPY with on-the-fly CREATE2
address generation.
This test forces actual bytecode reads from disk by:
1. Assumes contracts are already deployed via the factory
2. Generating CREATE2 addresses dynamically during execution
3. Using BALANCE and EXTCODECOPY (order controlled by balance_first param)
4. Reading 1 byte from the END of the bytecode to force full contract load
"""
gas_costs = fork.gas_costs()
max_contract_size = fork.max_code_size()
# Calculate costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Cost per contract with EXTCODECOPY and CREATE2 address generation
cost_per_contract = (
gas_costs.G_KECCAK_256 # SHA3 static cost for address generation (30)
+ gas_costs.G_KECCAK_256_WORD * 3 # SHA3 dynamic cost (85 bytes = 3 words * 6)
+ gas_costs.G_COLD_ACCOUNT_ACCESS # Cold access (2600)
+ gas_costs.G_BASE # POP first result (2)
+ gas_costs.G_WARM_ACCOUNT_ACCESS # Warm access base (100)
+ gas_costs.G_COPY * 1 # Copy cost for 1 byte (3)
+ gas_costs.G_BASE * 2 # DUP1 before first op, DUP4 for address (6)
+ gas_costs.G_VERY_LOW * 8 # PUSH operations (8 * 3 = 24)
+ gas_costs.G_LOW * 2 # MLOAD for salt twice (6)
+ gas_costs.G_VERY_LOW * 2 # ADD operations (6)
+ gas_costs.G_LOW # MSTORE salt back (3)
+ gas_costs.G_BASE # POP after second op (2)
+ 10 # While loop overhead
)
# Calculate how many contracts to access
available_gas = gas_benchmark_value - intrinsic_gas - 1000
contracts_needed = int(available_gas // cost_per_contract)
# Deploy factory using stub contract - NO HARDCODED VALUES
# The stub "bloatnet_factory" must be provided via --address-stubs flag
# The factory at that address MUST have:
# - Slot 0: Number of deployed contracts
# - Slot 1: Init code hash for CREATE2 address calculation
factory_address = pre.deploy_contract(
code=Bytecode(), # Required parameter, but will be ignored for stubs
stub="bloatnet_factory",
)
# Log test requirements - deployed count read from factory storage
print(
f"Test needs {contracts_needed} contracts for "
f"{gas_benchmark_value / 1_000_000:.1f}M gas. "
f"Factory storage will be checked during execution."
)
# Define operations that differ based on parameter
balance_op = Op.POP(Op.BALANCE)
extcodecopy_op = (
Op.PUSH1(1) # size (1 byte)
+ Op.PUSH2(max_contract_size - 1) # code offset (last byte)
+ Op.ADD(Op.MLOAD(32), 96) # unique memory offset
+ Op.DUP4 # address (duplicated earlier)
+ Op.EXTCODECOPY
+ Op.POP # clean up address
)
benchmark_ops = (
(balance_op + extcodecopy_op) if balance_first else (extcodecopy_op + balance_op)
)
# Build attack contract that reads config from factory and performs attack
attack_code = (
# Call getConfig() on factory to get num_deployed and init_code_hash
Op.STATICCALL(
gas=Op.GAS,
address=factory_address,
args_offset=0,
args_size=0,
ret_offset=96,
ret_size=64,
)
# Check if call succeeded
+ Op.ISZERO
+ Op.PUSH2(0x1000) # Jump to error handler if failed (far jump)
+ Op.JUMPI
# Load results from memory
# Memory[96:128] = num_deployed_contracts
# Memory[128:160] = init_code_hash
+ Op.MLOAD(96) # Load num_deployed_contracts
+ Op.MLOAD(128) # Load init_code_hash
# Setup memory for CREATE2 address generation
# Memory layout at 0: 0xFF + factory_addr(20) + salt(32) + hash(32)
+ Op.MSTORE(0, factory_address) # Store factory address at memory position 0
+ Op.MSTORE8(11, 0xFF) # Store 0xFF prefix at position (32 - 20 - 1)
+ Op.MSTORE(32, 0) # Store salt at position 32
# Stack now has: [num_contracts, init_code_hash]
+ Op.PUSH1(64) # Push memory position
+ Op.MSTORE # Store init_code_hash at memory[64]
# Stack now has: [num_contracts]
# Main attack loop - iterate through all deployed contracts
+ While(
body=(
# Generate CREATE2 address
Op.SHA3(11, 85) # Generate CREATE2 address from memory[11:96]
# The address is now on the stack
+ Op.DUP1 # Duplicate for later operations
+ benchmark_ops # Execute operations in specified order
# Increment salt for next iteration
+ Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1)) # Increment and store salt
),
# Continue while counter > 0
condition=Op.DUP1 + Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
)
+ Op.POP # Clean up counter
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
@pytest.mark.parametrize(
"balance_first",
[True, False],
ids=["balance_extcodehash", "extcodehash_balance"],
)
@pytest.mark.valid_from("Prague")
def test_bloatnet_balance_extcodehash(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
balance_first: bool,
) -> None:
"""
BloatNet test using BALANCE + EXTCODEHASH with on-the-fly CREATE2
address generation.
This test:
1. Assumes contracts are already deployed via the factory
2. Generates CREATE2 addresses dynamically during execution
3. Calls BALANCE and EXTCODEHASH (order controlled by balance_first param)
4. Forces client to compute code hash for 24KB bytecode
"""
gas_costs = fork.gas_costs()
# Calculate gas costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Cost per contract access with CREATE2 address generation
cost_per_contract = (
gas_costs.G_KECCAK_256 # SHA3 static cost for address generation (30)
+ gas_costs.G_KECCAK_256_WORD * 3 # SHA3 dynamic cost (85 bytes = 3 words * 6)
+ gas_costs.G_COLD_ACCOUNT_ACCESS # Cold access (2600)
+ gas_costs.G_BASE # POP first result (2)
+ gas_costs.G_WARM_ACCOUNT_ACCESS # Warm access (100)
+ gas_costs.G_BASE # POP second result (2)
+ gas_costs.G_BASE # DUP1 before first op (3)
+ gas_costs.G_VERY_LOW * 4 # PUSH1 operations (4 * 3)
+ gas_costs.G_LOW # MLOAD for salt (3)
+ gas_costs.G_VERY_LOW # ADD for increment (3)
+ gas_costs.G_LOW # MSTORE salt back (3)
+ 10 # While loop overhead
)
# Calculate how many contracts to access based on available gas
available_gas = gas_benchmark_value - intrinsic_gas - 1000 # Reserve for cleanup
contracts_needed = int(available_gas // cost_per_contract)
# Deploy factory using stub contract
factory_address = pre.deploy_contract(
code=Bytecode(),
stub="bloatnet_factory",
)
# Log test requirements
print(
f"Test needs {contracts_needed} contracts for "
f"{gas_benchmark_value / 1_000_000:.1f}M gas. "
f"Factory storage will be checked during execution."
)
# Define operations that differ based on parameter
balance_op = Op.POP(Op.BALANCE)
extcodehash_op = Op.POP(Op.EXTCODEHASH)
benchmark_ops = (
(balance_op + extcodehash_op) if balance_first else (extcodehash_op + balance_op)
)
# Build attack contract that reads config from factory and performs attack
attack_code = (
# Call getConfig() on factory to get num_deployed and init_code_hash
Op.STATICCALL(
gas=Op.GAS,
address=factory_address,
args_offset=0,
args_size=0,
ret_offset=96,
ret_size=64,
)
# Check if call succeeded
+ Op.ISZERO
+ Op.PUSH2(0x1000) # Jump to error handler if failed
+ Op.JUMPI
# Load results from memory
+ Op.MLOAD(96) # Load num_deployed_contracts
+ Op.MLOAD(128) # Load init_code_hash
# Setup memory for CREATE2 address generation
+ Op.MSTORE(0, factory_address)
+ Op.MSTORE8(11, 0xFF)
+ Op.MSTORE(32, 0) # Initial salt
+ Op.PUSH1(64)
+ Op.MSTORE # Store init_code_hash
# Main attack loop
+ While(
body=(
# Generate CREATE2 address
Op.SHA3(11, 85)
+ Op.DUP1 # Duplicate for second operation
+ benchmark_ops # Execute operations in specified order
# Increment salt
+ Op.MSTORE(32, Op.ADD(Op.MLOAD(32), 1))
),
condition=Op.DUP1 + Op.PUSH1(1) + Op.SWAP1 + Op.SUB + Op.DUP1 + Op.ISZERO + Op.ISZERO,
)
+ Op.POP # Clean up counter
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
# ERC20 function selectors
BALANCEOF_SELECTOR = 0x70A08231 # balanceOf(address)
APPROVE_SELECTOR = 0x095EA7B3 # approve(address,uint256)
@pytest.mark.valid_from("Prague")
@pytest.mark.parametrize("num_contracts", [1, 5, 10, 20, 100])
@pytest.mark.parametrize(
"sload_percent,sstore_percent",
[
pytest.param(50, 50, id="50-50"),
pytest.param(70, 30, id="70-30"),
pytest.param(90, 10, id="90-10"),
],
)
def test_mixed_sload_sstore(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_benchmark_value: int,
address_stubs: AddressStubs,
num_contracts: int,
sload_percent: int,
sstore_percent: int,
request: pytest.FixtureRequest,
) -> None:
"""
BloatNet mixed SLOAD/SSTORE benchmark with configurable operation ratios.
This test:
1. Filters stubs matching test name prefix
(e.g., test_mixed_sload_sstore_*)
2. Uses first N contracts based on num_contracts parameter
3. Divides gas budget evenly across all selected contracts
4. For each contract, divides gas into SLOAD and SSTORE portions by
percentage
5. Executes balanceOf (SLOAD) and approve (SSTORE) calls per the ratio
6. Stresses clients with combined read/write operations on large
contracts
"""
# Extract test function name for stub filtering
test_name = request.node.name.split("[")[0] # Remove parametrization suffix
# Filter stubs that match the test name prefix
matching_stubs = [
stub_name for stub_name in address_stubs.root.keys() if stub_name.startswith(test_name)
]
# Validate we have enough stubs
if len(matching_stubs) < num_contracts:
pytest.fail(
f"Not enough matching stubs for test '{test_name}'. "
f"Required: {num_contracts}, Found: {len(matching_stubs)}. "
f"Matching stubs: {matching_stubs}"
)
# Select first N stubs
selected_stubs = matching_stubs[:num_contracts]
gas_costs = fork.gas_costs()
# Calculate gas costs
intrinsic_gas = fork.transaction_intrinsic_cost_calculator()(calldata=b"")
# Fixed overhead for SLOAD loop
sload_loop_overhead = (
# Attack contract loop overhead
gas_costs.G_VERY_LOW * 2 # MLOAD counter (3*2)
+ gas_costs.G_VERY_LOW * 2 # MSTORE selector (3*2)
+ gas_costs.G_VERY_LOW * 3 # MLOAD + MSTORE address (3*3)
+ gas_costs.G_BASE # POP (2)
+ gas_costs.G_BASE * 3 # SUB + MLOAD + MSTORE for counter decrement (2*3)
+ gas_costs.G_BASE * 2 # ISZERO * 2 for loop condition (2*2)
+ gas_costs.G_MID # JUMPI (8)
)
# ERC20 balanceOf internal gas
sload_erc20_internal = (
gas_costs.G_VERY_LOW # PUSH4 selector (3)
+ gas_costs.G_BASE # EQ selector match (2)
+ gas_costs.G_MID # JUMPI to function (8)
+ gas_costs.G_JUMPDEST # JUMPDEST at function start (1)
+ gas_costs.G_VERY_LOW * 2 # CALLDATALOAD arg (3*2)
+ gas_costs.G_KECCAK_256 # keccak256 static (30)
+ gas_costs.G_KECCAK_256_WORD * 2 # keccak256 dynamic for 64 bytes (2*6)
+ gas_costs.G_COLD_SLOAD # Cold SLOAD - always cold for random addresses (2100)
+ gas_costs.G_VERY_LOW * 3 # MSTORE result + RETURN setup (3*3)
)
# Fixed overhead for SSTORE loop
sstore_loop_overhead = (
# Attack contract loop body operations
gas_costs.G_VERY_LOW # MSTORE selector at memory[32] (3)
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_VERY_LOW # MSTORE spender at memory[64] (3)
+ gas_costs.G_BASE # POP call result (2)
# Counter decrement
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_VERY_LOW # PUSH1 1 (3)
+ gas_costs.G_VERY_LOW # SUB (3)
+ gas_costs.G_VERY_LOW # MSTORE counter back (3)
# While loop condition check
+ gas_costs.G_LOW # MLOAD counter (5)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_BASE # ISZERO (2)
+ gas_costs.G_MID # JUMPI back to loop start (8)
)
# ERC20 approve internal gas
# Cold SSTORE: 22100 = 20000 base + 2100 cold access
sstore_erc20_internal = (
gas_costs.G_VERY_LOW # PUSH4 selector (3)
+ gas_costs.G_BASE # EQ selector match (2)
+ gas_costs.G_MID # JUMPI to function (8)
+ gas_costs.G_JUMPDEST # JUMPDEST at function start (1)
+ gas_costs.G_VERY_LOW # CALLDATALOAD spender (3)
+ gas_costs.G_VERY_LOW # CALLDATALOAD amount (3)
+ gas_costs.G_KECCAK_256 # keccak256 static (30)
+ gas_costs.G_KECCAK_256_WORD * 2 # keccak256 dynamic for 64 bytes (12)
+ gas_costs.G_COLD_SLOAD # Cold SLOAD for allowance check (2100)
+ gas_costs.G_STORAGE_SET # SSTORE base cost (20000)
+ gas_costs.G_COLD_SLOAD # Additional cold storage access (2100)
+ gas_costs.G_VERY_LOW # PUSH1 1 for return value (3)
+ gas_costs.G_VERY_LOW # MSTORE return value (3)
+ gas_costs.G_VERY_LOW # PUSH1 32 for return size (3)
+ gas_costs.G_VERY_LOW # PUSH1 0 for return offset (3)
)
# Calculate gas budget per contract
available_gas = gas_benchmark_value - intrinsic_gas
gas_per_contract = available_gas // num_contracts
# For each contract, split gas by percentage
sload_gas_per_contract = (gas_per_contract * sload_percent) // 100
sstore_gas_per_contract = (gas_per_contract * sstore_percent) // 100
# Account for cold/warm transitions in CALL costs
# First SLOAD call is COLD (2600), rest are WARM (100)
sload_warm_cost = sload_loop_overhead + gas_costs.G_WARM_ACCOUNT_ACCESS + sload_erc20_internal
cold_warm_diff = gas_costs.G_COLD_ACCOUNT_ACCESS - gas_costs.G_WARM_ACCOUNT_ACCESS
sload_calls_per_contract = int((sload_gas_per_contract - cold_warm_diff) // sload_warm_cost)
# First SSTORE call is COLD (2600), rest are WARM (100)
sstore_warm_cost = (
sstore_loop_overhead + gas_costs.G_WARM_ACCOUNT_ACCESS + sstore_erc20_internal
)
sstore_calls_per_contract = int((sstore_gas_per_contract - cold_warm_diff) // sstore_warm_cost)
# Deploy selected ERC20 contracts using stubs
erc20_addresses = []
for stub_name in selected_stubs:
addr = pre.deploy_contract(
code=Bytecode(),
stub=stub_name,
)
erc20_addresses.append(addr)
# Log test requirements
print(
f"Total gas budget: {gas_benchmark_value / 1_000_000:.1f}M gas. "
f"~{gas_per_contract / 1_000_000:.1f}M gas per contract "
f"({sload_percent}% SLOAD, {sstore_percent}% SSTORE). "
f"Per contract: {sload_calls_per_contract} balanceOf calls, "
f"{sstore_calls_per_contract} approve calls."
)
# Build attack code that loops through each contract
attack_code: Bytecode = (
Op.JUMPDEST # Entry point
+ Op.MSTORE(offset=0, value=BALANCEOF_SELECTOR) # Store selector once for all contracts
)
for erc20_address in erc20_addresses:
# For each contract, execute SLOAD operations (balanceOf)
attack_code += (
# Initialize counter in memory[32] = number of balanceOf calls
Op.MSTORE(offset=32, value=sload_calls_per_contract)
# Loop for balanceOf calls
+ While(
condition=Op.MLOAD(32) + Op.ISZERO + Op.ISZERO,
body=(
# Call balanceOf(address) on ERC20 contract
# args_offset=28 reads: selector from MEM[28:32] + address
# from MEM[32:64]
Op.CALL(
address=erc20_address,
value=0,
args_offset=28,
args_size=36,
ret_offset=0,
ret_size=0,
)
+ Op.POP # Discard CALL success status
# Decrement counter
+ Op.MSTORE(offset=32, value=Op.SUB(Op.MLOAD(32), 1))
),
)
)
# For each contract, execute SSTORE operations (approve)
# Reuse the same memory layout as balanceOf
attack_code += (
# Store approve selector at memory[0] (reusing same slot)
Op.MSTORE(offset=0, value=APPROVE_SELECTOR)
# Initialize counter in memory[32] = number of approve calls
# (reusing same slot)
+ Op.MSTORE(offset=32, value=sstore_calls_per_contract)
# Loop for approve calls
+ While(
condition=Op.MLOAD(32) + Op.ISZERO + Op.ISZERO,
body=(
# Store spender at memory[64] (counter as spender/amount)
Op.MSTORE(offset=64, value=Op.MLOAD(32))
# Call approve(spender, amount) on ERC20 contract
# args_offset=28 reads: selector from MEM[28:32] +
# spender from MEM[32:64] + amount from MEM[64:96]
# Note: counter at MEM[32:64] is reused as spender,
# and value at MEM[64:96] serves as the amount
+ Op.CALL(
address=erc20_address,
value=0,
args_offset=28,
args_size=68,
ret_offset=0,
ret_size=0,
)
+ Op.POP # Discard CALL success status
# Decrement counter
+ Op.MSTORE(offset=32, value=Op.SUB(Op.MLOAD(32), 1))
),
)
)
# Deploy attack contract
attack_address = pre.deploy_contract(code=attack_code)
# Run the attack
attack_tx = Transaction(
to=attack_address,
gas_limit=gas_benchmark_value,
sender=pre.fund_eoa(),
)
# Post-state
post = {
attack_address: Account(storage={}),
}
blockchain_test(
pre=pre,
blocks=[Block(txs=[attack_tx])],
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/__init__.py | tests/berlin/__init__.py | """Test cases for EVM functionality introduced in Berlin."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py | tests/berlin/eip2930_access_list/test_tx_intrinsic_gas.py | """
Tests [EIP-2930: Access list transaction](https://eips.ethereum.org/EIPS/eip-2930).
Original test by Ori:
https://github.com/ethereum/tests/blob/v15.0/src/GeneralStateTestsFiller/stEIP1559/intrinsicGen.js.
"""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
AccessList,
Address,
Alloc,
Bytes,
Environment,
StateTestFiller,
Transaction,
TransactionException,
)
from ethereum_test_tools import Opcodes as Op
from .spec import ref_spec_2930
REFERENCE_SPEC_GIT_PATH = ref_spec_2930.git_path
REFERENCE_SPEC_VERSION = ref_spec_2930.version
pytestmark = pytest.mark.valid_from("Berlin")
tx_intrinsic_gas_data_vectors = [
pytest.param(Bytes(b""), id="data_empty"),
pytest.param(Bytes(b"0x00"), id="data_1_zero_byte"),
pytest.param(Bytes(b"0x00000000"), id="data_4_zero_byte"),
pytest.param(Bytes(b"0xFF"), id="data_1_non_zero_byte"),
pytest.param(Bytes(b"0x00FF"), id="data_1_zero_byte_1_non_zero_byte"),
pytest.param(Bytes(b"0xFE00"), id="data_1_zero_byte_1_non_zero_byte_reversed"),
pytest.param(Bytes(b"0x0102030405060708090A0B0C0D0E0F10"), id="data_set_1"),
pytest.param(
Bytes(b"0x00010203040506000708090A0B0C0D0E0F10111200131415161718191a1b1c1d1e1f"),
id="data_set_2",
),
pytest.param(
Bytes(b"0x0102030405060708090A0B0C0D0E0F101112131415161718191a1b1c1d1e1f20"),
id="data_set_3",
),
pytest.param(
Bytes(b"0x01020304050607080910111213141516171819202122232425262728293031"),
id="data_set_31_bytes",
),
pytest.param(
Bytes(b"0x000102030405060708090A0B0C0D0E0F101112131415161718191a1b1c1d1e1f"),
id="data_set_32_bytes",
),
pytest.param(
Bytes(b"0x010203040506070809101112131415161718192021222324252627282930313233"),
id="data_set_33_bytes",
),
pytest.param(
Bytes(b"0x000000000000000000000000000000000000000000000000000000000000000000"),
id="data_set_33_empty_bytes",
),
pytest.param(
Bytes(
b"0x000000000000000000000000000000000000000000000000000000000000000000010203040506070809101112131415161718192021222324252627282930313233"
),
id="data_set_66_bytes_half_zeros",
),
]
tx_intrinsic_gas_access_list_vectors = [
pytest.param([], id="access_list_empty"),
pytest.param(
[AccessList(address=1, storage_keys=[])],
id="access_list_1_address_empty_keys",
),
pytest.param(
[AccessList(address=1, storage_keys=[0x60A7])],
id="access_list_1_address_1_keys",
),
pytest.param(
[AccessList(address=1, storage_keys=[0x60A7, 0x60A8])],
id="access_list_1_address_2_keys",
),
pytest.param(
[
AccessList(address=1, storage_keys=[]),
AccessList(address=2, storage_keys=[]),
],
id="access_list_2_address_empty_keys",
),
pytest.param(
[
AccessList(address=1, storage_keys=[]),
AccessList(address=2, storage_keys=[0x60A7]),
],
id="access_list_2_address_1_keys",
),
pytest.param(
[
AccessList(address=1, storage_keys=[0x60A7]),
AccessList(address=2, storage_keys=[0x60A8]),
],
id="access_list_2_address_2_keys",
),
pytest.param(
[
AccessList(address=1, storage_keys=[0x60A7, 0x60A8]),
AccessList(address=2, storage_keys=[]),
],
id="access_list_2_address_2_keys_inversion",
),
pytest.param(
[
AccessList(address=1, storage_keys=[0xCE11]),
AccessList(address=2, storage_keys=[0x60A7]),
*[
AccessList(
address=Address(i),
storage_keys=[0x600D, 0x0BAD, 0x60A7, 0xBEEF],
)
for i in range(3, 13) # 3 to 12 inclusive (10 entries)
],
],
id="access_list_12_address_42_keys",
),
]
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stEIP1559/intrinsicGen.js",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stEIP1559/intrinsicFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1535"],
)
@pytest.mark.parametrize("data", tx_intrinsic_gas_data_vectors)
@pytest.mark.parametrize("access_list", tx_intrinsic_gas_access_list_vectors)
@pytest.mark.parametrize(
"below_intrinsic",
[
pytest.param(False),
pytest.param(True, marks=pytest.mark.exception_test),
],
)
@pytest.mark.with_all_tx_types(selector=lambda tx_type: tx_type in [1, 2])
@pytest.mark.slow()
def test_tx_intrinsic_gas(
state_test: StateTestFiller,
tx_type: int,
pre: Alloc,
fork: Fork,
data: Bytes,
access_list: List[AccessList],
below_intrinsic: bool,
) -> None:
"""Transaction intrinsic gas calculation on EIP2930."""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
intrinsic_gas_cost = intrinsic_gas_cost_calculator(calldata=data, access_list=access_list)
exception: List[TransactionException] | TransactionException | None = None
if below_intrinsic:
data_floor_gas_cost_calculator = fork.transaction_data_floor_cost_calculator()
data_floor_gas_cost = data_floor_gas_cost_calculator(data=data)
if data_floor_gas_cost > intrinsic_gas_cost:
exception = TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST
elif data_floor_gas_cost == intrinsic_gas_cost:
# Depending on the implementation, client might raise either
# exception.
exception = [
TransactionException.INTRINSIC_GAS_TOO_LOW,
TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST,
]
else:
exception = TransactionException.INTRINSIC_GAS_TOO_LOW
tx = Transaction(
ty=tx_type,
sender=pre.fund_eoa(),
to=pre.deploy_contract(code=Op.SSTORE(0, Op.ADD(1, 1))),
data=data,
access_list=access_list,
gas_limit=intrinsic_gas_cost + (-1 if below_intrinsic else 0),
error=exception,
protected=True,
)
state_test(env=Environment(), pre=pre, post={}, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2930_access_list/spec.py | tests/berlin/eip2930_access_list/spec.py | """Defines EIP-2930 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_2930 = ReferenceSpec("EIPS/eip-2930.md", "c9db53a936c5c9cbe2db32ba0d1b86c4c6e73534")
# Constants
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-2930 specifications as defined at
https://eips.ethereum.org/EIPS/eip-2930#specification.
"""
ACCESS_LIST_ADDRESS_COST = 2400
ACCESS_LIST_STORAGE_KEY_COST = 1900
"""From EIP-2028"""
TX_BASE_INTRINSIC_GAS = 21_000
TX_DATA_ZERO_BYTE_GAS = 4
TX_DATA_NON_ZERO_BYTE_GAS = 16
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2930_access_list/test_acl.py | tests/berlin/eip2930_access_list/test_acl.py | """Test ACL Transaction Source Code Examples."""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
AccessList,
Account,
Address,
Alloc,
CodeGasMeasure,
Environment,
Hash,
StateTestFiller,
Transaction,
TransactionException,
)
from ethereum_test_tools import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-2930.md"
REFERENCE_SPEC_VERSION = "c9db53a936c5c9cbe2db32ba0d1b86c4c6e73534"
pytestmark = pytest.mark.valid_from("Berlin")
@pytest.mark.parametrize(
"account_warm,storage_key_warm",
[
(True, True),
(True, False),
# (False, True), Not possible
(False, False),
],
)
def test_account_storage_warm_cold_state(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
account_warm: bool,
storage_key_warm: bool,
) -> None:
"""Test type 1 transaction."""
env = Environment()
gas_costs = fork.gas_costs()
storage_reader_contract = pre.deploy_contract(Op.SLOAD(1) + Op.STOP)
overhead_cost = (
gas_costs.G_VERY_LOW * (Op.CALL.popped_stack_items - 1) # Call stack items
+ gas_costs.G_BASE # Call gas
+ gas_costs.G_VERY_LOW # SLOAD Push
)
contract_address = pre.deploy_contract(
CodeGasMeasure(
code=Op.CALL(address=storage_reader_contract),
overhead_cost=overhead_cost,
extra_stack_items=1,
sstore_key=0,
)
)
expected_gas_cost = 0
access_list_address = Address(0)
access_list_storage_key = Hash(0)
if account_warm:
expected_gas_cost += gas_costs.G_WARM_ACCOUNT_ACCESS
access_list_address = storage_reader_contract
else:
expected_gas_cost += gas_costs.G_COLD_ACCOUNT_ACCESS
if storage_key_warm:
expected_gas_cost += gas_costs.G_WARM_SLOAD
access_list_storage_key = Hash(1)
else:
expected_gas_cost += gas_costs.G_COLD_SLOAD
access_lists: List[AccessList] = [
AccessList(
address=access_list_address,
storage_keys=[access_list_storage_key],
),
]
sender = pre.fund_eoa()
contract_creation = False
tx_data = b""
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit = (
intrinsic_gas_calculator(
calldata=tx_data,
contract_creation=contract_creation,
access_list=access_lists,
)
+ 100_000
)
tx = Transaction(
ty=1,
data=tx_data,
to=contract_address,
gas_limit=tx_gas_limit,
access_list=access_lists,
sender=sender,
)
post = {
contract_address: Account(
nonce=1,
storage={0: expected_gas_cost},
),
}
state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"access_lists",
[
pytest.param(
[],
id="empty_access_list",
),
pytest.param(
[AccessList(address=Address(0), storage_keys=[])],
id="single_address_multiple_no_storage_keys",
),
pytest.param(
[AccessList(address=Address(0), storage_keys=[Hash(0)])],
id="single_address_single_storage_key",
),
pytest.param(
[AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)])],
id="single_address_multiple_storage_keys",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)]),
AccessList(address=Address(1), storage_keys=[]),
],
id="multiple_addresses_second_address_no_storage_keys",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)]),
AccessList(address=Address(1), storage_keys=[Hash(0)]),
],
id="multiple_addresses_second_address_single_storage_key",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)]),
AccessList(address=Address(1), storage_keys=[Hash(0), Hash(1)]),
],
id="multiple_addresses_second_address_multiple_storage_keys",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[]),
AccessList(address=Address(1), storage_keys=[Hash(0), Hash(1)]),
],
id="multiple_addresses_first_address_no_storage_keys",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0)]),
AccessList(address=Address(1), storage_keys=[Hash(0), Hash(1)]),
],
id="multiple_addresses_first_address_single_storage_key",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[]),
AccessList(address=Address(1), storage_keys=[]),
],
id="repeated_address_no_storage_keys",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0)]),
AccessList(address=Address(0), storage_keys=[Hash(1)]),
],
id="repeated_address_single_storage_key",
),
pytest.param(
[
AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)]),
AccessList(address=Address(0), storage_keys=[Hash(0), Hash(1)]),
],
id="repeated_address_multiple_storage_keys",
),
],
)
@pytest.mark.parametrize(
"enough_gas",
[
pytest.param(True, id="enough_gas"),
pytest.param(False, id="not_enough_gas", marks=pytest.mark.exception_test),
],
)
def test_transaction_intrinsic_gas_cost(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
access_lists: List[AccessList],
enough_gas: bool,
) -> None:
"""Test type 1 transaction."""
env = Environment()
contract_start_balance = 3
contract_address = pre.deploy_contract(
Op.STOP,
balance=contract_start_balance,
)
sender = pre.fund_eoa()
tx_value = 1
pre.fund_address(sender, tx_value)
contract_creation = False
tx_data = b""
intrinsic_gas_calculator = fork.transaction_intrinsic_cost_calculator()
tx_exception = None
tx_gas_limit = intrinsic_gas_calculator(
calldata=tx_data,
contract_creation=contract_creation,
access_list=access_lists,
)
if not enough_gas:
tx_gas_limit -= 1
tx_exception = TransactionException.INTRINSIC_GAS_TOO_LOW
tx = Transaction(
ty=1,
data=tx_data,
to=contract_address,
value=tx_value,
gas_limit=tx_gas_limit,
access_list=access_lists,
sender=sender,
error=tx_exception,
)
post = {
contract_address: Account(
balance=contract_start_balance + 1 if enough_gas else contract_start_balance,
nonce=1,
),
sender: Account(
nonce=1 if enough_gas else 0,
),
}
state_test(env=env, pre=pre, post=post, tx=tx)
def test_repeated_address_acl(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
) -> None:
"""
Tests that slots are warmed correctly in an access list that has the same
address repeated more than once, each time with different slots.
Difference with other ACL tests is that we actually try to
access both slots at runtime. We also measure the gas cost
of each access in order to make debugging easier.
"""
sender = pre.fund_eoa()
gsc = fork.gas_costs()
sload0_measure = CodeGasMeasure(
code=Op.SLOAD(0),
overhead_cost=gsc.G_VERY_LOW * len(Op.SLOAD.kwargs), # Cost of pushing SLOAD args
extra_stack_items=1, # SLOAD pushes 1 item to the stack
sstore_key=0,
stop=False, # Because it's the first CodeGasMeasure
)
sload1_measure = CodeGasMeasure(
code=Op.SLOAD(1),
overhead_cost=gsc.G_VERY_LOW * len(Op.SLOAD.kwargs), # Cost of pushing SLOAD args
extra_stack_items=1, # SLOAD pushes 1 item to the stack
sstore_key=1,
)
contract = pre.deploy_contract(sload0_measure + sload1_measure)
tx = Transaction(
gas_limit=500_000,
to=contract,
value=0,
sender=sender,
access_list=[
AccessList(
address=contract,
storage_keys=[0],
),
AccessList(
address=contract,
storage_keys=[1],
),
],
)
sload_cost = gsc.G_WARM_ACCOUNT_ACCESS
state_test(
env=Environment(),
pre=pre,
tx=tx,
post={
contract: Account(
storage={0: sload_cost, 1: sload_cost},
)
},
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2930_access_list/__init__.py | tests/berlin/eip2930_access_list/__init__.py | """
Tests for [EIP-2930: Optional access lists](https://eips.ethereum.org/EIPS/eip-2930).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py | tests/berlin/eip2929_gas_cost_increases/test_precompile_warming.py | """
Tests EIP-2929 precompile warming behavior.
Tests precompile warming behavior across fork transitions from
[EIP-2929: Gas cost increases for state access opcodes]
(https://eips.ethereum.org/EIPS/eip-2929).
"""
from typing import Iterator, Tuple
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import (
Fork,
get_transition_fork_predecessor,
get_transition_fork_successor,
)
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-2929.md"
REFERENCE_SPEC_VERSION = "0e11417265a623adb680c527b15d0cb6701b870b"
def precompile_addresses_in_predecessor_successor(
fork: Fork,
) -> Iterator[Tuple[Address, bool, bool]]:
"""
Yield the addresses of precompiled contracts and whether they existed in
the parent fork.
Args:
fork (Fork): The transition fork instance containing precompiled
contract information.
Yields:
Iterator[Tuple[str, bool]]: A tuple containing the address in
hexadecimal format and a boolean indicating whether the address
has existed in the predecessor.
"""
precompile_range = range(0x01, 0x100)
predecessor_precompiles = set(get_transition_fork_predecessor(fork).precompiles())
successor_precompiles = set(get_transition_fork_successor(fork).precompiles())
all_precompiles = successor_precompiles | predecessor_precompiles
precompiles_in_range = {
addr
for addr in all_precompiles
if int.from_bytes(addr, byteorder="big") in precompile_range
}
highest_in_range = max(int.from_bytes(addr, byteorder="big") for addr in precompiles_in_range)
highest_overall = max(int.from_bytes(addr, byteorder="big") for addr in all_precompiles)
extra_range = 32
extra_precompiles = {
Address(i) for i in range(highest_in_range + 1, highest_in_range + extra_range)
}
extra_precompiles_outside_range = {Address(highest_overall + 1)}
all_precompiles = all_precompiles | extra_precompiles | extra_precompiles_outside_range
for address in sorted(all_precompiles):
yield address, address in successor_precompiles, address in predecessor_precompiles
@pytest.mark.valid_at_transition_to("Paris", subsequent_forks=True)
@pytest.mark.parametrize_by_fork(
"address,precompile_in_successor,precompile_in_predecessor",
precompile_addresses_in_predecessor_successor,
)
@EIPChecklist.Precompile.Test.ForkTransition.Before.Cold(eip=[7951])
@EIPChecklist.Precompile.Test.ForkTransition.After.Warm(eip=[7951])
@pytest.mark.slow()
def test_precompile_warming(
blockchain_test: BlockchainTestFiller,
fork: Fork,
address: Address,
precompile_in_successor: bool,
precompile_in_predecessor: bool,
pre: Alloc,
) -> None:
"""
Call BALANCE of a precompile addresses before and after a fork.
According to EIP-2929, when a transaction begins, accessed_addresses is
initialized to include:
- tx.sender, tx.to
- and the set of all precompiles
This test verifies that:
1. Precompiles that exist in the predecessor fork are always "warm" (lower
gas cost).
2. New precompiles added in a fork are "cold" before the fork and become
"warm" after.
"""
sender = pre.fund_eoa()
call_cost_slot = 0
code = (
Op.GAS
+ Op.BALANCE(address)
+ Op.POP
+ Op.SSTORE(call_cost_slot, Op.SUB(Op.SWAP1, Op.GAS))
+ Op.STOP
)
before = pre.deploy_contract(code, storage={call_cost_slot: 0xDEADBEEF})
after = pre.deploy_contract(code, storage={call_cost_slot: 0xDEADBEEF})
# Block before fork
blocks = [
Block(
timestamp=10_000,
txs=[
Transaction(
sender=sender,
to=before,
gas_limit=1_000_000,
)
],
)
]
# Block after fork
blocks += [
Block(
timestamp=20_000,
txs=[
Transaction(
sender=sender,
to=after,
gas_limit=1_000_000,
)
],
)
]
predecessor = get_transition_fork_predecessor(fork)
successor = get_transition_fork_successor(fork)
def get_expected_gas(precompile_present: bool, fork: Fork) -> int:
gas_costs = fork.gas_costs()
warm_access_cost = gas_costs.G_WARM_ACCOUNT_ACCESS
cold_access_cost = gas_costs.G_COLD_ACCOUNT_ACCESS
extra_cost = gas_costs.G_BASE * 2 + gas_costs.G_VERY_LOW
if precompile_present:
return warm_access_cost + extra_cost
else:
return cold_access_cost + extra_cost
expected_gas_before = get_expected_gas(precompile_in_predecessor, predecessor)
expected_gas_after = get_expected_gas(precompile_in_successor, successor)
post = {
before: Account(storage={call_cost_slot: expected_gas_before}),
after: Account(storage={call_cost_slot: expected_gas_after}),
}
blockchain_test(
pre=pre,
post=post,
blocks=blocks,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2929_gas_cost_increases/__init__.py | tests/berlin/eip2929_gas_cost_increases/__init__.py | """
Tests for [EIP-2929: Gas cost increases for state access opcodes](https://eips.ethereum.org/EIPS/eip-2929).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/berlin/eip2929_gas_cost_increases/test_call.py | tests/berlin/eip2929_gas_cost_increases/test_call.py | """Test the CALL opcode after EIP-2929."""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
CodeGasMeasure,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-2929.md"
REFERENCE_SPEC_VERSION = "0e11417265a623adb680c527b15d0cb6701b870b"
@pytest.mark.valid_from("Berlin")
def test_call_insufficient_balance(
state_test: StateTestFiller, pre: Alloc, env: Environment, fork: Fork
) -> None:
"""
Test a regular CALL to see if it warms the destination with insufficient
balance.
"""
gas_costs = fork.gas_costs()
destination = pre.fund_eoa(1)
contract_address = pre.deploy_contract(
# Perform the aborted external calls
Op.SSTORE(
0,
Op.CALL(
gas=Op.GAS,
address=destination,
value=1,
args_offset=0,
args_size=0,
ret_offset=0,
ret_size=0,
),
)
# Measure the gas cost for BALANCE operation
+ CodeGasMeasure(
code=Op.BALANCE(destination),
overhead_cost=gas_costs.G_VERY_LOW, # PUSH20 costs 3 gas
extra_stack_items=1, # BALANCE puts balance on stack
sstore_key=1,
),
balance=0,
)
tx = Transaction(
to=contract_address,
gas_limit=100_000,
sender=pre.fund_eoa(),
)
post = {
destination: Account(
balance=1,
),
contract_address: Account(
storage={
0: 0, # The CALL is aborted
1: gas_costs.G_WARM_ACCOUNT_ACCESS, # Warm access cost
},
),
}
state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/__init__.py | tests/osaka/__init__.py | """
Test cases for EVM functionality introduced in Osaka, [EIP-7607: Hardfork Meta
- Fusaka](https://eip.directory/eips/eip-7607).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7594_peerdas/spec.py | tests/osaka/eip7594_peerdas/spec.py | """Defines EIP-7594 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7594 = ReferenceSpec("EIPS/eip-7594.md", "45d03a84a8ad0160ed3fb03af52c49bd39e802ba")
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-7594 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7594.
"""
MAX_BLOBS_PER_TX = 6
BLOB_COMMITMENT_VERSION_KZG = 1
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py | tests/osaka/eip7594_peerdas/test_max_blob_per_tx.py | """
MAX_BLOBS_PER_TX limit tests.
Tests for `MAX_BLOBS_PER_TX` limit in [EIP-7594: PeerDAS - Peer Data
Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
"""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Hash,
StateTestFiller,
Transaction,
TransactionException,
add_kzg_version,
)
from .spec import Spec, ref_spec_7594
REFERENCE_SPEC_GIT_PATH = ref_spec_7594.git_path
REFERENCE_SPEC_VERSION = ref_spec_7594.version
FORK_TIMESTAMP = 15_000
@pytest.fixture
def env() -> Environment:
"""Environment fixture."""
return Environment()
@pytest.fixture
def sender(pre: Alloc) -> Address:
"""Sender account with sufficient balance for blob transactions."""
return pre.fund_eoa(amount=10**18)
@pytest.fixture
def destination(pre: Alloc) -> Address:
"""Destination account for blob transactions."""
return pre.fund_eoa(amount=0)
@pytest.fixture
def blob_gas_price(fork: Fork) -> int:
"""Blob gas price for transactions."""
return fork.min_base_fee_per_blob_gas()
@pytest.fixture
def tx(
sender: Address,
destination: Address,
blob_gas_price: int,
blob_count: int,
) -> Transaction:
"""Blob transaction fixture."""
return Transaction(
ty=3,
sender=sender,
to=destination,
value=1,
gas_limit=21_000,
max_fee_per_gas=10,
max_priority_fee_per_gas=1,
max_fee_per_blob_gas=blob_gas_price,
access_list=[],
blob_versioned_hashes=add_kzg_version(
[Hash(i) for i in range(0, blob_count)],
Spec.BLOB_COMMITMENT_VERSION_KZG,
),
)
@pytest.mark.parametrize_by_fork(
"blob_count",
lambda fork: list(range(1, fork.max_blobs_per_tx() + 1)),
)
@pytest.mark.valid_from("Osaka")
def test_valid_max_blobs_per_tx(
state_test: StateTestFiller,
pre: Alloc,
env: Environment,
tx: Transaction,
) -> None:
"""
Test that transactions with blob count from 1 to MAX_BLOBS_PER_TX are
accepted. Verifies that individual transactions can contain up to the
maximum allowed number of blobs per transaction.
"""
state_test(
env=env,
pre=pre,
tx=tx,
post={},
)
@pytest.mark.parametrize_by_fork(
"blob_count",
lambda fork: [
fork.max_blobs_per_tx() + 1,
fork.max_blobs_per_tx() + 2,
fork.max_blobs_per_block(),
fork.max_blobs_per_block() + 1,
],
)
@pytest.mark.valid_from("Osaka")
@pytest.mark.exception_test
def test_invalid_max_blobs_per_tx(
fork: Fork,
state_test: StateTestFiller,
pre: Alloc,
env: Environment,
tx: Transaction,
blob_count: int,
) -> None:
"""
Test that transactions exceeding MAX_BLOBS_PER_TX are rejected. Verifies
that individual transactions cannot contain more than the maximum allowed
number of blobs per transaction, even if the total would be within the
block limit.
"""
state_test(
env=env,
pre=pre,
tx=tx.with_error(
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED
if blob_count > fork.max_blobs_per_block()
else TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED
),
post={},
)
@pytest.mark.parametrize_by_fork(
"blob_count",
lambda fork: [
fork.max_blobs_per_tx(timestamp=FORK_TIMESTAMP) + 1,
fork.max_blobs_per_block(timestamp=FORK_TIMESTAMP) + 1,
],
)
@pytest.mark.valid_at_transition_to("Osaka")
@pytest.mark.exception_test
def test_max_blobs_per_tx_fork_transition(
fork: Fork,
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Alloc,
tx: Transaction,
blob_count: int,
) -> None:
"""Test `MAX_BLOBS_PER_TX` limit enforcement across fork transition."""
expected_exception = (
TransactionException.TYPE_3_TX_MAX_BLOB_GAS_ALLOWANCE_EXCEEDED
if blob_count > fork.max_blobs_per_block(timestamp=FORK_TIMESTAMP)
else TransactionException.TYPE_3_TX_BLOB_COUNT_EXCEEDED
)
pre_fork_block = Block(
txs=[
tx
if blob_count < fork.max_blobs_per_block(timestamp=FORK_TIMESTAMP - 1)
else tx.with_error(expected_exception)
],
timestamp=FORK_TIMESTAMP - 1,
exception=None
if blob_count < fork.max_blobs_per_block(timestamp=FORK_TIMESTAMP - 1)
else [expected_exception],
)
fork_block = Block(
txs=[tx.with_nonce(1).with_error(expected_exception)],
timestamp=FORK_TIMESTAMP,
exception=[expected_exception],
)
post_fork_block = Block(
txs=[tx.with_nonce(1).with_error(expected_exception)],
timestamp=FORK_TIMESTAMP + 1,
exception=[expected_exception],
)
blockchain_test(
pre=pre,
post={},
blocks=[pre_fork_block, fork_block, post_fork_block],
genesis_environment=env,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7594_peerdas/test_get_blobs.py | tests/osaka/eip7594_peerdas/test_get_blobs.py | """
Get blobs engine endpoint tests.
Tests for get blobs engine endpoint in [EIP-7594: PeerDAS - Peer Data
Availability Sampling](https://eips.ethereum.org/EIPS/eip-7594).
"""
from hashlib import sha256
from typing import List, Optional
import pytest
from ethereum_test_base_types.base_types import Hash
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Address,
Alloc,
Blob,
BlobsTestFiller,
NetworkWrappedTransaction,
Transaction,
TransactionException,
)
from pytest_plugins.custom_logging import get_logger
from .spec import ref_spec_7594
REFERENCE_SPEC_GIT_PATH = ref_spec_7594.git_path
REFERENCE_SPEC_VERSION = ref_spec_7594.version
logger = get_logger(__name__)
@pytest.fixture
def destination_account(pre: Alloc) -> Address:
"""Destination account for the blob transactions."""
return pre.fund_eoa(amount=0)
@pytest.fixture
def tx_value() -> int:
"""
Value contained by the transactions sent during test.
Can be overloaded by a test case to provide a custom transaction value.
"""
return 1
@pytest.fixture
def tx_gas() -> int:
"""Gas allocated to transactions sent during test."""
return 21_000
@pytest.fixture
def block_base_fee_per_gas() -> int:
"""Return default max fee per gas for transactions sent during test."""
return 7
@pytest.fixture
def tx_calldata() -> bytes:
"""Calldata in transactions sent during test."""
return b""
@pytest.fixture(autouse=True)
def parent_excess_blobs() -> int:
"""
Excess blobs of the parent block.
Can be overloaded by a test case to provide a custom parent excess blob
count.
"""
return 10 # Defaults to a blob gas price of 1.
@pytest.fixture(autouse=True)
def parent_blobs() -> int:
"""
Blobs of the parent blob.
Can be overloaded by a test case to provide a custom parent blob count.
"""
return 0
@pytest.fixture
def excess_blob_gas(
fork: Fork,
parent_excess_blobs: int | None,
parent_blobs: int | None,
block_base_fee_per_gas: int,
) -> int | None:
"""
Calculate the excess blob gas of the block under test from the parent
block.
Value can be overloaded by a test case to provide a custom excess blob gas.
"""
if parent_excess_blobs is None or parent_blobs is None:
return None
excess_blob_gas = fork.excess_blob_gas_calculator()
return excess_blob_gas(
parent_excess_blobs=parent_excess_blobs,
parent_blob_count=parent_blobs,
parent_base_fee_per_gas=block_base_fee_per_gas,
)
@pytest.fixture
def blob_gas_price(
fork: Fork,
excess_blob_gas: int | None,
) -> int | None:
"""Return blob gas price for the block of the test."""
if excess_blob_gas is None:
return None
get_blob_gas_price = fork.blob_gas_price_calculator()
return get_blob_gas_price(
excess_blob_gas=excess_blob_gas,
)
@pytest.fixture
def txs_versioned_hashes(txs_blobs: List[List[Blob]]) -> List[List[bytes]]:
"""List of blob versioned hashes derived from the blobs."""
return [[blob.versioned_hash for blob in blob_tx] for blob_tx in txs_blobs]
@pytest.fixture
def tx_max_fee_per_blob_gas( # noqa: D103
blob_gas_price: Optional[int],
) -> int:
"""
Max fee per blob gas for transactions sent during test.
By default, it is set to the blob gas price of the block.
Can be overloaded by a test case to test rejection of transactions where
the max fee per blob gas is insufficient.
"""
if blob_gas_price is None:
# When fork transitioning, the default blob gas price is 1.
return 1
return blob_gas_price
@pytest.fixture
def tx_error() -> Optional[TransactionException]:
"""
Even though the final block we are producing in each of these tests is
invalid, and some of the transactions will be invalid due to the format in
the final block, none of the transactions should be rejected by the
transition tool because they are being sent to it with the correct format.
"""
return None
@pytest.fixture(autouse=True)
def txs( # noqa: D103
pre: Alloc,
destination_account: Optional[Address],
tx_gas: int,
tx_value: int,
tx_calldata: bytes,
tx_max_fee_per_blob_gas: int,
txs_versioned_hashes: List[List[bytes]],
tx_error: Optional[TransactionException],
txs_blobs: List[List[Blob]],
fork: Fork,
) -> List[NetworkWrappedTransaction | Transaction]:
"""Prepare the list of transactions that are sent during the test."""
if len(txs_blobs) != len(txs_versioned_hashes):
raise ValueError("txs_blobs and txs_versioned_hashes should have the same length")
txs: List[NetworkWrappedTransaction | Transaction] = []
for tx_blobs, tx_versioned_hashes in zip(txs_blobs, txs_versioned_hashes, strict=False):
tx = Transaction(
# type=3,
sender=pre.fund_eoa(),
to=destination_account,
value=tx_value,
gas_limit=tx_gas,
data=tx_calldata,
max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
access_list=[],
blob_versioned_hashes=tx_versioned_hashes,
error=tx_error,
)
network_wrapped_tx = NetworkWrappedTransaction(
tx=tx,
blob_objects=tx_blobs,
wrapper_version=fork.full_blob_tx_wrapper_version(),
)
txs.append(network_wrapped_tx)
return txs
def generate_valid_blob_tests(
fork: Fork,
) -> List:
"""
Return a list of the 8 most important tests for valid blob transactions
parametrized for each different fork.
"""
max_blobs_per_block = fork.max_blobs_per_block()
max_blobs_per_tx = fork.max_blobs_per_tx()
target_blobs_per_block = fork.target_blobs_per_block()
logger.debug(f"MAX_BLOBS_PER_BLOCK value for fork {fork}: {max_blobs_per_block}")
logger.debug(f"MAX_BLOBS_PER_TX value for fork {fork}: {max_blobs_per_tx}")
logger.debug(f"TARGET_BLOBS_PER_BLOCK value for fork {fork}: {target_blobs_per_block}")
# Calculate ascending pattern that fits within target_blobs_per_block
ascending_txs = []
total_blobs = 0
blob_offset = 0
for tx_size in range(1, max_blobs_per_tx + 1):
if total_blobs + tx_size <= target_blobs_per_block:
ascending_txs.append([Blob.from_fork(fork, blob_offset + j) for j in range(tx_size)])
total_blobs += tx_size
blob_offset += tx_size
else:
break
return [
# Basic single blob transaction
pytest.param(
[ # Txs
[ # Blobs per transaction
Blob.from_fork(fork),
]
],
id="single_blob_transaction",
),
# Max blobs per transaction (single tx with max blobs)
pytest.param(
[[Blob.from_fork(fork, s) for s in range(max_blobs_per_tx)]],
id="max_blobs_per_tx",
),
# Max blobs per block distributed across multiple txs
pytest.param(
[
[
Blob.from_fork(fork, s),
]
for s in range(max_blobs_per_block)
],
id="max_blobs_per_block",
),
# Target blobs per block distributed across multiple txs
pytest.param(
[
[
Blob.from_fork(fork, s),
]
for s in range(target_blobs_per_block)
],
id="target_blobs_per_block",
),
# Two transactions with equal blob distribution
pytest.param(
[
[Blob.from_fork(fork, s) for s in range(target_blobs_per_block // 2)],
[
Blob.from_fork(fork, s + target_blobs_per_block // 2)
for s in range(target_blobs_per_block // 2)
],
],
id="two_tx_equal_blobs",
),
# Three transactions with equal blob distribution
pytest.param(
[
[Blob.from_fork(fork, s) for s in range(target_blobs_per_block // 3)],
[
Blob.from_fork(fork, s + target_blobs_per_block // 3)
for s in range(target_blobs_per_block // 3)
],
[
Blob.from_fork(fork, s + 2 * (target_blobs_per_block // 3))
for s in range(target_blobs_per_block // 3)
],
],
id="three_tx_equal_blobs",
),
# Mixed distribution: one max tx + remaining as singles
pytest.param(
[ # Txs
[ # First tx with max blobs
Blob.from_fork(fork, s) for s in range(max_blobs_per_tx)
]
]
+ [
[ # Remaining txs with 1 blob each
Blob.from_fork(fork, max_blobs_per_tx + s),
]
for s in range(max_blobs_per_block - max_blobs_per_tx)
],
id="mixed_max_tx_plus_singles",
),
# Ascending pattern: 1, 2, 3... blobs per tx
pytest.param(
ascending_txs,
id="ascending_blob_pattern",
),
]
@pytest.mark.parametrize_by_fork(
"txs_blobs",
generate_valid_blob_tests,
)
@pytest.mark.exception_test
@pytest.mark.valid_from("Cancun")
def test_get_blobs(
blobs_test: BlobsTestFiller,
pre: Alloc,
txs: List[NetworkWrappedTransaction | Transaction],
) -> None:
"""
Test valid blob combinations where one or more txs in the block serialized
version contain a full blob (network version) tx.
"""
blobs_test(pre=pre, txs=txs)
@pytest.mark.parametrize_by_fork(
"txs_blobs",
generate_valid_blob_tests,
)
@pytest.mark.exception_test
@pytest.mark.valid_from("Cancun")
def test_get_blobs_nonexisting(
blobs_test: BlobsTestFiller,
pre: Alloc,
txs: List[NetworkWrappedTransaction | Transaction],
) -> None:
"""
Test that ensures clients respond with 'null' when at least one requested
blob is not available.
"""
nonexisting_blob_hashes = [Hash(sha256(str(i).encode()).digest()) for i in range(5)]
blobs_test(pre=pre, txs=txs, nonexisting_blob_hashes=nonexisting_blob_hashes)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7594_peerdas/__init__.py | tests/osaka/eip7594_peerdas/__init__.py | """
Test suite for
[EIP-7594: PeerDAS - Peer Data Availability
Sampling](https://eips.ethereum.org/EIPS/eip-7594).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7823_modexp_upper_bounds/spec.py | tests/osaka/eip7823_modexp_upper_bounds/spec.py | """Defines EIP-7823 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7823 = ReferenceSpec("EIPS/eip-7823.md", "c8321494fdfbfda52ad46c3515a7ca5dc86b857c")
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py | tests/osaka/eip7823_modexp_upper_bounds/test_modexp_upper_bounds.py | """
Test [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823).
"""
from typing import Dict
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Bytes,
StateTestFiller,
Transaction,
keccak256,
)
from ethereum_test_vm import Opcodes as Op
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
from ..eip7883_modexp_gas_increase.spec import Spec
from .spec import ref_spec_7823
REFERENCE_SPEC_GIT_PATH = ref_spec_7823.git_path
REFERENCE_SPEC_VERSION = ref_spec_7823.version
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize(
"modexp_input,modexp_expected,call_succeeds",
[
pytest.param(
ModExpInput(
base=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"\0",
modulus=b"\2",
),
Spec.modexp_error,
False,
id="excess_length_base",
),
pytest.param(
ModExpInput(
base=b"\0",
exponent=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"\2",
),
Spec.modexp_error,
False,
id="excess_length_exponent",
),
pytest.param(
ModExpInput(
base=b"\0",
exponent=b"\0",
modulus=b"\0" * (Spec.MAX_LENGTH_BYTES) + b"\2",
),
Spec.modexp_error,
False,
id="excess_length_modulus",
),
pytest.param(
ModExpInput(
base=b"",
exponent=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"",
),
Spec.modexp_error,
False,
id="exp_1025_base_0_mod_0",
),
pytest.param(
ModExpInput(
base=b"",
# Non-zero exponent is cancelled with zero multiplication
# complexity pre EIP-7823.
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"",
),
Spec.modexp_error,
False,
id="expFF_1025_base_0_mod_0",
),
pytest.param(
ModExpInput(
base=b"\0" * Spec.MAX_LENGTH_BYTES,
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"",
),
Spec.modexp_error,
False,
id="expFF_1025_base_1024_mod_0",
),
pytest.param(
ModExpInput(
base=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"",
),
Spec.modexp_error,
False,
id="expFF_1025_base_1025_mod_0",
),
pytest.param(
ModExpInput(
base=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"",
modulus=b"",
),
Spec.modexp_error,
False,
id="exp_0_base_1025_mod_0",
),
pytest.param(
ModExpInput(
base=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"",
modulus=b"\2",
),
Spec.modexp_error,
False,
id="exp_0_base_1025_mod_1",
),
pytest.param(
ModExpInput(
base=b"",
exponent=b"",
modulus=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="exp_0_base_0_mod_1025",
),
pytest.param(
ModExpInput(
base=b"\1",
exponent=b"",
modulus=b"\0" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="exp_0_base_1_mod_1025",
),
pytest.param(
ModExpInput(
base=b"",
exponent=Bytes("80"),
modulus=b"",
declared_exponent_length=2**64,
),
Spec.modexp_error,
False,
id="exp_2_pow_64_base_0_mod_0",
),
# Implementation coverage tests
pytest.param(
ModExpInput(
base=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="all_exceed_check_ordering",
),
pytest.param(
ModExpInput(
base=b"\x00" * Spec.MAX_LENGTH_BYTES,
exponent=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
modulus=b"\xff" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="exp_mod_exceed_base_ok",
),
pytest.param(
ModExpInput(
# Bitwise pattern for Nethermind optimization
base=b"\xaa" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"\x55" * Spec.MAX_LENGTH_BYTES,
modulus=b"\xff" * Spec.MAX_LENGTH_BYTES,
),
Spec.modexp_error,
False,
id="bitwise_pattern_base_exceed",
),
pytest.param(
ModExpInput(
base=b"",
exponent=b"",
modulus=b"",
# Near max uint64 for revm conversion test
declared_base_length=2**63 - 1,
declared_exponent_length=1,
declared_modulus_length=1,
),
Spec.modexp_error,
False,
id="near_uint64_max_base",
),
pytest.param(
ModExpInput(
base=b"\x01" * Spec.MAX_LENGTH_BYTES,
exponent=b"",
modulus=b"\x02" * (Spec.MAX_LENGTH_BYTES + 1),
declared_exponent_length=0,
),
Spec.modexp_error,
False,
id="zero_exp_mod_exceed",
),
pytest.param(
ModExpInput(
base=b"\x01" * Spec.MAX_LENGTH_BYTES,
exponent=b"\x00",
modulus=b"\x02",
),
b"\x01",
True,
id="base_boundary",
),
pytest.param(
ModExpInput(
base=b"\x01",
exponent=b"\x00" * Spec.MAX_LENGTH_BYTES,
modulus=b"\x02",
),
b"\x01",
True,
id="exp_boundary",
),
pytest.param(
ModExpInput(
base=b"\x01",
exponent=b"\x00",
modulus=b"\x02" * Spec.MAX_LENGTH_BYTES,
),
b"\x01".rjust(Spec.MAX_LENGTH_BYTES, b"\x00"),
True,
id="mod_boundary",
),
pytest.param(
ModExpInput(
base=b"\x01" * Spec.MAX_LENGTH_BYTES,
exponent=b"\x00",
modulus=b"\x02" * Spec.MAX_LENGTH_BYTES,
),
b"\x01".rjust(Spec.MAX_LENGTH_BYTES, b"\x00"),
True,
id="base_mod_boundary",
),
],
)
@EIPChecklist.Precompile.Test.Inputs.MaxValues
@EIPChecklist.Precompile.Test.OutOfBounds.Max
def test_modexp_upper_bounds(
state_test: StateTestFiller,
modexp_input: ModExpInput,
modexp_expected: bytes,
precompile_gas: int,
fork: Fork,
tx: Transaction,
post: Dict,
pre: Alloc,
) -> None:
"""Test the MODEXP precompile input bounds."""
state_test(pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"modexp_input,modexp_expected",
[
pytest.param(
ModExpInput(
base=b"\1" * (Spec.MAX_LENGTH_BYTES + 1),
exponent=b"\0",
modulus=b"\2",
),
b"\1",
id="base_1_exp_0_mod_2",
),
],
)
@pytest.mark.valid_at_transition_to("Osaka", subsequent_forks=True)
def test_modexp_upper_bounds_fork_transition(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
precompile_gas: int,
modexp_input: ModExpInput,
modexp_expected: bytes,
) -> None:
"""
Test MODEXP upper bounds enforcement transition from before to after Osaka
hard fork.
"""
call_code = Op.CALL(
address=Spec.MODEXP_ADDRESS,
args_size=Op.CALLDATASIZE,
)
code = (
Op.CALLDATACOPY(size=Op.CALLDATASIZE)
+ Op.SSTORE(
Op.TIMESTAMP,
call_code,
)
+ Op.RETURNDATACOPY(size=Op.RETURNDATASIZE())
+ Op.SSTORE(
Op.AND(Op.TIMESTAMP, 0xFF),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
)
senders = [pre.fund_eoa() for _ in range(3)]
contracts = [pre.deploy_contract(code) for _ in range(3)]
timestamps = [14_999, 15_000, 15_001] # Before, at, and after transition
expected_results = [True, False, False]
blocks = [
Block(
timestamp=ts,
txs=[
Transaction(
to=contract,
data=bytes(modexp_input),
sender=sender,
gas_limit=6_000_000,
)
],
)
for ts, contract, sender in zip(timestamps, contracts, senders, strict=False)
]
post = {
contract: Account(
storage={
ts: expected,
ts & 0xFF: keccak256(modexp_expected)
if expected
else keccak256(Spec.modexp_error),
}
)
for contract, ts, expected in zip(contracts, timestamps, expected_results, strict=False)
}
blockchain_test(
pre=pre,
blocks=blocks,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7823_modexp_upper_bounds/conftest.py | tests/osaka/eip7823_modexp_upper_bounds/conftest.py | """Conftest for EIP-7823 tests."""
from typing import Dict
import pytest
from ethereum_test_forks import Fork, Osaka
from ethereum_test_tools import Account, Address, Alloc, Storage, Transaction, keccak256
from ethereum_test_types import Environment
from ethereum_test_vm import Opcodes as Op
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
from ..eip7883_modexp_gas_increase.spec import Spec, Spec7883
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed. Note:
Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@pytest.fixture
def call_succeeds(
total_gas_used: int, fork: Fork, env: Environment, modexp_input: ModExpInput
) -> bool:
"""
By default, depending on the expected output, we can deduce if the call is
expected to succeed or fail.
"""
# Transaction gas limit exceeded
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
if total_gas_used > tx_gas_limit_cap:
return False
# Input length exceeded
base_length, exp_length, mod_length = modexp_input.get_declared_lengths()
if (
base_length > Spec.MAX_LENGTH_BYTES
or exp_length > Spec.MAX_LENGTH_BYTES
or mod_length > Spec.MAX_LENGTH_BYTES
) and fork >= Osaka:
return False
return True
@pytest.fixture
def gas_measure_contract(
pre: Alloc,
fork: Fork,
modexp_expected: bytes,
precompile_gas: int,
call_contract_post_storage: Storage,
call_succeeds: bool,
) -> Address:
"""
Deploys a contract that measures ModExp gas consumption and execution
result.
Always stored:
storage[0]: precompile call success
storage[1]: return data length from precompile
Only if the precompile call succeeds:
storage[2]: gas consumed by precompile
storage[3]: hash of return data from precompile
"""
call_code = Op.CALL(
precompile_gas,
Spec.MODEXP_ADDRESS,
0,
0,
Op.CALLDATASIZE(),
0,
0,
)
gas_costs = fork.gas_costs()
extra_gas = (
gas_costs.G_WARM_ACCOUNT_ACCESS
+ (gas_costs.G_VERY_LOW * (len(Op.CALL.kwargs) - 1))
+ gas_costs.G_BASE # CALLDATASIZE
+ gas_costs.G_BASE # GAS
)
# Build the gas measurement contract code
# Stack operations:
# [gas_start]
# [gas_start, call_result]
# [gas_start, call_result, gas_end]
# [gas_start, gas_end, call_result]
call_result_measurement = Op.GAS + call_code + Op.GAS + Op.SWAP1
# Calculate gas consumed: gas_start - (gas_end + extra_gas)
# Stack Operation:
# [gas_start, gas_end]
# [gas_start, gas_end, extra_gas]
# [gas_start, gas_end + extra_gas]
# [gas_end + extra_gas, gas_start]
# [gas_consumed]
gas_calculation = Op.PUSH2[extra_gas] + Op.ADD + Op.SWAP1 + Op.SUB
code = (
Op.CALLDATACOPY(dest_offset=0, offset=0, size=Op.CALLDATASIZE)
+ Op.SSTORE(call_contract_post_storage.store_next(call_succeeds), call_result_measurement)
+ Op.SSTORE(
call_contract_post_storage.store_next(len(modexp_expected) if call_succeeds else 0),
Op.RETURNDATASIZE(),
)
)
if call_succeeds:
code += Op.SSTORE(call_contract_post_storage.store_next(precompile_gas), gas_calculation)
code += Op.RETURNDATACOPY(dest_offset=0, offset=0, size=Op.RETURNDATASIZE())
code += Op.SSTORE(
call_contract_post_storage.store_next(keccak256(bytes(modexp_expected))),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
return pre.deploy_contract(code)
@pytest.fixture
def precompile_gas(fork: Fork, modexp_input: ModExpInput) -> int:
"""
Calculate gas cost for the ModExp precompile and verify it matches expected
gas.
"""
spec = Spec if fork < Osaka else Spec7883
try:
calculated_gas = spec.calculate_gas_cost(modexp_input)
return calculated_gas
except Exception:
# Used for `test_modexp_invalid_inputs` we expect the call to not
# succeed. Return is for completeness.
return 500 if fork >= Osaka else 200
@pytest.fixture
def tx(
pre: Alloc,
gas_measure_contract: Address,
modexp_input: ModExpInput,
tx_gas_limit: int,
) -> Transaction:
"""Transaction to measure gas consumption of the ModExp precompile."""
return Transaction(
sender=pre.fund_eoa(),
to=gas_measure_contract,
data=bytes(modexp_input),
gas_limit=tx_gas_limit,
)
@pytest.fixture
def total_gas_used(
fork: Fork, modexp_expected: bytes, modexp_input: ModExpInput, precompile_gas: int
) -> int:
"""
Transaction gas limit used for the test (Can be overridden in the test).
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 500_000
total_gas = (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=bytes(modexp_input))
+ memory_expansion_gas_calculator(new_bytes=len(bytes(modexp_input)))
+ precompile_gas
)
return total_gas
@pytest.fixture
def tx_gas_limit(total_gas_used: int, fork: Fork, env: Environment) -> int:
"""
Transaction gas limit used for the test (Can be overridden in the test).
"""
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
return min(tx_gas_limit_cap, total_gas_used)
@pytest.fixture
def post(
gas_measure_contract: Address,
call_contract_post_storage: Storage,
) -> Dict[Address, Account]:
"""Return expected post state with gas consumption check."""
return {
gas_measure_contract: Account(storage=call_contract_post_storage),
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7823_modexp_upper_bounds/__init__.py | tests/osaka/eip7823_modexp_upper_bounds/__init__.py | """
Tests [EIP-7823: Set upper bounds for MODEXP](https://eips.ethereum.org/EIPS/eip-7823).
Test cases for EIP-7823: Set upper bounds for MODEXP.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/spec.py | tests/osaka/eip7883_modexp_gas_increase/spec.py | """Defines EIP-7883 specification constants and functions."""
from dataclasses import dataclass
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7883 = ReferenceSpec("EIPS/eip-7883.md", "13aa65810336d4f243d4563a828d5afe36035d23")
def ceiling_division(a: int, b: int) -> int:
"""
Calculate the ceil without using floating point. Used by many of the EVM's
formulas.
"""
return -(a // -b)
@dataclass(frozen=True)
class Spec:
"""Constants and helpers for the ModExp gas cost calculation."""
MODEXP_ADDRESS = 0x05
MIN_GAS = 200
LARGE_BASE_MODULUS_MULTIPLIER = 1
MAX_LENGTH_THRESHOLD = 32
EXPONENT_BYTE_MULTIPLIER = 8
MAX_LENGTH_BYTES = 1024
WORD_SIZE = 8
EXPONENT_THRESHOLD = 32
GAS_DIVISOR = 3
# Arbitrary Test Constants
modexp_input = ModExpInput(
base="e8e77626586f73b955364c7b4bbf0bb7f7685ebd40e852b164633a4acbd3244c0001020304050607",
exponent="01ffffff",
modulus="f01681d2220bfea4bb888a5543db8c0916274ddb1ea93b144c042c01d8164c950001020304050607",
)
modexp_expected = bytes.fromhex(
"1abce71dc2205cce4eb6934397a88136f94641342e283cbcd30e929e85605c6718ed67f475192ffd"
)
modexp_error = bytes()
@classmethod
def calculate_multiplication_complexity(cls, base_length: int, modulus_length: int) -> int:
"""Calculate the multiplication complexity of the ModExp precompile."""
max_length = max(base_length, modulus_length)
words = ceiling_division(max_length, cls.WORD_SIZE)
if max_length <= cls.MAX_LENGTH_THRESHOLD:
return words**2
return cls.LARGE_BASE_MODULUS_MULTIPLIER * words**2
@classmethod
def calculate_iteration_count(cls, modexp_input: ModExpInput) -> int:
"""
Calculate the iteration count of the ModExp precompile. This handles
length mismatch cases by using declared lengths from the raw input and
only the first 32 bytes of exponent data for iteration calculation.
"""
_, exponent_length, _ = modexp_input.get_declared_lengths()
exponent_head = modexp_input.get_exponent_head()
if exponent_length <= cls.EXPONENT_THRESHOLD and exponent_head == 0:
iteration_count = 0
elif exponent_length <= cls.EXPONENT_THRESHOLD:
iteration_count = exponent_head.bit_length() - 1 if exponent_head > 0 else 0
else:
# For large exponents: length_part + bits from first 32 bytes
length_part = cls.EXPONENT_BYTE_MULTIPLIER * (exponent_length - 32)
bits_part = exponent_head.bit_length() - 1 if exponent_head > 0 else 0
iteration_count = length_part + bits_part
return max(iteration_count, 1)
@classmethod
def calculate_gas_cost(cls, modexp_input: ModExpInput) -> int:
"""
Calculate the ModExp gas cost according to EIP-2565 specification,
overridden by the constants within `Spec7883` when calculating for the
EIP-7883 specification.
"""
base_length, _, modulus_length = modexp_input.get_declared_lengths()
multiplication_complexity = cls.calculate_multiplication_complexity(
base_length, modulus_length
)
iteration_count = cls.calculate_iteration_count(modexp_input)
return max(cls.MIN_GAS, (multiplication_complexity * iteration_count // cls.GAS_DIVISOR))
@dataclass(frozen=True)
class Spec7883(Spec):
"""
Constants and helpers for the ModExp gas cost increase EIP. These override
the original Spec class variables for EIP-7883.
"""
MODEXP_ADDRESS = 0x05
MIN_GAS = 500
LARGE_BASE_MODULUS_MULTIPLIER = 2
EXPONENT_BYTE_MULTIPLIER = 16
GAS_DIVISOR = 1 # Overrides the original Spec class GAS_DIVISOR
@classmethod
def calculate_multiplication_complexity(cls, base_length: int, modulus_length: int) -> int:
"""
Calculate the multiplication complexity of the ModExp precompile for
EIP-7883.
"""
max_length = max(base_length, modulus_length)
words = ceiling_division(max_length, cls.WORD_SIZE)
complexity = 16
if max_length > cls.MAX_LENGTH_THRESHOLD:
complexity = cls.LARGE_BASE_MODULUS_MULTIPLIER * words**2
return complexity
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/conftest.py | tests/osaka/eip7883_modexp_gas_increase/conftest.py | """Shared pytest definitions for EIP-7883 tests."""
from typing import Dict
import pytest
from ethereum_test_forks import Fork, Osaka
from ethereum_test_tools import (
Account,
Address,
Alloc,
Bytes,
Environment,
Storage,
Transaction,
keccak256,
)
from ethereum_test_vm import Opcodes as Op
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
from .spec import Spec, Spec7883
@pytest.fixture
def gas_old() -> int | None:
"""Get old gas cost from the test vector if any."""
return None
@pytest.fixture
def gas_new() -> int | None:
"""Get new gas cost from the test vector if any."""
return None
@pytest.fixture
def call_opcode() -> Op:
"""Return call operation used to call the precompile."""
return Op.CALL
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed. Note:
Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@pytest.fixture
def total_tx_gas_needed(
fork: Fork, modexp_expected: bytes, modexp_input: ModExpInput, precompile_gas: int
) -> int:
"""Calculate total tx gas needed for the transaction."""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
sstore_gas = fork.gas_costs().G_STORAGE_SET * (len(modexp_expected) // 32)
extra_gas = 100_000
return (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=bytes(modexp_input))
+ memory_expansion_gas_calculator(new_bytes=len(bytes(modexp_input)))
+ precompile_gas
+ sstore_gas
)
@pytest.fixture
def exceeds_tx_gas_cap(total_tx_gas_needed: int, fork: Fork, env: Environment) -> bool:
"""Determine if total gas requirements exceed transaction gas cap."""
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
return total_tx_gas_needed > tx_gas_limit_cap
@pytest.fixture
def expected_tx_cap_fail() -> bool:
"""Whether this test is expected to fail due to transaction gas cap."""
return False
@pytest.fixture
def call_succeeds(exceeds_tx_gas_cap: bool, expected_tx_cap_fail: bool) -> bool:
"""
Determine whether the ModExp precompile call should succeed or fail. By
default, depending on the expected output, we assume it succeeds. Under
EIP-7825, transactions requiring more gas than the cap should fail only if
unexpected.
"""
if exceeds_tx_gas_cap and not expected_tx_cap_fail:
pytest.fail(
"Test unexpectedly exceeds tx gas cap. "
"Either mark with `expected_tx_cap_fail=True` or adjust inputs."
)
return not exceeds_tx_gas_cap
@pytest.fixture
def gas_measure_contract(
pre: Alloc,
call_opcode: Op,
fork: Fork,
modexp_expected: bytes,
precompile_gas: int,
precompile_gas_modifier: int,
call_contract_post_storage: Storage,
call_succeeds: bool,
) -> Address:
"""
Deploys a contract that measures ModExp gas consumption and execution
result.
Always stored:
storage[0]: precompile call success
storage[1]: return data length from precompile
Only if the precompile call succeeds:
storage[2]: gas consumed by precompile
storage[3]: hash of return data from precompile
"""
assert call_opcode in [Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL]
value = [0] if call_opcode in [Op.CALL, Op.CALLCODE] else []
gas_used = (
precompile_gas + precompile_gas_modifier
if precompile_gas_modifier != float("inf")
else Environment().gas_limit
)
call_code = call_opcode(
gas_used,
Spec.MODEXP_ADDRESS,
*value,
0,
Op.CALLDATASIZE(),
0,
0,
)
gas_costs = fork.gas_costs()
extra_gas = (
gas_costs.G_WARM_ACCOUNT_ACCESS
+ (gas_costs.G_VERY_LOW * (len(call_opcode.kwargs) - 1))
+ gas_costs.G_BASE # CALLDATASIZE
+ gas_costs.G_BASE # GAS
)
# Build the gas measurement contract code
# Stack operations:
# [gas_start]
# [gas_start, call_result]
# [gas_start, call_result, gas_end]
# [gas_start, gas_end, call_result]
call_result_measurement = Op.GAS + call_code + Op.GAS + Op.SWAP1
# Calculate gas consumed: gas_start - (gas_end + extra_gas)
# Stack Operation:
# [gas_start, gas_end]
# [gas_start, gas_end, extra_gas]
# [gas_start, gas_end + extra_gas]
# [gas_end + extra_gas, gas_start]
# [gas_consumed]
gas_calculation = Op.PUSH2[extra_gas] + Op.ADD + Op.SWAP1 + Op.SUB
code = (
Op.CALLDATACOPY(dest_offset=0, offset=0, size=Op.CALLDATASIZE)
+ Op.SSTORE(call_contract_post_storage.store_next(call_succeeds), call_result_measurement)
+ Op.SSTORE(
call_contract_post_storage.store_next(len(modexp_expected) if call_succeeds else 0),
Op.RETURNDATASIZE(),
)
)
if call_succeeds:
code += Op.SSTORE(call_contract_post_storage.store_next(precompile_gas), gas_calculation)
code += Op.RETURNDATACOPY(dest_offset=0, offset=0, size=Op.RETURNDATASIZE())
code += Op.SSTORE(
call_contract_post_storage.store_next(keccak256(Bytes(modexp_expected))),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
return pre.deploy_contract(code)
@pytest.fixture
def precompile_gas(
fork: Fork, modexp_input: ModExpInput, gas_old: int | None, gas_new: int | None
) -> int:
"""
Calculate gas cost for the ModExp precompile and verify it matches expected
gas.
"""
spec = Spec if fork < Osaka else Spec7883
try:
calculated_gas = spec.calculate_gas_cost(modexp_input)
if gas_old is not None and gas_new is not None:
expected_gas = gas_old if fork < Osaka else gas_new
assert calculated_gas == expected_gas, (
f"Calculated gas {calculated_gas} != Vector gas {expected_gas}\n"
f"Lengths: base: {hex(len(modexp_input.base))} ({len(modexp_input.base)}), "
f"exponent: {hex(len(modexp_input.exponent))} ({len(modexp_input.exponent)}), "
f"modulus: {hex(len(modexp_input.modulus))} ({len(modexp_input.modulus)})\n"
f"Exponent: {modexp_input.exponent} "
f"({int.from_bytes(modexp_input.exponent, byteorder='big')})"
)
return calculated_gas
except Exception:
# Used for `test_modexp_invalid_inputs` we expect the call to not
# succeed. Return is for completeness.
return 500 if fork >= Osaka else 200
@pytest.fixture
def precompile_gas_modifier() -> int:
"""Return the gas modifier for the ModExp precompile."""
return 0
@pytest.fixture
def tx(
pre: Alloc,
gas_measure_contract: Address,
modexp_input: ModExpInput,
tx_gas_limit: int,
) -> Transaction:
"""Transaction to measure gas consumption of the ModExp precompile."""
return Transaction(
sender=pre.fund_eoa(),
to=gas_measure_contract,
data=bytes(modexp_input),
gas_limit=tx_gas_limit,
)
@pytest.fixture
def tx_gas_limit(total_tx_gas_needed: int, fork: Fork, env: Environment) -> int:
"""
Transaction gas limit used for the test (Can be overridden in the test).
"""
tx_gas_limit_cap = fork.transaction_gas_limit_cap() or env.gas_limit
return min(tx_gas_limit_cap, total_tx_gas_needed)
@pytest.fixture
def post(
gas_measure_contract: Address,
call_contract_post_storage: Storage,
) -> Dict[Address, Account]:
"""Return expected post state with gas consumption check."""
return {
gas_measure_contract: Account(storage=call_contract_post_storage),
}
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/helpers.py | tests/osaka/eip7883_modexp_gas_increase/helpers.py | """Helper functions for the EIP-7883 ModExp gas cost increase tests."""
import os
from typing import Annotated, Any, List
import pytest
from pydantic import BaseModel, ConfigDict, Field, PlainValidator, RootModel, TypeAdapter
from pydantic.alias_generators import to_pascal
from ethereum_test_tools import Bytes
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
def current_python_script_directory(*args: str) -> str:
"""Get the current Python script directory."""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), *args)
class Vector(BaseModel):
"""A vector for the ModExp gas cost increase tests."""
modexp_input: Annotated[ModExpInput, PlainValidator(ModExpInput.from_bytes)] = Field(
..., alias="Input"
)
modexp_expected: Bytes = Field(..., alias="Expected")
name: str = Field(..., alias="Name")
gas_old: int | None = Field(default=None, alias="GasOld")
gas_new: int | None = Field(default=None, alias="GasNew")
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self) -> Any:
"""
Convert the test vector to a tuple that can be used as a parameter in a
pytest test.
"""
return pytest.param(
self.modexp_input, self.modexp_expected, self.gas_old, self.gas_new, id=self.name
)
class VectorList(RootModel):
"""A list of test vectors for the ModExp gas cost increase tests."""
root: List[Vector]
VectorListAdapter = TypeAdapter(VectorList)
def vectors_from_file(filename: str) -> List:
"""Load test vectors from a file."""
with open(
current_python_script_directory(
"vector",
filename,
),
"rb",
) as f:
return [v.to_pytest_param() for v in VectorListAdapter.validate_json(f.read()).root]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/__init__.py | tests/osaka/eip7883_modexp_gas_increase/__init__.py | """
Tests for [EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py | tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds_transition.py | """
Test ModExp gas cost transition from EIP-7883 before & after the Osaka fork.
"""
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork
from ethereum_test_tools import Account, Alloc, Block, BlockchainTestFiller, Transaction, keccak256
from ethereum_test_vm import Opcodes as Op
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
from .spec import Spec, ref_spec_7883
REFERENCE_SPEC_GIT_PATH = ref_spec_7883.git_path
REFERENCE_SPEC_VERSION = ref_spec_7883.version
pytestmark = pytest.mark.valid_at_transition_to("Osaka", subsequent_forks=True)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,gas_old,gas_new",
[
pytest.param(Spec.modexp_input, Spec.modexp_expected, 200, 1200),
],
ids=[""],
)
@EIPChecklist.GasCostChanges.Test.ForkTransition.Before()
@EIPChecklist.GasCostChanges.Test.ForkTransition.After()
@EIPChecklist.Precompile.Test.ForkTransition.After.Warm()
@EIPChecklist.GasCostChanges.Test.ForkTransition.Before()
@EIPChecklist.GasCostChanges.Test.ForkTransition.After()
def test_modexp_fork_transition(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
gas_old: int,
gas_new: int,
tx_gas_limit: int,
modexp_input: ModExpInput,
modexp_expected: bytes,
) -> None:
"""
Test ModExp gas cost transition from EIP-7883 before and after the Osaka
hard fork.
"""
call_code = Op.CALL(
address=Spec.MODEXP_ADDRESS,
args_size=Op.CALLDATASIZE,
)
gas_costs = fork.gas_costs()
extra_gas = (
gas_costs.G_WARM_ACCOUNT_ACCESS
+ (gas_costs.G_VERY_LOW * (len(Op.CALL.kwargs) - 2))
+ (gas_costs.G_BASE * 3)
)
code = (
Op.CALLDATACOPY(dest_offset=0, offset=0, size=Op.CALLDATASIZE)
+ Op.GAS # [gas_start]
+ call_code # [gas_start, call_result]
+ Op.GAS # [gas_start, call_result, gas_end]
+ Op.SWAP1 # [gas_start, gas_end, call_result]
+ Op.POP # [gas_start, gas_end]
+ Op.PUSH2[extra_gas] # [gas_start, gas_end, extra_gas]
+ Op.ADD # [gas_start, gas_end + extra_gas]
+ Op.SWAP1 # [gas_end + extra_gas, gas_start]
+ Op.SUB # [gas_start - (gas_end + extra_gas)]
+ Op.TIMESTAMP # [gas_start - (gas_end + extra_gas), TIMESTAMP]
+ Op.SSTORE # []
)
# Verification the precompile call result
code += Op.RETURNDATACOPY(dest_offset=0, offset=0, size=Op.RETURNDATASIZE()) + Op.SSTORE(
Op.AND(Op.TIMESTAMP, 0xFF),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
senders = [pre.fund_eoa() for _ in range(3)]
contracts = [pre.deploy_contract(code) for _ in range(3)]
timestamps = [14_999, 15_000, 15_001]
gas_values = [gas_old, gas_new, gas_new]
blocks = [
Block(
timestamp=ts,
txs=[
Transaction(
to=contract,
data=modexp_input,
sender=sender,
gas_limit=tx_gas_limit,
)
],
)
for ts, contract, sender in zip(timestamps, contracts, senders, strict=False)
]
post = {
contract: Account(storage={ts: gas, ts & 0xFF: keccak256(bytes(modexp_expected))})
for contract, ts, gas in zip(contracts, timestamps, gas_values, strict=False)
}
blockchain_test(
pre=pre,
blocks=blocks,
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py | tests/osaka/eip7883_modexp_gas_increase/test_modexp_thresholds.py | """
EIP-7883 ModExp gas cost increase tests.
Tests for ModExp gas cost increase in
[EIP-7883: ModExp Gas Cost Increase](https://eips.ethereum.org/EIPS/eip-7883).
"""
from typing import Dict, Generator
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork, Osaka
from ethereum_test_tools import (
Alloc,
Environment,
StateTestFiller,
Storage,
Transaction,
keccak256,
)
from ethereum_test_types.helpers import compute_create_address
from ethereum_test_vm import Opcodes as Op
from ...byzantium.eip198_modexp_precompile.helpers import ModExpInput
from .helpers import vectors_from_file
from .spec import Spec, ref_spec_7883
REFERENCE_SPEC_GIT_PATH = ref_spec_7883.git_path
REFERENCE_SPEC_VERSION = ref_spec_7883.version
@pytest.mark.parametrize(
"modexp_input,modexp_expected,gas_old,gas_new",
vectors_from_file("vectors.json"),
ids=lambda v: v.name,
)
@EIPChecklist.Precompile.Test.Inputs.Valid()
@EIPChecklist.Precompile.Test.InputLengths.Dynamic.Valid()
@EIPChecklist.GasCostChanges.Test.GasUpdatesMeasurement()
@pytest.mark.valid_from("Berlin")
@pytest.mark.slow()
def test_vectors_from_eip(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""Test ModExp gas cost using the test vectors from EIP-7883."""
state_test(
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,gas_old,gas_new",
vectors_from_file("legacy.json"),
ids=lambda v: v.name,
)
@EIPChecklist.Precompile.Test.Inputs.Invalid()
@pytest.mark.valid_from("Berlin")
def test_vectors_from_legacy_tests(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""Test ModExp gas cost using the test vectors from legacy tests."""
state_test(
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"modexp_input,",
[
# These invalid inputs are from EIP-7823. Ref:
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7823.md#analysis
pytest.param(
bytes.fromhex("9e5faafc"),
id="invalid-case-1",
),
pytest.param(
bytes.fromhex("85474728"),
id="invalid-case-2",
),
pytest.param(
bytes.fromhex("9e281a98" + "00" * 54 + "021e19e0c9bab2400000"),
id="invalid-case-3",
),
],
)
@pytest.mark.parametrize(
"modexp_expected,call_succeeds",
[
pytest.param(bytes(), False),
],
ids=[""],
)
@EIPChecklist.Precompile.Test.Inputs.AllZeros
@pytest.mark.valid_from("Berlin")
def test_modexp_invalid_inputs(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""Test ModExp gas cost with invalid inputs."""
state_test(
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,call_succeeds",
[
pytest.param(
ModExpInput(
base="FF" * (Spec.MAX_LENGTH_BYTES + 1),
exponent="FF",
modulus="FF",
),
Spec.modexp_error,
False,
id="base-too-long",
),
pytest.param(
ModExpInput(
base="FF",
exponent="FF" * (Spec.MAX_LENGTH_BYTES + 1),
modulus="FF",
),
Spec.modexp_error,
False,
id="exponent-too-long",
),
pytest.param(
ModExpInput(
base="FF",
exponent="FF",
modulus="FF" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="modulus-too-long",
),
pytest.param(
ModExpInput(
base="FF" * (Spec.MAX_LENGTH_BYTES + 1),
exponent="FF",
modulus="FF" * (Spec.MAX_LENGTH_BYTES + 1),
),
Spec.modexp_error,
False,
id="base-modulus-too-long",
),
],
)
@EIPChecklist.Precompile.Test.OutOfBounds.MaxPlusOne()
@EIPChecklist.Precompile.Test.Inputs.Invalid.Corrupted()
@EIPChecklist.Precompile.Test.Inputs.Invalid()
@EIPChecklist.Precompile.Test.InputLengths.Dynamic.TooLong()
@pytest.mark.valid_from("Osaka")
def test_modexp_boundary_inputs(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""Test ModExp boundary inputs."""
state_test(
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"call_opcode",
[
Op.CALL,
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"modexp_input,modexp_expected",
[
pytest.param(Spec.modexp_input, Spec.modexp_expected, id="base-heavy"),
],
)
@EIPChecklist.Precompile.Test.CallContexts.Static()
@EIPChecklist.Precompile.Test.CallContexts.Delegate()
@EIPChecklist.Precompile.Test.CallContexts.Callcode()
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@pytest.mark.valid_from("Berlin")
def test_modexp_call_operations(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""Test ModExp call related operations with EIP-7883."""
state_test(
pre=pre,
tx=tx,
post=post,
)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,precompile_gas_modifier,call_succeeds",
[
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
1,
True,
id="extra_gas",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
0,
True,
id="exact_gas",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_error,
-1,
False,
id="insufficient_gas",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
float("inf"),
True,
id="excessive_gas",
),
],
)
@EIPChecklist.Precompile.Test.GasUsage.Dynamic()
@EIPChecklist.Precompile.Test.ExcessiveGasUsage()
@pytest.mark.valid_from("Berlin")
def test_modexp_gas_usage_contract_wrapper(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
post: Dict,
) -> None:
"""
Test ModExp gas cost with different gas modifiers using contract wrapper
calls.
"""
state_test(pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,precompile_gas_modifier,call_values,call_succeeds",
[
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
1,
0,
True,
id="extra_gas",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
0,
0,
True,
id="exact_gas",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
0,
1000,
True,
id="extra_value",
),
pytest.param(
Spec.modexp_input,
Spec.modexp_error,
-1,
0,
False,
id="insufficient_gas",
),
],
)
@EIPChecklist.Precompile.Test.CallContexts.TxEntry()
@EIPChecklist.Precompile.Test.ValueTransfer.NoFee()
@pytest.mark.valid_from("Berlin")
def test_modexp_used_in_transaction_entry_points(
state_test: StateTestFiller,
pre: Alloc,
tx: Transaction,
modexp_input: bytes,
tx_gas_limit: int,
call_values: int,
) -> None:
"""
Test ModExp using in transaction entry points with different precompile gas
modifiers.
"""
tx = Transaction(
to=Spec.MODEXP_ADDRESS,
sender=pre.fund_eoa(),
data=bytes(modexp_input),
gas_limit=tx_gas_limit,
value=call_values,
)
state_test(pre=pre, tx=tx, post={})
@pytest.mark.parametrize(
"modexp_input,modexp_expected",
[
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
id="valid_input",
)
],
)
@EIPChecklist.Precompile.Test.CallContexts.Initcode()
@pytest.mark.valid_from("Berlin")
def test_contract_creation_transaction(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
modexp_input: bytes,
modexp_expected: bytes,
) -> None:
"""Test the contract creation for the ModExp precompile."""
sender = pre.fund_eoa()
storage = Storage()
contract_address = compute_create_address(address=sender, nonce=0)
contract_bytecode = (
Op.CODECOPY(0, Op.SUB(Op.CODESIZE, len(bytes(modexp_input))), len(bytes(modexp_input)))
+ Op.CALL(
gas=1_000_000,
address=Spec.MODEXP_ADDRESS,
value=0,
args_offset=0,
args_size=len(bytes(modexp_input)),
ret_offset=0,
ret_size=len(bytes(modexp_expected)),
)
+ Op.SSTORE(storage.store_next(True), Op.DUP1())
+ Op.SSTORE(
storage.store_next(keccak256(bytes(modexp_expected))), Op.SHA3(0, Op.RETURNDATASIZE())
)
+ Op.SSTORE(storage.store_next(len(bytes(modexp_expected))), Op.RETURNDATASIZE())
+ Op.STOP
)
tx = Transaction(
sender=sender,
gas_limit=1_000_000,
to=None,
value=0,
data=contract_bytecode + bytes(modexp_input),
)
post = {
contract_address: {
"storage": storage,
}
}
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"modexp_input,modexp_expected",
[
pytest.param(
Spec.modexp_input,
Spec.modexp_expected,
id="valid_input",
),
],
)
@pytest.mark.parametrize("opcode", [Op.CREATE, Op.CREATE2])
@EIPChecklist.Precompile.Test.CallContexts.Initcode.CREATE()
@pytest.mark.valid_from("Berlin")
def test_contract_initcode(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
modexp_input: bytes,
modexp_expected: bytes,
opcode: Op,
) -> None:
"""Test ModExp behavior from contract creation."""
sender = pre.fund_eoa()
storage = Storage()
call_modexp_bytecode = (
Op.CODECOPY(0, Op.SUB(Op.CODESIZE, len(bytes(modexp_input))), len(bytes(modexp_input)))
+ Op.CALL(
gas=200_000,
address=Spec.MODEXP_ADDRESS,
value=0,
args_offset=0,
args_size=len(bytes(modexp_input)),
ret_offset=0,
ret_size=len(bytes(modexp_expected)),
)
+ Op.SSTORE(storage.store_next(True), Op.DUP1())
+ Op.SSTORE(
storage.store_next(keccak256(bytes(modexp_expected))), Op.SHA3(0, Op.RETURNDATASIZE())
)
+ Op.SSTORE(storage.store_next(len(bytes(modexp_expected))), Op.RETURNDATASIZE())
+ Op.STOP
)
full_initcode = call_modexp_bytecode + bytes(modexp_input)
total_bytecode_length = len(call_modexp_bytecode) + len(bytes(modexp_input))
create_contract = (
Op.CALLDATACOPY(offset=0, size=total_bytecode_length)
+ opcode(offset=0, size=total_bytecode_length)
+ Op.STOP
)
factory_contract_address = pre.deploy_contract(code=create_contract)
contract_address = compute_create_address(
address=factory_contract_address, nonce=1, initcode=full_initcode, opcode=opcode
)
tx = Transaction(
sender=sender,
gas_limit=200_000,
to=factory_contract_address,
value=0,
data=call_modexp_bytecode + bytes(modexp_input),
)
post = {
contract_address: {
"storage": storage,
}
}
state_test(env=Environment(), pre=pre, post=post, tx=tx)
def create_modexp_variable_gas_test_cases() -> Generator:
"""
Create test cases for ModExp variable gas cost testing.
Returns: List of pytest.param objects for the test cases
"""
# Test case definitions: (base, exponent, modulus, expected_result,
# gas_usage, test_id)
test_cases = [
("", "", "", "", 500, "Z0"),
("01" * 32, "00" * 32, "", "", 500, "Z1"),
("01" * 1024, "00" * 32, "", "", 32768, "Z2"),
("01" * 32, "00" * 1024, "", "", 253952, "Z3"),
("01" * 32, "00" * 1023 + "01", "", "", 253952, "Z4"),
("", "", "01" * 32, "00" * 31 + "01", 500, "Z5"),
("", "01" * 32, "01" * 32, "00" * 32, 3968, "Z6"),
("", "00" * 31 + "01", "01" * 1024, "00" * 1024, 32768, "Z7"),
("01" * 16, "00" * 16, "02" * 16, "00" * 15 + "01", 500, "S0"),
("01" * 16, "00" * 15 + "03", "02" * 16, "01" * 16, 500, "S1"),
("01" * 32, "FF" * 32, "02" * 32, "01" * 32, 4080, "S2"),
("01" * 16, "00" * 40, "02" * 16, "00" * 15 + "01", 2048, "S3"),
("01" * 16, "00" * 39 + "01", "02" * 16, "01" * 16, 2048, "S4"),
("01" * 24, "00", "02" * 8, "00" * 7 + "01", 500, "S5"),
("01" * 8, "01", "02" * 24, "00" * 16 + "01" * 8, 500, "S6"),
("01" * 40, "00" * 16, "02" * 40, "00" * 39 + "01", 500, "L0"),
("01" * 40, "FF" * 32, "02" * 40, "01" * 40, 12750, "L1"),
("01" * 40, "00" * 40, "02" * 40, "00" * 39 + "01", 6400, "L2"),
("01" * 40, "00" * 39 + "01", "02" * 40, "01" * 40, 6400, "L3"),
("01" * 48, "01", "02" * 16, "01" * 16, 500, "L4"),
("01" * 16, "00" * 40, "02" * 48, "00" * 47 + "01", 9216, "L5"),
# Critical 32-byte boundary cases
("01" * 31, "01", "02" * 33, "00" * 2 + "01" * 31, 500, "B1"),
("01" * 33, "01", "02" * 31, "00" * 29 + "01" * 2, 500, "B2"),
("01" * 33, "01", "02" * 33, "01" * 33, 500, "B4"),
# Zero value edge cases
("00" * 32, "00" * 32, "01" * 32, "00" * 31 + "01", 500, "Z8"),
("01" * 32, "00" * 32, "00" * 32, "00" * 32, 500, "Z9"),
("00" * 32, "01" * 32, "02" * 32, "00" * 32, 3968, "Z10"),
("00" * 32, "00" * 33, "01" * 32, "00" * 31 + "01", 500, "Z11"),
("00" * 32, "00" * 1024, "01" * 32, "00" * 31 + "01", 253952, "Z12"),
("00" * 1024, "00" * 32, "01" * 32, "00" * 31 + "01", 32768, "Z13"),
("01" * 32, "00" * 1024, "00" * 32, "00" * 32, 253952, "Z14"),
("01" * 32, "00" * 31 + "01", "00" * 1024, "00" * 1024, 32768, "Z15"),
# Maximum value stress tests
("FF" * 64, "FF" * 64, "FF" * 64, "00" * 64, 98176, "M1"),
("FF" * 32, "01", "FF" * 32, "00" * 32, 500, "M2"),
("01", "FF" * 64, "FF" * 64, "00" * 63 + "01", 98176, "M3"),
# Tiny maximum values
("FF", "FE", "FD", "47", 500, "T2"),
# Bit pattern cases
("01" * 32, "80" * 32, "02" * 32, "01" * 32, 4080, "P2"),
("01" * 33, "00" * 31 + "80" + "00", "02" * 33, "01" * 33, 1150, "P3"),
# Asymmetric length cases
("01", "00" * 64, "02" * 64, "00" * 63 + "01", 65536, "A1"),
("01" * 64, "01", "02", "01", 500, "A2"),
("01" * 64, "00" * 64, "02", "01", 65536, "A3"),
# Word boundary case
("01" * 8, "01", "02" * 8, "0101010101010101", 500, "W2"),
# Exponent edge cases
("01" * 16, "00" * 32 + "01", "02" * 16, "01" * 16, 500, "E1"),
("01" * 16, "80" + "00" * 31, "02" * 16, "01" * 16, 4080, "E2"),
("01" * 16, "00" * 31 + "80", "02" * 16, "01" * 16, 500, "E3"),
("01" * 16, "7F" + "FF" * 31, "02" * 16, "01" * 16, 4064, "E4"),
# Implementation coverage cases
# IC1: Bit shift vs multiplication at 33-byte boundary
("FF" * 33, "01", "FF" * 33, "00" * 33, 500, "IC1"),
# IC3: Ceiling division at 7 bytes
("01" * 7, "01", "02" * 7, "01" * 7, 500, "IC3"),
# IC4: Ceiling division at 9 bytes
("01" * 9, "01", "02" * 9, "01" * 9, 500, "IC4"),
# IC5: Bit counting in middle of exponent
("01", "00" * 15 + "80" + "00" * 16, "02", "01", 2160, "IC5"),
# IC6: Native library even byte optimization
("01" * 31 + "00", "01", "01" * 31 + "00", "00" * 32, 500, "IC6"),
# IC7: Vector optimization 128-bit boundary
("00" * 15 + "01" * 17, "01", "00" * 15 + "01" * 17, "00" * 32, 500, "IC7"),
# IC9: Zero modulus with large inputs
("FF" * 32, "FF" * 32, "", "", None, "IC9"), # N/A case
# IC10: Power-of-2 boundary with high bit
("01" * 32, "80" + "00" * 31, "02" * 32, "01" * 32, 4080, "IC10"),
]
# Gas calculation parameters:
#
# Please refer to EIP-7883 for details of each function in the gas
# calculation.
# Link: https://eips.ethereum.org/EIPS/eip-7883
#
# - calculate_multiplication_complexity:
# - Comp: if max_length <= 32 bytes, it is Small (S), otherwise it is
# Large (L)
# - Rel (Length Relation): base < modulus (<), base = modulus (=),
# base > modulus (>)
#
# - calculate_iteration_count
# - Iter (Iteration Case):
# - A: exp≤32 and exp=0
# - B: exp≤32 and exp≠0
# - C: exp>32 and low256=0
# - D: exp>32 and low256≠0
#
# - calculate_gas_cost
# - Clamp: True if raw gas < 500 (clamped to 500), False if raw gas ≥ 500
# (no clamping)
"""
Test case coverage table:
┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
│ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
│ Z0 │ - │ - │ - │ - │ 500 │ Zero case – empty inputs │
│ Z1 │ S │ - │ A │ True │ 500 │ Non-zero base, zero exp, empty modulus │
│ Z2 │ L │ - │ A │ False │ 32768 │ Large base (1024B), zero exp, empty modulus │
│ Z3 │ S │ - │ C │ False |253952 │ Base, large zero exp (1024B), empty modulus │
│ Z4 │ S │ - │ D │ False │253952 │ Base, large exp (last byte=1), empty modulus │
│ Z5 │ S │ < │ A │ True │ 500 │ Empty base/exp, non-zero modulus only │
│ Z6 │ S │ < │ B │ False │ 3968 │ Empty base, non-zero exp and modulus │
│ Z7 │ L │ < │ B │ False │ 32768 │ Empty base, small exp, large modulus │
│ S0 │ S │ = │ A │ True │ 500 │ Small, equal, zero exp, clamped │
│ S1 │ S │ = │ B │ True │ 500 │ Small, equal, small exp, clamped │
│ S2 │ S │ = │ B │ False │ 4080 │ Small, equal, large exp, unclamped │
│ S3 │ S │ = │ C │ False │ 2048 │ Small, equal, large exp + zero low256 │
│ S4 │ S │ = │ D │ False │ 2048 │ Small, equal, large exp + non-zero low256 │
│ S5 │ S │ > │ A │ True │ 500 │ Small, base > mod, zero exp, clamped │
│ S6 │ S │ < │ B │ True │ 500 │ Small, base < mod, small exp, clamped │
│ L0 │ L │ = │ A │ True │ 500 │ Large, equal, zero exp, clamped │
│ L1 │ L │ = │ B │ False │ 12750 │ Large, equal, large exp, unclamped │
│ L2 │ L │ = │ C │ False │ 6400 │ Large, equal, large exp + zero low256 │
│ L3 │ L │ = │ D │ False │ 6400 │ Large, equal, large exp + non-zero low256 │
│ L4 │ L │ > │ B │ True │ 500 │ Large, base > mod, small exp, clamped │
│ L5 │ L │ < │ C │ False │ 9216 │ Large, base < mod, large exp + zero low256 │
│ B1 │ L │ < │ B │ True │ 500 │ Cross 32-byte boundary (31/33) │
│ B2 │ L │ > │ B │ True │ 500 │ Cross 32-byte boundary (33/31) │
│ B4 │ L │ = │ B │ True │ 500 │ Just over 32-byte boundary │
│ Z8 │ S │ = │ A │ True │ 500 │ All zeros except modulus │
│ Z9 │ S │ = │ A │ True │ 500 │ Zero modulus special case │
│ Z10 │ S │ = │ B │ False │ 3968 │ Zero base, large exponent │
│ Z11 │ S │ = │ C │ True │ 500 │ Zero base, 33B zero exp, non-zero modulus │
│ Z12 │ S │ = │ C │ False |253952 │ Zero base, large zero exp, non-zero modulus │
│ Z13 │ L │ > │ A │ False │ 32768 │ Large zero base, zero exp, non-zero modulus │
│ Z14 │ S │ = │ C │ False |253952 │ Base, large zero exp, zero modulus │
│ Z15 │ L │ < │ B │ False │ 32768 │ Base, small exp, large zero modulus │
│ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
│ M1 │ L │ = │ D │ False │ 98176 │ Maximum values stress test │
│ M2 │ S │ = │ B │ True │ 500 │ Max base/mod, small exponent │
│ M3 │ L │ < │ D │ False │ 98176 │ Small base, max exponent/mod │
│ T2 │ S │ = │ B │ True │ 500 │ Tiny maximum values │
│ P2 │ S │ = │ B │ False │ 4080 │ High bit in exponent │
│ P3 │ L │ = │ D │ False │ 1150 │ Specific bit pattern in large exponent │
│ A1 │ L │ < │ C │ False │ 65536 │ Asymmetric: tiny base, large exp/mod │
│ A2 │ L │ > │ B │ True │ 500 │ Asymmetric: large base, tiny exp/mod │
│ A3 │ L │ > │ C │ False │ 65536 │ Asymmetric: large base/exp, tiny modulus │
│ W2 │ S │ = │ B │ True │ 500 │ Exactly 8-byte words │
│ E1 │ S │ = │ D │ True │ 500 │ Exponent exactly 33 bytes │
│ E2 │ S │ = │ B │ False │ 4080 │ High bit in exponent first byte │
│ E3 │ S │ = │ B │ True │ 500 │ High bit in exponent last byte │
│ E4 │ S │ = │ B │ False │ 4064 │ Maximum 32-byte exponent │
│ IC1 │ L │ = │ B │ True │ 500 │ Bit shift vs multiplication @ 33 bytes │
│ IC3 │ S │ = │ B │ True │ 500 │ Ceiling division at 7 bytes │
│ IC4 │ S │ = │ B │ True │ 500 │ Ceiling division at 9 bytes │
│ IC5 │ S │ = │ B │ False │ 2160 │ Bit counting in middle of exponent │
│ IC6 │ L │ = │ B │ True │ 500 │ Native library even byte optimization │
│ IC7 │ L │ = │ B │ True │ 500 │ Vector optimization 128-bit boundary │
│ IC9 │ S │ = │ B │ N/A │ N/A │ Zero modulus handling │
│ IC10│ S │ = │ B │ False │ 4080 │ Power-of-2 boundary with high bit │
└─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
""" # noqa: W505
for base, exponent, modulus, expected_result, gas_usage, test_id in test_cases:
yield pytest.param(
ModExpInput(base=base, exponent=exponent, modulus=modulus),
bytes.fromhex(expected_result),
gas_usage,
id=test_id,
)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,gas_usage",
create_modexp_variable_gas_test_cases(),
)
@EIPChecklist.Precompile.Test.InputLengths.Zero()
@EIPChecklist.GasCostChanges.Test.GasUpdatesMeasurement()
@pytest.mark.valid_from("Berlin")
def test_modexp_variable_gas_cost(
state_test: StateTestFiller,
precompile_gas: int,
gas_usage: int,
pre: Alloc,
tx: Transaction,
fork: Fork,
post: Dict,
) -> None:
"""Test ModExp variable gas cost."""
if fork >= Osaka: # Check that gas used defined in table is accurate
assert (gas_usage is None) or (precompile_gas >= gas_usage), "inconsistent gas usage"
state_test(pre=pre, tx=tx, post=post)
@pytest.mark.parametrize(
"modexp_input,modexp_expected,expected_tx_cap_fail",
[
pytest.param(
ModExpInput(base="00" * 32, exponent="00" * 1024, modulus="01" * 1024),
bytes.fromhex("00" * 1023 + "01"),
True,
id="Z16-gas-cap-test",
),
],
)
@pytest.mark.valid_from("Berlin")
def test_modexp_variable_gas_cost_exceed_tx_gas_cap(
state_test: StateTestFiller, pre: Alloc, tx: Transaction, post: Dict
) -> None:
"""
Test ModExp variable gas cost. Inputs with an expected gas cost over the
EIP-7825 tx gas cap.
Test case coverage table (gas cap):
┌─────┬──────┬─────┬──────┬───────┬─────────┬───────────────────────────────────────────────┐
│ ID │ Comp │ Rel │ Iter │ Clamp │ Gas │ Description │
├─────┼──────┼─────┼──────┼───────┼─────────┼───────────────────────────────────────────────┤
│ Z16 │ L │ < │ C │ False │520060928│ Zero base, zero exp, large modulus (gas cap) |
└─────┴──────┴─────┴──────┴───────┴─────────┴───────────────────────────────────────────────┘
""" # noqa: W505
state_test(pre=pre, tx=tx, post=post)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/spec.py | tests/osaka/eip7918_blob_reserve_price/spec.py | """Defines EIP-7918 specification constants and functions."""
from dataclasses import dataclass
# Base the spec on EIP-4844 which EIP-7918 extends
from ...cancun.eip4844_blobs.spec import Spec as EIP4844Spec
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7918 = ReferenceSpec("EIPS/eip-7918.md", "be1dbefafcb40879e3f6d231fad206c62f5b371b")
@dataclass(frozen=True)
class Spec(EIP4844Spec):
"""
Parameters from the EIP-7918 specifications. Extends EIP-4844 spec with the
new reserve price constant and functionality.
"""
BLOB_BASE_COST = 2**13
@classmethod
def get_reserve_price(
cls,
base_fee_per_gas: int,
) -> int:
"""Calculate the reserve price for blob gas given the blob base fee."""
return (cls.BLOB_BASE_COST * base_fee_per_gas) // cls.GAS_PER_BLOB
@classmethod
def is_reserve_price_active(
cls,
base_fee_per_gas: int,
blob_base_fee: int,
) -> bool:
"""Check if the reserve price mechanism should be active."""
reserve_price = cls.get_reserve_price(base_fee_per_gas)
return reserve_price > blob_base_fee
@classmethod
def calc_effective_blob_base_fee(
cls,
base_fee_per_gas: int,
blob_base_fee: int,
) -> int:
"""
Calculate the effective blob base fee considering the reserve price.
"""
reserve_price = cls.get_reserve_price(base_fee_per_gas)
return max(reserve_price, blob_base_fee)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py | tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo_transitions.py | """Tests EIP-7918 on BPO fork transitions."""
from dataclasses import dataclass
from typing import Iterator, List
import pytest
from ethereum_test_forks import BPO2ToBPO3AtTime15k, Fork
from ethereum_test_tools import (
EOA,
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Hash,
Header,
Transaction,
add_kzg_version,
)
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools.utility.pytest import ParameterSet
from .spec import Spec, ref_spec_7918
REFERENCE_SPEC_GIT_PATH = ref_spec_7918.git_path
REFERENCE_SPEC_VERSION = ref_spec_7918.version
MIN_BLOB_GASPRICE = 1
@pytest.fixture
def sender(pre: Alloc) -> EOA:
"""Sender account with enough balance for tests."""
return pre.fund_eoa()
@pytest.fixture
def destination_account(pre: Alloc) -> Address:
"""Contract recipient of blobs."""
code = Op.STOP
return pre.deploy_contract(code)
@pytest.fixture
def gas_spender_contract(pre: Alloc) -> Address:
"""Contract that exhausts the gas limit of a tx."""
code = Op.INVALID
return pre.deploy_contract(code)
@pytest.fixture
def tx_gas() -> int:
"""Gas limit for blob transactions sent during test."""
return 21_000
@pytest.fixture
def tx_value() -> int:
"""Value for blob transactions sent during test."""
return 0
def blob_hashes_per_tx(blobs_per_tx: int) -> List[Hash]:
"""Blob hashes for the transaction."""
return add_kzg_version(
[Hash(x) for x in range(blobs_per_tx)],
Spec.BLOB_COMMITMENT_VERSION_KZG,
)
@pytest.fixture
def source_fork_target_blobs(fork: Fork) -> int:
"""Transition-from fork target blobs."""
return fork.target_blobs_per_block(timestamp=0)
@pytest.fixture
def source_fork_gas_per_blob(fork: Fork) -> int:
"""Transition-from fork gas per blob."""
return fork.blob_gas_per_blob(timestamp=0)
@pytest.fixture
def transition_fork_target_blobs(fork: Fork) -> int:
"""Transition-to fork target blobs."""
return fork.target_blobs_per_block(timestamp=15_000)
@pytest.fixture
def transition_fork_gas_per_blob(fork: Fork) -> int:
"""Transition-to fork gas per blob."""
return fork.blob_gas_per_blob(timestamp=15_000)
@pytest.fixture
def genesis_base_fee_per_gas(
fork: Fork,
parent_base_fee_per_gas: int,
) -> int:
"""Genesis base fee per gas."""
# Base fee always drops from genesis to block 1 because the genesis block
# never uses any tx gas.
return (parent_base_fee_per_gas * fork.base_fee_max_change_denominator()) // 7
@pytest.fixture
def genesis_excess_blob_gas(
fork: Fork,
genesis_base_fee_per_gas: int,
parent_excess_blob_gas: int,
source_fork_target_blobs: int,
source_fork_gas_per_blob: int,
) -> int:
"""Genesis excess blob gas."""
genesis_excess_blob_gas = parent_excess_blob_gas + (
source_fork_target_blobs * source_fork_gas_per_blob
)
excess_blob_gas_calculator = fork.excess_blob_gas_calculator(timestamp=0)
current_excess_blob_gas = excess_blob_gas_calculator(
parent_excess_blob_gas=genesis_excess_blob_gas,
parent_blob_count=0,
parent_base_fee_per_gas=genesis_base_fee_per_gas,
)
if current_excess_blob_gas == parent_excess_blob_gas:
return genesis_excess_blob_gas
if current_excess_blob_gas > parent_excess_blob_gas:
minimum = 0
maximum = genesis_excess_blob_gas
while minimum < maximum:
mid = (minimum + maximum) // 2
next_excess_blob_gas = excess_blob_gas_calculator(
parent_excess_blob_gas=mid,
parent_blob_count=0,
parent_base_fee_per_gas=genesis_base_fee_per_gas,
)
if next_excess_blob_gas == parent_excess_blob_gas:
return mid
if next_excess_blob_gas > parent_excess_blob_gas:
maximum = mid - 1
else:
minimum = mid + 1
raise ValueError("No excess blob gas found")
@pytest.fixture
def env(
genesis_excess_blob_gas: int,
genesis_base_fee_per_gas: int,
) -> Environment:
"""Environment for the test."""
return Environment(
# Excess blob gas always drops from genesis to block 1 because genesis
# uses no blob gas.
excess_blob_gas=genesis_excess_blob_gas,
base_fee_per_gas=genesis_base_fee_per_gas,
gas_limit=16_000_000, # To make it easier to reach the requirement
# with a single tx
)
def get_blob_transactions(
*,
blob_count: int,
blob_cap_per_transaction: int | None,
sender: EOA,
destination_account: Address,
tx_gas: int,
tx_value: int,
block_base_fee_per_gas: int,
tx_max_fee_per_blob_gas: int,
) -> List[Transaction]:
"""Return a list of transactions with the given blobs."""
txs = []
if blob_cap_per_transaction is None:
blob_cap_per_transaction = blob_count
for _ in range(blob_count // blob_cap_per_transaction):
tx = Transaction(
ty=Spec.BLOB_TX_TYPE,
sender=sender,
to=destination_account,
value=tx_value,
gas_limit=tx_gas,
max_fee_per_gas=block_base_fee_per_gas,
max_priority_fee_per_gas=0,
max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
access_list=[],
blob_versioned_hashes=blob_hashes_per_tx(blob_cap_per_transaction),
)
txs.append(tx)
if blob_count % blob_cap_per_transaction != 0:
tx = Transaction(
ty=Spec.BLOB_TX_TYPE,
sender=sender,
to=destination_account,
value=tx_value,
gas_limit=tx_gas,
max_fee_per_gas=block_base_fee_per_gas,
max_priority_fee_per_gas=0,
max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
access_list=[],
blob_versioned_hashes=blob_hashes_per_tx(blob_count % blob_cap_per_transaction),
)
txs.append(tx)
return txs
@pytest.fixture
def tx_max_fee_per_blob_gas() -> int:
"""Max fee per blob gas to be used by all transactions in the test."""
return 0x1000
@pytest.fixture
def blob_cap_per_transaction(fork: Fork) -> int:
"""Max blobs that a single transaction can contain."""
return fork.max_blobs_per_tx()
@pytest.fixture
def parent_block_txs(
fork: Fork,
sender: EOA,
destination_account: Address,
gas_spender_contract: Address,
env: Environment,
tx_gas: int,
tx_value: int,
parent_blob_count: int,
parent_base_fee_per_gas: int,
tx_max_fee_per_blob_gas: int,
transition_block_base_fee_per_gas: int,
blob_cap_per_transaction: int,
) -> List[Transaction]:
"""
Transactions included in the block prior to the fork transition fork.
Includes blob transactions to raise the `parent_blob_gas_used` and normal
transactions to raise/lower the base fee per gas.
"""
parent_block_blob_txs = get_blob_transactions(
blob_count=parent_blob_count,
blob_cap_per_transaction=blob_cap_per_transaction,
sender=sender,
destination_account=destination_account,
tx_gas=tx_gas,
tx_value=tx_value,
block_base_fee_per_gas=parent_base_fee_per_gas * 10,
tx_max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
)
required_gas_used = fork.base_fee_change_calculator()(
parent_gas_limit=env.gas_limit,
parent_base_fee_per_gas=parent_base_fee_per_gas,
required_base_fee_per_gas=transition_block_base_fee_per_gas,
)
blob_txs_execution_gas = sum(tx.gas_limit for tx in parent_block_blob_txs)
assert blob_txs_execution_gas <= required_gas_used
extra_tx_gas_limit = required_gas_used - blob_txs_execution_gas
assert extra_tx_gas_limit >= 21_000
extra_tx = Transaction(
sender=sender,
to=gas_spender_contract,
gas_limit=extra_tx_gas_limit,
max_fee_per_gas=parent_base_fee_per_gas,
max_priority_fee_per_gas=0,
access_list=[],
)
return parent_block_blob_txs + [extra_tx]
@pytest.fixture
def parent_block(
parent_block_txs: List[Transaction],
parent_excess_blob_gas: int,
parent_blob_count: int,
parent_base_fee_per_gas: int,
blob_gas_per_blob: int,
) -> Block:
"""Parent block to satisfy the pre-fork conditions of the test."""
return Block(
txs=parent_block_txs,
timestamp=14_999,
header_verify=Header(
excess_blob_gas=parent_excess_blob_gas,
blob_gas_used=parent_blob_count * blob_gas_per_blob,
base_fee_per_gas=parent_base_fee_per_gas,
),
)
@pytest.fixture
def transition_block_txs(
sender: EOA,
destination_account: Address,
tx_gas: int,
tx_value: int,
transition_block_blob_count: int,
blob_cap_per_transaction: int,
tx_max_fee_per_blob_gas: int,
transition_block_base_fee_per_gas: int,
) -> List[Transaction]:
"""
Transactions included in the first block of the new fork.
Includes blob transactions only.
"""
return get_blob_transactions(
blob_count=transition_block_blob_count,
blob_cap_per_transaction=blob_cap_per_transaction,
sender=sender,
destination_account=destination_account,
tx_gas=tx_gas,
tx_value=tx_value,
block_base_fee_per_gas=transition_block_base_fee_per_gas * 10,
tx_max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
)
@pytest.fixture
def transition_block(
transition_block_txs: List[Transaction],
transition_block_expected_excess_blob_gas: int | None,
transition_block_blob_count: int,
transition_block_base_fee_per_gas: int,
blob_gas_per_blob: int,
) -> Block:
"""Parent block to satisfy the pre-fork conditions of the test."""
return Block(
txs=transition_block_txs,
timestamp=15_000,
header_verify=Header(
excess_blob_gas=transition_block_expected_excess_blob_gas,
blob_gas_used=transition_block_blob_count * blob_gas_per_blob,
base_fee_per_gas=transition_block_base_fee_per_gas,
),
)
@dataclass(kw_only=True)
class ParentHeader:
"""Parent block header information."""
excess_blob_gas: int
blob_gas_used: int
base_fee_per_gas: int
@dataclass(kw_only=True)
class BlobSchedule:
"""Blob schedule for a fork."""
fork: Fork
timestamp: int
@property
def max(self) -> int:
"""Return the max blobs per block."""
return self.fork.max_blobs_per_block(timestamp=self.timestamp)
@property
def target(self) -> int:
"""Return the target blobs per block."""
return self.fork.target_blobs_per_block(timestamp=self.timestamp)
@property
def base_fee_update_fraction(self) -> int:
"""Return the base fee update fraction."""
return self.fork.blob_base_fee_update_fraction(timestamp=self.timestamp)
@property
def blob_gas_per_blob(self) -> int:
"""Return the blob gas per blob."""
return self.fork.blob_gas_per_blob(timestamp=self.timestamp)
@property
def blob_base_cost(self) -> int | None:
"""Return the blob base cost."""
if self.fork.blob_reserve_price_active(timestamp=self.timestamp):
return self.fork.blob_base_cost(timestamp=self.timestamp)
return None
@property
def target_blob_gas_per_block(self) -> int:
"""Return the target blob gas per block."""
return self.target * self.blob_gas_per_blob
def calculate_excess_blob_gas(self, parent_header: ParentHeader) -> int:
"""
Calculate the excess blob gas for the current block based on the gas
used in the parent block.
"""
excess_blob_gas_calculator = self.fork.excess_blob_gas_calculator(timestamp=self.timestamp)
return excess_blob_gas_calculator(
parent_excess_blob_gas=parent_header.excess_blob_gas,
parent_blob_count=parent_header.blob_gas_used,
parent_base_fee_per_gas=parent_header.base_fee_per_gas,
)
def execution_base_fee_threshold_from_excess_blob_gas(
self, excess_blob_gas: int
) -> int | None:
"""
Return the minimum base fee required to trigger the reserve mechanism,
or None for blob schedules that don't have a reserve price mechanism.
"""
if self.blob_base_cost is None:
return None
target_blob_gas_price = self.blob_gas_per_blob
blob_gas_price_calculator = self.fork.blob_gas_price_calculator(timestamp=self.timestamp)
target_blob_gas_price *= blob_gas_price_calculator(excess_blob_gas=excess_blob_gas)
base_blob_tx_price = target_blob_gas_price
return (base_blob_tx_price // self.blob_base_cost) + 1
def get_fork_scenarios(fork: Fork) -> Iterator[ParameterSet]:
"""
Return the list of scenarios at the fork boundary depending on the source
fork and transition fork properties.
"""
source_blob_schedule = BlobSchedule(fork=fork, timestamp=0)
transition_blob_schedule = BlobSchedule(fork=fork, timestamp=15_000)
excess_blobs_combinations = [0, 1, 10, 100]
for parent_excess_blobs in excess_blobs_combinations:
parent_excess_blob_gas = parent_excess_blobs * source_blob_schedule.blob_gas_per_blob
source_execution_threshold = (
source_blob_schedule.execution_base_fee_threshold_from_excess_blob_gas(
parent_excess_blob_gas
)
)
transition_execution_threshold = (
transition_blob_schedule.execution_base_fee_threshold_from_excess_blob_gas(
parent_excess_blob_gas
)
)
if (
source_execution_threshold != transition_execution_threshold
and transition_execution_threshold is not None
):
# The source base fee reserve threshold is different from the
# transition one given the excess blob gas. We can verify that the
# BPO is activated correctly by using the a setup block with
# transition_execution_threshold to trigger the reserve.
for source_blob_count in [0, source_blob_schedule.target, source_blob_schedule.max]:
# Scenario 1: Parent base fee per gas is below the threshold at
# the parent of the transition block, so even though the base
# fee increases on the transition block to reach the value
# required to activate the reserve, since the base fee per gas
# of the parent is used, the reserve must not be activated.
parent_base_fee = transition_execution_threshold - 1
transition_base_fee = transition_execution_threshold
parent_header = ParentHeader(
excess_blob_gas=parent_excess_blob_gas,
blob_gas_used=source_blob_count,
base_fee_per_gas=parent_base_fee,
)
target_excess_blob_gas = transition_blob_schedule.calculate_excess_blob_gas(
parent_header
)
source_excess_blob_gas = source_blob_schedule.calculate_excess_blob_gas(
parent_header
)
if source_excess_blob_gas != target_excess_blob_gas:
yield pytest.param(
parent_base_fee,
parent_excess_blob_gas,
source_blob_count,
transition_base_fee,
transition_blob_schedule.target,
None,
id=(
"below_reserve_base_fee_threshold-"
f"parent_excess_blobs_{parent_excess_blobs}-"
f"parent_blobs_{source_blob_count}"
),
)
# Scenario 2: Parent base fee per gas is at the threshold, so
# the reserve is activated even though the base fee per gas
# decreases below the threshold on the transition block.
parent_base_fee = transition_execution_threshold
transition_base_fee = transition_execution_threshold - 1
parent_header = ParentHeader(
excess_blob_gas=parent_excess_blob_gas,
blob_gas_used=source_blob_count,
base_fee_per_gas=parent_base_fee,
)
target_excess_blob_gas = transition_blob_schedule.calculate_excess_blob_gas(
parent_header
)
source_excess_blob_gas = source_blob_schedule.calculate_excess_blob_gas(
parent_header
)
if source_excess_blob_gas != target_excess_blob_gas:
yield pytest.param(
parent_base_fee,
parent_excess_blob_gas,
source_blob_count,
transition_base_fee,
transition_blob_schedule.target,
None,
id=(
"at_reserve_base_fee_threshold-"
f"parent_excess_blobs_{parent_excess_blobs}-"
f"parent_blobs_{source_blob_count}"
),
)
if fork == BPO2ToBPO3AtTime15k:
# Explicitly add the exact scenario that triggered the Fusaka Devnet-4
# fork.
yield pytest.param(
0x32,
0x125BF5F,
19,
0x33,
9,
0x132CF5F,
id="devnet-4-fork-scenario",
)
@pytest.mark.parametrize_by_fork(
[
"parent_base_fee_per_gas",
"parent_excess_blob_gas",
"parent_blob_count",
"transition_block_base_fee_per_gas",
"transition_block_blob_count",
"transition_block_expected_excess_blob_gas",
],
get_fork_scenarios,
)
@pytest.mark.valid_at_transition_to("Osaka", subsequent_forks=True)
@pytest.mark.valid_for_bpo_forks()
def test_reserve_price_at_transition(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
parent_block: Block,
transition_block: Block,
env: Environment,
) -> None:
"""
Test reserve price mechanism across various block base fee and excess blob
gas scenarios.
"""
blockchain_test(
pre=pre,
post={},
blocks=[parent_block, transition_block],
genesis_environment=env,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/conftest.py | tests/osaka/eip7918_blob_reserve_price/conftest.py | """
Pytest (plugin) definitions local to EIP-7918 tests.
Mostly a copy of `tests/cancun/eip4844_blobs/conftest.py`.
"""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import Environment
from .spec import Spec
@pytest.fixture
def target_blobs_per_block(fork: Fork) -> int:
"""Return default number of target blobs per block."""
return fork.target_blobs_per_block()
@pytest.fixture
def max_blobs_per_block(fork: Fork) -> int:
"""Return default number of max blobs per block."""
return fork.max_blobs_per_block()
@pytest.fixture
def blob_gas_per_blob(fork: Fork) -> int:
"""Return default blob gas cost per blob."""
return fork.blob_gas_per_blob()
@pytest.fixture(autouse=True)
def parent_excess_blobs() -> int | None:
"""
Return default excess blobs of the parent block.
Can be overloaded by a test case to provide a custom parent excess blob
count.
"""
return 10 # Defaults to a blob gas price of 1.
@pytest.fixture(autouse=True)
def parent_blobs() -> int | None:
"""
Return default data blobs of the parent block.
Can be overloaded by a test case to provide a custom parent blob count.
"""
return 0
@pytest.fixture
def parent_excess_blob_gas(
parent_excess_blobs: int | None,
blob_gas_per_blob: int,
) -> int | None:
"""
Calculate the excess blob gas of the parent block from the excess blobs.
"""
if parent_excess_blobs is None:
return None
assert parent_excess_blobs >= 0
return parent_excess_blobs * blob_gas_per_blob
@pytest.fixture
def blobs_per_tx() -> int:
"""
Total number of blobs per transaction.
Can be overloaded by a test case to provide a custom blobs per transaction
count.
"""
return 1
@pytest.fixture
def block_base_fee_per_gas_delta() -> int:
"""Delta to add to the block base fee. Default is 0."""
return 0
@pytest.fixture
def block_base_fee_per_gas(
fork: Fork,
parent_excess_blobs: int | None,
block_base_fee_per_gas_delta: int,
) -> int:
"""
Block base fee per gas. Default is 7 unless a delta is provided or
overloaded.
"""
if block_base_fee_per_gas_delta != 0:
if parent_excess_blobs is None:
blob_base_fee = 1
else:
excess_blob_gas = parent_excess_blobs * fork.blob_gas_per_blob()
blob_gas_price_calculator = fork.blob_gas_price_calculator()
blob_base_fee = blob_gas_price_calculator(excess_blob_gas=excess_blob_gas)
boundary_base_fee = 8 * blob_base_fee
return boundary_base_fee + block_base_fee_per_gas_delta
return 7
@pytest.fixture
def excess_blob_gas(
fork: Fork,
parent_excess_blobs: int | None,
parent_blobs: int | None,
block_base_fee_per_gas: int,
) -> int | None:
"""
Calculate the excess blob gas of the block under test from the parent
block.
Value can be overloaded by a test case to provide a custom excess blob gas.
"""
if parent_excess_blobs is None or parent_blobs is None:
return None
return fork.excess_blob_gas_calculator()(
parent_excess_blobs=parent_excess_blobs,
parent_blob_count=parent_blobs,
parent_base_fee_per_gas=block_base_fee_per_gas,
)
@pytest.fixture
def correct_excess_blob_gas(
fork: Fork,
parent_excess_blobs: int | None,
parent_blobs: int | None,
block_base_fee_per_gas: int,
) -> int:
"""
Calculate the correct excess blob gas of the block under test from the
parent block.
Should not be overloaded by a test case.
"""
if parent_excess_blobs is None or parent_blobs is None:
return 0
return fork.excess_blob_gas_calculator()(
parent_excess_blobs=parent_excess_blobs,
parent_blob_count=parent_blobs,
parent_base_fee_per_gas=block_base_fee_per_gas,
)
@pytest.fixture
def blob_gas_price(
fork: Fork,
excess_blob_gas: int | None,
) -> int | None:
"""Return blob gas price for the block of the test."""
if excess_blob_gas is None:
return None
get_blob_gas_price = fork.blob_gas_price_calculator()
return get_blob_gas_price(
excess_blob_gas=excess_blob_gas,
)
@pytest.fixture
def correct_blob_gas_used(
fork: Fork,
blobs_per_tx: int,
) -> int:
"""Correct blob gas used by the test transaction."""
return fork.blob_gas_per_blob() * blobs_per_tx
@pytest.fixture
def reserve_price(
block_base_fee_per_gas: int,
) -> int:
"""Calculate the blob base fee reserve price for the current base fee."""
return Spec.get_reserve_price(block_base_fee_per_gas)
@pytest.fixture
def is_reserve_price_active(
block_base_fee_per_gas: int,
blob_gas_price: int,
) -> bool:
"""Check if the reserve price mechanism should be active."""
return Spec.is_reserve_price_active(block_base_fee_per_gas, blob_gas_price)
@pytest.fixture
def genesis_excess_blob_gas(
parent_excess_blob_gas: int | None,
) -> int:
"""Return default excess blob gas for the genesis block."""
return parent_excess_blob_gas if parent_excess_blob_gas else 0
@pytest.fixture
def env(
block_base_fee_per_gas: int,
genesis_excess_blob_gas: int,
) -> Environment:
"""
Prepare the environment of the genesis block for all blockchain tests.
"""
return Environment(
excess_blob_gas=genesis_excess_blob_gas,
blob_gas_used=0,
base_fee_per_gas=block_base_fee_per_gas,
)
@pytest.fixture
def tx_max_fee_per_blob_gas(blob_gas_price: int | None) -> int:
"""Max fee per blob gas based on actual blob gas price."""
if blob_gas_price is None:
return 1
return blob_gas_price
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py | tests/osaka/eip7918_blob_reserve_price/test_blob_base_fee.py | """
[EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
Test the blob base fee reserve price mechanism for
[EIP-7918: Blob base fee bounded by execution cost](https://eips.ethereum.org/EIPS/eip-7918).
"""
from typing import Dict, List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Hash,
Header,
Transaction,
add_kzg_version,
)
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_7918
REFERENCE_SPEC_GIT_PATH = ref_spec_7918.git_path
REFERENCE_SPEC_VERSION = ref_spec_7918.version
pytestmark = pytest.mark.valid_from("Osaka")
@pytest.fixture
def sender(pre: Alloc) -> Address:
"""Sender account with enough balance for tests."""
return pre.fund_eoa(10**18)
@pytest.fixture
def destination_account(pre: Alloc) -> Address:
"""Contract that stores the blob base fee for verification."""
code = Op.SSTORE(0, Op.BLOBBASEFEE)
return pre.deploy_contract(code)
@pytest.fixture
def tx_gas() -> int:
"""Gas limit for transactions sent during test."""
return 100_000
@pytest.fixture
def tx_value() -> int:
"""Value for transactions sent during test."""
return 1
@pytest.fixture
def blob_hashes_per_tx(blobs_per_tx: int) -> List[Hash]:
"""Blob hashes for the transaction."""
return add_kzg_version(
[Hash(x) for x in range(blobs_per_tx)],
Spec.BLOB_COMMITMENT_VERSION_KZG,
)
@pytest.fixture
def tx(
sender: Address,
destination_account: Address,
tx_gas: int,
tx_value: int,
blob_hashes_per_tx: List[Hash],
block_base_fee_per_gas: int,
tx_max_fee_per_blob_gas: int,
) -> Transaction:
"""Blob transaction for the block."""
return Transaction(
ty=Spec.BLOB_TX_TYPE,
sender=sender,
to=destination_account,
value=tx_value,
gas_limit=tx_gas,
max_fee_per_gas=block_base_fee_per_gas,
max_priority_fee_per_gas=0,
max_fee_per_blob_gas=tx_max_fee_per_blob_gas,
access_list=[],
blob_versioned_hashes=blob_hashes_per_tx,
)
@pytest.fixture
def block(
tx: Transaction,
fork: Fork,
parent_excess_blobs: int,
block_base_fee_per_gas: int,
blob_gas_per_blob: int,
) -> Block:
"""Single block fixture."""
blob_count = len(tx.blob_versioned_hashes) if tx.blob_versioned_hashes else 0
excess_blob_gas_calculator = fork.excess_blob_gas_calculator()
expected_excess_blob_gas = excess_blob_gas_calculator(
parent_excess_blobs=parent_excess_blobs,
parent_blob_count=0,
parent_base_fee_per_gas=block_base_fee_per_gas,
)
return Block(
txs=[tx],
header_verify=Header(
excess_blob_gas=expected_excess_blob_gas,
blob_gas_used=blob_count * blob_gas_per_blob,
),
)
@pytest.fixture
def post(
destination_account: Address,
blob_gas_price: int,
tx_value: int,
) -> Dict[Address, Account]:
"""Post state storing the effective blob base fee."""
return {
destination_account: Account(
storage={0: blob_gas_price},
balance=tx_value,
)
}
@pytest.mark.parametrize(
"block_base_fee_per_gas",
[1, 7, 15, 16, 17, 100, 1000, 10000],
)
@pytest.mark.parametrize_by_fork(
"parent_excess_blobs",
lambda fork: range(0, fork.target_blobs_per_block() + 1),
)
def test_reserve_price_various_base_fee_scenarios(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Alloc,
block: Block,
post: Dict[Address, Account],
) -> None:
"""
Test reserve price mechanism across various block base fee and excess blob
gas scenarios.
"""
blockchain_test(
pre=pre,
post=post,
blocks=[block],
genesis_environment=env,
)
@pytest.mark.parametrize_by_fork(
"parent_excess_blobs",
# Keep max assuming this will be greater than 20 in the future, to test a
# blob fee of > 1 :)
lambda fork: [0, 3, fork.target_blobs_per_block(), fork.max_blobs_per_block()],
)
@pytest.mark.parametrize("block_base_fee_per_gas_delta", [-2, -1, 0, 1, 10, 100])
def test_reserve_price_boundary(
blockchain_test: BlockchainTestFiller,
env: Environment,
pre: Alloc,
block: Block,
post: Dict[Address, Account],
) -> None:
"""
Tests the reserve price boundary mechanism. Note the default block base fee
per gas is 7 (delta is 0). With a non zero delta the block base fee per gas
is set to (boundary * blob base fee) + delta.
Example scenarios from parametrization:
Assume
parent_excess_blobs = 3:
delta=-2:
blob_base_fee=1,
boundary=8,
block_base_fee_per_gas=8+(-2)=6, 6 < 8,
reserve inactive,
effective_fee=1 delta=0:
blob_base_fee=1, boundary=8,
block_base_fee_per_gas=7, 7 < 8,
reserve inactive, effective_fee=1
delta=100: blob_base_fee=1,
boundary=8, block_base_fee_per_gas=8+100=108, 108 > 8,
reserve active, effective_fee=max(108/8, 1)=13
All values give a blob base_ fee of 1 because we need a much higher excess
blob gas to increase the blob fee. This only increases to 2 at 20 excess
blobs.
"""
blockchain_test(
genesis_environment=env,
pre=pre,
blocks=[block],
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/__init__.py | tests/osaka/eip7918_blob_reserve_price/__init__.py | """Cross-client EIP-7918 Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py | tests/osaka/eip7918_blob_reserve_price/test_blob_reserve_price_with_bpo.py | """
[EIP-7918: Blob base fee bounded by execution
cost](https://eips.ethereum.org/EIPS/eip-7918).
"""
import pytest
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
Environment,
)
from .spec import ref_spec_7918
REFERENCE_SPEC_GIT_PATH = ref_spec_7918.git_path
REFERENCE_SPEC_VERSION = ref_spec_7918.version
@pytest.mark.valid_at_transition_to("BPO1")
@pytest.mark.valid_for_bpo_forks()
@pytest.mark.parametrize("parent_excess_blobs", [27])
@pytest.mark.parametrize("block_base_fee_per_gas", [17])
def test_blob_base_fee_with_bpo_transition(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
env: Environment,
) -> None:
"""Test BPO1 transition with EIP-7918 reserve mechanism."""
blockchain_test(
genesis_environment=env,
pre=pre,
blocks=[Block(timestamp=15_000)],
post={},
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7825_transaction_gas_limit_cap/spec.py | tests/osaka/eip7825_transaction_gas_limit_cap/spec.py | """Defines EIP-7825 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
# EIP-7825 reference specification
ref_spec_7825 = ReferenceSpec("EIPS/eip-7825.md", "1ed95cbac750539c2aac67c8cbbcc2d77974231c")
@dataclass(frozen=True)
class Spec:
"""
Constants and helpers for the EIP-7825 Transaction Gas Limit Cap tests.
"""
# Gas limit constants
tx_gas_limit_cap = 2**24 # 16,777,216
# Blob transaction constants
blob_commitment_version_kzg = 1
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py | tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit.py | """
Transaction gas limit cap tests.
Tests for transaction gas limit cap in [EIP-7825: Transaction Gas Limit
Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
AccessList,
Account,
Address,
Alloc,
AuthorizationTuple,
Block,
BlockchainTestFiller,
Bytecode,
Environment,
Hash,
StateTestFiller,
Storage,
Transaction,
TransactionException,
add_kzg_version,
)
from ethereum_test_tools.utility.pytest import ParameterSet
from ethereum_test_vm import Opcodes as Op
from .spec import Spec, ref_spec_7825
# Update reference spec constants
REFERENCE_SPEC_GIT_PATH = ref_spec_7825.git_path
REFERENCE_SPEC_VERSION = ref_spec_7825.version
def tx_gas_limit_cap_tests(fork: Fork) -> List[ParameterSet]:
"""
Return a list of tests for transaction gas limit cap parametrized for each
different fork.
"""
fork_tx_gas_limit_cap = fork.transaction_gas_limit_cap()
if fork_tx_gas_limit_cap is None:
# Use a default value for forks that don't have a transaction gas limit
# cap
return [
pytest.param(Spec.tx_gas_limit_cap + 1, None, id="tx_gas_limit_cap_none"),
]
return [
pytest.param(
fork_tx_gas_limit_cap + 1,
TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM,
id="tx_gas_limit_cap_exceeds_maximum",
marks=pytest.mark.exception_test,
),
pytest.param(fork_tx_gas_limit_cap, None, id="tx_gas_limit_cap_over"),
]
@pytest.mark.parametrize_by_fork("tx_gas_limit,error", tx_gas_limit_cap_tests)
@pytest.mark.with_all_tx_types
@pytest.mark.valid_from("Prague")
def test_transaction_gas_limit_cap(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
tx_gas_limit: int,
error: TransactionException | None,
tx_type: int,
) -> None:
"""
Test the transaction gas limit cap behavior for all transaction types.
"""
env = Environment()
sender = pre.fund_eoa()
storage = Storage()
contract_address = pre.deploy_contract(
code=Op.SSTORE(storage.store_next(1), 1) + Op.STOP,
)
tx_kwargs = {
"ty": tx_type,
"to": contract_address,
"gas_limit": tx_gas_limit,
"data": b"",
"value": 0,
"sender": sender,
"error": error,
}
# Add extra required fields based on transaction type
if tx_type >= 1:
# Type 1: EIP-2930 Access List Transaction
tx_kwargs["access_list"] = [
{
"address": contract_address,
"storage_keys": [0],
}
]
if tx_type == 3:
# Type 3: EIP-4844 Blob Transaction
tx_kwargs["max_fee_per_blob_gas"] = fork.min_base_fee_per_blob_gas()
tx_kwargs["blob_versioned_hashes"] = add_kzg_version([0], Spec.blob_commitment_version_kzg)
elif tx_type == 4:
# Type 4: EIP-7702 Set Code Transaction
signer = pre.fund_eoa(amount=0)
tx_kwargs["authorization_list"] = [
AuthorizationTuple(
signer=signer,
address=Address(0),
nonce=0,
)
]
tx = Transaction(**tx_kwargs)
post = {contract_address: Account(storage=storage if error is None else {})}
state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"opcode",
[
pytest.param(Op.CALL),
pytest.param(Op.DELEGATECALL),
pytest.param(Op.CALLCODE),
pytest.param(Op.STATICCALL),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_subcall_context(
state_test: StateTestFiller, pre: Alloc, opcode: Op, fork: Fork, env: Environment
) -> None:
"""Test the transaction gas limit cap behavior for subcall context."""
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
caller_address = pre.deploy_contract(
code=Op.SSTORE(
0,
opcode(
gas=tx_gas_limit_cap + 1,
address=pre.deploy_contract(code=Op.MSTORE(0, Op.GAS) + Op.RETURN(0, 0x20)),
ret_offset=0,
ret_size=0,
),
)
)
# Passing tx limit cap as the gas parameter to *CALL operations
# All tests should pass and the *CALL operations should succeed
# Gas forwarded = min(remaining gas, specified gas parameter)
tx = Transaction(
to=caller_address,
sender=pre.fund_eoa(),
gas_limit=tx_gas_limit_cap,
)
post = {
caller_address: Account(storage={"0x00": 1}),
}
state_test(env=env, pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"exceed_block_gas_limit",
[
pytest.param(True, marks=pytest.mark.exception_test),
pytest.param(False),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_larger_than_block_gas_limit(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
env: Environment,
fork: Fork,
exceed_block_gas_limit: bool,
) -> None:
"""
Test multiple transactions with total gas larger than the block gas limit.
"""
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
tx_count = env.gas_limit // tx_gas_limit_cap
gas_spender_contract = pre.deploy_contract(code=Op.INVALID)
block = Block(
txs=[
Transaction(
to=gas_spender_contract,
sender=pre.fund_eoa(),
gas_limit=tx_gas_limit_cap,
error=TransactionException.GAS_ALLOWANCE_EXCEEDED if i >= tx_count else None,
)
for i in range(tx_count + int(exceed_block_gas_limit))
],
exception=TransactionException.GAS_ALLOWANCE_EXCEEDED if exceed_block_gas_limit else None,
)
blockchain_test(pre=pre, post={}, blocks=[block])
@pytest.mark.parametrize(
"exceed_gas_refund_limit",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.valid_from("Osaka")
def test_maximum_gas_refund(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
exceed_gas_refund_limit: bool,
) -> None:
"""Test the maximum gas refund behavior according to EIP-3529."""
gas_costs = fork.gas_costs()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
max_refund_quotient = fork.max_refund_quotient()
storage = Storage()
# Base Operation: SSTORE(slot, 0)
iteration_cost = gas_costs.G_STORAGE_RESET + gas_costs.G_BASE + gas_costs.G_VERY_LOW
gas_refund = gas_costs.R_STORAGE_CLEAR
# EIP-3529: Reduction in refunds
storage_count = tx_gas_limit_cap // iteration_cost
gas_used = storage_count * iteration_cost
maximum_gas_refund = gas_used // max_refund_quotient
gas_refund_count = maximum_gas_refund // gas_refund
# Base case: operations that fit within the refund limit
iteration_count = min(storage_count, gas_refund_count + int(exceed_gas_refund_limit))
assert iteration_cost * iteration_count <= tx_gas_limit_cap, (
"Iteration cost exceeds tx gas limit cap"
)
opcode = sum(
(Op.SSTORE(storage.store_next(0), Op.PUSH0) for _ in range(iteration_count)),
Bytecode(),
)
assert len(opcode) <= fork.max_code_size(), "code size exceeds max code size"
contract = pre.deploy_contract(
code=opcode,
storage={Hash(i): Hash(1) for i in range(iteration_count)},
)
tx = Transaction(
to=contract,
sender=pre.fund_eoa(),
gas_limit=tx_gas_limit_cap,
)
post = {contract: Account(storage=storage)}
state_test(pre=pre, post=post, tx=tx)
@pytest.fixture
def total_cost_floor_per_token(fork: Fork) -> int:
"""Total cost floor per token."""
gas_costs = fork.gas_costs()
return gas_costs.G_TX_DATA_FLOOR_TOKEN_COST
@pytest.mark.xdist_group(name="bigmem")
@pytest.mark.parametrize(
"exceed_tx_gas_limit,correct_intrinsic_cost_in_transaction_gas_limit",
[
pytest.param(True, False, marks=pytest.mark.exception_test),
pytest.param(True, True, marks=pytest.mark.exception_test),
pytest.param(False, True),
],
)
@pytest.mark.parametrize("zero_byte", [True, False])
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_full_calldata(
state_test: StateTestFiller,
pre: Alloc,
zero_byte: bool,
total_cost_floor_per_token: int,
exceed_tx_gas_limit: bool,
correct_intrinsic_cost_in_transaction_gas_limit: bool,
fork: Fork,
) -> None:
"""Test the transaction gas limit cap behavior for full calldata."""
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
gas_available = tx_gas_limit_cap - intrinsic_cost()
max_tokens_in_calldata = gas_available // total_cost_floor_per_token
num_of_bytes = max_tokens_in_calldata if zero_byte else max_tokens_in_calldata // 4
num_of_bytes += int(exceed_tx_gas_limit)
# Gas cost calculation based on EIP-7623:
# (https://eips.ethereum.org/EIPS/eip-7623)
#
# Simplified in this test case:
# - No execution gas used (no opcodes are executed)
# - Not a contract creation (no initcode)
#
# Token accounting:
# tokens_in_calldata = zero_bytes + 4 * non_zero_bytes
byte_data = b"\x00" if zero_byte else b"\xff"
correct_intrinsic_cost = intrinsic_cost(calldata=byte_data * num_of_bytes)
if exceed_tx_gas_limit:
assert correct_intrinsic_cost > tx_gas_limit_cap, (
"Correct intrinsic cost should exceed the tx gas limit cap"
)
else:
assert correct_intrinsic_cost <= tx_gas_limit_cap, (
"Correct intrinsic cost should be less than or equal to the tx gas limit cap"
)
tx_gas_limit = (
correct_intrinsic_cost
if correct_intrinsic_cost_in_transaction_gas_limit
else tx_gas_limit_cap
)
tx = Transaction(
to=pre.fund_eoa(),
data=byte_data * num_of_bytes,
gas_limit=tx_gas_limit,
sender=pre.fund_eoa(),
error=TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM
if correct_intrinsic_cost_in_transaction_gas_limit and exceed_tx_gas_limit
else TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST
if exceed_tx_gas_limit
else None,
)
state_test(
pre=pre,
post={},
tx=tx,
)
@pytest.mark.parametrize(
"exceed_tx_gas_limit",
[
pytest.param(True),
pytest.param(False),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_contract_creation(
state_test: StateTestFiller,
pre: Alloc,
total_cost_floor_per_token: int,
exceed_tx_gas_limit: bool,
fork: Fork,
) -> None:
"""Test the transaction gas limit cap behavior for contract creation."""
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
gas_available = tx_gas_limit_cap - intrinsic_cost(contract_creation=True)
max_tokens_in_calldata = gas_available // total_cost_floor_per_token
num_of_bytes = (max_tokens_in_calldata // 4) + int(exceed_tx_gas_limit)
# Cannot exceed max contract code size
num_of_bytes = min(num_of_bytes, fork.max_code_size())
code = Op.JUMPDEST * num_of_bytes
# Craft a contract creation transaction that exceeds the transaction gas
# limit cap
#
# Total cost =
# intrinsic cost (base tx cost + contract creation cost)
# + calldata cost + init code execution cost
#
# The contract body is filled with JUMPDEST instructions, so:
# total cost = intrinsic cost + calldata cost + (num_of_jumpdest * 1 gas)
#
# If the total cost exceeds the tx limit cap, the transaction should fail
total_cost = intrinsic_cost(contract_creation=True, calldata=code) + num_of_bytes
tx = Transaction(
to=None,
data=code,
gas_limit=tx_gas_limit_cap,
sender=pre.fund_eoa(),
error=TransactionException.INTRINSIC_GAS_BELOW_FLOOR_GAS_COST
if total_cost > tx_gas_limit_cap
else None,
)
state_test(
pre=pre,
post={},
tx=tx,
)
@pytest.mark.parametrize(
"exceed_tx_gas_limit,correct_intrinsic_cost_in_transaction_gas_limit",
[
pytest.param(True, False, marks=pytest.mark.exception_test),
pytest.param(True, True, marks=pytest.mark.exception_test),
pytest.param(False, True),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_access_list_with_diff_keys(
state_test: StateTestFiller,
exceed_tx_gas_limit: bool,
correct_intrinsic_cost_in_transaction_gas_limit: bool,
pre: Alloc,
fork: Fork,
) -> None:
"""
Test the transaction gas limit cap behavior for access list with different
storage keys.
"""
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
gas_available = tx_gas_limit_cap - intrinsic_cost()
gas_costs = fork.gas_costs()
gas_per_address = gas_costs.G_ACCESS_LIST_ADDRESS
gas_per_storage_key = gas_costs.G_ACCESS_LIST_STORAGE
gas_after_address = gas_available - gas_per_address
num_storage_keys = gas_after_address // gas_per_storage_key + int(exceed_tx_gas_limit)
access_address = Address("0x1234567890123456789012345678901234567890")
storage_keys = []
for i in range(num_storage_keys):
storage_keys.append(Hash(i))
access_list = [
AccessList(
address=access_address,
storage_keys=storage_keys,
)
]
correct_intrinsic_cost = intrinsic_cost(access_list=access_list)
if exceed_tx_gas_limit:
assert correct_intrinsic_cost > tx_gas_limit_cap, (
"Correct intrinsic cost should exceed the tx gas limit cap"
)
else:
assert correct_intrinsic_cost <= tx_gas_limit_cap, (
"Correct intrinsic cost should be less than or equal to the tx gas limit cap"
)
tx_gas_limit = (
correct_intrinsic_cost
if correct_intrinsic_cost_in_transaction_gas_limit
else tx_gas_limit_cap
)
tx = Transaction(
to=pre.fund_eoa(),
gas_limit=tx_gas_limit,
sender=pre.fund_eoa(),
access_list=access_list,
error=TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM
if correct_intrinsic_cost_in_transaction_gas_limit and exceed_tx_gas_limit
else TransactionException.INTRINSIC_GAS_TOO_LOW
if exceed_tx_gas_limit
else None,
)
state_test(
pre=pre,
post={},
tx=tx,
)
@pytest.mark.parametrize(
"exceed_tx_gas_limit,correct_intrinsic_cost_in_transaction_gas_limit",
[
pytest.param(True, False, marks=pytest.mark.exception_test),
pytest.param(True, True, marks=pytest.mark.exception_test),
pytest.param(False, True),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_access_list_with_diff_addr(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
exceed_tx_gas_limit: bool,
correct_intrinsic_cost_in_transaction_gas_limit: bool,
) -> None:
"""
Test the transaction gas limit cap behavior for access list with different
addresses.
"""
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
gas_available = tx_gas_limit_cap - intrinsic_cost()
gas_costs = fork.gas_costs()
gas_per_address = gas_costs.G_ACCESS_LIST_ADDRESS
gas_per_storage_key = gas_costs.G_ACCESS_LIST_STORAGE
account_num = gas_available // (gas_per_address + gas_per_storage_key) + int(
exceed_tx_gas_limit
)
access_list = [
AccessList(
address=pre.fund_eoa(),
storage_keys=[Hash(i)],
)
for i in range(account_num)
]
correct_intrinsic_cost = intrinsic_cost(access_list=access_list)
if exceed_tx_gas_limit:
assert correct_intrinsic_cost > tx_gas_limit_cap, (
"Correct intrinsic cost should exceed the tx gas limit cap"
)
else:
assert correct_intrinsic_cost <= tx_gas_limit_cap, (
"Correct intrinsic cost should be less than or equal to the tx gas limit cap"
)
tx_gas_limit = (
correct_intrinsic_cost
if correct_intrinsic_cost_in_transaction_gas_limit
else tx_gas_limit_cap
)
tx = Transaction(
to=pre.fund_eoa(),
gas_limit=tx_gas_limit,
sender=pre.fund_eoa(),
access_list=access_list,
error=TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM
if correct_intrinsic_cost_in_transaction_gas_limit and exceed_tx_gas_limit
else TransactionException.INTRINSIC_GAS_TOO_LOW
if exceed_tx_gas_limit
else None,
)
state_test(
pre=pre,
post={},
tx=tx,
)
@pytest.mark.parametrize(
"exceed_tx_gas_limit,correct_intrinsic_cost_in_transaction_gas_limit",
[
pytest.param(True, False, marks=pytest.mark.exception_test),
pytest.param(True, True, marks=pytest.mark.exception_test),
pytest.param(False, True),
],
)
@pytest.mark.valid_from("Osaka")
def test_tx_gas_limit_cap_authorized_tx(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
exceed_tx_gas_limit: bool,
correct_intrinsic_cost_in_transaction_gas_limit: bool,
) -> None:
"""Test a transaction limit cap with authorized tx."""
intrinsic_cost = fork.transaction_intrinsic_cost_calculator()
tx_gas_limit_cap = fork.transaction_gas_limit_cap()
assert tx_gas_limit_cap is not None, "Fork does not have a transaction gas limit cap"
gas_available = tx_gas_limit_cap - intrinsic_cost()
gas_costs = fork.gas_costs()
gas_per_address = gas_costs.G_ACCESS_LIST_ADDRESS
per_empty_account_cost = 25_000
auth_list_length = gas_available // (gas_per_address + per_empty_account_cost) + int(
exceed_tx_gas_limit
)
# EIP-7702 authorization transaction cost:
# 21000 + 16 * non-zero calldata bytes + 4 * zero calldata bytes + 1900 *
# access list storage key count + 2400 * access list address count +
# PER_EMPTY_ACCOUNT_COST * authorization list length
#
# There is no calldata and no storage keys in this test case and the access
# address list count is equal to the authorization list length
#
# total cost = 21000 + (2400 + 25_000) * auth_list_length
auth_address = pre.deploy_contract(code=Op.STOP)
auth_signers = [pre.fund_eoa() for _ in range(auth_list_length)]
access_list = [
AccessList(
address=addr,
storage_keys=[],
)
for addr in auth_signers
]
auth_tuples = [
AuthorizationTuple(
signer=signer,
address=auth_address,
nonce=signer.nonce,
)
for signer in auth_signers
]
correct_intrinsic_cost = intrinsic_cost(
access_list=access_list, authorization_list_or_count=auth_list_length
)
if exceed_tx_gas_limit:
assert correct_intrinsic_cost > tx_gas_limit_cap, (
"Correct intrinsic cost should exceed the tx gas limit cap"
)
else:
assert correct_intrinsic_cost <= tx_gas_limit_cap, (
"Correct intrinsic cost should be less than or equal to the tx gas limit cap"
)
tx_gas_limit = (
correct_intrinsic_cost
if correct_intrinsic_cost_in_transaction_gas_limit
else tx_gas_limit_cap
)
tx = Transaction(
to=pre.fund_eoa(),
gas_limit=tx_gas_limit,
sender=pre.fund_eoa(),
access_list=access_list,
authorization_list=auth_tuples,
error=TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM
if correct_intrinsic_cost_in_transaction_gas_limit and exceed_tx_gas_limit
else TransactionException.INTRINSIC_GAS_TOO_LOW
if exceed_tx_gas_limit
else None,
)
state_test(
pre=pre,
post={},
tx=tx,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py | tests/osaka/eip7825_transaction_gas_limit_cap/__init__.py | """
Tests [EIP-7825: Transaction Gas Limit Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py | tests/osaka/eip7825_transaction_gas_limit_cap/test_tx_gas_limit_transition_fork.py | """
Transaction gas limit cap fork transition tests.
Tests for fork transition behavior in [EIP-7825: Transaction Gas Limit
Cap](https://eips.ethereum.org/EIPS/eip-7825).
"""
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Transaction,
TransactionException,
)
from ethereum_test_vm import Opcodes as Op
from .spec import ref_spec_7825
REFERENCE_SPEC_GIT_PATH = ref_spec_7825.git_path
REFERENCE_SPEC_VERSION = ref_spec_7825.version
@EIPChecklist.ModifiedTransactionValidityConstraint.Test.ForkTransition.AcceptedBeforeFork()
@EIPChecklist.ModifiedTransactionValidityConstraint.Test.ForkTransition.RejectedBeforeFork()
@EIPChecklist.ModifiedTransactionValidityConstraint.Test.ForkTransition.AcceptedAfterFork()
@EIPChecklist.ModifiedTransactionValidityConstraint.Test.ForkTransition.RejectedAfterFork()
@pytest.mark.valid_at_transition_to("Osaka", subsequent_forks=True)
@pytest.mark.parametrize(
"transaction_at_cap",
[
pytest.param(True, id="at_cap"),
pytest.param(False, marks=pytest.mark.exception_test, id="above_cap"),
],
)
def test_transaction_gas_limit_cap_at_transition(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
transaction_at_cap: bool,
) -> None:
"""
Test transaction gas limit cap behavior at the Osaka transition.
Before timestamp 15000: No gas limit cap (transactions with gas > 2^24 are
valid) At/after timestamp 15000: Gas limit cap of 2^24 is enforced
"""
contract_address = pre.deploy_contract(
code=Op.SSTORE(Op.TIMESTAMP, Op.ADD(Op.SLOAD(Op.TIMESTAMP), 1)) + Op.STOP,
)
# Get the gas limit cap at fork activation
tx_gas_cap = fork.transaction_gas_limit_cap(timestamp=15_000)
assert tx_gas_cap is not None, "Gas limit cap should not be None after fork activation"
# Test boundary: cap + 1 should fail after fork activation
above_cap = tx_gas_cap + 1
# Before fork activation: both cap and above_cap transactions should
# succeed
at_cap_tx_before_fork = Transaction(
ty=0, # Legacy transaction
to=contract_address,
gas_limit=tx_gas_cap,
sender=pre.fund_eoa(),
)
above_cap_tx_before_fork = Transaction(
ty=0, # Legacy transaction
to=contract_address,
gas_limit=above_cap,
sender=pre.fund_eoa(),
)
post_cap_tx_error = TransactionException.GAS_LIMIT_EXCEEDS_MAXIMUM
# After fork activation: test at cap vs above cap
transition_tx = Transaction(
ty=0, # Legacy transaction
to=contract_address,
gas_limit=tx_gas_cap if transaction_at_cap else above_cap,
sender=pre.fund_eoa(),
error=None if transaction_at_cap else post_cap_tx_error,
)
blocks = []
# Before transition (timestamp < 15000): both cap and above_cap
# transactions should succeed
blocks.append(
Block(
timestamp=14_999,
txs=[above_cap_tx_before_fork, at_cap_tx_before_fork],
)
)
# At transition (timestamp = 15000):
# - transaction at cap should succeed
# - transaction above cap (cap + 1) should fail
blocks.append(
Block(
timestamp=15_000,
txs=[transition_tx],
exception=post_cap_tx_error if not transaction_at_cap else None,
)
)
# Post state: storage should be updated by successful transactions
post = {
contract_address: Account(
storage={
# Set by both transactions in first block (before transition):
14_999: 2,
# After transition:
# - Set by transaction at cap (should succeed)
# - Not set by transaction above cap (should fail)
15_000: 1 if transaction_at_cap else 0,
}
)
}
blockchain_test(pre=pre, blocks=blocks, post=post)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7939_count_leading_zeros/spec.py | tests/osaka/eip7939_count_leading_zeros/spec.py | """Defines EIP-7939 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7939 = ReferenceSpec("EIPS/eip-7939.md", "c8321494fdfbfda52ad46c3515a7ca5dc86b857c")
@dataclass(frozen=True)
class Spec:
"""Constants and helpers for the CLZ opcode."""
CLZ = 0x1E
CLZ_GAS_COST = 5
@classmethod
def calculate_clz(cls, value: int) -> int:
"""Calculate the count of leading zeros for a 256-bit value."""
if value == 0:
return 256
return 256 - value.bit_length()
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py | tests/osaka/eip7939_count_leading_zeros/test_count_leading_zeros.py | """
Tests [EIP-7939: Count leading zeros (CLZ)](https://eips.ethereum.org/EIPS/eip-7939).
"""
import pytest
from ethereum_test_base_types import Storage
from ethereum_test_checklists import EIPChecklist
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
AuthorizationTuple,
Block,
BlockchainTestFiller,
Bytecode,
CodeGasMeasure,
Environment,
StateTestFiller,
Transaction,
compute_create_address,
)
from ethereum_test_vm import Opcodes as Op
from ...prague.eip7702_set_code_tx.spec import Spec as Spec7702
from .spec import Spec, ref_spec_7939
REFERENCE_SPEC_GIT_PATH = ref_spec_7939.git_path
REFERENCE_SPEC_VERSION = ref_spec_7939.version
def clz_parameters() -> list:
"""Generate all test case parameters."""
test_cases = []
# Format 0x000...000: all zeros
test_cases.append(("zero", 0, 256))
# Format 0xb000...111: leading zeros followed by ones
for bits in range(257):
value = (2**256 - 1) >> bits
expected_clz = bits
assert expected_clz == Spec.calculate_clz(value), (
f"CLZ calculation mismatch for leading_zeros_{bits}: "
f"manual={expected_clz}, spec={Spec.calculate_clz(value)}, value={hex(value)}"
)
test_cases.append((f"leading_zeros_{bits}", value, expected_clz))
# Format 0xb010...000: single bit set (1 << N for N = 1…256)
for bits in range(1, 257):
if bits == 256:
# Special case: 1 << 256 = 0 in 256-bit arithmetic (overflow)
value = 0
expected_clz = 256
else:
value = 1 << bits
expected_clz = 255 - bits
assert expected_clz == Spec.calculate_clz(value), (
f"CLZ calculation mismatch for single_bit_{bits}: "
f"manual={expected_clz}, spec={Spec.calculate_clz(value)}, value={hex(value)}"
)
test_cases.append((f"single_bit_{bits}", value, expected_clz))
# Arbitrary edge cases
arbitrary_values = [
0x123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0,
0x00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF00FF,
0x0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F0F,
0xDEADBEEFCAFEBABE0123456789ABCDEF,
0x0123456789ABCDEF,
(1 << 128) + 1,
(1 << 200) + (1 << 100),
2**255 - 1,
]
for i, value in enumerate(arbitrary_values):
expected_clz = Spec.calculate_clz(value)
test_cases.append((f"arbitrary_{i}", value, expected_clz))
return test_cases
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize(
"test_id,value,expected_clz",
clz_parameters(),
ids=[f"{test_data[0]}-expected_clz_{test_data[2]}" for test_data in clz_parameters()],
)
def test_clz_opcode_scenarios(
state_test: StateTestFiller,
pre: Alloc,
test_id: str,
value: int,
expected_clz: int,
) -> None:
"""
Test CLZ opcode functionality.
Cases:
- Format 0xb000...111: leading zeros followed by ones
(2**256 - 1 >> bits)
- Format 0xb010...000: single bit set at position (1 << bits)
Test coverage:
- Leading zeros pattern: 0b000...111 (0 to 256 leading zeros)
- Single bit pattern: 0b010...000 (bit at each possible position)
- Edge cases: CLZ(0) = 256, CLZ(2^256-1) = 0
"""
sender = pre.fund_eoa()
contract_address = pre.deploy_contract(
code=Op.SSTORE(0, Op.CLZ(value)),
storage={"0x00": "0xdeadbeef"},
)
tx = Transaction(
to=contract_address,
sender=sender,
gas_limit=200_000,
)
post = {
contract_address: Account(storage={"0x00": expected_clz}),
}
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Osaka")
def test_clz_gas_cost(state_test: StateTestFiller, pre: Alloc, fork: Fork) -> None:
"""Test CLZ opcode gas cost."""
contract_address = pre.deploy_contract(
Op.SSTORE(
0,
CodeGasMeasure(
code=Op.CLZ(Op.PUSH1(1)),
extra_stack_items=1,
overhead_cost=fork.gas_costs().G_VERY_LOW,
),
),
storage={"0x00": "0xdeadbeef"},
)
sender = pre.fund_eoa()
tx = Transaction(to=contract_address, sender=sender, gas_limit=200_000)
post = {
contract_address: Account( # Cost measured is CLZ + PUSH1
storage={"0x00": fork.gas_costs().G_LOW}
),
}
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.GasUsage.Normal()
@EIPChecklist.Opcode.Test.GasUsage.OutOfGasExecution()
@EIPChecklist.Opcode.Test.GasUsage.ExtraGas()
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize("bits", [0, 64, 128, 255])
@pytest.mark.parametrize("gas_cost_delta", [-2, -1, 0, 1, 2])
def test_clz_gas_cost_boundary(
state_test: StateTestFiller,
pre: Alloc,
fork: Fork,
bits: int,
gas_cost_delta: int,
) -> None:
"""Test CLZ opcode gas cost boundary."""
code = Op.PUSH32(1 << bits) + Op.CLZ
contract_address = pre.deploy_contract(code=code)
call_code = Op.SSTORE(
0,
Op.CALL(
gas=fork.gas_costs().G_VERY_LOW + Spec.CLZ_GAS_COST + gas_cost_delta,
address=contract_address,
),
)
call_address = pre.deploy_contract(
code=call_code,
storage={"0x00": "0xdeadbeef"},
)
tx = Transaction(to=call_address, sender=pre.fund_eoa(), gas_limit=200_000)
post = {call_address: Account(storage={"0x00": 0 if gas_cost_delta < 0 else 1})}
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.StackUnderflow()
@EIPChecklist.Opcode.Test.StackComplexOperations.StackHeights.Zero()
@pytest.mark.valid_from("Osaka")
def test_clz_stack_underflow(state_test: StateTestFiller, pre: Alloc) -> None:
"""
Test CLZ opcode with empty stack (should revert due to stack underflow).
"""
sender = pre.fund_eoa()
callee_address = pre.deploy_contract(
code=Op.CLZ + Op.STOP, # No stack items, should underflow
)
caller_address = pre.deploy_contract(
code=Op.SSTORE(0, Op.CALL(gas=0xFFFF, address=callee_address)),
storage={"0x00": "0xdeadbeef"},
)
tx = Transaction(
to=caller_address,
sender=sender,
gas_limit=200_000,
)
post = {
caller_address: Account(
storage={"0x00": 0} # Call failed due to stack underflow
),
}
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.StackComplexOperations.StackHeights.Odd()
@EIPChecklist.Opcode.Test.StackComplexOperations.StackHeights.Even()
@pytest.mark.valid_from("Osaka")
def test_clz_stack_not_overflow(state_test: StateTestFiller, pre: Alloc, fork: Fork) -> None:
"""Test CLZ opcode never causes stack overflow."""
max_stack_items = fork.max_stack_height()
code = Bytecode()
post = {}
code += Op.PUSH0 * (max_stack_items - 2)
for i in range(256):
code += Op.PUSH1(i) + Op.CLZ(1 << i) + Op.SWAP1 + Op.SSTORE
code_address = pre.deploy_contract(code=code)
post[code_address] = Account(storage={i: 255 - i for i in range(256)})
tx = Transaction(
to=code_address,
sender=pre.fund_eoa(),
gas_limit=6_000_000,
)
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Osaka")
def test_clz_push_operation_same_value(state_test: StateTestFiller, pre: Alloc) -> None:
"""Test CLZ opcode returns the same value via different push operations."""
storage = {}
code = Op.SSTORE(0, Op.CLZ(Op.PUSH0))
storage[0x00] = 256
for bit in range(1, 33): # PUSH value
for push_n in range(bit, 33): # PUSHn opcode
op = getattr(Op, f"PUSH{push_n}")
key = 100 * bit + push_n
code += Op.SSTORE(key, Op.CLZ(op[1 << bit]))
storage[key] = 255 - bit
code_address = pre.deploy_contract(code=code)
tx = Transaction(
to=code_address,
sender=pre.fund_eoa(),
gas_limit=12_000_000,
)
post = {
code_address: Account(
storage=storage,
)
}
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.ForkTransition.Invalid()
@EIPChecklist.Opcode.Test.ForkTransition.At()
@pytest.mark.valid_at_transition_to("Osaka", subsequent_forks=True)
def test_clz_fork_transition(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
"""Test CLZ opcode behavior at fork transition."""
sender = pre.fund_eoa()
callee_address = pre.deploy_contract(
code=Op.SSTORE(Op.TIMESTAMP, Op.CLZ(1 << 100)) + Op.STOP,
storage={14_999: "0xdeadbeef"},
)
caller_address = pre.deploy_contract(
code=Op.SSTORE(Op.TIMESTAMP, Op.CALL(gas=0xFFFF, address=callee_address)),
storage={14_999: "0xdeadbeef"},
)
blocks = [
Block(
timestamp=14_999,
txs=[
Transaction(
to=caller_address,
sender=sender,
nonce=0,
gas_limit=200_000,
)
],
),
Block(
timestamp=15_000,
txs=[
Transaction(
to=caller_address,
sender=sender,
nonce=1,
gas_limit=200_000,
)
],
),
Block(
timestamp=15_001,
txs=[
Transaction(
to=caller_address,
sender=sender,
nonce=2,
gas_limit=200_000,
)
],
),
]
blockchain_test(
pre=pre,
blocks=blocks,
post={
caller_address: Account(
storage={
14_999: 0, # Call fails as opcode not valid before Osaka
15_000: 1, # Call succeeds on fork transition block
15_001: 1, # Call continues to succeed after transition
}
),
callee_address: Account(
storage={
# CLZ not valid before fork, storage unchanged
14_999: "0xdeadbeef",
# CLZ valid on transition block, CLZ(1 << 100) = 155
15_000: 155,
# CLZ continues to be valid after transition
15_001: 155,
}
),
},
)
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize("opcode", [Op.JUMPI, Op.JUMP])
@pytest.mark.parametrize("valid_jump", [True, False])
@pytest.mark.parametrize("jumpi_condition", [True, False])
@pytest.mark.parametrize("bits", [0, 16, 64, 128, 255])
def test_clz_jump_operation(
state_test: StateTestFiller,
pre: Alloc,
opcode: Op,
valid_jump: bool,
jumpi_condition: bool,
bits: int,
) -> None:
"""Test CLZ opcode with valid and invalid jump."""
if opcode == Op.JUMP and not jumpi_condition:
pytest.skip("Duplicate case for JUMP.")
code = Op.PUSH32(1 << bits)
if opcode == Op.JUMPI:
code += Op.PUSH1(jumpi_condition)
code += Op.PUSH1(len(code) + 3) + opcode
if valid_jump:
code += Op.JUMPDEST
code += Op.CLZ + Op.PUSH0 + Op.SSTORE + Op.RETURN(0, 0)
callee_address = pre.deploy_contract(code=code)
caller_address = pre.deploy_contract(
code=Op.SSTORE(0, Op.CALL(gas=0xFFFF, address=callee_address)),
storage={"0x00": "0xdeadbeef"},
)
tx = Transaction(
to=caller_address,
sender=pre.fund_eoa(),
gas_limit=200_000,
)
expected_clz = 255 - bits
post = {
caller_address: Account(storage={"0x00": 1 if valid_jump or not jumpi_condition else 0}),
}
if valid_jump or not jumpi_condition:
post[callee_address] = Account(storage={"0x00": expected_clz})
state_test(pre=pre, post=post, tx=tx)
auth_account_start_balance = 0
@EIPChecklist.Opcode.Test.ExecutionContext.SetCode()
@pytest.mark.valid_from("Osaka")
def test_clz_from_set_code(
state_test: StateTestFiller,
pre: Alloc,
) -> None:
"""Test the address opcode in a set-code transaction."""
storage = Storage()
auth_signer = pre.fund_eoa(auth_account_start_balance)
set_code = Bytecode()
for bits in [0, 1, 128, 255]:
expected_clz = 255 - bits
set_code += Op.SSTORE(storage.store_next(expected_clz), Op.CLZ(1 << bits))
set_code += Op.STOP
set_code_to_address = pre.deploy_contract(set_code)
tx = Transaction(
gas_limit=200_000,
to=auth_signer,
value=0,
authorization_list=[
AuthorizationTuple(
address=set_code_to_address,
nonce=0,
signer=auth_signer,
),
],
sender=pre.fund_eoa(),
)
state_test(
env=Environment(),
pre=pre,
tx=tx,
post={
set_code_to_address: Account(storage={}),
auth_signer: Account(
nonce=1,
code=Spec7702.delegation_designation(set_code_to_address),
storage=storage,
),
},
)
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize("bits", [0, 64, 255])
@pytest.mark.parametrize("opcode", [Op.CODECOPY, Op.EXTCODECOPY])
def test_clz_code_copy_operation(
state_test: StateTestFiller, pre: Alloc, bits: int, opcode: Op
) -> None:
"""Test CLZ opcode with code copy operation."""
storage = Storage()
expected_value = 255 - bits
clz_code_offset = len(Op.CLZ(1 << bits)) - 1 # Offset to CLZ opcode
mload_value = Spec.CLZ << 248 # CLZ opcode in MSB position (0x1E000...000)
target_address = pre.deploy_contract(code=Op.CLZ(1 << bits))
clz_contract_address = pre.deploy_contract(
code=(
Op.CLZ(1 << bits) # Calculate CLZ of the value
+ Op.SSTORE(storage.store_next(expected_value), Op.CLZ(1 << bits)) # Store CLZ result
+ ( # Load CLZ byte from code with CODECOPY or EXTCODECOPY
Op.CODECOPY(dest_offset=0, offset=clz_code_offset, size=1)
if opcode == Op.CODECOPY
else Op.EXTCODECOPY(
address=target_address, dest_offset=0, offset=clz_code_offset, size=1
)
)
# Store loaded CLZ byte
+ Op.SSTORE(storage.store_next(mload_value), Op.MLOAD(0))
),
storage={"0x00": "0xdeadbeef"},
)
post = {
clz_contract_address: Account(
storage={
"0x00": expected_value,
"0x01": mload_value,
}
)
}
tx = Transaction(
to=clz_contract_address,
sender=pre.fund_eoa(),
gas_limit=200_000,
)
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize("bits", [0, 64, 255])
@pytest.mark.parametrize("opcode", [Op.CODECOPY, Op.EXTCODECOPY])
def test_clz_with_memory_operation(
state_test: StateTestFiller, pre: Alloc, bits: int, opcode: Op
) -> None:
"""Test CLZ opcode with memory operation."""
storage = Storage()
expected_value = 255 - bits
# Target code pattern:
# PUSH32 (1 << bits)
# PUSH0
# MSTORE
#
# This sequence stores a 32-byte value in memory.
# Later, we copy the immediate value from the PUSH32 instruction into
# memory using CODECOPY or EXTCODECOPY, and then load it with MLOAD for
# the CLZ test.
target_code = Op.PUSH32(1 << bits)
offset = 1
target_address = pre.deploy_contract(code=target_code)
clz_contract_address = pre.deploy_contract(
code=(
target_code
+ Op.SSTORE(storage.store_next(expected_value), Op.CLZ(1 << bits)) # Store CLZ result
+ (
Op.CODECOPY(dest_offset=0, offset=offset, size=0x20)
if opcode == Op.CODECOPY
else Op.EXTCODECOPY(
address=target_address, dest_offset=0, offset=offset, size=0x20
)
)
+ Op.SSTORE(storage.store_next(expected_value), Op.CLZ(Op.MLOAD(0)))
),
storage={"0x00": "0xdeadbeef"},
)
post = {
clz_contract_address: Account(storage={"0x00": expected_value, "0x01": expected_value}),
}
tx = Transaction(
to=clz_contract_address,
sender=pre.fund_eoa(),
gas_limit=200_000,
)
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.ExecutionContext.Initcode.Behavior.Tx()
@pytest.mark.valid_from("Osaka")
def test_clz_initcode_context(state_test: StateTestFiller, pre: Alloc) -> None:
"""Test CLZ opcode behavior when creating a contract."""
bits = [0, 1, 64, 128, 255]
storage = Storage()
init_code = Bytecode()
for bit in bits:
init_code += Op.SSTORE(storage.store_next(255 - bit), Op.CLZ(1 << bit))
sender_address = pre.fund_eoa()
contract_address = compute_create_address(address=sender_address, nonce=0)
tx = Transaction(
to=None,
gas_limit=6_000_000,
data=init_code,
sender=sender_address,
)
post = {
contract_address: Account(storage=storage),
}
state_test(pre=pre, post=post, tx=tx)
@EIPChecklist.Opcode.Test.ExecutionContext.Initcode.Behavior.Opcode()
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize("opcode", [Op.CREATE, Op.CREATE2])
def test_clz_initcode_create(state_test: StateTestFiller, pre: Alloc, opcode: Op) -> None:
"""Test CLZ opcode behavior when creating a contract."""
bits = [0, 1, 64, 128, 255] # expected values: [255, 254, 191, 127, 0]
storage = Storage()
ext_code = Bytecode()
for bit in bits:
ext_code += Op.SSTORE(storage.store_next(255 - bit), Op.CLZ(1 << bit))
sender_address = pre.fund_eoa()
create_contract = (
Op.CALLDATACOPY(offset=0, size=len(ext_code))
+ opcode(offset=0, size=len(ext_code))
+ Op.STOP
)
factory_contract_address = pre.deploy_contract(code=create_contract)
created_contract_address = compute_create_address(
address=factory_contract_address, nonce=1, initcode=ext_code, opcode=opcode
)
tx = Transaction(
to=factory_contract_address,
gas_limit=200_000,
data=ext_code,
sender=sender_address,
)
post = {
created_contract_address: Account(
storage=storage,
),
}
state_test(pre=pre, post=post, tx=tx)
class CallingContext:
"""Context for calling operations."""
callee_context = 1 # CALL
caller_context = 2 # DELEGATECALL
no_context = 3 # STATICCALL
@EIPChecklist.Opcode.Test.ExecutionContext.Call()
@EIPChecklist.Opcode.Test.ExecutionContext.Delegatecall()
@EIPChecklist.Opcode.Test.ExecutionContext.Callcode()
@EIPChecklist.Opcode.Test.ExecutionContext.Staticcall()
@pytest.mark.valid_from("Osaka")
@pytest.mark.parametrize(
"opcode,context",
[
pytest.param(Op.CALL, CallingContext.callee_context, id="call"),
pytest.param(Op.DELEGATECALL, CallingContext.caller_context, id="delegatecall"),
pytest.param(Op.CALLCODE, CallingContext.caller_context, id="callcode"),
pytest.param(Op.STATICCALL, CallingContext.no_context, id="staticcall"),
],
)
def test_clz_call_operation(
state_test: StateTestFiller, pre: Alloc, opcode: Op, context: CallingContext
) -> None:
"""Test CLZ opcode with call operation."""
test_cases = [0, 64, 255]
# Storage Layout
callee_storage = Storage()
caller_storage = Storage()
callee_code = Bytecode()
for bits in reversed(test_cases):
callee_code += Op.CLZ(1 << bits)
if context != CallingContext.no_context:
for bits in test_cases:
callee_code += Op.SSTORE(callee_storage.store_next(255 - bits), Op.CLZ(1 << bits))
for i in range(len(test_cases)):
callee_code += Op.PUSH32(i * 0x20) + Op.MSTORE
callee_code += Op.RETURN(0, len(test_cases) * 0x20)
callee_address = pre.deploy_contract(code=callee_code)
caller_code = opcode(
gas=0xFFFF, address=callee_address, ret_offset=0, ret_size=len(test_cases) * 0x20
)
for i, bits in enumerate(test_cases):
caller_code += Op.SSTORE(caller_storage.store_next(255 - bits), Op.MLOAD(i * 0x20))
caller_address = pre.deploy_contract(code=caller_code)
tx = Transaction(
to=caller_address,
sender=pre.fund_eoa(),
gas_limit=200_000,
)
post = {}
if context == CallingContext.caller_context:
post[caller_address] = Account(storage=callee_storage)
elif context == CallingContext.callee_context:
post[callee_address] = Account(storage=callee_storage)
post[caller_address] = Account(storage=caller_storage)
elif context == CallingContext.no_context:
post[caller_address] = Account(storage=caller_storage)
state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7939_count_leading_zeros/__init__.py | tests/osaka/eip7939_count_leading_zeros/__init__.py | """
Tests [EIP-7939: Count leading zeros (CLZ) opcode](https://eips.ethereum.org/EIPS/eip-7939).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/spec.py | tests/osaka/eip7951_p256verify_precompiles/spec.py | """Defines EIP-7951 specification constants and functions."""
from dataclasses import dataclass
from typing import Sized, SupportsBytes
from ethereum_test_tools import Address, Bytes
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7951 = ReferenceSpec("EIPS/eip-7951.md", "06aadd458ee04ede80498db55927b052eb5bef38")
class BytesConcatenation(SupportsBytes, Sized):
"""A class that can be concatenated with bytes."""
def __len__(self) -> int:
"""Return length of the object when converted to bytes."""
return len(bytes(self))
def __add__(self, other: bytes | SupportsBytes) -> bytes:
"""Concatenates the object with another bytes object."""
return bytes(self) + bytes(other)
def __radd__(self, other: bytes | SupportsBytes) -> bytes:
"""Concatenates the object with another bytes object."""
return bytes(other) + bytes(self)
@dataclass(frozen=True)
class FieldElement(BytesConcatenation):
"""Dataclass that defines a single field element."""
value: int = 0
def __bytes__(self) -> bytes:
"""Convert field element to bytes."""
return self.value.to_bytes(32, byteorder="big")
# Specific field element classes
@dataclass(frozen=True)
class R(FieldElement):
"""Dataclass that defines a R component of the signature."""
pass
@dataclass(frozen=True)
class S(FieldElement):
"""Dataclass that defines a S component of the signature."""
pass
@dataclass(frozen=True)
class X(FieldElement):
"""Dataclass that defines a X coordinate value."""
pass
@dataclass(frozen=True)
class Y(FieldElement):
"""Dataclass that defines a Y coordinate value."""
pass
@dataclass(frozen=True)
class H(FieldElement):
"""Dataclass that defines a Message Hash value."""
pass
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-7951 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7951.
"""
# Address
P256VERIFY = 0x100
# Gas constants
P256VERIFY_GAS = 6900
# Curve Parameters
P = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF ## Base field modulus
A = 0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC ## Curve Coefficient
B = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B ## Curve Coefficient
N = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551 ## Subgroup Order
Gx = 0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296 ## Generator Point X
Gy = 0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5 ## Generator Point Y
# Other constants
SUCCESS_RETURN_VALUE = b"\x01".rjust(32, b"\x00")
INVALID_RETURN_VALUE = b""
DELEGATION_DESIGNATION = Bytes("ef0100")
# Test constants, from:
# https://github.com/C2SP/wycheproof/blob/4a6c2bf5dc4c0b67c770233ad33961ee653996a0/testvectors/ecdsa_secp256r1_sha256_test.json#L35
H0 = H(0xBB5A52F42F9C9261ED4361F59422A1E30036E7C32B270C8807A419FECA605023)
R0 = R(0x2BA3A8BE6B94D5EC80A6D9D1190A436EFFE50D85A1EEE859B8CC6AF9BD5C2E18)
S0 = S(0x4CD60B855D442F5B3C7B11EB6C4E0AE7525FE710FAB9AA7C77A67F79E6FADD76)
X0 = X(0x2927B10512BAE3EDDCFE467828128BAD2903269919F7086069C8C4DF6C732838)
Y0 = Y(0xC7787964EAAC00E5921FB1498A60F4606766B3D9685001558D1A974E7341513E)
# Test constants from:
# https://github.com/C2SP/wycheproof/blob/4a6c2bf5dc4c0b67c770233ad33961ee653996a0/testvectors/ecdsa_webcrypto_test.json#L1064
# k*G has a large x-coordinate which also gives very small r.
H1 = H(0x532EAABD9574880DBF76B9B8CC00832C20A6EC113D682299550D7A6E0F345E25)
R1 = R(0x000000000000000000000000000000004319055358E8617B0C46353D039CDAAB)
S1 = S(0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC63254E)
X1 = X(0xD705D16F80987E2D9B1A6957D29CE22FEBF7D10FA515153182415C8361BAACA4)
Y1 = Y(0xB1FC105EE5CE80D514EC1238BEAE2037A6F83625593620D460819E8682160926)
@staticmethod
def delegation_designation(address: Address) -> Bytes:
"""Return delegation designation for the given address."""
return Bytes(Spec.DELEGATION_DESIGNATION + bytes(address))
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py | tests/osaka/eip7951_p256verify_precompiles/test_p256verify.py | """
Tests for [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
"""
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_tools import (
Address,
Alloc,
Environment,
StateTestFiller,
Storage,
Transaction,
compute_create_address,
)
from ethereum_test_tools import Opcodes as Op
from .helpers import vectors_from_file
from .spec import H, R, S, Spec, X, Y, ref_spec_7951
REFERENCE_SPEC_GIT_PATH = ref_spec_7951.git_path
REFERENCE_SPEC_VERSION = ref_spec_7951.version
pytestmark = [
pytest.mark.valid_from("Osaka"),
]
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
vectors_from_file("secp256r1_signature_specific.json")
+ vectors_from_file("secp256r1_shamir_multiplication.json")
+ vectors_from_file("secp256r1_special_case_hash.json")
+ vectors_from_file("secp256r1_u1_u2.json")
+ vectors_from_file("secp256r1_k_and_s.json")
+ vectors_from_file("secp256r1_public_key.json"),
# Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256 test
# suite, valid cases are from this source:
# https://github.com/C2SP/wycheproof/blob/main/testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@EIPChecklist.Precompile.Test.Inputs.Valid()
@EIPChecklist.Precompile.Test.Inputs.MaxValues()
def test_wycheproof_valid(
state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction
) -> None:
"""Test P256Verify precompile with Wycheproof test suite (valid cases)."""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
vectors_from_file("secp256r1_special_case_r_s.json")
+ vectors_from_file("secp256r1_modified_r_s.json"),
# Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256
# test suite, invalid cases
# Source: https://github.com/C2SP/wycheproof/blob/main/
# testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@EIPChecklist.Precompile.Test.Inputs.Invalid()
@EIPChecklist.Precompile.Test.Inputs.MaxValues()
def test_wycheproof_invalid(
state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction
) -> None:
"""
Test P256Verify precompile with Wycheproof test suite
(invalid cases).
"""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data,expected_output,vector_gas_value",
vectors_from_file("secp256r1_small_large_r_s.json")
+ vectors_from_file("secp256r1_special_points.json"),
# Test vectors generated from Wycheproof's ECDSA secp256r1 SHA-256
# test suite, valid/invalid cases
# Source: https://github.com/C2SP/wycheproof/blob/main/
# testvectors/ecdsa_secp256r1_sha256_test.json
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Normal()
@EIPChecklist.Precompile.Test.Inputs.MaxValues()
def test_wycheproof_extra(
state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction
) -> None:
"""
Test P256Verify precompile with Wycheproof test suite
(mixed valid/invalid cases).
"""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data",
[
pytest.param(
H(0) + R(Spec.Gx) + S(Spec.Gx) + X(Spec.Gx) + Y(Spec.Gy),
id="hash_0",
),
pytest.param(
H(Spec.N - 1) + R(Spec.Gx) + S(Spec.Gx - 1) + X(Spec.Gx) + Y(Spec.Gy),
id="hash_N_minus_1",
),
pytest.param(
H(Spec.N) + R(Spec.Gx) + S(Spec.Gx) + X(Spec.Gx) + Y(Spec.Gy),
id="hash_N",
),
pytest.param(
H(Spec.P - 1)
+ R(Spec.Gx)
+ S(Spec.Gx + Spec.P - 1 - Spec.N)
+ X(Spec.Gx)
+ Y(Spec.Gy),
id="hash_P_minus_1",
),
pytest.param(
H(Spec.P) + R(Spec.Gx) + S(Spec.Gx + Spec.P - Spec.N) + X(Spec.Gx) + Y(Spec.Gy),
id="hash_P",
),
pytest.param(
H(2**256 - 1)
+ R(Spec.Gx)
+ S(Spec.Gx + 2**256 - 1 - Spec.N)
+ X(Spec.Gx)
+ Y(Spec.Gy),
id="hash_max",
),
pytest.param(
H(Spec.N + 1 - Spec.Gx) + R(Spec.Gx) + S(1) + X(Spec.Gx) + Y(Spec.Gy),
id="s_1",
),
pytest.param(
H(Spec.N - 1 - Spec.Gx) + R(Spec.Gx) + S(Spec.N - 1) + X(Spec.Gx) + Y(Spec.Gy),
id="s_N_minus_1",
),
pytest.param(
H(((2**256 - 1) % Spec.N) - Spec.Gx + Spec.N)
+ R(Spec.Gx)
+ S((2**256 - 1) % Spec.N)
+ X(Spec.Gx)
+ Y(Spec.Gy),
id="s_max_mod_N",
),
pytest.param(
H(0xC3D3BE9EB3577F217AE0AB360529A30B18ADC751AEC886328593D7D6FE042809)
+ R(0x3A4E97B44CBF88B90E6205A45BA957E520F63F3C6072B53C244653278A1819D8)
+ S(0x6A184AA037688A5EBD25081FD2C0B10BB64FA558B671BD81955CA86E09D9D722)
+ X(0)
+ Y(0x66485C780E2F83D72433BD5D84A06BB6541C2AF31DAE871728BF856A174F93F4),
id="x_0_y_positive",
),
pytest.param(
H(0xF98A88895CB0866C5BAD58CF03000DDF9D21CB9407892FF54D637E6A046AFBB3)
+ R(0x81DC074973D3222F3930981AD98D022517C91063FFB83CFD620E29B86DC30A8F)
+ S(0x365E4CD085617A265765062A2D9954ED86309DFA33CF5AE1464FE119419FC34A)
+ X(0)
+ Y(0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B),
id="x_0_y_negative",
),
pytest.param(
H(0x5F95DCD6E41662D1E0AEFCCDB7877877C1FD88C9E67FC3CDA0D1D520FA8A3AC2)
+ R(0xAF5DFDDB7EDC789D7C9C42A44AFBBF13C8F1D77D576B6EE5F11FEA4F33E2CB39)
+ S(0xA28F8C5625AD622950F2FCE9672784B287EF9E032ADE8C23BA218413A1CF6522)
+ X(5)
+ Y(0x459243B9AA581806FE913BCE99817ADE11CA503C64D9A3C533415C083248FBCC),
id="x_5_y_positive",
),
pytest.param(
H(0x31CE0B00FA8DD61EF28C7DC5F839C78CF70D60E625E0670BF9C9FCE25E89D99F)
+ R(0x0FA19CBE154513BA348F2DB951AFB6E135BAC5BD8891282781A032103C3F1289)
+ S(0xD9ABF5C4E61098A6E653F265770BDBA36ECC8073CEF99548D89FE2C39A7AFA9B)
+ X(5)
+ Y(0xBA6DBC4555A7E7FA016EC431667E8521EE35AFC49B265C3ACCBEA3F7CDB70433),
id="x_5_y_negative",
),
pytest.param(
H(0x65B0E03E7A27E6F9F4989C72486FCAF0A3ECF3EF60D14F1C11FB5BF071A8FD1B)
+ R(0x0B0CC9E314E4180FE18D205010DD1C4410632D472CC4E7AB56CBC04091ABE006)
+ S(0x8D12C4F19AC41D7877705453A247AB96394E7C093F57EC073A9D150CDE6B68C6)
+ X(0x09E78D4EF60D05F750F6636209092BC43CBDD6B47E11A9DE20A9FEB2A50BB96C)
+ Y(1),
id="y_1",
),
pytest.param(
H(0x744084AD41EE67ED1802A6868ACE7815FD6FC0585A3479FF68E69ADB8DD2B420)
+ R(0xB481C7650CBE85BCD15565811966DA2DA4E4E2931F0892D911520B6A06C340D8)
+ S(0xE4C2D9FB9A4E3E29B7414F0408B2EBC4421D5BC8ADDCCF864AFF9E7E10DA31BB)
+ X(0x09E78D4EF60D05F750F6636209092BC43CBDD6B47E11A9DE20A9FEB2A50BB96C)
+ Y(Spec.P - 1),
id="y_P_minus_1",
),
# Test case for u1==u2 and Q==G.
# This test case is important because u1*G + u2*Q is point doubling.
pytest.param(
H(0x7CF27B188D034F7E8A52380304B51AC3C08969E277F21B35A60B48FC47669978)
+ R(0x7CF27B188D034F7E8A52380304B51AC3C08969E277F21B35A60B48FC47669978)
+ S(0x830D84E672FCB08275ADC7FCFB4AE53BFC5D90CB2F25834F4DAE81C6B4FC8BD9)
+ X(Spec.Gx)
+ Y(Spec.Gy),
id="u1_eq_u2_and_Q_eq_G",
),
# Test case for u1==u2 and Q!=G.
pytest.param(
H(0x65FB4407BCB2A33AE2E486366BAA79B3A8A17A83DDE0FED6F09014A8AC6F78A1)
+ R(0x65FB4407BCB2A33AE2E486366BAA79B3A8A17A83DDE0FED6F09014A8AC6F78A1)
+ S(0x65FB4407BCB2A33AE2E486366BAA79B3A8A17A83DDE0FED6F09014A8AC6F78A1)
+ Spec.X0
+ Spec.Y0,
id="u1_eq_u2_and_Q_ne_G",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.SUCCESS_RETURN_VALUE], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
def test_valid(state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction) -> None:
"""Positive tests for the P256VERIFY precompile."""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data",
[
pytest.param(b"", id="zero_length_input"),
pytest.param(
b"\x00" + Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="input_too_long",
),
pytest.param(
(Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0)[:-1],
id="input_too_short",
),
pytest.param(
H(0) + R(0) + S(0) + X(0) + Y(0),
id="input_all_zeros",
),
pytest.param(
H(0) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_0",
),
pytest.param(
H(Spec.N - 1) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_N_minus_1",
),
pytest.param(
H(Spec.N) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_N",
),
pytest.param(
H(Spec.P - 1) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_P_minus_1",
),
pytest.param(
H(Spec.P) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_P",
),
pytest.param(
H(2**256 - 1) + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
id="hash_max",
),
pytest.param(
Spec.H0 + R(0) + Spec.S0 + Spec.X0 + Spec.Y0,
id="r_eq_to_zero",
),
pytest.param(
Spec.H0 + R(Spec.N) + Spec.S0 + Spec.X0 + Spec.Y0,
id="r_eq_to_n",
),
pytest.param(
Spec.H1 + R(Spec.R1.value + Spec.N) + Spec.S1 + Spec.X1 + Spec.Y1,
id="r_above_n",
),
pytest.param(
Spec.H0 + R(2**256 - 1) + Spec.S0 + Spec.X0 + Spec.Y0,
id="r_max",
),
pytest.param(
Spec.H0 + Spec.R0 + S(0) + Spec.X0 + Spec.Y0,
id="s_eq_to_zero",
),
pytest.param(
Spec.H0 + Spec.R0 + S(Spec.N) + Spec.X0 + Spec.Y0,
id="s_eq_to_n",
),
# If checks for r, s, and point-at-infinity are missing, the s=0 zeros
# both u1 and u2, so the computed R is the point at infinity,
# and the signature may be considered valid in such implementation.
pytest.param(
Spec.H0 + R(0) + S(0) + X(Spec.Gx) + Y(Spec.Gy),
id="r_0_s_0",
),
pytest.param(
Spec.H0 + R(0) + S(Spec.N) + X(Spec.Gx) + Y(Spec.Gy),
id="r_0_s_N",
),
pytest.param(
Spec.H0 + R(Spec.N) + S(0) + X(Spec.Gx) + Y(Spec.Gy),
id="r_N_s_0",
),
pytest.param(
Spec.H0 + R(Spec.N) + S(Spec.N) + X(Spec.Gx) + Y(Spec.Gy),
id="r_N_s_N",
),
# If checks for r and point-at-infinity are missing, the h=0 and r=0
# zero both u1 and u2, so the computed R is the point at infinity,
# and the signature may be considered valid in such implementation.
pytest.param(
H(0) + R(0) + Spec.S0 + X(Spec.Gx) + Y(Spec.Gy),
id="hash_0_r_0",
),
pytest.param(
H(0) + R(Spec.N) + Spec.S0 + X(Spec.Gx) + Y(Spec.Gy),
id="hash_0_r_N",
),
pytest.param(
H(Spec.N) + R(0) + Spec.S0 + X(Spec.Gx) + Y(Spec.Gy),
id="hash_N_r_0",
),
pytest.param(
H(Spec.N) + R(Spec.N) + Spec.S0 + X(Spec.Gx) + Y(Spec.Gy),
id="hash_N_r_N",
),
pytest.param(
Spec.H0 + R(Spec.Gx) + S((2**256 - 1) % Spec.N) + X(Spec.Gx) + Y(Spec.Gy),
id="s_max_mod_N",
),
pytest.param(
H(Spec.N + 1 - Spec.Gx) + R(Spec.Gx) + S(Spec.N + 1) + X(Spec.Gx) + Y(Spec.Gy),
id="s_N_plus_1",
),
pytest.param(
H(((2**256 - 1) % Spec.N) - Spec.Gx + Spec.N)
+ R(Spec.Gx)
+ S(2**256 - 1)
+ X(Spec.Gx)
+ Y(Spec.Gy),
id="s_max",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + X(Spec.P) + Spec.Y0,
id="x_eq_to_p",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Y(Spec.P),
id="y_eq_to_p",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + X(0) + Y(0),
id="point_at_infinity",
),
# Test case with Q at infinity. If the implementation misses the check
# that Q is not the point at infinity, the signature should verify.
pytest.param(
Spec.H0
+ R(0x2DD5CBB0E37BAEC8D1460909B206CA2C87E50CA43B8F31E46168027A7F0AEEC6)
+ Spec.S0
+ X(0)
+ Y(0),
id="point_at_infinity_v2",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + X(Spec.X0.value + 1) + Spec.Y0,
id="point_not_on_curve_x",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Y(Spec.Y0.value + 1),
id="point_not_on_curve_y",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.Y0 + Spec.X0,
id="x_and_y_reversed",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Y(Spec.P + 1),
id="y_greater_than_p",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + X(Spec.P + 1) + Spec.Y0,
id="x_greater_than_p",
),
pytest.param(
H(0xC3D3BE9EB3577F217AE0AB360529A30B18ADC751AEC886328593D7D6FE042809)
+ R(0x3A4E97B44CBF88B90E6205A45BA957E520F63F3C6072B53C244653278A1819D8)
+ S(0x6A184AA037688A5EBD25081FD2C0B10BB64FA558B671BD81955CA86E09D9D722)
+ X(Spec.P) # Valid for X(0)
+ Y(0x66485C780E2F83D72433BD5D84A06BB6541C2AF31DAE871728BF856A174F93F4),
id="x_P_y_positive",
),
pytest.param(
H(0xF98A88895CB0866C5BAD58CF03000DDF9D21CB9407892FF54D637E6A046AFBB3)
+ R(0x81DC074973D3222F3930981AD98D022517C91063FFB83CFD620E29B86DC30A8F)
+ S(0x365E4CD085617A265765062A2D9954ED86309DFA33CF5AE1464FE119419FC34A)
+ X(Spec.P) # Valid for X(0)
+ Y(0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B),
id="x_P_y_negative",
),
pytest.param(
H(0x5F95DCD6E41662D1E0AEFCCDB7877877C1FD88C9E67FC3CDA0D1D520FA8A3AC2)
+ R(0xAF5DFDDB7EDC789D7C9C42A44AFBBF13C8F1D77D576B6EE5F11FEA4F33E2CB39)
+ S(0xA28F8C5625AD622950F2FCE9672784B287EF9E032ADE8C23BA218413A1CF6522)
+ X(Spec.P + 5) # Valid for X(5)
+ Y(0x459243B9AA581806FE913BCE99817ADE11CA503C64D9A3C533415C083248FBCC),
id="x_P_plus_5_y_positive",
),
pytest.param(
H(0x31CE0B00FA8DD61EF28C7DC5F839C78CF70D60E625E0670BF9C9FCE25E89D99F)
+ R(0x0FA19CBE154513BA348F2DB951AFB6E135BAC5BD8891282781A032103C3F1289)
+ S(0xD9ABF5C4E61098A6E653F265770BDBA36ECC8073CEF99548D89FE2C39A7AFA9B)
+ X(Spec.P + 5) # Valid for X(5)
+ Y(0xBA6DBC4555A7E7FA016EC431667E8521EE35AFC49B265C3ACCBEA3F7CDB70433),
id="x_P_plus_5_y_negative",
),
pytest.param(
H(0x65B0E03E7A27E6F9F4989C72486FCAF0A3ECF3EF60D14F1C11FB5BF071A8FD1B)
+ R(0x0B0CC9E314E4180FE18D205010DD1C4410632D472CC4E7AB56CBC04091ABE006)
+ S(0x8D12C4F19AC41D7877705453A247AB96394E7C093F57EC073A9D150CDE6B68C6)
+ X(0x09E78D4EF60D05F750F6636209092BC43CBDD6B47E11A9DE20A9FEB2A50BB96C)
+ Y(Spec.P + 1), # Valid for Y(1)
id="y_P_plus_1",
),
# Test case produces the point R at infinity: (R0/S0)*G + (R0/S0)*(-G).
pytest.param(
H(Spec.R0.value) + Spec.R0 + Spec.S0 + X(Spec.Gx) + Y(Spec.P - Spec.Gy),
id="R_at_infinity_v1",
),
# Test case produces the point R at infinity: (1/1)*G + (1/1)*(-G).
pytest.param(
H(1) + R(1) + S(1) + X(Spec.Gx) + Y(Spec.P - Spec.Gy),
id="R_at_infinity_v2",
),
pytest.param(
Spec.H0
+ R(0x813EF79CCEFA9A56F7BA805F0E478584FE5F0DD5F567BC09B5123CCBC9832365)
+ S(0x900E75AD233FCC908509DBFF5922647DB37C21F4AFD3203AE8DC4AE7794B0F87)
+ X(0xB838FF44E5BC177BF21189D0766082FC9D843226887FC9760371100B7EE20A6F)
+ Y(0xF0C9D75BFBA7B31A6BCA1974496EEB56DE357071955D83C4B1BADAA0B21832E9),
id="valid_secp256k1_inputs",
),
pytest.param(
H(0x235060CAFE19A407880C272BC3E73600E3A12294F56143ED61929C2FF4525ABB)
+ R(0x182E5CBDF96ACCB859E8EEA1850DE5FF6E430A19D1D9A680ECD5946BBEA8A32B)
+ S(0x76DDFAE6797FA6777CAAB9FA10E75F52E70A4E6CEB117B3C5B2F445D850BD64C)
+ X(0x3828736CDFC4C8696008F71999260329AD8B12287846FEDCEDE3BA1205B12729)
+ Y(0x3E5141734E971A8D55015068D9B3666760F4608A49B11F92E500ACEA647978C7),
id="wrong_endianness",
),
pytest.param(
H(Spec.P - 1)
+ R(Spec.N - 2)
+ S((Spec.N - 1) // 2)
+ X(Spec.P - 3)
+ Y(0x19719BEBF6AEA13F25C96DFD7C71F5225D4C8FC09EB5A0AB9F39E9178E55C121),
id="near_field_boundary_p_minus_3",
),
pytest.param(
# Invalid curve attack: This point satisfies y² = x³ - 3x + 1 (mod
# p) instead of the correct P-256 equation y² = x³ - 3x + b where
# b = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53...
# ...B0F63BCE3C3E27D2604B
#
# This tests that the implementation properly validates the curve
# equation and rejects points on different curves (CVE-2020-0601
# class vulnerability)
Spec.H0
+ Spec.R0
+ Spec.S0
+ X(0x4)
+ Y(0x872A856D521EED42D28A60CCC2EAE42E1572F33BE2BF616DC9A762D51C459E2A),
id="invalid_curve_attack_b_equals_one",
),
pytest.param(
# Invalid curve attack: Singular curve with b = 0
# Point satisfies y² = x³ - 3x (mod p) - a singular/degenerate
# curve
# Singular curves have discriminant = 0 and provide no security
# guarantees.
# This tests rejection of points on curves with catastrophic
# security failures
Spec.H0
+ Spec.R0
+ Spec.S0
+ X(0x2)
+ Y(0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD),
id="invalid_curve_attack_singular_b_zero",
),
pytest.param(
# Invalid curve attack: Boundary value b = p-1
# Point satisfies y² = x³ - 3x + (p-1) (mod p)
#
# Tests proper parameter validation at
# modular arithmetic boundaries.
# Ensures implementations handle field arithmetic edge cases
# correctly.
Spec.H0
+ Spec.R0
+ Spec.S0
+ X(0x1)
+ Y(0x6522AED9EA48F2623B8EEAE3E213B99DA32E74C9421835804D374CE28FCCA662),
id="invalid_curve_attack_b_equals_p_minus_1",
),
pytest.param(
# Invalid curve attack: Small discriminant curve with b = 2
# Point satisfies y² = x³ - 3x + 2 (mod p)
#
# Curves with small discriminants are vulnerable to specialized
# attacks.
#
# Tests rejection of cryptographically weak curve parameters.
Spec.H0 + Spec.R0 + Spec.S0 + X(0x1) + Y(0x0),
id="invalid_curve_attack_small_discriminant",
),
pytest.param(
# Invalid curve attack: Composite order curve with b = 7
# Point satisfies y² = x³ - 3x + 7 (mod p)
# Curve order has small factors enabling Pohlig-Hellman attacks
# Tests protection against small subgroup confinement attacks
Spec.H0
+ Spec.R0
+ Spec.S0
+ X(0x1)
+ Y(0x85EC5A4AF40176B63189069AEFFCB229C96D3E046E0283ED2F9DAC21B15AD3C),
id="invalid_curve_attack_composite_order",
),
pytest.param(
# Invalid curve attack: Composite order curve with b = -Spec.B
# Random point which satisfies y² = x³ - 3x - Spec.B (mod p)
# Without the curve check in the implementation,
# the signature checks out.
H(0xC223E1538C4D7B5BBD3EF932736826FD64F4E8B5C80250D9E07A728689D13C38)
+ R(0x0C7CB59EF6BE7539397CC979AD9A87A3B73A0DD268BBA4990A3378C6391512D5)
+ S(0xF8C943685BCFE7864C0F8485CACD732D3A9F167531CAF26B67A3CB10B641F92C)
+ X(0xF1F2ADE681DB5699741B1F9FF080E9A08DCFF48F48A5048C4D90EC89440C3EFB)
+ Y(0xBFFE372E7BBDBD60E4DF885E17A37878461AE13B6491E7863020305962F2C6B6),
id="invalid_curve_attack_bneg_1",
),
pytest.param(
# Invalid curve attack: Composite order curve with b = -Spec.B
# Random point which satisfies y² = x³ - 3x - Spec.B (mod p)
# Without the curve check in the implementation,
# the signature checks out.
H(0x982D25BF8E0E81FF41AC3C8033604C78ED5EF17C6EDDA977072EAB6821A7AD0A)
+ R(0x7C1996FA0EC911E4739AE7340B5345823272F494DFA32034A4FE5642C3DB91F2)
+ S(0x1E4D6CCF1AFB675D18BD27274770C8B84028D272D1D2641E70B30E1DF17AF3DC)
+ X(0xC9124B6AB12F08790A2712AEC74A1B71FA997CA7DE1E9117BC18D07DCBFE7C91)
+ Y(0xADD1E9DF40A47ADD6B2191C05D0C1B4AF1BAEEAA0C0A97E7B3D06FFAE543D096),
id="invalid_curve_attack_bneg_2",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0)
+ R(0xD21697149F598FEAE9A750DCA86AE6D5EFA654680BA748D2DF7053115101C129)
+ S(0xEF3FD943AD1F126B3EBA1A5900D79886755DB6DAFCB6B0117D86364340CE36CC)
+ X(0x687216395BD2F58E5A6D91964AE67E766DF2A2FB8E623795A5852507927C70C2)
+ Y(0xF40E19B93BEB5C0678EDE25AB3654E08C0C6EF6A143CEC9865F3A447C6EB84E3),
id="invalid_curve_attack_h0_random1",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0)
+ R(0x52E47C5D6AAB66AB6A18A694359EB86FDD40F10E79EF5493C5469EC88BA03334)
+ S(0x7584C5BF3CA2869C7E383B1603A935EEB79D990B7F7152E055EC562E87FD715E)
+ X(0x0000000000000002000000000000000000000000000000000000000000000000)
+ Y(0x000000000000000000000000000000000000000000000000FE00000000000000),
id="invalid_curve_attack_h0_random2",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0)
+ R(0x81333B13B13F362253BD536D17563A72EB575F1993F55ED40E633E503F60B864)
+ S(0xE2208C4045F5241ECCF08F825399224C4B78595A10433EC33799DCAD7B0E1F4A)
+ X(0xCE9C1088B4BCC71223A187410BB05819A6D32D2F1A1024B83557E51833AB23DC)
+ Y(0x00FB64209538D1143A88E8B91D2DA46095AF852D7DD494BE6AF26C29D545F856),
id="invalid_curve_attack_h0_random3",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0)
+ R(0x3C593B5857D1D0EB83923D73E76A7A53EF191BB210267D8C0BE17A4E34AB2E73)
+ S(0xD022359310067882F713AFBECECE71CB80E4857368F46AB0346362DB033ED298)
+ X(0x358DF65C0D732CCAB431D4CAB7F98E9F9279BD71D64635FAB21EA87EF254C5D1)
+ Y(0x82909FF2E230433D000000000000000000000000000000000000000000000000),
id="invalid_curve_attack_h0_random4",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0)
+ R(0x425CFFCA652791CABFC81B1E4B7712DBA196599FABCE16978E06E6AF486B1FEC)
+ S(0x58B864B5A41CD17524E4773EC353C9590D792F601DA075AD9B3F40E8E7070E8A)
+ X(0x00000000000000000000000000000000000000000000000000007FFFFFFFFFFF)
+ Y(0xFFFF000000000000000000000000000000000000000000000000000000000000),
id="invalid_curve_attack_h0_random5",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0x2DA0A74BE3122AEAEF5704D0EB27881FBFB918B4A5252B660935263D0569BA92)
+ R(0x5543729CBCFD99EE6C3B422D7F245903E7177B3A6A4E3C20C0DC5F5E109795AE)
+ S(0x96403D5BB253EBD7DEF44BCBC062FCD4EA5E358B19B67C13E625EFDF6B977597)
+ X(0x996CADC001622FB5E363B421A08854096569397B3BDCB8C3DEC907392F7CC59B)
+ Y(0xD34A4E0F08C6FC549F7FAFFBCAF610D7F6C467B7B27072720E81079FB6595B52),
id="invalid_curve_attack_random6",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0x1F9D9B26DB42380C85F075174DDAF158F9DE4CD10C3104190D7AF96938DD8ECD)
+ R(0x159946DBC4F1DE68CD4096862A5B10E5986ACB32229D6E68884DC83DAB70A307)
+ S(0x63D80724A4074421F7DD255630794E3AEBE635B756D72B24652AAC07D01B289C)
+ X(0x9CA2F39CC3536861000000000000000000000000000000000000000000000000)
+ Y(0x000000000000B100000000000000000000000000000000000000000000000000),
id="invalid_curve_attack_random7",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0xD380DA9251F1FB809ED48C70DC8F81E91C471F0E81BC95E7611C653278A5B6B4)
+ R(0xFF197EB72A9E531B17B872525247E6564B786CC014ED28B6849CE7D8C976BDF2)
+ S(0x7B0B2EFF9BB5409052B35FD3FF81DCE77D95A1F75C46989817045120DA5C3C9C)
+ X(0xBA7695481956A6269DD646ADDD4AFE6D9763637D76AD780299E51201384A8403)
+ Y(0xA62443DD4AFE6D9763637D76AD780299E51201384AE4FEDD3CDAC9F461600D53),
id="invalid_curve_attack_random8",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0x4B082B60497ED87FFE570612D521E73A2CD6C832744EF8E4E2E329E30D3D5879)
+ R(0x6665A88CB3FF30D339A1975FD46CF5EF480A68A093AB778550073D3528C3B609)
+ S(0xAEAADDB235E4AC6097356DB96161E27849EA8EDF1E971F74EB51E19A1CC950A1)
+ X(0x0000000000000002000000000000000000000000000000000000000000000000)
+ Y(0x000000000000000000000000000000000000000000000000FE00000000000000),
id="invalid_curve_attack_random9",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0x6CC2B605CFBDB22B9E7B55EFE8C1DA0F1C5A0EC1AA8D82EEDFB5EA70E9846E88)
+ R(0x3C593B5857D1D0EB83923D73E76A7A53EF191BB210267D8C0BE17A4E34AB2E73)
+ S(0xD022359310067882F713AFBECECE71CB80E4857368F46AB0346362DB033ED298)
+ X(0x358DF65C0D732CCAB431D4CAB7F98E9F9279BD71D64635FAB21EA87EF254C5D1)
+ Y(0x82909FF2E230433D000000000000000000000000000000000000000000000000),
id="invalid_curve_attack_random10",
),
pytest.param(
# Invalid curve attack: random point bytes.
# Without the curve check in the implementation,
# the signature checks out.
H(0x810C1D53EA96A700C93F6AF1C183197B040EA6FEAE10564877A1C78EC6074FF1)
+ R(0x34D0F0C8E14D39002B5DEA00808957963E849503DDFD626323433047D696C7C4)
+ S(0x6A7FE39C046304317F799FB900877073F2AE3C798DD4414795551A833ABCBA85)
+ X(0x0000000000F90000000067923073C067015B601D94F262F0E82B9DA2D33A6A32)
+ Y(0xFC3D71CB490CF346ED31DC37405FB0069F4A7ED188381DC049ABAB66E9F80080),
id="invalid_curve_attack_random_11",
),
],
)
@pytest.mark.parametrize("expected_output", [Spec.INVALID_RETURN_VALUE], ids=[""])
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.Inputs.AllZeros()
@EIPChecklist.Precompile.Test.Inputs.Invalid()
@EIPChecklist.Precompile.Test.Inputs.Invalid.Crypto()
@EIPChecklist.Precompile.Test.Inputs.Invalid.Corrupted()
@EIPChecklist.Precompile.Test.InputLengths.Zero()
@EIPChecklist.Precompile.Test.InputLengths.Static.Correct()
@EIPChecklist.Precompile.Test.InputLengths.Static.TooShort()
@EIPChecklist.Precompile.Test.InputLengths.Static.TooLong()
@EIPChecklist.Precompile.Test.OutOfBounds.Max()
@EIPChecklist.Precompile.Test.OutOfBounds.MaxPlusOne()
def test_invalid(state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction) -> None:
"""Negative tests for the P256VERIFY precompile."""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data,expected_output,precompile_gas_modifier,call_succeeds",
[
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
Spec.SUCCESS_RETURN_VALUE,
1,
True,
id="extra_gas",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
Spec.INVALID_RETURN_VALUE,
-1,
False,
id="insufficient_gas",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
Spec.INVALID_RETURN_VALUE,
-6900,
False,
id="zero_gas",
),
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
Spec.INVALID_RETURN_VALUE,
-3450,
False,
id="3450_gas",
),
],
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.GasUsage.Constant.Exact()
@EIPChecklist.Precompile.Test.GasUsage.Constant.Oog()
def test_gas(state_test: StateTestFiller, pre: Alloc, post: dict, tx: Transaction) -> None:
"""Test P256Verify precompile gas requirements."""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"call_opcode",
[
Op.STATICCALL,
Op.DELEGATECALL,
Op.CALLCODE,
],
)
@pytest.mark.parametrize(
"input_data,expected_output",
[
pytest.param(
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
Spec.SUCCESS_RETURN_VALUE,
id="valid_call",
),
],
)
@pytest.mark.parametrize("precompile_address", [Spec.P256VERIFY], ids=[""])
@EIPChecklist.Precompile.Test.CallContexts.Delegate()
@EIPChecklist.Precompile.Test.CallContexts.Static()
@EIPChecklist.Precompile.Test.CallContexts.Callcode()
def test_call_types(
state_test: StateTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""Test P256Verify precompile using different call types."""
state_test(env=Environment(), pre=pre, post=post, tx=tx)
@pytest.mark.parametrize(
"input_data,call_contract_address,post",
[
pytest.param(
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | true |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/conftest.py | tests/osaka/eip7951_p256verify_precompiles/conftest.py | """Shared pytest definitions local to EIP-7951 tests."""
from typing import SupportsBytes
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import EOA, Address, Alloc, Bytecode, Storage, Transaction, keccak256
from ethereum_test_tools import Opcodes as Op
from .spec import Spec
@pytest.fixture
def vector_gas_value() -> int | None:
"""
Gas value from the test vector if any.
If `None` it means that the test scenario did not come from a file, so no
comparison is needed.
The `vectors_from_file` function reads the gas value from the file and
overwrites this fixture.
"""
return None
@pytest.fixture
def precompile_gas(vector_gas_value: int | None) -> int:
"""Gas cost for the precompile."""
if vector_gas_value is not None:
assert vector_gas_value == Spec.P256VERIFY_GAS, (
f"Calculated gas {vector_gas_value} != Vector gas {Spec.P256VERIFY_GAS}"
)
return Spec.P256VERIFY_GAS
@pytest.fixture
def precompile_gas_modifier() -> int:
"""
Modify the gas passed to the precompile, for testing purposes.
By default the call is made with the exact gas amount required for the
given opcode, but when this fixture is overridden, the gas amount can be
modified to, e.g., test a lower amount and test if the precompile call
fails.
"""
return 0
@pytest.fixture
def call_opcode() -> Op:
"""
Type of call used to call the precompile.
By default it is Op.CALL, but it can be overridden in the test.
"""
return Op.CALL
@pytest.fixture
def call_contract_post_storage() -> Storage:
"""
Storage of the test contract after the transaction is executed.
Note:
Fixture `call_contract_code` fills the actual expected storage values.
"""
return Storage()
@pytest.fixture
def call_succeeds() -> bool:
"""
By default, depending on the expected output, we can deduce if the call is
expected to succeed or fail.
"""
return True
@pytest.fixture
def call_contract_code(
precompile_address: int,
precompile_gas: int,
precompile_gas_modifier: int,
expected_output: bytes | SupportsBytes,
call_succeeds: bool,
call_opcode: Op,
call_contract_post_storage: Storage,
) -> Bytecode:
"""Code of the test contract."""
expected_output = bytes(expected_output)
assert call_opcode in [Op.CALL, Op.CALLCODE, Op.DELEGATECALL, Op.STATICCALL]
value = [0] if call_opcode in [Op.CALL, Op.CALLCODE] else []
code = Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE()) + Op.SSTORE(
call_contract_post_storage.store_next(call_succeeds),
call_opcode(
precompile_gas + precompile_gas_modifier,
precompile_address,
*value,
0,
Op.CALLDATASIZE(),
0,
0,
)
+ Op.SSTORE(
call_contract_post_storage.store_next(len(expected_output)), Op.RETURNDATASIZE()
),
)
if call_succeeds:
# Add integrity check only if the call is expected to succeed.
code += Op.RETURNDATACOPY(0, 0, Op.RETURNDATASIZE()) + Op.SSTORE(
call_contract_post_storage.store_next(keccak256(expected_output)),
Op.SHA3(0, Op.RETURNDATASIZE()),
)
return code
@pytest.fixture
def call_contract_address(pre: Alloc, call_contract_code: Bytecode) -> Address:
"""Address where the test contract will be deployed."""
return pre.deploy_contract(call_contract_code)
@pytest.fixture
def post(call_contract_address: Address, call_contract_post_storage: Storage) -> dict:
"""Test expected post outcome."""
return {
call_contract_address: {
"storage": call_contract_post_storage,
},
}
@pytest.fixture
def tx_gas_limit(fork: Fork, input_data: bytes, precompile_gas: int) -> int:
"""
Transaction gas limit used for the test (Can be overridden in the test).
"""
intrinsic_gas_cost_calculator = fork.transaction_intrinsic_cost_calculator()
memory_expansion_gas_calculator = fork.memory_expansion_gas_calculator()
extra_gas = 100_000
return (
extra_gas
+ intrinsic_gas_cost_calculator(calldata=input_data)
+ memory_expansion_gas_calculator(new_bytes=len(input_data))
+ precompile_gas
)
@pytest.fixture
def tx(
input_data: bytes,
tx_gas_limit: int,
call_contract_address: Address,
sender: EOA,
) -> Transaction:
"""Transaction for the test."""
return Transaction(
gas_limit=tx_gas_limit, data=input_data, to=call_contract_address, sender=sender
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/helpers.py | tests/osaka/eip7951_p256verify_precompiles/helpers.py | """Helper functions for the EIP-7951 P256VERIFY precompiles tests."""
import os
from typing import Annotated, Any, List
import pytest
from pydantic import BaseModel, BeforeValidator, ConfigDict, RootModel, TypeAdapter
from pydantic.alias_generators import to_pascal
def current_python_script_directory(*args: str) -> str:
"""
Get the current Python script directory, optionally appending additional
path components.
"""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), *args)
class Vector(BaseModel):
"""Test vector for the secp256r1 precompile."""
input: Annotated[bytes, BeforeValidator(bytes.fromhex)]
expected: Annotated[bytes, BeforeValidator(bytes.fromhex)]
gas: int
name: str
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self) -> Any:
"""
Convert the test vector to a tuple that can be used as a parameter in a
pytest test.
"""
return pytest.param(self.input, self.expected, self.gas, id=self.name)
class FailVector(BaseModel):
"""Test vector for the BLS12-381 precompiles."""
input: Annotated[bytes, BeforeValidator(bytes.fromhex)]
expected_error: str
name: str
model_config = ConfigDict(alias_generator=to_pascal)
def to_pytest_param(self) -> Any:
"""
Convert the test vector to a tuple that can be used as a parameter in a
pytest test.
"""
return pytest.param(self.input, id=self.name)
class VectorList(RootModel):
"""List of test vectors for the secp256r1 precompile."""
root: List[Vector | FailVector]
VectorListAdapter = TypeAdapter(VectorList)
def vectors_from_file(filename: str) -> List:
"""Load test vectors from a file."""
with open(
current_python_script_directory(
"vectors",
filename,
),
"rb",
) as f:
vectors_list = VectorListAdapter.validate_json(f.read())
all_inputs = set()
for vector in vectors_list.root:
if vector.input in all_inputs:
raise ValueError(f"Duplicate input: {vector.input.hex()}")
all_inputs.add(vector.input)
return [v.to_pytest_param() for v in vectors_list.root]
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py | tests/osaka/eip7951_p256verify_precompiles/test_p256verify_before_fork.py | """
Tests P256VERIFY precompiles of [EIP-7951: Precompile for secp256r1
Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
Tests P256VERIFY
precompiles of [EIP-7951: Precompile for secp256r1 Curve
Support](https://eips.ethereum.org/EIPS/eip-7951) before the Osaka hard fork is
active.
"""
import pytest
from ethereum_test_checklists import EIPChecklist
from ethereum_test_tools import Alloc, Block, BlockchainTestFiller, Transaction
from .spec import Spec, ref_spec_7951
REFERENCE_SPEC_GIT_PATH = ref_spec_7951.git_path
REFERENCE_SPEC_VERSION = ref_spec_7951.version
pytestmark = pytest.mark.valid_at_transition_to("Osaka")
@pytest.mark.parametrize(
"precompile_address,input_data,precompile_gas_modifier",
[
pytest.param(
Spec.P256VERIFY,
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
0,
id="P256VERIFY_valid_input_6900_gas",
),
pytest.param(
Spec.P256VERIFY,
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.X0,
0,
id="P256VERIFY_invalid_input",
),
pytest.param(
Spec.P256VERIFY,
Spec.H0 + Spec.R0 + Spec.S0 + Spec.X0 + Spec.Y0,
-6900,
id="P256VERIFY_valid_input_zero_gas",
),
],
)
@pytest.mark.parametrize(
"expected_output,call_succeeds",
[pytest.param(Spec.INVALID_RETURN_VALUE, True, id=pytest.HIDDEN_PARAM)],
)
@EIPChecklist.Precompile.Test.ForkTransition.Before.InvalidInput()
@EIPChecklist.Precompile.Test.ForkTransition.Before.ZeroGas()
def test_precompile_before_fork(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
post: dict,
tx: Transaction,
) -> None:
"""
Test P256VERIFY precompiles before the Osaka hard fork is active.
The call must succeed but the output must be empty.
"""
blockchain_test(
pre=pre,
blocks=[Block(txs=[tx])],
post=post,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7951_p256verify_precompiles/__init__.py | tests/osaka/eip7951_p256verify_precompiles/__init__.py | """
Tests [EIP-7951: Precompile for secp256r1 Curve Support](https://eips.ethereum.org/EIPS/eip-7951).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7934_block_rlp_limit/spec.py | tests/osaka/eip7934_block_rlp_limit/spec.py | """Defines EIP-7934 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_7934 = ReferenceSpec("EIPS/eip-7934.md", "2e5cc824089bab8d04aee598708e21c0e06857ef")
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-7934 specifications as defined at
https://eips.ethereum.org/EIPS/eip-7934#specification.
"""
MAX_BLOCK_SIZE = 10_485_760 # 10 MiB
SAFETY_MARGIN = 2_097_152 # 2 MiB
MAX_RLP_BLOCK_SIZE = MAX_BLOCK_SIZE - SAFETY_MARGIN # 8_388_608 bytes
BLOB_COMMITMENT_VERSION_KZG = 1
@staticmethod
def exceed_max_rlp_block_size(rlp_encoded_block: bytes) -> bool:
"""Check if an RLP encoded block exceeds the maximum allowed size."""
return len(rlp_encoded_block) > Spec.MAX_RLP_BLOCK_SIZE
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7934_block_rlp_limit/conftest.py | tests/osaka/eip7934_block_rlp_limit/conftest.py | """Fixtures for the EIP-7934 RLP block size limit tests."""
import pytest
from ethereum_test_tools import (
Address,
Alloc,
)
from ethereum_test_types import Environment
from ethereum_test_vm import Opcodes as Op
@pytest.fixture
def post() -> Alloc:
"""Post state allocation fixture."""
return Alloc()
@pytest.fixture
def env() -> Environment:
"""Environment fixture with a specified gas limit."""
return Environment(gas_limit=100_000_000)
@pytest.fixture
def contract_recipient(pre: Alloc) -> Address:
"""Deploy a simple contract that can receive large calldata."""
contract_code = Op.SSTORE(0, Op.CALLDATASIZE) + Op.STOP
return pre.deploy_contract(contract_code)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py | tests/osaka/eip7934_block_rlp_limit/test_max_block_rlp_size.py | """
Tests for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
"""
from functools import lru_cache
from typing import List, Tuple
import pytest
from ethereum_test_base_types import Address, HexNumber, ZeroPaddedHexNumber
from ethereum_test_checklists import EIPChecklist
from ethereum_test_fixtures.blockchain import (
FixtureBlockBase,
FixtureHeader,
FixtureWithdrawal,
)
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Alloc,
Block,
BlockchainTestFiller,
BlockException,
Bytes,
Transaction,
Withdrawal,
)
from ethereum_test_tools import Opcodes as Op
from ethereum_test_types import EOA, Environment
from .spec import Spec, ref_spec_7934
REFERENCE_SPEC_GIT_PATH = ref_spec_7934.git_path
REFERENCE_SPEC_VERSION = ref_spec_7934.version
pytestmark = [
pytest.mark.pre_alloc_group(
"block_rlp_limit_tests",
reason="Block RLP size tests require exact calculations",
),
pytest.mark.xdist_group(name="bigmem"),
]
HEADER_TIMESTAMP = 123456789
EXTRA_DATA_AT_LIMIT = b"\x00\x00\x00"
BLOCK_GAS_LIMIT = 100_000_000
@pytest.fixture
def block_size_limit(fork: Fork) -> int:
"""Get the fork-specific block RLP size limit."""
limit = fork.block_rlp_size_limit()
if limit is None:
raise ValueError("Fork does not implement block RLP size limit")
assert limit == Spec.MAX_RLP_BLOCK_SIZE, (
f"Expected block RLP size limit to be {Spec.MAX_RLP_BLOCK_SIZE}, "
f"but got {limit} for fork {fork.name}"
)
return limit
@pytest.fixture
def block_errors() -> List[BlockException]:
"""
Block exceptions expected for blocks that exceed the `MAX_RLP_BLOCK_SIZE`.
"""
return [BlockException.RLP_BLOCK_LIMIT_EXCEEDED]
def create_test_header(gas_used: int) -> FixtureHeader:
"""Create a standard test header for RLP size calculations."""
return FixtureHeader(
difficulty="0x0",
number="0x1",
gas_limit=hex(BLOCK_GAS_LIMIT),
timestamp=hex(HEADER_TIMESTAMP),
coinbase="0x" + "00" * 20,
parent_hash="0x" + "00" * 32,
uncle_hash="0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
state_root="0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
transactions_trie="0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
receiptTrie="0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
bloom="0x" + "00" * 256,
gas_used=hex(gas_used),
extra_data=EXTRA_DATA_AT_LIMIT.hex(),
mix_hash="0x" + "00" * 32,
nonce="0x0000000000000042",
base_fee_per_gas="0x0",
withdrawals_root="0x" + "00" * 32,
blob_gas_used="0x0",
excess_blob_gas="0x0",
parent_beacon_block_root="0x" + "00" * 32,
requests_hash="0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
)
def get_block_rlp_size(
transactions: List[Transaction], gas_used: int, withdrawals: List[Withdrawal] | None = None
) -> int:
"""
Calculate the RLP size of a block with given transactions
and withdrawals.
"""
header = create_test_header(gas_used)
total_gas = sum((tx.gas_limit or 21000) for tx in transactions)
header.gas_used = ZeroPaddedHexNumber(total_gas)
# Calculate blob gas used if there are blob transactions
blob_gas_used = 0
for tx in transactions:
if hasattr(tx, "blob_versioned_hashes") and tx.blob_versioned_hashes:
blob_gas_used += len(tx.blob_versioned_hashes) * (2**17)
if blob_gas_used > 0:
header.blob_gas_used = ZeroPaddedHexNumber(blob_gas_used)
# Convert withdrawals to FixtureWithdrawal if provided
block_withdrawals = []
if withdrawals is not None:
block_withdrawals = [
FixtureWithdrawal(
index=w.index,
validator_index=w.validator_index,
address=w.address,
amount=w.amount,
)
for w in withdrawals
]
test_block = FixtureBlockBase(blockHeader=header, withdrawals=block_withdrawals)
return len(test_block.with_rlp(txs=transactions).rlp)
def exact_size_transactions(
sender: EOA,
block_size_limit: int,
fork: Fork,
pre: Alloc,
gas_limit: int,
emit_logs: bool = False,
specific_transaction_to_include: Transaction | None = None,
withdrawals: List[Withdrawal] | None = None,
) -> Tuple[List[Transaction], int]:
"""
Generate transactions that fill a block to exactly the RLP size limit.
The calculation uses caching to avoid recalculating the same block rlp for
each fork. Calculate the block and fill with real sender for testing.
Args:
sender: The sender account
block_size_limit: The target block RLP size limit
fork: The fork to generate transactions for
pre: Required if emit_logs is True, used to deploy the log contract
gas_limit: The gas limit for the block
emit_logs: If True, transactions will call a contract that emits logs
specific_transaction_to_include: If provided, this transaction will
be included
withdrawals: Optional list of withdrawals to include in the block
"""
log_contract = None
if emit_logs:
if pre is None:
raise ValueError("pre is required when emit_logs is True")
# Deploy a contract that emits logs
log_contract_code = Op.SSTORE(1, 1)
# Emit multiple LOG4 events with maximum data and topics
for _ in range(3):
log_contract_code += Op.PUSH32(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
) # topic 4
log_contract_code += Op.PUSH32(
0xEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
) # topic 3
log_contract_code += Op.PUSH32(
0xDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD
) # topic 2
log_contract_code += Op.PUSH32(
0xCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
) # topic 1
log_contract_code += Op.PUSH1(32) # size
log_contract_code += Op.PUSH1(0) # offset
log_contract_code += Op.LOG4
log_contract = pre.deploy_contract(log_contract_code)
if not specific_transaction_to_include and not withdrawals:
# use cached version when possible for performance
transactions, gas_used = _exact_size_transactions_cached(
block_size_limit,
fork,
gas_limit,
sender,
emit_logs_contract=log_contract,
)
else:
# Direct calculation, no cache, since `Transaction` / `Withdrawal`
# are not hashable
transactions, gas_used = _exact_size_transactions_impl(
block_size_limit,
fork,
gas_limit,
sender,
specific_transaction_to_include=specific_transaction_to_include,
emit_logs_contract=log_contract,
withdrawals=withdrawals,
)
return transactions, gas_used
@lru_cache(maxsize=128)
def _exact_size_transactions_cached(
block_size_limit: int,
fork: Fork,
gas_limit: int,
sender: EOA,
emit_logs_contract: Address | None = None,
) -> Tuple[List[Transaction], int]:
"""
Generate transactions that fill a block to exactly the RLP size limit.
Abstracted with hashable arguments for caching block calculations.
"""
return _exact_size_transactions_impl(
block_size_limit,
fork,
gas_limit,
sender,
None,
emit_logs_contract,
None,
)
def _exact_size_transactions_impl(
block_size_limit: int,
fork: Fork,
block_gas_limit: int,
sender: EOA,
specific_transaction_to_include: Transaction | None = None,
emit_logs_contract: Address | None = None,
withdrawals: List[Withdrawal] | None = None,
) -> Tuple[List[Transaction], int]:
"""
Calculate the exact size of transactions to be included. Shared by both
cached and non-cached paths.
"""
transactions = []
nonce = 0
total_gas_used = 0
calculator = fork.transaction_intrinsic_cost_calculator()
data_large = Bytes(b"\x00" * 500_000)
gas_limit_large = calculator(calldata=data_large)
# block with 16 transactions + large calldata remains safely below the
# limit add 15 generic transactions to fill the block and one typed
# transaction if tx_type is specified, otherwise just add 16 generic
# transactions
not_all_generic_txs = any(
kwarg is not None for kwarg in [specific_transaction_to_include, emit_logs_contract]
)
generic_tx_num = 15 if not_all_generic_txs else 16
for _ in range(generic_tx_num):
tx = Transaction(
sender=sender,
nonce=nonce,
max_fee_per_gas=10**11,
max_priority_fee_per_gas=10**11,
gas_limit=gas_limit_large,
data=data_large,
)
transactions.append(tx)
total_gas_used += gas_limit_large
nonce += 1
# append a typed transaction to fill the block
if not_all_generic_txs:
if specific_transaction_to_include is not None:
tx_dict = specific_transaction_to_include.model_dump(exclude_unset=True)
data = Bytes(b"\x00" * 200_000)
gas_limit = HexNumber(
calculator(
calldata=data,
access_list=specific_transaction_to_include.access_list,
authorization_list_or_count=len(tx_dict.get("authorization_list", [])),
)
)
tx_dict["sender"] = sender
tx_dict["nonce"] = nonce
tx_dict["data"] = data
tx_dict["gas_limit"] = gas_limit
last_tx = Transaction(**tx_dict)
elif emit_logs_contract is not None:
last_tx = Transaction(
sender=sender,
nonce=nonce,
max_fee_per_gas=10**11,
max_priority_fee_per_gas=10**11,
gas_limit=calculator(calldata=b""),
to=emit_logs_contract,
)
else:
raise ValueError(
"Either specific_transaction_to_include or emit_logs_contract must be provided."
)
transactions.append(last_tx)
nonce += 1
total_gas_used += last_tx.gas_limit
current_size = get_block_rlp_size(
transactions, gas_used=total_gas_used, withdrawals=withdrawals
)
remaining_bytes = block_size_limit - current_size
remaining_gas = block_gas_limit - total_gas_used
if remaining_bytes > 0 and remaining_gas > 50_000:
# create an empty transaction to measure base contribution
empty_tx = Transaction(
sender=sender,
nonce=nonce,
max_fee_per_gas=10**11,
max_priority_fee_per_gas=10**11,
gas_limit=calculator(calldata=b""),
data=b"",
)
empty_block_size = get_block_rlp_size(
transactions + [empty_tx],
gas_used=total_gas_used + empty_tx.gas_limit,
withdrawals=withdrawals,
)
empty_contribution = empty_block_size - current_size
calldata_bytes_needed = remaining_bytes - empty_contribution
estimated_calldata = max(0, calldata_bytes_needed - 5)
target_calldata = b"\x00" * estimated_calldata
target_gas = calculator(calldata=target_calldata)
if target_gas <= remaining_gas:
test_tx = Transaction(
sender=sender,
nonce=nonce,
max_fee_per_gas=10**11,
max_priority_fee_per_gas=10**11,
gas_limit=target_gas,
data=target_calldata,
)
test_size = get_block_rlp_size(
transactions + [test_tx],
gas_used=total_gas_used + target_gas,
withdrawals=withdrawals,
)
if test_size == block_size_limit:
# if exact match, use the transaction
transactions.append(test_tx)
else:
# search for the best adjustment
diff = block_size_limit - test_size
best_diff = abs(diff)
search_range = min(abs(diff) + 50, 1000)
for adjustment in range(-search_range, search_range + 1):
adjusted_size = estimated_calldata + adjustment
if adjusted_size < 0:
continue
adjusted_calldata = b"\x00" * adjusted_size
adjusted_gas = calculator(calldata=adjusted_calldata)
if adjusted_gas <= remaining_gas:
adjusted_tx = Transaction(
sender=sender,
nonce=nonce,
max_fee_per_gas=10**11,
max_priority_fee_per_gas=10**11,
gas_limit=adjusted_gas,
data=adjusted_calldata,
)
adjusted_test_size = get_block_rlp_size(
transactions + [adjusted_tx],
gas_used=total_gas_used + adjusted_gas,
withdrawals=withdrawals,
)
if adjusted_test_size == block_size_limit:
# exact match
transactions.append(adjusted_tx)
break
adjusted_diff = abs(block_size_limit - adjusted_test_size)
if adjusted_diff < best_diff:
best_diff = adjusted_diff
else:
raise RuntimeError(
"Failed to find a transaction that matches the target size."
)
else:
transactions.append(empty_tx)
final_size = get_block_rlp_size(
transactions, gas_used=sum(tx.gas_limit for tx in transactions), withdrawals=withdrawals
)
final_gas = sum(tx.gas_limit for tx in transactions)
assert final_size == block_size_limit, (
f"Size mismatch: got {final_size}, "
f"expected {block_size_limit} "
f"({final_size - block_size_limit} bytes diff)"
)
return transactions, final_gas
@EIPChecklist.BlockLevelConstraint.Test.Boundary.Under()
@EIPChecklist.BlockLevelConstraint.Test.Boundary.Exact()
@EIPChecklist.BlockLevelConstraint.Test.Boundary.Over()
@pytest.mark.parametrize(
"delta",
[
pytest.param(-1, id="max_rlp_size_minus_1_byte", marks=pytest.mark.verify_sync),
pytest.param(0, id="max_rlp_size", marks=pytest.mark.verify_sync),
pytest.param(1, id="max_rlp_size_plus_1_byte", marks=pytest.mark.exception_test),
],
)
@pytest.mark.valid_from("Osaka")
def test_block_at_rlp_size_limit_boundary(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
post: Alloc,
env: Environment,
sender: EOA,
fork: Fork,
block_size_limit: int,
delta: int,
) -> None:
"""
Test the block rlp size limit.
- At the limit - 1 byte, the block is valid
- At the limit, the block is valid
- At the limit + 1 byte, the block is invalid
"""
transactions, gas_used = exact_size_transactions(
sender,
block_size_limit,
fork,
pre,
env.gas_limit,
)
block_rlp_size = get_block_rlp_size(transactions, gas_used=gas_used)
assert block_rlp_size == block_size_limit, (
f"Block RLP size {block_rlp_size} does not exactly match limit {block_size_limit}, "
f"difference: {block_rlp_size - block_size_limit} bytes"
)
block = Block(
txs=transactions,
exception=BlockException.RLP_BLOCK_LIMIT_EXCEEDED if delta > 0 else None,
)
if delta < 0:
block.extra_data = Bytes(EXTRA_DATA_AT_LIMIT[: -abs(delta)])
elif delta == 0:
block.extra_data = Bytes(EXTRA_DATA_AT_LIMIT)
else: # delta > 0
block.extra_data = Bytes(EXTRA_DATA_AT_LIMIT + b"\x00" * delta)
block.timestamp = ZeroPaddedHexNumber(HEADER_TIMESTAMP)
blockchain_test(
genesis_environment=env,
pre=pre,
post=post,
blocks=[block],
)
@EIPChecklist.BlockLevelConstraint.Test.Content.TransactionTypes()
@pytest.mark.with_all_typed_transactions
@pytest.mark.verify_sync
@pytest.mark.valid_from("Osaka")
def test_block_rlp_size_at_limit_with_all_typed_transactions(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
post: Alloc,
fork: Fork,
sender: EOA,
block_size_limit: int,
env: Environment,
typed_transaction: Transaction,
) -> None:
"""Test the block RLP size limit with all transaction types."""
transactions, gas_used = exact_size_transactions(
sender,
block_size_limit,
fork,
pre,
env.gas_limit,
specific_transaction_to_include=typed_transaction,
)
block_rlp_size = get_block_rlp_size(transactions, gas_used=gas_used)
assert block_rlp_size == block_size_limit, (
f"Block RLP size {block_rlp_size} does not exactly match limit {block_size_limit}, "
f"difference: {block_rlp_size - block_size_limit} bytes"
)
block = Block(txs=transactions)
block.extra_data = Bytes(EXTRA_DATA_AT_LIMIT)
block.timestamp = ZeroPaddedHexNumber(HEADER_TIMESTAMP)
blockchain_test(
genesis_environment=env,
pre=pre,
post=post,
blocks=[block],
)
@EIPChecklist.BlockLevelConstraint.Test.Content.Logs()
@pytest.mark.verify_sync
@pytest.mark.valid_from("Osaka")
def test_block_at_rlp_limit_with_logs(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
post: Alloc,
env: Environment,
sender: EOA,
fork: Fork,
block_size_limit: int,
) -> None:
"""
Test that a block at the RLP size limit is valid even when transactions
emit logs.
"""
transactions, gas_used = exact_size_transactions(
sender,
block_size_limit,
fork,
pre,
env.gas_limit,
emit_logs=True,
)
block_rlp_size = get_block_rlp_size(transactions, gas_used=gas_used)
assert block_rlp_size == block_size_limit, (
f"Block RLP size {block_rlp_size} does not exactly match limit {block_size_limit}, "
f"difference: {block_rlp_size - block_size_limit} bytes"
)
block = Block(txs=transactions)
block.extra_data = Bytes(EXTRA_DATA_AT_LIMIT)
block.timestamp = ZeroPaddedHexNumber(HEADER_TIMESTAMP)
blockchain_test(
genesis_environment=env,
pre=pre,
post=post,
blocks=[block],
)
@EIPChecklist.BlockLevelConstraint.Test.Content.Withdrawals()
@pytest.mark.verify_sync
@pytest.mark.valid_from("Osaka")
def test_block_at_rlp_limit_with_withdrawals(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
post: Alloc,
env: Environment,
sender: EOA,
fork: Fork,
block_size_limit: int,
) -> None:
"""
Test that a block at the RLP size limit is valid even when the block
contains withdrawals.
"""
withdrawals = [
Withdrawal(
index=0,
validator_index=0,
address=pre.fund_eoa(),
amount=1,
),
Withdrawal(
index=1,
validator_index=1,
address=pre.fund_eoa(),
amount=1,
),
]
transactions, gas_used = exact_size_transactions(
sender,
block_size_limit,
fork,
pre,
env.gas_limit,
withdrawals=withdrawals,
)
block_rlp_size = get_block_rlp_size(transactions, gas_used=gas_used, withdrawals=withdrawals)
assert block_rlp_size == block_size_limit, (
f"Block RLP size {block_rlp_size} does not exactly match limit {block_size_limit}, "
f"difference: {block_rlp_size - block_size_limit} bytes"
)
block = Block(
txs=transactions,
withdrawals=withdrawals,
extra_data=Bytes(EXTRA_DATA_AT_LIMIT),
timestamp=ZeroPaddedHexNumber(HEADER_TIMESTAMP),
)
blockchain_test(
genesis_environment=env,
pre=pre,
post=post,
blocks=[block],
)
@EIPChecklist.BlockLevelConstraint.Test.ForkTransition.AcceptedBeforeFork()
@EIPChecklist.BlockLevelConstraint.Test.ForkTransition.AcceptedAfterFork()
@EIPChecklist.BlockLevelConstraint.Test.ForkTransition.RejectedAfterFork()
@pytest.mark.parametrize(
"exceeds_limit_at_fork",
[
pytest.param(False, id="at_fork_within_limit"),
pytest.param(True, marks=pytest.mark.exception_test, id="at_fork_exceeds_limit"),
],
)
@pytest.mark.valid_at_transition_to("Osaka")
def test_fork_transition_block_rlp_limit(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
env: Environment,
fork: Fork,
exceeds_limit_at_fork: bool,
block_size_limit: int,
) -> None:
"""
Test block RLP size limit at fork transition boundary.
- Before fork (timestamp 14999): Block at limit +1 should be accepted
- At fork (timestamp 15000): Block at limit should be accepted
- At fork (timestamp 15000): Block at limit +1 should be rejected
"""
sender_before_fork = pre.fund_eoa()
sender_at_fork = pre.fund_eoa()
transactions_before, gas_used_before = exact_size_transactions(
sender_before_fork,
block_size_limit,
fork,
pre,
env.gas_limit,
)
transactions_at_fork, gas_used_at_fork = exact_size_transactions(
sender_at_fork,
block_size_limit,
fork,
pre,
env.gas_limit,
)
for fork_block_rlp_size in [
get_block_rlp_size(transactions_before, gas_used=gas_used_before),
get_block_rlp_size(transactions_at_fork, gas_used=gas_used_at_fork),
]:
assert fork_block_rlp_size == block_size_limit, (
f"Block RLP size {fork_block_rlp_size} does not exactly match "
f"limit {block_size_limit}, difference: "
f"{fork_block_rlp_size - block_size_limit} bytes"
)
# HEADER_TIMESTAMP (123456789) used in calculation takes 4 bytes in RLP
# encoding. Transition timestamps (14_999 and 15_000) take 2 bytes
# Re-define `_extradata_at_limit` accounting for this difference
timestamp_byte_savings = 2
_extradata_at_limit = EXTRA_DATA_AT_LIMIT + (b"\x00" * timestamp_byte_savings)
blocks = [
# before fork, block at limit +1 should be accepted
Block(
timestamp=14_999,
txs=transactions_before,
# +1 to exceed limit
extra_data=Bytes(_extradata_at_limit + b"\x00"),
)
]
# At fork (timestamp 15000): Test behavior with and without exceeding limit
if exceeds_limit_at_fork:
blocks.append(
Block(
timestamp=15_000,
txs=transactions_at_fork,
# +1 to exceed limit, should be rejected
extra_data=Bytes(_extradata_at_limit + b"\x00"),
exception=BlockException.RLP_BLOCK_LIMIT_EXCEEDED,
)
)
else:
blocks.append(
Block(
timestamp=15_000,
txs=transactions_at_fork,
# exact limit should be accepted
extra_data=Bytes(EXTRA_DATA_AT_LIMIT),
)
)
blockchain_test(
genesis_environment=env,
pre=pre,
post={},
blocks=blocks,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/osaka/eip7934_block_rlp_limit/__init__.py | tests/osaka/eip7934_block_rlp_limit/__init__.py | """
Tests for [EIP-7934: RLP Execution Block Size Limit](https://eips.ethereum.org/EIPS/eip-7934).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/__init__.py | tests/constantinople/__init__.py | """Test cases for EVM functionality introduced in Constantinople."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip145_bitwise_shift/spec.py | tests/constantinople/eip145_bitwise_shift/spec.py | """Defines EIP-145 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_145 = ReferenceSpec("EIPS/eip-145.md", "be0aca3e57f1eeb8ae265e58da6e2dffc5b67f81")
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-145 specifications as defined at
https://eips.ethereum.org/EIPS/eip-145.
"""
# Below are GPT o4-mini-high implementation of shift functions It can
# contain bugs, treat it with caution and refer to EVM implementations
@staticmethod
def sar(shift: int, value: int) -> int:
"""
Simulate the EVM SAR (Signed Arithmetic Right shift) operation.
Parameters
----------
shift : int
Number of bits to shift to the right (interpreted as full unsigned;
no low-8-bit truncation here).
value : int
The 256-bit value to shift, interpreted as a signed integer.
Returns
-------
int
The result of the arithmetic right shift, pushed as an unsigned
256-bit integer on the EVM stack.
"""
mask256 = (1 << 256) - 1 # Clamp value to 256 bits
# Interpret as signed
v = value & mask256
if v >> 255:
v_signed = v - (1 << 256)
else:
v_signed = v
# If shift >= 256, spec says:
# • result = 0 if v_signed >= 0
# • result = -1 if v_signed < 0
if shift >= 256:
result_signed = -1 if v_signed < 0 else 0
else:
# Normal arithmetic right shift
result_signed = v_signed >> shift
# Wrap back to unsigned 256-bit
return result_signed & mask256
@staticmethod
def shl(shift: int, value: int) -> int:
"""
Simulate the EVM SHL (Logical Left shift) operation.
Parameters
----------
shift : int
Number of bits to shift to the left.
value : int
The 256-bit value to shift, interpreted as an unsigned integer.
Returns
-------
int
The result of the logical left shift, pushed as an unsigned
256-bit integer on the EVM stack.
"""
mask256 = (1 << 256) - 1
# Clamp input to 256 bits
v = value & mask256
# If shift >= 256, spec returns 0
if shift >= 256:
return 0
# Logical left shift and wrap to 256 bits
return (v << shift) & mask256
@staticmethod
def shr(shift: int, value: int) -> int:
"""
Simulate the EVM SHR (Logical Right shift) operation.
Parameters
----------
shift : int
Number of bits to shift to the right.
value : int
The 256-bit value to shift, interpreted as an unsigned integer.
Returns
-------
int
The result of the logical right shift, pushed as an unsigned
256-bit integer on the EVM stack.
"""
mask256 = (1 << 256) - 1
# Clamp input to 256 bits
v = value & mask256
# If shift >= 256, the EVM spec returns 0
if shift >= 256:
return 0
# Logical right shift and mask back to 256 bits
return (v >> shift) & mask256
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip145_bitwise_shift/test_shift_combinations.py | tests/constantinople/eip145_bitwise_shift/test_shift_combinations.py | """Test bitwise shift opcodes in different combinations."""
import itertools
from typing import Callable
import pytest
from ethereum_test_tools import (
Account,
Alloc,
StateTestFiller,
Storage,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .spec import Spec, ref_spec_145
REFERENCE_SPEC_GIT_PATH = ref_spec_145.git_path
REFERENCE_SPEC_VERSION = ref_spec_145.version
list_of_args = [
0,
1,
2,
5,
0xFE,
0xFF,
0x100,
0x101,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE,
0x8000000000000000000000000000000000000000000000000000000000000000,
0xA000000000000000000000000000000000000000000000000000000000000000,
0x5555555555555555555555555555555555555555555555555555555555555555,
0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA,
0x0000000000000000000000000000000000000000000000000000000000000080,
0x0000000000000000000000000000000000000000000000000000000000008000,
0x0000000000000000000000000000000000000000000000000000000080000000,
0x0000000000000000000000000000000000000000000000008000000000000000,
0x0000000000000000000000000000000080000000000000000000000000000000,
0x8000000000000000000000000000000000000000000000000000000000000000,
]
combinations = list(itertools.product(list_of_args, repeat=2))
@pytest.mark.parametrize(
"opcode,operation",
[
pytest.param(Op.SAR, Spec.sar, id="sar"),
pytest.param(Op.SHL, Spec.shl, id="shl"),
pytest.param(Op.SHR, Spec.shr, id="shr"),
],
)
@pytest.mark.valid_from("Constantinople")
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stShift/shiftCombinationsFiller.yml",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stShift/shiftSignedCombinationsFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1683"],
)
def test_combinations(
state_test: StateTestFiller, pre: Alloc, opcode: Op, operation: Callable
) -> None:
"""Test bitwise shift combinations."""
result = Storage()
address_to = pre.deploy_contract(
code=sum(
Op.SSTORE(
result.store_next(operation(shift=a, value=b), f"{str(opcode).lower()}({a}, {b})"),
opcode(a, b),
)
for a, b in combinations
)
+ Op.SSTORE(result.store_next(1, "code_finished"), 1)
+ Op.STOP,
)
tx = Transaction(
sender=pre.fund_eoa(),
to=address_to,
gas_limit=5_000_000,
)
state_test(pre=pre, post={address_to: Account(storage=result)}, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip145_bitwise_shift/__init__.py | tests/constantinople/eip145_bitwise_shift/__init__.py | """
Test [EIP-145: Bitwise shifting instructions in EVM](https://eips.ethereum.org/EIPS/eip-145).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip1014_create2/spec.py | tests/constantinople/eip1014_create2/spec.py | """Defines EIP-1014 specification constants and functions."""
from dataclasses import dataclass
@dataclass(frozen=True)
class ReferenceSpec:
"""Defines the reference spec version and git path."""
git_path: str
version: str
ref_spec_1014 = ReferenceSpec("EIPS/eip-1014.md", "31d4d62ec1dfc9f2bb26ca648f05d4cc2f47da09")
@dataclass(frozen=True)
class Spec:
"""
Parameters from the EIP-1014 specifications as defined at
https://eips.ethereum.org/EIPS/eip-1014.
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip1014_create2/test_recreate.py | tests/constantinople/eip1014_create2/test_recreate.py | """Test Account Self-destruction and Re-creation."""
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
Initcode,
Transaction,
compute_create2_address,
)
from ethereum_test_tools import Opcodes as Op
from .spec import ref_spec_1014
REFERENCE_SPEC_GIT_PATH = ref_spec_1014.git_path
REFERENCE_SPEC_VERSION = ref_spec_1014.version
@pytest.mark.parametrize("recreate_on_separate_block", [True, False])
@pytest.mark.valid_from("Constantinople")
@pytest.mark.valid_until("Shanghai")
def test_recreate(
blockchain_test: BlockchainTestFiller,
pre: Alloc,
fork: Fork,
recreate_on_separate_block: bool,
) -> None:
"""
Test that the storage is cleared when a contract is first destructed then
re-created using CREATE2.
"""
creator_contract_code = Op.CALLDATACOPY(0, 0, Op.CALLDATASIZE) + Op.CREATE2(
0, 0, Op.CALLDATASIZE, 0
)
creator_address = pre.deploy_contract(creator_contract_code)
sender = pre.fund_eoa()
deploy_code = (
Op.EQ(0, Op.CALLVALUE)
+ Op.PUSH1(0xC)
+ Op.JUMPI
+ Op.SSTORE(0, Op.CALLVALUE)
+ Op.STOP
+ Op.JUMPDEST
+ Op.PUSH1(0x0)
+ Op.SELFDESTRUCT
)
initcode = Initcode(deploy_code=deploy_code)
create_tx = Transaction(
gas_limit=100_000,
to=creator_address,
data=initcode,
sender=sender,
)
created_contract_address = compute_create2_address(
address=creator_address, salt=0, initcode=initcode
)
set_storage_tx = Transaction(
gas_limit=100_000,
to=created_contract_address,
value=1,
sender=sender,
)
blocks = [Block(txs=[create_tx, set_storage_tx])]
destruct_tx = Transaction(
gas_limit=100_000,
to=created_contract_address,
value=0,
sender=sender,
)
balance = 1
send_funds_tx = Transaction(
gas_limit=100_000,
to=created_contract_address,
value=balance,
sender=sender,
)
re_create_tx = Transaction(
gas_limit=100_000,
to=creator_address,
data=initcode,
sender=sender,
)
if recreate_on_separate_block:
blocks.append(Block(txs=[destruct_tx, send_funds_tx]))
blocks.append(Block(txs=[re_create_tx]))
else:
blocks.append(Block(txs=[destruct_tx, send_funds_tx, re_create_tx]))
post = {
created_contract_address: Account(
nonce=1,
balance=balance,
code=deploy_code,
storage={},
),
}
blockchain_test(pre=pre, post=post, blocks=blocks)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip1014_create2/test_create_returndata.py | tests/constantinople/eip1014_create2/test_create_returndata.py | """
Return data management around create2 Port
call_outsize_then_create2_successful_then_returndatasizeFiller.json test Port
call_then_create2_successful_then_returndatasizeFiller.json test.
"""
import pytest
from ethereum_test_tools import Account, Alloc, StateTestFiller, Transaction, keccak256
from ethereum_test_tools import Opcodes as Op
from .spec import ref_spec_1014
REFERENCE_SPEC_GIT_PATH = ref_spec_1014.git_path
REFERENCE_SPEC_VERSION = ref_spec_1014.version
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreate2/call_outsize_then_create2_successful_then_returndatasizeFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stCreate2/call_then_create2_successful_then_returndatasizeFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/497"],
coverage_missed_reason="coinbase is deleted in original test (tx.gas_price==env.base_fee)",
)
@pytest.mark.valid_from("Istanbul")
@pytest.mark.parametrize("call_return_size", [35, 32, 0])
@pytest.mark.parametrize("create_type", [Op.CREATE, Op.CREATE2])
@pytest.mark.parametrize("return_type", [Op.RETURN, Op.REVERT])
@pytest.mark.parametrize("return_type_in_create", [Op.RETURN, Op.REVERT])
@pytest.mark.slow()
def test_create2_return_data(
call_return_size: int,
create_type: Op,
return_type: Op,
return_type_in_create: Op,
pre: Alloc,
state_test: StateTestFiller,
) -> None:
"""
Validate that create2 return data does not interfere with previously
existing memory.
"""
# Storage vars
slot_returndatasize_before_create = 0
slot_returndatasize_after_create = 1
slot_return_data_hash_before_create = 2
slot_return_data_hash_after_create = 3
slot_code_worked = 4
slot_returndatacopy_before_create = 5
slot_returndatacopy_before_create_2 = 6
slot_returndatacopy_after_create = 7
slot_begin_memory_after_create = 8
# CREATE2 Initcode
return_data_in_create = 0xFFFAFB
initcode = Op.MSTORE(0, return_data_in_create) + return_type_in_create(0, 32)
call_return_data_value = 0x1122334455667788991011121314151617181920212223242526272829303132
expected_call_return_data = int.to_bytes(call_return_data_value, 32, byteorder="big").ljust(
call_return_size, b"\0"
)[0:call_return_size]
expected_returndatacopy = expected_call_return_data[0:32]
empty_data = b""
address_call = pre.deploy_contract(
code=Op.MSTORE(0, call_return_data_value)
+ Op.MSTORE(32, 0xFFFFFFFF)
+ return_type(0, call_return_size),
storage={},
)
address_to = pre.deploy_contract(
balance=100_000_000,
code=Op.JUMPDEST()
+ Op.MSTORE(0x100, Op.CALLDATALOAD(0))
+ Op.CALL(0x0900000000, address_call, 0, 0, 0, 0, call_return_size)
#
#
+ Op.SSTORE(slot_returndatasize_before_create, Op.RETURNDATASIZE())
+ Op.RETURNDATACOPY(0x200, 0, call_return_size)
+ Op.SSTORE(slot_returndatacopy_before_create, Op.MLOAD(0x200))
+ Op.SSTORE(slot_returndatacopy_before_create_2, Op.MLOAD(0x220))
+ Op.SSTORE(slot_return_data_hash_before_create, Op.SHA3(0, call_return_size))
#
#
+ create_type(offset=0x100, size=Op.CALLDATASIZE())
+ Op.SSTORE(slot_returndatasize_after_create, Op.RETURNDATASIZE())
+ Op.RETURNDATACOPY(0x300, 0, Op.RETURNDATASIZE())
+ Op.SSTORE(slot_returndatacopy_after_create, Op.MLOAD(0x300))
+ Op.SSTORE(slot_return_data_hash_after_create, Op.SHA3(0x300, Op.RETURNDATASIZE()))
+ Op.SSTORE(slot_begin_memory_after_create, Op.MLOAD(0))
+ Op.SSTORE(slot_code_worked, 1)
+ Op.STOP(),
storage={
slot_returndatasize_before_create: 0xFF,
slot_returndatasize_after_create: 0xFF,
slot_return_data_hash_before_create: 0xFF,
slot_return_data_hash_after_create: 0xFF,
slot_returndatacopy_before_create: 0xFF,
slot_returndatacopy_before_create_2: 0xFF,
slot_begin_memory_after_create: 0xFF,
},
)
post = {
address_to: Account(
storage={
slot_code_worked: 1,
slot_returndatacopy_before_create: expected_returndatacopy,
slot_returndatacopy_before_create_2: 0,
#
# the actual bytes returned by returndatacopy opcode after
# create
slot_returndatacopy_after_create: (
return_data_in_create if return_type_in_create == Op.REVERT else 0
),
slot_returndatasize_before_create: call_return_size,
#
# return datasize value after create
slot_returndatasize_after_create: (
0x20 if return_type_in_create == Op.REVERT else 0
),
#
slot_return_data_hash_before_create: keccak256(expected_call_return_data),
slot_return_data_hash_after_create: (
keccak256(empty_data)
if return_type_in_create == Op.RETURN
else keccak256(int.to_bytes(return_data_in_create, 32, byteorder="big"))
),
#
# check that create 2 didn't mess up with initial memory space
# declared for return
slot_begin_memory_after_create: expected_returndatacopy,
}
)
}
tx = Transaction(
sender=pre.fund_eoa(),
to=address_to,
protected=False,
data=initcode,
gas_limit=500_000,
value=0,
)
state_test(pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/constantinople/eip1014_create2/__init__.py | tests/constantinople/eip1014_create2/__init__.py | """
Tests for [EIP-1014: Skinny CREATE2](https://eips.ethereum.org/EIPS/eip-1014).
"""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/paris/__init__.py | tests/paris/__init__.py | """Test cases for EVM functionality introduced in Paris (Merge)."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/paris/security/__init__.py | tests/paris/security/__init__.py | """Ethereum execution client tests related to security issues."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/paris/security/test_selfdestruct_balance_bug.py | tests/paris/security/test_selfdestruct_balance_bug.py | """
Tests the Consensus Flaw During Block Processing related to SELFDESTRUCT.
Tests the consensus-vulnerability reported in
[go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4](https://github.com/ethere
um/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4).
To reproduce the issue with this test case:
1. Fill the test with the most recent geth evm version.
2. Run the fixture output within a vulnerable geth version:
v1.9.20 > geth >= v1.9.4.
"""
import pytest
from ethereum_test_tools import (
Account,
Alloc,
Block,
BlockchainTestFiller,
CalldataCase,
Initcode,
Switch,
Transaction,
compute_create_address,
)
from ethereum_test_vm import Opcodes as Op
@pytest.mark.valid_from("Constantinople")
def test_tx_selfdestruct_balance_bug(blockchain_test: BlockchainTestFiller, pre: Alloc) -> None:
"""
Test that the vulnerability is not present by checking the balance of the
`0xaa` contract after executing specific transactions.
1. Start with contract `0xaa` which has initial balance of 3 wei.
`0xaa` contract code simply performs a self-destruct to itself.
2. Send a transaction (tx 1) to invoke caller contract `0xcc` (which has a
balance of 1 wei), which in turn invokes `0xaa` with a 1 wei call.
3. Store the balance of `0xaa` after the first transaction is processed.
`0xaa` self-destructed. Expected outcome: 0 wei.
4. Send another transaction (tx 2) to call 0xaa with 5 wei.
5. Store the balance of `0xaa` after the second transaction is processed.
No self-destruct. Expected outcome: 5 wei.
6. Verify that:
- Call within tx 1 is successful, i.e `0xaa` self-destructed.
- The balances of `0xaa` after each tx are correct.
- During tx 2, code in `0xaa` does not execute,
hence self-destruct mechanism does not trigger.
TODO: EOF - This test could be parametrized for EOFCREATE
"""
deploy_code = Switch(
default_action=Op.REVERT(0, 0),
cases=[
CalldataCase(
value=0,
action=Op.SELFDESTRUCT(Op.ADDRESS),
),
CalldataCase(
value=1,
action=Op.SSTORE(0, Op.SELFBALANCE),
),
],
)
aa_code = Initcode(
deploy_code=deploy_code,
)
cc_code = (
Op.CALLDATACOPY(size=Op.CALLDATASIZE)
+ Op.MSTORE(
0,
Op.CREATE(
value=3, # Initial balance of 3 wei
offset=0,
size=Op.CALLDATASIZE,
),
)
+ Op.SSTORE(0xCA1101, Op.CALL(gas=100000, address=Op.MLOAD(0), value=0))
+ Op.CALL(gas=100000, address=Op.MLOAD(0), value=1)
)
cc_address = pre.deploy_contract(cc_code, balance=1000000000)
aa_location = compute_create_address(address=cc_address, nonce=1)
balance_code = Op.SSTORE(0xBA1AA, Op.BALANCE(aa_location))
balance_address_1 = pre.deploy_contract(balance_code)
balance_address_2 = pre.deploy_contract(balance_code)
sender = pre.fund_eoa()
blocks = [
Block(
txs=[
# Sender invokes caller, caller invokes 0xaa:
# calling with 1 wei call
Transaction(
sender=sender,
to=cc_address,
data=aa_code,
gas_limit=1000000,
),
# Dummy tx to store balance of 0xaa after first TX.
Transaction(
sender=sender,
to=balance_address_1,
gas_limit=100000,
),
# Sender calls 0xaa with 5 wei.
Transaction(
sender=sender,
to=aa_location,
gas_limit=100000,
value=5,
),
# Dummy tx to store balance of 0xaa after second TX.
Transaction(
sender=sender,
to=balance_address_2,
gas_limit=100000,
),
],
),
]
post = {
# Check call from caller has succeeded.
cc_address: Account(nonce=2, storage={0xCA1101: 1}),
# Check balance of 0xaa after tx 1 is 0 wei, i.e self-destructed.
# Vulnerable versions should return 1 wei.
balance_address_1: Account(storage={0xBA1AA: 0}),
# Check that 0xaa exists and balance after tx 2 is 5 wei.
# Vulnerable versions should return 6 wei.
balance_address_2: Account(storage={0xBA1AA: 5}),
aa_location: Account(storage={0: 0}),
}
blockchain_test(pre=pre, post=post, blocks=blocks)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/paris/eip7610_create_collision/__init__.py | tests/paris/eip7610_create_collision/__init__.py | """Cross-client Create Collision Tests."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/paris/eip7610_create_collision/test_initcollision.py | tests/paris/eip7610_create_collision/test_initcollision.py | """
Test collision in CREATE/CREATE2 account creation, where the existing account
only has a non-zero storage slot set.
"""
import pytest
from ethereum_test_tools import (
Account,
Alloc,
Bytecode,
Initcode,
StateTestFiller,
Transaction,
compute_create_address,
)
from ethereum_test_tools import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-7610.md"
REFERENCE_SPEC_VERSION = "80ef48d0bbb5a4939ade51caaaac57b5df6acd4e"
pytestmark = [
pytest.mark.valid_from("Paris"),
pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stSStoreTest/InitCollisionFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stSStoreTest/InitCollisionNonZeroNonceFiller.json",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stSStoreTest/InitCollisionParisFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/636"],
),
pytest.mark.parametrize(
"collision_nonce,collision_balance,collision_code",
[
pytest.param(0, 0, b"\0", id="non-empty-code"),
pytest.param(0, 1, b"", id="non-empty-balance"),
pytest.param(1, 0, b"", id="non-empty-nonce"),
],
),
pytest.mark.parametrize(
"initcode",
[
pytest.param(
Initcode(
deploy_code=Op.STOP,
initcode_prefix=Op.SSTORE(0, 1) + Op.SSTORE(1, 0),
),
id="correct-initcode",
),
pytest.param(Op.REVERT(0, 0), id="revert-initcode"),
pytest.param(Op.MSTORE(0xFFFFFFFFFFFFFFFFFFFFFFFFFFF, 1), id="oog-initcode"),
],
),
pytest.mark.pre_alloc_modify, # We need to modify the pre-alloc to include the collision
]
@pytest.mark.with_all_contract_creating_tx_types
def test_init_collision_create_tx(
state_test: StateTestFiller,
pre: Alloc,
tx_type: int,
collision_nonce: int,
collision_balance: int,
collision_code: bytes,
initcode: Bytecode,
) -> None:
"""
Test that a contract creation transaction exceptionally aborts when
the target address has a non-empty storage, balance, nonce, or code.
"""
tx = Transaction(
sender=pre.fund_eoa(),
type=tx_type,
to=None,
data=initcode,
gas_limit=200_000,
)
created_contract_address = tx.created_contract
# This is the collision
pre[created_contract_address] = Account(
storage={0x01: 0x01},
nonce=collision_nonce,
balance=collision_balance,
code=collision_code,
)
state_test(
pre=pre,
post={
created_contract_address: Account(
storage={0x01: 0x01},
),
},
tx=tx,
)
@pytest.mark.parametrize("opcode", [Op.CREATE, Op.CREATE2])
def test_init_collision_create_opcode(
state_test: StateTestFiller,
pre: Alloc,
opcode: Op,
collision_nonce: int,
collision_balance: int,
collision_code: bytes,
initcode: Bytecode,
) -> None:
"""
Test that a contract creation opcode exceptionally aborts when the target
address has a non-empty storage, balance, nonce, or code.
"""
assert len(initcode) <= 32
contract_creator_code = (
Op.MSTORE(0, Op.PUSH32(bytes(initcode).ljust(32, b"\0")))
+ Op.SSTORE(0x01, opcode(value=0, offset=0, size=len(initcode)))
+ Op.STOP
)
contract_creator_address = pre.deploy_contract(
contract_creator_code,
storage={0x01: 0x01},
)
created_contract_address = compute_create_address(
address=contract_creator_address,
nonce=1,
salt=0,
initcode=initcode,
opcode=opcode,
)
tx = Transaction(
sender=pre.fund_eoa(),
to=contract_creator_address,
data=initcode,
gas_limit=2_000_000,
)
pre[created_contract_address] = Account(
storage={0x01: 0x01},
nonce=collision_nonce,
balance=collision_balance,
code=collision_code,
)
state_test(
pre=pre,
post={
created_contract_address: Account(
storage={0x01: 0x01},
),
contract_creator_address: Account(storage={0x01: 0x00}),
},
tx=tx,
)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/__init__.py | tests/homestead/__init__.py | """Test cases for EVM functionality introduced in Homestead."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/identity_precompile/test_identity.py | tests/homestead/identity_precompile/test_identity.py | """abstract: EIP-2: Homestead Identity Precompile Test Cases."""
import pytest
from ethereum_test_tools import (
Account,
Alloc,
Environment,
StateTestFiller,
Transaction,
keccak256,
)
from ethereum_test_tools import Opcodes as Op
@pytest.mark.with_all_call_opcodes()
@pytest.mark.valid_from("Byzantium")
def test_identity_return_overwrite(
state_test: StateTestFiller,
pre: Alloc,
call_opcode: Op,
) -> None:
"""
Test the return data of the identity precompile overwriting its input.
"""
code = (
sum(Op.MSTORE8(offset=i, value=(i + 1)) for i in range(4)) # memory = [1, 2, 3, 4]
+ call_opcode(
address=4,
args_offset=0,
args_size=4, # args = [1, 2, 3, 4]
ret_offset=1,
ret_size=4,
) # memory = [1, 1, 2, 3, 4]
+ Op.RETURNDATACOPY(
dest_offset=0, offset=0, size=Op.RETURNDATASIZE()
) # memory correct = [1, 2, 3, 4, 4], corrupt = [1, 1, 2, 3, 4]
+ Op.SSTORE(1, Op.SHA3(offset=0, size=Op.MSIZE))
)
contract_address = pre.deploy_contract(
code=code,
)
tx = Transaction(
sender=pre.fund_eoa(),
to=contract_address,
gas_limit=100_000,
)
post = {
contract_address: Account(
storage={
1: keccak256(bytes([1, 2, 3, 4, 4]).ljust(32, b"\0")),
},
),
}
state_test(pre=pre, post=post, tx=tx)
@pytest.mark.with_all_call_opcodes()
@pytest.mark.valid_from("Byzantium")
def test_identity_return_buffer_modify(
state_test: StateTestFiller,
pre: Alloc,
call_opcode: Op,
) -> None:
"""
Test the modification of the input range to attempt to modify the return
buffer.
"""
env = Environment()
code = (
sum(Op.MSTORE8(offset=i, value=(i + 1)) for i in range(4)) # memory = [1, 2, 3, 4]
+ call_opcode(
address=4,
args_offset=0,
args_size=4, # args = [1, 2, 3, 4]
) # memory = [1, 2, 3, 4]
+ Op.MSTORE8(offset=0, value=5) # memory = [5, 2, 3, 4]
+ Op.MSTORE8(offset=4, value=5) # memory = [5, 2, 3, 4, 5]
+ Op.RETURNDATACOPY(
dest_offset=0, offset=0, size=Op.RETURNDATASIZE()
) # memory correct = [1, 2, 3, 4, 5], corrupt = [5, 2, 3, 4, 5]
+ Op.SSTORE(1, Op.SHA3(offset=0, size=Op.MSIZE))
)
contract_address = pre.deploy_contract(
code=code,
)
tx = Transaction(
sender=pre.fund_eoa(),
to=contract_address,
gas_limit=100_000,
)
post = {
contract_address: Account(
storage={
1: keccak256(bytes([1, 2, 3, 4, 5]).ljust(32, b"\0")),
},
),
}
state_test(env=env, pre=pre, post=post, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/identity_precompile/__init__.py | tests/homestead/identity_precompile/__init__.py | """abstract: EIP-2: Homestead Precompile Identity Test Cases."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/coverage/test_coverage.py | tests/homestead/coverage/test_coverage.py | """
Tests that address coverage gaps that result from updating `ethereum/tests`
into EEST tests.
"""
import pytest
from ethereum_test_forks import Cancun, Fork
from ethereum_test_tools import Alloc, Environment, StateTestFiller, Transaction
from ethereum_test_vm import Opcodes as Op
REFERENCE_SPEC_GIT_PATH = "N/A"
REFERENCE_SPEC_VERSION = "N/A"
@pytest.mark.valid_from("Homestead")
def test_coverage(
    state_test: StateTestFiller,
    pre: Alloc,
    fork: Fork,
) -> None:
    """
    Exercise opcodes that Yul-generated tests emit but our Python opcode
    wrappers rarely produce (stack shuffling via SWAP1/DUP1, assorted
    PUSHn widths), closing coverage gaps left when porting `ethereum/tests`.

    Extend the opcode sequence below if more Yul constructs need coverage.
    """
    # Contract containing the rarely-emitted opcode sequence.
    missed_coverage = pre.deploy_contract(
        balance=0,
        code=Op.SHL(0x0000000000000000000000000000000000000000000000000000000000000001, 0x00)
        + Op.SHR(0x0000000000000000000000000000000000000000000000000000000000000001, 0x00)
        + Op.PUSH1(0x0A)
        + Op.PUSH1(0x0B)
        + Op.PUSH1(0x0C)
        + Op.PUSH1(0x0D)
        + Op.PUSH1(0x0E)
        + Op.SWAP1()
        + Op.DUP1()
        + Op.DUP2()
        + Op.PUSH0()
        + Op.PUSH2(0x0102)
        + Op.PUSH3(0x010203)
        + Op.PUSH4(0x01020304)
        + Op.PUSH32(0x0101010101010101010101010101010101010101010101010101010101010101)
        + Op.MSTORE8(0x00, 0x01)
        + Op.ADD(0x02, 0x03)
        + Op.POP(0x01)
        # lllc tests insert codecopy when using lll(seq())
        + Op.CODECOPY(0, 16, 4),
        storage={},
    )
    # Entry point: calls the coverage contract and returns the CALL result.
    address_to = pre.deploy_contract(
        balance=1_000_000_000_000_000_000,
        code=Op.MSTORE(0, Op.CALL(Op.GAS, missed_coverage, 0, 0, 0, 0, 0)) + Op.RETURN(0, 32),
    )
    # Transaction fields shared across forks; type-2 fee fields are only
    # added from Cancun onwards.
    tx_fields: dict = dict(
        sender=pre.fund_eoa(7_000_000_000_000_000_000),
        gas_limit=100000,
        to=address_to,
        data=b"",
        value=0,
        protected=False,
    )
    if fork >= Cancun:
        tx_fields.update(
            access_list=[],
            max_fee_per_gas=10,
            max_priority_fee_per_gas=5,
        )
    tx = Transaction(**tx_fields)
    state_test(env=Environment(), pre=pre, post={}, tx=tx)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/coverage/__init__.py | tests/homestead/coverage/__init__.py | """Tests that fill coverage gaps when porting over from `ethereum/tests`."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/homestead/yul/__init__.py | tests/homestead/yul/__init__.py | """Tests using Yul source for contracts."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/__init__.py | tests/frontier/__init__.py | """Test cases for EVM functionality introduced in Frontier."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/identity_precompile/test_identity.py | tests/frontier/identity_precompile/test_identity.py | """Test identity precompile output size."""
from typing import Tuple
import pytest
from ethereum_test_base_types.composite_types import Storage
from ethereum_test_tools import (
Account,
Alloc,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .common import CallArgs, generate_identity_call_bytecode
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentitiy_0Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentitiy_1Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_1_nonzeroValueFiller.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_2Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_3Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_4Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_4_gas17Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_4_gas18Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentitiy_0Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentitiy_1Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_1_nonzeroValueFiller.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_2Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_3Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_4Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_4_gas17Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_4_gas18Filler.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1344"],
coverage_missed_reason="MPT related coverage lost, not relevant to this test",
)
@pytest.mark.valid_from("Byzantium")
@pytest.mark.parametrize("call_type", [Op.CALL, Op.CALLCODE])
@pytest.mark.parametrize(
[
"call_args",
"memory_values",
"contract_balance",
"call_succeeds",
],
[
pytest.param(CallArgs(gas=0xFF), (0x1,), 0x0, True, id="identity_0"),
pytest.param(
CallArgs(args_size=0x0),
(0x0,),
0x0,
True,
id="identity_1",
),
pytest.param(
CallArgs(gas=0x30D40, value=0x1, args_size=0x0),
(0x1,),
0x1,
True,
id="identity_1_nonzerovalue",
),
pytest.param(
CallArgs(gas=0x30D40, value=0x1, args_size=0x0),
None,
0x0,
False,
id="identity_1_nonzerovalue_insufficient_balance",
),
pytest.param(
CallArgs(args_size=0x25),
(0xF34578907F,),
0x0,
True,
id="identity_2",
),
pytest.param(
CallArgs(args_size=0x25),
(0xF34578907F,),
0x0,
True,
id="identity_3",
),
pytest.param(
CallArgs(gas=0x64),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,),
0x0,
True,
id="identity_4",
),
pytest.param(
CallArgs(gas=0x11),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,),
0x0,
False,
id="identity_4_insufficient_gas",
),
pytest.param(
CallArgs(gas=0x12),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,),
0x0,
True,
id="identity_4_exact_gas",
),
],
)
def test_call_identity_precompile(
    state_test: StateTestFiller,
    pre: Alloc,
    call_type: Op,
    call_args: CallArgs,
    memory_values: Tuple[int, ...],
    call_succeeds: bool,
    tx_gas_limit: int,
    contract_balance: int,
) -> None:
    """
    Test identity precompile RETURNDATA is sized correctly based on the input
    size.

    Deploys a caller contract whose bytecode performs the parametrized
    CALL/CALLCODE to the identity precompile and records the expected
    success flag and memory contents into storage for post-state checks.
    """
    expected_storage = Storage()
    # Bytecode that performs the precompile call and self-verifies memory.
    caller_code = generate_identity_call_bytecode(
        expected_storage,
        call_type,
        memory_values,
        call_args,
        call_succeeds,
    )
    caller_address = pre.deploy_contract(
        caller_code,
        storage=expected_storage.canary(),
        balance=contract_balance,
    )
    tx = Transaction(
        to=caller_address,
        sender=pre.fund_eoa(),
        gas_limit=tx_gas_limit,
    )
    state_test(
        env=Environment(),
        pre=pre,
        post={caller_address: Account(storage=expected_storage)},
        tx=tx,
    )
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CALLCODEIdentity_5Filler.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts2/CallIdentity_5Filler.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1344"],
coverage_missed_reason="MPT related coverage lost, not relevant to this test",
)
@pytest.mark.valid_from("Byzantium")
@pytest.mark.parametrize("call_type", [Op.CALL, Op.CALLCODE])
@pytest.mark.parametrize(
[
"call_args",
"memory_values",
"call_succeeds",
],
[
pytest.param(
CallArgs(gas=0x258, args_size=0xF4240),
(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,),
False,
id="identity_5",
),
pytest.param(
CallArgs(gas=0x258, ret_size=0x40),
(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF,
0x1234,
),
True,
id="identity_6",
),
],
)
@pytest.mark.parametrize("tx_gas_limit", [10_000_000])
def test_call_identity_precompile_large_params(
    state_test: StateTestFiller,
    pre: Alloc,
    call_type: Op,
    call_args: CallArgs,
    memory_values: Tuple[int, ...],
    call_succeeds: bool,
    tx_gas_limit: int,
) -> None:
    """
    Test identity precompile when out of gas occurs.

    Same structure as `test_call_identity_precompile`, but with large
    argument/return sizes intended to trigger out-of-gas conditions.
    """
    expected_storage = Storage()
    caller_code = generate_identity_call_bytecode(
        expected_storage,
        call_type,
        memory_values,
        call_args,
        call_succeeds,
    )
    caller_address = pre.deploy_contract(
        caller_code,
        storage=expected_storage.canary(),
    )
    tx = Transaction(
        to=caller_address,
        sender=pre.fund_eoa(),
        gas_limit=tx_gas_limit,
    )
    state_test(
        env=Environment(),
        pre=pre,
        post={caller_address: Account(storage=expected_storage)},
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/identity_precompile/common.py | tests/frontier/identity_precompile/common.py | """Common utilities for the Identity Precompile tests."""
from dataclasses import dataclass
from typing import Tuple
from pydantic import BaseModel
from ethereum_test_base_types.composite_types import Storage
from ethereum_test_tools import (
Bytecode,
)
from ethereum_test_tools import Opcodes as Op
@dataclass(frozen=True)
class Constants:
    """Constants for the Identity Precompile."""

    # Address 0x04 is the identity (datacopy) precompile.
    IDENTITY_PRECOMPILE_ADDRESS = 0x04
class CallArgs(BaseModel):
    """
    Defines inputs to CALL for the Identity precompile.

    Field names mirror the keyword arguments of the call opcodes; the
    model is expanded via `model_dump()` when building the call bytecode.
    """

    # Call target; defaults to the identity precompile (0x04).
    address: int = Constants.IDENTITY_PRECOMPILE_ADDRESS
    # Gas forwarded with the call.
    gas: int = 0x1F4
    # Wei sent with the call.
    value: int = 0x0
    # Memory region passed as call data.
    args_offset: int = 0x0
    args_size: int = 0x20
    # Memory region receiving the return data.
    ret_offset: int = 0x0
    ret_size: int = 0x20
def generate_identity_call_bytecode(
    storage: Storage,
    call_type: Op,
    memory_values: Tuple[int, ...],
    call_args: CallArgs,
    call_succeeds: bool,
) -> Bytecode:
    """
    Generate bytecode for calling the identity precompile with given memory
    values.

    Args:
        storage (Storage): The storage object to use for storing values.
        call_type (Op): The type of call opcode (CALL or CALLCODE).
        memory_values (Tuple[int, ...]): Values stored to memory (one
            32-byte word each) before the call; may be empty or None.
        call_args (CallArgs): Arguments for the CALL opcode.
        call_succeeds (bool): Whether the call should succeed or not.

    Returns: Bytecode: The generated bytecode for the identity precompile call.
    """
    code = Bytecode()
    # Write each provided value into consecutive 32-byte memory words.
    words = tuple(memory_values) if memory_values else ()
    for word_index, word in enumerate(words):
        code += Op.MSTORE(word_index * 0x20, word)
    # Offset/value of the last word written; (0, 0) when nothing was written.
    last_offset = (len(words) - 1) * 0x20 if words else 0
    last_value = words[-1] if words else 0
    # Perform the call, then check the last memory word was not clobbered.
    code += (
        Op.SSTORE(
            storage.store_next(call_succeeds),
            call_type(**call_args.model_dump()),
        )
        + Op.SSTORE(storage.store_next(last_value), Op.MLOAD(last_offset))
        + Op.STOP
    )
    return code
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/identity_precompile/conftest.py | tests/frontier/identity_precompile/conftest.py | """Pytest (plugin) definitions local to Identity precompile tests."""
import pytest
@pytest.fixture
def tx_gas_limit() -> int:
    """Gas limit applied to identity-precompile test transactions."""
    default_gas_limit = 365_224
    return default_gas_limit
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/identity_precompile/test_identity_returndatasize.py | tests/frontier/identity_precompile/test_identity_returndatasize.py | """Test identity precompile output size."""
import pytest
from ethereum_test_base_types.composite_types import Storage
from ethereum_test_tools import (
Account,
Alloc,
Environment,
StateTestFiller,
Transaction,
)
from ethereum_test_tools import Opcodes as Op
from .common import Constants
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts/identity_to_biggerFiller.json",
"https://github.com/ethereum/tests/blob/v17.1/src/GeneralStateTestsFiller/stPreCompiledContracts/identity_to_smallerFiller.json",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/1344"],
)
@pytest.mark.valid_from("Byzantium")
@pytest.mark.parametrize(
["args_size", "output_size", "expected_returndatasize"],
[
pytest.param(16, 32, 16, id="output_size_greater_than_input"),
pytest.param(32, 16, 32, id="output_size_less_than_input"),
],
)
def test_identity_precompile_returndata(
    state_test: StateTestFiller,
    pre: Alloc,
    args_size: int,
    output_size: int,
    expected_returndatasize: int,
) -> None:
    """
    Test identity precompile RETURNDATA is sized correctly based on the input
    size.

    RETURNDATASIZE after the call must equal the input size, regardless
    of how large a return buffer the caller requested.
    """
    expected_storage = Storage()
    caller_code = (
        Op.MSTORE(0, 0)
        + Op.GAS
        + Op.MSTORE(0, 0x112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF00)
        + Op.POP(
            Op.CALL(
                address=Constants.IDENTITY_PRECOMPILE_ADDRESS,
                args_offset=0,
                args_size=args_size,
                ret_offset=0x10,
                ret_size=output_size,
            )
        )
        + Op.SSTORE(expected_storage.store_next(expected_returndatasize), Op.RETURNDATASIZE)
        + Op.STOP
    )
    account = pre.deploy_contract(
        caller_code,
        storage=expected_storage.canary(),
    )
    tx = Transaction(
        to=account,
        sender=pre.fund_eoa(),
        gas_limit=200_000,
        protected=True,
    )
    state_test(
        env=Environment(),
        pre=pre,
        post={account: Account(storage=expected_storage)},
        tx=tx,
    )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/identity_precompile/__init__.py | tests/frontier/identity_precompile/__init__.py | """Test for precompiles that apply for all forks starting from Frontier."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/common.py | tests/frontier/scenarios/common.py | """Define Scenario structures and helpers for test_scenarios test."""
from abc import abstractmethod
from dataclasses import dataclass
from enum import Enum
from ethereum_test_forks import Fork, Frontier
from ethereum_test_tools import Address, Alloc, Bytecode, Conditional, MemoryVariable
from ethereum_test_vm import Opcodes as Op
class ScenarioExpectOpcode(Enum):
    """Opcodes that are replaced to real values computed by the scenario."""

    TX_ORIGIN = 1
    CODE_ADDRESS = 2
    CODE_CALLER = 3
    SELFBALANCE = 4
    # NOTE(review): value 5 is unused; gap kept to preserve existing values.
    CALL_VALUE = 6
    CALL_DATALOAD_0 = 7
    CALL_DATASIZE = 8
    GASPRICE = 9
    # BLOCKHASH_0 has no mapping in ProgramResult.translate_result and thus
    # translates to 0.
    BLOCKHASH_0 = 10
    COINBASE = 11
    TIMESTAMP = 12
    NUMBER = 13
    GASLIMIT = 14
@dataclass
class ScenarioEnvironment:
    """
    Scenario EVM environment. Each scenario must define the environment in
    which the program is executed, so that post-state verification can check
    the results of EVM opcodes.
    """

    code_address: Address  # Op.ADDRESS, address scope for program
    code_caller: Address  # Op.CALLER, caller of the program
    selfbalance: int  # Op.SELFBALANCE, balance of the environment of the program
    call_value: int  # Op.CALLVALUE of call that is done to the program
    call_dataload_0: int  # Op.CALLDATALOAD(0) expected result
    call_datasize: int  # Op.CALLDATASIZE expected result
    has_static: bool = False  # Whether scenario execution context is static
@dataclass
class ExecutionEnvironment:
    """
    Scenario execution environment, determined by the test (not by the
    scenario). Supplies the values that `ProgramResult.translate_result`
    resolves fork/transaction/block-level opcodes against.
    """

    fork: Fork
    gasprice: int  # expected Op.GASPRICE result
    origin: Address  # expected Op.ORIGIN result
    coinbase: Address  # expected Op.COINBASE result
    timestamp: int  # expected Op.TIMESTAMP result
    number: int  # expected Op.NUMBER result
    gaslimit: int  # expected Op.GASLIMIT result
@dataclass
class ProgramResult:
    """
    Describe expected result of a program.

    Attributes:
        result (int | ScenarioExpectOpcode): The result of the program
        from_fork (Fork): The result is only valid from this fork
            (default: Frontier)
        static_support (bool): Can be verified in static context (default: True)

    """

    # May be a literal value or a symbolic opcode expectation.
    result: int | ScenarioExpectOpcode
    from_fork: Fork = Frontier
    static_support: bool = True

    def translate_result(
        self, env: ScenarioEnvironment, exec_env: ExecutionEnvironment
    ) -> int | Address:
        """
        Translate expected program result code into concrete value, given the
        scenario evm environment and test execution environment.
        """
        # Before the activation fork, or in an unsupported static context,
        # the program is expected to yield 0.
        if exec_env.fork < self.from_fork:
            return 0
        if env.has_static and not self.static_support:
            return 0
        # Literal results pass straight through.
        if not isinstance(self.result, ScenarioExpectOpcode):
            return self.result
        # Symbolic results resolve against the scenario / execution
        # environments.
        resolvers: dict = {
            ScenarioExpectOpcode.TX_ORIGIN: lambda: exec_env.origin,
            ScenarioExpectOpcode.CODE_ADDRESS: lambda: env.code_address,
            ScenarioExpectOpcode.CODE_CALLER: lambda: env.code_caller,
            ScenarioExpectOpcode.CALL_VALUE: lambda: int(env.call_value),
            ScenarioExpectOpcode.CALL_DATALOAD_0: lambda: env.call_dataload_0,
            ScenarioExpectOpcode.CALL_DATASIZE: lambda: env.call_datasize,
            ScenarioExpectOpcode.GASPRICE: lambda: exec_env.gasprice,
            ScenarioExpectOpcode.COINBASE: lambda: exec_env.coinbase,
            ScenarioExpectOpcode.TIMESTAMP: lambda: exec_env.timestamp,
            ScenarioExpectOpcode.NUMBER: lambda: exec_env.number,
            ScenarioExpectOpcode.GASLIMIT: lambda: exec_env.gaslimit,
            ScenarioExpectOpcode.SELFBALANCE: lambda: int(env.selfbalance),
        }
        resolve = resolvers.get(self.result)
        # Opcodes with no resolver (e.g. BLOCKHASH_0) translate to 0.
        return resolve() if resolve is not None else 0
class ScenarioTestProgram:
    """
    Base class for deploying test code that will be used in scenarios.

    NOTE(review): this class does not inherit `abc.ABC`, so the
    `@abstractmethod` markers are not enforced at instantiation time —
    confirm this is intended.
    """

    @abstractmethod
    def make_test_code(self, pre: Alloc, fork: Fork) -> Bytecode:
        """Test code to be deployed."""
        pass

    @property
    @abstractmethod
    def id(self) -> str:
        """Test program pytest id."""
        pass

    @abstractmethod
    def result(self) -> ProgramResult:
        """Test program result."""
        pass

    def __str__(self) -> str:
        """Return the ID (for use as pytest test name)."""
        return self.id
@dataclass
class ScenarioDebug:
    """
    Debug selector for the development.

    Empty strings act as wildcards: `test_scenarios` skips the filter for
    any field left empty.
    """

    # Restrict the run to the program with this id ("" selects all programs).
    program_id: str
    # Restrict the run to the scenario with this name ("" selects all).
    scenario_name: str
@dataclass
class ScenarioGeneratorInput:
    """
    Parameters for the scenario generator function.

    Attributes:
        fork (Fork): Fork for which we ask to generate scenarios
        pre (Alloc): Access to the state to be able to deploy contracts into pre
        operation_code (Bytecode): EVM bytecode program that will be tested

    """

    fork: Fork
    pre: Alloc
    operation_code: Bytecode
@dataclass
class Scenario:
    """
    Describe test scenario that will be run in test for each program.

    Attributes:
        category (str): Scenario category name
        name (str): Scenario name for the test vector
        code (Address): Address that is an entry point for scenario code
        env (ScenarioEnvironment): Evm values for ScenarioExpectAddress map
        halts (bool): If scenario halts program execution,
            making result 0 (default: False)

    """

    category: str
    name: str
    code: Address
    env: ScenarioEnvironment
    halts: bool = False
def make_gas_hash_contract(pre: Alloc) -> Address:
    """
    Contract that spends unique amount of gas based on input.

    Used for the values we can't predict, can be gas consuming on high values.
    So that if we can't check exact value in expect section, we at least
    could spend unique gas amount.

    The contract walks its calldata byte by byte and, for each byte, burns
    gas by decrementing the byte's value to zero in a loop.
    """
    # EVM memory variables
    byte_offset = MemoryVariable(0)  # index of the calldata byte being consumed
    current_byte = MemoryVariable(32)  # value of that byte, counted down to 0
    # Code for memory initialization
    initialize_code = byte_offset.set(0)
    # Copy one calldata byte into the low-order byte of `current_byte`'s word.
    calldata_copy = Op.JUMPDEST + Op.CALLDATACOPY(
        dest_offset=current_byte.offset + 32 - 1,
        offset=byte_offset,
        size=1,
    )
    # Code offsets: jump destinations derived from bytecode segment lengths,
    # so the segments below must stay in exactly this order.
    offset_calldata_copy = len(initialize_code)
    offset_conditional = offset_calldata_copy + len(calldata_copy)
    # Deploy contract
    gas_hash_address = pre.deploy_contract(
        code=initialize_code
        + calldata_copy  # offset_calldata_copy
        + Op.JUMPDEST  # offset_conditional
        + Conditional(
            condition=Op.ISZERO(current_byte),
            if_true=(
                # Increase the calldata byte offset, and if it's greater than
                # the calldata size, return, otherwise jump to the calldata
                # copy code and read the next byte.
                byte_offset.add(1)
                + Conditional(
                    condition=Op.GT(byte_offset, Op.CALLDATASIZE()),
                    if_true=Op.RETURN(offset=0, size=0),
                    if_false=Op.JUMP(offset_calldata_copy),
                )
            ),
            # Burn gas proportional to the byte value: decrement and loop.
            if_false=(current_byte.sub(1) + Op.JUMP(offset_conditional)),
        )
    )
    return gas_hash_address
def make_invalid_opcode_contract(pre: Alloc, fork: Fork) -> Address:
    """
    Deploy a contract that will execute any asked byte as an opcode from
    calldataload Deploy 20 empty stack elements. Jump to opcode instruction. if
    worked, return 0.
    """
    # Jump table: each entry is JUMPDEST + <raw opcode byte> + RETURN(0, 0),
    # 7 bytes per entry — hence the `7 * opcode` stride in the JUMP target.
    invalid_opcode_caller = pre.deploy_contract(
        code=Op.PUSH1(0) * 20
        + Op.JUMP(Op.ADD(Op.MUL(7, Op.CALLDATALOAD(0)), 20 * 2 + 10))
        + sum(
            [
                Op.JUMPDEST
                + Bytecode(bytes([opcode]), popped_stack_items=0, pushed_stack_items=0)
                + Op.RETURN(0, 0)
                for opcode in range(0x00, 0xFF)
            ],
        )
    )
    # Collect all byte values that are not valid opcodes on this fork.
    # NOTE(review): range(0x00, 0xFF) stops at 0xFE, so byte 0xFF is never
    # included here (nor present in the jump table) — confirm intended.
    invalid_opcodes = []
    valid_opcode_values = [opcode.int() for opcode in fork.valid_opcodes()]
    for op in range(0x00, 0xFF):
        if op not in valid_opcode_values:
            invalid_opcodes.append(op)
    results_sum = MemoryVariable(0)
    current_opcode = MemoryVariable(32)
    # For each invalid opcode: CALL the jump-table contract with the opcode
    # as calldata and accumulate the CALL success flags. Invalid opcodes
    # must abort the subcall, so each flag is expected to be 0.
    code = Bytecode(
        sum(
            current_opcode.set(opcode)
            + results_sum.add(
                Op.CALL(
                    gas=50000,
                    address=invalid_opcode_caller,
                    args_offset=current_opcode.offset,
                    args_size=32,
                ),
            )
            for opcode in invalid_opcodes
        )
        # If any of invalid instructions works, mstore[0] will be > 1
        + results_sum.add(1)
        + results_sum.return_value()
    )
    return pre.deploy_contract(code=code)
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/test_scenarios.py | tests/frontier/scenarios/test_scenarios.py | """
Call every possible opcode and test that the subcall is successful if the
opcode is supported by the fork and fails otherwise.
"""
from typing import List
import pytest
from ethereum_test_forks import Fork
from ethereum_test_tools import (
Account,
Address,
Alloc,
Block,
BlockchainTestFiller,
Environment,
Storage,
Transaction,
)
from ethereum_test_vm import Opcodes as Op
from .common import (
ExecutionEnvironment,
Scenario,
ScenarioDebug,
ScenarioGeneratorInput,
ScenarioTestProgram,
)
from .programs.all_frontier_opcodes import ProgramAllFrontierOpcodes
from .programs.context_calls import (
ProgramAddress,
ProgramBalance,
ProgramBasefee,
ProgramBlobBaseFee,
ProgramBlobhash,
ProgramBlockhash,
ProgramCallDataCopy,
ProgramCallDataLoad,
ProgramCallDataSize,
ProgramCaller,
ProgramCallValue,
ProgramChainid,
ProgramCodeCopyCodeSize,
ProgramCoinbase,
ProgramDifficultyRandao,
ProgramExtCodeCopyExtCodeSize,
ProgramExtCodehash,
ProgramGasLimit,
ProgramGasPrice,
ProgramMcopy,
ProgramNumber,
ProgramOrigin,
ProgramPush0,
ProgramReturnDataCopy,
ProgramReturnDataSize,
ProgramSelfbalance,
ProgramTimestamp,
ProgramTload,
)
from .programs.invalid_opcodes import ProgramInvalidOpcode
from .programs.static_violation import (
ProgramLogs,
ProgramSstoreSload,
ProgramSuicide,
ProgramTstoreTload,
)
from .scenarios.call_combinations import ScenariosCallCombinations
from .scenarios.create_combinations import scenarios_create_combinations
from .scenarios.double_call_combinations import scenarios_double_call_combinations
from .scenarios.revert_combinations import scenarios_revert_combinations
REFERENCE_SPEC_GIT_PATH = "N/A"
REFERENCE_SPEC_VERSION = "N/A"
@pytest.fixture
def scenarios(fork: Fork, pre: Alloc, test_program: ScenarioTestProgram) -> List[Scenario]:
    """
    Define fixture vectors of all possible scenarios, given the current pre
    state input.

    Combines the call, create, revert and double-call scenario generators
    for the program under test.
    """
    generator_input = ScenarioGeneratorInput(
        fork=fork,
        pre=pre,
        operation_code=test_program.make_test_code(pre, fork),
    )
    all_scenarios: List[Scenario] = []
    all_scenarios.extend(ScenariosCallCombinations(generator_input).generate())
    all_scenarios.extend(scenarios_create_combinations(generator_input))
    all_scenarios.extend(scenarios_revert_combinations(generator_input))
    all_scenarios.extend(scenarios_double_call_combinations(generator_input))
    return all_scenarios
@pytest.mark.ported_from(
[
"https://github.com/ethereum/tests/blob/v13.3/src/Templates/DiffPlaces/templateGen.js",
"https://github.com/ethereum/tests/blob/v13.3/src/Templates/DiffPlaces/createDiffPlacesTests.sh",
"https://github.com/ethereum/tests/blob/v13.3/src/Templates/DiffPlaces/createBadOpcodeTest.sh",
"https://github.com/ethereum/tests/blob/v13.3/src/Templates/DiffPlaces/createAllBadOpcodeTests.sh",
"https://github.com/ethereum/tests/blob/v13.3/src/GeneralStateTestsFiller/stSelfBalance/diffPlacesFiller.yml",
],
pr=["https://github.com/ethereum/execution-spec-tests/pull/808"],
coverage_missed_reason=("Original test pre-sets storage of some of the deployed accounts."),
)
@pytest.mark.valid_from("Frontier")
@pytest.mark.parametrize(
# select program to debug ("program_id","scenario_name")
# program=""
# select all programs scenario_name=""
# select all scenarios
#
# Example:
# [ScenarioDebug(program_id=ProgramSstoreSload().id,
# scenario_name="scenario_CALL_CALL")]
"debug",
[
ScenarioDebug(
program_id="",
scenario_name="",
)
],
ids=["debug"],
)
@pytest.mark.parametrize(
"test_program",
[
ProgramSstoreSload(),
ProgramTstoreTload(),
ProgramLogs(),
ProgramSuicide(),
ProgramInvalidOpcode(),
ProgramAddress(),
ProgramBalance(),
ProgramOrigin(),
ProgramCaller(),
ProgramCallValue(),
ProgramCallDataLoad(),
ProgramCallDataSize(),
ProgramCallDataCopy(),
ProgramCodeCopyCodeSize(),
ProgramGasPrice(),
ProgramExtCodeCopyExtCodeSize(),
ProgramReturnDataSize(),
ProgramReturnDataCopy(),
ProgramExtCodehash(),
ProgramBlockhash(),
ProgramCoinbase(),
ProgramTimestamp(),
ProgramNumber(),
ProgramDifficultyRandao(),
ProgramGasLimit(),
ProgramChainid(),
ProgramSelfbalance(),
ProgramBasefee(),
ProgramBlobhash(),
ProgramBlobBaseFee(),
ProgramTload(),
ProgramMcopy(),
ProgramPush0(),
ProgramAllFrontierOpcodes(),
],
)
@pytest.mark.slow()
def test_scenarios(
    blockchain_test: BlockchainTestFiller,
    fork: Fork,
    pre: Alloc,
    debug: ScenarioDebug,
    test_program: ScenarioTestProgram,
    scenarios: List[Scenario],
) -> None:
    """
    Test given operation in different scenarios Verify that it's return value
    equal to expected result on every scenario, that is valid for the given
    fork.

    Note: Don't use pytest parametrize for scenario production, because
    scenarios will be complex Generate one test file for [each operation] *
    [each scenario] to save space As well as operations will be complex too
    """
    tx_env = Environment()
    tx_origin: Address = pre.fund_eoa()
    tests: int = 0  # number of scenarios that pass the debug filter
    blocks: List[Block] = []
    post: dict = {}
    for scenario in scenarios:
        # Apply the debug filter; empty filter strings select everything.
        if debug.scenario_name and scenario.name != debug.scenario_name:
            continue
        if debug.program_id:
            if test_program.id != debug.program_id:
                continue
        tests = tests + 1
        post_storage = Storage()
        # Slot set to 1 at the end of the runner code; proves the runner ran
        # to completion (pre-state seeds it with 0xFFFF).
        result_slot = post_storage.store_next(1, hint=f"runner result {scenario.name}")
        # The invalid-opcode program performs many subcalls and needs far
        # more gas than the other programs.
        tx_max_gas = 7_000_000 if test_program.id == ProgramInvalidOpcode().id else 1_000_000
        if scenario.category == "double_call_combinations":
            tx_max_gas *= 2
        tx_gasprice: int = 10
        exec_env = ExecutionEnvironment(
            fork=fork,
            origin=tx_origin,
            gasprice=tx_gasprice,
            timestamp=tx_env.timestamp,  # we can't know timestamp before head,
            # use gas hash
            number=len(blocks) + 1,  # one block is appended per scenario
            gaslimit=tx_env.gas_limit,
            coinbase=tx_env.fee_recipient,
        )

        def make_result(scenario: Scenario, exec_env: ExecutionEnvironment, post: Storage) -> int:
            """Make Scenario post result."""
            # Halting scenarios always yield 0; otherwise translate the
            # program's expected result for this scenario's environment.
            if scenario.halts:
                return post.store_next(0, hint=scenario.name)
            else:
                return post.store_next(
                    test_program.result().translate_result(scenario.env, exec_env),
                    hint=scenario.name,
                )

        # Runner contract: calls the scenario entry point, stores its
        # 32-byte return value at the expected-result slot, then flags
        # completion in `result_slot`.
        runner_contract = pre.deploy_contract(
            code=Op.MSTORE(0, 0)
            + Op.CALL(tx_max_gas, scenario.code, 0, 0, 0, 0, 32)
            + Op.SSTORE(make_result(scenario, exec_env, post_storage), Op.MLOAD(0))
            + Op.SSTORE(result_slot, 1),
            storage={
                result_slot: 0xFFFF,
            },
        )
        tx = Transaction(
            sender=tx_origin,
            gas_limit=tx_max_gas + 100_000,
            gas_price=tx_gasprice,
            to=runner_contract,
            data=bytes.fromhex("11223344"),
            value=0,
            protected=False,
        )
        post[runner_contract] = Account(storage=post_storage)
        # One block per scenario; `post` accumulates entries across blocks.
        blocks.append(Block(txs=[tx], post=post))
    if tests > 0:
        blockchain_test(
            pre=pre,
            blocks=blocks,
            post=post,
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/__init__.py | tests/frontier/scenarios/__init__.py | """Scenarios common import."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/scenarios/create_combinations.py | tests/frontier/scenarios/scenarios/create_combinations.py | """Define Scenario that will put a given program in create contexts."""
from dataclasses import dataclass
from typing import List
from ethereum_test_tools import Alloc, Bytecode
from ethereum_test_types import compute_create_address
from ethereum_test_vm import EVMCodeType, Opcode
from ethereum_test_vm import Macros as Om
from ethereum_test_vm import Opcodes as Op
from ..common import Scenario, ScenarioEnvironment, ScenarioGeneratorInput
@dataclass
class AddressBalance:
    """Definition of values we use to put in contract balances and call."""

    # Value sent with the root call into the scenario.
    root_call_value = 1
    # Value endowed to the created contract.
    create_value = 3
    # Value forwarded with a nested CALL.
    call_value = 5
    # Pre-state balances of the root and scenario contracts.
    root_contract_balance = 100
    scenario_contract_balance = 200
def scenarios_create_combinations(scenario_input: ScenarioGeneratorInput) -> List[Scenario]:
    """Generate Scenarios for create combinations.

    Exercises ``scenario_input.operation_code`` in two CREATE contexts:

    1. As the *init-code* of a CREATE/CREATE2 (the operation runs inside the
       constructor and its 32-byte result becomes the deployed code).
    2. As the *runtime code* of a contract deployed via CREATE/CREATE2, which
       is then invoked with every legacy call opcode of the fork.
    """

    def _compute_selfbalance() -> int:
        """
        Compute selfbalance opcode for root -> call -> scenario ->
        create | [call*] -> program.
        """
        # NOTE(review): `call` and `balance` are captured from the enclosing
        # function's loop scope below; this helper is only valid when invoked
        # from inside the create/call double loop.
        if call in [Op.DELEGATECALL, Op.CALLCODE]:
            return (
                balance.scenario_contract_balance + balance.root_call_value - balance.create_value
            )
        if call == Op.CALL:
            return balance.create_value + balance.call_value
        return balance.create_value

    scenarios_list: List[Scenario] = []
    keep_gas = 100000  # gas reserved in the calling frame before each sub-call
    # Only legacy (non-EOF) create opcodes are exercised here.
    create_types: List[Opcode] = [
        create_code
        for create_code, evm_type in scenario_input.fork.create_opcodes()
        if evm_type == EVMCodeType.LEGACY
    ]

    env: ScenarioEnvironment
    balance: AddressBalance = AddressBalance()

    # run code in create constructor
    for create in create_types:
        # CREATE2 takes an extra salt argument on the stack; CREATE does not.
        salt = [0] if create == Op.CREATE2 else []
        operation_contract = scenario_input.pre.deploy_contract(code=scenario_input.operation_code)

        # the code result in init code will be actually code of a deployed
        # contract
        scenario_contract = scenario_input.pre.deploy_contract(
            balance=3,
            # Copy the operation code into memory, use it as the init-code of
            # the CREATE/CREATE2 (endowing 3 wei), then read back the first
            # 32 bytes of the created contract's code as the result.
            code=Op.EXTCODECOPY(operation_contract, 0, 0, Op.EXTCODESIZE(operation_contract))
            + Op.MSTORE(0, create(3, 0, Op.EXTCODESIZE(operation_contract), *salt))
            + Op.EXTCODECOPY(Op.MLOAD(0), 0, 0, 32)
            + Op.RETURN(0, 32),
        )

        # Predict the constructor's address (nonce 1: first create performed
        # by the freshly deployed scenario contract).
        created_address = compute_create_address(
            address=scenario_contract,
            nonce=1,
            initcode=scenario_input.operation_code,
            opcode=Op.CREATE if create == Op.CREATE else Op.CREATE2,
        )
        env = ScenarioEnvironment(
            # Define address on which behalf program is executed
            code_address=created_address,
            code_caller=scenario_contract,
            selfbalance=3,
            call_value=3,
            call_dataload_0=0,
            call_datasize=0,
        )
        scenarios_list.append(
            Scenario(
                category="create_constructor_combinations",
                name=f"scenario_{create}_constructor",
                code=scenario_contract,
                env=env,
            )
        )

    # create a contract with test code and call it
    # NOTE(review): reuses `operation_contract` from the last iteration of
    # the loop above — correct only because every deployed operation contract
    # has identical code; verify if the loop above can run zero times.
    deploy_code = Bytecode(
        Op.EXTCODECOPY(operation_contract, 0, 0, Op.EXTCODESIZE(operation_contract))
        + Op.RETURN(0, Op.EXTCODESIZE(operation_contract))
    )
    deploy_code_size: int = int(len(deploy_code.hex()) / 2)  # bytes, from hex string

    # Only legacy (non-EOF) call opcodes are exercised.
    call_types: List[Opcode] = [
        callcode
        for callcode, evm_type in scenario_input.fork.call_opcodes()
        if evm_type == EVMCodeType.LEGACY
    ]

    pre: Alloc = scenario_input.pre
    for create in create_types:
        for call in call_types:
            salt = [0] if create == Op.CREATE2 else []
            scenario_contract = pre.deploy_contract(
                balance=balance.scenario_contract_balance,
                # Deploy the operation code via CREATE/CREATE2, then call the
                # resulting contract; DELEGATECALL/STATICCALL take no value
                # argument, hence the two call forms.
                code=Om.MSTORE(deploy_code, 0)
                + Op.MSTORE(32, create(balance.create_value, 0, deploy_code_size, *salt))
                + Op.MSTORE(0, 0)
                + Op.MSTORE(64, 1122334455)
                + (
                    call(
                        gas=Op.SUB(Op.GAS, keep_gas),
                        address=Op.MLOAD(32),
                        args_offset=64,
                        args_size=40,
                        ret_offset=0,
                        ret_size=32,
                        value=balance.call_value,
                    )
                    if call not in [Op.DELEGATECALL, Op.STATICCALL]
                    else call(
                        gas=Op.SUB(Op.GAS, keep_gas),
                        address=Op.MLOAD(32),
                        args_offset=64,
                        args_size=40,
                        ret_offset=0,
                        ret_size=32,
                    )
                )
                + Op.RETURN(0, 32),
            )

            root_contract = pre.deploy_contract(
                balance=balance.root_contract_balance,
                code=Op.CALL(
                    gas=Op.SUB(Op.GAS, keep_gas),
                    address=scenario_contract,
                    ret_size=32,
                    value=balance.root_call_value,
                )
                + Op.RETURN(0, 32),
            )

            created_address = compute_create_address(
                address=scenario_contract,
                nonce=1,
                initcode=deploy_code,
                opcode=Op.CREATE if create == Op.CREATE else Op.CREATE2,
            )
            env = ScenarioEnvironment(
                # Define address on which behalf program is executed
                code_address=(
                    scenario_contract
                    if call in [Op.CALLCODE, Op.DELEGATECALL]
                    else created_address
                ),
                code_caller=root_contract if call == Op.DELEGATECALL else scenario_contract,
                selfbalance=_compute_selfbalance(),
                # STATICCALL forwards no value; DELEGATECALL inherits the
                # parent frame's value (the root -> scenario call value).
                call_value=(
                    0
                    if call in [Op.STATICCALL]
                    else (
                        balance.root_call_value
                        if call in [Op.DELEGATECALL]
                        else balance.call_value
                    )
                ),
                call_dataload_0=1122334455,
                call_datasize=40,
                has_static=True if call == Op.STATICCALL else False,
            )
            scenarios_list.append(
                Scenario(
                    category="create_call_combinations",
                    name=f"scenario_{create}_then_{call}",
                    code=root_contract,
                    env=env,
                )
            )
    return scenarios_list
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/scenarios/revert_combinations.py | tests/frontier/scenarios/scenarios/revert_combinations.py | """Define Scenario that will run a given program and then revert."""
from typing import List
from ethereum_test_vm import Macro, Opcode
from ethereum_test_vm import Macros as Om
from ethereum_test_vm import Opcodes as Op
from ..common import Scenario, ScenarioEnvironment, ScenarioGeneratorInput
def scenarios_revert_combinations(scenario_input: ScenarioGeneratorInput) -> List[Scenario]:
    """Generate Scenarios for revert combinations.

    Each scenario CALLCODEs the operation contract and then terminates the
    frame with one of STOP / OOG / REVERT (REVERT only on forks where it is
    a valid opcode), finally returning the first 32 bytes of memory.
    """
    keep_gas = 100000
    # TODO stack underflow cause
    halting_ops: List[Opcode | Macro] = [Op.STOP, Om.OOG]
    if Op.REVERT in scenario_input.fork.valid_opcodes():
        halting_ops.append(Op.REVERT)

    result: List[Scenario] = []
    for halting_op in halting_ops:
        operation_contract = scenario_input.pre.deploy_contract(
            code=scenario_input.operation_code
        )
        # Run the operation in the scenario contract's own storage/context
        # via CALLCODE, then hit the terminator, then (if still alive)
        # return the operation's 32-byte result.
        scenario_code = (
            Op.CALLCODE(gas=Op.SUB(Op.GAS, keep_gas), address=operation_contract, ret_size=32)
            + halting_op(0, 32, unchecked=True)
            + Op.RETURN(0, 32)
        )
        scenario_contract = scenario_input.pre.deploy_contract(code=scenario_code)
        environment = ScenarioEnvironment(
            code_address=scenario_contract,
            code_caller=scenario_contract,
            selfbalance=0,
            call_value=0,
            call_dataload_0=0,
            call_datasize=0,
        )
        result.append(
            Scenario(
                category="revert_combinations",
                name=f"scenario_revert_by_{halting_op}",
                code=scenario_contract,
                env=environment,
                # REVERT ends the frame without halting the whole execution.
                halts=halting_op != Op.REVERT,
            )
        )
    return result
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/scenarios/double_call_combinations.py | tests/frontier/scenarios/scenarios/double_call_combinations.py | """Define Scenario that will run a given program and then revert."""
from typing import List
from ethereum_test_tools import Bytecode, Conditional
from ethereum_test_tools import Macros as Om
from ethereum_test_tools import Opcodes as Op
from ..common import Scenario, ScenarioEnvironment, ScenarioGeneratorInput
def scenarios_double_call_combinations(scenario_input: ScenarioGeneratorInput) -> List[Scenario]:
    """
    Generate Scenarios for double call combinations.

    First call the operation normally.
    Then do a subcall that will [OOG,REVERT,RETURN].
    Second call the operation normally.
    Compare the results of first call with the second operation call.
    """
    keep_gas = 300000
    # Frame terminators exercised in the sub-call between the two
    # operation calls (REVERT only on forks where it is valid).
    terminators: List[Bytecode] = [Op.STOP(), Om.OOG(), Op.RETURN(offset=0, size=32)]
    if Op.REVERT in scenario_input.fork.valid_opcodes():
        terminators.append(Op.REVERT(offset=0, size=32))

    result: List[Scenario] = []
    for terminator in terminators:
        operation_contract = scenario_input.pre.deploy_contract(code=scenario_input.operation_code)
        # Contract that scribbles over memory and then terminates; used to
        # try to disturb state between the two operation calls.
        subcall_contract = scenario_input.pre.deploy_contract(
            code=Op.MSTORE(0, 0x1122334455667788991011121314151617181920212223242526272829303132)
            + terminator
        )
        # Call the operation, stash its result at offset 100, run the
        # disturbing subcall, call the operation again, stash at 200, and
        # return the first result iff both results match (else return 0).
        comparison_code = (
            Op.CALL(gas=Op.SUB(Op.GAS, keep_gas), address=operation_contract, ret_size=32)
            + Op.MSTORE(100, Op.MLOAD(0))
            + Op.MSTORE(0, 0)
            + Op.CALL(gas=50_000, address=subcall_contract)
            + Op.CALL(gas=Op.SUB(Op.GAS, keep_gas), address=operation_contract, ret_size=32)
            + Op.MSTORE(200, Op.MLOAD(0))
            + Conditional(
                condition=Op.EQ(Op.MLOAD(100), Op.MLOAD(200)),
                if_true=Op.RETURN(100, 32),
                if_false=Op.MSTORE(0, 0) + Op.RETURN(0, 32),
            )
        )
        scenario_contract = scenario_input.pre.deploy_contract(code=comparison_code)
        environment = ScenarioEnvironment(
            code_address=operation_contract,
            code_caller=scenario_contract,
            selfbalance=0,
            call_value=0,
            call_dataload_0=0,
            call_datasize=0,
        )
        result.append(
            Scenario(
                category="double_call_combinations",
                name=f"scenario_call_then_{terminator}_in_subcall_then_call",
                code=scenario_contract,
                env=environment,
                halts=False,
            )
        )
    return result
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/scenarios/call_combinations.py | tests/frontier/scenarios/scenarios/call_combinations.py | """Define Scenario that will put a given program in all call contexts."""
from dataclasses import dataclass
from typing import List
from ethereum_test_tools import Address, Alloc
from ethereum_test_vm import EVMCodeType, Opcode
from ethereum_test_vm import Opcodes as Op
from ..common import Scenario, ScenarioEnvironment, ScenarioGeneratorInput
class ScenariosCallCombinations:
    """Class that would generate scenarios for all call combinations.

    Wraps ``scenario_input.operation_code`` behind one or two levels of call
    opcodes and records, for each combination, the environment (code address,
    caller, balance, call value, calldata, staticness) that the operation
    code is expected to observe.
    """

    @dataclass
    class AddressBalance:
        """Definition of values we use to put in contract balances and call."""

        # NOTE(review): these attributes have no type annotations, so they
        # are plain class attributes and @dataclass generates no fields —
        # the values are shared constants, which is how they are used here.
        root_call_value = 1
        first_call_value = 3
        second_call_value = 5
        root_contract_balance = 105
        scenario_contract_balance = 107
        sub_contract_balance = 111
        program_selfbalance = 113

    """The gas we keep before calling an address"""
    keep_gas = 100000

    """Possible calls list to make as a first call"""
    first_call_opcodes: List[Opcode] = []

    """Possible calls list to make as a second call"""
    second_call_opcodes: List[Opcode] = []

    """Balance map that we put in different accounts"""
    balance: AddressBalance

    scenario_input: ScenarioGeneratorInput
    env: ScenarioEnvironment

    def __init__(self, scenario_input: ScenarioGeneratorInput):
        """Define possible call combinations given the fork."""
        # Only legacy (non-EOF) call opcodes are exercised.
        self.first_call_opcodes = [
            callcode
            for callcode, evm_type in scenario_input.fork.call_opcodes()
            if evm_type == EVMCodeType.LEGACY
        ]
        # Second call may also be NOOP, meaning "no second call at all".
        self.second_call_opcodes = self.first_call_opcodes[:]
        self.second_call_opcodes.append(Op.NOOP)
        self.scenario_input = scenario_input
        self.balance = self.AddressBalance()

    def generate(self) -> List[Scenario]:
        """
        Generate Scenarios for call combinations We take code that we want to
        test at scenario_input.operation_contract and put it in the context of
        call combinations.

        Example:
        root_contract -> call -> scenario_contract -> first_call ->
        sub_contract sub_contact -> second_call -> code

        We assume that code always returns its result.
        That we pass as return value in scenario_contract for the
        post state verification.
        """
        scenarios_list: List[Scenario] = []
        for first_call in self.first_call_opcodes:
            for second_call in self.second_call_opcodes:
                # NOOP as second call => single-level call scenario.
                if second_call == Op.NOOP:
                    scenarios_list.append(self._generate_one_call_scenario(first_call))
                else:
                    scenarios_list.append(
                        self._generate_two_call_scenario(first_call, second_call)
                    )
        return scenarios_list

    def _generate_one_call_scenario(self, first_call: Opcode) -> Scenario:
        """
        Generate scenario for only one call root_contract -(CALL)->
        scenario_contract -(first_call)-> operation_contract.
        """
        scenario_input: ScenarioGeneratorInput = self.scenario_input
        pre: Alloc = scenario_input.pre
        balance = self.balance

        operation_contract = pre.deploy_contract(
            code=scenario_input.operation_code, balance=balance.program_selfbalance
        )

        # Place 1122334455 at memory offset 32 and forward 40 bytes of it as
        # calldata; DELEGATECALL/STATICCALL take no value argument, hence the
        # two call forms.
        scenario_contract = pre.deploy_contract(
            code=Op.MSTORE(32, 1122334455)
            + (
                first_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=operation_contract,
                    args_offset=32,
                    args_size=40,
                    ret_size=32,
                    value=balance.first_call_value,
                )
                if first_call not in [Op.DELEGATECALL, Op.STATICCALL]
                else first_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=operation_contract,
                    args_offset=32,
                    args_size=40,
                    ret_size=32,
                )
            )
            + Op.RETURN(0, 32),
            balance=balance.scenario_contract_balance,
        )

        root_contract = pre.deploy_contract(
            code=Op.CALL(
                gas=Op.SUB(Op.GAS, self.keep_gas),
                address=scenario_contract,
                ret_size=32,
            )
            + Op.RETURN(0, 32),
            balance=balance.root_contract_balance,
        )

        return Scenario(
            category="call_combinations",
            name=f"scenario_{first_call}",
            code=root_contract,
            env=ScenarioEnvironment(
                # Define address on which behalf program is executed
                code_address=(
                    scenario_contract
                    if first_call == Op.CALLCODE or first_call == Op.DELEGATECALL
                    else operation_contract
                ),
                # Define code_caller for Op.CALLER
                code_caller=(
                    root_contract if first_call == Op.DELEGATECALL else scenario_contract
                ),
                # Define balance for Op.BALANCE
                selfbalance=(
                    balance.scenario_contract_balance
                    if first_call in [Op.DELEGATECALL, Op.CALLCODE]
                    else (
                        balance.program_selfbalance
                        if first_call == Op.STATICCALL
                        else balance.first_call_value + balance.program_selfbalance
                    )
                ),
                # STATICCALL/DELEGATECALL forward no value of their own.
                call_value=(
                    0
                    if first_call in [Op.STATICCALL, Op.DELEGATECALL]
                    else balance.first_call_value
                ),
                call_dataload_0=1122334455,
                call_datasize=40,
                has_static=True if first_call == Op.STATICCALL else False,
            ),
        )

    def _generate_two_call_scenario(self, first_call: Opcode, second_call: Opcode) -> Scenario:
        """
        Generate scenario for two types of calls combination root_contract
        -(CALL)-> scenario_contract -(first_call)-> sub_contract sub_contract
        -(second_call) -> operation_contract.
        """

        # NOTE: the three helpers below close over root_contract,
        # scenario_contract, sub_contract, balance and second_call_value,
        # which are assigned further down; they are only called after those
        # assignments.

        def _compute_code_caller() -> Address:
            """
            Calculate who is the code caller in program_contract's code in
            given sequence root -CALL-> scenario_contract -(first_call)
            -> sub_contract -(second_call)-> program.
            """
            code_caller: Address = root_contract
            if first_call == Op.DELEGATECALL:
                code_caller = scenario_contract
                if second_call == Op.DELEGATECALL:
                    code_caller = root_contract
            else:
                if second_call == Op.DELEGATECALL:
                    code_caller = scenario_contract
                else:
                    code_caller = sub_contract
            if first_call == Op.CALLCODE:
                code_caller = scenario_contract
            return code_caller

        def _compute_selfbalance() -> int:
            """
            Calculate the result of Op.SELFBALANCE in program scope in given
            sequence root -CALL-> scenario_contract -(first_call)->
            sub_contract -(second_call)-> program.
            """
            selfbalance: int = 0
            # CALL/STATICCALL execute at the operation contract's address.
            if second_call in [Op.CALL]:
                selfbalance = second_call_value + balance.program_selfbalance
                return selfbalance
            if second_call in [Op.STATICCALL]:
                selfbalance = balance.program_selfbalance
                return selfbalance
            # DELEGATECALL/CALLCODE execute at the caller frame's address,
            # which depends on what the first call was.
            if first_call == Op.STATICCALL and second_call in [Op.DELEGATECALL, Op.CALLCODE]:
                selfbalance = balance.sub_contract_balance
            if first_call in [Op.CALLCODE, Op.DELEGATECALL] and second_call in [
                Op.DELEGATECALL,
                Op.CALLCODE,
            ]:
                selfbalance = balance.scenario_contract_balance + balance.root_call_value
            if first_call == Op.CALL and second_call in [Op.DELEGATECALL, Op.CALLCODE]:
                selfbalance = balance.sub_contract_balance + balance.first_call_value
            if first_call == Op.STATICCALL and second_call == Op.STATICCALL:
                selfbalance = balance.program_selfbalance
            return selfbalance

        def _compute_callvalue() -> int:
            """
            Calculate the expected callvalue in program scope given sequence:
            root -CALL-> scenario_contract -(first_call)-> sub_contract
            -(second_call)-> program.
            """
            if second_call == Op.STATICCALL:
                return 0
            if second_call == Op.DELEGATECALL:
                # DELEGATECALL inherits the value of the sub-contract's frame.
                if first_call == Op.STATICCALL:
                    return 0
                else:
                    if first_call == Op.DELEGATECALL:
                        return balance.root_call_value
                    else:
                        return balance.first_call_value
            else:
                return second_call_value

        scenario_input = self.scenario_input
        pre: Alloc = scenario_input.pre
        balance = self.balance
        # A static first call makes the whole subtree static: no value can
        # be forwarded by the second call.
        second_call_value = balance.second_call_value if first_call != Op.STATICCALL else 0

        operation_contract = pre.deploy_contract(
            code=scenario_input.operation_code, balance=balance.program_selfbalance
        )

        sub_contract = pre.deploy_contract(
            code=Op.MSTORE(32, 1122334455)
            + (
                second_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=operation_contract,
                    args_size=40,
                    args_offset=32,
                    ret_size=32,
                    value=second_call_value,
                )
                if second_call not in [Op.DELEGATECALL, Op.STATICCALL]
                else second_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=operation_contract,
                    args_size=40,
                    args_offset=32,
                    ret_size=32,
                )
            )
            + Op.RETURN(0, 32),
            balance=balance.sub_contract_balance,
        )

        scenario_contract = pre.deploy_contract(
            code=(
                first_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=sub_contract,
                    ret_size=32,
                    value=balance.first_call_value,
                )
                if first_call not in [Op.DELEGATECALL, Op.STATICCALL]
                else first_call(
                    gas=Op.SUB(Op.GAS, self.keep_gas),
                    address=sub_contract,
                    ret_size=32,
                )
            )
            + Op.RETURN(0, 32),
            balance=balance.scenario_contract_balance,
        )

        root_contract = pre.deploy_contract(
            balance=balance.root_contract_balance,
            code=Op.CALL(
                gas=Op.SUB(Op.GAS, self.keep_gas),
                address=scenario_contract,
                ret_size=32,
                value=balance.root_call_value,
            )
            + Op.RETURN(0, 32),
        )

        return Scenario(
            category="call_combinations",
            name=f"scenario_{first_call}_{second_call}",
            code=root_contract,
            env=ScenarioEnvironment(
                # Define address on which behalf program is executed
                code_address=(
                    operation_contract
                    if second_call not in [Op.CALLCODE, Op.DELEGATECALL]
                    else (
                        sub_contract
                        if first_call not in [Op.CALLCODE, Op.DELEGATECALL]
                        else scenario_contract
                    )
                ),
                # Define code_caller for Op.CALLER
                code_caller=_compute_code_caller(),
                selfbalance=_compute_selfbalance(),
                call_value=_compute_callvalue(),
                call_dataload_0=1122334455,
                call_datasize=40,
                has_static=(
                    True if first_call == Op.STATICCALL or second_call == Op.STATICCALL else False
                ),
            ),
        )
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
ethereum/execution-spec-tests | https://github.com/ethereum/execution-spec-tests/blob/88e9fb8f10ed89805aa3110d0a2cd5dcadc19689/tests/frontier/scenarios/scenarios/__init__.py | tests/frontier/scenarios/scenarios/__init__.py | """Scenarios common import."""
| python | MIT | 88e9fb8f10ed89805aa3110d0a2cd5dcadc19689 | 2026-01-05T06:50:32.790998Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.