repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/nft_wallet/test_nft_puzzles.py | tests/wallet/nft_wallet/test_nft_puzzles.py | from __future__ import annotations
from secrets import token_bytes
from typing import Tuple
from clvm.casts import int_from_bytes
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.wallet.nft_wallet import uncurry_nft
from flax.wallet.nft_wallet.nft_puzzles import (
construct_ownership_layer,
create_full_puzzle,
create_nft_layer_puzzle_with_curry_params,
recurry_nft_puzzle,
)
from flax.wallet.outer_puzzles import match_puzzle
from flax.wallet.puzzles.load_clvm import load_clvm
from flax.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk, solution_for_conditions
from flax.wallet.uncurried_puzzle import uncurry_puzzle
from tests.core.make_block_generator import int_to_public_key
SINGLETON_MOD = load_clvm("singleton_top_layer_v1_1.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
DID_MOD = load_clvm("did_innerpuz.clvm")
NFT_STATE_LAYER_MOD = load_clvm("nft_state_layer.clvm")
NFT_OWNERSHIP_LAYER = load_clvm("nft_ownership_layer.clvm")
NFT_TRANSFER_PROGRAM_DEFAULT = load_clvm("nft_ownership_transfer_program_one_way_claim_with_royalties.clvm")
LAUNCHER_PUZZLE_HASH = LAUNCHER_PUZZLE.get_tree_hash()
NFT_STATE_LAYER_MOD_HASH = NFT_STATE_LAYER_MOD.get_tree_hash()
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
OFFER_MOD = load_clvm("settlement_payments.clvm")
LAUNCHER_ID = Program.to(b"launcher-id").get_tree_hash()
NFT_METADATA_UPDATER_DEFAULT = load_clvm("nft_metadata_updater_default.clvm")
def test_nft_transfer_puzzle_hashes():
maker_pk = int_to_public_key(111)
maker_p2_puz = puzzle_for_pk(maker_pk)
maker_p2_ph = maker_p2_puz.get_tree_hash()
maker_did = Program.to("maker did").get_tree_hash()
# maker_did_inner_hash = Program.to("maker did inner hash").get_tree_hash()
metadata = [
("u", ["https://www.flaxnetwork.org/img/branding/flax-logo.svg"]),
("h", 0xD4584AD463139FA8C0D9F68F4B59F185),
]
metadata_updater_hash = NFT_METADATA_UPDATER_DEFAULT.get_tree_hash()
# royalty_addr = maker_p2_ph
royalty_pc = 2000 # basis pts
nft_id = Program.to("nft id").get_tree_hash()
SINGLETON_STRUCT = Program.to((SINGLETON_MOD_HASH, (nft_id, LAUNCHER_PUZZLE_HASH)))
transfer_puz = NFT_TRANSFER_PROGRAM_DEFAULT.curry(
SINGLETON_STRUCT,
maker_p2_ph,
royalty_pc,
)
ownership_puz = NFT_OWNERSHIP_LAYER.curry(
NFT_OWNERSHIP_LAYER.get_tree_hash(), maker_did, transfer_puz, maker_p2_puz
)
metadata_puz = NFT_STATE_LAYER_MOD.curry(
NFT_STATE_LAYER_MOD.get_tree_hash(), metadata, metadata_updater_hash, ownership_puz
)
nft_puz = SINGLETON_MOD.curry(SINGLETON_STRUCT, metadata_puz)
nft_info = match_puzzle(uncurry_puzzle(nft_puz))
assert nft_info.also().also() is not None
unft = uncurry_nft.UncurriedNFT.uncurry(*nft_puz.uncurry())
assert unft is not None
assert unft.supports_did
# setup transfer
taker_pk = int_to_public_key(222)
taker_p2_puz = puzzle_for_pk(taker_pk)
taker_p2_ph = taker_p2_puz.get_tree_hash()
# make nft solution
fake_lineage_proof = Program.to([token_bytes(32), maker_p2_ph, 1])
transfer_conditions = Program.to([[51, taker_p2_ph, 1, [taker_p2_ph]], [-10, [], [], []]])
ownership_sol = Program.to([solution_for_conditions(transfer_conditions)])
metadata_sol = Program.to([ownership_sol])
nft_sol = Program.to([fake_lineage_proof, 1, metadata_sol])
conds = nft_puz.run(nft_sol)
# get the new NFT puzhash
for cond in conds.as_iter():
if cond.first().as_int() == 51:
expected_ph = bytes32(cond.at("rf").atom)
# recreate the puzzle for new_puzhash
new_ownership_puz = NFT_OWNERSHIP_LAYER.curry(NFT_OWNERSHIP_LAYER.get_tree_hash(), None, transfer_puz, taker_p2_puz)
new_metadata_puz = NFT_STATE_LAYER_MOD.curry(
NFT_STATE_LAYER_MOD.get_tree_hash(), metadata, metadata_updater_hash, new_ownership_puz
)
new_nft_puz = SINGLETON_MOD.curry(SINGLETON_STRUCT, new_metadata_puz)
calculated_ph = new_nft_puz.get_tree_hash()
assert expected_ph == calculated_ph
def make_a_new_solution() -> Tuple[Program, Program]:
destination = int_to_public_key(2)
p2_puzzle = puzzle_for_pk(destination)
puzhash = p2_puzzle.get_tree_hash()
new_did = Program.to("test").get_tree_hash()
new_did_inner_hash = Program.to("fake").get_tree_hash()
trade_prices_list = [[200, OFFER_MOD.get_tree_hash()]]
condition_list = [
[
51,
puzhash,
1,
[puzhash],
],
[-10, new_did, trade_prices_list, new_did_inner_hash],
]
solution = Program.to(
[
[],
[],
[
[solution_for_conditions(condition_list)],
],
],
)
return p2_puzzle, solution
def make_a_new_ownership_layer_puzzle() -> Tuple[Program, Program]:
pubkey = int_to_public_key(1)
innerpuz = puzzle_for_pk(pubkey)
old_did = Program.to("test_2").get_tree_hash()
nft_id = Program.to("nft_id")
SINGLETON_STRUCT = Program.to((SINGLETON_MOD_HASH, (nft_id, LAUNCHER_PUZZLE_HASH)))
curried_tp = NFT_TRANSFER_PROGRAM_DEFAULT.curry(
SINGLETON_STRUCT,
innerpuz.get_tree_hash(),
2000,
)
curried_inner = innerpuz
curried_ownership_layer = construct_ownership_layer(old_did, curried_tp, curried_inner)
return innerpuz, curried_ownership_layer
def make_a_new_nft_puzzle(curried_ownership_layer: Program, metadata: Program) -> Program:
curried_state_layer = NFT_STATE_LAYER_MOD.curry(
NFT_STATE_LAYER_MOD_HASH, metadata, NFT_METADATA_UPDATER_DEFAULT.get_tree_hash(), curried_ownership_layer
)
return curried_state_layer
def get_updated_nft_puzzle(puzzle: Program, solution: Program) -> bytes32:
result = puzzle.run(solution)
for condition in result.as_iter():
code = int_from_bytes(condition.first().atom)
if code == 51:
if int_from_bytes(condition.rest().rest().first().atom) == 1:
# this is our new puzzle hash
return bytes32(condition.rest().first().atom)
raise ValueError("No create coin condition found")
def test_transfer_puzzle_builder() -> None:
metadata = [
("u", ["https://www.flaxnetwork.org/img/branding/flax-logo.svg"]),
("h", 0xD4584AD463139FA8C0D9F68F4B59F185),
]
sp2_puzzle, solution = make_a_new_solution()
p2_puzzle, ownership_puzzle = make_a_new_ownership_layer_puzzle()
clvm_nft_puzzle = create_nft_layer_puzzle_with_curry_params(
Program.to(metadata), NFT_METADATA_UPDATER_DEFAULT.get_tree_hash(), ownership_puzzle
)
puzzle = create_full_puzzle(
Program.to(["singleton_id"]).get_tree_hash(),
Program.to(metadata),
NFT_METADATA_UPDATER_DEFAULT.get_tree_hash(),
ownership_puzzle,
)
clvm_puzzle_hash = get_updated_nft_puzzle(clvm_nft_puzzle, solution.at("rrf"))
unft = uncurry_nft.UncurriedNFT.uncurry(*puzzle.uncurry())
assert unft is not None
assert unft.nft_state_layer == clvm_nft_puzzle
assert unft.inner_puzzle == ownership_puzzle
assert unft.p2_puzzle == p2_puzzle
ol_puzzle = recurry_nft_puzzle(unft, solution, sp2_puzzle)
nft_puzzle = create_nft_layer_puzzle_with_curry_params(
Program.to(metadata), NFT_METADATA_UPDATER_DEFAULT.get_tree_hash(), ol_puzzle
)
assert clvm_puzzle_hash == nft_puzzle.get_tree_hash()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/nft_wallet/test_nft_lifecycle.py | tests/wallet/nft_wallet/test_nft_lifecycle.py | from __future__ import annotations
import itertools
from typing import List, Tuple
import pytest
from blspy import G2Element
from flax.clvm.spend_sim import SimClient, SpendSim
from flax.types.announcement import Announcement
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.spend_bundle import SpendBundle
from flax.util.errors import Err
from flax.wallet.nft_wallet.nft_puzzles import (
NFT_METADATA_UPDATER,
NFT_TRANSFER_PROGRAM_DEFAULT,
construct_ownership_layer,
create_nft_layer_puzzle_with_curry_params,
metadata_to_program,
)
ACS = Program.to(1)
ACS_PH = ACS.get_tree_hash()
@pytest.mark.asyncio()
@pytest.mark.parametrize("metadata_updater", ["default"])
async def test_state_layer(setup_sim: Tuple[SpendSim, SimClient], metadata_updater: str) -> None:
sim, sim_client = setup_sim
try:
if metadata_updater == "default":
METADATA: Program = metadata_to_program(
{
b"u": ["hey hey"],
b"lu": ["You have no permissions grr"],
b"mu": ["This but off chain"],
b"foo": ["Can't update this"],
}
)
METADATA_UPDATER: Program = NFT_METADATA_UPDATER
else:
# TODO: Add test for updateable
return
METADATA_UPDATER_PUZZLE_HASH: bytes32 = METADATA_UPDATER.get_tree_hash()
state_layer_puzzle: Program = create_nft_layer_puzzle_with_curry_params(
METADATA, METADATA_UPDATER_PUZZLE_HASH, ACS
)
state_layer_ph: bytes32 = state_layer_puzzle.get_tree_hash()
await sim.farm_block(state_layer_ph)
state_layer_coin = (
await sim_client.get_coin_records_by_puzzle_hash(state_layer_ph, include_spent_coins=False)
)[0].coin
generic_spend = CoinSpend(
state_layer_coin,
state_layer_puzzle,
Program.to([[[51, ACS_PH, 1]]]),
)
generic_bundle = SpendBundle([generic_spend], G2Element())
result = await sim_client.push_tx(generic_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
if metadata_updater == "default":
metadata_updater_solutions: List[Program] = [
Program.to((b"u", "update")),
Program.to((b"lu", "update")),
Program.to((b"mu", "update")),
Program.to((b"foo", "update")),
]
expected_metadatas: List[Program] = [
metadata_to_program(
{
b"u": ["update", "hey hey"],
b"lu": ["You have no permissions grr"],
b"mu": ["This but off chain"],
b"foo": ["Can't update this"],
}
),
metadata_to_program(
{
b"u": ["update", "hey hey"],
b"lu": ["update", "You have no permissions grr"],
b"mu": ["This but off chain"],
b"foo": ["Can't update this"],
}
),
metadata_to_program(
{
b"u": ["update", "hey hey"],
b"lu": ["update", "You have no permissions grr"],
b"mu": ["update", "This but off chain"],
b"foo": ["Can't update this"],
}
),
metadata_to_program(
{ # no change
b"u": ["update", "hey hey"],
b"lu": ["update", "You have no permissions grr"],
b"mu": ["update", "This but off chain"],
b"foo": ["Can't update this"],
}
),
]
else:
return
for solution, metadata in zip(metadata_updater_solutions, expected_metadatas):
state_layer_coin = (
await sim_client.get_coin_records_by_parent_ids([state_layer_coin.name()], include_spent_coins=False)
)[0].coin
update_spend = CoinSpend(
state_layer_coin,
state_layer_puzzle,
Program.to(
[
[
[51, ACS_PH, 1],
[-24, METADATA_UPDATER, solution],
]
]
),
)
update_bundle = SpendBundle([update_spend], G2Element())
result = await sim_client.push_tx(update_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
state_layer_puzzle = create_nft_layer_puzzle_with_curry_params(metadata, METADATA_UPDATER_PUZZLE_HASH, ACS)
finally:
await sim.close()
@pytest.mark.asyncio()
async def test_ownership_layer(setup_sim: Tuple[SpendSim, SimClient]) -> None:
sim, sim_client = setup_sim
try:
TARGET_OWNER = bytes32([0] * 32)
TARGET_TP = Program.to([8]) # (x)
# (a (i 11 (q 4 19 (c 43 (q ()))) (q 8)) 1) or
# (mod (_ _ solution) (if solution (list (f solution) (f (r solution)) ()) (x)))
transfer_program = Program.to([2, [3, 11, [1, 4, 19, [4, 43, [1, []]]], [1, 8]], 1])
ownership_puzzle: Program = construct_ownership_layer(
None,
transfer_program,
ACS,
)
ownership_ph: bytes32 = ownership_puzzle.get_tree_hash()
await sim.farm_block(ownership_ph)
ownership_coin = (await sim_client.get_coin_records_by_puzzle_hash(ownership_ph, include_spent_coins=False))[
0
].coin
generic_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to([[[51, ACS_PH, 1], [-10, [], []]]]),
)
generic_bundle = SpendBundle([generic_spend], G2Element())
result = await sim_client.push_tx(generic_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
ownership_coin = (await sim_client.get_coin_records_by_puzzle_hash(ownership_ph, include_spent_coins=False))[
0
].coin
skip_tp_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to([[[51, ACS_PH, 1]]]),
)
skip_tp_bundle = SpendBundle([skip_tp_spend], G2Element())
result = await sim_client.push_tx(skip_tp_bundle)
assert result == (MempoolInclusionStatus.FAILED, Err.GENERATOR_RUNTIME_ERROR)
with pytest.raises(ValueError, match="clvm raise"):
skip_tp_spend.puzzle_reveal.to_program().run(skip_tp_spend.solution.to_program())
make_bad_announcement_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to(
[
[
[51, ACS_PH, 1],
[-10, TARGET_OWNER, TARGET_TP],
[62, b"\xad\x4c" + bytes32([0] * 32)],
]
]
),
)
make_bad_announcement_bundle = SpendBundle([make_bad_announcement_spend], G2Element())
result = await sim_client.push_tx(make_bad_announcement_bundle)
assert result == (MempoolInclusionStatus.FAILED, Err.GENERATOR_RUNTIME_ERROR)
with pytest.raises(ValueError, match="clvm raise"):
make_bad_announcement_spend.puzzle_reveal.to_program().run(
make_bad_announcement_spend.solution.to_program()
)
expected_announcement = Announcement(
ownership_puzzle.get_tree_hash(),
b"\xad\x4c" + Program.to([TARGET_OWNER, TARGET_TP]).get_tree_hash(),
)
harmless_announcement = Announcement(
ownership_puzzle.get_tree_hash(),
b"oy",
)
update_everything_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to(
[
[
[51, ACS_PH, 1],
[-10, TARGET_OWNER, TARGET_TP],
[62, harmless_announcement.message], # create a harmless puzzle announcement
[63, expected_announcement.name()],
[63, harmless_announcement.name()],
]
]
),
)
update_everything_bundle = SpendBundle([update_everything_spend], G2Element())
result = await sim_client.push_tx(update_everything_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
assert (await sim_client.get_coin_records_by_parent_ids([ownership_coin.name()], include_spent_coins=False))[
0
].coin.puzzle_hash == construct_ownership_layer(
TARGET_OWNER,
TARGET_TP,
ACS,
).get_tree_hash()
finally:
await sim.close()
@pytest.mark.asyncio()
async def test_default_transfer_program(setup_sim: Tuple[SpendSim, SimClient]) -> None:
sim, sim_client = setup_sim
try:
# Now make the ownership coin
FAKE_SINGLETON_MOD = Program.to([2, 5, 11]) # (a 5 11) | (mod (_ INNER_PUZ inner_sol) (a INNER_PUZ inner_sol))
FAKE_CAT_MOD = Program.to([2, 11, 23]) # (a 11 23) or (mod (_ _ INNER_PUZ inner_sol) (a INNER_PUZ inner_sol))
FAKE_LAUNCHER_ID = bytes32([0] * 32)
FAKE_TAIL = bytes32([2] * 32)
FAKE_SINGLETON_STRUCT = Program.to((FAKE_SINGLETON_MOD.get_tree_hash(), (FAKE_LAUNCHER_ID, FAKE_LAUNCHER_ID)))
FAKE_SINGLETON = FAKE_SINGLETON_MOD.curry(FAKE_SINGLETON_STRUCT, ACS)
FAKE_CAT = FAKE_CAT_MOD.curry(FAKE_CAT_MOD.get_tree_hash(), FAKE_TAIL, ACS)
ROYALTY_ADDRESS = bytes32([1] * 32)
TRADE_PRICE_PERCENTAGE = 5000 # 50%
transfer_program: Program = NFT_TRANSFER_PROGRAM_DEFAULT.curry(
FAKE_SINGLETON_STRUCT,
ROYALTY_ADDRESS,
TRADE_PRICE_PERCENTAGE,
)
ownership_puzzle: Program = construct_ownership_layer(
None,
transfer_program,
ACS,
)
ownership_ph: bytes32 = ownership_puzzle.get_tree_hash()
await sim.farm_block(ownership_ph)
ownership_coin = (await sim_client.get_coin_records_by_puzzle_hash(ownership_ph, include_spent_coins=False))[
0
].coin
BLOCK_HEIGHT = sim.block_height
# Try a spend, no royalties, no owner update
generic_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to([[[51, ACS_PH, 1]]]),
)
generic_bundle = SpendBundle([generic_spend], G2Element())
result = await sim_client.push_tx(generic_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
assert len(await sim_client.get_coin_records_by_puzzle_hash(ownership_ph, include_spent_coins=False)) > 0
await sim.rewind(BLOCK_HEIGHT)
# Now try an owner update plus royalties
await sim.farm_block(FAKE_SINGLETON.get_tree_hash())
await sim.farm_block(FAKE_CAT.get_tree_hash())
await sim.farm_block(ACS_PH)
singleton_coin = (
await sim_client.get_coin_records_by_puzzle_hash(FAKE_SINGLETON.get_tree_hash(), include_spent_coins=False)
)[0].coin
cat_coin = (
await sim_client.get_coin_records_by_puzzle_hash(FAKE_CAT.get_tree_hash(), include_spent_coins=False)
)[0].coin
xfx_coin = (await sim_client.get_coin_records_by_puzzle_hash(ACS_PH, include_spent_coins=False))[0].coin
ownership_spend = CoinSpend(
ownership_coin,
ownership_puzzle,
Program.to(
[[[51, ACS_PH, 1], [-10, FAKE_LAUNCHER_ID, [[100, ACS_PH], [100, FAKE_CAT.get_tree_hash()]], ACS_PH]]]
),
)
did_announcement_spend = CoinSpend(
singleton_coin,
FAKE_SINGLETON,
Program.to([[[62, FAKE_LAUNCHER_ID]]]),
)
expected_announcement_data = Program.to(
(FAKE_LAUNCHER_ID, [[ROYALTY_ADDRESS, 50, [ROYALTY_ADDRESS]]])
).get_tree_hash()
xfx_announcement_spend = CoinSpend(
xfx_coin,
ACS,
Program.to([[62, expected_announcement_data]]),
)
cat_announcement_spend = CoinSpend(cat_coin, FAKE_CAT, Program.to([[[62, expected_announcement_data]]]))
# Make sure every combo except all of them work
for i in range(1, 3):
for announcement_combo in itertools.combinations(
[did_announcement_spend, xfx_announcement_spend, cat_announcement_spend], i
):
result = await sim_client.push_tx(SpendBundle([ownership_spend, *announcement_combo], G2Element()))
assert result == (MempoolInclusionStatus.FAILED, Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
# Make sure all of them together pass
full_bundle = SpendBundle(
[ownership_spend, did_announcement_spend, xfx_announcement_spend, cat_announcement_spend], G2Element()
)
result = await sim_client.push_tx(full_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
# Finally, make sure we can just clear the DID label off
new_ownership_puzzle: Program = construct_ownership_layer(
FAKE_LAUNCHER_ID,
transfer_program,
ACS,
)
new_ownership_ph: bytes32 = new_ownership_puzzle.get_tree_hash()
await sim.farm_block(new_ownership_ph)
new_ownership_coin = (
await sim_client.get_coin_records_by_puzzle_hash(new_ownership_ph, include_spent_coins=False)
)[0].coin
empty_spend = CoinSpend(
new_ownership_coin,
new_ownership_puzzle,
Program.to([[[51, ACS_PH, 1], [-10, [], [], []]]]),
)
empty_bundle = SpendBundle([empty_spend], G2Element())
result = await sim_client.push_tx(empty_bundle)
assert result == (MempoolInclusionStatus.SUCCESS, None)
await sim.farm_block()
finally:
await sim.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/db_wallet/test_db_graftroot.py | tests/wallet/db_wallet/test_db_graftroot.py | from __future__ import annotations
from typing import Dict, List, Tuple
import pytest
from blspy import G2Element
from flax.clvm.spend_sim import SimClient, SpendSim
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.spend_bundle import SpendBundle
from flax.util.errors import Err
from flax.wallet.puzzles.load_clvm import load_clvm
from flax.wallet.util.merkle_utils import build_merkle_tree, build_merkle_tree_from_binary_tree, simplify_merkle_proof
GRAFTROOT_MOD = load_clvm("graftroot_dl_offers.clvm")
# Always returns the last value
# (mod solution
#
# (defun recurse (solution last_value)
# (if solution
# (recurse (r solution) (f solution))
# last_value
# )
# )
#
# (recurse solution ())
# )
ACS = Program.fromhex(
"ff02ffff01ff02ff02ffff04ff02ffff04ff03ffff01ff8080808080ffff04ffff01ff02ffff03ff05ffff01ff02ff02ffff04ff02ffff04ff0dffff04ff09ff8080808080ffff010b80ff0180ff018080" # noqa
)
ACS_PH = ACS.get_tree_hash()
NIL_PH = Program.to(None).get_tree_hash()
@pytest.mark.asyncio
async def test_graftroot(setup_sim: Tuple[SpendSim, SimClient]) -> None:
sim, sim_client = setup_sim
try:
# Create the coin we're testing
all_values: List[bytes32] = [bytes32([x] * 32) for x in range(0, 100)]
root, proofs = build_merkle_tree(all_values)
p2_conditions = Program.to((1, [[51, ACS_PH, 0]])) # An coin to create to make sure this hits the blockchain
desired_key_values = ((bytes32([0] * 32), bytes32([1] * 32)), (bytes32([7] * 32), bytes32([8] * 32)))
desired_row_hashes: List[bytes32] = [build_merkle_tree_from_binary_tree(kv)[0] for kv in desired_key_values]
fake_struct: Program = Program.to((ACS_PH, NIL_PH))
graftroot_puzzle: Program = GRAFTROOT_MOD.curry(
# Do everything twice to test depending on multiple singleton updates
p2_conditions,
[fake_struct, fake_struct],
[ACS_PH, ACS_PH],
[desired_row_hashes, desired_row_hashes],
)
await sim.farm_block(graftroot_puzzle.get_tree_hash())
graftroot_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(graftroot_puzzle.get_tree_hash()))[
0
].coin
# Build some merkle trees that won't satidy the requirements
def filter_all(values: List[bytes32]) -> List[bytes32]:
return [h for i, h in enumerate(values) if (h, values[min(i, i + 1)]) not in desired_key_values]
def filter_to_only_one(values: List[bytes32]) -> List[bytes32]:
return [h for i, h in enumerate(values) if (h, values[min(i, i + 1)]) not in desired_key_values[1:]]
# And one that will
def filter_none(values: List[bytes32]) -> List[bytes32]:
return values
for list_filter in (filter_all, filter_to_only_one, filter_none):
# Create the "singleton"
filtered_values = list_filter(all_values)
root, proofs = build_merkle_tree(filtered_values)
filtered_row_hashes: Dict[bytes32, Tuple[int, List[bytes32]]] = {
simplify_merkle_proof(v, (proofs[v][0], [proofs[v][1][0]])): (proofs[v][0] >> 1, proofs[v][1][1:])
for v in filtered_values
}
fake_puzzle: Program = ACS.curry(fake_struct, ACS.curry(ACS_PH, (root, None), NIL_PH, None))
await sim.farm_block(fake_puzzle.get_tree_hash())
fake_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(fake_puzzle.get_tree_hash()))[0].coin
# Create the spend
fake_spend = CoinSpend(
fake_coin,
fake_puzzle,
Program.to([[[62, "$"]]]),
)
proofs_of_inclusion = []
for row_hash in desired_row_hashes:
if row_hash in filtered_row_hashes:
proofs_of_inclusion.append(filtered_row_hashes[row_hash])
else:
proofs_of_inclusion.append((0, []))
graftroot_spend = CoinSpend(
graftroot_coin,
graftroot_puzzle,
Program.to(
[
# Again, everything twice
[proofs_of_inclusion] * 2,
[(root, None), (root, None)],
[NIL_PH, NIL_PH],
[NIL_PH, NIL_PH],
[],
]
),
)
final_bundle = SpendBundle([fake_spend, graftroot_spend], G2Element())
result = await sim_client.push_tx(final_bundle)
# If this is the satisfactory merkle tree
if filtered_values == all_values:
assert result == (MempoolInclusionStatus.SUCCESS, None)
# clear the mempool
same_height = sim.block_height
await sim.farm_block()
assert len(await sim_client.get_coin_records_by_puzzle_hash(ACS_PH)) > 0
await sim.rewind(same_height)
# try with a bad merkle root announcement
new_fake_spend = CoinSpend(
fake_coin,
ACS.curry(fake_struct, ACS.curry(ACS_PH, (bytes32([0] * 32), None), None, None)),
Program.to([[[62, "$"]]]),
)
new_final_bundle = SpendBundle([new_fake_spend, graftroot_spend], G2Element())
result = await sim_client.push_tx(new_final_bundle)
assert result == (MempoolInclusionStatus.FAILED, Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
else:
assert result == (MempoolInclusionStatus.FAILED, Err.GENERATOR_RUNTIME_ERROR)
with pytest.raises(ValueError, match="clvm raise"):
graftroot_puzzle.run(graftroot_spend.solution.to_program())
finally:
await sim.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/db_wallet/config.py | tests/wallet/db_wallet/config.py | from __future__ import annotations
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/db_wallet/__init__.py | tests/wallet/db_wallet/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/db_wallet/test_dl_offers.py | tests/wallet/db_wallet/test_dl_offers.py | from __future__ import annotations
import dataclasses
from typing import Any, List, Tuple
import pytest
from flax.data_layer.data_layer_wallet import DataLayerWallet
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64
from flax.wallet.puzzle_drivers import Solver
from flax.wallet.trade_record import TradeRecord
from flax.wallet.trading.offer import Offer
from flax.wallet.trading.trade_status import TradeStatus
from flax.wallet.util.merkle_utils import build_merkle_tree, simplify_merkle_proof
async def is_singleton_confirmed_and_root(dl_wallet: DataLayerWallet, lid: bytes32, root: bytes32) -> bool:
rec = await dl_wallet.get_latest_singleton(lid)
if rec is None:
return False
if rec.confirmed is True:
assert rec.confirmed_at_height > 0
assert rec.timestamp > 0
return rec.confirmed and rec.root == root
async def get_trade_and_status(trade_manager: Any, trade: TradeRecord) -> TradeStatus:
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
return TradeStatus(trade_rec.status)
def get_parent_branch(value: bytes32, proof: Tuple[int, List[bytes32]]) -> Tuple[bytes32, Tuple[int, List[bytes32]]]:
branch: bytes32 = simplify_merkle_proof(value, (proof[0], [proof[1][0]]))
new_proof: Tuple[int, List[bytes32]] = (proof[0] >> 1, proof[1][1:])
return branch, new_proof
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_dl_offers(wallets_prefarm: Any, trusted: bool) -> None:
(
[wallet_node_maker, maker_funds],
[wallet_node_taker, taker_funds],
full_node_api,
) = wallets_prefarm
assert wallet_node_maker.wallet_state_manager is not None
assert wallet_node_taker.wallet_state_manager is not None
wsm_maker = wallet_node_maker.wallet_state_manager
wsm_taker = wallet_node_taker.wallet_state_manager
wallet_maker = wsm_maker.main_wallet
wallet_taker = wsm_taker.main_wallet
async with wsm_maker.lock:
dl_wallet_maker = await DataLayerWallet.create_new_dl_wallet(wsm_maker, wallet_maker)
async with wsm_taker.lock:
dl_wallet_taker = await DataLayerWallet.create_new_dl_wallet(wsm_taker, wallet_taker)
MAKER_ROWS = [bytes32([i] * 32) for i in range(0, 10)]
TAKER_ROWS = [bytes32([i] * 32) for i in range(0, 10)]
maker_root, _ = build_merkle_tree(MAKER_ROWS)
taker_root, _ = build_merkle_tree(TAKER_ROWS)
fee = uint64(1_999_999_999_999)
dl_record, std_record, launcher_id_maker = await dl_wallet_maker.generate_new_reporter(maker_root, fee=fee)
assert await dl_wallet_maker.get_latest_singleton(launcher_id_maker) is not None
await wsm_maker.add_pending_transaction(dl_record)
await wsm_maker.add_pending_transaction(std_record)
await full_node_api.process_transaction_records(records=[dl_record, std_record])
maker_funds -= fee
maker_funds -= 1
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_maker, maker_root)
dl_record, std_record, launcher_id_taker = await dl_wallet_taker.generate_new_reporter(taker_root, fee=fee)
assert await dl_wallet_taker.get_latest_singleton(launcher_id_taker) is not None
await wsm_taker.add_pending_transaction(dl_record)
await wsm_taker.add_pending_transaction(std_record)
await full_node_api.process_transaction_records(records=[dl_record, std_record])
taker_funds -= fee
taker_funds -= 1
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_taker, taker_root)
peer = wallet_node_taker.get_full_node_peer()
assert peer is not None
await dl_wallet_maker.track_new_launcher_id(launcher_id_taker, peer)
await dl_wallet_taker.track_new_launcher_id(launcher_id_maker, peer)
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker, taker_root)
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker, maker_root)
trade_manager_maker = wsm_maker.trade_manager
trade_manager_taker = wsm_taker.trade_manager
maker_addition = bytes32([101] * 32)
taker_addition = bytes32([101] * 32)
MAKER_ROWS.append(maker_addition)
TAKER_ROWS.append(taker_addition)
maker_root, maker_proofs = build_merkle_tree(MAKER_ROWS)
taker_root, taker_proofs = build_merkle_tree(TAKER_ROWS)
maker_branch, maker_branch_proof = get_parent_branch(maker_addition, maker_proofs[maker_addition])
taker_branch, taker_branch_proof = get_parent_branch(taker_addition, taker_proofs[taker_addition])
fee = uint64(2_000_000_000_000)
success, offer_maker, error = await trade_manager_maker.create_offer_for_ids(
{launcher_id_maker: -1, launcher_id_taker: 1},
solver=Solver(
{
launcher_id_maker.hex(): {
"new_root": "0x" + maker_root.hex(),
"dependencies": [
{
"launcher_id": "0x" + launcher_id_taker.hex(),
"values_to_prove": ["0x" + taker_branch.hex()],
},
],
}
}
),
fee=fee,
)
assert error is None
assert success is True
assert offer_maker is not None
assert await trade_manager_taker.get_offer_summary(Offer.from_bytes(offer_maker.offer)) == {
"offered": [
{
"launcher_id": launcher_id_maker.hex(),
"new_root": maker_root.hex(),
"dependencies": [
{
"launcher_id": launcher_id_taker.hex(),
"values_to_prove": [taker_branch.hex()],
}
],
}
]
}
success, offer_taker, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(offer_maker.offer),
peer,
solver=Solver(
{
launcher_id_taker.hex(): {
"new_root": "0x" + taker_root.hex(),
"dependencies": [
{
"launcher_id": "0x" + launcher_id_maker.hex(),
"values_to_prove": ["0x" + maker_branch.hex()],
},
],
},
"proofs_of_inclusion": [
[
maker_root.hex(),
str(maker_branch_proof[0]),
["0x" + sibling.hex() for sibling in maker_branch_proof[1]],
],
[
taker_root.hex(),
str(taker_branch_proof[0]),
["0x" + sibling.hex() for sibling in taker_branch_proof[1]],
],
],
}
),
fee=fee,
)
assert error is None
assert success is True
assert offer_taker is not None
assert await trade_manager_maker.get_offer_summary(Offer.from_bytes(offer_taker.offer)) == {
"offered": [
{
"launcher_id": launcher_id_maker.hex(),
"new_root": maker_root.hex(),
"dependencies": [
{
"launcher_id": launcher_id_taker.hex(),
"values_to_prove": [taker_branch.hex()],
}
],
},
{
"launcher_id": launcher_id_taker.hex(),
"new_root": taker_root.hex(),
"dependencies": [
{
"launcher_id": launcher_id_maker.hex(),
"values_to_prove": [maker_branch.hex()],
}
],
},
]
}
await time_out_assert(15, wallet_maker.get_unconfirmed_balance, maker_funds)
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, taker_funds - fee)
# Let's hack a way to await this offer's confirmation
offer_record = dataclasses.replace(dl_record, spend_bundle=Offer.from_bytes(offer_taker.offer).bundle)
await full_node_api.process_transaction_records(records=[offer_record])
maker_funds -= fee
taker_funds -= fee
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
await time_out_assert(15, wallet_maker.get_unconfirmed_balance, maker_funds)
await time_out_assert(15, wallet_taker.get_confirmed_balance, taker_funds)
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, taker_funds)
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker, taker_root)
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker, maker_root)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, offer_maker)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, offer_taker)
async def is_singleton_generation(wallet: DataLayerWallet, launcher_id: bytes32, generation: int) -> bool:
latest = await wallet.get_latest_singleton(launcher_id)
if latest is not None and latest.generation == generation:
return True
return False
await time_out_assert(15, is_singleton_generation, True, dl_wallet_taker, launcher_id_taker, 2)
txs = await dl_wallet_taker.create_update_state_spend(launcher_id_taker, bytes32([2] * 32))
for tx in txs:
await wallet_node_taker.wallet_state_manager.add_pending_transaction(tx)
await full_node_api.process_transaction_records(records=txs)
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_dl_offer_cancellation(wallets_prefarm: Any, trusted: bool) -> None:
    """Make a DL offer, then cancel it safely and watch it go PENDING_CANCEL -> CANCELLED."""
    [wallet_node, _], [_, _], full_node_api = wallets_prefarm
    assert wallet_node.wallet_state_manager is not None
    wsm = wallet_node.wallet_state_manager
    wallet = wsm.main_wallet
    # DL wallet creation happens under the state manager's lock.
    async with wsm.lock:
        dl_wallet = await DataLayerWallet.create_new_dl_wallet(wsm, wallet)
    # Ten distinct 32-byte leaves for the initial merkle root.
    ROWS = [bytes32([i] * 32) for i in range(0, 10)]
    root, _ = build_merkle_tree(ROWS)
    # First singleton: the one whose state update will be offered.
    dl_record, std_record, launcher_id = await dl_wallet.generate_new_reporter(root)
    assert await dl_wallet.get_latest_singleton(launcher_id) is not None
    await wsm.add_pending_transaction(dl_record)
    await wsm.add_pending_transaction(std_record)
    await full_node_api.process_transaction_records(records=[dl_record, std_record])
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet, launcher_id, root)
    # Second singleton: referenced only as a dependency by the offer's solver.
    dl_record_2, std_record_2, launcher_id_2 = await dl_wallet.generate_new_reporter(root)
    await wsm.add_pending_transaction(dl_record_2)
    await wsm.add_pending_transaction(std_record_2)
    await full_node_api.process_transaction_records(records=[dl_record_2, std_record_2])
    trade_manager = wsm.trade_manager
    addition = bytes32([101] * 32)
    ROWS.append(addition)
    # Only the updated root goes into the solver; proofs is unused in this test.
    root, proofs = build_merkle_tree(ROWS)
    success, offer, error = await trade_manager.create_offer_for_ids(
        {launcher_id: -1, launcher_id_2: 1},
        solver=Solver(
            {
                launcher_id.hex(): {
                    "new_root": "0x" + root.hex(),
                    "dependencies": [
                        {
                            "launcher_id": "0x" + launcher_id_2.hex(),
                            "values_to_prove": ["0x" + addition.hex()],
                        },
                    ],
                }
            }
        ),
        fee=uint64(2_000_000_000_000),
    )
    assert error is None
    assert success is True
    assert offer is not None
    cancellation_txs = await trade_manager.cancel_pending_offer_safely(offer.trade_id, fee=uint64(2_000_000_000_000))
    # NOTE(review): exactly three transactions expected — presumably the offered
    # coin's re-spend plus fee transaction(s); confirm against
    # cancel_pending_offer_safely's implementation.
    assert len(cancellation_txs) == 3
    await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager, offer)
    await full_node_api.process_transaction_records(records=cancellation_txs)
    await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager, offer)
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_multiple_dl_offers(wallets_prefarm: Any, trusted: bool) -> None:
    """Trade updates of two maker singletons against two taker singletons in one offer.

    Each side runs two DL reporter singletons; the offer's solver wires up
    cross-dependencies (maker updates depend on taker values and vice versa),
    and the taker supplies merkle proofs of inclusion when accepting.
    """
    (
        [wallet_node_maker, maker_funds],
        [wallet_node_taker, taker_funds],
        full_node_api,
    ) = wallets_prefarm
    assert wallet_node_maker.wallet_state_manager is not None
    assert wallet_node_taker.wallet_state_manager is not None
    wsm_maker = wallet_node_maker.wallet_state_manager
    wsm_taker = wallet_node_taker.wallet_state_manager
    wallet_maker = wsm_maker.main_wallet
    wallet_taker = wsm_taker.main_wallet
    # DL wallet creation happens under each state manager's lock.
    async with wsm_maker.lock:
        dl_wallet_maker = await DataLayerWallet.create_new_dl_wallet(wsm_maker, wallet_maker)
    async with wsm_taker.lock:
        dl_wallet_taker = await DataLayerWallet.create_new_dl_wallet(wsm_taker, wallet_taker)
    # Disjoint leaf sets so the two sides start from different roots.
    MAKER_ROWS = [bytes32([i] * 32) for i in range(0, 10)]
    TAKER_ROWS = [bytes32([i] * 32) for i in range(10, 20)]
    maker_root, _ = build_merkle_tree(MAKER_ROWS)
    taker_root, _ = build_merkle_tree(TAKER_ROWS)
    fee = uint64(1_999_999_999_999)
    # Create two reporter singletons per side. Each creation deducts fee + 1
    # (the extra 1 presumably being the new singleton's amount — confirm).
    dl_record, std_record, launcher_id_maker_1 = await dl_wallet_maker.generate_new_reporter(maker_root, fee=fee)
    assert await dl_wallet_maker.get_latest_singleton(launcher_id_maker_1) is not None
    await wsm_maker.add_pending_transaction(dl_record)
    await wsm_maker.add_pending_transaction(std_record)
    await full_node_api.process_transaction_records(records=[dl_record, std_record])
    maker_funds -= fee
    maker_funds -= 1
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_maker_1, maker_root)
    dl_record, std_record, launcher_id_maker_2 = await dl_wallet_maker.generate_new_reporter(maker_root, fee=fee)
    assert await dl_wallet_maker.get_latest_singleton(launcher_id_maker_2) is not None
    await wsm_maker.add_pending_transaction(dl_record)
    await wsm_maker.add_pending_transaction(std_record)
    await full_node_api.process_transaction_records(records=[dl_record, std_record])
    maker_funds -= fee
    maker_funds -= 1
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_maker_2, maker_root)
    dl_record, std_record, launcher_id_taker_1 = await dl_wallet_taker.generate_new_reporter(taker_root, fee=fee)
    assert await dl_wallet_taker.get_latest_singleton(launcher_id_taker_1) is not None
    await wsm_taker.add_pending_transaction(dl_record)
    await wsm_taker.add_pending_transaction(std_record)
    await full_node_api.process_transaction_records(records=[dl_record, std_record])
    taker_funds -= fee
    taker_funds -= 1
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_taker_1, taker_root)
    dl_record, std_record, launcher_id_taker_2 = await dl_wallet_taker.generate_new_reporter(taker_root, fee=fee)
    assert await dl_wallet_taker.get_latest_singleton(launcher_id_taker_2) is not None
    await wsm_taker.add_pending_transaction(dl_record)
    await wsm_taker.add_pending_transaction(std_record)
    await full_node_api.process_transaction_records(records=[dl_record, std_record])
    taker_funds -= fee
    taker_funds -= 1
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_taker_2, taker_root)
    # Each side tracks the other's singletons so it can verify their state.
    peer = wallet_node_taker.get_full_node_peer()
    assert peer is not None
    await dl_wallet_maker.track_new_launcher_id(launcher_id_taker_1, peer)
    await dl_wallet_maker.track_new_launcher_id(launcher_id_taker_2, peer)
    await dl_wallet_taker.track_new_launcher_id(launcher_id_maker_1, peer)
    await dl_wallet_taker.track_new_launcher_id(launcher_id_maker_2, peer)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker_1, taker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker_2, taker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker_1, maker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker_2, maker_root)
    trade_manager_maker = wsm_maker.trade_manager
    trade_manager_taker = wsm_taker.trade_manager
    # Each side appends one new leaf; the offer trades the resulting root updates.
    maker_addition = bytes32([101] * 32)
    taker_addition = bytes32([202] * 32)
    MAKER_ROWS.append(maker_addition)
    TAKER_ROWS.append(taker_addition)
    maker_root, maker_proofs = build_merkle_tree(MAKER_ROWS)
    taker_root, taker_proofs = build_merkle_tree(TAKER_ROWS)
    maker_branch, maker_branch_proof = get_parent_branch(maker_addition, maker_proofs[maker_addition])
    taker_branch, taker_branch_proof = get_parent_branch(taker_addition, taker_proofs[taker_addition])
    fee = uint64(2_000_000_000_000)
    # Maker's offer: -1 marks its own singletons (state change offered),
    # +1 marks the taker singletons it wants updated in exchange.
    success, offer_maker, error = await trade_manager_maker.create_offer_for_ids(
        {launcher_id_maker_1: -1, launcher_id_taker_1: 1, launcher_id_maker_2: -1, launcher_id_taker_2: 1},
        solver=Solver(
            {
                launcher_id_maker_1.hex(): {
                    "new_root": "0x" + maker_root.hex(),
                    "dependencies": [
                        {
                            "launcher_id": "0x" + launcher_id_taker_1.hex(),
                            # Two values_to_prove entries (deliberately the same leaf twice).
                            "values_to_prove": ["0x" + taker_branch.hex(), "0x" + taker_branch.hex()],
                        }
                    ],
                },
                launcher_id_maker_2.hex(): {
                    "new_root": "0x" + maker_root.hex(),
                    "dependencies": [
                        {
                            "launcher_id": "0x" + launcher_id_taker_1.hex(),
                            "values_to_prove": ["0x" + taker_branch.hex()],
                        },
                        {
                            "launcher_id": "0x" + launcher_id_taker_2.hex(),
                            "values_to_prove": ["0x" + taker_branch.hex()],
                        },
                    ],
                },
            }
        ),
        fee=fee,
    )
    assert error is None
    assert success is True
    assert offer_maker is not None
    # Taker mirrors the dependency structure and additionally supplies the
    # merkle proofs of inclusion for both updated roots.
    success, offer_taker, error = await trade_manager_taker.respond_to_offer(
        Offer.from_bytes(offer_maker.offer),
        peer,
        solver=Solver(
            {
                launcher_id_taker_1.hex(): {
                    "new_root": "0x" + taker_root.hex(),
                    "dependencies": [
                        {
                            "launcher_id": "0x" + launcher_id_maker_1.hex(),
                            "values_to_prove": ["0x" + maker_branch.hex(), "0x" + maker_branch.hex()],
                        }
                    ],
                },
                launcher_id_taker_2.hex(): {
                    "new_root": "0x" + taker_root.hex(),
                    "dependencies": [
                        {
                            "launcher_id": "0x" + launcher_id_maker_1.hex(),
                            "values_to_prove": ["0x" + maker_branch.hex()],
                        },
                        {
                            "launcher_id": "0x" + launcher_id_maker_2.hex(),
                            "values_to_prove": ["0x" + maker_branch.hex()],
                        },
                    ],
                },
                "proofs_of_inclusion": [
                    [
                        maker_root.hex(),
                        str(maker_branch_proof[0]),
                        ["0x" + sibling.hex() for sibling in maker_branch_proof[1]],
                    ],
                    [
                        taker_root.hex(),
                        str(taker_branch_proof[0]),
                        ["0x" + sibling.hex() for sibling in taker_branch_proof[1]],
                    ],
                ],
            }
        ),
        fee=fee,
    )
    assert error is None
    assert success is True
    assert offer_taker is not None
    await time_out_assert(15, wallet_maker.get_unconfirmed_balance, maker_funds)
    await time_out_assert(15, wallet_taker.get_unconfirmed_balance, taker_funds - fee)
    # Let's hack a way to await this offer's confirmation: reuse an earlier
    # transaction record with the offer's bundle swapped in, so the simulator
    # helper can wait for that spend bundle to be included.
    offer_record = dataclasses.replace(dl_record, spend_bundle=Offer.from_bytes(offer_taker.offer).bundle)
    await full_node_api.process_transaction_records(records=[offer_record])
    # Both sides' balances drop by the trade fee once the offer confirms.
    maker_funds -= fee
    taker_funds -= fee
    await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
    await time_out_assert(15, wallet_maker.get_unconfirmed_balance, maker_funds)
    await time_out_assert(15, wallet_taker.get_confirmed_balance, taker_funds)
    await time_out_assert(15, wallet_taker.get_unconfirmed_balance, taker_funds)
    # All four singletons should now show their updated roots on both sides.
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker_1, taker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_maker, launcher_id_taker_2, taker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker_1, maker_root)
    await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_taker, launcher_id_maker_2, maker_root)
    await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, offer_maker)
    await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, offer_taker)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/db_wallet/test_dl_wallet.py | tests/wallet/db_wallet/test_dl_wallet.py | from __future__ import annotations
import asyncio
import dataclasses
from typing import Any, AsyncIterator, Iterator, List
import pytest
import pytest_asyncio
from flax.data_layer.data_layer_wallet import DataLayerWallet, Mirror
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16, uint32, uint64
from flax.wallet.db_wallet.db_wallet_puzzles import create_mirror_puzzle
from flax.wallet.util.merkle_tree import MerkleTree
from tests.setup_nodes import SimulatorsAndWallets, setup_simulators_and_wallets
pytestmark = pytest.mark.data_layer
@pytest.fixture(scope="module")
def event_loop() -> Iterator[asyncio.AbstractEventLoop]:
    """Share a single asyncio event loop across this module's async tests."""
    yield asyncio.get_event_loop()
async def is_singleton_confirmed(dl_wallet: DataLayerWallet, lid: bytes32) -> bool:
    """Return True iff the wallet's latest singleton record for ``lid`` is confirmed.

    Also sanity-checks that a confirmed record carries a positive inclusion
    height and timestamp.
    """
    record = await dl_wallet.get_latest_singleton(lid)
    if record is None:
        return False
    if record.confirmed is True:
        # A confirmed record must have been included in a real block.
        assert record.confirmed_at_height > 0
        assert record.timestamp > 0
    return record.confirmed
class TestDLWallet:
    """Integration tests for the DataLayer wallet against a simulated full node."""

    @pytest_asyncio.fixture(scope="function")
    async def wallet_node(self) -> AsyncIterator[SimulatorsAndWallets]:
        # One simulator, one wallet node.
        async for setup in setup_simulators_and_wallets(1, 1, {}):
            yield setup

    @pytest_asyncio.fixture(scope="function")
    async def two_wallet_nodes(self) -> AsyncIterator[SimulatorsAndWallets]:
        # One simulator, two wallet nodes.
        async for setup in setup_simulators_and_wallets(1, 2, {}):
            yield setup

    @pytest_asyncio.fixture(scope="function")
    async def three_wallet_nodes(self) -> AsyncIterator[SimulatorsAndWallets]:
        # One simulator, three wallet nodes.
        async for setup in setup_simulators_and_wallets(1, 3, {}):
            yield setup

    @pytest_asyncio.fixture(scope="function")
    async def two_wallet_nodes_five_freeze(self) -> AsyncIterator[SimulatorsAndWallets]:
        # Same shape as two_wallet_nodes; kept as a distinct fixture name for
        # tests that request it.
        async for setup in setup_simulators_and_wallets(1, 2, {}):
            yield setup

    @pytest_asyncio.fixture(scope="function")
    async def three_sim_two_wallets(self) -> AsyncIterator[SimulatorsAndWallets]:
        # Three simulators, two wallet nodes.
        async for setup in setup_simulators_and_wallets(3, 2, {}):
            yield setup
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_initial_creation(self, wallet_node: SimulatorsAndWallets, trusted: bool) -> None:
        """Create two DL reporter singletons back to back and spend the wallet down to zero."""
        full_nodes, wallets, _ = wallet_node
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, server_0 = wallets[0]
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        # Run the same scenario with the full node either trusted or untrusted.
        if trusted:
            wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        # Farm two blocks' worth of rewards to the wallet.
        funds = await full_node_api.farm_blocks(count=2, wallet=wallet_0)
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
        # DL wallet creation must happen under the state-manager lock.
        async with wallet_node_0.wallet_state_manager.lock:
            dl_wallet = await DataLayerWallet.create_new_dl_wallet(wallet_node_0.wallet_state_manager, wallet_0)
        nodes = [Program.to("thing").get_tree_hash(), Program.to([8]).get_tree_hash()]
        current_tree = MerkleTree(nodes)
        current_root = current_tree.calculate_root()
        for i in range(0, 2):
            dl_record, std_record, launcher_id = await dl_wallet.generate_new_reporter(
                current_root, fee=uint64(1999999999999)
            )
            assert await dl_wallet.get_latest_singleton(launcher_id) is not None
            await wallet_node_0.wallet_state_manager.add_pending_transaction(dl_record)
            await wallet_node_0.wallet_state_manager.add_pending_transaction(std_record)
            await full_node_api.process_transaction_records(records=[dl_record, std_record])
            await time_out_assert(15, is_singleton_confirmed, True, dl_wallet, launcher_id)
            await asyncio.sleep(0.5)
        # After both creations the wallet is expected to be fully spent.
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, 0)
        await time_out_assert(10, wallet_0.get_confirmed_balance, 0)
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_get_owned_singletons(self, wallet_node: SimulatorsAndWallets, trusted: bool) -> None:
        """Create two reporter singletons and verify get_owned_singletons() reports exactly them."""
        full_nodes, wallets, _ = wallet_node
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, server_0 = wallets[0]
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        if trusted:
            wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        funds = await full_node_api.farm_blocks(count=2, wallet=wallet_0)
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
        # DL wallet creation must happen under the state-manager lock.
        async with wallet_node_0.wallet_state_manager.lock:
            dl_wallet = await DataLayerWallet.create_new_dl_wallet(wallet_node_0.wallet_state_manager, wallet_0)
        nodes = [Program.to("thing").get_tree_hash(), Program.to([8]).get_tree_hash()]
        current_tree = MerkleTree(nodes)
        current_root = current_tree.calculate_root()
        expected_launcher_ids = set()
        for i in range(0, 2):
            dl_record, std_record, launcher_id = await dl_wallet.generate_new_reporter(
                current_root, fee=uint64(1999999999999)
            )
            expected_launcher_ids.add(launcher_id)
            assert await dl_wallet.get_latest_singleton(launcher_id) is not None
            await wallet_node_0.wallet_state_manager.add_pending_transaction(dl_record)
            await wallet_node_0.wallet_state_manager.add_pending_transaction(std_record)
            await full_node_api.process_transaction_records(records=[dl_record, std_record])
            await time_out_assert(15, is_singleton_confirmed, True, dl_wallet, launcher_id)
            await asyncio.sleep(0.5)
        owned_singletons = await dl_wallet.get_owned_singletons()
        owned_launcher_ids = sorted(singleton.launcher_id for singleton in owned_singletons)
        # Compare as sorted lists so the wallet's answer ordering does not matter.
        assert owned_launcher_ids == sorted(expected_launcher_ids)
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_tracking_non_owned(self, two_wallet_nodes: SimulatorsAndWallets, trusted: bool) -> None:
        """Track another wallet's singleton, follow its updates, and re-sync after un-tracking."""
        full_nodes, wallets, _ = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, server_0 = wallets[0]
        wallet_node_1, server_1 = wallets[1]
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
        if trusted:
            wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
            wallet_node_1.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        else:
            wallet_node_0.config["trusted_peers"] = {}
            wallet_node_1.config["trusted_peers"] = {}
        await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        await server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        # Only wallet 0 is funded; wallet 1 merely observes the singleton.
        funds = await full_node_api.farm_blocks(count=2, wallet=wallet_0)
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
        async with wallet_node_0.wallet_state_manager.lock:
            dl_wallet_0 = await DataLayerWallet.create_new_dl_wallet(wallet_node_0.wallet_state_manager, wallet_0)
        async with wallet_node_1.wallet_state_manager.lock:
            dl_wallet_1 = await DataLayerWallet.create_new_dl_wallet(wallet_node_1.wallet_state_manager, wallet_1)
        nodes = [Program.to("thing").get_tree_hash(), Program.to([8]).get_tree_hash()]
        current_tree = MerkleTree(nodes)
        current_root = current_tree.calculate_root()
        # Wallet 0 owns the reporter singleton.
        dl_record, std_record, launcher_id = await dl_wallet_0.generate_new_reporter(current_root)
        assert await dl_wallet_0.get_latest_singleton(launcher_id) is not None
        await wallet_node_0.wallet_state_manager.add_pending_transaction(dl_record)
        await wallet_node_0.wallet_state_manager.add_pending_transaction(std_record)
        await full_node_api.process_transaction_records(records=[dl_record, std_record])
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_0, launcher_id)
        await asyncio.sleep(0.5)
        # Wallet 1 starts tracking the singleton it does not own.
        peer = wallet_node_1.get_full_node_peer()
        assert peer is not None
        await dl_wallet_1.track_new_launcher_id(launcher_id, peer)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_1, launcher_id)
        await asyncio.sleep(0.5)
        # The owner pushes five state updates; the tracker must follow along.
        for i in range(0, 5):
            new_root = MerkleTree([Program.to("root").get_tree_hash()]).calculate_root()
            txs = await dl_wallet_0.create_update_state_spend(launcher_id, new_root)
            for tx in txs:
                await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
            await full_node_api.process_transaction_records(records=txs)
            await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_0, launcher_id)
            await asyncio.sleep(0.5)

        async def do_tips_match() -> bool:
            # Both wallets should agree on the latest singleton record.
            latest_singleton_0 = await dl_wallet_0.get_latest_singleton(launcher_id)
            latest_singleton_1 = await dl_wallet_1.get_latest_singleton(launcher_id)
            return latest_singleton_0 == latest_singleton_1

        await time_out_assert(15, do_tips_match, True)
        # Un-tracking forgets the singleton; re-tracking re-syncs to the tip.
        await dl_wallet_1.stop_tracking_singleton(launcher_id)
        assert await dl_wallet_1.get_latest_singleton(launcher_id) is None
        await dl_wallet_1.track_new_launcher_id(launcher_id, peer)
        await time_out_assert(15, do_tips_match, True)
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_lifecycle(self, wallet_node: SimulatorsAndWallets, trusted: bool) -> None:
        """Exercise a reporter singleton's lifecycle: creation, a manual signed
        update, rejection of a double-spend of a pending coin, and a second
        update via the convenience helper."""
        full_nodes, wallets, _ = wallet_node
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, server_0 = wallets[0]
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        if trusted:
            wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        funds = await full_node_api.farm_blocks(count=5, wallet=wallet_0)
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
        async with wallet_node_0.wallet_state_manager.lock:
            dl_wallet = await DataLayerWallet.create_new_dl_wallet(wallet_node_0.wallet_state_manager, wallet_0)
        nodes = [Program.to("thing").get_tree_hash(), Program.to([8]).get_tree_hash()]
        current_tree = MerkleTree(nodes)
        current_root = current_tree.calculate_root()
        dl_record, std_record, launcher_id = await dl_wallet.generate_new_reporter(current_root)
        assert await dl_wallet.get_latest_singleton(launcher_id) is not None
        await wallet_node_0.wallet_state_manager.add_pending_transaction(dl_record)
        await wallet_node_0.wallet_state_manager.add_pending_transaction(std_record)
        await full_node_api.process_transaction_records(records=[dl_record, std_record])
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet, launcher_id)
        await asyncio.sleep(0.5)
        previous_record = await dl_wallet.get_latest_singleton(launcher_id)
        assert previous_record is not None
        assert previous_record.lineage_proof.amount is not None
        # Manually build a signed update transaction for the singleton.
        new_root = MerkleTree([Program.to("root").get_tree_hash()]).calculate_root()
        txs = await dl_wallet.generate_signed_transaction(
            [previous_record.lineage_proof.amount],
            [previous_record.inner_puzzle_hash],
            launcher_id=previous_record.launcher_id,
            new_root_hash=new_root,
            fee=uint64(1999999999999),
        )
        assert txs[0].spend_bundle is not None
        # Re-spending a coin that is already pending must be rejected.
        with pytest.raises(ValueError, match="is currently pending"):
            await dl_wallet.generate_signed_transaction(
                [previous_record.lineage_proof.amount],
                [previous_record.inner_puzzle_hash],
                coins=set([txs[0].spend_bundle.removals()[0]]),
                fee=uint64(1999999999999),
            )
        new_record = await dl_wallet.get_latest_singleton(launcher_id)
        assert new_record is not None
        assert new_record != previous_record
        assert not new_record.confirmed
        for tx in txs:
            await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
        await full_node_api.process_transaction_records(records=txs)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet, launcher_id)
        # NOTE(review): balance drops by 2_000_000_000_000 for a fee of
        # 1_999_999_999_999 — the extra mojo presumably being the singleton
        # amount; confirm against generate_signed_transaction's accounting.
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds - 2000000000000)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds - 2000000000000)
        await asyncio.sleep(0.5)
        # Second update via the convenience helper, no fee this time.
        previous_record = await dl_wallet.get_latest_singleton(launcher_id)
        new_root = MerkleTree([Program.to("new root").get_tree_hash()]).calculate_root()
        txs = await dl_wallet.create_update_state_spend(launcher_id, new_root)
        new_record = await dl_wallet.get_latest_singleton(launcher_id)
        assert new_record is not None
        assert new_record != previous_record
        assert not new_record.confirmed
        for tx in txs:
            await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
        await full_node_api.process_transaction_records(records=txs)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet, launcher_id)
        await asyncio.sleep(0.5)
    @pytest.mark.skip(reason="maybe no longer relevant, needs to be rewritten at least")
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_rebase(self, two_wallet_nodes: SimulatorsAndWallets, trusted: bool) -> None:
        """Race two conflicting singleton updates and verify the loser is rebased away.

        Skipped upstream; kept for reference. Two wallets both try to spend the
        same singleton with equal fees — whichever spend enters the mempool
        first wins, and the loser's local records must be cleaned up/rebased.
        """
        full_nodes, wallets, _ = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node_0, server_0 = wallets[0]
        wallet_node_1, server_1 = wallets[1]
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
        if trusted:
            wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
            wallet_node_1.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        else:
            wallet_node_0.config["trusted_peers"] = {}
            wallet_node_1.config["trusted_peers"] = {}
        await server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        await server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        funds = await full_node_api.farm_blocks(count=5, wallet=wallet_0)
        await full_node_api.farm_blocks(count=5, wallet=wallet_1)
        await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
        await time_out_assert(10, wallet_1.get_unconfirmed_balance, funds)
        await time_out_assert(10, wallet_1.get_confirmed_balance, funds)
        async with wallet_node_0.wallet_state_manager.lock:
            dl_wallet_0 = await DataLayerWallet.create_new_dl_wallet(wallet_node_0.wallet_state_manager, wallet_0)
        async with wallet_node_1.wallet_state_manager.lock:
            dl_wallet_1 = await DataLayerWallet.create_new_dl_wallet(wallet_node_1.wallet_state_manager, wallet_1)
        nodes = [Program.to("thing").get_tree_hash(), Program.to([8]).get_tree_hash()]
        current_tree = MerkleTree(nodes)
        current_root = current_tree.calculate_root()

        async def is_singleton_confirmed(wallet: DataLayerWallet, lid: bytes32) -> bool:
            # Local helper (shadows the module-level one) without the
            # height/timestamp assertions.
            latest_singleton = await wallet.get_latest_singleton(lid)
            if latest_singleton is None:
                return False
            return latest_singleton.confirmed

        dl_record, std_record, launcher_id = await dl_wallet_0.generate_new_reporter(current_root)
        initial_record = await dl_wallet_0.get_latest_singleton(launcher_id)
        assert initial_record is not None
        await wallet_node_0.wallet_state_manager.add_pending_transaction(dl_record)
        await wallet_node_0.wallet_state_manager.add_pending_transaction(std_record)
        await asyncio.wait_for(full_node_api.process_transaction_records(records=[dl_record, std_record]), timeout=15)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_0, launcher_id)
        await asyncio.sleep(0.5)
        # Wallet 1 tracks the singleton so it can craft a competing spend.
        peer = wallet_node_1.get_full_node_peer()
        assert peer is not None
        await dl_wallet_1.track_new_launcher_id(launcher_id, peer)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_1, launcher_id)
        current_record = await dl_wallet_1.get_latest_singleton(launcher_id)
        assert current_record is not None
        await asyncio.sleep(0.5)
        # Because these have the same fee, the one that gets pushed first will win
        report_txs = await dl_wallet_1.create_update_state_spend(
            launcher_id, current_record.root, fee=uint64(2000000000000)
        )
        record_1 = await dl_wallet_1.get_latest_singleton(launcher_id)
        assert record_1 is not None
        assert current_record != record_1
        update_txs = await dl_wallet_0.create_update_state_spend(
            launcher_id, bytes32([0] * 32), fee=uint64(2000000000000)
        )
        record_0 = await dl_wallet_0.get_latest_singleton(launcher_id)
        assert record_0 is not None
        assert initial_record != record_0
        assert record_0 != record_1
        # Push wallet 1's report first so it wins; wait only on the winner.
        for tx in report_txs:
            await wallet_node_1.wallet_state_manager.add_pending_transaction(tx)
        await asyncio.wait_for(full_node_api.wait_transaction_records_entered_mempool(records=report_txs), timeout=15)
        for tx in update_txs:
            await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
        await asyncio.wait_for(full_node_api.process_transaction_records(records=report_txs), timeout=15)
        # NOTE(review): wallet 0's balance is expected to drop by fee + 1 —
        # confirm which of the competing spends actually charges it.
        funds -= 2000000000001

        async def is_singleton_generation(wallet: DataLayerWallet, launcher_id: bytes32, generation: int) -> bool:
            # True once the wallet's latest record reaches exactly this generation.
            latest = await wallet.get_latest_singleton(launcher_id)
            if latest is not None and latest.generation == generation:
                return True
            return False

        # Winner's spend plus the rebased update advance the generation by 2.
        next_generation = current_record.generation + 2
        await time_out_assert(15, is_singleton_generation, True, dl_wallet_0, launcher_id, next_generation)
        for i in range(0, 2):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(bytes32(32 * b"0")))
            await asyncio.sleep(0.5)
        await time_out_assert(15, is_singleton_confirmed, True, dl_wallet_0, launcher_id)
        await time_out_assert(15, is_singleton_generation, True, dl_wallet_1, launcher_id, next_generation)
        # Both wallets must converge on the same tip.
        latest = await dl_wallet_0.get_latest_singleton(launcher_id)
        assert latest is not None
        assert latest == (await dl_wallet_1.get_latest_singleton(launcher_id))
        await time_out_assert(15, wallet_0.get_confirmed_balance, funds)
        await time_out_assert(15, wallet_0.get_unconfirmed_balance, funds)
        assert (
            len(
                await dl_wallet_0.get_history(
                    launcher_id, min_generation=uint32(next_generation - 1), max_generation=uint32(next_generation - 1)
                )
            )
            == 1
        )
        # The losing update's transactions and singleton record were cleaned up.
        for tx in update_txs:
            assert await wallet_node_0.wallet_state_manager.tx_store.get_transaction_record(tx.name) is None
        assert await dl_wallet_0.get_singleton_record(record_0.coin_id) is None
        # Now simulate a wallet that lost all local knowledge of a pending update.
        update_txs_1 = await dl_wallet_0.create_update_state_spend(
            launcher_id, bytes32([1] * 32), fee=uint64(2000000000000)
        )
        record_1 = await dl_wallet_0.get_latest_singleton(launcher_id)
        assert record_1 is not None
        for tx in update_txs_1:
            await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
        await full_node_api.wait_transaction_records_entered_mempool(update_txs_1)
        # Delete any trace of that update
        await wallet_node_0.wallet_state_manager.dl_store.delete_singleton_record(record_1.coin_id)
        for tx in update_txs_1:
            await wallet_node_0.wallet_state_manager.tx_store.delete_transaction_record(tx.name)
        # A second, conflicting update made in ignorance of the pending one.
        update_txs_0 = await dl_wallet_0.create_update_state_spend(launcher_id, bytes32([2] * 32))
        record_0 = await dl_wallet_0.get_latest_singleton(launcher_id)
        assert record_0 is not None
        assert record_0 != record_1
        for tx in update_txs_0:
            await wallet_node_0.wallet_state_manager.add_pending_transaction(tx)
        # The earlier (deleted-from-local-state) update is the one that confirms.
        await asyncio.wait_for(full_node_api.process_transaction_records(records=update_txs_1), timeout=15)

        async def does_singleton_have_root(wallet: DataLayerWallet, lid: bytes32, root: bytes32) -> bool:
            # True once the wallet's latest record carries the expected root.
            latest_singleton = await wallet.get_latest_singleton(lid)
            if latest_singleton is None:
                return False
            return latest_singleton.root == root

        funds -= 2000000000000
        next_generation += 1
        await time_out_assert(15, is_singleton_generation, True, dl_wallet_0, launcher_id, next_generation)
        # The confirmed root is [1]*32 — the first update won, not the re-made one.
        await time_out_assert(15, does_singleton_have_root, True, dl_wallet_0, launcher_id, bytes32([1] * 32))
        await time_out_assert(15, wallet_0.get_confirmed_balance, funds)
        await time_out_assert(15, wallet_0.get_unconfirmed_balance, funds)
        assert (
            len(
                await dl_wallet_0.get_history(
                    launcher_id, min_generation=uint32(next_generation), max_generation=uint32(next_generation)
                )
            )
            == 1
        )
        # The losing second update's records were cleaned up as well.
        for tx in update_txs_0:
            assert await wallet_node_0.wallet_state_manager.tx_store.get_transaction_record(tx.name) is None
        assert await dl_wallet_0.get_singleton_record(record_0.coin_id) is None
async def is_singleton_confirmed_and_root(dl_wallet: DataLayerWallet, lid: bytes32, root: bytes32) -> bool:
    """Return True iff the latest singleton record for ``lid`` is confirmed AND holds ``root``.

    Also sanity-checks that a confirmed record carries a positive inclusion
    height and timestamp.
    """
    latest = await dl_wallet.get_latest_singleton(lid)
    if latest is None:
        return False
    if latest.confirmed is True:
        # A confirmed record must have been included in a real block.
        assert latest.confirmed_at_height > 0
        assert latest.timestamp > 0
    return latest.confirmed and latest.root == root
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_mirrors(wallets_prefarm: Any, trusted: bool) -> None:
(
[wallet_node_1, _],
[wallet_node_2, _],
full_node_api,
) = wallets_prefarm
assert wallet_node_1.wallet_state_manager is not None
assert wallet_node_2.wallet_state_manager is not None
wsm_1 = wallet_node_1.wallet_state_manager
wsm_2 = wallet_node_2.wallet_state_manager
wallet_1 = wsm_1.main_wallet
wallet_2 = wsm_2.main_wallet
async with wsm_1.lock:
dl_wallet_1 = await DataLayerWallet.create_new_dl_wallet(wsm_1, wallet_1)
async with wsm_2.lock:
dl_wallet_2 = await DataLayerWallet.create_new_dl_wallet(wsm_2, wallet_2)
dl_record, std_record, launcher_id_1 = await dl_wallet_1.generate_new_reporter(bytes32([0] * 32))
assert await dl_wallet_1.get_latest_singleton(launcher_id_1) is not None
await wsm_1.add_pending_transaction(dl_record)
await wsm_1.add_pending_transaction(std_record)
await full_node_api.process_transaction_records(records=[dl_record, std_record])
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_1, launcher_id_1, bytes32([0] * 32))
dl_record, std_record, launcher_id_2 = await dl_wallet_2.generate_new_reporter(bytes32([0] * 32))
assert await dl_wallet_2.get_latest_singleton(launcher_id_2) is not None
await wsm_2.add_pending_transaction(dl_record)
await wsm_2.add_pending_transaction(std_record)
await full_node_api.process_transaction_records(records=[dl_record, std_record])
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_2, launcher_id_2, bytes32([0] * 32))
peer_1 = wallet_node_1.get_full_node_peer()
assert peer_1 is not None
await dl_wallet_1.track_new_launcher_id(launcher_id_2, peer_1)
peer_2 = wallet_node_2.get_full_node_peer()
assert peer_2 is not None
await dl_wallet_2.track_new_launcher_id(launcher_id_1, peer_2)
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_1, launcher_id_2, bytes32([0] * 32))
await time_out_assert(15, is_singleton_confirmed_and_root, True, dl_wallet_2, launcher_id_1, bytes32([0] * 32))
txs = await dl_wallet_1.create_new_mirror(launcher_id_2, uint64(3), [b"foo", b"bar"], fee=uint64(1_999_999_999_999))
additions: List[Coin] = []
for tx in txs:
if tx.spend_bundle is not None:
additions.extend(tx.spend_bundle.additions())
await wsm_1.add_pending_transaction(tx)
await full_node_api.process_transaction_records(records=txs)
mirror_coin: Coin = [c for c in additions if c.puzzle_hash == create_mirror_puzzle().get_tree_hash()][0]
mirror = Mirror(
bytes32(mirror_coin.name()), bytes32(launcher_id_2), uint64(mirror_coin.amount), [b"foo", b"bar"], True
)
await time_out_assert(15, dl_wallet_1.get_mirrors_for_launcher, [mirror], launcher_id_2)
await time_out_assert(
15, dl_wallet_2.get_mirrors_for_launcher, [dataclasses.replace(mirror, ours=False)], launcher_id_2
)
txs = await dl_wallet_1.delete_mirror(mirror.coin_id, peer_1, fee=uint64(2_000_000_000_000))
for tx in txs:
await wsm_1.add_pending_transaction(tx)
await full_node_api.process_transaction_records(records=txs)
await time_out_assert(15, dl_wallet_1.get_mirrors_for_launcher, [], launcher_id_2)
await time_out_assert(15, dl_wallet_2.get_mirrors_for_launcher, [], launcher_id_2)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/test_trades.py | tests/wallet/cat_wallet/test_trades.py | from __future__ import annotations
import asyncio
from secrets import token_bytes
from typing import Any, Dict, List
import pytest
from flax.consensus.cost_calculator import NPCResult
from flax.full_node.bundle_tools import simple_solution_generator
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.full_node.mempool_manager import MempoolManager
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.program import INFINITE_COST
from flax.util.ints import uint64
from flax.wallet.cat_wallet.cat_wallet import CATWallet
from flax.wallet.outer_puzzles import AssetType
from flax.wallet.puzzle_drivers import PuzzleInfo
from flax.wallet.trading.offer import Offer
from flax.wallet.trading.trade_status import TradeStatus
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.util.transaction_type import TransactionType
async def tx_in_pool(mempool: MempoolManager, tx_id):
tx = mempool.get_spendbundle(tx_id)
if tx is None:
return False
return True
buffer_blocks = 4
@pytest.mark.parametrize(
"trusted",
[True, False],
)
class TestCATTrades:
@pytest.mark.asyncio
async def test_cat_trades(self, wallets_prefarm):
(
[wallet_node_maker, initial_maker_balance],
[wallet_node_taker, initial_taker_balance],
full_node,
) = wallets_prefarm
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
wallet_taker = wallet_node_taker.wallet_state_manager.main_wallet
# Create two new CATs, one in each wallet
async with wallet_node_maker.wallet_state_manager.lock:
cat_wallet_maker: CATWallet = await CATWallet.create_new_cat_wallet(
wallet_node_maker.wallet_state_manager, wallet_maker, {"identifier": "genesis_by_id"}, uint64(100)
)
await asyncio.sleep(1)
async with wallet_node_taker.wallet_state_manager.lock:
new_cat_wallet_taker: CATWallet = await CATWallet.create_new_cat_wallet(
wallet_node_taker.wallet_state_manager, wallet_taker, {"identifier": "genesis_by_id"}, uint64(100)
)
await asyncio.sleep(1)
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, 100)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, 100)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, 100)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, 100)
# Add the taker's CAT to the maker's wallet
assert cat_wallet_maker.cat_info.my_tail is not None
assert new_cat_wallet_taker.cat_info.my_tail is not None
new_cat_wallet_maker: CATWallet = await CATWallet.create_wallet_for_cat(
wallet_node_maker.wallet_state_manager, wallet_maker, new_cat_wallet_taker.get_asset_id()
)
# Create the trade parameters
MAKER_FLAX_BALANCE = initial_maker_balance - 100
TAKER_FLAX_BALANCE = initial_taker_balance - 100
await time_out_assert(25, wallet_maker.get_confirmed_balance, MAKER_FLAX_BALANCE)
await time_out_assert(25, wallet_taker.get_unconfirmed_balance, TAKER_FLAX_BALANCE)
MAKER_CAT_BALANCE = 100
MAKER_NEW_CAT_BALANCE = 0
TAKER_CAT_BALANCE = 0
TAKER_NEW_CAT_BALANCE = 100
flax_for_cat = {
wallet_maker.id(): -1,
bytes.fromhex(new_cat_wallet_maker.get_asset_id()): 2, # This is the CAT that the taker made
}
cat_for_flax = {
wallet_maker.id(): 3,
cat_wallet_maker.id(): -4, # The taker has no knowledge of this CAT yet
}
cat_for_cat = {
bytes.fromhex(cat_wallet_maker.get_asset_id()): -5,
new_cat_wallet_maker.id(): 6,
}
flax_for_multiple_cat = {
wallet_maker.id(): -7,
cat_wallet_maker.id(): 8,
new_cat_wallet_maker.id(): 9,
}
multiple_cat_for_flax = {
wallet_maker.id(): 10,
cat_wallet_maker.id(): -11,
new_cat_wallet_maker.id(): -12,
}
flax_and_cat_for_cat = {
wallet_maker.id(): -13,
cat_wallet_maker.id(): -14,
new_cat_wallet_maker.id(): 15,
}
driver_dict: Dict[str, Dict[str, Any]] = {}
for wallet in (cat_wallet_maker, new_cat_wallet_maker):
asset_id: str = wallet.get_asset_id()
driver_dict[bytes.fromhex(asset_id)] = PuzzleInfo(
{
"type": AssetType.CAT.name,
"tail": "0x" + asset_id,
}
)
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
# Execute all of the trades
# flax_for_cat
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(flax_for_cat, fee=uint64(1))
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
peer = wallet_node_taker.get_full_node_peer()
assert peer is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer, fee=uint64(1)
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
first_offer = Offer.from_bytes(trade_take.offer)
MAKER_FLAX_BALANCE -= 2 # -1 and -1 for fee
MAKER_NEW_CAT_BALANCE += 2
TAKER_FLAX_BALANCE += 0 # +1 and -1 for fee
TAKER_NEW_CAT_BALANCE -= 2
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_maker.get_confirmed_balance, MAKER_FLAX_BALANCE)
await time_out_assert(15, wallet_maker.get_unconfirmed_balance, MAKER_FLAX_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_confirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_unconfirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, wallet_taker.get_confirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
async def get_trade_and_status(trade_manager, trade) -> TradeStatus:
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
return TradeStatus(trade_rec.status)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
async def assert_trade_tx_number(wallet_node, trade_id, number):
txs = await wallet_node.wallet_state_manager.tx_store.get_transactions_by_trade_id(trade_id)
return len(txs) == number
await time_out_assert(15, assert_trade_tx_number, True, wallet_node_maker, trade_make.trade_id, 1)
await time_out_assert(15, assert_trade_tx_number, True, wallet_node_taker, trade_take.trade_id, 3)
# cat_for_flax
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(cat_for_flax)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
MAKER_CAT_BALANCE -= 4
MAKER_FLAX_BALANCE += 3
TAKER_CAT_BALANCE += 4
TAKER_FLAX_BALANCE -= 3
cat_wallet_taker: CATWallet = await wallet_node_taker.wallet_state_manager.get_wallet_for_asset_id(
cat_wallet_maker.get_asset_id()
)
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_maker.get_confirmed_balance, MAKER_FLAX_BALANCE)
await time_out_assert(15, wallet_maker.get_unconfirmed_balance, MAKER_FLAX_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, wallet_taker.get_confirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, wallet_taker.get_unconfirmed_balance, TAKER_FLAX_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_confirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
await time_out_assert(15, assert_trade_tx_number, True, wallet_node_maker, trade_make.trade_id, 1)
await time_out_assert(15, assert_trade_tx_number, True, wallet_node_taker, trade_take.trade_id, 2)
# cat_for_cat
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(cat_for_cat)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
second_offer = Offer.from_bytes(trade_take.offer)
MAKER_CAT_BALANCE -= 5
MAKER_NEW_CAT_BALANCE += 6
TAKER_CAT_BALANCE += 5
TAKER_NEW_CAT_BALANCE -= 6
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, new_cat_wallet_maker.get_confirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_unconfirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_confirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
# flax_for_multiple_cat
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(
flax_for_multiple_cat, driver_dict=driver_dict
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
third_offer = Offer.from_bytes(trade_take.offer)
MAKER_FLAX_BALANCE -= 7
MAKER_CAT_BALANCE += 8
MAKER_NEW_CAT_BALANCE += 9
TAKER_FLAX_BALANCE += 7
TAKER_CAT_BALANCE -= 8
TAKER_NEW_CAT_BALANCE -= 9
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, new_cat_wallet_maker.get_confirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_unconfirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_confirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
# multiple_cat_for_flax
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(multiple_cat_for_flax)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
fourth_offer = Offer.from_bytes(trade_take.offer)
MAKER_CAT_BALANCE -= 11
MAKER_NEW_CAT_BALANCE -= 12
MAKER_FLAX_BALANCE += 10
TAKER_CAT_BALANCE += 11
TAKER_NEW_CAT_BALANCE += 12
TAKER_FLAX_BALANCE -= 10
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, new_cat_wallet_maker.get_confirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_unconfirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_confirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
# flax_and_cat_for_cat
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(flax_and_cat_for_cat)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_take is not None
fifth_offer = Offer.from_bytes(trade_take.offer)
MAKER_FLAX_BALANCE -= 13
MAKER_CAT_BALANCE -= 14
MAKER_NEW_CAT_BALANCE += 15
TAKER_FLAX_BALANCE += 13
TAKER_CAT_BALANCE += 14
TAKER_NEW_CAT_BALANCE -= 15
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, new_cat_wallet_maker.get_confirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_maker.get_unconfirmed_balance, MAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, MAKER_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_confirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, new_cat_wallet_taker.get_unconfirmed_balance, TAKER_NEW_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_confirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, cat_wallet_taker.get_unconfirmed_balance, TAKER_CAT_BALANCE)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_maker, trade_make)
await time_out_assert(15, get_trade_and_status, TradeStatus.CONFIRMED, trade_manager_taker, trade_take)
# This tests an edge case where aggregated offers the include > 2 of the same kind of CAT
# (and therefore are solved as a complete ring)
bundle = Offer.aggregate([first_offer, second_offer, third_offer, fourth_offer, fifth_offer]).to_valid_spend()
program = simple_solution_generator(bundle)
result: NPCResult = get_name_puzzle_conditions(program, INFINITE_COST, cost_per_byte=0, mempool_mode=True)
assert result.error is None
@pytest.mark.asyncio
async def test_trade_cancellation(self, wallets_prefarm):
(
[wallet_node_maker, maker_funds],
[wallet_node_taker, taker_funds],
full_node,
) = wallets_prefarm
wallet_maker = wallet_node_maker.wallet_state_manager.main_wallet
wallet_taker = wallet_node_taker.wallet_state_manager.main_wallet
xfx_to_cat_amount = uint64(100)
async with wallet_node_maker.wallet_state_manager.lock:
cat_wallet_maker: CATWallet = await CATWallet.create_new_cat_wallet(
wallet_node_maker.wallet_state_manager, wallet_maker, {"identifier": "genesis_by_id"}, xfx_to_cat_amount
)
tx_queue: List[TransactionRecord] = await wallet_node_maker.wallet_state_manager.tx_store.get_not_sent()
await time_out_assert(
15, tx_in_pool, True, full_node.full_node.mempool_manager, tx_queue[0].spend_bundle.name()
)
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xfx_to_cat_amount)
await time_out_assert(15, cat_wallet_maker.get_unconfirmed_balance, xfx_to_cat_amount)
maker_funds -= xfx_to_cat_amount
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds)
cat_for_flax = {
wallet_maker.id(): 1,
cat_wallet_maker.id(): -2,
}
flax_for_cat = {
wallet_maker.id(): -3,
cat_wallet_maker.id(): 4,
}
trade_manager_maker = wallet_node_maker.wallet_state_manager.trade_manager
trade_manager_taker = wallet_node_taker.wallet_state_manager.trade_manager
async def get_trade_and_status(trade_manager, trade) -> TradeStatus:
trade_rec = await trade_manager.get_trade_by_id(trade.trade_id)
return TradeStatus(trade_rec.status)
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(cat_for_flax)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
await trade_manager_maker.cancel_pending_offer(trade_make.trade_id)
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
# Due to current mempool rules, trying to force a take out of the mempool with a cancel will not work.
# Uncomment this when/if it does
# success, trade_take, error = await trade_manager_taker.respond_to_offer(Offer.from_bytes(trade_make.offer))
# await asyncio.sleep(1)
# assert error is None
# assert success is True
# assert trade_take is not None
# await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CONFIRM, trade_manager_taker, trade_take)
# await time_out_assert(
# 15,
# tx_in_pool,
# True,
# full_node.full_node.mempool_manager,
# Offer.from_bytes(trade_take.offer).to_valid_spend().name(),
# )
fee = uint64(2_000_000_000_000)
txs = await trade_manager_maker.cancel_pending_offer_safely(trade_make.trade_id, fee=fee)
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
for tx in txs:
if tx.spend_bundle is not None:
await time_out_assert(15, tx_in_pool, True, full_node.full_node.mempool_manager, tx.spend_bundle.name())
sum_of_outgoing = uint64(0)
sum_of_incoming = uint64(0)
for tx in txs:
if tx.type == TransactionType.OUTGOING_TX.value:
sum_of_outgoing = uint64(sum_of_outgoing + tx.amount)
elif tx.type == TransactionType.INCOMING_TX.value:
sum_of_incoming = uint64(sum_of_incoming + tx.amount)
assert (sum_of_outgoing - sum_of_incoming) == 0
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
# await time_out_assert(15, get_trade_and_status, TradeStatus.FAILED, trade_manager_taker, trade_take)
await time_out_assert(15, wallet_maker.get_pending_change_balance, 0)
await time_out_assert(15, wallet_maker.get_confirmed_balance, maker_funds - fee)
await time_out_assert(15, cat_wallet_maker.get_confirmed_balance, xfx_to_cat_amount)
await time_out_assert(15, wallet_taker.get_confirmed_balance, taker_funds)
peer = wallet_node_taker.get_full_node_peer()
assert peer is not None
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is not None
assert success is False
assert trade_take is None
# Now we're going to create the other way around for test coverage sake
success, trade_make, error = await trade_manager_maker.create_offer_for_ids(flax_for_cat)
await asyncio.sleep(1)
assert error is None
assert success is True
assert trade_make is not None
# This take should fail since we have no CATs to fulfill it with
success, trade_take, error = await trade_manager_taker.respond_to_offer(
Offer.from_bytes(trade_make.offer), peer
)
await asyncio.sleep(1)
assert error is not None
assert success is False
assert trade_take is None
txs = await trade_manager_maker.cancel_pending_offer_safely(trade_make.trade_id, fee=uint64(0))
await time_out_assert(15, get_trade_and_status, TradeStatus.PENDING_CANCEL, trade_manager_maker, trade_make)
for tx in txs:
if tx.spend_bundle is not None:
await time_out_assert(15, tx_in_pool, True, full_node.full_node.mempool_manager, tx.spend_bundle.name())
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, get_trade_and_status, TradeStatus.CANCELLED, trade_manager_maker, trade_make)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/test_cat_lifecycle.py | tests/wallet/cat_wallet/test_cat_lifecycle.py | from typing import List, Tuple, Optional, Dict
import pytest
from blspy import PrivateKey, AugSchemeMPL, G2Element
from clvm.casts import int_to_bytes
from flax.clvm.spend_sim import SpendSim, SimClient
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.spend_bundle import SpendBundle
from flax.util.errors import Err
from flax.util.ints import uint64
from flax.wallet.cat_wallet.cat_utils import (
SpendableCAT,
construct_cat_puzzle,
unsigned_spend_bundle_for_spendable_cats,
)
from flax.wallet.puzzles.cat_loader import CAT_MOD
from flax.wallet.lineage_proof import LineageProof
from flax.wallet.puzzles.tails import (
GenesisById,
GenesisByPuzhash,
EverythingWithSig,
DelegatedLimitations,
)
from tests.clvm.benchmark_costs import cost_of_spend_bundle
from tests.clvm.test_puzzles import secret_exponent_for_index
acs = Program.to(1)
acs_ph = acs.get_tree_hash()
NO_LINEAGE_PROOF = LineageProof()
async def do_spend(
sim: SpendSim,
sim_client: SimClient,
tail: Program,
coins: List[Coin],
lineage_proofs: List[Program],
inner_solutions: List[Program],
expected_result: Tuple[MempoolInclusionStatus, Err],
reveal_limitations_program: bool = True,
signatures: List[G2Element] = [],
extra_deltas: Optional[List[int]] = None,
additional_spends: List[SpendBundle] = [],
limitations_solutions: Optional[List[Program]] = None,
) -> int:
if limitations_solutions is None:
limitations_solutions = [Program.to([])] * len(coins)
if extra_deltas is None:
extra_deltas = [0] * len(coins)
spendable_cat_list: List[SpendableCAT] = []
for coin, innersol, proof, limitations_solution, extra_delta in zip(
coins, inner_solutions, lineage_proofs, limitations_solutions, extra_deltas
):
spendable_cat_list.append(
SpendableCAT(
coin,
tail.get_tree_hash(),
acs,
innersol,
limitations_solution=limitations_solution,
lineage_proof=proof,
extra_delta=extra_delta,
limitations_program_reveal=tail if reveal_limitations_program else Program.to([]),
)
)
spend_bundle: SpendBundle = unsigned_spend_bundle_for_spendable_cats(
CAT_MOD,
spendable_cat_list,
)
agg_sig = AugSchemeMPL.aggregate(signatures)
result = await sim_client.push_tx(
SpendBundle.aggregate(
[
*additional_spends,
spend_bundle,
SpendBundle([], agg_sig), # "Signing" the spend bundle
]
)
)
assert result == expected_result
cost = cost_of_spend_bundle(spend_bundle)
await sim.farm_block()
return cost
class TestCATLifecycle:
cost: Dict[str, int] = {}
@pytest.mark.asyncio()
async def test_cat_mod(self, setup_sim):
sim, sim_client = setup_sim
try:
tail = Program.to([])
checker_solution = Program.to([])
cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
cat_ph: bytes32 = cat_puzzle.get_tree_hash()
await sim.farm_block(cat_ph)
starting_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(cat_ph))[0].coin
# Testing the eve spend
self.cost["Eve Spend"] = await do_spend(
sim,
sim_client,
tail,
[starting_coin],
[NO_LINEAGE_PROOF],
[
Program.to(
[
[51, acs.get_tree_hash(), starting_coin.amount - 3, [b"memo"]],
[51, acs.get_tree_hash(), 1],
[51, acs.get_tree_hash(), 2],
[51, 0, -113, tail, checker_solution],
]
)
],
(MempoolInclusionStatus.SUCCESS, None),
limitations_solutions=[checker_solution],
)
# There's 4 total coins at this point. A farming reward and the three children of the spend above.
# Testing a combination of two
coins: List[Coin] = [
record.coin
for record in (await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))
]
coins = [coins[0], coins[1]]
self.cost["Two CATs"] = await do_spend(
sim,
sim_client,
tail,
coins,
[NO_LINEAGE_PROOF] * 2,
[
Program.to(
[
[51, acs.get_tree_hash(), coins[0].amount + coins[1].amount],
[51, 0, -113, tail, checker_solution],
]
),
Program.to([[51, 0, -113, tail, checker_solution]]),
],
(MempoolInclusionStatus.SUCCESS, None),
limitations_solutions=[checker_solution] * 2,
)
# Testing a combination of three
coins = [
record.coin
for record in (await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))
]
total_amount: uint64 = uint64(sum([c.amount for c in coins]))
self.cost["Three CATs"] = await do_spend(
sim,
sim_client,
tail,
coins,
[NO_LINEAGE_PROOF] * 3,
[
Program.to(
[
[51, acs.get_tree_hash(), total_amount],
[51, 0, -113, tail, checker_solution],
]
),
Program.to([[51, 0, -113, tail, checker_solution]]),
Program.to([[51, 0, -113, tail, checker_solution]]),
],
(MempoolInclusionStatus.SUCCESS, None),
limitations_solutions=[checker_solution] * 3,
)
# Spend with a standard lineage proof
parent_coin: Coin = coins[0] # The first one is the one we didn't light on fire
_, curried_args = cat_puzzle.uncurry()
_, _, innerpuzzle = curried_args.as_iter()
lineage_proof = LineageProof(parent_coin.parent_coin_info, innerpuzzle.get_tree_hash(), parent_coin.amount)
self.cost["Standard Lineage Check"] = await do_spend(
sim,
sim_client,
tail,
[(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
[lineage_proof],
[Program.to([[51, acs.get_tree_hash(), total_amount]])],
(MempoolInclusionStatus.SUCCESS, None),
reveal_limitations_program=False,
)
# Melt some value
self.cost["Melting Value"] = await do_spend(
sim,
sim_client,
tail,
[(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
[NO_LINEAGE_PROOF],
[
Program.to(
[
[51, acs.get_tree_hash(), total_amount - 1],
[51, 0, -113, tail, checker_solution],
]
)
],
(MempoolInclusionStatus.SUCCESS, None),
extra_deltas=[-1],
limitations_solutions=[checker_solution],
)
# Mint some value
temp_p = Program.to(1)
temp_ph: bytes32 = temp_p.get_tree_hash()
await sim.farm_block(temp_ph)
acs_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(temp_ph, include_spent_coins=False))[
0
].coin
acs_bundle = SpendBundle(
[
CoinSpend(
acs_coin,
temp_p,
Program.to([]),
)
],
G2Element(),
)
self.cost["Mint Value"] = await do_spend(
sim,
sim_client,
tail,
[(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
[NO_LINEAGE_PROOF],
[
Program.to(
[
[51, acs.get_tree_hash(), total_amount],
[51, 0, -113, tail, checker_solution],
]
)
], # We subtracted 1 last time so it's normal now
(MempoolInclusionStatus.SUCCESS, None),
extra_deltas=[1],
additional_spends=[acs_bundle],
limitations_solutions=[checker_solution],
)
finally:
await sim.close()
@pytest.mark.asyncio()
async def test_complex_spend(self, setup_sim):
    """Spend two eve CATs and two non-eve CATs in one bundle while minting
    and melting value at the same time.

    The per-coin ``extra_deltas`` (+13, -21, +21, -13) sum to zero, so the
    aggregate CAT supply is conserved across the whole spend even though
    every individual coin changes value.
    """
    sim, sim_client = setup_sim
    try:
        # Trivial TAIL (empty program) with an empty solution for it.
        tail = Program.to([])
        checker_solution = Program.to([])
        cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
        cat_ph: bytes32 = cat_puzzle.get_tree_hash()
        # Farm two blocks to the CAT puzzle hash so four coins exist below.
        await sim.farm_block(cat_ph)
        await sim.farm_block(cat_ph)
        cat_records = await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False)
        parent_of_mint = cat_records[0].coin
        parent_of_melt = cat_records[1].coin
        eve_to_mint = cat_records[2].coin
        eve_to_melt = cat_records[3].coin
        # Spend two of them to make them non-eve
        self.cost["Spend two eves"] = await do_spend(
            sim,
            sim_client,
            tail,
            [parent_of_mint, parent_of_melt],
            [NO_LINEAGE_PROOF, NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), parent_of_mint.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
                Program.to(
                    [
                        [51, acs.get_tree_hash(), parent_of_melt.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            limitations_solutions=[checker_solution] * 2,
        )
        # Make the lineage proofs for the non-eves
        mint_lineage = LineageProof(parent_of_mint.parent_coin_info, acs_ph, parent_of_mint.amount)
        melt_lineage = LineageProof(parent_of_melt.parent_coin_info, acs_ph, parent_of_melt.amount)
        # Find the two new coins by matching on their parent coin IDs.
        all_cats = await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False)
        all_cat_coins = [cr.coin for cr in all_cats]
        standard_to_mint = list(filter(lambda cr: cr.parent_coin_info == parent_of_mint.name(), all_cat_coins))[0]
        standard_to_melt = list(filter(lambda cr: cr.parent_coin_info == parent_of_melt.name(), all_cat_coins))[0]
        # Do the complex spend
        # We have both an eve and a non-eve doing both minting and melting
        self.cost["Complex Spend"] = await do_spend(
            sim,
            sim_client,
            tail,
            [eve_to_mint, eve_to_melt, standard_to_mint, standard_to_melt],
            [NO_LINEAGE_PROOF, NO_LINEAGE_PROOF, mint_lineage, melt_lineage],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), eve_to_mint.amount + 13],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
                Program.to(
                    [
                        [51, acs.get_tree_hash(), eve_to_melt.amount - 21],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
                Program.to(
                    [
                        [51, acs.get_tree_hash(), standard_to_mint.amount + 21],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
                Program.to(
                    [
                        [51, acs.get_tree_hash(), standard_to_melt.amount - 13],
                        [51, 0, -113, tail, checker_solution],
                    ]
                ),
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            limitations_solutions=[checker_solution] * 4,
            extra_deltas=[13, -21, 21, -13],
        )
    finally:
        await sim.close()
@pytest.mark.asyncio()
async def test_genesis_by_id(self, setup_sim):
    """Issue a CAT whose TAIL only permits issuance from one specific coin ID."""
    sim, sim_client = setup_sim
    try:
        standard_acs = Program.to(1)
        standard_acs_ph: bytes32 = standard_acs.get_tree_hash()
        await sim.farm_block(standard_acs_ph)
        starting_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(standard_acs_ph))[0].coin
        # The TAIL is curried with the ID of the only coin allowed to issue.
        tail: Program = GenesisById.construct([Program.to(starting_coin.name())])
        checker_solution: Program = GenesisById.solve([], {})
        cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
        cat_ph: bytes32 = cat_puzzle.get_tree_hash()
        # Move the starting coin's full value into the CAT puzzle hash.
        await sim_client.push_tx(
            SpendBundle(
                [CoinSpend(starting_coin, standard_acs, Program.to([[51, cat_ph, starting_coin.amount]]))],
                G2Element(),
            )
        )
        await sim.farm_block()
        self.cost["Genesis by ID"] = await do_spend(
            sim,
            sim_client,
            tail,
            [(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), starting_coin.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            limitations_solutions=[checker_solution],
        )
    finally:
        await sim.close()
@pytest.mark.asyncio()
async def test_genesis_by_puzhash(self, setup_sim):
    """Issue a CAT whose TAIL permits issuance from any coin with a given
    puzzle hash (rather than a single coin ID)."""
    sim, sim_client = setup_sim
    try:
        standard_acs = Program.to(1)
        standard_acs_ph: bytes32 = standard_acs.get_tree_hash()
        await sim.farm_block(standard_acs_ph)
        starting_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(standard_acs_ph))[0].coin
        # The TAIL is curried with the permitted genesis puzzle hash; the
        # checker solution carries the actual parent coin as JSON.
        tail: Program = GenesisByPuzhash.construct([Program.to(starting_coin.puzzle_hash)])
        checker_solution: Program = GenesisByPuzhash.solve([], starting_coin.to_json_dict())
        cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
        cat_ph: bytes32 = cat_puzzle.get_tree_hash()
        await sim_client.push_tx(
            SpendBundle(
                [CoinSpend(starting_coin, standard_acs, Program.to([[51, cat_ph, starting_coin.amount]]))],
                G2Element(),
            )
        )
        await sim.farm_block()
        self.cost["Genesis by Puzhash"] = await do_spend(
            sim,
            sim_client,
            tail,
            [(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), starting_coin.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            limitations_solutions=[checker_solution],
        )
    finally:
        await sim.close()
@pytest.mark.asyncio()
async def test_everything_with_signature(self, setup_sim):
    """Exercise a signature-gated TAIL: issuance, melting, and minting each
    require an AGG_SIG_ME-style signature over the (delta + coin ID) message."""
    sim, sim_client = setup_sim
    try:
        sk = PrivateKey.from_bytes(secret_exponent_for_index(1).to_bytes(32, "big"))
        # TAIL curried with the public key that must sign every spend.
        tail: Program = EverythingWithSig.construct([Program.to(sk.get_g1())])
        checker_solution: Program = EverythingWithSig.solve([], {})
        cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
        cat_ph: bytes32 = cat_puzzle.get_tree_hash()
        await sim.farm_block(cat_ph)
        # Test eve spend
        # We don't sign any message data because CLVM 0 translates to b'' apparently
        starting_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(cat_ph))[0].coin
        signature: G2Element = AugSchemeMPL.sign(
            sk, (starting_coin.name() + sim.defaults.AGG_SIG_ME_ADDITIONAL_DATA)
        )
        self.cost["Signature Issuance"] = await do_spend(
            sim,
            sim_client,
            tail,
            [starting_coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), starting_coin.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            limitations_solutions=[checker_solution],
            signatures=[signature],
        )
        # Test melting value
        coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin
        # Melting signs the negative delta (-1) prepended to the coin ID.
        signature = AugSchemeMPL.sign(
            sk, (int_to_bytes(-1) + coin.name() + sim.defaults.AGG_SIG_ME_ADDITIONAL_DATA)
        )
        self.cost["Signature Melt"] = await do_spend(
            sim,
            sim_client,
            tail,
            [coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), coin.amount - 1],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            extra_deltas=[-1],
            limitations_solutions=[checker_solution],
            signatures=[signature],
        )
        # Test minting value
        coin = (await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin
        signature = AugSchemeMPL.sign(sk, (int_to_bytes(1) + coin.name() + sim.defaults.AGG_SIG_ME_ADDITIONAL_DATA))
        # Need something to fund the minting
        temp_p = Program.to(1)
        temp_ph: bytes32 = temp_p.get_tree_hash()
        await sim.farm_block(temp_ph)
        acs_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(temp_ph, include_spent_coins=False))[
            0
        ].coin
        acs_bundle = SpendBundle(
            [
                CoinSpend(
                    acs_coin,
                    temp_p,
                    Program.to([]),
                )
            ],
            G2Element(),
        )
        self.cost["Signature Mint"] = await do_spend(
            sim,
            sim_client,
            tail,
            [coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), coin.amount + 1],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            extra_deltas=[1],
            limitations_solutions=[checker_solution],
            signatures=[signature],
            additional_spends=[acs_bundle],
        )
    finally:
        await sim.close()
@pytest.mark.asyncio()
async def test_delegated_tail(self, setup_sim):
    """Use a delegated-limitations TAIL: the curried key signs a *different*
    TAIL (genesis-by-id) which is then run in the outer TAIL's place."""
    sim, sim_client = setup_sim
    try:
        standard_acs = Program.to(1)
        standard_acs_ph: bytes32 = standard_acs.get_tree_hash()
        await sim.farm_block(standard_acs_ph)
        starting_coin: Coin = (await sim_client.get_coin_records_by_puzzle_hash(standard_acs_ph))[0].coin
        sk = PrivateKey.from_bytes(secret_exponent_for_index(1).to_bytes(32, "big"))
        tail: Program = DelegatedLimitations.construct([Program.to(sk.get_g1())])
        cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
        cat_ph: bytes32 = cat_puzzle.get_tree_hash()
        await sim_client.push_tx(
            SpendBundle(
                [CoinSpend(starting_coin, standard_acs, Program.to([[51, cat_ph, starting_coin.amount]]))],
                G2Element(),
            )
        )
        await sim.farm_block()
        # We're signing a different tail to use here
        name_as_program = Program.to(starting_coin.name())
        new_tail: Program = GenesisById.construct([name_as_program])
        checker_solution: Program = DelegatedLimitations.solve(
            [name_as_program],
            {
                "signed_program": {
                    "identifier": "genesis_by_id",
                    "args": [str(name_as_program)],
                },
                "program_arguments": {},
            },
        )
        # The signature is over the delegated TAIL's tree hash, not a spend.
        signature: G2Element = AugSchemeMPL.sign(sk, new_tail.get_tree_hash())
        self.cost["Delegated Genesis"] = await do_spend(
            sim,
            sim_client,
            tail,
            [(await sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))[0].coin],
            [NO_LINEAGE_PROOF],
            [
                Program.to(
                    [
                        [51, acs.get_tree_hash(), starting_coin.amount],
                        [51, 0, -113, tail, checker_solution],
                    ]
                )
            ],
            (MempoolInclusionStatus.SUCCESS, None),
            signatures=[signature],
            limitations_solutions=[checker_solution],
        )
    finally:
        await sim.close()
def test_cost(self):
import json
import logging
log = logging.getLogger(__name__)
log.warning(json.dumps(self.cost))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/test_cat_outer_puzzle.py | tests/wallet/cat_wallet/test_cat_outer_puzzle.py | from __future__ import annotations
from typing import Optional
import pytest
from clvm_tools.binutils import disassemble
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.util.ints import uint64
from flax.wallet.cat_wallet.cat_utils import construct_cat_puzzle
from flax.wallet.outer_puzzles import construct_puzzle, get_inner_puzzle, get_inner_solution, match_puzzle, solve_puzzle
from flax.wallet.puzzle_drivers import PuzzleInfo, Solver
from flax.wallet.puzzles.cat_loader import CAT_MOD
from flax.wallet.uncurried_puzzle import uncurry_puzzle
def test_cat_outer_puzzle() -> None:
    """Match, construct, and solve a doubly-wrapped (CAT-inside-CAT) puzzle."""
    inner = Program.to(1)  # anyone-can-spend inner puzzle
    fake_tail = bytes32([0] * 32)
    single_wrap: Program = construct_cat_puzzle(CAT_MOD, fake_tail, inner)
    double_wrap: Program = construct_cat_puzzle(CAT_MOD, fake_tail, single_wrap)
    uncurried = uncurry_puzzle(double_wrap)
    # The outer layer must match as a CAT driver...
    outer_driver: Optional[PuzzleInfo] = match_puzzle(uncurried)
    assert outer_driver is not None
    assert outer_driver.type() == "CAT"
    assert outer_driver["tail"] == fake_tail
    # ...and so must the layer nested inside it.
    nested_driver: Optional[PuzzleInfo] = outer_driver.also()
    assert nested_driver is not None
    assert nested_driver.type() == "CAT"
    assert nested_driver["tail"] == fake_tail
    # Round-trip: driver -> full puzzle, and full puzzle -> innermost puzzle.
    assert construct_puzzle(outer_driver, inner) == double_wrap
    assert get_inner_puzzle(outer_driver, uncurried) == inner
    # Set up a parent/child coin pair for solving.
    parent_coin = Coin(fake_tail, double_wrap.get_tree_hash(), uint64(100))
    child_coin = Coin(parent_coin.name(), double_wrap.get_tree_hash(), uint64(100))
    parent_spend = CoinSpend(parent_coin, double_wrap.to_serialized_program(), Program.to([]))
    # Hex serializations the Solver expects: coin = parent_id|puzzle_hash|amount.
    coin_hex: str = (
        "0x" + child_coin.parent_coin_info.hex() + child_coin.puzzle_hash.hex() + bytes(uint64(child_coin.amount)).hex()
    )
    spend_hex: str = "0x" + bytes(parent_spend).hex()
    inner_solution = Program.to([[51, inner.get_tree_hash(), 100]])
    solution: Program = solve_puzzle(
        outer_driver,
        Solver(
            {
                "coin": coin_hex,
                "parent_spend": spend_hex,
                "siblings": "(" + coin_hex + ")",
                "sibling_spends": "(" + spend_hex + ")",
                "sibling_puzzles": "(" + disassemble(inner) + ")",  # type: ignore
                "sibling_solutions": "(" + disassemble(inner_solution) + ")",  # type: ignore
            }
        ),
        inner,
        inner_solution,
    )
    # NOTE(review): running the full puzzle is expected to raise here —
    # presumably the CAT accounting cannot balance for this lone spend;
    # the test only needs the solution's structure to be extractable.
    with pytest.raises(ValueError, match="clvm raise"):
        double_wrap.run(solution)
    assert get_inner_solution(outer_driver, solution) == inner_solution
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/config.py | tests/wallet/cat_wallet/config.py | # flake8: noqa: E501
from __future__ import annotations
# Per-job timeout in seconds.
# NOTE(review): consumed by external test tooling — confirm exact semantics there.
job_timeout = 50
# NOTE(review): presumably tells CI to check out pre-generated test blocks and
# plots for these wallet tests — confirm against the CI configuration.
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/test_offer_lifecycle.py | tests/wallet/cat_wallet/test_offer_lifecycle.py | from typing import Any, Dict, Optional, List
import pytest
from blspy import G2Element
from flax.types.announcement import Announcement
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint64
from flax.wallet.cat_wallet.cat_utils import (
construct_cat_puzzle,
SpendableCAT,
unsigned_spend_bundle_for_spendable_cats,
)
from flax.wallet.puzzles.cat_loader import CAT_MOD
from flax.wallet.outer_puzzles import AssetType
from flax.wallet.puzzle_drivers import PuzzleInfo
from flax.wallet.payment import Payment
from flax.wallet.trading.offer import Offer, NotarizedPayment
from tests.clvm.benchmark_costs import cost_of_spend_bundle
# Anyone-can-spend inner puzzle (the identity program `1`) and its tree hash,
# used as the inner puzzle for every coin created in these tests.
acs = Program.to(1)
acs_ph = acs.get_tree_hash()
# Some methods mapping strings to CATs
def str_to_tail(tail_str: str) -> Program:
    """Build a fake TAIL program whose tree hash is unique per ``tail_str``."""
    structure = [3, [], [1, tail_str], []]
    return Program.to(structure)
def str_to_tail_hash(tail_str: str) -> bytes32:
    """Return the tree hash of the fake TAIL for ``tail_str``.

    Delegates to :func:`str_to_tail` instead of duplicating the program
    literal, so the two helpers can never drift apart.
    """
    return str_to_tail(tail_str).get_tree_hash()
def str_to_cat_hash(tail_str: str) -> bytes32:
    """Puzzle hash of a CAT (ACS inner puzzle) built from the fake TAIL for ``tail_str``."""
    cat_puzzle = construct_cat_puzzle(CAT_MOD, str_to_tail_hash(tail_str), acs)
    return cat_puzzle.get_tree_hash()
# This method takes a dictionary of strings mapping to amounts and generates the appropriate CAT/XFX coins
async def generate_coins(
    sim,
    sim_client,
    requested_coins: Dict[Optional[str], List[uint64]],
) -> Dict[Optional[str], List[Coin]]:
    """Create XFX and CAT coins on the simulator per ``requested_coins``.

    Keys of ``requested_coins`` are fake-TAIL strings (``None`` for plain
    XFX); values are the coin amounts to create for that asset.  Returns
    the newly created, unspent coins grouped under the same keys.
    """
    await sim.farm_block(acs_ph)
    parent_coin: Coin = [cr.coin for cr in await (sim_client.get_coin_records_by_puzzle_hash(acs_ph))][0]
    # We need to gather a list of initial coins to create as well as spends that do the eve spend for every CAT
    payments: List[Payment] = []
    cat_bundles: List[SpendBundle] = []
    for tail_str, amounts in requested_coins.items():
        for amount in amounts:
            if tail_str:
                tail: Program = str_to_tail(tail_str)  # Making a fake but unique TAIL
                cat_puzzle: Program = construct_cat_puzzle(CAT_MOD, tail.get_tree_hash(), acs)
                payments.append(Payment(cat_puzzle.get_tree_hash(), amount, []))
                # Eve spend: re-create the coin to itself and run the TAIL (-113 magic).
                cat_bundles.append(
                    unsigned_spend_bundle_for_spendable_cats(
                        CAT_MOD,
                        [
                            SpendableCAT(
                                Coin(parent_coin.name(), cat_puzzle.get_tree_hash(), amount),
                                tail.get_tree_hash(),
                                acs,
                                Program.to([[51, acs_ph, amount], [51, 0, -113, tail, []]]),
                            )
                        ],
                    )
                )
            else:
                payments.append(Payment(acs_ph, amount, []))
    # This bundle creates all of the initial coins
    parent_bundle = SpendBundle(
        [
            CoinSpend(
                parent_coin,
                acs,
                Program.to([[51, p.puzzle_hash, p.amount] for p in payments]),
            )
        ],
        G2Element(),
    )
    # Then we aggregate it with all of the eve spends
    await sim_client.push_tx(SpendBundle.aggregate([parent_bundle, *cat_bundles]))
    await sim.farm_block()
    # Search for all of the coins and put them into a dictionary
    coin_dict: Dict[Optional[str], List[Coin]] = {}
    for tail_str, _ in requested_coins.items():
        if tail_str:
            tail_hash: bytes32 = str_to_tail_hash(tail_str)
            cat_ph: bytes32 = construct_cat_puzzle(CAT_MOD, tail_hash, acs).get_tree_hash()
            coin_dict[tail_str] = [
                cr.coin for cr in await (sim_client.get_coin_records_by_puzzle_hash(cat_ph, include_spent_coins=False))
            ]
        else:
            # NOTE(review): the amount filter presumably excludes large
            # block-reward coins farmed to acs_ph — confirm against the simulator.
            coin_dict[None] = list(
                filter(
                    lambda c: c.amount < 250000000000,
                    [
                        cr.coin
                        for cr in await (sim_client.get_coin_records_by_puzzle_hash(acs_ph, include_spent_coins=False))
                    ],
                )
            )
    return coin_dict
# `generate_secure_bundle` simulates a wallet's `generate_signed_transaction`
# but doesn't bother with non-offer announcements
def generate_secure_bundle(
    selected_coins: List[Coin],
    announcements: List[Announcement],
    offered_amount: uint64,
    tail_str: Optional[str] = None,
) -> SpendBundle:
    """Simulate a wallet's ``generate_signed_transaction`` for an offer.

    Spends ``selected_coins`` to create the offered coin plus change and
    asserts the given ``announcements`` (it doesn't bother with any
    non-offer announcements).  If ``tail_str`` is given, the spend is built
    as a CAT spend that runs the fake TAIL instead of using lineage proofs.
    """
    announcement_assertions: List[List] = [[63, a.name()] for a in announcements]
    selected_coin_amount: int = sum([c.amount for c in selected_coins])
    # Only the first selected coin carries the payment conditions.
    non_primaries: List[Coin] = [] if len(selected_coins) < 2 else selected_coins[1:]
    inner_solution: List[List] = [
        [51, Offer.ph(), offered_amount],  # Offered coin
        [51, acs_ph, uint64(selected_coin_amount - offered_amount)],  # Change
        *announcement_assertions,
    ]
    if tail_str is None:
        bundle = SpendBundle(
            [
                CoinSpend(
                    selected_coins[0],
                    acs,
                    Program.to(inner_solution),
                ),
                *[CoinSpend(c, acs, Program.to([])) for c in non_primaries],
            ],
            G2Element(),
        )
    else:
        spendable_cats: List[SpendableCAT] = [
            SpendableCAT(
                c,
                str_to_tail_hash(tail_str),
                acs,
                Program.to(
                    [
                        [51, 0, -113, str_to_tail(tail_str), Program.to([])],  # Use the TAIL rather than lineage
                        *(inner_solution if c == selected_coins[0] else []),
                    ]
                ),
            )
            for c in selected_coins
        ]
        bundle = unsigned_spend_bundle_for_spendable_cats(CAT_MOD, spendable_cats)
    return bundle
class TestOfferLifecycle:
    """End-to-end offer lifecycle: creation, aggregation, (de)serialization,
    compression, and final settlement on the simulator."""

    # CLVM cost of each named spend, logged by test_cost at the end.
    cost: Dict[str, int] = {}

    @pytest.mark.asyncio()
    async def test_complex_offer(self, setup_sim):
        """Aggregate XFX/RED/BLUE offers into one valid offer and settle it."""
        sim, sim_client = setup_sim
        try:
            coins_needed: Dict[Optional[str], List[int]] = {
                None: [500, 400, 300],
                "red": [250, 100],
                "blue": [3000],
            }
            all_coins: Dict[Optional[str], List[Coin]] = await generate_coins(sim, sim_client, coins_needed)
            flax_coins: List[Coin] = all_coins[None]
            red_coins: List[Coin] = all_coins["red"]
            blue_coins: List[Coin] = all_coins["blue"]
            # Driver info for each CAT, keyed by TAIL hash.
            driver_dict: Dict[bytes32, PuzzleInfo] = {
                str_to_tail_hash("red"): PuzzleInfo(
                    {"type": AssetType.CAT.value, "tail": "0x" + str_to_tail_hash("red").hex()}
                ),
                str_to_tail_hash("blue"): PuzzleInfo(
                    {"type": AssetType.CAT.value, "tail": "0x" + str_to_tail_hash("blue").hex()}
                ),
            }
            driver_dict_as_infos: Dict[str, Any] = {}
            for key, value in driver_dict.items():
                driver_dict_as_infos[key.hex()] = value.info
            # Create an XFX Offer for RED
            flax_requested_payments: Dict[Optional[bytes32], List[Payment]] = {
                str_to_tail_hash("red"): [
                    Payment(acs_ph, 100, [b"memo"]),
                    Payment(acs_ph, 200, [b"memo"]),
                ]
            }
            flax_requested_payments: Dict[Optional[bytes32], List[NotarizedPayment]] = Offer.notarize_payments(
                flax_requested_payments, flax_coins
            )
            flax_announcements: List[Announcement] = Offer.calculate_announcements(flax_requested_payments, driver_dict)
            flax_secured_bundle: SpendBundle = generate_secure_bundle(flax_coins, flax_announcements, 1000)
            flax_offer = Offer(flax_requested_payments, flax_secured_bundle, driver_dict)
            # A one-sided offer is not valid on its own.
            assert not flax_offer.is_valid()
            # Create a RED Offer for XFX
            red_coins_1 = red_coins[0:1]
            red_coins_2 = red_coins[1:]
            red_requested_payments: Dict[Optional[bytes32], List[Payment]] = {
                None: [
                    Payment(acs_ph, 300, [b"red memo"]),
                    Payment(acs_ph, 350, [b"red memo"]),
                ]
            }
            red_requested_payments: Dict[Optional[bytes32], List[NotarizedPayment]] = Offer.notarize_payments(
                red_requested_payments, red_coins_1
            )
            red_announcements: List[Announcement] = Offer.calculate_announcements(red_requested_payments, driver_dict)
            red_secured_bundle: SpendBundle = generate_secure_bundle(
                red_coins_1, red_announcements, sum([c.amount for c in red_coins_1]), tail_str="red"
            )
            red_offer = Offer(red_requested_payments, red_secured_bundle, driver_dict)
            assert not red_offer.is_valid()
            # A second, smaller RED offer from the remaining red coin.
            red_requested_payments_2: Dict[Optional[bytes32], List[Payment]] = {
                None: [
                    Payment(acs_ph, 50, [b"red memo"]),
                ]
            }
            red_requested_payments_2: Dict[Optional[bytes32], List[NotarizedPayment]] = Offer.notarize_payments(
                red_requested_payments_2, red_coins_2
            )
            red_announcements_2: List[Announcement] = Offer.calculate_announcements(
                red_requested_payments_2, driver_dict
            )
            red_secured_bundle_2: SpendBundle = generate_secure_bundle(
                red_coins_2, red_announcements_2, sum([c.amount for c in red_coins_2]), tail_str="red"
            )
            red_offer_2 = Offer(red_requested_payments_2, red_secured_bundle_2, driver_dict)
            assert not red_offer_2.is_valid()
            # Test aggregation of offers
            new_offer = Offer.aggregate([flax_offer, red_offer, red_offer_2])
            assert new_offer.get_offered_amounts() == {None: 1000, str_to_tail_hash("red"): 350}
            assert new_offer.get_requested_amounts() == {None: 700, str_to_tail_hash("red"): 300}
            assert new_offer.is_valid()
            # Create yet another offer of BLUE for XFX and RED
            blue_requested_payments: Dict[Optional[bytes32], List[Payment]] = {
                None: [
                    Payment(acs_ph, 200, [b"blue memo"]),
                ],
                str_to_tail_hash("red"): [
                    Payment(acs_ph, 50, [b"blue memo"]),
                ],
            }
            blue_requested_payments: Dict[Optional[bytes32], List[NotarizedPayment]] = Offer.notarize_payments(
                blue_requested_payments, blue_coins
            )
            blue_announcements: List[Announcement] = Offer.calculate_announcements(blue_requested_payments, driver_dict)
            blue_secured_bundle: SpendBundle = generate_secure_bundle(
                blue_coins, blue_announcements, 2000, tail_str="blue"
            )
            blue_offer = Offer(blue_requested_payments, blue_secured_bundle, driver_dict)
            assert not blue_offer.is_valid()
            # Test a re-aggregation
            new_offer: Offer = Offer.aggregate([new_offer, blue_offer])
            assert new_offer.get_offered_amounts() == {
                None: 1000,
                str_to_tail_hash("red"): 350,
                str_to_tail_hash("blue"): 2000,
            }
            assert new_offer.get_requested_amounts() == {None: 900, str_to_tail_hash("red"): 350}
            assert new_offer.summary() == (
                {
                    "xfx": 1000,
                    str_to_tail_hash("red").hex(): 350,
                    str_to_tail_hash("blue").hex(): 2000,
                },
                {"xfx": 900, str_to_tail_hash("red").hex(): 350},
                driver_dict_as_infos,
            )
            assert new_offer.get_pending_amounts() == {
                "xfx": 1200,
                str_to_tail_hash("red").hex(): 350,
                str_to_tail_hash("blue").hex(): 3000,
            }
            assert new_offer.is_valid()
            # Test (de)serialization
            assert Offer.from_bytes(bytes(new_offer)) == new_offer
            # Test compression
            assert Offer.from_compressed(new_offer.compress()) == new_offer
            # Make sure we can actually spend the offer once it's valid
            arbitrage_ph: bytes32 = Program.to([3, [], [], 1]).get_tree_hash()
            offer_bundle: SpendBundle = new_offer.to_valid_spend(arbitrage_ph)
            result = await sim_client.push_tx(offer_bundle)
            assert result == (MempoolInclusionStatus.SUCCESS, None)
            self.cost["complex offer"] = cost_of_spend_bundle(offer_bundle)
            await sim.farm_block()
        finally:
            await sim.close()

    def test_cost(self):
        """Log the accumulated spend costs as JSON."""
        import json
        import logging

        log = logging.getLogger(__name__)
        log.warning(json.dumps(self.cost))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/__init__.py | tests/wallet/cat_wallet/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/cat_wallet/test_cat_wallet.py | tests/wallet/cat_wallet/test_cat_wallet.py | import asyncio
from typing import List
import pytest
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.full_node.mempool_manager import MempoolManager
from flax.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16, uint32, uint64
from flax.wallet.cat_wallet.cat_constants import DEFAULT_CATS
from flax.wallet.cat_wallet.cat_info import LegacyCATInfo
from flax.wallet.cat_wallet.cat_utils import construct_cat_puzzle
from flax.wallet.cat_wallet.cat_wallet import CATWallet
from flax.wallet.puzzles.cat_loader import CAT_MOD
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.wallet_info import WalletInfo
from flax.simulator.time_out_assert import time_out_assert
from tests.util.wallet_is_synced import wallet_is_synced
async def tx_in_pool(mempool: MempoolManager, tx_id: bytes32) -> bool:
    """Return True iff the spend bundle with ``tx_id`` is in ``mempool``.

    ``get_spendbundle`` returns None for unknown transaction IDs, so the
    presence check reduces to a single identity test.
    """
    return mempool.get_spendbundle(tx_id) is not None
class TestCATWallet:
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_cat_creation(self, self_hostname, two_wallet_nodes, trusted):
    """Create a new CAT wallet, verify its balances, exercise the legacy
    CAT-info migration path, then reorg past creation and check the
    balance drops back to zero."""
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    if trusted:
        wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node.config["trusted_peers"] = {}
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    # Farm rewards to the wallet, plus one block elsewhere to confirm them.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)
    await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100)
        )
        # The next 2 lines are basically a noop, it just adds test coverage
        cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, cat_wallet.wallet_info)
        await wallet_node.wallet_state_manager.add_new_wallet(cat_wallet, cat_wallet.id())
    tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
    await time_out_assert(20, cat_wallet.get_spendable_balance, 100)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
    # Test migration: rebuild the wallet from legacy-serialized CAT info and
    # check the important fields survive the round trip.
    all_lineage = await cat_wallet.lineage_store.get_all_lineage_proofs()
    current_info = cat_wallet.wallet_info
    data_str = bytes(
        LegacyCATInfo(
            cat_wallet.cat_info.limitations_program_hash, cat_wallet.cat_info.my_tail, list(all_lineage.items())
        )
    ).hex()
    wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str)
    new_cat_wallet = await CATWallet.create(wallet_node.wallet_state_manager, wallet, wallet_info)
    assert new_cat_wallet.cat_info.limitations_program_hash == cat_wallet.cat_info.limitations_program_hash
    assert new_cat_wallet.cat_info.my_tail == cat_wallet.cat_info.my_tail
    assert await cat_wallet.lineage_store.get_all_lineage_proofs() == all_lineage
    height = full_node_api.full_node.blockchain.get_peak_height()
    await full_node_api.reorg_from_index_to_new_index(
        ReorgProtocol(height - num_blocks - 1, height + 1, 32 * b"1", None)
    )
    # Reorging past the CAT's creation should wipe its confirmed balance.
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 0)
@pytest.mark.asyncio
async def test_cat_creation_unique_lineage_store(self, self_hostname, two_wallet_nodes):
    """Create two CAT wallets and verify each gets its own lineage store
    (distinct table, distinct proofs)."""
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, wallet_server = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)
    await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet_1: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100)
        )
        cat_wallet_2: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(200)
        )
    proofs_1 = await cat_wallet_1.lineage_store.get_all_lineage_proofs()
    proofs_2 = await cat_wallet_2.lineage_store.get_all_lineage_proofs()
    # Same number of proofs, but different contents and separate tables.
    assert len(proofs_1) == len(proofs_2)
    assert proofs_1 != proofs_2
    assert cat_wallet_1.lineage_store.table_name != cat_wallet_2.lineage_store.table_name
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_cat_spend(self, self_hostname, two_wallet_nodes, trusted):
    """Issue a CAT, send part of it to a second wallet, spend some back,
    then verify that a reorg rolls the receiving balance back.

    Runs once with the full node configured as a trusted peer and once
    untrusted (parametrized via ``trusted``).
    """
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    wallet2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure whether each wallet node treats the full node as a trusted peer.
    if trusted:
        wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node.config["trusted_peers"] = {}
        wallet_node_2.config["trusted_peers"] = {}
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await server_3.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)

    # Farm rewards to the first wallet, plus one block to an unrelated puzzle
    # hash so the reward coins above become confirmed.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))

    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)

    # Issue a new CAT worth 100 and wait for the issuance spend to confirm.
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100)
        )
    tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
    assert cat_wallet.cat_info.limitations_program_hash is not None
    asset_id = cat_wallet.get_asset_id()

    # Second node creates a wallet for the same asset id.
    cat_wallet_2: CATWallet = await CATWallet.create_wallet_for_cat(
        wallet_node_2.wallet_state_manager, wallet2, asset_id
    )
    assert cat_wallet.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash

    # Send 60 CAT (with a fee of 1) to the second wallet.
    cat_2_hash = await cat_wallet_2.get_new_inner_hash()
    tx_records = await cat_wallet.generate_signed_transaction([uint64(60)], [cat_2_hash], fee=uint64(1))
    for tx_record in tx_records:
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)
        if tx_record.spend_bundle is not None:
            await time_out_assert(
                15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
            )
        # BUG FIX: wallet ids are ints, so compare by value with `==`.
        # The original used `is`, which only happened to pass because
        # CPython interns small integers.
        if tx_record.wallet_id == cat_wallet.id():
            assert tx_record.to_puzzle_hash == cat_2_hash

    await time_out_assert(20, cat_wallet.get_pending_change_balance, 40)

    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))

    await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
    await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 60)
    await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 60)

    # Spend 15 CAT back from the second wallet to the first.
    cat_hash = await cat_wallet.get_new_inner_hash()
    tx_records = await cat_wallet_2.generate_signed_transaction([uint64(15)], [cat_hash])
    for tx_record in tx_records:
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 55)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 55)

    # Reorg past the block containing the 15-CAT receive and verify the
    # confirmed balance reverts to 40.
    height = full_node_api.full_node.blockchain.get_peak_height()
    await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(height - 1, height + 1, 32 * b"1", None))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_get_wallet_for_asset_id(self, self_hostname, two_wallet_nodes, trusted):
    """Verify the wallet state manager can look up a CAT wallet by asset id,
    and that a wallet created for a well-known default CAT gets the expected
    name (which can then be overridden)."""
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure whether the wallet node treats the full node as a trusted peer.
    if trusted:
        wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node.config["trusted_peers"] = {}
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    # Farm rewards to the wallet, plus one extra block so they confirm.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)
    # Issue a new CAT worth 100.
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100)
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    asset_id = cat_wallet.get_asset_id()
    await cat_wallet.set_tail_program(bytes(cat_wallet.cat_info.my_tail).hex())
    # The freshly created wallet must be retrievable by its asset id.
    assert await wallet_node.wallet_state_manager.get_wallet_for_asset_id(asset_id) == cat_wallet
    # Test that a default (well-known) CAT will initialize correctly
    asset = DEFAULT_CATS[next(iter(DEFAULT_CATS))]
    asset_id = asset["asset_id"]
    cat_wallet_2 = await CATWallet.create_wallet_for_cat(wallet_node.wallet_state_manager, wallet, asset_id)
    assert await cat_wallet_2.get_name() == asset["name"]
    # The default name should be overridable after creation.
    await cat_wallet_2.set_name("Test Name")
    assert await cat_wallet_2.get_name() == "Test Name"
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_cat_doesnt_see_eve(self, self_hostname, two_wallet_nodes, trusted):
    """Send a plain (non-CAT) coin directly to a CAT puzzle hash and verify
    the receiving CAT wallet neither records the transaction nor counts its
    amount in any balance."""
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    wallet2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure whether the wallet nodes treat the full node as a trusted peer.
    if trusted:
        wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node.config["trusted_peers"] = {}
        wallet_node_2.config["trusted_peers"] = {}
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await server_3.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    # Fund the first wallet and wait for the rewards to confirm.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)
    # Issue a new CAT worth 100 and wait for the issuance spend to confirm.
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100)
        )
    tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 100)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100)
    assert cat_wallet.cat_info.limitations_program_hash is not None
    asset_id = cat_wallet.get_asset_id()
    # Second node creates a wallet for the same asset id.
    cat_wallet_2: CATWallet = await CATWallet.create_wallet_for_cat(
        wallet_node_2.wallet_state_manager, wallet2, asset_id
    )
    assert cat_wallet.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
    # Send 60 CAT (with a fee of 1) to the second wallet.
    cat_2_hash = await cat_wallet_2.get_new_inner_hash()
    tx_records = await cat_wallet.generate_signed_transaction([uint64(60)], [cat_2_hash], fee=uint64(1))
    for tx_record in tx_records:
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)
        if tx_record.spend_bundle is not None:
            await time_out_assert(
                15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
            )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(30, wallet.get_confirmed_balance, funds - 101)
    await time_out_assert(30, wallet.get_unconfirmed_balance, funds - 101)
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 40)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 40)
    await time_out_assert(20, cat_wallet_2.get_confirmed_balance, 60)
    await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
    # Now send 10 of the standard coin directly to a CAT puzzle hash: this is
    # not a valid CAT coin and must not show up in the CAT wallet.
    cc2_ph = await cat_wallet_2.get_new_cat_puzzle_hash()
    tx_record = await wallet.wallet_state_manager.main_wallet.generate_signed_transaction(10, cc2_ph, 0)
    await wallet.wallet_state_manager.add_pending_transaction(tx_record)
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    id = cat_wallet_2.id()
    wsm = cat_wallet_2.wallet_state_manager

    async def query_and_assert_transactions(wsm, id):
        # Count the CAT wallet's transactions of amount 10 (should stay 0,
        # since the plain coin above must be ignored).
        all_txs = await wsm.tx_store.get_all_transactions_for_wallet(id)
        return len(list(filter(lambda tx: tx.amount == 10, all_txs)))

    await time_out_assert(20, query_and_assert_transactions, 0, wsm, id)
    await time_out_assert(20, wsm.get_confirmed_balance_for_wallet, 60, id)
    await time_out_assert(20, cat_wallet_2.get_confirmed_balance, 60)
    await time_out_assert(20, cat_wallet_2.get_unconfirmed_balance, 60)
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_cat_spend_multiple(self, self_hostname, three_wallet_nodes, trusted):
    """Issue a CAT on wallet 0, fan it out to wallets 1 and 2 in one
    transaction, spend back from both, then verify balances and memo
    handling on a CAT transaction."""
    num_blocks = 3
    full_nodes, wallets, _ = three_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node_0, wallet_server_0 = wallets[0]
    wallet_node_1, wallet_server_1 = wallets[1]
    wallet_node_2, wallet_server_2 = wallets[2]
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
    wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
    wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet_0.get_new_puzzlehash()
    # Configure whether the wallet nodes treat the full node as a trusted peer.
    if trusted:
        wallet_node_0.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        wallet_node_1.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node_0.config["trusted_peers"] = {}
        wallet_node_1.config["trusted_peers"] = {}
        wallet_node_2.config["trusted_peers"] = {}
    await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await wallet_server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    # Fund wallet 0.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    funds = sum(
        [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
    )
    await time_out_assert(20, wallet_0.get_confirmed_balance, funds)
    # Issue a new CAT worth 100 on wallet 0 and wait for it to confirm.
    async with wallet_node_0.wallet_state_manager.lock:
        cat_wallet_0: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node_0.wallet_state_manager, wallet_0, {"identifier": "genesis_by_id"}, uint64(100)
        )
    tx_queue: List[TransactionRecord] = await wallet_node_0.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 100)
    await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 100)
    assert cat_wallet_0.cat_info.limitations_program_hash is not None
    asset_id = cat_wallet_0.get_asset_id()
    # Wallets 1 and 2 create wallets for the same asset id.
    cat_wallet_1: CATWallet = await CATWallet.create_wallet_for_cat(
        wallet_node_1.wallet_state_manager, wallet_1, asset_id
    )
    cat_wallet_2: CATWallet = await CATWallet.create_wallet_for_cat(
        wallet_node_2.wallet_state_manager, wallet_2, asset_id
    )
    assert cat_wallet_0.cat_info.limitations_program_hash == cat_wallet_1.cat_info.limitations_program_hash
    assert cat_wallet_0.cat_info.limitations_program_hash == cat_wallet_2.cat_info.limitations_program_hash
    # Fan out: 60 to wallet 1 and 20 to wallet 2 in a single transaction.
    cat_1_hash = await cat_wallet_1.get_new_inner_hash()
    cat_2_hash = await cat_wallet_2.get_new_inner_hash()
    tx_records = await cat_wallet_0.generate_signed_transaction([uint64(60), uint64(20)], [cat_1_hash, cat_2_hash])
    for tx_record in tx_records:
        await wallet_0.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 20)
    await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 20)
    await time_out_assert(30, cat_wallet_1.get_confirmed_balance, 60)
    await time_out_assert(30, cat_wallet_1.get_unconfirmed_balance, 60)
    await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 20)
    await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 20)
    # Spend back to wallet 0: 15 from wallet 1 and all 20 from wallet 2.
    cat_hash = await cat_wallet_0.get_new_inner_hash()
    tx_records = await cat_wallet_1.generate_signed_transaction([uint64(15)], [cat_hash])
    for tx_record in tx_records:
        await wallet_1.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    tx_records_2 = await cat_wallet_2.generate_signed_transaction([uint64(20)], [cat_hash])
    for tx_record in tx_records_2:
        await wallet_2.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet_0.get_confirmed_balance, 55)
    await time_out_assert(20, cat_wallet_0.get_unconfirmed_balance, 55)
    await time_out_assert(30, cat_wallet_1.get_confirmed_balance, 45)
    await time_out_assert(30, cat_wallet_1.get_unconfirmed_balance, 45)
    await time_out_assert(30, cat_wallet_2.get_confirmed_balance, 0)
    await time_out_assert(30, cat_wallet_2.get_unconfirmed_balance, 0)
    txs = await wallet_1.wallet_state_manager.tx_store.get_transactions_between(cat_wallet_1.id(), 0, 100000)
    print(len(txs))
    # Test with Memo
    # NOTE: generate_signed_transaction returns a list of records; annotation
    # corrected from the bare TransactionRecord in the original.
    tx_records_3: List[TransactionRecord] = await cat_wallet_1.generate_signed_transaction(
        [uint64(30)], [cat_hash], memos=[[b"Markus Walburg"]]
    )
    # Supplying more memo lists than outputs must be rejected.
    with pytest.raises(ValueError):
        await cat_wallet_1.generate_signed_transaction(
            [uint64(30)], [cat_hash], memos=[[b"too"], [b"many"], [b"memos"]]
        )
    for tx_record in tx_records_3:
        await wallet_1.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    txs = await wallet_1.wallet_state_manager.tx_store.get_transactions_between(cat_wallet_1.id(), 0, 100000)
    for tx in txs:
        if tx.amount == 30:
            # The memo must be attached to exactly one coin, and that coin
            # must be an addition of the transaction's spend bundle.
            memos = tx.get_memos()
            assert len(memos) == 1
            assert b"Markus Walburg" in [v for v_list in memos.values() for v in v_list]
            assert list(memos.keys())[0] in [a.name() for a in tx.spend_bundle.additions()]
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_cat_max_amount_send(self, self_hostname, two_wallet_nodes, trusted):
    """Break a CAT balance into many small coins, then check that
    generate_signed_transaction succeeds at and below get_max_send_amount()
    and raises ValueError above it."""
    num_blocks = 3
    full_nodes, wallets, _ = two_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node, server_2 = wallets[0]
    wallet_node_2, server_3 = wallets[1]
    wallet = wallet_node.wallet_state_manager.main_wallet
    ph = await wallet.get_new_puzzlehash()
    # Configure whether the wallet nodes treat the full node as a trusted peer.
    if trusted:
        wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
    else:
        wallet_node.config["trusted_peers"] = {}
        wallet_node_2.config["trusted_peers"] = {}
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await server_3.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)

    # Fund the standard wallet and wait for the rewards to confirm.
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    await time_out_assert(20, wallet.get_confirmed_balance, funds)

    # Issue a new CAT worth 100000 and wait for the issuance spend to confirm.
    async with wallet_node.wallet_state_manager.lock:
        cat_wallet: CATWallet = await CATWallet.create_new_cat_wallet(
            wallet_node.wallet_state_manager, wallet, {"identifier": "genesis_by_id"}, uint64(100000)
        )
    tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.tx_store.get_not_sent()
    tx_record = tx_queue[0]
    await time_out_assert(
        15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
    )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))
    await time_out_assert(20, cat_wallet.get_confirmed_balance, 100000)
    await time_out_assert(20, cat_wallet.get_unconfirmed_balance, 100000)
    assert cat_wallet.cat_info.limitations_program_hash is not None

    # Split one CAT coin into 49 outputs of amounts 1..49 paid back to
    # ourselves, so the wallet holds many small coins.
    cat_2 = await cat_wallet.get_new_inner_puzzle()
    cat_2_hash = cat_2.get_tree_hash()
    amounts = []
    puzzle_hashes = []
    for i in range(1, 50):
        amounts.append(uint64(i))
        puzzle_hashes.append(cat_2_hash)
    # (local renamed from the original's typo "spent_coint")
    spent_coin = (await cat_wallet.get_cat_spendable_coins())[0].coin
    tx_records = await cat_wallet.generate_signed_transaction(amounts, puzzle_hashes, coins={spent_coin})
    for tx_record in tx_records:
        await wallet.wallet_state_manager.add_pending_transaction(tx_record)
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await asyncio.sleep(2)

    async def check_all_there():
        # True once every one of the 49 split coins is spendable.
        spendable = await cat_wallet.get_cat_spendable_coins()
        spendable_name_set = set()
        for record in spendable:
            spendable_name_set.add(record.coin.name())
        puzzle_hash = construct_cat_puzzle(
            CAT_MOD, cat_wallet.cat_info.limitations_program_hash, cat_2
        ).get_tree_hash()
        for i in range(1, 50):
            coin = Coin(spent_coin.name(), puzzle_hash, i)
            if coin.name() not in spendable_name_set:
                return False
        return True

    await time_out_assert(20, check_all_there, True)
    await asyncio.sleep(5)
    max_sent_amount = await cat_wallet.get_max_send_amount()

    # 1) A transaction just under the limit must be creatable.
    # BUG FIX: the original except-blocks contained `assert ValueError`, which
    # asserts the exception class itself (always truthy) and so did nothing.
    # Leaving the variable as None lets the assert below report the failure.
    under_limit_txs = None
    try:
        under_limit_txs = await cat_wallet.generate_signed_transaction(
            [max_sent_amount - 1],
            [ph],
        )
    except ValueError:
        pass
    assert under_limit_txs is not None

    # 2) A transaction exactly at the limit must be creatable.
    at_limit_txs = None
    try:
        at_limit_txs = await cat_wallet.generate_signed_transaction(
            [max_sent_amount],
            [ph],
        )
    except ValueError:
        pass
    assert at_limit_txs is not None

    # 3) A transaction above the limit must raise ValueError.
    above_limit_txs = None
    try:
        above_limit_txs = await cat_wallet.generate_signed_transaction(
            [max_sent_amount + 1],
            [ph],
        )
    except ValueError:
        pass
    assert above_limit_txs is None
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.parametrize(
"autodiscovery",
[True, False],
)
@pytest.mark.asyncio
async def test_cat_hint(self, self_hostname, two_wallet_nodes, trusted, autodiscovery):
num_blocks = 3
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
if trusted:
wallet_node.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
wallet_node_2.config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
else:
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/did_wallet/test_did.py | tests/wallet/did_wallet/test_did.py | import json
from typing import Optional
import pytest
from blspy import AugSchemeMPL, G1Element, G2Element
from flax.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
from flax.rpc.wallet_rpc_api import WalletRpcApi
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.types.blockchain_format.program import Program
from flax.types.peer_info import PeerInfo
from flax.types.spend_bundle import SpendBundle
from flax.util.bech32m import encode_puzzle_hash
from flax.util.ints import uint16, uint32, uint64
from flax.wallet.util.address_type import AddressType
from flax.wallet.util.wallet_types import WalletType
from flax.wallet.did_wallet.did_wallet import DIDWallet
from flax.simulator.time_out_assert import time_out_assert, time_out_assert_not_none
# pytestmark = pytest.mark.skip("TODO: Fix tests")
async def get_wallet_num(wallet_manager):
    """Return the number of wallets currently tracked by ``wallet_manager``."""
    entries = await wallet_manager.get_all_wallet_info_entries()
    return len(entries)
class TestDIDWallet:
@pytest.mark.parametrize(
    "trusted",
    [True, False],
)
@pytest.mark.asyncio
async def test_creation_from_backup_file(self, self_hostname, three_wallet_nodes, trusted):
    """Create two DID wallets (the second listing the first as its backup),
    recover the second one on a third node from its backup data via an
    attestment from the backup DID, then spend the recovered funds out."""
    num_blocks = 5
    full_nodes, wallets, _ = three_wallet_nodes
    full_node_api = full_nodes[0]
    full_node_server = full_node_api.server
    wallet_node_0, server_0 = wallets[0]
    wallet_node_1, server_1 = wallets[1]
    wallet_node_2, server_2 = wallets[2]
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
    wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
    wallet_2 = wallet_node_2.wallet_state_manager.main_wallet
    ph = await wallet_0.get_new_puzzlehash()
    ph1 = await wallet_1.get_new_puzzlehash()
    ph2 = await wallet_2.get_new_puzzlehash()
    # Configure whether the wallet nodes treat the full node as a trusted peer.
    if trusted:
        wallet_node_0.config["trusted_peers"] = {
            full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
        }
        wallet_node_1.config["trusted_peers"] = {
            full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
        }
        wallet_node_2.config["trusted_peers"] = {
            full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
        }
    else:
        wallet_node_0.config["trusted_peers"] = {}
        wallet_node_1.config["trusted_peers"] = {}
        wallet_node_2.config["trusted_peers"] = {}
    await server_0.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
    # Fund all three wallets.
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks - 1)
        ]
    )
    await time_out_assert(10, wallet_0.get_unconfirmed_balance, funds)
    await time_out_assert(10, wallet_0.get_confirmed_balance, funds)
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph1))
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
    # Wallet1 sets up DIDWallet1 without any backup set
    async with wallet_node_0.wallet_state_manager.lock:
        did_wallet_0: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_0.wallet_state_manager, wallet_0, uint64(101)
        )
    spend_bundle_list = await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
        did_wallet_0.id()
    )
    spend_bundle = spend_bundle_list[0].spend_bundle
    # Wait for the DID creation spend to reach the mempool, then confirm it.
    await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_0.get_confirmed_balance, 101)
    await time_out_assert(15, did_wallet_0.get_unconfirmed_balance, 101)
    await time_out_assert(15, did_wallet_0.get_pending_change_balance, 0)
    # Wallet1 sets up DIDWallet_1 with DIDWallet_0 as backup
    backup_ids = [bytes.fromhex(did_wallet_0.get_my_DID())]
    async with wallet_node_1.wallet_state_manager.lock:
        did_wallet_1: DIDWallet = await DIDWallet.create_new_did_wallet(
            wallet_node_1.wallet_state_manager, wallet_1, uint64(201), backup_ids
        )
    spend_bundle_list = await wallet_node_1.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
        did_wallet_1.id()
    )
    spend_bundle = spend_bundle_list[0].spend_bundle
    await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(15, did_wallet_1.get_confirmed_balance, 201)
    await time_out_assert(15, did_wallet_1.get_unconfirmed_balance, 201)
    await time_out_assert(15, did_wallet_1.get_pending_change_balance, 0)
    backup_data = did_wallet_1.create_backup()
    # Wallet2 recovers DIDWallet2 to a new set of keys
    async with wallet_node_2.wallet_state_manager.lock:
        did_wallet_2 = await DIDWallet.create_new_did_wallet_from_recovery(
            wallet_node_2.wallet_state_manager, wallet_2, backup_data
        )
    coins = await did_wallet_1.select_coins(1)
    coin = coins.copy().pop()
    # The recovery wallet must target the same coin that did_wallet_1 holds.
    assert did_wallet_2.did_info.temp_coin == coin
    newpuzhash = await did_wallet_2.get_new_did_inner_hash()
    pubkey = bytes(
        (await did_wallet_2.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)).pubkey
    )
    # The backup DID (did_wallet_0) attests to the recovery.
    message_spend_bundle, attest_data = await did_wallet_0.create_attestment(
        did_wallet_2.did_info.temp_coin.name(), newpuzhash, pubkey
    )
    spend_bundle_list = await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
        did_wallet_0.id()
    )
    spend_bundle = spend_bundle_list[0].spend_bundle
    await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # Loading the attestment back must reproduce the same message spend bundle.
    (
        test_info_list,
        test_message_spend_bundle,
    ) = await did_wallet_2.load_attest_files_for_recovery_spend([attest_data])
    assert message_spend_bundle == test_message_spend_bundle
    spend_bundle = await did_wallet_2.recovery_spend(
        did_wallet_2.did_info.temp_coin,
        newpuzhash,
        test_info_list,
        pubkey,
        test_message_spend_bundle,
    )
    await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # After recovery the third node's DID wallet owns the 201.
    await time_out_assert(45, did_wallet_2.get_confirmed_balance, 201)
    await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 201)
    # Spend everything out of the recovered wallet to an arbitrary puzzle hash.
    some_ph = 32 * b"\2"
    await did_wallet_2.create_exit_spend(some_ph)
    spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
        did_wallet_2.id()
    )
    spend_bundle = spend_bundle_list[0].spend_bundle
    await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
    for i in range(1, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    async def get_coins_with_ph():
        # True once exactly one coin exists at the exit puzzle hash.
        coins = await full_node_api.full_node.coin_store.get_coin_records_by_puzzle_hash(True, some_ph)
        if len(coins) == 1:
            return True
        return False

    await time_out_assert(15, get_coins_with_ph, True)
    await time_out_assert(45, did_wallet_2.get_confirmed_balance, 0)
    await time_out_assert(45, did_wallet_2.get_unconfirmed_balance, 0)
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_did_recovery_with_multiple_backup_dids(self, self_hostname, two_wallet_nodes, trusted):
num_blocks = 5
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
if trusted:
wallet_node.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
wallet_node_2.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
assert did_wallet.wallet_info.name == "Profile 1"
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
ph = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
)
spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_2.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet_2.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_2.get_unconfirmed_balance, 101)
assert did_wallet_2.did_info.backup_ids == recovery_list
recovery_list.append(bytes.fromhex(did_wallet_2.get_my_DID()))
async with wallet_node_2.wallet_state_manager.lock:
did_wallet_3: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node_2.wallet_state_manager, wallet2, uint64(201), recovery_list
)
spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_3.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
ph2 = await wallet.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
assert did_wallet_3.did_info.backup_ids == recovery_list
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 201)
coins = await did_wallet_3.select_coins(1)
coin = coins.pop()
backup_data = did_wallet_3.create_backup()
async with wallet_node.wallet_state_manager.lock:
did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
wallet_node.wallet_state_manager,
wallet,
backup_data,
)
assert did_wallet_4.wallet_info.name == "Profile 2"
pubkey = (
await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_2.wallet_info.id)
).pubkey
new_ph = did_wallet_4.did_info.temp_puzhash
message_spend_bundle, attest1 = await did_wallet.create_attestment(coin.name(), new_ph, pubkey)
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
message_spend_bundle2, attest2 = await did_wallet_2.create_attestment(coin.name(), new_ph, pubkey)
spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_2.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
message_spend_bundle = message_spend_bundle.aggregate([message_spend_bundle, message_spend_bundle2])
(
test_info_list,
test_message_spend_bundle,
) = await did_wallet_4.load_attest_files_for_recovery_spend([attest1, attest2])
assert message_spend_bundle == test_message_spend_bundle
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet_4.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 0)
await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, message_spend_bundle)
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_4.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet_4.get_confirmed_balance, 201)
await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 201)
await time_out_assert(15, did_wallet_3.get_confirmed_balance, 0)
await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 0)
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_did_recovery_with_empty_set(self, self_hostname, two_wallet_nodes, trusted):
num_blocks = 5
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
if trusted:
wallet_node.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
wallet_node_2.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager, wallet, uint64(101)
)
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
coins = await did_wallet.select_coins(1)
coin = coins.pop()
info = Program.to([])
pubkey = (await did_wallet.wallet_state_manager.get_unused_derivation_record(did_wallet.wallet_info.id)).pubkey
try:
spend_bundle = await did_wallet.recovery_spend(
coin, ph, info, pubkey, SpendBundle([], AugSchemeMPL.aggregate([]))
)
except Exception:
# We expect a CLVM 80 error for this test
pass
else:
assert False
    @pytest.mark.parametrize(
        "trusted",
        [True, False],
    )
    @pytest.mark.asyncio
    async def test_did_attest_after_recovery(self, self_hostname, two_wallet_nodes, trusted):
        """A DID wallet that was itself created via recovery can still attest.

        Flow: wallet 1 creates DID 1; wallet 2 creates DID 2 with DID 1 as its
        recovery; DID 1 then swaps its recovery list to DID 2; DID 2 recovers
        into DID 3 (with DID 1 attesting); finally DID 1 recovers into DID 4
        with the *recovered* DID 3 producing the attestment.
        """
        num_blocks = 5
        full_nodes, wallets, _ = two_wallet_nodes
        full_node_api = full_nodes[0]
        server_1 = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet2 = wallet_node_2.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        # Trusted vs. untrusted sync configuration per parametrization.
        if trusted:
            wallet_node.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
            wallet_node_2.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node.config["trusted_peers"] = {}
            wallet_node_2.config["trusted_peers"] = {}
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
        # Fund wallet 1 by farming to its puzzle hash.
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )
        await time_out_assert(15, wallet.get_confirmed_balance, funds)
        # DID 1 on wallet node 1 (no recovery list yet).
        async with wallet_node.wallet_state_manager.lock:
            did_wallet: DIDWallet = await DIDWallet.create_new_did_wallet(
                wallet_node.wallet_state_manager, wallet, uint64(101)
            )
        spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        # Farming to wallet 2's puzzle hash also funds wallet 2 for its DID.
        ph2 = await wallet2.get_new_puzzlehash()
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
        await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
        # DID 2 on wallet node 2, with DID 1 as its only recovery ID.
        recovery_list = [bytes.fromhex(did_wallet.get_my_DID())]
        async with wallet_node_2.wallet_state_manager.lock:
            did_wallet_2: DIDWallet = await DIDWallet.create_new_did_wallet(
                wallet_node_2.wallet_state_manager, wallet2, uint64(101), recovery_list
            )
        spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
            did_wallet_2.id()
        )
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        ph = await wallet.get_new_puzzlehash()
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(25, did_wallet_2.get_confirmed_balance, 101)
        await time_out_assert(25, did_wallet_2.get_unconfirmed_balance, 101)
        assert did_wallet_2.did_info.backup_ids == recovery_list
        # Update coin with new ID info: DID 1 now lists DID 2 as its recovery.
        recovery_list = [bytes.fromhex(did_wallet_2.get_my_DID())]
        await did_wallet.update_recovery_list(recovery_list, uint64(1))
        assert did_wallet.did_info.backup_ids == recovery_list
        await did_wallet.create_update_spend()
        spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
        await time_out_assert(15, did_wallet.get_unconfirmed_balance, 101)
        # DID Wallet 2 recovers into DID Wallet 3 with new innerpuz
        backup_data = did_wallet_2.create_backup()
        async with wallet_node.wallet_state_manager.lock:
            did_wallet_3 = await DIDWallet.create_new_did_wallet_from_recovery(
                wallet_node.wallet_state_manager,
                wallet,
                backup_data,
            )
        new_ph = await did_wallet_3.get_new_did_inner_hash()
        coins = await did_wallet_2.select_coins(1)
        coin = coins.pop()
        pubkey = (
            await did_wallet_3.wallet_state_manager.get_unused_derivation_record(did_wallet_3.wallet_info.id)
        ).pubkey
        await time_out_assert(15, did_wallet.get_confirmed_balance, 101)
        # DID 1 (the listed recovery ID) attests for DID 2's coin; [1] is the attestment payload.
        attest_data = (await did_wallet.create_attestment(coin.name(), new_ph, pubkey))[1]
        spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(did_wallet.id())
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        (
            info,
            message_spend_bundle,
        ) = await did_wallet_3.load_attest_files_for_recovery_spend([attest_data])
        await did_wallet_3.recovery_spend(coin, new_ph, info, pubkey, message_spend_bundle)
        spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
            did_wallet_3.id()
        )
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(15, did_wallet_3.get_confirmed_balance, 101)
        await time_out_assert(15, did_wallet_3.get_unconfirmed_balance, 101)
        # DID Wallet 1 recovery spends into DID Wallet 4
        backup_data = did_wallet.create_backup()
        async with wallet_node_2.wallet_state_manager.lock:
            did_wallet_4 = await DIDWallet.create_new_did_wallet_from_recovery(
                wallet_node_2.wallet_state_manager,
                wallet2,
                backup_data,
            )
        coins = await did_wallet.select_coins(1)
        coin = coins.pop()
        new_ph = await did_wallet_4.get_new_did_inner_hash()
        pubkey = (
            await did_wallet_4.wallet_state_manager.get_unused_derivation_record(did_wallet_4.wallet_info.id)
        ).pubkey
        # Key point of the test: the attestment comes from DID 3, which was itself created by recovery.
        attest1 = (await did_wallet_3.create_attestment(coin.name(), new_ph, pubkey))[1]
        spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
            did_wallet_3.id()
        )
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
        (
            test_info_list,
            test_message_spend_bundle,
        ) = await did_wallet_4.load_attest_files_for_recovery_spend([attest1])
        await did_wallet_4.recovery_spend(coin, new_ph, test_info_list, pubkey, test_message_spend_bundle)
        spend_bundle_list = await wallet_node_2.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
            did_wallet_4.id()
        )
        spend_bundle = spend_bundle_list[0].spend_bundle
        await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        # Funds moved from DID 1 into the recovered DID 4.
        await time_out_assert(15, did_wallet_4.get_confirmed_balance, 101)
        await time_out_assert(15, did_wallet_4.get_unconfirmed_balance, 101)
        await time_out_assert(15, did_wallet.get_confirmed_balance, 0)
        await time_out_assert(15, did_wallet.get_unconfirmed_balance, 0)
@pytest.mark.parametrize(
"with_recovery",
[True, False],
)
@pytest.mark.parametrize(
"trusted",
[True, False],
)
@pytest.mark.asyncio
async def test_did_transfer(self, two_wallet_nodes, with_recovery, trusted):
num_blocks = 5
fee = uint64(1000)
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
wallet2 = wallet_node_2.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
if trusted:
wallet_node.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
wallet_node_2.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds = sum(
[
calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
for i in range(1, num_blocks - 1)
]
)
await time_out_assert(15, wallet.get_confirmed_balance, funds)
async with wallet_node.wallet_state_manager.lock:
did_wallet_1: DIDWallet = await DIDWallet.create_new_did_wallet(
wallet_node.wallet_state_manager,
wallet,
uint64(101),
[bytes(ph)],
uint64(1),
{"Twitter": "Test", "GitHub": "测试"},
fee=fee,
)
assert did_wallet_1.wallet_info.name == "Profile 1"
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_1.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, did_wallet_1.get_confirmed_balance, 101)
await time_out_assert(15, did_wallet_1.get_unconfirmed_balance, 101)
await time_out_assert(15, wallet.get_confirmed_balance, 7999999998899)
await time_out_assert(15, wallet.get_unconfirmed_balance, 7999999998899)
# Transfer DID
new_puzhash = await wallet2.get_new_puzzlehash()
await did_wallet_1.transfer_did(new_puzhash, fee, with_recovery)
spend_bundle_list = await wallet_node.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
did_wallet_1.id()
)
spend_bundle = spend_bundle_list[0].spend_bundle
await time_out_assert_not_none(5, full_node_api.full_node.mempool_manager.get_spendbundle, spend_bundle.name())
ph2 = await wallet2.get_new_puzzlehash()
for i in range(1, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph2))
await time_out_assert(15, wallet.get_confirmed_balance, 7999999997899)
await time_out_assert(15, wallet.get_unconfirmed_balance, 7999999997899)
# Check if the DID wallet is created in the wallet2
async def num_wallets() -> int:
return len(await wallet_node_2.wallet_state_manager.get_all_wallet_info_entries())
await time_out_assert(30, num_wallets, 2)
# Get the new DID wallet
did_wallets = list(
filter(
lambda w: (w.type == WalletType.DECENTRALIZED_ID),
await wallet_node_2.wallet_state_manager.get_all_wallet_info_entries(),
)
)
did_wallet_2: Optional[DIDWallet] = wallet_node_2.wallet_state_manager.wallets[did_wallets[0].id]
assert did_wallet_1.did_info.origin_coin == did_wallet_2.did_info.origin_coin
if with_recovery:
assert did_wallet_1.did_info.backup_ids[0] == did_wallet_2.did_info.backup_ids[0]
assert did_wallet_1.did_info.num_of_backup_ids_needed == did_wallet_2.did_info.num_of_backup_ids_needed
metadata = json.loads(did_wallet_2.did_info.metadata)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/did_wallet/config.py | tests/wallet/did_wallet/config.py | # flake8: noqa: E501
from __future__ import annotations
# CI configuration knobs consumed by the test runner for this directory.
job_timeout = 50  # per-job timeout — units not shown here; TODO confirm against the CI workflow
checkout_blocks_and_plots = True  # presumably pre-fetches cached test blocks/plots; verify against CI scripts
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/did_wallet/__init__.py | tests/wallet/did_wallet/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/sync/test_wallet_sync.py | tests/wallet/sync/test_wallet_sync.py | from __future__ import annotations
from typing import List, Optional, Set
import pytest
from aiosqlite import Error as AIOSqliteError
from colorlog import getLogger
from flax.consensus.block_record import BlockRecord
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.full_node.full_node_api import FullNodeAPI
from flax.full_node.mempool_manager import MempoolManager
from flax.full_node.weight_proof import WeightProofHandler
from flax.protocols import full_node_protocol, wallet_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.shared_protocol import Capability
from flax.protocols.wallet_protocol import RequestAdditions, RespondAdditions, RespondBlockHeaders, SendTransaction
from flax.server.outbound_message import Message
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.time_out_assert import time_out_assert, time_out_assert_not_none
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.block_cache import BlockCache
from flax.util.hash import std_hash
from flax.util.ints import uint16, uint32, uint64
from flax.wallet.nft_wallet.nft_wallet import NFTWallet
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.util.compute_memos import compute_memos
from flax.wallet.util.wallet_sync_utils import PeerRequestException
from flax.wallet.util.wallet_types import AmountWithPuzzlehash
from flax.wallet.wallet_coin_record import WalletCoinRecord
from flax.wallet.wallet_weight_proof_handler import get_wp_fork_point
from tests.connection_utils import disconnect_all, disconnect_all_and_reconnect
from tests.setup_nodes import test_constants
from tests.util.wallet_is_synced import wallet_is_synced
from tests.weight_proof.test_weight_proof import load_blocks_dont_validate
async def wallet_height_at_least(wallet_node, h):
    """Return True once the wallet's finished-sync height equals *h*.

    NOTE(review): despite the name, this checks exact equality, not >= —
    callers poll it via time_out_assert with an exact target height, so the
    comparison is preserved as-is.

    :param wallet_node: node whose wallet_state_manager.blockchain is queried
    :param h: target height to compare against
    """
    height = await wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to()
    # Return the comparison directly instead of the verbose if/True/False form.
    return height == h
async def get_nft_count(wallet: NFTWallet) -> int:
    """Awaitable pass-through returning how many NFTs *wallet* holds."""
    count = await wallet.get_nft_count()
    return count
# Module-level logger (colorlog's getLogger) shared by all tests in this file.
log = getLogger(__name__)
class TestWalletSync:
@pytest.mark.asyncio
async def test_request_block_headers(self, wallet_node, default_1000_blocks):
# Tests the edge case of receiving funds right before the recent blocks in weight proof
full_node_api: FullNodeAPI
full_node_api, wallet_node, full_node_server, wallet_server, bt = wallet_node
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
for block in default_1000_blocks[:100]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(10), uint32(15), False)
)
assert msg.type == ProtocolMessageTypes.respond_block_headers.value
res_block_headers = RespondBlockHeaders.from_bytes(msg.data)
bh = res_block_headers.header_blocks
assert len(bh) == 6
assert [x.reward_chain_block.height for x in default_1000_blocks[10:16]] == [
x.reward_chain_block.height for x in bh
]
assert [x.foliage for x in default_1000_blocks[10:16]] == [x.foliage for x in bh]
assert [x.transactions_filter for x in bh] == [b"\x00"] * 6
num_blocks = 20
new_blocks = bt.get_consecutive_blocks(
num_blocks, block_list_input=default_1000_blocks, pool_reward_puzzle_hash=ph
)
for i in range(0, len(new_blocks)):
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[i]))
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(110), uint32(115), True)
)
res_block_headers = RespondBlockHeaders.from_bytes(msg.data)
bh = res_block_headers.header_blocks
assert len(bh) == 6
# @pytest.mark.parametrize(
# "test_case",
# [(1000000, 10000010, False, ProtocolMessageTypes.reject_block_headers)],
# [(80, 99, False, ProtocolMessageTypes.respond_block_headers)],
# [(10, 8, False, None)],
# )
@pytest.mark.asyncio
async def test_request_block_headers_rejected(self, wallet_node, default_1000_blocks):
# Tests the edge case of receiving funds right before the recent blocks in weight proof
full_node_api: FullNodeAPI
full_node_api, wallet_node, full_node_server, wallet_server, bt = wallet_node
# start_height, end_height, return_filter, expected_res = test_case
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(1000000), uint32(1000010), False)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
for block in default_1000_blocks[:150]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(80), uint32(99), False)
)
assert msg.type == ProtocolMessageTypes.respond_block_headers.value
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(10), uint32(8), False)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(10), uint32(8), True)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
# test for 128 blocks to fetch at once limit
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(10), uint32(140), True)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(90), uint32(160), False)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
msg = await full_node_api.request_block_headers(
wallet_protocol.RequestBlockHeaders(uint32(90), uint32(160), True)
)
assert msg.type == ProtocolMessageTypes.reject_block_headers.value
    @pytest.mark.parametrize(
        "two_wallet_nodes",
        [
            dict(
                disable_capabilities=[Capability.BLOCK_HEADERS],
            ),
            dict(
                disable_capabilities=[Capability.BASE],
            ),
        ],
        indirect=True,
    )
    @pytest.mark.asyncio
    async def test_basic_sync_wallet(self, two_wallet_nodes, default_400_blocks, self_hostname):
        """Both a trusted and an untrusted wallet sync 400 blocks, follow a
        25-block-deep reorg, and report unsynced once disconnected."""
        full_nodes, wallets, bt = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        # Trusted node sync
        wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        # Untrusted node sync
        wallets[1][0].config["trusted_peers"] = {}
        for block in default_400_blocks:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for wallet_node, wallet_server in wallets:
            await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        for wallet_node, wallet_server in wallets:
            await time_out_assert(100, wallet_height_at_least, True, wallet_node, len(default_400_blocks) - 1)
        # Tests a reorg with the wallet: fork off 5 blocks below the tip,
        # extending with 29 blocks plus one guaranteed transaction block.
        num_blocks = 30
        blocks_reorg = bt.get_consecutive_blocks(num_blocks - 1, block_list_input=default_400_blocks[:-5])
        blocks_reorg = bt.get_consecutive_blocks(1, blocks_reorg, guarantee_transaction_block=True, current_time=True)
        for i in range(1, len(blocks_reorg)):
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks_reorg[i]))
        for wallet_node, wallet_server in wallets:
            await disconnect_all_and_reconnect(wallet_server, full_node_server, self_hostname)
        for wallet_node, wallet_server in wallets:
            # New height = 400 + 30 - 5 reorged-away - 1 (0-indexed).
            await time_out_assert(
                100, wallet_height_at_least, True, wallet_node, len(default_400_blocks) + num_blocks - 5 - 1
            )
            await time_out_assert(20, wallet_node.wallet_state_manager.synced)
            # With no peers connected the wallet must report unsynced.
            await disconnect_all(wallet_server)
            assert not (await wallet_node.wallet_state_manager.synced())
    @pytest.mark.parametrize(
        "two_wallet_nodes",
        [
            dict(
                disable_capabilities=[Capability.BLOCK_HEADERS],
            ),
            dict(
                disable_capabilities=[Capability.BASE],
            ),
        ],
        indirect=True,
    )
    @pytest.mark.asyncio
    async def test_almost_recent(self, two_wallet_nodes, default_400_blocks, self_hostname):
        # Tests the edge case of receiving funds right before the recent blocks in weight proof
        full_nodes, wallets, bt = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        # Trusted node sync
        wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        # Untrusted node sync
        wallets[1][0].config["trusted_peers"] = {}
        base_num_blocks = 400
        for block in default_400_blocks:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        all_blocks = default_400_blocks
        both_phs = []
        for wallet_node, wallet_server in wallets:
            wallet = wallet_node.wallet_state_manager.main_wallet
            both_phs.append(await wallet.get_new_puzzlehash())
        for i in range(20):
            # Alternate pool rewards between the two wallets' puzzle hashes (10 each).
            ph = both_phs[i % 2]
            all_blocks = bt.get_consecutive_blocks(1, block_list_input=all_blocks, pool_reward_puzzle_hash=ph)
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(all_blocks[-1]))
        # Push the funded blocks just outside the weight proof's recent window.
        new_blocks = bt.get_consecutive_blocks(
            test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 10, block_list_input=all_blocks
        )
        for i in range(base_num_blocks + 20, len(new_blocks)):
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(new_blocks[i]))
        for wallet_node, wallet_server in wallets:
            wallet = wallet_node.wallet_state_manager.main_wallet
            await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
            # Each wallet should see its 10 pool rewards despite them predating the recent window.
            await time_out_assert(30, wallet.get_confirmed_balance, 10 * calculate_pool_reward(uint32(1000)))
@pytest.mark.asyncio
async def test_backtrack_sync_wallet(self, two_wallet_nodes, default_400_blocks, self_hostname):
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.full_node.server
# Trusted node sync
wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
# Untrusted node sync
wallets[1][0].config["trusted_peers"] = {}
for block in default_400_blocks[:20]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
for wallet_node, wallet_server in wallets:
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
for wallet_node, wallet_server in wallets:
await time_out_assert(100, wallet_height_at_least, True, wallet_node, 19)
# Tests a reorg with the wallet
@pytest.mark.asyncio
async def test_short_batch_sync_wallet(self, two_wallet_nodes, default_400_blocks, self_hostname):
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.full_node.server
# Trusted node sync
wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
# Untrusted node sync
wallets[1][0].config["trusted_peers"] = {}
for block in default_400_blocks[:200]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
for wallet_node, wallet_server in wallets:
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
for wallet_node, wallet_server in wallets:
await time_out_assert(100, wallet_height_at_least, True, wallet_node, 199)
    @pytest.mark.asyncio
    async def test_long_sync_wallet(self, two_wallet_nodes, default_1000_blocks, default_400_blocks, self_hostname):
        """Sync 400 blocks, then follow a long reorg onto the 1000-block chain,
        then a short 5-deep reorg, for both trusted and untrusted wallets."""
        full_nodes, wallets, bt = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        # Trusted node sync
        wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        # Untrusted node sync
        wallets[1][0].config["trusted_peers"] = {}
        for block in default_400_blocks:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for wallet_node, wallet_server in wallets:
            await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        for wallet_node, wallet_server in wallets:
            await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_400_blocks) - 1)
        # Tests a long reorg: replace the whole chain with the 1000-block one.
        for block in default_1000_blocks:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for wallet_node, wallet_server in wallets:
            await disconnect_all_and_reconnect(wallet_server, full_node_server, self_hostname)
            log.info(
                f"wallet node height is {await wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to()}"
            )
            await time_out_assert(600, wallet_height_at_least, True, wallet_node, len(default_1000_blocks) - 1)
            await disconnect_all_and_reconnect(wallet_server, full_node_server, self_hostname)
        # Tests a short reorg: fork 5 below the tip and extend by 30.
        num_blocks = 30
        blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_1000_blocks[:-5])
        for i in range(len(blocks_reorg) - num_blocks - 10, len(blocks_reorg)):
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks_reorg[i]))
        for wallet_node, wallet_server in wallets:
            # New height = 1000 + 30 - 5 reorged-away - 1 (0-indexed).
            await time_out_assert(
                600, wallet_height_at_least, True, wallet_node, len(default_1000_blocks) + num_blocks - 5 - 1
            )
    @pytest.mark.asyncio
    async def test_wallet_reorg_sync(self, two_wallet_nodes, default_400_blocks, self_hostname):
        """Rewards farmed after block 400 are rolled back by a reorg that forks
        below them: both wallets' balances and transaction counts drop to 0."""
        num_blocks = 5
        full_nodes, wallets, bt = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        # Trusted node sync
        wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
        # Untrusted node sync
        wallets[1][0].config["trusted_peers"] = {}
        phs = []
        for wallet_node, wallet_server in wallets:
            wallet = wallet_node.wallet_state_manager.main_wallet
            phs.append(await wallet.get_new_puzzlehash())
            await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        # Insert 400 blocks
        for block in default_400_blocks:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Farm few more with reward (4 to wallet 0, 5 to wallet 1; each pays the previous block's farmer too)
        for i in range(0, num_blocks - 1):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(phs[0]))
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(phs[1]))
        # Confirm we have the funds
        funds = sum(
            [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
        )
        async def get_tx_count(wsm, wallet_id):
            # Helper: number of transactions recorded for wallet_id.
            txs = await wsm.get_all_transactions(wallet_id)
            return len(txs)
        for wallet_node, wallet_server in wallets:
            wallet = wallet_node.wallet_state_manager.main_wallet
            await time_out_assert(60, wallet.get_confirmed_balance, funds)
            # 2 transactions (pool + farmer reward) per funded block.
            await time_out_assert(60, get_tx_count, 2 * (num_blocks - 1), wallet_node.wallet_state_manager, 1)
        # Reorg blocks that carry reward: fork below the funded blocks so the rewards vanish.
        num_blocks = 30
        blocks_reorg = bt.get_consecutive_blocks(num_blocks, block_list_input=default_400_blocks[:-5])
        for block in blocks_reorg[-30:]:
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for wallet_node, wallet_server in wallets:
            wallet = wallet_node.wallet_state_manager.main_wallet
            await time_out_assert(60, get_tx_count, 0, wallet_node.wallet_state_manager, 1)
            await time_out_assert(60, wallet.get_confirmed_balance, 0)
@pytest.mark.asyncio
async def test_wallet_reorg_get_coinbase(self, two_wallet_nodes, default_400_blocks, self_hostname):
full_nodes, wallets, bt = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.full_node.server
# Trusted node sync
wallets[0][0].config["trusted_peers"] = {full_node_server.node_id.hex(): full_node_server.node_id.hex()}
# Untrusted node sync
wallets[1][0].config["trusted_peers"] = {}
for wallet_node, wallet_server in wallets:
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
# Insert 400 blocks
for block in default_400_blocks:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
# Reorg blocks that carry reward
num_blocks_reorg = 30
blocks_reorg = bt.get_consecutive_blocks(num_blocks_reorg, block_list_input=default_400_blocks[:-5])
for block in blocks_reorg[:-5]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
async def get_tx_count(wsm, wallet_id):
txs = await wsm.get_all_transactions(wallet_id)
return len(txs)
for wallet_node, wallet_server in wallets:
await time_out_assert(30, get_tx_count, 0, wallet_node.wallet_state_manager, 1)
await time_out_assert(30, wallet_is_synced, True, wallet_node, full_node_api)
num_blocks_reorg_1 = 40
all_blocks_reorg_2 = blocks_reorg[:-30]
for wallet_node, wallet_server in wallets:
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
all_blocks_reorg_2 = bt.get_consecutive_blocks(
1, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph, block_list_input=all_blocks_reorg_2
)
blocks_reorg_2 = bt.get_consecutive_blocks(num_blocks_reorg_1, block_list_input=all_blocks_reorg_2)
for block in blocks_reorg_2[-44:]:
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(block))
for wallet_node, wallet_server in wallets:
await disconnect_all_and_reconnect(wallet_server, full_node_server, self_hostname)
# Confirm we have the funds
funds = calculate_pool_reward(uint32(len(all_blocks_reorg_2))) + calculate_base_farmer_reward(
uint32(len(all_blocks_reorg_2))
)
for wallet_node, wallet_server in wallets:
wallet = wallet_node.wallet_state_manager.main_wallet
await time_out_assert(60, wallet_is_synced, True, wallet_node, full_node_api)
await time_out_assert(20, get_tx_count, 2, wallet_node.wallet_state_manager, 1)
await time_out_assert(20, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_request_additions_errors(self, wallet_node_sim_and_wallet, self_hostname):
full_nodes, wallets, _ = wallet_node_sim_and_wallet
wallet_node, wallet_server = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
full_node_api = full_nodes[0]
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None)
for i in range(2):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
last_block: Optional[BlockRecord] = full_node_api.full_node.blockchain.get_peak()
assert last_block is not None
# Invalid height
with pytest.raises(ValueError):
await full_node_api.request_additions(RequestAdditions(uint64(100), last_block.header_hash, [ph]))
# Invalid header hash
with pytest.raises(ValueError):
await full_node_api.request_additions(RequestAdditions(last_block.height, std_hash(b""), [ph]))
# No results
res1: Optional[Message] = await full_node_api.request_additions(
RequestAdditions(last_block.height, last_block.header_hash, [std_hash(b"")])
)
assert res1 is not None
response = RespondAdditions.from_bytes(res1.data)
assert response.height == last_block.height
assert response.header_hash == last_block.header_hash
assert len(response.proofs) == 1
assert len(response.coins) == 1
assert response.proofs[0][0] == std_hash(b"")
assert response.proofs[0][1] is not None
assert response.proofs[0][2] is None
@pytest.mark.asyncio
async def test_request_additions_success(self, wallet_node_sim_and_wallet, self_hostname):
full_nodes, wallets, _ = wallet_node_sim_and_wallet
wallet_node, wallet_server = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
full_node_api = full_nodes[0]
await wallet_server.start_client(PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None)
for i in range(2):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
payees: List[AmountWithPuzzlehash] = []
for i in range(10):
payee_ph = await wallet.get_new_puzzlehash()
payees.append({"amount": uint64(i + 100), "puzzlehash": payee_ph, "memos": []})
payees.append({"amount": uint64(i + 200), "puzzlehash": payee_ph, "memos": []})
tx: TransactionRecord = await wallet.generate_signed_transaction(uint64(0), ph, primaries=payees)
await full_node_api.send_transaction(SendTransaction(tx.spend_bundle))
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
last_block: Optional[BlockRecord] = full_node_api.full_node.blockchain.get_peak()
assert last_block is not None
await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
res2: Optional[Message] = await full_node_api.request_additions(
RequestAdditions(
last_block.height,
None,
[payees[0]["puzzlehash"], payees[2]["puzzlehash"], std_hash(b"1")],
)
)
assert res2 is not None
response = RespondAdditions.from_bytes(res2.data)
assert response.height == last_block.height
assert response.header_hash == last_block.header_hash
assert len(response.proofs) == 3
# First two PHs are included
for i in range(2):
assert response.proofs[i][0] in {payees[j]["puzzlehash"] for j in (0, 2)}
assert response.proofs[i][1] is not None
assert response.proofs[i][2] is not None
# Third PH is not included
assert response.proofs[2][2] is None
coin_list_dict = {p: coin_list for p, coin_list in response.coins}
assert len(coin_list_dict) == 3
for p, coin_list in coin_list_dict.items():
if p == std_hash(b"1"):
# this is the one that is not included
assert len(coin_list) == 0
else:
for coin in coin_list:
assert coin.puzzle_hash == p
# The other ones are included
assert len(coin_list) == 2
# None for puzzle hashes returns all coins and no proofs
res3: Optional[Message] = await full_node_api.request_additions(
RequestAdditions(last_block.height, last_block.header_hash, None)
)
assert res3 is not None
response = RespondAdditions.from_bytes(res3.data)
assert response.height == last_block.height
assert response.header_hash == last_block.header_hash
assert response.proofs is None
assert len(response.coins) == 12
assert sum([len(c_list) for _, c_list in response.coins]) == 24
# [] for puzzle hashes returns nothing
res4: Optional[Message] = await full_node_api.request_additions(
RequestAdditions(last_block.height, last_block.header_hash, [])
)
assert res4 is not None
response = RespondAdditions.from_bytes(res4.data)
assert response.proofs == []
assert len(response.coins) == 0
@pytest.mark.asyncio
async def test_get_wp_fork_point(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp1 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(9000)]].header_hash)
wp2 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(9030)]].header_hash)
wp3 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(7500)]].header_hash)
wp4 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(8700)]].header_hash)
wp5 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(9700)]].header_hash)
wp6 = await wpf.get_proof_of_weight(header_cache[height_to_hash[uint32(9010)]].header_hash)
fork12 = get_wp_fork_point(test_constants, wp1, wp2)
fork13 = get_wp_fork_point(test_constants, wp3, wp1)
fork14 = get_wp_fork_point(test_constants, wp4, wp1)
fork23 = get_wp_fork_point(test_constants, wp3, wp2)
fork24 = get_wp_fork_point(test_constants, wp4, wp2)
fork34 = get_wp_fork_point(test_constants, wp3, wp4)
fork45 = get_wp_fork_point(test_constants, wp4, wp5)
fork16 = get_wp_fork_point(test_constants, wp1, wp6)
# overlap between recent chain in wps, fork point is the tip of the shorter wp
assert fork12 == wp1.recent_chain_data[-1].height
assert fork16 == wp1.recent_chain_data[-1].height
# if there is an overlap between the recent chains we can find the exact fork point
# if not we should get the latest block with a sub epoch summary that exists in both wp's
# this can happen in fork24 and fork14 since they are not very far and also not very close
if wp2.recent_chain_data[0].height > wp4.recent_chain_data[-1].height:
assert fork24 in summaries.keys()
assert fork24 < wp4.recent_chain_data[-1].height
else:
assert fork24 == wp4.recent_chain_data[-1].height
if wp1.recent_chain_data[0].height > wp4.recent_chain_data[-1].height:
assert fork14 in summaries.keys()
assert fork14 < wp4.recent_chain_data[-1].height
else:
assert fork14 == wp4.recent_chain_data[-1].height
# no overlap between recent chain in wps, fork point
# is the latest block with a sub epoch summary that exists in both wp's
assert fork13 in summaries.keys()
assert fork13 < wp3.recent_chain_data[-1].height
assert fork23 in summaries.keys()
assert fork23 < wp3.recent_chain_data[-1].height
assert fork34 in summaries.keys()
assert fork23 < wp3.recent_chain_data[-1].height
assert fork45 in summaries.keys()
assert fork45 < wp4.recent_chain_data[-1].height
"""
This tests that a wallet filters out the dust properly.
It runs in seven phases:
1. Create a single dust coin.
Typically (though there are edge cases), this coin will not be filtered.
2. Create dust coins until the filter threshold has been reached.
At this point, none of the dust should be filtered.
3. Create 10 coins that are exactly the size of the filter threshold.
These should not be filtered because they are not dust.
4. Create one more dust coin. This coin should be filtered.
5. Create 5 coins below the threshold and 5 at or above.
Those below the threshold should get filtered, and those above should not.
6. Clear all coins from the dust wallet.
Send to the dust wallet "spam_filter_after_n_txs" coins that are equal in value to "xfx_spam_amount".
Send 1 mojo from the dust wallet. The dust wallet should receive a change coin valued at "xfx_spam_amount-1".
7: Create an NFT wallet for the farmer wallet, and generate an NFT in that wallet.
Create an NFT wallet for the dust wallet.
Send the NFT to the dust wallet. The NFT should not be filtered.
"""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"spam_filter_after_n_txs, xfx_spam_amount, dust_value",
[
# In the following tests, the filter is run right away:
(0, 1, 1), # nothing is filtered
# In the following tests, 1 coin will be created in part 1, and 9 in part 2:
(10, 10000000000, 1), # everything is dust
(10, 10000000000, 10000000000), # max dust threshold, dust is same size so not filtered
# Test with more coins
(100, 1000000, 1), # default filter level (1m mojos), default dust size (1)
],
)
async def test_dusted_wallet(
self,
self_hostname,
two_wallet_nodes_custom_spam_filtering,
spam_filter_after_n_txs,
xfx_spam_amount,
dust_value,
):
full_nodes, wallets, _ = two_wallet_nodes_custom_spam_filtering
farm_wallet_node, farm_wallet_server = wallets[0]
dust_wallet_node, dust_wallet_server = wallets[1]
# Create two wallets, one for farming (not used for testing), and one for testing dust.
farm_wallet = farm_wallet_node.wallet_state_manager.main_wallet
dust_wallet = dust_wallet_node.wallet_state_manager.main_wallet
ph = await farm_wallet.get_new_puzzlehash()
full_node_api = full_nodes[0]
# It's also possible to obtain the current settings for spam_filter_after_n_txs and xfx_spam_amount
# spam_filter_after_n_txs = wallets[0][0].config["spam_filter_after_n_txs"]
# xfx_spam_amount = wallets[0][0].config["xfx_spam_amount"]
# dust_value=1
# Verify legal values for the settings to be tested
# If spam_filter_after_n_txs is greater than 250, this test will take a long time to run.
# Current max value for xfx_spam_amount is 0.01 XFX.
# If needed, this could be increased but we would need to farm more blocks.
# The max dust_value could be increased, but would require farming more blocks.
assert spam_filter_after_n_txs >= 0
assert spam_filter_after_n_txs <= 250
assert xfx_spam_amount >= 1
assert xfx_spam_amount <= 10000000000
assert dust_value >= 1
assert dust_value <= 10000000000
# start both clients
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/sync/config.py | tests/wallet/sync/config.py | from __future__ import annotations
job_timeout = 60
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/wallet/sync/__init__.py | tests/wallet/sync/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/test_scan.py | tests/generator/test_scan.py | from unittest import TestCase
from flax.full_node.bundle_tools import (
match_standard_transaction_at_any_index,
match_standard_transaction_exactly_and_return_pubkey,
)
from flax.util.byte_types import hexstr_to_bytes
gen1 = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080" # noqa
)
EXPECTED_START = 46
PUBKEY_PLUS_SUFFIX = 48 + 4 + 1
EXPECTED_END = 337 - PUBKEY_PLUS_SUFFIX
STANDARD_TRANSACTION_1 = hexstr_to_bytes(
"""ff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b0aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaff018080""" # noqa
)
STANDARD_TRANSACTION_2 = hexstr_to_bytes(
"""ff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b0bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbff018080""" # noqa
)
class TestScan(TestCase):
def test_match_generator(self):
# match_standard_transaction_at_any_index(generator_body: bytes) -> (int,int):
m = match_standard_transaction_at_any_index(gen1)
assert m == (EXPECTED_START, EXPECTED_END)
m = match_standard_transaction_at_any_index(b"\xff" + gen1 + b"\x80")
assert m == (EXPECTED_START + 1, EXPECTED_END + 1)
m = match_standard_transaction_at_any_index(gen1[47:])
assert m is None
def test_match_transaction(self):
# match_standard_transaction_exactly_and_return_pubkey(transaction: bytes) -> Optional[bytes]:
m = match_standard_transaction_exactly_and_return_pubkey(STANDARD_TRANSACTION_1)
assert m == hexstr_to_bytes(
"b0aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
)
m = match_standard_transaction_exactly_and_return_pubkey(STANDARD_TRANSACTION_1 + b"\xfa")
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(b"\xba" + STANDARD_TRANSACTION_1 + b"\xfa")
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(b"\xba" + STANDARD_TRANSACTION_1)
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(
gen1[EXPECTED_START : EXPECTED_END + PUBKEY_PLUS_SUFFIX]
)
assert m == hexstr_to_bytes(
"b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3"
)
m = match_standard_transaction_exactly_and_return_pubkey(gen1)
assert m is None
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/test_generator_types.py | tests/generator/test_generator_types.py | from typing import Dict
from unittest import TestCase
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.generator_types import GeneratorBlockCacheInterface
from flax.full_node.generator import create_block_generator, create_generator_args
from flax.util.ints import uint32
gen0 = SerializedProgram.from_bytes(
bytes.fromhex(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080" # noqa
)
)
gen1 = SerializedProgram.from_bytes(
bytes.fromhex(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080" # noqa
)
)
gen2 = SerializedProgram.from_bytes(
bytes.fromhex(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080" # noqa
)
)
class BlockDict(GeneratorBlockCacheInterface):
def __init__(self, d: Dict[uint32, SerializedProgram]):
self.d = d
def get_generator_for_block_height(self, index: uint32) -> SerializedProgram:
return self.d[index]
class TestGeneratorTypes(TestCase):
def test_make_generator(self):
block_dict = BlockDict({uint32(1): gen1})
gen = create_block_generator(gen2, [uint32(1)], block_dict)
print(gen)
def test_make_generator_args(self):
generator_ref_list = [gen1]
gen_args = create_generator_args(generator_ref_list)
gen_args_as_program = Program.from_bytes(bytes(gen_args))
# First Argument to the block generator is the first template generator
arg2 = gen_args_as_program.first().first()
print(arg2)
assert arg2 == bytes(gen1)
# It's not a list anymore.
# TODO: Test the first three arg positions passed through here.
# def test_generator_arg_is_list(self):
# generator_ref_list = [Program.to(b"gen1"), Program.to(b"gen2")]
# gen_args = create_generator_args(generator_ref_list)
# gen_args_as_program = Program.from_bytes(bytes(gen_args))
# arg2 = gen_args_as_program.rest().first()
# assert arg2 == binutils.assemble("('gen1' 'gen2')")
# print(arg2)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/test_compression.py | tests/generator/test_compression.py | # flake8: noqa: F501
from dataclasses import dataclass
from typing import List, Any
from unittest import TestCase
from flax.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_spend_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
spend_bundle_to_serialized_coin_spend_entry_list,
)
from flax.full_node.generator import run_generator_unsafe, create_generator_args
from flax.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from flax.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from flax.types.generator_types import BlockGenerator, CompressorArg
from flax.types.spend_bundle import SpendBundle
from flax.util.byte_types import hexstr_to_bytes
from flax.util.ints import uint32
from flax.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm import SExp
import io
from clvm.serialize import sexp_from_stream
from clvm_tools import binutils
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="flax.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("flaxlisp_deserialisation.clvm", package_or_requirement="flax.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="flax.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_spend_entry.clvm", package_or_requirement="flax.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="flax.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="flax.wallet.puzzles")
TEST_MULTIPLE = load_clvm("test_multiple_generator_input_arguments.clvm", package_or_requirement="flax.wallet.puzzles")
Nil = Program.from_bytes(b"\x80")
original_generator = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
) # noqa
gen1 = b"aaaaaaaaaa" + original_generator
gen2 = b"bb" + original_generator
FAKE_BLOCK_HEIGHT1 = uint32(100)
FAKE_BLOCK_HEIGHT2 = uint32(200)
@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int
def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
"""
Decompress a transaction by referencing bytes from multiple input generator references
"""
compressed_cse_list = compressed_coin_spend_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)
# TODO aqk: Improve ergonomics of CompressorArg -> GeneratorArg conversion
generator_list = [
args.arg[0].generator,
args.arg[1].generator,
]
generator_heights = [
FAKE_BLOCK_HEIGHT1,
FAKE_BLOCK_HEIGHT2,
]
return BlockGenerator(program, generator_list, generator_heights)
def spend_bundle_to_coin_spend_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_spend in bundle.coin_spends:
entry = [
coin_spend.coin.parent_coin_info,
sexp_from_stream(io.BytesIO(bytes(coin_spend.puzzle_reveal)), SExp.to),
coin_spend.coin.amount,
sexp_from_stream(io.BytesIO(bytes(coin_spend.solution)), SExp.to),
]
r.append(entry)
return r
class TestCompression(TestCase):
def test_spend_bundle_suitable(self):
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)
def test_compress_spend_bundle(self):
pass
def test_multiple_input_gen_refs(self):
start1, end1 = match_standard_transaction_at_any_index(gen1)
start2, end2 = match_standard_transaction_at_any_index(gen2)
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)
prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator_unsafe(multi_gen, INFINITE_COST)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)
def test_compressed_block_results(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator_unsafe(c, INFINITE_COST)
cost_s, result_s = run_generator_unsafe(s, INFINITE_COST)
print(result_c)
assert result_c is not None
assert result_s is not None
assert result_c == result_s
def test_get_removals_for_single_coin(self):
sb: SpendBundle = make_spend_bundle(1)
start, end = match_standard_transaction_at_any_index(original_generator)
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
removal = sb.coin_spends[0].coin
error, puzzle, solution = get_puzzle_and_solution_for_coin(c, removal)
assert error is None
assert bytes(puzzle) == bytes(sb.coin_spends[0].puzzle_reveal)
assert bytes(solution) == bytes(sb.coin_spends[0].solution)
# Test non compressed generator as well
s = simple_solution_generator(sb)
error, puzzle, solution = get_puzzle_and_solution_for_coin(s, removal)
assert error is None
assert bytes(puzzle) == bytes(sb.coin_spends[0].puzzle_reveal)
assert bytes(solution) == bytes(sb.coin_spends[0].solution)
def test_spend_byndle_coin_spend(self):
for i in range(0, 10):
sb: SpendBundle = make_spend_bundle(i)
cs1 = SExp.to(spend_bundle_to_coin_spend_entry_list(sb)).as_bin() # pylint: disable=E1101
cs2 = spend_bundle_to_serialized_coin_spend_entry_list(sb)
assert cs1 == cs2
class TestDecompression(TestCase):
def __init__(self, *args, **kwargs):
super(TestDecompression, self).__init__(*args, **kwargs)
self.maxDiff = None
def test_deserialization(self):
self.maxDiff = None
cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
assert out == Program.to("hello")
def test_deserialization_as_argument(self):
self.maxDiff = None
cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
)
print(bytes(Program.to("hello")))
print()
print(out)
assert out == Program.to("hello")
def test_decompress_puzzle(self):
cost, out = DECOMPRESS_PUZZLE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
)
print()
print(out)
# An empty CSE is invalid. (An empty CSE list may be okay)
# def test_decompress_empty_cse(self):
# cse0 = binutils.assemble("()")
# cost, out = DECOMPRESS_CSE.run_with_cost(INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0])
# print()
# print(out)
def test_decompress_cse(self):
"""Decompress a single CSE / CoinSpendEntry"""
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
cost, out = DECOMPRESS_CSE.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
)
print()
print(out)
def test_decompress_cse_with_prefix(self):
cse0 = binutils.assemble(
"((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
) # noqa
start = 2 + 44
end = start + 238
prefix = original_generator[start:end]
# (deserialize decompress_puzzle puzzle_prefix cse)
cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
)
print()
print(out)
def test_block_program_zero(self):
"Decompress a list of CSEs"
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_spend_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
cost, out = DECOMPRESS_BLOCK.run_with_cost(
INFINITE_COST,
[
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
start,
Program.to(end),
cse2,
DESERIALIZE_MOD,
[bytes(original_generator)],
],
)
print()
print(out)
def test_block_program_zero_with_curry(self):
self.maxDiff = None
cse1 = binutils.assemble(
"(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
) # noqa
cse2 = binutils.assemble(
"""
(
((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
(0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
(() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
)
((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
(0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
(() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
) # noqa
start = 2 + 44
end = start + 238
# (mod (decompress_puzzle decompress_coin_spend_entry start end compressed_cses deserialize generator_list reserved_arg)
# cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, [bytes(original_generator)]])
print()
print(p)
print(out)
p_with_cses = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end), cse2, DESERIALIZE_MOD
)
generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
print()
print(p_with_cses)
print(out)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/config.py | tests/generator/config.py | from __future__ import annotations
parallel = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/__init__.py | tests/generator/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/test_list_to_batches.py | tests/generator/test_list_to_batches.py | import pytest
from flax.util.generator_tools import list_to_batches
def test_empty_lists():
# An empty list should return an empty iterator and skip the loop's body.
for _, _ in list_to_batches([], 1):
assert False
def test_valid():
for k in range(1, 10):
test_list = [x for x in range(0, k)]
for i in range(1, len(test_list) + 1): # Test batch_size 1 to 11 (length + 1)
checked = 0
for remaining, batch in list_to_batches(test_list, i):
assert remaining == max(len(test_list) - checked - i, 0)
assert len(batch) <= i
assert batch == test_list[checked : min(checked + i, len(test_list))]
checked += len(batch)
assert checked == len(test_list)
def test_invalid_batch_sizes():
with pytest.raises(ValueError):
for _ in list_to_batches([], 0):
assert False
with pytest.raises(ValueError):
for _ in list_to_batches([], -1):
assert False
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/generator/test_rom.py | tests/generator/test_rom.py | from clvm_tools import binutils
from clvm_tools.clvmc import compile_clvm_text
from flax.full_node.generator import run_generator_unsafe
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.generator_types import BlockGenerator
from flax.util.ints import uint32
from flax.wallet.puzzles.load_clvm import load_clvm
from flax.consensus.condition_costs import ConditionCost
from flax.types.spend_bundle_conditions import Spend
MAX_COST = int(1e15)
COST_PER_BYTE = int(12000)
DESERIALIZE_MOD = load_clvm("flaxlisp_deserialisation.clvm", package_or_requirement="flax.wallet.puzzles")
GENERATOR_CODE = """
(mod (deserialize-mod historical-generators)
(defun first-block (deserialize-mod historical-generators)
(a deserialize-mod (list (f historical-generators))))
(defun second-block (deserialize-mod historical-generators)
(a deserialize-mod (r historical-generators)))
(defun go (deserialize-mod historical-generators)
(c (first-block deserialize-mod historical-generators)
(second-block deserialize-mod historical-generators)
))
(go deserialize-mod historical-generators)
)
"""
COMPILED_GENERATOR_CODE = bytes.fromhex(
"ff02ffff01ff04ffff02ff04ffff04ff02ffff04ff05ffff04ff0bff8080808080ffff02"
"ff06ffff04ff02ffff04ff05ffff04ff0bff808080808080ffff04ffff01ffff02ff05ff"
"1380ff02ff05ff2b80ff018080"
)
COMPILED_GENERATOR_CODE = bytes(Program.to(compile_clvm_text(GENERATOR_CODE, [])))
FIRST_GENERATOR = Program.to(
binutils.assemble('((parent_id (c 1 (q "puzzle blob")) 50000 "solution is here" extra data for coin))')
).as_bin()
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
FIRST_GENERATOR = Program.to(
binutils.assemble(
"""
((0x0000000000000000000000000000000000000000000000000000000000000000 1 50000
((51 0x0000000000000000000000000000000000000000000000000000000000000001 500)) "extra" "data" "for" "coin" ))"""
)
).as_bin()
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
def to_sp(sexp) -> SerializedProgram:
return SerializedProgram.from_bytes(bytes(sexp))
def block_generator() -> BlockGenerator:
generator_list = [to_sp(FIRST_GENERATOR), to_sp(SECOND_GENERATOR)]
generator_heights = [uint32(0), uint32(1)]
return BlockGenerator(to_sp(COMPILED_GENERATOR_CODE), generator_list, generator_heights)
EXPECTED_ABBREVIATED_COST = 108379
EXPECTED_COST = 113415
EXPECTED_OUTPUT = (
"ffffffa00000000000000000000000000000000000000000000000000000000000000000"
"ff01ff8300c350ffffff33ffa00000000000000000000000000000000000000000000000"
"000000000000000001ff8201f48080ff856578747261ff8464617461ff83666f72ff8463"
"6f696e8080ff856578747261ff8464617461ff83666f72ff85626c6f636b80"
)
class TestROM:
def test_rom_inputs(self):
# this test checks that the generator just works
# It's useful for debugging the generator prior to having the ROM invoke it.
args = Program.to([DESERIALIZE_MOD, [FIRST_GENERATOR, SECOND_GENERATOR]])
sp = to_sp(COMPILED_GENERATOR_CODE)
cost, r = sp.run_with_cost(MAX_COST, args)
assert cost == EXPECTED_ABBREVIATED_COST
assert r.as_bin().hex() == EXPECTED_OUTPUT
def test_get_name_puzzle_conditions(self):
# this tests that extra block or coin data doesn't confuse `get_name_puzzle_conditions`
gen = block_generator()
cost, r = run_generator_unsafe(gen, max_cost=MAX_COST)
print(r)
npc_result = get_name_puzzle_conditions(gen, max_cost=MAX_COST, cost_per_byte=COST_PER_BYTE, mempool_mode=False)
assert npc_result.error is None
assert npc_result.cost == EXPECTED_COST + ConditionCost.CREATE_COIN.value + (
len(bytes(gen.program)) * COST_PER_BYTE
)
spend = Spend(
coin_id=bytes32.fromhex("e8538c2d14f2a7defae65c5c97f5d4fae7ee64acef7fec9d28ad847a0880fd03"),
puzzle_hash=bytes32.fromhex("9dcf97a184f32623d11a73124ceb99a5709b083721e878a16d78f596718ba7b2"),
height_relative=None,
seconds_relative=0,
create_coin=[(bytes([0] * 31 + [1]), 500, None)],
agg_sig_me=[],
)
assert npc_result.conds.spends == [spend]
def test_coin_extras(self):
# the ROM supports extra data after a coin. This test checks that it actually gets passed through
gen = block_generator()
cost, r = run_generator_unsafe(gen, max_cost=MAX_COST)
coin_spends = r.first()
for coin_spend in coin_spends.as_iter():
extra_data = coin_spend.rest().rest().rest().rest()
assert extra_data.as_atom_list() == b"extra data for coin".split()
def test_block_extras(self):
# the ROM supports extra data after the coin spend list. This test checks that it actually gets passed through
gen = block_generator()
cost, r = run_generator_unsafe(gen, max_cost=MAX_COST)
extra_block_data = r.rest()
assert extra_block_data.as_atom_list() == b"extra data for block".split()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/simulation/test_simulation.py | tests/simulation/test_simulation.py | from typing import AsyncIterator, List, Tuple
import pytest
import pytest_asyncio
from flax.cmds.units import units
from flax.consensus.block_rewards import calculate_pool_reward, calculate_base_farmer_reward
from flax.server.server import FlaxServer
from flax.simulator.block_tools import create_block_tools_async, BlockTools
from flax.simulator.full_node_simulator import FullNodeSimulator
from flax.simulator.simulator_protocol import FarmNewBlockProtocol, GetAllCoinsProtocol, ReorgProtocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16, uint32, uint64
from flax.wallet.wallet_node import WalletNode
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import (
SimulatorsAndWallets,
setup_full_node,
setup_full_system,
test_constants,
setup_simulators_and_wallets,
)
from tests.util.keyring import TempKeyring
test_constants_modified = test_constants.replace(
**{
"DIFFICULTY_STARTING": 2**8,
"DISCRIMINANT_SIZE_BITS": 1024,
"SUB_EPOCH_BLOCKS": 140,
"WEIGHT_PROOF_THRESHOLD": 2,
"WEIGHT_PROOF_RECENT_BLOCKS": 350,
"MAX_SUB_SLOT_BLOCKS": 50,
"NUM_SPS_SUB_SLOT": 32, # Must be a power of 2
"EPOCH_BLOCKS": 280,
"SUB_SLOT_ITERS_STARTING": 2**20,
"NUMBER_ZERO_BITS_PLOT_FILTER": 5,
}
)
# TODO: Ideally, the db_version should be the (parameterized) db_version
# fixture, to test all versions of the database schema. This doesn't work
# because of a hack in shutting down the full node, which means you cannot run
# more than one simulations per process.
@pytest_asyncio.fixture(scope="function")
async def extra_node(self_hostname):
with TempKeyring() as keychain:
b_tools = await create_block_tools_async(constants=test_constants_modified, keychain=keychain)
async for _ in setup_full_node(
test_constants_modified,
"blockchain_test_3.db",
self_hostname,
b_tools,
db_version=1,
):
yield _
@pytest_asyncio.fixture(scope="function")
async def simulation(bt):
async for _ in setup_full_system(test_constants_modified, bt, db_version=1):
yield _
@pytest_asyncio.fixture(scope="function")
async def one_wallet_node() -> AsyncIterator[SimulatorsAndWallets]:
async for _ in setup_simulators_and_wallets(simulator_count=1, wallet_count=1, dic={}):
yield _
class TestSimulation:
@pytest.mark.asyncio
async def test_simulation_1(self, simulation, extra_node, self_hostname):
node1, node2, _, _, _, _, _, _, _, sanitizer_server = simulation
server1 = node1.server
node1_port = node1.full_node.server.get_port()
node2_port = node2.full_node.server.get_port()
await server1.start_client(PeerInfo(self_hostname, uint16(node2_port)))
# Use node2 to test node communication, since only node1 extends the chain.
await time_out_assert(600, node_height_at_least, True, node2, 7)
await sanitizer_server.start_client(PeerInfo(self_hostname, uint16(node2_port)))
async def has_compact(node1, node2):
peak_height_1 = node1.full_node.blockchain.get_peak_height()
headers_1 = await node1.full_node.blockchain.get_header_blocks_in_range(0, peak_height_1 - 6)
peak_height_2 = node2.full_node.blockchain.get_peak_height()
headers_2 = await node2.full_node.blockchain.get_header_blocks_in_range(0, peak_height_2 - 6)
# Commented to speed up.
# cc_eos = [False, False]
# icc_eos = [False, False]
# cc_sp = [False, False]
# cc_ip = [False, False]
has_compact = [False, False]
for index, headers in enumerate([headers_1, headers_2]):
for header in headers.values():
for sub_slot in header.finished_sub_slots:
if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
# cc_eos[index] = True
has_compact[index] = True
if (
sub_slot.proofs.infused_challenge_chain_slot_proof is not None
and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
# icc_eos[index] = True
has_compact[index] = True
if (
header.challenge_chain_sp_proof is not None
and header.challenge_chain_sp_proof.normalized_to_identity
):
# cc_sp[index] = True
has_compact[index] = True
if header.challenge_chain_ip_proof.normalized_to_identity:
# cc_ip[index] = True
has_compact[index] = True
# return (
# cc_eos == [True, True] and icc_eos == [True, True] and cc_sp == [True, True] and cc_ip == [True, True]
# )
return has_compact == [True, True]
await time_out_assert(600, has_compact, True, node1, node2)
node3 = extra_node
server3 = node3.full_node.server
peak_height = max(node1.full_node.blockchain.get_peak_height(), node2.full_node.blockchain.get_peak_height())
await server3.start_client(PeerInfo(self_hostname, uint16(node1_port)))
await server3.start_client(PeerInfo(self_hostname, uint16(node2_port)))
await time_out_assert(600, node_height_at_least, True, node3, peak_height)
@pytest.mark.asyncio
async def test_simulator_auto_farm_and_get_coins(
self,
two_wallet_nodes: Tuple[List[FullNodeSimulator], List[Tuple[WalletNode, FlaxServer]], BlockTools],
self_hostname: str,
) -> None:
num_blocks = 2
full_nodes, wallets, _ = two_wallet_nodes
full_node_api = full_nodes[0]
server_1 = full_node_api.full_node.server
wallet_node, server_2 = wallets[0]
wallet_node_2, server_3 = wallets[1]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
wallet_node.config["trusted_peers"] = {}
wallet_node_2.config["trusted_peers"] = {}
# enable auto_farming
await full_node_api.update_autofarm_config(True)
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
for i in range(num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
block_reward = calculate_pool_reward(uint32(1)) + calculate_base_farmer_reward(uint32(1))
funds = block_reward
await time_out_assert(10, wallet.get_confirmed_balance, funds)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds)
tx = await wallet.generate_signed_transaction(
uint64(10),
await wallet_node_2.wallet_state_manager.main_wallet.get_new_puzzlehash(),
uint64(0),
)
await wallet.push_transaction(tx)
# wait till out of mempool
await time_out_assert(10, full_node_api.full_node.mempool_manager.get_spendbundle, None, tx.name)
# wait until the transaction is confirmed
await time_out_assert(20, wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to, 3)
funds += block_reward # add auto farmed block.
await time_out_assert(10, wallet.get_confirmed_balance, funds - 10)
for i in range(num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
funds += block_reward
# to reduce test flake, check block height again
await time_out_assert(30, wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to, 5)
await time_out_assert(10, wallet.get_confirmed_balance, funds - 10)
await time_out_assert(5, wallet.get_unconfirmed_balance, funds - 10)
# now lets test getting all coins, first only unspent, then all
# we do this here, because we have a wallet.
non_spent_coins = await full_node_api.get_all_coins(GetAllCoinsProtocol(False))
assert len(non_spent_coins) == 11
spent_and_non_spent_coins = await full_node_api.get_all_coins(GetAllCoinsProtocol(True))
assert len(spent_and_non_spent_coins) == 12
# try reorg, then check again.
# revert to height 2, then go to height 6, so that we don't include the transaction we made.
await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(uint32(2), uint32(6), ph, None))
reorg_non_spent_coins = await full_node_api.get_all_coins(GetAllCoinsProtocol(False))
reorg_spent_and_non_spent_coins = await full_node_api.get_all_coins(GetAllCoinsProtocol(True))
assert len(reorg_non_spent_coins) == 12 and len(reorg_spent_and_non_spent_coins) == 12
assert tx.additions not in spent_and_non_spent_coins # just double check that those got reverted.
@pytest.mark.asyncio
@pytest.mark.parametrize(argnames="count", argvalues=[0, 1, 2, 5, 10])
async def test_simulation_process_blocks(
self,
count,
one_wallet_node: SimulatorsAndWallets,
):
[[full_node_api], _, _] = one_wallet_node
# Starting at the beginning.
assert full_node_api.full_node.blockchain.get_peak_height() is None
await full_node_api.process_blocks(count=count)
# The requested number of blocks had been processed.
expected_height = None if count == 0 else count
assert full_node_api.full_node.blockchain.get_peak_height() == expected_height
@pytest.mark.asyncio
@pytest.mark.parametrize(argnames="count", argvalues=[0, 1, 2, 5, 10])
async def test_simulation_farm_blocks(
self,
count,
one_wallet_node: SimulatorsAndWallets,
):
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute error below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
# Starting at the beginning.
assert full_node_api.full_node.blockchain.get_peak_height() is None
rewards = await full_node_api.farm_blocks(count=count, wallet=wallet)
# The requested number of blocks had been processed plus 1 to handle the final reward
# transactions in the case of a non-zero count.
expected_height = count
if count > 0:
expected_height += 1
peak_height = full_node_api.full_node.blockchain.get_peak_height()
if peak_height is None:
peak_height = uint32(0)
assert peak_height == expected_height
# The expected rewards have been received and confirmed.
unconfirmed_balance = await wallet.get_unconfirmed_balance()
confirmed_balance = await wallet.get_confirmed_balance()
assert [unconfirmed_balance, confirmed_balance] == [rewards, rewards]
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames=["amount", "coin_count"],
argvalues=[
[0, 0],
[1, 2],
[(2 * units["flax"]) - 1, 2],
[2 * units["flax"], 2],
[(2 * units["flax"]) + 1, 4],
[3 * units["flax"], 4],
[10 * units["flax"], 10],
],
)
async def test_simulation_farm_rewards(
self,
amount: int,
coin_count: int,
one_wallet_node: SimulatorsAndWallets,
):
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute error below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
rewards = await full_node_api.farm_rewards(amount=amount, wallet=wallet)
# At least the requested amount was farmed.
assert rewards >= amount
# The rewards amount is both received and confirmed.
unconfirmed_balance = await wallet.get_unconfirmed_balance()
confirmed_balance = await wallet.get_confirmed_balance()
assert [unconfirmed_balance, confirmed_balance] == [rewards, rewards]
# The expected number of coins were received.
spendable_coins = await wallet.wallet_state_manager.get_spendable_coins_for_wallet(wallet.id())
assert len(spendable_coins) == coin_count
@pytest.mark.asyncio
async def test_wait_transaction_records_entered_mempool(
self,
one_wallet_node: SimulatorsAndWallets,
) -> None:
repeats = 50
tx_amount = 1
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute hint issue below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
# generate some coins for repetitive testing
await full_node_api.farm_rewards(amount=repeats * tx_amount, wallet=wallet)
coins = await full_node_api.create_coins_with_amounts(amounts=[tx_amount] * repeats, wallet=wallet)
assert len(coins) == repeats
# repeating just to try to expose any flakiness
for coin in coins:
tx = await wallet.generate_signed_transaction(
amount=uint64(tx_amount),
puzzle_hash=await wallet_node.wallet_state_manager.main_wallet.get_new_puzzlehash(),
coins={coin},
)
await wallet.push_transaction(tx)
await full_node_api.wait_transaction_records_entered_mempool(records=[tx])
assert tx.spend_bundle is not None
assert full_node_api.full_node.mempool_manager.get_spendbundle(tx.spend_bundle.name()) is not None
# TODO: this fails but it seems like it shouldn't when above passes
# assert tx.is_in_mempool()
@pytest.mark.asyncio
async def test_process_transaction_records(
self,
one_wallet_node: SimulatorsAndWallets,
) -> None:
repeats = 50
tx_amount = 1
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute hint issue below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
# generate some coins for repetitive testing
await full_node_api.farm_rewards(amount=repeats * tx_amount, wallet=wallet)
coins = await full_node_api.create_coins_with_amounts(amounts=[tx_amount] * repeats, wallet=wallet)
assert len(coins) == repeats
# repeating just to try to expose any flakiness
for coin in coins:
tx = await wallet.generate_signed_transaction(
amount=uint64(tx_amount),
puzzle_hash=await wallet_node.wallet_state_manager.main_wallet.get_new_puzzlehash(),
coins={coin},
)
await wallet.push_transaction(tx)
await full_node_api.process_transaction_records(records=[tx])
# TODO: is this the proper check?
assert full_node_api.full_node.coin_store.get_coin_record(coin.name()) is not None
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames="amounts",
argvalues=[
*[pytest.param([1] * n, id=f"1 mojo x {n}") for n in [0, 1, 10, 49, 51, 103]],
*[pytest.param(list(range(1, n + 1)), id=f"incrementing x {n}") for n in [1, 10, 49, 51, 103]],
],
)
async def test_create_coins_with_amounts(
self,
amounts: List[int],
one_wallet_node: SimulatorsAndWallets,
) -> None:
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute hint issue below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
await full_node_api.farm_rewards(amount=sum(amounts), wallet=wallet)
# Get some more coins. The creator helper doesn't get you all the coins you
# need yet.
await full_node_api.farm_blocks(count=2, wallet=wallet)
coins = await full_node_api.create_coins_with_amounts(amounts=amounts, wallet=wallet)
assert sorted(coin.amount for coin in coins) == sorted(amounts)
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames="amounts",
argvalues=[
[0],
[5, -5],
[4, 0],
],
ids=lambda amounts: ", ".join(str(amount) for amount in amounts),
)
async def test_create_coins_with_invalid_amounts_raises(
self,
amounts: List[int],
one_wallet_node: SimulatorsAndWallets,
) -> None:
[[full_node_api], [[wallet_node, wallet_server]], _] = one_wallet_node
await wallet_server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
# Avoiding an attribute hint issue below.
assert wallet_node.wallet_state_manager is not None
wallet = wallet_node.wallet_state_manager.main_wallet
with pytest.raises(Exception, match="Coins must have a positive value"):
await full_node_api.create_coins_with_amounts(amounts=amounts, wallet=wallet)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/simulation/test_start_simulator.py | tests/simulation/test_start_simulator.py | from __future__ import annotations
import asyncio
from pathlib import Path
from typing import Any, AsyncGenerator, Dict, Optional, Tuple
import pytest
import pytest_asyncio
from flax.simulator.full_node_simulator import FullNodeSimulator
from flax.simulator.simulator_full_node_rpc_client import SimulatorFullNodeRpcClient
from flax.simulator.simulator_test_tools import get_full_flax_simulator, get_puzzle_hash_from_key
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
from flax.util.ints import uint16
async def get_num_coins_for_ph(simulator_client: SimulatorFullNodeRpcClient, ph: bytes32) -> int:
return len(await simulator_client.get_coin_records_by_puzzle_hash(ph))
class TestStartSimulator:
"""
These tests are designed to test the user facing functionality of the simulator.
"""
@pytest_asyncio.fixture(scope="function")
async def get_flax_simulator(
self, automated_testing: bool = False, flax_root: Optional[Path] = None, config: Optional[Dict[str, Any]] = None
) -> AsyncGenerator[Tuple[FullNodeSimulator, Path, Dict[str, Any], str, int], None]:
async for simulator_args in get_full_flax_simulator(automated_testing, flax_root, config):
yield simulator_args
@pytest.mark.asyncio
async def test_start_simulator(
self, get_flax_simulator: Tuple[FullNodeSimulator, Path, Dict[str, Any], str, int]
) -> None:
simulator, root_path, config, mnemonic, fingerprint = get_flax_simulator
ph_1 = get_puzzle_hash_from_key(fingerprint, key_id=1)
ph_2 = get_puzzle_hash_from_key(fingerprint, key_id=2)
dummy_hash = std_hash(b"test")
num_blocks = 2
# connect to rpc
rpc_port = config["full_node"]["rpc_port"]
simulator_rpc_client = await SimulatorFullNodeRpcClient.create(
config["self_hostname"], uint16(rpc_port), root_path, config
)
# test auto_farm logic
assert await simulator_rpc_client.get_auto_farming()
await time_out_assert(10, simulator_rpc_client.set_auto_farming, False, False)
await simulator.autofarm_transaction(dummy_hash) # this should do nothing
await asyncio.sleep(3) # wait for block to be processed
assert len(await simulator.get_all_full_blocks()) == 0
# now check if auto_farm is working
await time_out_assert(10, simulator_rpc_client.set_auto_farming, True, True)
for i in range(num_blocks):
await simulator.autofarm_transaction(dummy_hash)
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, 2)
# check if reward was sent to correct target
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_1)
# test both block RPC's
await simulator_rpc_client.farm_block(ph_2)
new_height = await simulator_rpc_client.farm_block(ph_2, guarantee_tx_block=True)
# check if farming reward was received correctly & if block was created
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, new_height)
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_2)
# test balance rpc
ph_amount = await simulator_rpc_client.get_all_puzzle_hashes()
assert ph_amount[ph_2][0] == 2000000000000
assert ph_amount[ph_2][1] == 2
# test all coins rpc.
coin_records = await simulator_rpc_client.get_all_coins()
ph_2_total = 0
ph_1_total = 0
for cr in coin_records:
if cr.coin.puzzle_hash == ph_2:
ph_2_total += cr.coin.amount
elif cr.coin.puzzle_hash == ph_1:
ph_1_total += cr.coin.amount
assert ph_2_total == 2000000000000 and ph_1_total == 4000000000000
# block rpc tests.
# test reorg
old_blocks = await simulator_rpc_client.get_all_blocks() # len should be 4
await simulator_rpc_client.reorg_blocks(2) # fork point 2 blocks, now height is 5
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, 5)
# now validate that the blocks don't match
assert (await simulator.get_all_full_blocks())[0:4] != old_blocks
# test block deletion
await simulator_rpc_client.revert_blocks(3) # height 5 to 2
await time_out_assert(10, simulator.full_node.blockchain.get_peak_height, 2)
await time_out_assert(10, get_num_coins_for_ph, 2, simulator_rpc_client, ph_1)
# close up
simulator_rpc_client.close()
await simulator_rpc_client.await_closed()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/simulation/config.py | tests/simulation/config.py | from __future__ import annotations
job_timeout = 60
install_timelord = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/simulation/__init__.py | tests/simulation/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/db/__init__.py | tests/db/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/db/test_db_wrapper.py | tests/db/test_db_wrapper.py | from __future__ import annotations
import asyncio
import contextlib
from typing import TYPE_CHECKING, Callable, List
import aiosqlite
import pytest
# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469
from _pytest.fixtures import SubRequest
from flax.util.db_wrapper import DBWrapper2
from tests.util.db_connection import DBConnection
if TYPE_CHECKING:
ConnectionContextManager = contextlib.AbstractAsyncContextManager[aiosqlite.core.Connection]
GetReaderMethod = Callable[[DBWrapper2], Callable[[], ConnectionContextManager]]
class UniqueError(Exception):
"""Used to uniquely trigger the exception path out of the context managers."""
pass
async def increment_counter(db_wrapper: DBWrapper2) -> None:
async with db_wrapper.writer_maybe_transaction() as connection:
async with connection.execute("SELECT value FROM counter") as cursor:
row = await cursor.fetchone()
assert row is not None
[old_value] = row
await asyncio.sleep(0)
new_value = old_value + 1
await connection.execute("UPDATE counter SET value = :value", {"value": new_value})
async def decrement_counter(db_wrapper: DBWrapper2) -> None:
async with db_wrapper.writer_maybe_transaction() as connection:
async with connection.execute("SELECT value FROM counter") as cursor:
row = await cursor.fetchone()
assert row is not None
[old_value] = row
await asyncio.sleep(0)
new_value = old_value - 1
await connection.execute("UPDATE counter SET value = :value", {"value": new_value})
async def sum_counter(db_wrapper: DBWrapper2, output: List[int]) -> None:
async with db_wrapper.reader_no_transaction() as connection:
async with connection.execute("SELECT value FROM counter") as cursor:
row = await cursor.fetchone()
assert row is not None
[value] = row
output.append(value)
async def setup_table(db: DBWrapper2) -> None:
async with db.writer_maybe_transaction() as conn:
await conn.execute("CREATE TABLE counter(value INTEGER NOT NULL)")
await conn.execute("INSERT INTO counter(value) VALUES(0)")
async def get_value(cursor: aiosqlite.Cursor) -> int:
row = await cursor.fetchone()
assert row
return int(row[0])
async def query_value(connection: aiosqlite.Connection) -> int:
async with connection.execute("SELECT value FROM counter") as cursor:
return await get_value(cursor=cursor)
def _get_reader_no_transaction_method(db_wrapper: DBWrapper2) -> Callable[[], ConnectionContextManager]:
return db_wrapper.reader_no_transaction
def _get_regular_reader_method(db_wrapper: DBWrapper2) -> Callable[[], ConnectionContextManager]:
return db_wrapper.reader
@pytest.fixture(
name="get_reader_method",
params=[
pytest.param(_get_reader_no_transaction_method, id="reader_no_transaction"),
pytest.param(_get_regular_reader_method, id="reader"),
],
)
def get_reader_method_fixture(request: SubRequest) -> Callable[[], ConnectionContextManager]:
# https://github.com/pytest-dev/pytest/issues/8763
return request.param # type: ignore[no-any-return]
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames="acquire_outside",
argvalues=[pytest.param(False, id="not acquired outside"), pytest.param(True, id="acquired outside")],
)
async def test_concurrent_writers(acquire_outside: bool, get_reader_method: GetReaderMethod) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
concurrent_task_count = 200
async with contextlib.AsyncExitStack() as exit_stack:
if acquire_outside:
await exit_stack.enter_async_context(db_wrapper.writer_maybe_transaction())
tasks = []
for index in range(concurrent_task_count):
task = asyncio.create_task(increment_counter(db_wrapper))
tasks.append(task)
await asyncio.wait_for(asyncio.gather(*tasks), timeout=None)
async with get_reader_method(db_wrapper)() as connection:
async with connection.execute("SELECT value FROM counter") as cursor:
row = await cursor.fetchone()
assert row is not None
[value] = row
assert value == concurrent_task_count
@pytest.mark.asyncio
async def test_writers_nests() -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer_maybe_transaction() as conn1:
async with conn1.execute("SELECT value FROM counter") as cursor:
value = await get_value(cursor)
async with db_wrapper.writer_maybe_transaction() as conn2:
assert conn1 == conn2
value += 1
await conn2.execute("UPDATE counter SET value = :value", {"value": value})
async with db_wrapper.writer_maybe_transaction() as conn3:
assert conn1 == conn3
async with conn3.execute("SELECT value FROM counter") as cursor:
value = await get_value(cursor)
assert value == 1
@pytest.mark.asyncio
async def test_writer_journal_mode_wal() -> None:
async with DBConnection(2) as db_wrapper:
async with db_wrapper.writer() as connection:
async with connection.execute("PRAGMA journal_mode") as cursor:
result = await cursor.fetchone()
assert result == ("wal",)
@pytest.mark.asyncio
async def test_reader_journal_mode_wal() -> None:
async with DBConnection(2) as db_wrapper:
async with db_wrapper.reader_no_transaction() as connection:
async with connection.execute("PRAGMA journal_mode") as cursor:
result = await cursor.fetchone()
assert result == ("wal",)
@pytest.mark.asyncio
async def test_partial_failure() -> None:
values = []
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer() as conn1:
await conn1.execute("UPDATE counter SET value = 42")
async with conn1.execute("SELECT value FROM counter") as cursor:
values.append(await get_value(cursor))
try:
async with db_wrapper.writer() as conn2:
await conn2.execute("UPDATE counter SET value = 1337")
async with conn1.execute("SELECT value FROM counter") as cursor:
values.append(await get_value(cursor))
# this simulates a failure, which will cause a rollback of the
# write we just made, back to 42
raise RuntimeError("failure within a sub-transaction")
except RuntimeError:
# we expect to get here
values.append(1)
async with conn1.execute("SELECT value FROM counter") as cursor:
values.append(await get_value(cursor))
# the write of 1337 failed, and was restored to 42
assert values == [42, 1337, 1, 42]
@pytest.mark.asyncio
async def test_readers_nests(get_reader_method: GetReaderMethod) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with get_reader_method(db_wrapper)() as conn1:
async with get_reader_method(db_wrapper)() as conn2:
assert conn1 == conn2
async with get_reader_method(db_wrapper)() as conn3:
assert conn1 == conn3
async with conn3.execute("SELECT value FROM counter") as cursor:
value = await get_value(cursor)
assert value == 0
@pytest.mark.asyncio
async def test_readers_nests_writer(get_reader_method: GetReaderMethod) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer_maybe_transaction() as conn1:
async with get_reader_method(db_wrapper)() as conn2:
assert conn1 == conn2
async with db_wrapper.writer_maybe_transaction() as conn3:
assert conn1 == conn3
async with conn3.execute("SELECT value FROM counter") as cursor:
value = await get_value(cursor)
assert value == 0
@pytest.mark.parametrize(
argnames="transactioned",
argvalues=[
pytest.param(True, id="transaction"),
pytest.param(False, id="no transaction"),
],
)
@pytest.mark.asyncio
async def test_only_transactioned_reader_ignores_writer(transactioned: bool) -> None:
writer_committed = asyncio.Event()
reader_read = asyncio.Event()
async def write() -> None:
try:
async with db_wrapper.writer() as writer:
assert reader is not writer
await writer.execute("UPDATE counter SET value = 1")
finally:
writer_committed.set()
await reader_read.wait()
assert await query_value(connection=writer) == 1
async with DBConnection(2) as db_wrapper:
get_reader = db_wrapper.reader if transactioned else db_wrapper.reader_no_transaction
await setup_table(db_wrapper)
async with get_reader() as reader:
assert await query_value(connection=reader) == 0
task = asyncio.create_task(write())
await writer_committed.wait()
assert await query_value(connection=reader) == 0 if transactioned else 1
reader_read.set()
await task
async with get_reader() as reader:
assert await query_value(connection=reader) == 1
@pytest.mark.asyncio
async def test_reader_nests_and_ends_transaction() -> None:
async with DBConnection(2) as db_wrapper:
async with db_wrapper.reader() as reader:
assert reader.in_transaction
async with db_wrapper.reader() as inner_reader:
assert inner_reader is reader
assert reader.in_transaction
assert reader.in_transaction
assert not reader.in_transaction
@pytest.mark.asyncio
async def test_writer_in_reader_works() -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.reader() as reader:
async with db_wrapper.writer() as writer:
assert writer is not reader
await writer.execute("UPDATE counter SET value = 1")
assert await query_value(connection=writer) == 1
assert await query_value(connection=reader) == 0
assert await query_value(connection=reader) == 0
@pytest.mark.asyncio
async def test_reader_transaction_is_deferred() -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.reader() as reader:
async with db_wrapper.writer() as writer:
assert writer is not reader
await writer.execute("UPDATE counter SET value = 1")
assert await query_value(connection=writer) == 1
# The deferred transaction initiation results in the transaction starting
# here and thus reading the written value.
assert await query_value(connection=reader) == 1
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames="acquire_outside",
argvalues=[pytest.param(False, id="not acquired outside"), pytest.param(True, id="acquired outside")],
)
async def test_concurrent_readers(acquire_outside: bool, get_reader_method: GetReaderMethod) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer_maybe_transaction() as connection:
await connection.execute("UPDATE counter SET value = 1")
concurrent_task_count = 200
async with contextlib.AsyncExitStack() as exit_stack:
if acquire_outside:
await exit_stack.enter_async_context(get_reader_method(db_wrapper)())
tasks = []
values: List[int] = []
for index in range(concurrent_task_count):
task = asyncio.create_task(sum_counter(db_wrapper, values))
tasks.append(task)
await asyncio.wait_for(asyncio.gather(*tasks), timeout=None)
assert values == [1] * concurrent_task_count
@pytest.mark.asyncio
@pytest.mark.parametrize(
argnames="acquire_outside",
argvalues=[pytest.param(False, id="not acquired outside"), pytest.param(True, id="acquired outside")],
)
async def test_mixed_readers_writers(acquire_outside: bool, get_reader_method: GetReaderMethod) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer_maybe_transaction() as connection:
await connection.execute("UPDATE counter SET value = 1")
concurrent_task_count = 200
async with contextlib.AsyncExitStack() as exit_stack:
if acquire_outside:
await exit_stack.enter_async_context(get_reader_method(db_wrapper)())
tasks = []
values: List[int] = []
for index in range(concurrent_task_count):
task = asyncio.create_task(increment_counter(db_wrapper))
tasks.append(task)
task = asyncio.create_task(decrement_counter(db_wrapper))
tasks.append(task)
task = asyncio.create_task(sum_counter(db_wrapper, values))
tasks.append(task)
await asyncio.wait_for(asyncio.gather(*tasks), timeout=None)
# we increment and decrement the counter an equal number of times. It should
# end back at 1.
async with get_reader_method(db_wrapper)() as connection:
async with connection.execute("SELECT value FROM counter") as cursor:
row = await cursor.fetchone()
assert row is not None
assert row[0] == 1
# it's possible all increments or all decrements are run first
assert len(values) == concurrent_task_count
for v in values:
assert v > -99
assert v <= 100
@pytest.mark.parametrize(
argnames=["manager_method", "expected"],
argvalues=[
[DBWrapper2.writer, True],
[DBWrapper2.writer_maybe_transaction, True],
[DBWrapper2.reader, True],
[DBWrapper2.reader_no_transaction, False],
],
)
@pytest.mark.asyncio
async def test_in_transaction_as_expected(
manager_method: Callable[[DBWrapper2], ConnectionContextManager],
expected: bool,
) -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with manager_method(db_wrapper) as connection:
assert connection.in_transaction == expected
@pytest.mark.asyncio
async def test_cancelled_reader_does_not_cancel_writer() -> None:
async with DBConnection(2) as db_wrapper:
await setup_table(db_wrapper)
async with db_wrapper.writer() as writer:
await writer.execute("UPDATE counter SET value = 1")
with pytest.raises(UniqueError):
async with db_wrapper.reader() as _:
raise UniqueError()
assert await query_value(connection=writer) == 1
assert await query_value(connection=writer) == 1
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_program.py | tests/clvm/test_program.py | from unittest import TestCase
from flax.types.blockchain_format.program import Program
from clvm.EvalError import EvalError
from clvm.operators import KEYWORD_TO_ATOM
from clvm_tools.binutils import assemble, disassemble
class TestProgram(TestCase):
def test_at(self):
p = Program.to([10, 20, 30, [15, 17], 40, 50])
self.assertEqual(p.first(), p.at("f"))
self.assertEqual(Program.to(10), p.at("f"))
self.assertEqual(p.rest(), p.at("r"))
self.assertEqual(Program.to([20, 30, [15, 17], 40, 50]), p.at("r"))
self.assertEqual(p.rest().rest().rest().first().rest().first(), p.at("rrrfrf"))
self.assertEqual(Program.to(17), p.at("rrrfrf"))
self.assertRaises(ValueError, lambda: p.at("q"))
self.assertRaises(EvalError, lambda: p.at("ff"))
def test_replace(self):
p1 = Program.to([100, 200, 300])
self.assertEqual(p1.replace(f=105), Program.to([105, 200, 300]))
self.assertEqual(p1.replace(rrf=[301, 302]), Program.to([100, 200, [301, 302]]))
self.assertEqual(p1.replace(f=105, rrf=[301, 302]), Program.to([105, 200, [301, 302]]))
self.assertEqual(p1.replace(f=100, r=200), Program.to((100, 200)))
def test_replace_conflicts(self):
p1 = Program.to([100, 200, 300])
self.assertRaises(ValueError, lambda: p1.replace(rr=105, rrf=200))
def test_replace_conflicting_paths(self):
p1 = Program.to([100, 200, 300])
self.assertRaises(ValueError, lambda: p1.replace(ff=105))
def test_replace_bad_path(self):
p1 = Program.to([100, 200, 300])
self.assertRaises(ValueError, lambda: p1.replace(q=105))
self.assertRaises(ValueError, lambda: p1.replace(rq=105))
def check_idempotency(f, *args):
prg = Program.to(f)
curried = prg.curry(*args)
r = disassemble(curried)
f_0, args_0 = curried.uncurry()
assert disassemble(f_0) == disassemble(f)
assert disassemble(args_0) == disassemble(Program.to(list(args)))
return r
def test_curry_uncurry():
PLUS = KEYWORD_TO_ATOM["+"][0]
f = assemble("(+ 2 5)")
actual_disassembly = check_idempotency(f, 200, 30)
assert actual_disassembly == f"(a (q {PLUS} 2 5) (c (q . 200) (c (q . 30) 1)))"
f = assemble("(+ 2 5)")
args = assemble("(+ (q . 50) (q . 60))")
# passing "args" here wraps the arguments in a list
actual_disassembly = check_idempotency(f, args)
assert actual_disassembly == f"(a (q {PLUS} 2 5) (c (q {PLUS} (q . 50) (q . 60)) 1))"
def test_uncurry_not_curried():
# this function has not been curried
plus = Program.to(assemble("(+ 2 5)"))
assert plus.uncurry() == (plus, Program.to(0))
def test_uncurry():
# this is a positive test
plus = Program.to(assemble("(2 (q . (+ 2 5)) (c (q . 1) 1))"))
assert plus.uncurry() == (Program.to(assemble("(+ 2 5)")), Program.to([1]))
def test_uncurry_top_level_garbage():
# there's garbage at the end of the top-level list
plus = Program.to(assemble("(2 (q . 1) (c (q . 1) (q . 1)) (q . 0x1337))"))
assert plus.uncurry() == (plus, Program.to(0))
def test_uncurry_not_pair():
# the second item in the list is expected to be a pair, with a qoute
plus = Program.to(assemble("(2 1 (c (q . 1) (q . 1)))"))
assert plus.uncurry() == (plus, Program.to(0))
def test_uncurry_args_garbage():
# there's garbage at the end of the args list
plus = Program.to(assemble("(2 (q . 1) (c (q . 1) (q . 1) (q . 0x1337)))"))
assert plus.uncurry() == (plus, Program.to(0))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_serialized_program.py | tests/clvm/test_serialized_program.py | from unittest import TestCase
from flax.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from flax.wallet.puzzles.load_clvm import load_clvm
SHA256TREE_MOD = load_clvm("sha256tree_module.clvm")
# TODO: test multiple args
class TestSerializedProgram(TestCase):
def test_tree_hash(self):
p = SHA256TREE_MOD
s = SerializedProgram.from_bytes(bytes(SHA256TREE_MOD))
self.assertEqual(s.get_tree_hash(), p.get_tree_hash())
def test_program_execution(self):
p_result = SHA256TREE_MOD.run(SHA256TREE_MOD)
sp = SerializedProgram.from_bytes(bytes(SHA256TREE_MOD))
cost, sp_result = sp.run_with_cost(INFINITE_COST, sp)
self.assertEqual(p_result, sp_result)
def test_serialization(self):
s0 = SerializedProgram.from_bytes(b"\x00")
p0 = Program.from_bytes(b"\x00")
print(s0, p0)
# TODO: enable when clvm updated for minimal encoding of zero
# self.assertEqual(bytes(p0), bytes(s0))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/benchmark_costs.py | tests/clvm/benchmark_costs.py | from flax.types.blockchain_format.program import INFINITE_COST
from flax.types.spend_bundle import SpendBundle
from flax.types.generator_types import BlockGenerator
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.full_node.bundle_tools import simple_solution_generator
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
def cost_of_spend_bundle(spend_bundle: SpendBundle) -> int:
program: BlockGenerator = simple_solution_generator(spend_bundle)
npc_result: NPCResult = get_name_puzzle_conditions(
program, INFINITE_COST, cost_per_byte=DEFAULT_CONSTANTS.COST_PER_BYTE, mempool_mode=True
)
return npc_result.cost
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_puzzle_drivers.py | tests/clvm/test_puzzle_drivers.py | from __future__ import annotations
from typing import Any, Dict, Union
import pytest
from flax.types.blockchain_format.program import Program
from flax.wallet.puzzle_drivers import PuzzleInfo, Solver
def test_puzzle_info() -> None:
test_driver: Dict[str, Any] = {
"string": "hello",
"bytes": "0xcafef00d",
"int": "123",
"program": "(q . 'hello')",
"zero": "0",
"nil": "()",
}
test_also: Dict[str, Any] = {"type": "TEST", "string": "hello"}
test_driver["also"] = test_also
with pytest.raises(ValueError, match="A type is required"):
PuzzleInfo(test_driver)
solver = Solver(test_driver)
test_driver["type"] = "TEST"
puzzle_info = PuzzleInfo(test_driver)
assert puzzle_info.type() == "TEST"
assert puzzle_info.also() == PuzzleInfo(test_also)
capitalize_bytes = test_driver.copy()
capitalize_bytes["bytes"] = "0xCAFEF00D"
assert solver == Solver(capitalize_bytes)
assert puzzle_info == PuzzleInfo(capitalize_bytes)
obj: Union[PuzzleInfo, Solver]
for obj in (puzzle_info, solver): # type: ignore
assert obj["string"] == "hello"
assert obj["bytes"] == bytes.fromhex("cafef00d")
assert obj["int"] == 123
assert obj["program"] == Program.to((1, "hello"))
assert obj["zero"] == 0
assert obj["nil"] == Program.to([])
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_puzzles.py | tests/clvm/test_puzzles.py | from __future__ import annotations
from typing import Iterable, List, Tuple
from unittest import TestCase
from blspy import AugSchemeMPL, BasicSchemeMPL, G1Element, G2Element
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.spend_bundle import SpendBundle
from flax.util.hash import std_hash
from flax.wallet.puzzles import (
p2_conditions,
p2_delegated_conditions,
p2_delegated_puzzle,
p2_delegated_puzzle_or_hidden_puzzle,
p2_m_of_n_delegate_direct,
p2_puzzle_hash,
)
from tests.util.key_tool import KeyTool
from ..core.make_block_generator import int_to_public_key
from .coin_store import CoinStore, CoinTimestamp
T1 = CoinTimestamp(1, 10000000)
T2 = CoinTimestamp(5, 10003000)
MAX_BLOCK_COST_CLVM = int(1e18)
COST_PER_BYTE = int(12000)
def secret_exponent_for_index(index: int) -> int:
blob = index.to_bytes(32, "big")
hashed_blob = BasicSchemeMPL.key_gen(std_hash(b"foo" + blob))
r = int.from_bytes(hashed_blob, "big")
return r
def public_key_for_index(index: int, key_lookup: KeyTool) -> bytes:
secret_exponent = secret_exponent_for_index(index)
key_lookup.add_secret_exponents([secret_exponent])
return bytes(int_to_public_key(secret_exponent))
def throwaway_puzzle_hash(index: int, key_lookup: KeyTool) -> bytes32:
return p2_delegated_puzzle.puzzle_for_pk(public_key_for_index(index, key_lookup)).get_tree_hash()
def do_test_spend(
puzzle_reveal: Program,
solution: Program,
payments: Iterable[Tuple[bytes32, int]],
key_lookup: KeyTool,
farm_time: CoinTimestamp = T1,
spend_time: CoinTimestamp = T2,
) -> SpendBundle:
"""
This method will farm a coin paid to the hash of `puzzle_reveal`, then try to spend it
with `solution`, and verify that the created coins correspond to `payments`.
The `key_lookup` is used to create a signed version of the `SpendBundle`, although at
this time, signatures are not verified.
"""
coin_db = CoinStore()
puzzle_hash = puzzle_reveal.get_tree_hash()
# farm it
coin = coin_db.farm_coin(puzzle_hash, farm_time)
# spend it
coin_spend = CoinSpend(coin, puzzle_reveal, solution)
spend_bundle = SpendBundle([coin_spend], G2Element())
coin_db.update_coin_store_for_spend_bundle(spend_bundle, spend_time, MAX_BLOCK_COST_CLVM, COST_PER_BYTE)
# ensure all outputs are there
for puzzle_hash, amount in payments:
for coin in coin_db.coins_for_puzzle_hash(puzzle_hash):
if coin.amount == amount:
break
else:
assert 0
# make sure we can actually sign the solution
signatures = []
for coin_spend in spend_bundle.coin_spends:
signature = key_lookup.signature_for_solution(coin_spend, bytes([2] * 32))
signatures.append(signature)
return SpendBundle(spend_bundle.coin_spends, AugSchemeMPL.aggregate(signatures))
def default_payments_and_conditions(
initial_index: int, key_lookup: KeyTool
) -> Tuple[List[Tuple[bytes32, int]], Program]:
payments = [
(throwaway_puzzle_hash(initial_index + 1, key_lookup), initial_index * 1000),
(throwaway_puzzle_hash(initial_index + 2, key_lookup), (initial_index + 1) * 1000),
]
conditions = Program.to([make_create_coin_condition(ph, amount) for ph, amount in payments])
return payments, conditions
def make_create_coin_condition(puzzle_hash, amount):
return Program.to([ConditionOpcode.CREATE_COIN, puzzle_hash, amount])
class TestPuzzles(TestCase):
def test_p2_conditions(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
puzzle = p2_conditions.puzzle_for_conditions(conditions)
solution = p2_conditions.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_conditions(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pk = public_key_for_index(1, key_lookup)
puzzle = p2_delegated_conditions.puzzle_for_pk(pk)
solution = p2_delegated_conditions.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_puzzle_simple(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pk = public_key_for_index(1, key_lookup)
puzzle = p2_delegated_puzzle.puzzle_for_pk(pk)
solution = p2_delegated_puzzle.solution_for_conditions(conditions)
do_test_spend(puzzle, solution, payments, key_lookup)
def test_p2_delegated_puzzle_graftroot(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
delegated_puzzle = p2_delegated_conditions.puzzle_for_pk(public_key_for_index(8, key_lookup))
delegated_solution = p2_delegated_conditions.solution_for_conditions(conditions)
puzzle_program = p2_delegated_puzzle.puzzle_for_pk(public_key_for_index(1, key_lookup))
solution = p2_delegated_puzzle.solution_for_delegated_puzzle(delegated_puzzle, delegated_solution)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_puzzle_hash(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
inner_puzzle = p2_delegated_conditions.puzzle_for_pk(public_key_for_index(4, key_lookup))
inner_solution = p2_delegated_conditions.solution_for_conditions(conditions)
inner_puzzle_hash = inner_puzzle.get_tree_hash()
puzzle_program = p2_puzzle_hash.puzzle_for_inner_puzzle_hash(inner_puzzle_hash)
assert puzzle_program == p2_puzzle_hash.puzzle_for_inner_puzzle(inner_puzzle)
solution = p2_puzzle_hash.solution_for_inner_puzzle_and_inner_solution(inner_puzzle, inner_solution)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_m_of_n_delegated_puzzle(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
pks = [public_key_for_index(_, key_lookup) for _ in range(1, 6)]
M = 3
delegated_puzzle = p2_conditions.puzzle_for_conditions(conditions)
delegated_solution = []
puzzle_program = p2_m_of_n_delegate_direct.puzzle_for_m_of_public_key_list(M, pks)
selectors = [1, [], [], 1, 1]
solution = p2_m_of_n_delegate_direct.solution_for_delegated_puzzle(
M, selectors, delegated_puzzle, delegated_solution
)
do_test_spend(puzzle_program, solution, payments, key_lookup)
def test_p2_delegated_puzzle_or_hidden_puzzle_with_hidden_puzzle(self):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
hidden_puzzle = p2_conditions.puzzle_for_conditions(conditions)
hidden_public_key = public_key_for_index(10, key_lookup)
puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle(
G1Element.from_bytes_unchecked(hidden_public_key), hidden_puzzle
)
solution = p2_delegated_puzzle_or_hidden_puzzle.solution_for_hidden_puzzle(
G1Element.from_bytes_unchecked(hidden_public_key), hidden_puzzle, Program.to(0)
)
do_test_spend(puzzle, solution, payments, key_lookup)
def do_test_spend_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(self, hidden_pub_key_index):
key_lookup = KeyTool()
payments, conditions = default_payments_and_conditions(1, key_lookup)
hidden_puzzle = p2_conditions.puzzle_for_conditions(conditions)
hidden_public_key = public_key_for_index(hidden_pub_key_index, key_lookup)
hidden_pub_key_point = G1Element.from_bytes(hidden_public_key)
puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_public_key_and_hidden_puzzle(
hidden_pub_key_point, hidden_puzzle
)
payable_payments, payable_conditions = default_payments_and_conditions(5, key_lookup)
delegated_puzzle = p2_conditions.puzzle_for_conditions(payable_conditions)
delegated_solution = []
synthetic_public_key = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_public_key(
G1Element.from_bytes(hidden_public_key), hidden_puzzle.get_tree_hash()
)
solution = p2_delegated_puzzle_or_hidden_puzzle.solution_for_delegated_puzzle(
delegated_puzzle, delegated_solution
)
hidden_puzzle_hash = hidden_puzzle.get_tree_hash()
synthetic_offset = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_offset(
hidden_pub_key_point, hidden_puzzle_hash
)
assert synthetic_public_key == int_to_public_key(synthetic_offset) + hidden_pub_key_point
secret_exponent = key_lookup.get(hidden_public_key)
assert int_to_public_key(secret_exponent) == hidden_pub_key_point
synthetic_secret_exponent = secret_exponent + synthetic_offset
key_lookup.add_secret_exponents([synthetic_secret_exponent])
do_test_spend(puzzle, solution, payable_payments, key_lookup)
def test_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(self):
for hidden_pub_key_index in range(1, 10):
self.do_test_spend_p2_delegated_puzzle_or_hidden_puzzle_with_delegated_puzzle(hidden_pub_key_index)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/coin_store.py | tests/clvm/coin_store.py | from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Optional
from flax.full_node.mempool_check_conditions import mempool_check_time_locks, get_name_puzzle_conditions
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_record import CoinRecord
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint32, uint64
from flax.full_node.bundle_tools import simple_solution_generator
from flax.util.errors import Err
from flax.consensus.cost_calculator import NPCResult
MAX_COST = 11000000000
class BadSpendBundleError(Exception):
pass
@dataclass
class CoinTimestamp:
seconds: int
height: int
class CoinStore:
def __init__(self, reward_mask: int = 0):
self._db: Dict[bytes32, CoinRecord] = dict()
self._ph_index: Dict = defaultdict(list)
self._reward_mask = reward_mask
def farm_coin(
self,
puzzle_hash: bytes32,
birthday: CoinTimestamp,
amount: int = 1024,
prefix=bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000"), # noqa
) -> Coin:
parent = bytes32(
[
a | b
for a, b in zip(
prefix,
birthday.height.to_bytes(32, "big"),
)
],
)
# parent = birthday.height.to_bytes(32, "big")
coin = Coin(parent, puzzle_hash, uint64(amount))
self._add_coin_entry(coin, birthday)
return coin
def validate_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
cost_per_byte: int,
) -> int:
# this should use blockchain consensus code
program = simple_solution_generator(spend_bundle)
result: NPCResult = get_name_puzzle_conditions(
program, max_cost, cost_per_byte=cost_per_byte, mempool_mode=True
)
if result.error is not None:
raise BadSpendBundleError(f"condition validation failure {Err(result.error)}")
ephemeral_db = dict(self._db)
assert result.conds is not None
for spend in result.conds.spends:
for puzzle_hash, amount, hint in spend.create_coin:
coin = Coin(bytes32(spend.coin_id), bytes32(puzzle_hash), uint64(amount))
name = coin.name()
ephemeral_db[name] = CoinRecord(
coin,
uint32(now.height),
uint32(0),
False,
uint64(now.seconds),
)
err = mempool_check_time_locks(
ephemeral_db,
result.conds,
uint32(now.height),
uint64(now.seconds),
)
if err is not None:
raise BadSpendBundleError(f"condition validation failure {Err(err)}")
return 0
def update_coin_store_for_spend_bundle(
self,
spend_bundle: SpendBundle,
now: CoinTimestamp,
max_cost: int,
cost_per_byte: int,
):
err = self.validate_spend_bundle(spend_bundle, now, max_cost, cost_per_byte)
if err != 0:
raise BadSpendBundleError(f"validation failure {err}")
additions = spend_bundle.additions()
removals = spend_bundle.removals()
for new_coin in additions:
self._add_coin_entry(new_coin, now)
for spent_coin in removals:
coin_name = spent_coin.name()
coin_record = self._db[coin_name]
self._db[coin_name] = replace(coin_record, spent_block_index=now.height)
return additions, spend_bundle.coin_spends
def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
for coin_name in self._ph_index[puzzle_hash]:
coin_entry = self._db[coin_name]
assert coin_entry.coin.puzzle_hash == puzzle_hash
yield coin_entry.coin
def all_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
yield coin_entry.coin
def all_unspent_coins(self) -> Iterator[Coin]:
for coin_entry in self._db.values():
if not coin_entry.spent:
yield coin_entry.coin
def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
name = coin.name()
# assert name not in self._db
self._db[name] = CoinRecord(
coin,
uint32(birthday.height),
uint32(0),
False,
uint64(birthday.seconds),
)
self._ph_index[coin.puzzle_hash].append(name)
def coin_record(self, coin_id: bytes32) -> Optional[CoinRecord]:
return self._db.get(coin_id)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_curry_and_treehash.py | tests/clvm/test_curry_and_treehash.py | from __future__ import annotations
from flax.types.blockchain_format.program import Program
from flax.wallet.puzzles import p2_delegated_puzzle_or_hidden_puzzle # import (puzzle_for_pk, puzzle_hash_for_pk, MOD)
from flax.wallet.util.curry_and_treehash import calculate_hash_of_quoted_mod_hash, curry_and_treehash
def test_curry_and_treehash() -> None:
arbitrary_mod = p2_delegated_puzzle_or_hidden_puzzle.MOD
arbitrary_mod_hash = arbitrary_mod.get_tree_hash()
# we don't really care what `arbitrary_mod` is. We just need some code
quoted_mod_hash = calculate_hash_of_quoted_mod_hash(arbitrary_mod_hash)
for v in range(500):
args = [v, v * v, v * v * v]
# we don't really care about the arguments either
puzzle = arbitrary_mod.curry(*args)
puzzle_hash_via_curry = puzzle.get_tree_hash()
hashed_args = [Program.to(_).get_tree_hash() for _ in args]
puzzle_hash_via_f = curry_and_treehash(quoted_mod_hash, *hashed_args)
assert puzzle_hash_via_curry == puzzle_hash_via_f
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_singletons.py | tests/clvm/test_singletons.py | import pytest
from typing import List, Tuple, Optional
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.coin import Coin
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.spend_bundle import SpendBundle
from flax.util.errors import Err
from flax.util.ints import uint64
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.wallet.lineage_proof import LineageProof
from flax.wallet.puzzles import p2_conditions, p2_delegated_puzzle_or_hidden_puzzle
from tests.util.key_tool import KeyTool
from tests.clvm.test_puzzles import (
public_key_for_index,
secret_exponent_for_index,
)
from flax.clvm.spend_sim import SpendSim, SimClient
"""
This test suite aims to test:
- flax.wallet.puzzles.singleton_top_layer.py
- flax.wallet.puzzles.singleton_top_layer.clvm
- flax.wallet.puzzles.singleton_top_layer_v1_1.py
- flax.wallet.puzzles.singleton_top_layer_v1_1.clvm
- flax.wallet.puzzles.p2_singleton.clvm
- flax.wallet.puzzles.p2_singleton_or_delayed_puzhash.clvm
"""
class TransactionPushError(Exception):
pass
class TestSingleton:
# Helper function
def sign_delegated_puz(self, del_puz: Program, coin: Coin) -> G2Element:
synthetic_secret_key: PrivateKey = p2_delegated_puzzle_or_hidden_puzzle.calculate_synthetic_secret_key( # noqa
PrivateKey.from_bytes(
secret_exponent_for_index(1).to_bytes(32, "big"),
),
p2_delegated_puzzle_or_hidden_puzzle.DEFAULT_HIDDEN_PUZZLE_HASH,
)
return AugSchemeMPL.sign(
synthetic_secret_key,
(del_puz.get_tree_hash() + coin.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA), # noqa
)
# Helper function
async def make_and_spend_bundle(
self,
sim: SpendSim,
sim_client: SimClient,
coin: Coin,
delegated_puzzle: Program,
coinsols: List[CoinSpend],
ex_error: Optional[Err] = None,
fail_msg: str = "",
):
signature: G2Element = self.sign_delegated_puz(delegated_puzzle, coin)
spend_bundle = SpendBundle(
coinsols,
signature,
)
try:
result, error = await sim_client.push_tx(spend_bundle)
if error is None:
await sim.farm_block()
elif ex_error is not None:
assert error == ex_error
else:
raise TransactionPushError(error)
except AssertionError:
raise AssertionError(fail_msg)
@pytest.mark.asyncio
@pytest.mark.parametrize("version", [0, 1])
async def test_singleton_top_layer(self, version):
try:
# START TESTS
# Generate starting info
key_lookup = KeyTool()
pk: G1Element = G1Element.from_bytes(public_key_for_index(1, key_lookup))
starting_puzzle: Program = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(pk) # noqa
if version == 0:
from flax.wallet.puzzles import singleton_top_layer
adapted_puzzle: Program = singleton_top_layer.adapt_inner_to_singleton(starting_puzzle) # noqa
else:
from flax.wallet.puzzles import singleton_top_layer_v1_1 as singleton_top_layer
adapted_puzzle = starting_puzzle
adapted_puzzle_hash: bytes32 = adapted_puzzle.get_tree_hash()
# Get our starting standard coin created
START_AMOUNT: uint64 = 1023
sim = await SpendSim.create()
sim_client = SimClient(sim)
await sim.farm_block(starting_puzzle.get_tree_hash())
starting_coin: Coin = await sim_client.get_coin_records_by_puzzle_hash(starting_puzzle.get_tree_hash())
starting_coin = starting_coin[0].coin
comment: List[Tuple[str, str]] = [("hello", "world")]
# LAUNCHING
# Try to create an even singleton (driver test)
try:
conditions, launcher_coinsol = singleton_top_layer.launch_conditions_and_coinsol( # noqa
starting_coin, adapted_puzzle, comment, (START_AMOUNT - 1)
)
raise AssertionError("This should fail due to an even amount")
except ValueError as msg:
assert str(msg) == "Coin amount cannot be even. Subtract one mojo."
conditions, launcher_coinsol = singleton_top_layer.launch_conditions_and_coinsol( # noqa
starting_coin, adapted_puzzle, comment, START_AMOUNT
)
# Creating solution for standard transaction
delegated_puzzle: Program = p2_conditions.puzzle_for_conditions(conditions) # noqa
full_solution: Program = p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions) # noqa
starting_coinsol = CoinSpend(
starting_coin,
starting_puzzle,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
starting_coin,
delegated_puzzle,
[starting_coinsol, launcher_coinsol],
)
# EVE
singleton_eve: Coin = (await sim.all_non_reward_coins())[0]
launcher_coin: Coin = singleton_top_layer.generate_launcher_coin(
starting_coin,
START_AMOUNT,
)
launcher_id: bytes32 = launcher_coin.name()
# This delegated puzzle just recreates the coin exactly
delegated_puzzle: Program = Program.to(
(
1,
[
[
ConditionOpcode.CREATE_COIN,
adapted_puzzle_hash,
singleton_eve.amount,
]
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
# Generate the lineage proof we will need from the launcher coin
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(launcher_coinsol) # noqa
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton_eve.amount,
inner_solution,
)
singleton_eve_coinsol = CoinSpend(
singleton_eve,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton_eve,
delegated_puzzle,
[singleton_eve_coinsol],
)
# POST-EVE
singleton: Coin = (await sim.all_non_reward_coins())[0]
# Same delegated_puzzle / inner_solution. We're just recreating ourself
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_eve_coinsol) # noqa
# Same puzzle_reveal too
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton.amount,
inner_solution,
)
singleton_coinsol = CoinSpend(
singleton,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton,
delegated_puzzle,
[singleton_coinsol],
)
# CLAIM A P2_SINGLETON
singleton_child: Coin = (await sim.all_non_reward_coins())[0]
p2_singleton_puz: Program = singleton_top_layer.pay_to_singleton_puzzle(launcher_id)
p2_singleton_ph: bytes32 = p2_singleton_puz.get_tree_hash()
await sim.farm_block(p2_singleton_ph)
p2_singleton_coin: Coin = await sim_client.get_coin_records_by_puzzle_hash(p2_singleton_ph)
p2_singleton_coin = p2_singleton_coin[0].coin
assertion, announcement, claim_coinsol = singleton_top_layer.claim_p2_singleton(
p2_singleton_coin,
adapted_puzzle_hash,
launcher_id,
)
delegated_puzzle: Program = Program.to(
(
1,
[
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, singleton_eve.amount],
assertion,
announcement,
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton_eve.amount,
inner_solution,
)
singleton_claim_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim, sim_client, singleton_child, delegated_puzzle, [singleton_claim_coinsol, claim_coinsol]
)
# CLAIM A P2_SINGLETON_OR_DELAYED
singleton_child: Coin = (await sim.all_non_reward_coins())[0]
DELAY_TIME: uint64 = 1
DELAY_PH: bytes32 = adapted_puzzle_hash
p2_singleton_puz: Program = singleton_top_layer.pay_to_singleton_or_delay_puzzle(
launcher_id,
DELAY_TIME,
DELAY_PH,
)
p2_singleton_ph: bytes32 = p2_singleton_puz.get_tree_hash()
ARBITRARY_AMOUNT: uint64 = 250000000000
await sim.farm_block(p2_singleton_ph)
p2_singleton_coin: Coin = await sim_client.get_coin_records_by_puzzle_hash(p2_singleton_ph)
p2_singleton_coin = sorted(p2_singleton_coin, key=lambda x: x.coin.amount)[0].coin
assertion, announcement, claim_coinsol = singleton_top_layer.claim_p2_singleton(
p2_singleton_coin,
adapted_puzzle_hash,
launcher_id,
DELAY_TIME,
DELAY_PH,
)
delegated_puzzle: Program = Program.to(
(
1,
[
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, singleton_eve.amount],
assertion,
announcement,
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_claim_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
singleton_eve.amount,
inner_solution,
)
delay_claim_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
# Save the height so we can rewind after this
save_height: uint64 = (
sim.get_height()
) # The last coin solution before this point is singleton_claim_coinsol
await self.make_and_spend_bundle(
sim, sim_client, singleton_child, delegated_puzzle, [delay_claim_coinsol, claim_coinsol]
)
# TRY TO SPEND AWAY TOO SOON (Negative Test)
await sim.rewind(save_height)
to_delay_ph_coinsol: CoinSpend = singleton_top_layer.spend_to_delayed_puzzle(
p2_singleton_coin,
ARBITRARY_AMOUNT,
launcher_id,
DELAY_TIME,
DELAY_PH,
)
result, error = await sim_client.push_tx(SpendBundle([to_delay_ph_coinsol], G2Element()))
assert error == Err.ASSERT_SECONDS_RELATIVE_FAILED
# SPEND TO DELAYED PUZZLE HASH
await sim.rewind(save_height)
sim.pass_time(10000005)
sim.pass_blocks(100)
await sim_client.push_tx(SpendBundle([to_delay_ph_coinsol], G2Element()))
# CREATE MULTIPLE ODD CHILDREN (Negative Test)
singleton_child: Coin = (await sim.all_non_reward_coins())[0]
delegated_puzzle: Program = Program.to(
(
1,
[
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, 3],
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, 7],
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_claim_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof, singleton_child.amount, inner_solution
)
multi_odd_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton_child,
delegated_puzzle,
[multi_odd_coinsol],
ex_error=Err.GENERATOR_RUNTIME_ERROR,
fail_msg="Too many odd children were allowed",
)
# CREATE NO ODD CHILDREN (Negative Test)
delegated_puzzle: Program = Program.to(
(
1,
[
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, 4],
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, 10],
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_claim_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof, singleton_child.amount, inner_solution
)
no_odd_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton_child,
delegated_puzzle,
[no_odd_coinsol],
ex_error=Err.GENERATOR_RUNTIME_ERROR,
fail_msg="Need at least one odd child",
)
# ATTEMPT TO CREATE AN EVEN SINGLETON (Negative test)
await sim.rewind(save_height)
delegated_puzzle: Program = Program.to(
(
1,
[
[
ConditionOpcode.CREATE_COIN,
singleton_child.puzzle_hash,
2,
],
[ConditionOpcode.CREATE_COIN, adapted_puzzle_hash, 1],
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_claim_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof, singleton_child.amount, inner_solution
)
singleton_even_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton_child,
delegated_puzzle,
[singleton_even_coinsol],
)
# Now try a perfectly innocent spend
evil_coin: Coin = next(filter(lambda c: c.amount == 2, (await sim.all_non_reward_coins())))
delegated_puzzle: Program = Program.to(
(
1,
[
[
ConditionOpcode.CREATE_COIN,
adapted_puzzle_hash,
1,
],
],
)
)
inner_solution: Program = Program.to([[], delegated_puzzle, []])
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_even_coinsol) # noqa
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof,
1,
inner_solution,
)
evil_coinsol = CoinSpend(
evil_coin,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
evil_coin,
delegated_puzzle,
[evil_coinsol],
ex_error=Err.ASSERT_MY_COIN_ID_FAILED if version == 0 else Err.ASSERT_MY_AMOUNT_FAILED,
fail_msg="This coin is even!",
)
# MELTING
# Remember, we're still spending singleton_child
await sim.rewind(save_height)
conditions = [
singleton_top_layer.MELT_CONDITION,
[
ConditionOpcode.CREATE_COIN,
adapted_puzzle_hash,
(singleton_child.amount - 1),
],
]
delegated_puzzle: Program = p2_conditions.puzzle_for_conditions(conditions)
inner_solution: Program = p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions)
lineage_proof: LineageProof = singleton_top_layer.lineage_proof_for_coinsol(singleton_claim_coinsol)
puzzle_reveal: Program = singleton_top_layer.puzzle_for_singleton(
launcher_id,
adapted_puzzle,
)
full_solution: Program = singleton_top_layer.solution_for_singleton(
lineage_proof, singleton_child.amount, inner_solution
)
melt_coinsol = CoinSpend(
singleton_child,
puzzle_reveal,
full_solution,
)
await self.make_and_spend_bundle(
sim,
sim_client,
singleton_child,
delegated_puzzle,
[melt_coinsol],
)
melted_coin: Coin = (await sim.all_non_reward_coins())[0]
assert melted_coin.puzzle_hash == adapted_puzzle_hash
finally:
await sim.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_clvm_step.py | tests/clvm/test_clvm_step.py | from __future__ import annotations
from typing import Any, Optional
from unittest import TestCase
from clvm_tools_rs import start_clvm_program
factorial = (
"ff02ffff01ff02ff02ffff04ff02ffff04ff05ff80808080ffff04ffff01ff02"
+ "ffff03ffff09ff05ffff010180ffff01ff0101ffff01ff12ff05ffff02ff02ff"
+ "ff04ff02ffff04ffff11ff05ffff010180ff808080808080ff0180ff018080"
)
factorial_function_hash = "de3687023fa0a095d65396f59415a859dd46fc84ed00504bf4c9724fca08c9de"
factorial_sym = {factorial_function_hash: "factorial"}
class TestRunProgram(TestCase):
def test_simple_program_run(self) -> None:
p = start_clvm_program(factorial, "ff0580", factorial_sym)
last: Optional[Any] = None
location: Optional[Any] = None
while not p.is_ended():
step_result = p.step()
if step_result is not None:
last = step_result
self.assertTrue("Failure" not in last)
if "Operator-Location" in last:
location = last["Operator-Location"]
self.assertTrue(last is not None)
self.assertTrue(location is not None)
if last is not None and location is not None:
self.assertTrue("Final" in last)
self.assertEqual(int(last["Final"]), 120)
self.assertTrue(location.startswith("factorial"))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/config.py | tests/clvm/config.py | from __future__ import annotations
parallel = True
checkout_blocks_and_plots = False
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/__init__.py | tests/clvm/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_spend_sim.py | tests/clvm/test_spend_sim.py | import pytest
from blspy import G2Element
from flax.clvm.spend_sim import SpendSim, SimClient
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.program import Program
from flax.types.spend_bundle import SpendBundle
from flax.types.coin_spend import CoinSpend
class TestSpendSim:
@pytest.mark.asyncio
async def test_farming(self):
try:
sim = await SpendSim.create()
for i in range(0, 5):
await sim.farm_block()
assert len(sim.blocks) == 5
assert sim.blocks[-1].height == 4
assert sim.block_records[0].reward_claims_incorporated[0].amount == 18375000000000000000
finally:
await sim.close()
@pytest.mark.asyncio
async def test_rewind(self):
try:
sim = await SpendSim.create()
for i in range(0, 5):
await sim.farm_block()
save_height = sim.get_height()
await sim.farm_block()
await sim.rewind(save_height)
assert len(sim.blocks) == 5
assert sim.blocks[-1].height == 4
finally:
await sim.close()
class TestSimClient:
@pytest.mark.asyncio
async def test_all_endpoints(self):
try:
sim = await SpendSim.create()
sim_client = SimClient(sim)
for i in range(0, 5):
await sim.farm_block()
await sim.farm_block(bytes32([0] * 32))
await sim.farm_block(bytes32([1] * 32))
for i in range(0, 5):
await sim.farm_block()
# get_coin_records_by_puzzle_hash
coin_records = await sim_client.get_coin_records_by_puzzle_hash(bytes32([0] * 32))
coin_record_name = coin_records[0].coin.name()
assert len(coin_records) == 2
coin_records = await sim_client.get_coin_records_by_puzzle_hash(
bytes32([0] * 32), start_height=0, end_height=2
)
assert len(coin_records) == 0
# get_coin_records_by_puzzle_hashes
coin_records = await sim_client.get_coin_records_by_puzzle_hashes([bytes32([0] * 32), bytes32([1] * 32)])
assert len(coin_records) == 4
coin_records = await sim_client.get_coin_records_by_puzzle_hashes(
[bytes32([0] * 32), bytes32([1] * 32)], start_height=0, end_height=2
)
assert len(coin_records) == 0
# get_coin_record_by_name
assert await sim_client.get_coin_record_by_name(coin_record_name)
# get_block_records
block_records = await sim_client.get_block_records(0, 5)
assert len(block_records) == 5
# get_block_record_by_height
block_record = await sim_client.get_block_record_by_height(0)
assert block_record
assert block_record == block_records[0]
# get_block_record
same_block_record = await sim_client.get_block_record(block_record.header_hash)
assert same_block_record == block_record
# get_block
full_block = await sim_client.get_block(block_record.header_hash)
assert full_block.transactions_generator is None
# get_all_block
full_blocks = await sim_client.get_all_block(0, 5)
assert full_blocks[0] == full_block
# push_tx
puzzle_hash = bytes.fromhex(
"9dcf97a184f32623d11a73124ceb99a5709b083721e878a16d78f596718ba7b2"
) # Program.to(1)
await sim.farm_block(puzzle_hash)
spendable_coin = await sim_client.get_coin_records_by_puzzle_hash(puzzle_hash)
spendable_coin = spendable_coin[0].coin
bundle = SpendBundle(
[
CoinSpend(
spendable_coin,
Program.to(1),
Program.to([[51, puzzle_hash, 1]]),
)
],
G2Element(),
)
result, error = await sim_client.push_tx(bundle)
# get_all_mempool_tx_ids
mempool_items = await sim_client.get_all_mempool_tx_ids()
assert len(mempool_items) == 1
# get_mempool_item_by_tx_id
mempool_item = await sim_client.get_mempool_item_by_tx_id(mempool_items[0])
assert mempool_item
# get_all_mempool_items
mempool_items = await sim_client.get_all_mempool_items()
assert len(mempool_items) == 1
# get_additions_and_removals
await sim.farm_block()
latest_block = sim.block_records[-1]
additions, removals = await sim_client.get_additions_and_removals(latest_block.header_hash)
assert additions
assert removals
# get_puzzle_and_solution
coin_solution = await sim_client.get_puzzle_and_solution(spendable_coin.name(), latest_block.height)
assert coin_solution
# get_coin_records_by_parent_ids
new_coin = next(x.coin for x in additions if x.coin.puzzle_hash == puzzle_hash)
coin_records = await sim_client.get_coin_records_by_parent_ids([spendable_coin.name()])
assert coin_records[0].coin.name() == new_coin.name()
finally:
await sim.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_puzzle_compression.py | tests/clvm/test_puzzle_compression.py | from blspy import G1Element, G2Element
from typing import Dict
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.coin import Coin
from flax.types.spend_bundle import SpendBundle
from flax.types.coin_spend import CoinSpend
from flax.util.ints import uint64
from flax.wallet.trading.offer import OFFER_MOD
from flax.wallet.util.puzzle_compression import (
LATEST_VERSION,
lowest_best_version,
compress_object_with_puzzles,
decompress_object_with_puzzles,
)
from flax.wallet.cat_wallet.cat_utils import construct_cat_puzzle
from flax.wallet.puzzles.cat_loader import CAT_MOD
from flax.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
ZERO_32 = bytes32([0] * 32)
ONE_32 = bytes32([17] * 32)
COIN = Coin(ZERO_32, ZERO_32, uint64(0))
SOLUTION = Program.to([])
class TestPuzzleCompression:
compression_factors: Dict[str, float] = {}
def test_standard_puzzle(self):
coin_spend = CoinSpend(
COIN,
puzzle_for_pk(G1Element()),
SOLUTION,
)
compressed = compress_object_with_puzzles(bytes(coin_spend), LATEST_VERSION)
assert len(bytes(coin_spend)) > len(compressed)
assert coin_spend == CoinSpend.from_bytes(decompress_object_with_puzzles(compressed))
self.compression_factors["standard_puzzle"] = len(bytes(compressed)) / len(bytes(coin_spend))
def test_cat_puzzle(self):
coin_spend = CoinSpend(
COIN,
construct_cat_puzzle(CAT_MOD, Program.to([]).get_tree_hash(), Program.to(1)),
SOLUTION,
)
compressed = compress_object_with_puzzles(bytes(coin_spend), LATEST_VERSION)
assert len(bytes(coin_spend)) > len(compressed)
assert coin_spend == CoinSpend.from_bytes(decompress_object_with_puzzles(compressed))
self.compression_factors["cat_puzzle"] = len(bytes(compressed)) / len(bytes(coin_spend))
def test_offer_puzzle(self):
coin_spend = CoinSpend(
COIN,
OFFER_MOD,
SOLUTION,
)
compressed = compress_object_with_puzzles(bytes(coin_spend), LATEST_VERSION)
assert len(bytes(coin_spend)) > len(compressed)
assert coin_spend == CoinSpend.from_bytes(decompress_object_with_puzzles(compressed))
self.compression_factors["offer_puzzle"] = len(bytes(compressed)) / len(bytes(coin_spend))
def test_nesting_puzzles(self):
coin_spend = CoinSpend(
COIN,
construct_cat_puzzle(CAT_MOD, Program.to([]).get_tree_hash(), puzzle_for_pk(G1Element())),
SOLUTION,
)
compressed = compress_object_with_puzzles(bytes(coin_spend), LATEST_VERSION)
assert len(bytes(coin_spend)) > len(compressed)
assert coin_spend == CoinSpend.from_bytes(decompress_object_with_puzzles(compressed))
self.compression_factors["cat_w_standard_puzzle"] = len(bytes(compressed)) / len(bytes(coin_spend))
def test_unknown_wrapper(self):
unknown = Program.to([2, 2, []]) # (a 2 ())
coin_spend = CoinSpend(
COIN,
unknown.curry(puzzle_for_pk(G1Element())),
SOLUTION,
)
compressed = compress_object_with_puzzles(bytes(coin_spend), LATEST_VERSION)
assert len(bytes(coin_spend)) > len(compressed)
assert coin_spend == CoinSpend.from_bytes(decompress_object_with_puzzles(compressed))
self.compression_factors["unknown_and_standard"] = len(bytes(compressed)) / len(bytes(coin_spend))
def test_lowest_best_version(self):
assert lowest_best_version([bytes(CAT_MOD)]) == 4
assert lowest_best_version([bytes(OFFER_MOD)]) == 2
def test_version_override(self):
coin_spend = CoinSpend(
COIN,
OFFER_MOD,
SOLUTION,
)
spend_bundle = SpendBundle([coin_spend], G2Element())
compressed = compress_object_with_puzzles(bytes(spend_bundle), LATEST_VERSION)
compressed_earlier = compress_object_with_puzzles(bytes(spend_bundle), 1)
assert len(bytes(spend_bundle)) > len(bytes(compressed))
assert spend_bundle == SpendBundle.from_bytes(decompress_object_with_puzzles(compressed))
assert spend_bundle == SpendBundle.from_bytes(decompress_object_with_puzzles(compressed_earlier))
assert len(bytes(compressed_earlier)) > len(bytes(compressed))
def test_compression_factors(self):
import json
import logging
log = logging.getLogger(__name__)
log.warning(json.dumps(self.compression_factors))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/clvm/test_flaxlisp_deserialization.py | tests/clvm/test_flaxlisp_deserialization.py | from unittest import TestCase
from flax.types.blockchain_format.program import Program, INFINITE_COST
from flax.util.byte_types import hexstr_to_bytes
from flax.wallet.puzzles.load_clvm import load_clvm
DESERIALIZE_MOD = load_clvm("flaxlisp_deserialisation.clvm", package_or_requirement="flax.wallet.puzzles")
def serialized_atom_overflow(size):
if size == 0:
size_blob = b"\x80"
elif size < 0x40:
size_blob = bytes([0x80 | size])
elif size < 0x2000:
size_blob = bytes([0xC0 | (size >> 8), (size >> 0) & 0xFF])
elif size < 0x100000:
size_blob = bytes([0xE0 | (size >> 16), (size >> 8) & 0xFF, (size >> 0) & 0xFF])
elif size < 0x8000000:
size_blob = bytes(
[
0xF0 | (size >> 24),
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
elif size < 0x400000000:
size_blob = bytes(
[
0xF8 | (size >> 32),
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
else:
size_blob = bytes(
[
0xFC | ((size >> 40) & 0xFF),
(size >> 32) & 0xFF,
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
extra_str = "01" * 1000
return size_blob.hex() + extra_str
class TestClvmNativeDeserialization(TestCase):
"""
Test clvm deserialization done from within the clvm
"""
def test_deserialization_simple_list(self):
# ("hello" "friend")
b = hexstr_to_bytes("ff8568656c6c6fff86667269656e6480")
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_password_coin(self):
# (i (= (sha256 2) (q 0x2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824)) (c (q 51) (c 5 (c (q 100) (q ())))) (q "wrong password")) # noqa
b = hexstr_to_bytes(
"ff04ffff0affff0bff0280ffff01ffa02cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b98248080ffff05ffff01ff3380ffff05ff05ffff05ffff01ff6480ffff01ff8080808080ffff01ff8e77726f6e672070617373776f72648080" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_large_numbers(self):
# '(99999999999999999999999999999999999999999999999999999999999999999 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -99999999999999999999999999999999999999999999999999999999999999999999999999999)' # noqa
b = hexstr_to_bytes(
"ff9c00f316271c7fc3908a8bef464e3945ef7a253609ffffffffffffffffffb00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ff22ea0179500526edb610f148ec0c614155678491902d6000000000000000000180" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_overflow_atoms(self):
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x3FFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x1FFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/fee_estimation/test_fee_estimation_unit_tests.py | tests/fee_estimation/test_fee_estimation_unit_tests.py | from __future__ import annotations
import logging
from typing import List
from chia_rs import Coin
from flax.consensus.cost_calculator import NPCResult
from flax.full_node.bitcoin_fee_estimator import create_bitcoin_fee_estimator
from flax.full_node.fee_estimation import FeeBlockInfo
from flax.full_node.fee_estimator_interface import FeeEstimatorInterface
from flax.simulator.block_tools import test_constants
from flax.simulator.wallet_tools import WalletTool
from flax.types.clvm_cost import CLVMCost
from flax.types.fee_rate import FeeRate
from flax.types.mempool_item import MempoolItem
from flax.types.mojos import Mojos
from flax.util.ints import uint32, uint64
log = logging.getLogger(__name__)
def test_interface() -> None:
max_block_cost_clvm = uint64(1000 * 1000)
estimator: FeeEstimatorInterface = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
target_times = [0, 120, 300]
estimates = [estimator.estimate_fee_rate(time_offset_seconds=time) for time in target_times]
current_fee_rate = estimator.estimate_fee_rate(
time_offset_seconds=1,
)
zero = FeeRate(uint64(0))
assert estimates == [zero, zero, zero]
assert current_fee_rate.mojos_per_clvm_cost == 0
def test_estimator_create() -> None:
max_block_cost_clvm = uint64(1000 * 1000)
estimator = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
assert estimator is not None
def test_single_estimate() -> None:
max_block_cost_clvm = uint64(1000 * 1000)
estimator = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
height = uint32(1)
estimator.new_block(FeeBlockInfo(height, []))
fee_rate = estimator.estimate_fee_rate(time_offset_seconds=40 * height)
assert fee_rate.mojos_per_clvm_cost == 0
def make_block(
wallet_tool: WalletTool, height: uint32, num_tx: int, cost: uint64, fee: uint64, num_blocks_wait_in_mempool: int
) -> List[MempoolItem]:
items = []
ph = wallet_tool.get_new_puzzlehash()
coin = Coin(ph, ph, uint64(10000))
spend_bundle = wallet_tool.generate_signed_transaction(uint64(10000), ph, coin)
for n in range(num_tx):
block_included = uint32(height - num_blocks_wait_in_mempool)
mempool_item = MempoolItem(
spend_bundle, fee, NPCResult(None, None, cost), cost, spend_bundle.name(), [], [], block_included
)
items.append(mempool_item)
return items
def test_steady_fee_pressure() -> None:
"""
We submit successive blocks containing transactions with identical FeeRates.
We expect the estimator to converge on this FeeRate value.
"""
max_block_cost_clvm = uint64(1000 * 1000)
estimator = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
wallet_tool = WalletTool(test_constants)
cost = uint64(5000000)
fee = uint64(10000000)
num_blocks_wait_in_mempool = 5
start = 100
end = 300
estimates_during = []
for height in range(start, end):
height = uint32(height)
items = make_block(wallet_tool, height, 1, cost, fee, num_blocks_wait_in_mempool)
estimator.new_block(FeeBlockInfo(uint32(height), items))
estimates_during.append(estimator.estimate_fee_rate(time_offset_seconds=40 * height))
est = estimator.estimate_fee_rate(time_offset_seconds=240)
e = []
for seconds in range(30, 5 * 60, 30):
est2 = estimator.estimate_fee_rate(time_offset_seconds=seconds)
e.append(est2)
assert est == FeeRate.create(Mojos(fee), CLVMCost(cost))
estimates_after = [estimator.estimate_fee_rate(time_offset_seconds=40 * height) for height in range(start, end)]
block_estimates = [estimator.estimate_fee_rate_for_block(uint32(h)) for h in range(start, end)]
assert estimates_during == estimates_after
assert estimates_after == block_estimates
def test_fee_estimation_inception() -> None:
"""
Confirm that estimates are given only for blocks farther out than the smallest
transaction block wait time we have observed.
"""
max_block_cost_clvm = uint64(1000 * 1000)
estimator1 = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
wallet_tool = WalletTool(test_constants)
cost = uint64(5000000)
fee = uint64(10000000)
start = 100
end = 300
for height in range(start, end):
height = uint32(height)
# Transactions will wait in the mempool for 1 block
items = make_block(wallet_tool, height, 1, cost, fee, num_blocks_wait_in_mempool=1)
estimator1.new_block(FeeBlockInfo(uint32(height), items))
e = []
for seconds in range(40, 5 * 60, 40):
est = estimator1.estimate_fee_rate(time_offset_seconds=seconds)
e.append(est.mojos_per_clvm_cost)
# Confirm that estimates are available for near blocks
assert e == [2, 2, 2, 2, 2, 2, 2]
##########################################################
estimator5 = create_bitcoin_fee_estimator(max_block_cost_clvm, log)
for height in range(start, end):
height = uint32(height)
# Transactions will wait in the mempool for 5 blocks
items = make_block(wallet_tool, height, 1, cost, fee, num_blocks_wait_in_mempool=5)
estimator5.new_block(FeeBlockInfo(uint32(height), items))
e1 = []
for seconds in range(40, 5 * 60, 40):
est = estimator5.estimate_fee_rate(time_offset_seconds=seconds)
e1.append(est.mojos_per_clvm_cost)
# Confirm that estimates start after block 4
assert e1 == [0, 0, 0, 2, 2, 2, 2]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/fee_estimation/test_fee_estimation_rpc.py | tests/fee_estimation/test_fee_estimation_rpc.py | from __future__ import annotations
from typing import List, Tuple
import pytest
import pytest_asyncio
from flax.full_node.full_node import FullNode
from flax.rpc.full_node_rpc_api import FullNodeRpcApi
from flax.rpc.full_node_rpc_client import FullNodeRpcClient
from flax.server.start_service import Service
from flax.simulator.block_tools import BlockTools
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.wallet_tools import WalletTool
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64
from flax.wallet.wallet_node import WalletNode
@pytest_asyncio.fixture(scope="function")
async def setup_node_and_rpc(
two_wallet_nodes_services: Tuple[List[Service[FullNode]], List[Service[WalletNode]], BlockTools],
) -> Tuple[FullNodeRpcClient, FullNodeRpcApi]:
full_nodes, wallets, bt = two_wallet_nodes_services
wallet = wallets[0]._node.wallet_state_manager.main_wallet
full_node_apis = [full_node_service._api for full_node_service in full_nodes]
full_node_api = full_node_apis[0]
full_node_service_1 = full_nodes[0]
assert full_node_service_1.rpc_server is not None
client = await FullNodeRpcClient.create(
bt.config["self_hostname"],
full_node_service_1.rpc_server.listen_port,
full_node_service_1.root_path,
full_node_service_1.config,
)
full_node_rpc_api = FullNodeRpcApi(full_node_api.full_node)
ph = await wallet.get_new_puzzlehash()
for i in range(4):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
return client, full_node_rpc_api
@pytest.mark.asyncio
async def test_get_blockchain_state(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
# Confirm full node setup correctly
client, _ = setup_node_and_rpc
response = await client.get_blockchain_state()
assert response["genesis_challenge_initialized"] is True
@pytest.mark.asyncio
async def test_empty_request(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({})
@pytest.mark.asyncio
async def test_no_target_times(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"cost": 1})
@pytest.mark.asyncio
async def test_negative_time(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"cost": 1, "target_times": [-1]})
@pytest.mark.asyncio
async def test_negative_cost(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"cost": -1, "target_times": [1]})
@pytest.mark.asyncio
async def test_no_cost_or_tx(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"target_times": []})
@pytest.mark.asyncio
async def test_both_cost_and_tx(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"target_times": [], "cost": 1, "spend_bundle": "80"})
@pytest.mark.asyncio
async def test_target_times_invalid_type(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(TypeError):
await full_node_rpc_api.get_fee_estimate({"target_times": 1, "cost": 1})
@pytest.mark.asyncio
async def test_cost_invalid_type(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(ValueError):
await full_node_rpc_api.get_fee_estimate({"target_times": [], "cost": "a lot"})
@pytest.mark.asyncio
async def test_tx_invalid_type(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
with pytest.raises(TypeError):
await full_node_rpc_api.get_fee_estimate({"target_times": [], "spend_bundle": 1})
#####################
@pytest.mark.asyncio
async def test_empty_target_times(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
response = await full_node_rpc_api.get_fee_estimate({"target_times": [], "cost": 1})
assert response["estimates"] == []
assert response["target_times"] == []
@pytest.mark.asyncio
async def test_cost(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
response = await full_node_rpc_api.get_fee_estimate({"target_times": [1], "cost": 1})
assert response["estimates"] == [0]
assert response["target_times"] == [1]
@pytest.mark.asyncio
async def test_tx(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi], bt: BlockTools) -> None:
client, full_node_rpc_api = setup_node_and_rpc
wallet_a: WalletTool = bt.get_pool_wallet_tool()
my_puzzle_hash = wallet_a.get_new_puzzlehash()
recevier_puzzle_hash = bytes32(b"0" * 32)
coin_to_spend = Coin(bytes32(b"0" * 32), my_puzzle_hash, uint64(1750000000000))
spend_bundle = wallet_a.generate_signed_transaction(
uint64(coin_to_spend.amount), recevier_puzzle_hash, coin_to_spend
)
response = await full_node_rpc_api.get_fee_estimate(
{"target_times": [1], "spend_bundle": spend_bundle.to_json_dict()}
)
assert response["estimates"] == [0]
assert response["target_times"] == [1]
@pytest.mark.asyncio
async def test_multiple(setup_node_and_rpc: Tuple[FullNodeRpcClient, FullNodeRpcApi]) -> None:
client, full_node_rpc_api = setup_node_and_rpc
response = await full_node_rpc_api.get_fee_estimate({"target_times": [1, 5, 10, 15, 60, 120, 180, 240], "cost": 1})
assert response["estimates"] == [0, 0, 0, 0, 0, 0, 0, 0]
assert response["target_times"] == [1, 5, 10, 15, 60, 120, 180, 240]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/fee_estimation/cmdline_test.py | tests/fee_estimation/cmdline_test.py | from __future__ import annotations
import subprocess
import sysconfig
from pathlib import Path
from typing import Tuple, Union
from click.testing import CliRunner
from flax.cmds.flax import cli
from flax.full_node.full_node_api import FullNodeAPI
from flax.server.server import FlaxServer
from flax.simulator.block_tools import BlockTools
from flax.simulator.full_node_simulator import FullNodeSimulator
def test_print_fee_info_cmd(
one_node_one_block: Tuple[Union[FullNodeAPI, FullNodeSimulator], FlaxServer, BlockTools]
) -> None:
_, _, _ = one_node_one_block
scripts_path = Path(sysconfig.get_path("scripts"))
subprocess.run([scripts_path.joinpath("flax"), "show", "-f"], check=True)
def test_show_fee_info(
one_node_one_block: Tuple[Union[FullNodeAPI, FullNodeSimulator], FlaxServer, BlockTools]
) -> None:
_, _, _ = one_node_one_block
runner = CliRunner()
result = runner.invoke(cli, ["show", "-f"])
assert result.exit_code == 0
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/fee_estimation/__init__.py | tests/fee_estimation/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/weight_proof/test_weight_proof.py | tests/weight_proof/test_weight_proof.py | import sys
from typing import Dict, List, Optional, Tuple
import aiosqlite
import pytest
from flax.consensus.block_record import BlockRecord
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.consensus.full_block_to_block_record import block_to_block_record
from flax.full_node.block_store import BlockStore
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.util.block_cache import BlockCache
from flax.simulator.block_tools import test_constants
from flax.util.generator_tools import get_block_header
from flax.consensus.pot_iterations import calculate_iterations_quality
from flax.full_node.weight_proof import (
WeightProofHandler,
_map_sub_epoch_summaries,
_validate_summaries_weight,
)
from flax.types.full_block import FullBlock
from flax.types.header_block import HeaderBlock
from flax.util.ints import uint32, uint64
def count_sub_epochs(blockchain, last_hash) -> int:
curr = blockchain._sub_blocks[last_hash]
count = 0
while True:
if curr.height == 0:
break
# next sub block
curr = blockchain._sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
count += 1
return count
def get_prev_ses_block(sub_blocks, last_hash) -> Tuple[BlockRecord, int]:
curr = sub_blocks[last_hash]
blocks = 1
while curr.height != 0:
# next sub block
curr = sub_blocks[curr.prev_hash]
# if end of sub-epoch
if curr.sub_epoch_summary_included is not None:
return curr, blocks
blocks += 1
assert False
async def load_blocks_dont_validate(
blocks,
) -> Tuple[
Dict[bytes32, HeaderBlock], Dict[uint32, bytes32], Dict[bytes32, BlockRecord], Dict[uint32, SubEpochSummary]
]:
header_cache: Dict[bytes32, HeaderBlock] = {}
height_to_hash: Dict[uint32, bytes32] = {}
sub_blocks: Dict[bytes32, BlockRecord] = {}
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
prev_block = None
difficulty = test_constants.DIFFICULTY_STARTING
block: FullBlock
for block in blocks:
if block.height > 0:
assert prev_block is not None
difficulty = block.reward_chain_block.weight - prev_block.weight
if block.reward_chain_block.challenge_chain_sp_vdf is None:
assert block.reward_chain_block.signage_point_index == 0
cc_sp: bytes32 = block.reward_chain_block.pos_ss_cc_challenge_hash
else:
cc_sp = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
quality_string: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
test_constants,
block.reward_chain_block.pos_ss_cc_challenge_hash,
cc_sp,
)
assert quality_string is not None
required_iters: uint64 = calculate_iterations_quality(
test_constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp,
)
sub_block = block_to_block_record(
test_constants,
BlockCache(sub_blocks, height_to_hash=height_to_hash),
required_iters,
block,
None,
)
sub_blocks[block.header_hash] = sub_block
height_to_hash[block.height] = block.header_hash
header_cache[block.header_hash] = get_block_header(block, [], [])
if sub_block.sub_epoch_summary_included is not None:
sub_epoch_summaries[block.height] = sub_block.sub_epoch_summary_included
prev_block = block
return header_cache, height_to_hash, sub_blocks, sub_epoch_summaries
async def _test_map_summaries(blocks, header_cache, height_to_hash, sub_blocks, summaries):
curr = sub_blocks[blocks[-1].header_hash]
orig_summaries: Dict[int, SubEpochSummary] = {}
while curr.height > 0:
if curr.sub_epoch_summary_included is not None:
orig_summaries[curr.height] = curr.sub_epoch_summary_included
# next sub block
curr = sub_blocks[curr.prev_hash]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
# sub epoch summaries validate hashes
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
test_constants.SUB_EPOCH_BLOCKS,
test_constants.GENESIS_CHALLENGE,
wp.sub_epochs,
test_constants.DIFFICULTY_STARTING,
)
assert len(summaries) == len(orig_summaries)
class TestWeightProof:
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_1(self, default_400_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_400_blocks)
await _test_map_summaries(default_400_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_map_summaries_2(self, default_1000_blocks):
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(default_1000_blocks)
await _test_map_summaries(default_1000_blocks, header_cache, height_to_hash, sub_blocks, summaries)
@pytest.mark.asyncio
async def test_weight_proof_summaries_1000_blocks(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
summaries, sub_epoch_data_weight, _ = _map_sub_epoch_summaries(
wpf.constants.SUB_EPOCH_BLOCKS,
wpf.constants.GENESIS_CHALLENGE,
wp.sub_epochs,
wpf.constants.DIFFICULTY_STARTING,
)
assert _validate_summaries_weight(test_constants, sub_epoch_data_weight, summaries, wp)
# assert res is not None
@pytest.mark.asyncio
async def test_weight_proof_bad_peak_hash(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(b"sadgfhjhgdgsfadfgh")
assert wp is None
@pytest.mark.asyncio
@pytest.mark.skip(reason="broken")
async def test_weight_proof_from_genesis(self, default_400_blocks):
blocks = default_400_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
@pytest.mark.asyncio
async def test_weight_proof_edge_cases(self, bt, default_400_blocks):
blocks: List[FullBlock] = default_400_blocks
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=1
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=2
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_icc_eos=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_ip=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
skip_slots=4,
normalized_to_identity_cc_sp=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
1, block_list_input=blocks, seed=b"asdfghjkl", force_overflow=True, skip_slots=4
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=True,
)
blocks: List[FullBlock] = bt.get_consecutive_blocks(
300,
block_list_input=blocks,
seed=b"asdfghjkl",
force_overflow=False,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_pre_genesis_empty_slots(self, pre_genesis_empty_slots_1000_blocks):
blocks = pre_genesis_empty_slots_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000__blocks_compact(self, default_10000_blocks_compact):
blocks = default_10000_blocks_compact
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof1000_partial_blocks_compact(self, bt, default_10000_blocks_compact):
blocks: List[FullBlock] = bt.get_consecutive_blocks(
100,
block_list_input=default_10000_blocks_compact,
seed=b"asdfghjkl",
normalized_to_identity_cc_ip=True,
normalized_to_identity_cc_eos=True,
normalized_to_identity_icc_eos=True,
)
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof10000(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
assert wp is not None
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, {}, height_to_hash, {}))
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_check_num_of_samples(self, default_10000_blocks):
blocks = default_10000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf.get_proof_of_weight(blocks[-1].header_hash)
curr = -1
samples = 0
for sub_epoch_segment in wp.sub_epoch_segments:
if sub_epoch_segment.sub_epoch_n > curr:
curr = sub_epoch_segment.sub_epoch_n
samples += 1
assert samples <= wpf.MAX_SAMPLES
@pytest.mark.asyncio
async def test_weight_proof_extend_no_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height].header_hash)
assert wp is not None
# todo for each sampled sub epoch, validate number of segments
wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
@pytest.mark.asyncio
async def test_weight_proof_extend_new_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
# delete last summary
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
del summaries[last_ses_height]
wpf_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wp = await wpf_synced.get_proof_of_weight(blocks[last_ses_height - 10].header_hash)
assert wp is not None
wpf_not_synced = WeightProofHandler(test_constants, BlockCache(sub_blocks, height_to_hash, header_cache, {}))
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
summaries[last_ses_height] = last_ses
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
wpf_synced.blockchain = BlockCache(sub_blocks, header_cache, height_to_hash, summaries)
new_wp = await wpf_synced._create_proof_of_weight(blocks[last_ses_height].header_hash)
valid, fork_point, _ = await wpf_not_synced.validate_weight_proof(new_wp)
assert valid
assert fork_point == 0
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.asyncio
async def test_weight_proof_extend_multiple_ses(self, default_1000_blocks):
blocks = default_1000_blocks
header_cache, height_to_hash, sub_blocks, summaries = await load_blocks_dont_validate(blocks)
last_ses_height = sorted(summaries.keys())[-1]
last_ses = summaries[last_ses_height]
before_last_ses_height = sorted(summaries.keys())[-2]
before_last_ses = summaries[before_last_ses_height]
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
wpf_verify = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, {}))
for x in range(10, -1, -1):
wp = await wpf.get_proof_of_weight(blocks[before_last_ses_height - x].header_hash)
assert wp is not None
valid, fork_point, _ = await wpf_verify.validate_weight_proof(wp)
assert valid
assert fork_point == 0
# extend proof with 100 blocks
summaries[last_ses_height] = last_ses
summaries[before_last_ses_height] = before_last_ses
wpf = WeightProofHandler(test_constants, BlockCache(sub_blocks, header_cache, height_to_hash, summaries))
new_wp = await wpf._create_proof_of_weight(blocks[-1].header_hash)
valid, fork_point, _ = await wpf.validate_weight_proof(new_wp)
assert valid
assert fork_point != 0
@pytest.mark.skip("used for debugging")
@pytest.mark.asyncio
async def test_weight_proof_from_database(self):
connection = await aiosqlite.connect("path to db")
block_store: BlockStore = await BlockStore.create(connection)
blocks = await block_store.get_block_records_in_range(0, 0xFFFFFFFF)
peak = len(blocks) - 1
peak_height = blocks[peak].height
headers = await block_store.get_header_blocks_in_range(0, peak_height)
sub_height_to_hash = {}
sub_epoch_summaries = {}
# peak_header = await block_store.get_full_blocks_at([peak_height])
if len(blocks) == 0:
return None, None
assert peak is not None
# Sets the other state variables (peak_height and height_to_hash)
curr: BlockRecord = blocks[peak]
while True:
sub_height_to_hash[curr.height] = curr.header_hash
if curr.sub_epoch_summary_included is not None:
sub_epoch_summaries[curr.height] = curr.sub_epoch_summary_included
if curr.height == 0:
break
curr = blocks[curr.prev_hash]
assert len(sub_height_to_hash) == peak_height + 1
block_cache = BlockCache(blocks, headers, sub_height_to_hash, sub_epoch_summaries)
wpf = WeightProofHandler(DEFAULT_CONSTANTS, block_cache)
wp = await wpf._create_proof_of_weight(sub_height_to_hash[peak_height - 50])
valid, fork_point = wpf.validate_weight_proof_single_proc(wp)
await connection.close()
assert valid
print(f"size of proof is {get_size(wp)}")
def get_size(obj, seen=None):
"""Recursively finds size of objects"""
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_size(v, seen) for v in obj.values()])
size += sum([get_size(k, seen) for k in obj.keys()])
elif hasattr(obj, "__dict__"):
size += get_size(obj.__dict__, seen)
elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_size(i, seen) for i in obj])
return size
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/weight_proof/config.py | tests/weight_proof/config.py | from __future__ import annotations
parallel = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/weight_proof/__init__.py | tests/weight_proof/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/plotting/util.py | tests/plotting/util.py | from __future__ import annotations
from pathlib import Path
from typing import List
from flax.simulator.block_tools import get_plot_dir
def get_test_plots(sub_dir: str = "") -> List[Path]:
path = get_plot_dir()
if sub_dir != "":
path = path / sub_dir
return list(sorted(path.glob("*.plot")))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/plotting/config.py | tests/plotting/config.py | from __future__ import annotations
parallel = True
install_timelord = False
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/plotting/__init__.py | tests/plotting/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/plotting/test_plot_manager.py | tests/plotting/test_plot_manager.py | from __future__ import annotations
import logging
import sys
import time
from dataclasses import dataclass, replace
from os import unlink
from pathlib import Path
from shutil import copy, move
from typing import Callable, Iterator, List, Optional
import pytest
from blspy import G1Element
from flax.plotting.cache import CURRENT_VERSION, CacheDataV1
from flax.plotting.manager import Cache, PlotManager
from flax.plotting.util import (
PlotInfo,
PlotRefreshEvents,
PlotRefreshResult,
add_plot_directory,
get_plot_directories,
remove_plot,
remove_plot_directory,
)
from flax.simulator.block_tools import get_plot_dir
from flax.simulator.time_out_assert import time_out_assert
from flax.util.config import create_default_flax_config, lock_and_load_config, save_config
from flax.util.ints import uint16, uint32
from flax.util.misc import VersionedBlob
from tests.plotting.util import get_test_plots
log = logging.getLogger(__name__)
@dataclass
class MockDiskProver:
filename: str
def get_filename(self) -> str:
return self.filename
@dataclass
class MockPlotInfo:
prover: MockDiskProver
class Directory:
path: Path
plots: List[Path]
def __init__(self, path: Path, plots_origin: List[Path]):
self.path = path
path.mkdir(parents=True, exist_ok=True)
# Drop the existing files in the test directories
for plot in path.iterdir():
unlink(plot)
# Copy over the original plots
for plot in plots_origin:
if not Path(path / plot.name).exists():
copy(plot, path)
# Adjust the paths to reflect the testing plots
self.plots = [path / plot.name for plot in plots_origin]
def __len__(self):
return len(self.plots)
def plot_info_list(self) -> List[MockPlotInfo]:
return [MockPlotInfo(MockDiskProver(str(x))) for x in self.plots]
def path_list(self) -> List[Path]:
return self.plots
def drop(self, path: Path):
assert self.path / path.name
del self.plots[self.plots.index(self.path / path.name)]
class PlotRefreshTester:
plot_manager: PlotManager
expected_result: PlotRefreshResult
expected_result_matched: bool
def __init__(self, root_path: Path):
self.plot_manager = PlotManager(root_path, self.refresh_callback)
# Set a very high refresh interval here to avoid unintentional refresh cycles
self.plot_manager.refresh_parameter = replace(
self.plot_manager.refresh_parameter, interval_seconds=uint32(10000)
)
# Set to the current time to avoid automated refresh after we start below.
self.plot_manager.last_refresh_time = time.time()
self.plot_manager.start_refreshing()
def refresh_callback(self, event: PlotRefreshEvents, refresh_result: PlotRefreshResult):
if event != PlotRefreshEvents.done:
# Only validate the final results for this tests
return
for name in ["loaded", "removed", "processed", "remaining"]:
try:
actual_value = refresh_result.__getattribute__(name)
if type(actual_value) == list:
expected_list = self.expected_result.__getattribute__(name)
if len(expected_list) != len(actual_value):
return
values_found = 0
for value in actual_value:
if type(value) == PlotInfo:
for plot_info in expected_list:
if plot_info.prover.get_filename() == value.prover.get_filename():
values_found += 1
continue
else:
if value in expected_list:
values_found += 1
continue
if values_found != len(expected_list):
log.error(f"{name} invalid: values_found {values_found} expected {len(expected_list)}")
return
else:
expected_value = self.expected_result.__getattribute__(name)
if actual_value != expected_value:
log.error(f"{name} invalid: actual {actual_value} expected {expected_value}")
return
except AttributeError as error:
log.error(f"{error}")
return
self.expected_result_matched = True
async def run(self, expected_result: PlotRefreshResult):
self.expected_result = expected_result
self.expected_result_matched = False
self.plot_manager.trigger_refresh()
await time_out_assert(5, self.plot_manager.needs_refresh, value=False)
assert self.expected_result_matched
@dataclass
class Environment:
root_path: Path
refresh_tester: PlotRefreshTester
dir_1: Directory
dir_2: Directory
@pytest.fixture(scope="function")
def environment(tmp_path, bt) -> Iterator[Environment]:
dir_1_count: int = 7
dir_2_count: int = 3
plots: List[Path] = get_test_plots()
assert len(plots) >= dir_1_count + dir_2_count
dir_1: Directory = Directory(tmp_path / "plots" / "1", plots[0:dir_1_count])
dir_2: Directory = Directory(tmp_path / "plots" / "2", plots[dir_1_count : dir_1_count + dir_2_count])
create_default_flax_config(tmp_path)
refresh_tester = PlotRefreshTester(tmp_path)
refresh_tester.plot_manager.set_public_keys(bt.plot_manager.farmer_public_keys, bt.plot_manager.pool_public_keys)
yield Environment(tmp_path, refresh_tester, dir_1, dir_2)
refresh_tester.plot_manager.stop_refreshing()
# Wrap `remove_plot` to give it the same interface as the other triggers, e.g. `add_plot_directory(Path, str)`.
def trigger_remove_plot(_: Path, plot_path: str):
remove_plot(Path(plot_path))
@pytest.mark.asyncio
async def test_plot_refreshing(environment):
env: Environment = environment
expected_result = PlotRefreshResult()
dir_duplicates: Directory = Directory(get_plot_dir().resolve() / "duplicates", env.dir_1.plots)
async def run_test_case(
*,
trigger: Callable,
test_path: Path,
expect_loaded: List[MockPlotInfo],
expect_removed: List[Path],
expect_processed: int,
expect_duplicates: int,
expected_directories: int,
expect_total_plots: int,
):
expected_result.loaded = expect_loaded
expected_result.removed = expect_removed
expected_result.processed = expect_processed
trigger(env.root_path, str(test_path))
assert len(get_plot_directories(env.root_path)) == expected_directories
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.plots) == expect_total_plots
assert len(env.refresh_tester.plot_manager.get_duplicates()) == expect_duplicates
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 0
# Add dir_1
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_1.path,
expect_loaded=env.dir_1.plot_info_list(),
expect_removed=[],
expect_processed=len(env.dir_1),
expect_duplicates=0,
expected_directories=1,
expect_total_plots=len(env.dir_1),
)
# Add dir_2
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_2.path,
expect_loaded=env.dir_2.plot_info_list(),
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Add dir_duplicates
await run_test_case(
trigger=add_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
for item in dir_duplicates.path.iterdir():
assert item.is_file() and item in env.refresh_tester.plot_manager.get_duplicates()
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the numbers match
# the expected below
drop_path = dir_duplicates.plots[0]
dir_duplicates.drop(drop_path)
# Delete one duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the numbers match
# the expected below
drop_path = env.dir_1.plots[0]
env.dir_1.drop(drop_path)
# Delete one duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=[],
expect_removed=[drop_path],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove directory with the duplicates
await run_test_case(
trigger=remove_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
for item in dir_duplicates.path.iterdir():
assert item.is_file() and item not in env.refresh_tester.plot_manager.get_duplicates()
# Re-add the directory with the duplicates for other tests
await run_test_case(
trigger=add_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove dir_1 from which the duplicated plots are loaded. This removes the duplicates of dir_1
# and in the same run loads them from dir_duplicates.
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_1.path,
expect_loaded=dir_duplicates.plot_info_list(),
expect_removed=env.dir_1.path_list(),
expect_processed=len(env.dir_2) + len(dir_duplicates),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_2) + len(dir_duplicates),
)
# Re-add the directory. Now the plot seen as duplicate is from dir_1, not from dir_duplicates like before
await run_test_case(
trigger=add_plot_directory,
test_path=env.dir_1.path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(dir_duplicates),
expected_directories=3,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Drop the duplicated plot we remove in the next test case from the test directory upfront so that the numbers match
# the expected below
drop_path = env.dir_1.plots[2]
env.dir_1.drop(drop_path)
# Remove the duplicated plot
await run_test_case(
trigger=trigger_remove_plot,
test_path=drop_path,
expect_loaded=[],
expect_removed=[],
expect_processed=len(env.dir_1) + len(env.dir_2) + len(dir_duplicates),
expect_duplicates=len(env.dir_1),
expected_directories=3,
expect_total_plots=len(env.dir_2) + len(dir_duplicates),
)
# Remove dir_duplicates, this drops the duplicates and loads all plots from dir_1
await run_test_case(
trigger=remove_plot_directory,
test_path=dir_duplicates.path,
expect_loaded=env.dir_1.plot_info_list(),
expect_removed=dir_duplicates.path_list(),
expect_processed=len(env.dir_1) + len(env.dir_2),
expect_duplicates=0,
expected_directories=2,
expect_total_plots=len(env.dir_1) + len(env.dir_2),
)
# Remove dir_2
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_2.path,
expect_loaded=[],
expect_removed=env.dir_2.path_list(),
expect_processed=len(env.dir_1),
expect_duplicates=0,
expected_directories=1,
expect_total_plots=len(env.dir_1),
)
# Remove dir_1
await run_test_case(
trigger=remove_plot_directory,
test_path=env.dir_1.path,
expect_loaded=[],
expect_removed=env.dir_1.path_list(),
expect_processed=0,
expect_duplicates=0,
expected_directories=0,
expect_total_plots=0,
)
@pytest.mark.asyncio
async def test_initial_refresh_flag(environment: Environment) -> None:
env: Environment = environment
assert env.refresh_tester.plot_manager.initial_refresh()
for _ in range(2):
await env.refresh_tester.run(PlotRefreshResult())
assert not env.refresh_tester.plot_manager.initial_refresh()
env.refresh_tester.plot_manager.reset()
assert env.refresh_tester.plot_manager.initial_refresh()
@pytest.mark.asyncio
async def test_invalid_plots(environment):
env: Environment = environment
expected_result = PlotRefreshResult()
# Test re-trying if processing a plot failed
# First create a backup of the plot
retry_test_plot = env.dir_1.path_list()[0].resolve()
retry_test_plot_save = Path(env.dir_1.path / ".backup").resolve()
copy(retry_test_plot, retry_test_plot_save)
# Invalidate the plot
with open(retry_test_plot, "r+b") as file:
file.write(bytes(100))
# Add it and validate it fails to load
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = env.dir_1.plot_info_list()[1:]
expected_result.removed = []
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 1
assert retry_test_plot in env.refresh_tester.plot_manager.failed_to_open_filenames
# Give it a non .plot ending and make sure it gets removed from the invalid list on the next refresh
retry_test_plot_unload = Path(env.dir_1.path / ".unload").resolve()
move(retry_test_plot, retry_test_plot_unload)
expected_result.processed -= 1
expected_result.loaded = []
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 0
assert retry_test_plot not in env.refresh_tester.plot_manager.failed_to_open_filenames
# Recover the name and make sure it reappears in the invalid list
move(retry_test_plot_unload, retry_test_plot)
expected_result.processed += 1
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 1
assert retry_test_plot in env.refresh_tester.plot_manager.failed_to_open_filenames
# Make sure the file stays in `failed_to_open_filenames` and doesn't get loaded in the next refresh cycle
expected_result.loaded = []
expected_result.processed = len(env.dir_1)
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 1
assert retry_test_plot in env.refresh_tester.plot_manager.failed_to_open_filenames
# Now decrease the re-try timeout, restore the valid plot file and make sure it properly loads now
env.refresh_tester.plot_manager.refresh_parameter = replace(
env.refresh_tester.plot_manager.refresh_parameter, retry_invalid_seconds=uint32(0)
)
move(retry_test_plot_save, retry_test_plot)
expected_result.loaded = env.dir_1.plot_info_list()[0:1]
expected_result.processed = len(env.dir_1)
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 0
assert retry_test_plot not in env.refresh_tester.plot_manager.failed_to_open_filenames
@pytest.mark.asyncio
async def test_keys_missing(environment: Environment) -> None:
env: Environment = environment
not_in_keychain_plots: List[Path] = get_test_plots("not_in_keychain")
dir_not_in_keychain: Directory = Directory(env.root_path / "plots" / "not_in_keychain", not_in_keychain_plots)
expected_result = PlotRefreshResult()
# The plots in "not_in_keychain" directory have infinity g1 elements as farmer/pool key so they should be plots
# with missing keys for now
add_plot_directory(env.root_path, str(dir_not_in_keychain.path))
expected_result.loaded = []
expected_result.removed = []
expected_result.processed = len(dir_not_in_keychain)
expected_result.remaining = 0
for i in range(2):
await env.refresh_tester.run(expected_result)
assert len(env.refresh_tester.plot_manager.no_key_filenames) == len(dir_not_in_keychain)
for path in env.refresh_tester.plot_manager.no_key_filenames:
assert path in dir_not_in_keychain.plots
# Delete one of the plots and make sure it gets dropped from the no key filenames list
drop_plot = dir_not_in_keychain.path_list()[0]
dir_not_in_keychain.drop(drop_plot)
drop_plot.unlink()
assert drop_plot in env.refresh_tester.plot_manager.no_key_filenames
expected_result.processed -= 1
await env.refresh_tester.run(expected_result)
assert drop_plot not in env.refresh_tester.plot_manager.no_key_filenames
# Now add the missing keys to the plot manager's key lists and make sure the plots are getting loaded
env.refresh_tester.plot_manager.farmer_public_keys.append(G1Element())
env.refresh_tester.plot_manager.pool_public_keys.append(G1Element())
expected_result.loaded = dir_not_in_keychain.plot_info_list() # type: ignore[assignment]
expected_result.processed = len(dir_not_in_keychain)
await env.refresh_tester.run(expected_result)
# And make sure they are dropped from the list of plots with missing keys
assert len(env.refresh_tester.plot_manager.no_key_filenames) == 0
@pytest.mark.asyncio
async def test_plot_info_caching(environment, bt):
env: Environment = environment
expected_result = PlotRefreshResult()
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = env.dir_1.plot_info_list()
expected_result.removed = []
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
assert env.refresh_tester.plot_manager.cache.path().exists()
unlink(env.refresh_tester.plot_manager.cache.path())
# Should not write the cache again on shutdown because it didn't change
assert not env.refresh_tester.plot_manager.cache.path().exists()
env.refresh_tester.plot_manager.stop_refreshing()
assert not env.refresh_tester.plot_manager.cache.path().exists()
# Manually trigger `save_cache` and make sure it creates a new cache file
env.refresh_tester.plot_manager.cache.save()
assert env.refresh_tester.plot_manager.cache.path().exists()
refresh_tester: PlotRefreshTester = PlotRefreshTester(env.root_path)
plot_manager = refresh_tester.plot_manager
plot_manager.set_public_keys(bt.plot_manager.farmer_public_keys, bt.plot_manager.pool_public_keys)
plot_manager.cache.load()
assert len(plot_manager.cache) == len(env.refresh_tester.plot_manager.cache)
for path, cache_entry in env.refresh_tester.plot_manager.cache.items():
cache_entry_new = plot_manager.cache.get(path)
assert bytes(cache_entry_new.prover) == bytes(cache_entry.prover)
assert cache_entry_new.farmer_public_key == cache_entry.farmer_public_key
assert cache_entry_new.pool_public_key == cache_entry.pool_public_key
assert cache_entry_new.pool_contract_puzzle_hash == cache_entry.pool_contract_puzzle_hash
assert cache_entry_new.plot_public_key == cache_entry.plot_public_key
await refresh_tester.run(expected_result)
for path, plot_info in env.refresh_tester.plot_manager.plots.items():
assert path in plot_manager.plots
assert plot_manager.plots[path].prover.get_filename() == plot_info.prover.get_filename()
assert plot_manager.plots[path].prover.get_id() == plot_info.prover.get_id()
assert plot_manager.plots[path].prover.get_memo() == plot_info.prover.get_memo()
assert plot_manager.plots[path].prover.get_size() == plot_info.prover.get_size()
assert plot_manager.plots[path].pool_public_key == plot_info.pool_public_key
assert plot_manager.plots[path].pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash
assert plot_manager.plots[path].plot_public_key == plot_info.plot_public_key
assert plot_manager.plots[path].file_size == plot_info.file_size
assert plot_manager.plots[path].time_modified == plot_info.time_modified
assert plot_manager.plot_filename_paths == env.refresh_tester.plot_manager.plot_filename_paths
assert plot_manager.failed_to_open_filenames == env.refresh_tester.plot_manager.failed_to_open_filenames
assert plot_manager.no_key_filenames == env.refresh_tester.plot_manager.no_key_filenames
plot_manager.stop_refreshing()
# Modify the content of the plot_manager.dat
with open(plot_manager.cache.path(), "r+b") as file:
file.write(b"\xff\xff") # Sets Cache.version to 65535
# Make sure it just loads the plots normally if it fails to load the cache
refresh_tester: PlotRefreshTester = PlotRefreshTester(env.root_path)
plot_manager = refresh_tester.plot_manager
plot_manager.cache.load()
assert len(plot_manager.cache) == 0
plot_manager.set_public_keys(bt.plot_manager.farmer_public_keys, bt.plot_manager.pool_public_keys)
await refresh_tester.run(expected_result)
assert len(plot_manager.plots) == len(plot_manager.plots)
plot_manager.stop_refreshing()
@pytest.mark.asyncio
async def test_drop_too_large_cache_entries(environment, bt):
env: Environment = environment
expected_result = PlotRefreshResult(loaded=env.dir_1.plot_info_list(), processed=len(env.dir_1))
add_plot_directory(env.root_path, str(env.dir_1.path))
await env.refresh_tester.run(expected_result)
assert env.refresh_tester.plot_manager.cache.path().exists()
assert len(env.dir_1) >= 6, "This test requires at least 6 cache entries"
# Load the cache entries
cache_path = env.refresh_tester.plot_manager.cache.path()
serialized = cache_path.read_bytes()
stored_cache: VersionedBlob = VersionedBlob.from_bytes(serialized)
cache_data: CacheDataV1 = CacheDataV1.from_bytes(stored_cache.blob)
def modify_cache_entry(index: int, additional_data: int, modify_memo: bool) -> str:
path, cache_entry = cache_data.entries[index]
prover_data = cache_entry.prover_data
# Size of length hints in chiapos serialization currently depends on the platform
size_length = 8 if sys.maxsize > 2**32 else 4
# Version
version_size = 2
version = prover_data[0:version_size]
# Filename
filename_offset = version_size + size_length
filename_length = int.from_bytes(prover_data[version_size:filename_offset], byteorder=sys.byteorder)
filename = prover_data[filename_offset : filename_offset + filename_length]
# Memo
memo_length_offset = filename_offset + filename_length
memo_length = int.from_bytes(
prover_data[memo_length_offset : memo_length_offset + size_length], byteorder=sys.byteorder
)
memo_offset = memo_length_offset + size_length
memo = prover_data[memo_offset : memo_offset + memo_length]
# id, k, table pointers, C2
remainder = prover_data[memo_offset + memo_length :]
# Add the additional data to the filename
filename_length += additional_data
filename += bytes(b"\a" * additional_data)
# Add the additional data to the memo if requested
if modify_memo:
memo_length += additional_data
memo += bytes(b"\b" * additional_data)
filename_length_bytes = filename_length.to_bytes(size_length, byteorder=sys.byteorder)
memo_length_bytes = memo_length.to_bytes(size_length, byteorder=sys.byteorder)
cache_data.entries[index] = (
path,
replace(
cache_entry,
prover_data=bytes(version + filename_length_bytes + filename + memo_length_bytes + memo + remainder),
),
)
return path
def assert_cache(expected: List[MockPlotInfo]) -> None:
test_cache = Cache(cache_path)
assert len(test_cache) == 0
test_cache.load()
assert len(test_cache) == len(expected)
for plot_info in expected:
assert test_cache.get(Path(plot_info.prover.get_filename())) is not None
# Modify two entries, with and without memo modification, they both should remain in the cache after load
modify_cache_entry(0, 1500, modify_memo=False)
modify_cache_entry(1, 1500, modify_memo=True)
invalid_entries = [
modify_cache_entry(2, 2000, modify_memo=False),
modify_cache_entry(3, 2000, modify_memo=True),
modify_cache_entry(4, 50000, modify_memo=False),
modify_cache_entry(5, 50000, modify_memo=True),
]
plot_infos = env.dir_1.plot_info_list()
# Make sure the cache currently contains all plots from dir1
assert_cache(plot_infos)
# Write the modified cache entries to the file
cache_path.write_bytes(bytes(VersionedBlob(uint16(CURRENT_VERSION), bytes(cache_data))))
# And now test that plots in invalid_entries are not longer loaded
assert_cache([plot_info for plot_info in plot_infos if plot_info.prover.get_filename() not in invalid_entries])
@pytest.mark.asyncio
async def test_cache_lifetime(environment: Environment) -> None:
# Load a directory to produce a cache file
env: Environment = environment
expected_result = PlotRefreshResult()
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = env.dir_1.plot_info_list() # type: ignore[assignment]
expected_result.removed = []
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
expected_result.loaded = []
cache_v1: Cache = env.refresh_tester.plot_manager.cache
assert len(cache_v1) > 0
count_before = len(cache_v1)
# Remove half of the plots in dir1
for path in env.dir_1.path_list()[0 : int(len(env.dir_1) / 2)]:
expected_result.processed -= 1
expected_result.removed.append(path)
unlink(path)
# Modify the `last_use` timestamp of all cache entries to let them expire
last_use_before = time.time() - Cache.expiry_seconds - 1
for cache_entry in cache_v1.values():
cache_entry.last_use = last_use_before
assert cache_entry.expired(Cache.expiry_seconds)
# The next refresh cycle will now lead to half of the cache entries being removed because they are expired and
# the related plots do not longer exist.
await env.refresh_tester.run(expected_result)
assert len(cache_v1) == count_before - len(expected_result.removed)
# The other half of the cache entries should have a different `last_use` value now.
for cache_entry in cache_v1.values():
assert cache_entry.last_use != last_use_before
@pytest.mark.parametrize(
["event_to_raise"],
[
pytest.param(PlotRefreshEvents.started, id="started"),
pytest.param(PlotRefreshEvents.batch_processed, id="batch_processed"),
pytest.param(PlotRefreshEvents.done, id="done"),
],
)
@pytest.mark.asyncio
async def test_callback_event_raises(environment, event_to_raise: PlotRefreshEvents):
last_event_fired: Optional[PlotRefreshEvents] = None
def raising_callback(event: PlotRefreshEvents, _: PlotRefreshResult):
nonlocal last_event_fired
last_event_fired = event
if event == event_to_raise:
raise Exception(f"run_raise_in_callback {event_to_raise}")
env: Environment = environment
expected_result = PlotRefreshResult()
# Load dir_1
add_plot_directory(env.root_path, str(env.dir_1.path))
expected_result.loaded = env.dir_1.plot_info_list() # type: ignore[assignment]
expected_result.removed = []
expected_result.processed = len(env.dir_1)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
# Load dir_2
add_plot_directory(env.root_path, str(env.dir_2.path))
expected_result.loaded = env.dir_2.plot_info_list() # type: ignore[assignment]
expected_result.removed = []
expected_result.processed = len(env.dir_1) + len(env.dir_2)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
# Now raise the exception in the callback
default_callback = env.refresh_tester.plot_manager._refresh_callback
env.refresh_tester.plot_manager.set_refresh_callback(raising_callback)
env.refresh_tester.plot_manager.start_refreshing()
env.refresh_tester.plot_manager.trigger_refresh()
await time_out_assert(5, env.refresh_tester.plot_manager.needs_refresh, value=False)
# And make sure the follow-up evens aren't fired
assert last_event_fired == event_to_raise
# The exception should trigger `PlotManager.reset()` and clear the plots
assert len(env.refresh_tester.plot_manager.plots) == 0
assert len(env.refresh_tester.plot_manager.plot_filename_paths) == 0
assert len(env.refresh_tester.plot_manager.failed_to_open_filenames) == 0
assert len(env.refresh_tester.plot_manager.no_key_filenames) == 0
# The next run without the valid callback should lead to re-loading of all plot
env.refresh_tester.plot_manager.set_refresh_callback(default_callback)
expected_result.loaded = env.dir_1.plot_info_list() + env.dir_2.plot_info_list() # type: ignore[assignment]
expected_result.removed = []
expected_result.processed = len(env.dir_1) + len(env.dir_2)
expected_result.remaining = 0
await env.refresh_tester.run(expected_result)
@pytest.mark.asyncio
async def test_recursive_plot_scan(environment: Environment) -> None:
env: Environment = environment
# Create a directory tree with some subdirectories containing plots, others not.
root_plot_dir = env.root_path / "root"
sub_dir_0: Directory = Directory(root_plot_dir / "0", env.dir_1.plots[0:2])
sub_dir_0_1: Directory = Directory(sub_dir_0.path / "1", env.dir_1.plots[2:3])
sub_dir_1: Directory = Directory(root_plot_dir / "1", [])
sub_dir_1_0: Directory = Directory(sub_dir_1.path / "0", [])
sub_dir_1_0_1: Directory = Directory(sub_dir_1_0.path / "1", env.dir_1.plots[3:7])
# List of all the plots in the directory tree
expected_plot_list = sub_dir_0.plot_info_list() + sub_dir_0_1.plot_info_list() + sub_dir_1_0_1.plot_info_list()
# Adding the root without `recursive_plot_scan` and running a test should not load any plots (match an empty result)
expected_result = PlotRefreshResult()
add_plot_directory(env.root_path, str(root_plot_dir))
await env.refresh_tester.run(expected_result)
# Set the recursive scan flag in the config
with lock_and_load_config(env.root_path, "config.yaml") as config:
config["harvester"]["recursive_plot_scan"] = True
save_config(env.root_path, "config.yaml", config)
# With the flag enabled it should load all expected plots
expected_result.loaded = expected_plot_list # type: ignore[assignment]
expected_result.processed = len(expected_plot_list)
await env.refresh_tester.run(expected_result)
# Adding the subdirectories also should not lead to some failure or duplicated loading
add_plot_directory(env.root_path, str(sub_dir_0_1.path))
add_plot_directory(env.root_path, str(sub_dir_1_0_1.path))
expected_result.loaded = []
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_pool_rpc.py | tests/pools/test_pool_rpc.py | import asyncio
import logging
import tempfile
from dataclasses import dataclass
from pathlib import Path
from shutil import rmtree
from typing import Any, Optional, List, Dict, Tuple, AsyncGenerator
import pytest
import pytest_asyncio
from blspy import G1Element
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.full_node.full_node_api import FullNodeAPI
from flax.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH
from flax.pools.pool_wallet_info import PoolSingletonState, PoolWalletInfo
from flax.protocols import full_node_protocol
from flax.protocols.full_node_protocol import RespondBlock
from flax.rpc.wallet_rpc_client import WalletRpcClient
from flax.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.bech32m import encode_puzzle_hash
from flax.util.byte_types import hexstr_to_bytes
from flax.util.config import load_config
from flax.util.ints import uint16, uint32
from flax.wallet.derive_keys import find_authentication_sk, find_owner_sk
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.util.wallet_types import WalletType
from flax.simulator.block_tools import BlockTools, get_plot_dir
from tests.util.wallet_is_synced import wallet_is_synced
from tests.setup_nodes import setup_simulators_and_wallets
from flax.simulator.time_out_assert import time_out_assert, time_out_assert_not_none
# TODO: Compare deducted fees in all tests against reported total_fee
log = logging.getLogger(__name__)
FEE_AMOUNT = 2000000000000
MAX_WAIT_SECS = 30 # A high value for WAIT_SECS is useful when paused in the debugger
def get_pool_plot_dir():
return get_plot_dir() / Path("pool_tests")
async def get_total_block_rewards(num_blocks: int):
    """Sum the pool + farmer rewards for block heights 1 .. num_blocks - 1.

    Note that height ``num_blocks`` itself is excluded, matching how the
    prefarmed balance is accounted for in these tests.
    """
    total = 0
    for height in range(1, num_blocks):
        h = uint32(height)
        total += calculate_pool_reward(h) + calculate_base_farmer_reward(h)
    return total
async def farm_blocks(full_node_api, ph: bytes32, num_blocks: int):
    """Farm ``num_blocks`` transaction blocks whose rewards pay to ``ph``.

    Returns the number of blocks farmed (same as the input).
    """
    # TODO also return calculated block rewards
    for _ in range(num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    return num_blocks
@dataclass
class TemporaryPoolPlot:
    """Async context manager that creates one plot paying to a p2_singleton
    puzzle hash inside a temporary directory, and deletes both on exit.

    ``plot_id`` is populated by ``__aenter__``.
    """

    bt: BlockTools
    p2_singleton_puzzle_hash: bytes32
    plot_id: Optional[bytes32] = None

    async def __aenter__(self):
        self._tmpdir = tempfile.TemporaryDirectory()
        tmp_path: Path = Path(self._tmpdir.name)
        self.bt.add_plot_directory(tmp_path)
        plot_id: bytes32 = await self.bt.new_plot(self.p2_singleton_puzzle_hash, tmp_path, tmp_dir=tmp_path)
        assert plot_id is not None
        await self.bt.refresh_plots()
        self.plot_id = plot_id
        return self

    async def __aexit__(self, exc_type, exc_value, exc_traceback):
        # Always remove the temporary directory, even if plot deletion fails,
        # so failed tests do not leave plot files behind.
        try:
            await self.bt.delete_plot(self.plot_id)
        finally:
            self._tmpdir.cleanup()
PREFARMED_BLOCKS = 4
@pytest_asyncio.fixture(scope="function")
async def one_wallet_node_and_rpc(
    self_hostname,
) -> AsyncGenerator[Tuple[WalletRpcClient, Any, FullNodeAPI, BlockTools], None]:
    """Start one simulator full node and one wallet, pre-farm PREFARMED_BLOCKS
    to the wallet, and yield (rpc_client, wallet_node, full_node_api, block_tools).

    The wallet RPC client is closed during teardown even when the consuming
    test raises (the original version skipped teardown on failure).
    """
    rmtree(get_pool_plot_dir(), ignore_errors=True)
    async for nodes in setup_simulators_and_wallets(1, 1, {}, yield_services=True):
        full_nodes, wallets, bt = nodes
        full_node_api = full_nodes[0]._api
        wallet_service = wallets[0]
        wallet_node_0 = wallet_service._node
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        our_ph = await wallet_0.get_new_puzzlehash()
        await farm_blocks(full_node_api, our_ph, PREFARMED_BLOCKS)
        client = await WalletRpcClient.create(
            self_hostname, wallet_service.rpc_server.listen_port, wallet_service.root_path, wallet_service.config
        )
        try:
            yield client, wallet_node_0, full_node_api, bt
        finally:
            # Runs even if the test body raised, so the RPC connection is not leaked.
            client.close()
            await client.await_closed()
@pytest_asyncio.fixture(scope="function")
async def setup(two_wallet_nodes_services, self_hostname):
    """Build a two-wallet environment on top of ``two_wallet_nodes_services``.

    Returns (full_node_apis, [wallet_node_0, wallet_node_1], [our_ph, pool_ph],
    wallet_rpc_client) where the puzzle hashes come from hardened derivations
    of each wallet.

    NOTE(review): the returned WalletRpcClient is never closed by this fixture
    (it is a return-style fixture with no teardown) — confirm callers close it.
    """
    rmtree(get_pool_plot_dir(), ignore_errors=True)
    full_nodes, wallets, bt = two_wallet_nodes_services
    full_node_apis = [full_node_service._api for full_node_service in full_nodes]
    wallet_service_0 = wallets[0]
    wallet_service_1 = wallets[1]
    wallet_node_0 = wallet_service_0._node
    wallet_node_1 = wallet_service_1._node
    # Hardened derivation records; wallet 0 is "ours", wallet 1 plays the pool.
    our_ph_record = await wallet_node_0.wallet_state_manager.get_unused_derivation_record(1, hardened=True)
    pool_ph_record = await wallet_node_1.wallet_state_manager.get_unused_derivation_record(1, hardened=True)
    our_ph = our_ph_record.puzzle_hash
    pool_ph = pool_ph_record.puzzle_hash
    client = await WalletRpcClient.create(
        self_hostname, wallet_service_0.rpc_server.listen_port, wallet_service_0.root_path, wallet_service_0.config
    )
    return (
        full_node_apis,
        [wallet_node_0, wallet_node_1],
        [our_ph, pool_ph],
        client,  # wallet rpc client
    )
class TestPoolWalletRpc:
    @pytest.mark.asyncio
    @pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT), (False, 0)])
    async def test_create_new_pool_wallet_self_farm(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
        """Create a SELF_POOLING pool wallet via RPC and verify its on-chain
        state, its owner pubkey, and the pool entry written to config.yaml."""
        trusted, fee = trusted_and_fee
        client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        # Exercise both the trusted-peer (state-sync) and untrusted wallet protocols.
        if trusted:
            wallet_node_0.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await wallet_node_0.server.start_client(
            PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
        )
        total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
        await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
        await time_out_assert(
            20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
        )
        our_ph = await wallet_0.get_new_puzzlehash()
        assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        creation_tx: TransactionRecord = await client.create_new_pool_wallet(
            our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
        )
        # Creation spend must reach the mempool before we farm it into a block.
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx.spend_bundle,
            creation_tx.name,
        )
        await farm_blocks(full_node_api, our_ph, 6)
        assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
        await time_out_assert(30, wallet_is_synced, True, wallet_node_0, full_node_api)
        summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)
        assert len(summaries_response) == 1
        wallet_id: int = summaries_response[0]["id"]
        status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
        assert status.current.state == PoolSingletonState.SELF_POOLING.value
        assert status.target is None
        # Fixed expected owner pubkey — presumably deterministic because the
        # simulator wallet uses a fixed seed; confirm against BlockTools setup.
        assert status.current.owner_pubkey == G1Element.from_bytes(
            bytes.fromhex(
                "b286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
            )
        )
        assert status.current.pool_url == ""
        assert status.current.relative_lock_height == 0
        assert status.current.version == 1
        # Check that config has been written properly
        full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
        pool_list: List[Dict] = full_config["pool"]["pool_list"]
        assert len(pool_list) == 1
        pool_config = pool_list[0]
        assert (
            pool_config["owner_public_key"]
            == "0xb286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
        )
        # It can be one of multiple launcher IDs, due to selecting a different coin
        launcher_id = None
        for addition in creation_tx.additions:
            if addition.puzzle_hash == SINGLETON_LAUNCHER_HASH:
                launcher_id = addition.name()
                break
        assert hexstr_to_bytes(pool_config["launcher_id"]) == launcher_id
        assert pool_config["pool_url"] == ""
    @pytest.mark.asyncio
    @pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT), (False, 0)])
    async def test_create_new_pool_wallet_farm_to_pool(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
        """Create a FARMING_TO_POOL pool wallet via RPC and verify its state,
        pool URL, relative lock height, and the config.yaml pool entry."""
        trusted, fee = trusted_and_fee
        client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        # Exercise both the trusted-peer (state-sync) and untrusted wallet protocols.
        if trusted:
            wallet_node_0.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await wallet_node_0.server.start_client(
            PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
        )
        total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
        await time_out_assert(
            20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
        )
        await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        our_ph = await wallet_0.get_new_puzzlehash()
        assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
        creation_tx: TransactionRecord = await client.create_new_pool_wallet(
            our_ph, "http://pool.example.com", 10, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
        )
        # Creation spend must reach the mempool before we farm it into a block.
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx.spend_bundle,
            creation_tx.name,
        )
        await farm_blocks(full_node_api, our_ph, 6)
        assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        summaries_response = await client.get_wallets(WalletType.POOLING_WALLET)
        assert len(summaries_response) == 1
        wallet_id: int = summaries_response[0]["id"]
        status: PoolWalletInfo = (await client.pw_status(wallet_id))[0]
        assert status.current.state == PoolSingletonState.FARMING_TO_POOL.value
        assert status.target is None
        # Fixed expected owner pubkey — presumably deterministic because the
        # simulator wallet uses a fixed seed; confirm against BlockTools setup.
        assert status.current.owner_pubkey == G1Element.from_bytes(
            bytes.fromhex(
                "b286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
            )
        )
        assert status.current.pool_url == "http://pool.example.com"
        assert status.current.relative_lock_height == 10
        assert status.current.version == 1
        # Check that config has been written properly
        full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
        pool_list: List[Dict] = full_config["pool"]["pool_list"]
        assert len(pool_list) == 1
        pool_config = pool_list[0]
        assert (
            pool_config["owner_public_key"]
            == "0xb286bbf7a10fa058d2a2a758921377ef00bb7f8143e1bd40dd195ae918dbef42cfc481140f01b9eae13b430a0c8fe304"
        )
        # It can be one of multiple launcher IDs, due to selecting a different coin
        launcher_id = None
        for addition in creation_tx.additions:
            if addition.puzzle_hash == SINGLETON_LAUNCHER_HASH:
                launcher_id = addition.name()
                break
        assert hexstr_to_bytes(pool_config["launcher_id"]) == launcher_id
        assert pool_config["pool_url"] == "http://pool.example.com"
    @pytest.mark.asyncio
    @pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT), (False, 0)])
    async def test_create_multiple_pool_wallets(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
        """Create two pool wallets in one block, reorg them away, then (for the
        untrusted case only) create many more pool wallets while checking
        config.yaml bookkeeping and owner/authentication key derivation."""
        trusted, fee = trusted_and_fee
        client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
        if trusted:
            wallet_node_0.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await wallet_node_0.server.start_client(
            PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
        )
        total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
        await time_out_assert(
            20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
        )
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        our_ph_1 = await wallet_0.get_new_puzzlehash()
        our_ph_2 = await wallet_0.get_new_puzzlehash()
        assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
        # One self-pooling and one farming-to-pool wallet, created back to back.
        creation_tx: TransactionRecord = await client.create_new_pool_wallet(
            our_ph_1, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
        )
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        creation_tx_2: TransactionRecord = await client.create_new_pool_wallet(
            our_ph_1, self_hostname, 12, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
        )
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx.spend_bundle,
            creation_tx.name,
        )
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx_2.spend_bundle,
            creation_tx_2.name,
        )
        await farm_blocks(full_node_api, our_ph_2, 6)
        assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx.name) is None
        assert full_node_api.full_node.mempool_manager.get_spendbundle(creation_tx_2.name) is None

        # pw_status raises ValueError until the pool wallet actually exists.
        async def pw_created(check_wallet_id: int) -> bool:
            try:
                await client.pw_status(check_wallet_id)
                return True
            except ValueError:
                return False

        await time_out_assert(10, pw_created, True, 2)
        await time_out_assert(10, pw_created, True, 3)
        status_2: PoolWalletInfo = (await client.pw_status(2))[0]
        status_3: PoolWalletInfo = (await client.pw_status(3))[0]
        # Wallet-ID assignment order is not deterministic; accept either pairing.
        if status_2.current.state == PoolSingletonState.SELF_POOLING.value:
            assert status_3.current.state == PoolSingletonState.FARMING_TO_POOL.value
        else:
            assert status_2.current.state == PoolSingletonState.FARMING_TO_POOL.value
            assert status_3.current.state == PoolSingletonState.SELF_POOLING.value
        full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
        pool_list: List[Dict] = full_config["pool"]["pool_list"]
        assert len(pool_list) == 2
        assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
        assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(3)) == 0
        # Doing a reorg reverts and removes the pool wallets
        await full_node_api.reorg_from_index_to_new_index(ReorgProtocol(uint32(0), uint32(20), our_ph_2, None))
        await time_out_assert(30, wallet_is_synced, True, wallet_node_0, full_node_api)
        summaries_response = await client.get_wallets()
        assert len(summaries_response) == 1
        with pytest.raises(ValueError):
            await client.pw_status(2)
        with pytest.raises(ValueError):
            await client.pw_status(3)

        # Create some CAT wallets to increase wallet IDs
        def mempool_not_empty() -> bool:
            return len(full_node_api.full_node.mempool_manager.mempool.spends.keys()) > 0

        def mempool_empty() -> bool:
            return len(full_node_api.full_node.mempool_manager.mempool.spends.keys()) == 0

        await client.delete_unconfirmed_transactions("1")
        await farm_blocks(full_node_api, our_ph_2, 1)
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        for i in range(5):
            await time_out_assert(10, mempool_empty)
            res = await client.create_new_cat_and_wallet(20)
            summaries_response = await client.get_wallets()
            assert res["success"]
            cat_0_id = res["wallet_id"]
            asset_id = bytes.fromhex(res["asset_id"])
            assert len(asset_id) > 0
            await time_out_assert(10, mempool_not_empty)
            await farm_blocks(full_node_api, our_ph_2, 1)
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            bal_0 = await client.get_wallet_balance(cat_0_id)
            assert bal_0["confirmed_wallet_balance"] == 20
        # Test creation of many pool wallets. Use untrusted since that is the more complicated protocol, but don't
        # run this code more than once, since it's slow.
        if not trusted:
            for i in range(22):
                await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
                creation_tx_3: TransactionRecord = await client.create_new_pool_wallet(
                    our_ph_1, self_hostname, 5, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
                )
                await time_out_assert(
                    10,
                    full_node_api.full_node.mempool_manager.get_spendbundle,
                    creation_tx_3.spend_bundle,
                    creation_tx_3.name,
                )
                await farm_blocks(full_node_api, our_ph_2, 2)
                await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
                full_config: Dict = load_config(wallet_0.wallet_state_manager.root_path, "config.yaml")
                pool_list: List[Dict] = full_config["pool"]["pool_list"]
                # Two pool entries existed before this loop started (the reorged
                # ones remain in config), hence i + 3 after each new creation.
                assert len(pool_list) == i + 3
                if i == 0:
                    # Ensures that the CAT creation does not cause pool wallet IDs to increment
                    for wallet in wallet_node_0.wallet_state_manager.wallets.values():
                        if wallet.type() == WalletType.POOLING_WALLET:
                            status: PoolWalletInfo = (await client.pw_status(wallet.id()))[0]
                            assert (await wallet.get_pool_wallet_index()) < 5
                            auth_sk = find_authentication_sk(
                                [wallet_0.wallet_state_manager.private_key], status.current.owner_pubkey
                            )
                            assert auth_sk is not None
                            owner_sk = find_owner_sk(
                                [wallet_0.wallet_state_manager.private_key], status.current.owner_pubkey
                            )
                            assert owner_sk is not None
                            # Authentication and owner keys must differ.
                            assert owner_sk != auth_sk
    @pytest.mark.asyncio
    @pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT), (False, 0)])
    async def test_absorb_self(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
        """Self-pool with a real temporary plot, farm rewards to the pool's
        p2_singleton puzzle hash, absorb them in two rounds, and verify the
        pool wallet balance; also check absorbing with nothing left raises."""
        trusted, fee = trusted_and_fee
        client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
        bt = full_node_api.bt
        if trusted:
            wallet_node_0.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await wallet_node_0.server.start_client(
            PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
        )
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
        await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
        await time_out_assert(
            20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
        )
        our_ph = await wallet_0.get_new_puzzlehash()
        assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        creation_tx: TransactionRecord = await client.create_new_pool_wallet(
            our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
        )
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx.spend_bundle,
            creation_tx.name,
        )
        await farm_blocks(full_node_api, our_ph, 1)
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        status: PoolWalletInfo = (await client.pw_status(2))[0]
        assert status.current.state == PoolSingletonState.SELF_POOLING.value
        async with TemporaryPoolPlot(bt, status.p2_singleton_puzzle_hash) as pool_plot:
            # Farm blocks with the pool plot so pool rewards go to the p2_singleton.
            all_blocks = await full_node_api.get_all_full_blocks()
            blocks = bt.get_consecutive_blocks(
                3,
                block_list_input=all_blocks,
                force_plot_id=pool_plot.plot_id,
                farmer_reward_puzzle_hash=our_ph,
                guarantee_transaction_block=True,
            )
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-2]))
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            bal = await client.get_wallet_balance(2)
            assert bal["confirmed_wallet_balance"] == 2 * 1750000000000
            # Claim 2 * 1.75, and farm a new 1.75
            absorb_tx: TransactionRecord = (await client.pw_absorb_rewards(2, fee))["transaction"]
            await time_out_assert(
                5,
                full_node_api.full_node.mempool_manager.get_spendbundle,
                absorb_tx.spend_bundle,
                absorb_tx.name,
            )
            await farm_blocks(full_node_api, our_ph, 1)
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            new_status: PoolWalletInfo = (await client.pw_status(2))[0]
            # Absorbing must not change the pool state, only the singleton tip coin.
            assert status.current == new_status.current
            assert status.tip_singleton_coin_id != new_status.tip_singleton_coin_id
            bal = await client.get_wallet_balance(2)
            assert bal["confirmed_wallet_balance"] == 1 * 1750000000000
            # Claim another 1.75
            absorb_tx1: TransactionRecord = (await client.pw_absorb_rewards(2, fee))["transaction"]
            await time_out_assert(
                MAX_WAIT_SECS,
                full_node_api.full_node.mempool_manager.get_spendbundle,
                absorb_tx1.spend_bundle,
                absorb_tx1.name,
            )
            await farm_blocks(full_node_api, our_ph, 2)
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            bal = await client.get_wallet_balance(2)
            assert bal["confirmed_wallet_balance"] == 0
            assert len(await wallet_node_0.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(2)) == 0
            # Send a plain (non-coinbase) coin to the p2_singleton address.
            tr: TransactionRecord = await client.send_transaction(
                1, 100, encode_puzzle_hash(status.p2_singleton_puzzle_hash, "txfx")
            )
            await time_out_assert(
                10,
                full_node_api.full_node.mempool_manager.get_spendbundle,
                tr.spend_bundle,
                tr.name,
            )
            await farm_blocks(full_node_api, our_ph, 2)
            # Balance ignores non coinbase TX
            bal = await client.get_wallet_balance(2)
            assert bal["confirmed_wallet_balance"] == 0
            # Nothing absorbable left, so the RPC must fail.
            with pytest.raises(ValueError):
                await client.pw_absorb_rewards(2, fee)
            tx1 = await client.get_transactions(1)
            assert (250000000000 + fee) in [tx.amount for tx in tx1]
            # await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
    @pytest.mark.asyncio
    @pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT * 2)])
    async def test_absorb_self_multiple_coins(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
        """Farm two pool reward coins, then absorb with the extra argument set
        to 1 — presumably a cap on coins absorbed per spend (confirm against
        the wallet RPC) — and verify the pool balance is unchanged on-chain."""
        trusted, fee = trusted_and_fee
        client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
        bt = full_node_api.bt
        if trusted:
            wallet_node_0.config["trusted_peers"] = {
                full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
            }
        else:
            wallet_node_0.config["trusted_peers"] = {}
        await wallet_node_0.server.start_client(
            PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
        )
        wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
        total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
        await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
        await time_out_assert(
            20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
        )
        our_ph = await wallet_0.get_new_puzzlehash()
        assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
        await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
        creation_tx: TransactionRecord = await client.create_new_pool_wallet(
            our_ph, "", 0, f"{self_hostname}:5000", "new", "SELF_POOLING", fee
        )
        await time_out_assert(
            10,
            full_node_api.full_node.mempool_manager.get_spendbundle,
            creation_tx.spend_bundle,
            creation_tx.name,
        )
        await farm_blocks(full_node_api, our_ph, 1)

        # pw_status raises ValueError until the pool wallet exists.
        async def pool_wallet_created():
            try:
                status: PoolWalletInfo = (await client.pw_status(2))[0]
                return status.current.state == PoolSingletonState.SELF_POOLING.value
            except ValueError:
                return False

        await time_out_assert(20, pool_wallet_created)
        status: PoolWalletInfo = (await client.pw_status(2))[0]
        async with TemporaryPoolPlot(bt, status.p2_singleton_puzzle_hash) as pool_plot:
            # Farm blocks with the pool plot so pool rewards go to the p2_singleton.
            all_blocks = await full_node_api.get_all_full_blocks()
            blocks = bt.get_consecutive_blocks(
                3,
                block_list_input=all_blocks,
                force_plot_id=pool_plot.plot_id,
                farmer_reward_puzzle_hash=our_ph,
                guarantee_transaction_block=True,
            )
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-2]))
            await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            bal = await client.get_wallet_balance(2)
            assert bal["confirmed_wallet_balance"] == 2 * 1750000000000
            await farm_blocks(full_node_api, our_ph, 6)
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            # Claim
            absorb_tx: TransactionRecord = (await client.pw_absorb_rewards(2, fee, 1))["transaction"]
            await time_out_assert(
                5,
                full_node_api.full_node.mempool_manager.get_spendbundle,
                absorb_tx.spend_bundle,
                absorb_tx.name,
            )
            await farm_blocks(full_node_api, our_ph, 2)
            await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
            new_status: PoolWalletInfo = (await client.pw_status(2))[0]
            # Absorbing must not change the pool state, only the singleton tip coin.
            assert status.current == new_status.current
            assert status.tip_singleton_coin_id != new_status.tip_singleton_coin_id
            main_bal = await client.get_wallet_balance(1)
            pool_bal = await client.get_wallet_balance(2)
            assert pool_bal["confirmed_wallet_balance"] == 2 * 1750000000000
            assert main_bal["confirmed_wallet_balance"] == 26499999999999
@pytest.mark.asyncio
@pytest.mark.parametrize("trusted_and_fee", [(True, FEE_AMOUNT), (False, 0)])
async def test_absorb_pooling(self, one_wallet_node_and_rpc, trusted_and_fee, self_hostname):
trusted, fee = trusted_and_fee
client, wallet_node_0, full_node_api, _ = one_wallet_node_and_rpc
bt = full_node_api.bt
if trusted:
wallet_node_0.config["trusted_peers"] = {
full_node_api.full_node.server.node_id.hex(): full_node_api.full_node.server.node_id.hex()
}
else:
wallet_node_0.config["trusted_peers"] = {}
await wallet_node_0.server.start_client(
PeerInfo(self_hostname, uint16(full_node_api.full_node.server._port)), None
)
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
total_block_rewards = await get_total_block_rewards(PREFARMED_BLOCKS)
await time_out_assert(20, wallet_0.get_confirmed_balance, total_block_rewards)
await time_out_assert(
20, wallet_node_0.wallet_state_manager.blockchain.get_finished_sync_up_to, PREFARMED_BLOCKS
)
await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
our_ph = await wallet_0.get_new_puzzlehash()
assert len(await client.get_wallets(WalletType.POOLING_WALLET)) == 0
# Balance stars at 6 XFX
assert (await wallet_0.get_confirmed_balance()) == 6000000000000
creation_tx: TransactionRecord = await client.create_new_pool_wallet(
our_ph, "http://123.45.67.89", 10, f"{self_hostname}:5000", "new", "FARMING_TO_POOL", fee
)
await time_out_assert(
10,
full_node_api.full_node.mempool_manager.get_spendbundle,
creation_tx.spend_bundle,
creation_tx.name,
)
await farm_blocks(full_node_api, our_ph, 1)
async def farming_to_pool() -> bool:
try:
status: PoolWalletInfo = (await client.pw_status(2))[0]
return status.current.state == PoolSingletonState.FARMING_TO_POOL.value
except ValueError:
return False
await time_out_assert(20, farming_to_pool)
status: PoolWalletInfo = (await client.pw_status(2))[0]
async with TemporaryPoolPlot(bt, status.p2_singleton_puzzle_hash) as pool_plot:
all_blocks = await full_node_api.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
3,
block_list_input=all_blocks,
force_plot_id=pool_plot.plot_id,
farmer_reward_puzzle_hash=our_ph,
guarantee_transaction_block=True,
)
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-2]))
await full_node_api.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
# Pooled plots don't have balance
await time_out_assert(20, wallet_is_synced, True, wallet_node_0, full_node_api)
bal = await client.get_wallet_balance(2)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_pool_puzzles_lifecycle.py | tests/pools/test_pool_puzzles_lifecycle.py | import copy
from typing import List
from unittest import TestCase
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.coin import Coin
from flax.types.coin_spend import CoinSpend
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint64, uint32
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
puzzle_for_pk,
solution_for_conditions,
calculate_synthetic_secret_key,
DEFAULT_HIDDEN_PUZZLE_HASH,
)
from flax.wallet.puzzles.p2_conditions import puzzle_for_conditions
from flax.wallet.puzzles import singleton_top_layer
from flax.pools.pool_wallet_info import PoolState
from flax.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_pooling_inner_puzzle,
create_p2_singleton_puzzle,
create_absorb_spend,
create_travel_spend,
get_most_recent_singleton_coin_from_coin_spend,
get_delayed_puz_info_from_launcher_spend,
SINGLETON_MOD_HASH,
launcher_id_to_p2_puzzle_hash,
is_pool_singleton_inner_puzzle,
get_pubkey_from_member_inner_puzzle,
solution_to_pool_state,
uncurry_pool_waitingroom_inner_puzzle,
get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle,
)
from tests.util.key_tool import KeyTool
from tests.clvm.test_puzzles import (
public_key_for_index,
secret_exponent_for_index,
)
from tests.clvm.coin_store import CoinStore, CoinTimestamp, BadSpendBundleError
"""
This test suite aims to test:
- flax.pools.pool_puzzles.py
- flax.wallet.puzzles.pool_member_innerpuz.clvm
- flax.wallet.puzzles.pool_waiting_room_innerpuz.clvm
"""
# Helper function
def sign_delegated_puz(del_puz: Program, coin: Coin) -> G2Element:
    """Sign a delegated puzzle's tree hash for ``coin`` with the synthetic key
    derived from test key index 1, in AGG_SIG_ME form (hash + coin id +
    network additional data)."""
    base_sk = PrivateKey.from_bytes(secret_exponent_for_index(1).to_bytes(32, "big"))
    synthetic_sk: PrivateKey = calculate_synthetic_secret_key(base_sk, DEFAULT_HIDDEN_PUZZLE_HASH)
    message = del_puz.get_tree_hash() + coin.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA
    return AugSchemeMPL.sign(synthetic_sk, message)
class TestPoolPuzzles(TestCase):
def test_pool_lifecycle(self):
# START TESTS
# Generate starting info
key_lookup = KeyTool()
sk: PrivateKey = PrivateKey.from_bytes(
secret_exponent_for_index(1).to_bytes(32, "big"),
)
pk: G1Element = G1Element.from_bytes(public_key_for_index(1, key_lookup))
starting_puzzle: Program = puzzle_for_pk(pk)
starting_ph: bytes32 = starting_puzzle.get_tree_hash()
# Get our starting standard coin created
START_AMOUNT: uint64 = 1023
coin_db = CoinStore()
time = CoinTimestamp(10000000, 1)
coin_db.farm_coin(starting_ph, time, START_AMOUNT)
starting_coin: Coin = next(coin_db.all_unspent_coins())
# LAUNCHING
# Create the escaping inner puzzle
GENESIS_CHALLENGE = bytes32.fromhex("ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb")
launcher_coin = singleton_top_layer.generate_launcher_coin(
starting_coin,
START_AMOUNT,
)
DELAY_TIME = uint64(60800)
DELAY_PH = starting_ph
launcher_id = launcher_coin.name()
relative_lock_height: uint32 = uint32(5000)
# use a dummy pool state
pool_state = PoolState(
owner_pubkey=pk,
pool_url="",
relative_lock_height=relative_lock_height,
state=3, # farming to pool
target_puzzle_hash=starting_ph,
version=1,
)
# create a new dummy pool state for travelling
target_pool_state = PoolState(
owner_pubkey=pk,
pool_url="",
relative_lock_height=relative_lock_height,
state=2, # Leaving pool
target_puzzle_hash=starting_ph,
version=1,
)
# Standard format comment
comment = Program.to([("p", bytes(pool_state)), ("t", DELAY_TIME), ("h", DELAY_PH)])
pool_wr_innerpuz: bytes32 = create_waiting_room_inner_puzzle(
starting_ph,
relative_lock_height,
pk,
launcher_id,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
pool_wr_inner_hash = pool_wr_innerpuz.get_tree_hash()
pooling_innerpuz: Program = create_pooling_inner_puzzle(
starting_ph,
pool_wr_inner_hash,
pk,
launcher_id,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Driver tests
assert is_pool_singleton_inner_puzzle(pooling_innerpuz)
assert is_pool_singleton_inner_puzzle(pool_wr_innerpuz)
assert get_pubkey_from_member_inner_puzzle(pooling_innerpuz) == pk
# Generating launcher information
conditions, launcher_coinsol = singleton_top_layer.launch_conditions_and_coinsol(
starting_coin, pooling_innerpuz, comment, START_AMOUNT
)
# Creating solution for standard transaction
delegated_puzzle: Program = puzzle_for_conditions(conditions)
full_solution: Program = solution_for_conditions(conditions)
starting_coinsol = CoinSpend(
starting_coin,
starting_puzzle,
full_solution,
)
# Create the spend bundle
sig: G2Element = sign_delegated_puz(delegated_puzzle, starting_coin)
spend_bundle = SpendBundle(
[starting_coinsol, launcher_coinsol],
sig,
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
spend_bundle,
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# Test that we can retrieve the extra data
assert get_delayed_puz_info_from_launcher_spend(launcher_coinsol) == (DELAY_TIME, DELAY_PH)
assert solution_to_pool_state(launcher_coinsol) == pool_state
# TEST TRAVEL AFTER LAUNCH
# fork the state
fork_coin_db: CoinStore = copy.deepcopy(coin_db)
post_launch_coinsol, _ = create_travel_spend(
launcher_coinsol,
launcher_coin,
pool_state,
target_pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Spend it!
fork_coin_db.update_coin_store_for_spend_bundle(
SpendBundle([post_launch_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# HONEST ABSORB
time = CoinTimestamp(10000030, 2)
# create the farming reward
p2_singleton_puz: Program = create_p2_singleton_puzzle(
SINGLETON_MOD_HASH,
launcher_id,
DELAY_TIME,
DELAY_PH,
)
p2_singleton_ph: bytes32 = p2_singleton_puz.get_tree_hash()
assert uncurry_pool_waitingroom_inner_puzzle(pool_wr_innerpuz) == (
starting_ph,
relative_lock_height,
pk,
p2_singleton_ph,
)
assert launcher_id_to_p2_puzzle_hash(launcher_id, DELAY_TIME, DELAY_PH) == p2_singleton_ph
assert get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle(p2_singleton_puz) == (DELAY_TIME, DELAY_PH)
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
coin_sols: List[CoinSpend] = create_absorb_spend(
launcher_coinsol,
pool_state,
launcher_coin,
2,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH, # height
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ABSORB A NON EXISTENT REWARD (Negative test)
last_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
coin_sols: List[CoinSpend] = create_absorb_spend(
last_coinsol,
pool_state,
launcher_coin,
2,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH, # height
)
# filter for only the singleton solution
singleton_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([singleton_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
# SPEND A NON-REWARD P2_SINGLETON (Negative test)
# create the dummy coin
non_reward_p2_singleton = Coin(
bytes32(32 * b"3"),
p2_singleton_ph,
uint64(1337),
)
coin_db._add_coin_entry(non_reward_p2_singleton, time)
# construct coin solution for the p2_singleton coin
bad_coinsol = CoinSpend(
non_reward_p2_singleton,
p2_singleton_puz,
Program.to(
[
pooling_innerpuz.get_tree_hash(),
non_reward_p2_singleton.name(),
]
),
)
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([singleton_coinsol, bad_coinsol], G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_ANNOUNCE_CONSUMED_FAILED"
# ENTER WAITING ROOM
# find the singleton
singleton = get_most_recent_singleton_coin_from_coin_spend(last_coinsol)
# get the relevant coin solution
travel_coinsol, _ = create_travel_spend(
last_coinsol,
launcher_coin,
pool_state,
target_pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Test that we can retrieve the extra data
assert solution_to_pool_state(travel_coinsol) == target_pool_state
# sign the serialized state
data = Program.to(bytes(target_pool_state)).get_tree_hash()
sig: G2Element = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([travel_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ESCAPE TOO FAST (Negative test)
# find the singleton
singleton = get_most_recent_singleton_coin_from_coin_spend(travel_coinsol)
# get the relevant coin solution
return_coinsol, _ = create_travel_spend(
travel_coinsol,
launcher_coin,
target_pool_state,
pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# sign the serialized target state
sig = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it and hope it fails!
try:
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([return_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
except BadSpendBundleError as e:
assert str(e) == "condition validation failure Err.ASSERT_HEIGHT_RELATIVE_FAILED"
# ABSORB WHILE IN WAITING ROOM
time = CoinTimestamp(10000060, 3)
# create the farming reward
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
# generate relevant coin solutions
coin_sols: List[CoinSpend] = create_absorb_spend(
travel_coinsol,
target_pool_state,
launcher_coin,
3,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH, # height
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# LEAVE THE WAITING ROOM
time = CoinTimestamp(20000000, 10000)
# find the singleton
singleton_coinsol: CoinSpend = list(
filter(
lambda e: e.coin.amount == START_AMOUNT,
coin_sols,
)
)[0]
singleton: Coin = get_most_recent_singleton_coin_from_coin_spend(singleton_coinsol)
# get the relevant coin solution
return_coinsol, _ = create_travel_spend(
singleton_coinsol,
launcher_coin,
target_pool_state,
pool_state,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH,
)
# Test that we can retrieve the extra data
assert solution_to_pool_state(return_coinsol) == pool_state
# sign the serialized target state
data = Program.to([pooling_innerpuz.get_tree_hash(), START_AMOUNT, bytes(pool_state)]).get_tree_hash()
sig: G2Element = AugSchemeMPL.sign(
sk,
(data + singleton.name() + DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA),
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle([return_coinsol], sig),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
# ABSORB ONCE MORE FOR GOOD MEASURE
time = CoinTimestamp(20000000, 10005)
# create the farming reward
coin_db.farm_coin(p2_singleton_ph, time, 1750000000000)
coin_sols: List[CoinSpend] = create_absorb_spend(
return_coinsol,
pool_state,
launcher_coin,
10005,
GENESIS_CHALLENGE,
DELAY_TIME,
DELAY_PH, # height
)
# Spend it!
coin_db.update_coin_store_for_spend_bundle(
SpendBundle(coin_sols, G2Element()),
time,
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM,
DEFAULT_CONSTANTS.COST_PER_BYTE,
)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_pool_wallet.py | tests/pools/test_pool_wallet.py | from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Optional, cast
from unittest.mock import MagicMock
import pytest
from blspy import G1Element
from benchmarks.utils import rand_g1, rand_hash
from flax.pools.pool_wallet import PoolWallet
from flax.types.blockchain_format.sized_bytes import bytes32
@dataclass
class MockStandardWallet:
    """Stand-in for the standard wallet: always hands out one fixed puzzle hash."""

    # Puzzle hash returned by every call to get_new_puzzlehash().
    canned_puzzlehash: bytes32

    async def get_new_puzzlehash(self) -> bytes32:
        return self.canned_puzzlehash
@dataclass
class MockWalletStateManager:
    """Minimal stand-in for WalletStateManager used by these tests."""

    # Root path handed to config load/save helpers; None is acceptable here.
    root_path: Optional[Path] = None
@dataclass
class MockPoolWalletConfig:
    """Mirror of the pool-wallet config entry fields asserted on in these tests."""

    launcher_id: bytes32
    pool_url: str
    payout_instructions: str
    target_puzzle_hash: bytes32
    p2_singleton_puzzle_hash: bytes32
    owner_public_key: G1Element
@dataclass
class MockPoolState:
    """Subset of the on-chain pool state read by update_pool_config()."""

    pool_url: Optional[str]
    target_puzzle_hash: bytes32
    owner_pubkey: G1Element
@dataclass
class MockPoolWalletInfo:
    """Canned value returned by the mocked PoolWallet.get_current_state()."""

    launcher_id: bytes32
    p2_singleton_puzzle_hash: bytes32
    current: MockPoolState
@pytest.mark.asyncio
async def test_update_pool_config_new_config(monkeypatch: Any) -> None:
    """
    Test that PoolWallet writes a brand-new pool config entry (no prior config
    on disk), populated from its current state and the standard wallet's
    puzzle hash.
    """
    updated_configs: List[MockPoolWalletConfig] = []
    payout_instructions_ph = rand_hash()
    # Canned on-chain state for the wallet under test.
    launcher_id: bytes32 = rand_hash()
    p2_singleton_puzzle_hash: bytes32 = rand_hash()
    pool_url: str = ""
    target_puzzle_hash: bytes32 = rand_hash()
    owner_pubkey: G1Element = rand_g1()
    current: MockPoolState = MockPoolState(
        pool_url=pool_url,
        target_puzzle_hash=target_puzzle_hash,
        owner_pubkey=owner_pubkey,
    )
    current_state: MockPoolWalletInfo = MockPoolWalletInfo(
        launcher_id=launcher_id,
        p2_singleton_puzzle_hash=p2_singleton_puzzle_hash,
        current=current,
    )
    # No config data on disk yet
    def mock_load_pool_config(root_path: Path) -> List[MockPoolWalletConfig]:
        return []
    monkeypatch.setattr("flax.pools.pool_wallet.load_pool_config", mock_load_pool_config)
    # Mock pool_config.update_pool_config to capture the updated configs
    async def mock_pool_config_update_pool_config(
        root_path: Path, pool_config_list: List[MockPoolWalletConfig]
    ) -> None:
        nonlocal updated_configs
        updated_configs = pool_config_list
    monkeypatch.setattr("flax.pools.pool_wallet.update_pool_config", mock_pool_config_update_pool_config)
    # Mock PoolWallet.get_current_state to return our canned state
    async def mock_get_current_state(self: Any) -> Any:
        return current_state
    monkeypatch.setattr(PoolWallet, "get_current_state", mock_get_current_state)
    # Create an empty PoolWallet and populate only the required fields
    wallet = PoolWallet(
        wallet_state_manager=MockWalletStateManager(),
        standard_wallet=cast(Any, MockStandardWallet(canned_puzzlehash=payout_instructions_ph)),
        log=MagicMock(),
        wallet_info=MagicMock(),
        wallet_id=MagicMock(),
    )
    await wallet.update_pool_config()
    # Exactly one new entry mirroring the canned state; payout_instructions
    # comes from the standard wallet's puzzle hash.
    assert len(updated_configs) == 1
    assert updated_configs[0].launcher_id == launcher_id
    assert updated_configs[0].pool_url == pool_url
    assert updated_configs[0].payout_instructions == payout_instructions_ph.hex()
    assert updated_configs[0].target_puzzle_hash == target_puzzle_hash
    assert updated_configs[0].p2_singleton_puzzle_hash == p2_singleton_puzzle_hash
    assert updated_configs[0].owner_public_key == owner_pubkey
@pytest.mark.asyncio
async def test_update_pool_config_existing_payout_instructions(monkeypatch: Any) -> None:
    """
    Test that PoolWallet will retain existing payout_instructions when updating the pool config.
    """
    updated_configs: List[MockPoolWalletConfig] = []
    payout_instructions_ph = rand_hash()
    # Canned on-chain state for the wallet under test.
    launcher_id: bytes32 = rand_hash()
    p2_singleton_puzzle_hash: bytes32 = rand_hash()
    pool_url: str = "https://fake.pool.url"
    target_puzzle_hash: bytes32 = rand_hash()
    owner_pubkey: G1Element = rand_g1()
    current: MockPoolState = MockPoolState(
        pool_url=pool_url,
        target_puzzle_hash=target_puzzle_hash,
        owner_pubkey=owner_pubkey,
    )
    current_state: MockPoolWalletInfo = MockPoolWalletInfo(
        launcher_id=launcher_id,
        p2_singleton_puzzle_hash=p2_singleton_puzzle_hash,
        current=current,
    )
    # Existing config data with different values
    # payout_instructions should _NOT_ be updated after calling update_pool_config
    existing_launcher_id: bytes32 = launcher_id
    existing_pool_url: str = ""
    existing_payout_instructions_ph: bytes32 = rand_hash()
    existing_target_puzzle_hash: bytes32 = rand_hash()
    existing_p2_singleton_puzzle_hash: bytes32 = rand_hash()
    existing_owner_pubkey: G1Element = rand_g1()
    existing_config: MockPoolWalletConfig = MockPoolWalletConfig(
        launcher_id=existing_launcher_id,
        pool_url=existing_pool_url,
        payout_instructions=existing_payout_instructions_ph.hex(),
        target_puzzle_hash=existing_target_puzzle_hash,
        p2_singleton_puzzle_hash=existing_p2_singleton_puzzle_hash,
        owner_public_key=existing_owner_pubkey,
    )
    # Return the pre-existing config entry for this launcher
    def mock_load_pool_config(root_path: Path) -> List[MockPoolWalletConfig]:
        nonlocal existing_config
        return [existing_config]
    monkeypatch.setattr("flax.pools.pool_wallet.load_pool_config", mock_load_pool_config)
    # Mock pool_config.update_pool_config to capture the updated configs
    async def mock_pool_config_update_pool_config(
        root_path: Path, pool_config_list: List[MockPoolWalletConfig]
    ) -> None:
        nonlocal updated_configs
        updated_configs = pool_config_list
    monkeypatch.setattr("flax.pools.pool_wallet.update_pool_config", mock_pool_config_update_pool_config)
    # Mock PoolWallet.get_current_state to return our canned state
    async def mock_get_current_state(self: Any) -> Any:
        return current_state
    monkeypatch.setattr(PoolWallet, "get_current_state", mock_get_current_state)
    # Create an empty PoolWallet and populate only the required fields
    wallet = PoolWallet(
        wallet_state_manager=MockWalletStateManager(),
        standard_wallet=cast(Any, MockStandardWallet(canned_puzzlehash=payout_instructions_ph)),
        log=MagicMock(),
        wallet_info=MagicMock(),
        wallet_id=MagicMock(),
    )
    await wallet.update_pool_config()
    # Everything except payout_instructions follows the current on-chain state.
    assert len(updated_configs) == 1
    assert updated_configs[0].launcher_id == launcher_id
    assert updated_configs[0].pool_url == pool_url
    # payout_instructions should still point to existing_payout_instructions_ph
    assert updated_configs[0].payout_instructions == existing_payout_instructions_ph.hex()
    assert updated_configs[0].target_puzzle_hash == target_puzzle_hash
    assert updated_configs[0].p2_singleton_puzzle_hash == p2_singleton_puzzle_hash
    assert updated_configs[0].owner_public_key == owner_pubkey
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/config.py | tests/pools/config.py | from __future__ import annotations
# Settings for the harness that runs this test package.
parallel = 2  # presumably the number of parallel test jobs — confirm against the test runner
job_timeout = 60  # per-job timeout, in seconds
checkout_blocks_and_plots = True  # fetch the cached test blocks/plots before running
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/__init__.py | tests/pools/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_wallet_pool_store.py | tests/pools/test_wallet_pool_store.py | from secrets import token_bytes
from typing import Optional
import pytest
from clvm_tools import binutils
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.util.ints import uint64
from flax.wallet.wallet_pool_store import WalletPoolStore
from tests.util.db_connection import DBConnection
def make_child_solution(coin_spend: Optional[CoinSpend], new_coin: Optional[Coin] = None) -> CoinSpend:
    """Build a CoinSpend whose puzzle unconditionally creates one new 1-mojo coin.

    The coin being spent is either ``new_coin``, or — when ``new_coin`` is
    None — the first coin created by ``coin_spend``, in which case
    ``coin_spend`` must not be None. (Callers in this file pass
    ``coin_spend=None`` together with an explicit ``new_coin``, so the
    parameter is Optional.)
    """
    new_puzzle_hash: bytes32 = bytes32(token_bytes(32))
    solution = "()"
    # Quoted puzzle emitting a single `(51 <puzzle_hash> 1)` condition,
    # i.e. create a 1-mojo coin at the freshly generated puzzle hash.
    puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
    puzzle_prog = Program.to(binutils.assemble(puzzle))
    solution_prog = Program.to(binutils.assemble(solution))
    if new_coin is None:
        # Derive the child coin from the parent spend's first addition.
        assert coin_spend is not None
        new_coin = coin_spend.additions()[0]
    sol: CoinSpend = CoinSpend(
        new_coin,
        SerializedProgram.from_program(puzzle_prog),
        SerializedProgram.from_program(solution_prog),
    )
    return sol
class TestWalletPoolStore:
    """Exercises WalletPoolStore: add_spend idempotence, ancestry checks,
    rollback, and cache rebuild after an abandoned DB transaction."""

    @pytest.mark.asyncio
    async def test_store(self):
        async with DBConnection(1) as db_wrapper:
            store = await WalletPoolStore.create(db_wrapper)
            try:
                async with db_wrapper.writer():
                    coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
                    coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
                    solution_0: CoinSpend = make_child_solution(None, coin_0)
                    solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
                    solution_1: CoinSpend = make_child_solution(solution_0)
                    assert await store.get_spends_for_wallet(0) == []
                    assert await store.get_spends_for_wallet(1) == []
                    await store.add_spend(1, solution_1, 100)
                    assert await store.get_spends_for_wallet(1) == [(100, solution_1)]
                    # Idempotent
                    await store.add_spend(1, solution_1, 100)
                    assert await store.get_spends_for_wallet(1) == [(100, solution_1)]
                    # Same spend at a different height must be rejected.
                    with pytest.raises(ValueError):
                        await store.add_spend(1, solution_1, 101)
                    # Rebuild cache, no longer present: abort the writer
                    # transaction so nothing above persists.
                    raise RuntimeError("abandon transaction")
            except Exception:
                pass
            # After the aborted transaction the store must be empty again.
            assert await store.get_spends_for_wallet(1) == []
            await store.add_spend(1, solution_1, 100)
            assert await store.get_spends_for_wallet(1) == [(100, solution_1)]
            # A spend from a different lineage at the same height is rejected.
            solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_1_alt, 100)
            assert await store.get_spends_for_wallet(1) == [(100, solution_1)]
            # Extend the chain: 2 and 3 at height 100, 4 at 101 (99 rejected
            # because heights must not go backwards).
            solution_2: CoinSpend = make_child_solution(solution_1)
            await store.add_spend(1, solution_2, 100)
            solution_3: CoinSpend = make_child_solution(solution_2)
            await store.add_spend(1, solution_3, 100)
            solution_4: CoinSpend = make_child_solution(solution_3)
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_4, 99)
            await store.add_spend(1, solution_4, 101)
            # Rollback keeps entries at heights <= the rollback height.
            await store.rollback(101, 1)
            assert await store.get_spends_for_wallet(1) == [
                (100, solution_1),
                (100, solution_2),
                (100, solution_3),
            (101, solution_4),
            ]
            await store.rollback(100, 1)
            assert await store.get_spends_for_wallet(1) == [
                (100, solution_1),
                (100, solution_2),
                (100, solution_3),
            ]
            # Re-adding an already-present earlier spend at a new height fails,
            # but appending the rolled-back tip again is allowed.
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_1, 105)
            await store.add_spend(1, solution_4, 105)
            solution_5: CoinSpend = make_child_solution(solution_4)
            await store.add_spend(1, solution_5, 105)
            # Rolling back past every entry empties the wallet's spends.
            await store.rollback(99, 1)
            assert await store.get_spends_for_wallet(1) == []
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_pool_config.py | tests/pools/test_pool_config.py | # flake8: noqa: E501
from blspy import AugSchemeMPL, PrivateKey
from flax.pools.pool_config import PoolWalletConfig
from flax.util.config import create_default_flax_config, load_config, lock_config, save_config
def test_pool_config(tmp_path):
    """Round-trip a pool config entry: saving the raw dict and saving
    PoolWalletConfig.to_json_dict() must produce identical configs."""
    test_root = tmp_path
    test_path = test_root / "config"
    eg_config = test_path / "config.yaml"
    to_config = test_path / "test_pool_config.yaml"
    # Generate the default config, then rename it so we operate on a copy.
    create_default_flax_config(test_root, ["config.yaml"])
    assert eg_config.exists()
    eg_config.rename(to_config)
    config = load_config(test_root, "test_pool_config.yaml")
    # NOTE(review): auth_sk is never used below — candidate for removal.
    auth_sk: PrivateKey = AugSchemeMPL.key_gen(b"1" * 32)
    d = {
        "owner_public_key": "84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5",
        "p2_singleton_puzzle_hash": "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824",
        "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
        "pool_url": "localhost",
        "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
        "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
    }
    pwc = PoolWalletConfig.from_json_dict(d)
    # Write the same entry once as the raw dict and once re-serialized.
    config_a = config.copy()
    config_b = config.copy()
    config_a["wallet"]["pool_list"] = [d]
    config_b["wallet"]["pool_list"] = [pwc.to_json_dict()]
    print(config["wallet"]["pool_list"])
    with lock_config(test_root, "test_pool_config_a.yaml"):
        save_config(test_root, "test_pool_config_a.yaml", config_a)
    with lock_config(test_root, "test_pool_config_b.yaml"):
        save_config(test_root, "test_pool_config_b.yaml", config_b)
    assert config_a == config_b
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/pools/test_pool_cmdline.py | tests/pools/test_pool_cmdline.py | # flake8: noqa: E501
import click
import pytest
from click.testing import CliRunner, Result
from flax.cmds.plotnft import validate_fee
from flax.cmds.plotnft import create_cmd, show_cmd
pytestmark = pytest.mark.skip("TODO: Works locally but fails on CI, needs to be fixed!")
class TestPoolNFTCommands:
    """CLI-level checks for the plotnft commands (module is skipped on CI, see pytestmark)."""

    def test_validate_fee(self):
        # These sample values must be rejected by the click validator.
        for bad in ("1.0", "-1"):
            with pytest.raises(click.exceptions.BadParameter):
                validate_fee(None, "fee", bad)
        # Accepted values are returned unchanged, as strings.
        for good in ("0", "0.000000000001", "0.5"):
            assert validate_fee(None, "fee", good) == good

    def test_plotnft_show(self):
        # `plotnft show` with no arguments should exit cleanly.
        outcome: Result = CliRunner().invoke(show_cmd, [])
        assert outcome.exit_code == 0

    def test_validate_fee_cmdline(self):
        # Invoking create with these arguments is expected to fail.
        outcome: Result = CliRunner().invoke(create_cmd, ["create", "-s", "local", "--fee", "0.005"])
        assert outcome.exit_code != 0
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_db_conversion.py | tests/core/test_db_conversion.py | import pytest
import random
from pathlib import Path
from typing import List, Tuple, Optional
from tests.setup_nodes import test_constants
from tests.util.temp_file import TempFile
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64
from flax.cmds.db_upgrade_func import convert_v1_to_v2
from flax.util.db_wrapper import DBWrapper2
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.full_node.hint_store import HintStore
from flax.consensus.blockchain import Blockchain
from flax.consensus.multiprocess_validation import PreValidationResult
def rand_bytes(num) -> bytes:
    """Return `num` pseudo-random bytes drawn from the global `random` generator."""
    return bytes(random.getrandbits(8) for _ in range(num))
class TestDbUpgrade:
    """End-to-end check of the v1 -> v2 database conversion: populate a v1 DB
    with 1000 blocks (and optionally hints), convert it, then verify both DBs
    answer every block/coin/hint query identically."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("with_hints", [True, False])
    async def test_blocks(self, default_1000_blocks, with_hints: bool):
        blocks = default_1000_blocks
        hints: List[Tuple[bytes32, bytes]] = []
        for i in range(351):
            hints.append((bytes32(rand_bytes(32)), rand_bytes(20)))
        # the v1 schema allows duplicates in the hints table
        for i in range(10):
            coin_id = bytes32(rand_bytes(32))
            hint = rand_bytes(20)
            hints.append((coin_id, hint))
            hints.append((coin_id, hint))
        for i in range(2000):
            hints.append((bytes32(rand_bytes(32)), rand_bytes(20)))
        for i in range(5):
            coin_id = bytes32(rand_bytes(32))
            hint = rand_bytes(20)
            hints.append((coin_id, hint))
            hints.append((coin_id, hint))
        with TempFile() as in_file, TempFile() as out_file:
            # Build the source v1 database (journaling off for speed).
            db_wrapper1 = await DBWrapper2.create(
                database=in_file,
                reader_count=1,
                db_version=1,
                journal_mode="OFF",
                synchronous="OFF",
            )
            try:
                block_store1 = await BlockStore.create(db_wrapper1)
                coin_store1 = await CoinStore.create(db_wrapper1)
                if with_hints:
                    hint_store1: Optional[HintStore] = await HintStore.create(db_wrapper1)
                    for h in hints:
                        assert hint_store1 is not None
                        await hint_store1.add_hints([(h[0], h[1])])
                else:
                    hint_store1 = None
                # Feed all blocks through the blockchain so coin/block tables fill.
                bc = await Blockchain.create(coin_store1, block_store1, test_constants, Path("."), reserved_cores=0)
                for block in blocks:
                    # await _validate_and_add_block(bc, block)
                    results = PreValidationResult(None, uint64(1), None, False)
                    result, err, _ = await bc.receive_block(block, results)
                    assert err is None
            finally:
                await db_wrapper1.close()
            # now, convert v1 in_file to v2 out_file
            convert_v1_to_v2(in_file, out_file)
            db_wrapper1 = await DBWrapper2.create(database=in_file, reader_count=1, db_version=1)
            db_wrapper2 = await DBWrapper2.create(database=out_file, reader_count=1, db_version=2)
            try:
                block_store1 = await BlockStore.create(db_wrapper1)
                coin_store1 = await CoinStore.create(db_wrapper1)
                hint_store1 = None
                if with_hints:
                    hint_store1 = await HintStore.create(db_wrapper1)
                block_store2 = await BlockStore.create(db_wrapper2)
                coin_store2 = await CoinStore.create(db_wrapper2)
                hint_store2 = await HintStore.create(db_wrapper2)
                if with_hints:
                    # check hints
                    for h in hints:
                        assert hint_store1 is not None
                        assert h[0] in await hint_store1.get_coin_ids(h[1])
                        assert h[0] in await hint_store2.get_coin_ids(h[1])
                # check peak
                assert await block_store1.get_peak() == await block_store2.get_peak()
                # check blocks
                for block in blocks:
                    hh = block.header_hash
                    height = block.height
                    assert await block_store1.get_full_block(hh) == await block_store2.get_full_block(hh)
                    assert await block_store1.get_full_block_bytes(hh) == await block_store2.get_full_block_bytes(hh)
                    assert await block_store1.get_full_blocks_at([height]) == await block_store2.get_full_blocks_at(
                        [height]
                    )
                    assert await block_store1.get_block_records_by_hash(
                        [hh]
                    ) == await block_store2.get_block_records_by_hash([hh])
                    assert await block_store1.get_block_record(hh) == await block_store2.get_block_record(hh)
                    assert await block_store1.is_fully_compactified(hh) == await block_store2.is_fully_compactified(hh)
                # check coins
                for block in blocks:
                    coins = await coin_store1.get_coins_added_at_height(block.height)
                    assert await coin_store2.get_coins_added_at_height(block.height) == coins
                    assert await coin_store1.get_coins_removed_at_height(
                        block.height
                    ) == await coin_store2.get_coins_removed_at_height(block.height)
                    for c in coins:
                        n = c.coin.name()
                        assert await coin_store1.get_coin_record(n) == await coin_store2.get_coin_record(n)
            finally:
                await db_wrapper1.close()
                await db_wrapper2.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_merkle_set.py | tests/core/test_merkle_set.py | from __future__ import annotations
import itertools
import random
from hashlib import sha256
from itertools import permutations
from typing import List
import pytest
from chia_rs import compute_merkle_set_root
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.merkle_set import MerkleSet, confirm_included_already_hashed
class TestMerkleSet:
    """Inclusion/exclusion proofs over real reward coins, plus order independence."""

    @pytest.mark.asyncio
    async def test_basics(self, bt):
        num_blocks = 20
        blocks = bt.get_consecutive_blocks(num_blocks)
        merkle_set = MerkleSet()
        merkle_set_reverse = MerkleSet()
        coins = list(itertools.chain.from_iterable(map(lambda block: block.get_included_reward_coins(), blocks)))
        # excluded coin (not present in 'coins' and Merkle sets)
        excl_coin = coins.pop()
        for coin in reversed(coins):
            merkle_set_reverse.add_already_hashed(coin.name())
        for coin in coins:
            merkle_set.add_already_hashed(coin.name())
        for coin in coins:
            # Every included coin proves membership; the popped coin never does.
            result, proof = merkle_set.is_included_already_hashed(coin.name())
            assert result is True
            result_excl, proof_excl = merkle_set.is_included_already_hashed(excl_coin.name())
            assert result_excl is False
            # Both proofs must verify against the root with the matching verdict.
            validate_proof = confirm_included_already_hashed(merkle_set.get_root(), coin.name(), proof)
            validate_proof_excl = confirm_included_already_hashed(merkle_set.get_root(), excl_coin.name(), proof_excl)
            assert validate_proof is True
            assert validate_proof_excl is False
        # Test if the order of adding items changes the outcome
        assert merkle_set.get_root() == merkle_set_reverse.get_root()
def hashdown(buf: bytes) -> bytes32:
    """Hash `buf` prefixed with 30 zero bytes — the interior-node hash used in these tests."""
    zero_pad = bytes(30)
    return bytes32(sha256(zero_pad + buf).digest())
@pytest.mark.asyncio
async def test_merkle_set_invalid_hash_size():
    """Both implementations reject inputs that are not exactly 32 bytes:
    the python MerkleSet via assertion, the rust root via ValueError."""
    merkle_set = MerkleSet()
    # this is too large
    with pytest.raises(AssertionError):
        merkle_set.add_already_hashed(bytes([0x80] + [0] * 32))
    with pytest.raises(ValueError, match="could not convert slice to array"):
        compute_merkle_set_root([bytes([0x80] + [0] * 32)])
    # this is too small
    with pytest.raises(AssertionError):
        merkle_set.add_already_hashed(bytes([0x80] + [0] * 30))
    with pytest.raises(ValueError, match="could not convert slice to array"):
        compute_merkle_set_root([bytes([0x80] + [0] * 30)])
    # empty
    with pytest.raises(AssertionError):
        merkle_set.add_already_hashed(b"")
    with pytest.raises(ValueError, match="could not convert slice to array"):
        compute_merkle_set_root([b""])
@pytest.mark.asyncio
async def test_merkle_set_1():
    """A one-element set's root is the 0x01-prefixed sha256 of the leaf, in both implementations."""
    leaf = bytes32([0x80] + [0] * 31)
    tree = MerkleSet()
    tree.add_already_hashed(leaf)
    assert tree.get_root() == bytes32(compute_merkle_set_root([leaf]))
    assert tree.get_root() == sha256(b"\1" + leaf).digest()
@pytest.mark.asyncio
async def test_merkle_set_duplicate():
    """Inserting the same leaf twice yields the same root as inserting it once."""
    leaf = bytes32([0x80] + [0] * 31)
    tree = MerkleSet()
    tree.add_already_hashed(leaf)
    tree.add_already_hashed(leaf)
    assert tree.get_root() == bytes32(compute_merkle_set_root([leaf, leaf]))
    assert tree.get_root() == sha256(b"\1" + leaf).digest()
@pytest.mark.asyncio
async def test_merkle_set_0():
    """An empty set's root is 32 zero bytes, in both implementations."""
    empty = MerkleSet()
    assert empty.get_root() == bytes32(compute_merkle_set_root([]))
    assert empty.get_root() == bytes32([0] * 32)
@pytest.mark.asyncio
async def test_merkle_set_2():
    """Two leaves hash into a single interior node, lower leaf first."""
    hi = bytes32([0x80] + [0] * 31)
    lo = bytes32([0x70] + [0] * 31)
    tree = MerkleSet()
    tree.add_already_hashed(hi)
    tree.add_already_hashed(lo)
    assert tree.get_root() == bytes32(compute_merkle_set_root([hi, lo]))
    assert tree.get_root() == hashdown(b"\1\1" + lo + hi)
@pytest.mark.asyncio
async def test_merkle_set_2_reverse():
    """Same two leaves inserted in the opposite order still produce the same root."""
    hi = bytes32([0x80] + [0] * 31)
    lo = bytes32([0x70] + [0] * 31)
    tree = MerkleSet()
    tree.add_already_hashed(lo)
    tree.add_already_hashed(hi)
    assert tree.get_root() == bytes32(compute_merkle_set_root([lo, hi]))
    assert tree.get_root() == hashdown(b"\1\1" + lo + hi)
@pytest.mark.asyncio
async def test_merkle_set_3():
    """Three leaves: python and rust roots agree for every insertion order and
    match the hand-computed expected tree."""
    a = bytes32([0x80] + [0] * 31)
    b = bytes32([0x70] + [0] * 31)
    c = bytes32([0x71] + [0] * 31)
    values = [a, b, c]
    for vals in permutations(values):
        merkle_set = MerkleSet()
        for v in vals:
            merkle_set.add_already_hashed(v)
        assert merkle_set.get_root() == bytes32(compute_merkle_set_root(list(vals)))
        assert merkle_set.get_root() == hashdown(b"\2\1" + hashdown(b"\1\1" + b + c) + a)
    # this tree looks like this:
    #
    #        o
    #       / \
    #      o   a
    #     / \
    #    b   c
@pytest.mark.asyncio
async def test_merkle_set_4():
    """Four leaves split into two interior pairs; all insertion orders agree."""
    a = bytes32([0x80] + [0] * 31)
    b = bytes32([0x70] + [0] * 31)
    c = bytes32([0x71] + [0] * 31)
    d = bytes32([0x81] + [0] * 31)
    values = [a, b, c, d]
    for vals in permutations(values):
        merkle_set = MerkleSet()
        for v in vals:
            merkle_set.add_already_hashed(v)
        assert merkle_set.get_root() == bytes32(compute_merkle_set_root(list(vals)))
        assert merkle_set.get_root() == hashdown(b"\2\2" + hashdown(b"\1\1" + b + c) + hashdown(b"\1\1" + a + d))
    # this tree looks like this:
    #
    #         o
    #        / \
    #       o   o
    #      / \ / \
    #     b  c a  d
@pytest.mark.asyncio
async def test_merkle_set_5():
    """Five leaves with a long shared prefix force a deep one-sided tree;
    all insertion orders must still agree with the bottom-up expected root."""
    BLANK = bytes32([0] * 32)
    a = bytes32([0x58] + [0] * 31)
    b = bytes32([0x23] + [0] * 31)
    c = bytes32([0x21] + [0] * 31)
    d = bytes32([0xCA] + [0] * 31)
    e = bytes32([0x20] + [0] * 31)
    # build the expected tree bottom up, since that's simpler
    expected = hashdown(b"\1\1" + e + c)
    expected = hashdown(b"\2\1" + expected + b)
    expected = hashdown(b"\2\0" + expected + BLANK)
    expected = hashdown(b"\2\0" + expected + BLANK)
    expected = hashdown(b"\2\0" + expected + BLANK)
    expected = hashdown(b"\0\2" + BLANK + expected)
    expected = hashdown(b"\2\1" + expected + a)
    expected = hashdown(b"\2\1" + expected + d)
    values = [a, b, c, d, e]
    for vals in permutations(values):
        merkle_set = MerkleSet()
        for v in vals:
            merkle_set.add_already_hashed(v)
        assert merkle_set.get_root() == bytes32(compute_merkle_set_root(list(vals)))
        assert merkle_set.get_root() == expected
    # this tree looks like this:
    #
    #             o
    #            / \
    #           o   d
    #          / \
    #         o   a
    #        / \
    #       E   o
    #          / \
    #         o   E
    #        / \
    #       o   E
    #      / \
    #     o   E
    #    / \
    #   o   b
    #  / \
    # e   c
@pytest.mark.asyncio
async def test_merkle_left_edge():
    """Leaves whose bits differ only at the very bottom push the tree 253
    levels down its left edge; all insertion orders agree."""
    BLANK = bytes32([0] * 32)
    a = bytes32([0x80] + [0] * 31)
    b = bytes32([0] * 31 + [1])
    c = bytes32([0] * 31 + [2])
    d = bytes32([0] * 31 + [3])
    values = [a, b, c, d]
    expected = hashdown(b"\1\1" + c + d)
    expected = hashdown(b"\1\2" + b + expected)
    for _ in range(253):
        expected = hashdown(b"\2\0" + expected + BLANK)
    expected = hashdown(b"\2\1" + expected + a)
    for vals in permutations(values):
        merkle_set = MerkleSet()
        for v in vals:
            merkle_set.add_already_hashed(v)
        assert merkle_set.get_root() == bytes32(compute_merkle_set_root(list(vals)))
        assert merkle_set.get_root() == expected
    # this tree looks like this:
    #           o
    #          / \
    #         o   a
    #        / \
    #       o   E
    #      / \
    #     .   E
    #     .
    #     .
    #    / \
    #   o   E
    #  / \
    # b   o
    #    / \
    #   c   d
@pytest.mark.asyncio
async def test_merkle_right_edge():
    """Mirror of the left-edge case: leaves near 0xFF...FF push the tree 253
    levels down its right edge; all insertion orders agree."""
    BLANK = bytes32([0] * 32)
    a = bytes32([0x40] + [0] * 31)
    b = bytes32([0xFF] * 31 + [0xFF])
    c = bytes32([0xFF] * 31 + [0xFE])
    d = bytes32([0xFF] * 31 + [0xFD])
    values = [a, b, c, d]
    expected = hashdown(b"\1\1" + c + b)
    expected = hashdown(b"\1\2" + d + expected)
    for _ in range(253):
        expected = hashdown(b"\0\2" + BLANK + expected)
    expected = hashdown(b"\1\2" + a + expected)
    for vals in permutations(values):
        merkle_set = MerkleSet()
        for v in vals:
            merkle_set.add_already_hashed(v)
        assert merkle_set.get_root() == bytes32(compute_merkle_set_root(list(vals)))
        assert merkle_set.get_root() == expected
    # this tree looks like this:
    #   o
    #  / \
    # a   o
    #    / \
    #   E   o
    #      / \
    #     E   o
    #         .
    #         .
    #         .
    #         o
    #        / \
    #       d   o
    #          / \
    #         c   b
def rand_hash(rng: random.Random) -> bytes32:
    """Draw 32 pseudo-random bytes from `rng` and wrap them as a bytes32."""
    raw = bytes(rng.getrandbits(8) for _ in range(32))
    return bytes32(raw)
@pytest.mark.asyncio
@pytest.mark.skip("This test is expensive and has already convinced us there are no discrepancies")
async def test_merkle_set_random_regression():
    """Fuzz: random sets of up to 4000 hashes, shuffled 10 ways each, must
    produce identical roots from the python and rust implementations."""
    rng = random.Random()
    rng.seed(123456)
    for i in range(100):
        size = rng.randint(0, 4000)
        values: List[bytes32] = [rand_hash(rng) for _ in range(size)]
        print(f"iter: {i}/100 size: {size}")
        for _ in range(10):
            rng.shuffle(values)
            merkle_set = MerkleSet()
            for v in values:
                merkle_set.add_already_hashed(v)
            python_root = merkle_set.get_root()
            rust_root = bytes32(compute_merkle_set_root(values))
            assert rust_root == python_root
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_daemon_rpc.py | tests/core/test_daemon_rpc.py | import pytest
from flax import __version__
from flax.daemon.client import connect_to_daemon
class TestDaemonRpc:
@pytest.mark.asyncio
async def test_get_version_rpc(self, get_daemon, bt):
ws_server = get_daemon
config = bt.config
client = await connect_to_daemon(
config["self_hostname"], config["daemon_port"], 50 * 1000 * 1000, bt.get_daemon_ssl_context()
)
response = await client.get_version()
assert response["data"]["success"]
assert response["data"]["version"] == __version__
ws_server.stop()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/large_block.py | tests/core/large_block.py | from __future__ import annotations
import zstd
from flax.types.full_block import FullBlock
LARGE_BLOCK: FullBlock = FullBlock.from_bytes(
zstd.decompress(
bytes.fromhex(
"28b52ffda006b904005c3204ec00080000000145318d2f5c31cc4403aad4bfed"
"0c8d5a92daf0051a3590071f8d153433c93adf00000000042000000200b93591"
"d2c0454c4f4fd842612bd86a04c40d9b73b6211b16873b9ccaf6222adc83f85b"
"bae26d82f7d86cf1733140fe341565c6844cfe8ef9db536ccc990709808325f8"
"9327ff24c980956c8ea55dd9fda6965e709bbbb092708c8157b05bd10e010001"
"80c59123071311b61e77519bc293c5abb45d3535fa1cd0a39556681c480f9922"
"00000001f73f25cd908f8f09bfc62d6f99d567374221627b3a478b69af77a53c"
"f5d2e29e0000000003869a4903000fd2720586616effb504b55017910fe485bf"
"638c7e0726309e6b61d6215bbabbc393c260ac48d6187425ba9521108b05f5ea"
"bd073871bea38b1ef1f2bf3999119270f247d7dafff11fee1b36092c64d81180"
"ee7eb271fb3100265c1c289ad1200200dd31c2e77b86dd74f14a166d8f431f7f"
"257a76b93d77c06b0d6fc4353e9bdafe0050f003007a0731e0670a58ccdc3f20"
"5a68042b653408ae36b4b7d521baa2e0bfdb663e4e232ff15a9793edcdb88cea"
"57d8f4189a3950df103d84c4bf87b06f8ee5fb6a2a4dfe5f55380ff0eba8f2c9"
"ec59f61edae053afcc281c2941fef4812f10930b2101007092022fecfdd2246d"
"39ad813beb7ffee1b4e3bf94367db60509e88744de122010020000017e020054"
"01e7ccd158d6dfde8043263b1378d9c8b0fafe86e7c907c1f08aa3d2868126d5"
"0c225c5fbc914314ee7e71cc9fb77619b18727536af33e73d44c0b1cc34c1da7"
"64251b190880976eadc7aa3226f944c7129a489fb8ebba2b794b92f1216c4f01"
"00bcacf024b1c5089efe2f50830d50f6a740a055690d75fa16d991b19a59a3d5"
"e6ef830d03002d1853c5216f05cefb5e9696b75975edf628457c3e0c33652702"
"b917ccfc11d05be1129445b5c0f527c07e9659be46a1425ae2f4affa91d3dccb"
"e94846afdd3ba3b560311fa8621b4c994d728df71216bb71fb3c65a877eb972e"
"7e8787234c2e0235a0fb3da071d29e33150e4b4ad43669f57a0b049bbce9382f"
"68ac06f67861220bf48b0000760bb2151e02e47e12cd1d5ce15a8475da604670"
"69e57a49d359c11e444a6a1c53f27396abf0e196ec7fe1e63e6d74faebc5f9ea"
"da3d9de08c85914bb65f4877c5ff4d9a1b658d24e4319ad520edac345a14968f"
"d3787908a0d62c24e55aed3301000001036d382449980ee6fbc6395d260251e0"
"3c4e267e32ffe584ad60816466d6f2c95bfaa861fb7d2854bcd1e840d66b0d1e"
"cd37b9bb5e8a6e5394e5a11d12e87e17aa27268c533c07c6467b6b216be301bb"
"57e9a55c9b86538a85e3bd24668e6a31c2a9b508fbdfd6e9184ec8c4fd0e0814"
"b9a13df040341831f8ed92b9e0ed31abbb0200e835ff8a61ca1ff08f8c47227a"
"676c06ec7d95f4b7b8816c95e857dbb1d6eef993eed8b4a9d3c2c397a6444fae"
"f5c40a520a46a3f0cd1f5a55b0f0f40006bc05cd8c59b3c570eaf46ed9ec1e8e"
"f1c5b0d73dcb5c4a849a30bf7e4c02060e7b0a0402f2daa6001b1c2c0c75601c"
"34ad19aa60bfaef0087a4fbb85ba2af6d551a4f5c6c102004287665d998f67d7"
"d412577b44273d687a23c7e99ed168546f548c9066ce60e4fcf4aa803f6dadca"
"c8a35115a92c82413b45cbfcfef65121c0c3a1bc1389160247e5c517e9f5d72d"
"a5794080e28e1897536572f3ce7242675a733afa76d95e02100200010017c239"
"ac364c879cff03f0a0f69edddeae008be527c3ded297ade2d0a1504ff11cde2f"
"829c8a734ee4a752bb69961982116fe483b163bd1e32e5462701af650881bca8"
"c0b48c8594667888219454f26781b248ac98db2627b81147588b8ad2060302fe"
"336f8bdf03b9049ff817f9667a8438d8883675fd102b6e727233828300946e85"
"0300546944fed3aa7ea39753013e194bfaca000bd43f6fecfe216461d3a25f6c"
"5d4ca3ef971f4c96915c70f9b743dd1222ccc0a18571937ecfebba45913eb49a"
"da09b7715601e92b85a6277b0aedabd01b73e25f91aa262921cfecb24158b1b9"
"572d9990d9c6104cb5cd32f12a56f2f44c7ea78562649feeba8de074a7b8b0dd"
"97ae290000dd2b41d2e9a6e0592d2807cdb267691cda9c6b35d43b41b398f622"
"9011b57555a70c6972aadcdc84677981cf1eca5ccc6925f3684841d520dd4931"
"ac220f9a295350a6f2fb6bda4fe615c4fe0f865fcecc646323937bf55ef83867"
"8ccf55d60480e7b6340003f0ec008aed9a862f02ea7252a88ff3aa73771ed0ed"
"1328971f6c026763daad83a7e943025030fa82ba0001a076a4cd8c39e4046f37"
"c3df72c41b4589a737e54a0a8538c9e67b53739de9929243c3d01e03aa043140"
"11fa40ec959c468d8d7a982bbc434f8b491bb4ebbb9e374418f2d862a50eae36"
"9ed73cf6255c200000010076fdbc810a4f39b1889f292ee645b54d6c6f67b28e"
"f85b2f0380cc6106e7f85a16d8f74c3ee59a01effdf0d5bb4cf68a6978cb26d3"
"ebf55944899480b01f280592bab01389b2914bae1413b369af1f48688bb57093"
"0c89c62c29952bf39481d2c98ef1cf9bf1f166970f0a42ab973d3f0060c68ea8"
"06292a50fe799019a1c91c0caec952def97848f19a743eebc3d2a212f466e0eb"
"75d595a5dc9f1ad7ccff8dc51eed7ea498872399ad71199c5a5666f368f4e440"
"a0c727cdcd4d3a9f8f79b66444eae08aa093575196dd8e977d5e5f561fc1d9e4"
"67a5e1e2b76fe883d01cd2528b1e4a43c4bc89d9e9e1eb1e60b4546bf6076b94"
"2e8a0c370c30b3d6b2a310012100000200e388438f1b39afb59614a8c40ff266"
"edfeed91e0653d0cca884c0ef09b3edcf92e4dc36dab51f025bdab1f74e4a97c"
"69c52e6dd6d0c3c5392d427d67e594bf56dd4e2a90f2319770aa71d8058ab03f"
"fc2494add0d9121f0de6e360c3575b40030100a294817eebfb6f98fd4a629680"
"c61b27616eb002bc9ed637faa310ec4496a6140c093e506775ba52905e02274a"
"b50c2f1076f4989127a10839283a0946b27c48621e9183b3da16646c83c2e097"
"790e01aaefd447a1a74781c28216a361d0067062862f000088a0e9876f85bf19"
"e99144d879e840df50aa7112bfad442ce78b3564d30f69d97aa419327bf0557e"
"d6b47b9230190cfd7796cf700cdf00d73574f8d9a08c0c620fd703512c409b20"
"7387c5e93ab4ca76101fe85a156fcfceab1bd9223ff15c2f0100015456f0ee46"
"43ac1761a0e6a7eccbad87e9ecd20cb1fc7355d2158c91926a8d0e010099952f"
"0f1304b0611b03919034c40619923c28532fcfe45b5564cab013519b442b65bd"
"c5d05b06dd0dde2014c7c207a17985086f1045d7333cdacdc1f103e10deb727f"
"7034ab371578bf6cbe1e1af5cfc31410299f2fc4a829eb00782713940004038c"
"48d64c159fe0b93f095c279cea2b0961114f30f2da6fa0b6062c0f925c8a5f16"
"2698796d49f114714870849d5fdec203c1e1ed7f7a7815f1d2c26fb00ef632ea"
"4e244d9d9d258ff588e883aaa29a193a4145580da3b92b07bff58d1afb015201"
"c261057419141c262ec7f2c9d1e512639334b274a26a7718f9204546161820a8"
"70d4ff99bd5cc705d1785ed06e5b0f3906d5cdfdaed42bb8803eab54753b01fd"
"7bdb1bd636893858ecbecbed9f56ddefea51502b8e78327eb7953f027c02a904"
"780100010000bae46d2025999847ef7531507d2081012cc90e7574e2b1358ba8"
"55591956ed17827d9a5053890bef6bd001937070be0e460d560b4bb350079d42"
"e45daf10660d0b72622e2ea868ea3024f6097f0ff2de7a6a9302b5aa9c4146c0"
"caebea5af506075558b12398be332e901927478bbd4e5be0abf77a49c93bf9c1"
"53e2277156b5fa56aa7702004450e4d117181d136fecd2676e984a488651ccc8"
"7b9dd7f65beb55aa7ea93f4fa0452933718a1e3991d042969e9518cd0fcd9bd9"
"bcc8106d0ad3c50b18902228d1c02a9b4c01221dec07c2d60255e034b163b64b"
"d8f02d30fa27c025b9caf13f15ffa4da3894a9b3c0764a5f3973cf7db4803cc5"
"a17f99d7bbd7d943bfc8ddfc110e7c5d0300f0934575549031addc077500a502"
"c0a3ee4ad7e4235d7aa6e95f598f115db144dec7a248e59b2af8de39571b62b3"
"428cf0814a59f0610e655b2c96a61ef9730d8550307168b29ea0f770d860b80f"
"d9616c546b351a6f7828597c5a68822091140400020029b18c071a93390d35da"
"59fcc585ed4570d78daa67fe3e5aed7c5a3318731298d0b338c5a074be9ccd9b"
"d0628e171632822e23d26d3df21e4363fac5e65549020b69e5cf134308136e40"
"05c8fbba1853a511243037c66dcad2077667ad4d2400050315e514b57c1953c1"
"53956f434084ea1707235d2a7c48db5d07dd771394740136598d46d90300aa95"
"1c97e38a5f16ead450364bcd822efae4f97cd14b79aabf8f9cb5427dd320994c"
"092df422834b68ee0d900c9d88074e126ad951922e162a4bd8767a9f71099361"
"ce88356c3d14be821c8f081b61cc2c880ee3fe396bca6f58b9c6d208ec0e0403"
"41ae74cb927a47577afbce1ce5ec49b6bd6290851bc5aba65b9fa54104a66b38"
"dcce7cc70000bc1d0c64347562304425550e17283db62f5032f1b6ae33c3bf35"
"007b7aaa6eb08c8c38f2e6c1289a9754537c9295f676ccb57decd4d372263aea"
"e9567b25b9245f96e0c3b380ed52455e691ac6e8fa378ac0600ab5387d5cc148"
"8809c465281802df307cc917b66178f178f3cfb33ad008ca93d3c8527d62d74a"
"8baff3405adce3dc377cf10ffbf854366f07ceba99f6e2adda089597478b4e2a"
"e2a9f887db8914c303ed5de1671eaf9e257881b60a8449cbb45a4b2150794dc7"
"1ae3ba9233690c075558cbc26630e2a7f06e4865901c8b4572a603dd361b2bda"
"e947272ace5a00b39b826b0200783ee42eda0b4ed59ee68651d93066cd40585c"
"b3b11f5bcdb04868beb8bd5c45e96bb809c96b0ecd17f53936b12a3b31892f38"
"4504247a8b02459bb13490661b5dfb3c7140cfe463ee0fbd4b7bfa360686c8a2"
"9d1270eeb9c25fd195c38e27179dc6967b0e186855a8e6c26e2ca822a84f6cc1"
"4baf4d0e1b4ce810e9d30384c11d03000ef3b80bd8337173716b0e585399e5ad"
"c3582302e7a1119ea99eea7244e5166fc957af61d5ed779ecd7782d13a7d45ed"
"959215afa36b265be659518eb48ce845df2d800c833ac15ea11ff016cd0f5fc1"
"296df8ec2145a90048bdc8ac33b8d23a0103002e56b89e634025031007d2207a"
"9d9831ecb3f162d95def6234d40928c78bd960a65e132c7fc0dd885d1f4ad4a7"
"042c4a2dcb7e47287db0ade97001327f634c4c5deb5988eafdbd1ab612f1473d"
"b7d018a5c44569de6f54c028349aeffde0b000e514fc53638cdb5834c268ac22"
"f8167a507836e5e865c17a15bff474ce8d66d8b1cd750300eccc40f2bf8a681d"
"b77fbc9b352e6d2851cd1731ac5a0fd15efcd0de83aba0bc295313637005d90b"
"2d86057fd0e615517d12a84dad70665d9438342cfd9d0d03d39028608d479dd9"
"b88f022f6673567caac2df5b75d2441dea541f842c79f401090441ae74c134cf"
"d99966bcf2d426014956d1028243aa360ed5155c2b6c361b35a7c6f1cda50100"
"4cbe5fcbefdb6fad78950d2879a87fded55c706d39be47e868ce02d8d000b003"
"41338eb948afe01cd66a3e870cf4eab573b3f34aec23940799e94497fd705805"
"b771b73dd425849b6d4b8416cec21fa4e1716291e9a39daf99de053709cbdf02"
"0100000008e8026581b6e8fd9a9f2e6ddc677266c0750ed2e553870dea039e9a"
"ff7863f9b11e2f4781b245cb5412c532317f0edade0dba2ddd375332482977ff"
"6a4e48884953124eeac352234fd2ddc866a0d16e819afc3e003b7142913503ec"
"b4f9f75977ee2ce2d35782845dd82e03ad6704c9b4284f7c8ad7a3e4963ae743"
"d180582b03a2c7c98aad2f4f1ea6eddbb035f4854c2152cf76b0b164257a5b0b"
"b7f4f1150212a7760682a8447a7d8b621ce5367d76b92028172d4353aeb70254"
"290b68164640ecd9f7111d8b3ea5784ff3c3f56ec799735aa8e341ab4784f08c"
"29fe2d5df04f184201b5cbda17b065d63e1dce29f5b378c01065de35eba25c0b"
"4e4d8572702d199bdd01a3bae093379a2fe64c416856db60a4154c77b76d1881"
"a06cbf9daf26f2cdc716c2258dea545c390fd2f9397ee5391884028293065d48"
"b9f0bae69943dae8205d92ce53d610cd577fda2419a3a241ccc302ebb0c1ca1d"
"b9d5648c35c8aa499cfe01824b55ec6f554f745ece43bf550f96bddac56b24e3"
"89d184f75c16141a73ec6a0000000061b09ad9d143c2d11e613b0ee312dcb0a7"
"dffb8cade11d28e9d6ff5576ca216e31dccd7fbb22572b1bfbed4094471f423d"
"3d6be4a54d6907459673b47e0aa00bc1ae99cd8923f1d8b5da44db245a7cbbf1"
"c069ac94997c19e4a4952fce0e844d32aced5cbd899662bd864456cb6b2223da"
"913769097003c6862e80da64c7f076d3703055018c0423a2f12156f5d86f6e87"
"624a75e915fcf58550ef18c34f4f9224317f910d010101a509f37e814997e4e0"
"33261ed5feccf503e16e71f6fcf242fe26c34e9a26649685edd3852ec5c89c78"
"3af9474eb5583c0777b91157086a2289b09820d088cf5a7e9de6dc7acfed05c2"
"7b2509126ef5792e9b0911f1e71e193bbe17c4b56b2d5c01266080ef00000002"
"ae83525ba8d1dd3f09b277de18ca3e43f0e901977420dc00fc0af20d20c4b3e9"
"2ef2a48bd291ccb23a3529440001ff01ffffffa01a6ed4d80104b5083637123d"
"80ebcd06d64f5808c6b308e0bebf41bdac8572fbffff02ffff0103ff0bff09ff"
"05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8017"
"ff2f088080ff0104ffff040504ffff01ff32ff02ffff03ffff07ff050bffff01"
"02090d01ff050180b08ae834af2e68c131e5c83365b887322c246f1fcb5cf191"
"dda0bf53a717ea26f65c8a4b01e62455eb1fcaf801047ef5cf8515e5e84099ff"
"ffff33ffa09121ce7b5a6fb887c8afbc942912601d602df0c94d970a5feaae64"
"2bf687c0bcff8609184e72a00080a1c8848121fba348ac728dd4280fe402b336"
"a43340b00bc2702a04214d687745ff850315646e1680ffff3cffa09e4876aa7d"
"57182defb8773d453e882632e80fbe484d87d93fabf30b0ff4d2c5ffffa02bda"
"0dd17ae4da5e22d82f60100e30b8f03ba9e59668639e6dc46137853e9097b994"
"53d00d72cd3afa1c8c7186fe9e22da83011bbe869af9c9b7804f7c959203a61e"
"ea1c0d96f4018af6cd296f29350d1633779a7d3dffa0a926fa10c552272a9ee2"
"a274a75558d4edfdad6f95339e0db7802b11a9154f090455109ed7e65eb0ad67"
"d48f13b0dc8d44563f30b8e15bddbc1699e3d93aa2fdb5da99193f4bf7d8c1c2"
"6e6dea647fdf06a7ff7716b708ed4b4133eff6719e7a3a311065c63dbbdcbfb6"
"4c8de29428685f0e0760ec11e645c9e1bf47700980564028d588d942fb157c6b"
"e71563e9e1dec083fae0acf1663cd348b5315d54785d3ac18ae2989d930ba128"
"5b13e69c4ac369b917a6ae5cda5ba5352a40961c02faf6acb8e65c26e86430f3"
"7be7494827775cff84d2fc67d96d2c251dcc1016cb14d9f456ed2e7e44a4a163"
"235518f2461bcb3d84f94b2fbaf2c685ea8a10c469f5887d6228a07939bc3508"
"fb256d3058808bd0ff199b1bfa6759924dfaceb725746a3b3265df7399be3c7b"
"18ac94f77499b7409b571b8d83a172ac6a53ab780e8b8a5f3146ff4c0d3acf43"
"7b503c9290f072cef371619a8c829e5d2144209ed95b6eedfa0779fd2c54076d"
"83f540d6365f57b109c90245d1f52bdeb0b62d55ebd7fd2439ed28b847a8a640"
"7b5fe9a28edb8b3543bf8464d5c7df9b4a3202a4932bbb443d957ec39e88ffee"
"eef22d2799e0a0d62d3929e3ad2b4cfc598955b0c7cb54a98d719d50d0f15ba5"
"76282aadfcb063f0926decf92bf1302c87c641ae801cfce66353b4c39106bc98"
"df17a8033c6a7e544c511f9802a07be95c37c99ea87a2eac90a6dbdffa09cbf7"
"867c008b65371053f61e5f82ad8828bb9800e51b0da9e915f264cbf139ebbdea"
"313460119a8d160d226ec75c308aa6f2ca49227db35ad3204e073a8edef69dbb"
"0dbac03c2e5af7370b231c5d03f67083ae907f56e553269b7de2361f87a31bd6"
"cea12c9a135da822a375bb3d6b4c4a771b52930d98edc0d6dd843f5a9b9c3c7e"
"5ab38e9e981d17d9738a75644e8dca71b04a301b6b3b635b12051b17712f644f"
"562a25305cdc6f61d54c1901507d82fb563915e17570ac161c453541fff984c8"
"8be78fa80f257c36d328d9d056d3ee111b1c7433c4cda98948e5ac7042fc0217"
"d42a7f802ab3c5dd649bd5fa2725143402d3e1a30528cf74ecadb465b82c3b26"
"c3ca72db013bd2165a4fba6ce08bb6903777e407133b4925f8c4c94fd86784b3"
"c02f16a21e135d4e4ce0ee2885e3b403ca3ec71102a807b95af5bd1392089934"
"0fdf13f97d4efebcaef9dfb7c0a8ff63154fee2a03fa0c3ab347e8db61fdadc1"
"12d5870f7e84d6b5a2a4827299004ff0909f96f37678dec594f4a2e1b5e5a856"
"1e9c4d367b1f088bba953108f3d829dd5420539331ac3570eafead57e391712e"
"02a3f14b248077c78354216691ce747484a3779b62f7e2fd39f8a013b4ed6081"
"098b66a2156f890972658959efef0b9cca447a9550bfac78c215f71105ede56f"
"05950b7095d8fec228bdf2fce0280108c01045c8ab1d19a4b3613012f566737b"
"bb413559d9a0b0efa9f44bd4dcb95ed8bee558b59532f43aa192496b7dcd7f90"
"06a39bda55427588d97b7457c88ab1b72f1f7e8a8afd9b3cc9afbbc00aab5dad"
"99560336ba1e8dd0973f8458abdaddacb7eccae577b7c84a765456711df5dd9f"
"5c5efc71a1393b8b534ccd3392dbc4292a1ff5efb08c4c3d9e4388a1893802cd"
"e6ff38a0390fed87bdb57323758fa4aa9faa50e3bd5498fe6179e85d35ee2409"
"6023658ddedcbcd896766c6563094e86c8358f5dd3af8cb650647e806bc06e59"
"66bed85fb55f622edd100d57c63d2480e20d6c48f902277aa4b69669180c3121"
"ba12f0f9285b3fb0138c2606c553091f6043fc668d475e2ec2ce5cdcaba47361"
"022c0a95b18a6707977fab34820b91dd439727c64ee96147d17eb3622f222ed5"
"f0b2a38f8b5328e043340682a1c869b2662fd56e337e77835a9f45cc2227f9f5"
"336f3d15860feed9d4dd942d55e3fda1113c3615fdc31dd7e671746379cb7842"
"a391276431c82cd4d342e99792a9f96fa345160b0ebbf653adafad46a5b01f2d"
"c5b3ef8ab4e5bdc1b8b8e5e4f61203ca5de276d6bd2568fa7cf716fd5ac806fd"
"b274a227ce251beef86020860f4bf00bffc2a8b1df2bd5a45fbc2d8b8aa1544d"
"89106b289ece01bd1f07bf8b9c3060701f6f25ca531b3755b486889831627e39"
"54fd31c0c06b4b24c78ab1b527db47d5b52fae0f2dc6078ac826d4ed27b28764"
"7d46035b47b1977061d0b2aea319e6dfddf83b78d75b7ee1aef42304496ff7b0"
"99d84853a048992286b43e334b7b67cc8b4d27b3a7d4df612ae36c591c3b69b2"
"4aa4432e227f7add399828a608c9ad8b837c9bb288a0006e06de8f7260473656"
"a92de3f5a8ee044ba1cbc3f3a519e394f15b12dcac5f6ae1186d13d91355678e"
"7d048a17a8a88ffe8f136ea4d3e982bf2d1b9fd1c626d377a08bb28a05424a03"
"ae998b5d302bfa59c4492929427d0918c42acceddefaa695173a2c62860842f4"
"62c293858b4e2d1915139eb389485140866caf973a833481138b5d6a7a4c0b01"
"753b8a6395495c1c67b0178d0a1185d5ceda3c2e53d1c9bb54098676b43d6a70"
"7dea3f261524bc5393416bacabff3a83a9e0025f7eb930b0086e963b7740dd39"
"ecdbb1a11c2925a3722109cc212ea40e3f6964952bcb40629f603f60fe775eff"
"48cbb6f1581dec166c913819c4af44d8678a11b718a49fe6fdd7dd61c1ebdd10"
"a230ec334a6dd1454a6ac85884a86882e8c62d59cbbd3b6b80505c4675440525"
"e091ce530bf2a3e9d02f6a21a12be35d5134c99e979a4dbddc7e863b3fe5aff7"
"25f085e018c87c69335ed7eb993a84eba55ba2953578480ed00eda737b7e6d2a"
"b0e124dccd1c21fdf8bc61ae3af00504e92ca9450d8351a331422ed7b7f57182"
"7ab227c8db72c767c7e284e9c3c88cc21655985e1bc9635076d00210f5e470ee"
"6351e8d4f01a915436fd05a26e69726a717f1edb99bd26b94b98a074d155c4d3"
"377130b6665cb004d41e38927d853a98eec6abcca8acab16c8a8db5f8d604af9"
"6f95e8bdd3bbdd68f485515731b9b7afc1edae2f5024202f2b6497522cf3a5c1"
"9b0ca0da6c08fc5f4c3dffa00d3f22800409a47299950005a98e8aa0c00c5d38"
"4c92762a41c6e92a147846e9ab0fab54947103d28a52a5546214952d8ff327e8"
"a41d48e0b1a23dbc6ee978f2ff1ad275158547adf8d482324019cd43b61082bc"
"215622c745354cd67ed72c2986b12b5a8592bedea5f0987c3cac222441f34882"
"051b5c83984e5d61b4b3aeb7e50b2ccde8fbc5ffbd7854fe07b6a118deed1d19"
"c9c08259ed60ab2898b78e93b69acad6fe8588599a3d404cf91e766ed9d80023"
"234cd15e9984bf4a511f5069ee75b1949683aabc9fc622df6f9a511044d9b2ed"
"81a9408ede304f886ed0e0b6b4f617568386a6d18c4d392a79f3848bbbcd8ee1"
"92a1071cfb3f74773d6785c355296d842cf2f26588e9e6c59064b5a73fc60e15"
"303aa70e60c615d97596079048516d494cb0e672cfbefb18bd90934cf57b41df"
"2dbc0e6d73ce55237520b2e43818b9d3612e95c71d9a8bd0ffb740819dc13ef8"
"7e918cd6119cb9f55f4beeed24a71529a3d619223e4b46ffb48bd2787f03a03a"
"2d2c1f6bc8cfbaf3813f8cc0640186fa140f9306af520ff2c7455a78ba372a6e"
"e0759b33e642b82e568763835f6e5327d9058a4a96608325ea19076526a43473"
"d1a65c5b5e06b2abb30bff5f5c203424417a1dd6d17aed0f20210fcf6f803e5c"
"571a0bb89171ec4a81168270e82bd6536ffa43a0c1cab4a220022a5ca75ad13e"
"c059945a7f7a82571ba95d78eefe60f8080ddff054037b5d64f3598d2690fb3e"
"c529da656f2315e786a706f8d8eead8cd519486766604ec245b96cb4697e4077"
"a770ebca53e24d2baddf0b8f37da729076d7f4f378fa8e2adfd20fcae2d16261"
"e452a68079b35e0f643e1ee7827cf849c679239fffa78008a8d4bb164462da6b"
"2845ed99e1365da766bf7dd1f82b7af19417cdfca4e96e26f683769884d66919"
"a6eee7c41ab94a1e9e07cb68e5bb08176fa1d8931d39669704da139e92e1c9b3"
"0a9b94ba3577dcbaca69ec31fe9838c0694386f5de4c54995406e7e0f5c123d8"
"0fde76e7d433150221ec23b73a0b2af7524c4460a495d76aa91a97b68c2df9fa"
"80fcec3c77e9186eea71f6b31966ca085dc25834f4fec274be1925d3ab6bda34"
"c391604e763abb8bdc45c708f51688262acab5a1d8ec079098b55217b5675b50"
"147d5d8d3d174a08913eaa226a0664cee7e34fa8106dce87f8910add05b766e3"
"9e6c87c49a67f0ec41a0dff27672b0b8d0691c69ece69de519c052bd6089b305"
"64daa1b8ef490f183507b2c7653d48f8dccbbac69da87a67922b9c90a5ee9cdd"
"3e178c0007dc6ba0648e776d346c26277556f625ece46b641553f298635e19e7"
"08a9653ebb46a0cf94c1f54720a1374a1d063e693596980ba4a193c16a63377b"
"809e6c1fc2464e54d4b5697e547d3f02b33998fa42e6771573672c5b0afcf0e9"
"62ef1f419bcc39e2f1070c591354b0aceab9748b89b76aa659e4abd4bd293cf5"
"32dbe9de7a5320d75479e08aae8ccc52d0f3d530b9e679c23c02826834e91530"
"f6d6b4429972ee0fb41abcb3dbf49f48481eda080d7b304faa59141214fb8d51"
"80d6727a892295fc4b676c9530551cfca5591e3c7760deceef3d0ac1bac5f506"
"c8a3c373ca32273fcdc0a9ba969304869ff849b94265e88c8fdc58cbc415b0c7"
"fc7b0a8dd088c3c0bf62c25b1cf37b3be44b2816991e579306b15eb6e23e49d6"
"3d54c4ae3a136349498394be9c9c25d9c1bd0589fe04befdbfb56121096526cf"
"e2b7f2d52c513169c96192d2b213180cc8fc346b42b96dc3a8d4d917340fe1d9"
"abc0c818f5164cfb3fdb6a4b542d5a87e15edab3f0ffd4c7be636ba6469f684f"
"cb4802657f136dece3a745a5eea5382558888323a5ed744aa8902c1bc379d51e"
"b173ca8b7df096a8cd6e7ff285f233837700ddc0bd12324bf6327825d9abfc61"
"56a107b010855c00b09a8a703f153d032c825e9d43cb00333592b238c835c3bb"
"a3e5a6c5b88b02043153ff4bb70cee73321291d284697dc23318d7eae8c6a6c5"
"32ac58d321aaa493b7546f2c8319ada33c28b4429bb811a20d5844590e11e71e"
"97d3132f3455a9b11c1e1f11dab4617861c9a9dc6612b4a30b827caec3286569"
"3a75b8b227fe1deeae08495e479bf62c481ad792249f0987781ce10cf088ec11"
"0f9b05992b700bc9ce68041a117ba4ee449ea7ca2347d5b5241864b698e4367d"
"e7728e86b834ea9752f983ca06c6eba2c429414017bd52eb84fa463ab4612254"
"339ce52590aef597c79b8fbc48a4a04bddd8eaaa2a4e39a1925a54cbb18b6ff5"
"bf4d19ecf49cee1772a6fe645027b8b5b38e8043c89077485aabce9359370ec7"
"9f68f6e16a77a968b55cde42619f05a58dbf43d72626e0b292a5974f1e56f9d3"
"d986403b58a1a7a76707ef2c64b59c96321f1fe750045aab9c75f1941e7de203"
"45accf7043619ea41cdfb281557fc16d6a1051b49a9e34e2a264bb171c91223d"
"0980b9d5af17fbf49ddb7c827034c8b75b0970c2afe347275e29bd720ed73f06"
"94e316dbb38d41d20f6d1eff1f801d1b6b14e2521543a6ca60969710ece2ca30"
"12e6bb867de277a8ce4036ce08a3ec2da6e9e6c5c32f47f28300afffc479a906"
"bcb8adcb79c735b89203ffc0dbe2363e4b72aebe8166ff192fc25af4a9d6f5a0"
"7ede100adb244c82771aa72b17efc2944683cada0407c41c5be3f4d69d4e8b6c"
"3b692bf689106e66887f22a1b85ef4769af4019b2ac1a98978adaae2dd367bab"
"0a98b9ae606089b7ca2c1dbe0faa6387f118a7024e5341fb8616f44f15e1f8c1"
"11b606d5c773a638412b742f41bfb8095f02572ca3d029cabe5eeff47a60809d"
"4f9135c7d2b0b23d118c963e0cf2098c67254c1f4e7d8de3d5a65fbfac5e4063"
"4a2b882b8a65dd675f7a39cd4f08d13e13161736a3dd11540f6e8176e4fd03e8"
"311df9ad0e34dd660907ada6a8bb3a7e6d47989a965b8ef0050636c4ac3ea216"
"9a14d7f20e28ede6e954268866423ab1d5f7b897d9004e3327c6eb62a641c5df"
"41dbf28f98e018fb71f4cf284ee102471dd3e164a36a82c90fb8780810e66f0f"
"ac2d4aca1a458a85164a818b539e38ebc541be348a662c102e707b3dbc02741a"
"fce31e6a6d29369dbf68ce58a98c69b145ed777b3c12bb609b3f7dcf60adb1a5"
"e6f94219937f86869d7dd140043650db1dd17d600f3d96b39f244055b80f91f2"
"2a81b28c88f42419e146cb132af45a239309c716625cd3f4a84bb0ee16ec66b7"
"b702497d33fa4a5145f6780b7bfc15e6b7061a9d9ed7d58cb787e2fe58694a22"
"a901d009b303952ac966be26c127af209cdb4296246e9c02a7b29ea3d9a251a9"
"d570b0be251f0fa8a8904657506a34aa0d7019f1ca95081c4a4a5bb8ac1b5cd1"
"6255ac163cd42b948bac7ac8c4ae3a3aa7d593823a1ad3d7f08e2d34aac658e5"
"93ab353a415a580ba96f48b2858d89a04557a34966c408c435a641036f3c0752"
"0f4f40b11bea88862631563acb9a948d31919f8cef8a56af01e8ead6a0db29b9"
"aaac41e9049631bc0ee2e2749e61cd131f9fd4373490410a0ac35ba7ae1a52d8"
"7dc393680b246c7357c4d5827f42619efcd8cbc58db86f64a7783793d644c950"
"9b38041e621ee9220141497115e443b84f927de874a8985ff959e319fe6bf32b"
"db0fa62e0cb7f1051613622f90b14bca938741245b5645bf5e3785eaa6831390"
"3e39e7ac16da5b8f8995739f48e3e5290eac6606f97cec9127c677ef7a50f0a0"
"911634fd4972becf28a4f778632ade49422d44a1ea43d2f9d83bbd73bb12656f"
"6439197a9812b91289db023a51f0dbb7bf75389a669f590a5621ea57be691df1"
"43d652724b5d7e922f6f603fa84acefe1cae87fc533b1d778dbd82dc222824e1"
"723ce25aefc1121d36bdf81c678b572a1723cffc7d0652bd9530971abaf7522d"
"fb0536500cb00426d37040215f838099b9614cecf69ff19334bb9224c109e685"
"6ae8576c578c063797d14a76e6292fa9cdf2770779c5f85c7fe8ee314df59460"
"74baa7c1082246757fa4b8c4f3bb91552bc00b8c7653d2feac7baa4e25c627a9"
"116f11f239eb0dcd011314059f443519ae7bf1bac993fb9c2755aaadb96115f4"
"61938bd2952b196425c9861cd376624816e266237715c7ab00cf40235aa1bec8"
"ec88a9938ece814d838d0ed5b281da990d4b2aaeb6025032b55fab75ca28f7ae"
"6a577c3b0697e9e3d2b94c860b886308a096a416792daca359920bd4be95f9cf"
"1b839a9ef45994b1c4e33d11f3d1f3b37f25cf1529f8a12297d8ce0864ad8129"
"d85d0bfadf054f06e8be9431ecd712e12075fca06c2c962fe0bcec78e041a667"
"11d404051a8488d6f684c47c6cd3b7ff457674b20d907864862fd80707fd27c9"
"bdd9917e54ff596f9192489d6a5848b10a16cd9bebaf5bcbc8d4df1c53e8a590"
"cf16908338c113dd81b5554d4a8facc403bf37441f30935516591aad2dd59868"
"7904686ccdbb6fe63b1170ac4473a8dbf32f541f448651ab008511733093597a"
"6e421e7acbbe43e7b19c216242a3a09d27fa63b79e8499ea2b6e6d36e265891b"
"6932e5102cf5e043738b737634ae42e1c82b8d1767b378933fcecbe73d3d16d5"
"19b3139936ea2745ca341ed0c7b020f065ad338f7e8b72de12ad77c153def5f2"
"29ffa6f93cba31efafd805677fc0ff86e9e377fb889a95b531303cf5966e8aab"
"bfb20bee2019abe54750679bcfa6e12c391344210e52b47bf05d16a8bd1b6931"
"57b3eb299a98574d5948188daca02993344d8d195bb57fcdfb83484ca52bd4b1"
"c012d50f7dc6f62394e9e7da745919b6380f80e47f4a8149364ad2427869cf35"
"dab839ce6813b8de4f62e1bd44e85eee5309fe4afa9394b813ae0ae9dff13687"
"4811abe867fca89b562e780e485dbb1ec7e646172f4c5e986f1300c68c725b57"
"f961c32f79e2a3d955907f499b24ae6b4904465bcacf90b643e86882226efa79"
"8096fb74748cebe8c0880f4d635524dc756b8b354dfd8445b1c3eadfeb7aa360"
"d5a9aa0ee64b6b92ebec7cecb33e1bbadbe5fa7f778527d09919df3756496d83"
"a4d1459e7130aeb0a0efd4fe6008c57f918f6701484623ee313dd7b479a7f722"
"16f88273d348586d0e2f0b259389f5ef8588e0dda17a9cbc5f1b4ac05dbbaaaa"
"52e05a6313d985603749e8f3029a26202b0f495ae4fbe40d663cdd3b91e5d036"
"587e84126ba502118f8a25fb0ab20dbb7c2689618f613a881eeed62371f681e1"
"2a4537e2b040507ea9a6111299bceb68d839730c6f9cae15b9f7fec5b41647f8"
"8d766eef908fb04714a58aed67a7f0d5f626809d6c9426809df5399f94564da3"
"6ca0b50c0451a9a87a0c97061e98ea92035097436a8e63323376a413b67238a4"
"7ce768c48b6943e0f8e89b35cf5fc8f0354179545e423c9d9b859c8646ff8f7b"
"0cd3b3684112828d51b6d048a24ae21104a0e4c8681d8c152cb97e942fbb9afd"
"d39f9e18dfb3911c449cf06ae310816fd7a1de89338ebd0551846364c1dd294c"
"20a93dfceae5cf34519af8e46a651342a6721f16dcfe16fece0c8e8cc66085a2"
"e8c8a72faebd807069e205b8290eae29469e0e85c94d890925d56ab460135e38"
"0371f10e968f0c3713053f72d222ab07149abc1866201818969828a27dda0fa7"
"b03fcbbdf00e4b71b64b84596c85fa15575acb5e98fca676f2c7974d7946f219"
"8501461830157f5bb779876066ae4eb9cf02da11d6357c94297369939a815df1"
"eaa240e5148ea67493ef480c10478f984b43d5706a90b761d50a0f59bb05c4e5"
"23c19549d245518e43cb948453ff8685f2ce2d8a208d085b4ca5349334b3ac83"
"8945d4839781a2d7d4de9663b1a2bda211075730397acbb55812f4ebbd143a33"
"cd65cca64bc27fba1ba4e12ee650afeaf901ed493cd0aaf425073b6d02b611c1"
"b76cae815041b73cf1c2e34691544941d5845fc1287811a8ebddf4c3390a3dae"
"b8be9ce852a45087c9e599945e76205a27a2164422ff5829e7b426272fa3aef3"
"e42c3edf45e5c8f0090f32dbca41ec2a2da5999f77933080f1f92d2930b8e62c"
"7ef2f599d929b0ac33255516c5a75d2905d3ba78d40d6bd667f746c6bf58eb58"
"4e845346d21f73cd8bd134d2db53284d24daf2ebaa12b31dff7b6526639ce13f"
"e605b893803de1a198a7a755cd03cd2234ca262329d1937b711a2525d0d36309"
"b0049260d659179b77d9bd058f9ea4d2ca3a87a785413d1672f5edbcf83b5543"
"2cc87460ff06a897712136a67f7726f8269f760b59f4ae873626b2f38ed97ce3"
"47746c2bfc55ea8fd19f616c0f5e9aae34bedba506def2e97ce9f0e8160cc66e"
"9b9906133f64ebc62b98e2a9936736c3b2471338a45b4bd80ebbb1bf0158a1e9"
"5a2fd18812658815f118f5bba30180aeea43efb669667530d5fcba0616a60d8c"
"624803434a376e320d3a966b8bf845252a0f0009c4ef97aec85666699dfdfaac"
"fd88dd456b8dc825c10fd74610ee8cab5d96810bd65348b4b8b42aa9754696a7"
"f14d7ba07d819782a5a85af3503d264ea1b519d1746f2e14dc57fa07753c0ecd"
"e8e0ae6688a233d2bd5969dd6f862cb1a71c91e9a49c616c53cd7633203bcd73"
"77d8ebca2390ccb16e8acbcaa24fa58d836cbc517c8aa5f6cad17747eea0ec08"
"ec7ab2708c95ec0da1e9c053d6c9965e378f9afc33f5a2b8416a08d16a452e15"
"e353ce439ff566197f4b8074523169ec59fa11bc13b0cb7980b60a84001519bf"
"e648965a8ecfd91557bad41c7c21a277275c4f1368eaa9e6cdd29eb904eb13ed"
"e670b63f6936dcddff294bf0c4c5f1e8a79df12d15f48e123db09500abecef40"
"26ed22deb082debda2f6aa18d7cf3b1195feb5834429b9a55d85b9cececcc803"
"499ec6d4cc5ae0c36bec73dfe0b23cfc79952995d6314f6d6887fe7a762bbeb6"
"8bd8f60f92263992b4355b03ac323ae46c0938add37c16de1f98d03f1e1a1fce"
"a0ae30e1709ddd810334159f19b0e021ded858bcb27cb95b1500978225e405ab"
"3b9f7099a30959da12bc76ad4f39efa547f775b7b9652891ac42b1a4c4166540"
"bbbb35baf7c51386296303dd7df761b5df3d389d0a2a2aaab7abbe768ece8dea"
"9797d2531fd8b7f0de25dbd48b6af4b6c243a277b65a966d4783b208737cd7f9"
"53e5d9688345729661bd15ab14955e5cfb63f796fa1b2a3f341e299e9c58d8eb"
"8811dabe1d2f101e1a2b6b9577088cb5d28d4abc7892e2d10ceb096516ba7052"
"a321ea4161b648eeeb0f687d3a92f4a8301208a7ef5afef89b76fd66bc8aabde"
"261b819ee45296d934ccec1b11e244e231902c69872c564df2914b0c2cdc9eaf"
"a61620c16789149b18abccd8d878e93eb593390d4923cde7fad57202f68f9f10"
"623d9646da832e67721625524dce86a62cc3be50d927336e5e5034ffd69b6828"
"18353640e47d34e5ca0b587337fe988870690142ed3c897ed4e7224783a27c55"
"531b7621a1292cf99f19d12efffb78b3d4828400e9792d67aed9f08dd97e9984"
"238e940a901b555817ee0264f3158c20f470670634d0d8690cd844fd63419ff1"
"50e00d9fff45600eba8599a549f7c7849bad12d6e1d840e621c04a5ef251c938"
"5232f056f30f449c09195a1f42b98200fa859bdc36c0f892e8f99fefce158e95"
"5f15f580828ae2f3e3d60069f33f082c2da033b6175ae0937d57442554cde1f8"
"de8a227f67eb930d115155bb2a2befa6cd5922891e16ea0e09278c7f82ab564f"
"0ece1f67a2bf9a48b00976407944400fb7345a6fd48415e4af49e2d10c860f84"
"f41da967e3a168683ac6bff14e27c9d21a6ca525da8383edc104488affbacf76"
"2e2a15161505f238be91a89702599b1e8db8290686fee8fdc2234875e08747f2"
"4f1124118ce736441669fa165f0b91631a6cbb1f72241f7d0370472b12b734f2"
"06bef9a8de5d5fd3ee685d135999095a814941f7d40f36dc1af3100ed5c15680"
"176cca2e45a0d6f052809fceebc7931121067edb87fb0c58413197702d9c6198"
"03b961ea3695358611dd3568d296892cdb452f2c02e00b005317da7774019241"
"0a3cc5a4b57f8ecfab268258513713b200ae2dfda0798b039b3178c2add1ca9e"
"d70e7518d47f727011285f14ef42bd3b1717ca8615f5c04c844b1051b14e671e"
"0bfdee2aa34dc02f5a668738a6496102dc862cbc5a1ac46d1a95a69853a210b4"
"fd6ee52115ecf05d105d83bdb27eb61cca4d4f4da63d237226823bde3bf20196"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_coins.py | tests/core/test_coins.py | from __future__ import annotations
from itertools import permutations
from benchmarks.utils import rand_hash
from flax.types.blockchain_format.coin import hash_coin_ids
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
def test_hash_coin_ids_empty() -> None:
assert hash_coin_ids([]) == std_hash(b"")
def test_hash_coin_ids() -> None:
A = bytes32([1] + [0] * 31)
B = bytes32([2] + [0] * 31)
C = bytes32([3] + [0] * 31)
D = bytes32([4] + [0] * 31)
E = bytes32([254] + [0] * 31)
F = bytes32([255] + [0] * 31)
expected = std_hash(F + E + D + C + B + A)
for i in permutations([A, B, C, D, E, F]):
assert hash_coin_ids(list(i)) == expected
def test_sorting() -> None:
for _ in range(5000):
h1 = rand_hash()
h2 = rand_hash()
assert (h1 < h2) == (h1.hex() < h2.hex())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/make_block_generator.py | tests/core/make_block_generator.py | from __future__ import annotations
from typing import Dict
import blspy
from flax.full_node.bundle_tools import simple_solution_generator
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.generator_types import BlockGenerator
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint64
from flax.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk, solution_for_conditions
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
def int_to_public_key(index: int) -> blspy.G1Element:
index = index % GROUP_ORDER
private_key_from_int = blspy.PrivateKey.from_bytes(index.to_bytes(32, "big"))
return private_key_from_int.get_g1()
def puzzle_hash_for_index(index: int, puzzle_hash_db: dict) -> bytes32:
public_key: blspy.G1Element = int_to_public_key(index)
puzzle: Program = puzzle_for_pk(public_key)
puzzle_hash: bytes32 = puzzle.get_tree_hash()
puzzle_hash_db[puzzle_hash] = puzzle
return puzzle_hash
def make_fake_coin(index: int, puzzle_hash_db: dict) -> Coin:
"""
Make a fake coin with parent id equal to the index (ie. a genesis block coin)
"""
parent: bytes32 = bytes32(index.to_bytes(32, "big"))
puzzle_hash: bytes32 = puzzle_hash_for_index(index, puzzle_hash_db)
amount: uint64 = uint64(100000)
return Coin(parent, puzzle_hash, amount)
def conditions_for_payment(coin) -> Program:
d: Dict = {} # a throwaway db since we don't care
new_puzzle_hash = puzzle_hash_for_index(int.from_bytes(coin.puzzle_hash, "big"), d)
return Program.to([[ConditionOpcode.CREATE_COIN, new_puzzle_hash, coin.amount]])
def make_spend_bundle(count: int) -> SpendBundle:
puzzle_hash_db: Dict = dict()
coins = [make_fake_coin(_, puzzle_hash_db) for _ in range(count)]
coin_spends = []
for coin in coins:
puzzle_reveal = puzzle_hash_db[coin.puzzle_hash]
conditions = conditions_for_payment(coin)
solution = solution_for_conditions(conditions)
coin_spend = CoinSpend(coin, puzzle_reveal, solution)
coin_spends.append(coin_spend)
spend_bundle = SpendBundle(coin_spends, blspy.G2Element())
return spend_bundle
def make_block_generator(count: int) -> BlockGenerator:
spend_bundle = make_spend_bundle(count)
return simple_solution_generator(spend_bundle)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_cost_calculation.py | tests/core/test_cost_calculation.py | from __future__ import annotations
import logging
import pathlib
from typing import List
import pytest
from blspy import G1Element
from clvm_tools import binutils
from flax.consensus.condition_costs import ConditionCost
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.full_node.bundle_tools import simple_solution_generator
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions, get_puzzle_and_solution_for_coin
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.generator_types import BlockGenerator
from flax.wallet.puzzles import p2_delegated_puzzle_or_hidden_puzzle
from tests.setup_nodes import test_constants
from tests.util.misc import assert_runtime
from .make_block_generator import make_block_generator
BURN_PUZZLE_HASH = b"0" * 32
SMALL_BLOCK_GENERATOR = make_block_generator(1)
log = logging.getLogger(__name__)
def large_block_generator(size):
# make a small block and hash it
# use this in the name for the cached big block
# the idea is, if the algorithm for building the big block changes,
# the name of the cache file will also change
name = SMALL_BLOCK_GENERATOR.program.get_tree_hash().hex()[:16]
my_dir = pathlib.Path(__file__).absolute().parent
hex_path = my_dir / f"large-block-{name}-{size}.hex"
try:
with open(hex_path) as f:
hex_str = f.read()
return bytes.fromhex(hex_str)
except FileNotFoundError:
generator = make_block_generator(size)
blob = bytes(generator.program)
# TODO: Re-enable large-block*.hex but cache in ~/.flax/subdir
# with open(hex_path, "w") as f:
# f.write(blob.hex())
return blob
class TestCostCalculation:
@pytest.mark.asyncio
async def test_basics(self, bt):
wallet_tool = bt.get_pool_wallet_tool()
ph = wallet_tool.get_new_puzzlehash()
num_blocks = 3
blocks = bt.get_consecutive_blocks(
num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
)
coinbase = None
for coin in blocks[2].get_included_reward_coins():
if coin.puzzle_hash == ph and coin.amount == 250000000000:
coinbase = coin
break
assert coinbase is not None
spend_bundle = wallet_tool.generate_signed_transaction(
coinbase.amount,
BURN_PUZZLE_HASH,
coinbase,
)
assert spend_bundle is not None
program: BlockGenerator = simple_solution_generator(spend_bundle)
npc_result: NPCResult = get_name_puzzle_conditions(
program,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=False,
)
assert npc_result.error is None
assert len(bytes(program.program)) == 433
coin_spend = spend_bundle.coin_spends[0]
assert coin_spend.coin.name() == npc_result.conds.spends[0].coin_id
error, puzzle, solution = get_puzzle_and_solution_for_coin(program, coin_spend.coin)
assert error is None
assert puzzle == coin_spend.puzzle_reveal
assert solution == coin_spend.solution
assert npc_result.conds.cost == ConditionCost.CREATE_COIN.value + ConditionCost.AGG_SIG.value + 404560
# Create condition + agg_sig_condition + length + cpu_cost
assert (
npc_result.cost
== 404560
+ ConditionCost.CREATE_COIN.value
+ ConditionCost.AGG_SIG.value
+ len(bytes(program.program)) * test_constants.COST_PER_BYTE
)
# Create condition + agg_sig_condition + length + cpu_cost
assert (
npc_result.cost
== ConditionCost.CREATE_COIN.value
+ ConditionCost.AGG_SIG.value
+ len(bytes(program.program)) * test_constants.COST_PER_BYTE
+ 404560 # clvm cost
)
@pytest.mark.asyncio
async def test_mempool_mode(self, bt):
wallet_tool = bt.get_pool_wallet_tool()
ph = wallet_tool.get_new_puzzlehash()
num_blocks = 3
blocks = bt.get_consecutive_blocks(
num_blocks, [], guarantee_transaction_block=True, pool_reward_puzzle_hash=ph, farmer_reward_puzzle_hash=ph
)
coinbase = None
for coin in blocks[2].get_included_reward_coins():
if coin.puzzle_hash == ph:
coinbase = coin
break
assert coinbase is not None
spend_bundle = wallet_tool.generate_signed_transaction(
coinbase.amount,
BURN_PUZZLE_HASH,
coinbase,
)
assert spend_bundle is not None
pk = bytes.fromhex(
"88bc9360319e7c54ab42e19e974288a2d7a817976f7633f4b43f36ce72074e59c4ab8ddac362202f3e366f0aebbb6280"
)
puzzle = p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(G1Element.from_bytes(pk))
disassembly = binutils.disassemble(puzzle)
program = SerializedProgram.from_bytes(
binutils.assemble(
f"(q ((0x3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa {disassembly} 300"
f" (() (q . ((65 '00000000000000000000000000000000' 0x0cbba106e000))) ()))))"
).as_bin()
)
generator = BlockGenerator(program, [], [])
npc_result: NPCResult = get_name_puzzle_conditions(
generator,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=True,
)
assert npc_result.error is not None
npc_result = get_name_puzzle_conditions(
generator,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=False,
)
assert npc_result.error is None
coin = Coin(
bytes32.fromhex("3d2331635a58c0d49912bc1427d7db51afe3f20a7b4bcaffa17ee250dcbcbfaa"),
bytes32.fromhex("14947eb0e69ee8fc8279190fc2d38cb4bbb61ba28f1a270cfd643a0e8d759576"),
300,
)
error, puzzle, solution = get_puzzle_and_solution_for_coin(generator, coin)
assert error is None
@pytest.mark.asyncio
async def test_clvm_mempool_mode(self):
block = Program.from_bytes(bytes(SMALL_BLOCK_GENERATOR.program))
disassembly = binutils.disassemble(block)
# this is a valid generator program except the first clvm
# if-condition, that depends on executing an unknown operator
# ("0xfe"). In mempool mode, this should fail, but in non-mempool
# mode, the unknown operator should be treated as if it returns ().
program = SerializedProgram.from_bytes(binutils.assemble(f"(i (0xfe (q . 0)) (q . ()) {disassembly})").as_bin())
generator = BlockGenerator(program, [], [])
npc_result: NPCResult = get_name_puzzle_conditions(
generator,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=True,
)
assert npc_result.error is not None
npc_result = get_name_puzzle_conditions(
generator,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=False,
)
assert npc_result.error is None
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_tx_generator_speed(self, request):
LARGE_BLOCK_COIN_CONSUMED_COUNT = 687
generator_bytes = large_block_generator(LARGE_BLOCK_COIN_CONSUMED_COUNT)
program = SerializedProgram.from_bytes(generator_bytes)
with assert_runtime(seconds=0.5, label=request.node.name):
generator = BlockGenerator(program, [], [])
npc_result = get_name_puzzle_conditions(
generator,
test_constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=test_constants.COST_PER_BYTE,
mempool_mode=False,
)
assert npc_result.error is None
assert len(npc_result.conds.spends) == LARGE_BLOCK_COIN_CONSUMED_COUNT
@pytest.mark.asyncio
async def test_clvm_max_cost(self):
block = Program.from_bytes(bytes(SMALL_BLOCK_GENERATOR.program))
disassembly = binutils.disassemble(block)
# this is a valid generator program except the first clvm
# if-condition, that depends on executing an unknown operator
# ("0xfe"). In mempool mode, this should fail, but in non-mempool
# mode, the unknown operator should be treated as if it returns ().
# the CLVM program has a cost of 391969
program = SerializedProgram.from_bytes(
binutils.assemble(f"(i (softfork (q . 10000000)) (q . ()) {disassembly})").as_bin()
)
# ensure we fail if the program exceeds the cost
generator = BlockGenerator(program, [], [])
npc_result: NPCResult = get_name_puzzle_conditions(
generator,
10000000,
cost_per_byte=0,
mempool_mode=False,
)
assert npc_result.error is not None
assert npc_result.cost == 0
# raise the max cost to make sure this passes
# ensure we pass if the program does not exceeds the cost
npc_result = get_name_puzzle_conditions(generator, 23000000, cost_per_byte=0, mempool_mode=False)
assert npc_result.error is None
assert npc_result.cost > 10000000
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_standard_tx(self, request: pytest.FixtureRequest):
# this isn't a real public key, but we don't care
public_key = bytes.fromhex(
"af949b78fa6a957602c3593a3d6cb7711e08720415dad83" "1ab18adacaa9b27ec3dda508ee32e24bc811c0abc5781ae21"
)
puzzle_program = SerializedProgram.from_bytes(
p2_delegated_puzzle_or_hidden_puzzle.puzzle_for_pk(G1Element.from_bytes(public_key))
)
conditions = binutils.assemble(
"((51 0x699eca24f2b6f4b25b16f7a418d0dc4fc5fce3b9145aecdda184158927738e3e 10)"
" (51 0x847bb2385534070c39a39cc5dfdc7b35e2db472dc0ab10ab4dec157a2178adbf 0x00cbba106df6))"
)
solution_program = SerializedProgram.from_bytes(
p2_delegated_puzzle_or_hidden_puzzle.solution_for_conditions(conditions)
)
with assert_runtime(seconds=0.1, label=request.node.name):
total_cost = 0
for i in range(0, 1000):
cost, result = puzzle_program.run_with_cost(test_constants.MAX_BLOCK_COST_CLVM, solution_program)
total_cost += cost
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_get_puzzle_and_solution_for_coin_performance():
from clvm.casts import int_from_bytes
from flax.full_node.mempool_check_conditions import DESERIALIZE_MOD
from tests.core.large_block import LARGE_BLOCK
spends: List[Coin] = []
# first, list all spent coins in the block
cost, result = LARGE_BLOCK.transactions_generator.run_with_cost(
DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM, DESERIALIZE_MOD, []
)
coin_spends = result.first()
for spend in coin_spends.as_iter():
parent, puzzle, amount, solution = spend.as_iter()
spends.append(Coin(bytes32(parent.atom), Program.to(puzzle).get_tree_hash(), int_from_bytes(amount.atom)))
print(f"found {len(spends)} spent coins in block")
# benchmark the function to pick out the puzzle and solution for a specific
# coin
generator = BlockGenerator(LARGE_BLOCK.transactions_generator, [], [])
with assert_runtime(seconds=7, label="get_puzzle_and_solution_for_coin"):
for i in range(3):
for c in spends:
err, puzzle, solution = get_puzzle_and_solution_for_coin(generator, c)
assert err is None
assert puzzle.get_tree_hash() == c.puzzle_hash
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_setproctitle.py | tests/core/test_setproctitle.py | from __future__ import annotations
import pytest
from flax.util.setproctitle import setproctitle
pytestmark = pytest.mark.skip(
reason="this test ends up hanging frequently and needs to be rewritten with a subprocess and a title check",
)
def test_does_not_crash():
setproctitle("flax test title")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_crawler_rpc.py | tests/core/test_crawler_rpc.py | from __future__ import annotations
import pytest
from flax.rpc.crawler_rpc_api import CrawlerRpcApi
from flax.seeder.crawler import Crawler
class TestCrawlerRpc:
@pytest.mark.asyncio
async def test_get_ips_after_timestamp(self, bt):
crawler = Crawler(bt.config.get("seeder", {}), bt.root_path, consensus_constants=bt.constants)
crawler_rpc_api = CrawlerRpcApi(crawler)
# Should raise ValueError when `after` is not supplied
with pytest.raises(ValueError):
await crawler_rpc_api.get_ips_after_timestamp({})
# Crawler isn't actually crawling, so this should return zero IPs
response = await crawler_rpc_api.get_ips_after_timestamp({"after": 0})
assert len(response["ips"]) == 0
# Add some known data
# IPs are listed here out of order (by time) to test consistent sorting
# Timestamps increase as the IP value increases
crawler.best_timestamp_per_peer["0.0.0.0"] = 0
crawler.best_timestamp_per_peer["2.2.2.2"] = 1644300000
crawler.best_timestamp_per_peer["1.1.1.1"] = 1644213600
crawler.best_timestamp_per_peer["7.7.7.7"] = 1644732000
crawler.best_timestamp_per_peer["3.3.3.3"] = 1644386400
crawler.best_timestamp_per_peer["4.4.4.4"] = 1644472800
crawler.best_timestamp_per_peer["9.9.9.9"] = 1644904800
crawler.best_timestamp_per_peer["5.5.5.5"] = 1644559200
crawler.best_timestamp_per_peer["6.6.6.6"] = 1644645600
crawler.best_timestamp_per_peer["8.8.8.8"] = 1644818400
response = await crawler_rpc_api.get_ips_after_timestamp({"after": 0})
assert len(response["ips"]) == 9
response = await crawler_rpc_api.get_ips_after_timestamp({"after": 1644473000})
assert len(response["ips"]) == 5
# Test offset/limit functionality
response = await crawler_rpc_api.get_ips_after_timestamp({"after": 0, "limit": 2})
assert len(response["ips"]) == 2
assert response["total"] == 9
assert response["ips"][0] == "1.1.1.1"
assert response["ips"][1] == "2.2.2.2"
response = await crawler_rpc_api.get_ips_after_timestamp({"after": 0, "offset": 2, "limit": 2})
assert len(response["ips"]) == 2
assert response["total"] == 9
assert response["ips"][0] == "3.3.3.3"
assert response["ips"][1] == "4.4.4.4"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_db_validation.py | tests/core/test_db_validation.py | from __future__ import annotations
import random
import sqlite3
from contextlib import closing
from pathlib import Path
from typing import List
import pytest
from flax.cmds.db_validate_func import validate_v2
from flax.consensus.blockchain import Blockchain
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.consensus.multiprocess_validation import PreValidationResult
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.full_block import FullBlock
from flax.util.db_wrapper import DBWrapper2
from flax.util.ints import uint64
from tests.setup_nodes import test_constants
from tests.util.temp_file import TempFile
def rand_hash() -> bytes32:
ret = bytearray(32)
for i in range(32):
ret[i] = random.getrandbits(8)
return bytes32(ret)
def make_version(conn: sqlite3.Connection, version: int) -> None:
conn.execute("CREATE TABLE database_version(version int)")
conn.execute("INSERT INTO database_version VALUES (?)", (version,))
conn.commit()
def make_peak(conn: sqlite3.Connection, peak_hash: bytes32) -> None:
conn.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)")
conn.execute("INSERT OR REPLACE INTO current_peak VALUES(?, ?)", (0, peak_hash))
conn.commit()
def make_block_table(conn: sqlite3.Connection) -> None:
conn.execute(
"CREATE TABLE IF NOT EXISTS full_blocks("
"header_hash blob PRIMARY KEY,"
"prev_hash blob,"
"height bigint,"
"sub_epoch_summary blob,"
"is_fully_compactified tinyint,"
"in_main_chain tinyint,"
"block blob,"
"block_record blob)"
)
def add_block(
conn: sqlite3.Connection, header_hash: bytes32, prev_hash: bytes32, height: int, in_main_chain: bool
) -> None:
conn.execute(
"INSERT INTO full_blocks VALUES(?, ?, ?, NULL, 0, ?, NULL, NULL)",
(
header_hash,
prev_hash,
height,
in_main_chain,
),
)
def test_db_validate_wrong_version() -> None:
with TempFile() as db_file:
with closing(sqlite3.connect(db_file)) as conn:
make_version(conn, 3)
with pytest.raises(RuntimeError) as execinfo:
validate_v2(db_file, validate_blocks=False)
assert "Database has the wrong version (3 expected 2)" in str(execinfo.value)
def test_db_validate_missing_peak_table() -> None:
with TempFile() as db_file:
with closing(sqlite3.connect(db_file)) as conn:
make_version(conn, 2)
with pytest.raises(RuntimeError) as execinfo:
validate_v2(db_file, validate_blocks=False)
assert "Database is missing current_peak table" in str(execinfo.value)
def test_db_validate_missing_peak_block() -> None:
with TempFile() as db_file:
with closing(sqlite3.connect(db_file)) as conn:
make_version(conn, 2)
make_peak(conn, bytes32.fromhex("fafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafafa"))
make_block_table(conn)
with pytest.raises(RuntimeError) as execinfo:
validate_v2(db_file, validate_blocks=False)
assert "Database is missing the peak block" in str(execinfo.value)
@pytest.mark.parametrize("invalid_in_chain", [True, False])
def test_db_validate_in_main_chain(invalid_in_chain: bool) -> None:
with TempFile() as db_file:
with closing(sqlite3.connect(db_file)) as conn:
make_version(conn, 2)
make_block_table(conn)
prev = bytes32(DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA)
for height in range(0, 100):
header_hash = rand_hash()
add_block(conn, header_hash, prev, height, True)
if height % 4 == 0:
# insert an orphaned block
add_block(conn, rand_hash(), prev, height, invalid_in_chain)
prev = header_hash
make_peak(conn, header_hash)
if invalid_in_chain:
with pytest.raises(RuntimeError) as execinfo:
validate_v2(db_file, validate_blocks=False)
assert " (height: 96) is orphaned, but in_main_chain is set" in str(execinfo.value)
else:
validate_v2(db_file, validate_blocks=False)
async def make_db(db_file: Path, blocks: List[FullBlock]) -> None:
db_wrapper = await DBWrapper2.create(database=db_file, reader_count=1, db_version=2)
try:
async with db_wrapper.writer_maybe_transaction() as conn:
# this is done by flax init normally
await conn.execute("CREATE TABLE database_version(version int)")
await conn.execute("INSERT INTO database_version VALUES (2)")
block_store = await BlockStore.create(db_wrapper)
coin_store = await CoinStore.create(db_wrapper)
bc = await Blockchain.create(coin_store, block_store, test_constants, Path("."), reserved_cores=0)
for block in blocks:
results = PreValidationResult(None, uint64(1), None, False)
result, err, _ = await bc.receive_block(block, results)
assert err is None
finally:
await db_wrapper.close()
@pytest.mark.asyncio
async def test_db_validate_default_1000_blocks(default_1000_blocks: List[FullBlock]) -> None:
with TempFile() as db_file:
await make_db(db_file, default_1000_blocks)
# we expect everything to be valid except this is a test chain, so it
# doesn't have the correct genesis challenge
with pytest.raises(RuntimeError) as execinfo:
validate_v2(db_file, validate_blocks=True)
assert "Blockchain has invalid genesis challenge" in str(execinfo.value)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_filter.py | tests/core/test_filter.py | from typing import List
import pytest
from chiabip158 import PyBIP158
class TestFilter:
@pytest.mark.asyncio
async def test_basic_filter_test(self, wallet_and_node):
full_nodes, wallets, bt = wallet_and_node
wallet_node, server_2 = wallets[0]
wallet = wallet_node.wallet_state_manager.main_wallet
num_blocks = 2
ph = await wallet.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
10,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=ph,
pool_reward_puzzle_hash=ph,
)
for i in range(1, num_blocks):
byte_array_tx: List[bytes] = []
block = blocks[i]
coins = list(block.get_included_reward_coins())
coin_0 = bytearray(coins[0].puzzle_hash)
coin_1 = bytearray(coins[1].puzzle_hash)
byte_array_tx.append(coin_0)
byte_array_tx.append(coin_1)
pl = PyBIP158(byte_array_tx)
present = pl.Match(coin_0)
fee_present = pl.Match(coin_1)
assert present
assert fee_present
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/config.py | tests/core/config.py | from __future__ import annotations
parallel = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/node_height.py | tests/core/node_height.py | from __future__ import annotations
def node_height_at_least(node, h):
if node.full_node.blockchain.get_peak() is not None:
return node.full_node.blockchain.get_peak().height >= h
return False
def node_height_exactly(node, h):
if node.full_node.blockchain.get_peak() is not None:
return node.full_node.blockchain.get_peak().height == h
return False
def node_height_between(node, h1, h2):
if node.full_node.blockchain.get_peak() is not None:
height = node.full_node.blockchain.get_peak().height
return h1 <= height <= h2
return False
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/__init__.py | tests/core/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_farmer_harvester_rpc.py | tests/core/test_farmer_harvester_rpc.py | from __future__ import annotations
import dataclasses
import logging
import operator
import sys
import time
from math import ceil
from os import mkdir
from pathlib import Path
from shutil import copy
from typing import Any, Awaitable, Callable, Dict, List, Union, cast
import pytest
import pytest_asyncio
from flax.consensus.coinbase import create_puzzlehash_for_pk
from flax.farmer.farmer import Farmer
from flax.plot_sync.receiver import Receiver
from flax.plotting.util import add_plot_directory
from flax.protocols import farmer_protocol
from flax.protocols.harvester_protocol import Plot
from flax.rpc.farmer_rpc_api import (
FilterItem,
PaginatedRequestData,
PlotInfoRequestData,
PlotPathRequestData,
plot_matches_filter,
)
from flax.rpc.farmer_rpc_client import FarmerRpcClient
from flax.rpc.harvester_rpc_client import HarvesterRpcClient
from flax.simulator.block_tools import get_plot_dir
from flax.simulator.time_out_assert import time_out_assert, time_out_assert_custom_interval
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.bech32m import decode_puzzle_hash, encode_puzzle_hash
from flax.util.byte_types import hexstr_to_bytes
from flax.util.config import load_config, lock_and_load_config, save_config
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint32, uint64
from flax.util.misc import get_list_or_len
from flax.wallet.derive_keys import master_sk_to_wallet_sk, master_sk_to_wallet_sk_unhardened
from tests.plot_sync.test_delta import dummy_plot
from tests.util.misc import assert_rpc_error
from tests.util.rpc import validate_get_routes
log = logging.getLogger(__name__)
async def wait_for_plot_sync(receiver: Receiver, previous_last_sync_id: uint64) -> None:
def wait():
current_last_sync_id = receiver.last_sync().sync_id
return current_last_sync_id != 0 and current_last_sync_id != previous_last_sync_id
await time_out_assert(30, wait)
async def wait_for_synced_receiver(farmer: Farmer, harvester_id: bytes32) -> None:
def wait():
return (
harvester_id in farmer.plot_sync_receivers and not farmer.plot_sync_receivers[harvester_id].initial_sync()
)
await time_out_assert(30, wait)
@pytest_asyncio.fixture(scope="function")
async def harvester_farmer_environment(farmer_one_harvester, self_hostname):
harvesters, farmer_service, bt = farmer_one_harvester
harvester_service = harvesters[0]
farmer_rpc_cl = await FarmerRpcClient.create(
self_hostname, farmer_service.rpc_server.listen_port, farmer_service.root_path, farmer_service.config
)
harvester_rpc_cl = await HarvesterRpcClient.create(
self_hostname, harvester_service.rpc_server.listen_port, harvester_service.root_path, harvester_service.config
)
async def have_connections():
return len(await farmer_rpc_cl.get_connections()) > 0
await time_out_assert(15, have_connections, True)
yield farmer_service, farmer_rpc_cl, harvester_service, harvester_rpc_cl, bt
farmer_rpc_cl.close()
harvester_rpc_cl.close()
await farmer_rpc_cl.await_closed()
await harvester_rpc_cl.await_closed()
@pytest.mark.asyncio
async def test_get_routes(harvester_farmer_environment):
(
farmer_service,
farmer_rpc_client,
harvester_service,
harvester_rpc_client,
_,
) = harvester_farmer_environment
await validate_get_routes(farmer_rpc_client, farmer_service.rpc_server.rpc_api)
await validate_get_routes(harvester_rpc_client, harvester_service.rpc_server.rpc_api)
@pytest.mark.parametrize("endpoint", ["get_harvesters", "get_harvesters_summary"])
@pytest.mark.asyncio
async def test_farmer_get_harvesters_and_summary(harvester_farmer_environment, endpoint: str):
(
farmer_service,
farmer_rpc_client,
harvester_service,
harvester_rpc_client,
_,
) = harvester_farmer_environment
harvester = harvester_service._node
harvester_plots = []
async def non_zero_plots() -> bool:
res = await harvester_rpc_client.get_plots()
nonlocal harvester_plots
harvester_plots = res["plots"]
return len(harvester_plots) > 0
await time_out_assert(10, non_zero_plots)
async def test_get_harvesters():
nonlocal harvester_plots
harvester.plot_manager.trigger_refresh()
await time_out_assert(5, harvester.plot_manager.needs_refresh, value=False)
farmer_res = await getattr(farmer_rpc_client, endpoint)()
if len(list(farmer_res["harvesters"])) != 1:
log.error(f"test_get_harvesters: invalid harvesters {list(farmer_res['harvesters'])}")
return False
if farmer_res["harvesters"][0]["last_sync_time"] is None:
log.error(f"test_get_harvesters: sync not done {list(farmer_res['harvesters'])}")
return False
harvester_dict = farmer_res["harvesters"][0]
counts_only: bool = endpoint == "get_harvesters_summary"
if not counts_only:
harvester_dict["plots"] = sorted(harvester_dict["plots"], key=lambda item: item["filename"])
harvester_plots = sorted(harvester_plots, key=lambda item: item["filename"])
assert harvester_dict["plots"] == get_list_or_len(harvester_plots, counts_only)
assert harvester_dict["failed_to_open_filenames"] == get_list_or_len([], counts_only)
assert harvester_dict["no_key_filenames"] == get_list_or_len([], counts_only)
assert harvester_dict["duplicates"] == get_list_or_len([], counts_only)
return True
await time_out_assert_custom_interval(30, 1, test_get_harvesters)
@pytest.mark.asyncio
async def test_farmer_signage_point_endpoints(harvester_farmer_environment):
(
farmer_service,
farmer_rpc_client,
harvester_service,
harvester_rpc_client,
_,
) = harvester_farmer_environment
farmer_api = farmer_service._api
assert (await farmer_rpc_client.get_signage_point(std_hash(b"2"))) is None
assert len(await farmer_rpc_client.get_signage_points()) == 0
async def have_signage_points():
return len(await farmer_rpc_client.get_signage_points()) > 0
sp = farmer_protocol.NewSignagePoint(
std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
)
await farmer_api.new_signage_point(sp)
await time_out_assert(5, have_signage_points, True)
assert (await farmer_rpc_client.get_signage_point(std_hash(b"2"))) is not None
@pytest.mark.asyncio
async def test_farmer_reward_target_endpoints(harvester_farmer_environment):
(
farmer_service,
farmer_rpc_client,
harvester_service,
harvester_rpc_client,
bt,
) = harvester_farmer_environment
farmer_api = farmer_service._api
targets_1 = await farmer_rpc_client.get_reward_targets(False)
assert "have_pool_sk" not in targets_1
assert "have_farmer_sk" not in targets_1
targets_2 = await farmer_rpc_client.get_reward_targets(True, 2)
assert targets_2["have_pool_sk"] and targets_2["have_farmer_sk"]
new_ph: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.farmer_master_sk, uint32(2)).get_g1())
new_ph_2: bytes32 = create_puzzlehash_for_pk(master_sk_to_wallet_sk(bt.pool_master_sk, uint32(7)).get_g1())
await farmer_rpc_client.set_reward_targets(encode_puzzle_hash(new_ph, "xfx"), encode_puzzle_hash(new_ph_2, "xfx"))
targets_3 = await farmer_rpc_client.get_reward_targets(True, 10)
assert decode_puzzle_hash(targets_3["farmer_target"]) == new_ph
assert decode_puzzle_hash(targets_3["pool_target"]) == new_ph_2
assert targets_3["have_pool_sk"] and targets_3["have_farmer_sk"]
# limit the derivation search to 3 should fail to find the pool sk
targets_4 = await farmer_rpc_client.get_reward_targets(True, 3)
assert not targets_4["have_pool_sk"] and targets_4["have_farmer_sk"]
# check observer addresses
observer_farmer: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk_unhardened(bt.farmer_master_sk, uint32(2)).get_g1()
)
observer_pool: bytes32 = create_puzzlehash_for_pk(
master_sk_to_wallet_sk_unhardened(bt.pool_master_sk, uint32(7)).get_g1()
)
await farmer_rpc_client.set_reward_targets(
encode_puzzle_hash(observer_farmer, "xfx"), encode_puzzle_hash(observer_pool, "xfx")
)
targets = await farmer_rpc_client.get_reward_targets(True, 10)
assert decode_puzzle_hash(targets["farmer_target"]) == observer_farmer
assert decode_puzzle_hash(targets["pool_target"]) == observer_pool
assert targets["have_pool_sk"] and targets["have_farmer_sk"]
root_path = farmer_api.farmer._root_path
config = load_config(root_path, "config.yaml")
assert config["farmer"]["xfx_target_address"] == encode_puzzle_hash(observer_farmer, "xfx")
assert config["pool"]["xfx_target_address"] == encode_puzzle_hash(observer_pool, "xfx")
new_ph_2_encoded = encode_puzzle_hash(new_ph_2, "xfx")
added_char = new_ph_2_encoded + "a"
with pytest.raises(ValueError):
await farmer_rpc_client.set_reward_targets(None, added_char)
replaced_char = new_ph_2_encoded[0:-1] + "a"
with pytest.raises(ValueError):
await farmer_rpc_client.set_reward_targets(None, replaced_char)
@pytest.mark.asyncio
async def test_farmer_get_pool_state(harvester_farmer_environment, self_hostname):
(
farmer_service,
farmer_rpc_client,
harvester_service,
harvester_rpc_client,
_,
) = harvester_farmer_environment
farmer_api = farmer_service._api
assert len((await farmer_rpc_client.get_pool_state())["pool_state"]) == 0
pool_list = [
{
"launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
"owner_public_key": "aa11e92274c0f6a2449fd0c7cfab4a38f943289dbe2214c808b36390c34eacfaa1d4c8f3c6ec582ac502ff32228679a0", # noqa
"payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
"pool_url": self_hostname,
"p2_singleton_puzzle_hash": "16e4bac26558d315cded63d4c5860e98deb447cc59146dd4de06ce7394b14f17",
"target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
}
]
root_path = farmer_api.farmer._root_path
with lock_and_load_config(root_path, "config.yaml") as config:
config["pool"]["pool_list"] = pool_list
save_config(root_path, "config.yaml", config)
await farmer_api.farmer.update_pool_state()
pool_state = (await farmer_rpc_client.get_pool_state())["pool_state"]
assert len(pool_state) == 1
assert (
pool_state[0]["pool_config"]["payout_instructions"]
== "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8"
)
await farmer_rpc_client.set_payout_instructions(
hexstr_to_bytes(pool_state[0]["pool_config"]["launcher_id"]), "1234vy"
)
await farmer_api.farmer.update_pool_state()
pool_state = (await farmer_rpc_client.get_pool_state())["pool_state"]
assert pool_state[0]["pool_config"]["payout_instructions"] == "1234vy"
now = time.time()
# Big arbitrary numbers used to be unlikely to accidentally collide.
before_24h = (now - (25 * 60 * 60), 29984713)
since_24h = (now - (23 * 60 * 60), 93049817)
for p2_singleton_puzzle_hash, pool_dict in farmer_api.farmer.pool_state.items():
for key in ["points_found_24h", "points_acknowledged_24h"]:
pool_dict[key].insert(0, since_24h)
pool_dict[key].insert(0, before_24h)
sp = farmer_protocol.NewSignagePoint(
std_hash(b"1"), std_hash(b"2"), std_hash(b"3"), uint64(1), uint64(1000000), uint8(2)
)
await farmer_api.new_signage_point(sp)
client_pool_state = await farmer_rpc_client.get_pool_state()
for pool_dict in client_pool_state["pool_state"]:
for key in ["points_found_24h", "points_acknowledged_24h"]:
assert pool_dict[key][0] == list(since_24h)
@pytest.mark.asyncio
async def test_farmer_get_pool_state_plot_count(harvester_farmer_environment, self_hostname: str) -> None:
    """Verify that the farmer's pool state reports a per-pool ``plot_count`` that
    tracks the harvester plots whose pool contract puzzle hash matches the
    configured pool entry, and that the count drops as plots are removed.
    """
    (
        farmer_service,
        farmer_rpc_client,
        harvester_service,
        harvester_rpc_client,
        _,
    ) = harvester_farmer_environment
    farmer_api = farmer_service._api
    # Poll helper: True once the farmer has received at least one plot from the harvester.
    async def wait_for_plot_sync() -> bool:
        try:
            return (await farmer_rpc_client.get_harvesters_summary())["harvesters"][0]["plots"] > 0
        except Exception:
            # Summary may not be available yet while the harvester connects; treat as "not synced".
            return False
    await time_out_assert(15, wait_for_plot_sync, True)
    # No pool configured yet, so the pool state must be empty.
    assert len((await farmer_rpc_client.get_pool_state())["pool_state"]) == 0
    # This puzzle hash matches the pool contract puzzle hash of the test plots
    # created by the fixture (assumed: 5 of them carry it — TODO confirm against fixture).
    pool_contract_puzzle_hash: bytes32 = bytes32.from_hexstr(
        "1b9d1eaa3c6a9b27cd90ad9070eb012794a74b277446417bc7b904145010c087"
    )
    pool_list = [
        {
            "launcher_id": "ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa",
            "owner_public_key": "aa11e92274c0f6a2449fd0c7cfab4a38f943289dbe2214c808b36390c34eacfaa1d4c8f3c6ec582ac502ff32228679a0",  # noqa
            "payout_instructions": "c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8",
            "pool_url": self_hostname,
            "p2_singleton_puzzle_hash": pool_contract_puzzle_hash.hex(),
            "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
        }
    ]
    # Install the pool configuration on disk and make the farmer pick it up.
    root_path = farmer_api.farmer._root_path
    with lock_and_load_config(root_path, "config.yaml") as config:
        config["pool"]["pool_list"] = pool_list
        save_config(root_path, "config.yaml", config)
    await farmer_api.farmer.update_pool_state()
    pool_plot_count = (await farmer_rpc_client.get_pool_state())["pool_state"][0]["plot_count"]
    assert pool_plot_count == 5
    # TODO: Maybe improve this to not remove from Receiver directly but instead from the harvester and then wait for
    # plot sync event.
    # Removes every matching plot from the receiver and checks that the RPC-visible
    # plot count follows. Returns the remaining count, so the time_out_assert below
    # waits until it becomes 0 (falsy).
    async def remove_all_and_validate() -> int:
        nonlocal pool_plot_count
        receiver = farmer_api.farmer.plot_sync_receivers[harvester_service._server.node_id]
        # Iterate over a copy because entries are deleted from the live dict.
        for path, plot in receiver.plots().copy().items():
            if plot.pool_contract_puzzle_hash == pool_contract_puzzle_hash:
                del receiver.plots()[path]
                pool_plot_count -= 1
        plot_count = (await farmer_rpc_client.get_pool_state())["pool_state"][0]["plot_count"]
        assert plot_count == pool_plot_count
        return plot_count
    await time_out_assert(15, remove_all_and_validate, False)
    assert (await farmer_rpc_client.get_pool_state())["pool_state"][0]["plot_count"] == 0
@pytest.mark.parametrize(
    "filter_item, match",
    [
        (FilterItem("filename", "1"), True),
        (FilterItem("filename", "12"), True),
        (FilterItem("filename", "123"), True),
        (FilterItem("filename", "1234"), False),
        (FilterItem("filename", "23"), True),
        (FilterItem("filename", "3"), True),
        (FilterItem("filename", "0123"), False),
        (FilterItem("pool_contract_puzzle_hash", None), True),
        (FilterItem("pool_contract_puzzle_hash", "1"), False),
    ],
)
def test_plot_matches_filter(filter_item: FilterItem, match: bool):
    """Each filter is applied to a dummy plot whose filename is "123":
    substring filters must match accordingly, and a ``None`` value must only
    match a plot without a pool contract puzzle hash."""
    probe = dummy_plot("123")
    assert plot_matches_filter(probe, filter_item) == match
@pytest.mark.parametrize(
    "endpoint, filtering, sort_key, reverse, expected_plot_count",
    [
        (FarmerRpcClient.get_harvester_plots_valid, [], "filename", False, 20),
        (FarmerRpcClient.get_harvester_plots_valid, [], "size", True, 20),
        (
            FarmerRpcClient.get_harvester_plots_valid,
            [FilterItem("pool_contract_puzzle_hash", None)],
            "file_size",
            True,
            15,
        ),
        (
            FarmerRpcClient.get_harvester_plots_valid,
            [FilterItem("size", "20"), FilterItem("filename", "81")],
            "plot_id",
            False,
            4,
        ),
        (FarmerRpcClient.get_harvester_plots_invalid, [], None, True, 13),
        (FarmerRpcClient.get_harvester_plots_invalid, ["invalid_0"], None, False, 6),
        (FarmerRpcClient.get_harvester_plots_invalid, ["inval", "lid_1"], None, False, 2),
        (FarmerRpcClient.get_harvester_plots_keys_missing, [], None, True, 3),
        (FarmerRpcClient.get_harvester_plots_keys_missing, ["keys_missing_1"], None, False, 2),
        (FarmerRpcClient.get_harvester_plots_duplicates, [], None, True, 7),
        (FarmerRpcClient.get_harvester_plots_duplicates, ["duplicates_0"], None, False, 3),
    ],
)
@pytest.mark.asyncio
@pytest.mark.skipif(sys.platform == "win32", reason="avoiding crashes on windows until we fix this (crashing workers)")
async def test_farmer_get_harvester_plots_endpoints(
    harvester_farmer_environment: Any,
    endpoint: Callable[[FarmerRpcClient, PaginatedRequestData], Awaitable[Dict[str, Any]]],
    filtering: Union[List[FilterItem], List[str]],
    sort_key: str,
    reverse: bool,
    expected_plot_count: int,
) -> None:
    """Exercise the paginated plot-listing RPC endpoints (valid / invalid /
    keys_missing / duplicates) against locally generated plot data.

    For each parametrized case this test: generates the matching plot files on
    the harvester side, computes the expected (filtered + sorted) plot list
    locally, triggers a plot refresh/sync, and then checks that every page
    returned by the endpoint matches the corresponding slice of the expectation.
    """
    (
        farmer_service,
        farmer_rpc_client,
        harvester_service,
        harvester_rpc_client,
        _,
    ) = harvester_farmer_environment
    harvester = harvester_service._node
    harvester_id = harvester_service._server.node_id
    receiver = farmer_service._api.farmer.plot_sync_receivers[harvester_id]
    # Make sure the initial plot sync between harvester and farmer has finished
    # before reading plot data.
    if receiver.initial_sync():
        await wait_for_plot_sync(receiver, receiver.last_sync().sync_id)
    harvester_plots = (await harvester_rpc_client.get_plots())["plots"]
    plots = []
    # Build the request type matching the endpoint: the "valid" endpoint takes
    # FilterItem filters plus sorting, the path-based endpoints take substring filters.
    request: PaginatedRequestData
    if endpoint == FarmerRpcClient.get_harvester_plots_valid:
        request = PlotInfoRequestData(
            harvester_id, uint32(0), uint32(0), cast(List[FilterItem], filtering), sort_key, reverse
        )
    else:
        request = PlotPathRequestData(harvester_id, uint32(0), uint32(0), cast(List[str], filtering), reverse)
    # Creates `count` fresh plot directories named "<prefix>_<i>" and registers
    # them with the harvester config.
    def add_plot_directories(prefix: str, count: int) -> List[Path]:
        new_paths = []
        for i in range(count):
            new_paths.append(harvester.root_path / f"{prefix}_{i}")
            mkdir(new_paths[-1])
            add_plot_directory(harvester.root_path, str(new_paths[-1]))
        return new_paths
    # Generate the plot data for the endpoint under test and collect the expected entries
    if endpoint == FarmerRpcClient.get_harvester_plots_valid:
        plots = harvester_plots
    elif endpoint == FarmerRpcClient.get_harvester_plots_invalid:
        # Empty files with a .plot extension count as invalid plots.
        invalid_paths = add_plot_directories("invalid", 3)
        for dir_index, r in [(0, range(0, 6)), (1, range(6, 8)), (2, range(8, 13))]:
            plots += [str(invalid_paths[dir_index] / f"{i}.plot") for i in r]
        for plot in plots:
            with open(plot, "w"):
                pass
    elif endpoint == FarmerRpcClient.get_harvester_plots_keys_missing:
        # Copy pre-made plots whose keys are not in the keychain into fresh directories.
        keys_missing_plots = [path for path in (Path(get_plot_dir()) / "not_in_keychain").iterdir() if path.is_file()]
        keys_missing_paths = add_plot_directories("keys_missing", 2)
        for dir_index, copy_plots in [(0, keys_missing_plots[:1]), (1, keys_missing_plots[1:3])]:
            for plot in copy_plots:
                copy(plot, keys_missing_paths[dir_index])
                plots.append(str(keys_missing_paths[dir_index] / plot.name))
    elif endpoint == FarmerRpcClient.get_harvester_plots_duplicates:
        # Copy existing valid plots into additional directories to create duplicates.
        duplicate_paths = add_plot_directories("duplicates", 2)
        for dir_index, r in [(0, range(0, 3)), (1, range(3, 7))]:
            for i in r:
                plot_path = Path(harvester_plots[i]["filename"])
                plots.append(str(duplicate_paths[dir_index] / plot_path.name))
                copy(plot_path, plots[-1])
    # Sort and filter the data locally — this is the expectation the endpoint must match.
    if endpoint == FarmerRpcClient.get_harvester_plots_valid:
        for filter_item in filtering:
            assert isinstance(filter_item, FilterItem)
            plots = [plot for plot in plots if plot_matches_filter(Plot.from_json_dict(plot), filter_item)]
        # Secondary key "plot_id" makes the order deterministic when sort_key ties.
        plots.sort(key=operator.itemgetter(sort_key, "plot_id"), reverse=reverse)
    else:
        for filter_item in filtering:
            plots = [plot for plot in plots if filter_item in plot]
        plots.sort(reverse=reverse)
    total_count = len(plots)
    assert total_count == expected_plot_count
    # Trigger a refresh so the farmer learns about the newly created files, then
    # wait for the resulting plot sync to complete.
    last_sync_id = receiver.last_sync().sync_id
    harvester.plot_manager.trigger_refresh()
    harvester.plot_manager.start_refreshing()
    await wait_for_plot_sync(receiver, last_sync_id)
    # Validate pagination for several page sizes, including edge cases around total_count.
    for page_size in [1, int(total_count / 2), total_count - 1, total_count, total_count + 1, 100]:
        request = dataclasses.replace(request, page_size=uint32(page_size))
        expected_page_count = ceil(total_count / page_size)
        for page in range(expected_page_count):
            request = dataclasses.replace(request, page=uint32(page))
            await wait_for_synced_receiver(farmer_service._api.farmer, harvester_id)
            page_result = await endpoint(farmer_rpc_client, request)
            offset = page * page_size
            expected_plots = plots[offset : offset + page_size]
            assert page_result == {
                "success": True,
                "node_id": harvester_id.hex(),
                "page": page,
                "page_count": expected_page_count,
                "total_count": total_count,
                "plots": expected_plots,
            }
@pytest.mark.asyncio
@pytest.mark.skip("This test causes hangs occasionally. TODO: fix this.")
async def test_harvester_add_plot_directory(harvester_farmer_environment) -> None:
    """Exercise the harvester's ``add_plot_directory`` RPC: missing argument,
    non-existent path, file instead of directory, successful add, duplicate add,
    and listing via ``get_plot_directories``.
    """
    (
        farmer_service,
        farmer_rpc_client,
        harvester_service,
        harvester_rpc_client,
        _,
    ) = harvester_farmer_environment
    # Adds `path` via RPC and checks it landed in the on-disk harvester config.
    # NOTE(review): closes over `root_path`, which is assigned further down —
    # fine because the closure is resolved at call time, after the assignment.
    async def assert_added(path: Path) -> None:
        assert await harvester_rpc_client.add_plot_directory(str(path))
        with lock_and_load_config(root_path, "config.yaml") as config:
            assert str(path) in config["harvester"]["plot_directories"]
    # Test without the required parameter: dirname
    with pytest.raises(ValueError, match="dirname"):
        await harvester_rpc_client.fetch("add_plot_directory", {})
    root_path = harvester_service._node.root_path
    test_path = Path(root_path / "test_path").resolve()
    # The test_path doesn't exist at this point
    with assert_rpc_error(f"Path doesn't exist: {test_path}"):
        await harvester_rpc_client.add_plot_directory(str(test_path))
    # Create a file at the test_path and make sure it detects this
    with open(test_path, "w"):
        pass
    with assert_rpc_error(f"Path is not a directory: {test_path}"):
        await harvester_rpc_client.add_plot_directory(str(test_path))
    # Drop the file, make it a directory and make sure it gets added properly.
    test_path.unlink()
    mkdir(test_path)
    await assert_added(test_path)
    with assert_rpc_error(f"Path already added: {test_path}"):
        await harvester_rpc_client.add_plot_directory(str(test_path))
    # Add another one and make sure they are still both there.
    test_path_other = test_path / "other"
    mkdir(test_path_other)
    await assert_added(test_path_other)
    added_directories = await harvester_rpc_client.get_plot_directories()
    assert str(test_path) in added_directories
    assert str(test_path_other) in added_directories
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/test_full_node_rpc.py | tests/core/test_full_node_rpc.py | # flake8: noqa: F811, F401
from typing import List
import pytest
from blspy import AugSchemeMPL
from flax.consensus.pot_iterations import is_overflow_block
from flax.full_node.signage_point import SignagePoint
from flax.protocols import full_node_protocol
from flax.rpc.full_node_rpc_client import FullNodeRpcClient
from flax.server.outbound_message import NodeType
from flax.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from flax.types.full_block import FullBlock
from flax.types.spend_bundle import SpendBundle
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.hash import std_hash
from flax.util.ints import uint8
from flax.simulator.block_tools import get_signage_point
from tests.blockchain.blockchain_test_utils import _validate_and_add_block
from tests.connection_utils import connect_and_get_peer
from tests.setup_nodes import test_constants
from flax.simulator.time_out_assert import time_out_assert
from tests.util.rpc import validate_get_routes
from flax.simulator.wallet_tools import WalletTool
class TestRpc:
    """End-to-end tests for the full node RPC API against simulated nodes."""

    @pytest.mark.asyncio
    async def test1(self, two_nodes_sim_and_wallets_services, self_hostname):
        """Smoke-test the bulk of the full node RPC surface: blockchain state,
        block/coin queries, mempool, hints, connections, and reorg-aware
        block fetching. The order of operations matters throughout — each
        section builds on the chain state left by the previous one.
        """
        num_blocks = 5
        nodes, _, bt = two_nodes_sim_and_wallets_services
        full_node_service_1, full_node_service_2 = nodes
        full_node_api_1 = full_node_service_1._api
        full_node_api_2 = full_node_service_2._api
        server_2 = full_node_api_2.full_node.server
        try:
            client = await FullNodeRpcClient.create(
                self_hostname,
                full_node_service_1.rpc_server.listen_port,
                full_node_service_1.root_path,
                full_node_service_1.config,
            )
            await validate_get_routes(client, full_node_service_1.rpc_server.rpc_api)
            # Fresh node: no peak yet, not syncing, but consensus constants are set.
            state = await client.get_blockchain_state()
            assert state["peak"] is None
            assert not state["sync"]["sync_mode"]
            assert state["difficulty"] > 0
            assert state["sub_slot_iters"] > 0
            blocks = bt.get_consecutive_blocks(num_blocks)
            blocks = bt.get_consecutive_blocks(num_blocks, block_list_input=blocks, guarantee_transaction_block=True)
            assert len(await client.get_unfinished_block_headers()) == 0
            assert len((await client.get_block_records(0, 100))) == 0
            # Feed each block in both unfinished and finished form so the
            # unfinished-header endpoint has something to return.
            for block in blocks:
                if is_overflow_block(test_constants, block.reward_chain_block.signage_point_index):
                    finished_ss = block.finished_sub_slots[:-1]
                else:
                    finished_ss = block.finished_sub_slots
                unf = UnfinishedBlock(
                    finished_ss,
                    block.reward_chain_block.get_unfinished(),
                    block.challenge_chain_sp_proof,
                    block.reward_chain_sp_proof,
                    block.foliage,
                    block.foliage_transaction_block,
                    block.transactions_info,
                    block.transactions_generator,
                    [],
                )
                await full_node_api_1.full_node.respond_unfinished_block(
                    full_node_protocol.RespondUnfinishedBlock(unf), None
                )
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block), None)
            assert len(await client.get_unfinished_block_headers()) > 0
            assert len(await client.get_all_block(0, 2)) == 2
            state = await client.get_blockchain_state()
            # Block and block-record lookups by hash/height.
            block = await client.get_block(state["peak"].header_hash)
            assert block == blocks[-1]
            assert (await client.get_block(bytes([1] * 32))) is None
            assert (await client.get_block_record_by_height(2)).header_hash == blocks[2].header_hash
            assert len((await client.get_block_records(0, 100))) == num_blocks * 2
            assert (await client.get_block_record_by_height(100)) is None
            # Coin record lookups by puzzle hash, parent id, and coin name.
            ph = list(blocks[-1].get_included_reward_coins())[0].puzzle_hash
            coins = await client.get_coin_records_by_puzzle_hash(ph)
            print(coins)
            assert len(coins) >= 1
            pid = list(blocks[-1].get_included_reward_coins())[0].parent_coin_info
            pid_2 = list(blocks[-1].get_included_reward_coins())[1].parent_coin_info
            coins = await client.get_coin_records_by_parent_ids([pid, pid_2])
            print(coins)
            assert len(coins) == 2
            name = list(blocks[-1].get_included_reward_coins())[0].name()
            name_2 = list(blocks[-1].get_included_reward_coins())[1].name()
            coins = await client.get_coin_records_by_names([name, name_2])
            print(coins)
            assert len(coins) == 2
            additions, removals = await client.get_additions_and_removals(blocks[-1].header_hash)
            assert len(additions) >= 2 and len(removals) == 0
            # Farm reward coins to our own puzzle hash so we have something to spend.
            wallet = WalletTool(full_node_api_1.full_node.constants)
            wallet_receiver = WalletTool(full_node_api_1.full_node.constants, AugSchemeMPL.key_gen(std_hash(b"123123")))
            ph = wallet.get_new_puzzlehash()
            ph_2 = wallet.get_new_puzzlehash()
            ph_receiver = wallet_receiver.get_new_puzzlehash()
            assert len(await client.get_coin_records_by_puzzle_hash(ph)) == 0
            assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 0
            blocks = bt.get_consecutive_blocks(
                2,
                block_list_input=blocks,
                guarantee_transaction_block=True,
                farmer_reward_puzzle_hash=ph,
                pool_reward_puzzle_hash=ph,
            )
            for block in blocks[-2:]:
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
            assert len(await client.get_coin_records_by_puzzle_hash(ph)) == 2
            assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 0
            # Mempool endpoints: empty before push_tx, populated after.
            coin_to_spend = list(blocks[-1].get_included_reward_coins())[0]
            spend_bundle = wallet.generate_signed_transaction(coin_to_spend.amount, ph_receiver, coin_to_spend)
            assert len(await client.get_all_mempool_items()) == 0
            assert len(await client.get_all_mempool_tx_ids()) == 0
            assert (await client.get_mempool_item_by_tx_id(spend_bundle.name())) is None
            await client.push_tx(spend_bundle)
            coin = spend_bundle.additions()[0]
            assert len(await client.get_all_mempool_items()) == 1
            assert len(await client.get_all_mempool_tx_ids()) == 1
            assert (
                SpendBundle.from_json_dict(list((await client.get_all_mempool_items()).values())[0]["spend_bundle"])
                == spend_bundle
            )
            assert (await client.get_all_mempool_tx_ids())[0] == spend_bundle.name()
            assert (
                SpendBundle.from_json_dict(
                    (await client.get_mempool_item_by_tx_id(spend_bundle.name()))["spend_bundle"]
                )
                == spend_bundle
            )
            # The new coin only becomes queryable after the transaction is farmed.
            assert (await client.get_coin_record_by_name(coin.name())) is None
            await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
            coin_record = await client.get_coin_record_by_name(coin.name())
            assert coin_record.coin == coin
            assert (
                coin
                in (
                    await client.get_puzzle_and_solution(coin.parent_coin_info, coin_record.confirmed_block_index)
                ).additions()
            )
            # Spent/unspent filtering and height-range filtering.
            assert len(await client.get_coin_records_by_puzzle_hash(ph_receiver)) == 1
            assert len(list(filter(lambda cr: not cr.spent, (await client.get_coin_records_by_puzzle_hash(ph))))) == 3
            assert len(await client.get_coin_records_by_puzzle_hashes([ph_receiver, ph])) == 5
            assert len(await client.get_coin_records_by_puzzle_hash(ph, False)) == 3
            assert len(await client.get_coin_records_by_puzzle_hash(ph, True)) == 4
            assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, 100)) == 4
            assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 50, 100)) == 0
            assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, blocks[-1].height + 1)) == 2
            assert len(await client.get_coin_records_by_puzzle_hash(ph, True, 0, 1)) == 0
            coin_records = await client.get_coin_records_by_puzzle_hash(ph, False)
            coin_spends = []
            # Spend 3 coins using standard transaction
            for i in range(3):
                spend_bundle = wallet.generate_signed_transaction(
                    coin_records[i].coin.amount, ph_receiver, coin_records[i].coin
                )
                await client.push_tx(spend_bundle)
                coin_spends = coin_spends + spend_bundle.coin_spends
                await time_out_assert(
                    5, full_node_api_1.full_node.mempool_manager.get_spendbundle, spend_bundle, spend_bundle.name()
                )
            await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
            block: FullBlock = (await full_node_api_1.get_all_full_blocks())[-1]
            assert len(block.transactions_generator_ref_list) > 0  # compression has occurred
            block_spends = await client.get_block_spends(block.header_hash)
            assert len(block_spends) == 3
            assert block_spends == coin_spends
            # Hint lookups: create three coins carrying the same 32-byte memo/hint.
            memo = 32 * b"\f"
            for i in range(2):
                await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
                state = await client.get_blockchain_state()
                block = await client.get_block(state["peak"].header_hash)
                coin_to_spend = list(block.get_included_reward_coins())[0]
                spend_bundle = wallet.generate_signed_transaction(coin_to_spend.amount, ph_2, coin_to_spend, memo=memo)
                await client.push_tx(spend_bundle)
                await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
            coin_to_spend = (await client.get_coin_records_by_hint(memo))[0].coin
            # Spend the most recent coin so we can test including spent coins later
            spend_bundle = wallet.generate_signed_transaction(coin_to_spend.amount, ph_2, coin_to_spend, memo=memo)
            await client.push_tx(spend_bundle)
            await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(ph_2))
            coin_records = await client.get_coin_records_by_hint(memo)
            assert len(coin_records) == 3
            coin_records = await client.get_coin_records_by_hint(memo, include_spent_coins=False)
            assert len(coin_records) == 2
            state = await client.get_blockchain_state()
            # Get coin records by hint, restricted to a height window around the peak.
            coin_records = await client.get_coin_records_by_hint(
                memo, start_height=state["peak"].height - 1, end_height=state["peak"].height
            )
            assert len(coin_records) == 1
            # Connection management endpoints.
            assert len(await client.get_connections()) == 0
            await client.open_connection(self_hostname, server_2._port)
            async def num_connections() -> int:
                return len(await client.get_connections())
            await time_out_assert(10, num_connections, 1)
            connections = await client.get_connections()
            # NOTE(review): compares a NodeType enum against NodeType.FULL_NODE.value;
            # this only passes because NodeType is an IntEnum — consider comparing
            # against NodeType.FULL_NODE directly.
            assert NodeType(connections[0]["type"]) == NodeType.FULL_NODE.value
            assert len(await client.get_connections(NodeType.FULL_NODE)) == 1
            assert len(await client.get_connections(NodeType.FARMER)) == 0
            await client.close_connection(connections[0]["node_id"])
            await time_out_assert(10, num_connections, 0)
            # Reorg-aware block fetching: after the reorg, fetching with
            # exclude_reorged=True must return only main-chain blocks.
            blocks: List[FullBlock] = await client.get_blocks(0, 5)
            assert len(blocks) == 5
            await full_node_api_1.reorg_from_index_to_new_index(ReorgProtocol(2, 55, bytes([0x2] * 32), None))
            new_blocks_0: List[FullBlock] = await client.get_blocks(0, 5)
            assert len(new_blocks_0) == 7
            new_blocks: List[FullBlock] = await client.get_blocks(0, 5, exclude_reorged=True)
            assert len(new_blocks) == 5
            assert blocks[0].header_hash == new_blocks[0].header_hash
            assert blocks[1].header_hash == new_blocks[1].header_hash
            assert blocks[2].header_hash == new_blocks[2].header_hash
            assert blocks[3].header_hash != new_blocks[3].header_hash
        finally:
            # Checks that the RPC manages to stop the node
            client.close()
            await client.await_closed()

    @pytest.mark.asyncio
    async def test_signage_points(self, two_nodes_sim_and_wallets_services, empty_blockchain):
        """Test get_recent_signage_point_or_eos: argument validation, lookups of
        signage points and end-of-sub-slot (EOS) objects, and the "reverted"
        flag after a reorg removes them from the main chain.
        """
        nodes, _, bt = two_nodes_sim_and_wallets_services
        full_node_service_1, full_node_service_2 = nodes
        full_node_api_1 = full_node_service_1._api
        full_node_api_2 = full_node_service_2._api
        server_1 = full_node_api_1.full_node.server
        server_2 = full_node_api_2.full_node.server
        config = bt.config
        self_hostname = config["self_hostname"]
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        try:
            client = await FullNodeRpcClient.create(
                self_hostname,
                full_node_service_1.rpc_server.listen_port,
                full_node_service_1.root_path,
                full_node_service_1.config,
            )
            # Only provide one of the two arguments (sp hash XOR eos hash).
            res = await client.get_recent_signage_point_or_eos(None, None)
            assert res is None
            res = await client.get_recent_signage_point_or_eos(std_hash(b"0"), std_hash(b"1"))
            assert res is None
            # Not found
            res = await client.get_recent_signage_point_or_eos(std_hash(b"0"), None)
            assert res is None
            res = await client.get_recent_signage_point_or_eos(None, std_hash(b"0"))
            assert res is None
            blocks = bt.get_consecutive_blocks(5)
            for block in blocks:
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1, force_overflow=True)
            blockchain = full_node_api_1.full_node.blockchain
            # Use a second (empty) blockchain to derive a peak that node 1 doesn't have yet.
            second_blockchain = empty_blockchain
            for block in blocks:
                await _validate_and_add_block(second_blockchain, block)
            # Creates a signage point based on the last block
            peak_2 = second_blockchain.get_peak()
            sp: SignagePoint = get_signage_point(
                test_constants,
                blockchain,
                peak_2,
                peak_2.ip_sub_slot_total_iters(test_constants),
                uint8(4),
                [],
                peak_2.sub_slot_iters,
            )
            # Don't have SP yet
            res = await client.get_recent_signage_point_or_eos(sp.cc_vdf.output.get_hash(), None)
            assert res is None
            # Add the last block
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
            await full_node_api_1.respond_signage_point(
                full_node_protocol.RespondSignagePoint(uint8(4), sp.cc_vdf, sp.cc_proof, sp.rc_vdf, sp.rc_proof), peer
            )
            assert full_node_api_1.full_node.full_node_store.get_signage_point(sp.cc_vdf.output.get_hash()) is not None
            # Properly fetch a signage point
            res = await client.get_recent_signage_point_or_eos(sp.cc_vdf.output.get_hash(), None)
            assert res is not None
            assert "eos" not in res
            assert res["signage_point"] == sp
            assert not res["reverted"]
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
            selected_eos = blocks[-1].finished_sub_slots[0]
            # Don't have EOS yet
            res = await client.get_recent_signage_point_or_eos(None, selected_eos.challenge_chain.get_hash())
            assert res is None
            # Properly fetch an EOS
            for eos in blocks[-1].finished_sub_slots:
                await full_node_api_1.full_node.respond_end_of_sub_slot(
                    full_node_protocol.RespondEndOfSubSlot(eos), peer
                )
            res = await client.get_recent_signage_point_or_eos(None, selected_eos.challenge_chain.get_hash())
            assert res is not None
            assert "signage_point" not in res
            assert res["eos"] == selected_eos
            assert not res["reverted"]
            # Do another one but without sending the slot
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
            blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
            selected_eos = blocks[-1].finished_sub_slots[-1]
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-1]))
            res = await client.get_recent_signage_point_or_eos(None, selected_eos.challenge_chain.get_hash())
            assert res is not None
            assert "signage_point" not in res
            assert res["eos"] == selected_eos
            assert not res["reverted"]
            # Perform a reorg
            blocks = bt.get_consecutive_blocks(12, seed=b"1234")
            for block in blocks:
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
            # Signage point is no longer in the blockchain
            res = await client.get_recent_signage_point_or_eos(sp.cc_vdf.output.get_hash(), None)
            assert res["reverted"]
            assert res["signage_point"] == sp
            assert "eos" not in res
            # EOS is no longer in the blockchain
            res = await client.get_recent_signage_point_or_eos(None, selected_eos.challenge_chain.get_hash())
            assert res is not None
            assert "signage_point" not in res
            assert res["eos"] == selected_eos
            assert res["reverted"]
        finally:
            # Checks that the RPC manages to stop the node
            client.close()
            await client.await_closed()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_keyring_wrapper.py | tests/core/util/test_keyring_wrapper.py | import logging
import pytest
from flax.util.errors import KeychainLabelError, KeychainLabelExists, KeychainFingerprintNotFound, KeychainLabelInvalid
from flax.util.keyring_wrapper import KeyringWrapper, DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
from pathlib import Path
from typing import Type
from sys import platform
from tests.util.keyring import using_temp_file_keyring, using_temp_file_keyring_and_cryptfilekeyring
log = logging.getLogger(__name__)
@pytest.fixture(autouse=True, scope="function")
def setup_keyring_wrapper():
    """Autouse fixture: after every test, tear down the shared KeyringWrapper
    and verify that no shared instance survives the cleanup."""
    yield
    KeyringWrapper.cleanup_shared_instance()
    leftover = KeyringWrapper.get_shared_instance(create_if_necessary=False)
    assert leftover is None
class TestKeyringWrapper:
def test_shared_instance(self):
"""
Using KeyringWrapper's get_shared_instance() method should return the same
instance each time it's called
"""
# When: multiple calls to the shared accessor are made
kw1 = KeyringWrapper.get_shared_instance()
kw2 = KeyringWrapper.get_shared_instance()
# Expect: the shared instance should exist
assert kw1 is not None
# Expect: multiple references should point to the same instance
assert id(kw1) == id(kw2)
# When: destroying the shared instance
KeyringWrapper.cleanup_shared_instance()
# Expect: the shared instance should be cleared
assert KeyringWrapper.get_shared_instance(create_if_necessary=False) is None
# When: creating a new file keyring with a legacy keyring in place
@using_temp_file_keyring_and_cryptfilekeyring()
@pytest.mark.skip(reason="Does only work if `test_keyring_wrapper.py` gets called separately.")
def test_using_legacy_cryptfilekeyring(self):
"""
In the case that an existing CryptFileKeyring (legacy) keyring exists and we're
creating a new FileKeyring, the legacy keyring's use should be prioritized over
the FileKeyring (until migration is triggered by a write to the keyring.)
"""
if platform != "linux":
return
# Expect: the new keyring should not have content (not actually empty though...)
assert KeyringWrapper.get_shared_instance().keyring.has_content() is False
assert Path(KeyringWrapper.get_shared_instance().keyring.keyring_path).exists() is True
assert Path(KeyringWrapper.get_shared_instance().keyring.keyring_path).stat().st_size != 0
# Expect: legacy keyring should be in use
assert KeyringWrapper.get_shared_instance().legacy_keyring is not None
assert KeyringWrapper.get_shared_instance().using_legacy_keyring() is True
assert KeyringWrapper.get_shared_instance().get_keyring() == KeyringWrapper.get_shared_instance().legacy_keyring
# When: a file keyring has content and the legacy keyring exists
@using_temp_file_keyring_and_cryptfilekeyring(populate=True)
def test_using_file_keyring_with_legacy_keyring(self):
"""
In the case that an existing CryptFileKeyring (legacy) keyring exists and we're
using a new FileKeyring with some keys in it, the FileKeyring's use should be
used instead of the legacy keyring.
"""
# Expect: the new keyring should have content
assert KeyringWrapper.get_shared_instance().keyring.has_content() is True
# Expect: the new keyring should be in use
assert KeyringWrapper.get_shared_instance().legacy_keyring is None
assert KeyringWrapper.get_shared_instance().using_legacy_keyring() is False
assert KeyringWrapper.get_shared_instance().get_keyring() == KeyringWrapper.get_shared_instance().keyring
# When: a file keyring has content and the legacy keyring doesn't exists
@using_temp_file_keyring(populate=True)
def test_using_file_keyring_without_legacy_keyring(self):
"""
In the case of a new installation (no legacy CryptFileKeyring) using a FileKeyring
with some content, the legacy keyring should not be used.
"""
# Expect: the new keyring should have content
assert KeyringWrapper.get_shared_instance().keyring.has_content() is True
# Expect: the new keyring should be in use
assert KeyringWrapper.get_shared_instance().legacy_keyring is None
assert KeyringWrapper.get_shared_instance().using_legacy_keyring() is False
assert KeyringWrapper.get_shared_instance().get_keyring() == KeyringWrapper.get_shared_instance().keyring
# When: a file keyring is empty/unpopulated and the legacy keyring doesn't exists
@using_temp_file_keyring()
def test_using_new_file_keyring(self):
"""
In the case of a new installation using a new FileKeyring, the legacy keyring
should not be used.
"""
# Expect: the new keyring should not have any content
assert KeyringWrapper.get_shared_instance().keyring.has_content() is False
# Expect: the new keyring should be in use
assert KeyringWrapper.get_shared_instance().legacy_keyring is None
assert KeyringWrapper.get_shared_instance().using_legacy_keyring() is False
assert KeyringWrapper.get_shared_instance().get_keyring() == KeyringWrapper.get_shared_instance().keyring
# When: using a file keyring
@using_temp_file_keyring()
def test_file_keyring_supports_master_passphrase(self):
"""
File keyrings should support setting a master passphrase
"""
# Expect: keyring supports a master passphrase
assert KeyringWrapper.get_shared_instance().keyring_supports_master_passphrase() is True
# When: creating a new/unpopulated file keyring
@using_temp_file_keyring()
def test_empty_file_keyring_doesnt_have_master_passphrase(self):
"""
A new/unpopulated file keyring should not have a master passphrase set
"""
# Expect: no master passphrase set
assert KeyringWrapper.get_shared_instance().has_master_passphrase() is False
# When: using a populated file keyring
@using_temp_file_keyring(populate=True)
def test_populated_file_keyring_has_master_passphrase(self):
"""
Populated keyring should have the default master passphrase set
"""
# Expect: master passphrase is set
assert KeyringWrapper.get_shared_instance().has_master_passphrase() is True
# When: creating a new file keyring with a legacy keyring in place
@pytest.mark.xfail(reason="wasn't running, fails now, to be removed soon")
@using_temp_file_keyring_and_cryptfilekeyring()
def test_legacy_keyring_does_not_support_master_passphrase(self):
"""
CryptFileKeyring (legacy keyring) should not support setting a master passphrase
"""
# Expect: legacy keyring in use and master passphrase is not supported
assert KeyringWrapper.get_shared_instance().legacy_keyring is not None
assert KeyringWrapper.get_shared_instance().using_legacy_keyring() is True
assert KeyringWrapper.get_shared_instance().keyring_supports_master_passphrase() is False
# When: creating a new file keyring
@using_temp_file_keyring()
def test_default_cached_master_passphrase(self):
"""
The default passphrase DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE is set
"""
# Expect: cached passphrase set to DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE by default
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == (
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE,
False,
)
assert KeyringWrapper.get_shared_instance().has_cached_master_passphrase() is True
# When: using a file keyring
@using_temp_file_keyring()
def test_set_cached_master_passphrase(self):
"""
Setting and retrieving the cached master passphrase should work
"""
# When: setting the cached master passphrase
KeyringWrapper.get_shared_instance().set_cached_master_passphrase("testing one two three")
# Expect: cached passphrase should match
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("testing one two three", False)
# When: setting a validated (successfully decrypted the content) master passphrase
KeyringWrapper.get_shared_instance().set_cached_master_passphrase("apple banana orange grape", validated=True)
# Expect: cached passphrase should match and be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == (
"apple banana orange grape",
True,
)
# When: using a populated file keyring
@using_temp_file_keyring(populate=True)
def test_master_passphrase_is_valid(self):
"""
The default master passphrase should unlock the populated keyring (without any keys)
"""
# Expect: default master passphrase should validate
assert (
KeyringWrapper.get_shared_instance().master_passphrase_is_valid(DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE)
is True
)
# Expect: bogus passphrase should not validate
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("foobarbaz") is False
# When: creating a new unpopulated keyring
@using_temp_file_keyring()
def test_set_master_passphrase_on_empty_keyring(self):
"""
Setting a master passphrase should cache the passphrase and be usable to unlock
the keyring. Using an old passphrase should not unlock the keyring.
"""
# When: setting the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase(None, "testing one two three")
# Expect: the master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("testing one two three", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("testing one two three") is True
# When: changing the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase("testing one two three", "potato potato potato")
# Expect: the new master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("potato potato potato", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("potato potato potato") is True
# Expect: old passphrase should not validate
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("testing one two three") is False
# When: using a populated keyring
@using_temp_file_keyring(populate=True)
def test_set_master_passphrase_on_keyring(self):
"""
Setting a master passphrase should cache the passphrase and be usable to unlock
the keyring. Using an old passphrase should not unlock the keyring.
"""
# When: setting the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase(
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, "testing one two three"
)
# Expect: the master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("testing one two three", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("testing one two three") is True
# When: changing the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase("testing one two three", "potato potato potato")
# Expect: the new master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("potato potato potato", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("potato potato potato") is True
# Expect: old passphrase should not validate
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("testing one two three") is False
# When: using a new empty keyring
@using_temp_file_keyring()
def test_remove_master_passphrase_from_empty_keyring(self):
"""
An empty keyring doesn't require a current passphrase to remove the master passphrase.
Removing the master passphrase will set the default master passphrase on the keyring.
"""
# When: removing the master passphrase from an empty keyring, current passphrase isn't necessary
KeyringWrapper.get_shared_instance().remove_master_passphrase(None)
# Expect: default master passphrase is set
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == (
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE,
True,
)
assert (
KeyringWrapper.get_shared_instance().master_passphrase_is_valid(DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE)
is True
)
# When: using a populated keyring
@using_temp_file_keyring(populate=True)
def test_remove_master_passphrase_from_populated_keyring(self):
"""
A populated keyring will require a current passphrase when removing the master passphrase.
Removing the master passphrase will set the default master passphrase on the keyring.
"""
# When: the master passphrase is set
KeyringWrapper.get_shared_instance().set_master_passphrase(
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, "It's dangerous to go alone, take this!"
)
# When: removing the master passphrase
KeyringWrapper.get_shared_instance().remove_master_passphrase("It's dangerous to go alone, take this!")
# Expect: default master passphrase is set, old passphrase doesn't validate
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == (
DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE,
True,
)
assert (
KeyringWrapper.get_shared_instance().master_passphrase_is_valid(DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE)
is True
)
assert (
KeyringWrapper.get_shared_instance().master_passphrase_is_valid("It's dangerous to go alone, take this!")
is False
)
# When: using a new empty keyring
@using_temp_file_keyring()
def test_get_passphrase(self):
"""
Simple passphrase setting and retrieval
"""
# Expect: passphrase lookup should return None
assert KeyringWrapper.get_shared_instance().get_passphrase("service-abc", "user-xyz") is None
# When: setting a passphrase
KeyringWrapper.get_shared_instance().set_passphrase(
"service-abc", "user-xyz", "super secret passphrase".encode().hex()
)
# Expect: passphrase lookup should succeed
assert (
KeyringWrapper.get_shared_instance().get_passphrase("service-abc", "user-xyz")
== "super secret passphrase".encode().hex()
)
# Expect: non-existent passphrase lookup should fail
assert (
KeyringWrapper.get_shared_instance().get_passphrase("service-123", "some non-existent passphrase") is None
)
# When: using a new empty keyring
@using_temp_file_keyring()
def test_set_passphrase_overwrite(self):
"""
Overwriting a previously-set passphrase should work
"""
# When: initially setting the passphrase
KeyringWrapper.get_shared_instance().set_passphrase(
"service-xyz", "user-123", "initial passphrase".encode().hex()
)
# Expect: passphrase lookup should succeed
assert (
KeyringWrapper.get_shared_instance().get_passphrase("service-xyz", "user-123")
== "initial passphrase".encode().hex()
)
# When: updating the same passphrase
KeyringWrapper.get_shared_instance().set_passphrase(
"service-xyz", "user-123", "updated passphrase".encode().hex()
)
# Expect: the updated passphrase should be retrieved
assert (
KeyringWrapper.get_shared_instance().get_passphrase("service-xyz", "user-123")
== "updated passphrase".encode().hex()
)
# When: using a new empty keyring
@using_temp_file_keyring()
def test_delete_passphrase(self):
"""
Deleting a non-existent passphrase should fail gracefully (no exceptions)
"""
# Expect: deleting a non-existent passphrase should fail gracefully
KeyringWrapper.get_shared_instance().delete_passphrase("some service", "some user")
# When: setting a passphrase
KeyringWrapper.get_shared_instance().set_passphrase("some service", "some user", "500p3r 53cr37".encode().hex())
# Expect: passphrase retrieval should succeed
assert (
KeyringWrapper.get_shared_instance().get_passphrase("some service", "some user")
== "500p3r 53cr37".encode().hex()
)
# When: deleting the passphrase
KeyringWrapper.get_shared_instance().delete_passphrase("some service", "some user")
# Expect: passphrase retrieval should fail gracefully
assert KeyringWrapper.get_shared_instance().get_passphrase("some service", "some user") is None
@using_temp_file_keyring()
def test_emoji_master_passphrase(self):
"""
Emoji master passphrases should just work 😀
"""
# When: setting a passphrase containing emojis
KeyringWrapper.get_shared_instance().set_master_passphrase(None, "🥳🤩🤪🤯😎😝😀")
# Expect: the master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("🥳🤩🤪🤯😎😝😀", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("🥳🤩🤪🤯😎😝😀") is True
# Expect: an invalid passphrase containing an emoji should fail validation
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() != ("🦄🦄🦄🦄🦄🦄🦄🦄", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("🦄🦄🦄🦄🦄🦄🦄🦄") is False
@using_temp_file_keyring()
def test_japanese_master_passphrase(self):
"""
Non-ascii master passphrases should just work
"""
# When: setting a passphrase containing non-ascii characters
KeyringWrapper.get_shared_instance().set_master_passphrase(None, "私は幸せな農夫です")
# Expect: the master passphrase is cached and can be validated
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() == ("私は幸せな農夫です", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("私は幸せな農夫です") is True
# Expect: an invalid passphrase containing an non-ascii characters should fail validation
assert KeyringWrapper.get_shared_instance().get_cached_master_passphrase() != ("私は幸せな農夫ではありません", True)
assert KeyringWrapper.get_shared_instance().master_passphrase_is_valid("私は幸せな農夫ではありません") is False
# When: using a new empty keyring
@using_temp_file_keyring()
def test_set_master_passphrase_with_hint(self):
"""
Setting a passphrase hint at the same time as setting the passphrase
"""
# When: setting the master passphrase with a hint
KeyringWrapper.get_shared_instance().set_master_passphrase(
None, "new master passphrase", passphrase_hint="some passphrase hint"
)
# Expect: hint can be retrieved
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() == "some passphrase hint"
@using_temp_file_keyring()
def test_passphrase_hint(self):
"""
Setting and retrieving the passphrase hint
"""
# Expect: no hint set by default
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() is None
# When: setting the passphrase hint while setting the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase(
None, "passphrase", passphrase_hint="rhymes with bassphrase"
)
# Expect: to retrieve the passphrase hint that was just set
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() == "rhymes with bassphrase"
# When: writing the keyring again
KeyringWrapper.get_shared_instance().keyring.write_keyring()
# Expect: the hint is still set
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() == "rhymes with bassphrase"
@using_temp_file_keyring()
def test_passphrase_hint_removal(self):
"""
Removing a passphrase hint
"""
# When: setting the passphrase hint while setting the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase(
None, "12345", passphrase_hint="President Skroob's luggage combination"
)
# Expect: to retrieve the passphrase hint that was just set
assert (
KeyringWrapper.get_shared_instance().get_master_passphrase_hint()
== "President Skroob's luggage combination"
)
# When: removing the passphrase hint
KeyringWrapper.get_shared_instance().set_master_passphrase("12345", "12345", passphrase_hint=None)
# Expect: passphrase hint has been removed
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() is None
@using_temp_file_keyring()
def test_passphrase_hint_update(self):
"""
Updating a passphrase hint
"""
# When: setting the passphrase hint while setting the master passphrase
KeyringWrapper.get_shared_instance().set_master_passphrase(
None, "i like turtles", passphrase_hint="My deepest darkest secret"
)
# Expect: to retrieve the passphrase hint that was just set
assert KeyringWrapper.get_shared_instance().get_master_passphrase_hint() == "My deepest darkest secret"
# When: updating the passphrase hint
KeyringWrapper.get_shared_instance().set_master_passphrase(
"i like turtles", "i like turtles", passphrase_hint="Something you wouldn't expect The Shredder to say"
)
# Expect: to retrieve the passphrase hint that was just set
assert (
KeyringWrapper.get_shared_instance().get_master_passphrase_hint()
== "Something you wouldn't expect The Shredder to say"
)
@using_temp_file_keyring()
def test_get_label(self):
keyring_wrapper = KeyringWrapper.get_shared_instance()
# label lookup for 1, 2, 3 should return None
assert keyring_wrapper.get_label(1) is None
assert keyring_wrapper.get_label(2) is None
assert keyring_wrapper.get_label(3) is None
# Set and validate a label for 1
keyring_wrapper.set_label(1, "one")
assert keyring_wrapper.get_label(1) == "one"
# Set and validate a label for 3
keyring_wrapper.set_label(3, "three")
# And validate all match the expected values
assert keyring_wrapper.get_label(1) == "one"
assert keyring_wrapper.get_label(2) is None
assert keyring_wrapper.get_label(3) == "three"
@using_temp_file_keyring()
def test_set_label(self):
keyring_wrapper = KeyringWrapper.get_shared_instance()
# Set and validate a label for 1
keyring_wrapper.set_label(1, "one")
assert keyring_wrapper.get_label(1) == "one"
# Set and validate a label for 2
keyring_wrapper.set_label(2, "two")
assert keyring_wrapper.get_label(2) == "two"
# Change the label of 2
keyring_wrapper.set_label(2, "two!")
assert keyring_wrapper.get_label(2) == "two!"
# 1 should still have the same label
assert keyring_wrapper.get_label(1) == "one"
# Change the label of 2 again
keyring_wrapper.set_label(2, "two!!")
assert keyring_wrapper.get_label(2) == "two!!"
# 1 should still have the same label
assert keyring_wrapper.get_label(1) == "one"
# Change the label of 1
keyring_wrapper.set_label(1, "one!")
assert keyring_wrapper.get_label(1) == "one!"
# 2 should still have the same label
assert keyring_wrapper.get_label(2) == "two!!"
@pytest.mark.parametrize(
"label",
[
"🥳🤩🤪🤯😎😝😀",
"私は幸せな農夫です",
"لتفاصيل لتكتشف حقيقة وأساس ت",
],
)
@using_temp_file_keyring()
def test_set_special_labels(self, label: str):
keyring_wrapper = KeyringWrapper.get_shared_instance()
keyring_wrapper.set_label(1, label)
assert keyring_wrapper.get_label(1) == label
@pytest.mark.parametrize(
"label, exception, message",
[
("one", KeychainLabelExists, "label 'one' already exists for fingerprint '1"),
("", KeychainLabelInvalid, "label can't be empty or whitespace only"),
(" ", KeychainLabelInvalid, "label can't be empty or whitespace only"),
("a\nb", KeychainLabelInvalid, "label can't contain newline or tab"),
("a\tb", KeychainLabelInvalid, "label can't contain newline or tab"),
("a label ", KeychainLabelInvalid, "label can't contain leading or trailing whitespaces"),
(" a label", KeychainLabelInvalid, "label can't contain leading or trailing whitespaces"),
(" a label ", KeychainLabelInvalid, "label can't contain leading or trailing whitespaces"),
(" a label ", KeychainLabelInvalid, "label can't contain leading or trailing whitespaces"),
("a" * 66, KeychainLabelInvalid, "label exceeds max length: 66/65"),
("a" * 70, KeychainLabelInvalid, "label exceeds max length: 70/65"),
],
)
@using_temp_file_keyring()
def test_set_label_failures(self, label: str, exception: Type[KeychainLabelError], message: str) -> None:
keyring_wrapper = KeyringWrapper.get_shared_instance()
keyring_wrapper.set_label(1, "one")
with pytest.raises(exception, match=message) as e:
keyring_wrapper.set_label(1, label)
assert e.value.label == label
if isinstance(e.value, KeychainLabelExists):
assert e.value.label == "one"
assert e.value.fingerprint == 1
@using_temp_file_keyring()
def test_delete_label(self) -> None:
keyring_wrapper = KeyringWrapper.get_shared_instance()
# Set labels for 1,2 and validate them
keyring_wrapper.set_label(1, "one")
keyring_wrapper.set_label(2, "two")
assert keyring_wrapper.get_label(1) == "one"
assert keyring_wrapper.get_label(2) == "two"
# Remove the label of 1
keyring_wrapper.delete_label(1)
assert keyring_wrapper.get_label(1) is None
assert keyring_wrapper.get_label(2) == "two"
# Remove the label of 2
keyring_wrapper.delete_label(2)
assert keyring_wrapper.get_label(1) is None
assert keyring_wrapper.get_label(2) is None
# Make sure the deletion fails for 0-2
for i in range(3):
with pytest.raises(KeychainFingerprintNotFound) as e:
keyring_wrapper.delete_label(i)
assert e.value.fingerprint == i
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_files.py | tests/core/util/test_files.py | import os
import pytest
import shutil
import sys
from flax.util import files
from flax.util.files import move_file, move_file_async, write_file_async
from pathlib import Path
class TestMoveFile:
# use tmp_path pytest fixture to create a temporary directory
def test_move_file(self, tmp_path: Path):
"""
Move a file from one location to another and verify the contents.
"""
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
move_file(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_with_overwrite(self, tmp_path: Path):
"""
Move a file from one location to another, overwriting the destination.
"""
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
dst_path.write_text("destination")
move_file(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_create_intermediate_dirs(self, tmp_path: Path):
"""
Move a file from one location to another, creating intermediate directories at the destination.
"""
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination" / "destination.txt"
move_file(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_existing_intermediate_dirs(self, tmp_path: Path):
"""
Move a file from one location to another, where intermediate directories already exist at the destination.
"""
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination" / "destination.txt"
dst_path.parent.mkdir(parents=True, exist_ok=False)
assert dst_path.parent.exists()
move_file(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_source_missing(self, tmp_path: Path):
"""
Expect failure when moving a file from one location to another, where the source does not exist.
"""
src_path: Path = tmp_path / "source.txt"
dst_path: Path = tmp_path / "destination.txt"
with pytest.raises(FileNotFoundError):
move_file(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is False
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_os_replace_raising_permissionerror(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file with os.replace raising a PermissionError. The move should succeed
after using shutil.move to move the file.
"""
def mock_os_replace(src, dst):
raise PermissionError("test")
monkeypatch.setattr(os, "replace", mock_os_replace)
shutil_move_called: bool = False
original_shutil_move = shutil.move
def mock_shutil_move(src, dst):
nonlocal shutil_move_called
shutil_move_called = True
original_shutil_move(src, dst)
monkeypatch.setattr(shutil, "move", mock_shutil_move)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
move_file(src_path, dst_path)
assert shutil_move_called is True
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_overwrite_os_replace_raising_exception(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file with os.replace raising an exception. The move should succeed,
overwriting the destination.
"""
def mock_os_replace(src, dst):
raise PermissionError("test")
monkeypatch.setattr(os, "replace", mock_os_replace)
shutil_move_called: bool = False
original_shutil_move = shutil.move
def mock_shutil_move(src, dst):
nonlocal shutil_move_called
shutil_move_called = True
original_shutil_move(src, dst)
monkeypatch.setattr(shutil, "move", mock_shutil_move)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
dst_path.write_text("destination")
move_file(src_path, dst_path)
assert shutil_move_called is True
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
# use tmp_path pytest fixture to create a temporary directory
def test_move_file_failing(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file with both os.replace and shutil.move raising exceptions. The move should fail.
"""
def mock_os_replace(src, dst):
raise RuntimeError("test")
monkeypatch.setattr(os, "replace", mock_os_replace)
def mock_shutil_move(src, dst):
raise RuntimeError("test2")
monkeypatch.setattr(shutil, "move", mock_shutil_move)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
with pytest.raises(RuntimeError):
move_file(src_path, dst_path)
assert src_path.exists() is True
assert dst_path.exists() is False
class TestMoveFileAsync:
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_move_file_async(self, tmp_path: Path):
"""
Move a file from one location to another.
"""
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
await move_file_async(src_path, dst_path)
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_move_file_async_failure_no_reattempts(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file where the move fails and no reattempts are made. The move should fail.
"""
move_file_called: bool = False
def mock_move_file(src, dst):
nonlocal move_file_called
move_file_called = True
raise Exception("test")
monkeypatch.setattr(files, "move_file", mock_move_file)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
with pytest.raises(FileNotFoundError):
await move_file_async(src_path, dst_path, reattempts=0)
assert move_file_called is True
assert src_path.exists() is True
assert dst_path.exists() is False
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_move_file_async_success_on_reattempt(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file where the move initially fails and then succeeds after reattempting.
The move should succeed.
"""
failed_attempts: int = 2
reattempts: int = 0
original_os_replace = os.replace
def mock_os_replace(src, dst):
nonlocal failed_attempts, reattempts
if reattempts < failed_attempts:
reattempts += 1
raise Exception("test")
else:
original_os_replace(src, dst)
monkeypatch.setattr(os, "replace", mock_os_replace)
def mock_shutil_move(src, dst):
raise Exception("test2")
monkeypatch.setattr(shutil, "move", mock_shutil_move)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
await move_file_async(src_path, dst_path, reattempts=failed_attempts + 1)
assert reattempts == 2
assert src_path.exists() is False
assert dst_path.exists() is True
assert dst_path.read_text() == "source"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_move_file_async_failure_on_reattempt(self, tmp_path: Path, monkeypatch):
"""
Simulate moving a file where the move fails and exhausts all reattempts. The move should fail.
"""
total_allowed_attempts: int = 3
attempts: int = 0
def mock_os_replace(src, dst):
nonlocal attempts
attempts += 1
raise Exception("test")
monkeypatch.setattr(os, "replace", mock_os_replace)
def mock_shutil_move(src, dst):
raise Exception("test2")
monkeypatch.setattr(shutil, "move", mock_shutil_move)
src_path: Path = tmp_path / "source.txt"
src_path.write_text("source")
dst_path: Path = tmp_path / "destination.txt"
with pytest.raises(FileNotFoundError):
await move_file_async(src_path, dst_path, reattempts=total_allowed_attempts - 1)
assert attempts == total_allowed_attempts
assert src_path.exists() is True
assert dst_path.exists() is False
class TestWriteFile:
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file(self, tmp_path: Path):
"""
Write a file to a location.
"""
dest_path: Path = tmp_path / "test_write_file.txt"
await write_file_async(dest_path, "test")
assert dest_path.read_text() == "test"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_overwrite(self, tmp_path: Path):
"""
Write a file to a location and overwrite the file if it already exists.
"""
dest_path: Path = tmp_path / "test_write_file.txt"
dest_path.write_text("test")
await write_file_async(dest_path, "test2")
assert dest_path.read_text() == "test2"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_create_intermediate_dirs(self, tmp_path: Path):
"""
Write a file to a location and create intermediate directories if they do not exist.
"""
dest_path: Path = tmp_path / "test_write_file/a/b/c/test_write_file.txt"
await write_file_async(dest_path, "test")
assert dest_path.read_text() == "test"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_existing_intermediate_dirs(self, tmp_path: Path):
"""
Write a file to a location and where intermediate directories aleady exist.
"""
dest_path: Path = tmp_path / "test_write_file/a/b/c/test_write_file.txt"
dest_path.parent.mkdir(parents=True, exist_ok=False)
assert dest_path.parent.exists()
await write_file_async(dest_path, "test")
assert dest_path.read_text() == "test"
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_default_permissions(self, tmp_path: Path):
"""
Write a file to a location and use the default permissions.
"""
if sys.platform in ["win32", "cygwin"]:
pytest.skip("Setting UNIX file permissions doesn't apply to Windows")
dest_path: Path = tmp_path / "test_write_file/test_write_file.txt"
assert not dest_path.parent.exists()
await write_file_async(dest_path, "test")
assert dest_path.read_text() == "test"
# Expect: parent directory has default permissions of 0o700
assert oct(dest_path.parent.stat().st_mode)[-3:] == oct(0o700)[-3:]
# Expect: file has default permissions of 0o600
assert oct(dest_path.stat().st_mode)[-3:] == oct(0o600)[-3:]
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_custom_permissions(self, tmp_path: Path):
"""
Write a file to a location and use custom permissions.
"""
if sys.platform in ["win32", "cygwin"]:
pytest.skip("Setting UNIX file permissions doesn't apply to Windows")
dest_path: Path = tmp_path / "test_write_file/test_write_file.txt"
await write_file_async(dest_path, "test", file_mode=0o642)
assert dest_path.read_text() == "test"
# Expect: file has custom permissions of 0o642
assert oct(dest_path.stat().st_mode)[-3:] == oct(0o642)[-3:]
@pytest.mark.asyncio
# use tmp_path pytest fixture to create a temporary directory
async def test_write_file_os_replace_raising_permissionerror(self, tmp_path: Path, monkeypatch):
"""
Write a file to a location where os.replace raises PermissionError.
"""
def mock_os_replace(src, dst):
raise PermissionError("test")
monkeypatch.setattr(os, "replace", mock_os_replace)
shutil_move_called: bool = False
original_shutil_move = shutil.move
def mock_shutil_move(src, dst):
nonlocal shutil_move_called
shutil_move_called = True
original_shutil_move(src, dst)
monkeypatch.setattr(shutil, "move", mock_shutil_move)
dest_path: Path = tmp_path / "test_write_file/test_write_file.txt"
await write_file_async(dest_path, "test")
assert shutil_move_called is True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_significant_bits.py | tests/core/util/test_significant_bits.py | from __future__ import annotations
import unittest
from flax.util.significant_bits import count_significant_bits, truncate_to_significant_bits
class TestSignificantBits(unittest.TestCase):
def test_truncate_to_significant_bits(self):
a = -0b001101
assert truncate_to_significant_bits(a, 2) == -0b1100
a = -0b001111
assert truncate_to_significant_bits(a, 2) == -0b1100
a = 0b1111
assert truncate_to_significant_bits(a, 2) == 0b1100
a = 0b1000000111
assert truncate_to_significant_bits(a, 8) == 0b1000000100
a = 0b1000000111
assert truncate_to_significant_bits(a, 0) == 0b0
a = 0b1000000111
assert truncate_to_significant_bits(a, 500) == a
a = -0b1000000111
assert truncate_to_significant_bits(a, 500) == a
a = 0b10101
assert truncate_to_significant_bits(a, 5) == a
a = 0b10101
assert truncate_to_significant_bits(a, 4) == 0b10100
def test_count_significant_bits(self):
assert count_significant_bits(0b0001) == 1
assert count_significant_bits(0b00010) == 1
assert count_significant_bits(0b01010) == 3
assert count_significant_bits(-0b01010) == 3
assert count_significant_bits(0b0) == 0
assert count_significant_bits(0b1) == 1
assert count_significant_bits(0b1000010101010000) == 12
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_config.py | tests/core/util/test_config.py | import asyncio
import copy
import shutil
import tempfile
from concurrent.futures import ProcessPoolExecutor
import pytest
import random
import yaml
from flax.util.config import (
config_path_for_filename,
create_default_flax_config,
initial_config_file,
load_config,
lock_and_load_config,
lock_config,
save_config,
selected_network_address_prefix,
)
from multiprocessing import Pool, Queue, TimeoutError
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Any, Dict, Optional
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(
    root_path: Path,
    config: Dict,
    atomic_write: bool,
    do_sleep: bool,
    iterations: int,
    error_queue: Optional[Queue] = None,
) -> None:
    """
    Wait for a random amount of time and write out the config data. With a large
    config, we expect save_config() to require multiple writes.

    When atomic_write is False, the config is deliberately written via a plain
    copy so a concurrent reader may observe a partially-written file.
    """
    try:
        for i in range(iterations):
            # This is a small sleep to get interweaving reads and writes
            sleep(0.05)
            if do_sleep:
                sleep(random.random())
            if atomic_write:
                # Note that this is usually atomic but in certain circumstances in Windows it can copy the file,
                # leading to a non-atomic operation.
                with lock_config(root_path, "config.yaml"):
                    save_config(root_path=root_path, filename="config.yaml", config_data=config)
            else:
                # Non-atomic path: dump YAML to a temp file, then copy it over the
                # destination while holding the config lock.
                path: Path = config_path_for_filename(root_path, filename="config.yaml")
                with lock_config(root_path, "config.yaml"):
                    with tempfile.TemporaryDirectory(dir=path.parent) as tmp_dir:
                        tmp_path: Path = Path(tmp_dir) / Path("config.yaml")
                        with open(tmp_path, "w") as f:
                            yaml.safe_dump(config, f)
                        shutil.copy2(str(tmp_path), str(path))
    except Exception as e:
        # Report the failure to the parent process (assertions raised inside
        # worker processes are otherwise invisible), then re-raise locally.
        if error_queue is not None:
            error_queue.put(e)
        raise
def read_and_compare_config(
    root_path: Path, default_config: Dict, do_sleep: bool, iterations: int, error_queue: Optional[Queue] = None
):
    """
    Repeatedly load the config under its lock and compare it with the expected
    default contents. If a concurrent writer leaves the file partially written
    or corrupt, load_config should fail or yield mismatching data here.
    """
    try:
        remaining = iterations
        while remaining > 0:
            remaining -= 1
            # Short fixed delay so reads interleave with concurrent writes.
            sleep(0.05)
            # Optional extra random delay to shuffle thread scheduling further.
            if do_sleep:
                sleep(random.random())
            with lock_and_load_config(root_path, "config.yaml") as config:
                assert config == default_config
    except Exception as e:
        # Surface the failure to the parent process before re-raising.
        if error_queue is not None:
            error_queue.put(e)
        raise
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Run one non-atomic writer thread and one reader thread to completion,
    re-raising the first error either of them reported via the shared queue.
    """
    error_queue: Queue = Queue()
    writer = Thread(
        target=write_config,
        kwargs={
            "root_path": root_path,
            "config": default_config,
            "atomic_write": False,
            "do_sleep": True,
            "iterations": 1,
            "error_queue": error_queue,
        },
    )
    reader = Thread(
        target=read_and_compare_config,
        kwargs={
            "root_path": root_path,
            "default_config": default_config,
            "do_sleep": True,
            "iterations": 1,
            "error_queue": error_queue,
        },
    )
    workers = (writer, reader)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Propagate the first captured failure, if any worker reported one.
    if not error_queue.empty():
        raise error_queue.get()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Subprocess entry point: drive the concurrent reader/writer scenario to
    completion inside a fresh asyncio event loop.
    """
    scenario = create_reader_and_writer_tasks(root_path, default_config)
    asyncio.run(scenario)
@pytest.fixture(scope="function")
def default_config_dict() -> Dict:
    """
    Returns a dictionary containing the default config.yaml contents
    """
    # Parse the packaged initial config template; scope="function" gives every
    # test its own fresh copy, so mutations do not leak between tests.
    content: str = initial_config_file("config.yaml")
    config: Dict = yaml.safe_load(content)
    return config
class TestConfig:
    """Tests for config creation, loading, saving and concurrent file access."""

    def test_create_config_new(self, tmpdir):
        """
        Test create_default_flax_config() as in a first run scenario
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        # Expect: config.yaml doesn't exist
        assert config_file_path.exists() is False
        # When: creating a new config
        create_default_flax_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True

        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0

        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are seeded with initial contents
            assert actual_content == expected_content

    def test_create_config_overwrite(self, tmpdir):
        """
        Test create_default_flax_config() when overwriting an existing config.yaml
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        config_file_path.parent.mkdir(parents=True, exist_ok=True)
        # When: config.yaml already exists with content
        with open(config_file_path, "w") as f:
            f.write("Some config content")
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        # When: creating a new config
        create_default_flax_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True

        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0

        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are overwritten with initial contents
            assert actual_content == expected_content

    def test_load_config(self, root_path_populated_with_config, default_config_dict):
        """
        Call load_config() with a default config and verify a few values are set to the expected values
        """
        root_path: Path = root_path_populated_with_config
        # When: loading a newly created config
        config: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert config is not None
        # Expect: config values should match the defaults (from a small sampling)
        assert config["daemon_port"] == default_config_dict["daemon_port"] == 56600
        assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
        assert (
            config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
        )

    def test_load_config_exit_on_error(self, tmp_path: Path):
        """
        Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
        """
        root_path = tmp_path
        config_file_path: Path = root_path / "config" / "config.yaml"
        # When: config file path points to a directory (a full Path is passed as
        # the filename here to make load_config hit an unreadable target)
        config_file_path.mkdir(parents=True, exist_ok=True)
        # When: exit_on_error is True
        # Expect: load_config will exit
        with pytest.raises(SystemExit):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
        # When: exit_on_error is False
        # Expect: load_config will raise an exception
        with pytest.raises(ValueError):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)

    def test_save_config(self, root_path_populated_with_config, default_config_dict):
        """
        Test modifying the config and saving it to disk. The modified value(s) should be present after
        calling load_config().
        """
        root_path: Path = root_path_populated_with_config
        config: Dict = copy.deepcopy(default_config_dict)
        # When: modifying the config
        config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
        # Sanity check that we didn't modify the default config
        assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
        # When: saving the modified config
        with lock_config(root_path, "config.yaml"):
            save_config(root_path=root_path, filename="config.yaml", config_data=config)
        # Expect: modifications should be preserved in the config read from disk
        loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"

    def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
        """
        Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
        to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        """
        # Artifically inflate the size of the default config. This is done to (hopefully) force
        # save_config() to require multiple writes. When save_config() was using shutil.move()
        # multiple writes were observed, leading to read failures when data was partially written.
        default_config_dict["xyz"] = "x" * 32768
        root_path: Path = root_path_populated_with_config
        with lock_config(root_path, "config.yaml"):
            save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
        num_workers: int = 30
        args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
        # Spin-off several processes (not threads) to read and write config data. If any
        # read failures are detected, the failing process will assert.
        with Pool(processes=num_workers) as pool:
            res = pool.starmap_async(run_reader_and_writer_tasks, args)
            try:
                res.get(timeout=60)
            except TimeoutError:
                pytest.skip("Timed out waiting for reader/writer processes to complete")

    @pytest.mark.asyncio
    async def test_non_atomic_writes(self, root_path_populated_with_config, default_config_dict):
        """
        Test whether one continuous writer (writing constantly, but not atomically) will interfere with many
        concurrent readers.
        """
        default_config_dict["xyz"] = "x" * 32768
        root_path: Path = root_path_populated_with_config
        with lock_config(root_path, "config.yaml"):
            save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
        # 10 readers, plus a non-atomic writer on every other iteration.
        with ProcessPoolExecutor(max_workers=4) as pool:
            all_tasks = []
            for i in range(10):
                all_tasks.append(
                    asyncio.get_running_loop().run_in_executor(
                        pool, read_and_compare_config, root_path, default_config_dict, False, 100, None
                    )
                )
                if i % 2 == 0:
                    all_tasks.append(
                        asyncio.get_running_loop().run_in_executor(
                            pool, write_config, root_path, default_config_dict, False, False, 100, None
                        )
                    )
            await asyncio.gather(*all_tasks)

    @pytest.mark.parametrize("prefix", [None])
    def test_selected_network_address_prefix_default_config(self, config_with_address_prefix: Dict[str, Any]) -> None:
        """
        Temp config.yaml created using a default config. address_prefix is defaulted to "xfx"
        """
        config = config_with_address_prefix
        prefix = selected_network_address_prefix(config)
        assert prefix == "xfx"

    @pytest.mark.parametrize("prefix", ["txfx"])
    def test_selected_network_address_prefix_testnet_config(self, config_with_address_prefix: Dict[str, Any]) -> None:
        """
        Temp config.yaml created using a modified config. address_prefix is set to "txfx"
        """
        config = config_with_address_prefix
        prefix = selected_network_address_prefix(config)
        assert prefix == "txfx"

    def test_selected_network_address_prefix_config_dict(self, default_config_dict: Dict[str, Any]) -> None:
        """
        Modified config dictionary has address_prefix set to "customxfx"
        """
        config = default_config_dict
        config["network_overrides"]["config"][config["selected_network"]]["address_prefix"] = "customxfx"
        prefix = selected_network_address_prefix(config)
        assert prefix == "customxfx"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_cached_bls.py | tests/core/util/test_cached_bls.py | from blspy import AugSchemeMPL, G1Element
from flax.util import cached_bls
from flax.util.hash import std_hash
from flax.util.lru_cache import LRUCache
def test_cached_bls():
    """Verify cached aggregate signature verification across cold, partial and full cache hits."""
    n_keys = 10
    # 31-byte prefix plus one index byte yields a unique 32-byte seed per key.
    seed = b"a" * 31
    sks = [AugSchemeMPL.key_gen(seed + bytes([i])) for i in range(n_keys)]
    pks = [bytes(sk.get_g1()) for sk in sks]
    msgs = [("msg-%d" % (i,)).encode() for i in range(n_keys)]
    sigs = [AugSchemeMPL.sign(sk, msg) for sk, msg in zip(sks, msgs)]
    agg_sig = AugSchemeMPL.aggregate(sigs)
    pks_half = pks[: n_keys // 2]
    msgs_half = msgs[: n_keys // 2]
    sigs_half = sigs[: n_keys // 2]
    agg_sig_half = AugSchemeMPL.aggregate(sigs_half)

    # Baseline: verify the full aggregate without any caching involved.
    assert AugSchemeMPL.aggregate_verify([G1Element.from_bytes(pk) for pk in pks], msgs, agg_sig)

    # Verify with empty cache and populate it
    assert cached_bls.aggregate_verify(pks_half, msgs_half, agg_sig_half, True)
    # Verify with partial cache hit
    assert cached_bls.aggregate_verify(pks, msgs, agg_sig, True)
    # Verify with full cache hit
    assert cached_bls.aggregate_verify(pks, msgs, agg_sig)

    # Use a small cache which can not accommodate all pairings
    local_cache = LRUCache(n_keys // 2)
    # Verify signatures and cache pairings one at a time
    for pk, msg, sig in zip(pks_half, msgs_half, sigs_half):
        assert cached_bls.aggregate_verify([pk], [msg], sig, True, local_cache)
    # Verify the same messages with aggregated signature (full cache hit)
    assert cached_bls.aggregate_verify(pks_half, msgs_half, agg_sig_half, False, local_cache)
    # Verify more messages (partial cache hit)
    assert cached_bls.aggregate_verify(pks, msgs, agg_sig, False, local_cache)
def test_cached_bls_repeat_pk():
    """Exercise cached verification when the same public key signs many distinct messages."""
    n_keys = 400
    seed = b"a" * 32
    # 400 identical keys (same seed) plus one distinct key derived from the hashed seed.
    sks = [AugSchemeMPL.key_gen(seed) for i in range(n_keys)] + [AugSchemeMPL.key_gen(std_hash(seed))]
    pks = [bytes(sk.get_g1()) for sk in sks]
    # One unique message per signature, so repeated pks still produce distinct pairings.
    msgs = [("msg-%d" % (i,)).encode() for i in range(n_keys + 1)]
    sigs = [AugSchemeMPL.sign(sk, msg) for sk, msg in zip(sks, msgs)]
    agg_sig = AugSchemeMPL.aggregate(sigs)
    assert AugSchemeMPL.aggregate_verify([G1Element.from_bytes(pk) for pk in pks], msgs, agg_sig)
    assert cached_bls.aggregate_verify(pks, msgs, agg_sig, force_cache=True)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_lru_cache.py | tests/core/util/test_lru_cache.py | from __future__ import annotations
import unittest
from flax.util.lru_cache import LRUCache
class TestLRUCache(unittest.TestCase):
    """Behavioral tests for LRUCache: capacity bound and least-recently-used eviction."""

    def test_lru_cache(self):
        cache = LRUCache(5)  # capacity of 5 entries

        # Miss on an empty cache.
        assert cache.get(b"0") is None
        assert len(cache.cache) == 0

        # Insert and repeatedly overwrite one key: size stays 1, last value wins.
        cache.put(b"0", 1)
        assert len(cache.cache) == 1
        assert cache.get(b"0") == 1
        cache.put(b"0", 2)
        cache.put(b"0", 3)
        cache.put(b"0", 4)
        cache.put(b"0", 6)
        assert cache.get(b"0") == 6
        assert len(cache.cache) == 1

        # Fill up to capacity; all entries remain retrievable.
        cache.put(b"1", 1)
        assert len(cache.cache) == 2
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        cache.put(b"2", 2)
        assert len(cache.cache) == 3
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        cache.put(b"3", 3)
        assert len(cache.cache) == 4
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        assert cache.get(b"3") == 3
        cache.put(b"4", 4)
        assert len(cache.cache) == 5
        # b"3" is deliberately not accessed here, making it the LRU entry.
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        assert cache.get(b"4") == 4

        # Exceeding capacity evicts the least recently used key (b"3").
        cache.put(b"5", 5)
        assert cache.get(b"5") == 5
        assert len(cache.cache) == 5
        assert cache.get(b"3") is None  # 3 is least recently used
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2

        # Next eviction removes b"0", now the oldest-accessed surviving entry.
        cache.put(b"7", 7)
        assert len(cache.cache) == 5
        assert cache.get(b"0") is None
        assert cache.get(b"1") == 1
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_streamable.py | tests/core/util/test_streamable.py | from __future__ import annotations
import io
import re
from dataclasses import dataclass, field, fields
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, get_type_hints
import pytest
from blspy import G1Element
from clvm_tools import binutils
from typing_extensions import Literal, get_args
from flax.protocols.wallet_protocol import RespondRemovals
from flax.simulator.block_tools import BlockTools
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes4, bytes32
from flax.types.full_block import FullBlock
from flax.types.weight_proof import SubEpochChallengeSegment
from flax.util.ints import uint8, uint32, uint64
from flax.util.streamable import (
ConversionError,
DefinitionError,
InvalidSizeError,
InvalidTypeError,
ParameterMissingError,
Streamable,
UnsupportedType,
function_to_parse_one_item,
function_to_stream_one_item,
is_type_List,
is_type_SpecificOptional,
is_type_Tuple,
parse_bool,
parse_bytes,
parse_list,
parse_optional,
parse_size_hints,
parse_str,
parse_tuple,
parse_uint32,
recurse_jsonify,
streamable,
streamable_from_dict,
write_uint32,
)
from tests.setup_nodes import test_constants
def test_int_not_supported() -> None:
    # Plain `int` has no fixed serialized width, so streamable rejects it.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClassInt(Streamable):
            a: int


def test_float_not_supported() -> None:
    # Floats are not part of the streamable wire format.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClassFloat(Streamable):
            a: float


def test_dict_not_suppported() -> None:
    # NOTE(review): "suppported" typo in the function name; kept so test ids stay stable.
    # Dict members are rejected by the streamable decorator.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClassDict(Streamable):
            a: Dict[str, str]


# Plain frozen dataclass that does NOT subclass Streamable (negative fixture).
@dataclass(frozen=True)
class DataclassOnly:
    a: uint8


def test_pure_dataclass_not_supported() -> None:
    # A dataclass that is not itself Streamable cannot be used as a member type.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClassDataclass(Streamable):
            a: DataclassOnly


# Arbitrary non-dataclass class (negative fixture).
class PlainClass:
    a: uint8


def test_plain_class_not_supported() -> None:
    # Arbitrary classes are rejected as streamable member types.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClassPlain(Streamable):
            a: PlainClass
# Minimal streamable with a primitive, a string and a BLS public key member.
@streamable
@dataclass(frozen=True)
class StreamableFromDict1(Streamable):
    a: uint8
    b: str
    c: G1Element


# Nested streamable: two StreamableFromDict1 members plus a primitive.
@streamable
@dataclass(frozen=True)
class StreamableFromDict2(Streamable):
    a: StreamableFromDict1
    b: StreamableFromDict1
    c: uint64


# Fixture type for tuple conversion failure cases (flat and nested tuples).
@streamable
@dataclass(frozen=True)
class ConvertTupleFailures(Streamable):
    a: Tuple[uint8, uint8]
    b: Tuple[uint8, Tuple[uint8, uint8]]
@pytest.mark.parametrize(
    "input_dict, error",
    [
        pytest.param({"a": (1,), "b": (1, (2, 2))}, InvalidSizeError, id="a: item missing"),
        pytest.param({"a": (1, 1, 1), "b": (1, (2, 2))}, InvalidSizeError, id="a: item too much"),
        pytest.param({"a": (1, 1), "b": (1, (2,))}, InvalidSizeError, id="b: item missing"),
        pytest.param({"a": (1, 1), "b": (1, (2, 2, 2))}, InvalidSizeError, id="b: item too much"),
        # NOTE(review): identical input to the "a: invalid type str" case below.
        pytest.param({"a": "11", "b": (1, (2, 2))}, InvalidTypeError, id="a: invalid type list"),
        pytest.param({"a": 1, "b": (1, (2, 2))}, InvalidTypeError, id="a: invalid type int"),
        pytest.param({"a": "11", "b": (1, (2, 2))}, InvalidTypeError, id="a: invalid type str"),
        # NOTE(review): identical input to the "b: invalid type str" case below.
        pytest.param({"a": (1, 1), "b": (1, "22")}, InvalidTypeError, id="b: invalid type list"),
        pytest.param({"a": (1, 1), "b": (1, 2)}, InvalidTypeError, id="b: invalid type int"),
        pytest.param({"a": (1, 1), "b": (1, "22")}, InvalidTypeError, id="b: invalid type str"),
    ],
)
def test_convert_tuple_failures(input_dict: Dict[str, Any], error: Any) -> None:
    # Each malformed input must raise the matching streamable conversion error.
    with pytest.raises(error):
        streamable_from_dict(ConvertTupleFailures, input_dict)
# Fixture type for list conversion failure cases (flat and nested lists).
@streamable
@dataclass(frozen=True)
class ConvertListFailures(Streamable):
    a: List[uint8]
    b: List[List[uint8]]


@pytest.mark.parametrize(
    "input_dict, error",
    [
        # NOTE(review): `a` is a valid list here, so the error presumably comes from
        # converting `b` — this input duplicates the "b: invalid type list" case below.
        pytest.param({"a": [1, 1], "b": [1, [2, 2]]}, InvalidTypeError, id="a: invalid type list"),
        pytest.param({"a": 1, "b": [1, [2, 2]]}, InvalidTypeError, id="a: invalid type int"),
        pytest.param({"a": "11", "b": [1, [2, 2]]}, InvalidTypeError, id="a: invalid type str"),
        pytest.param({"a": [1, 1], "b": [1, [2, 2]]}, InvalidTypeError, id="b: invalid type list"),
        pytest.param({"a": [1, 1], "b": [1, 2]}, InvalidTypeError, id="b: invalid type int"),
        pytest.param({"a": [1, 1], "b": [1, "22"]}, InvalidTypeError, id="b: invalid type str"),
    ],
)
def test_convert_list_failures(input_dict: Dict[str, Any], error: Any) -> None:
    with pytest.raises(error):
        streamable_from_dict(ConvertListFailures, input_dict)
# Fixture type: fixed-size bytes4 (`a`) vs variable-size bytes (`b`).
@streamable
@dataclass(frozen=True)
class ConvertByteTypeFailures(Streamable):
    a: bytes4
    b: bytes


@pytest.mark.parametrize(
    "input_dict, error",
    [
        # `a` accepts only hex strings/bytes of exactly 4 bytes; `b` any hex/bytes.
        pytest.param({"a": 0, "b": bytes(0)}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": [], "b": bytes(0)}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": {}, "b": bytes(0)}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": "invalid", "b": bytes(0)}, ConversionError, id="a: invalid hex string"),
        pytest.param({"a": "000000", "b": bytes(0)}, ConversionError, id="a: hex string too short"),
        pytest.param({"a": "0000000000", "b": bytes(0)}, ConversionError, id="a: hex string too long"),
        pytest.param({"a": b"\00\00\00", "b": bytes(0)}, ConversionError, id="a: bytes too short"),
        pytest.param({"a": b"\00\00\00\00\00", "b": bytes(0)}, ConversionError, id="a: bytes too long"),
        pytest.param({"a": "00000000", "b": 0}, InvalidTypeError, id="b: no string and no bytes"),
        pytest.param({"a": "00000000", "b": []}, InvalidTypeError, id="b: no string and no bytes"),
        pytest.param({"a": "00000000", "b": {}}, InvalidTypeError, id="b: no string and no bytes"),
        pytest.param({"a": "00000000", "b": "invalid"}, ConversionError, id="b: invalid hex string"),
    ],
)
def test_convert_byte_type_failures(input_dict: Dict[str, Any], error: Any) -> None:
    with pytest.raises(error):
        streamable_from_dict(ConvertByteTypeFailures, input_dict)
# Fixture type: a single BLS public key member.
@streamable
@dataclass(frozen=True)
class ConvertUnhashableTypeFailures(Streamable):
    a: G1Element


@pytest.mark.parametrize(
    "input_dict, error",
    [
        pytest.param({"a": 0}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": []}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": {}}, InvalidTypeError, id="a: no string and no bytes"),
        pytest.param({"a": "invalid"}, ConversionError, id="a: invalid hex string"),
        pytest.param({"a": "00" * (G1Element.SIZE - 1)}, ConversionError, id="a: hex string too short"),
        pytest.param({"a": "00" * (G1Element.SIZE + 1)}, ConversionError, id="a: hex string too long"),
        pytest.param({"a": b"\00" * (G1Element.SIZE - 1)}, ConversionError, id="a: bytes too short"),
        pytest.param({"a": b"\00" * (G1Element.SIZE + 1)}, ConversionError, id="a: bytes too long"),
        # Correct size, but all zeroes is not a valid G1 point encoding.
        pytest.param({"a": b"\00" * G1Element.SIZE}, ConversionError, id="a: invalid g1 element"),
    ],
)
def test_convert_unhashable_type_failures(input_dict: Dict[str, Any], error: Any) -> None:
    with pytest.raises(error):
        streamable_from_dict(ConvertUnhashableTypeFailures, input_dict)
# Helper whose __str__ always raises, to exercise conversion error handling
# when the error message itself cannot stringify the offending value.
class NoStrClass:
    def __str__(self) -> str:
        raise RuntimeError("No string")


@streamable
@dataclass(frozen=True)
class ConvertPrimitiveFailures(Streamable):
    a: uint8
    b: uint8
    c: str


@pytest.mark.parametrize(
    "input_dict, error",
    [
        pytest.param({"a": "a", "b": uint8(1), "c": "2"}, ConversionError, id="a: invalid value"),
        pytest.param({"a": 0, "b": [], "c": "2"}, ConversionError, id="b: invalid value"),
        pytest.param({"a": 0, "b": uint8(1), "c": NoStrClass()}, ConversionError, id="c: invalid value"),
    ],
)
def test_convert_primitive_failures(input_dict: Dict[str, Any], error: Any) -> None:
    with pytest.raises(error):
        streamable_from_dict(ConvertPrimitiveFailures, input_dict)
# Each case pins both the raised error type and the exact (regex-escaped)
# error message produced by streamable_from_dict.
@pytest.mark.parametrize(
    "test_class, input_dict, error, error_message",
    [
        [
            StreamableFromDict1,
            {"a": "asdf", "b": "2", "c": G1Element()},
            ConversionError,
            "Failed to convert 'asdf' from type str to uint8: ValueError: invalid literal "
            "for int() with base 10: 'asdf'",
        ],
        [StreamableFromDict1, {"a": 1, "b": "2"}, ParameterMissingError, "1 field missing for StreamableFromDict1: c"],
        [StreamableFromDict1, {"a": 1}, ParameterMissingError, "2 fields missing for StreamableFromDict1: b, c"],
        [StreamableFromDict1, {}, ParameterMissingError, "3 fields missing for StreamableFromDict1: a, b, c"],
        [
            StreamableFromDict1,
            {"a": 1, "b": "2", "c": "asd"},
            ConversionError,
            "Failed to convert 'asd' from type str to bytes: ValueError: non-hexadecimal number found in fromhex() arg "
            "at position 1",
        ],
        [
            StreamableFromDict1,
            {"a": 1, "b": "2", "c": "00" * G1Element.SIZE},
            ConversionError,
            f"Failed to convert {bytes.fromhex('00' * G1Element.SIZE)!r} from type bytes to G1Element: ValueError: "
            "Given G1 non-infinity element must start with 0b10",
        ],
        [
            StreamableFromDict1,
            {"a": [], "b": "2", "c": G1Element()},
            ConversionError,
            "Failed to convert [] from type list to uint8: TypeError: int() argument",
        ],
        [
            StreamableFromDict1,
            {"a": {}, "b": "2", "c": G1Element()},
            ConversionError,
            "Failed to convert {} from type dict to uint8: TypeError: int() argument",
        ],
        [
            StreamableFromDict2,
            {"a": "asdf", "b": 12345, "c": 12345},
            InvalidTypeError,
            "Invalid type: Expected dict, Actual: str",
        ],
        [
            StreamableFromDict2,
            {"a": 12345, "b": {"a": 1, "b": "2"}, "c": 12345},
            InvalidTypeError,
            "Invalid type: Expected dict, Actual: int",
        ],
        [
            StreamableFromDict2,
            {"a": {"a": 1, "b": "2", "c": G1Element()}, "b": {"a": 1, "b": "2"}},
            ParameterMissingError,
            "1 field missing for StreamableFromDict1: c",
        ],
        [
            StreamableFromDict2,
            {"a": {"a": 1, "b": "2"}, "b": {"a": 1, "b": "2"}, "c": 12345},
            ParameterMissingError,
            "1 field missing for StreamableFromDict1: c",
        ],
    ],
)
def test_streamable_from_dict_failures(
    test_class: Type[Streamable], input_dict: Dict[str, Any], error: Any, error_message: str
) -> None:
    with pytest.raises(error, match=re.escape(error_message)):
        streamable_from_dict(test_class, input_dict)
@streamable
@dataclass(frozen=True)
class TestFromJsonDictDefaultValues(Streamable):
    # Every field has a default, so any subset of keys may be omitted from the JSON input.
    a: uint64 = uint64(1)
    b: str = "default"
    c: List[uint64] = field(default_factory=list)


@pytest.mark.parametrize(
    "input_dict, output_dict",
    [
        [{}, {"a": 1, "b": "default", "c": []}],
        [{"a": 2}, {"a": 2, "b": "default", "c": []}],
        [{"b": "not_default"}, {"a": 1, "b": "not_default", "c": []}],
        [{"c": [1, 2]}, {"a": 1, "b": "default", "c": [1, 2]}],
        [{"a": 2, "b": "not_default", "c": [1, 2]}, {"a": 2, "b": "not_default", "c": [1, 2]}],
    ],
)
def test_from_json_dict_default_values(input_dict: Dict[str, object], output_dict: Dict[str, object]) -> None:
    # Compare string representations so uint64 values match their plain-int counterparts.
    assert str(TestFromJsonDictDefaultValues.from_json_dict(input_dict).to_json_dict()) == str(output_dict)
def test_basic_list() -> None:
    """is_type_List accepts list-like type hints and rejects non-list types."""
    sample = [1, 2, 3]
    accepted = (type(sample), List, List[int], List[uint8], list)
    for candidate in accepted:
        assert is_type_List(candidate)
    rejected = (type(Tuple), tuple, dict)
    for candidate in rejected:
        assert not is_type_List(candidate)
def test_not_lists() -> None:
    # Mapping hints must not be classified as lists.
    assert not is_type_List(Dict)


def test_basic_optional() -> None:
    # Optional[X] is detected, including when nested; a plain List hint is not Optional.
    assert is_type_SpecificOptional(Optional[int])
    assert is_type_SpecificOptional(Optional[Optional[int]])
    assert not is_type_SpecificOptional(List[int])
# Fixture classes for __post_init__ conversion tests.
@streamable
@dataclass(frozen=True)
class PostInitTestClassBasic(Streamable):
    a: uint8
    b: str
    c: bytes
    d: bytes32
    e: G1Element


@streamable
@dataclass(frozen=True)
class PostInitTestClassBad(Streamable):
    a: uint8
    # b has no annotation, so it is a plain class attribute, not a dataclass field.
    b = 0


@streamable
@dataclass(frozen=True)
class PostInitTestClassOptional(Streamable):
    a: Optional[uint8]
    b: Optional[uint8]
    c: Optional[Optional[uint8]]
    d: Optional[Optional[uint8]]


@streamable
@dataclass(frozen=True)
class PostInitTestClassList(Streamable):
    a: List[uint8]
    b: List[List[G1Element]]


@streamable
@dataclass(frozen=True)
class PostInitTestClassTuple(Streamable):
    a: Tuple[uint8, str]
    b: Tuple[Tuple[uint8, str], bytes32]
@pytest.mark.parametrize(
    "test_class, args",
    [
        (PostInitTestClassBasic, (24, 99, 300, b"\12" * 32, bytes(G1Element()))),
        (PostInitTestClassBasic, (24, "test", b"\00\01", b"\x1a" * 32, G1Element())),
        (PostInitTestClassBad, (25,)),
        (PostInitTestClassList, ([1, 2, 3], [[G1Element(), bytes(G1Element())], [bytes(G1Element())]])),
        (PostInitTestClassTuple, ((1, "test"), ((200, "test_2"), b"\xba" * 32))),
        (PostInitTestClassOptional, (12, None, 13, None)),
    ],
)
def test_post_init_valid(test_class: Type[Any], args: Tuple[Any, ...]) -> None:
    # __post_init__ must coerce the raw constructor args into the annotated types.
    def validate_item_type(type_in: Type[Any], item: object) -> bool:
        # Recursively check `item` against the (possibly nested) type hint `type_in`.
        if is_type_SpecificOptional(type_in):
            return item is None or validate_item_type(get_args(type_in)[0], item)
        if is_type_Tuple(type_in):
            assert type(item) == tuple
            types = get_args(type_in)
            return all(validate_item_type(tuple_type, tuple_item) for tuple_type, tuple_item in zip(types, item))
        if is_type_List(type_in):
            list_type = get_args(type_in)[0]
            assert type(item) == list
            return all(validate_item_type(list_type, list_item) for list_item in item)
        return isinstance(item, type_in)

    test_object = test_class(*args)
    hints = get_type_hints(test_class)
    test_fields = {field.name: hints.get(field.name, field.type) for field in fields(test_class)}
    for field_name, field_type in test_fields.items():
        assert validate_item_type(field_type, test_object.__dict__[field_name])
@pytest.mark.parametrize(
    "test_class, args, expected_exception",
    [
        (PostInitTestClassBasic, (None, "test", b"\00\01", b"\12" * 32, G1Element()), TypeError),
        (PostInitTestClassBasic, (1, "test", None, b"\12" * 32, G1Element()), AttributeError),
        (PostInitTestClassBasic, (1, "test", b"\00\01", b"\12" * 31, G1Element()), ValueError),
        (PostInitTestClassBasic, (1, "test", b"\00\01", b"\12" * 32, b"\12" * 10), ValueError),
        (PostInitTestClassBad, (1, 2), TypeError),
        (PostInitTestClassList, ({"1": 1}, [[uint8(200), uint8(25)], [uint8(25)]]), InvalidTypeError),
        (PostInitTestClassList, (("1", 1), [[uint8(200), uint8(25)], [uint8(25)]]), InvalidTypeError),
        (PostInitTestClassList, ([1, 2, 3], [uint8(200), uint8(25)]), InvalidTypeError),
        (PostInitTestClassTuple, ((1,), ((200, "test_2"), b"\xba" * 32)), InvalidSizeError),
        (PostInitTestClassTuple, ((1, "test", 1), ((200, "test_2"), b"\xba" * 32)), InvalidSizeError),
        (PostInitTestClassTuple, ((1, "test"), ({"a": 2}, b"\xba" * 32)), InvalidTypeError),
        (PostInitTestClassTuple, ((1, "test"), (G1Element(), b"\xba" * 32)), InvalidTypeError),
        (PostInitTestClassOptional, ([], None, None, None), ValueError),
    ],
)
def test_post_init_failures(test_class: Type[Any], args: Tuple[Any, ...], expected_exception: Type[Exception]) -> None:
    # Malformed constructor arguments must raise during __post_init__ conversion.
    with pytest.raises(expected_exception):
        test_class(*args)
def test_basic() -> None:
    # Round-trip a streamable with lists, nested lists, optionals and a tuple
    # through its binary serialization.
    @streamable
    @dataclass(frozen=True)
    class TestClass(Streamable):
        a: uint32
        b: uint32
        c: List[uint32]
        d: List[List[uint32]]
        e: Optional[uint32]
        f: Optional[uint32]
        g: Tuple[uint32, str, bytes]

    # we want to test invalid here, hence the ignore.
    a = TestClass(24, 352, [1, 2, 4], [[1, 2, 3], [3, 4]], 728, None, (383, "hello", b"goodbye")) # type: ignore[arg-type,list-item] # noqa: E501
    b: bytes = bytes(a)
    assert a == TestClass.from_bytes(b)
def test_variable_size() -> None:
    # Variable-size members (bytes) are serializable...
    @streamable
    @dataclass(frozen=True)
    class TestClass2(Streamable):
        a: uint32
        b: uint32
        c: bytes

    a = TestClass2(uint32(1), uint32(2), b"3")
    bytes(a)

    # ...while an unbounded plain int is rejected at class-definition time.
    with pytest.raises(UnsupportedType):

        @streamable
        @dataclass(frozen=True)
        class TestClass3(Streamable):
            a: int
def test_json(bt: BlockTools) -> None:
    # A genesis FullBlock must round-trip through its JSON dict representation.
    block = bt.create_genesis_block(test_constants, bytes32([0] * 32), uint64(0))
    dict_block = block.to_json_dict()
    assert FullBlock.from_json_dict(dict_block) == block
@streamable
@dataclass(frozen=True)
class OptionalTestClass(Streamable):
    a: Optional[str]
    b: Optional[bool]
    c: Optional[List[Optional[str]]]


@pytest.mark.parametrize(
    "a, b, c",
    [
        ("", True, ["1"]),
        ("1", False, ["1"]),
        ("1", True, []),
        ("1", True, [""]),
        ("1", True, ["1"]),
        (None, None, None),
    ],
)
def test_optional_json(a: Optional[str], b: Optional[bool], c: Optional[List[Optional[str]]]) -> None:
    # Optional members (including nested optionals in lists) must survive a
    # from_json_dict round trip unchanged.
    obj: OptionalTestClass = OptionalTestClass.from_json_dict({"a": a, "b": b, "c": c})
    assert obj.a == a
    assert obj.b == b
    assert obj.c == c
@streamable
@dataclass(frozen=True)
class TestClassRecursive1(Streamable):
    a: List[uint32]


@streamable
@dataclass(frozen=True)
class TestClassRecursive2(Streamable):
    # Nested optionals and lists of another streamable class.
    a: uint32
    b: List[Optional[List[TestClassRecursive1]]]
    c: bytes32


def test_recursive_json() -> None:
    # Deeply nested streamables must round-trip through JSON dicts.
    tc1_a = TestClassRecursive1([uint32(1), uint32(2)])
    tc1_b = TestClassRecursive1([uint32(4), uint32(5)])
    tc1_c = TestClassRecursive1([uint32(7), uint32(8)])
    tc2 = TestClassRecursive2(uint32(5), [[tc1_a], [tc1_b, tc1_c], None], bytes32(bytes([1] * 32)))
    assert TestClassRecursive2.from_json_dict(tc2.to_json_dict()) == tc2
def test_recursive_types() -> None:
    # An Optional[Coin] inside a list-of-tuples member must be accepted by the
    # RespondRemovals constructor, both fresh and re-constructed from attributes.
    coin: Optional[Coin] = None
    l1 = [(bytes32([2] * 32), coin)]
    rr = RespondRemovals(uint32(1), bytes32([1] * 32), l1, None)
    RespondRemovals(rr.height, rr.header_hash, rr.coins, rr.proofs)
def test_ambiguous_deserialization_optionals() -> None:
    # A truncated buffer must fail to parse rather than yield partial data.
    with pytest.raises(AssertionError):
        SubEpochChallengeSegment.from_bytes(b"\x00\x00\x00\x03\xff\xff\xff\xff")

    @streamable
    @dataclass(frozen=True)
    class TestClassOptional(Streamable):
        a: Optional[uint8]

    # Does not have the required elements
    with pytest.raises(AssertionError):
        TestClassOptional.from_bytes(bytes([]))

    # A 0x00 presence byte (None) and a 0x01 byte followed by a value both parse.
    TestClassOptional.from_bytes(bytes([0]))
    TestClassOptional.from_bytes(bytes([1, 2]))
def test_ambiguous_deserialization_int() -> None:
    """A uint32 field rejects input shorter than four bytes."""
    @streamable
    @dataclass(frozen=True)
    class TestClassUint(Streamable):
        a: uint32

    # Does not have the required uint size
    with pytest.raises(ValueError):
        TestClassUint.from_bytes(b"\x00\x00")
def test_ambiguous_deserialization_list() -> None:
    """A list field rejects a length prefix larger than the available payload."""
    @streamable
    @dataclass(frozen=True)
    class TestClassList(Streamable):
        a: List[uint8]

    # Does not have the required elements
    with pytest.raises(ValueError):
        TestClassList.from_bytes(bytes([0, 0, 100, 24]))
def test_ambiguous_deserialization_tuple() -> None:
    """A tuple field rejects input that lacks all of its members."""
    @streamable
    @dataclass(frozen=True)
    class TestClassTuple(Streamable):
        a: Tuple[uint8, str]

    # Does not have the required elements
    with pytest.raises(AssertionError):
        TestClassTuple.from_bytes(bytes([0, 0, 100, 24]))
def test_ambiguous_deserialization_str() -> None:
    """A str field rejects a length prefix larger than the available payload."""
    @streamable
    @dataclass(frozen=True)
    class TestClassStr(Streamable):
        a: str

    # Does not have the required str size
    with pytest.raises(AssertionError):
        TestClassStr.from_bytes(bytes([0, 0, 100, 24, 52]))
def test_ambiguous_deserialization_bytes() -> None:
    """A bytes field rejects payloads shorter than the declared length prefix."""
    @streamable
    @dataclass(frozen=True)
    class TestClassBytes(Streamable):
        a: bytes

    # Does not have the required bytes size
    with pytest.raises(AssertionError):
        TestClassBytes.from_bytes(bytes([0, 0, 100, 24, 52]))

    with pytest.raises(AssertionError):
        TestClassBytes.from_bytes(bytes([0, 0, 0, 1]))

    # Exact-length payloads parse fine.
    TestClassBytes.from_bytes(bytes([0, 0, 0, 1, 52]))
    TestClassBytes.from_bytes(bytes([0, 0, 0, 2, 52, 21]))
def test_ambiguous_deserialization_bool() -> None:
    """A bool field must be exactly one byte; empty input is rejected."""
    @streamable
    @dataclass(frozen=True)
    class TestClassBool(Streamable):
        a: bool

    # A bool requires exactly one byte of input
    with pytest.raises(AssertionError):
        TestClassBool.from_bytes(bytes([]))

    TestClassBool.from_bytes(bytes([0]))
    TestClassBool.from_bytes(bytes([1]))
def test_ambiguous_deserialization_program() -> None:
    """A Program field must consume the whole input; trailing bytes are rejected."""
    @streamable
    @dataclass(frozen=True)
    class TestClassProgram(Streamable):
        a: Program

    program = Program.to(binutils.assemble("()"))  # type: ignore[no-untyped-call] # TODO, add typing in clvm_tools
    TestClassProgram.from_bytes(bytes(program))

    with pytest.raises(AssertionError):
        TestClassProgram.from_bytes(bytes(program) + b"9")
def test_streamable_empty() -> None:
    """A streamable with no fields serializes to zero bytes and round-trips."""
    @streamable
    @dataclass(frozen=True)
    class A(Streamable):
        pass

    instance = A()
    assert A.from_bytes(bytes(instance)) == instance
def test_parse_bool() -> None:
    """parse_bool accepts exactly 0x00/0x01; EOF and any other byte are errors."""
    assert not parse_bool(io.BytesIO(b"\x00"))
    assert parse_bool(io.BytesIO(b"\x01"))

    # EOF
    with pytest.raises(AssertionError):
        parse_bool(io.BytesIO(b""))
    with pytest.raises(ValueError):
        parse_bool(io.BytesIO(b"\xff"))
    with pytest.raises(ValueError):
        parse_bool(io.BytesIO(b"\x02"))
def test_uint32() -> None:
    """parse_uint32/write_uint32 agree for both byte orders; short input is rejected."""
    assert parse_uint32(io.BytesIO(b"\x00\x00\x00\x00")) == 0
    assert parse_uint32(io.BytesIO(b"\x00\x00\x00\x01")) == 1
    assert parse_uint32(io.BytesIO(b"\x00\x00\x00\x01"), "little") == 16777216
    assert parse_uint32(io.BytesIO(b"\x01\x00\x00\x00")) == 16777216
    assert parse_uint32(io.BytesIO(b"\x01\x00\x00\x00"), "little") == 1
    assert parse_uint32(io.BytesIO(b"\xff\xff\xff\xff"), "little") == 4294967295

    def round_trip(value: int, byteorder: Literal["little", "big"]) -> None:
        # Write the value, rewind, and parse it back with the same byte order.
        buf = io.BytesIO()
        write_uint32(buf, uint32(value), byteorder)
        buf.seek(0)
        assert parse_uint32(buf, byteorder) == value

    round_trip(1, "big")
    round_trip(1, "little")
    round_trip(4294967295, "big")
    round_trip(4294967295, "little")

    # Every truncated stream (0-3 bytes) must be rejected.
    for prefix_length in range(4):
        with pytest.raises(AssertionError):
            parse_uint32(io.BytesIO(b"\x00" * prefix_length))
def test_parse_optional() -> None:
    """parse_optional: 0x00 means None, 0x01 means a value follows; anything else errors."""
    assert parse_optional(io.BytesIO(b"\x00"), parse_bool) is None
    assert parse_optional(io.BytesIO(b"\x01\x01"), parse_bool)
    assert not parse_optional(io.BytesIO(b"\x01\x00"), parse_bool)

    # EOF
    with pytest.raises(AssertionError):
        parse_optional(io.BytesIO(b"\x01"), parse_bool)

    # optional must be 0 or 1
    with pytest.raises(ValueError):
        parse_optional(io.BytesIO(b"\x02\x00"), parse_bool)
    with pytest.raises(ValueError):
        parse_optional(io.BytesIO(b"\xff\x00"), parse_bool)
def test_parse_bytes() -> None:
    """parse_bytes reads a 4-byte big-endian length prefix, then that many bytes."""
    assert parse_bytes(io.BytesIO(b"\x00\x00\x00\x00")) == b""
    assert parse_bytes(io.BytesIO(b"\x00\x00\x00\x01\xff")) == b"\xff"

    # 512 bytes
    assert parse_bytes(io.BytesIO(b"\x00\x00\x02\x00" + b"a" * 512)) == b"a" * 512

    # 255 bytes
    assert parse_bytes(io.BytesIO(b"\x00\x00\x00\xff" + b"b" * 255)) == b"b" * 255

    # EOF
    with pytest.raises(AssertionError):
        parse_bytes(io.BytesIO(b"\x00\x00\x00\xff\x01\x02\x03"))
    with pytest.raises(AssertionError):
        parse_bytes(io.BytesIO(b"\xff\xff\xff\xff"))
    with pytest.raises(AssertionError):
        parse_bytes(io.BytesIO(b"\xff\xff\xff\xff" + b"a" * 512))

    # EOF off by one
    with pytest.raises(AssertionError):
        parse_bytes(io.BytesIO(b"\x00\x00\x02\x01" + b"a" * 512))
def test_parse_list() -> None:
    """parse_list reads a 4-byte count then applies the element parser that many times."""
    assert parse_list(io.BytesIO(b"\x00\x00\x00\x00"), parse_bool) == []
    assert parse_list(io.BytesIO(b"\x00\x00\x00\x01\x01"), parse_bool) == [True]
    assert parse_list(io.BytesIO(b"\x00\x00\x00\x03\x01\x00\x01"), parse_bool) == [True, False, True]

    # EOF
    with pytest.raises(AssertionError):
        parse_list(io.BytesIO(b"\x00\x00\x00\x01"), parse_bool)
    with pytest.raises(AssertionError):
        parse_list(io.BytesIO(b"\x00\x00\x00\xff\x00\x00"), parse_bool)
    with pytest.raises(AssertionError):
        parse_list(io.BytesIO(b"\xff\xff\xff\xff\x00\x00"), parse_bool)

    # failure to parser internal type
    with pytest.raises(ValueError):
        parse_list(io.BytesIO(b"\x00\x00\x00\x01\x02"), parse_bool)
def test_parse_tuple() -> None:
    """parse_tuple applies each member parser in order; member errors propagate."""
    assert parse_tuple(io.BytesIO(b""), []) == ()
    assert parse_tuple(io.BytesIO(b"\x00\x00"), [parse_bool, parse_bool]) == (False, False)
    assert parse_tuple(io.BytesIO(b"\x00\x01"), [parse_bool, parse_bool]) == (False, True)

    # error in parsing internal type
    with pytest.raises(ValueError):
        parse_tuple(io.BytesIO(b"\x00\x02"), [parse_bool, parse_bool])

    # EOF
    with pytest.raises(AssertionError):
        parse_tuple(io.BytesIO(b"\x00"), [parse_bool, parse_bool])
class TestFromBytes:
    """Minimal object implementing the ``from_bytes`` protocol; stores the raw bytes."""

    b: bytes

    @classmethod
    def from_bytes(cls, b: bytes) -> "TestFromBytes":
        instance = cls()
        instance.b = b
        return instance
class FailFromBytes:
    """``from_bytes`` implementation that always fails, for error-path tests."""

    @classmethod
    def from_bytes(cls, b: bytes) -> "FailFromBytes":
        # Simulate a parser that rejects every input.
        raise ValueError()
def test_parse_size_hints() -> None:
    """parse_size_hints reads exactly N bytes and delegates to from_bytes."""
    assert parse_size_hints(io.BytesIO(b"1337"), TestFromBytes, 4, False).b == b"1337"

    # EOF
    with pytest.raises(AssertionError):
        parse_size_hints(io.BytesIO(b"133"), TestFromBytes, 4, False)

    # error in underlying type
    with pytest.raises(ValueError):
        parse_size_hints(io.BytesIO(b"1337"), FailFromBytes, 4, False)
def test_parse_str() -> None:
    """parse_str reads a 4-byte big-endian length prefix, then that many UTF-8 bytes."""
    assert parse_str(io.BytesIO(b"\x00\x00\x00\x00")) == ""
    assert parse_str(io.BytesIO(b"\x00\x00\x00\x01a")) == "a"

    # 512 bytes
    assert parse_str(io.BytesIO(b"\x00\x00\x02\x00" + b"a" * 512)) == "a" * 512

    # 255 bytes
    assert parse_str(io.BytesIO(b"\x00\x00\x00\xff" + b"b" * 255)) == "b" * 255

    # EOF
    with pytest.raises(AssertionError):
        parse_str(io.BytesIO(b"\x00\x00\x00\xff\x01\x02\x03"))
    with pytest.raises(AssertionError):
        parse_str(io.BytesIO(b"\xff\xff\xff\xff"))
    with pytest.raises(AssertionError):
        parse_str(io.BytesIO(b"\xff\xff\xff\xff" + b"a" * 512))

    # EOF off by one
    with pytest.raises(AssertionError):
        parse_str(io.BytesIO(b"\x00\x00\x02\x01" + b"a" * 512))
def test_wrong_decorator_order() -> None:
    """@streamable must be applied outside @dataclass; the reverse order is rejected."""
    with pytest.raises(DefinitionError):

        @dataclass(frozen=True)
        @streamable
        class WrongDecoratorOrder(Streamable):
            pass
def test_dataclass_not_frozen() -> None:
    """Streamables must be frozen dataclasses; frozen=False is rejected."""
    with pytest.raises(DefinitionError):

        @streamable
        @dataclass(frozen=False)
        class DataclassNotFrozen(Streamable):
            pass
def test_dataclass_missing() -> None:
    """@streamable without @dataclass is rejected."""
    with pytest.raises(DefinitionError):

        @streamable
        class DataclassMissing(Streamable):
            pass
def test_streamable_inheritance_missing() -> None:
    """A @streamable class that does not inherit Streamable is rejected."""
    with pytest.raises(DefinitionError):
        # we want to test invalid here, hence the ignore.
        @streamable
        @dataclass(frozen=True)
        class StreamableInheritanceMissing:  # type: ignore[type-var]
            pass
@pytest.mark.parametrize(
    "method, input_type",
    [
        (function_to_parse_one_item, float),
        (function_to_parse_one_item, int),
        (function_to_parse_one_item, dict),
        (function_to_stream_one_item, float),
        (function_to_stream_one_item, int),
        (function_to_stream_one_item, dict),
        (recurse_jsonify, 1.0),
        (recurse_jsonify, recurse_jsonify),
    ],
)
def test_unsupported_types(method: Callable[[object], object], input_type: object) -> None:
    """Parser/streamer lookups and jsonify reject types they have no handler for."""
    with pytest.raises(UnsupportedType):
        method(input_type)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_jsonify.py | tests/core/util/test_jsonify.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32
from flax.util.streamable import Streamable, recurse_jsonify, streamable
def dict_with_types(d: Dict[str, Any]) -> Dict[str, Any]:
    """Pair every value in *d* with its concrete type, keeping the original keys."""
    annotated: Dict[str, Any] = {}
    for key, value in d.items():
        annotated[key] = (value, type(value))
    return annotated
def test_primitives() -> None:
    """Scalar field types jsonify to the expected JSON primitives (bytes as 0x-hex strings)."""
    @streamable
    @dataclass(frozen=True)
    class PrimitivesTest(Streamable):
        a: uint32
        b: Optional[str]
        c: str
        d: bytes
        e: bytes32
        f: bool

    t1 = PrimitivesTest(
        uint32(123),
        None,
        "foobar",
        b"\0\1\0\1",
        bytes32(range(32)),
        True,
    )
    assert dict_with_types(t1.to_json_dict()) == {
        "a": (123, int),
        "b": (None, type(None)),
        "c": ("foobar", str),
        "d": ("0x00010001", str),
        "e": ("0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", str),
        "f": (True, bool),
    }

    t2 = PrimitivesTest(
        uint32(0),
        "set optional",
        "foobar",
        b"\0\1",
        bytes32([0] * 32),
        False,
    )
    assert dict_with_types(t2.to_json_dict()) == {
        "a": (0, int),
        "b": ("set optional", str),
        "c": ("foobar", str),
        "d": ("0x0001", str),
        "e": ("0x0000000000000000000000000000000000000000000000000000000000000000", str),
        "f": (False, bool),
    }
def test_list() -> None:
    """A list-of-str field jsonifies to a plain JSON list."""
    @streamable
    @dataclass(frozen=True)
    class ListTest(Streamable):
        d: List[str]

    fixture = ListTest(["foo", "bar"])
    assert fixture.to_json_dict() == {"d": ["foo", "bar"]}
def test_tuple() -> None:
    """A tuple field jsonifies to a JSON list with elements in order."""
    @streamable
    @dataclass(frozen=True)
    class TupleTest(Streamable):
        d: Tuple[str, uint32, str]

    fixture = TupleTest(("foo", uint32(123), "bar"))
    assert fixture.to_json_dict() == {"d": ["foo", 123, "bar"]}
@streamable
@dataclass(frozen=True)
class NestedWithTupleInner(Streamable):
    """Inner fixture: a tuple field plus raw bytes, nested inside NestedWithTupleOuter."""

    a: Tuple[str, uint32, str]
    b: bytes
@streamable
@dataclass(frozen=True)
class NestedWithTupleOuter(Streamable):
    """Outer fixture: a tuple whose first element is itself a streamable."""

    a: Tuple[NestedWithTupleInner, uint32, str]
def test_nested_with_tuple() -> None:
    """Streamables nested inside tuples jsonify to nested dicts/lists."""
    inner = NestedWithTupleInner(("foo", uint32(123), "bar"), bytes([0x13, 0x37]))
    outer = NestedWithTupleOuter((inner, uint32(321), "baz"))
    assert outer.to_json_dict() == {"a": [{"a": ["foo", 123, "bar"], "b": "0x1337"}, 321, "baz"]}
@streamable
@dataclass(frozen=True)
class NestedWithListInner(Streamable):
    """Inner fixture carried in a list by NestedWithListOuter."""

    a: uint32
    b: bytes
@streamable
@dataclass(frozen=True)
class NestedWithListOuter(Streamable):
    """Outer fixture: a list of streamable elements."""

    a: List[NestedWithListInner]
def test_nested_with_list() -> None:
    """Streamables inside a list field jsonify element-wise to dicts."""
    element = NestedWithListInner(uint32(123), bytes([0x13, 0x37]))
    outer = NestedWithListOuter([element])
    assert outer.to_json_dict() == {"a": [{"a": 123, "b": "0x1337"}]}
@streamable
@dataclass(frozen=True)
class TestNestedInner(Streamable):
    """Inner fixture embedded directly (not via a container) in TestNestedOuter."""

    a: Tuple[str, uint32, str]
    b: bytes
@streamable
@dataclass(frozen=True)
class TestNestedOuter(Streamable):
    """Outer fixture: a single directly-nested streamable field."""

    a: TestNestedInner
def test_nested() -> None:
    """A directly-nested streamable jsonifies to a nested dict."""
    inner = TestNestedInner(("foo", uint32(123), "bar"), bytes([0x13, 0x37]))
    assert TestNestedOuter(inner).to_json_dict() == {"a": {"a": ["foo", 123, "bar"], "b": "0x1337"}}
def test_recurse_jsonify() -> None:
    """recurse_jsonify converts bytes to 0x-hex and ints to plain ints, recursively."""
    source = {"a": "foo", "b": bytes([0x13, 0x37]), "c": [uint32(1), uint32(2)], "d": {"bar": None}}
    expected = {"a": "foo", "b": "0x1337", "c": [1, 2], "d": {"bar": None}}
    assert recurse_jsonify(source) == expected
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_lockfile.py | tests/core/util/test_lockfile.py | from __future__ import annotations
import logging
import os
from multiprocessing import Pool, TimeoutError
from pathlib import Path
from sys import platform
from time import sleep
from typing import Any, Callable
import pytest
from flax.util.lock import Lockfile, LockfileError
log = logging.getLogger(__name__)

# Seconds that dummy_sleep_fn holds the lock; tests derive their timeouts from this.
DUMMY_SLEEP_VALUE = 2
def dummy_fn_requiring_lock(*args: object, **kwargs: object) -> str:
    """Trivial payload executed by a child while it holds the lock."""
    return "A winner is you!"
def dummy_sleep_fn(*args: object, **kwargs: object) -> str:
    """Hold the lock for DUMMY_SLEEP_VALUE seconds before returning."""
    sleep(DUMMY_SLEEP_VALUE)
    return "I'm awake!"
def dummy_abort_fn(*args: object, **kwargs: object) -> None:
    """Crash the child process (SIGABRT) shortly after acquiring the lock."""
    sleep(0.25)
    os.abort()
def child_writer_dispatch(func: Callable[..., Any], path: Path, timeout: int, attempts: int) -> Any:
    """Run ``func`` under the lockfile at ``path``, logging and re-raising any failure.

    NOTE(review): both handlers re-raise, so despite the retry loop at most one
    attempt is ever made — presumably intentional for these tests.
    """
    remaining = attempts
    while remaining > 0:
        remaining -= 1
        try:
            with Lockfile.create(path, timeout):
                return func()
        except LockfileError as e:
            log.warning(f"[pid:{os.getpid()}] caught exception in child_writer_dispatch: LockfileError {e}")
            raise e
        except Exception as e:
            log.warning(f"[pid:{os.getpid()}] caught exception in child_writer_dispatch: type: {type(e)}, {e}")
            raise e
def child_writer_dispatch_with_readiness_check(
    func: Callable[..., Any], path: Path, timeout: int, attempts: int, ready_dir: Path, finished_dir: Path
) -> Any:
    """Synchronized variant of ``child_writer_dispatch``.

    Drops a ``<pid>.ready`` marker in ``ready_dir``, waits for the parent to
    create ``ready_dir/start``, then tries to run ``func`` under the lockfile at
    ``path`` up to ``attempts`` times. A ``<pid>.finished`` marker is always
    written on exit, success or failure.

    Raises:
        LockfileError: if the lock could not be acquired after ``attempts`` tries.
    """
    # Write out a file indicating this process is ready to begin
    ready_file_path: Path = ready_dir / f"{os.getpid()}.ready"
    with open(ready_file_path, "w") as f:
        f.write(f"{os.getpid()}\n")
    # Wait up to 30 seconds (120 polls x 0.25s) for the start signal
    start_file_path: Path = ready_dir / "start"
    remaining_attempts = 120
    while remaining_attempts > 0:
        if start_file_path.exists():
            break
        else:
            sleep(0.25)
            remaining_attempts -= 1
    # Bug fix: the old `assert remaining_attempts >= 0` was vacuously true (the
    # loop cannot make it negative), so a missed start signal went undetected.
    assert start_file_path.exists(), "timed out waiting for the start signal"
    try:
        while attempts > 0:
            log.warning(f"{path}, attempts {attempts}")
            try:
                with Lockfile.create(path, timeout):
                    result = func()
                    return result
            except LockfileError:
                attempts -= 1
                if attempts == 0:
                    # Bug fix: re-raise the original error instead of a fresh,
                    # empty LockfileError() so the parent sees the real details.
                    raise
    except Exception as e:
        log.warning(
            f"[pid:{os.getpid()}] caught exception in child_writer_dispatch_with_readiness_check: "
            f"type: {type(e)}, {e}"
        )
        raise e
    finally:
        # Write out a file indicating this process has completed its work
        finished_file_path: Path = finished_dir / f"{os.getpid()}.finished"
        with open(finished_file_path, "w") as f:
            f.write(f"{os.getpid()}\n")
def poll_directory(dir: Path, expected_entries: int, max_attempts: int, interval: float = 1.0) -> bool:
    """Poll ``dir`` until it contains at least ``expected_entries`` entries.

    Checks up to ``max_attempts`` times, sleeping ``interval`` seconds between
    checks. Returns True as soon as enough entries are observed, else False.

    Bug fix: the original ignored both ``max_attempts`` and ``interval`` and
    used hard-coded values (30 attempts, 1-second sleep) instead. Existing call
    sites pass ``max_attempts=30`` and rely on the default interval, so their
    behavior is unchanged.
    """
    found_all: bool = False
    remaining_attempts: int = max_attempts
    while remaining_attempts > 0:
        entries = list(os.scandir(dir))
        if len(entries) < expected_entries:  # Expecting num_workers of dir entries
            log.warning(f"Polling not complete: {len(entries)} of {expected_entries} entries found")
            sleep(interval)
            remaining_attempts -= 1
        else:
            found_all = True
            break
    return found_all
@pytest.fixture(scope="function")
def ready_dir(tmp_path: Path) -> Path:
    """Per-test directory where worker processes drop readiness markers."""
    path = tmp_path / "ready"
    path.mkdir(parents=True, exist_ok=True)
    return path
@pytest.fixture(scope="function")
def finished_dir(tmp_path: Path) -> Path:
    """Per-test directory where worker processes drop completion markers."""
    path = tmp_path / "finished"
    path.mkdir(parents=True, exist_ok=True)
    return path
def test_timeout(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    If the lock is already held, another process should not be able to acquire the same lock, failing after n attempts
    """
    with Lockfile.create(tmp_path):
        child_proc_fn = dummy_fn_requiring_lock
        timeout = 0.25
        attempts = 4  # 4 retries x 0.25s timeout ≈ 1s of lock attempts
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same writer lock, failing after 1 second
            res = pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_fn, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
            # Expect: the child to fail acquiring the writer lock (raises as LockfileError)
            with pytest.raises(LockfileError):
                # 10 second timeout to prevent a bad test from spoiling the fun (raises as LockfileError)
                res.get(timeout=10)
def test_succeeds(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    If the lock is already held, another process will be able to acquire the same lock once the lock is released by
    the current holder
    """
    # When: a lock is already acquired
    with Lockfile.create(tmp_path) as lock:
        child_proc_fn = dummy_fn_requiring_lock
        timeout = 0.25
        attempts = 8  # 8 retries gives the child ample time after the release below
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same writer lock, failing after 1 second
            res = pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_fn, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # Brief delay to allow the child to timeout once
            sleep(0.50)
            # When: the writer lock is released
            lock.release()
            # Expect: the child to acquire the writer lock
            result = res.get(timeout=10)  # 10 second timeout to prevent a bad test from spoiling the fun
            assert result[0] == "A winner is you!"
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
def test_reacquisition_failure(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    After the child process acquires the lock (and sleeps), the previous holder should not be able to quickly reacquire
    the lock
    """
    # When: a lock is already acquired
    with Lockfile.create(tmp_path) as lock:
        child_proc_function = dummy_sleep_fn  # Sleeps for DUMMY_SLEEP_VALUE seconds
        timeout = 0.25
        attempts = 8
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same writer lock, failing after 1 second
            pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_function, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # When: the writer lock is released
            lock.release()
            # Brief delay to allow the child to acquire the lock
            sleep(1)
            # Expect: Reacquiring the lock should fail due to the child holding the lock and sleeping
            with pytest.raises(LockfileError):
                with Lockfile.create(tmp_path, timeout=0.25):
                    pass
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
def test_reacquisition_success(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    After the child process releases the lock, we should be able to acquire the lock
    """
    # When: a writer lock is already acquired
    with Lockfile.create(tmp_path) as lock:
        child_proc_function = dummy_sleep_fn  # Sleeps for DUMMY_SLEEP_VALUE seconds
        timeout = 0.25
        attempts = 4
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same writer lock, failing after 1 second
            pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_function, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # When: the writer lock is released
            lock.release()
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
    # Expect: Reacquiring the lock should succeed after the child finishes and releases the lock
    with Lockfile.create(tmp_path, timeout=(DUMMY_SLEEP_VALUE + 0.25)):
        pass
@pytest.mark.skipif(platform == "darwin", reason="triggers the CrashReporter prompt")
def test_released_on_abort(tmp_path: Path) -> None:
    """
    When a child process is holding the lock and aborts/crashes, we should be able to acquire the lock
    """
    # When: a writer lock is already acquired
    with Lockfile.create(tmp_path) as lock:
        child_proc_function = dummy_abort_fn
        timeout = 0.25
        attempts = 4
        with Pool(processes=1) as pool:
            # When: a child process attempts to acquire the same writer lock, failing after 1 second
            res = pool.starmap_async(child_writer_dispatch, [(child_proc_function, tmp_path, timeout, attempts)])
            # When: the writer lock is released
            lock.release()
            # When: timing out waiting for the child process (because it aborted)
            with pytest.raises(TimeoutError):
                res.get(timeout=2)
    # Expect: Reacquiring the lock should succeed after the child exits, automatically releasing the lock
    with Lockfile.create(tmp_path, timeout=2):
        pass
def test_blocked_by_readers(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    When a lock is already held, another thread/process should not be able to acquire the lock
    """
    with Lockfile.create(tmp_path):
        child_proc_function = dummy_fn_requiring_lock
        timeout = 0.25
        attempts = 4
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same lock for writing, failing after 1 second
            res = pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_function, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
            # Expect: lock acquisition times out (raises as LockfileError)
            with pytest.raises(LockfileError):
                res.get(timeout=30)
def test_initially_blocked_by_readers(tmp_path: Path, ready_dir: Path, finished_dir: Path) -> None:
    """
    When a lock is already held, another thread/process should not be able to acquire the lock until the process
    currently holding the lock releases it
    """
    # When: the lock is already acquired
    with Lockfile.create(tmp_path) as lock:
        child_proc_function = dummy_fn_requiring_lock
        timeout = 1
        attempts = 10
        num_workers = 1
        with Pool(processes=num_workers) as pool:
            # When: a child process attempts to acquire the same lock for writing, failing after 4 seconds
            res = pool.starmap_async(
                child_writer_dispatch_with_readiness_check,
                [(child_proc_function, tmp_path, timeout, attempts, ready_dir, finished_dir)],
            )
            # Wait up to 30 seconds for all processes to indicate readiness
            assert poll_directory(ready_dir, num_workers, 30) is True
            log.warning(f"Test setup complete: {num_workers} workers ready")
            # Signal that testing should begin
            start_file_path: Path = ready_dir / "start"
            with open(start_file_path, "w") as f:
                f.write(f"{os.getpid()}\n")
            # When: we verify that the writer lock is not immediately acquired
            with pytest.raises(TimeoutError):
                res.get(timeout=5)
            # When: the reader releases its lock
            lock.release()
            # Wait up to 30 seconds for all processes to indicate completion
            assert poll_directory(finished_dir, num_workers, 30) is True
            log.warning(f"Finished: {num_workers} workers finished")
            # Expect: the child process to acquire the writer lock
            result = res.get(timeout=10)  # 10 second timeout to prevent a bad test from spoiling the fun
            assert result[0] == "A winner is you!"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_keychain.py | tests/core/util/test_keychain.py | import json
import unittest
from dataclasses import replace
from secrets import token_bytes
from typing import Callable, List, Optional, Tuple
import pytest
from blspy import AugSchemeMPL, G1Element, PrivateKey
from tests.util.keyring import using_temp_file_keyring
from flax.util.errors import (
KeychainFingerprintExists,
KeychainKeyDataMismatch,
KeychainSecretsMissing,
KeychainFingerprintNotFound,
KeychainLabelExists,
KeychainLabelInvalid,
)
from flax.util.ints import uint32
from flax.util.keychain import (
Keychain,
KeyData,
KeyDataSecrets,
bytes_from_mnemonic,
bytes_to_mnemonic,
generate_mnemonic,
mnemonic_to_seed,
)
# Fixed test vector: this 24-word mnemonic, its entropy, and the derived private
# key / fingerprint / public key below are mutually consistent and used across
# the tests in this module.
mnemonic = (
    "rapid this oven common drive ribbon bulb urban uncover napkin kitten usage enforce uncle unveil scene "
    "apart wire mystery torch peanut august flee fantasy"
)
entropy = bytes.fromhex("b1fc1a7717343572077f7aecb25ded77c4a3d93b9e040a5f8649f2aa1e1e5632")
private_key = PrivateKey.from_bytes(bytes.fromhex("6c6bb4cc3dae03b8d0b327dd6765834464a883f7ca7df134970842055efe8afc"))
fingerprint = uint32(1310648153)
public_key = G1Element.from_bytes(
    bytes.fromhex("b5acf3599bc5fa5da1c00f6cc3d5bcf1560def67778b7f50a8c373a83f78761505b6250ab776e38a292e26628009aec4")
)
class TestKeychain(unittest.TestCase):
    """Keychain behavior tests run against a temporary file-backed keyring."""

    @using_temp_file_keyring()
    def test_basic_add_delete(self):
        """Free-index bookkeeping, duplicate detection, and delete-by-fingerprint."""
        kc: Keychain = Keychain(user="testing-1.8.0", service="flax-testing-1.8.0")
        kc.delete_all_keys()
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0
        assert kc.get_first_private_key() is None
        assert kc.get_first_public_key() is None

        mnemonic = generate_mnemonic()
        entropy = bytes_from_mnemonic(mnemonic)
        assert bytes_to_mnemonic(entropy) == mnemonic
        mnemonic_2 = generate_mnemonic()
        fingerprint_2 = AugSchemeMPL.key_gen(mnemonic_to_seed(mnemonic_2)).get_g1().get_fingerprint()

        # misspelled words in the mnemonic
        bad_mnemonic = mnemonic.split(" ")
        bad_mnemonic[6] = "ZZZZZZ"
        self.assertRaisesRegex(
            ValueError,
            "'ZZZZZZ' is not in the mnemonic dictionary; may be misspelled",
            bytes_from_mnemonic,
            " ".join(bad_mnemonic),
        )

        kc.add_private_key(mnemonic)
        assert kc._get_free_private_key_index() == 1
        assert len(kc.get_all_private_keys()) == 1

        kc.add_private_key(mnemonic_2)
        # Re-adding the same mnemonic must raise and report the colliding fingerprint.
        with pytest.raises(KeychainFingerprintExists) as e:
            kc.add_private_key(mnemonic_2)
        assert e.value.fingerprint == fingerprint_2
        assert kc._get_free_private_key_index() == 2
        assert len(kc.get_all_private_keys()) == 2
        assert kc._get_free_private_key_index() == 2
        assert len(kc.get_all_private_keys()) == 2
        assert len(kc.get_all_public_keys()) == 2
        assert kc.get_all_private_keys()[0] == kc.get_first_private_key()
        assert kc.get_all_public_keys()[0] == kc.get_first_public_key()
        assert len(kc.get_all_private_keys()) == 2

        # Deleting the first key frees its slot (index 0) again.
        seed_2 = mnemonic_to_seed(mnemonic)
        seed_key_2 = AugSchemeMPL.key_gen(seed_2)
        kc.delete_key_by_fingerprint(seed_key_2.get_g1().get_fingerprint())
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 1

        kc.delete_all_keys()
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0

        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)))
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)))
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)))
        assert len(kc.get_all_public_keys()) == 3
        assert kc.get_first_private_key() is not None
        assert kc.get_first_public_key() is not None

        kc.delete_all_keys()
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)))
        assert kc.get_first_public_key() is not None

    @using_temp_file_keyring()
    def test_add_private_key_label(self):
        """Labels: uniqueness enforced, optional labels allowed, existing labels preserved."""
        keychain: Keychain = Keychain(user="testing-1.8.0", service="flax-testing-1.8.0")

        key_data_0 = KeyData.generate(label="key_0")
        key_data_1 = KeyData.generate(label="key_1")
        key_data_2 = KeyData.generate(label=None)

        keychain.add_private_key(mnemonic=key_data_0.mnemonic_str(), label=key_data_0.label)
        assert key_data_0 == keychain.get_key(key_data_0.fingerprint, include_secrets=True)

        # Try to add a new key with an existing label should raise
        with pytest.raises(KeychainLabelExists) as e:
            keychain.add_private_key(mnemonic=key_data_1.mnemonic_str(), label=key_data_0.label)
        assert e.value.fingerprint == key_data_0.fingerprint
        assert e.value.label == key_data_0.label

        # Adding the same key with a valid label should work fine
        keychain.add_private_key(mnemonic=key_data_1.mnemonic_str(), label=key_data_1.label)
        assert key_data_1 == keychain.get_key(key_data_1.fingerprint, include_secrets=True)

        # Trying to add an existing key should not have an impact on the existing label
        with pytest.raises(KeychainFingerprintExists):
            keychain.add_private_key(mnemonic=key_data_0.mnemonic_str(), label="other label")
        assert key_data_0 == keychain.get_key(key_data_0.fingerprint, include_secrets=True)

        # Adding a key with no label should not assign any label
        keychain.add_private_key(mnemonic=key_data_2.mnemonic_str(), label=key_data_2.label)
        assert key_data_2 == keychain.get_key(key_data_2.fingerprint, include_secrets=True)

        # All added keys should still be valid with their label
        assert all(
            key_data in [key_data_0, key_data_1, key_data_2] for key_data in keychain.get_keys(include_secrets=True)
        )

    @using_temp_file_keyring()
    def test_bip39_eip2333_test_vector(self):
        """Key derivation matches the published BIP39/EIP-2333 test vector."""
        kc: Keychain = Keychain(user="testing-1.8.0", service="flax-testing-1.8.0")
        kc.delete_all_keys()

        mnemonic = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"
        print("entropy to seed:", mnemonic_to_seed(mnemonic).hex())
        master_sk = kc.add_private_key(mnemonic)
        tv_master_int = 8075452428075949470768183878078858156044736575259233735633523546099624838313
        tv_child_int = 18507161868329770878190303689452715596635858303241878571348190917018711023613
        assert master_sk == PrivateKey.from_bytes(tv_master_int.to_bytes(32, "big"))
        child_sk = AugSchemeMPL.derive_child_sk(master_sk, 0)
        assert child_sk == PrivateKey.from_bytes(tv_child_int.to_bytes(32, "big"))

    def test_bip39_test_vectors(self):
        """All English BIP39 vectors round-trip entropy <-> mnemonic and derive the right seed."""
        with open("tests/util/bip39_test_vectors.json") as f:
            all_vectors = json.loads(f.read())

        for vector_list in all_vectors["english"]:
            entropy_bytes = bytes.fromhex(vector_list[0])
            mnemonic = vector_list[1]
            seed = bytes.fromhex(vector_list[2])

            assert bytes_from_mnemonic(mnemonic) == entropy_bytes
            assert bytes_to_mnemonic(entropy_bytes) == mnemonic
            assert mnemonic_to_seed(mnemonic) == seed

    def test_utf8_nfkd(self):
        """All Unicode normalization forms of the same mnemonic yield the same seed."""
        # Test code from trezor:
        # Copyright (c) 2013 Pavol Rusnak
        # Copyright (c) 2017 mruddy
        # https://github.com/trezor/python-mnemonic/blob/master/test_mnemonic.py
        # The same sentence in various UTF-8 forms
        words_nfkd = "Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a"  # noqa: E501
        words_nfc = "P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f"  # noqa: E501
        words_nfkc = "P\u0159\xed\u0161ern\u011b \u017elu\u0165ou\u010dk\xfd k\u016f\u0148 \xfap\u011bl \u010f\xe1belsk\xe9 \xf3dy z\xe1ke\u0159n\xfd u\u010de\u0148 b\u011b\u017e\xed pod\xe9l z\xf3ny \xfal\u016f"  # noqa: E501
        words_nfd = "Pr\u030ci\u0301s\u030cerne\u030c z\u030clut\u030couc\u030cky\u0301 ku\u030an\u030c u\u0301pe\u030cl d\u030ca\u0301belske\u0301 o\u0301dy za\u0301ker\u030cny\u0301 uc\u030cen\u030c be\u030cz\u030ci\u0301 pode\u0301l zo\u0301ny u\u0301lu\u030a"  # noqa: E501

        seed_nfkd = mnemonic_to_seed(words_nfkd)
        seed_nfc = mnemonic_to_seed(words_nfc)
        seed_nfkc = mnemonic_to_seed(words_nfkc)
        seed_nfd = mnemonic_to_seed(words_nfd)

        assert seed_nfkd == seed_nfc
        assert seed_nfkd == seed_nfkc
        assert seed_nfkd == seed_nfd
def test_key_data_secrets_generate() -> None:
secrets = KeyDataSecrets.generate()
assert secrets.private_key == AugSchemeMPL.key_gen(mnemonic_to_seed(secrets.mnemonic_str()))
assert secrets.entropy == bytes_from_mnemonic(secrets.mnemonic_str())
@pytest.mark.parametrize(
"input_data, from_method", [(mnemonic, KeyDataSecrets.from_mnemonic), (entropy, KeyDataSecrets.from_entropy)]
)
def test_key_data_secrets_creation(input_data: object, from_method: Callable[..., KeyDataSecrets]) -> None:
secrets = from_method(input_data)
assert secrets.mnemonic == mnemonic.split()
assert secrets.mnemonic_str() == mnemonic
assert secrets.entropy == entropy
assert secrets.private_key == private_key
@pytest.mark.parametrize("label", [None, "key"])
def test_key_data_generate(label: Optional[str]) -> None:
key_data = KeyData.generate(label)
assert key_data.private_key == AugSchemeMPL.key_gen(mnemonic_to_seed(key_data.mnemonic_str()))
assert key_data.entropy == bytes_from_mnemonic(key_data.mnemonic_str())
assert key_data.public_key == key_data.private_key.get_g1()
assert key_data.fingerprint == key_data.private_key.get_g1().get_fingerprint()
assert key_data.label == label
@pytest.mark.parametrize("label", [None, "key"])
@pytest.mark.parametrize(
"input_data, from_method", [(mnemonic, KeyData.from_mnemonic), (entropy, KeyData.from_entropy)]
)
def test_key_data_creation(input_data: object, from_method: Callable[..., KeyData], label: Optional[str]) -> None:
key_data = from_method(input_data, label)
assert key_data.fingerprint == fingerprint
assert key_data.public_key == public_key
assert key_data.mnemonic == mnemonic.split()
assert key_data.mnemonic_str() == mnemonic
assert key_data.entropy == entropy
assert key_data.private_key == private_key
assert key_data.label == label
def test_key_data_without_secrets() -> None:
key_data = KeyData(fingerprint, public_key, None, None)
assert key_data.secrets is None
with pytest.raises(KeychainSecretsMissing):
print(key_data.mnemonic)
with pytest.raises(KeychainSecretsMissing):
print(key_data.mnemonic_str())
with pytest.raises(KeychainSecretsMissing):
print(key_data.entropy)
with pytest.raises(KeychainSecretsMissing):
print(key_data.private_key)
@pytest.mark.parametrize(
"input_data, data_type",
[
((mnemonic.split()[:-1], entropy, private_key), "mnemonic"),
((mnemonic.split(), KeyDataSecrets.generate().entropy, private_key), "entropy"),
((mnemonic.split(), entropy, KeyDataSecrets.generate().private_key), "private_key"),
],
)
def test_key_data_secrets_post_init(input_data: Tuple[List[str], bytes, PrivateKey], data_type: str) -> None:
with pytest.raises(KeychainKeyDataMismatch, match=data_type):
KeyDataSecrets(*input_data)
@pytest.mark.parametrize(
"input_data, data_type",
[
((fingerprint, G1Element(), None, KeyDataSecrets(mnemonic.split(), entropy, private_key)), "public_key"),
((fingerprint, G1Element(), None, None), "fingerprint"),
],
)
def test_key_data_post_init(
input_data: Tuple[uint32, G1Element, Optional[str], Optional[KeyDataSecrets]], data_type: str
) -> None:
with pytest.raises(KeychainKeyDataMismatch, match=data_type):
KeyData(*input_data)
@pytest.mark.parametrize("include_secrets", [True, False])
def test_get_key(include_secrets: bool, get_temp_keyring: Keychain):
keychain: Keychain = get_temp_keyring
expected_keys = []
# Add 10 keys and validate the result `get_key` for each of them after each addition
for _ in range(0, 10):
key_data = KeyData.generate()
mnemonic_str = key_data.mnemonic_str()
if not include_secrets:
key_data = replace(key_data, secrets=None)
expected_keys.append(key_data)
# The last created key should not yet succeed in `get_key`
with pytest.raises(KeychainFingerprintNotFound):
keychain.get_key(expected_keys[-1].fingerprint, include_secrets)
# Add it and validate all keys
keychain.add_private_key(mnemonic_str)
assert all(keychain.get_key(key_data.fingerprint, include_secrets) == key_data for key_data in expected_keys)
# Remove 10 keys and validate the result `get_key` for each of them after each removal
while len(expected_keys) > 0:
delete_key = expected_keys.pop()
keychain.delete_key_by_fingerprint(delete_key.fingerprint)
# The removed key should no longer succeed in `get_key`
with pytest.raises(KeychainFingerprintNotFound):
keychain.get_key(delete_key.fingerprint, include_secrets)
assert all(keychain.get_key(key_data.fingerprint, include_secrets) == key_data for key_data in expected_keys)
@pytest.mark.parametrize("include_secrets", [True, False])
def test_get_keys(include_secrets: bool, get_temp_keyring: Keychain):
keychain: Keychain = get_temp_keyring
# Should be empty on start
assert keychain.get_keys(include_secrets) == []
expected_keys = []
# Add 10 keys and validate the result of `get_keys` after each addition
for _ in range(0, 10):
key_data = KeyData.generate()
mnemonic_str = key_data.mnemonic_str()
if not include_secrets:
key_data = replace(key_data, secrets=None)
expected_keys.append(key_data)
keychain.add_private_key(mnemonic_str)
assert keychain.get_keys(include_secrets) == expected_keys
# Remove all 10 keys and validate the result of `get_keys` after each removal
while len(expected_keys) > 0:
delete_key = expected_keys.pop()
keychain.delete_key_by_fingerprint(delete_key.fingerprint)
assert keychain.get_keys(include_secrets) == expected_keys
# Should be empty again
assert keychain.get_keys(include_secrets) == []
def test_set_label(get_temp_keyring: Keychain) -> None:
keychain: Keychain = get_temp_keyring
# Generate a key and add it without label
key_data_0 = KeyData.generate(label=None)
keychain.add_private_key(mnemonic=key_data_0.mnemonic_str(), label=None)
assert key_data_0 == keychain.get_key(key_data_0.fingerprint, include_secrets=True)
# Set a label and validate it
key_data_0 = replace(key_data_0, label="key_0")
assert key_data_0.label is not None
keychain.set_label(fingerprint=key_data_0.fingerprint, label=key_data_0.label)
assert key_data_0 == keychain.get_key(fingerprint=key_data_0.fingerprint, include_secrets=True)
# Try to add the same label for a fingerprint where don't have a key for
with pytest.raises(KeychainFingerprintNotFound):
keychain.set_label(fingerprint=123456, label=key_data_0.label)
# Add a second key
key_data_1 = KeyData.generate(label="key_1")
assert key_data_1.label is not None
keychain.add_private_key(key_data_1.mnemonic_str())
# Try to set the already existing label for the second key
with pytest.raises(KeychainLabelExists) as e:
keychain.set_label(fingerprint=key_data_1.fingerprint, label=key_data_0.label)
assert e.value.fingerprint == key_data_0.fingerprint
assert e.value.label == key_data_0.label
# Set a different label to the second key and validate it
keychain.set_label(fingerprint=key_data_1.fingerprint, label=key_data_1.label)
assert key_data_0 == keychain.get_key(fingerprint=key_data_0.fingerprint, include_secrets=True)
# All added keys should still be valid with their label
assert all(key_data in [key_data_0, key_data_1] for key_data in keychain.get_keys(include_secrets=True))
@pytest.mark.parametrize(
"label, message",
[
("", "label can't be empty or whitespace only"),
(" ", "label can't be empty or whitespace only"),
("a\nb", "label can't contain newline or tab"),
("a\tb", "label can't contain newline or tab"),
("a" * 66, "label exceeds max length: 66/65"),
("a" * 70, "label exceeds max length: 70/65"),
],
)
def test_set_label_invalid_labels(label: str, message: str, get_temp_keyring: Keychain) -> None:
keychain: Keychain = get_temp_keyring
key_data = KeyData.generate()
keychain.add_private_key(key_data.mnemonic_str())
with pytest.raises(KeychainLabelInvalid, match=message) as e:
keychain.set_label(key_data.fingerprint, label)
assert e.value.label == label
def test_delete_label(get_temp_keyring: Keychain) -> None:
keychain: Keychain = get_temp_keyring
# Generate two keys and add them to the keychain
key_data_0 = KeyData.generate(label="key_0")
key_data_1 = KeyData.generate(label="key_1")
def assert_delete_raises():
# Try to delete the labels should fail now since they are gone already
for key_data in [key_data_0, key_data_1]:
with pytest.raises(KeychainFingerprintNotFound) as e:
keychain.delete_label(key_data.fingerprint)
assert e.value.fingerprint == key_data.fingerprint
# Should pass here since the keys are not added yet
assert_delete_raises()
for key in [key_data_0, key_data_1]:
keychain.add_private_key(mnemonic=key.mnemonic_str(), label=key.label)
assert key == keychain.get_key(key.fingerprint, include_secrets=True)
# Delete the label of the first key, validate it was removed and make sure the other key retains its label
keychain.delete_label(key_data_0.fingerprint)
assert replace(key_data_0, label=None) == keychain.get_key(key_data_0.fingerprint, include_secrets=True)
assert key_data_1 == keychain.get_key(key_data_1.fingerprint, include_secrets=True)
# Re-add the label of the first key
assert key_data_0.label is not None
keychain.set_label(key_data_0.fingerprint, key_data_0.label)
# Delete the label of the second key
keychain.delete_label(key_data_1.fingerprint)
assert key_data_0 == keychain.get_key(key_data_0.fingerprint, include_secrets=True)
assert replace(key_data_1, label=None) == keychain.get_key(key_data_1.fingerprint, include_secrets=True)
# Delete the label of the first key again, now both should have no label
keychain.delete_label(key_data_0.fingerprint)
assert replace(key_data_0, label=None) == keychain.get_key(key_data_0.fingerprint, include_secrets=True)
assert replace(key_data_1, label=None) == keychain.get_key(key_data_1.fingerprint, include_secrets=True)
# Should pass here since the key labels are both removed here
assert_delete_raises()
@pytest.mark.parametrize("delete_all", [True, False])
def test_delete_drops_labels(get_temp_keyring: Keychain, delete_all: bool) -> None:
keychain: Keychain = get_temp_keyring
# Generate some keys and add them to the keychain
labels = [f"key_{i}" for i in range(5)]
keys = [KeyData.generate(label=label) for label in labels]
for key_data in keys:
keychain.add_private_key(mnemonic=key_data.mnemonic_str(), label=key_data.label)
assert key_data == keychain.get_key(key_data.fingerprint, include_secrets=True)
assert key_data.label is not None
assert keychain.keyring_wrapper.get_label(key_data.fingerprint) == key_data.label
if delete_all:
# Delete the keys via `delete_all` and make sure no labels are left
keychain.delete_all_keys()
for key_data in keys:
assert keychain.keyring_wrapper.get_label(key_data.fingerprint) is None
else:
# Delete the keys via fingerprint and make sure the label gets dropped
for key_data in keys:
keychain.delete_key_by_fingerprint(key_data.fingerprint)
assert keychain.keyring_wrapper.get_label(key_data.fingerprint) is None
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/test_file_keyring_synchronization.py | tests/core/util/test_file_keyring_synchronization.py | import logging
import os
from flax.util.keyring_wrapper import KeyringWrapper
from multiprocessing import Pool
from pathlib import Path
from sys import platform
from tests.util.keyring import TempKeyring, using_temp_file_keyring
from tests.core.util.test_lockfile import poll_directory
from time import sleep
log = logging.getLogger(__name__)
DUMMY_SLEEP_VALUE = 2
def dummy_set_passphrase(service, user, passphrase, keyring_path, index, num_workers):
with TempKeyring(existing_keyring_path=keyring_path, delete_on_cleanup=False):
if platform == "linux" or platform == "win32" or platform == "cygwin":
# FileKeyring's setup_keyring_file_watcher needs to be called explicitly here,
# otherwise file events won't be detected in the child process
KeyringWrapper.get_shared_instance().keyring.setup_keyring_file_watcher()
# Write out a file indicating this process is ready to begin
ready_file_path: Path = Path(keyring_path).parent / "ready" / f"{index}.ready"
with open(ready_file_path, "w") as f:
f.write(f"{os.getpid()}\n")
# Wait up to 30 seconds for all processes to indicate readiness
start_file_path: Path = Path(ready_file_path.parent) / "start"
remaining_attempts = 120
while remaining_attempts > 0:
if start_file_path.exists():
break
else:
sleep(0.25)
remaining_attempts -= 1
assert remaining_attempts >= 0
KeyringWrapper.get_shared_instance().set_passphrase(service=service, user=user, passphrase=passphrase)
found_passphrase = KeyringWrapper.get_shared_instance().get_passphrase(service, user)
if found_passphrase != passphrase:
log.error(
f"[pid:{os.getpid()}] error: didn't get expected passphrase: "
f"get_passphrase: {found_passphrase}" # lgtm [py/clear-text-logging-sensitive-data]
f", expected: {passphrase}" # lgtm [py/clear-text-logging-sensitive-data]
)
# Write out a file indicating this process has completed its work
finished_file_path: Path = Path(keyring_path).parent / "finished" / f"{index}.finished"
with open(finished_file_path, "w") as f:
f.write(f"{os.getpid()}\n")
assert found_passphrase == passphrase
class TestFileKeyringSynchronization:
# When: using a new empty keyring
@using_temp_file_keyring()
def test_multiple_writers(self):
num_workers = 20
keyring_path = str(KeyringWrapper.get_shared_instance().keyring.keyring_path)
passphrase_list = list(
map(
lambda x: ("test-service", f"test-user-{x}", f"passphrase {x}", keyring_path, x, num_workers),
range(num_workers),
)
)
# Create a directory for each process to indicate readiness
ready_dir: Path = Path(keyring_path).parent / "ready"
ready_dir.mkdir(parents=True, exist_ok=True)
finished_dir: Path = Path(keyring_path).parent / "finished"
finished_dir.mkdir(parents=True, exist_ok=True)
# When: spinning off children to each set a passphrase concurrently
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(dummy_set_passphrase, passphrase_list)
# Wait up to 30 seconds for all processes to indicate readiness
assert poll_directory(ready_dir, num_workers, 30) is True
log.warning(f"Test setup complete: {num_workers} workers ready")
# Signal that testing should begin
start_file_path: Path = ready_dir / "start"
with open(start_file_path, "w") as f:
f.write(f"{os.getpid()}\n")
# Wait up to 30 seconds for all processes to indicate completion
assert poll_directory(finished_dir, num_workers, 30) is True
log.warning(f"Finished: {num_workers} workers finished")
# Collect results
res.get(timeout=10) # 10 second timeout to prevent a bad test from spoiling the fun
# Expect: parent process should be able to find all passphrases that were set by the child processes
for item in passphrase_list:
expected_passphrase = item[2]
actual_passphrase = KeyringWrapper.get_shared_instance().get_passphrase(service=item[0], user=item[1])
assert expected_passphrase == actual_passphrase
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/config.py | tests/core/util/config.py | from __future__ import annotations
parallel = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/util/__init__.py | tests/core/util/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/test_data_layer_util.py | tests/core/data_layer/test_data_layer_util.py | from __future__ import annotations
import dataclasses
from typing import List
import pytest
# TODO: update after resolution in https://github.com/pytest-dev/pytest/issues/7469
from _pytest.fixtures import SubRequest
from flax.data_layer.data_layer_util import ProofOfInclusion, ProofOfInclusionLayer, Side
from flax.types.blockchain_format.sized_bytes import bytes32
pytestmark = pytest.mark.data_layer
def create_valid_proof_of_inclusion(layer_count: int, other_hash_side: Side) -> ProofOfInclusion:
node_hash = bytes32(b"a" * 32)
layers: List[ProofOfInclusionLayer] = []
existing_hash = node_hash
other_hashes = [bytes32([i] * 32) for i in range(layer_count)]
for other_hash in other_hashes:
new_layer = ProofOfInclusionLayer.from_hashes(
primary_hash=existing_hash,
other_hash_side=other_hash_side,
other_hash=other_hash,
)
layers.append(new_layer)
existing_hash = new_layer.combined_hash
return ProofOfInclusion(node_hash=node_hash, layers=layers)
@pytest.fixture(name="side", params=[Side.LEFT, Side.RIGHT])
def side_fixture(request: SubRequest) -> Side:
# https://github.com/pytest-dev/pytest/issues/8763
return request.param # type: ignore[no-any-return]
@pytest.fixture(name="valid_proof_of_inclusion", params=[0, 1, 5])
def valid_proof_of_inclusion_fixture(request: SubRequest, side: Side) -> ProofOfInclusion:
return create_valid_proof_of_inclusion(layer_count=request.param, other_hash_side=side)
@pytest.fixture(
name="invalid_proof_of_inclusion",
params=["bad root hash", "bad other hash", "bad other side", "bad node hash"],
)
def invalid_proof_of_inclusion_fixture(request: SubRequest, side: Side) -> ProofOfInclusion:
valid_proof_of_inclusion = create_valid_proof_of_inclusion(layer_count=5, other_hash_side=side)
layers = list(valid_proof_of_inclusion.layers)
a_hash = bytes32(b"f" * 32)
if request.param == "bad root hash":
layers[-1] = dataclasses.replace(layers[-1], combined_hash=a_hash)
return dataclasses.replace(valid_proof_of_inclusion, layers=layers)
elif request.param == "bad other hash":
layers[1] = dataclasses.replace(layers[1], other_hash=a_hash)
return dataclasses.replace(valid_proof_of_inclusion, layers=layers)
elif request.param == "bad other side":
layers[1] = dataclasses.replace(layers[1], other_hash_side=layers[1].other_hash_side.other())
return dataclasses.replace(valid_proof_of_inclusion, layers=layers)
elif request.param == "bad node hash":
return dataclasses.replace(valid_proof_of_inclusion, node_hash=a_hash)
raise Exception(f"Unhandled parametrization: {request.param!r}")
def test_proof_of_inclusion_is_valid(valid_proof_of_inclusion: ProofOfInclusion) -> None:
assert valid_proof_of_inclusion.valid()
def test_proof_of_inclusion_is_invalid(invalid_proof_of_inclusion: ProofOfInclusion) -> None:
assert not invalid_proof_of_inclusion.valid()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/util.py | tests/core/data_layer/util.py | from __future__ import annotations
import contextlib
import functools
import os
import pathlib
import subprocess
from dataclasses import dataclass
from typing import IO, TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union
from flax.data_layer.data_layer_util import NodeType, Side, Status
from flax.data_layer.data_store import DataStore
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.tree_hash import bytes32
# from subprocess.pyi
_FILE = Union[None, int, IO[Any]]
if TYPE_CHECKING:
# these require Python 3.9 at runtime
os_PathLike_str = os.PathLike[str]
subprocess_CompletedProcess_str = subprocess.CompletedProcess[str]
else:
os_PathLike_str = os.PathLike
subprocess_CompletedProcess_str = subprocess.CompletedProcess
async def general_insert(
data_store: DataStore,
tree_id: bytes32,
key: bytes,
value: bytes,
reference_node_hash: bytes32,
side: Optional[Side],
) -> bytes32:
return await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=reference_node_hash,
side=side,
status=Status.COMMITTED,
)
@dataclass(frozen=True)
class Example:
expected: Program
terminal_nodes: List[bytes32]
async def add_0123_example(data_store: DataStore, tree_id: bytes32) -> Example:
expected = Program.to(
(
(
(b"\x00", b"\x10\x00"),
(b"\x01", b"\x11\x01"),
),
(
(b"\x02", b"\x12\x02"),
(b"\x03", b"\x13\x03"),
),
),
)
insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id)
c_hash = await insert(key=b"\x02", value=b"\x12\x02", reference_node_hash=None, side=None)
b_hash = await insert(key=b"\x01", value=b"\x11\x01", reference_node_hash=c_hash, side=Side.LEFT)
d_hash = await insert(key=b"\x03", value=b"\x13\x03", reference_node_hash=c_hash, side=Side.RIGHT)
a_hash = await insert(key=b"\x00", value=b"\x10\x00", reference_node_hash=b_hash, side=Side.LEFT)
return Example(expected=expected, terminal_nodes=[a_hash, b_hash, c_hash, d_hash])
async def add_01234567_example(data_store: DataStore, tree_id: bytes32) -> Example:
expected = Program.to(
(
(
(
(b"\x00", b"\x10\x00"),
(b"\x01", b"\x11\x01"),
),
(
(b"\x02", b"\x12\x02"),
(b"\x03", b"\x13\x03"),
),
),
(
(
(b"\x04", b"\x14\x04"),
(b"\x05", b"\x15\x05"),
),
(
(b"\x06", b"\x16\x06"),
(b"\x07", b"\x17\x07"),
),
),
),
)
insert = functools.partial(general_insert, data_store=data_store, tree_id=tree_id)
g_hash = await insert(key=b"\x06", value=b"\x16\x06", reference_node_hash=None, side=None)
c_hash = await insert(key=b"\x02", value=b"\x12\x02", reference_node_hash=g_hash, side=Side.LEFT)
b_hash = await insert(key=b"\x01", value=b"\x11\x01", reference_node_hash=c_hash, side=Side.LEFT)
d_hash = await insert(key=b"\x03", value=b"\x13\x03", reference_node_hash=c_hash, side=Side.RIGHT)
a_hash = await insert(key=b"\x00", value=b"\x10\x00", reference_node_hash=b_hash, side=Side.LEFT)
f_hash = await insert(key=b"\x05", value=b"\x15\x05", reference_node_hash=g_hash, side=Side.LEFT)
h_hash = await insert(key=b"\x07", value=b"\x17\x07", reference_node_hash=g_hash, side=Side.RIGHT)
e_hash = await insert(key=b"\x04", value=b"\x14\x04", reference_node_hash=f_hash, side=Side.LEFT)
return Example(expected=expected, terminal_nodes=[a_hash, b_hash, c_hash, d_hash, e_hash, f_hash, g_hash, h_hash])
@dataclass
class FlaxRoot:
path: pathlib.Path
scripts_path: pathlib.Path
def run(
self,
args: List[Union[str, os_PathLike_str]],
*other_args: Any,
check: bool = True,
encoding: str = "utf-8",
stdout: Optional[_FILE] = subprocess.PIPE,
stderr: Optional[_FILE] = subprocess.PIPE,
**kwargs: Any,
) -> subprocess_CompletedProcess_str:
# TODO: --root-path doesn't seem to work here...
kwargs.setdefault("env", {})
kwargs["env"]["FLAX_ROOT"] = os.fspath(self.path)
kwargs["env"]["FLAX_KEYS_ROOT"] = os.fspath(self.path)
# This is for windows
if "SYSTEMROOT" in os.environ:
kwargs["env"]["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
modified_args: List[Union[str, os_PathLike_str]] = [
self.scripts_path.joinpath("flax"),
"--root-path",
self.path,
*args,
]
processed_args: List[str] = [os.fspath(element) for element in modified_args]
final_args = [processed_args, *other_args]
kwargs["check"] = check
kwargs["encoding"] = encoding
kwargs["stdout"] = stdout
kwargs["stderr"] = stderr
return subprocess.run(*final_args, **kwargs)
def read_log(self) -> str:
return self.path.joinpath("log", "debug.log").read_text(encoding="utf-8")
def print_log(self) -> None:
log_text: Optional[str]
try:
log_text = self.read_log()
except FileNotFoundError:
log_text = None
if log_text is None:
print(f"---- no log at: {self.path}")
else:
print(f"---- start of: {self.path}")
print(log_text)
print(f"---- end of: {self.path}")
@contextlib.contextmanager
def print_log_after(self) -> Iterator[None]:
try:
yield
finally:
self.print_log()
def create_valid_node_values(
node_type: NodeType,
left_hash: Optional[bytes32] = None,
right_hash: Optional[bytes32] = None,
) -> Dict[str, Any]:
if node_type == NodeType.INTERNAL:
return {
"hash": Program.to((left_hash, right_hash)).get_tree_hash_precalc(left_hash, right_hash),
"node_type": node_type,
"left": left_hash,
"right": right_hash,
"key": None,
"value": None,
}
elif node_type == NodeType.TERMINAL:
key = b""
value = b""
return {
"hash": Program.to((key, value)).get_tree_hash(),
"node_type": node_type,
"left": None,
"right": None,
"key": key,
"value": value,
}
raise Exception(f"Unhandled node type: {node_type!r}")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/test_data_cli.py | tests/core/data_layer/test_data_cli.py | from __future__ import annotations
import json
from typing import Dict, List
import pytest
from tests.core.data_layer.util import FlaxRoot
pytestmark = pytest.mark.data_layer
@pytest.mark.asyncio
async def test_help(flax_root: FlaxRoot) -> None:
"""Just a trivial test to make sure the subprocessing is at least working and the
data executable does run.
"""
completed_process = flax_root.run(args=["data", "--help"])
assert "Show this message and exit" in completed_process.stdout
@pytest.mark.xfail(strict=True)
@pytest.mark.asyncio
def test_round_trip(flax_root: FlaxRoot, flax_daemon: None, flax_data: None) -> None:
"""Create a table, insert a row, get the row by its hash."""
with flax_root.print_log_after():
create = flax_root.run(args=["data", "create_data_store"])
print(f"create_data_store: {create}")
dic = json.loads(create.stdout)
assert dic["success"]
tree_id = dic["id"]
key = "1a6f915513173902a7216e7d9e4a16bfd088e20683f45de3b432ce72e9cc7aa8"
value = "ffff8353594d8083616263"
changelist: List[Dict[str, str]] = [{"action": "insert", "key": key, "value": value}]
print(json.dumps(changelist))
update = flax_root.run(
args=["data", "update_data_store", "--id", tree_id, "--changelist", json.dumps(changelist)]
)
dic = json.loads(create.stdout)
assert dic["success"]
print(f"update_data_store: {update}")
completed_process = flax_root.run(args=["data", "get_value", "--id", tree_id, "--key", key])
parsed = json.loads(completed_process.stdout)
expected = {"value": value, "success": True}
assert parsed == expected
get_keys_values = flax_root.run(args=["data", "get_keys_values", "--id", tree_id])
print(f"get_keys_values: {get_keys_values}")
changelist = [{"action": "delete", "key": key}]
update = flax_root.run(
args=["data", "update_data_store", "--id", tree_id, "--changelist", json.dumps(changelist)]
)
print(f"update_data_store: {update}")
completed_process = flax_root.run(args=["data", "get_value", "--id", tree_id, "--key", key])
parsed = json.loads(completed_process.stdout)
expected = {"data": None, "success": True}
assert parsed == expected
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/test_data_rpc.py | tests/core/data_layer/test_data_rpc.py | from __future__ import annotations
import asyncio
import contextlib
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Any, AsyncIterator, Dict, List, Optional, Tuple
import pytest
import pytest_asyncio
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.data_layer.data_layer import DataLayer
from flax.data_layer.data_layer_errors import OfferIntegrityError
from flax.data_layer.data_layer_util import OfferStore, StoreProofs
from flax.data_layer.data_layer_wallet import DataLayerWallet, verify_offer
from flax.rpc.data_layer_rpc_api import DataLayerRpcApi
from flax.rpc.wallet_rpc_api import WalletRpcApi
from flax.server.start_data_layer import create_data_layer_service
from flax.server.start_service import Service
from flax.simulator.block_tools import BlockTools
from flax.simulator.full_node_simulator import FullNodeSimulator, backoff_times
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.peer_info import PeerInfo
from flax.util.byte_types import hexstr_to_bytes
from flax.util.config import save_config
from flax.util.ints import uint16, uint32
from flax.wallet.trading.offer import Offer as TradingOffer
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.wallet import Wallet
from flax.wallet.wallet_node import WalletNode
from tests.setup_nodes import SimulatorsAndWalletsServices
from tests.util.wallet_is_synced import wallet_is_synced
pytestmark = pytest.mark.data_layer
nodes = Tuple[WalletNode, FullNodeSimulator]
nodes_with_port_bt_ph = Tuple[WalletRpcApi, FullNodeSimulator, uint16, bytes32, BlockTools]
wallet_and_port_tuple = Tuple[WalletNode, uint16]
two_wallets_with_port = Tuple[Tuple[wallet_and_port_tuple, wallet_and_port_tuple], FullNodeSimulator, BlockTools]
@contextlib.asynccontextmanager
async def init_data_layer(
wallet_rpc_port: uint16, bt: BlockTools, db_path: Path, wallet_service: Optional[Service[WalletNode]] = None
) -> AsyncIterator[DataLayer]:
config = bt.config
config["data_layer"]["wallet_peer"]["port"] = int(wallet_rpc_port)
# TODO: running the data server causes the RPC tests to hang at the end
config["data_layer"]["run_server"] = False
config["data_layer"]["port"] = 0
config["data_layer"]["rpc_port"] = 0
config["data_layer"]["database_path"] = str(db_path.joinpath("db.sqlite"))
save_config(bt.root_path, "config.yaml", config)
service = create_data_layer_service(root_path=bt.root_path, config=config, wallet_service=wallet_service)
await service.start()
try:
yield service._api.data_layer
finally:
service.stop()
await service.wait_closed()
@pytest_asyncio.fixture(name="bare_data_layer_api")
async def bare_data_layer_api_fixture(tmp_path: Path, bt: BlockTools) -> AsyncIterator[DataLayerRpcApi]:
# we won't use this port, this fixture is for _just_ a data layer rpc
port = uint16(1)
async with init_data_layer(wallet_rpc_port=port, bt=bt, db_path=tmp_path.joinpath(str(port))) as data_layer:
data_rpc_api = DataLayerRpcApi(data_layer)
yield data_rpc_api
async def init_wallet_and_node(one_wallet_and_one_simulator: SimulatorsAndWalletsServices) -> nodes_with_port_bt_ph:
    """Connect the wallet to the simulator, farm two blocks to it, and wait for sync.

    Returns ``(wallet_rpc_api, full_node_api, wallet_rpc_port, puzzle_hash, block_tools)``.
    """
    [full_node_service], [wallet_service], bt = one_wallet_and_one_simulator
    wallet_node = wallet_service._node
    full_node_api = full_node_service._api
    await wallet_node.server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
    ph = await wallet_node.wallet_state_manager.main_wallet.get_new_puzzlehash()
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # Rewards for height 1 only; the second farmed block is what confirms them.
    funds = calculate_pool_reward(uint32(1)) + calculate_base_farmer_reward(uint32(1))
    await time_out_assert(20, wallet_is_synced, True, wallet_node, full_node_api)
    balance = await wallet_node.wallet_state_manager.main_wallet.get_confirmed_balance()
    assert balance == funds
    wallet_rpc_api = WalletRpcApi(wallet_node)
    assert wallet_service.rpc_server is not None
    return wallet_rpc_api, full_node_api, wallet_service.rpc_server.listen_port, ph, bt
async def farm_block_check_singelton(
    data_layer: DataLayer, full_node_api: FullNodeSimulator, ph: bytes32, store_id: bytes32
) -> None:
    """Wait for the pending spend, farm it into a block, then wait until the
    store's singleton is reported confirmed.

    NOTE(review): "singelton" is a typo for "singleton"; renaming would require
    touching every caller in this module.
    """
    await time_out_assert(10, check_mempool_spend_count, True, full_node_api, 1)
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    await time_out_assert(10, check_singleton_confirmed, True, data_layer, store_id)
async def is_transaction_confirmed(user_wallet_id: uint32, api: WalletRpcApi, tx_id: bytes32) -> bool:
    """Return True when the wallet RPC reports transaction ``tx_id`` as confirmed.

    ``get_transaction`` raises ValueError for an unknown transaction id; that is
    treated as "not confirmed yet" so callers can poll with ``time_out_assert``.
    """
    try:
        val = await api.get_transaction({"wallet_id": user_wallet_id, "transaction_id": tx_id.hex()})
    except ValueError:
        return False
    # bool(...) replaces the redundant `True if ... else False` while still
    # guaranteeing a plain bool for mypy.
    return bool(TransactionRecord.from_json_dict_convenience(val["transaction"]).confirmed)
async def farm_block_with_spend(
    full_node_api: FullNodeSimulator, ph: bytes32, tx_rec: bytes32, wallet_rpc_api: WalletRpcApi
) -> None:
    """Farm the single pending mempool spend into a block and wait until the
    wallet reports transaction ``tx_rec`` as confirmed."""
    await time_out_assert(10, check_mempool_spend_count, True, full_node_api, 1)
    await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # The literal stands in for user_wallet_id, which (per its text) is unused here.
    await time_out_assert(10, is_transaction_confirmed, True, "this is unused", wallet_rpc_api, tx_rec)
def check_mempool_spend_count(full_node_api: FullNodeSimulator, num_of_spends: int) -> bool:
    """Return True when the simulator's mempool holds exactly ``num_of_spends`` spends."""
    mempool = full_node_api.full_node.mempool_manager.mempool
    return num_of_spends == len(mempool.sorted_spends)
async def check_singleton_confirmed(dl: DataLayer, tree_id: bytes32) -> bool:
    """Return True once the wallet reports a confirmed singleton for ``tree_id``."""
    latest = await dl.wallet_rpc.dl_latest_singleton(tree_id, True)
    return latest is not None
async def process_block_and_check_offer_validity(offer: TradingOffer, offer_setup: OfferSetup) -> bool:
    """Advance the chain one block, then ask the maker's data layer whether ``offer`` is still valid."""
    await offer_setup.full_node_api.process_blocks(count=1)
    wallet_rpc = offer_setup.maker.data_layer.wallet_rpc
    return await wallet_rpc.check_offer_validity(offer=offer)
@pytest.mark.asyncio
async def test_create_insert_get(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """End-to-end: create a store, insert a key, read it back, delete it, and
    verify that unknown-key and empty changelists are rejected."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        # test insert
        data_rpc_api = DataLayerRpcApi(data_layer)
        key = b"a"
        value = b"\x00\x01"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key.hex(), "value": value.hex()}]
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        res = await data_rpc_api.get_value({"id": store_id.hex(), "key": key.hex()})
        wallet_root = await data_rpc_api.get_root({"id": store_id.hex()})
        local_root = await data_rpc_api.get_local_root({"id": store_id.hex()})
        # The on-chain (wallet) root and the local store root must agree.
        assert wallet_root["hash"] == local_root["hash"]
        assert hexstr_to_bytes(res["value"]) == value
        # test delete unknown key
        unknown_key = b"b"
        changelist = [{"action": "delete", "key": unknown_key.hex()}]
        with pytest.raises(ValueError, match="Changelist resulted in no change to tree data"):
            await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        # test delete
        changelist = [{"action": "delete", "key": key.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        with pytest.raises(Exception):
            await data_rpc_api.get_value({"id": store_id.hex(), "key": key.hex()})
        wallet_root = await data_rpc_api.get_root({"id": store_id.hex()})
        local_root = await data_rpc_api.get_local_root({"id": store_id.hex()})
        # An emptied tree reports the all-zero root on chain and no local root.
        assert wallet_root["hash"] == bytes32([0] * 32)
        assert local_root["hash"] is None
        # test empty changelist
        changelist = []
        with pytest.raises(ValueError, match="Changelist resulted in no change to tree data"):
            await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
@pytest.mark.asyncio
async def test_upsert(one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path) -> None:
    """A delete followed by an insert of the same key in one batch behaves as an upsert."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        # test insert
        data_rpc_api = DataLayerRpcApi(data_layer)
        key = b"a"
        value = b"\x00\x01"
        # Delete-then-insert of the same key within a single batch.
        changelist: List[Dict[str, str]] = [
            {"action": "delete", "key": key.hex()},
            {"action": "insert", "key": key.hex(), "value": value.hex()},
        ]
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32.from_hexstr(res["id"])
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        res = await data_rpc_api.get_value({"id": store_id.hex(), "key": key.hex()})
        wallet_root = await data_rpc_api.get_root({"id": store_id.hex()})
        local_root = await data_rpc_api.get_local_root({"id": store_id.hex()})
        assert wallet_root["hash"] == local_root["hash"]
        assert hexstr_to_bytes(res["value"]) == value
@pytest.mark.asyncio
async def test_create_double_insert(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """Insert two keys in separate batches, then delete the first and verify it is gone."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        key1 = b"a"
        value1 = b"\x01\x02"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        res = await data_rpc_api.get_value({"id": store_id.hex(), "key": key1.hex()})
        assert hexstr_to_bytes(res["value"]) == value1
        key2 = b"b"
        value2 = b"\x01\x23"
        changelist = [{"action": "insert", "key": key2.hex(), "value": value2.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        res = await data_rpc_api.get_value({"id": store_id.hex(), "key": key2.hex()})
        assert hexstr_to_bytes(res["value"]) == value2
        changelist = [{"action": "delete", "key": key1.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec2 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec2, wallet_rpc_api)
        # key1 was deleted above, so reading it must now fail.
        with pytest.raises(Exception):
            await data_rpc_api.get_value({"id": store_id.hex(), "key": key1.hex()})
@pytest.mark.asyncio
async def test_keys_values_ancestors(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """Exercise get_keys_values / get_keys / get_ancestors, including reads at historical roots."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    # TODO: with this being a pseudo context manager'ish thing it doesn't actually handle shutdown
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        key1 = b"a"
        value1 = b"\x01\x02"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
        key2 = b"b"
        value2 = b"\x03\x02"
        changelist.append({"action": "insert", "key": key2.hex(), "value": value2.hex()})
        key3 = b"c"
        value3 = b"\x04\x05"
        changelist.append({"action": "insert", "key": key3.hex(), "value": value3.hex()})
        key4 = b"d"
        value4 = b"\x06\x03"
        changelist.append({"action": "insert", "key": key4.hex(), "value": value4.hex()})
        key5 = b"e"
        value5 = b"\x07\x01"
        changelist.append({"action": "insert", "key": key5.hex(), "value": value5.hex()})
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        val = await data_rpc_api.get_keys_values({"id": store_id.hex()})
        keys = await data_rpc_api.get_keys({"id": store_id.hex()})
        # The RPC returns keys and values as "0x"-prefixed hex strings.
        dic = {}
        for item in val["keys_values"]:
            dic[item["key"]] = item["value"]
        assert dic["0x" + key1.hex()] == "0x" + value1.hex()
        assert dic["0x" + key2.hex()] == "0x" + value2.hex()
        assert dic["0x" + key3.hex()] == "0x" + value3.hex()
        assert dic["0x" + key4.hex()] == "0x" + value4.hex()
        assert dic["0x" + key5.hex()] == "0x" + value5.hex()
        assert len(keys["keys"]) == len(dic)
        for key in keys["keys"]:
            assert key in dic
        val = await data_rpc_api.get_ancestors({"id": store_id.hex(), "hash": val["keys_values"][4]["hash"]})
        # todo better assertions for get_ancestors result
        assert len(val["ancestors"]) == 3
        res_before = await data_rpc_api.get_root({"id": store_id.hex()})
        assert res_before["confirmed"] is True
        assert res_before["timestamp"] > 0
        key6 = b"tasdfsd"
        value6 = b"\x08\x02"
        changelist = [{"action": "insert", "key": key6.hex(), "value": value6.hex()}]
        key7 = b"basdff"
        value7 = b"\x09\x02"
        changelist.append({"action": "insert", "key": key7.hex(), "value": value7.hex()})
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        res_after = await data_rpc_api.get_root({"id": store_id.hex()})
        assert res_after["confirmed"] is True
        assert res_after["timestamp"] > res_before["timestamp"]
        # Historical reads: querying by the old root hash must still see exactly
        # the first five pairs, while the new root sees all seven.
        pairs_before = await data_rpc_api.get_keys_values({"id": store_id.hex(), "root_hash": res_before["hash"].hex()})
        pairs_after = await data_rpc_api.get_keys_values({"id": store_id.hex(), "root_hash": res_after["hash"].hex()})
        keys_before = await data_rpc_api.get_keys({"id": store_id.hex(), "root_hash": res_before["hash"].hex()})
        keys_after = await data_rpc_api.get_keys({"id": store_id.hex(), "root_hash": res_after["hash"].hex()})
        assert len(pairs_before["keys_values"]) == len(keys_before["keys"]) == 5
        assert len(pairs_after["keys_values"]) == len(keys_after["keys"]) == 7
@pytest.mark.asyncio
async def test_get_roots(one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path) -> None:
    """get_roots reports per-store roots: all-zero for an empty store, non-zero after inserts."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id1 = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id1)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id2 = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id2)
        key1 = b"a"
        value1 = b"\x01\x02"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
        key2 = b"b"
        value2 = b"\x03\x02"
        changelist.append({"action": "insert", "key": key2.hex(), "value": value2.hex()})
        key3 = b"c"
        value3 = b"\x04\x05"
        changelist.append({"action": "insert", "key": key3.hex(), "value": value3.hex()})
        res = await data_rpc_api.batch_update({"id": store_id1.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        roots = await data_rpc_api.get_roots({"ids": [store_id1.hex(), store_id2.hex()]})
        # Only store 1 was updated, so store 2 still reports the empty (all-zero) root.
        assert roots["root_hashes"][1]["id"] == store_id2
        assert roots["root_hashes"][1]["hash"] == bytes32([0] * 32)
        assert roots["root_hashes"][1]["confirmed"] is True
        assert roots["root_hashes"][1]["timestamp"] > 0
        key4 = b"d"
        value4 = b"\x06\x03"
        changelist = [{"action": "insert", "key": key4.hex(), "value": value4.hex()}]
        key5 = b"e"
        value5 = b"\x07\x01"
        changelist.append({"action": "insert", "key": key5.hex(), "value": value5.hex()})
        res = await data_rpc_api.batch_update({"id": store_id2.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        roots = await data_rpc_api.get_roots({"ids": [store_id1.hex(), store_id2.hex()]})
        # After its own update, store 2 now has a real (non-zero) root.
        assert roots["root_hashes"][1]["id"] == store_id2
        assert roots["root_hashes"][1]["hash"] is not None
        assert roots["root_hashes"][1]["hash"] != bytes32([0] * 32)
        assert roots["root_hashes"][1]["confirmed"] is True
        assert roots["root_hashes"][1]["timestamp"] > 0
@pytest.mark.asyncio
async def test_get_root_history(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """Each confirmed batch update appends one entry to the store's root history."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id1 = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id1)
        key1 = b"a"
        value1 = b"\x01\x02"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
        key2 = b"b"
        value2 = b"\x03\x02"
        changelist.append({"action": "insert", "key": key2.hex(), "value": value2.hex()})
        key3 = b"c"
        value3 = b"\x04\x05"
        changelist.append({"action": "insert", "key": key3.hex(), "value": value3.hex()})
        res = await data_rpc_api.batch_update({"id": store_id1.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        # Entry 0 is the initial (all-zero) root; entry 1 is the first batch update.
        history1 = await data_rpc_api.get_root_history({"id": store_id1.hex()})
        assert len(history1["root_history"]) == 2
        assert history1["root_history"][0]["root_hash"] == bytes32([0] * 32)
        assert history1["root_history"][0]["confirmed"] is True
        assert history1["root_history"][0]["timestamp"] > 0
        assert history1["root_history"][1]["root_hash"] != bytes32([0] * 32)
        assert history1["root_history"][1]["confirmed"] is True
        assert history1["root_history"][1]["timestamp"] > 0
        key4 = b"d"
        value4 = b"\x06\x03"
        changelist = [{"action": "insert", "key": key4.hex(), "value": value4.hex()}]
        key5 = b"e"
        value5 = b"\x07\x01"
        changelist.append({"action": "insert", "key": key5.hex(), "value": value5.hex()})
        res = await data_rpc_api.batch_update({"id": store_id1.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        # Earlier entries are preserved verbatim and timestamps increase monotonically.
        history2 = await data_rpc_api.get_root_history({"id": store_id1.hex()})
        assert len(history2["root_history"]) == 3
        assert history2["root_history"][0]["root_hash"] == bytes32([0] * 32)
        assert history2["root_history"][0]["confirmed"] is True
        assert history2["root_history"][0]["timestamp"] > 0
        assert history2["root_history"][1]["root_hash"] == history1["root_history"][1]["root_hash"]
        assert history2["root_history"][1]["confirmed"] is True
        assert history2["root_history"][1]["timestamp"] > history2["root_history"][0]["timestamp"]
        assert history2["root_history"][2]["confirmed"] is True
        assert history2["root_history"][2]["timestamp"] > history2["root_history"][1]["timestamp"]
@pytest.mark.asyncio
async def test_get_kv_diff(one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path) -> None:
    """get_kv_diff between two root hashes reports the INSERT/DELETE operations separating them."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id1 = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id1)
        key1 = b"a"
        value1 = b"\x01\x02"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key1.hex(), "value": value1.hex()}]
        key2 = b"b"
        value2 = b"\x03\x02"
        changelist.append({"action": "insert", "key": key2.hex(), "value": value2.hex()})
        key3 = b"c"
        value3 = b"\x04\x05"
        changelist.append({"action": "insert", "key": key3.hex(), "value": value3.hex()})
        res = await data_rpc_api.batch_update({"id": store_id1.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        history = await data_rpc_api.get_root_history({"id": store_id1.hex()})
        # Diff from the empty root to the first update: three inserts.
        diff_res = await data_rpc_api.get_kv_diff(
            {
                "id": store_id1.hex(),
                "hash_1": bytes32([0] * 32).hex(),
                "hash_2": history["root_history"][1]["root_hash"].hex(),
            }
        )
        assert len(diff_res["diff"]) == 3
        diff1 = {"type": "INSERT", "key": key1.hex(), "value": value1.hex()}
        diff2 = {"type": "INSERT", "key": key2.hex(), "value": value2.hex()}
        diff3 = {"type": "INSERT", "key": key3.hex(), "value": value3.hex()}
        assert diff1 in diff_res["diff"]
        assert diff2 in diff_res["diff"]
        assert diff3 in diff_res["diff"]
        key4 = b"d"
        value4 = b"\x06\x03"
        changelist = [{"action": "insert", "key": key4.hex(), "value": value4.hex()}]
        key5 = b"e"
        value5 = b"\x07\x01"
        changelist.append({"action": "insert", "key": key5.hex(), "value": value5.hex()})
        changelist.append({"action": "delete", "key": key1.hex()})
        res = await data_rpc_api.batch_update({"id": store_id1.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        history = await data_rpc_api.get_root_history({"id": store_id1.hex()})
        # Diff between the two updated roots: two inserts and one delete.
        diff_res = await data_rpc_api.get_kv_diff(
            {
                "id": store_id1.hex(),
                "hash_1": history["root_history"][1]["root_hash"].hex(),
                "hash_2": history["root_history"][2]["root_hash"].hex(),
            }
        )
        assert len(diff_res["diff"]) == 3
        diff1 = {"type": "DELETE", "key": key1.hex(), "value": value1.hex()}
        diff4 = {"type": "INSERT", "key": key4.hex(), "value": value4.hex()}
        diff5 = {"type": "INSERT", "key": key5.hex(), "value": value5.hex()}
        assert diff4 in diff_res["diff"]
        assert diff5 in diff_res["diff"]
        assert diff1 in diff_res["diff"]
@pytest.mark.asyncio
async def test_batch_update_matches_single_operations(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """A single batch of operations must produce the same root hash as the same
    operations applied one batch at a time."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32(hexstr_to_bytes(res["id"]))
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        # Apply insert a, insert b, insert c, delete c -- one operation per batch.
        key = b"a"
        value = b"\x00\x01"
        changelist: List[Dict[str, str]] = [{"action": "insert", "key": key.hex(), "value": value.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec0 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec0, wallet_rpc_api)
        key_2 = b"b"
        value_2 = b"\x00\x01"
        changelist = [{"action": "insert", "key": key_2.hex(), "value": value_2.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec1 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec1, wallet_rpc_api)
        key_3 = b"c"
        value_3 = b"\x00\x01"
        changelist = [{"action": "insert", "key": key_3.hex(), "value": value_3.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec2 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec2, wallet_rpc_api)
        changelist = [{"action": "delete", "key": key_3.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec3 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec3, wallet_rpc_api)
        # Remember the root produced by the step-by-step sequence.
        root_1 = await data_rpc_api.get_roots({"ids": [store_id.hex()]})
        expected_res_hash = root_1["root_hashes"][0]["hash"]
        assert expected_res_hash != bytes32([0] * 32)
        # Empty the store again so the batched run starts from the same (empty) state.
        changelist = [{"action": "delete", "key": key_2.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec4 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec4, wallet_rpc_api)
        changelist = [{"action": "delete", "key": key.hex()}]
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec5 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec5, wallet_rpc_api)
        root_2 = await data_rpc_api.get_roots({"ids": [store_id.hex()]})
        hash_2 = root_2["root_hashes"][0]["hash"]
        assert hash_2 == bytes32([0] * 32)
        # Now apply the same four operations in a single batch.
        changelist = [{"action": "insert", "key": key.hex(), "value": value.hex()}]
        changelist.append({"action": "insert", "key": key_2.hex(), "value": value_2.hex()})
        changelist.append({"action": "insert", "key": key_3.hex(), "value": value_3.hex()})
        changelist.append({"action": "delete", "key": key_3.hex()})
        res = await data_rpc_api.batch_update({"id": store_id.hex(), "changelist": changelist})
        update_tx_rec6 = res["tx_id"]
        await farm_block_with_spend(full_node_api, ph, update_tx_rec6, wallet_rpc_api)
        root_3 = await data_rpc_api.get_roots({"ids": [store_id.hex()]})
        batch_hash = root_3["root_hashes"][0]["hash"]
        assert batch_hash == expected_res_hash
@pytest.mark.asyncio
async def test_get_owned_stores(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """get_owned_stores lists exactly the launcher ids of the stores this wallet created."""
    [full_node_service], [wallet_service], bt = one_wallet_and_one_simulator_services
    num_blocks = 4
    wallet_node = wallet_service._node
    assert wallet_service.rpc_server is not None
    wallet_rpc_port = wallet_service.rpc_server.listen_port
    full_node_api = full_node_service._api
    await wallet_node.server.start_client(PeerInfo("localhost", uint16(full_node_api.server._port)), None)
    ph = await wallet_node.wallet_state_manager.main_wallet.get_new_puzzlehash()
    for i in range(0, num_blocks):
        await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    # Rewards for heights 1..num_blocks-1; the final block confirms the previous ones.
    funds = sum(
        [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
    )
    await time_out_assert(15, wallet_node.wallet_state_manager.main_wallet.get_confirmed_balance, funds)
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        expected_store_ids = []
        for _ in range(3):
            res = await data_rpc_api.create_data_store({})
            assert res is not None
            launcher_id = bytes32.from_hexstr(res["id"])
            expected_store_ids.append(launcher_id)
        await time_out_assert(4, check_mempool_spend_count, True, full_node_api, 1)
        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        # give the wallet/data layer a moment to observe the confirmations
        await asyncio.sleep(0.5)
        response = await data_rpc_api.get_owned_stores(request={})
        store_ids = sorted(bytes32.from_hexstr(id) for id in response["store_ids"])
        assert store_ids == sorted(expected_store_ids)
@pytest.mark.asyncio
async def test_subscriptions(
    one_wallet_and_one_simulator_services: SimulatorsAndWalletsServices, tmp_path: Path
) -> None:
    """Subscribing and unsubscribing to a store is reflected in the subscriptions list."""
    wallet_rpc_api, full_node_api, wallet_rpc_port, ph, bt = await init_wallet_and_node(
        one_wallet_and_one_simulator_services
    )
    async with init_data_layer(wallet_rpc_port=wallet_rpc_port, bt=bt, db_path=tmp_path) as data_layer:
        data_rpc_api = DataLayerRpcApi(data_layer)
        res = await data_rpc_api.create_data_store({})
        assert res is not None
        store_id = bytes32.from_hexstr(res["id"])
        await farm_block_check_singelton(data_layer, full_node_api, ph, store_id)
        # This tests subscribe/unsubscribe to your own singletons, which isn't quite
        # the same thing as using a different wallet, but makes the tests much simpler
        response = await data_rpc_api.subscribe(request={"id": store_id.hex(), "urls": ["http://127.0.0.1/8000"]})
        assert response is not None
        # test subscriptions
        response = await data_rpc_api.subscriptions(request={})
        assert store_id.hex() in response.get("store_ids", [])
        # test unsubscribe
        response = await data_rpc_api.unsubscribe(request={"id": store_id.hex()})
        assert response is not None
        response = await data_rpc_api.subscriptions(request={})
        assert store_id.hex() not in response.get("store_ids", [])
@dataclass(frozen=True)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/conftest.py | tests/core/data_layer/conftest.py | from __future__ import annotations
import contextlib
import os
import pathlib
import random
import subprocess
import sys
import sysconfig
import time
from typing import Any, AsyncIterable, Awaitable, Callable, Dict, Iterator, List
import pytest
import pytest_asyncio
# https://github.com/pytest-dev/pytest/issues/7469
from _pytest.fixtures import SubRequest
from flax.data_layer.data_layer_util import NodeType, Status
from flax.data_layer.data_store import DataStore
from flax.types.blockchain_format.tree_hash import bytes32
from tests.core.data_layer.util import (
FlaxRoot,
Example,
add_0123_example,
add_01234567_example,
create_valid_node_values,
)
# TODO: These are more general than the data layer and should either move elsewhere or
# be replaced with an existing common approach. For now they can at least be
# shared among the data layer test files.
@pytest.fixture(name="scripts_path", scope="session")
def scripts_path_fixture() -> pathlib.Path:
    """Session-scoped path to the active interpreter's installed-scripts directory."""
    raw = sysconfig.get_path("scripts")
    if raw is None:
        raise Exception("These tests depend on the scripts path existing")
    return pathlib.Path(raw)
@pytest.fixture(name="flax_root", scope="function")
def flax_root_fixture(tmp_path: pathlib.Path, scripts_path: pathlib.Path) -> FlaxRoot:
    """A freshly initialized FLAX_ROOT under tmp_path, configured for INFO logging."""
    root = FlaxRoot(path=tmp_path.joinpath("flax_root"), scripts_path=scripts_path)
    root.run(args=["init"])
    root.run(args=["configure", "--set-log-level", "INFO"])
    return root
@contextlib.contextmanager
def closing_flax_root_popen(flax_root: FlaxRoot, args: List[str]) -> Iterator[None]:
    """Run ``args`` as a child process with FLAX_ROOT pointing at ``flax_root.path``.

    On exit the child is terminated and given ten seconds to die before being
    killed outright.
    """
    env = dict(os.environ)
    env["FLAX_ROOT"] = os.fspath(flax_root.path)
    with subprocess.Popen(args=args, env=env) as process:
        try:
            yield
        finally:
            process.terminate()
            try:
                process.wait(timeout=10)
            except subprocess.TimeoutExpired:
                process.kill()
@pytest.fixture(name="flax_daemon", scope="function")
def flax_daemon_fixture(flax_root: FlaxRoot) -> Iterator[None]:
    """Run the flax daemon as a child process for the duration of the test."""
    with closing_flax_root_popen(flax_root=flax_root, args=[sys.executable, "-m", "flax.daemon.server"]):
        # TODO: this is not pretty as a hard coded time
        # let it settle
        time.sleep(5)
        yield
@pytest.fixture(name="flax_data", scope="function")
def flax_data_fixture(flax_root: FlaxRoot, flax_daemon: None, scripts_path: pathlib.Path) -> Iterator[None]:
    """Run the flax data-layer executable as a child process (requires the daemon)."""
    with closing_flax_root_popen(flax_root=flax_root, args=[os.fspath(scripts_path.joinpath("flax_data_layer"))]):
        # TODO: this is not pretty as a hard coded time
        # let it settle
        time.sleep(5)
        yield
@pytest.fixture(name="create_example", params=[add_0123_example, add_01234567_example])
def create_example_fixture(request: SubRequest) -> Callable[[DataStore, bytes32], Awaitable[Example]]:
    """Parametrizes tests over the two canned example-tree builders."""
    # https://github.com/pytest-dev/pytest/issues/8763
    return request.param # type: ignore[no-any-return]
@pytest.fixture(name="database_uri")
def database_uri_fixture() -> str:
    """A shared-cache in-memory SQLite URI, randomized so each use gets its own database."""
    suffix = random.randint(0, 99999999)
    return f"file:db_{suffix}?mode=memory&cache=shared"
@pytest.fixture(name="tree_id", scope="function")
def tree_id_fixture() -> bytes32:
    """A deterministic 32-byte tree id: b"a tree id" left-padded with dots."""
    label = b"a tree id"
    padding = b"." * (32 - len(label))
    return bytes32(padding + label)
@pytest_asyncio.fixture(name="raw_data_store", scope="function")
async def raw_data_store_fixture(database_uri: str) -> AsyncIterable[DataStore]:
    """An open DataStore backed by the in-memory database URI; closed on teardown."""
    store = await DataStore.create(database=database_uri, uri=True)
    yield store
    await store.close()
@pytest_asyncio.fixture(name="data_store", scope="function")
async def data_store_fixture(raw_data_store: DataStore, tree_id: bytes32) -> AsyncIterable[DataStore]:
    """``raw_data_store`` with a COMMITTED tree created for ``tree_id``.

    Store integrity is checked both before the test runs and after it finishes.
    """
    await raw_data_store.create_tree(tree_id=tree_id, status=Status.COMMITTED)
    await raw_data_store.check()
    yield raw_data_store
    await raw_data_store.check()
@pytest.fixture(name="node_type", params=NodeType)
def node_type_fixture(request: SubRequest) -> NodeType:
    """Parametrizes tests over every NodeType member."""
    return request.param # type: ignore[no-any-return]
@pytest_asyncio.fixture(name="valid_node_values")
async def valid_node_values_fixture(
    data_store: DataStore,
    tree_id: bytes32,
    node_type: NodeType,
) -> Dict[str, Any]:
    """A valid node-values dict of the requested type, with child hashes taken from real stored nodes."""
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
    node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    return create_valid_node_values(node_type=node_type, left_hash=node_a.hash, right_hash=node_b.hash)
@pytest.fixture(name="bad_node_type", params=range(2 * len(NodeType)))
def bad_node_type_fixture(request: SubRequest, valid_node_values: Dict[str, Any]) -> int:
    """An integer that is NOT the node_type of ``valid_node_values``; the valid value is skipped."""
    if request.param == valid_node_values["node_type"]:
        pytest.skip("Actually, this is a valid node type")
    return request.param # type: ignore[no-any-return]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.