repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/config.py | tests/core/data_layer/config.py | from __future__ import annotations
parallel = 4
job_timeout = 40
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/__init__.py | tests/core/data_layer/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/test_data_store_schema.py | tests/core/data_layer/test_data_store_schema.py | from __future__ import annotations
import sqlite3
from typing import Any, Dict
import pytest
from flax.data_layer.data_layer_util import NodeType, Side, Status
from flax.data_layer.data_store import DataStore
from flax.types.blockchain_format.tree_hash import bytes32
from tests.core.data_layer.util import add_01234567_example, create_valid_node_values
pytestmark = pytest.mark.data_layer
@pytest.mark.asyncio
async def test_node_update_fails(data_store: DataStore, tree_id: bytes32) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^updates not allowed to the node table$"):
await writer.execute(
"UPDATE node SET value = :value WHERE hash == :hash",
{
"hash": node.hash,
"value": node.value,
},
)
@pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32}))
@pytest.mark.asyncio
async def test_node_hash_must_be_32(
data_store: DataStore,
tree_id: bytes32,
length: int,
valid_node_values: Dict[str, Any],
) -> None:
valid_node_values["hash"] = bytes([0] * length)
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
valid_node_values,
)
@pytest.mark.asyncio
async def test_node_hash_must_not_be_null(
data_store: DataStore,
tree_id: bytes32,
valid_node_values: Dict[str, Any],
) -> None:
valid_node_values["hash"] = None
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^NOT NULL constraint failed: node.hash$"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
valid_node_values,
)
@pytest.mark.asyncio
async def test_node_type_must_be_valid(
data_store: DataStore,
node_type: NodeType,
bad_node_type: int,
valid_node_values: Dict[str, Any],
) -> None:
valid_node_values["node_type"] = bad_node_type
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
valid_node_values,
)
@pytest.mark.parametrize(argnames="side", argvalues=Side)
@pytest.mark.asyncio
async def test_node_internal_child_not_null(data_store: DataStore, tree_id: bytes32, side: Side) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
values = create_valid_node_values(node_type=NodeType.INTERNAL, left_hash=node_a.hash, right_hash=node_b.hash)
if side == Side.LEFT:
values["left"] = None
elif side == Side.RIGHT:
values["right"] = None
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
values,
)
@pytest.mark.parametrize(argnames="bad_child_hash", argvalues=[b"\x01" * 32, b"\0" * 31, b""])
@pytest.mark.parametrize(argnames="side", argvalues=Side)
@pytest.mark.asyncio
async def test_node_internal_must_be_valid_reference(
data_store: DataStore,
tree_id: bytes32,
bad_child_hash: bytes,
side: Side,
) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
node_a = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
node_b = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
values = create_valid_node_values(node_type=NodeType.INTERNAL, left_hash=node_a.hash, right_hash=node_b.hash)
if side == Side.LEFT:
values["left"] = bad_child_hash
elif side == Side.RIGHT:
values["right"] = bad_child_hash
else:
assert False
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^FOREIGN KEY constraint failed$"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
values,
)
@pytest.mark.parametrize(argnames="key_or_value", argvalues=["key", "value"])
@pytest.mark.asyncio
async def test_node_terminal_key_value_not_null(data_store: DataStore, tree_id: bytes32, key_or_value: str) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = create_valid_node_values(node_type=NodeType.TERMINAL)
values[key_or_value] = None
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
values,
)
@pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32}))
@pytest.mark.asyncio
async def test_root_tree_id_must_be_32(data_store: DataStore, tree_id: bytes32, length: int) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
bad_tree_id = bytes([0] * length)
values = {"tree_id": bad_tree_id, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.PENDING}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.asyncio
async def test_root_tree_id_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {"tree_id": None, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.PENDING}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^NOT NULL constraint failed: root.tree_id$"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.parametrize(argnames="generation", argvalues=[-200, -2, -1])
@pytest.mark.asyncio
async def test_root_generation_must_not_be_less_than_zero(
data_store: DataStore, tree_id: bytes32, generation: int
) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {
"tree_id": bytes32([0] * 32),
"generation": generation,
"node_hash": example.terminal_nodes[0],
"status": Status.PENDING,
}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.asyncio
async def test_root_generation_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {
"tree_id": bytes32([0] * 32),
"generation": None,
"node_hash": example.terminal_nodes[0],
"status": Status.PENDING,
}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^NOT NULL constraint failed: root.generation$"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.asyncio
async def test_root_node_hash_must_reference(data_store: DataStore) -> None:
values = {"tree_id": bytes32([0] * 32), "generation": 0, "node_hash": bytes32([0] * 32), "status": Status.PENDING}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^FOREIGN KEY constraint failed$"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.parametrize(argnames="bad_status", argvalues=sorted(set(range(-20, 20)) - {*Status}))
@pytest.mark.asyncio
async def test_root_status_must_be_valid(data_store: DataStore, tree_id: bytes32, bad_status: int) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {
"tree_id": bytes32([0] * 32),
"generation": 0,
"node_hash": example.terminal_nodes[0],
"status": bad_status,
}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.asyncio
async def test_root_status_must_not_be_null(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {"tree_id": bytes32([0] * 32), "generation": 0, "node_hash": example.terminal_nodes[0], "status": None}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^NOT NULL constraint failed: root.status$"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.asyncio
async def test_root_tree_id_generation_must_be_unique(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_01234567_example(data_store=data_store, tree_id=tree_id)
values = {"tree_id": tree_id, "generation": 0, "node_hash": example.terminal_nodes[0], "status": Status.COMMITTED}
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^UNIQUE constraint failed: root.tree_id, root.generation$"):
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
VALUES(:tree_id, :generation, :node_hash, :status)
""",
values,
)
@pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32}))
@pytest.mark.asyncio
async def test_ancestors_ancestor_must_be_32(
data_store: DataStore,
tree_id: bytes32,
length: int,
) -> None:
async with data_store.db_wrapper.writer() as writer:
node_hash = await data_store._insert_terminal_node(key=b"\x00", value=b"\x01")
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO ancestors(hash, ancestor, tree_id, generation)
VALUES(:hash, :ancestor, :tree_id, :generation)
""",
{"hash": node_hash, "ancestor": bytes([0] * length), "tree_id": bytes32([0] * 32), "generation": 0},
)
@pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32}))
@pytest.mark.asyncio
async def test_ancestors_tree_id_must_be_32(
data_store: DataStore,
tree_id: bytes32,
length: int,
) -> None:
async with data_store.db_wrapper.writer() as writer:
node_hash = await data_store._insert_terminal_node(key=b"\x00", value=b"\x01")
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO ancestors(hash, ancestor, tree_id, generation)
VALUES(:hash, :ancestor, :tree_id, :generation)
""",
{"hash": node_hash, "ancestor": bytes32([0] * 32), "tree_id": bytes([0] * length), "generation": 0},
)
@pytest.mark.parametrize(argnames="length", argvalues=sorted(set(range(50)) - {32}))
@pytest.mark.asyncio
async def test_subscriptions_tree_id_must_be_32(
data_store: DataStore,
tree_id: bytes32,
length: int,
) -> None:
async with data_store.db_wrapper.writer() as writer:
with pytest.raises(sqlite3.IntegrityError, match=r"^CHECK constraint failed:"):
await writer.execute(
"""
INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet)
VALUES(:tree_id, :url, :ignore_till, :num_consecutive_failures, :from_wallet)
""",
{
"tree_id": bytes([0] * length),
"url": "",
"ignore_till": 0,
"num_consecutive_failures": 0,
"from_wallet": False,
},
)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/data_layer/test_data_store.py | tests/core/data_layer/test_data_store.py | from __future__ import annotations
import itertools
import logging
import statistics
from pathlib import Path
from random import Random
from typing import Any, Awaitable, Callable, Dict, List, Set, Tuple
import pytest
from flax.data_layer.data_layer_errors import NodeHashError, TreeGenerationIncrementingError
from flax.data_layer.data_layer_util import (
DiffData,
InternalNode,
NodeType,
OperationType,
ProofOfInclusion,
ProofOfInclusionLayer,
Root,
ServerInfo,
Side,
Status,
Subscription,
TerminalNode,
_debug_dump,
leaf_hash,
)
from flax.data_layer.data_store import DataStore
from flax.data_layer.download_data import (
get_delta_filename,
get_full_tree_filename,
insert_into_data_store_from_file,
is_filename_valid,
write_files_for_root,
)
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.tree_hash import bytes32
from flax.util.byte_types import hexstr_to_bytes
from flax.util.db_wrapper import DBWrapper2
from tests.core.data_layer.util import Example, add_0123_example, add_01234567_example
log = logging.getLogger(__name__)
pytestmark = pytest.mark.data_layer
table_columns: Dict[str, List[str]] = {
"node": ["hash", "node_type", "left", "right", "key", "value"],
"root": ["tree_id", "generation", "node_hash", "status"],
}
# TODO: Someday add tests for malformed DB data to make sure we handle it gracefully
# and with good error messages.
@pytest.mark.asyncio
async def test_valid_node_values_fixture_are_valid(data_store: DataStore, valid_node_values: Dict[str, Any]) -> None:
async with data_store.db_wrapper.writer() as writer:
await writer.execute(
"""
INSERT INTO node(hash, node_type, left, right, key, value)
VALUES(:hash, :node_type, :left, :right, :key, :value)
""",
valid_node_values,
)
@pytest.mark.parametrize(argnames=["table_name", "expected_columns"], argvalues=table_columns.items())
@pytest.mark.asyncio
async def test_create_creates_tables_and_columns(
database_uri: str, table_name: str, expected_columns: List[str]
) -> None:
# Never string-interpolate sql queries... Except maybe in tests when it does not
# allow you to parametrize the query.
query = f"pragma table_info({table_name});"
db_wrapper = await DBWrapper2.create(database=database_uri, uri=True, reader_count=1)
try:
async with db_wrapper.reader() as reader:
cursor = await reader.execute(query)
columns = await cursor.fetchall()
assert columns == []
store = await DataStore.create(database=database_uri, uri=True)
try:
async with db_wrapper.reader() as reader:
cursor = await reader.execute(query)
columns = await cursor.fetchall()
assert [column[1] for column in columns] == expected_columns
finally:
await store.close()
finally:
await db_wrapper.close()
@pytest.mark.asyncio
async def test_create_tree_accepts_bytes32(raw_data_store: DataStore) -> None:
tree_id = bytes32(b"\0" * 32)
await raw_data_store.create_tree(tree_id=tree_id)
@pytest.mark.parametrize(argnames=["length"], argvalues=[[length] for length in [*range(0, 32), *range(33, 48)]])
@pytest.mark.asyncio
async def test_create_tree_fails_for_not_bytes32(raw_data_store: DataStore, length: int) -> None:
bad_tree_id = b"\0" * length
# TODO: require a more specific exception
with pytest.raises(Exception):
# type ignore since we are trying to intentionally pass a bad argument
await raw_data_store.create_tree(tree_id=bad_tree_id) # type: ignore[arg-type]
@pytest.mark.asyncio
async def test_get_trees(raw_data_store: DataStore) -> None:
expected_tree_ids = set()
for n in range(10):
tree_id = bytes32((b"\0" * 31 + bytes([n])))
await raw_data_store.create_tree(tree_id=tree_id)
expected_tree_ids.add(tree_id)
tree_ids = await raw_data_store.get_tree_ids()
assert tree_ids == expected_tree_ids
@pytest.mark.asyncio
async def test_table_is_empty(data_store: DataStore, tree_id: bytes32) -> None:
is_empty = await data_store.table_is_empty(tree_id=tree_id)
assert is_empty
@pytest.mark.asyncio
async def test_table_is_not_empty(data_store: DataStore, tree_id: bytes32) -> None:
key = b"\x01\x02"
value = b"abc"
await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=None,
side=None,
status=Status.COMMITTED,
)
is_empty = await data_store.table_is_empty(tree_id=tree_id)
assert not is_empty
# @pytest.mark.asyncio
# async def test_create_root_provides_bytes32(raw_data_store: DataStore, tree_id: bytes32) -> None:
# await raw_data_store.create_tree(tree_id=tree_id)
# # TODO: catchup with the node_hash=
# root_hash = await raw_data_store.create_root(tree_id=tree_id, node_hash=23)
#
# assert isinstance(root_hash, bytes32)
@pytest.mark.asyncio
async def test_insert_over_empty(data_store: DataStore, tree_id: bytes32) -> None:
key = b"\x01\x02"
value = b"abc"
node_hash = await data_store.insert(key=key, value=value, tree_id=tree_id, reference_node_hash=None, side=None)
assert node_hash == leaf_hash(key=key, value=value)
@pytest.mark.asyncio
async def test_insert_increments_generation(data_store: DataStore, tree_id: bytes32) -> None:
keys = [b"a", b"b", b"c", b"d"] # efghijklmnopqrstuvwxyz")
value = b"\x01\x02\x03"
generations = []
expected = []
node_hash = None
for key, expected_generation in zip(keys, itertools.count(start=1)):
node_hash = await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=node_hash,
side=None if node_hash is None else Side.LEFT,
status=Status.COMMITTED,
)
generation = await data_store.get_tree_generation(tree_id=tree_id)
generations.append(generation)
expected.append(expected_generation)
assert generations == expected
@pytest.mark.asyncio
async def test_insert_internal_node_does_nothing_if_matching(data_store: DataStore, tree_id: bytes32) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
kv_node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
ancestors = await data_store.get_ancestors(node_hash=kv_node.hash, tree_id=tree_id)
parent = ancestors[0]
async with data_store.db_wrapper.reader() as reader:
cursor = await reader.execute("SELECT * FROM node")
before = await cursor.fetchall()
await data_store._insert_internal_node(left_hash=parent.left_hash, right_hash=parent.right_hash)
async with data_store.db_wrapper.reader() as reader:
cursor = await reader.execute("SELECT * FROM node")
after = await cursor.fetchall()
assert after == before
@pytest.mark.asyncio
async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataStore, tree_id: bytes32) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
kv_node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
async with data_store.db_wrapper.reader() as reader:
cursor = await reader.execute("SELECT * FROM node")
before = await cursor.fetchall()
await data_store._insert_terminal_node(key=kv_node.key, value=kv_node.value)
async with data_store.db_wrapper.reader() as reader:
cursor = await reader.execute("SELECT * FROM node")
after = await cursor.fetchall()
assert after == before
@pytest.mark.asyncio
async def test_build_a_tree(
data_store: DataStore,
tree_id: bytes32,
create_example: Callable[[DataStore, bytes32], Awaitable[Example]],
) -> None:
example = await create_example(data_store, tree_id)
await _debug_dump(db=data_store.db_wrapper, description="final")
actual = await data_store.get_tree_as_program(tree_id=tree_id)
# print("actual ", actual.as_python())
# print("expected", example.expected.as_python())
assert actual == example.expected
@pytest.mark.asyncio
async def test_get_node_by_key(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_0123_example(data_store=data_store, tree_id=tree_id)
key_node_hash = example.terminal_nodes[2]
# TODO: make a nicer relationship between the hash and the key
actual = await data_store.get_node_by_key(key=b"\x02", tree_id=tree_id)
assert actual.hash == key_node_hash
@pytest.mark.asyncio
async def test_get_ancestors(data_store: DataStore, tree_id: bytes32) -> None:
example = await add_0123_example(data_store=data_store, tree_id=tree_id)
reference_node_hash = example.terminal_nodes[2]
ancestors = await data_store.get_ancestors(node_hash=reference_node_hash, tree_id=tree_id)
hashes = [node.hash.hex() for node in ancestors]
# TODO: reverify these are correct
assert hashes == [
"3ab212e30b0e746d81a993e39f2cb4ba843412d44b402c1117a500d6451309e3",
"c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2",
]
ancestors_2 = await data_store.get_ancestors_optimized(node_hash=reference_node_hash, tree_id=tree_id)
assert ancestors == ancestors_2
@pytest.mark.asyncio
async def test_get_ancestors_optimized(data_store: DataStore, tree_id: bytes32) -> None:
ancestors: List[Tuple[int, bytes32, List[InternalNode]]] = []
random = Random()
random.seed(100, version=2)
first_insertions = [True, False, True, False, True, True, False, True, False, True, True, False, False, True, False]
deleted_all = False
node_count = 0
for i in range(1000):
is_insert = False
if i <= 14:
is_insert = first_insertions[i]
if i > 14 and i <= 25:
is_insert = True
if i > 25 and i <= 200 and random.randint(0, 4):
is_insert = True
if i > 200:
hint_keys_values = await data_store.get_keys_values_dict(tree_id)
if not deleted_all:
while node_count > 0:
node_count -= 1
seed = bytes32(b"0" * 32)
node_hash = await data_store.get_terminal_node_for_seed(tree_id, seed)
assert node_hash is not None
node = await data_store.get_node(node_hash)
assert isinstance(node, TerminalNode)
await data_store.delete(
key=node.key, tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
)
deleted_all = True
is_insert = True
else:
assert node_count <= 4
if node_count == 0:
is_insert = True
elif node_count < 4 and random.randint(0, 2):
is_insert = True
key = (i % 200).to_bytes(4, byteorder="big")
value = (i % 200).to_bytes(4, byteorder="big")
seed = Program.to((key, value)).get_tree_hash()
node_hash = await data_store.get_terminal_node_for_seed(tree_id, seed)
if is_insert:
node_count += 1
side = None if node_hash is None else data_store.get_side_for_seed(seed)
node_hash = await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=node_hash,
side=side,
use_optimized=False,
status=Status.COMMITTED,
)
if node_hash is not None:
generation = await data_store.get_tree_generation(tree_id=tree_id)
current_ancestors = await data_store.get_ancestors(node_hash=node_hash, tree_id=tree_id)
ancestors.append((generation, node_hash, current_ancestors))
else:
node_count -= 1
assert node_hash is not None
node = await data_store.get_node(node_hash)
assert isinstance(node, TerminalNode)
await data_store.delete(key=node.key, tree_id=tree_id, use_optimized=False, status=Status.COMMITTED)
for generation, node_hash, expected_ancestors in ancestors:
current_ancestors = await data_store.get_ancestors_optimized(
node_hash=node_hash, tree_id=tree_id, generation=generation
)
assert current_ancestors == expected_ancestors
@pytest.mark.asyncio
@pytest.mark.parametrize(
"use_optimized",
[True, False],
)
async def test_batch_update(data_store: DataStore, tree_id: bytes32, use_optimized: bool, tmp_path: Path) -> None:
num_batches = 10
num_ops_per_batch = 100 if use_optimized else 10
saved_roots: List[Root] = []
saved_batches: List[List[Dict[str, Any]]] = []
db_path = tmp_path.joinpath("dl_server_util.sqlite")
single_op_data_store = await DataStore.create(database=db_path)
try:
await single_op_data_store.create_tree(tree_id, status=Status.COMMITTED)
random = Random()
random.seed(100, version=2)
batch: List[Dict[str, Any]] = []
keys: List[bytes] = []
hint_keys_values: Dict[bytes, bytes] = {}
for operation in range(num_batches * num_ops_per_batch):
if random.randint(0, 4) > 0 or len(keys) == 0:
key = operation.to_bytes(4, byteorder="big")
value = (2 * operation).to_bytes(4, byteorder="big")
if use_optimized:
await single_op_data_store.autoinsert(
key=key,
value=value,
tree_id=tree_id,
hint_keys_values=hint_keys_values,
status=Status.COMMITTED,
)
else:
await single_op_data_store.autoinsert(
key=key, value=value, tree_id=tree_id, use_optimized=False, status=Status.COMMITTED
)
batch.append({"action": "insert", "key": key, "value": value})
keys.append(key)
else:
key = random.choice(keys)
keys.remove(key)
if use_optimized:
await single_op_data_store.delete(
key=key, tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
)
else:
await single_op_data_store.delete(
key=key, tree_id=tree_id, use_optimized=False, status=Status.COMMITTED
)
batch.append({"action": "delete", "key": key})
if (operation + 1) % num_ops_per_batch == 0:
saved_batches.append(batch)
batch = []
root = await single_op_data_store.get_tree_root(tree_id=tree_id)
saved_roots.append(root)
finally:
await single_op_data_store.close()
for batch_number, batch in enumerate(saved_batches):
assert len(batch) == num_ops_per_batch
await data_store.insert_batch(tree_id, batch, status=Status.COMMITTED)
root = await data_store.get_tree_root(tree_id)
assert root.generation == batch_number + 1
assert root.node_hash == saved_roots[batch_number].node_hash
assert root.node_hash is not None
queue: List[bytes32] = [root.node_hash]
ancestors: Dict[bytes32, bytes32] = {}
while len(queue) > 0:
node_hash = queue.pop(0)
expected_ancestors = []
ancestor = node_hash
while ancestor in ancestors:
ancestor = ancestors[ancestor]
expected_ancestors.append(ancestor)
result_ancestors = await data_store.get_ancestors_optimized(node_hash, tree_id)
assert [node.hash for node in result_ancestors] == expected_ancestors
node = await data_store.get_node(node_hash)
if isinstance(node, InternalNode):
queue.append(node.left_hash)
queue.append(node.right_hash)
ancestors[node.left_hash] = node_hash
ancestors[node.right_hash] = node_hash
@pytest.mark.asyncio
async def test_ancestor_table_unique_inserts(data_store: DataStore, tree_id: bytes32) -> None:
await add_0123_example(data_store=data_store, tree_id=tree_id)
hash_1 = bytes32.from_hexstr("0763561814685fbf92f6ca71fbb1cb11821951450d996375c239979bd63e9535")
hash_2 = bytes32.from_hexstr("924be8ff27e84cba17f5bc918097f8410fab9824713a4668a21c8e060a8cab40")
await data_store._insert_ancestor_table(hash_1, hash_2, tree_id, 2)
with pytest.raises(Exception):
await data_store._insert_ancestor_table(hash_1, hash_1, tree_id, 2)
await data_store._insert_ancestor_table(hash_1, hash_2, tree_id, 2)
@pytest.mark.asyncio
async def test_get_pairs(
data_store: DataStore,
tree_id: bytes32,
create_example: Callable[[DataStore, bytes32], Awaitable[Example]],
) -> None:
example = await create_example(data_store, tree_id)
pairs = await data_store.get_keys_values(tree_id=tree_id)
assert [node.hash for node in pairs] == example.terminal_nodes
@pytest.mark.asyncio
async def test_get_pairs_when_empty(data_store: DataStore, tree_id: bytes32) -> None:
pairs = await data_store.get_keys_values(tree_id=tree_id)
assert pairs == []
@pytest.mark.parametrize(
argnames=["first_value", "second_value"],
argvalues=[[b"\x06", b"\x06"], [b"\x06", b"\x07"]],
ids=["same values", "different values"],
)
@pytest.mark.asyncio()
async def test_inserting_duplicate_key_fails(
data_store: DataStore,
tree_id: bytes32,
first_value: bytes,
second_value: bytes,
) -> None:
key = b"\x05"
first_hash = await data_store.insert(
key=key,
value=first_value,
tree_id=tree_id,
reference_node_hash=None,
side=None,
)
# TODO: more specific exception
with pytest.raises(Exception):
await data_store.insert(
key=key,
value=second_value,
tree_id=tree_id,
reference_node_hash=first_hash,
side=Side.RIGHT,
)
hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
# TODO: more specific exception
with pytest.raises(Exception):
await data_store.insert(
key=key,
value=second_value,
tree_id=tree_id,
reference_node_hash=first_hash,
side=Side.RIGHT,
hint_keys_values=hint_keys_values,
)
@pytest.mark.asyncio()
async def test_autoinsert_balances_from_scratch(data_store: DataStore, tree_id: bytes32) -> None:
random = Random()
random.seed(100, version=2)
hint_keys_values: Dict[bytes, bytes] = {}
hashes = []
for i in range(2000):
key = (i + 100).to_bytes(4, byteorder="big")
value = (i + 200).to_bytes(4, byteorder="big")
node_hash = await data_store.autoinsert(key, value, tree_id, hint_keys_values, status=Status.COMMITTED)
hashes.append(node_hash)
heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, tree_id)) for node_hash in hashes}
too_tall = {hash: height for hash, height in heights.items() if height > 14}
assert too_tall == {}
assert 11 <= statistics.mean(heights.values()) <= 12
@pytest.mark.asyncio()
async def test_autoinsert_balances_gaps(data_store: DataStore, tree_id: bytes32) -> None:
random = Random()
random.seed(101, version=2)
hint_keys_values: Dict[bytes, bytes] = {}
hashes = []
for i in range(2000):
key = (i + 100).to_bytes(4, byteorder="big")
value = (i + 200).to_bytes(4, byteorder="big")
if i == 0 or i > 10:
node_hash = await data_store.autoinsert(key, value, tree_id, hint_keys_values, status=Status.COMMITTED)
else:
reference_node_hash = await data_store.get_terminal_node_for_seed(tree_id, bytes32([0] * 32))
node_hash = await data_store.insert(
key=key,
value=value,
tree_id=tree_id,
reference_node_hash=reference_node_hash,
side=Side.LEFT,
hint_keys_values=hint_keys_values,
status=Status.COMMITTED,
)
ancestors = await data_store.get_ancestors_optimized(node_hash, tree_id)
assert len(ancestors) == i
hashes.append(node_hash)
heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, tree_id)) for node_hash in hashes}
too_tall = {hash: height for hash, height in heights.items() if height > 14}
assert too_tall == {}
assert 11 <= statistics.mean(heights.values()) <= 12
@pytest.mark.parametrize(
"use_hint",
[True, False],
)
@pytest.mark.asyncio()
async def test_delete_from_left_both_terminal(data_store: DataStore, tree_id: bytes32, use_hint: bool) -> None:
await add_01234567_example(data_store=data_store, tree_id=tree_id)
hint_keys_values = None
if use_hint:
hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
expected = Program.to(
(
(
(
(b"\x00", b"\x10\x00"),
(b"\x01", b"\x11\x01"),
),
(
(b"\x02", b"\x12\x02"),
(b"\x03", b"\x13\x03"),
),
),
(
(b"\x05", b"\x15\x05"),
(
(b"\x06", b"\x16\x06"),
(b"\x07", b"\x17\x07"),
),
),
),
)
await data_store.delete(
key=Program.to(b"\x04"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
)
result = await data_store.get_tree_as_program(tree_id=tree_id)
assert result == expected
@pytest.mark.parametrize(
    "use_hint",
    [True, False],
)
@pytest.mark.asyncio()
async def test_delete_from_left_other_not_terminal(data_store: DataStore, tree_id: bytes32, use_hint: bool) -> None:
    """Deleting 0x04 then 0x05 leaves the internal pair (0x06, 0x07) hoisted to
    where its (now fully removed) sibling subtree used to be.

    Parametrized over the optional key/value hint cache.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
    # Golden expected tree after both deletes.
    expected = Program.to(
        (
            (
                (
                    (b"\x00", b"\x10\x00"),
                    (b"\x01", b"\x11\x01"),
                ),
                (
                    (b"\x02", b"\x12\x02"),
                    (b"\x03", b"\x13\x03"),
                ),
            ),
            (
                (b"\x06", b"\x16\x06"),
                (b"\x07", b"\x17\x07"),
            ),
        ),
    )
    await data_store.delete(
        key=Program.to(b"\x04"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
    )
    await data_store.delete(
        key=Program.to(b"\x05"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
    )
    result = await data_store.get_tree_as_program(tree_id=tree_id)
    assert result == expected
@pytest.mark.parametrize(
    "use_hint",
    [True, False],
)
@pytest.mark.asyncio()
async def test_delete_from_right_both_terminal(data_store: DataStore, tree_id: bytes32, use_hint: bool) -> None:
    """Deleting key 0x03, whose sibling 0x02 is terminal, hoists the sibling
    into the parent's position (mirror case of the left-side test).

    Parametrized over the optional key/value hint cache.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
    # Golden expected tree after the delete.
    expected = Program.to(
        (
            (
                (
                    (b"\x00", b"\x10\x00"),
                    (b"\x01", b"\x11\x01"),
                ),
                (b"\x02", b"\x12\x02"),
            ),
            (
                (
                    (b"\x04", b"\x14\x04"),
                    (b"\x05", b"\x15\x05"),
                ),
                (
                    (b"\x06", b"\x16\x06"),
                    (b"\x07", b"\x17\x07"),
                ),
            ),
        ),
    )
    await data_store.delete(
        key=Program.to(b"\x03"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
    )
    result = await data_store.get_tree_as_program(tree_id=tree_id)
    assert result == expected
@pytest.mark.parametrize(
    "use_hint",
    [True, False],
)
@pytest.mark.asyncio()
async def test_delete_from_right_other_not_terminal(data_store: DataStore, tree_id: bytes32, use_hint: bool) -> None:
    """Deleting 0x03 then 0x02 leaves the internal pair (0x00, 0x01) hoisted to
    where its (now fully removed) sibling subtree used to be.

    Parametrized over the optional key/value hint cache.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    hint_keys_values = None
    if use_hint:
        hint_keys_values = await data_store.get_keys_values_dict(tree_id=tree_id)
    # Golden expected tree after both deletes.
    expected = Program.to(
        (
            (
                (b"\x00", b"\x10\x00"),
                (b"\x01", b"\x11\x01"),
            ),
            (
                (
                    (b"\x04", b"\x14\x04"),
                    (b"\x05", b"\x15\x05"),
                ),
                (
                    (b"\x06", b"\x16\x06"),
                    (b"\x07", b"\x17\x07"),
                ),
            ),
        ),
    )
    await data_store.delete(
        key=Program.to(b"\x03"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
    )
    await data_store.delete(
        key=Program.to(b"\x02"), tree_id=tree_id, hint_keys_values=hint_keys_values, status=Status.COMMITTED
    )
    result = await data_store.get_tree_as_program(tree_id=tree_id)
    assert result == expected
@pytest.mark.asyncio
async def test_proof_of_inclusion_by_hash(data_store: DataStore, tree_id: bytes32) -> None:
    """A proof of inclusion contains the expected sibling side, sibling hash, combined
    hash, key, value, and root hash values.

    The layer hashes below are golden values for the fixed 0..7 example tree,
    making this a regression check on proof construction.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    root = await data_store.get_tree_root(tree_id=tree_id)
    assert root.node_hash is not None
    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
    # Diagnostic output kept to aid debugging when the golden values drift.
    print(node)
    await _debug_dump(db=data_store.db_wrapper)
    expected_layers = [
        ProofOfInclusionLayer(
            other_hash_side=Side.RIGHT,
            other_hash=bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
            combined_hash=bytes32.fromhex("36cb1fc56017944213055da8cb0178fb0938c32df3ec4472f5edf0dff85ba4a3"),
        ),
        ProofOfInclusionLayer(
            other_hash_side=Side.RIGHT,
            other_hash=bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
            combined_hash=bytes32.fromhex("5f67a0ab1976e090b834bf70e5ce2a0f0a9cd474e19a905348c44ae12274d30b"),
        ),
        ProofOfInclusionLayer(
            other_hash_side=Side.LEFT,
            other_hash=bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
            combined_hash=bytes32.fromhex("7a5193a4e31a0a72f6623dfeb2876022ab74a48abb5966088a1c6f5451cc5d81"),
        ),
    ]
    assert proof == ProofOfInclusion(node_hash=node.hash, layers=expected_layers)
@pytest.mark.asyncio
async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, tree_id: bytes32) -> None:
    """A node that is itself the root yields a proof of inclusion with no layers."""
    await data_store.autoinsert(key=b"\x04", value=b"\x03", tree_id=tree_id, status=Status.COMMITTED)

    tree_root = await data_store.get_tree_root(tree_id=tree_id)
    assert tree_root.node_hash is not None

    sole_node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=sole_node.hash, tree_id=tree_id)

    empty_proof = ProofOfInclusion(node_hash=sole_node.hash, layers=[])
    assert proof == empty_proof
@pytest.mark.asyncio
async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, tree_id: bytes32) -> None:
    """The proof of inclusion program has the expected Python equivalence.

    `as_program()` is expected to yield [key, [sibling hashes leaf-side first]];
    the hashes are golden values for the fixed 0..7 example tree.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
    assert proof.as_program() == [
        b"\x04",
        [
            bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
            bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
            bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
        ],
    ]
@pytest.mark.asyncio
async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, tree_id: bytes32) -> None:
    """Requesting a proof by node hash and by key must produce identical proofs."""
    await add_01234567_example(data_store=data_store, tree_id=tree_id)

    target = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    via_hash = await data_store.get_proof_of_inclusion_by_hash(node_hash=target.hash, tree_id=tree_id)
    via_key = await data_store.get_proof_of_inclusion_by_key(key=b"\x04", tree_id=tree_id)

    assert via_hash == via_key
@pytest.mark.asyncio
async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, tree_id: bytes32) -> None:
    """The proof of inclusion provided by the data store is able to be converted to a
    program and subsequently to bytes.

    The expected value is the golden serialization of the proof program for the
    fixed 0..7 example tree.
    """
    await add_01234567_example(data_store=data_store, tree_id=tree_id)
    node = await data_store.get_node_by_key(key=b"\x04", tree_id=tree_id)
    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, tree_id=tree_id)
    expected = (
        b"\xff\x04\xff\xff\xa0\xfbf\xfeS\x9b>\xb2\x02\r\xfb\xfa\xdf\xd6\x01\xfa1\x85!)"
        b"+A\xf0L W\xc1o\xcak\x94|\xa1\xff\xa0m:\xf8\xd9=\xb9H\xe8\xb6\xaaC\x86\x95"
        b"\x8e\x13|k\xe8\xba\xb7&\xdb\x86x\x95\x94\xb3X\x8b5\xad\xcd\xff\xa0\xc8R\xec"
        b"\xd8\xfbaT\x9a\nB\xf9\xeb\x9d\xdee\xe6\xc9J\x01\x93M\xbd\x9c\x1d5\xab\x94"
        b"\xe2\xa0\xaeX\xe2\x80\x80"
    )
    assert bytes(proof.as_program()) == expected
# @pytest.mark.asyncio
# async def test_create_first_pair(data_store: DataStore, tree_id: bytes) -> None:
# key = SExp.to([1, 2])
# value = SExp.to(b'abc')
#
# root_hash = await data_store.create_root(tree_id=tree_id)
#
#
# await data_store.create_pair(key=key, value=value)
def test_all_checks_collected() -> None:
    """Every `_check_*` callable defined on DataStore is registered in DataStore._checks."""
    collected = set()
    for attribute_name, attribute in vars(DataStore).items():
        if attribute_name.startswith("_check_") and callable(attribute):
            collected.add(attribute)
    assert set(DataStore._checks) == collected
# Shared fixture values for the `_check_*` validation tests below.
a_bytes_32 = bytes32(range(32))  # deterministic 32-byte value 0x00..0x1f
another_bytes_32 = bytes(reversed(a_bytes_32))  # same bytes reversed, guaranteed distinct from a_bytes_32
valid_program_hex = Program.to((b"abc", 2)).as_bin().hex()  # hex of a well-formed serialized Program
invalid_program_hex = b"\xab\xcd".hex()  # hex bytes intended (per the name) not to deserialize as a Program
@pytest.mark.asyncio
async def test_check_roots_are_incrementing_missing_zero(raw_data_store: DataStore) -> None:
    """If a tree's root generations start at 1 instead of 0, the incrementing-roots
    check raises TreeGenerationIncrementingError naming the offending tree id.
    """
    tree_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
    async with raw_data_store.db_wrapper.writer() as writer:
        # Insert generations 1..4, deliberately omitting generation 0.
        for generation in range(1, 5):
            await writer.execute(
                """
                INSERT INTO root(tree_id, generation, node_hash, status)
                VALUES(:tree_id, :generation, :node_hash, :status)
                """,
                {
                    "tree_id": tree_id,
                    "generation": generation,
                    "node_hash": None,
                    "status": Status.COMMITTED.value,
                },
            )
    # The error message must list the broken tree id (indented, at end of message).
    with pytest.raises(
        TreeGenerationIncrementingError,
        match=r"\n +c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd$",
    ):
        await raw_data_store._check_roots_are_incrementing()
@pytest.mark.asyncio
async def test_check_roots_are_incrementing_gap(raw_data_store: DataStore) -> None:
tree_id = hexstr_to_bytes("c954ab71ffaf5b0f129b04b35fdc7c84541f4375167e730e2646bfcfdb7cf2cd")
async with raw_data_store.db_wrapper.writer() as writer:
for generation in [*range(5), *range(6, 10)]:
await writer.execute(
"""
INSERT INTO root(tree_id, generation, node_hash, status)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/ssl/test_ssl.py | tests/core/ssl/test_ssl.py | import asyncio
import aiohttp
import pytest
from flax.protocols.shared_protocol import protocol_version, capabilities
from flax.server.outbound_message import NodeType
from flax.server.server import FlaxServer, ssl_context_for_client
from flax.server.ssl_context import flax_ssl_ca_paths, private_ssl_ca_paths
from flax.server.ws_connection import WSFlaxConnection
from flax.ssl.create_ssl import generate_ca_signed_cert
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
async def establish_connection(server: FlaxServer, self_hostname: str, ssl_context) -> None:
    """Open a websocket to *server* with *ssl_context* and perform the Flax handshake.

    Used by the TLS tests below: success means the server accepted the client
    certificate; TLS rejection surfaces as an aiohttp client exception.
    """
    timeout = aiohttp.ClientTimeout(total=10)
    dummy_port = 5  # this does not matter
    async with aiohttp.ClientSession(timeout=timeout) as session:
        incoming_queue: asyncio.Queue = asyncio.Queue()
        url = f"wss://{self_hostname}:{server._port}/ws"
        ws = await session.ws_connect(url, autoclose=False, autoping=True, ssl=ssl_context)
        # NOTE(review): positional arguments below mirror the WSFlaxConnection
        # constructor — confirm their meaning against its signature if changing.
        wsc = WSFlaxConnection(
            NodeType.FULL_NODE,
            ws,
            server._port,
            server.log,
            True,
            False,
            self_hostname,
            incoming_queue,
            lambda x, y: x,
            None,
            100,
            30,
            local_capabilities_for_handshake=capabilities,
        )
        await wsc.perform_handshake(server._network_id, protocol_version, dummy_port, NodeType.FULL_NODE)
class TestSSL:
    """Certificate-validation tests: which CA-signed client certificates each
    service type (full node, farmer, wallet, harvester, introducer, timelord)
    accepts or rejects.
    """

    @pytest.mark.asyncio
    async def test_public_connections(self, wallet_node_sim_and_wallet, self_hostname):
        """A wallet connects to a full node using the default client setup."""
        full_nodes, wallets, _ = wallet_node_sim_and_wallet
        full_node_api = full_nodes[0]
        server_1: FlaxServer = full_node_api.full_node.server
        wallet_node, server_2 = wallets[0]

        success = await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
        assert success is True

    @pytest.mark.asyncio
    async def test_farmer(self, farmer_one_harvester, self_hostname):
        """Farmer accepts private-CA-signed certs; public (flax) CA certs are rejected."""
        _, farmer_service, bt = farmer_one_harvester
        farmer_api = farmer_service._api
        farmer_server = farmer_api.farmer.server
        ca_private_crt_path, ca_private_key_path = private_ssl_ca_paths(bt.root_path, bt.config)
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(bt.root_path, bt.config)
        # Create valid cert (valid meaning signed with private CA)
        priv_crt = farmer_server.root_path / "valid.crt"
        priv_key = farmer_server.root_path / "valid.key"
        generate_ca_signed_cert(
            ca_private_crt_path.read_bytes(),
            ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(ca_private_crt_path, ca_private_key_path, priv_crt, priv_key)
        await establish_connection(farmer_server, self_hostname, ssl_context)

        # Create not authenticated cert
        pub_crt = farmer_server.root_path / "non_valid.crt"
        pub_key = farmer_server.root_path / "non_valid.key"
        generate_ca_signed_cert(flax_ca_crt_path.read_bytes(), flax_ca_key_path.read_bytes(), pub_crt, pub_key)
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        # Public-CA trust chain on the client: TLS verification fails client-side.
        with pytest.raises(aiohttp.ClientConnectorCertificateError):
            await establish_connection(farmer_server, self_hostname, ssl_context)
        # Private-CA trust chain but public cert: server drops the connection.
        ssl_context = ssl_context_for_client(ca_private_crt_path, ca_private_key_path, pub_crt, pub_key)
        with pytest.raises(aiohttp.ServerDisconnectedError):
            await establish_connection(farmer_server, self_hostname, ssl_context)

    @pytest.mark.asyncio
    async def test_full_node(self, wallet_node_sim_and_wallet, self_hostname):
        """Full node accepts public (flax CA) certificates for p2p connections."""
        full_nodes, wallets, bt = wallet_node_sim_and_wallet
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.full_node.server
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(bt.root_path, bt.config)

        # Create not authenticated cert
        pub_crt = full_node_server.root_path / "p2p.crt"
        pub_key = full_node_server.root_path / "p2p.key"
        generate_ca_signed_cert(
            flax_ca_crt_path.read_bytes(),
            flax_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        await establish_connection(full_node_server, self_hostname, ssl_context)

    @pytest.mark.asyncio
    async def test_wallet(self, wallet_node_sim_and_wallet, self_hostname):
        """Wallet rejects all incoming connections, regardless of certificate."""
        full_nodes, wallets, bt = wallet_node_sim_and_wallet
        wallet_node, wallet_server = wallets[0]
        ca_private_crt_path, ca_private_key_path = private_ssl_ca_paths(bt.root_path, bt.config)
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(bt.root_path, bt.config)
        # Wallet should not accept incoming connections
        pub_crt = wallet_server.root_path / "p2p.crt"
        pub_key = wallet_server.root_path / "p2p.key"
        generate_ca_signed_cert(flax_ca_crt_path.read_bytes(), flax_ca_key_path.read_bytes(), pub_crt, pub_key)
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(wallet_server, self_hostname, ssl_context)

        # Not even signed by private cert
        priv_crt = wallet_server.root_path / "valid.crt"
        priv_key = wallet_server.root_path / "valid.key"
        generate_ca_signed_cert(
            ca_private_crt_path.read_bytes(),
            ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(ca_private_crt_path, ca_private_key_path, priv_crt, priv_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(wallet_server, self_hostname, ssl_context)

    @pytest.mark.asyncio
    async def test_harvester(self, farmer_one_harvester, self_hostname):
        """Harvester rejects all incoming connections, regardless of certificate."""
        harvesters, _, bt = farmer_one_harvester
        harvester_server = harvesters[0]._server
        ca_private_crt_path, ca_private_key_path = private_ssl_ca_paths(bt.root_path, bt.config)
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(bt.root_path, bt.config)

        # harvester should not accept incoming connections
        pub_crt = harvester_server.root_path / "p2p.crt"
        pub_key = harvester_server.root_path / "p2p.key"
        generate_ca_signed_cert(
            flax_ca_crt_path.read_bytes(),
            flax_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(harvester_server, self_hostname, ssl_context)

        # Not even signed by private cert
        priv_crt = harvester_server.root_path / "valid.crt"
        priv_key = harvester_server.root_path / "valid.key"
        generate_ca_signed_cert(
            ca_private_crt_path.read_bytes(),
            ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(ca_private_crt_path, ca_private_key_path, priv_crt, priv_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(harvester_server, self_hostname, ssl_context)

    @pytest.mark.asyncio
    async def test_introducer(self, introducer_service, self_hostname):
        """Introducer accepts public (flax CA) certificates."""
        introducer_server = introducer_service._node.server
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(introducer_service.root_path, introducer_service.config)

        # Create not authenticated cert
        pub_crt = introducer_server.root_path / "p2p.crt"
        pub_key = introducer_server.root_path / "p2p.key"
        generate_ca_signed_cert(
            flax_ca_crt_path.read_bytes(),
            flax_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        await establish_connection(introducer_server, self_hostname, ssl_context)

    @pytest.mark.asyncio
    async def test_timelord(self, timelord_service, self_hostname):
        """Timelord rejects all incoming connections, regardless of certificate."""
        timelord_server = timelord_service._node.server
        ca_private_crt_path, ca_private_key_path = private_ssl_ca_paths(
            timelord_service.root_path, timelord_service.config
        )
        flax_ca_crt_path, flax_ca_key_path = flax_ssl_ca_paths(timelord_service.root_path, timelord_service.config)

        # timelord should not accept incoming connections
        pub_crt = timelord_server.root_path / "p2p.crt"
        pub_key = timelord_server.root_path / "p2p.key"
        generate_ca_signed_cert(
            flax_ca_crt_path.read_bytes(),
            flax_ca_key_path.read_bytes(),
            pub_crt,
            pub_key,
        )
        ssl_context = ssl_context_for_client(flax_ca_crt_path, flax_ca_key_path, pub_crt, pub_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(timelord_server, self_hostname, ssl_context)

        # Not even signed by private cert
        priv_crt = timelord_server.root_path / "valid.crt"
        priv_key = timelord_server.root_path / "valid.key"
        generate_ca_signed_cert(
            ca_private_crt_path.read_bytes(),
            ca_private_key_path.read_bytes(),
            priv_crt,
            priv_key,
        )
        ssl_context = ssl_context_for_client(ca_private_crt_path, ca_private_key_path, priv_crt, priv_key)
        with pytest.raises(aiohttp.ClientConnectorError):
            await establish_connection(timelord_server, self_hostname, ssl_context)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/ssl/config.py | tests/core/ssl/config.py | from __future__ import annotations
# Per-directory test-runner settings — presumably consumed by the repo's CI
# test harness; confirm against the runner before changing.
parallel = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/ssl/__init__.py | tests/core/ssl/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/consensus/config.py | tests/core/consensus/config.py | from __future__ import annotations
parallel = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/consensus/__init__.py | tests/core/consensus/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/consensus/test_pot_iterations.py | tests/core/consensus/test_pot_iterations.py | from __future__ import annotations
from pytest import raises
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.consensus.pos_quality import _expected_plot_size
from flax.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_iters,
is_overflow_block,
)
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint64
test_constants = DEFAULT_CONSTANTS.replace(**{"NUM_SPS_SUB_SLOT": 32, "SUB_SLOT_TIME_TARGET": 300})
class TestPotIterations:
    """Unit tests for proof-of-time iteration calculations."""

    def test_is_overflow_block(self):
        """Indices 29-31 overflow (with NUM_SPS_SUB_SLOT=32); 32 is out of range."""
        assert not is_overflow_block(test_constants, uint8(27))
        assert not is_overflow_block(test_constants, uint8(28))
        assert is_overflow_block(test_constants, uint8(29))
        assert is_overflow_block(test_constants, uint8(30))
        assert is_overflow_block(test_constants, uint8(31))
        with raises(ValueError):
            assert is_overflow_block(test_constants, uint8(32))

    def test_calculate_sp_iters(self):
        """Signage point index 32 is rejected; 31 is the maximum valid index."""
        ssi: uint64 = uint64(100001 * 64 * 4)
        with raises(ValueError):
            calculate_sp_iters(test_constants, ssi, uint8(32))
        calculate_sp_iters(test_constants, ssi, uint8(31))

    def test_calculate_ip_iters(self):
        """Validates index/required_iters bounds and the ip_iters formula,
        including wrap-around (mod ssi) for overflow blocks.
        """
        ssi: uint64 = uint64(100001 * 64 * 4)
        sp_interval_iters = ssi // test_constants.NUM_SPS_SUB_SLOT
        with raises(ValueError):
            # Invalid signage point index
            calculate_ip_iters(test_constants, ssi, uint8(123), uint64(100000))
        sp_iters = sp_interval_iters * 13
        with raises(ValueError):
            # required_iters too high
            # NOTE(review): sp_interval_iters is passed where a uint8 signage point
            # index is expected, so the ValueError may actually come from the index
            # check rather than the required_iters check — confirm intent.
            calculate_ip_iters(test_constants, ssi, sp_interval_iters, sp_interval_iters)
        with raises(ValueError):
            # required_iters too high
            calculate_ip_iters(test_constants, ssi, sp_interval_iters, sp_interval_iters * 12)
        with raises(ValueError):
            # required_iters too low (0)
            calculate_ip_iters(test_constants, ssi, sp_interval_iters, uint64(0))
        required_iters = sp_interval_iters - 1
        ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
        assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
        required_iters = uint64(1)
        ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
        assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
        required_iters = uint64(int(ssi * 4 / 300))
        ip_iters = calculate_ip_iters(test_constants, ssi, uint8(13), required_iters)
        assert ip_iters == sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
        assert sp_iters < ip_iters
        # Overflow
        sp_iters = sp_interval_iters * (test_constants.NUM_SPS_SUB_SLOT - 1)
        ip_iters = calculate_ip_iters(
            test_constants,
            ssi,
            uint8(test_constants.NUM_SPS_SUB_SLOT - 1),
            required_iters,
        )
        assert ip_iters == (sp_iters + test_constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters) % ssi
        assert sp_iters > ip_iters

    def test_win_percentage(self):
        """
        Tests that the percentage of blocks won is proportional to the space of each farmer,
        with the assumption that all farmers have access to the same VDF speed.
        """
        farmer_ks = {
            uint8(32): 100,
            uint8(33): 100,
            uint8(34): 100,
            uint8(35): 100,
            uint8(36): 100,
        }
        farmer_space = {k: _expected_plot_size(uint8(k)) * count for k, count in farmer_ks.items()}
        total_space = sum(farmer_space.values())
        percentage_space = {k: float(sp / total_space) for k, sp in farmer_space.items()}
        wins = {k: 0 for k in farmer_ks.keys()}
        total_slots = 50
        num_sps = 16
        sp_interval_iters = uint64(100000000 // 32)
        difficulty = uint64(500000000000)
        for slot_index in range(total_slots):
            total_wins_in_slot = 0  # NOTE(review): accumulated but never read — possibly leftover debugging
            for sp_index in range(num_sps):
                sp_hash = std_hash(slot_index.to_bytes(4, "big") + sp_index.to_bytes(4, "big"))
                for k, count in farmer_ks.items():
                    for farmer_index in range(count):
                        # bytes(farmer_index) yields farmer_index zero bytes, so each
                        # farmer gets a distinct quality input of distinct length.
                        quality = std_hash(slot_index.to_bytes(4, "big") + k.to_bytes(1, "big") + bytes(farmer_index))
                        required_iters = calculate_iterations_quality(2**25, quality, k, difficulty, sp_hash)
                        if required_iters < sp_interval_iters:
                            wins[k] += 1
                            total_wins_in_slot += 1
        win_percentage = {k: wins[k] / sum(wins.values()) for k in farmer_ks.keys()}
        for k in farmer_ks.keys():
            # Win rate is proportional to percentage of space
            assert abs(win_percentage[k] - percentage_space[k]) < 0.01
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/server/test_dos.py | tests/core/server/test_dos.py | # flake8: noqa: F811, F401
import asyncio
import logging
import pytest
from aiohttp import ClientSession, ClientTimeout, ServerDisconnectedError, WSCloseCode, WSMessage, WSMsgType
from flax.full_node.full_node_api import FullNodeAPI
from flax.protocols import full_node_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.shared_protocol import Handshake
from flax.server.outbound_message import make_msg, Message
from flax.server.rate_limits import RateLimiter
from flax.server.ws_connection import WSFlaxConnection
from flax.types.peer_info import PeerInfo
from flax.util.errors import Err
from flax.util.ints import uint16, uint64
from flax.simulator.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
async def get_block_path(full_node: FullNodeAPI):
    """Return the full blocks from genesis (height 0) up to the current peak, in order."""
    blocks_list = [await full_node.full_node.blockchain.get_full_peak()]
    assert blocks_list[0] is not None
    # Walk backwards via prev_header_hash, prepending so the list stays genesis -> peak.
    while blocks_list[0].height != 0:
        b = await full_node.full_node.block_store.get_full_block(blocks_list[0].prev_header_hash)
        assert b is not None
        blocks_list.insert(0, b)
    return blocks_list
class FakeRateLimiter:
    """Stand-in rate limiter that never blocks a message.

    Swapped in for a connection's outbound limiter so the tests can flood the
    peer and exercise its inbound rate limits.
    """

    def process_msg_and_check(self, msg, capa, capb):
        # Accept every message unconditionally.
        return True
class TestDos:
    """Denial-of-service protections: oversized messages, bad handshakes, and
    rate-limit violations must close the connection and ban the peer.
    """

    @pytest.mark.asyncio
    async def test_large_message_disconnect_and_ban(self, setup_two_nodes_fixture, self_hostname):
        """A 60 MiB message closes the socket with MESSAGE_TOO_BIG and the peer stays banned."""
        nodes, _, _ = setup_two_nodes_fixture
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server

        # Use the server_2 ssl information to connect to server_1, and send a huge message
        timeout = ClientTimeout(total=10)
        session = ClientSession(timeout=timeout)
        url = f"wss://{self_hostname}:{server_1._port}/ws"
        ssl_context = server_2.ssl_client_context
        ws = await session.ws_connect(
            url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
        )
        assert not ws.closed
        await ws.close()
        assert ws.closed

        ws = await session.ws_connect(
            url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
        )
        assert not ws.closed
        large_msg: bytes = bytes([0] * (60 * 1024 * 1024))
        await ws.send_bytes(large_msg)
        response: WSMessage = await ws.receive()
        print(response)
        assert response.type == WSMsgType.CLOSE
        assert response.data == WSCloseCode.MESSAGE_TOO_BIG
        await ws.close()

        # Now test that the ban is active
        await asyncio.sleep(5)
        assert ws.closed
        # Either the server closes immediately or refuses the connection outright.
        try:
            ws = await session.ws_connect(
                url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
            )
            response: WSMessage = await ws.receive()
            assert response.type == WSMsgType.CLOSE
        except ServerDisconnectedError:
            pass
        await session.close()

    @pytest.mark.asyncio
    async def test_bad_handshake_and_ban(self, setup_two_nodes_fixture, self_hostname):
        """Garbage bytes instead of a handshake cause PROTOCOL_ERROR and a temporary ban."""
        nodes, _, _ = setup_two_nodes_fixture
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server
        server_1.invalid_protocol_ban_seconds = 10  # shorten the ban so expiry can be tested

        # Use the server_2 ssl information to connect to server_1, and send a huge message
        timeout = ClientTimeout(total=10)
        session = ClientSession(timeout=timeout)
        url = f"wss://{self_hostname}:{server_1._port}/ws"
        ssl_context = server_2.ssl_client_context
        ws = await session.ws_connect(
            url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
        )
        await ws.send_bytes(bytes([1] * 1024))

        response: WSMessage = await ws.receive()
        print(response)
        assert response.type == WSMsgType.CLOSE
        assert response.data == WSCloseCode.PROTOCOL_ERROR
        await ws.close()

        # Now test that the ban is active
        await asyncio.sleep(5)
        assert ws.closed
        try:
            ws = await session.ws_connect(
                url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
            )
            response: WSMessage = await ws.receive()
            assert response.type == WSMsgType.CLOSE
        except ServerDisconnectedError:
            pass
        await asyncio.sleep(6)

        # Ban expired
        await session.ws_connect(
            url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
        )
        await session.close()

    @pytest.mark.asyncio
    async def test_invalid_protocol_handshake(self, setup_two_nodes_fixture, self_hostname):
        """A structurally valid handshake with a bad message type yields INVALID_HANDSHAKE."""
        nodes, _, _ = setup_two_nodes_fixture
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server
        server_1.invalid_protocol_ban_seconds = 10

        # Use the server_2 ssl information to connect to server_1
        timeout = ClientTimeout(total=10)
        session = ClientSession(timeout=timeout)
        url = f"wss://{self_hostname}:{server_1._port}/ws"
        ssl_context = server_2.ssl_client_context
        ws = await session.ws_connect(
            url, autoclose=True, autoping=True, heartbeat=60, ssl=ssl_context, max_msg_size=100 * 1024 * 1024
        )

        # Construct an otherwise valid handshake message
        handshake: Handshake = Handshake("test", "0.0.32", "1.0.0.0", 3456, 1, [(1, "1")])
        outbound_handshake: Message = Message(2, None, bytes(handshake))  # 2 is an invalid ProtocolType
        await ws.send_bytes(bytes(outbound_handshake))

        response: WSMessage = await ws.receive()
        print(response)
        assert response.type == WSMsgType.CLOSE
        assert response.data == WSCloseCode.PROTOCOL_ERROR
        assert response.extra == str(int(Err.INVALID_HANDSHAKE.value))  # We want INVALID_HANDSHAKE and not UNKNOWN
        await ws.close()
        await session.close()
        await asyncio.sleep(1)  # give some time for cleanup to work

    @pytest.mark.asyncio
    async def test_spam_tx(self, setup_two_nodes_fixture, self_hostname):
        """Flooding new_transaction messages past the inbound limit disconnects and bans."""
        nodes, _, _ = setup_two_nodes_fixture
        full_node_1, full_node_2 = nodes
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        assert len(server_1.all_connections) == 1

        ws_con: WSFlaxConnection = list(server_1.all_connections.values())[0]
        ws_con_2: WSFlaxConnection = list(server_2.all_connections.values())[0]

        # Fake the peer host so the ban can be asserted against a known address.
        ws_con.peer_host = "1.2.3.4"
        ws_con_2.peer_host = "1.2.3.4"

        new_tx_message = make_msg(
            ProtocolMessageTypes.new_transaction,
            full_node_protocol.NewTransaction(bytes([9] * 32), uint64(0), uint64(0)),
        )
        for i in range(4000):
            await ws_con._send_message(new_tx_message)

        await asyncio.sleep(1)
        assert not ws_con.closed

        # Tests outbound rate limiting, we will not send too much data
        for i in range(2000):
            await ws_con._send_message(new_tx_message)

        await asyncio.sleep(1)
        assert not ws_con.closed

        # Remove outbound rate limiter to test inbound limits
        ws_con.outbound_rate_limiter = RateLimiter(incoming=True, percentage_of_limit=10000)

        with pytest.raises(ConnectionResetError):
            for i in range(6000):
                await ws_con._send_message(new_tx_message)
                await asyncio.sleep(0)
        await asyncio.sleep(1)

        def is_closed():
            return ws_con.closed

        await time_out_assert(15, is_closed)

        assert ws_con.closed

        def is_banned():
            return "1.2.3.4" in server_2.banned_peers

        await time_out_assert(15, is_banned)

    @pytest.mark.asyncio
    async def test_spam_message_non_tx(self, setup_two_nodes_fixture, self_hostname):
        """Non-transaction messages have much tighter limits; a handful too many bans the peer."""
        nodes, _, _ = setup_two_nodes_fixture
        full_node_1, full_node_2 = nodes
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        assert len(server_1.all_connections) == 1

        ws_con: WSFlaxConnection = list(server_1.all_connections.values())[0]
        ws_con_2: WSFlaxConnection = list(server_2.all_connections.values())[0]

        ws_con.peer_host = "1.2.3.4"
        ws_con_2.peer_host = "1.2.3.4"

        def is_closed():
            return ws_con.closed

        new_message = make_msg(
            ProtocolMessageTypes.request_mempool_transactions,
            full_node_protocol.RequestMempoolTransactions(bytes([])),
        )
        for i in range(2):
            await ws_con._send_message(new_message)
        await asyncio.sleep(1)
        assert not ws_con.closed

        # Tests outbound rate limiting, we will not send too much data
        for i in range(10):
            await ws_con._send_message(new_message)

        await asyncio.sleep(1)
        assert not ws_con.closed

        # Remove outbound rate limiter to test inbound limits
        ws_con.outbound_rate_limiter = RateLimiter(incoming=True, percentage_of_limit=10000)

        for i in range(6):
            await ws_con._send_message(new_message)
        await time_out_assert(15, is_closed)

        # Banned
        def is_banned():
            return "1.2.3.4" in server_2.banned_peers

        await time_out_assert(15, is_banned)

    @pytest.mark.asyncio
    async def test_spam_message_too_large(self, setup_two_nodes_fixture, self_hostname):
        """A single over-limit (5 MiB) non-tx message disconnects and bans the peer."""
        nodes, _, _ = setup_two_nodes_fixture
        full_node_1, full_node_2 = nodes
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        assert len(server_1.all_connections) == 1

        ws_con: WSFlaxConnection = list(server_1.all_connections.values())[0]
        ws_con_2: WSFlaxConnection = list(server_2.all_connections.values())[0]

        ws_con.peer_host = "1.2.3.4"
        ws_con_2.peer_host = "1.2.3.4"

        def is_closed():
            return ws_con.closed

        new_message = make_msg(
            ProtocolMessageTypes.request_mempool_transactions,
            full_node_protocol.RequestMempoolTransactions(bytes([0] * 5 * 1024 * 1024)),
        )
        # Tests outbound rate limiting, we will not send big messages
        await ws_con._send_message(new_message)

        await asyncio.sleep(1)
        assert not ws_con.closed

        # Remove outbound rate limiter to test inbound limits
        ws_con.outbound_rate_limiter = FakeRateLimiter()

        await ws_con._send_message(new_message)
        await time_out_assert(15, is_closed)

        # Banned
        def is_banned():
            return "1.2.3.4" in server_2.banned_peers

        await time_out_assert(15, is_banned)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/server/test_rate_limits.py | tests/core/server/test_rate_limits.py | import asyncio
from typing import List
import pytest
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.shared_protocol import Capability
from flax.server.outbound_message import make_msg
from flax.server.rate_limit_numbers import get_rate_limits_to_use, rate_limits as rl_numbers, compose_rate_limits
from flax.server.rate_limits import RateLimiter
from flax.server.server import FlaxServer
from flax.server.ws_connection import WSFlaxConnection
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
from tests.conftest import node_with_params
from tests.setup_nodes import test_constants
# Short alias used by the tests in this module.
constants = test_constants

# Capability sets: rl_v2 advertises RATE_LIMITS_V2, rl_v1 only the base set.
rl_v2 = [Capability.BASE, Capability.BLOCK_HEADERS, Capability.RATE_LIMITS_V2]
rl_v1 = [Capability.BASE]
# Second alias of the node fixture so two nodes can be parametrized independently.
node_with_params_b = node_with_params
# Accumulates one entry per parametrized run of test_different_versions.
test_different_versions_results: List[int] = []


class TestRateLimits:
    """Unit and integration tests for the per-connection RateLimiter."""
@pytest.mark.asyncio
async def test_get_rate_limits_to_use(self):
    """The V2 table applies only when both sides advertise RATE_LIMITS_V2."""
    both_v2 = get_rate_limits_to_use(rl_v2, rl_v2)
    both_v1 = get_rate_limits_to_use(rl_v1, rl_v1)
    mixed_a = get_rate_limits_to_use(rl_v2, rl_v1)
    mixed_b = get_rate_limits_to_use(rl_v1, rl_v2)
    # Mixed connections fall back to the V1 table in either direction.
    assert both_v2 != mixed_a
    assert both_v1 == mixed_a
    assert both_v1 == mixed_b
@pytest.mark.asyncio
async def test_too_many_messages(self):
    """Exceeding the per-period message count must eventually be rejected."""
    # Transaction-class message: high frequency cap (first 4999 all pass).
    limiter = RateLimiter(incoming=True)
    tx_msg = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 40))
    assert all(limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2) for _ in range(4999))
    # Another 4999 sends must include at least one rejection.
    verdicts = [limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2) for _ in range(4999)]
    assert not all(verdicts)

    # Non-transaction message: much lower frequency cap.
    limiter = RateLimiter(incoming=True)
    peak_msg = make_msg(ProtocolMessageTypes.new_peak, bytes([1] * 40))
    assert all(limiter.process_msg_and_check(peak_msg, rl_v2, rl_v2) for _ in range(200))
    verdicts = [limiter.process_msg_and_check(peak_msg, rl_v2, rl_v2) for _ in range(200)]
    assert not all(verdicts)
@pytest.mark.asyncio
async def test_large_message(self):
    """Single messages above the per-message size cap are rejected outright."""
    small_tx = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    big_tx = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 3 * 1024 * 1024))
    limiter = RateLimiter(incoming=True)
    assert limiter.process_msg_and_check(small_tx, rl_v2, rl_v2)
    assert not limiter.process_msg_and_check(big_tx, rl_v2, rl_v2)

    small_vdf = make_msg(ProtocolMessageTypes.respond_signage_point, bytes([1] * 5 * 1024))
    big_vdf = make_msg(ProtocolMessageTypes.respond_signage_point, bytes([1] * 600 * 1024))
    limiter = RateLimiter(incoming=True)
    # Two small signage points pass; the oversized one of the same type does not.
    for _ in range(2):
        assert limiter.process_msg_and_check(small_vdf, rl_v2, rl_v2)
    assert not limiter.process_msg_and_check(big_vdf, rl_v2, rl_v2)
@pytest.mark.asyncio
async def test_too_much_data(self):
    """Aggregate data volume caps kick in even when each message is small enough."""

    def fill_then_overflow(limiter, msg, ok_count, extra_count):
        # The first ok_count sends must all pass; report whether any of the
        # following extra_count sends was rejected.
        for _ in range(ok_count):
            assert limiter.process_msg_and_check(msg, rl_v2, rl_v2)
        verdicts = [limiter.process_msg_and_check(msg, rl_v2, rl_v2) for _ in range(extra_count)]
        return not all(verdicts)

    tx_msg = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    assert fill_then_overflow(RateLimiter(incoming=True), tx_msg, 40, 300)

    block_msg = make_msg(ProtocolMessageTypes.respond_block, bytes([1] * 1024 * 1024))
    assert fill_then_overflow(RateLimiter(incoming=True), block_msg, 10, 40)
@pytest.mark.asyncio
async def test_non_tx_aggregate_limits(self):
    """Non-tx messages share aggregate frequency and size budgets across types."""
    # Shared frequency budget: two message types fill it, a third is rejected.
    limiter = RateLimiter(incoming=True)
    coin_update = make_msg(ProtocolMessageTypes.coin_state_update, bytes([1] * 32))
    blocks_req = make_msg(ProtocolMessageTypes.request_blocks, bytes([1] * 64))
    sync_start = make_msg(ProtocolMessageTypes.plot_sync_start, bytes([1] * 64))
    for _ in range(500):
        assert limiter.process_msg_and_check(coin_update, rl_v2, rl_v2)
    for _ in range(500):
        assert limiter.process_msg_and_check(blocks_req, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(sync_start, rl_v2, rl_v2) for _ in range(500)]
    assert not all(verdicts)

    # Shared size budget: two 49 MiB responses fit, further ones are rejected.
    limiter = RateLimiter(incoming=True)
    pow_resp = make_msg(ProtocolMessageTypes.respond_proof_of_weight, bytes([1] * 49 * 1024 * 1024))
    blocks_resp = make_msg(ProtocolMessageTypes.respond_blocks, bytes([1] * 49 * 1024 * 1024))
    for _ in range(2):
        assert limiter.process_msg_and_check(pow_resp, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(blocks_resp, rl_v2, rl_v2) for _ in range(2)]
    assert not all(verdicts)
@pytest.mark.asyncio
async def test_periodic_reset(self):
    """Budgets reset after the (shortened, 5-second) accounting period."""
    # Data-volume budget.
    limiter = RateLimiter(True, 5)
    tx_msg = make_msg(ProtocolMessageTypes.respond_transaction, bytes([1] * 500 * 1024))
    for _ in range(10):
        assert limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2) for _ in range(300)]
    assert not all(verdicts)
    assert not limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2)
    # Once the period elapses the same message passes again.
    await asyncio.sleep(6)
    assert limiter.process_msg_and_check(tx_msg, rl_v2, rl_v2)

    # Message-count budget resets the same way.
    limiter = RateLimiter(True, 5)
    new_tx = make_msg(ProtocolMessageTypes.new_transaction, bytes([1] * 40))
    for _ in range(4999):
        assert limiter.process_msg_and_check(new_tx, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(new_tx, rl_v2, rl_v2) for _ in range(4999)]
    assert not all(verdicts)
    await asyncio.sleep(6)
    assert limiter.process_msg_and_check(new_tx, rl_v2, rl_v2)
@pytest.mark.asyncio
async def test_percentage_limits(self):
    """percentage_of_limit=40 shrinks every budget to 40% of its normal value."""
    # Per-type frequency at 40%.
    limiter = RateLimiter(True, 60, 40)
    peak_msg = make_msg(ProtocolMessageTypes.new_peak, bytes([1] * 40))
    for _ in range(50):
        assert limiter.process_msg_and_check(peak_msg, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(peak_msg, rl_v2, rl_v2) for _ in range(50)]
    assert not all(verdicts)

    # Per-type data volume at 40%.
    limiter = RateLimiter(True, 60, 40)
    block_msg = make_msg(ProtocolMessageTypes.respond_block, bytes([1] * 1024 * 1024))
    for _ in range(5):
        assert limiter.process_msg_and_check(block_msg, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(block_msg, rl_v2, rl_v2) for _ in range(5)]
    assert not all(verdicts)

    # Aggregate non-tx message count at 40%.
    limiter = RateLimiter(True, 60, 40)
    coin_update = make_msg(ProtocolMessageTypes.coin_state_update, bytes([1] * 5))
    blocks_req = make_msg(ProtocolMessageTypes.request_blocks, bytes([1] * 32))
    sync_start = make_msg(ProtocolMessageTypes.plot_sync_start, bytes([1] * 32))
    for _ in range(180):
        assert limiter.process_msg_and_check(coin_update, rl_v2, rl_v2)
    for _ in range(180):
        assert limiter.process_msg_and_check(blocks_req, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(sync_start, rl_v2, rl_v2) for _ in range(100)]
    assert not all(verdicts)

    # Aggregate non-tx total size at 40%.
    limiter = RateLimiter(True, 60, 40)
    pow_resp = make_msg(ProtocolMessageTypes.respond_proof_of_weight, bytes([1] * 18 * 1024 * 1024))
    blocks_resp = make_msg(ProtocolMessageTypes.respond_blocks, bytes([1] * 24 * 1024 * 1024))
    for _ in range(2):
        assert limiter.process_msg_and_check(pow_resp, rl_v2, rl_v2)
    verdicts = [limiter.process_msg_and_check(blocks_resp, rl_v2, rl_v2) for _ in range(2)]
    assert not all(verdicts)
@pytest.mark.asyncio
async def test_too_many_outgoing_messages(self):
    """Outgoing limiter throttles one message type without blocking others."""
    limiter = RateLimiter(incoming=False)
    peers_msg = make_msg(ProtocolMessageTypes.respond_peers, bytes([1]))
    non_tx_freq = get_rate_limits_to_use(rl_v2, rl_v2)["non_tx_freq"]

    verdicts = [limiter.process_msg_and_check(peers_msg, rl_v2, rl_v2) for _ in range(non_tx_freq)]
    passed = sum(verdicts)  # True counts as 1
    blocked = verdicts.count(False)
    assert passed == 10
    assert blocked == non_tx_freq - passed

    # ensure that *another* message type is not blocked because of this
    sigs_msg = make_msg(ProtocolMessageTypes.respond_signatures, bytes([1]))
    assert limiter.process_msg_and_check(sigs_msg, rl_v2, rl_v2)
@pytest.mark.asyncio
async def test_too_many_incoming_messages(self):
    """Incoming limiter hitting the non-tx cap blocks other message types too."""
    limiter = RateLimiter(incoming=True)
    peers_msg = make_msg(ProtocolMessageTypes.respond_peers, bytes([1]))
    non_tx_freq = get_rate_limits_to_use(rl_v2, rl_v2)["non_tx_freq"]

    verdicts = [limiter.process_msg_and_check(peers_msg, rl_v2, rl_v2) for _ in range(non_tx_freq)]
    passed = sum(verdicts)  # True counts as 1
    blocked = verdicts.count(False)
    assert passed == 10
    assert blocked == non_tx_freq - passed

    # ensure that other message types *are* blocked because of this
    sigs_msg = make_msg(ProtocolMessageTypes.respond_signatures, bytes([1]))
    assert not limiter.process_msg_and_check(sigs_msg, rl_v2, rl_v2)
@pytest.mark.parametrize(
    "node_with_params",
    [
        dict(
            disable_capabilities=[Capability.BLOCK_HEADERS, Capability.RATE_LIMITS_V2],
        ),
        dict(
            disable_capabilities=[],
        ),
    ],
    indirect=True,
)
@pytest.mark.parametrize(
    "node_with_params_b",
    [
        dict(
            disable_capabilities=[Capability.BLOCK_HEADERS, Capability.RATE_LIMITS_V2],
        ),
        dict(
            disable_capabilities=[],
        ),
    ],
    indirect=True,
)
@pytest.mark.asyncio
async def test_different_versions(self, node_with_params, node_with_params_b, self_hostname):
    """Connect two nodes under every V1/V2 capability combination.

    Both ends of a connection must agree on the rate-limit table, and across
    the four parametrized runs at least two distinct tables must appear
    (proving the V2 table is actually selected when both sides support it).
    """
    node_a = node_with_params
    node_b = node_with_params_b

    full_node_server_a: FlaxServer = node_a.full_node.server
    full_node_server_b: FlaxServer = node_b.full_node.server

    await full_node_server_b.start_client(PeerInfo(self_hostname, uint16(full_node_server_a._port)), None)

    assert len(full_node_server_b.get_connections()) == 1
    assert len(full_node_server_a.get_connections()) == 1

    a_con: WSFlaxConnection = full_node_server_a.get_connections()[0]
    b_con: WSFlaxConnection = full_node_server_b.get_connections()[0]

    print(a_con.local_capabilities, a_con.peer_capabilities)
    print(b_con.local_capabilities, b_con.peer_capabilities)

    # The two nodes will use the same rate limits even if their versions are different
    assert get_rate_limits_to_use(a_con.local_capabilities, a_con.peer_capabilities) == get_rate_limits_to_use(
        b_con.local_capabilities, b_con.peer_capabilities
    )

    # The following code checks whether all of the runs resulted in the same number of items in "rate_limits_tx",
    # which would mean the same rate limits are always used. This should not happen, since two nodes with V2
    # will use V2.
    total_tx_msg_count = len(
        get_rate_limits_to_use(a_con.local_capabilities, a_con.peer_capabilities)["rate_limits_tx"]
    )
    # Accumulated in a module-level list; checked once all four runs executed.
    test_different_versions_results.append(total_tx_msg_count)
    if len(test_different_versions_results) >= 4:
        assert len(set(test_different_versions_results)) >= 2
@pytest.mark.asyncio
async def test_compose(self):
    """compose_rate_limits prefers the V2 entry for a message type and falls
    back to the V1 entry for types V2 does not override.

    Fixes the original test: its final two assertions re-checked ``rl_1``
    instead of the composed table, byte-duplicating the preconditions above
    and leaving the V1-fallback path completely unverified.
    """
    rl_1 = rl_numbers[1]
    rl_2 = rl_numbers[2]

    # Preconditions on the raw tables: respond_children moves from the
    # "other" bucket (v1) to the "tx" bucket (v2) ...
    assert ProtocolMessageTypes.respond_children in rl_1["rate_limits_other"]
    assert ProtocolMessageTypes.respond_children not in rl_1["rate_limits_tx"]
    assert ProtocolMessageTypes.respond_children not in rl_2["rate_limits_other"]
    assert ProtocolMessageTypes.respond_children in rl_2["rate_limits_tx"]

    # ... while request_block exists only in v1's "other" bucket.
    assert ProtocolMessageTypes.request_block in rl_1["rate_limits_other"]
    assert ProtocolMessageTypes.request_block not in rl_1["rate_limits_tx"]
    assert ProtocolMessageTypes.request_block not in rl_2["rate_limits_other"]
    assert ProtocolMessageTypes.request_block not in rl_2["rate_limits_tx"]

    comps = compose_rate_limits(rl_1, rl_2)

    # v2 limits are used if present
    assert ProtocolMessageTypes.respond_children not in comps["rate_limits_other"]
    assert ProtocolMessageTypes.respond_children in comps["rate_limits_tx"]
    # Otherwise, fall back to v1: request_block must be carried over from v1
    # into the composed table (the original asserts checked rl_1 here).
    assert ProtocolMessageTypes.request_block in comps["rate_limits_other"]
    assert ProtocolMessageTypes.request_block not in comps["rate_limits_tx"]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/server/config.py | tests/core/server/config.py | from __future__ import annotations
# NOTE(review): presumably read by the test-runner/CI config to request the
# blocks-and-plots fixture checkout for this directory — confirm against the
# workflow that consumes these per-directory config.py files.
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/server/__init__.py | tests/core/server/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/server/test_upnp.py | tests/core/server/test_upnp.py | from __future__ import annotations
def test_miniupnpc_imports_successfully() -> None:
    """The optional UPnP dependency must be importable on this platform."""
    import miniupnpc as upnp_module

    # Reference the module so linters do not flag the import as unused.
    assert upnp_module is not None
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_generator_tools.py | tests/core/full_node/test_generator_tools.py | from __future__ import annotations
from typing import List
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.spend_bundle_conditions import Spend, SpendBundleConditions
from flax.util.generator_tools import tx_removals_and_additions
from flax.util.hash import std_hash
from flax.util.ints import uint32, uint64
# Ten deterministic 32-byte hashes derived from small integers, reused as
# coin ids and puzzle hashes below.
coin_ids = [std_hash(i.to_bytes(4, "big")) for i in range(10)]
phs = [std_hash(i.to_bytes(4, "big")) for i in range(10)]

# Two spends, each creating three coins (5th positional arg is the
# create_coin list, accessed as spend.create_coin in the test below).
# NOTE(review): the b"1" * 300 third elements look like hint/memo payloads
# attached to the created coins — confirm against Spend's field definitions.
spends: List[Spend] = [
    Spend(
        coin_ids[0],
        phs[0],
        None,
        uint64(5),
        [
            (phs[2], uint64(123), b""),
            (phs[3], uint64(0), b"1" * 300),
            (phs[4], uint64(0), b"1" * 300),
        ],
        [],
    ),
    Spend(
        coin_ids[1],
        phs[0],
        None,
        uint64(2),
        [
            (phs[5], uint64(123), b""),
            (phs[6], uint64(0), b"1" * 300),
            (phs[7], uint64(0), b"1" * 300),
        ],
        [],
    ),
]
def test_tx_removals_and_additions() -> None:
    """Removals are the spent coin ids; additions are the created coins, in order."""
    conditions = SpendBundleConditions(spends, uint64(0), uint32(0), uint64(0), [], uint64(0))

    want_removals = [coin_ids[0], coin_ids[1]]
    # One Coin per create_coin entry, preserving spend order then entry order.
    want_additions = [
        Coin(bytes32(spend.coin_id), bytes32(puzzle_hash), uint64(amount))
        for spend in spends
        for puzzle_hash, amount, _ in spend.create_coin
    ]

    removals, additions = tx_removals_and_additions(conditions)
    assert removals == want_removals
    assert additions == want_additions
def test_empty_conditions() -> None:
    """With no conditions there are no removals and no additions."""
    removals, additions = tx_removals_and_additions(None)
    assert removals == []
    assert additions == []
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_transactions.py | tests/core/full_node/test_transactions.py | from __future__ import annotations
import asyncio
from secrets import token_bytes
from typing import Optional
import pytest
from flax.consensus.block_record import BlockRecord
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.full_node.full_node_api import FullNodeAPI
from flax.protocols import full_node_protocol
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16, uint32
class TestTransactions:
    """Integration tests for transaction propagation across simulated nodes and wallets."""

    @pytest.mark.asyncio
    async def test_wallet_coinbase(self, wallet_node_sim_and_wallet, self_hostname):
        """Farm blocks to the wallet's puzzle hash and verify it sees the rewards."""
        num_blocks = 5
        full_nodes, wallets, _ = wallet_node_sim_and_wallet
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        for i in range(num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        # Rewards for heights 1..num_blocks-1 only; presumably the last block's
        # rewards are not yet confirmed — TODO confirm against the simulator.
        funds = sum(
            [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
        )
        # funds += calculate_base_farmer_reward(0)
        await asyncio.sleep(2)
        print(await wallet.get_confirmed_balance(), funds)
        await time_out_assert(20, wallet.get_confirmed_balance, funds)
@pytest.mark.asyncio
async def test_tx_propagation(self, three_nodes_two_wallets, self_hostname):
    """Push a transaction from wallet 0 and verify it propagates along a
    three-node chain into every mempool, then confirms on-chain for wallet 1."""
    num_blocks = 5
    full_nodes, wallets, _ = three_nodes_two_wallets
    wallet_0, wallet_server_0 = wallets[0]
    wallet_1, wallet_server_1 = wallets[1]
    full_node_api_0 = full_nodes[0]
    server_0 = full_node_api_0.server
    full_node_api_1 = full_nodes[1]
    server_1 = full_node_api_1.server
    full_node_api_2 = full_nodes[2]
    server_2 = full_node_api_2.server

    ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()
    ph1 = await wallet_1.wallet_state_manager.main_wallet.get_new_puzzlehash()

    #
    # wallet0 <-> sever0 <-> server1 <-> server2 <-> wallet1
    #
    await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
    await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
    await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)
    await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)

    # Give wallet 0 some farmed funds to spend.
    for i in range(num_blocks):
        await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))
    funds = sum(
        [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
    )
    await time_out_assert(20, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)

    async def peak_height(fna: FullNodeAPI):
        # Helper for time_out_assert: current peak height, or -1 before genesis.
        peak: Optional[BlockRecord] = fna.full_node.blockchain.get_peak()
        if peak is None:
            return -1
        peak_height = peak.height
        return peak_height

    # Wait for the farmed blocks to sync across the other two nodes.
    await time_out_assert(20, peak_height, num_blocks, full_node_api_1)
    await time_out_assert(20, peak_height, num_blocks, full_node_api_2)

    # Send 10 units to wallet 1; the spend bundle must reach every mempool.
    tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, ph1, 0)
    await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)

    await time_out_assert(
        10,
        full_node_api_0.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_1.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_2.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )

    # Farm another block
    for i in range(1, 8):
        await full_node_api_1.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
    funds = sum(
        [
            calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
            for i in range(1, num_blocks + 1)
        ]
    )
    print(f"Funds: {funds}")
    # Sender's balance drops by the sent amount; receiver sees the 10 units.
    await time_out_assert(
        10,
        wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance,
        (funds - 10),
    )
    await time_out_assert(20, wallet_1.wallet_state_manager.main_wallet.get_confirmed_balance, 10)
@pytest.mark.asyncio
async def test_mempool_tx_sync(self, three_nodes_two_wallets, self_hostname):
    """A node that was disconnected when a transaction entered the mempool
    must receive that transaction once it later joins the network."""
    num_blocks = 5
    full_nodes, wallets, _ = three_nodes_two_wallets
    wallet_0, wallet_server_0 = wallets[0]
    full_node_api_0 = full_nodes[0]
    server_0 = full_node_api_0.server
    full_node_api_1 = full_nodes[1]
    server_1 = full_node_api_1.server
    full_node_api_2 = full_nodes[2]
    server_2 = full_node_api_2.server

    ph = await wallet_0.wallet_state_manager.main_wallet.get_new_puzzlehash()

    # wallet0 <-> sever0 <-> server1
    await wallet_server_0.start_client(PeerInfo(self_hostname, uint16(server_0._port)), None)
    await server_0.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)

    for i in range(num_blocks):
        await full_node_api_0.farm_new_transaction_block(FarmNewBlockProtocol(ph))

    # Bring node 2 up to the same chain out-of-band (it is not connected yet).
    all_blocks = await full_node_api_0.get_all_full_blocks()
    for block in all_blocks:
        await full_node_api_2.full_node.respond_block(full_node_protocol.RespondBlock(block))

    funds = sum(
        [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]
    )
    await time_out_assert(20, wallet_0.wallet_state_manager.main_wallet.get_confirmed_balance, funds)

    tx = await wallet_0.wallet_state_manager.main_wallet.generate_signed_transaction(10, token_bytes(), 0)
    await wallet_0.wallet_state_manager.main_wallet.push_transaction(tx)

    # Connected nodes see the spend bundle; the disconnected node does not.
    await time_out_assert(
        10,
        full_node_api_0.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_1.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_2.full_node.mempool_manager.get_spendbundle,
        None,
        tx.name,
    )

    # make a final connection.
    # wallet0 <-> sever0 <-> server1 <-> server2
    await server_1.start_client(PeerInfo(self_hostname, uint16(server_2._port)), None)

    # After connecting, node 2's mempool catches up as well.
    await time_out_assert(
        10,
        full_node_api_0.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_1.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
    await time_out_assert(
        10,
        full_node_api_2.full_node.mempool_manager.get_spendbundle,
        tx.spend_bundle,
        tx.name,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_peer_store_resolver.py | tests/core/full_node/test_peer_store_resolver.py | from __future__ import annotations
import os
from pathlib import Path
from typing import Dict
from flax.server.peer_store_resolver import PeerStoreResolver
class TestPeerStoreResolver:
    """Tests for PeerStoreResolver's peers-file / legacy-db path resolution."""

    def test_resolve_unmodified_legacy_peer_db_path(self, tmp_path: Path):
        """
        With only the legacy "peer_db_path" key set, peers.dat is resolved
        into the legacy db's directory.
        """
        cfg: Dict[str, str] = {"peer_db_path": "db/peer_table_node.sqlite"}
        resolver = PeerStoreResolver(
            tmp_path,
            cfg,
            selected_network="mainnet",
            peers_file_path_key="peers_file_path",
            legacy_peer_db_path_key="peer_db_path",
            default_peers_file_path="db/peers.dat",
        )
        # peers.dat lands next to the legacy db
        assert resolver.peers_file_path == tmp_path / Path("db/peers.dat")
        # the resolved value is written back into the config
        assert cfg["peers_file_path"] == os.fspath(Path("db/peers.dat"))
        # the legacy key is left untouched
        assert cfg["peer_db_path"] == "db/peer_table_node.sqlite"
def test_resolve_modified_legacy_peer_db_path(self, tmp_path: Path):
    """
    A user-customized legacy "peer_db_path" still determines where peers.dat
    goes: the resolver reuses the legacy db's directory.
    """
    cfg: Dict[str, str] = {"peer_db_path": "some/modified/db/path/peer_table_node.sqlite"}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat shares the legacy db's directory
    assert resolver.peers_file_path == tmp_path / Path("some/modified/db/path/peers.dat")
    # the resolved value is written back into the config
    assert cfg["peers_file_path"] == os.fspath(Path("some/modified/db/path/peers.dat"))
    # the legacy key is left untouched
    assert cfg["peer_db_path"] == "some/modified/db/path/peer_table_node.sqlite"
def test_resolve_default_peers_file_path(self, tmp_path: Path):
    """An explicit peers_file_path in the config is used as-is."""
    cfg: Dict[str, str] = {"peers_file_path": "db/peers.dat"}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat resolves to the configured location
    assert resolver.peers_file_path == tmp_path / Path("db/peers.dat")
    # the config value is preserved
    assert cfg["peers_file_path"] == "db/peers.dat"
    # no legacy peer_db_path key is introduced
    assert "peer_db_path" not in cfg
def test_resolve_modified_peers_file_path(self, tmp_path: Path):
    """A user-customized peers_file_path in the config is used as-is."""
    cfg: Dict[str, str] = {"peers_file_path": "some/modified/db/path/peers.dat"}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat resolves to the configured location
    assert resolver.peers_file_path == tmp_path / Path("some/modified/db/path/peers.dat")
    # the config value is preserved
    assert cfg["peers_file_path"] == "some/modified/db/path/peers.dat"
    # no legacy peer_db_path key is introduced
    assert "peer_db_path" not in cfg
def test_resolve_both_peers_file_path_and_legacy_peer_db_path_exist(self, tmp_path: Path):
    """
    When both the legacy peer_db_path and peers_file_path are configured,
    peers_file_path wins.
    """
    cfg: Dict[str, str] = {
        "peer_db_path": "db/peer_table_node.sqlite",
        "peers_file_path": "db/peers.dat",
    }
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat resolves to the explicitly configured location
    assert resolver.peers_file_path == tmp_path / Path("db/peers.dat")
    # the config value is preserved
    assert cfg["peers_file_path"] == "db/peers.dat"
    # the legacy key is left untouched
    assert cfg["peer_db_path"] == "db/peer_table_node.sqlite"
def test_resolve_modified_both_peers_file_path_and_legacy_peer_db_path_exist(self, tmp_path: Path):
    """
    When both keys carry user-modified values, the peers_file_path value
    still wins over the legacy peer_db_path.
    """
    cfg: Dict[str, str] = {
        "peer_db_path": "some/modified/db/path/peer_table_node.sqlite",
        "peers_file_path": "some/modified/db/path/peers.dat",
    }
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat resolves to the explicitly configured location
    assert resolver.peers_file_path == tmp_path / Path("some/modified/db/path/peers.dat")
    # the config value is preserved
    assert cfg["peers_file_path"] == "some/modified/db/path/peers.dat"
    # the legacy key is left untouched
    assert cfg["peer_db_path"] == "some/modified/db/path/peer_table_node.sqlite"
def test_resolve_missing_keys(self, tmp_path: Path):
    """
    With neither peer_db_path nor peers_file_path configured, the default
    peers_file_path is used.
    """
    cfg: Dict[str, str] = {}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # peers.dat resolves to the default location
    assert resolver.peers_file_path == tmp_path / Path("db/peers.dat")
    # the resolved value is written back into the config
    assert cfg["peers_file_path"] == os.fspath(Path("db/peers.dat"))
    # no legacy peer_db_path key is introduced
    assert "peer_db_path" not in cfg
def test_resolve_with_testnet(self, tmp_path: Path):
    """
    On a non-mainnet network the resolved filename embeds the network name.
    """
    cfg: Dict[str, str] = {}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="testnet123",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # the filename carries the testnet suffix
    assert resolver.peers_file_path == tmp_path / Path("db/peers_testnet123.dat")
    # the resolved value is written back into the config
    assert cfg["peers_file_path"] == os.fspath(Path("db/peers_testnet123.dat"))
    # no legacy peer_db_path key is introduced
    assert "peer_db_path" not in cfg
def test_resolve_default_legacy_db_path(self, tmp_path: Path):
    """
    A configured peer_db_path is exposed unchanged as legacy_peer_db_path.
    """
    cfg: Dict[str, str] = {"peer_db_path": "db/peer_table_node.sqlite"}
    resolver = PeerStoreResolver(
        tmp_path,
        cfg,
        selected_network="mainnet",
        peers_file_path_key="peers_file_path",
        legacy_peer_db_path_key="peer_db_path",
        default_peers_file_path="db/peers.dat",
    )
    # the legacy db path resolves under the root
    assert resolver.legacy_peer_db_path == tmp_path / Path("db/peer_table_node.sqlite")
    # the config value is preserved
    assert cfg["peer_db_path"] == "db/peer_table_node.sqlite"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_full_node.py | tests/core/full_node/test_full_node.py | import asyncio
import dataclasses
import random
import time
from secrets import token_bytes
from typing import Dict, Optional, List
import pytest
from blspy import G2Element, AugSchemeMPL, PrivateKey
from clvm.casts import int_to_bytes
from flax.consensus.pot_iterations import is_overflow_block
from flax.full_node.bundle_tools import detect_potential_template_generator
from flax.full_node.full_node_api import FullNodeAPI
from flax.full_node.signage_point import SignagePoint
from flax.protocols import full_node_protocol as fnp, full_node_protocol, wallet_protocol
from flax.protocols import timelord_protocol
from flax.protocols.full_node_protocol import RespondTransaction
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.wallet_protocol import SendTransaction, TransactionAck
from flax.server.address_manager import AddressManager
from flax.server.outbound_message import Message, NodeType
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.vdf import CompressibleVDFField, VDFProof
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.full_block import FullBlock
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.types.peer_info import PeerInfo, TimestampedPeerInfo
from flax.types.spend_bundle import SpendBundle
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.errors import Err
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint16, uint32, uint64
from flax.util.recursive_replace import recursive_replace
from flax.util.vdf_prover import get_vdf_info_and_proof
from flax.util.errors import ConsensusError
from flax.wallet.transaction_record import TransactionRecord
from flax.simulator.block_tools import get_signage_point
from tests.blockchain.blockchain_test_utils import (
_validate_and_add_block,
_validate_and_add_block_no_error,
)
from tests.util.wallet_is_synced import wallet_is_synced
from tests.connection_utils import add_dummy_connection, connect_and_get_peer
from tests.core.full_node.stores.test_coin_store import get_future_reward_coins
from tests.core.full_node.test_mempool_performance import wallet_height_at_least
from tests.core.make_block_generator import make_spend_bundle
from tests.core.node_height import node_height_at_least
from tests.setup_nodes import test_constants
from flax.simulator.time_out_assert import time_out_assert, time_out_assert_custom_interval, time_out_messages
from flax.types.blockchain_format.foliage import Foliage, TransactionsInfo, FoliageTransactionBlock
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
async def new_transaction_not_requested(incoming, new_spend):
    """Return True iff no request_transaction for new_spend arrived within ~3 seconds.

    Drains the incoming message queue; stops early (returning False) as soon as a
    matching request_transaction message is seen.
    """
    await asyncio.sleep(3)
    while not incoming.empty():
        message, _sender = await incoming.get()
        # Skip anything that is not a request_transaction protocol message.
        if message is None or not isinstance(message, Message):
            continue
        if message.type != ProtocolMessageTypes.request_transaction.value:
            continue
        parsed = full_node_protocol.RequestTransaction.from_bytes(message.data)
        if parsed.transaction_id == new_spend.transaction_id:
            return False
    return True
async def new_transaction_requested(incoming, new_spend):
    """Return True iff a request_transaction for new_spend arrived within ~1 second.

    Drains the incoming message queue; stops early (returning True) as soon as a
    matching request_transaction message is seen.
    """
    await asyncio.sleep(1)
    while not incoming.empty():
        message, _sender = await incoming.get()
        # Skip anything that is not a request_transaction protocol message.
        if message is None or not isinstance(message, Message):
            continue
        if message.type != ProtocolMessageTypes.request_transaction.value:
            continue
        parsed = full_node_protocol.RequestTransaction.from_bytes(message.data)
        if parsed.transaction_id == new_spend.transaction_id:
            return True
    return False
async def get_block_path(full_node: FullNodeAPI):
    """Return the full block chain from genesis (index 0) up to the current peak."""
    peak = await full_node.full_node.blockchain.get_full_peak()
    assert peak is not None
    chain = [peak]
    # Walk backwards via prev_header_hash, prepending until genesis is reached.
    while chain[0].height != 0:
        parent = await full_node.full_node.block_store.get_full_block(chain[0].prev_header_hash)
        assert parent is not None
        chain.insert(0, parent)
    return chain
class TestFullNodeBlockCompression:
    """Exercises transaction-generator compression (template reuse via generator_ref_list)."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("tx_size", [3000000000000])
    async def test_block_compression(self, setup_two_nodes_and_wallet, empty_blockchain, tx_size, self_hostname):
        """Farm transaction blocks and verify when generators are (not) compressed.

        The first transaction block carries a full template generator; subsequent
        standard-transaction blocks should reference it (non-empty
        transactions_generator_ref_list), while blocks containing non-standard
        (anyone-can-spend) spends fall back to a full generator. Optionally also
        exercises block pre-validation and reorg handling (test_reorgs).
        """
        nodes, wallets, bt = setup_two_nodes_and_wallet
        server_1 = nodes[0].full_node.server
        server_2 = nodes[1].full_node.server
        server_3 = wallets[0][1]
        full_node_1 = nodes[0]
        full_node_2 = nodes[1]
        wallet_node_1 = wallets[0][0]
        wallet = wallet_node_1.wallet_state_manager.main_wallet
        # Avoid retesting the slow reorg portion, not necessary more than once
        test_reorgs = (
            tx_size == 10000
            and empty_blockchain.block_store.db_wrapper.db_version >= 2
            and full_node_1.full_node.block_store.db_wrapper.db_version >= 2
            and full_node_2.full_node.block_store.db_wrapper.db_version >= 2
        )
        _ = await connect_and_get_peer(server_1, server_2, self_hostname)
        _ = await connect_and_get_peer(server_1, server_3, self_hostname)
        ph = await wallet.get_new_puzzlehash()
        for i in range(4):
            await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(30, wallet_height_at_least, True, wallet_node_1, 4)
        await time_out_assert(30, node_height_at_least, True, full_node_1, 4)
        await time_out_assert(30, node_height_at_least, True, full_node_2, 4)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        # Send a transaction to mempool
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            tx_size,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        # Farm a block
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(30, node_height_at_least, True, full_node_1, 5)
        await time_out_assert(30, node_height_at_least, True, full_node_2, 5)
        await time_out_assert(30, wallet_height_at_least, True, wallet_node_1, 5)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)

        async def check_transaction_confirmed(transaction) -> bool:
            # Look the transaction up in the wallet's state manager by name.
            tx = await wallet_node_1.wallet_state_manager.get_transaction(transaction.name)
            return tx.confirmed

        await time_out_assert(30, check_transaction_confirmed, True, tr)
        # Confirm generator is not compressed
        program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
        assert program is not None
        assert detect_potential_template_generator(uint32(5), program) is not None
        assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) == 0
        # Send another tx
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            20000,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        # Farm a block
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(10, node_height_at_least, True, full_node_1, 6)
        await time_out_assert(10, node_height_at_least, True, full_node_2, 6)
        await time_out_assert(10, wallet_height_at_least, True, wallet_node_1, 6)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        await time_out_assert(10, check_transaction_confirmed, True, tr)
        # Confirm generator is compressed
        program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
        assert program is not None
        assert detect_potential_template_generator(uint32(6), program) is None
        assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) > 0
        # Farm two empty blocks
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(10, node_height_at_least, True, full_node_1, 8)
        await time_out_assert(10, node_height_at_least, True, full_node_2, 8)
        await time_out_assert(10, wallet_height_at_least, True, wallet_node_1, 8)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        # Send another 2 tx
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            30000,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            40000,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            50000,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            3000000000000,
            ph,
        )
        await wallet.push_transaction(tx=tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            tr.spend_bundle,
            tr.name,
        )
        # Farm a block
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(10, node_height_at_least, True, full_node_1, 9)
        await time_out_assert(10, node_height_at_least, True, full_node_2, 9)
        await time_out_assert(10, wallet_height_at_least, True, wallet_node_1, 9)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        await time_out_assert(10, check_transaction_confirmed, True, tr)
        # Confirm generator is compressed
        program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
        assert program is not None
        assert detect_potential_template_generator(uint32(9), program) is None
        assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) > 0
        # Creates a standard_transaction and an anyone-can-spend tx
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            30000,
            Program.to(1).get_tree_hash(),
        )
        extra_spend = SpendBundle(
            [
                CoinSpend(
                    next(coin for coin in tr.additions if coin.puzzle_hash == Program.to(1).get_tree_hash()),
                    Program.to(1),
                    Program.to([[51, ph, 30000]]),
                )
            ],
            G2Element(),
        )
        new_spend_bundle = SpendBundle.aggregate([tr.spend_bundle, extra_spend])
        new_tr = dataclasses.replace(
            tr,
            spend_bundle=new_spend_bundle,
            additions=new_spend_bundle.additions(),
            removals=new_spend_bundle.removals(),
        )
        await wallet.push_transaction(tx=new_tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            new_tr.spend_bundle,
            new_tr.spend_bundle.name(),
        )
        # Farm a block
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(10, node_height_at_least, True, full_node_1, 10)
        await time_out_assert(10, node_height_at_least, True, full_node_2, 10)
        await time_out_assert(10, wallet_height_at_least, True, wallet_node_1, 10)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        await time_out_assert(10, check_transaction_confirmed, True, new_tr)
        # Confirm generator is not compressed, #CAT creation has a cat spend
        all_blocks = await full_node_1.get_all_full_blocks()
        program: Optional[SerializedProgram] = all_blocks[-1].transactions_generator
        assert program is not None
        assert len(all_blocks[-1].transactions_generator_ref_list) == 0
        # Make a standard transaction and an anyone-can-spend transaction
        tr: TransactionRecord = await wallet.generate_signed_transaction(
            30000,
            Program.to(1).get_tree_hash(),
        )
        extra_spend = SpendBundle(
            [
                CoinSpend(
                    next(coin for coin in tr.additions if coin.puzzle_hash == Program.to(1).get_tree_hash()),
                    Program.to(1),
                    Program.to([[51, ph, 30000]]),
                )
            ],
            G2Element(),
        )
        new_spend_bundle = SpendBundle.aggregate([tr.spend_bundle, extra_spend])
        new_tr = dataclasses.replace(
            tr,
            spend_bundle=new_spend_bundle,
            additions=new_spend_bundle.additions(),
            removals=new_spend_bundle.removals(),
        )
        await wallet.push_transaction(tx=new_tr)
        await time_out_assert(
            10,
            full_node_2.full_node.mempool_manager.get_spendbundle,
            new_tr.spend_bundle,
            new_tr.spend_bundle.name(),
        )
        # Farm a block
        await full_node_1.farm_new_transaction_block(FarmNewBlockProtocol(ph))
        await time_out_assert(10, node_height_at_least, True, full_node_1, 11)
        await time_out_assert(10, node_height_at_least, True, full_node_2, 11)
        await time_out_assert(10, wallet_height_at_least, True, wallet_node_1, 11)
        await time_out_assert(30, wallet_is_synced, True, wallet_node_1, full_node_1)
        # Confirm generator is not compressed
        program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
        assert program is not None
        assert detect_potential_template_generator(uint32(11), program) is not None
        assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) == 0
        height = full_node_1.full_node.blockchain.get_peak().height
        blockchain = empty_blockchain
        all_blocks: List[FullBlock] = await full_node_1.get_all_full_blocks()
        assert height == len(all_blocks) - 1
        assert full_node_1.full_node.full_node_store.previous_generator is not None
        if test_reorgs:
            # NOTE(review): indentation of this reorg section was reconstructed;
            # confirm the inner pre-validation loops nest inside the `for r` loops.
            reog_blocks = bt.get_consecutive_blocks(14)
            for r in range(0, len(reog_blocks), 3):
                for reorg_block in reog_blocks[:r]:
                    await _validate_and_add_block_no_error(blockchain, reorg_block)
                for i in range(1, height):
                    for batch_size in range(1, height, 3):
                        results = await blockchain.pre_validate_blocks_multiprocessing(
                            all_blocks[:i], {}, batch_size, validate_signatures=False
                        )
                        assert results is not None
                        for result in results:
                            assert result.error is None
            for r in range(0, len(all_blocks), 3):
                for block in all_blocks[:r]:
                    await _validate_and_add_block_no_error(blockchain, block)
                for i in range(1, height):
                    for batch_size in range(1, height, 3):
                        results = await blockchain.pre_validate_blocks_multiprocessing(
                            all_blocks[:i], {}, batch_size, validate_signatures=False
                        )
                        assert results is not None
                        for result in results:
                            assert result.error is None
            # Test revert previous_generator
            for block in reog_blocks:
                await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
            assert full_node_1.full_node.full_node_store.previous_generator is None
class TestFullNodeProtocol:
@pytest.mark.asyncio
async def test_spendbundle_serialization(self):
sb: SpendBundle = make_spend_bundle(1)
protocol_message = RespondTransaction(sb)
assert bytes(sb) == bytes(protocol_message)
    @pytest.mark.asyncio
    async def test_inbound_connection_limit(self, setup_four_nodes, self_hostname):
        """A node must cap inbound full-node connections at target_peer_count."""
        nodes, _, _ = setup_four_nodes
        server_1 = nodes[0].full_node.server
        # Allow at most 2 peers total and no outbound ones, so only inbound
        # connections count against the limit.
        server_1.config["target_peer_count"] = 2
        server_1.config["target_outbound_peer_count"] = 0
        for i in range(1, 4):
            full_node_i = nodes[i]
            server_i = full_node_i.full_node.server
            await server_i.start_client(PeerInfo(self_hostname, uint16(server_1._port)))
        # Three nodes attempted to connect; only two should have been accepted.
        assert len(server_1.get_connections(NodeType.FULL_NODE)) == 2
@pytest.mark.asyncio
async def test_request_peers(self, wallet_nodes, self_hostname):
full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, _ = wallet_nodes
full_node_2.full_node.full_node_peers.address_manager.make_private_subnets_valid()
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)))
async def have_msgs():
await full_node_2.full_node.full_node_peers.address_manager.add_to_new_table(
[TimestampedPeerInfo("127.0.0.1", uint16(1000), uint64(int(time.time())) - 1000)],
None,
)
msg_bytes = await full_node_2.full_node.full_node_peers.request_peers(PeerInfo("[::1]", server_2._port))
msg = fnp.RespondPeers.from_bytes(msg_bytes.data)
if msg is not None and not (len(msg.peer_list) == 1):
return False
peer = msg.peer_list[0]
return (peer.host == self_hostname or peer.host == "127.0.0.1") and peer.port == 1000
await time_out_assert_custom_interval(10, 1, have_msgs, True)
full_node_1.full_node.full_node_peers.address_manager = AddressManager()
    @pytest.mark.asyncio
    async def test_basic_chain(self, wallet_nodes, self_hostname):
        """Adding blocks one at a time advances the peak height as expected."""
        full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes
        incoming_queue, _ = await add_dummy_connection(server_1, self_hostname, 12312)
        # A synced node requests mempool contents from each new peer.
        expected_requests = 0
        if await full_node_1.full_node.synced():
            expected_requests = 1
        await time_out_assert(10, time_out_messages(incoming_queue, "request_mempool_transactions", expected_requests))
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        blocks = bt.get_consecutive_blocks(1)
        for block in blocks[:1]:
            await full_node_1.full_node.respond_block(fnp.RespondBlock(block), peer)
        # The dummy connection should be notified of the new peak.
        await time_out_assert(10, time_out_messages(incoming_queue, "new_peak", 1))
        assert full_node_1.full_node.blockchain.get_peak().height == 0
        for block in bt.get_consecutive_blocks(30):
            await full_node_1.full_node.respond_block(fnp.RespondBlock(block), peer)
        assert full_node_1.full_node.blockchain.get_peak().height == 29
    @pytest.mark.asyncio
    async def test_respond_end_of_sub_slot(self, wallet_nodes, self_hostname):
        """New end-of-sub-slot messages extend the slot chain and are broadcast;
        duplicate or out-of-order ones are ignored."""
        full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes
        incoming_queue, dummy_node_id = await add_dummy_connection(server_1, self_hostname, 12312)
        # A synced node requests mempool contents from each new peer.
        expected_requests = 0
        if await full_node_1.full_node.synced():
            expected_requests = 1
        await time_out_assert(10, time_out_messages(incoming_queue, "request_mempool_transactions", expected_requests))
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        # Create empty slots
        blocks = await full_node_1.get_all_full_blocks()
        blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=6)
        # Add empty slots successful
        for slot in blocks[-1].finished_sub_slots[:-2]:
            await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
        num_sub_slots_added = len(blocks[-1].finished_sub_slots[:-2])
        await time_out_assert(
            10,
            time_out_messages(
                incoming_queue,
                "new_signage_point_or_end_of_sub_slot",
                num_sub_slots_added,
            ),
        )
        # Already have sub slot: re-sending an already-known slot produces no broadcast.
        await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(blocks[-1].finished_sub_slots[-3]), peer)
        await asyncio.sleep(2)
        assert incoming_queue.qsize() == 0
        # Add empty slots unsuccessful: the last slot is out of order (its
        # predecessor was never added), so it must be rejected silently.
        await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(blocks[-1].finished_sub_slots[-1]), peer)
        await asyncio.sleep(2)
        assert incoming_queue.qsize() == 0
        # Add some blocks
        blocks = bt.get_consecutive_blocks(4, block_list_input=blocks)
        for block in blocks[-5:]:
            await full_node_1.full_node.respond_block(fnp.RespondBlock(block), peer)
        await time_out_assert(10, time_out_messages(incoming_queue, "new_peak", 5))
        blocks = bt.get_consecutive_blocks(1, skip_slots=2, block_list_input=blocks)
        # Add empty slots successful
        for slot in blocks[-1].finished_sub_slots:
            await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
        num_sub_slots_added = len(blocks[-1].finished_sub_slots)
        await time_out_assert(
            10,
            time_out_messages(
                incoming_queue,
                "new_signage_point_or_end_of_sub_slot",
                num_sub_slots_added,
            ),
        )
    @pytest.mark.asyncio
    async def test_respond_end_of_sub_slot_no_reorg(self, wallet_nodes, self_hostname):
        """Sub slots from a competing fork must not replace the current slot chain."""
        full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes
        incoming_queue, dummy_node_id = await add_dummy_connection(server_1, self_hostname, 12312)
        # A synced node requests mempool contents from each new peer.
        expected_requests = 0
        if await full_node_1.full_node.synced():
            expected_requests = 1
        await time_out_assert(10, time_out_messages(incoming_queue, "request_mempool_transactions", expected_requests))
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        # First get two blocks in the same sub slot
        blocks = await full_node_1.get_all_full_blocks()
        # Retry with different seeds until the final block has no finished sub slots.
        for i in range(0, 9999999):
            blocks = bt.get_consecutive_blocks(5, block_list_input=blocks, skip_slots=1, seed=i.to_bytes(4, "big"))
            if len(blocks[-1].finished_sub_slots) == 0:
                break
        # Then create a fork after the first block.
        blocks_alt_1 = bt.get_consecutive_blocks(1, block_list_input=blocks[:-1], skip_slots=1)
        for slot in blocks[-1].finished_sub_slots[:-2]:
            await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
        # Add all blocks
        for block in blocks:
            await full_node_1.full_node.respond_block(fnp.RespondBlock(block), peer)
        original_ss = full_node_1.full_node.full_node_store.finished_sub_slots[:]
        # Add subslot for first alternative
        for slot in blocks_alt_1[-1].finished_sub_slots:
            await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
        # The fork's sub slots must not have changed the stored slot chain.
        assert full_node_1.full_node.full_node_store.finished_sub_slots == original_ss
@pytest.mark.asyncio
async def test_respond_end_of_sub_slot_race(self, wallet_nodes, self_hostname):
full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes
incoming_queue, dummy_node_id = await add_dummy_connection(server_1, self_hostname, 12312)
expected_requests = 0
if await full_node_1.full_node.synced():
expected_requests = 1
await time_out_assert(10, time_out_messages(incoming_queue, "request_mempool_transactions", expected_requests))
peer = await connect_and_get_peer(server_1, server_2, self_hostname)
# First get two blocks in the same sub slot
blocks = await full_node_1.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks)
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-1]), peer)
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
original_ss = full_node_1.full_node.full_node_store.finished_sub_slots[:].copy()
# Add the block
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-1]), peer)
# Replace with original SS in order to imitate race condition (block added but subslot not yet added)
full_node_1.full_node.full_node_store.finished_sub_slots = original_ss
for slot in blocks[-1].finished_sub_slots:
await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
@pytest.mark.asyncio
async def test_respond_unfinished(self, wallet_nodes, self_hostname):
full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes
incoming_queue, dummy_node_id = await add_dummy_connection(server_1, self_hostname, 12312)
expected_requests = 0
if await full_node_1.full_node.synced():
expected_requests = 1
await time_out_assert(10, time_out_messages(incoming_queue, "request_mempool_transactions", expected_requests))
peer = await connect_and_get_peer(server_1, server_2, self_hostname)
blocks = await full_node_1.get_all_full_blocks()
# Create empty slots
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=6)
block = blocks[-1]
if is_overflow_block(test_constants, block.reward_chain_block.signage_point_index):
finished_ss = block.finished_sub_slots[:-1]
else:
finished_ss = block.finished_sub_slots
unf = UnfinishedBlock(
finished_ss,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
# Can't add because no sub slots
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is None
# Add empty slots successful
for slot in blocks[-1].finished_sub_slots:
await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
await full_node_1.full_node.respond_unfinished_block(fnp.RespondUnfinishedBlock(unf), None)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is not None
# Do the same thing but with non-genesis
await full_node_1.full_node.respond_block(fnp.RespondBlock(block))
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=3)
block = blocks[-1]
if is_overflow_block(test_constants, block.reward_chain_block.signage_point_index):
finished_ss = block.finished_sub_slots[:-1]
else:
finished_ss = block.finished_sub_slots
unf = UnfinishedBlock(
finished_ss,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is None
for slot in blocks[-1].finished_sub_slots:
await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
await full_node_1.full_node.respond_unfinished_block(fnp.RespondUnfinishedBlock(unf), None)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is not None
# Do the same thing one more time, with overflow
await full_node_1.full_node.respond_block(fnp.RespondBlock(block))
blocks = bt.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=3, force_overflow=True)
block = blocks[-1]
unf = UnfinishedBlock(
block.finished_sub_slots[:-1],
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is None
for slot in blocks[-1].finished_sub_slots:
await full_node_1.respond_end_of_sub_slot(fnp.RespondEndOfSubSlot(slot), peer)
await full_node_1.full_node.respond_unfinished_block(fnp.RespondUnfinishedBlock(unf), None)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is not None
# This next section tests making unfinished block with transactions, and then submitting the finished block
ph = wallet_a.get_new_puzzlehash()
ph_receiver = wallet_receiver.get_new_puzzlehash()
blocks = await full_node_1.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
2,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=ph,
pool_reward_puzzle_hash=ph,
)
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-2]))
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-1]))
coin_to_spend = list(blocks[-1].get_included_reward_coins())[0]
spend_bundle = wallet_a.generate_signed_transaction(coin_to_spend.amount, ph_receiver, coin_to_spend)
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
force_overflow=True,
seed=b"random seed",
)
block = blocks[-1]
unf = UnfinishedBlock(
block.finished_sub_slots[:-1], # Since it's overflow
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is None
await full_node_1.full_node.respond_unfinished_block(fnp.RespondUnfinishedBlock(unf), None)
assert full_node_1.full_node.full_node_store.get_unfinished_block(unf.partial_hash) is not None
result = full_node_1.full_node.full_node_store.get_unfinished_block_result(unf.partial_hash)
assert result is not None
assert result.npc_result is not None and result.npc_result.cost > 0
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_address_manager.py | tests/core/full_node/test_address_manager.py | from __future__ import annotations
import math
import time
from pathlib import Path
import pytest
from flax.server.address_manager import AddressManager, ExtendedPeerInfo
from flax.server.address_manager_store import AddressManagerStore
from flax.types.peer_info import PeerInfo, TimestampedPeerInfo
from flax.util.ints import uint16, uint64
class AddressManagerTest(AddressManager):
    """AddressManager specialized for tests: deterministic key and private subnets allowed."""

    def __init__(self, make_deterministic=True):
        super().__init__()
        if make_deterministic:
            self.make_deterministic()
        # Tests use 250.x/252.x addresses; treat private subnets as routable.
        self.make_private_subnets_valid()

    def make_deterministic(self):
        # Fix seed.
        self.key = 2**256 - 1

    async def simulate_connection_fail(self, peer):
        # Mark the peer good, then record a failed connection attempt
        # timestamped over a minute in the past.
        await self.mark_good(peer.peer_info, True, 1)
        await self.attempt(peer.peer_info, False, time.time() - 61)

    async def add_peer_info(self, peers, peer_src=None):
        # Wrap plain PeerInfos as TimestampedPeerInfo (timestamp 0) for the new table.
        timestamped_peers = [
            TimestampedPeerInfo(
                peer.host,
                peer.port,
                0,
            )
            for peer in peers
        ]
        added = await self.add_to_new_table(timestamped_peers, peer_src)
        return added
class TestPeerManager:
    @pytest.mark.asyncio
    async def test_addr_manager(self):
        """Basic add/select/size/deduplication behavior of the address manager."""
        addrman = AddressManagerTest()
        # Test: Does Addrman respond correctly when empty.
        none_peer = await addrman.select_peer()
        assert none_peer is None
        assert await addrman.size() == 0
        # Test: Does Add work as expected.
        peer1 = PeerInfo("250.1.1.1", 8444)
        assert await addrman.add_peer_info([peer1])
        assert await addrman.size() == 1
        peer1_ret = await addrman.select_peer()
        assert peer1_ret.peer_info == peer1
        # Test: Does IP address deduplication work correctly.
        peer1_duplicate = PeerInfo("250.1.1.1", 8444)
        assert not await addrman.add_peer_info([peer1_duplicate])
        assert await addrman.size() == 1
        # Test: New table has one addr and we add a diff addr we should
        # have at least one addr.
        # Note that addrman's size cannot be tested reliably after insertion, as
        # hash collisions may occur. But we can always be sure of at least one
        # success.
        peer2 = PeerInfo("250.1.1.2", 8444)
        assert await addrman.add_peer_info([peer2])
        assert await addrman.size() >= 1
        # Test: AddrMan::Add multiple addresses works as expected
        addrman2 = AddressManagerTest()
        peers = [peer1, peer2]
        assert await addrman2.add_peer_info(peers)
        assert await addrman2.size() >= 1
    @pytest.mark.asyncio
    async def test_addr_manager_ports(self):
        """Entries are keyed by IP: same IP with a different port is not a new entry."""
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        source = PeerInfo("252.2.2.2", 8444)
        # Test: Addr with same IP but diff port does not replace existing addr.
        peer1 = PeerInfo("250.1.1.1", 8444)
        assert await addrman.add_peer_info([peer1], source)
        assert await addrman.size() == 1
        peer2 = PeerInfo("250.1.1.1", 8445)
        assert not await addrman.add_peer_info([peer2], source)
        assert await addrman.size() == 1
        peer3 = await addrman.select_peer()
        assert peer3.peer_info == peer1
        # Test: Add same IP but diff port to tried table, it doesn't get added.
        # Perhaps this is not ideal behavior but it is the current behavior.
        await addrman.mark_good(peer2)
        assert await addrman.size() == 1
        peer3_ret = await addrman.select_peer(True)
        assert peer3_ret.peer_info == peer1
    # This is a flaky test, since it uses randomness.
    # TODO: Make sure it always succeeds.
    @pytest.mark.asyncio
    async def test_addrman_select(self):
        """select_peer draws from both the new and tried tables, regardless of port."""
        addrman = AddressManagerTest()
        source = PeerInfo("252.2.2.2", 8444)
        # Test: Select from new with 1 addr in new.
        peer1 = PeerInfo("250.1.1.1", 8444)
        assert await addrman.add_peer_info([peer1], source)
        assert await addrman.size() == 1
        peer1_ret = await addrman.select_peer(True)
        assert peer1_ret.peer_info == peer1
        # Test: move addr to tried, select from new expected nothing returned.
        await addrman.mark_good(peer1)
        assert await addrman.size() == 1
        peer2_ret = await addrman.select_peer(True)
        assert peer2_ret is None
        peer3_ret = await addrman.select_peer()
        assert peer3_ret.peer_info == peer1
        # Add three addresses to new table.
        peer2 = PeerInfo("250.3.1.1", 8444)
        peer3 = PeerInfo("250.3.2.2", 9999)
        peer4 = PeerInfo("250.3.3.3", 9999)
        assert await addrman.add_peer_info([peer2], PeerInfo("250.3.1.1", 8444))
        assert await addrman.add_peer_info([peer3], PeerInfo("250.3.1.1", 8444))
        assert await addrman.add_peer_info([peer4], PeerInfo("250.4.1.1", 8444))
        # Add three addresses to tried table.
        peer5 = PeerInfo("250.4.4.4", 8444)
        peer6 = PeerInfo("250.4.5.5", 7777)
        peer7 = PeerInfo("250.4.6.6", 8444)
        assert await addrman.add_peer_info([peer5], PeerInfo("250.3.1.1", 8444))
        await addrman.mark_good(peer5)
        assert await addrman.add_peer_info([peer6], PeerInfo("250.3.1.1", 8444))
        await addrman.mark_good(peer6)
        assert await addrman.add_peer_info([peer7], PeerInfo("250.1.1.3", 8444))
        await addrman.mark_good(peer7)
        # Test: 6 addrs + 1 addr from last test = 7.
        assert await addrman.size() == 7
        # Test: Select pulls from new and tried regardless of port number.
        ports = []
        for _ in range(200):
            peer = await addrman.select_peer()
            if peer.peer_info.port not in ports:
                ports.append(peer.peer_info.port)
                if len(ports) == 3:
                    break
        assert len(ports) == 3
    @pytest.mark.asyncio
    async def test_addrman_collisions_new(self):
        """A bucket collision in the new table keeps size flat; later adds still grow it."""
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        source = PeerInfo("252.2.2.2", 8444)
        for i in range(1, 8):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            assert await addrman.size() == i
        # Test: new table collision!
        peer1 = PeerInfo("250.1.1.8", 8444)
        assert await addrman.add_peer_info([peer1], source)
        assert await addrman.size() == 7
        peer2 = PeerInfo("250.1.1.9", 8444)
        assert await addrman.add_peer_info([peer2], source)
        assert await addrman.size() == 8
    @pytest.mark.asyncio
    async def test_addrman_collisions_tried(self):
        """Filling a tried-table slot causes a collision: the colliding add
        leaves the size unchanged until a non-colliding address arrives."""
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        source = PeerInfo("252.2.2.2", 8444)
        # 76 addresses, each marked good, fit without collision.
        for i in range(1, 77):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            await addrman.mark_good(peer)
            # Test: No collision in tried table yet.
            assert await addrman.size() == i
        # Test: tried table collision!
        peer1 = PeerInfo("250.1.1.77", 8444)
        assert await addrman.add_peer_info([peer1], source)
        assert await addrman.size() == 76
        peer2 = PeerInfo("250.1.1.78", 8444)
        assert await addrman.add_peer_info([peer2], source)
        assert await addrman.size() == 77
    @pytest.mark.asyncio
    async def test_addrman_find(self):
        """find_ locates entries by IP and ignores the port when matching."""
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        peer1 = PeerInfo("250.1.2.1", 8333)
        peer2 = PeerInfo("250.1.2.1", 9999)
        peer3 = PeerInfo("251.255.2.1", 8333)
        source1 = PeerInfo("250.1.2.1", 8444)
        source2 = PeerInfo("250.1.2.2", 8444)
        assert await addrman.add_peer_info([peer1], source1)
        # peer2 shares peer1's IP, so adding it is reported as a duplicate.
        assert not await addrman.add_peer_info([peer2], source2)
        assert await addrman.add_peer_info([peer3], source1)
        # Test: ensure Find returns an IP matching what we searched on.
        info1 = addrman.find_(peer1)
        assert info1[0] is not None and info1[1] is not None
        assert info1[0].peer_info == peer1
        # Test: Find does not discriminate by port number.
        info2 = addrman.find_(peer2)
        assert info2[0] is not None and info2[1] is not None
        assert info2 == info1
        # Test: Find returns another IP matching what we searched on.
        info3 = addrman.find_(peer3)
        assert info3[0] is not None and info3[1] is not None
        assert info3[0].peer_info == peer3
@pytest.mark.asyncio
async def test_addrman_create(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peer1 = PeerInfo("250.1.2.1", 8444)
t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
info, node_id = addrman.create_(t_peer, peer1)
assert info.peer_info == peer1
info, _ = addrman.find_(peer1)
assert info.peer_info == peer1
@pytest.mark.asyncio
async def test_addrman_delete(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peer1 = PeerInfo("250.1.2.1", 8444)
t_peer = TimestampedPeerInfo("250.1.2.1", 8444, 0)
info, node_id = addrman.create_(t_peer, peer1)
# Test: Delete should actually delete the addr.
assert await addrman.size() == 1
addrman.delete_new_entry_(node_id)
assert await addrman.size() == 0
info2, _ = addrman.find_(peer1)
assert info2 is None
@pytest.mark.asyncio
async def test_addrman_get_peers(self):
addrman = AddressManagerTest()
assert await addrman.size() == 0
peers1 = await addrman.get_peers()
assert len(peers1) == 0
peer1 = TimestampedPeerInfo("250.250.2.1", 8444, time.time())
peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
peer3 = TimestampedPeerInfo("251.252.2.3", 8444, time.time())
peer4 = TimestampedPeerInfo("251.252.2.4", 8444, time.time())
peer5 = TimestampedPeerInfo("251.252.2.5", 8444, time.time())
source1 = PeerInfo("250.1.2.1", 8444)
source2 = PeerInfo("250.2.3.3", 8444)
# Test: Ensure GetPeers works with new addresses.
assert await addrman.add_to_new_table([peer1], source1)
assert await addrman.add_to_new_table([peer2], source2)
assert await addrman.add_to_new_table([peer3], source1)
assert await addrman.add_to_new_table([peer4], source1)
assert await addrman.add_to_new_table([peer5], source1)
# GetPeers returns 23% of addresses, 23% of 5 is 2 rounded up.
peers2 = await addrman.get_peers()
assert len(peers2) == 2
# Test: Ensure GetPeers works with new and tried addresses.
await addrman.mark_good(PeerInfo(peer1.host, peer1.port))
await addrman.mark_good(PeerInfo(peer2.host, peer2.port))
peers3 = await addrman.get_peers()
assert len(peers3) == 2
# Test: Ensure GetPeers still returns 23% when addrman has many addrs.
for i in range(1, 8 * 256):
octet1 = i % 256
octet2 = i >> 8 % 256
peer = TimestampedPeerInfo(str(octet1) + "." + str(octet2) + ".1.23", 8444, time.time())
await addrman.add_to_new_table([peer])
if i % 8 == 0:
await addrman.mark_good(PeerInfo(peer.host, peer.port))
peers4 = await addrman.get_peers()
percent = await addrman.size()
percent = math.ceil(percent * 23 / 100)
assert len(peers4) == percent
    @pytest.mark.asyncio
    async def test_addrman_tried_bucket(self):
        """Tried-bucket placement is keyed (different keys give different
        buckets) and bounded per /16 group, but spreads across groups."""
        peer1 = PeerInfo("250.1.1.1", 8444)
        t_peer1 = TimestampedPeerInfo("250.1.1.1", 8444, 0)
        peer2 = PeerInfo("250.1.1.1", 9999)
        t_peer2 = TimestampedPeerInfo("250.1.1.1", 9999, 0)
        source1 = PeerInfo("250.1.1.1", 8444)
        peer_info1 = ExtendedPeerInfo(t_peer1, source1)
        # Test: Make sure key actually randomizes bucket placement. A fail on
        # this test could be a security issue.
        key1 = 2**256 - 1
        key2 = 2**128 - 1
        bucket1 = peer_info1.get_tried_bucket(key1)
        bucket2 = peer_info1.get_tried_bucket(key2)
        assert bucket1 != bucket2
        # Test: Two addresses with same IP but different ports can map to
        # different buckets because they have different keys.
        peer_info2 = ExtendedPeerInfo(t_peer2, source1)
        assert peer1.get_key() != peer2.get_key()
        assert peer_info1.get_tried_bucket(key1) != peer_info2.get_tried_bucket(key1)
        # Test: IP addresses in the same group (\16 prefix for IPv4) should
        # never get more than 8 buckets
        buckets = []
        for i in range(255):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_tried_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) == 8
        # Test: IP addresses in the different groups should map to more than
        # 8 buckets.
        buckets = []
        for i in range(255):
            peer = PeerInfo("250." + str(i) + ".1.1", 8444)
            t_peer = TimestampedPeerInfo("250." + str(i) + ".1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_tried_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) > 8
    @pytest.mark.asyncio
    async def test_addrman_new_bucket(self):
        """New-bucket placement is keyed, ignores the port, pins a /16 group
        to one bucket, and bounds buckets per source group at 64."""
        t_peer1 = TimestampedPeerInfo("250.1.2.1", 8444, 0)
        source1 = PeerInfo("250.1.2.1", 8444)
        t_peer2 = TimestampedPeerInfo("250.1.2.1", 9999, 0)
        peer_info1 = ExtendedPeerInfo(t_peer1, source1)
        # Test: Make sure key actually randomizes bucket placement. A fail on
        # this test could be a security issue.
        key1 = 2**256 - 1
        key2 = 2**128 - 1
        bucket1 = peer_info1.get_new_bucket(key1)
        bucket2 = peer_info1.get_new_bucket(key2)
        assert bucket1 != bucket2
        # Test: Ports should not affect bucket placement in the addr
        peer_info2 = ExtendedPeerInfo(t_peer2, source1)
        assert peer_info1.get_new_bucket(key1) == peer_info2.get_new_bucket(key1)
        # Test: IP addresses in the same group (\16 prefix for IPv4) should
        # always map to the same bucket.
        buckets = []
        for i in range(255):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            t_peer = TimestampedPeerInfo("250.1.1." + str(i), 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, peer)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) == 1
        # Test: IP addresses in the same source groups should map to no more
        # than 64 buckets.
        buckets = []
        for i in range(4 * 255):
            src = PeerInfo("251.4.1.1", 8444)
            peer = PeerInfo(str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444)
            t_peer = TimestampedPeerInfo(str(250 + i // 255) + "." + str(i % 256) + ".1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, src)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) <= 64
        # Test: IP addresses in the different source groups should map to more
        # than 64 buckets.
        buckets = []
        for i in range(255):
            src = PeerInfo("250." + str(i) + ".1.1", 8444)
            peer = PeerInfo("250.1.1.1", 8444)
            t_peer = TimestampedPeerInfo("250.1.1.1", 8444, 0)
            extended_peer_info = ExtendedPeerInfo(t_peer, src)
            bucket = extended_peer_info.get_new_bucket(key1)
            if bucket not in buckets:
                buckets.append(bucket)
        assert len(buckets) > 64
    @pytest.mark.asyncio
    async def test_addrman_select_collision_no_collision(self):
        """select_tried_collision stays None while the tried table has room,
        even when mark_good is repeated on the same addresses."""
        addrman = AddressManagerTest()
        collision = await addrman.select_tried_collision()
        assert collision is None
        # Add 17 addresses.
        source = PeerInfo("252.2.2.2", 8444)
        for i in range(1, 18):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            await addrman.mark_good(peer)
            # No collisions yet.
            assert await addrman.size() == i
            collision = await addrman.select_tried_collision()
            assert collision is None
        # Ensure Good handles duplicates well.
        for i in range(1, 18):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            await addrman.mark_good(peer)
        assert await addrman.size() == 17
        collision = await addrman.select_tried_collision()
        assert collision is None
    @pytest.mark.asyncio
    async def test_addrman_no_evict(self):
        """Tried-table collisions are reported by select_tried_collision and
        cleared by resolve_tried_collisions without shrinking the table."""
        addrman = AddressManagerTest()
        # Add 17 addresses.
        source = PeerInfo("252.2.2.2", 8444)
        for i in range(1, 18):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            await addrman.mark_good(peer)
            # No collision yet.
            assert await addrman.size() == i
            collision = await addrman.select_tried_collision()
            assert collision is None
        # The 18th good address collides with entry 16 in the tried table.
        peer18 = PeerInfo("250.1.1.18", 8444)
        assert await addrman.add_peer_info([peer18], source)
        await addrman.mark_good(peer18)
        assert await addrman.size() == 18
        collision = await addrman.select_tried_collision()
        assert collision.peer_info == PeerInfo("250.1.1.16", 8444)
        await addrman.resolve_tried_collisions()
        collision = await addrman.select_tried_collision()
        assert collision is None
        # Let's create two collisions.
        for i in range(19, 37):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            await addrman.mark_good(peer)
            assert await addrman.size() == i
            assert await addrman.select_tried_collision() is None
        # Cause a collision.
        peer37 = PeerInfo("250.1.1.37", 8444)
        assert await addrman.add_peer_info([peer37], source)
        await addrman.mark_good(peer37)
        assert await addrman.size() == 37
        # Cause a second collision.
        assert not await addrman.add_peer_info([peer18], source)
        await addrman.mark_good(peer18)
        assert await addrman.size() == 37
        collision = await addrman.select_tried_collision()
        assert collision is not None
        await addrman.resolve_tried_collisions()
        collision = await addrman.select_tried_collision()
        assert collision is None
    @pytest.mark.asyncio
    async def test_addrman_eviction_works(self):
        """After a simulated connection failure on the colliding entry,
        resolve_tried_collisions evicts it in favour of the newcomer."""
        addrman = AddressManagerTest()
        assert await addrman.size() == 0
        # Empty addrman should return blank addrman info.
        assert await addrman.select_tried_collision() is None
        # Add seventeen addresses; an eighteenth below forces a collision.
        source = PeerInfo("252.2.2.2", 8444)
        for i in range(1, 18):
            peer = PeerInfo("250.1.1." + str(i), 8444)
            assert await addrman.add_peer_info([peer], source)
            await addrman.mark_good(peer)
            # No collision yet.
            assert await addrman.size() == i
            assert await addrman.select_tried_collision() is None
        # Collision between 18 and 16.
        peer18 = PeerInfo("250.1.1.18", 8444)
        assert await addrman.add_peer_info([peer18], source)
        await addrman.mark_good(peer18)
        assert await addrman.size() == 18
        collision = await addrman.select_tried_collision()
        assert collision.peer_info == PeerInfo("250.1.1.16", 8444)
        await addrman.simulate_connection_fail(collision)
        # Should swap 18 for 16.
        await addrman.resolve_tried_collisions()
        assert await addrman.select_tried_collision() is None
        # If 18 was swapped for 16, then this should cause no collisions.
        assert not await addrman.add_peer_info([peer18], source)
        await addrman.mark_good(peer18)
        assert await addrman.select_tried_collision() is None
        # If we insert 16, it should collide with 18.
        addr16 = PeerInfo("250.1.1.16", 8444)
        assert not await addrman.add_peer_info([addr16], source)
        await addrman.mark_good(addr16)
        collision = await addrman.select_tried_collision()
        assert collision.peer_info == PeerInfo("250.1.1.18", 8444)
        await addrman.resolve_tried_collisions()
        assert await addrman.select_tried_collision() is None
    @pytest.mark.asyncio
    # use tmp_path pytest fixture to create a temporary directory
    async def test_serialization(self, tmp_path: Path):
        """Round-trip an AddressManager through AddressManagerStore and verify
        every peer (info, source, timestamp) survives serialization."""
        addrman = AddressManagerTest()
        now = int(math.floor(time.time()))
        t_peer1 = TimestampedPeerInfo("250.7.1.1", uint16(8333), uint64(now - 10000))
        t_peer2 = TimestampedPeerInfo("250.7.2.2", uint16(9999), uint64(now - 20000))
        t_peer3 = TimestampedPeerInfo("250.7.3.3", uint16(9999), uint64(now - 30000))
        source = PeerInfo("252.5.1.1", uint16(8333))
        await addrman.add_to_new_table([t_peer1, t_peer2, t_peer3], source)
        await addrman.mark_good(PeerInfo("250.7.1.1", uint16(8333)))
        peers_dat_filename = tmp_path / "peers.dat"
        if peers_dat_filename.exists():
            peers_dat_filename.unlink()
        # Write out the serialized peer data
        await AddressManagerStore.serialize(addrman, peers_dat_filename)
        # Read in the serialized peer data
        addrman2 = await AddressManagerStore.create_address_manager(peers_dat_filename)
        # select_peer is randomized, so sample until all three peers are seen.
        retrieved_peers = []
        for _ in range(50):
            peer = await addrman2.select_peer()
            if peer not in retrieved_peers:
                retrieved_peers.append(peer)
            if len(retrieved_peers) == 3:
                break
        assert len(retrieved_peers) == 3
        wanted_peers = [
            ExtendedPeerInfo(t_peer1, source),
            ExtendedPeerInfo(t_peer2, source),
            ExtendedPeerInfo(t_peer3, source),
        ]
        recovered = 0
        for target_peer in wanted_peers:
            for current_peer in retrieved_peers:
                if (
                    current_peer is not None
                    and current_peer.peer_info == target_peer.peer_info
                    and current_peer.src == target_peer.src
                    and current_peer.timestamp == target_peer.timestamp
                ):
                    recovered += 1
        assert recovered == 3
        peers_dat_filename.unlink()
@pytest.mark.asyncio
async def test_cleanup(self):
addrman = AddressManagerTest()
peer1 = TimestampedPeerInfo("250.250.2.1", 8444, 100000)
peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
source = PeerInfo("252.5.1.1", 8333)
assert await addrman.add_to_new_table([peer1], source)
assert await addrman.add_to_new_table([peer2], source)
await addrman.mark_good(PeerInfo("250.250.2.2", 9999))
assert await addrman.size() == 2
for _ in range(5):
await addrman.attempt(peer1, True, time.time() - 61)
addrman.cleanup(7 * 3600 * 24, 5)
assert await addrman.size() == 1
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_mempool_performance.py | tests/core/full_node/test_mempool_performance.py | # flake8: noqa: F811, F401
import logging
import pytest
from flax.protocols import full_node_protocol
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.wallet_node import WalletNode
from tests.connection_utils import connect_and_get_peer
from flax.simulator.time_out_assert import time_out_assert
from tests.util.misc import assert_runtime
async def wallet_height_at_least(wallet_node, h) -> bool:
    """Return True when the wallet's finished-sync height equals ``h``.

    NOTE(review): despite the "at_least" name this is an equality check;
    callers poll it via time_out_assert until the exact height is reached.
    """
    height = await wallet_node.wallet_state_manager.blockchain.get_finished_sync_up_to()
    return height == h
async def wallet_balance_at_least(wallet_node: "WalletNode", balance) -> bool:
    """Return True when wallet id 1's confirmed balance is at least ``balance``."""
    b = await wallet_node.wallet_state_manager.get_confirmed_balance_for_wallet(1)
    return b >= balance
log = logging.getLogger(__name__)
class TestMempoolPerformance:
    """Benchmark of mempool re-validation cost while processing blocks."""

    @pytest.mark.asyncio
    @pytest.mark.benchmark
    async def test_mempool_update_performance(
        self, request, wallet_nodes_mempool_perf, default_400_blocks, self_hostname
    ):
        """Sync a wallet over 400 blocks, submit one large transaction, then
        assert per-block processing stays within time budgets (looser for the
        last three blocks, which trigger mempool updates)."""
        blocks = default_400_blocks
        full_nodes, wallets, bt = wallet_nodes_mempool_perf
        wallet_node = wallets[0][0]
        wallet_server = wallets[0][1]
        full_node_api_1 = full_nodes[0]
        full_node_api_2 = full_nodes[1]
        server_1 = full_node_api_1.full_node.server
        server_2 = full_node_api_2.full_node.server
        wallet = wallet_node.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()
        for block in blocks:
            await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await wallet_server.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
        await time_out_assert(60, wallet_height_at_least, True, wallet_node, 399)
        send_amount = 40000000000000
        fee_amount = 2213
        await time_out_assert(60, wallet_balance_at_least, True, wallet_node, send_amount + fee_amount)
        big_transaction: TransactionRecord = await wallet.generate_signed_transaction(send_amount, ph, fee_amount)
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        await full_node_api_1.respond_transaction(
            full_node_protocol.RespondTransaction(big_transaction.spend_bundle), peer, test=True
        )
        # Close all connections so block processing below is not perturbed
        # by peer traffic.
        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()
        blocks = bt.get_consecutive_blocks(3, blocks)
        await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[-3]))
        for idx, block in enumerate(blocks):
            if idx >= len(blocks) - 3:
                duration = 0.1
            else:
                duration = 0.001
            with assert_runtime(seconds=duration, label=request.node.name):
                await full_node_api_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_mempool.py | tests/core/full_node/test_mempool.py | import dataclasses
import logging
from typing import Dict, List, Optional, Tuple, Callable
from clvm.casts import int_to_bytes
import pytest
import flax.server.ws_connection as ws
from flax.full_node.mempool import Mempool
from flax.full_node.full_node_api import FullNodeAPI
from flax.protocols import full_node_protocol, wallet_protocol
from flax.protocols.wallet_protocol import TransactionAck
from flax.server.outbound_message import Message
from flax.simulator.simulator_protocol import FarmNewBlockProtocol
from flax.types.announcement import Announcement
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32, bytes48
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.spend_bundle import SpendBundle
from flax.types.mempool_item import MempoolItem
from flax.util.condition_tools import conditions_for_solution, pkm_pairs
from flax.util.errors import Err
from flax.util.ints import uint64, uint32
from flax.util.hash import std_hash
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.util.api_decorators import api_request, peer_required, bytes_required
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.full_node.pending_tx_cache import PendingTxCache
from blspy import G2Element
from flax.util.recursive_replace import recursive_replace
from tests.blockchain.blockchain_test_utils import _validate_and_add_block
from tests.connection_utils import connect_and_get_peer, add_dummy_connection
from tests.core.node_height import node_height_at_least
from flax.simulator.time_out_assert import time_out_assert
from flax.types.blockchain_format.program import Program, INFINITE_COST
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.condition_costs import ConditionCost
from flax.types.blockchain_format.program import SerializedProgram
from clvm_tools import binutils
from flax.types.generator_types import BlockGenerator
from blspy import G1Element
from flax.types.spend_bundle_conditions import SpendBundleConditions, Spend
from tests.util.misc import assert_runtime
from flax.simulator.wallet_tools import WalletTool
# Throw-away puzzle hashes used as payment targets throughout these tests.
BURN_PUZZLE_HASH = bytes32(b"0" * 32)
BURN_PUZZLE_HASH_2 = bytes32(b"1" * 32)
# Module-level logger for this test module.
log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def wallet_a(bt):
    # One pool WalletTool shared by every test in this module.
    return bt.get_pool_wallet_tool()
def generate_test_spend_bundle(
    wallet_a: WalletTool,
    coin: Coin,
    condition_dic: Optional[Dict[ConditionOpcode, List[ConditionWithArgs]]] = None,
    fee: uint64 = uint64(0),
    amount: uint64 = uint64(1000),
    new_puzzle_hash: bytes32 = BURN_PUZZLE_HASH,
) -> SpendBundle:
    """Spend ``coin`` with ``wallet_a``, sending ``amount`` to
    ``new_puzzle_hash`` with the given fee and optional extra conditions.

    Returns the signed SpendBundle (asserts the wallet produced one).
    """
    if condition_dic is None:
        condition_dic = {}
    transaction = wallet_a.generate_signed_transaction(amount, new_puzzle_hash, coin, condition_dic, fee)
    assert transaction is not None
    return transaction
def make_item(idx: int, cost: uint64 = uint64(80)) -> MempoolItem:
    """Build a minimal MempoolItem whose name is byte ``idx`` repeated 32 times."""
    spend_bundle_name = bytes32([idx] * 32)
    return MempoolItem(
        SpendBundle([], G2Element()), uint64(0), NPCResult(None, None, cost), cost, spend_bundle_name, [], [], uint32(0)
    )
class TestPendingTxCache:
    """Unit tests for PendingTxCache: a FIFO, cost-bounded holding area for
    transactions that cannot enter the mempool yet."""

    def test_recall(self):
        # An added item comes back out of drain keyed by its bundle name.
        c = PendingTxCache(100)
        item = make_item(1)
        c.add(item)
        tx = c.drain()
        assert tx == {item.spend_bundle_name: item}

    def test_fifo_limit(self):
        # Exceeding the cost limit evicts the oldest entries first.
        c = PendingTxCache(200)
        # each item has cost 80
        items = [make_item(i) for i in range(1, 4)]
        for i in items:
            c.add(i)
        # the max cost is 200, only two transactions will fit
        # we evict items FIFO, so the to most recently added will be left
        tx = c.drain()
        assert tx == {items[-2].spend_bundle_name: items[-2], items[-1].spend_bundle_name: items[-1]}

    def test_drain(self):
        c = PendingTxCache(100)
        item = make_item(1)
        c.add(item)
        tx = c.drain()
        assert tx == {item.spend_bundle_name: item}
        # drain will clear the cache, so a second call will be empty
        tx = c.drain()
        assert tx == {}

    def test_cost(self):
        # cost() tracks the running total and resets to 0 after drain.
        c = PendingTxCache(200)
        assert c.cost() == 0
        item1 = make_item(1)
        c.add(item1)
        # each item has cost 80
        assert c.cost() == 80
        item2 = make_item(2)
        c.add(item2)
        assert c.cost() == 160
        # the first item is evicted, so the cost stays the same
        item3 = make_item(3)
        c.add(item3)
        assert c.cost() == 160
        tx = c.drain()
        assert tx == {item2.spend_bundle_name: item2, item3.spend_bundle_name: item3}
        assert c.cost() == 0
        item4 = make_item(4)
        c.add(item4)
        assert c.cost() == 80
        tx = c.drain()
        assert tx == {item4.spend_bundle_name: item4}
class TestMempool:
    """Direct tests of the Mempool object itself."""

    @pytest.mark.asyncio
    async def test_basic_mempool(self, one_node_one_block, wallet_a):
        """A fresh Mempool reports min fee rate 0 within capacity and raises
        ValueError for cost queries above its maximum total cost."""
        full_node_1, server_1, bt = one_node_one_block
        _ = await next_block(full_node_1, wallet_a, bt)
        _ = await next_block(full_node_1, wallet_a, bt)
        max_block_cost_clvm = 40000000
        max_mempool_cost = max_block_cost_clvm * 5
        mempool = Mempool(max_mempool_cost, uint64(5), uint64(max_block_cost_clvm))
        assert mempool.get_min_fee_rate(104000) == 0
        with pytest.raises(ValueError):
            mempool.get_min_fee_rate(max_mempool_cost + 1)
        coin = await next_block(full_node_1, wallet_a, bt)
        spend_bundle = generate_test_spend_bundle(wallet_a, coin)
        assert spend_bundle is not None
@peer_required
@api_request
@bytes_required
async def respond_transaction(
    node: FullNodeAPI,
    tx: full_node_protocol.RespondTransaction,
    peer: ws.WSFlaxConnection,
    tx_bytes: bytes = b"",
    test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
    """
    Receives a full transaction from peer.
    If tx is added to mempool, send tx_id to others. (new_transaction)
    """
    assert tx_bytes != b""
    # A transaction's name is the hash of its serialized bytes.
    spend_name = std_hash(tx_bytes)
    # Clear any outstanding request/peer bookkeeping for this transaction so
    # the node treats it as freshly received.
    if spend_name in node.full_node.full_node_store.pending_tx_request:
        node.full_node.full_node_store.pending_tx_request.pop(spend_name)
    if spend_name in node.full_node.full_node_store.peers_with_tx:
        node.full_node.full_node_store.peers_with_tx.pop(spend_name)
    return await node.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
async def next_block(full_node_1, wallet_a, bt) -> Coin:
    """Farm one transaction block paying rewards to ``wallet_a`` and return
    one of its reward coins for the caller to spend."""
    blocks = await full_node_1.get_all_full_blocks()
    # we have to farm a new block here, to ensure every test has a unique coin to test spending.
    # all this could be simplified if the tests did not share a simulation
    start_height = blocks[-1].height
    reward_ph = wallet_a.get_new_puzzlehash()
    blocks = bt.get_consecutive_blocks(
        1,
        block_list_input=blocks,
        guarantee_transaction_block=True,
        farmer_reward_puzzle_hash=reward_ph,
        pool_reward_puzzle_hash=reward_ph,
        genesis_timestamp=10000,
        time_per_block=10,
    )
    for block in blocks:
        await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
    # Wait for the node to actually reach the new height before returning.
    await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 1)
    return list(blocks[-1].get_included_reward_coins())[0]
class TestMempoolManager:
    @pytest.mark.asyncio
    async def test_basic_mempool_manager(self, two_nodes_one_block, wallet_a, self_hostname):
        """A valid spend bundle submitted via respond_transaction ends up
        retrievable from the mempool manager by name."""
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes_one_block
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        _ = await next_block(full_node_1, wallet_a, bt)
        coin = await next_block(full_node_1, wallet_a, bt)
        spend_bundle = generate_test_spend_bundle(wallet_a, coin)
        assert spend_bundle is not None
        tx: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle)
        await full_node_1.respond_transaction(tx, peer, test=True)
        await time_out_assert(
            10,
            full_node_1.full_node.mempool_manager.get_spendbundle,
            spend_bundle,
            spend_bundle.name(),
        )
    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "opcode,lock_value,expected",
        [
            (ConditionOpcode.ASSERT_SECONDS_RELATIVE, -2, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_SECONDS_RELATIVE, -1, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_SECONDS_RELATIVE, 0, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_SECONDS_RELATIVE, 1, MempoolInclusionStatus.FAILED),
            (ConditionOpcode.ASSERT_HEIGHT_RELATIVE, -2, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_HEIGHT_RELATIVE, -1, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_HEIGHT_RELATIVE, 0, MempoolInclusionStatus.PENDING),
            (ConditionOpcode.ASSERT_HEIGHT_RELATIVE, 1, MempoolInclusionStatus.PENDING),
            # the absolute height and seconds tests require fresh full nodes to
            # run the test on. The fixture (one_node_one_block) creates a block,
            # then condition_tester2 creates another 3 blocks
            (ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, 4, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, 5, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, 6, MempoolInclusionStatus.PENDING),
            (ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, 7, MempoolInclusionStatus.PENDING),
            # genesis timestamp is 10000 and each block is 10 seconds
            (ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, 10049, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, 10050, MempoolInclusionStatus.SUCCESS),
            (ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, 10051, MempoolInclusionStatus.FAILED),
            (ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, 10052, MempoolInclusionStatus.FAILED),
        ],
    )
    async def test_ephemeral_timelock(self, one_node_one_block, wallet_a, opcode, lock_value, expected):
        """Time/height lock conditions on an ephemeral coin (created and spent
        inside the same bundle) yield the parametrized inclusion status."""
        def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
            # Spend coin_2, then immediately spend the coin it created, with
            # the same timelock condition attached to both spends.
            conditions = {opcode: [ConditionWithArgs(opcode, [int_to_bytes(lock_value)])]}
            tx1 = wallet_a.generate_signed_transaction(
                uint64(1000000), wallet_a.get_new_puzzlehash(), coin_2, conditions.copy(), uint64(0)
            )
            ephemeral_coin: Coin = tx1.additions()[0]
            tx2 = wallet_a.generate_signed_transaction(
                uint64(1000000), wallet_a.get_new_puzzlehash(), ephemeral_coin, conditions.copy(), uint64(0)
            )
            bundle = SpendBundle.aggregate([tx1, tx2])
            return bundle
        full_node_1, server_1, bt = one_node_one_block
        _ = await next_block(full_node_1, wallet_a, bt)
        _ = await next_block(full_node_1, wallet_a, bt)
        blocks, bundle, status, err = await self.condition_tester2(one_node_one_block, wallet_a, test_fun)
        mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
        print(f"status: {status}")
        print(f"error: {err}")
        assert status == expected
        if expected == MempoolInclusionStatus.SUCCESS:
            assert mempool_bundle is bundle
            assert err is None
        else:
            assert mempool_bundle is None
            assert err is not None
    # this test makes sure that one spend successfully asserts the announce from
    # another spend, even though the assert condition is duplicated 100 times
    @pytest.mark.asyncio
    async def test_coin_announcement_duplicate_consumed(self, one_node_one_block, wallet_a):
        """100 duplicated ASSERT_COIN_ANNOUNCEMENT conditions are all satisfied
        by a single CREATE_COIN_ANNOUNCEMENT in the other spend."""
        def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
            announce = Announcement(coin_2.name(), b"test")
            cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
            dic = {cvp.opcode: [cvp] * 100}
            cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
            # NOTE(review): the dict key is cvp.opcode (ASSERT_COIN_ANNOUNCEMENT)
            # even though the value is a CREATE condition — presumably the wallet
            # only iterates the dict's values; confirm against WalletTool.
            dic2 = {cvp.opcode: [cvp2]}
            spend_bundle1 = generate_test_spend_bundle(wallet_a, coin_1, dic)
            spend_bundle2 = generate_test_spend_bundle(wallet_a, coin_2, dic2)
            bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
            return bundle
        full_node_1, server_1, bt = one_node_one_block
        blocks, bundle, status, err = await self.condition_tester2(one_node_one_block, wallet_a, test_fun)
        mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
        assert err is None
        assert mempool_bundle is bundle
        assert status == MempoolInclusionStatus.SUCCESS
    # this test makes sure that one spend successfully asserts the announce from
    # another spend, even though the create announcement is duplicated 100 times
    @pytest.mark.asyncio
    async def test_coin_duplicate_announcement_consumed(self, one_node_one_block, wallet_a):
        """A single ASSERT_COIN_ANNOUNCEMENT is satisfied even when the matching
        CREATE_COIN_ANNOUNCEMENT is duplicated 100 times in the other spend."""
        def test_fun(coin_1: Coin, coin_2: Coin) -> SpendBundle:
            announce = Announcement(coin_2.name(), b"test")
            cvp = ConditionWithArgs(ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, [announce.name()])
            dic = {cvp.opcode: [cvp]}
            cvp2 = ConditionWithArgs(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, [b"test"])
            # NOTE(review): keyed by cvp.opcode rather than cvp2.opcode —
            # presumably the wallet only iterates the dict's values; confirm.
            dic2 = {cvp.opcode: [cvp2] * 100}
            spend_bundle1 = generate_test_spend_bundle(wallet_a, coin_1, dic)
            spend_bundle2 = generate_test_spend_bundle(wallet_a, coin_2, dic2)
            bundle = SpendBundle.aggregate([spend_bundle1, spend_bundle2])
            return bundle
        full_node_1, server_1, bt = one_node_one_block
        blocks, bundle, status, err = await self.condition_tester2(one_node_one_block, wallet_a, test_fun)
        mempool_bundle = full_node_1.full_node.mempool_manager.get_spendbundle(bundle.name())
        assert err is None
        assert mempool_bundle is bundle
        assert status == MempoolInclusionStatus.SUCCESS
    @pytest.mark.asyncio
    async def test_double_spend(self, two_nodes_one_block, wallet_a, self_hostname):
        """A second spend of the same coin without a fee bump is rejected with
        MEMPOOL_CONFLICT and left PENDING; the first spend stays in the pool."""
        reward_ph = wallet_a.get_new_puzzlehash()
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes_one_block
        blocks = await full_node_1.get_all_full_blocks()
        start_height = blocks[-1].height
        blocks = bt.get_consecutive_blocks(
            3,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=reward_ph,
            pool_reward_puzzle_hash=reward_ph,
        )
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
        # First spend of the reward coin is accepted.
        spend_bundle1 = generate_test_spend_bundle(wallet_a, list(blocks[-1].get_included_reward_coins())[0])
        assert spend_bundle1 is not None
        tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
        status, err = await respond_transaction(full_node_1, tx1, peer, test=True)
        assert err is None
        assert status == MempoolInclusionStatus.SUCCESS
        # Second spend of the same coin (different destination) conflicts.
        spend_bundle2 = generate_test_spend_bundle(
            wallet_a,
            list(blocks[-1].get_included_reward_coins())[0],
            new_puzzle_hash=BURN_PUZZLE_HASH_2,
        )
        assert spend_bundle2 is not None
        tx2: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle2)
        status, err = await respond_transaction(full_node_1, tx2, peer, test=True)
        sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
        sb2 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle2.name())
        assert err == Err.MEMPOOL_CONFLICT
        assert sb1 == spend_bundle1
        assert sb2 is None
        assert status == MempoolInclusionStatus.PENDING
async def send_sb(self, node: FullNodeAPI, sb: SpendBundle) -> Optional[Message]:
tx = wallet_protocol.SendTransaction(sb)
return await node.send_transaction(tx, test=True) # type: ignore
async def gen_and_send_sb(self, node, peer, *args, **kwargs):
sb = generate_test_spend_bundle(*args, **kwargs)
assert sb is not None
await self.send_sb(node, sb)
return sb
def assert_sb_in_pool(self, node, sb):
assert sb == node.full_node.mempool_manager.get_spendbundle(sb.name())
def assert_sb_not_in_pool(self, node, sb):
assert node.full_node.mempool_manager.get_spendbundle(sb.name()) is None
    @pytest.mark.asyncio
    async def test_double_spend_with_higher_fee(self, two_nodes_one_block, wallet_a, self_hostname):
        """Fee-based replacement (RBF) rules for conflicting spend bundles.

        Walks through the replacement policy step by step: an insufficient fee
        bump is rejected; a sufficient bump replaces; an aggregate replaces only
        if it spends a superset of the conflicting bundle's coins; and a larger
        aggregate must not lower the overall fee-per-cost.
        """
        reward_ph = wallet_a.get_new_puzzlehash()
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes_one_block
        blocks = await full_node_1.get_all_full_blocks()
        start_height = blocks[-1].height if len(blocks) > 0 else -1
        blocks = bt.get_consecutive_blocks(
            3,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=reward_ph,
            pool_reward_puzzle_hash=reward_ph,
        )
        peer = await connect_and_get_peer(server_1, server_2, self_hostname)
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
        # Four distinct reward coins from the last two transaction blocks
        coins = iter(blocks[-1].get_included_reward_coins())
        coin1, coin2 = next(coins), next(coins)
        coins = iter(blocks[-2].get_included_reward_coins())
        coin3, coin4 = next(coins), next(coins)
        sb1_1 = await self.gen_and_send_sb(full_node_1, peer, wallet_a, coin1)
        sb1_2 = await self.gen_and_send_sb(full_node_1, peer, wallet_a, coin1, fee=uint64(1))
        # Fee increase is insufficient, the old spendbundle must stay
        self.assert_sb_in_pool(full_node_1, sb1_1)
        self.assert_sb_not_in_pool(full_node_1, sb1_2)
        min_fee_increase = full_node_1.full_node.mempool_manager.get_min_fee_increase()
        sb1_3 = await self.gen_and_send_sb(full_node_1, peer, wallet_a, coin1, fee=uint64(min_fee_increase))
        # Fee increase is sufficiently high, sb1_1 gets replaced with sb1_3
        self.assert_sb_not_in_pool(full_node_1, sb1_1)
        self.assert_sb_in_pool(full_node_1, sb1_3)
        sb2 = generate_test_spend_bundle(wallet_a, coin2, fee=uint64(min_fee_increase))
        sb12 = SpendBundle.aggregate((sb2, sb1_3))
        await self.send_sb(full_node_1, sb12)
        # Aggregated spendbundle sb12 replaces sb1_3 since it spends a superset
        # of coins spent in sb1_3
        self.assert_sb_in_pool(full_node_1, sb12)
        self.assert_sb_not_in_pool(full_node_1, sb1_3)
        sb3 = generate_test_spend_bundle(wallet_a, coin3, fee=uint64(min_fee_increase * 2))
        sb23 = SpendBundle.aggregate((sb2, sb3))
        await self.send_sb(full_node_1, sb23)
        # sb23 must not replace existing sb12 as the former does not spend all
        # coins that are spent in the latter (specifically, coin1)
        self.assert_sb_in_pool(full_node_1, sb12)
        self.assert_sb_not_in_pool(full_node_1, sb23)
        await self.send_sb(full_node_1, sb3)
        # Adding non-conflicting sb3 should succeed
        self.assert_sb_in_pool(full_node_1, sb3)
        sb4_1 = generate_test_spend_bundle(wallet_a, coin4, fee=uint64(min_fee_increase))
        sb1234_1 = SpendBundle.aggregate((sb12, sb3, sb4_1))
        await self.send_sb(full_node_1, sb1234_1)
        # sb1234_1 should not be in pool as it decreases total fees per cost
        self.assert_sb_not_in_pool(full_node_1, sb1234_1)
        sb4_2 = generate_test_spend_bundle(wallet_a, coin4, fee=uint64(min_fee_increase * 2))
        sb1234_2 = SpendBundle.aggregate((sb12, sb3, sb4_2))
        await self.send_sb(full_node_1, sb1234_2)
        # sb1234_2 has a higher fee per cost than its conflicts and should get
        # into mempool
        self.assert_sb_in_pool(full_node_1, sb1234_2)
        self.assert_sb_not_in_pool(full_node_1, sb12)
        self.assert_sb_not_in_pool(full_node_1, sb3)
    @pytest.mark.asyncio
    async def test_invalid_signature(self, one_node_one_block, wallet_a):
        """A bundle whose aggregated signature is replaced with a bogus one is rejected.

        The node must answer with a TransactionAck of FAILED /
        BAD_AGGREGATE_SIGNATURE.
        """
        reward_ph = wallet_a.get_new_puzzlehash()
        full_node_1, server_1, bt = one_node_one_block
        blocks = await full_node_1.get_all_full_blocks()
        start_height = blocks[-1].height if len(blocks) > 0 else -1
        blocks = bt.get_consecutive_blocks(
            3,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=reward_ph,
            pool_reward_puzzle_hash=reward_ph,
        )
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
        coins = iter(blocks[-1].get_included_reward_coins())
        coin1 = next(coins)
        sb: SpendBundle = generate_test_spend_bundle(wallet_a, coin1)
        # Corrupt the signature: swap in the group generator, which cannot verify
        assert sb.aggregated_signature != G2Element.generator()
        sb = dataclasses.replace(sb, aggregated_signature=G2Element.generator())
        res: Optional[Message] = await self.send_sb(full_node_1, sb)
        assert res is not None
        ack: TransactionAck = TransactionAck.from_bytes(res.data)
        assert ack.status == MempoolInclusionStatus.FAILED.value
        assert ack.error == Err.BAD_AGGREGATE_SIGNATURE.name
    async def condition_tester(
        self,
        one_node_one_block,
        wallet_a,
        dic: Dict[ConditionOpcode, List[ConditionWithArgs]],
        fee: int = 0,
        num_blocks: int = 3,
        coin: Optional[Coin] = None,
    ):
        """Farm *num_blocks* blocks, spend a reward coin under conditions *dic*, and submit it.

        Helper for the ASSERT_* condition tests below. Returns
        (blocks, spend_bundle, dummy_peer, inclusion_status, error) so callers can
        assert on how the mempool handled the conditioned spend. If *coin* is None,
        a reward coin from the farmed blocks is spent.
        """
        reward_ph = wallet_a.get_new_puzzlehash()
        full_node_1, server_1, bt = one_node_one_block
        blocks = await full_node_1.get_all_full_blocks()
        start_height = blocks[-1].height
        blocks = bt.get_consecutive_blocks(
            num_blocks,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=reward_ph,
            pool_reward_puzzle_hash=reward_ph,
        )
        # Register a dummy inbound connection so the transaction arrives via a peer
        _, dummy_node_id = await add_dummy_connection(server_1, bt.config["self_hostname"], 100)
        dummy_peer = None
        for node_id, wsc in server_1.all_connections.items():
            if node_id == dummy_node_id:
                dummy_peer = wsc
                break
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + num_blocks)
        spend_bundle1 = generate_test_spend_bundle(
            wallet_a, coin or list(blocks[-num_blocks + 2].get_included_reward_coins())[0], dic, uint64(fee)
        )
        assert spend_bundle1 is not None
        tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(spend_bundle1)
        status, err = await respond_transaction(full_node_1, tx1, dummy_peer, test=True)
        return blocks, spend_bundle1, dummy_peer, status, err
@pytest.mark.asyncio
async def condition_tester2(self, one_node_one_block, wallet_a, test_fun: Callable[[Coin, Coin], SpendBundle]):
reward_ph = wallet_a.get_new_puzzlehash()
full_node_1, server_1, bt = one_node_one_block
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height if len(blocks) > 0 else -1
blocks = bt.get_consecutive_blocks(
3,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=reward_ph,
pool_reward_puzzle_hash=reward_ph,
time_per_block=10,
)
_, dummy_node_id = await add_dummy_connection(server_1, bt.config["self_hostname"], 100)
dummy_peer = None
for node_id, wsc in server_1.all_connections.items():
if node_id == dummy_node_id:
dummy_peer = wsc
break
for block in blocks:
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
await time_out_assert(60, node_height_at_least, True, full_node_1, start_height + 3)
coin_1 = list(blocks[-2].get_included_reward_coins())[0]
coin_2 = list(blocks[-1].get_included_reward_coins())[0]
bundle = test_fun(coin_1, coin_2)
tx1: full_node_protocol.RespondTransaction = full_node_protocol.RespondTransaction(bundle)
status, err = await respond_transaction(full_node_1, tx1, dummy_peer, test=True)
return blocks, bundle, status, err
@pytest.mark.asyncio
async def test_invalid_block_index(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
blocks = await full_node_1.get_all_full_blocks()
start_height = blocks[-1].height
cvp = ConditionWithArgs(
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE,
[int_to_bytes(start_height + 5)],
)
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert sb1 is None
# the transaction may become valid later
assert err == Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
assert status == MempoolInclusionStatus.PENDING
    @pytest.mark.asyncio
    async def test_block_index_missing_arg(self, one_node_one_block, wallet_a):
        """ASSERT_HEIGHT_ABSOLUTE with no argument is an invalid condition and is rejected."""
        full_node_1, server_1, bt = one_node_one_block
        cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [])
        dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
        blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
        sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
        assert sb1 is None
        # INVALID_CONDITION is terminal: the bundle is rejected outright, not retried later
        assert err == Err.INVALID_CONDITION
        assert status == MempoolInclusionStatus.FAILED
@pytest.mark.asyncio
async def test_correct_block_index(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is None
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
@pytest.mark.asyncio
async def test_block_index_garbage(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
# garbage at the end of the argument list is ignored in consensus mode,
# but not in mempool-mode
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(1), b"garbage"])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is Err.INVALID_CONDITION
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
@pytest.mark.asyncio
async def test_negative_block_index(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, [int_to_bytes(-1)])
dic = {ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is None
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
@pytest.mark.asyncio
async def test_invalid_block_age(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(5)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err == Err.ASSERT_HEIGHT_RELATIVE_FAILED
assert sb1 is None
# the transaction may become valid later
assert status == MempoolInclusionStatus.PENDING
    @pytest.mark.asyncio
    async def test_block_age_missing_arg(self, one_node_one_block, wallet_a):
        """ASSERT_HEIGHT_RELATIVE with no argument is an invalid condition and is rejected."""
        full_node_1, server_1, bt = one_node_one_block
        cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [])
        dic = {cvp.opcode: [cvp]}
        blocks, spend_bundle1, peer, status, err = await self.condition_tester(one_node_one_block, wallet_a, dic)
        sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
        assert err == Err.INVALID_CONDITION
        assert sb1 is None
        # INVALID_CONDITION is terminal: the bundle is rejected outright, not retried later
        assert status == MempoolInclusionStatus.FAILED
@pytest.mark.asyncio
async def test_correct_block_age(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(
one_node_one_block, wallet_a, dic, num_blocks=4
)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is None
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
@pytest.mark.asyncio
async def test_block_age_garbage(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
# garbage at the end of the argument list is ignored in consensus mode,
# but not in mempool mode
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(1), b"garbage"])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(
one_node_one_block, wallet_a, dic, num_blocks=4
)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is Err.INVALID_CONDITION
assert sb1 is None
assert status == MempoolInclusionStatus.FAILED
@pytest.mark.asyncio
async def test_negative_block_age(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_HEIGHT_RELATIVE, [int_to_bytes(-1)])
dic = {cvp.opcode: [cvp]}
blocks, spend_bundle1, peer, status, err = await self.condition_tester(
one_node_one_block, wallet_a, dic, num_blocks=4
)
sb1 = full_node_1.full_node.mempool_manager.get_spendbundle(spend_bundle1.name())
assert err is None
assert sb1 is spend_bundle1
assert status == MempoolInclusionStatus.SUCCESS
@pytest.mark.asyncio
async def test_correct_my_id(self, one_node_one_block, wallet_a):
full_node_1, server_1, bt = one_node_one_block
_ = await next_block(full_node_1, wallet_a, bt)
_ = await next_block(full_node_1, wallet_a, bt)
coin = await next_block(full_node_1, wallet_a, bt)
cvp = ConditionWithArgs(ConditionOpcode.ASSERT_MY_COIN_ID, [coin.name()])
dic = {cvp.opcode: [cvp]}
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_block_height_map.py | tests/core/full_node/test_block_height_map.py | import pytest
import struct
from flax.full_node.block_height_map import BlockHeightMap, SesCache
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.util.db_wrapper import DBWrapper2
from tests.util.db_connection import DBConnection
from flax.types.blockchain_format.sized_bytes import bytes32
from typing import Optional
from flax.util.ints import uint8
from flax.util.files import write_file_async
def gen_block_hash(height: int) -> bytes32:
    """Deterministic 32-byte hash for *height*: big-endian u32 (height + 1) repeated 8 times."""
    word = struct.pack(">I", height + 1)
    return bytes32(word * 8)
def gen_ses(height: int) -> SubEpochSummary:
    """Deterministic sub-epoch summary whose hashes are derived from *height*."""
    return SubEpochSummary(
        gen_block_hash(height + 0xFA0000),
        gen_block_hash(height + 0xFC0000),
        uint8(0),
        None,
        None,
    )
async def new_block(
    db: DBWrapper2,
    block_hash: bytes32,
    parent: bytes32,
    height: int,
    is_peak: bool,
    ses: Optional[SubEpochSummary],
):
    """Insert one block row into the test schema and optionally mark it as the peak.

    Handles both schema versions: v2 writes raw blobs into full_blocks and tracks
    the peak in the single-row current_peak table; v1 writes hex-text hashes into
    block_records with the peak flag inline.
    """
    async with db.writer_maybe_transaction() as conn:
        if db.db_version == 2:
            cursor = await conn.execute(
                "INSERT INTO full_blocks VALUES(?, ?, ?, ?)",
                (
                    block_hash,
                    parent,
                    height,
                    # sub epoch summary
                    None if ses is None else bytes(ses),
                ),
            )
            await cursor.close()
            if is_peak:
                # v2: peak lives in a separate single-row table keyed by 0
                cursor = await conn.execute("INSERT OR REPLACE INTO current_peak VALUES(?, ?)", (0, block_hash))
                await cursor.close()
        else:
            # v1: hashes stored as hex text, peak flag stored on the row itself
            cursor = await conn.execute(
                "INSERT INTO block_records VALUES(?, ?, ?, ?, ?)",
                (
                    block_hash.hex(),
                    parent.hex(),
                    height,
                    # sub epoch summary
                    None if ses is None else bytes(ses),
                    is_peak,
                ),
            )
            await cursor.close()
async def setup_db(db: DBWrapper2):
    """Create the minimal block tables (and indexes) BlockHeightMap reads, per schema version."""
    async with db.writer_maybe_transaction() as conn:
        if db.db_version == 2:
            await conn.execute(
                "CREATE TABLE IF NOT EXISTS full_blocks("
                "header_hash blob PRIMARY KEY,"
                "prev_hash blob,"
                "height bigint,"
                "sub_epoch_summary blob)"
            )
            await conn.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)")
            await conn.execute("CREATE INDEX IF NOT EXISTS height on full_blocks(height)")
            await conn.execute("CREATE INDEX IF NOT EXISTS hh on full_blocks(header_hash)")
        else:
            await conn.execute(
                "CREATE TABLE IF NOT EXISTS block_records("
                "header_hash text PRIMARY KEY,"
                "prev_hash text,"
                "height bigint,"
                "sub_epoch_summary blob,"
                "is_peak tinyint)"
            )
            await conn.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
            await conn.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
            await conn.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
# if chain_id != 0, the last block in the chain won't be considered the peak,
# and the chain_id will be mixed in to the hashes, to form a separate chain at
# the same heights as the main chain
async def setup_chain(
    db: DBWrapper2, length: int, *, chain_id: int = 0, ses_every: Optional[int] = None, start_height=0
):
    """Insert a linear chain of blocks from *start_height* up to and including *length*.

    Block hashes mix in *chain_id* so multiple chains can coexist at the same
    heights; the final block (at height == length) is marked as the peak only
    for chain_id 0. If *ses_every* is given, heights divisible by it carry a
    sub-epoch summary.
    """
    parent_hash = bytes32([0] * 32)
    for height in range(start_height, length):
        block_hash = gen_block_hash(height + chain_id * 65536)
        ses = gen_ses(height) if ses_every is not None and height % ses_every == 0 else None
        await new_block(db, block_hash, parent_hash, height, False, ses)
        parent_hash = block_hash
    # we only set is_peak=1 for chain_id 0
    await new_block(db, gen_block_hash(length + chain_id * 65536), parent_hash, length, chain_id == 0, None)
class TestBlockHeightMap:
    """Exercises BlockHeightMap: building from the DB, cache save/restore, updates and rollback."""

    @pytest.mark.asyncio
    async def test_height_to_hash(self, tmp_dir, db_version):
        """Every height of a short chain maps to the expected hash; beyond the peak is absent."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert not height_map.contains_height(11)
            for height in reversed(range(10)):
                assert height_map.contains_height(height)
            for height in reversed(range(10)):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    async def test_height_to_hash_long_chain(self, tmp_dir, db_version):
        """Same as above but with a 10000-block chain."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(1000)):
                assert height_map.contains_height(height)
            for height in reversed(range(10000)):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    async def test_save_restore(self, tmp_dir, db_version):
        """After maybe_flush(), a new map is populated from the cache files, not the DB."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(10000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)
            await height_map.maybe_flush()
            del height_map
            # To ensure we're actually loading from cache, and not the DB, clear
            # the table (but we still need the peak). We need at least 20 blocks
            # in the DB since we keep loading until we find a match of both hash
            # and sub epoch summary. In this test we have a sub epoch summary
            # every 20 blocks, so we generate the 30 last blocks only
            async with db_wrapper.writer_maybe_transaction() as conn:
                if db_version == 2:
                    await conn.execute("DROP TABLE full_blocks")
                else:
                    await conn.execute("DROP TABLE block_records")
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000, ses_every=20, start_height=9970)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(10000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)

    @pytest.mark.asyncio
    async def test_restore_entire_chain(self, tmp_dir, db_version):
        # this is a test where the height-to-hash and height-to-ses caches are
        # entirely unrelated to the database. Make sure they can both be fully
        # replaced
        async with DBConnection(db_version) as db_wrapper:
            # write bogus cache files that do not match the DB at any height
            heights = bytearray(900 * 32)
            for i in range(900):
                idx = i * 32
                heights[idx : idx + 32] = bytes([i % 256] * 32)
            await write_file_async(tmp_dir / "height-to-hash", heights)
            ses_cache = []
            for i in range(0, 900, 19):
                ses_cache.append((i, gen_ses(i + 9999)))
            await write_file_async(tmp_dir / "sub-epoch-summaries", bytes(SesCache(ses_cache)))
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(10000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)

    @pytest.mark.asyncio
    async def test_restore_extend(self, tmp_dir, db_version):
        # test the case where the cache has fewer blocks than the DB, and that
        # we correctly load all the missing blocks from the DB to update the
        # cache
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 2000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in reversed(range(2000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)
            await height_map.maybe_flush()
            del height_map
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            # add 2000 blocks to the chain
            await setup_chain(db_wrapper, 4000, ses_every=20)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            # now make sure we have the complete chain, height 0 -> 4000
            for height in reversed(range(4000)):
                assert height_map.contains_height(height)
                assert height_map.get_hash(height) == gen_block_hash(height)
                if (height % 20) == 0:
                    assert height_map.get_ses(height) == gen_ses(height)
                else:
                    with pytest.raises(KeyError) as _:
                        height_map.get_ses(height)

    @pytest.mark.asyncio
    async def test_height_to_hash_with_orphans(self, tmp_dir, db_version):
        """Orphan chains (no peak) must not leak into the height-to-hash map."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # set up two separate chains, but without the peak
            await setup_chain(db_wrapper, 10, chain_id=1)
            await setup_chain(db_wrapper, 10, chain_id=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in range(10):
                assert height_map.get_hash(height) == gen_block_hash(height)

    @pytest.mark.asyncio
    async def test_height_to_hash_update(self, tmp_dir, db_version):
        """update_height() overrides the hash at one height without touching the rest."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # orphan blocks
            await setup_chain(db_wrapper, 10, chain_id=1)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            for height in range(10):
                assert height_map.get_hash(height) == gen_block_hash(height)
            height_map.update_height(10, gen_block_hash(100), None)
            for height in range(9):
                assert height_map.get_hash(height) == gen_block_hash(height)
            assert height_map.get_hash(10) == gen_block_hash(100)

    @pytest.mark.asyncio
    async def test_update_ses(self, tmp_dir, db_version):
        """update_height() with a sub-epoch summary makes get_ses() return it."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10)
            # orphan blocks
            await setup_chain(db_wrapper, 10, chain_id=1)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(10)
            height_map.update_height(10, gen_block_hash(10), gen_ses(10))
            assert height_map.get_ses(10) == gen_ses(10)
            assert height_map.get_hash(10) == gen_block_hash(10)

    @pytest.mark.asyncio
    async def test_height_to_ses(self, tmp_dir, db_version):
        """get_ses() hits only the heights that actually carry a summary; others raise KeyError."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(1)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(3)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(5)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(7)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(9)

    @pytest.mark.asyncio
    async def test_rollback(self, tmp_dir, db_version):
        """rollback(5) keeps heights 0-5 (and their summaries) and drops everything above."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            assert height_map.get_hash(5) == gen_block_hash(5)
            height_map.rollback(5)
            assert height_map.contains_height(0)
            assert height_map.contains_height(1)
            assert height_map.contains_height(2)
            assert height_map.contains_height(3)
            assert height_map.contains_height(4)
            assert height_map.contains_height(5)
            assert not height_map.contains_height(6)
            assert not height_map.contains_height(7)
            assert not height_map.contains_height(8)
            assert height_map.get_hash(5) == gen_block_hash(5)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(6)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(8)

    @pytest.mark.asyncio
    async def test_rollback2(self, tmp_dir, db_version):
        """rollback(6) is inclusive: height 6 and its summary survive, 7+ are gone."""
        async with DBConnection(db_version) as db_wrapper:
            await setup_db(db_wrapper)
            await setup_chain(db_wrapper, 10, ses_every=2)
            height_map = await BlockHeightMap.create(tmp_dir, db_wrapper)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            assert height_map.get_ses(8) == gen_ses(8)
            assert height_map.get_hash(6) == gen_block_hash(6)
            height_map.rollback(6)
            assert height_map.contains_height(6)
            assert not height_map.contains_height(7)
            assert height_map.get_hash(6) == gen_block_hash(6)
            with pytest.raises(AssertionError) as _:
                height_map.get_hash(7)
            assert height_map.get_ses(0) == gen_ses(0)
            assert height_map.get_ses(2) == gen_ses(2)
            assert height_map.get_ses(4) == gen_ses(4)
            assert height_map.get_ses(6) == gen_ses(6)
            with pytest.raises(KeyError) as _:
                height_map.get_ses(8)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_mempool_fee_protocol.py | tests/core/full_node/test_mempool_fee_protocol.py | from __future__ import annotations
import datetime
from typing import List, Tuple, Union
import pytest
from flax.full_node.full_node_api import FullNodeAPI
from flax.protocols import full_node_protocol, wallet_protocol
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.wallet_protocol import RespondFeeEstimates
from flax.server.server import FlaxServer
from flax.simulator.block_tools import BlockTools
from flax.simulator.full_node_simulator import FullNodeSimulator
from flax.simulator.time_out_assert import time_out_assert
from flax.util.ints import uint64
from flax.wallet.wallet import Wallet
from tests.core.node_height import node_height_at_least
@pytest.mark.asyncio
async def test_protocol_messages(
    wallet_node_sim_and_wallet: Tuple[
        List[Union[FullNodeAPI, FullNodeSimulator]], List[Tuple[Wallet, FlaxServer]], BlockTools
    ]
) -> None:
    """Round-trip the fee-estimate wallet protocol messages through a full node.

    Farms a chain, then requests fee estimates at a few future timestamps and
    sanity-checks the RespondFeeEstimates payload.
    """
    full_nodes, wallets, bt = wallet_node_sim_and_wallet
    a_wallet = bt.get_pool_wallet_tool()
    reward_ph = a_wallet.get_new_puzzlehash()
    blocks = bt.get_consecutive_blocks(
        35,
        guarantee_transaction_block=True,
        farmer_reward_puzzle_hash=reward_ph,
        pool_reward_puzzle_hash=reward_ph,
    )
    full_node_sim: Union[FullNodeAPI, FullNodeSimulator] = full_nodes[0]
    for block in blocks:
        await full_node_sim.full_node.respond_block(full_node_protocol.RespondBlock(block))
    await time_out_assert(60, node_height_at_least, True, full_node_sim, blocks[-1].height)
    offset_secs = [60, 120, 300]
    # Use an aware UTC datetime: the previous utcnow().timestamp() interpreted a
    # naive datetime in *local* time, yielding a wrong epoch (and possibly
    # request times in the past) on any machine whose timezone is not UTC.
    now_unix_secs = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
    request_times = [uint64(now_unix_secs + s) for s in offset_secs]
    request: wallet_protocol.RequestFeeEstimates = wallet_protocol.RequestFeeEstimates(request_times)
    estimates = await full_node_sim.request_fee_estimates(request)
    assert estimates is not None
    assert estimates.type == ProtocolMessageTypes.respond_fee_estimates.value
    response: RespondFeeEstimates = wallet_protocol.RespondFeeEstimates.from_bytes(estimates.data)
    # Sanity check the response
    assert len(response.estimates.estimates) == len(request_times)
    assert response.estimates.error is None
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_hint_management.py | tests/core/full_node/test_hint_management.py | from __future__ import annotations
from typing import List, Optional
import pytest
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain import Blockchain, StateChangeSummary
from flax.consensus.cost_calculator import NPCResult
from flax.full_node.hint_management import get_hints_and_subscription_coin_ids
from flax.simulator.block_tools import BlockTools
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.spend_bundle_conditions import Spend, SpendBundleConditions
from flax.util.hash import std_hash
from flax.util.ints import uint32, uint64
from tests.blockchain.blockchain_test_utils import _validate_and_add_block
# Ten deterministic 32-byte values used below as fixture coin ids and puzzle hashes.
# bytes([0, 0, 0, n]) is exactly n.to_bytes(4, "big") for n < 256.
coin_ids = [std_hash(bytes([0, 0, 0, n])) for n in range(10)]
phs = [std_hash(bytes([0, 0, 0, n])) for n in range(10)]
# Fixture spends feeding the hint-extraction tests in this file. Each created
# coin is a (puzzle_hash, amount, memo) triple: a 32-byte memo (b"1" * 32) is a
# valid hint, an empty memo is no hint, and a short memo (b"1" * 3) exercises
# non-standard hint lengths.
# NOTE(review): the remaining positional Spend fields are passed through
# unchanged here; confirm their meaning against
# flax.types.spend_bundle_conditions.Spend before relying on them.
spends: List[Spend] = [
    Spend(
        coin_ids[0],
        phs[0],
        None,
        uint64(5),
        [
            (phs[2], uint64(123), b""),
            (phs[4], uint64(3), b"1" * 32),
        ],
        [],
    ),
    Spend(
        coin_ids[2],
        phs[0],
        None,
        uint64(6),
        [
            (phs[7], uint64(123), b""),
            (phs[4], uint64(6), b""),
            (phs[9], uint64(123), b"1" * 32),
        ],
        [],
    ),
    Spend(
        coin_ids[1],
        phs[7],
        None,
        uint64(2),
        [
            (phs[5], uint64(123), b""),
            (phs[6], uint64(5), b"1" * 3),
        ],
        [],
    ),
]
@pytest.mark.asyncio
async def test_hints_to_add(bt: BlockTools, empty_blockchain: Blockchain) -> None:
    """Only created coins carrying a non-empty memo appear in hints_to_add; with no
    subscriptions there are no coin ids to look up."""
    for block in bt.get_consecutive_blocks(2):
        await _validate_and_add_block(empty_blockchain, block)
    peak: Optional[BlockRecord] = empty_blockchain.get_peak()
    assert peak is not None
    conds: SpendBundleConditions = SpendBundleConditions(spends, uint64(0), uint32(0), uint64(0), [], uint64(0))
    npc_results = [NPCResult(None, None, uint64(0)), NPCResult(None, conds, uint64(0))]
    summary = StateChangeSummary(peak, uint32(0), [], npc_results, [])
    hints_to_add, lookup_coin_ids = get_hints_and_subscription_coin_ids(summary, {}, {})
    assert len(lookup_coin_ids) == 0
    expected = {
        (Coin(bytes32(spends[0].coin_id), bytes32(phs[4]), uint64(3)).name(), b"1" * 32),
        (Coin(bytes32(spends[2].coin_id), bytes32(phs[6]), uint64(5)).name(), b"1" * 3),
        (Coin(bytes32(spends[1].coin_id), bytes32(phs[9]), uint64(123)).name(), b"1" * 32),
    }
    assert set(hints_to_add) == expected
@pytest.mark.asyncio
async def test_lookup_coin_ids(bt: BlockTools, empty_blockchain: Blockchain) -> None:
blocks = bt.get_consecutive_blocks(2)
await _validate_and_add_block(empty_blockchain, blocks[0])
await _validate_and_add_block(empty_blockchain, blocks[1])
br: Optional[BlockRecord] = empty_blockchain.get_peak()
assert br is not None
sbc: SpendBundleConditions = SpendBundleConditions(spends, uint64(0), uint32(0), uint64(0), [], uint64(0))
npc_res = [NPCResult(None, None, uint64(0)), NPCResult(None, sbc, uint64(0))]
rewards: List[Coin] = [
Coin(coin_ids[8], phs[8], uint64(1)),
Coin(coin_ids[9], phs[9], uint64(2)),
Coin(coin_ids[5], phs[8], uint64(1234)),
]
scs = StateChangeSummary(br, uint32(0), [], npc_res, rewards)
# Removal ID and addition PH
coin_subscriptions = {coin_ids[1]: {bytes32(b"2" * 32)}}
ph_subscriptions = {phs[4]: {bytes32(b"3" * 32)}}
_, lookup_coin_ids = get_hints_and_subscription_coin_ids(scs, coin_subscriptions, ph_subscriptions)
first_coin_id: bytes32 = Coin(bytes32(spends[0].coin_id), bytes32(phs[4]), uint64(3)).name()
second_coin_id: bytes32 = Coin(bytes32(spends[1].coin_id), bytes32(phs[4]), uint64(6)).name()
assert set(lookup_coin_ids) == {coin_ids[1], first_coin_id, second_coin_id}
# Removal PH and addition ID
coin_subscriptions = {first_coin_id: {bytes32(b"5" * 32)}}
ph_subscriptions = {phs[0]: {bytes32(b"6" * 32)}}
_, lookup_coin_ids = get_hints_and_subscription_coin_ids(scs, coin_subscriptions, ph_subscriptions)
assert set(lookup_coin_ids) == {first_coin_id, coin_ids[0], coin_ids[2]}
# Subscribe to hint
third_coin_id: bytes32 = Coin(bytes32(spends[1].coin_id), phs[9], uint64(123)).name()
ph_subscriptions = {bytes32(b"1" * 32): {bytes32(b"7" * 32)}}
_, lookup_coin_ids = get_hints_and_subscription_coin_ids(scs, {}, ph_subscriptions)
assert set(lookup_coin_ids) == {first_coin_id, third_coin_id}
# Reward PH
ph_subscriptions = {rewards[0].puzzle_hash: {bytes32(b"8" * 32)}}
_, lookup_coin_ids = get_hints_and_subscription_coin_ids(scs, {}, ph_subscriptions)
assert set(lookup_coin_ids) == {rewards[0].name(), rewards[2].name()}
# Reward coin id + reward ph
coin_subscriptions = {rewards[1].name(): {bytes32(b"9" * 32)}}
_, lookup_coin_ids = get_hints_and_subscription_coin_ids(scs, coin_subscriptions, ph_subscriptions)
assert set(lookup_coin_ids) == {rewards[1].name(), rewards[0].name(), rewards[2].name()}
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/conftest.py | tests/core/full_node/conftest.py | from __future__ import annotations
import asyncio
from typing import Iterator
import pytest
# This is an optimization to reduce runtime by reducing setup and teardown on the
# wallet nodes fixture below.
# https://github.com/pytest-dev/pytest-asyncio/blob/v0.18.1/pytest_asyncio/plugin.py#L479-L484
@pytest.fixture(scope="module")
def event_loop(request: "pytest.FixtureRequest") -> Iterator[asyncio.AbstractEventLoop]:
"""Create an instance of the default event loop for each test case."""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_node_load.py | tests/core/full_node/test_node_load.py | from __future__ import annotations
import pytest
from flax.protocols import full_node_protocol
from flax.simulator.time_out_assert import time_out_assert
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
from tests.connection_utils import connect_and_get_peer
from tests.util.misc import assert_runtime
class TestNodeLoad:
@pytest.mark.asyncio
async def test_blocks_load(self, request: pytest.FixtureRequest, two_nodes, self_hostname):
num_blocks = 50
full_node_1, full_node_2, server_1, server_2, bt = two_nodes
blocks = bt.get_consecutive_blocks(num_blocks)
peer = await connect_and_get_peer(server_1, server_2, self_hostname)
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[0]), peer)
await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)
async def num_connections():
return len(server_2.get_connections())
await time_out_assert(10, num_connections, 1)
with assert_runtime(seconds=100, label=request.node.name) as runtime_results_future:
for i in range(1, num_blocks):
await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(blocks[i]))
await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(blocks[i]))
runtime_results = runtime_results_future.result(timeout=0)
print(f"Time taken to process {num_blocks} is {runtime_results.duration}")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_conditions.py | tests/core/full_node/test_conditions.py | """
These are quick-to-run test that check spends can be added to the blockchain when they're valid
or that they're failing for the right reason when they're invalid.
"""
import logging
import time
from typing import List, Optional, Tuple
import pytest
from blspy import G2Element
from clvm_tools.binutils import assemble
from flax.types.announcement import Announcement
from flax.types.blockchain_format.program import Program
from flax.types.coin_record import CoinRecord
from flax.types.coin_spend import CoinSpend
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.full_block import FullBlock
from flax.types.spend_bundle import SpendBundle
from flax.util.errors import Err
from flax.util.ints import uint32
from flax.simulator.block_tools import BlockTools
from tests.util.keyring import TempKeyring
from .ram_db import create_ram_blockchain
from ...blockchain.blockchain_test_utils import _validate_and_add_block
def cleanup_keyring(keyring: TempKeyring):
keyring.cleanup()
log = logging.getLogger(__name__)
# This puzzle simply returns the solution as conditions.
# We call it the `EASY_PUZZLE` because it's pretty easy to solve.
EASY_PUZZLE = Program.to(assemble("1"))
EASY_PUZZLE_HASH = EASY_PUZZLE.get_tree_hash()
async def initial_blocks(bt, block_count: int = 4) -> List[FullBlock]:
blocks = bt.get_consecutive_blocks(
block_count,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=EASY_PUZZLE_HASH,
pool_reward_puzzle_hash=EASY_PUZZLE_HASH,
)
return blocks
async def check_spend_bundle_validity(
bt: BlockTools,
blocks: List[FullBlock],
spend_bundle: SpendBundle,
expected_err: Optional[Err] = None,
) -> Tuple[List[CoinRecord], List[CoinRecord]]:
"""
This test helper create an extra block after the given blocks that contains the given
`SpendBundle`, and then invokes `receive_block` to ensure that it's accepted (if `expected_err=None`)
or fails with the correct error code.
"""
constants = bt.constants
db_wrapper, blockchain = await create_ram_blockchain(constants)
try:
for block in blocks:
await _validate_and_add_block(blockchain, block)
additional_blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
newest_block = additional_blocks[-1]
if expected_err is None:
await _validate_and_add_block(blockchain, newest_block)
coins_added = await blockchain.coin_store.get_coins_added_at_height(uint32(len(blocks)))
coins_removed = await blockchain.coin_store.get_coins_removed_at_height(uint32(len(blocks)))
else:
await _validate_and_add_block(blockchain, newest_block, expected_error=expected_err)
coins_added = []
coins_removed = []
return coins_added, coins_removed
finally:
# if we don't close the db_wrapper, the test process doesn't exit cleanly
await db_wrapper.close()
# we must call `shut_down` or the executor in `Blockchain` doesn't stop
blockchain.shut_down()
async def check_conditions(
bt: BlockTools, condition_solution: Program, expected_err: Optional[Err] = None, spend_reward_index: int = -2
):
blocks = await initial_blocks(bt)
coin = list(blocks[spend_reward_index].get_included_reward_coins())[0]
coin_spend = CoinSpend(coin, EASY_PUZZLE, condition_solution)
spend_bundle = SpendBundle([coin_spend], G2Element())
# now let's try to create a block with the spend bundle and ensure that it doesn't validate
await check_spend_bundle_validity(bt, blocks, spend_bundle, expected_err=expected_err)
class TestConditions:
@pytest.mark.asyncio
async def test_invalid_block_age(self, bt):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 2))"))
await check_conditions(bt, conditions, expected_err=Err.ASSERT_HEIGHT_RELATIVE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_age(self, bt):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 1))"))
await check_conditions(bt, conditions)
@pytest.mark.asyncio
async def test_invalid_block_height(self, bt):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 4))"))
await check_conditions(bt, conditions, expected_err=Err.ASSERT_HEIGHT_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_height(self, bt):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 3))"))
await check_conditions(bt, conditions)
@pytest.mark.asyncio
async def test_invalid_my_id(self, bt):
blocks = await initial_blocks(bt)
coin = list(blocks[-2].get_included_reward_coins())[0]
wrong_name = bytearray(coin.name())
wrong_name[-1] ^= 1
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{wrong_name.hex()}))"))
await check_conditions(bt, conditions, expected_err=Err.ASSERT_MY_COIN_ID_FAILED)
@pytest.mark.asyncio
async def test_valid_my_id(self, bt):
blocks = await initial_blocks(bt)
coin = list(blocks[-2].get_included_reward_coins())[0]
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"))
await check_conditions(bt, conditions)
@pytest.mark.asyncio
async def test_invalid_seconds_absolute(self, bt):
# TODO: make the test suite not use `time.time` so we can more accurately
# set `time_now` to make it minimal while still failing
time_now = int(time.time()) + 3000
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(bt, conditions, expected_err=Err.ASSERT_SECONDS_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_seconds_absolute(self, bt):
time_now = int(time.time())
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(bt, conditions)
@pytest.mark.asyncio
async def test_invalid_coin_announcement(self, bt):
blocks = await initial_blocks(bt)
coin = list(blocks[-2].get_included_reward_coins())[0]
announce = Announcement(coin.name(), b"test_bad")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(bt, conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
@pytest.mark.asyncio
async def test_valid_coin_announcement(self, bt):
blocks = await initial_blocks(bt)
coin = list(blocks[-2].get_included_reward_coins())[0]
announce = Announcement(coin.name(), b"test")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(bt, conditions)
@pytest.mark.asyncio
async def test_invalid_puzzle_announcement(self, bt):
announce = Announcement(EASY_PUZZLE_HASH, b"test_bad")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(bt, conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
@pytest.mark.asyncio
async def test_valid_puzzle_announcement(self, bt):
announce = Announcement(EASY_PUZZLE_HASH, b"test")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(bt, conditions)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/config.py | tests/core/full_node/config.py | # flake8: noqa: E501
from __future__ import annotations
parallel = True
job_timeout = 50
check_resource_usage = True
checkout_blocks_and_plots = True
os_skip = ["windows"]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/__init__.py | tests/core/full_node/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/test_performance.py | tests/core/full_node/test_performance.py | # flake8: noqa: F811, F401
import cProfile
import dataclasses
import logging
import random
from typing import Dict
import pytest
from clvm.casts import int_to_bytes
from flax.consensus.block_record import BlockRecord
from flax.consensus.pot_iterations import is_overflow_block
from flax.full_node.full_node_api import FullNodeAPI
from flax.protocols import full_node_protocol as fnp
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.ints import uint64
from tests.connection_utils import add_dummy_connection
from tests.core.full_node.stores.test_coin_store import get_future_reward_coins
from tests.core.node_height import node_height_at_least
from flax.simulator.time_out_assert import time_out_assert
from tests.util.misc import assert_runtime
log = logging.getLogger(__name__)
async def get_block_path(full_node: FullNodeAPI):
blocks_list = [await full_node.full_node.blockchain.get_full_peak()]
assert blocks_list[0] is not None
while blocks_list[0].height != 0:
b = await full_node.full_node.block_store.get_full_block(blocks_list[0].prev_header_hash)
assert b is not None
blocks_list.insert(0, b)
return blocks_list
class TestPerformance:
@pytest.mark.asyncio
@pytest.mark.benchmark
async def test_full_block_performance(self, request: pytest.FixtureRequest, wallet_nodes_perf, self_hostname):
full_node_1, server_1, wallet_a, wallet_receiver, bt = wallet_nodes_perf
blocks = await full_node_1.get_all_full_blocks()
full_node_1.full_node.mempool_manager.limit_factor = 1
wallet_ph = wallet_a.get_new_puzzlehash()
blocks = bt.get_consecutive_blocks(
10,
block_list_input=blocks,
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=wallet_ph,
pool_reward_puzzle_hash=wallet_ph,
)
for block in blocks:
await full_node_1.full_node.respond_block(fnp.RespondBlock(block))
start_height = (
full_node_1.full_node.blockchain.get_peak().height
if full_node_1.full_node.blockchain.get_peak() is not None
else -1
)
incoming_queue, node_id = await add_dummy_connection(server_1, self_hostname, 12312)
fake_peer = server_1.all_connections[node_id]
# Mempool has capacity of 100, make 110 unspents that we can use
puzzle_hashes = []
# Makes a bunch of coins
for i in range(20):
conditions_dict: Dict = {ConditionOpcode.CREATE_COIN: []}
# This should fit in one transaction
for _ in range(100):
receiver_puzzlehash = wallet_receiver.get_new_puzzlehash()
puzzle_hashes.append(receiver_puzzlehash)
output = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [receiver_puzzlehash, int_to_bytes(100000000)])
conditions_dict[ConditionOpcode.CREATE_COIN].append(output)
spend_bundle = wallet_a.generate_signed_transaction(
100,
puzzle_hashes[0],
get_future_reward_coins(blocks[1 + i])[0],
condition_dic=conditions_dict,
)
assert spend_bundle is not None
respond_transaction_2 = fnp.RespondTransaction(spend_bundle)
await full_node_1.respond_transaction(respond_transaction_2, fake_peer)
blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=spend_bundle,
)
await full_node_1.full_node.respond_block(fnp.RespondBlock(blocks[-1]), fake_peer)
await time_out_assert(20, node_height_at_least, True, full_node_1, start_height + 20)
spend_bundles = []
spend_bundle_ids = []
# Fill mempool
for puzzle_hash in puzzle_hashes[1:]:
coin_record = (await full_node_1.full_node.coin_store.get_coin_records_by_puzzle_hash(True, puzzle_hash))[0]
receiver_puzzlehash = wallet_receiver.get_new_puzzlehash()
if puzzle_hash == puzzle_hashes[-1]:
fee = 100000000 # 100 million (20 fee per cost)
else:
fee = random.randint(1, 100000000)
spend_bundle = wallet_receiver.generate_signed_transaction(
uint64(500), receiver_puzzlehash, coin_record.coin, fee=fee
)
spend_bundles.append(spend_bundle)
spend_bundle_ids.append(spend_bundle.get_hash())
pr = cProfile.Profile()
pr.enable()
with assert_runtime(seconds=0.001, label=f"{request.node.name} - mempool"):
num_tx: int = 0
for spend_bundle, spend_bundle_id in zip(spend_bundles, spend_bundle_ids):
num_tx += 1
respond_transaction = fnp.RespondTransaction(spend_bundle)
await full_node_1.respond_transaction(respond_transaction, fake_peer)
request_transaction = fnp.RequestTransaction(spend_bundle_id)
req = await full_node_1.request_transaction(request_transaction)
if req is None:
break
log.warning(f"Num Tx: {num_tx}")
pr.create_stats()
pr.dump_stats("./mempool-benchmark.pstats")
# Create an unfinished block
peak = full_node_1.full_node.blockchain.get_peak()
assert peak is not None
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = full_node_1.full_node.blockchain.block_record(curr.prev_hash)
mempool_bundle = await full_node_1.full_node.mempool_manager.create_bundle_from_mempool(curr.header_hash)
if mempool_bundle is None:
spend_bundle = None
else:
spend_bundle = mempool_bundle[0]
current_blocks = await full_node_1.get_all_full_blocks()
blocks = bt.get_consecutive_blocks(
1,
transaction_data=spend_bundle,
block_list_input=current_blocks,
guarantee_transaction_block=True,
)
block = blocks[-1]
if is_overflow_block(bt.constants, block.reward_chain_block.signage_point_index):
sub_slots = block.finished_sub_slots[:-1]
else:
sub_slots = block.finished_sub_slots
unfinished = UnfinishedBlock(
sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
pr = cProfile.Profile()
pr.enable()
with assert_runtime(seconds=0.1, label=f"{request.node.name} - unfinished"):
res = await full_node_1.respond_unfinished_block(fnp.RespondUnfinishedBlock(unfinished), fake_peer)
log.warning(f"Res: {res}")
pr.create_stats()
pr.dump_stats("./unfinished-benchmark.pstats")
pr = cProfile.Profile()
pr.enable()
with assert_runtime(seconds=0.1, label=f"{request.node.name} - full block"):
# No transactions generator, the full node already cached it from the unfinished block
block_small = dataclasses.replace(block, transactions_generator=None)
res = await full_node_1.full_node.respond_block(fnp.RespondBlock(block_small))
log.warning(f"Res: {res}")
pr.create_stats()
pr.dump_stats("./full-block-benchmark.pstats")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/ram_db.py | tests/core/full_node/ram_db.py | from typing import Tuple
from pathlib import Path
import random
from flax.consensus.blockchain import Blockchain
from flax.consensus.constants import ConsensusConstants
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.util.db_wrapper import DBWrapper2
async def create_ram_blockchain(consensus_constants: ConsensusConstants) -> Tuple[DBWrapper2, Blockchain]:
uri = f"file:db_{random.randint(0, 99999999)}?mode=memory&cache=shared"
db_wrapper = await DBWrapper2.create(database=uri, uri=True, reader_count=1)
block_store = await BlockStore.create(db_wrapper)
coin_store = await CoinStore.create(db_wrapper)
blockchain = await Blockchain.create(coin_store, block_store, consensus_constants, Path("."), 2)
return db_wrapper, blockchain
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/test_full_node_store.py | tests/core/full_node/stores/test_full_node_store.py | import logging
from secrets import token_bytes
from typing import List, Optional
import pytest
import pytest_asyncio
from flax.consensus.blockchain import ReceiveBlockResult
from flax.consensus.find_fork_point import find_fork_point_in_chain
from flax.consensus.multiprocess_validation import PreValidationResult
from flax.consensus.pot_iterations import is_overflow_block
from flax.full_node.full_node_store import FullNodeStore
from flax.full_node.signage_point import SignagePoint
from flax.protocols import timelord_protocol
from flax.protocols.timelord_protocol import NewInfusionPointVDF
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.block_cache import BlockCache
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.simulator.block_tools import get_signage_point, create_block_tools_async
from tests.blockchain.blockchain_test_utils import (
_validate_and_add_block,
_validate_and_add_block_no_error,
)
from tests.setup_nodes import test_constants as test_constants_original
from tests.util.blockchain import create_blockchain
from tests.util.keyring import TempKeyring
test_constants = test_constants_original.replace(**{"DISCRIMINANT_SIZE_BITS": 32, "SUB_SLOT_ITERS_STARTING": 2**12})
log = logging.getLogger(__name__)
@pytest_asyncio.fixture(scope="function")
async def custom_block_tools():
with TempKeyring() as keychain:
yield await create_block_tools_async(constants=test_constants, keychain=keychain)
@pytest_asyncio.fixture(scope="function", params=[1, 2])
async def empty_blockchain(request):
bc1, db_wrapper, db_path = await create_blockchain(test_constants, request.param)
yield bc1
await db_wrapper.close()
bc1.shut_down()
db_path.unlink()
@pytest_asyncio.fixture(scope="function", params=[1, 2])
async def empty_blockchain_with_original_constants(request):
bc1, db_wrapper, db_path = await create_blockchain(test_constants_original, request.param)
yield bc1
await db_wrapper.close()
bc1.shut_down()
db_path.unlink()
class TestFullNodeStore:
@pytest.mark.asyncio
async def test_basic_store(self, empty_blockchain, custom_block_tools, normalized_to_identity: bool = False):
blockchain = empty_blockchain
blocks = custom_block_tools.get_consecutive_blocks(
10,
seed=b"1234",
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
store = FullNodeStore(test_constants)
unfinished_blocks = []
for block in blocks:
unfinished_blocks.append(
UnfinishedBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_info,
block.transactions_generator,
[],
)
)
# Add/get candidate block
assert store.get_candidate_block(unfinished_blocks[0].get_hash()) is None
for height, unf_block in enumerate(unfinished_blocks):
store.add_candidate_block(unf_block.get_hash(), uint32(height), unf_block)
candidate = store.get_candidate_block(unfinished_blocks[4].get_hash())
assert candidate is not None
assert candidate[1] == unfinished_blocks[4]
store.clear_candidate_blocks_below(uint32(8))
assert store.get_candidate_block(unfinished_blocks[5].get_hash()) is None
assert store.get_candidate_block(unfinished_blocks[8].get_hash()) is not None
# Test seen unfinished blocks
h_hash_1 = bytes32(token_bytes(32))
assert not store.seen_unfinished_block(h_hash_1)
assert store.seen_unfinished_block(h_hash_1)
store.clear_seen_unfinished_blocks()
assert not store.seen_unfinished_block(h_hash_1)
# Add/get unfinished block
for height, unf_block in enumerate(unfinished_blocks):
assert store.get_unfinished_block(unf_block.partial_hash) is None
store.add_unfinished_block(
uint32(height), unf_block, PreValidationResult(None, uint64(123532), None, False)
)
assert store.get_unfinished_block(unf_block.partial_hash) == unf_block
store.remove_unfinished_block(unf_block.partial_hash)
assert store.get_unfinished_block(unf_block.partial_hash) is None
blocks = custom_block_tools.get_consecutive_blocks(
1,
skip_slots=5,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
sub_slots = blocks[0].finished_sub_slots
assert len(sub_slots) == 5
assert (
store.get_finished_sub_slots(
BlockCache({}),
None,
sub_slots[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
)
== []
)
# Test adding non-connecting sub-slots genesis
assert store.get_sub_slot(test_constants.GENESIS_CHALLENGE) is None
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is None
assert store.new_finished_sub_slot(sub_slots[2], blockchain, None, None) is None
# Test adding sub-slots after genesis
assert store.new_finished_sub_slot(sub_slots[0], blockchain, None, None) is not None
sub_slot = store.get_sub_slot(sub_slots[0].challenge_chain.get_hash())
assert sub_slot is not None
assert sub_slot[0] == sub_slots[0]
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.new_finished_sub_slot(sub_slots[1], blockchain, None, None) is not None
for i in range(len(sub_slots)):
assert store.new_finished_sub_slot(sub_slots[i], blockchain, None, None) is not None
slot_i = store.get_sub_slot(sub_slots[i].challenge_chain.get_hash())
assert slot_i is not None
assert slot_i[0] == sub_slots[i]
assert store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-1].challenge_chain.get_hash()) == sub_slots
assert store.get_finished_sub_slots(BlockCache({}), None, std_hash(b"not a valid hash")) is None
assert (
store.get_finished_sub_slots(BlockCache({}), None, sub_slots[-2].challenge_chain.get_hash())
== sub_slots[:-1]
)
# Test adding genesis peak
await _validate_and_add_block(blockchain, blocks[0])
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
if peak.overflow:
store.new_peak(peak, peak_full_block, sub_slots[-2], sub_slots[-1], None, blockchain)
else:
store.new_peak(peak, peak_full_block, None, sub_slots[-1], None, blockchain)
assert store.get_sub_slot(sub_slots[0].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[1].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[2].challenge_chain.get_hash()) is None
if peak.overflow:
slot_3 = store.get_sub_slot(sub_slots[3].challenge_chain.get_hash())
assert slot_3 is not None
assert slot_3[0] == sub_slots[3]
else:
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
slot_4 = store.get_sub_slot(sub_slots[4].challenge_chain.get_hash())
assert slot_4 is not None
assert slot_4[0] == sub_slots[4]
assert (
store.get_finished_sub_slots(
blockchain,
peak,
sub_slots[-1].challenge_chain.get_hash(),
)
== []
)
# Test adding non genesis peak directly
blocks = custom_block_tools.get_consecutive_blocks(
2,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks = custom_block_tools.get_consecutive_blocks(
3,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks:
await _validate_and_add_block_no_error(blockchain, block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
# Add reorg blocks
blocks_reorg = custom_block_tools.get_consecutive_blocks(
20,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for block in blocks_reorg:
peak = blockchain.get_peak()
assert peak is not None
await _validate_and_add_block_no_error(blockchain, block)
if blockchain.get_peak().header_hash == block.header_hash:
sb = blockchain.block_record(block.header_hash)
fork = find_fork_point_in_chain(blockchain, peak, blockchain.block_record(sb.header_hash))
if fork > 0:
fork_block = blockchain.height_to_block_record(fork)
else:
fork_block = None
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res.added_eos is None
# Add slots to the end
blocks_2 = custom_block_tools.get_consecutive_blocks(
1,
block_list_input=blocks_reorg,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
assert store.get_sub_slot(sub_slots[3].challenge_chain.get_hash()) is None
assert store.get_sub_slot(sub_slots[4].challenge_chain.get_hash()) is None
# Test adding signage point
peak = blockchain.get_peak()
ss_start_iters = peak.ip_sub_slot_total_iters(test_constants)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
ss_start_iters,
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
blocks = blocks_reorg
while True:
blocks = custom_block_tools.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await _validate_and_add_block(blockchain, blocks[-1])
if blockchain.get_peak().header_hash == blocks[-1].header_hash:
sb = blockchain.block_record(blocks[-1].header_hash)
fork = find_fork_point_in_chain(blockchain, peak, blockchain.block_record(sb.header_hash))
if fork > 0:
fork_block = blockchain.height_to_block_record(fork)
else:
fork_block = None
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(blocks[-1].header_hash)
res = store.new_peak(sb, blocks[-1], sp_sub_slot, ip_sub_slot, fork_block, blockchain)
assert res.added_eos is None
if sb.overflow and sp_sub_slot is not None:
assert sp_sub_slot != ip_sub_slot
break
peak = blockchain.get_peak()
assert peak.overflow
# Overflow peak should result in 2 finished sub slots
assert len(store.finished_sub_slots) == 2
# Add slots to the end, except for the last one, which we will use to test invalid SP
blocks_2 = custom_block_tools.get_consecutive_blocks(
1,
block_list_input=blocks,
skip_slots=3,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_2[-1].finished_sub_slots[:-1]:
store.new_finished_sub_slot(slot, blockchain, blockchain.get_peak(), await blockchain.get_full_peak())
finished_sub_slots = blocks_2[-1].finished_sub_slots
assert len(store.finished_sub_slots) == 4
# Test adding signage points for overflow blocks (sp_sub_slot)
ss_start_iters = peak.sp_sub_slot_total_iters(test_constants)
# for i in range(peak.signage_point_index, test_constants.NUM_SPS_SUB_SLOT):
# if i < peak.signage_point_index:
# continue
# latest = peak
# while latest.total_iters > peak.sp_total_iters(test_constants):
# latest = blockchain.blocks[latest.prev_hash]
# sp = get_signage_point(
# test_constants,
# blockchain.blocks,
# latest,
# ss_start_iters,
# uint8(i),
# [],
# peak.sub_slot_iters,
# )
# assert store.new_signage_point(i, blockchain.blocks, peak, peak.sub_slot_iters, sp)
# Test adding signage points for overflow blocks (ip_sub_slot)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point, a few slots forward (good)
saved_sp_hash = None
for slot_offset in range(1, len(finished_sub_slots)):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert sp.cc_vdf is not None
saved_sp_hash = sp.cc_vdf.output.get_hash()
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding future signage point (bad)
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
blockchain,
peak,
peak.ip_sub_slot_total_iters(test_constants) + len(finished_sub_slots) * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[: len(finished_sub_slots)],
peak.sub_slot_iters,
)
assert not store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Test adding past signage point
sp = SignagePoint(
blocks[1].reward_chain_block.challenge_chain_sp_vdf,
blocks[1].challenge_chain_sp_proof,
blocks[1].reward_chain_block.reward_chain_sp_vdf,
blocks[1].reward_chain_sp_proof,
)
assert not store.new_signage_point(
blocks[1].reward_chain_block.signage_point_index,
blockchain,
peak,
blockchain.block_record(blocks[1].header_hash).sp_sub_slot_total_iters(test_constants),
sp,
)
# Get signage point by index
assert (
store.get_signage_point_by_index(
finished_sub_slots[0].challenge_chain.get_hash(),
uint8(4),
finished_sub_slots[0].reward_chain.get_hash(),
)
is not None
)
assert (
store.get_signage_point_by_index(finished_sub_slots[0].challenge_chain.get_hash(), uint8(4), std_hash(b"1"))
is None
)
# Get signage point by hash
assert saved_sp_hash is not None
assert store.get_signage_point(saved_sp_hash) is not None
assert store.get_signage_point(std_hash(b"2")) is None
# Test adding signage points before genesis
store.initialize_genesis_sub_slot()
assert len(store.finished_sub_slots) == 1
for i in range(1, test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
uint128(0),
uint8(i),
[],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
blocks_3 = custom_block_tools.get_consecutive_blocks(
1,
skip_slots=2,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
for slot in blocks_3[-1].finished_sub_slots:
store.new_finished_sub_slot(slot, blockchain, None, None)
assert len(store.finished_sub_slots) == 3
finished_sub_slots = blocks_3[-1].finished_sub_slots
for slot_offset in range(1, len(finished_sub_slots) + 1):
for i in range(
1,
test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA,
):
sp = get_signage_point(
test_constants,
BlockCache({}, {}),
None,
slot_offset * peak.sub_slot_iters,
uint8(i),
finished_sub_slots[:slot_offset],
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), blockchain, None, peak.sub_slot_iters, sp)
# Test adding signage points after genesis
blocks_4 = custom_block_tools.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
blocks_5 = custom_block_tools.get_consecutive_blocks(
1,
block_list_input=blocks_4,
skip_slots=1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
# If this is not the case, fix test to find a block that is
assert (
blocks_4[-1].reward_chain_block.signage_point_index
< test_constants.NUM_SPS_SUB_SLOT - test_constants.NUM_SP_INTERVALS_EXTRA
)
await _validate_and_add_block(blockchain, blocks_4[-1], expected_result=ReceiveBlockResult.ADDED_AS_ORPHAN)
sb = blockchain.block_record(blocks_4[-1].header_hash)
store.new_peak(sb, blocks_4[-1], None, None, None, blockchain)
for i in range(
sb.signage_point_index + test_constants.NUM_SP_INTERVALS_EXTRA,
test_constants.NUM_SPS_SUB_SLOT,
):
if is_overflow_block(test_constants, uint8(i)):
finished_sub_slots = blocks_5[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
sb,
uint128(0),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
assert store.new_signage_point(uint8(i), empty_blockchain, sb, peak.sub_slot_iters, sp)
# Test future EOS cache
store.initialize_genesis_sub_slot()
blocks = custom_block_tools.get_consecutive_blocks(
1,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await _validate_and_add_block_no_error(blockchain, blocks[-1])
while True:
blocks = custom_block_tools.get_consecutive_blocks(
1,
block_list_input=blocks,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
)
await _validate_and_add_block_no_error(blockchain, blocks[-1])
sb = blockchain.block_record(blocks[-1].header_hash)
if sb.first_in_sub_slot:
break
assert len(blocks) >= 2
dependant_sub_slots = blocks[-1].finished_sub_slots
peak = blockchain.get_peak()
peak_full_block = await blockchain.get_full_peak()
for block in blocks[:-2]:
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
peak = sb
peak_full_block = block
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
assert store.new_finished_sub_slot(dependant_sub_slots[0], blockchain, peak, peak_full_block) is None
block = blocks[-2]
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos == dependant_sub_slots[0]
assert res.new_signage_points == res.new_infusion_points == []
# Test future IP cache
store.initialize_genesis_sub_slot()
blocks = custom_block_tools.get_consecutive_blocks(
60,
normalized_to_identity_cc_ip=normalized_to_identity,
normalized_to_identity_cc_sp=normalized_to_identity,
normalized_to_identity_cc_eos=normalized_to_identity,
normalized_to_identity_icc_eos=normalized_to_identity,
)
for block in blocks[:5]:
await _validate_and_add_block_no_error(blockchain, block)
sb = blockchain.block_record(block.header_hash)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(block.header_hash)
res = store.new_peak(sb, block, sp_sub_slot, ip_sub_slot, None, blockchain)
assert res.added_eos is None
case_0, case_1 = False, False
for i in range(5, len(blocks) - 1):
prev_block = blocks[i]
block = blocks[i + 1]
new_ip = NewInfusionPointVDF(
block.reward_chain_block.get_unfinished().get_hash(),
block.reward_chain_block.challenge_chain_ip_vdf,
block.challenge_chain_ip_proof,
block.reward_chain_block.reward_chain_ip_vdf,
block.reward_chain_ip_proof,
block.reward_chain_block.infused_challenge_chain_ip_vdf,
block.infused_challenge_chain_ip_proof,
)
store.add_to_future_ip(new_ip)
await _validate_and_add_block_no_error(blockchain, prev_block)
sp_sub_slot, ip_sub_slot = await blockchain.get_sp_and_ip_sub_slots(prev_block.header_hash)
sb = blockchain.block_record(prev_block.header_hash)
res = store.new_peak(sb, prev_block, sp_sub_slot, ip_sub_slot, None, blockchain)
if len(block.finished_sub_slots) == 0:
case_0 = True
assert res.new_infusion_points == [new_ip]
else:
case_1 = True
assert res.new_infusion_points == []
found_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ss in block.finished_sub_slots:
ipvdf = store.new_finished_sub_slot(ss, blockchain, sb, prev_block)
assert ipvdf is not None
found_ips += ipvdf
assert found_ips == [new_ip]
# If flaky, increase the number of blocks created
assert case_0 and case_1
# Try to get two blocks in the same slot, such that we have
# SP, B2 SP .... SP B1
# i2 ......... i1
# Then do a reorg up to B2, removing all signage points after B2, but not before
log.warning(f"Adding blocks up to {blocks[-1]}")
for block in blocks:
await _validate_and_add_block_no_error(blockchain, block)
log.warning("Starting loop")
while True:
log.warning("Looping")
blocks = custom_block_tools.get_consecutive_blocks(1, block_list_input=blocks, skip_slots=1)
await _validate_and_add_block_no_error(blockchain, blocks[-1])
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-1], sub_slots[0], sub_slots[1], None, blockchain)
blocks = custom_block_tools.get_consecutive_blocks(
2, block_list_input=blocks, guarantee_transaction_block=True
)
i3 = blocks[-3].reward_chain_block.signage_point_index
i2 = blocks[-2].reward_chain_block.signage_point_index
i1 = blocks[-1].reward_chain_block.signage_point_index
if (
len(blocks[-2].finished_sub_slots) == len(blocks[-1].finished_sub_slots) == 0
and not is_overflow_block(test_constants, signage_point_index=i2)
and not is_overflow_block(test_constants, signage_point_index=i1)
and i2 > i3 + 3
and i1 > (i2 + 3)
):
# We hit all the conditions that we want
all_sps: List[Optional[SignagePoint]] = [None] * test_constants.NUM_SPS_SUB_SLOT
def assert_sp_none(sp_index: int, is_none: bool):
sp_to_check: Optional[SignagePoint] = all_sps[sp_index]
assert sp_to_check is not None
assert sp_to_check.cc_vdf is not None
fetched = store.get_signage_point(sp_to_check.cc_vdf.output.get_hash())
assert (fetched is None) == is_none
if fetched is not None:
assert fetched == sp_to_check
for i in range(i3 + 1, test_constants.NUM_SPS_SUB_SLOT - 3):
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(custom_block_tools.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
# Adding a new peak clears all SPs after that peak
await _validate_and_add_block_no_error(blockchain, blocks[-2])
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
store.new_peak(peak, blocks[-2], sub_slots[0], sub_slots[1], None, blockchain)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
assert_sp_none(i1 + 1, True)
assert_sp_none(i1 + 4, True)
for i in range(i2, test_constants.NUM_SPS_SUB_SLOT):
if is_overflow_block(test_constants, uint8(i)):
blocks_alt = custom_block_tools.get_consecutive_blocks(
1, block_list_input=blocks[:-1], skip_slots=1
)
finished_sub_slots = blocks_alt[-1].finished_sub_slots
else:
finished_sub_slots = []
sp = get_signage_point(
test_constants,
blockchain,
peak,
uint128(peak.ip_sub_slot_total_iters(custom_block_tools.constants)),
uint8(i),
finished_sub_slots,
peak.sub_slot_iters,
)
all_sps[i] = sp
assert store.new_signage_point(uint8(i), blockchain, peak, peak.sub_slot_iters, sp)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, False)
assert_sp_none(i1 + 1, False)
assert_sp_none(i1 + 4, False)
await _validate_and_add_block_no_error(blockchain, blocks[-1])
peak = blockchain.get_peak()
sub_slots = await blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
# Do a reorg, which should remove everything after B2
store.new_peak(
peak,
blocks[-1],
sub_slots[0],
sub_slots[1],
(await blockchain.get_block_records_at([blocks[-2].height]))[0],
blockchain,
)
assert_sp_none(i2, False)
assert_sp_none(i2 + 1, False)
assert_sp_none(i1, True)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/test_coin_store.py | tests/core/full_node/stores/test_coin_store.py | import logging
from typing import List, Optional, Set, Tuple
import pytest
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.consensus.blockchain import Blockchain, ReceiveBlockResult
from flax.consensus.coinbase import create_farmer_coin, create_pool_coin
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.types.blockchain_format.coin import Coin
from flax.types.coin_record import CoinRecord
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.util.generator_tools import tx_removals_and_additions
from flax.util.hash import std_hash
from flax.util.ints import uint64, uint32
from tests.blockchain.blockchain_test_utils import _validate_and_add_block
from flax.simulator.wallet_tools import WalletTool
from tests.setup_nodes import test_constants
from flax.types.blockchain_format.sized_bytes import bytes32
from tests.util.db_connection import DBConnection
# Consensus constants shared by every test in this module.
constants = test_constants
# A single funded test wallet reused across tests (creating one is expensive).
WALLET_A = WalletTool(constants)

log = logging.getLogger(__name__)
def get_future_reward_coins(block: FullBlock) -> Tuple[Coin, Coin]:
    """Return the (pool_coin, farmer_coin) pair that ``block`` pays out.

    For transaction blocks the farmer additionally collects the block fees,
    so the base farmer reward is topped up with ``transactions_info.fees``.
    Note the return order: pool coin first, farmer coin second.
    """
    reward_for_pool = calculate_pool_reward(block.height)
    reward_for_farmer = calculate_base_farmer_reward(block.height)
    if block.is_transaction_block():
        # Fees only exist on transaction blocks; fold them into the farmer payout.
        assert block.transactions_info is not None
        reward_for_farmer = uint64(reward_for_farmer + block.transactions_info.fees)
    future_pool_coin: Coin = create_pool_coin(
        block.height,
        block.foliage.foliage_block_data.pool_target.puzzle_hash,
        reward_for_pool,
        constants.GENESIS_CHALLENGE,
    )
    future_farmer_coin: Coin = create_farmer_coin(
        block.height,
        block.foliage.foliage_block_data.farmer_reward_puzzle_hash,
        reward_for_farmer,
        constants.GENESIS_CHALLENGE,
    )
    return future_pool_coin, future_farmer_coin
class TestCoinStoreWithBlocks:
    """CoinStore integration tests that drive the store with freshly farmed blocks."""

    @pytest.mark.asyncio
    async def test_basic_coin_store(self, db_version, bt):
        """Feed 20 blocks (one containing a spend bundle) into a CoinStore and
        verify reward coins, tx additions and tx removals are all recorded.
        """
        wallet_a = WALLET_A
        reward_ph = wallet_a.get_new_puzzlehash()

        # Generate some coins
        blocks = bt.get_consecutive_blocks(
            10,
            [],
            farmer_reward_puzzle_hash=reward_ph,
            pool_reward_puzzle_hash=reward_ph,
        )

        coins_to_spend: List[Coin] = []
        for block in blocks:
            if block.is_transaction_block():
                for coin in block.get_included_reward_coins():
                    if coin.puzzle_hash == reward_ph:
                        coins_to_spend.append(coin)

        spend_bundle = wallet_a.generate_signed_transaction(
            uint64(1000), wallet_a.get_new_puzzlehash(), coins_to_spend[0]
        )

        async with DBConnection(db_version) as db_wrapper:
            coin_store = await CoinStore.create(db_wrapper)

            blocks = bt.get_consecutive_blocks(
                10,
                blocks,
                farmer_reward_puzzle_hash=reward_ph,
                pool_reward_puzzle_hash=reward_ph,
                transaction_data=spend_bundle,
            )

            # Adding blocks to the coin store
            should_be_included_prev: Set[Coin] = set()
            should_be_included: Set[Coin] = set()
            for block in blocks:
                # FIX: get_future_reward_coins returns (pool, farmer) — the old
                # code unpacked as (farmer, pool). Harmless here (both only go
                # into a set) but the names were misleading.
                pool_coin, farmer_coin = get_future_reward_coins(block)
                should_be_included.add(farmer_coin)
                should_be_included.add(pool_coin)
                if block.is_transaction_block():
                    if block.transactions_generator is not None:
                        block_gen: BlockGenerator = BlockGenerator(block.transactions_generator, [], [])
                        npc_result = get_name_puzzle_conditions(
                            block_gen,
                            bt.constants.MAX_BLOCK_COST_CLVM,
                            cost_per_byte=bt.constants.COST_PER_BYTE,
                            mempool_mode=False,
                        )
                        tx_removals, tx_additions = tx_removals_and_additions(npc_result.conds)
                    else:
                        tx_removals, tx_additions = [], []

                    # Rewards predicted from the previous transaction block must
                    # be exactly what this block includes.
                    assert block.get_included_reward_coins() == should_be_included_prev

                    assert block.foliage_transaction_block is not None
                    await coin_store.new_block(
                        block.height,
                        block.foliage_transaction_block.timestamp,
                        block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )

                    if block.height != 0:
                        # Inserting the same block twice must be rejected.
                        with pytest.raises(Exception):
                            await coin_store.new_block(
                                block.height,
                                block.foliage_transaction_block.timestamp,
                                block.get_included_reward_coins(),
                                tx_additions,
                                tx_removals,
                            )

                    all_records = set()
                    for expected_coin in should_be_included_prev:
                        # Check that the coinbase rewards are added
                        record = await coin_store.get_coin_record(expected_coin.name())
                        assert record is not None
                        assert not record.spent
                        assert record.coin == expected_coin
                        all_records.add(record)
                    for coin_name in tx_removals:
                        # Check that the removed coins are set to spent
                        record = await coin_store.get_coin_record(coin_name)
                        assert record.spent
                        all_records.add(record)
                    for coin in tx_additions:
                        # Check that the added coins are added
                        record = await coin_store.get_coin_record(coin.name())
                        assert not record.spent
                        assert coin == record.coin
                        all_records.add(record)

                    # Batch fetch must return exactly the union of the three sets.
                    db_records = await coin_store.get_coin_records(
                        [c.name() for c in list(should_be_included_prev) + tx_additions] + tx_removals
                    )
                    assert len(db_records) == len(should_be_included_prev) + len(tx_removals) + len(tx_additions)
                    assert len(db_records) == len(all_records)
                    for record in db_records:
                        assert record in all_records

                    should_be_included_prev = should_be_included.copy()
                    should_be_included = set()

    @pytest.mark.asyncio
    async def test_set_spent(self, db_version, bt):
        """Mark reward coins spent and verify double-spending raises."""
        blocks = bt.get_consecutive_blocks(9, [])

        async with DBConnection(db_version) as db_wrapper:
            coin_store = await CoinStore.create(db_wrapper)

            # Save/get block
            for block in blocks:
                if block.is_transaction_block():
                    removals: List[bytes32] = []
                    additions: List[Coin] = []
                    async with db_wrapper.writer():
                        # Note: redundant inner is_transaction_block() check removed;
                        # the enclosing branch already guarantees it.
                        assert block.foliage_transaction_block is not None
                        await coin_store.new_block(
                            block.height,
                            block.foliage_transaction_block.timestamp,
                            block.get_included_reward_coins(),
                            additions,
                            removals,
                        )

                        coins = block.get_included_reward_coins()
                        records = [await coin_store.get_coin_record(coin.name()) for coin in coins]

                        await coin_store._set_spent([r.name for r in records], block.height)

                        if len(records) > 0:
                            for r in records:
                                assert (await coin_store.get_coin_record(r.name)) is not None

                            # Check that we can't spend a coin twice in DB
                            with pytest.raises(ValueError, match="Invalid operation to set spent"):
                                await coin_store._set_spent([r.name for r in records], block.height)

                        records = [await coin_store.get_coin_record(coin.name()) for coin in coins]
                        for record in records:
                            assert record.spent
                            assert record.spent_block_index == block.height

    @pytest.mark.asyncio
    async def test_num_unspent(self, bt, db_version):
        """num_unspent() must track the running count of unspent reward coins."""
        blocks = bt.get_consecutive_blocks(37, [])

        expect_unspent = 0
        # FIX: renamed from the misspelled `test_excercised`.
        test_exercised = False

        async with DBConnection(db_version) as db_wrapper:
            coin_store = await CoinStore.create(db_wrapper)
            for block in blocks:
                if not block.is_transaction_block():
                    continue

                assert block.foliage_transaction_block is not None
                removals: List[bytes32] = []
                additions: List[Coin] = []
                await coin_store.new_block(
                    block.height,
                    block.foliage_transaction_block.timestamp,
                    block.get_included_reward_coins(),
                    additions,
                    removals,
                )
                expect_unspent += len(block.get_included_reward_coins())
                assert await coin_store.num_unspent() == expect_unspent
                test_exercised = expect_unspent > 0

        # Guard against the loop silently doing nothing (e.g. no tx blocks).
        assert test_exercised

    @pytest.mark.asyncio
    async def test_rollback(self, db_version, bt):
        """Roll back to the height of a selected coin and verify the rollback
        reverts later creations/spends but keeps the selected coin unspent.
        """
        blocks = bt.get_consecutive_blocks(20)

        async with DBConnection(db_version) as db_wrapper:
            coin_store = await CoinStore.create(db_wrapper)

            selected_coin: Optional[CoinRecord] = None
            all_coins: List[Coin] = []

            for block in blocks:
                all_coins += list(block.get_included_reward_coins())
                if block.is_transaction_block():
                    removals: List[bytes32] = []
                    additions: List[Coin] = []
                    assert block.foliage_transaction_block is not None
                    await coin_store.new_block(
                        block.height,
                        block.foliage_transaction_block.timestamp,
                        block.get_included_reward_coins(),
                        additions,
                        removals,
                    )
                    coins = list(block.get_included_reward_coins())
                    records: List[CoinRecord] = [await coin_store.get_coin_record(coin.name()) for coin in coins]

                    spend_selected_coin = selected_coin is not None
                    if block.height != 0 and selected_coin is None:
                        # Select the first CoinRecord which will be spent at the next transaction block.
                        selected_coin = records[0]
                        await coin_store._set_spent([r.name for r in records[1:]], block.height)
                    else:
                        await coin_store._set_spent([r.name for r in records], block.height)

                    if spend_selected_coin:
                        assert selected_coin is not None
                        await coin_store._set_spent([selected_coin.name], block.height)

                    records = [await coin_store.get_coin_record(coin.name()) for coin in coins]  # update coin records
                    for record in records:
                        assert record is not None
                        if (
                            selected_coin is not None
                            and selected_coin.name == record.name
                            and not selected_coin.confirmed_block_index < block.height
                        ):
                            # The selected coin is spent only in a *later* block.
                            assert not record.spent
                        else:
                            assert record.spent
                            assert record.spent_block_index == block.height

                    if spend_selected_coin:
                        break

            assert selected_coin is not None
            reorg_index = selected_coin.confirmed_block_index

            # Get all CoinRecords.
            all_records: List[CoinRecord] = [await coin_store.get_coin_record(coin.name()) for coin in all_coins]

            # The reorg will revert the creation and spend of many coins. It will also revert the spend (but not the
            # creation) of the selected coin.
            changed_records = await coin_store.rollback_to_block(reorg_index)
            changed_coin_records = [cr.coin for cr in changed_records]
            assert selected_coin in changed_records
            for coin_record in all_records:
                if coin_record.confirmed_block_index > reorg_index:
                    assert coin_record.coin in changed_coin_records
                if coin_record.spent_block_index > reorg_index:
                    assert coin_record.coin in changed_coin_records

            for block in blocks:
                if block.is_transaction_block():
                    coins = block.get_included_reward_coins()
                    records = [await coin_store.get_coin_record(coin.name()) for coin in coins]

                    if block.height <= reorg_index:
                        # Everything at or below the rollback height survives;
                        # only the selected coin's spend was reverted.
                        for record in records:
                            assert record is not None
                            assert record.spent == (record.name != selected_coin.name)
                    else:
                        # Coins created above the rollback height are gone.
                        for record in records:
                            assert record is None

    @pytest.mark.asyncio
    async def test_basic_reorg(self, tmp_dir, db_version, bt):
        """Reorg the chain 10 blocks back and verify the coin store follows."""
        async with DBConnection(db_version) as db_wrapper:
            initial_block_count = 30
            reorg_length = 15
            blocks = bt.get_consecutive_blocks(initial_block_count)
            coin_store = await CoinStore.create(db_wrapper)
            store = await BlockStore.create(db_wrapper)
            b: Blockchain = await Blockchain.create(coin_store, store, test_constants, tmp_dir, 2)
            try:
                records: List[Optional[CoinRecord]] = []

                for block in blocks:
                    await _validate_and_add_block(b, block)
                peak = b.get_peak()
                assert peak is not None
                assert peak.height == initial_block_count - 1

                for c, block in enumerate(blocks):
                    if block.is_transaction_block():
                        coins = block.get_included_reward_coins()
                        records = [await coin_store.get_coin_record(coin.name()) for coin in coins]
                        for record in records:
                            assert record is not None
                            assert not record.spent
                            assert record.confirmed_block_index == block.height
                            assert record.spent_block_index == 0

                # Fork off 10 blocks below the tip with a different seed.
                blocks_reorg_chain = bt.get_consecutive_blocks(
                    reorg_length, blocks[: initial_block_count - 10], seed=b"2"
                )

                for reorg_block in blocks_reorg_chain:
                    if reorg_block.height < initial_block_count - 10:
                        await _validate_and_add_block(
                            b, reorg_block, expected_result=ReceiveBlockResult.ALREADY_HAVE_BLOCK
                        )
                    elif reorg_block.height < initial_block_count:
                        await _validate_and_add_block(
                            b, reorg_block, expected_result=ReceiveBlockResult.ADDED_AS_ORPHAN
                        )
                    elif reorg_block.height >= initial_block_count:
                        await _validate_and_add_block(b, reorg_block, expected_result=ReceiveBlockResult.NEW_PEAK)
                        if reorg_block.is_transaction_block():
                            coins = reorg_block.get_included_reward_coins()
                            records = [await coin_store.get_coin_record(coin.name()) for coin in coins]
                            for record in records:
                                assert record is not None
                                assert not record.spent
                                assert record.confirmed_block_index == reorg_block.height
                                assert record.spent_block_index == 0

                peak = b.get_peak()
                assert peak is not None
                assert peak.height == initial_block_count - 10 + reorg_length - 1
            finally:
                b.shut_down()

    @pytest.mark.asyncio
    async def test_get_puzzle_hash(self, tmp_dir, db_version, bt):
        """Look up reward coins by farmer and pool puzzle hashes."""
        async with DBConnection(db_version) as db_wrapper:
            num_blocks = 20
            farmer_ph = bytes32(32 * b"0")
            pool_ph = bytes32(32 * b"1")
            blocks = bt.get_consecutive_blocks(
                num_blocks,
                farmer_reward_puzzle_hash=farmer_ph,
                pool_reward_puzzle_hash=pool_ph,
                guarantee_transaction_block=True,
            )
            coin_store = await CoinStore.create(db_wrapper)
            store = await BlockStore.create(db_wrapper)
            b: Blockchain = await Blockchain.create(coin_store, store, test_constants, tmp_dir, 2)
            for block in blocks:
                await _validate_and_add_block(b, block)
            peak = b.get_peak()
            assert peak is not None
            assert peak.height == num_blocks - 1

            # FIX: the arguments were swapped relative to the variable names
            # (coins_farmer was fetched with pool_ph and vice versa). The
            # assertions are symmetric, so behavior is unchanged, but the
            # names now match what is queried.
            coins_farmer = await coin_store.get_coin_records_by_puzzle_hash(True, farmer_ph)
            coins_pool = await coin_store.get_coin_records_by_puzzle_hash(True, pool_ph)

            # The two blocks past the peak's last tx block have no rewards yet.
            assert len(coins_farmer) == num_blocks - 2
            assert len(coins_pool) == num_blocks - 2
            b.shut_down()

    @pytest.mark.asyncio
    async def test_get_coin_states(self, tmp_dir, db_version):
        """Exercise coin-state queries by puzzle hash and by coin id, with
        height filtering and include-spent toggling.
        """
        async with DBConnection(db_version) as db_wrapper:
            # 300 coins under puzzle hash std_hash(b"2") ...
            crs = [
                CoinRecord(
                    Coin(std_hash(i.to_bytes(4, byteorder="big")), std_hash(b"2"), uint64(100)),
                    uint32(i),
                    uint32(2 * i),
                    False,
                    uint64(12321312),
                )
                for i in range(1, 301)
            ]
            # ... plus 300 more under std_hash(b"3") with distinct parent ids.
            crs += [
                CoinRecord(
                    Coin(std_hash(b"X" + i.to_bytes(4, byteorder="big")), std_hash(b"3"), uint64(100)),
                    uint32(i),
                    uint32(2 * i),
                    False,
                    uint64(12321312),
                )
                for i in range(1, 301)
            ]
            coin_store = await CoinStore.create(db_wrapper)
            await coin_store._add_coin_records(crs)

            assert len(await coin_store.get_coin_states_by_puzzle_hashes(True, [std_hash(b"2")], 0)) == 300
            assert len(await coin_store.get_coin_states_by_puzzle_hashes(False, [std_hash(b"2")], 0)) == 0
            assert len(await coin_store.get_coin_states_by_puzzle_hashes(True, [std_hash(b"2")], 300)) == 151
            assert len(await coin_store.get_coin_states_by_puzzle_hashes(True, [std_hash(b"2")], 603)) == 0
            assert len(await coin_store.get_coin_states_by_puzzle_hashes(True, [std_hash(b"1")], 0)) == 0

            coins = [cr.coin.name() for cr in crs]
            # Hashing the ids again yields ids that are not in the store.
            bad_coins = [std_hash(cr.coin.name()) for cr in crs]
            assert len(await coin_store.get_coin_states_by_ids(True, coins, 0)) == 600
            assert len(await coin_store.get_coin_states_by_ids(False, coins, 0)) == 0
            assert len(await coin_store.get_coin_states_by_ids(True, coins, 300)) == 302
            assert len(await coin_store.get_coin_states_by_ids(True, coins, 603)) == 0
            assert len(await coin_store.get_coin_states_by_ids(True, bad_coins, 0)) == 0
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/test_block_store.py | tests/core/full_node/stores/test_block_store.py | import asyncio
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.full_block import FullBlock
import logging
import random
import sqlite3
import dataclasses
import pytest
from clvm.casts import int_to_bytes
from flax.consensus.blockchain import Blockchain
from flax.consensus.full_block_to_block_record import header_block_to_sub_block_record
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.util.ints import uint8
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.blockchain_format.program import SerializedProgram
from tests.blockchain.blockchain_test_utils import _validate_and_add_block
from tests.util.db_connection import DBConnection
from tests.setup_nodes import test_constants
log = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_block_store(tmp_dir, db_version, bt):
    """Exercise BlockStore round-trips: add/fetch full blocks and block
    records, idempotent re-insertion, peak setting, and range/hash queries.

    A second database hosts a real Blockchain so that valid BlockRecords can
    be produced for each farmed block.
    """
    # NOTE(review): threadsafety == 1 holds for the default CPython sqlite3
    # build this project targets; newer builds may report 3 — confirm.
    assert sqlite3.threadsafety == 1
    blocks = bt.get_consecutive_blocks(10)

    async with DBConnection(db_version) as db_wrapper, DBConnection(db_version) as db_wrapper_2:
        # Use a different file for the blockchain
        coin_store_2 = await CoinStore.create(db_wrapper_2)
        store_2 = await BlockStore.create(db_wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants, tmp_dir, 2)

        store = await BlockStore.create(db_wrapper)
        await BlockStore.create(db_wrapper_2)

        # Save/get block
        for block in blocks:
            await _validate_and_add_block(bc, block)
            block_record = bc.block_record(block.header_hash)
            block_record_hh = block_record.header_hash
            # Insert twice on purpose: add_full_block must be idempotent.
            await store.add_full_block(block.header_hash, block, block_record)
            await store.add_full_block(block.header_hash, block, block_record)
            # Fetch twice on purpose: repeated reads must return the same block.
            assert block == await store.get_full_block(block.header_hash)
            assert block == await store.get_full_block(block.header_hash)
            assert block_record == (await store.get_block_record(block_record_hh))
            await store.set_in_chain([(block_record.header_hash,)])
            # set_peak is also exercised for idempotency.
            await store.set_peak(block_record.header_hash)
            await store.set_peak(block_record.header_hash)

        # Height-based lookups: existing heights return one block, missing none.
        assert len(await store.get_full_blocks_at([1])) == 1
        assert len(await store.get_full_blocks_at([0])) == 1
        assert len(await store.get_full_blocks_at([100])) == 0

        # get_block_records_in_range
        block_record_records = await store.get_block_records_in_range(0, 0xFFFFFFFF)
        assert len(block_record_records) == len(blocks)
        for b in blocks:
            assert block_record_records[b.header_hash].header_hash == b.header_hash

        # get_block_records_by_hash: empty input, single hash, all hashes
        # (results must preserve input order).
        block_records = await store.get_block_records_by_hash([])
        assert block_records == []

        block_records = await store.get_block_records_by_hash([blocks[0].header_hash])
        assert len(block_records) == 1
        assert block_records[0].header_hash == blocks[0].header_hash

        block_records = await store.get_block_records_by_hash([b.header_hash for b in blocks])
        assert len(block_records) == len(blocks)
        for br, b in zip(block_records, blocks):
            assert br.header_hash == b.header_hash
@pytest.mark.asyncio
async def test_deadlock(tmp_dir, db_version, bt):
    """
    This test was added because the store was deadlocking in certain situations, when fetching and
    adding blocks repeatedly. The issue was patched.
    """
    blocks = bt.get_consecutive_blocks(10)
    async with DBConnection(db_version) as wrapper, DBConnection(db_version) as wrapper_2:
        # `store` is the store under test; the second DB only hosts the blockchain
        # used to produce valid block records for the inserts below.
        store = await BlockStore.create(wrapper)
        coin_store_2 = await CoinStore.create(wrapper_2)
        store_2 = await BlockStore.create(wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants, tmp_dir, 2)
        block_records = []
        for block in blocks:
            await _validate_and_add_block(bc, block)
            block_records.append(bc.block_record(block.header_hash))
        # Fire off a random interleaving of concurrent writes and reads against
        # the same store; randomness is acceptable here since any schedule must
        # complete without deadlocking.
        tasks = []
        for i in range(10000):
            rand_i = random.randint(0, 9)
            if random.random() < 0.5:
                tasks.append(
                    asyncio.create_task(
                        store.add_full_block(blocks[rand_i].header_hash, blocks[rand_i], block_records[rand_i])
                    )
                )
            if random.random() < 0.5:
                tasks.append(asyncio.create_task(store.get_full_block(blocks[rand_i].header_hash)))
        # If the store deadlocks, this gather hangs and the test times out.
        await asyncio.gather(*tasks)
@pytest.mark.asyncio
async def test_rollback(bt, tmp_dir):
    """Rolling back the block store clears `in_main_chain` above the rollback height.

    Inserts 10 blocks, verifies each is marked in_main_chain, then calls
    ``rollback(5)`` and checks that only blocks at height <= 5 remain flagged
    as part of the main chain.

    Fix: removed a leftover debug ``print(count, rows)`` from the verification loop.
    """
    blocks = bt.get_consecutive_blocks(10)
    async with DBConnection(2) as db_wrapper:
        # Use a different file for the blockchain
        coin_store = await CoinStore.create(db_wrapper)
        block_store = await BlockStore.create(db_wrapper)
        bc = await Blockchain.create(coin_store, block_store, test_constants, tmp_dir, 2)

        # insert all blocks
        count = 0
        for block in blocks:
            await _validate_and_add_block(bc, block)
            count += 1
            ret = await block_store.get_random_not_compactified(count)
            assert len(ret) == count
            # make sure all block heights are unique
            assert len(set(ret)) == count

        # every inserted block starts out marked as being in the main chain
        async with db_wrapper.reader_no_transaction() as conn:
            for block in blocks:
                async with conn.execute(
                    "SELECT in_main_chain FROM full_blocks WHERE header_hash=?", (block.header_hash,)
                ) as cursor:
                    rows = await cursor.fetchall()
                    assert len(rows) == 1
                    assert rows[0][0]

        await block_store.rollback(5)

        # after the rollback only heights 0..5 stay in the main chain
        count = 0
        async with db_wrapper.reader_no_transaction() as conn:
            for block in blocks:
                async with conn.execute(
                    "SELECT in_main_chain FROM full_blocks WHERE header_hash=? ORDER BY height",
                    (block.header_hash,),
                ) as cursor:
                    rows = await cursor.fetchall()
                    assert len(rows) == 1
                    assert rows[0][0] == (count <= 5)
                count += 1
@pytest.mark.asyncio
async def test_count_compactified_blocks(bt, tmp_dir, db_version):
    """A fresh store reports zero compactified blocks, before and after inserting blocks."""
    chain = bt.get_consecutive_blocks(10)
    async with DBConnection(db_version) as wrapper:
        coins = await CoinStore.create(wrapper)
        store = await BlockStore.create(wrapper)
        blockchain = await Blockchain.create(coins, store, test_constants, tmp_dir, 2)

        # nothing has been stored yet
        assert await store.count_compactified_blocks() == 0

        for b in chain:
            await _validate_and_add_block(blockchain, b)

        # newly added blocks are never compactified
        assert await store.count_compactified_blocks() == 0
@pytest.mark.asyncio
async def test_count_uncompactified_blocks(bt, tmp_dir, db_version):
    """Every freshly inserted block counts as uncompactified."""
    chain = bt.get_consecutive_blocks(10)
    async with DBConnection(db_version) as wrapper:
        coins = await CoinStore.create(wrapper)
        store = await BlockStore.create(wrapper)
        blockchain = await Blockchain.create(coins, store, test_constants, tmp_dir, 2)

        # empty store: no uncompactified blocks yet
        assert await store.count_uncompactified_blocks() == 0

        for b in chain:
            await _validate_and_add_block(blockchain, b)

        # all ten inserted blocks are uncompactified
        assert await store.count_uncompactified_blocks() == 10
@pytest.mark.asyncio
async def test_replace_proof(bt, tmp_dir, db_version):
    """replace_proof() persists a new challenge_chain_ip_proof, surviving a cache flush."""
    blocks = bt.get_consecutive_blocks(10)

    def rand_bytes(num) -> bytes:
        # num random bytes (test-only randomness, `random` module is fine here)
        ret = bytearray(num)
        for i in range(num):
            ret[i] = random.getrandbits(8)
        return bytes(ret)

    def rand_vdf_proof() -> VDFProof:
        return VDFProof(
            uint8(1),  # witness_type
            rand_bytes(32),  # witness
            bool(random.randint(0, 1)),  # normalized_to_identity
        )

    async with DBConnection(db_version) as db_wrapper:
        coin_store = await CoinStore.create(db_wrapper)
        block_store = await BlockStore.create(db_wrapper)
        bc = await Blockchain.create(coin_store, block_store, test_constants, tmp_dir, 2)
        for block in blocks:
            await _validate_and_add_block(bc, block)

        # replace each block's IP proof with a fresh random one
        replaced = []
        for block in blocks:
            assert block.challenge_chain_ip_proof is not None
            proof = rand_vdf_proof()
            replaced.append(proof)
            new_block = dataclasses.replace(block, challenge_chain_ip_proof=proof)
            await block_store.replace_proof(block.header_hash, new_block)

        for block, proof in zip(blocks, replaced):
            b = await block_store.get_full_block(block.header_hash)
            assert b.challenge_chain_ip_proof == proof
            # make sure we get the same result when we hit the database
            # itself (and not just the block cache)
            block_store.rollback_cache_block(block.header_hash)
            b = await block_store.get_full_block(block.header_hash)
            assert b.challenge_chain_ip_proof == proof
@pytest.mark.asyncio
async def test_get_generator(bt, db_version):
    """get_generator()/get_generators_at() return the transactions generator stored per block."""
    blocks = bt.get_consecutive_blocks(10)

    def generator(i: int) -> SerializedProgram:
        # deterministic, per-index generator payload so lookups are easy to verify
        return SerializedProgram.from_bytes(int_to_bytes(i))

    async with DBConnection(db_version) as db_wrapper:
        store = await BlockStore.create(db_wrapper)
        new_blocks = []
        for i, block in enumerate(blocks):
            block = dataclasses.replace(block, transactions_generator=generator(i))
            block_record = header_block_to_sub_block_record(
                DEFAULT_CONSTANTS, 0, block, 0, False, 0, max(0, block.height - 1), None
            )
            await store.add_full_block(block.header_hash, block, block_record)
            await store.set_in_chain([(block_record.header_hash,)])
            await store.set_peak(block_record.header_hash)
            new_blocks.append(block)

        if db_version == 2:
            # bulk lookup by height is only exercised on the v2 schema
            expected_generators = list(map(lambda x: x.transactions_generator, new_blocks[1:10]))
            generators = await store.get_generators_at(range(1, 10))
            assert generators == expected_generators

            # test out-of-order heights
            expected_generators = list(map(lambda x: x.transactions_generator, [new_blocks[i] for i in [4, 8, 3, 9]]))
            generators = await store.get_generators_at([4, 8, 3, 9])
            assert generators == expected_generators

            # unknown height raises
            with pytest.raises(KeyError):
                await store.get_generators_at([100])

        # single-block lookups by header hash work on both schema versions
        assert await store.get_generator(blocks[2].header_hash) == new_blocks[2].transactions_generator
        assert await store.get_generator(blocks[4].header_hash) == new_blocks[4].transactions_generator
        assert await store.get_generator(blocks[6].header_hash) == new_blocks[6].transactions_generator
        assert await store.get_generator(blocks[7].header_hash) == new_blocks[7].transactions_generator
@pytest.mark.asyncio
async def test_get_blocks_by_hash(tmp_dir, bt, db_version):
    """Blocks fetched by hash come back in request order; unknown hashes raise."""
    assert sqlite3.threadsafety == 1
    blocks = bt.get_consecutive_blocks(10)
    async with DBConnection(db_version) as db_wrapper, DBConnection(db_version) as db_wrapper_2:
        # Use a different file for the blockchain
        coin_store_2 = await CoinStore.create(db_wrapper_2)
        store_2 = await BlockStore.create(db_wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants, tmp_dir, 2)
        store = await BlockStore.create(db_wrapper)
        await BlockStore.create(db_wrapper_2)

        hashes = []
        # Save/get block
        for block in blocks:
            await _validate_and_add_block(bc, block)
            block_record = bc.block_record(block.header_hash)
            await store.add_full_block(block.header_hash, block, block_record)
            hashes.append(block.header_hash)

        full_blocks_by_hash = await store.get_blocks_by_hash(hashes)
        assert full_blocks_by_hash == blocks

        # raw serialized bytes round-trip back to the same blocks
        full_block_bytes_by_hash = await store.get_block_bytes_by_hash(hashes)
        assert [FullBlock.from_bytes(x) for x in full_block_bytes_by_hash] == blocks

        assert not await store.get_block_bytes_by_hash([])
        # a hash not present in the store is an error
        with pytest.raises(ValueError):
            await store.get_block_bytes_by_hash([bytes32.from_bytes(b"yolo" * 8)])
        # oversized request lists are rejected outright
        with pytest.raises(AssertionError):
            await store.get_block_bytes_by_hash([bytes32.from_bytes(b"yolo" * 8)] * 1000)
@pytest.mark.asyncio
async def test_get_block_bytes_in_range(tmp_dir, bt, db_version):
    """get_block_bytes_in_range() serves raw block bytes on v2; v1 schemas assert."""
    assert sqlite3.threadsafety == 1
    blocks = bt.get_consecutive_blocks(10)
    async with DBConnection(db_version) as db_wrapper_2:
        # Use a different file for the blockchain
        coin_store_2 = await CoinStore.create(db_wrapper_2)
        store_2 = await BlockStore.create(db_wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants, tmp_dir, 2)
        await BlockStore.create(db_wrapper_2)

        # Save/get block
        for block in blocks:
            await _validate_and_add_block(bc, block)

        if db_version < 2:
            # range queries are unsupported on the legacy schema
            with pytest.raises(AssertionError):
                await store_2.get_block_bytes_in_range(0, 9)
        else:
            full_blocks_by_height = await store_2.get_block_bytes_in_range(0, 9)
            assert full_blocks_by_height == [bytes(b) for b in blocks]

            # asking past the stored tip is an error
            with pytest.raises(ValueError):
                await store_2.get_block_bytes_in_range(0, 10)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/test_hint_store.py | tests/core/full_node/stores/test_hint_store.py | import logging
import pytest
from clvm.casts import int_to_bytes
from flax.full_node.hint_store import HintStore
from flax.protocols.full_node_protocol import RespondBlock
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint64
from tests.util.db_connection import DBConnection
from flax.simulator.wallet_tools import WalletTool
log = logging.getLogger(__name__)
class TestHintStore:
    """Tests for HintStore: hint -> coin-id mappings, duplicate handling, and hints from on-chain conditions."""

    @pytest.mark.asyncio
    async def test_basic_store(self, db_version):
        """Each hint returns every coin id added under it; unknown hints yield an empty list."""
        async with DBConnection(db_version) as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            hint_0 = 32 * b"\0"
            hint_1 = 32 * b"\1"
            not_existing_hint = 32 * b"\3"

            coin_id_0 = 32 * b"\4"
            coin_id_1 = 32 * b"\5"
            coin_id_2 = 32 * b"\6"

            hints = [(coin_id_0, hint_0), (coin_id_1, hint_0), (coin_id_2, hint_1)]
            await hint_store.add_hints(hints)

            coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
            assert coin_id_0 in coins_for_hint_0
            assert coin_id_1 in coins_for_hint_0

            coins_for_hint_1 = await hint_store.get_coin_ids(hint_1)
            assert coin_id_2 in coins_for_hint_1

            coins_for_non_hint = await hint_store.get_coin_ids(not_existing_hint)
            assert coins_for_non_hint == []

    @pytest.mark.asyncio
    async def test_duplicate_coins(self, db_version):
        """The same coin id may be registered under multiple hints."""
        async with DBConnection(db_version) as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            hint_0 = 32 * b"\0"
            hint_1 = 32 * b"\1"

            coin_id_0 = 32 * b"\4"

            hints = [(coin_id_0, hint_0), (coin_id_0, hint_1)]
            await hint_store.add_hints(hints)

            coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
            assert coin_id_0 in coins_for_hint_0

            coins_for_hint_1 = await hint_store.get_coin_ids(hint_1)
            assert coin_id_0 in coins_for_hint_1

    @pytest.mark.asyncio
    async def test_duplicate_hints(self, db_version):
        """One hint can map to several coin ids; an unused hint stays empty."""
        async with DBConnection(db_version) as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            hint_0 = 32 * b"\0"
            hint_1 = 32 * b"\1"

            coin_id_0 = 32 * b"\4"
            coin_id_1 = 32 * b"\5"

            hints = [(coin_id_0, hint_0), (coin_id_1, hint_0)]
            await hint_store.add_hints(hints)

            coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
            assert coin_id_0 in coins_for_hint_0
            assert coin_id_1 in coins_for_hint_0

            coins_for_hint_1 = await hint_store.get_coin_ids(hint_1)
            assert coins_for_hint_1 == []

    @pytest.mark.asyncio
    async def test_duplicates(self, db_version):
        """Inserting the identical (coin, hint) pair repeatedly is de-duplicated on v2 schemas only."""
        async with DBConnection(db_version) as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            hint_0 = 32 * b"\0"
            coin_id_0 = 32 * b"\4"

            # two batches, each containing the same pair twice -> 4 raw inserts
            for i in range(0, 2):
                hints = [(coin_id_0, hint_0), (coin_id_0, hint_0)]
                await hint_store.add_hints(hints)

            coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
            assert coin_id_0 in coins_for_hint_0

            async with db_wrapper.reader_no_transaction() as conn:
                cursor = await conn.execute("SELECT COUNT(*) FROM hints")
                rows = await cursor.fetchall()

            if db_wrapper.db_version == 2:
                # even though we inserted the pair multiple times, there's only one
                # entry in the DB
                assert rows[0][0] == 1
            else:
                # we get one copy for each duplicate
                assert rows[0][0] == 4

    @pytest.mark.asyncio
    async def test_hints_in_blockchain(self, wallet_nodes):  # noqa: F811
        """A CREATE_COIN condition carrying a hint ends up queryable via the node's hint store."""
        full_node_1, full_node_2, server_1, server_2, wallet_a, wallet_receiver, bt = wallet_nodes

        blocks = bt.get_consecutive_blocks(
            5,
            block_list_input=[],
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=bt.pool_ph,
            pool_reward_puzzle_hash=bt.pool_ph,
        )
        for block in blocks:
            await full_node_1.full_node.respond_block(RespondBlock(block), None)

        wt: WalletTool = bt.get_pool_wallet_tool()
        puzzle_hash = bytes32(32 * b"\0")
        amount = int_to_bytes(1)
        hint = bytes32(32 * b"\5")
        coin_spent = list(blocks[-1].get_included_reward_coins())[0]
        # third CREATE_COIN argument is the hint
        condition_dict = {
            ConditionOpcode.CREATE_COIN: [ConditionWithArgs(ConditionOpcode.CREATE_COIN, [puzzle_hash, amount, hint])]
        }
        tx: SpendBundle = wt.generate_signed_transaction(
            10,
            wt.get_new_puzzlehash(),
            coin_spent,
            condition_dic=condition_dict,
        )

        blocks = bt.get_consecutive_blocks(
            10, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx
        )
        for block in blocks[-10:]:
            await full_node_1.full_node.respond_block(RespondBlock(block), None)

        get_hint = await full_node_1.full_node.hint_store.get_coin_ids(hint)
        # the hinted coin created by the spend is found through the hint
        assert get_hint[0] == Coin(coin_spent.name(), puzzle_hash, uint64(1)).name()

    @pytest.mark.asyncio
    async def test_counts(self, db_version):
        """count_hints() reflects the number of stored (coin, hint) rows."""
        async with DBConnection(db_version) as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            count = await hint_store.count_hints()
            assert count == 0

            # Add some hint data then test again
            hint_0 = 32 * b"\0"
            hint_1 = 32 * b"\1"
            coin_id_0 = 32 * b"\4"
            coin_id_1 = 32 * b"\5"
            hints = [(coin_id_0, hint_0), (coin_id_1, hint_1)]
            await hint_store.add_hints(hints)
            count = await hint_store.count_hints()
            assert count == 2
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/test_sync_store.py | tests/core/full_node/stores/test_sync_store.py | from __future__ import annotations
import pytest
from flax.full_node.sync_store import SyncStore
from flax.util.hash import std_hash
class TestStore:
    """Tests for the in-memory SyncStore: sync mode, peak targets, and per-peer peak tracking."""

    @pytest.mark.asyncio
    async def test_basic_store(self):
        store = SyncStore()

        # Save/get sync
        for sync_mode in (False, True):
            store.set_sync_mode(sync_mode)
            assert sync_mode == store.get_sync_mode()

        # clear sync info
        await store.clear_sync_info()

        store.set_peak_target(std_hash(b"1"), 100)
        assert store.get_sync_target_hash() == std_hash(b"1")
        assert store.get_sync_target_height() == 100

        peer_ids = [std_hash(bytes([a])) for a in range(3)]

        # empty store: no peers and no heaviest peak yet
        assert store.get_peers_that_have_peak([]) == set()
        assert store.get_peers_that_have_peak([std_hash(b"block1")]) == set()
        assert store.get_heaviest_peak() is None
        assert len(store.get_peak_of_each_peer()) == 0

        # peer_has_block(header_hash, peer_id, weight, height, is_peak)
        store.peer_has_block(std_hash(b"block10"), peer_ids[0], 500, 10, True)
        store.peer_has_block(std_hash(b"block1"), peer_ids[0], 300, 1, False)
        store.peer_has_block(std_hash(b"block1"), peer_ids[1], 300, 1, True)
        store.peer_has_block(std_hash(b"block10"), peer_ids[2], 500, 10, False)
        store.peer_has_block(std_hash(b"block1"), peer_ids[2], 300, 1, False)

        # heaviest peak tuple is (header_hash, height, weight)
        assert store.get_heaviest_peak()[0] == std_hash(b"block10")
        assert store.get_heaviest_peak()[1] == 10
        assert store.get_heaviest_peak()[2] == 500

        # only blocks flagged is_peak=True count as a peer's peak
        assert len(store.get_peak_of_each_peer()) == 2
        store.peer_has_block(std_hash(b"block1"), peer_ids[2], 500, 1, True)
        assert len(store.get_peak_of_each_peer()) == 3
        assert store.get_peak_of_each_peer()[peer_ids[0]][2] == 500
        assert store.get_peak_of_each_peer()[peer_ids[1]][2] == 300
        assert store.get_peak_of_each_peer()[peer_ids[2]][2] == 500

        assert store.get_peers_that_have_peak([std_hash(b"block1")]) == set(peer_ids)
        assert store.get_peers_that_have_peak([std_hash(b"block10")]) == {peer_ids[0], peer_ids[2]}

        # disconnecting drops the peer's records and may lower the heaviest peak
        store.peer_disconnected(peer_ids[0])
        assert store.get_heaviest_peak()[2] == 500
        assert len(store.get_peak_of_each_peer()) == 2
        assert store.get_peers_that_have_peak([std_hash(b"block10")]) == {peer_ids[2]}
        store.peer_disconnected(peer_ids[2])
        assert store.get_heaviest_peak()[2] == 300
        store.peer_has_block(std_hash(b"block30"), peer_ids[0], 700, 30, True)
        assert store.get_peak_of_each_peer()[peer_ids[0]][2] == 700
        assert store.get_heaviest_peak()[2] == 700
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/config.py | tests/core/full_node/stores/config.py | # flake8: noqa: E501
from __future__ import annotations
# Per-directory settings consumed by the repository's test-runner tooling
# (compare tests/core/data_layer/config.py, where `parallel` is an int).
parallel = True  # tests in this directory may run in parallel
job_timeout = 40  # per-job timeout -- presumably minutes per the runner; TODO confirm units
check_resource_usage = True
checkout_blocks_and_plots = True  # presumably fetches the cached test blocks/plots -- confirm against tooling
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/stores/__init__.py | tests/core/full_node/stores/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/dos/config.py | tests/core/full_node/dos/config.py | from __future__ import annotations
# Per-directory test-runner settings for the DoS tests.
parallel = True  # tests in this directory may run in parallel
job_timeout = 60  # per-job timeout -- presumably minutes per the runner; TODO confirm units
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/dos/__init__.py | tests/core/full_node/dos/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/full_sync/config.py | tests/core/full_node/full_sync/config.py | from __future__ import annotations
# Per-directory test-runner settings for the full-sync tests.
job_timeout = 60  # per-job timeout -- presumably minutes per the runner; TODO confirm units
parallel = True  # tests in this directory may run in parallel
checkout_blocks_and_plots = True  # presumably fetches the cached test blocks/plots -- confirm against tooling
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/full_sync/test_full_sync.py | tests/core/full_node/full_sync/test_full_sync.py | # flake8: noqa: F811, F401
import asyncio
import logging
import time
from typing import List
from unittest.mock import MagicMock
import pytest
from flax.protocols import full_node_protocol
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.full_block import FullBlock
from flax.types.peer_info import PeerInfo
from flax.util.hash import std_hash
from flax.util.ints import uint16
from tests.core.node_height import node_height_exactly, node_height_between
from tests.setup_nodes import test_constants
from flax.simulator.time_out_assert import time_out_assert
log = logging.getLogger(__name__)
class TestFullSync:
    """End-to-end sync tests: long sync from genesis, fork-point sync, batch sync,
    backtrack sync, reorg handling, and sync behavior with bad peaks or bad
    sub-epoch summaries."""

    @pytest.mark.asyncio
    async def test_long_sync_from_zero(self, five_nodes, default_400_blocks, bt, self_hostname):
        """Fresh nodes long-sync to a peer's chain, then follow a deep reorg."""
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks = len(default_400_blocks)
        blocks: List[FullBlock] = default_400_blocks
        full_node_1, full_node_2, full_node_3, full_node_4, full_node_5 = five_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server
        server_4 = full_node_4.full_node.server
        server_5 = full_node_5.full_node.server

        # If this constant is changed, update the tests to use more blocks
        assert test_constants.WEIGHT_PROOF_RECENT_BLOCKS < 400

        # Syncs up less than recent blocks
        for block in blocks[: test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_2.full_node.on_connect
        )
        timeout_seconds = 250

        # The second node should eventually catch up to the first one
        await time_out_assert(
            timeout_seconds, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5 - 1
        )

        for block in blocks[
            test_constants.WEIGHT_PROOF_RECENT_BLOCKS - 5 : test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5
        ]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_3.full_node.on_connect
        )

        # Node 3 and Node 2 sync up to node 1
        await time_out_assert(
            timeout_seconds, node_height_exactly, True, full_node_2, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
        )
        await time_out_assert(
            timeout_seconds, node_height_exactly, True, full_node_3, test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 - 1
        )

        # drop all of node 1's connections before it extends its chain further
        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()
        for block in blocks[test_constants.WEIGHT_PROOF_RECENT_BLOCKS + 5 :]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # reconnect nodes 2-4 in a mesh so everyone can sync from each other
        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_2.full_node.on_connect
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_3.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_4.full_node.on_connect
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_3.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_3._port)), on_connect=full_node_4.full_node.on_connect
        )
        await server_4.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)), on_connect=full_node_4.full_node.on_connect
        )

        # All four nodes are synced
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_2, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_3, num_blocks - 1)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_4, num_blocks - 1)

        # Deep reorg, fall back from batch sync to long sync
        blocks_node_5 = bt.get_consecutive_blocks(60, block_list_input=blocks[:350], seed=b"node5")
        for block in blocks_node_5:
            await full_node_5.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await server_5.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), on_connect=full_node_5.full_node.on_connect
        )
        # 409 == 350 + 60 - 1: both nodes end on node 5's reorged chain
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_5, 409)
        await time_out_assert(timeout_seconds, node_height_exactly, True, full_node_1, 409)

    @pytest.mark.asyncio
    async def test_sync_from_fork_point_and_weight_proof(
        self, three_nodes, default_1000_blocks, default_400_blocks, self_hostname
    ):
        """Syncing resumes from a fork point, and proof-of-weight requests validate."""
        start = time.time()  # NOTE(review): never read afterwards -- leftover timing probe
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks_initial = len(default_1000_blocks) - 50
        blocks_950 = default_1000_blocks[:num_blocks_initial]
        blocks_rest = default_1000_blocks[num_blocks_initial:]
        blocks_400 = default_400_blocks
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server

        for block in blocks_950:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # Node 2 syncs from halfway
        for i in range(int(len(default_1000_blocks) / 2)):
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(default_1000_blocks[i]))

        # Node 3 syncs from a different blockchain
        for block in blocks_400:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)

        # Also test request proof of weight
        # Have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, blocks_950[-1].header_hash)
        )
        assert res is not None
        validated, _, _ = await full_node_1.full_node.weight_proof_handler.validate_weight_proof(
            full_node_protocol.RespondProofOfWeight.from_bytes(res.data).wp
        )
        assert validated

        # Don't have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, std_hash(b"12"))
        )
        assert res is None

        # The second node should eventually catch up to the first one, and have the
        # same tip at height num_blocks - 1
        await time_out_assert(300, node_height_exactly, True, full_node_2, num_blocks_initial - 1)
        await time_out_assert(180, node_height_exactly, True, full_node_3, num_blocks_initial - 1)

        def fn3_is_not_syncing():
            return not full_node_3.full_node.sync_store.get_sync_mode()

        await time_out_assert(180, fn3_is_not_syncing)
        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()
        for block in blocks_rest:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))
            assert full_node_3.full_node.blockchain.get_peak().height >= block.height
        log.warning(f"FN3 height {full_node_3.full_node.blockchain.get_peak().height}")

        # TODO: fix this flaky test
        await time_out_assert(180, node_height_exactly, True, full_node_3, 999)
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_2._port)), full_node_3.full_node.on_connect)
        await time_out_assert(180, node_height_exactly, True, full_node_1, 999)
        await time_out_assert(180, node_height_exactly, True, full_node_2, 999)

    @pytest.mark.asyncio
    async def test_batch_sync(self, two_nodes, self_hostname):
        """Short-range (batch) sync: node 2 abandons its shorter chain for node 1's."""
        # Must be below "sync_block_behind_threshold" in the config
        num_blocks = 20
        num_blocks_2 = 9
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes
        blocks = bt.get_consecutive_blocks(num_blocks)
        blocks_2 = bt.get_consecutive_blocks(num_blocks_2, seed=b"123")

        # 20 blocks to node_1
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # 9 different blocks to node_2
        for block in blocks_2:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, num_blocks - 1)

    @pytest.mark.asyncio
    async def test_backtrack_sync_1(self, two_nodes, self_hostname):
        """Backtrack sync across a small chain whose first block skips a sub slot."""
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes
        blocks = bt.get_consecutive_blocks(1, skip_slots=1)
        blocks = bt.get_consecutive_blocks(1, blocks, skip_slots=0)
        blocks = bt.get_consecutive_blocks(1, blocks, skip_slots=0)

        # 3 blocks to node_1 in different sub slots
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, 2)

    @pytest.mark.asyncio
    async def test_backtrack_sync_2(self, two_nodes, self_hostname):
        """Backtrack sync over a 9-block chain that starts after three skipped sub slots."""
        full_node_1, full_node_2, server_1, server_2, bt = two_nodes
        blocks = bt.get_consecutive_blocks(1, skip_slots=3)
        blocks = bt.get_consecutive_blocks(8, blocks, skip_slots=0)

        # 9 blocks to node_1 (1 after three skipped sub slots, then 8 more)
        for block in blocks:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        await time_out_assert(60, node_height_exactly, True, full_node_2, 8)

    @pytest.mark.asyncio
    async def test_close_height_but_big_reorg(self, three_nodes, bt, self_hostname):
        """All nodes converge on the heaviest chain even when their heights are close."""
        blocks_a = bt.get_consecutive_blocks(50)
        blocks_b = bt.get_consecutive_blocks(51, seed=b"B")
        blocks_c = bt.get_consecutive_blocks(90, seed=b"C")
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server

        for block in blocks_a:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for block in blocks_b:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))
        for block in blocks_c:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_2.full_node.on_connect,
        )
        # nodes 1 and 2 settle on the 50-height chain; node 3 still has 89
        await time_out_assert(60, node_height_exactly, True, full_node_1, 50)
        await time_out_assert(60, node_height_exactly, True, full_node_2, 50)
        await time_out_assert(60, node_height_exactly, True, full_node_3, 89)

        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)),
            on_connect=full_node_3.full_node.on_connect,
        )
        await server_3.start_client(
            PeerInfo(self_hostname, uint16(server_2._port)),
            on_connect=full_node_3.full_node.on_connect,
        )
        # once connected, everyone reorgs to node 3's longer chain
        await time_out_assert(60, node_height_exactly, True, full_node_1, 89)
        await time_out_assert(60, node_height_exactly, True, full_node_2, 89)
        await time_out_assert(60, node_height_exactly, True, full_node_3, 89)

    @pytest.mark.asyncio
    async def test_sync_bad_peak_while_synced(
        self, three_nodes, default_1000_blocks, default_1500_blocks, self_hostname
    ):
        """A heavier peak from a peer that cannot serve weight proofs must not knock a synced node into sync mode."""
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks_initial = len(default_1000_blocks) - 250
        blocks_750 = default_1000_blocks[:num_blocks_initial]
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server
        # disabling the handler makes node 3 unable to serve weight proofs
        full_node_3.full_node.weight_proof_handler = None
        for block in blocks_750:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))
        # Node 3 syncs from a different blockchain
        for block in default_1500_blocks[:1100]:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)

        # The second node should eventually catch up to the first one, and have the
        # same tip at height num_blocks - 1
        await time_out_assert(180, node_height_exactly, True, full_node_2, num_blocks_initial - 1)

        # set new heavy peak, fn3 cannot serve wp's
        # node 2 should keep being synced and receive blocks
        # NOTE(review): server_3 connecting to its own port looks like a copy-paste
        # slip -- confirm whether a different peer was intended here.
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_3._port)), full_node_3.full_node.on_connect)
        # trigger long sync in full node 2
        peak_block = default_1500_blocks[1050]
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_3._port)), full_node_2.full_node.on_connect)
        con = server_2.all_connections[full_node_3.full_node.server.node_id]
        peak = full_node_protocol.NewPeak(
            peak_block.header_hash,
            peak_block.height,
            peak_block.weight,
            peak_block.height,
            peak_block.reward_chain_block.get_unfinished().get_hash(),
        )
        await full_node_2.full_node.new_peak(peak, con)
        await asyncio.sleep(2)

        # node 2 must not have entered sync mode and must keep accepting blocks
        assert not full_node_2.full_node.sync_store.get_sync_mode()
        for block in default_1000_blocks[1000 - num_blocks_initial :]:
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(block))

        assert node_height_exactly(full_node_2, 999)

    @pytest.mark.asyncio
    async def test_block_ses_mismatch(self, two_nodes, default_1000_blocks, self_hostname):
        """Syncing against a tampered sub-epoch-summary list must fail inside the bad sub epoch."""
        full_node_1, full_node_2, server_1, server_2, _ = two_nodes
        blocks = default_1000_blocks

        # mock for full node _sync
        async def async_mock():
            log.info("do nothing")

        # prevent the real long-sync from kicking in; we drive sync manually below
        full_node_2.full_node._sync = MagicMock(return_value=async_mock())
        # load blocks into node 1
        for block in blocks[:501]:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        peak1 = full_node_1.full_node.blockchain.get_peak()
        assert peak1 is not None
        summary_heights = full_node_1.full_node.blockchain.get_ses_heights()
        summaries: List[SubEpochSummary] = []

        # get ses list
        for sub_epoch_n, ses_height in enumerate(summary_heights):
            summaries.append(full_node_1.full_node.blockchain.get_ses(ses_height))

        # change summary so check would fail on sub epoch 1
        s = summaries[1]
        summaries[1] = SubEpochSummary(
            s.prev_subepoch_summary_hash,
            s.reward_chain_hash,
            s.num_blocks_overflow,
            s.new_difficulty * 2,
            s.new_sub_slot_iters * 2,
        )
        # manually try sync with wrong sub epoch summary list
        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), None)

        # call peer has block to populate peer_to_peak
        full_node_2.full_node.sync_store.peer_has_block(
            peak1.header_hash, full_node_1.full_node.server.node_id, peak1.weight, peak1.height, True
        )

        # sync using bad ses list
        await full_node_2.full_node.sync_from_fork_point(0, 500, peak1.header_hash, summaries)

        # assert we failed somewhere between sub epoch 0 to sub epoch 1
        assert node_height_between(full_node_2, summary_heights[0], summary_heights[1])
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/full_node/full_sync/__init__.py | tests/core/full_node/full_sync/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/daemon/test_daemon.py | tests/core/daemon/test_daemon.py | import aiohttp
import asyncio
import json
import logging
import pytest
from dataclasses import dataclass, replace
from typing import Any, Dict, List, Optional, Type, Union, cast
from flax.daemon.keychain_server import DeleteLabelRequest, SetLabelRequest
from flax.daemon.server import WebSocketServer, service_plotter
from flax.server.outbound_message import NodeType
from flax.types.peer_info import PeerInfo
from flax.util.ints import uint16
from flax.util.keychain import KeyData
from flax.daemon.keychain_server import GetKeyRequest, GetKeyResponse, GetKeysResponse
from flax.util.keyring_wrapper import DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE
from flax.util.ws_message import create_payload
from tests.core.node_height import node_height_at_least
from flax.simulator.time_out_assert import time_out_assert_custom_interval, time_out_assert
# Simple class that responds to a poll() call used by WebSocketServer.is_running()
@dataclass
class Service:
    """Stand-in for a launched service process.

    ``poll()`` mirrors the subprocess convention the daemon relies on:
    ``None`` while the process is alive, a nonzero exit code once stopped.
    """

    running: bool

    def poll(self) -> Optional[int]:
        if self.running:
            return None
        return 1
# Mock daemon server that forwards to WebSocketServer
@dataclass
class Daemon:
    """Minimal daemon stub carrying only the state WebSocketServer's
    service-status handlers read; each handler delegates to the real
    WebSocketServer implementation via an unbound call."""

    # Instance variables used by WebSocketServer.is_running()
    services: Dict[str, Union[List[Service], Service]]
    connections: Dict[str, Optional[List[Any]]]

    def is_service_running(self, service_name: str) -> bool:
        # `cast` only satisfies the type checker; at runtime `self` is this stub.
        return WebSocketServer.is_service_running(cast(WebSocketServer, self), service_name)

    async def running_services(self, request: Dict[str, Any]) -> Dict[str, Any]:
        return await WebSocketServer.running_services(cast(WebSocketServer, self), request)

    async def is_running(self, request: Dict[str, Any]) -> Dict[str, Any]:
        return await WebSocketServer.is_running(cast(WebSocketServer, self), request)
# Deterministic test key: built from a fixed mnemonic so the fingerprint and
# derived public data are stable across runs.
test_key_data = KeyData.from_mnemonic(
    "grief lock ketchup video day owner torch young work "
    "another venue evidence spread season bright private "
    "tomato remind jaguar original blur embody project can"
)
# Same key with secret material stripped — what the daemon returns when
# secrets are not requested.
test_key_data_no_secrets = replace(test_key_data, secrets=None)

# Minimal "request succeeded" daemon payload.
success_response_data = {
    "success": True,
}
def fingerprint_missing_response_data(request_type: Type[object]) -> Dict[str, object]:
    """Expected daemon error payload for a request that omits its fingerprint field."""
    detail = f"1 field missing for {request_type.__name__}: fingerprint"
    return {
        "success": False,
        "error": "malformed request",
        "error_details": {"message": detail},
    }
def fingerprint_not_found_response_data(fingerprint: int) -> Dict[str, object]:
    """Expected daemon error payload when no key matches ``fingerprint``."""
    details = {"fingerprint": fingerprint}
    return {"success": False, "error": "key not found", "error_details": details}
def get_key_response_data(key: KeyData) -> Dict[str, object]:
    """Expected successful ``get_key`` daemon payload for ``key``."""
    return {"success": True, **GetKeyResponse(key=key).to_json_dict()}
def get_keys_response_data(keys: List[KeyData]) -> Dict[str, object]:
    """Expected successful ``get_keys`` daemon payload for ``keys``."""
    return {"success": True, **GetKeysResponse(keys=keys).to_json_dict()}
def label_missing_response_data(request_type: Type[Any]) -> Dict[str, Any]:
    """Expected daemon error payload for a request that omits its label field."""
    detail = f"1 field missing for {request_type.__name__}: label"
    return {
        "success": False,
        "error": "malformed request",
        "error_details": {"message": detail},
    }
def label_exists_response_data(fingerprint: int, label: str) -> Dict[str, Any]:
    """Expected daemon error payload when ``label`` is already taken for ``fingerprint``."""
    message = f"label {label!r} already exists for fingerprint {str(fingerprint)!r}"
    return {
        "success": False,
        "error": "malformed request",
        "error_details": {"message": message},
    }
# Expected error payload when the supplied label is empty or only whitespace.
label_empty_response_data = {
    "success": False,
    "error": "malformed request",
    "error_details": {"message": "label can't be empty or whitespace only"},
}

# Expected error payload when the label is longer than the 65-character limit.
label_too_long_response_data = {
    "success": False,
    "error": "malformed request",
    "error_details": {"message": "label exceeds max length: 66/65"},
}

# Expected error payload when the label contains a newline or tab character.
label_newline_or_tab_response_data = {
    "success": False,
    "error": "malformed request",
    "error_details": {"message": "label can't contain newline or tab"},
}
def assert_response(response: aiohttp.http_websocket.WSMessage, expected_response_data: Dict[str, Any]) -> None:
    """Decode one daemon websocket reply and assert it acked with the expected data."""
    # Expect: JSON response
    assert response.type == aiohttp.WSMsgType.TEXT
    message = json.loads(response.data.strip())
    # Expect: daemon handled the request
    assert message["ack"] is True
    # Expect: data matches the expected data
    assert message["data"] == expected_response_data
def assert_running_services_response(response_dict: Dict[str, Any], expected_response_dict: Dict[str, Any]) -> None:
    """Compare a ``running_services`` response against the expected payload.

    The service list is compared as an unordered collection (its order is not
    guaranteed); the length check still catches duplicates. All other fields
    must match exactly.
    """
    for key, expected in expected_response_dict.items():
        actual = response_dict[key]
        if key == "running_services":
            assert len(actual) == len(expected)
            assert set(actual) == set(expected)
        else:
            assert actual == expected
@pytest.fixture(scope="session")
def mock_lonely_daemon():
    """Daemon stub with no services and no connections registered."""
    # Mock daemon server without any registered services/connections
    return Daemon(services={}, connections={})
@pytest.fixture(scope="session")
def mock_daemon_with_services():
    """Daemon stub with two running services, one stopped service, and two plotter instances."""
    # Mock daemon server with a couple running services, a plotter, and one stopped service
    return Daemon(
        services={
            "my_refrigerator": Service(True),
            "the_river": Service(True),
            "your_nose": Service(False),
            "flax_plotter": [Service(True), Service(True)],
        },
        connections={},
    )
@pytest.fixture(scope="session")
def mock_daemon_with_services_and_connections():
    """Daemon stub with running services plus websocket connections; "banana"
    exists only as a connection, not as a launched service."""
    # Mock daemon server with a couple running services, a plotter, and a couple active connections
    return Daemon(
        services={
            "my_refrigerator": Service(True),
            "flax_plotter": [Service(True), Service(True)],
            "apple": Service(True),
        },
        connections={
            "apple": [1],
            "banana": [1, 2],
        },
    )
@pytest.mark.asyncio
async def test_daemon_simulation(self_hostname, daemon_simulation):
    """End-to-end: a client registers with the daemon over wss and routes a
    ``get_blockchain_state`` RPC through it to the full node."""
    deamon_and_nodes, get_b_tools, bt = daemon_simulation
    node1, node2, _, _, _, _, _, _, _, _, daemon1 = deamon_and_nodes
    server1 = node1.full_node.server
    node2_port = node2.full_node.server.get_port()
    await server1.start_client(PeerInfo(self_hostname, uint16(node2_port)))

    async def num_connections():
        count = len(node2.server.get_connections(NodeType.FULL_NODE))
        return count

    await time_out_assert_custom_interval(60, 1, num_connections, 1)
    await time_out_assert(1500, node_height_at_least, True, node2, 1)
    session = aiohttp.ClientSession()

    log = logging.getLogger()
    log.warning(f"Connecting to daemon on port {daemon1.daemon_port}")

    # Connect to the daemon using its self-signed TLS context.
    ws = await session.ws_connect(
        f"wss://127.0.0.1:{daemon1.daemon_port}",
        autoclose=True,
        autoping=True,
        heartbeat=60,
        ssl_context=get_b_tools.get_daemon_ssl_context(),
        max_msg_size=100 * 1024 * 1024,
    )
    service_name = "test_service_name"
    data = {"service": service_name}
    payload = create_payload("register_service", data, service_name, "daemon")
    await ws.send_str(payload)
    message_queue = asyncio.Queue()

    # Background task draining websocket messages into the queue.
    async def reader(ws, queue):
        while True:
            # ClientWebSocketReponse::receive() internally handles PING, PONG, and CLOSE messages
            msg = await ws.receive()
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = msg.data.strip()
                message = json.loads(message)
                await queue.put(message)
            else:
                if msg.type == aiohttp.WSMsgType.ERROR:
                    await ws.close()
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    pass

                break

    read_handler = asyncio.create_task(reader(ws, message_queue))
    data = {}
    payload = create_payload("get_blockchain_state", data, service_name, "flax_full_node")
    await ws.send_str(payload)

    # Give the daemon time to route the request and the reader to enqueue the reply.
    await asyncio.sleep(5)
    blockchain_state_found = False
    while not message_queue.empty():
        message = await message_queue.get()
        if message["command"] == "get_blockchain_state":
            blockchain_state_found = True

    await ws.close()
    read_handler.cancel()

    assert blockchain_state_found
@pytest.mark.parametrize(
    "service, expected_result",
    [
        (
            "my_refrigerator",
            False,
        ),
        (
            service_plotter,
            False,
        ),
    ],
)
def test_is_service_running_no_services(mock_lonely_daemon, service, expected_result):
    """With nothing registered, every service (plotter included) reports not running."""
    daemon = mock_lonely_daemon
    assert daemon.is_service_running(service) == expected_result
@pytest.mark.parametrize(
    "service, expected_result",
    [
        (
            "my_refrigerator",
            True,
        ),
        (
            service_plotter,
            True,
        ),
        (
            "your_nose",
            False,
        ),
        (
            "the_river",
            True,
        ),
        (
            "the_clock",
            False,
        ),
    ],
)
def test_is_service_running_with_services(mock_daemon_with_services, service, expected_result):
    """Running/stopped/unknown services report the matching status."""
    daemon = mock_daemon_with_services
    assert daemon.is_service_running(service) == expected_result
@pytest.mark.parametrize(
    "service, expected_result",
    [
        (
            "my_refrigerator",
            True,
        ),
        (
            service_plotter,
            True,
        ),
        (
            "apple",
            True,
        ),
        (
            "banana",
            True,
        ),
        (
            "orange",
            False,
        ),
    ],
)
def test_is_service_running_with_services_and_connections(
    mock_daemon_with_services_and_connections, service, expected_result
):
    """A connection alone ("banana") counts as running; unknown names do not."""
    daemon = mock_daemon_with_services_and_connections
    assert daemon.is_service_running(service) == expected_result
@pytest.mark.asyncio
async def test_running_services_no_services(mock_lonely_daemon):
    """``running_services`` returns an empty list when nothing is registered."""
    daemon = mock_lonely_daemon
    response = await daemon.running_services({})
    assert_running_services_response(response, {"success": True, "running_services": []})
@pytest.mark.asyncio
async def test_running_services_with_services(mock_daemon_with_services):
    """``running_services`` lists only running services; the stopped one is omitted."""
    daemon = mock_daemon_with_services
    response = await daemon.running_services({})
    assert_running_services_response(
        response, {"success": True, "running_services": ["my_refrigerator", "the_river", service_plotter]}
    )
@pytest.mark.asyncio
async def test_running_services_with_services_and_connections(mock_daemon_with_services_and_connections):
    """``running_services`` includes connection-only entries ("banana") alongside services."""
    daemon = mock_daemon_with_services_and_connections
    response = await daemon.running_services({})
    assert_running_services_response(
        response, {"success": True, "running_services": ["my_refrigerator", "apple", "banana", service_plotter]}
    )
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "service_request, expected_result, expected_exception",
    [
        ({}, None, KeyError),
        (
            {"service": "my_refrigerator"},
            {"success": True, "service_name": "my_refrigerator", "is_running": False},
            None,
        ),
    ],
)
async def test_is_running_no_services(mock_lonely_daemon, service_request, expected_result, expected_exception):
    """``is_running`` raises KeyError for a request without "service", else reports not running."""
    daemon = mock_lonely_daemon
    if expected_exception is not None:
        with pytest.raises(expected_exception):
            await daemon.is_running(service_request)
    else:
        response = await daemon.is_running(service_request)
        assert response == expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "service_request, expected_result, expected_exception",
    [
        ({}, None, KeyError),
        (
            {"service": "my_refrigerator"},
            {"success": True, "service_name": "my_refrigerator", "is_running": True},
            None,
        ),
        (
            {"service": "your_nose"},
            {"success": True, "service_name": "your_nose", "is_running": False},
            None,
        ),
        (
            {"service": "the_river"},
            {"success": True, "service_name": "the_river", "is_running": True},
            None,
        ),
        (
            {"service": service_plotter},
            {"success": True, "service_name": service_plotter, "is_running": True},
            None,
        ),
    ],
)
async def test_is_running_with_services(
    mock_daemon_with_services, service_request, expected_result, expected_exception
):
    """``is_running`` reflects per-service state and still raises KeyError for malformed requests."""
    daemon = mock_daemon_with_services
    if expected_exception is not None:
        with pytest.raises(expected_exception):
            await daemon.is_running(service_request)
    else:
        response = await daemon.is_running(service_request)
        assert response == expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "service_request, expected_result, expected_exception",
    [
        ({}, None, KeyError),
        (
            {"service": "my_refrigerator"},
            {"success": True, "service_name": "my_refrigerator", "is_running": True},
            None,
        ),
        (
            {"service": "your_nose"},
            {"success": True, "service_name": "your_nose", "is_running": False},
            None,
        ),
        (
            {"service": "apple"},
            {"success": True, "service_name": "apple", "is_running": True},
            None,
        ),
        (
            {"service": "banana"},
            {"success": True, "service_name": "banana", "is_running": True},
            None,
        ),
        (
            {"service": "orange"},
            {"success": True, "service_name": "orange", "is_running": False},
            None,
        ),
    ],
)
async def test_is_running_with_services_and_connections(
    mock_daemon_with_services_and_connections, service_request, expected_result, expected_exception
):
    """``is_running`` counts connection-only entries ("banana") as running."""
    daemon = mock_daemon_with_services_and_connections
    if expected_exception is not None:
        with pytest.raises(expected_exception):
            await daemon.is_running(service_request)
    else:
        response = await daemon.is_running(service_request)
        assert response == expected_result
@pytest.mark.asyncio
async def test_validate_keyring_passphrase_rpc(daemon_connection_and_temp_keychain):
    """``validate_keyring_passphrase``: correct passphrase succeeds; wrong,
    missing, and empty passphrases each fail with their specific payload."""
    ws, keychain = daemon_connection_and_temp_keychain

    # When: the keychain has a master passphrase set
    keychain.set_master_passphrase(
        current_passphrase=DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE, new_passphrase="the correct passphrase"
    )

    bad_passphrase_case_response_data = {
        "success": False,
        "error": None,
    }

    missing_passphrase_response_data = {
        "success": False,
        "error": "missing key",
    }

    empty_passphrase_response_data = {
        "success": False,
        "error": None,
    }

    # When: using the correct passphrase
    await ws.send_str(
        create_payload("validate_keyring_passphrase", {"key": "the correct passphrase"}, "test", "daemon")
    )
    # Expect: validation succeeds
    # TODO: unify error responses in the server, sometimes we add `error: None` sometimes not.
    assert_response(await ws.receive(), {**success_response_data, "error": None})

    # When: using the wrong passphrase
    await ws.send_str(create_payload("validate_keyring_passphrase", {"key": "the wrong passphrase"}, "test", "daemon"))
    # Expect: validation failure
    assert_response(await ws.receive(), bad_passphrase_case_response_data)

    # When: not including the passphrase in the payload
    await ws.send_str(create_payload("validate_keyring_passphrase", {}, "test", "daemon"))
    # Expect: validation failure
    assert_response(await ws.receive(), missing_passphrase_response_data)

    # When: including an empty passphrase in the payload
    await ws.send_str(create_payload("validate_keyring_passphrase", {"key": ""}, "test", "daemon"))
    # Expect: validation failure
    assert_response(await ws.receive(), empty_passphrase_response_data)
@pytest.mark.asyncio
async def test_add_private_key(daemon_connection_and_temp_keychain):
    """``add_private_key``: a valid mnemonic is accepted; missing, misspelled,
    wrong-length, and checksum-invalid mnemonics each produce a specific error."""
    ws, keychain = daemon_connection_and_temp_keychain
    mnemonic_with_typo = f"{test_key_data.mnemonic_str()}xyz"  # intentional typo: can -> canxyz
    # Drop the final *word* to get a wrong-length mnemonic. (The previous code
    # sliced the string itself — `mnemonic_str()[:-1]` — so " ".join spread
    # spaces between individual characters instead of removing a word.)
    mnemonic_with_missing_word = " ".join(test_key_data.mnemonic_str().split(" ")[:-1])

    missing_mnemonic_response_data = {
        "success": False,
        "error": "malformed request",
        "error_details": {"message": "missing mnemonic"},
    }

    mnemonic_with_typo_response_data = {
        "success": False,
        "error": "'canxyz' is not in the mnemonic dictionary; may be misspelled",
    }

    invalid_mnemonic_length_response_data = {
        "success": False,
        "error": "Invalid mnemonic length",
    }

    invalid_mnemonic_response_data = {
        "success": False,
        "error": "Invalid order of mnemonic words",
    }

    # Expect the key hasn't been added yet
    assert keychain.get_private_key_by_fingerprint(test_key_data.fingerprint) is None

    await ws.send_str(create_payload("add_private_key", {"mnemonic": test_key_data.mnemonic_str()}, "test", "daemon"))
    # Expect: key was added successfully
    assert_response(await ws.receive(), success_response_data)

    # When: missing mnemonic
    await ws.send_str(create_payload("add_private_key", {}, "test", "daemon"))
    # Expect: Failure due to missing mnemonic
    assert_response(await ws.receive(), missing_mnemonic_response_data)

    # When: using a mnemonic with an incorrect word (typo)
    await ws.send_str(create_payload("add_private_key", {"mnemonic": mnemonic_with_typo}, "test", "daemon"))
    # Expect: Failure due to misspelled mnemonic
    assert_response(await ws.receive(), mnemonic_with_typo_response_data)

    # When: using a mnemonic with an incorrect word count
    await ws.send_str(create_payload("add_private_key", {"mnemonic": mnemonic_with_missing_word}, "test", "daemon"))
    # Expect: Failure due to invalid mnemonic
    assert_response(await ws.receive(), invalid_mnemonic_length_response_data)

    # When: using an incorrect mnemonic
    await ws.send_str(create_payload("add_private_key", {"mnemonic": " ".join(["abandon"] * 24)}, "test", "daemon"))
    # Expect: Failure due to checksum error
    assert_response(await ws.receive(), invalid_mnemonic_response_data)
@pytest.mark.asyncio
async def test_add_private_key_label(daemon_connection_and_temp_keychain):
    """``add_private_key`` must honor the optional ``label``: omitted, explicit
    None, and a concrete label must all round-trip through ``get_key``."""
    ws, keychain = daemon_connection_and_temp_keychain

    async def assert_add_private_key_with_label(key_data: KeyData, request: Dict[str, object]) -> None:
        # Add the key, then read it back (secrets included) and compare.
        await ws.send_str(create_payload("add_private_key", request, "test", "daemon"))
        assert_response(await ws.receive(), success_response_data)
        await ws.send_str(
            create_payload("get_key", {"fingerprint": key_data.fingerprint, "include_secrets": True}, "test", "daemon")
        )
        assert_response(await ws.receive(), get_key_response_data(key_data))

    # without `label` parameter
    key_data_0 = KeyData.generate()
    await assert_add_private_key_with_label(key_data_0, {"mnemonic": key_data_0.mnemonic_str()})
    # with `label=None`
    key_data_1 = KeyData.generate()
    await assert_add_private_key_with_label(key_data_1, {"mnemonic": key_data_1.mnemonic_str(), "label": None})
    # with `label="key_2"` — verify against key_data_2 (the key actually being
    # added); the previous code checked key_data_1 and never exercised the label.
    key_data_2 = KeyData.generate("key_2")
    await assert_add_private_key_with_label(
        key_data_2, {"mnemonic": key_data_2.mnemonic_str(), "label": key_data_2.label}
    )
@pytest.mark.asyncio
async def test_get_key(daemon_connection_and_temp_keychain):
    """``get_key``: unknown fingerprint, secrets included/excluded, and
    missing/unknown fingerprint error payloads."""
    ws, keychain = daemon_connection_and_temp_keychain

    # Before the key exists, lookups by its fingerprint must fail
    await ws.send_str(create_payload("get_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
    assert_response(await ws.receive(), fingerprint_not_found_response_data(test_key_data.fingerprint))

    keychain.add_private_key(test_key_data.mnemonic_str())

    # without `include_secrets`
    await ws.send_str(create_payload("get_key", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
    assert_response(await ws.receive(), get_key_response_data(test_key_data_no_secrets))

    # with `include_secrets=False`
    await ws.send_str(
        create_payload(
            "get_key", {"fingerprint": test_key_data.fingerprint, "include_secrets": False}, "test", "daemon"
        )
    )
    assert_response(await ws.receive(), get_key_response_data(test_key_data_no_secrets))

    # with `include_secrets=True`
    await ws.send_str(
        create_payload("get_key", {"fingerprint": test_key_data.fingerprint, "include_secrets": True}, "test", "daemon")
    )
    assert_response(await ws.receive(), get_key_response_data(test_key_data))

    # Malformed request: fingerprint missing entirely
    await ws.send_str(create_payload("get_key", {}, "test", "daemon"))
    assert_response(await ws.receive(), fingerprint_missing_response_data(GetKeyRequest))

    # Unknown fingerprint
    await ws.send_str(create_payload("get_key", {"fingerprint": 123456}, "test", "daemon"))
    assert_response(await ws.receive(), fingerprint_not_found_response_data(123456))
@pytest.mark.asyncio
async def test_get_keys(daemon_connection_and_temp_keychain):
    """``get_keys``: empty keychain, then five keys with and without secrets."""
    ws, keychain = daemon_connection_and_temp_keychain

    # empty keychain
    await ws.send_str(create_payload("get_keys", {}, "test", "daemon"))
    assert_response(await ws.receive(), get_keys_response_data([]))

    keys = [KeyData.generate() for _ in range(5)]
    keys_added = []
    for key_data in keys:
        keychain.add_private_key(key_data.mnemonic_str())
        keys_added.append(key_data)

    get_keys_response_data_without_secrets = get_keys_response_data(
        [replace(key, secrets=None) for key in keys_added]
    )

    # without `include_secrets`
    await ws.send_str(create_payload("get_keys", {}, "test", "daemon"))
    assert_response(await ws.receive(), get_keys_response_data_without_secrets)

    # with `include_secrets=False`
    await ws.send_str(create_payload("get_keys", {"include_secrets": False}, "test", "daemon"))
    assert_response(await ws.receive(), get_keys_response_data_without_secrets)

    # with `include_secrets=True`
    await ws.send_str(create_payload("get_keys", {"include_secrets": True}, "test", "daemon"))
    assert_response(await ws.receive(), get_keys_response_data(keys_added))
@pytest.mark.asyncio
async def test_key_renaming(daemon_connection_and_temp_keychain):
    """``set_label`` can be applied repeatedly; each new label is visible via ``get_key``."""
    ws, keychain = daemon_connection_and_temp_keychain
    keychain.add_private_key(test_key_data.mnemonic_str())
    # Rename the key three times
    for i in range(3):
        key_data = replace(test_key_data_no_secrets, label=f"renaming_{i}")
        await ws.send_str(
            create_payload(
                "set_label", {"fingerprint": key_data.fingerprint, "label": key_data.label}, "test", "daemon"
            )
        )
        assert_response(await ws.receive(), success_response_data)

        await ws.send_str(create_payload("get_key", {"fingerprint": key_data.fingerprint}, "test", "daemon"))
        assert_response(
            await ws.receive(),
            {
                "success": True,
                "key": key_data.to_json_dict(),
            },
        )
@pytest.mark.asyncio
async def test_key_label_deletion(daemon_connection_and_temp_keychain):
    """``delete_label`` removes the label; deleting again fails with key-not-found."""
    ws, keychain = daemon_connection_and_temp_keychain

    keychain.add_private_key(test_key_data.mnemonic_str(), "key_0")
    assert keychain.get_key(test_key_data.fingerprint).label == "key_0"
    await ws.send_str(create_payload("delete_label", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
    assert_response(await ws.receive(), success_response_data)
    assert keychain.get_key(test_key_data.fingerprint).label is None
    # Second deletion: the label no longer exists for this fingerprint
    await ws.send_str(create_payload("delete_label", {"fingerprint": test_key_data.fingerprint}, "test", "daemon"))
    assert_response(await ws.receive(), fingerprint_not_found_response_data(test_key_data.fingerprint))
@pytest.mark.parametrize(
    "method, parameter, response_data_dict",
    [
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": "new_label"},
            success_response_data,
        ),
        (
            "set_label",
            {"label": "new_label"},
            fingerprint_missing_response_data(SetLabelRequest),
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint},
            label_missing_response_data(SetLabelRequest),
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": ""},
            label_empty_response_data,
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": "a" * 66},
            label_too_long_response_data,
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": "a\nb"},
            label_newline_or_tab_response_data,
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": "a\tb"},
            label_newline_or_tab_response_data,
        ),
        (
            "set_label",
            {"fingerprint": test_key_data.fingerprint, "label": "key_0"},
            label_exists_response_data(test_key_data.fingerprint, "key_0"),
        ),
        (
            "delete_label",
            {"fingerprint": test_key_data.fingerprint},
            success_response_data,
        ),
        (
            "delete_label",
            {},
            fingerprint_missing_response_data(DeleteLabelRequest),
        ),
        (
            "delete_label",
            {"fingerprint": 123456},
            fingerprint_not_found_response_data(123456),
        ),
    ],
)
@pytest.mark.asyncio
async def test_key_label_methods(
    daemon_connection_and_temp_keychain, method: str, parameter: Dict[str, Any], response_data_dict: Dict[str, Any]
) -> None:
    """Table-driven check of every ``set_label``/``delete_label`` success and
    validation-error path against a key pre-labeled "key_0"."""
    ws, keychain = daemon_connection_and_temp_keychain
    keychain.add_private_key(test_key_data.mnemonic_str(), "key_0")
    await ws.send_str(create_payload(method, parameter, "test", "daemon"))
    assert_response(await ws.receive(), response_data_dict)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/daemon/config.py | tests/core/daemon/config.py | from __future__ import annotations
# NOTE(review): these flags appear to be knobs read by the external test
# harness for this directory, not by the tests themselves — confirm against
# the CI configuration.
install_timelord = True
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/daemon/__init__.py | tests/core/daemon/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/cmds/test_wallet.py | tests/core/cmds/test_wallet.py | from __future__ import annotations
from typing import Any, Dict, Optional, Tuple
import pytest
from flax.cmds.wallet_funcs import print_offer_summary
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32
# Fabricated 32-byte (hex) CAT asset ids used to exercise name resolution.
TEST_DUCKSAUCE_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000001"
TEST_CRUNCHBERRIES_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000002"
TEST_UNICORNTEARS_ASSET_ID = "1000000000000000000000000000000000000000000000000000000000000003"

# asset id -> (wallet id, display name), backing the test resolver below.
TEST_ASSET_ID_NAME_MAPPING: Dict[bytes32, Tuple[uint32, str]] = {
    bytes32.from_hexstr(TEST_DUCKSAUCE_ASSET_ID): (uint32(2), "DuckSauce"),
    bytes32.from_hexstr(TEST_CRUNCHBERRIES_ASSET_ID): (uint32(3), "CrunchBerries"),
    bytes32.from_hexstr(TEST_UNICORNTEARS_ASSET_ID): (uint32(4), "UnicornTears"),
}
async def cat_name_resolver(asset_id: bytes32) -> Optional[Tuple[Optional[uint32], str]]:
    """Static test resolver: look up ``asset_id`` in the fixed mapping; None if unknown."""
    return TEST_ASSET_ID_NAME_MAPPING.get(asset_id)
@pytest.mark.asyncio
async def test_print_offer_summary_xfx(capsys: Any) -> None:
    """An XFX-only offer prints the amount in XFX with the mojo count alongside."""
    summary_dict = {"xfx": 1_000_000_000_000}
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "XFX (Wallet ID: 1): 1.0 (1000000000000 mojos)" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_cat(capsys: Any) -> None:
    """A single CAT entry is printed under its resolved name and wallet id."""
    summary_dict = {
        TEST_DUCKSAUCE_ASSET_ID: 1_000,
    }
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 mojos)" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_multiple_cats(capsys: Any) -> None:
    """Multiple CAT entries each get their own resolved line."""
    summary_dict = {
        TEST_DUCKSAUCE_ASSET_ID: 1_000,
        TEST_CRUNCHBERRIES_ASSET_ID: 2_000,
    }
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 mojos)" in captured.out
    assert "CrunchBerries (Wallet ID: 3): 2.0 (2000 mojos)" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_xfx_and_cats(capsys: Any) -> None:
    """A mixed XFX + CAT offer prints one correctly scaled line per asset."""
    summary_dict = {
        "xfx": 2_500_000_000_000,
        TEST_DUCKSAUCE_ASSET_ID: 1_111,
        TEST_CRUNCHBERRIES_ASSET_ID: 2_222,
        TEST_UNICORNTEARS_ASSET_ID: 3_333,
    }
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "XFX (Wallet ID: 1): 2.5 (2500000000000 mojos)" in captured.out
    assert "DuckSauce (Wallet ID: 2): 1.111 (1111 mojos)" in captured.out
    assert "CrunchBerries (Wallet ID: 3): 2.222 (2222 mojos)" in captured.out
    assert "UnicornTears (Wallet ID: 4): 3.333 (3333 mojos)" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_xfx_and_cats_with_zero_values(capsys: Any) -> None:
    """Zero amounts are rendered as 0.0 / 0 mojos rather than being skipped."""
    summary_dict = {
        "xfx": 0,
        TEST_DUCKSAUCE_ASSET_ID: 0,
        TEST_CRUNCHBERRIES_ASSET_ID: 0,
        TEST_UNICORNTEARS_ASSET_ID: 0,
    }
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "XFX (Wallet ID: 1): 0.0 (0 mojos)" in captured.out
    assert "DuckSauce (Wallet ID: 2): 0.0 (0 mojos)" in captured.out
    assert "CrunchBerries (Wallet ID: 3): 0.0 (0 mojos)" in captured.out
    assert "UnicornTears (Wallet ID: 4): 0.0 (0 mojos)" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_cat_with_fee_and_change(capsys: Any) -> None:
    """With ``has_fee=True``, an unresolvable "unknown" entry is annotated as fee change."""
    summary_dict = {
        TEST_DUCKSAUCE_ASSET_ID: 1_000,
        "unknown": 3_456,
    }
    await print_offer_summary(cat_name_resolver, summary_dict, has_fee=True)
    captured = capsys.readouterr()
    assert "DuckSauce (Wallet ID: 2): 1.0 (1000 mojos)" in captured.out
    assert "Unknown: 3456 mojos [Typically represents change returned from the included fee]" in captured.out
@pytest.mark.asyncio
async def test_print_offer_summary_xfx_with_one_mojo(capsys: Any) -> None:
    """A single mojo prints with scientific notation and the singular "mojo"."""
    summary_dict = {"xfx": 1}
    await print_offer_summary(cat_name_resolver, summary_dict)
    captured = capsys.readouterr()
    assert "XFX (Wallet ID: 1): 1e-12 (1 mojo)" in captured.out
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/cmds/test_keys.py | tests/core/cmds/test_keys.py | import os
import pytest
import re
from flax.cmds.flax import cli
from flax.cmds.keys import delete_all_cmd, generate_and_print_cmd, show_cmd, sign_cmd, verify_cmd
from flax.util.config import load_config
from flax.util.file_keyring import FileKeyring
from flax.util.keychain import KeyData, DEFAULT_USER, DEFAULT_SERVICE, Keychain, generate_mnemonic
from flax.util.keyring_wrapper import DEFAULT_KEYS_ROOT_PATH, KeyringWrapper, LegacyKeyring
from click.testing import CliRunner, Result
from keyring.backend import KeyringBackend
from pathlib import Path
from tests.util.keyring import TempKeyring
from typing import Dict, List, Optional
# Fixed mnemonic used throughout these CLI tests so key material is deterministic.
TEST_MNEMONIC_SEED = (
    "grief lock ketchup video day owner torch young work "
    "another venue evidence spread season bright private "
    "tomato remind jaguar original blur embody project can"
)
# Fingerprint of the key derived from TEST_MNEMONIC_SEED.
TEST_FINGERPRINT = 2877570395
class DummyLegacyKeyring(KeyringBackend):
    """In-memory keyring backend standing in for the legacy OS keyring.

    By default it is pre-populated with three hex-encoded keys stored under
    the default flax service/user naming scheme.
    """

    # Fingerprint 2474840988
    KEY_0 = (
        "89e29e5f9c3105b2a853475cab2392468cbfb1d65c3faabea8ebc78fe903fd279e56a8d93f6325fc6c3d833a2ae74832"
        "b8feaa3d6ee49998f43ce303b66dcc5abb633e5c1d80efe85c40766135e4a44c"
    )

    # Fingerprint 4149609062
    KEY_1 = (
        "8b0d72288727af6238fcd9b0a663cd7d4728738fca597d0046cbb42b6432e0a5ae8026683fc5f9c73df26fb3e1cec2c8"
        "ad1b4f601107d96a99f6fa9b9d2382918fb1e107fb6655c7bdd8c77c1d9c201f"
    )

    # Fingerprint 3618811800
    KEY_2 = (
        "8b2a26ba319f83bd3da5b1b147a817ecc4ca557f037c9db1cfedc59b16ee6880971b7d292f023358710a292c8db0eb82"
        "35808f914754ae24e493fad9bc7f654b0f523fb406973af5235256a39bed1283"
    )

    def __init__(self, populate: bool = True):
        # Nested mapping: service name -> {username -> hex-encoded key entry}
        self.service_dict = {}

        if populate:
            self.service_dict[DEFAULT_SERVICE] = {
                f"wallet-{DEFAULT_USER}-0": DummyLegacyKeyring.KEY_0,
                f"wallet-{DEFAULT_USER}-1": DummyLegacyKeyring.KEY_1,
                f"wallet-{DEFAULT_USER}-2": DummyLegacyKeyring.KEY_2,
            }

    def get_password(self, service, username, password=None):
        # Returns None when the service or username is unknown.
        return self.service_dict.get(service, {}).get(username)

    def set_password(self, service, username, password):
        self.service_dict.setdefault(service, {})[username] = password

    def delete_password(self, service, username):
        # Raises KeyError if the entry does not exist.
        del self.service_dict[service][username]
@pytest.fixture(scope="function")
def empty_keyring():
    """Yield a fresh temporary keychain; the shared KeyringWrapper is torn down afterwards."""
    with TempKeyring(user="user-flax-1.8", service="flax-user-flax-1.8") as keychain:
        yield keychain
        KeyringWrapper.cleanup_shared_instance()
@pytest.fixture(scope="function")
def keyring_with_one_key(empty_keyring):
    """An otherwise-empty keyring pre-loaded with the well-known test mnemonic."""
    empty_keyring.add_private_key(TEST_MNEMONIC_SEED)
    return empty_keyring
@pytest.fixture(scope="function")
def mnemonic_seed_file(tmp_path):
    """Write the test mnemonic to a file under tmp_path and return its Path."""
    seed_path = Path(tmp_path) / "seed.txt"
    seed_path.write_text(TEST_MNEMONIC_SEED)
    return seed_path
@pytest.fixture(scope="function")
def setup_keyringwrapper(tmp_path):
    # Point the shared KeyringWrapper singleton at a per-test keys root.
    # The order matters: clean up any existing instance first, then re-root,
    # then materialize the new shared instance before the test runs.
    KeyringWrapper.cleanup_shared_instance()
    KeyringWrapper.set_keys_root_path(tmp_path)
    _ = KeyringWrapper.get_shared_instance()
    yield
    # Teardown: discard the test instance and restore the default root path.
    KeyringWrapper.cleanup_shared_instance()
    KeyringWrapper.set_keys_root_path(DEFAULT_KEYS_ROOT_PATH)
@pytest.fixture(scope="function")
def setup_legacy_keyringwrapper(tmp_path, monkeypatch):
    # Like setup_keyringwrapper, but also installs a DummyLegacyKeyring so
    # legacy-migration code paths can be exercised.
    def mock_setup_keyring_file_watcher(_):
        pass

    # Silence errors in the watchdog module during testing
    # NOTE(review): FileKeyring is not imported in the visible import block —
    # presumably imported further up this file; verify before relying on it.
    monkeypatch.setattr(FileKeyring, "setup_keyring_file_watcher", mock_setup_keyring_file_watcher)
    KeyringWrapper.cleanup_shared_instance()
    KeyringWrapper.set_keys_root_path(tmp_path)
    KeyringWrapper.get_shared_instance().legacy_keyring = DummyLegacyKeyring()
    yield
    # Teardown: discard the test instance and restore the default root path.
    KeyringWrapper.cleanup_shared_instance()
    KeyringWrapper.set_keys_root_path(DEFAULT_KEYS_ROOT_PATH)
def assert_label(keychain: Keychain, label: Optional[str], index: int) -> None:
    """Assert that the key at position *index* in *keychain* carries *label*."""
    keys = keychain.get_keys()
    # The index must exist before we can inspect its label.
    assert index < len(keys)
    assert keys[index].label == label
class TestKeysCommands:
def test_generate_with_new_config(self, tmp_path, empty_keyring):
    """
    Generate a new config and a new key. Verify that the config has
    the correct xfx_target_address entries.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    # Generate the new config
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    # Generate a new key
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "generate",
        ],
        input="\n",  # accept the (empty) label prompt
    )
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    # Verify that the config has the correct xfx_target_address entries:
    # the first printed wallet address becomes both reward targets.
    address_matches = re.findall(r"xfx1[^\n]+", result.output)
    assert len(address_matches) > 1
    address = address_matches[0]
    config: Dict = load_config(tmp_path, "config.yaml")
    assert config["farmer"]["xfx_target_address"] == address
    assert config["pool"]["xfx_target_address"] == address
def test_generate_with_existing_config(self, tmp_path, empty_keyring):
    """
    Generate a new key using an existing config. Verify that the config has
    the original xfx_target_address entries.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    # Generate the new config
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    # Generate the first key
    runner = CliRunner()
    generate_result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "generate",
        ],
        input="\n",
    )
    assert generate_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    # Verify that the config has the correct xfx_target_address entries
    address_matches = re.findall(r"xfx1[^\n]+", generate_result.output)
    assert len(address_matches) > 1
    address = address_matches[0]
    existing_config: Dict = load_config(tmp_path, "config.yaml")
    assert existing_config["farmer"]["xfx_target_address"] == address
    assert existing_config["pool"]["xfx_target_address"] == address
    # Generate the second key
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "generate",
        ],
        input="\n",
    )
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 2
    # Verify that the config's xfx_target_address entries have not changed:
    # only the first generated key may set the reward targets.
    config: Dict = load_config(tmp_path, "config.yaml")
    assert config["farmer"]["xfx_target_address"] == existing_config["farmer"]["xfx_target_address"]
    assert config["pool"]["xfx_target_address"] == existing_config["pool"]["xfx_target_address"]
# Each case is (CLI sub-command + options, expected resulting label, stdin).
# Covers -l/--label flags, empty labels, and labels supplied via the prompt.
@pytest.mark.parametrize(
    "cmd_params, label, input_str",
    [
        (["generate"], None, "\n"),
        (["generate", "-l", "key_0"], "key_0", None),
        (["generate", "--label", "key_0"], "key_0", None),
        (["generate", "-l", ""], None, None),
        (["generate", "--label", ""], None, None),
        (["generate"], "key_0", "key_0\n"),
        (["add"], None, f"{TEST_MNEMONIC_SEED}\n\n"),
        (["add"], "key_0", f"{TEST_MNEMONIC_SEED}\nkey_0\n"),
        (["add", "-l", "key_0"], "key_0", f"{TEST_MNEMONIC_SEED}\n"),
        (["add", "--label", "key_0"], "key_0", f"{TEST_MNEMONIC_SEED}\n"),
        (["add", "-l", ""], None, f"{TEST_MNEMONIC_SEED}\n"),
        (["add", "--label", ""], None, f"{TEST_MNEMONIC_SEED}\n"),
    ],
)
def test_generate_and_add_label_parameter(
    self, cmd_params: List[str], label: Optional[str], input_str: Optional[str], tmp_path, empty_keyring
):
    """`keys generate`/`keys add` assign the expected label for every option form."""
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    base_params = [
        "--no-force-legacy-keyring-migration",
        "--root-path",
        os.fspath(tmp_path),
        "--keys-root-path",
        os.fspath(keys_root_path),
    ]
    runner = CliRunner()
    # Generate a new config
    assert runner.invoke(cli, [*base_params, "init"]).exit_code == 0
    # Run the command
    assert runner.invoke(cli, [*base_params, "keys", *cmd_params], input=input_str).exit_code == 0
    # And make sure the label was set to the expected label
    assert_label(keychain, label, 0)
def test_set_label(self, keyring_with_one_key, tmp_path):
    """`keys label set` assigns a label and can subsequently change it."""
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    base_params = [
        "--no-force-legacy-keyring-migration",
        "--root-path",
        os.fspath(tmp_path),
        "--keys-root-path",
        os.fspath(keys_root_path),
    ]
    # NOTE(review): TEST_FINGERPRINT is passed as a bare int here; other
    # invocations in this class use str(TEST_FINGERPRINT) — confirm click
    # accepts non-string argv entries.
    cmd_params = ["keys", "label", "set", "-f", TEST_FINGERPRINT]
    runner = CliRunner()

    def set_and_validate(label: str):
        # Run the set command and verify both the CLI output and keychain state.
        result = runner.invoke(cli, [*base_params, *cmd_params, "-l", label])
        assert result.exit_code == 0
        assert result.output == f"label {label!r} assigned to {TEST_FINGERPRINT!r}\n"
        assert_label(keychain, label, 0)

    # Generate a new config
    assert runner.invoke(cli, [*base_params, "init"]).exit_code == 0
    # There should be no label for this key
    assert_label(keychain, None, 0)
    # Set a label
    set_and_validate("key_0")
    # Change the label
    set_and_validate("changed")
def test_delete_label(self, keyring_with_one_key, tmp_path):
    """`keys label delete` removes a previously assigned label."""
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    base_params = [
        "--no-force-legacy-keyring-migration",
        "--root-path",
        os.fspath(tmp_path),
        "--keys-root-path",
        os.fspath(keys_root_path),
    ]
    # NOTE(review): bare int fingerprint in argv — see test_set_label.
    cmd_params = ["keys", "label", "delete", "-f", TEST_FINGERPRINT]
    runner = CliRunner()
    # Generate a new config
    assert runner.invoke(cli, [*base_params, "init"]).exit_code == 0
    # There should be no label for this key
    assert_label(keychain, None, 0)
    # Set a label directly through the keychain (not via the CLI).
    keychain.set_label(TEST_FINGERPRINT, "key_0")
    assert_label(keychain, "key_0", 0)
    # Delete the label
    result = runner.invoke(cli, [*base_params, *cmd_params])
    assert result.output == f"label removed for {TEST_FINGERPRINT!r}\n"
    assert_label(keychain, None, 0)
def test_show_labels(self, empty_keyring, tmp_path):
    """`keys label show` lists every key with its label (or a placeholder)."""
    keychain = empty_keyring
    runner = CliRunner()
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    base_params = [
        "--no-force-legacy-keyring-migration",
        "--root-path",
        os.fspath(tmp_path),
        "--keys-root-path",
        os.fspath(keys_root_path),
    ]
    cmd_params = ["keys", "label", "show"]
    # Generate a new config
    assert runner.invoke(cli, [*base_params, "init"]).exit_code == 0
    # Make sure the command works with no keys
    result = runner.invoke(cli, [*base_params, *cmd_params])
    assert result.output == "No keys are present in the keychain. Generate them with 'flax keys generate'\n"
    # Add 10 keys to the keychain, give every other a label
    keys = [KeyData.generate(f"key_{i}" if i % 2 == 0 else None) for i in range(10)]
    for key in keys:
        keychain.add_private_key(key.mnemonic_str(), key.label)
    # Make sure all 10 keys are printed correctly
    result = runner.invoke(cli, [*base_params, *cmd_params])
    assert result.exit_code == 0
    # Output is a '|'-separated table: skip the two header lines, then parse
    # the fingerprint and label columns from each row.
    lines = result.output.splitlines()[2:]  # Split into lines but drop the header
    fingerprints = [int(line.split("|")[1].strip()) for line in lines]
    labels = [line.split("|")[2].strip() for line in lines]
    assert len(fingerprints) == len(labels) == len(keys)
    for fingerprint, label, key in zip(fingerprints, labels, keys):
        assert fingerprint == key.fingerprint
        if key.label is None:
            assert label == "No label assigned"
        else:
            assert label == key.label
def test_show(self, keyring_with_one_key):
    """
    Test that the `flax keys show` command shows the correct key.
    """
    keychain = keyring_with_one_key
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(show_cmd, [])
    # assert result.exit_code == 0
    # str.find returns -1 when the substring is absent, so the containment
    # check must be `!= -1`; the previous `!= 0` passed even when the
    # fingerprint was missing from the output.
    assert result.output.find(f"Fingerprint: {TEST_FINGERPRINT}") != -1
def test_show_mnemonic(self, keyring_with_one_key):
    """
    Test that the `flax keys show --show-mnemonic-seed` command shows the key's mnemonic seed.
    """
    keychain = keyring_with_one_key
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(show_cmd, ["--show-mnemonic-seed"])
    # assert result.exit_code == 0
    # str.find returns -1 on a miss; the previous `!= 0` comparisons were
    # vacuous (they passed even when the text was absent).
    assert result.output.find(f"Fingerprint: {TEST_FINGERPRINT}") != -1
    assert result.output.find("Mnemonic: seed (24 secret words):") != -1
    assert result.output.find(TEST_MNEMONIC_SEED) != -1
def test_add_interactive(self, tmp_path, empty_keyring):
    """
    Test adding a key from mnemonic seed using the interactive prompt.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "add",
        ],
        # First stdin line answers the mnemonic prompt, second (empty) line
        # answers the label prompt.
        input=f"{TEST_MNEMONIC_SEED}\n\n",
    )
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
def test_add_from_mnemonic_seed(self, tmp_path, empty_keyring, mnemonic_seed_file):
    """
    Test adding a key from a mnemonic seed file using the `--filename` flag.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "add",
            "--filename",
            os.fspath(mnemonic_seed_file),
        ],
        input="\n",  # accept the (empty) label prompt
    )
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
def test_delete(self, tmp_path, empty_keyring, mnemonic_seed_file):
    """
    Test deleting a key using the `--fingerprint` option.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    # Import the well-known test key from the seed file first.
    runner = CliRunner()
    add_result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "add",
            "--filename",
            os.fspath(mnemonic_seed_file),
        ],
        input="\n",
    )
    assert add_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "delete",
            "--fingerprint",
            # click argv entries should be strings; every other invocation in
            # this class passes str(TEST_FINGERPRINT), so do the same here
            # instead of handing click a bare int.
            str(TEST_FINGERPRINT),
        ],
    )
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
def test_delete_all(self, empty_keyring):
    """`keys delete_all` wipes every key from the keyring."""
    keychain = empty_keyring
    assert len(keychain.get_all_private_keys()) == 0
    # Populate the keyring with five freshly generated keys.
    for _ in range(5):
        keychain.add_private_key(generate_mnemonic())
    assert len(keychain.get_all_private_keys()) == 5
    result: Result = CliRunner().invoke(delete_all_cmd, [])
    assert result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
def test_generate_and_print(self):
    """
    Test the `flax keys generate_and_print` command.
    """
    runner = CliRunner()
    result: Result = runner.invoke(generate_and_print_cmd, [])
    assert result.exit_code == 0
    # str.find returns -1 on a miss, so a containment check must compare
    # against -1; the previous `!= 0` passed even when the text was absent.
    assert result.output.find("Mnemonic (24 secret words):") != -1
def test_sign(self, keyring_with_one_key):
    """
    Test the `flax keys sign` command.
    """
    message: str = "hello world"
    # Observer derivation path (unhardened components).
    hd_path: str = "m/12381/8444/0/1"
    runner = CliRunner()
    result: Result = runner.invoke(
        sign_cmd, ["--message", message, "--fingerprint", str(TEST_FINGERPRINT), "--hd_path", hd_path]
    )
    assert result.exit_code == 0
    # Expected values are fixed because the key is derived deterministically
    # from TEST_MNEMONIC_SEED.
    assert (
        result.output.find(
            (
                "Public key: 92f15caed8a5495faa7ec25a8af3f223438ef73c974b0aa81e788057b1154870f149739b2c2d0e"
                "736234baf9386f7f83"
            )
        )
        != -1
    )
    assert (
        result.output.find(
            (
                "Signature: a82e7d1b87d8c25a6ccac603194011d73f71fc76c17c1ce4ee53484f81874f116b1cb9dd991bcf9"
                "aa41c10beaab54a830fc6f7e5e25a9144f73e38a6fb852a87e36d80f575a6f84359144e6e9499ba9208912de55"
                "a1f7514cd8cfa166ae48e64"
            )
        )
        != -1
    )
def test_sign_non_observer(self, keyring_with_one_key):
    """
    Test the `flax keys sign` command with a non-observer key.
    """
    message: str = "hello world"
    # The trailing "n" marks each path component as non-observer (hardened).
    hd_path: str = "m/12381n/8444n/0n/1n"
    runner = CliRunner()
    result: Result = runner.invoke(
        sign_cmd, ["--message", message, "--fingerprint", str(TEST_FINGERPRINT), "--hd_path", hd_path]
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            (
                "Public key: b5e383b8192dacff662455bdb3bbfc433f678f0d7ff7f118149e0d2ad39aa6d59ac4cb3662acf8"
                "e8307e66069d3a13cc"
            )
        )
    ) != -1
    assert (
        result.output.find(
            (
                "Signature: b5b3bc1417f67498748018a7ad2c95acfc5ae2dcd0d9dd0f3abfc7e3f047f2e6cf6c3e775b6caff"
                "a3e0baaadc2fe705a100cd4c961d6ff3c575c5c33683eb7b1e2dbbcaf37318227ae40ef8ccf57879a7818fad8f"
                "dc573d55c908be2611b8077"
            )
        )
    ) != -1
def test_sign_mnemonic_seed_file(self, empty_keyring, mnemonic_seed_file):
    """
    Test signing a message using a key imported from a mnemonic seed file.
    """
    message: str = "hello world"
    hd_path: str = "m/12381/8444/0/1"
    runner = CliRunner()
    result: Result = runner.invoke(
        sign_cmd,
        [
            "--message",
            message,
            "--hd_path",
            hd_path,
            # The key comes from the seed file, not the (empty) keyring.
            "--mnemonic-seed-filename",
            mnemonic_seed_file,
        ],
    )
    assert result.exit_code == 0
    # Same expected key/signature as test_sign: identical mnemonic and path.
    assert (
        result.output.find(
            (
                "Public key: "
                "92f15caed8a5495faa7ec25a8af3f223438ef73c974b0aa81e788057b1154870f149739b2c2d0e736234baf9386f7f83"
            )
        )
        != -1
    )
    assert (
        result.output.find(
            (
                "Signature: a82e7d1b87d8c25a6ccac603194011d73f71fc76c17c1ce4ee53484f81874f116b1cb9dd991bcf"
                "9aa41c10beaab54a830fc6f7e5e25a9144f73e38a6fb852a87e36d80f575a6f84359144e6e9499ba9208912de"
                "55a1f7514cd8cfa166ae48e64"
            )
        )
        != -1
    )
def test_verify(self):
    """`keys verify` confirms a known-good message/signature/public-key triple."""
    msg = "hello world"
    sig = (
        "a82e7d1b87d8c25a6ccac603194011d73f71fc76c17c1ce4ee53484f81874f116b1cb9dd991bcf9aa41c10beaab54a83"
        "0fc6f7e5e25a9144f73e38a6fb852a87e36d80f575a6f84359144e6e9499ba9208912de55a1f7514cd8cfa166ae48e64"
    )
    pubkey = (
        "92f15caed8a5495faa7ec25a8af3f223438ef73c974b0aa81e788057b1154870f149739b2c2d0e736234baf9386f7f83"
    )
    verify_args = ["--message", msg, "--public_key", pubkey, "--signature", sig]
    result: Result = CliRunner().invoke(verify_cmd, verify_args)
    assert result.exit_code == 0
    # The command prints the verification result first, so "True" leads the output.
    assert result.output.find("True") == 0
def test_derive_search(self, tmp_path, keyring_with_one_key):
    """
    Test the `flax keys derive search` command, searching a public and private key
    """
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "derive",
            "--fingerprint",
            str(TEST_FINGERPRINT),
            "search",
            "--limit",
            "10",
            "--search-type",
            "all",
            # Search terms: a derived public key and a derived private key,
            # both expected at HD path m/12381/8444/2/9 for the test key.
            "a4601f992f24047097a30854ef656382911575694439108723698972941e402d737c13df76fdf43597f7b3c2fa9ed27a",
            "028e33fa3f8caa3102c028f3bff6b6680e528d9a0c543c479ef0b0339060ef36",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            (
                "Found public key: a4601f992f24047097a30854ef656382911575694439108723698"
                "972941e402d737c13df76fdf43597f7b3c2fa9ed27a (HD path: m/12381/8444/2/9)"
            )
        )
        != -1
    )
    assert (
        result.output.find(
            (
                "Found private key: "
                "028e33fa3f8caa3102c028f3bff6b6680e528d9a0c543c479ef0b0339060ef36 (HD path: m/12381/8444/2/9)"
            )
        )
        != -1
    )
def test_derive_search_wallet_address(self, tmp_path, keyring_with_one_key):
    """
    Test the `flax keys derive search` command, searching for a wallet address
    """
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "derive",
            "--fingerprint",
            str(TEST_FINGERPRINT),
            "search",
            "--limit",
            "40",
            "--search-type",
            "address",
            # Mainnet address expected at HD path m/12381/8444/2/30.
            "xfx1mnr0ygu7lvmk3nfgzmncfk39fwu0dv933yrcv97nd6pmrt7fzmhs8taffd",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            (
                "Found wallet address: "
                "xfx1mnr0ygu7lvmk3nfgzmncfk39fwu0dv933yrcv97nd6pmrt7fzmhs8taffd (HD path: m/12381/8444/2/30)"
            )
        )
        != -1
    )
def test_derive_search_wallet_testnet_address(self, tmp_path, keyring_with_one_key):
    """
    Test the `flax keys derive search` command, searching for a testnet wallet address
    """
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "derive",
            "--fingerprint",
            str(TEST_FINGERPRINT),
            "search",
            "--limit",
            "40",
            "--search-type",
            "address",
            "txfx1mnr0ygu7lvmk3nfgzmncfk39fwu0dv933yrcv97nd6pmrt7fzmhs2v6lg7",
            # Encode addresses with the testnet bech32m prefix while searching.
            "--prefix",
            "txfx",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            (
                "Found wallet address: "
                "txfx1mnr0ygu7lvmk3nfgzmncfk39fwu0dv933yrcv97nd6pmrt7fzmhs2v6lg7 (HD path: m/12381/8444/2/30)"
            )
        )
        != -1
    )
def test_derive_search_failure(self, tmp_path, keyring_with_one_key):
    """
    Test the `flax keys derive search` command with a failing search.
    """
    keychain = keyring_with_one_key
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 1
    runner = CliRunner()
    # NOTE(review): unlike sibling tests this invocation omits
    # "--no-force-legacy-keyring-migration" — confirm that is intentional.
    result: Result = runner.invoke(
        cli,
        [
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "derive",
            "--fingerprint",
            str(TEST_FINGERPRINT),
            "search",
            "--limit",
            "10",
            "--search-type",
            "all",
            # A term that cannot match anything: the search must fail.
            "something_that_doesnt_exist",
        ],
    )
    assert result.exit_code != 0
def test_derive_search_hd_path(self, tmp_path, empty_keyring, mnemonic_seed_file):
    """
    Test the `flax keys derive search` command, searching under a provided HD path.
    """
    keychain = empty_keyring
    keys_root_path = keychain.keyring_wrapper.keys_root_path
    runner = CliRunner()
    init_result: Result = runner.invoke(
        cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
    )
    assert init_result.exit_code == 0
    assert len(keychain.get_all_private_keys()) == 0
    runner = CliRunner()
    result: Result = runner.invoke(
        cli,
        [
            "--no-force-legacy-keyring-migration",
            "--root-path",
            os.fspath(tmp_path),
            "--keys-root-path",
            os.fspath(keys_root_path),
            "keys",
            "derive",
            # The key comes from the seed file; the keyring stays empty.
            "--mnemonic-seed-filename",
            os.fspath(mnemonic_seed_file),
            "search",
            "--limit",
            "50",
            "--search-type",
            "all",
            # Restrict the search to derivations under this (partially
            # hardened) path prefix.
            "--derive-from-hd-path",
            "m/12381n/8444n/2/",
            "80dc3a2ea450eb09e24debe22e1b5934911ba530792ef0be361badebb168780bd328ff8d4655e5dd573d5bef4a340344",
        ],
    )
    assert result.exit_code == 0
    assert (
        result.output.find(
            (
                "Found public key: 80dc3a2ea450eb09e24debe22e1b5934911ba530792ef0be361bad"
                "ebb168780bd328ff8d4655e5dd573d5bef4a340344 (HD path: m/12381n/8444n/2/35)"
            )
        )
        != -1
    )
def test_derive_wallet_address(self, tmp_path, keyring_with_one_key):
"""
Test the `flax keys derive wallet-address` command, generating a couple of wallet addresses.
"""
keychain = keyring_with_one_key
keys_root_path = keychain.keyring_wrapper.keys_root_path
runner = CliRunner()
init_result: Result = runner.invoke(
cli, ["--root-path", os.fspath(tmp_path), "--keys-root-path", os.fspath(keys_root_path), "init"]
)
assert init_result.exit_code == 0
assert len(keychain.get_all_private_keys()) == 1
runner = CliRunner()
result: Result = runner.invoke(
cli,
[
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/cmds/test_beta.py | tests/core/cmds/test_beta.py | from __future__ import annotations
import zipfile
from pathlib import Path
from typing import Callable, Optional
import pytest
from click.testing import CliRunner, Result
from flax.cmds.beta_funcs import default_beta_root_path
from flax.cmds.flax import cli
from flax.util.beta_metrics import metrics_log_interval_default, metrics_log_interval_max, metrics_log_interval_min
from flax.util.config import lock_and_load_config, save_config
def configure(root_path: Path, *args: str) -> Result:
    """Run `flax beta configure` for *root_path* with any extra CLI arguments."""
    argv = ["--root-path", str(root_path), "beta", "configure", *args]
    return CliRunner().invoke(cli, argv)
def configure_interactive(root_path: Path, user_input: Optional[str] = None) -> Result:
    """Run `flax beta configure` interactively, feeding *user_input* to stdin."""
    argv = ["--root-path", str(root_path), "beta", "configure"]
    return CliRunner().invoke(cli, argv, input=user_input)
def enable(root_path: Path, *args: str) -> Result:
    """Run `flax beta enable --force` for *root_path* with extra CLI arguments."""
    argv = ["--root-path", str(root_path), "beta", "enable", "--force", *args]
    return CliRunner().invoke(cli, argv)
def enable_interactive(root_path: Path, user_input: Optional[str] = None) -> Result:
    """Run `flax beta enable` interactively, feeding *user_input* to stdin."""
    argv = ["--root-path", str(root_path), "beta", "enable"]
    return CliRunner().invoke(cli, argv, input=user_input)
def prepare_submission(root_path: Path, user_input: Optional[str] = None) -> Result:
    """Run `flax beta prepare_submission`, feeding *user_input* to stdin."""
    argv = ["--root-path", str(root_path), "beta", "prepare_submission"]
    return CliRunner().invoke(cli, argv, input=user_input)
def generate_example_submission_data(beta_root_path: Path, versions: int, logs: int) -> None:
    """Create a fake beta-log tree under *beta_root_path*.

    For each version 0..versions-1 a `flax-blockchain` and a `plotting`
    sub-directory are created; each `flax-blockchain` directory gets *logs*
    empty `.log` and `.gz` files, each `plotting` directory *logs* `.log` files.
    """
    for version_number in range(versions):
        version_dir = beta_root_path / str(version_number)
        version_dir.mkdir()
        node_logs = version_dir / "flax-blockchain"
        plot_logs = version_dir / "plotting"
        node_logs.mkdir()
        plot_logs.mkdir()
        for log_number in range(logs):
            # touch() creates the same empty files the original open("w") did.
            (node_logs / f"beta_{log_number}.log").touch()
            (node_logs / f"beta_{log_number + 10}.gz").touch()
            (plot_logs / f"plot_{log_number}.log").touch()
def generate_beta_config(
    root_path: Path, enabled: bool, beta_path: Path, interval: int = metrics_log_interval_default
) -> None:
    """Write a `beta` section with the given values into root_path's config.yaml."""
    beta_section = {
        "enabled": enabled,
        "path": str(beta_path),
        "metrics_log_interval": interval,
    }
    # Hold the config lock while mutating and persisting the file.
    with lock_and_load_config(root_path, "config.yaml") as config:
        config["beta"] = beta_section
        save_config(root_path, "config.yaml", config)
@pytest.mark.parametrize("interval_option", ["--interval", "-i"])
@pytest.mark.parametrize("path_option", ["--path", "-p"])
def test_configure(root_path_populated_with_config: Path, path_option: str, interval_option: str) -> None:
    """`beta configure` persists path and interval for both long and short options."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    beta_path.mkdir()
    generate_beta_config(root_path, True, beta_path)
    result = configure(root_path, path_option, str(beta_path), interval_option, str(metrics_log_interval_max))
    assert result.exit_code == 0
    # The configured values must have been written back to config.yaml.
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert config["beta"] == {
            "enabled": True,
            "path": str(beta_path),
            "metrics_log_interval": metrics_log_interval_max,
        }
def test_configure_no_beta_config(root_path_populated_with_config: Path) -> None:
    """`beta configure` fails when beta mode was never enabled."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    beta_path.mkdir()
    # Precondition: the fresh config has no beta section at all.
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert "beta" not in config
    result = configure(root_path, "--path", str(beta_path))
    assert result.exit_code == 1
    assert "beta test mode is not enabled, enable it first with `flax beta enable`" in result.output
@pytest.mark.parametrize("accept_existing_interval", [True, False])
@pytest.mark.parametrize("accept_existing_path", [True, False])
def test_beta_configure_interactive(
    root_path_populated_with_config: Path, accept_existing_path: bool, accept_existing_interval: bool
) -> None:
    """Interactive `beta configure` accepts or overrides the stored path/interval."""
    # Sanity: the override interval must differ from the default for the
    # accept/override branches to be distinguishable.
    assert metrics_log_interval_default != metrics_log_interval_min
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    generate_beta_config(root_path, True, root_path_populated_with_config)
    # Empty stdin line keeps the existing value; otherwise supply a new one
    # (a new path additionally requires a "y" confirmation).
    path_input = "\n" if accept_existing_path else str(beta_path) + "\ny\n"
    interval_input = "\n" if accept_existing_interval else str(metrics_log_interval_min) + "\n"
    result = configure_interactive(root_path, f"{path_input}{interval_input}")
    assert result.exit_code == 0
    assert "beta config updated" in result.output
    metrics_log_interval = metrics_log_interval_default if accept_existing_interval else metrics_log_interval_min
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert config["beta"] == {
            "enabled": True,
            "path": str(root_path_populated_with_config if accept_existing_path else beta_path),
            "metrics_log_interval": metrics_log_interval,
        }
@pytest.mark.parametrize("option", ["--path", "-p"])
def test_beta_enable(root_path_populated_with_config: Path, option: str) -> None:
    """`beta enable --force` writes a complete beta section with the default interval."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    beta_path.mkdir()
    # Precondition: no beta section exists yet.
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert "beta" not in config
    result = enable(root_path, option, str(beta_path))
    assert result.exit_code == 0
    assert f"beta test mode enabled with path {str(beta_path)!r}" in result.output
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert config["beta"] == {
            "enabled": True,
            "path": str(beta_path),
            "metrics_log_interval": metrics_log_interval_default,
        }
@pytest.mark.parametrize("enabled", [True, False])
def test_beta_enable_preconfigured(root_path_populated_with_config: Path, enabled: bool) -> None:
    """Enabling is rejected when already enabled, and succeeds when only pre-configured."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    beta_path.mkdir()
    generate_beta_config(root_path, enabled, beta_path)
    # "y\n" answers the interactive warning prompt.
    result = enable_interactive(root_path, "y\n")
    if enabled:
        assert result.exit_code == 1
        assert "beta test mode is already enabled" in result.output
    else:
        assert result.exit_code == 0
        assert f"beta test mode enabled with path {str(beta_path)!r}" in result.output
        with lock_and_load_config(root_path, "config.yaml") as config:
            assert config["beta"] == {
                "enabled": True,
                "path": str(beta_path),
                "metrics_log_interval": metrics_log_interval_default,
            }
@pytest.mark.parametrize("accept_default_path", [True, False])
def test_beta_enable_interactive(root_path_populated_with_config: Path, accept_default_path: bool) -> None:
    """Interactive enable uses the default beta path or a user-supplied one."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert "beta" not in config
    # stdin: "y" accepts the warning; an empty line keeps the default path
    # (otherwise the custom path plus a confirming "y").
    result = enable_interactive(root_path, f"y\n{'' if accept_default_path else str(beta_path)}\ny\n")
    assert result.exit_code == 0
    assert (
        f"beta test mode enabled with path {str(default_beta_root_path() if accept_default_path else beta_path)!r}"
        in result.output
    )
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert config["beta"] == {
            "enabled": True,
            "path": str(default_beta_root_path() if accept_default_path else beta_path),
            "metrics_log_interval": metrics_log_interval_default,
        }
def test_beta_enable_interactive_decline_warning(root_path_populated_with_config: Path) -> None:
    """Declining the interactive warning aborts `beta enable`."""
    root_path = root_path_populated_with_config
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert "beta" not in config
    # Answer "n" to the warning prompt, which makes click abort.
    result = enable_interactive(root_path, "n\n")
    assert result.exit_code == 1
    # click terminates aborted runs with "Aborted!\n"; asserting via endswith
    # replaces the brittle fixed-offset slice output[-9:-1].
    assert result.output.endswith("Aborted!\n")
@pytest.mark.parametrize("write_test", [True, False])
@pytest.mark.parametrize("command", [configure, enable])
def test_beta_invalid_directories(
    root_path_populated_with_config: Path, write_test: bool, command: Callable[[Path, str, str], Result]
) -> None:
    """Both `configure` and `enable` reject missing or unwritable beta directories."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    if write_test:
        # Creating `.write_test` as a directory makes the writability probe
        # fail even though beta_path itself exists.
        (beta_path / ".write_test").mkdir(parents=True)  # `.write_test` is used in validate_directory_writable
    if command == configure:
        # `configure` requires beta mode to already be enabled.
        generate_beta_config(root_path, True, root_path_populated_with_config)
    result = command(root_path, "--path", str(beta_path))
    assert result.exit_code == 1
    if write_test:
        assert f"Directory not writable: {str(beta_path)!r}" in result.output
    else:
        assert f"Directory doesn't exist: {str(beta_path)!r}" in result.output
@pytest.mark.parametrize("option", ["-i", "--interval"])
@pytest.mark.parametrize(
    "interval, valid",
    [
        (-1, False),
        (0, False),
        (metrics_log_interval_min - 1, False),
        (metrics_log_interval_min, True),
        (metrics_log_interval_min + 1, True),
        (metrics_log_interval_max + 1, False),
        (metrics_log_interval_max - 1, True),
        (metrics_log_interval_max, True),
    ],
)
def test_beta_configure_interval(
    root_path_populated_with_config: Path, interval: int, valid: bool, option: str
) -> None:
    """Intervals inside [min, max] are accepted; anything outside is rejected."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    beta_path.mkdir()
    generate_beta_config(root_path, True, root_path_populated_with_config)
    result = configure(root_path, "--path", str(beta_path), option, str(interval))
    # Parentheses are required: the old `assert x == 0 if valid else 1` parsed
    # as `assert (x == 0) if valid else 1`, so invalid intervals always passed
    # (the bare constant 1 is truthy).
    assert result.exit_code == (0 if valid else 1)
    if not valid:
        assert f"Must be in the range of {metrics_log_interval_min}s to {metrics_log_interval_max}s." in result.output
@pytest.mark.parametrize("enabled", [True, False])
def test_beta_disable(root_path_populated_with_config: Path, enabled: bool) -> None:
    """`beta disable` turns the mode off, and fails if it was never on."""
    root_path = root_path_populated_with_config
    beta_path = root_path / "beta"
    generate_beta_config(root_path, enabled, beta_path)
    result = CliRunner().invoke(
        cli,
        [
            "--root-path",
            str(root_path),
            "beta",
            "disable",
        ],
    )
    if enabled:
        assert result.exit_code == 0
        assert "beta test mode disabled" in result.output
    else:
        assert result.exit_code == 1
        assert "beta test mode is not enabled" in result.output
    # In both branches the stored config ends up disabled: either the command
    # turned it off, or it was already off.
    with lock_and_load_config(root_path, "config.yaml") as config:
        assert config["beta"] == {
            "enabled": False,
            "path": str(beta_path),
            "metrics_log_interval": metrics_log_interval_default,
        }
@pytest.mark.parametrize(
"versions, logs, choice, exit_code, output",
[
(0, 0, 1, 1, "No beta logs found"),
(1, 0, 1, 1, "No logs files found"),
(2, 10, 3, 1, "Invalid choice: 3"),
(2, 10, 0, 1, "Invalid choice: 0"),
(2, 10, -1, 1, "Invalid choice: -1"),
(4, 3, 2, 0, "Done. You can find the prepared submission data"),
],
)
def test_prepare_submission(
root_path_populated_with_config: Path, versions: int, logs: int, choice: int, exit_code: int, output: str
) -> None:
root_path = root_path_populated_with_config
beta_path = root_path / "beta"
beta_path.mkdir()
generate_beta_config(root_path, True, beta_path)
generate_example_submission_data(beta_path, versions, logs)
result = prepare_submission(root_path, f"{choice}\n")
assert result.exit_code == exit_code
assert output in result.output
if exit_code == 0:
submission_file = list(beta_path.rglob("*.zip"))[0]
assert submission_file.name.startswith(f"submission_{choice - 1}")
with zipfile.ZipFile(submission_file) as zip_file:
all_files = [Path(info.filename) for info in zip_file.filelist]
for version in range(versions):
flax_blockchain_logs = Path("flax-blockchain")
plotting_logs = Path("plotting")
for i in range(logs):
assert flax_blockchain_logs / f"beta_{i}.log" in all_files
assert flax_blockchain_logs / f"beta_{i + 10}.gz" in all_files
assert plotting_logs / f"plot_{i}.log" in all_files
@pytest.mark.parametrize(
"enabled, path, interval",
[
(True, Path("path_1"), metrics_log_interval_min),
(False, Path("path_2"), metrics_log_interval_max),
],
)
def test_beta_status(root_path_populated_with_config: Path, enabled: bool, path: Path, interval: int) -> None:
root_path = root_path_populated_with_config
generate_beta_config(root_path, enabled, path)
result = CliRunner().invoke(
cli,
[
"--root-path",
str(root_path),
"beta",
"status",
],
)
assert result.exit_code == 0
assert f"enabled: {enabled}" in result.output
assert f"path: {str(path)}" in result.output
assert f"metrics log interval: {str(metrics_log_interval_default)}" in result.output
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/cmds/config.py | tests/core/cmds/config.py | from __future__ import annotations
parallel = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/cmds/__init__.py | tests/core/cmds/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/custom_types/test_coin.py | tests/core/custom_types/test_coin.py | from typing import List
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64
from flax.util.hash import std_hash
import pytest
def coin_serialize(amount: uint64, clvm_serialize: bytes, full_serialize: bytes):
c = Coin(bytes32(b"a" * 32), bytes32(b"b" * 32), amount)
expected_hash = (b"a" * 32) + (b"b" * 32) + clvm_serialize
expected_serialization = (b"a" * 32) + (b"b" * 32) + full_serialize
assert c.name() == std_hash(expected_hash)
assert c.to_bytes() == expected_serialization
assert bytes(c) == expected_serialization
# make sure the serialization round-trips
c2 = Coin.from_bytes(expected_serialization)
assert c2 == c
def test_serialization():
coin_serialize(uint64(0xFFFF), bytes([0, 0xFF, 0xFF]), bytes([0, 0, 0, 0, 0, 0, 0xFF, 0xFF]))
coin_serialize(uint64(1337000000), bytes([0x4F, 0xB1, 0x00, 0x40]), bytes([0, 0, 0, 0, 0x4F, 0xB1, 0x00, 0x40]))
# if the amount is 0, the amount is omitted in the "short" format,
# that's hashed
coin_serialize(uint64(0), b"", bytes([0, 0, 0, 0, 0, 0, 0, 0]))
# when amount is > INT64_MAX, the "short" serialization format is 1 byte
# longer, since it needs a leading zero to make it positive
coin_serialize(
uint64(0xFFFFFFFFFFFFFFFF),
bytes([0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
bytes([0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
)
@pytest.mark.parametrize(
"amount, clvm",
[
(0, []),
(1, [1]),
(0xFF, [0, 0xFF]),
(0xFFFF, [0, 0xFF, 0xFF]),
(0xFFFFFF, [0, 0xFF, 0xFF, 0xFF]),
(0xFFFFFFFF, [0, 0xFF, 0xFF, 0xFF, 0xFF]),
(0xFFFFFFFFFF, [0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0xFFFFFFFFFFFF, [0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0xFFFFFFFFFFFFFF, [0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0xFFFFFFFFFFFFFFFF, [0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0x7F, [0x7F]),
(0x7FFF, [0x7F, 0xFF]),
(0x7FFFFF, [0x7F, 0xFF, 0xFF]),
(0x7FFFFFFF, [0x7F, 0xFF, 0xFF, 0xFF]),
(0x7FFFFFFFFF, [0x7F, 0xFF, 0xFF, 0xFF, 0xFF]),
(0x7FFFFFFFFFFF, [0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0x7FFFFFFFFFFFFF, [0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
(0x7FFFFFFFFFFFFFFF, [0x7F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]),
],
)
def test_name(amount: int, clvm: List[int]) -> None:
H1 = bytes32(b"a" * 32)
H2 = bytes32(b"b" * 32)
assert Coin(H1, H2, uint64(amount)).name() == std_hash(H1 + H2 + bytes(clvm))
def test_construction() -> None:
H1 = b"a" * 32
H2 = b"b" * 32
with pytest.raises(OverflowError, match="int too big to convert"):
# overflow
Coin(H1, H2, 0x10000000000000000)
with pytest.raises(OverflowError, match="can't convert negative int to unsigned"):
# overflow
Coin(H1, H2, -1)
H1_short = b"a" * 31
H1_long = b"a" * 33
with pytest.raises(ValueError):
# short hash
Coin(H1_short, H2, 1)
with pytest.raises(ValueError):
# long hash
Coin(H1_long, H2, 1)
with pytest.raises(ValueError):
# short hash
Coin(H2, H1_short, 1)
with pytest.raises(ValueError):
# long hash
Coin(H2, H1_long, 1)
c = Coin(H1, H2, 1000)
assert c.parent_coin_info == H1
assert c.puzzle_hash == H2
assert c.amount == 1000
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/custom_types/test_spend_bundle.py | tests/core/custom_types/test_spend_bundle.py | import json
import unittest
from blspy import G2Element
from flax.types.spend_bundle import SpendBundle
BLANK_SPEND_BUNDLE = SpendBundle(coin_spends=[], aggregated_signature=G2Element())
NULL_SIGNATURE = "0xc" + "0" * 191
class TestStructStream(unittest.TestCase):
def test_from_json_legacy(self):
JSON = (
"""
{
"coin_solutions": [],
"aggregated_signature": "%s"
}
"""
% NULL_SIGNATURE
)
spend_bundle = SpendBundle.from_json_dict(json.loads(JSON))
json_1 = json.loads(JSON)
json_2 = spend_bundle.to_json_dict(include_legacy_keys=True, exclude_modern_keys=True)
assert json_1 == json_2
def test_from_json_new(self):
JSON = (
"""
{
"coin_spends": [],
"aggregated_signature": "%s"
}
"""
% NULL_SIGNATURE
)
spend_bundle = SpendBundle.from_json_dict(json.loads(JSON))
json_1 = json.loads(JSON)
json_2 = spend_bundle.to_json_dict(include_legacy_keys=False, exclude_modern_keys=False)
assert json_1 == json_2
def test_round_trip(self):
spend_bundle = BLANK_SPEND_BUNDLE
round_trip(spend_bundle, include_legacy_keys=True, exclude_modern_keys=True)
round_trip(spend_bundle, include_legacy_keys=True, exclude_modern_keys=False)
round_trip(spend_bundle, include_legacy_keys=False, exclude_modern_keys=False)
def test_dont_use_both_legacy_and_modern(self):
json_1 = BLANK_SPEND_BUNDLE.to_json_dict(include_legacy_keys=True, exclude_modern_keys=False)
with self.assertRaises(ValueError):
SpendBundle.from_json_dict(json_1)
def round_trip(spend_bundle: SpendBundle, **kwargs):
json_dict = spend_bundle.to_json_dict(**kwargs)
if kwargs.get("include_legacy_keys", True):
assert "coin_solutions" in json_dict
else:
assert "coin_solutions" not in json_dict
if kwargs.get("exclude_modern_keys", True):
assert "coin_spends" not in json_dict
else:
assert "coin_spends" in json_dict
if "coin_spends" in json_dict and "coin_solutions" in json_dict:
del json_dict["coin_solutions"]
sb = SpendBundle.from_json_dict(json_dict)
json_dict_2 = sb.to_json_dict()
sb = SpendBundle.from_json_dict(json_dict_2)
json_dict_3 = sb.to_json_dict()
assert json_dict_2 == json_dict_3
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/custom_types/test_proof_of_space.py | tests/core/custom_types/test_proof_of_space.py | from __future__ import annotations
from secrets import token_bytes
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.types.blockchain_format.proof_of_space import ProofOfSpace # pylint: disable=E0401
class TestProofOfSpace:
def test_can_create_proof(self):
"""
Tests that the change of getting a correct proof is exactly 1/target_filter.
"""
num_trials = 100000
success_count = 0
target_filter = 2**DEFAULT_CONSTANTS.NUMBER_ZERO_BITS_PLOT_FILTER
for _ in range(num_trials):
challenge_hash = token_bytes(32)
plot_id = token_bytes(32)
sp_output = token_bytes(32)
if ProofOfSpace.passes_plot_filter(DEFAULT_CONSTANTS, plot_id, challenge_hash, sp_output):
success_count += 1
assert abs((success_count * target_filter / num_trials) - 1) < 0.35
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/custom_types/config.py | tests/core/custom_types/config.py | from __future__ import annotations
parallel = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/custom_types/__init__.py | tests/core/custom_types/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/mempool/test_mempool_fee_estimator.py | tests/core/mempool/test_mempool_fee_estimator.py | from __future__ import annotations
import logging
from random import Random
import pytest
from flax.consensus.cost_calculator import NPCResult
from flax.full_node.bitcoin_fee_estimator import BitcoinFeeEstimator
from flax.full_node.coin_store import CoinStore
from flax.full_node.fee_estimate_store import FeeStore
from flax.full_node.fee_estimator import SmartFeeEstimator
from flax.full_node.fee_tracker import FeeTracker
from flax.full_node.mempool_manager import MempoolManager
from flax.simulator.wallet_tools import WalletTool
from flax.types.blockchain_format.coin import Coin
from flax.types.mempool_item import MempoolItem
from flax.util.ints import uint32, uint64
from tests.core.consensus.test_pot_iterations import test_constants
from tests.util.db_connection import DBConnection
@pytest.mark.asyncio
async def test_basics() -> None:
log = logging.getLogger(__name__)
fee_store = FeeStore()
fee_tracker = FeeTracker(log, fee_store)
wallet_tool = WalletTool(test_constants)
ph = wallet_tool.get_new_puzzlehash()
coin = Coin(ph, ph, uint64(10000))
spend_bundle = wallet_tool.generate_signed_transaction(uint64(10000), ph, coin)
cost = uint64(5000000)
for i in range(300, 700):
i = uint32(i)
items = []
for _ in range(2, 100):
fee = uint64(10000000)
mempool_item = MempoolItem(
spend_bundle,
fee,
NPCResult(None, None, cost),
cost,
spend_bundle.name(),
[],
[],
uint32(i - 1),
)
items.append(mempool_item)
fee1 = uint64(200000)
mempool_item1 = MempoolItem(
spend_bundle,
fee1,
NPCResult(None, None, cost),
cost,
spend_bundle.name(),
[],
[],
uint32(i - 40),
)
items.append(mempool_item1)
fee2 = uint64(0)
mempool_item2 = MempoolItem(
spend_bundle,
fee2,
NPCResult(None, None, cost),
cost,
spend_bundle.name(),
[],
[],
uint32(i - 270),
)
items.append(mempool_item2)
fee_tracker.process_block(i, items)
short, med, long = fee_tracker.estimate_fees()
assert short.median != -1
assert med.median != -1
assert long.median != -1
@pytest.mark.asyncio
async def test_fee_increase() -> None:
async with DBConnection(db_version=2) as db_wrapper:
coin_store = await CoinStore.create(db_wrapper)
mempool_manager = MempoolManager(coin_store, test_constants)
assert test_constants.MAX_BLOCK_COST_CLVM == mempool_manager.constants.MAX_BLOCK_COST_CLVM
btc_fee_estimator: BitcoinFeeEstimator = mempool_manager.mempool.fee_estimator # type: ignore
fee_tracker = btc_fee_estimator.get_tracker()
estimator = SmartFeeEstimator(fee_tracker, uint64(test_constants.MAX_BLOCK_COST_CLVM))
wallet_tool = WalletTool(test_constants)
ph = wallet_tool.get_new_puzzlehash()
coin = Coin(ph, ph, uint64(10000))
spend_bundle = wallet_tool.generate_signed_transaction(uint64(10000), ph, coin)
random = Random(x=1)
for i in range(300, 700):
i = uint32(i)
items = []
for _ in range(0, 20):
fee = uint64(0)
included_height = uint32(random.randint(i - 60, i - 1))
cost = uint64(5000000)
mempool_item = MempoolItem(
spend_bundle,
fee,
NPCResult(None, None, cost),
cost,
spend_bundle.name(),
[],
[],
included_height,
)
items.append(mempool_item)
fee_tracker.process_block(i, items)
short, med, long = fee_tracker.estimate_fees()
mempool_info = mempool_manager.get_mempool_info()
result = estimator.get_estimates(mempool_info, ignore_mempool=True)
assert short.median == -1
assert med.median == -1
assert long.median == 0.0
assert result.error is None
short_estimate = result.estimates[0].estimated_fee_rate
med_estimate = result.estimates[1].estimated_fee_rate
long_estimate = result.estimates[2].estimated_fee_rate
assert short_estimate.mojos_per_clvm_cost == uint64(fee_tracker.buckets[3] / 1000)
assert med_estimate.mojos_per_clvm_cost == uint64(fee_tracker.buckets[3] / 1000)
assert long_estimate.mojos_per_clvm_cost == uint64(0)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/core/mempool/__init__.py | tests/core/mempool/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/farmer_harvester/config.py | tests/farmer_harvester/config.py | from __future__ import annotations
checkout_blocks_and_plots = True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/farmer_harvester/__init__.py | tests/farmer_harvester/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/tests/farmer_harvester/test_farmer_harvester.py | tests/farmer_harvester/test_farmer_harvester.py | from __future__ import annotations
import asyncio
import pytest
from flax.farmer.farmer import Farmer
from flax.simulator.time_out_assert import time_out_assert
from flax.types.peer_info import PeerInfo
from flax.util.keychain import generate_mnemonic
def farmer_is_started(farmer):
return farmer.started
@pytest.mark.asyncio
async def test_start_with_empty_keychain(farmer_one_harvester_not_started):
_, farmer_service, bt = farmer_one_harvester_not_started
farmer: Farmer = farmer_service._node
# First remove all keys from the keychain
bt.local_keychain.delete_all_keys()
# Make sure the farmer service is not initialized yet
assert not farmer.started
# Start it, wait 5 seconds and make sure it still isn't initialized (since the keychain is empty)
await farmer_service.start()
await asyncio.sleep(5)
assert not farmer.started
# Add a key to the keychain, this should lead to the start task passing `setup_keys` and set `Farmer.initialized`
bt.local_keychain.add_private_key(generate_mnemonic())
await time_out_assert(5, farmer_is_started, True, farmer)
# Stop it and wait for `Farmer.initialized` to become reset
farmer_service.stop()
await farmer_service.wait_closed()
assert not farmer.started
@pytest.mark.asyncio
async def test_harvester_handshake(farmer_one_harvester_not_started):
harvesters, farmer_service, bt = farmer_one_harvester_not_started
harvester_service = harvesters[0]
harvester = harvester_service._node
farmer = farmer_service._node
def farmer_has_connections():
return len(farmer.server.get_connections()) > 0
def handshake_task_active():
return farmer.harvester_handshake_task is not None
async def handshake_done() -> bool:
await asyncio.sleep(1)
return harvester.plot_manager._refresh_thread is not None and len(harvester.plot_manager.farmer_public_keys) > 0
# First remove all keys from the keychain
bt.local_keychain.delete_all_keys()
# Handshake task and plot manager thread should not be running yet
assert farmer.harvester_handshake_task is None
assert harvester.plot_manager._refresh_thread is None
# Start both services and wait a bit
await farmer_service.start()
await harvester_service.start()
harvester_service.add_peer(PeerInfo(str(farmer_service.self_hostname), farmer_service._server.get_port()))
# Handshake task should be started but the handshake should not be done
await time_out_assert(5, handshake_task_active, True)
assert not await handshake_done()
# Stop the harvester service and wait for the farmer to lose the connection
harvester_service.stop()
await harvester_service.wait_closed()
await time_out_assert(10, farmer_has_connections, False)
assert not await handshake_done()
# Handshake task should be stopped again
await time_out_assert(5, handshake_task_active, False)
await asyncio.sleep(1)
assert harvester.plot_manager._refresh_thread is None
assert len(harvester.plot_manager.farmer_public_keys) == 0
# Re-start the harvester and make sure the handshake task gets started but the handshake still doesn't go through
await harvester_service.start()
harvester_service.add_peer(PeerInfo(str(farmer_service.self_hostname), farmer_service._server.get_port()))
await time_out_assert(5, handshake_task_active, True)
assert not await handshake_done()
# Stop the farmer and make sure the handshake_task doesn't block the shutdown
farmer_service.stop()
await farmer_service.wait_closed()
await time_out_assert(5, handshake_task_active, False)
# Re-start the farmer and make sure the handshake task succeeds if a key get added to the keychain
await farmer_service.start()
await time_out_assert(5, handshake_task_active, True)
assert not await handshake_done()
bt.local_keychain.add_private_key(generate_mnemonic())
await time_out_assert(5, farmer_is_started, True, farmer)
await time_out_assert(5, handshake_task_active, False)
await time_out_assert(5, handshake_done, True)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/installer-version.py | build_scripts/installer-version.py | from __future__ import annotations
from setuptools_scm import get_version
# example: 1.0b5.dev225
def main():
scm_full_version = get_version(root="..", relative_to=__file__)
# scm_full_version = "1.0.5.dev22"
left_full_version = scm_full_version.split("+")
version = left_full_version[0].split(".")
scm_major_version = version[0]
scm_minor_version = version[1]
if len(version) == 3: # If the length of the version array is more than 2
patch_release_number = version[2]
smc_patch_version = patch_release_number
dev_release_number = ""
elif len(version) == 4:
smc_patch_version = version[2]
dev_release_number = "-" + version[3]
else:
smc_patch_version = ""
dev_release_number = ""
major_release_number = scm_major_version
minor_release_number = scm_minor_version
# If this is a beta dev release, get which beta it is
if "0b" in scm_minor_version:
orignial_minor_ver_list = scm_minor_version.split("0b")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta
minor_release_number = scm_major_version
patch_release_number = orignial_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
elif "0rc" in version[1]:
original_minor_ver_list = scm_minor_version.split("0rc")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate
minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
elif len(version) == 2:
patch_release_number = "0"
elif len(version) == 4: # for 1.0.5.dev2
patch_release_number = smc_patch_version
else:
major_release_number = scm_major_version
minor_release_number = scm_minor_version
patch_release_number = smc_patch_version
dev_release_number = ""
install_release_number = major_release_number + "." + minor_release_number
if len(patch_release_number) > 0:
install_release_number += "." + patch_release_number
if len(dev_release_number) > 0:
install_release_number += dev_release_number
print(str(install_release_number))
if __name__ == "__main__":
main()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/check_dependency_artifacts.py | build_scripts/check_dependency_artifacts.py | from __future__ import annotations
import os
import pathlib
import platform
import subprocess
import sys
import tempfile
excepted_packages = {
"keyrings.cryptfile", # pure python
"dnslib", # pure python
}
def excepted(path: pathlib.Path) -> bool:
# TODO: This should be implemented with a real file name parser though i'm
# uncertain at the moment what package that would be.
name, dash, rest = path.name.partition("-")
return name in excepted_packages
def main() -> int:
with tempfile.TemporaryDirectory() as directory_string:
print(f"Working in: {directory_string}")
print()
directory_path = pathlib.Path(directory_string)
extras = ["upnp"]
package_path_string = os.fspath(pathlib.Path(__file__).parent.parent)
if len(extras) > 0:
package_and_extras = f"{package_path_string}[{','.join(extras)}]"
else:
package_and_extras = package_path_string
print("Downloading packages for Python version:")
lines = [
*sys.version.splitlines(),
"",
f"machine: {platform.machine()}",
f"platform: {platform.platform()}",
]
for line in lines:
print(f" {line}")
print(flush=True)
subprocess.run(
[
sys.executable,
"-m",
"pip",
"download",
"--dest",
os.fspath(directory_path),
"--extra-index",
"https://pypi.chia.net/simple/",
package_and_extras,
],
check=True,
)
failed_artifacts = []
for artifact in directory_path.iterdir():
if artifact.suffix == ".whl":
# everything being a wheel is the target
continue
if excepted(artifact):
continue
failed_artifacts.append(artifact)
if len(failed_artifacts) > 0:
print("The following unacceptable artifacts were downloaded by pip:")
for artifact in failed_artifacts:
print(f" {artifact.name}")
return 1
return 0
sys.exit(main())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/__init__.py | build_scripts/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/npm_macos/__init__.py | build_scripts/npm_macos/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/assets/__init__.py | build_scripts/assets/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/assets/deb/__init__.py | build_scripts/assets/deb/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/assets/rpm/__init__.py | build_scripts/assets/rpm/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/assets/dmg/__init__.py | build_scripts/assets/dmg/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/npm_windows/__init__.py | build_scripts/npm_windows/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/npm_linux/__init__.py | build_scripts/npm_linux/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/build_scripts/npm_global/__init__.py | build_scripts/npm_global/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/10-nonlinearized-dual.py | 10-nonlinearized-dual.py | # -*- coding: utf-8 -*-
# Author xuli.shen
import tensorflow as tf
import tensorflow.contrib.slim as slim
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
import time
from data_process import *
from model import *
import sys
from cvxopt import matrix
from cvxopt.solvers import qp
from cvxopt import solvers
import argparse
parser = argparse.ArgumentParser(description='sIR and subgroup batch_size')
parser.add_argument('--num_of_group', type=int, default=4)
parser.add_argument('--num_minor', type=int, default=2)
parser.add_argument('--train_subgroup_batch', type=int, default=100)
args = parser.parse_args()
num_of_group = args.num_of_group
num_correct = args.num_minor
train_subgroup_batch = args.train_subgroup_batch
solvers.options['show_progress'] = False
date = time.strftime("%Y%m%d%H%M%S", time.localtime())
training_epochs = 2000
test_batch_num = 100
display_step = 1
m = 2 * size
channel = 3
# tf Graph Input
x = tf.placeholder(tf.float32, [None, size*3], name='x')
y = tf.placeholder(tf.float32, [None, 2], name='y')
filter_size1 = 3
num_filters1 = 16
filter_size2 = 3
num_filters2 = 36
final_out_n = 3000
layer_fc1, w1 = new_fc_layer(name="w1",
input=x,
num_inputs=9000,
num_outputs=3000,
use_relu=True)
layer_fc2, w2 = new_fc_layer(name="w2",
input=layer_fc1,
num_inputs=3000,
num_outputs=final_out_n,
use_relu=True)
W = tf.get_variable("w", shape=[final_out_n, 2],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.01, shape=[2]))
pred = tf.nn.softmax(tf.matmul(layer_fc2, W) + b) # Softmax
tf.add_to_collection('pred', pred)
def cost_n_to_list(n, pred):
list_ = []
for _ in range(n):
list_.append(tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred)))
return list_
def max_tensor(cost_combine):
for i in range(len(cost_combine)-1):
if i == 0:
cost = cost_combine[i]
else:
cost = tf.maximum(cost, cost_combine[i+1])
return cost
def grad_combine(cost_combine, var):
gradss = []
for i in range(len(cost_combine)):
gradss.append(tf.gradients(xs=[var], ys=cost_combine[i]))
return gradss
cost_combine = cost_n_to_list(num_of_group, pred)
cost_final = max_tensor(cost_combine)
grad_combine_ = grad_combine(cost_combine, var=W)
grad_combine_b = grad_combine(cost_combine, var=b)
def get_Gh(grad_list, cost_list, m):
N = len(cost_list)
G = np.zeros([N, m])
b = []
for i in range(N):
g = grad_list[i][0].flatten()
G[i][:] = g
b.append(float(cost_list[i])) # add cost
b = np.array(b)
GG = matrix(G)
hh = matrix(b)
return GG, hh
def cal_grad(grad_list, cost_list, m, size_in, size_out):
N = len(cost_list)
GG, hh = get_Gh(grad_list, cost_list, m)
P = matrix(GG)*matrix(GG).T
q = -matrix(hh)
G = matrix(-np.eye(N))
h = matrix(np.zeros(N))
A = matrix(np.ones([1, N]))
b = matrix(np.ones([1]))
res = qp(P, q, G=G, h=h, A=A, b=b)
d = -np.array(GG).T.dot(np.array(res['x'])
)[:, 0].reshape(size_in, size_out)
return d
cost21 = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
grad_w1 = tf.gradients(xs=[w1], ys=cost21)
grad_w2 = tf.gradients(xs=[w2], ys=cost21)
def grad_cost_list(sess, cost_combine, grad_combine_, grad_combine_b, num_correct=num_correct):
grad_list = []
grad_list_b = []
cost_list = []
n = len(cost_combine)
train_x = []
train_y = []
# print(n)
for i in range(n):
if i < n - num_correct:
batch_xs, batch_ys, _ = create_trainbatch(
train_subgroup_batch, channel)
if channel != 0:
batch_xs = batch_xs.reshape(-1, 3*size)
c, g_W, g_b = sess.run([cost_combine[i], grad_combine_[
i], grad_combine_b[i]], feed_dict={x: batch_xs, y: batch_ys})
grad_list.append(g_W)
grad_list_b.append(g_b)
cost_list.append(c)
train_x.append(batch_xs)
train_y.append(batch_ys)
else:
batch_xs, batch_ys, _ = create_trainbatch_all_correct(
train_subgroup_batch, channel)
if channel != 0:
batch_xs = batch_xs.reshape(-1, 3*size)
c, g_W, g_b = sess.run([cost_combine[i], grad_combine_[
i], grad_combine_b[i]], feed_dict={x: batch_xs, y: batch_ys})
grad_list.append(g_W)
grad_list_b.append(g_b)
cost_list.append(c)
train_x.append(batch_xs)
train_y.append(batch_ys)
return np.array(train_x).reshape(n*train_subgroup_batch, size*3), np.array(train_y).reshape(n*train_subgroup_batch, 2), grad_list, grad_list_b, cost_list
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy_count = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
cost = tf.reduce_mean(-tf.reduce_sum(y *
tf.log(tf.clip_by_value(pred, 1e-8, 1.0)), reduction_indices=1))
def print_accuracy(accuracy, accuracy_count, cost):
num = 200
batch_xx, batch_yy, _, d = create_testset(num, channel)
if channel != 0:
batch_xx = batch_xx.reshape(-1, size*3)
batch_xx_correct = batch_xx[:num]
batch_yy_correct = batch_yy[:num]
batch_xx_false = batch_xx[num:]
batch_yy_false = batch_yy[num:]
feed_dict_test_T = {x: batch_xx_correct,
y: batch_yy_correct}
feed_dict_test_F = {x: batch_xx_false,
y: batch_yy_false}
feed_dict_test = {x: batch_xx,
y: batch_yy}
TP = accuracy_count.eval(feed_dict=feed_dict_test_T)
FN = len(batch_xx_correct) - TP
TN = accuracy_count.eval(feed_dict=feed_dict_test_F)
FP = len(batch_xx_false) - TN
recall = TP/(TP + FN + 1e-8)
precision = TP/(TP + FP + 1e-8)
F1 = 2 * ((precision*recall)/(precision + recall + 1e-8))
print(TP, FN, TN, FP)
print(recall, precision, F1, '\n')
cost_f = cost.eval(feed_dict=feed_dict_test)
batch_x, batch_y, _ = create_trainbatch_(200, channel)
if channel != 0:
batch_x = batch_x.reshape(-1, size*3)
feed_dict = {x: batch_x,
y: batch_y}
accuracy_train = accuracy.eval(feed_dict=feed_dict)
return cost_f, F1, accuracy_train, d
acc_list = [0]
acc_train_list = []
costs_list = []
optimizer = tf.train.GradientDescentOptimizer(0.0001)
grads_and_vars_all = optimizer.compute_gradients(cost21)[:-2]
print(grads_and_vars_all, '\n')
training_op_all = optimizer.apply_gradients(grads_and_vars_all)
def exchange(grads_and_vars, grad_, t):
    """Replace the gradient of the last (grad, var) pair, in place, with -grad_*t*100."""
    last_grad, last_var = grads_and_vars[-1]
    if last_grad is not None:
        # swap in the QP descent direction for the framework-computed gradient
        grads_and_vars[-1] = (-grad_ * t * 100, last_var)
def scale_t(max_grad):
    """Count the powers of ten needed to raise max_grad to at least 0.01.

    Returns 0 when max_grad is already >= 0.01.  Fix: the original's
    `return n` sat inside the `if` branch, so the function silently
    returned None whenever no scaling was needed; the return is moved to
    function level.
    """
    n = 0
    num = 0.01
    if max_grad < num:
        while max_grad < num:
            max_grad = max_grad * 10
            n += 1
    return n
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
tf.global_variables_initializer().run()
t = 1.0
m_saver = tf.train.Saver()
print('label_cls : ', label_cls)
for epoch in range(training_epochs):
for i in range(1):
batch_xs21, batch_ys21, grad_list, grad_list_b, cost_list = grad_cost_list(
sess, cost_combine, grad_combine_, grad_combine_b)
grad_ = cal_grad(grad_list, cost_list, m=final_out_n *
2, size_in=final_out_n, size_out=2)
grad_b = cal_grad(grad_list_b, cost_list,
m=2, size_in=1, size_out=2)
grad_ = grad_.astype(np.float32)
grad_b = grad_b.astype(np.float32).reshape(2)
print(np.max(grad_), np.max(grad_b))
print(np.min(abs(grad_)), np.min(abs(grad_b)))
print('iter_{} curr(F1_list) : '.format(epoch), acc_list[-1])
print('iter_{} mean(F1_list) : '.format(
epoch), np.mean(acc_list[-10:-1]))
print('iter_{} max (F1_list) : '.format(epoch), max(acc_list))
training_op = optimizer.apply_gradients(
[((-grad_*t*10000).astype(np.float32), W)])
training_op_b = optimizer.apply_gradients(
[((-grad_b*t*100).astype(np.float32), b)])
sess.run(training_op, feed_dict={x: batch_xs21, y: batch_ys21})
sess.run(training_op_b, feed_dict={x: batch_xs21, y: batch_ys21})
sess.run(training_op_all, feed_dict={x: batch_xs21, y: batch_ys21})
if (epoch+1) % display_step == 0:
costs, acc, acc_train, _ = print_accuracy(
accuracy, accuracy_count, cost)
acc_list.append(acc)
costs_list.append(costs)
acc_train_list.append(acc_train)
# np.savez('./result/{}_F1_{}_{}.npz'.format(label_cls,num_of_group,date),test = acc_list)
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:.5f}".format(costs), "stepsize = {:.4f}".format(
t), "acc_test_f1 = {:.4f}".format(acc), "acc_train =", acc_train)
# if (epoch) % 5 == 0:
# m_saver.save(sess, "models/MER", global_step=epoch)
print("Optimization Finished!")
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/nonlinearized-dual-ResNet.py | nonlinearized-dual-ResNet.py | # -*- coding: utf-8 -*-
# Author xuli.shen
import tensorflow as tf
from ResNets.ResNet_minimax import network
from data_process import *
from model import *
import sys
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
import time
from cvxopt import matrix
from cvxopt.solvers import qp
from cvxopt import solvers
import argparse
parser = argparse.ArgumentParser(description='sIR and subgroup batch_size')
parser.add_argument('--num_of_group', type=int, default=4)
parser.add_argument('--num_minor', type=int, default=2)
parser.add_argument('--train_subgroup_batch', type=int, default=100)
args = parser.parse_args()
num_of_group = args.num_of_group
num_correct = args.num_minor
train_subgroup_batch = args.train_subgroup_batch
solvers.options['show_progress'] = False
date = time.strftime("%Y%m%d%H%M%S", time.localtime())
training_epochs = 2000
test_batch_num = 100
display_step = 1
m = 2 * size # label * size
save = True
# ===================================== Model ================================================
x = tf.placeholder(tf.float32, [None, h, w, 3], name='x')
y = tf.placeholder(tf.float32, [None, 2], name='y')
x_image = x
layer_fc2 = network(x=x_image)
layer_fc2 = tf.layers.flatten(layer_fc2)
weight_regularizer = tf.contrib.layers.l2_regularizer(0.0001)
W = tf.get_variable("w", shape=[
5120, 2], initializer=tf.contrib.layers.variance_scaling_initializer())
b = tf.Variable(tf.constant(0.01, shape=[2]))
pred = tf.nn.softmax(tf.matmul(layer_fc2, W) + b) # Softmax
tf.add_to_collection('pred', pred)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Minimize error using cross entropy
def cost_n_to_list(n, pred):
    """Build *n* identical softmax cross-entropy loss tensors for logits *pred*.

    Each tensor is later evaluated against a different subgroup batch fed
    through the module-level placeholder ``y`` (see grad_cost_list).
    """
    return [
        tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
        for _ in range(n)
    ]
def max_tensor(cost_combine):
    """Return the element-wise maximum over all tensors in *cost_combine*.

    Fixes an indexing bug: the original looped ``for i in range(len-1)``
    seeding the running max with cost_combine[0] at i == 0 but folding in
    cost_combine[i+1] otherwise, so cost_combine[1] was never included
    (for a two-element list the second loss was dropped entirely), and a
    one-element list raised UnboundLocalError.
    """
    cost = cost_combine[0]
    for extra in cost_combine[1:]:
        cost = tf.maximum(cost, extra)
    return cost
def grad_combine(cost_combine, var):
    """Return, for each loss tensor in *cost_combine*, its gradient w.r.t. *var*."""
    return [tf.gradients(xs=[var], ys=loss) for loss in cost_combine]
cost_combine = cost_n_to_list(num_of_group, pred)
cost_final = max_tensor(cost_combine)
grad_combine_ = grad_combine(cost_combine, var=W)
grad_combine_b = grad_combine(cost_combine, var=b)
def get_Gh(grad_list, cost_list, m):
    """Stack per-subgroup gradients and losses into cvxopt matrices for the QP.

    grad_list: N gradients, each a one-element list holding an ndarray; each
        is flattened into one row of an (N, m) matrix.
    cost_list: N scalar losses, collected into a length-N vector.
    m: flattened gradient length (number of matrix columns).
    Returns (GG, hh): cvxopt matrix wrappers of the gradient rows and losses.
    """
    N = len(cost_list)
    G = np.zeros([N, m])
    b = []
    for i in range(N):
        # print(grad_list[i][0])
        g = grad_list[i][0].flatten()  # row i <- flattened gradient of loss i
        # print(g)
        G[i][:] = g
        # G[i][-1] = -1.0
        b.append(float(cost_list[i]))  # add cost
    b = np.array(b)
    # print(b)
    GG = matrix(G)
    hh = matrix(b)
    return GG, hh
def cal_grad(grad_list, cost_list, m, size_in, size_out):
    """Solve the simplex-constrained QP over subgroup losses; return a descent direction.

    Solves min_a 0.5*a'(GG GG')a - hh'a  s.t. a >= 0, sum(a) = 1, then returns
    d = -GG'a reshaped to (size_in, size_out) — the common descent direction
    across subgroup gradients.
    """
    N = len(cost_list)
    GG, hh = get_Gh(grad_list, cost_list, m)
    P = matrix(GG)*matrix(GG).T  # quadratic term: Gram matrix of the gradients
    q = -matrix(hh)  # linear term: negative subgroup losses
    G = matrix(-np.eye(N))  # inequality -a <= 0, i.e. a >= 0
    h = matrix(np.zeros(N))
    A = matrix(np.ones([1, N]))  # equality: weights sum to 1
    b = matrix(np.ones([1]))
    # print(0)
    res = qp(P, q, G=G, h=h, A=A, b=b)
    # print(1)
    d = -np.array(GG).T.dot(np.array(res['x'])
                            )[:, 0].reshape(size_in, size_out)
    # print('\n\n\n ++++++++++++++++++++++ \n',d)
    # print(len(d))
    return d
cost21 = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=pred))
def grad_cost_list(sess, cost_combine, grad_combine_, grad_combine_b, num_correct=num_correct):
    """Evaluate each subgroup's loss and gradients on a freshly sampled batch.

    The first n - num_correct groups draw negative-class batches via
    create_trainbatch; the last num_correct groups draw positive-only batches
    via create_trainbatch_all_correct.  Returns the stacked inputs and labels
    plus the per-group weight-gradient, bias-gradient and loss lists.

    The original duplicated the whole sess.run/append body in both branches;
    only the sampling call differs, so the common part is factored out.
    """
    grad_list = []
    grad_list_b = []
    cost_list = []
    n = len(cost_combine)
    train_x = []
    train_y = []
    for i in range(n):
        if i < n - num_correct:
            batch_xs, batch_ys, _ = create_trainbatch(train_subgroup_batch, 3)
        else:
            batch_xs, batch_ys, _ = create_trainbatch_all_correct(
                train_subgroup_batch, 3)
        c, g_W, g_b = sess.run(
            [cost_combine[i], grad_combine_[i], grad_combine_b[i]],
            feed_dict={x: batch_xs, y: batch_ys})
        grad_list.append(g_W)
        grad_list_b.append(g_b)
        cost_list.append(c)
        train_x.append(batch_xs)
        train_y.append(batch_ys)
    return (np.array(train_x).reshape(n*train_subgroup_batch, h, w, 3),
            np.array(train_y).reshape(n*train_subgroup_batch, 2),
            grad_list, grad_list_b, cost_list)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracy_count = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))
cost = tf.reduce_mean(-tf.reduce_sum(y *
tf.log(tf.clip_by_value(pred, 1e-8, 1.0)), reduction_indices=1))
def print_accuracy(accuracy, accuracy_count, cost):
    """Evaluate test-set F1 and cost plus training accuracy, printing confusion counts.

    Relies on module-level placeholders x, y and the default TF session
    (Tensor.eval).  Returns (test_cost, test_F1, train_accuracy, d) where d
    is the positive-class percentage reported by create_testset.
    """
    num = 200
    # create_testset yields num positive samples followed by num negative ones
    batch_xx, batch_yy, _, d = create_testset(num, 3)
    batch_xx_correct = batch_xx[:num]
    batch_yy_correct = batch_yy[:num]
    batch_xx_false = batch_xx[num:]
    batch_yy_false = batch_yy[num:]
    feed_dict_test_T = {x: batch_xx_correct,
                        y: batch_yy_correct}
    feed_dict_test_F = {x: batch_xx_false,
                        y: batch_yy_false}
    feed_dict_test = {x: batch_xx,
                      y: batch_yy}
    # correct predictions on the positive half are true positives,
    # on the negative half true negatives
    TP = accuracy_count.eval(feed_dict=feed_dict_test_T)
    FN = len(batch_xx_correct) - TP
    TN = accuracy_count.eval(feed_dict=feed_dict_test_F)
    FP = len(batch_xx_false) - TN
    # 1e-8 guards against division by zero when a class is never predicted
    recall = TP/(TP + FN + 1e-8)
    precision = TP/(TP + FP + 1e-8)
    F1 = 2 * ((precision*recall)/(precision + recall + 1e-8))
    print(TP, FN, TN, FP)
    print(recall, precision, F1, '\n')
    cost_f = cost.eval(feed_dict=feed_dict_test)
    batch_x, batch_y, _ = create_trainbatch_(200, 3)
    feed_dict = {x: batch_x,
                 y: batch_y}
    accuracy_train = accuracy.eval(feed_dict=feed_dict)
    return cost_f, F1, accuracy_train, d
def exchange(grads_and_vars, grad_, t):
    """Replace the gradient of the last (grad, var) pair, in place, with -grad_*t*0.0001."""
    last_grad, last_var = grads_and_vars[-1]
    if last_grad is not None:
        # swap in the QP descent direction for the framework-computed gradient
        grads_and_vars[-1] = (-grad_ * t * 0.0001, last_var)
def print_grad(grads_and_vars):
    """Debug helper: print the gradients of the last five (grad, var) pairs.

    NOTE(review): this reads module globals sess/batch_xs21/batch_ys21 that
    are only bound inside the training loop, and it passes np.max(g) to
    sess.run while g appears to be a graph tensor here — confirm this helper
    actually works before relying on it.
    """
    for i, (g, v) in enumerate(grads_and_vars[-5:]):
        if g is not None:
            print(g)
            print(sess.run(np.max(g), feed_dict={
                x: batch_xs21, y: batch_ys21}))
init = tf.global_variables_initializer()
acc_list = [0]
costs_list = []
acc_train_list = []
optimizer = tf.train.GradientDescentOptimizer(0.0001)
grads_and_vars_all = optimizer.compute_gradients(cost21)[:-2]
training_op_all = optimizer.apply_gradients(grads_and_vars_all)
grads_check = tf.gradients(cost, tf.trainable_variables())
def scale_t(max_grad):
    """Return the (non-positive) number of decades by which max_grad exceeds 0.1.

    Scales max_grad down by 10x until it is <= 0.1, decrementing once per
    step; returns 0 when no scaling is needed.
    """
    threshold = 0.1
    steps = 0
    while max_grad > threshold:
        max_grad = max_grad * 0.1
        steps -= 1
    return steps
cost_before = 1000
cost_now = 10000
# Start training
with tf.Session() as sess:
sess.run(init)
t = 1
print('\n ++ {} ++ \n'.format(label_cls))
m_saver = tf.train.Saver()
for epoch in range(training_epochs):
for i in range(1):
batch_xs21, batch_ys21, grad_list, grad_list_b, cost_list = grad_cost_list(
sess, cost_combine, grad_combine_, grad_combine_b)
grad_ = cal_grad(grad_list, cost_list, m=5120 *
2, size_in=5120, size_out=2)
grad_b = cal_grad(grad_list_b, cost_list,
m=2, size_in=1, size_out=2)
grad_ = grad_.astype(np.float32)
grad_b = grad_b.astype(np.float32).reshape(2)
max_grad_ = np.max(grad_)
scale_grad_ = scale_t(max_grad_)
max_grad_b = np.max(grad_b)
scale_grad_b = scale_t(max_grad_b)
grad_ = grad_ * (10 ** 5)
grad_b = grad_b * (10 ** 3)
print(scale_grad_, scale_grad_b)
print(np.max(grad_), np.max(grad_b))
print(np.min(abs(grad_)), np.min(abs(grad_b)))
print('iter_{} curr(F1_list) : '.format(epoch), acc_list[-1])
print('iter_{} mean(F1_list) : '.format(
epoch), np.mean(acc_list[-10:-1]))
print('iter_{} max (F1_list) : '.format(epoch), max(acc_list))
training_op = optimizer.apply_gradients(
[((-grad_*t*1).astype(np.float32), W)])
training_op_b = optimizer.apply_gradients(
[((-grad_b*t*1).astype(np.float32), b)])
sess.run(training_op, feed_dict={x: batch_xs21, y: batch_ys21})
sess.run(training_op_b, feed_dict={x: batch_xs21, y: batch_ys21})
sess.run(training_op_all, feed_dict={x: batch_xs21, y: batch_ys21})
if (epoch) % 1 == 0:
cost_before = cost_now
costs, acc, acc_train, _ = print_accuracy(
accuracy, accuracy_count, cost)
acc_list.append(acc)
costs_list.append(costs)
cost_now = costs
acc_train_list.append(acc_train)
print("Epoch:", '%04d' % (epoch+1), "cost =", "{:.9f}".format(costs),
"stepsize =", t, "acc_test_F1 = {:.4f}".format(acc), "acc_train =", acc_train)
if save:
np.savez('./result/{}_F1__resnet_nonlinear_{}_{}.npz'.format(label_cls,
num_of_group, date), test=acc_list)
print("Optimization Finished!")
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/data_process.py | data_process.py | # -*- coding: UTF-8 -*-
# Author xuli.shen
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
# h,w = 60,50
h, w = (60, 50)  # target image height/width after resize
size = h * w  # pixels per greyscale image
# Receding_Hairline Wearing_Necktie Rosy_Cheeks Eyeglasses Goatee Chubby
# Sideburns Blurry Wearing_Hat Double_Chin Pale_Skin Gray_Hair Mustache Bald
label_cls = 'Eyeglasses'  # CelebA attribute used as the binary label
pngs = sorted(glob.glob('./data/img_align_celeba/*.jpg'))
data = pd.read_table('./data/list_attr_celeba.txt',
                     delim_whitespace=True, error_bad_lines=False)
eyeglasses = np.array(data[label_cls])
# CelebA attributes are +/-1; map to {0, 1}
eyeglasses_cls = (eyeglasses + 1)/2
label_glasses = np.zeros((202599, 2))  # one-hot labels for every image
# index pools: "correct" = positive class, "false" = negative class;
# indices < 160000 go to training, the rest to test
correct_list = []
correct_list_test = []
false_list = []
false_list_test = []
for i in range(len(label_glasses)):
    if eyeglasses_cls[i] == 1:
        label_glasses[i][1] = 1
        if i < 160000:
            correct_list.append(i)
        else:
            correct_list_test.append(i)
    else:
        label_glasses[i][0] = 1
        if i < 160000:
            false_list.append(i)
        else:
            false_list_test.append(i)
print(len(correct_list_test), len(false_list_test))
# global train/test splits of the one-hot labels and scalar classes
training_set_label = label_glasses[0:160000, :]
test_set_label = label_glasses[160000:, :]
training_set_cls = eyeglasses_cls[0:160000]
test_set_cls = eyeglasses_cls[160000:]
def create_trainbatch(num=10, channel=0):
    """Sample *num* negative-class training images (drawn from false_list).

    channel == 0 yields greyscale arrays of shape (num, h, w); otherwise RGB
    arrays of shape (num, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values).
    """
    train_num = random.sample(false_list, num)
    if channel == 0:
        train_set = np.zeros((num, h, w))
    else:
        train_set = np.zeros((num, h, w, 3))
    train_set_label_ = []
    train_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[train_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            train_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            train_set[i, :, :, :] = img_grey
        train_set_label_.append(training_set_label[train_num[i]])
        train_set_cls_.append(training_set_cls[train_num[i]])
    # if channel == 0:
    # train_set = train_set.reshape(size,num).T
    train_set_label_new = np.array(train_set_label_)
    train_set_cls_new = np.array(train_set_cls_)
    return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_all_correct(num=10, channel=0):
    """Sample *num* positive-class training images (drawn from correct_list).

    channel == 0 yields greyscale arrays of shape (num, h, w); otherwise RGB
    arrays of shape (num, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values).
    (Removed the original's unused local counter ``n``.)
    """
    train_num = random.sample(correct_list, num)
    if channel == 0:
        train_set = np.zeros((num, h, w))
    else:
        train_set = np.zeros((num, h, w, 3))
    train_set_label_ = []
    train_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[train_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            train_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            train_set[i, :, :, :] = img_grey
        train_set_label_.append(training_set_label[train_num[i]])
        train_set_cls_.append(training_set_cls[train_num[i]])
    train_set_label_new = np.array(train_set_label_)
    train_set_cls_new = np.array(train_set_cls_)
    return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_(num=10, channel=0):
    """Sample a balanced training batch: num/2 positives then num/2 negatives.

    channel == 0 yields greyscale arrays of shape (num, h, w); otherwise RGB
    arrays of shape (num, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values).
    (Removed the original's unused local counter ``n``.)
    """
    train_num1 = random.sample(correct_list, int(num/2))
    train_num2 = random.sample(false_list, int(num/2))
    train_num = train_num1+train_num2
    if channel == 0:
        train_set = np.zeros((num, h, w))
    else:
        train_set = np.zeros((num, h, w, 3))
    train_set_label_ = []
    train_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[train_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            train_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            train_set[i, :, :, :] = img_grey
        train_set_label_.append(training_set_label[train_num[i]])
        train_set_cls_.append(training_set_cls[train_num[i]])
    train_set_label_new = np.array(train_set_label_)
    train_set_cls_new = np.array(train_set_cls_)
    return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_grad(num=200, channel=0):
    """Sample a fixed imbalanced batch: 10 positive + 190 negative training images.

    The 10/190 split is hard-coded, so the batch length is always 200; *num*
    is kept only for interface compatibility.  Fix: the original sized the
    output arrays with *num*, which raised an IndexError for num < 200 and
    left all-zero rows for num > 200 — sizes are now derived from the actual
    sample count (identical behavior at the default num=200).
    """
    train_num1 = random.sample(correct_list, int(10))
    train_num2 = random.sample(false_list, int(190))
    train_num = train_num1+train_num2
    num = len(train_num)  # derive size from the actual samples
    if channel == 0:
        train_set = np.zeros((num, h, w))
    else:
        train_set = np.zeros((num, h, w, 3))
    train_set_label_ = []
    train_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[train_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            train_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            train_set[i, :, :, :] = img_grey
        train_set_label_.append(training_set_label[train_num[i]])
        train_set_cls_.append(training_set_cls[train_num[i]])
    train_set_label_new = np.array(train_set_label_)
    train_set_cls_new = np.array(train_set_cls_)
    return train_set/255, train_set_label_new, train_set_cls_new
def create_testset(num=100, channel=0):
    """Sample a balanced test batch: *num* positives followed by *num* negatives.

    channel == 0 yields greyscale arrays of shape (2*num, h, w); otherwise
    RGB arrays of shape (2*num, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values, positive_percentage).
    """
    test_num1 = random.sample(correct_list_test, num)
    test_num2 = random.sample(false_list_test, num)
    test_num = test_num1 + test_num2
    if channel == 0:
        test_set = np.zeros((num*2, h, w))
    else:
        test_set = np.zeros((num*2, h, w, 3))
    test_set_label_ = []
    test_set_cls_ = []
    for i in range(num*2):
        img = Image.open(pngs[test_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            test_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            test_set[i, :, :, :] = img_grey
        test_set_label_.append(label_glasses[test_num[i]])
        test_set_cls_.append(eyeglasses_cls[test_num[i]])
    # if channel == 0:
    #     test_set = test_set.reshape(size,num*2).T
    test_set_label_new = np.array(test_set_label_)
    test_set_cls_new = np.array(test_set_cls_)
    return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
def create_testset_all(channel=0):
    """Load the entire test split (positives first, then negatives, each shuffled).

    channel == 0 yields greyscale arrays of shape (N, h, w); otherwise RGB
    arrays of shape (N, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values, positive_percentage).
    """
    test_num1 = random.sample(correct_list_test, len(correct_list_test))
    test_num2 = random.sample(false_list_test, len(false_list_test))
    test_num = test_num1 + test_num2
    # test_num =
    num = len(test_num)
    if channel == 0:
        test_set = np.zeros((num, h, w))
    else:
        test_set = np.zeros((num, h, w, 3))
    test_set_label_ = []
    test_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[test_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            test_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            test_set[i, :, :, :] = img_grey
        test_set_label_.append(label_glasses[test_num[i]])
        test_set_cls_.append(eyeglasses_cls[test_num[i]])
    # if channel == 0:
    #     test_set = test_set.reshape(size,num).T
    test_set_label_new = np.array(test_set_label_)
    test_set_cls_new = np.array(test_set_cls_)
    return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
def create_testset_unbalanced(channel=0):
    """Sample an imbalanced test batch: 10 positives followed by 190 negatives.

    channel == 0 yields greyscale arrays of shape (200, h, w); otherwise RGB
    arrays of shape (200, h, w, 3).  Pixels are scaled to [0, 1].
    Returns (images, one_hot_labels, class_values, positive_percentage).
    """
    test_num1 = random.sample(correct_list_test, 10)
    test_num2 = random.sample(false_list_test, 190)
    test_num = test_num1 + test_num2
    # test_num =
    num = len(test_num)
    if channel == 0:
        test_set = np.zeros((num, h, w))
    else:
        test_set = np.zeros((num, h, w, 3))
    test_set_label_ = []
    test_set_cls_ = []
    for i in range(num):
        img = Image.open(pngs[test_num[i]])
        img_grey = img.resize((w, h))
        if channel == 0:
            img_grey = np.array(img_grey.convert('L'))  # greyscale
            test_set[i, :, :] = img_grey
        else:
            img_grey = np.array(img_grey)
            test_set[i, :, :, :] = img_grey
        test_set_label_.append(label_glasses[test_num[i]])
        test_set_cls_.append(eyeglasses_cls[test_num[i]])
    # if channel == 0:
    #     test_set = test_set.reshape(size,num).T
    test_set_label_new = np.array(test_set_label_)
    test_set_cls_new = np.array(test_set_cls_)
    return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/model.py | model.py | #-*- coding: UTF-8 -*-
# Author xuli.shen
import tensorflow as tf
def new_weights(name,shape):
    """Create a trainable weight variable with Xavier (Glorot) initialization."""
    return tf.get_variable(name ,shape, initializer=tf.contrib.layers.xavier_initializer())
def new_biases(length):
    """Create a trainable bias vector of *length* initialized to the constant 0.01."""
    return tf.Variable(tf.constant(0.01, shape=[length]))
def new_fc_layer(name,
                 input,
                 num_inputs,
                 num_outputs,
                 use_relu=True):
    """Fully connected layer: input @ W + b, with optional ReLU.

    Returns (layer_output, weights) so callers can regularize or inspect W.
    """
    weights = new_weights(name,shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)
    layer = tf.matmul(input, weights) + biases
    if use_relu:
        layer = tf.nn.relu(layer)
    return layer, weights
def new_conv_layer(name,
                   input,
                   num_input_channels,
                   filter_size,
                   num_filters,
                   use_pooling=True):
    """Stride-1 SAME conv layer with optional 2x2 max-pool followed by ReLU.

    Returns (layer_output, weights).
    NOTE(review): ReLU is applied only when use_pooling is True — with
    use_pooling=False the output is the raw linear conv; confirm intended.
    """
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(name,shape=shape)
    biases = new_biases(length=num_filters)
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    layer += biases
    if use_pooling:
        # halve the spatial resolution, then activate
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
        layer = tf.nn.relu(layer)
    return layer, weights
def flatten_layer(layer):
    """Reshape a conv feature map to (batch, features); also return the feature count."""
    layer_shape = layer.get_shape()
    # product of the H, W, C dimensions (indices 1:4)
    num_features = layer_shape[1:4].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat, num_features
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/WDRO_for_toydata.py | WDRO_for_toydata.py | # -*- coding: utf-8 -*-
# Author xuli.shen
import tensorflow as tf
import numpy as np
from cvxopt import matrix
from cvxopt.solvers import qp
from cvxopt import solvers
import matplotlib.pyplot as plt
tf.reset_default_graph()
np.random.seed(10)
rand_x = np.random.randn(1500)/50
np.random.seed(8)
rand_y = np.random.randn(1500)/50
# rand_y = np.random.randn(1500)/50
solvers.options['show_progress'] = False
other = [(1.23, 3.01),(0.98, 3.32),(1.77, 3.92),(1.48, 4.52),(0.63, 2.89), (1.92, 5.0), (1.1, 2.8),(0.71, 3.17),
(1.64, 4.54),(1.26, 3.96),(1.22, 2.84), (0.77, 2.59),(1.89, 5.1),(1.13,3.17), (1.31, 2.91)]
u2 = np.zeros((1515,1))
v2 = np.zeros((1515,1))
for i in range(500):
u2[i],v2[i] = 0.16+rand_x[i], 1.22+rand_y[i]
for i in range(500):
u2[i+500],v2[i+500] = 0.43+rand_x[i+500],1.45+rand_y[i+500]
for i in range(500):
u2[i+1000],v2[i+1000] = 0.04+rand_x[i+1000],1.59+rand_y[i+1000]
for i in range(15):
u2[i+1500],v2[i+1500] = other[i][0],other[i][1]
# Separate dataset into two subgroups.
X1 = tf.constant(u2[:1500])
y1 = tf.constant(v2[:1500])
X2 = tf.constant(u2[1500:])
y2 = tf.constant(v2[1500:])
w = tf.get_variable("w", shape=[1, 1], initializer=tf.contrib.layers.xavier_initializer(),dtype='float64')
b = tf.Variable(tf.constant(0.1, shape=[1],dtype='float64'))
z1 = tf.reduce_mean(tf.square(tf.matmul(X1,w)+b - y1))
z2 = tf.reduce_mean(tf.square(tf.matmul(X2,w)+b - y2))
# Define the max_mean of each subgroup's loss
# according to equation (1).
z = tf.maximum(z1,z2)
z1_grad = tf.gradients(ys=z1,xs=w)
z2_grad = tf.gradients(ys=z2,xs=w)
z1_grad_b = tf.gradients(ys=z1,xs=b)
z2_grad_b = tf.gradients(ys=z2,xs=b)
# WDRO = []
# MSE = []
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('start...')
for i in range(300):
# Compute the gradient of 'w'.
GG = np.zeros([2,1])
hh = np.zeros(2)
g1 = sess.run(z1_grad)
g2 = sess.run(z2_grad)
GG[0,:] = g1[0].reshape(-1)
GG[1,:] = g2[0].reshape(-1)
hh[0],hh[1] = sess.run([z1,z2])
P = matrix(GG)*matrix(GG).T
q = -matrix(hh)
G = matrix(-np.eye(2))
h = matrix(np.zeros(2))
A = matrix(np.ones([1,2]))
b_matrix = matrix(np.ones([1]))
# Solve quadratic programming of the equation (4)-(5).
res = qp(P,q,G=G,h=h,A=A,b=b_matrix)
# Get descent direction.
d = -np.array(GG).T.dot(np.array(res['x']))[:,0].reshape(-1,1)
now = sess.run(z)
ww = sess.run(w)
t = 10
# This part is optional:
# The implementation of line-search.
for j in range(100):
if sess.run(z,feed_dict={w:ww+t*d}) < now:
break
t = 0.8*t
sess.run(w.assign(ww+t*d))
# Compute the gradient of 'b'.
# This part is similar to get the descent direction of 'w'.
GG = np.zeros([2,1])
hh = np.zeros(2)
g1 = sess.run(z1_grad_b)
g2 = sess.run(z2_grad_b)
GG[0,:] = g1[0].reshape(-1)
GG[1,:] = g2[0].reshape(-1)
hh[0],hh[1] = sess.run([z1,z2])
P = matrix(GG)*matrix(GG).T
q = -matrix(hh)
G = matrix(-np.eye(2))
h = matrix(np.zeros(2))
A = matrix(np.ones([1,2]))
b_matrix = matrix(np.ones([1]))
res = qp(P,q,G=G,h=h,A=A,b=b_matrix)
db = -np.array(GG).T.dot(np.array(res['x']))[:,0]
bb = sess.run(b)
sess.run(b.assign(bb+0.1*db))
cost11 = sess.run(z)
print("epoch =",i+1,", WDRO =",cost11,", stepsize =",t)
wWDRO,bWDRO = sess.run(w)[0], sess.run(b)
tf.reset_default_graph()
x2 = np.zeros((1515,1))
y2 = np.zeros((1515,1))
for i in range(500):
x2[i],y2[i] = 0.16+rand_x[i], 1.22+rand_y[i]
for i in range(500):
x2[i+500],y2[i+500] = 0.43+rand_x[i+500],1.45+rand_y[i+500]
for i in range(500):
x2[i+1000],y2[i+1000] = 0.04+rand_x[i+1000],1.59+rand_y[i+1000]
for i in range(15):
x2[i+1500],y2[i+1500] = other[i][0],other[i][1]
x = tf.placeholder(tf.float32, [None, 1], name='x')
y = tf.placeholder(tf.float32, [None, 1], name='y')
W = tf.get_variable("w", shape=[1, 1], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[1]))
init = tf.global_variables_initializer()
pred = tf.matmul(x, W) +b
cost = tf.reduce_mean(tf.square(pred-y))
opt = tf.train.GradientDescentOptimizer(0.1).minimize(cost)
with tf.Session() as sess:
sess.run(init)
for i in range(300):
feed_dicts = {x:x2,
y:y2}
feed_dicts_n = {x:x2[:1500],
y:y2[:1500]}
feed_dicts_p = {x:x2[1500:],
y:y2[1500:]}
cosss,_ = sess.run((cost,opt), feed_dict=feed_dicts)
cost_n = sess.run(cost,feed_dict=feed_dicts_n)
cost_p = sess.run(cost,feed_dict=feed_dicts_p)
cost_max = np.max([cost_n,cost_p])
print("epoch =",i+1,", loss =",cosss, "WDRO = ",cost_max)
wout,bout = sess.run(W),sess.run(b)
plt.figure(figsize=(10,8))
group1_x = u2[:1500]
group1_y = v2[:1500]
group2_x = u2[1500:]
group2_y = v2[1500:]
# plt.scatter(kk1,pp1,color = 'r')
plt.scatter(group1_x,group1_y,color = '#4682B4')
plt.scatter(group2_x,group2_y,s=10)
plt.title('Linear Regression Example',fontdict={'weight':'normal','size': 20})
plt.plot([-0.4,3],[-0.4*wout[0] + bout, 3*wout[0] + bout],linewidth=1.0,color='coral',linestyle='-')
plt.plot([-0.4,3],[-0.4*wWDRO+bWDRO, 3*wWDRO+bWDRO],linewidth=1.0,color='#4682B4',linestyle='-')
plt.legend(['AvgLoss','MaxLoss'],fontsize=20)
plt.tick_params(labelsize=16)
plt.savefig('./outcome/regression_WDRO_compare.png', dpi=300)
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/ResNets/ResNet_minimax.py | ResNets/ResNet_minimax.py | # From https://github.com/taki0112/ResNet-Tensorflow.
import time
from ResNets.ops import *
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
import time
def network(x, res_n=18, is_training=True, reuse=False):
    """Build a ResNet feature extractor ending in a 5120-unit linear layer.

    x: input image batch tensor.
    res_n: ResNet depth (18/34/50/101/152); depths < 50 use basic residual
        blocks, deeper ones use bottleneck blocks.
    is_training: batch-norm mode flag.
    reuse: variable-scope reuse flag.
    Returns the (batch, 5120) feature tensor produced by the 'logit' layer.
    """
    with tf.variable_scope("network", reuse=reuse):
        if res_n < 50:
            residual_block = resblock
        else:
            residual_block = bottle_resblock
        residual_list = get_residual_layer(res_n)
        ch = 32
        x = conv(x, channels=ch, kernel=3, stride=1, scope='conv')
        # stage 0: keep resolution
        for i in range(residual_list[0]):
            x = residual_block(x, channels=ch, is_training=is_training,
                               downsample=False, scope='resblock0_' + str(i))
        # each later stage starts with a downsampling block that doubles channels
        x = residual_block(x, channels=ch*2, is_training=is_training,
                           downsample=True, scope='resblock1_0')
        for i in range(1, residual_list[1]):
            x = residual_block(x, channels=ch*2, is_training=is_training,
                               downsample=False, scope='resblock1_' + str(i))
        x = residual_block(x, channels=ch*4, is_training=is_training,
                           downsample=True, scope='resblock2_0')
        for i in range(1, residual_list[2]):
            x = residual_block(x, channels=ch*4, is_training=is_training,
                               downsample=False, scope='resblock2_' + str(i))
        x = residual_block(x, channels=ch*8, is_training=is_training,
                           downsample=True, scope='resblock_3_0')
        for i in range(1, residual_list[3]):
            x = residual_block(x, channels=ch*8, is_training=is_training,
                               downsample=False, scope='resblock_3_' + str(i))
        x = batch_norm(x, is_training, scope='batch_norm')
        x = relu(x)
        x = fully_conneted(x, units=5120, scope='logit')
        return x
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
bisno/WDRO | https://github.com/bisno/WDRO/blob/9e75dffe0251bc8e368997c392d8c77645a47630/ResNets/ops.py | ResNets/ops.py | # From https://github.com/taki0112/ResNet-Tensorflow.
import tensorflow as tf
import tensorflow.contrib as tf_contrib
weight_init = tf_contrib.layers.variance_scaling_initializer()
weight_regularizer = tf_contrib.layers.l2_regularizer(0.0001)
def conv(x, channels, kernel=4, stride=2, padding='SAME', use_bias=True, scope='conv_0'):
    """2-D conv layer with variance-scaling init and L2 weight regularization."""
    with tf.variable_scope(scope):
        x = tf.layers.conv2d(inputs=x, filters=channels,
                             kernel_size=kernel, kernel_initializer=weight_init,
                             kernel_regularizer=weight_regularizer,
                             strides=stride, use_bias=use_bias, padding=padding)
        return x
def fully_conneted(x, units, use_bias=True, scope='fully_0'):
    """Flatten x then apply a dense layer of *units* outputs.

    (Name misspelling is kept — external callers use it.)
    """
    with tf.variable_scope(scope):
        x = flatten(x)
        x = tf.layers.dense(x, units=units, kernel_initializer=weight_init,
                            kernel_regularizer=weight_regularizer, use_bias=use_bias)
        return x
def resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='resblock'):
    """Pre-activation basic residual block: BN-ReLU-conv, BN-ReLU-conv, plus shortcut.

    When downsample is True the first conv uses stride 2 and the shortcut is
    projected with a 1x1 stride-2 conv so the shapes match.
    """
    with tf.variable_scope(scope):
        x = batch_norm(x_init, is_training, scope='batch_norm_0')
        x = relu(x)
        if downsample:
            x = conv(x, channels, kernel=3, stride=2,
                     use_bias=use_bias, scope='conv_0')
            x_init = conv(x_init, channels, kernel=1, stride=2,
                          use_bias=use_bias, scope='conv_init')
        else:
            x = conv(x, channels, kernel=3, stride=1,
                     use_bias=use_bias, scope='conv_0')
        x = batch_norm(x, is_training, scope='batch_norm_1')
        x = relu(x)
        x = conv(x, channels, kernel=3, stride=1,
                 use_bias=use_bias, scope='conv_1')
        return x + x_init
def bottle_resblock(x_init, channels, is_training=True, use_bias=True, downsample=False, scope='bottle_resblock'):
    """Pre-activation bottleneck residual block: 1x1 -> 3x3 -> 1x1 (4*channels out).

    The shortcut is always projected with a 1x1 conv to 4*channels; when
    downsample is True both the 3x3 conv and the projection use stride 2.
    """
    with tf.variable_scope(scope):
        x = batch_norm(x_init, is_training, scope='batch_norm_1x1_front')
        shortcut = relu(x)
        x = conv(shortcut, channels, kernel=1, stride=1,
                 use_bias=use_bias, scope='conv_1x1_front')
        x = batch_norm(x, is_training, scope='batch_norm_3x3')
        x = relu(x)
        if downsample:
            x = conv(x, channels, kernel=3, stride=2,
                     use_bias=use_bias, scope='conv_0')
            shortcut = conv(shortcut, channels*4, kernel=1,
                            stride=2, use_bias=use_bias, scope='conv_init')
        else:
            x = conv(x, channels, kernel=3, stride=1,
                     use_bias=use_bias, scope='conv_0')
            shortcut = conv(shortcut, channels * 4, kernel=1,
                            stride=1, use_bias=use_bias, scope='conv_init')
        x = batch_norm(x, is_training, scope='batch_norm_1x1_back')
        x = relu(x)
        x = conv(x, channels*4, kernel=1, stride=1,
                 use_bias=use_bias, scope='conv_1x1_back')
        return x + shortcut
def get_residual_layer(res_n):
    """Return the per-stage residual-block counts for a given ResNet depth.

    Unknown depths yield an empty list, matching the original fall-through.
    """
    depth_to_blocks = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    return depth_to_blocks.get(res_n, [])
def flatten(x):
    """Flatten all dimensions of x except the batch dimension."""
    return tf.layers.flatten(x)
def global_avg_pooling(x):
    """Global average pool over the spatial axes, keeping singleton H/W dims."""
    gap = tf.reduce_mean(x, axis=[1, 2], keepdims=True)
    return gap
def avg_pooling(x):
    """2x2 average pooling with stride 2 and SAME padding."""
    return tf.layers.average_pooling2d(x, pool_size=2, strides=2, padding='SAME')
def relu(x):
    """ReLU activation."""
    return tf.nn.relu(x)
def batch_norm(x, is_training=True, scope='batch_norm'):
    """Batch normalization (decay 0.9, eps 1e-5) with in-place moving-average updates."""
    return tf_contrib.layers.batch_norm(x,
                                        decay=0.9, epsilon=1e-05,
                                        center=True, scale=True, updates_collections=None,
                                        is_training=is_training, scope=scope)
def classification_loss(logit, label):
    """Return (mean softmax cross-entropy loss, accuracy) for one-hot labels."""
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logit))
    prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    return loss, accuracy
def classification_loss_weighted(logit, label):
    """Return (mean weighted cross-entropy with pos_weight=2, accuracy).

    Positive targets are up-weighted 2x relative to negatives.
    """
    loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(
        targets=label, logits=logit, pos_weight=2))
    # cost1 = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=y, logits=pred,pos_weight=1))
    prediction = tf.equal(tf.argmax(logit, -1), tf.argmax(label, -1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    return loss, accuracy
| python | MIT | 9e75dffe0251bc8e368997c392d8c77645a47630 | 2026-01-05T07:14:07.318150Z | false |
howardhsu/DE-CNN | https://github.com/howardhsu/DE-CNN/blob/7a3cb9325b26be8eeee85d872a13ff848f613914/script/train.py | script/train.py | import argparse
import torch
import time
import json
import numpy as np
import math
import random
np.random.seed(1337)
random.seed(1337)
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
def batch_generator(X, y, batch_size=128, return_idx=False, crf=False):
    """Yield CUDA tensor mini-batches from zero-padded numpy arrays X (ids) and y.

    Rows within each batch are sorted by descending true length (count of
    non-zero ids) as required by pack_padded_sequence.  2-D y (per-token tags)
    is packed unless crf=True; with return_idx=True the sort permutation is
    also yielded so callers can restore the original order.
    NOTE(review): requires a CUDA device (.cuda() on every tensor).
    """
    for offset in range(0, X.shape[0], batch_size):
        # true sequence lengths: id 0 is padding
        batch_X_len=np.sum(X[offset:offset+batch_size]!=0, axis=1)
        batch_idx=batch_X_len.argsort()[::-1]
        batch_X_len=batch_X_len[batch_idx]
        batch_X_mask=(X[offset:offset+batch_size]!=0)[batch_idx].astype(np.uint8)
        batch_X=X[offset:offset+batch_size][batch_idx]
        batch_y=y[offset:offset+batch_size][batch_idx]
        batch_X = torch.autograd.Variable(torch.from_numpy(batch_X).long().cuda() )
        batch_X_mask=torch.autograd.Variable(torch.from_numpy(batch_X_mask).long().cuda() )
        batch_y = torch.autograd.Variable(torch.from_numpy(batch_y).long().cuda() )
        if len(batch_y.size() )==2 and not crf:
            batch_y=torch.nn.utils.rnn.pack_padded_sequence(batch_y, batch_X_len, batch_first=True)
        if return_idx: #in testing, need to sort back.
            yield (batch_X, batch_y, batch_X_len, batch_X_mask, batch_idx)
        else:
            yield (batch_X, batch_y, batch_X_len, batch_X_mask)
class Model(torch.nn.Module):
    """DE-CNN aspect-extraction tagger.

    Looks up a frozen general-purpose embedding and a frozen
    domain-specific embedding per token, concatenates them, runs the
    result through a stack of 1-D convolutions (two parallel kernels
    first, then three sequential layers), and classifies every token
    into `num_classes` tags.  Decoding is a per-token softmax, or a CRF
    when crf=True.
    """
    def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False):
        # gen_emb / domain_emb: pretrained (vocab, dim) numpy matrices,
        # installed as frozen embedding tables (requires_grad=False).
        super(Model, self).__init__()
        self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
        self.gen_embedding.weight=torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
        self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
        self.domain_embedding.weight=torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
        # two parallel convolutions with different receptive fields; their
        # 128-channel outputs are concatenated to 256 channels in forward()
        self.conv1=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 5, padding=2 )
        self.conv2=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 3, padding=1 )
        self.dropout=torch.nn.Dropout(dropout)
        self.conv3=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv4=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv5=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.linear_ae=torch.nn.Linear(256, num_classes)
        self.crf_flag=crf
        if self.crf_flag:
            # imported lazily so allennlp is only required when crf=True
            from allennlp.modules import ConditionalRandomField
            self.crf=ConditionalRandomField(num_classes)
    def forward(self, x, x_len, x_mask, x_tag=None, testing=False):
        """Return per-token scores (testing=True) or the training loss.

        x: (batch, seq) token ids; x_len: true lengths (sorted descending
        by the batch generator); x_mask: 1 where x is a real token.
        x_tag holds gold tags and is required when testing=False.
        """
        x_emb=torch.cat((self.gen_embedding(x), self.domain_embedding(x) ), dim=2)
        x_emb=self.dropout(x_emb).transpose(1, 2)
        x_conv=torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv3(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv4(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv5(x_conv) )
        x_conv=x_conv.transpose(1, 2)
        x_logit=self.linear_ae(x_conv)
        if testing:
            if self.crf_flag:
                score=self.crf.viterbi_tags(x_logit, x_mask)
            else:
                # NOTE(review): relies on the legacy implicit-dim
                # log_softmax; the transposes put the class axis where old
                # PyTorch normalized by default -- confirm before porting.
                x_logit=x_logit.transpose(2, 0)
                score=torch.nn.functional.log_softmax(x_logit).transpose(2, 0)
        else:
            if self.crf_flag:
                score=-self.crf(x_logit, x_tag, x_mask)
            else:
                # pack logits to align with the packed gold tags produced
                # by the batch generator, then NLL over real tokens only
                x_logit=torch.nn.utils.rnn.pack_padded_sequence(x_logit, x_len, batch_first=True)
                score=torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(x_logit.data), x_tag.data)
        return score
def valid_loss(model, valid_X, valid_y, crf=False):
    """Return the mean per-batch loss of `model` over (valid_X, valid_y).

    Switches to eval mode for the pass and back to train mode before
    returning.  NOTE(review): `loss.data[0]` is the pre-0.4 PyTorch way
    of reading a scalar; on modern PyTorch this would need `loss.item()`.
    """
    model.eval()
    losses=[]
    for batch in batch_generator(valid_X, valid_y, crf=crf):
        batch_valid_X, batch_valid_y, batch_valid_X_len, batch_valid_X_mask=batch
        loss=model(batch_valid_X, batch_valid_X_len, batch_valid_X_mask, batch_valid_y)
        losses.append(loss.data[0])
    model.train()
    return sum(losses)/len(losses)
def train(train_X, train_y, valid_X, valid_y, model, model_fn, optimizer, parameters, epochs=200, batch_size=128, crf=False):
    """Train `model`, checkpointing to `model_fn` on best validation loss.

    After every epoch the training data is reshuffled and both training
    and validation losses are recorded.  Returns (train_history,
    valid_history), one loss per epoch each.

    NOTE(review): the final torch.load only rebinds the *local* name
    `model`; the caller's model object is NOT replaced by the best
    checkpoint -- callers must reload `model_fn` themselves.
    """
    best_loss=float("inf")
    valid_history=[]
    train_history=[]
    for epoch in range(epochs):
        for batch in batch_generator(train_X, train_y, batch_size, crf=crf):
            batch_train_X, batch_train_y, batch_train_X_len, batch_train_X_mask=batch
            loss=model(batch_train_X, batch_train_X_len, batch_train_X_mask, batch_train_y)
            optimizer.zero_grad()
            loss.backward()
            # clip_grad_norm is the legacy (pre-1.0) name of clip_grad_norm_
            torch.nn.utils.clip_grad_norm(parameters, 1.)
            optimizer.step()
        loss=valid_loss(model, train_X, train_y, crf=crf)
        train_history.append(loss)
        loss=valid_loss(model, valid_X, valid_y, crf=crf)
        valid_history.append(loss)
        if loss<best_loss:
            best_loss=loss
            torch.save(model, model_fn)
        # reshuffle the training set for the next epoch
        shuffle_idx=np.random.permutation(len(train_X) )
        train_X=train_X[shuffle_idx]
        train_y=train_y[shuffle_idx]
    model=torch.load(model_fn)
    return train_history, valid_history
def run(domain, data_dir, model_dir, valid_split, runs, epochs, lr, dropout, batch_size=128):
    """Train `runs` independent DE-CNN models for one domain.

    Loads the pretrained general/domain embeddings and the preprocessed
    dataset from `data_dir`, holds out the last `valid_split` training
    examples for validation, and lets train() save each run's best model
    to model_dir+domain+run_id.
    """
    gen_emb = np.load(data_dir + "gen.vec.npy")
    domain_emb = np.load(data_dir + domain + "_emb.vec.npy")
    ae_data = np.load(data_dir + domain + ".npz")
    # hold out the tail of the training set for validation
    valid_X = ae_data['train_X'][-valid_split:]
    valid_y = ae_data['train_y'][-valid_split:]
    train_X = ae_data['train_X'][:-valid_split]
    train_y = ae_data['train_y'][:-valid_split]
    for r in range(runs):
        print(r)
        model = Model(gen_emb, domain_emb, 3, dropout=dropout, crf=False)
        model.cuda()
        # only non-frozen parameters are optimized (both embedding tables
        # are created with requires_grad=False)
        parameters = [p for p in model.parameters() if p.requires_grad]
        optimizer = torch.optim.Adam(parameters, lr=lr)
        # bug fix: batch_size was previously accepted but never forwarded,
        # so the --batch_size CLI flag was silently ignored.
        train_history, valid_history = train(
            train_X, train_y, valid_X, valid_y, model,
            model_dir + domain + str(r), optimizer, parameters, epochs,
            batch_size=batch_size, crf=False)
if __name__ == "__main__":
    # CLI entry point: hyper-parameters and data/model locations for training.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str, default="model/")
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--runs', type=int, default=5)
    parser.add_argument('--domain', type=str, default="laptop")
    parser.add_argument('--data_dir', type=str, default="data/prep_data/")
    parser.add_argument('--valid', type=int, default=150) #number of validation data.
    parser.add_argument('--lr', type=float, default=0.0001)
    parser.add_argument('--dropout', type=float, default=0.55)
    args = parser.parse_args()
    run(args.domain, args.data_dir, args.model_dir, args.valid, args.runs, args.epochs, args.lr, args.dropout, args.batch_size)
| python | MIT | 7a3cb9325b26be8eeee85d872a13ff848f613914 | 2026-01-05T07:14:07.694069Z | false |
howardhsu/DE-CNN | https://github.com/howardhsu/DE-CNN/blob/7a3cb9325b26be8eeee85d872a13ff848f613914/script/prep_oov.py | script/prep_oov.py | import argparse
import numpy as np
import json
import subprocess
def fill_np_embedding(emb_file, word_idx_fn, oov_fn):
    """Overwrite rows of a saved embedding matrix with OOV vectors.

    Loads the word->row-index map from `word_idx_fn` (JSON) and the
    matrix from `emb_file` (.npy); for every word in the fastText-style
    text file `oov_fn` that appears in the index, replaces that word's
    row with the vector from the file.  The matrix is written back to
    `emb_file` as float32.

    Raises ValueError if a vector's dimensionality does not match the
    matrix width (previously this surfaced as an opaque broadcast error).
    """
    # word-vector files are conventionally UTF-8; be explicit so the
    # script does not depend on the platform's default encoding
    with open(word_idx_fn, encoding='utf-8') as f:
        word_idx = json.load(f)
    embedding = np.load(emb_file)
    dim = embedding.shape[1]
    with open(oov_fn, encoding='utf-8') as f:
        for line in f:
            rec = line.rstrip().split(' ')
            # fastText-style files start with a "<vocab> <dim>" header line
            if len(rec) == 2:
                continue
            word, values = rec[0], rec[1:]
            if word not in word_idx:
                continue
            if len(values) != dim:
                raise ValueError(
                    'bad vector for %r: expected %d dims, got %d'
                    % (word, dim, len(values)))
            embedding[word_idx[word]] = np.array([float(v) for v in values])
    np.save(emb_file, embedding.astype('float32'))
# CLI entry: patch the saved domain embedding matrices in place with
# vectors from the separately produced OOV .vec files.
parser = argparse.ArgumentParser()
parser.add_argument('--laptop_emb_np', type=str, default="laptop_emb.vec.npy")
parser.add_argument('--restaurant_emb_np', type=str, default="restaurant_emb.vec.npy")
parser.add_argument('--out_dir', type=str, default="data/prep_data/")
parser.add_argument('--laptop_oov', type=str, default="laptop_oov.vec")
parser.add_argument('--restaurant_oov', type=str, default="restaurant_oov.vec")
parser.add_argument('--word_idx', type=str, default="word_idx.json")
args = parser.parse_args()
fill_np_embedding(args.out_dir+args.laptop_emb_np, args.out_dir+args.word_idx, args.out_dir+args.laptop_oov)
fill_np_embedding(args.out_dir+args.restaurant_emb_np, args.out_dir+args.word_idx, args.out_dir+args.restaurant_oov)
howardhsu/DE-CNN | https://github.com/howardhsu/DE-CNN/blob/7a3cb9325b26be8eeee85d872a13ff848f613914/script/evaluation.py | script/evaluation.py | import argparse
import torch
import time
import json
import numpy as np
import math
import random
import xml.etree.ElementTree as ET
from subprocess import check_output
np.random.seed(1337)
random.seed(1337)
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
class Model(torch.nn.Module):
    """DE-CNN tagger, duplicated from the training script.

    Kept structurally identical to script/train.py's Model -- presumably
    so that torch.load can unpickle checkpoints saved there; verify the
    two definitions stay in sync when editing either.  Frozen general +
    domain embeddings, a stack of 1-D convolutions, and a per-token
    classifier (softmax or CRF).
    """
    def __init__(self, gen_emb, domain_emb, num_classes=3, dropout=0.5, crf=False):
        # gen_emb / domain_emb: pretrained (vocab, dim) numpy matrices,
        # installed as frozen embedding tables (requires_grad=False).
        super(Model, self).__init__()
        self.gen_embedding = torch.nn.Embedding(gen_emb.shape[0], gen_emb.shape[1])
        self.gen_embedding.weight=torch.nn.Parameter(torch.from_numpy(gen_emb), requires_grad=False)
        self.domain_embedding = torch.nn.Embedding(domain_emb.shape[0], domain_emb.shape[1])
        self.domain_embedding.weight=torch.nn.Parameter(torch.from_numpy(domain_emb), requires_grad=False)
        # two parallel convolutions; outputs concatenated to 256 channels
        self.conv1=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 5, padding=2 )
        self.conv2=torch.nn.Conv1d(gen_emb.shape[1]+domain_emb.shape[1], 128, 3, padding=1 )
        self.dropout=torch.nn.Dropout(dropout)
        self.conv3=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv4=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.conv5=torch.nn.Conv1d(256, 256, 5, padding=2)
        self.linear_ae=torch.nn.Linear(256, num_classes)
        self.crf_flag=crf
        if self.crf_flag:
            # imported lazily so allennlp is only required when crf=True
            from allennlp.modules import ConditionalRandomField
            self.crf=ConditionalRandomField(num_classes)
    def forward(self, x, x_len, x_mask, x_tag=None, testing=False):
        """Return per-token scores (testing=True) or the training loss."""
        x_emb=torch.cat((self.gen_embedding(x), self.domain_embedding(x) ), dim=2)
        x_emb=self.dropout(x_emb).transpose(1, 2)
        x_conv=torch.nn.functional.relu(torch.cat((self.conv1(x_emb), self.conv2(x_emb)), dim=1) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv3(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv4(x_conv) )
        x_conv=self.dropout(x_conv)
        x_conv=torch.nn.functional.relu(self.conv5(x_conv) )
        x_conv=x_conv.transpose(1, 2)
        x_logit=self.linear_ae(x_conv)
        if testing:
            if self.crf_flag:
                score=self.crf.viterbi_tags(x_logit, x_mask)
            else:
                # legacy implicit-dim log_softmax over transposed logits
                x_logit=x_logit.transpose(2, 0)
                score=torch.nn.functional.log_softmax(x_logit).transpose(2, 0)
        else:
            if self.crf_flag:
                score=-self.crf(x_logit, x_tag, x_mask)
            else:
                # pack logits so they align with packed gold tags
                x_logit=torch.nn.utils.rnn.pack_padded_sequence(x_logit, x_len, batch_first=True)
                score=torch.nn.functional.nll_loss(torch.nn.functional.log_softmax(x_logit.data), x_tag.data)
        return score
def label_rest_xml(fn, output_fn, corpus, label):
    """Write predicted opinion-target spans into the SemEval restaurant XML.

    Walks each sentence's raw text character by character while advancing
    through its tokens, converting the per-token tag sequence in `label`
    into character offsets (tags look like 0=outside, 1=begin, 2=inside,
    inferred from the transition logic -- verify against preprocessing),
    and appends an <Opinions> element per sentence to the template `fn`,
    saving the result as `output_fn`.  `corpus` holds token lists aligned
    with the template's sentence order.
    """
    dom=ET.parse(fn)
    root=dom.getroot()
    pred_y=[]
    for zx, sent in enumerate(root.iter("sentence") ) :
        tokens=corpus[zx]
        lb=label[zx]
        opins=ET.Element("Opinions")
        # token_idx: current token; pt: char offset inside that token;
        # tag_on: a span is currently open starting at `start`.
        token_idx, pt, tag_on=0, 0, False
        start, end=-1, -1
        for ix, c in enumerate(sent.find('text').text):
            if token_idx<len(tokens) and pt>=len(tokens[token_idx] ):
                pt=0
                token_idx+=1
            if token_idx<len(tokens) and lb[token_idx]==1 and pt==0 and c!=' ':
                if tag_on:
                    # a new span begins right after the previous one: close it
                    end=ix
                    tag_on=False
                    opin=ET.Element("Opinion")
                    opin.attrib['target']=sent.find('text').text[start:end]
                    opin.attrib['from']=str(start)
                    opin.attrib['to']=str(end)
                    opins.append(opin)
                start=ix
                tag_on=True
            elif token_idx<len(tokens) and lb[token_idx]==2 and pt==0 and c!=' ' and not tag_on:
                start=ix
                tag_on=True
            elif token_idx<len(tokens) and (lb[token_idx]==0 or lb[token_idx]==1) and tag_on and pt==0:
                end=ix
                tag_on=False
                opin=ET.Element("Opinion")
                opin.attrib['target']=sent.find('text').text[start:end]
                opin.attrib['from']=str(start)
                opin.attrib['to']=str(end)
                opins.append(opin)
            elif token_idx>=len(tokens) and tag_on:
                # ran out of tokens: close the open span at the current char
                end=ix
                tag_on=False
                opin=ET.Element("Opinion")
                opin.attrib['target']=sent.find('text').text[start:end]
                opin.attrib['from']=str(start)
                opin.attrib['to']=str(end)
                opins.append(opin)
            if c==' ':
                pass
            elif tokens[token_idx][pt:pt+2]=='``' or tokens[token_idx][pt:pt+2]=="''":
                # tokenizer rewrote a quote char as a two-char token while
                # the raw text still holds one character: skip both
                pt+=2
            else:
                pt+=1
        if tag_on:
            # span still open at end of sentence: close it at the text end
            tag_on=False
            end=len(sent.find('text').text)
            opin=ET.Element("Opinion")
            opin.attrib['target']=sent.find('text').text[start:end]
            opin.attrib['from']=str(start)
            opin.attrib['to']=str(end)
            opins.append(opin)
        sent.append(opins )
    dom.write(output_fn)
def label_laptop_xml(fn, output_fn, corpus, label):
    """Write predicted aspect-term spans into the SemEval laptop XML.

    Same character/token alignment walk as label_rest_xml, but emits
    <aspectTerms>/<aspectTerm term=...> elements and additionally skips
    non-breaking spaces (ord 160) present in the laptop texts.
    """
    dom=ET.parse(fn)
    root=dom.getroot()
    pred_y=[]
    for zx, sent in enumerate(root.iter("sentence") ) :
        tokens=corpus[zx]
        lb=label[zx]
        opins=ET.Element("aspectTerms")
        # token_idx: current token; pt: char offset inside that token;
        # tag_on: a span is currently open starting at `start`.
        token_idx, pt, tag_on=0, 0, False
        start, end=-1, -1
        for ix, c in enumerate(sent.find('text').text):
            if token_idx<len(tokens) and pt>=len(tokens[token_idx] ):
                pt=0
                token_idx+=1
            if token_idx<len(tokens) and lb[token_idx]==1 and pt==0 and c!=' ':
                if tag_on:
                    # new span begins right after the previous one: close it
                    end=ix
                    tag_on=False
                    opin=ET.Element("aspectTerm")
                    opin.attrib['term']=sent.find('text').text[start:end]
                    opin.attrib['from']=str(start)
                    opin.attrib['to']=str(end)
                    opins.append(opin)
                start=ix
                tag_on=True
            elif token_idx<len(tokens) and lb[token_idx]==2 and pt==0 and c!=' ' and not tag_on:
                start=ix
                tag_on=True
            elif token_idx<len(tokens) and (lb[token_idx]==0 or lb[token_idx]==1) and tag_on and pt==0:
                end=ix
                tag_on=False
                opin=ET.Element("aspectTerm")
                opin.attrib['term']=sent.find('text').text[start:end]
                opin.attrib['from']=str(start)
                opin.attrib['to']=str(end)
                opins.append(opin)
            elif token_idx>=len(tokens) and tag_on:
                # ran out of tokens: close the open span at the current char
                end=ix
                tag_on=False
                opin=ET.Element("aspectTerm")
                opin.attrib['term']=sent.find('text').text[start:end]
                opin.attrib['from']=str(start)
                opin.attrib['to']=str(end)
                opins.append(opin)
            if c==' ' or ord(c)==160:
                # 160 = non-breaking space: treated as whitespace
                pass
            elif tokens[token_idx][pt:pt+2]=='``' or tokens[token_idx][pt:pt+2]=="''":
                # two-char quote token vs. single raw character: skip both
                pt+=2
            else:
                pt+=1
        if tag_on:
            # span still open at end of sentence: close it at the text end
            tag_on=False
            end=len(sent.find('text').text)
            opin=ET.Element("aspectTerm")
            opin.attrib['term']=sent.find('text').text[start:end]
            opin.attrib['from']=str(start)
            opin.attrib['to']=str(end)
            opins.append(opin)
        sent.append(opins )
    dom.write(output_fn)
def test(model, test_X, raw_X, domain, command, template, batch_size=128, crf=False):
    """Predict tags for the test set and score with the official evaluator.

    Runs the model batch-wise (each batch sorted by length, predictions
    restored to input order), writes the predicted spans into the XML
    template, shells out to the java scorer in `command`, and returns the
    F1 parsed from its stdout.

    NOTE(review): predictions are stored in a hard-coded (N, 83) buffer,
    so sequences are capped at 83 tokens; the stdout indices used to pull
    out F1 are specific to the two jars wired up in __main__.
    """
    pred_y=np.zeros((test_X.shape[0], 83), np.int16)
    model.eval()
    for offset in range(0, test_X.shape[0], batch_size):
        batch_test_X_len=np.sum(test_X[offset:offset+batch_size]!=0, axis=1)
        # sort longest-first for the model, exactly as in training
        batch_idx=batch_test_X_len.argsort()[::-1]
        batch_test_X_len=batch_test_X_len[batch_idx]
        batch_test_X_mask=(test_X[offset:offset+batch_size]!=0)[batch_idx].astype(np.uint8)
        batch_test_X=test_X[offset:offset+batch_size][batch_idx]
        batch_test_X_mask=torch.autograd.Variable(torch.from_numpy(batch_test_X_mask).long().cuda() )
        batch_test_X = torch.autograd.Variable(torch.from_numpy(batch_test_X).long().cuda() )
        batch_pred_y=model(batch_test_X, batch_test_X_len, batch_test_X_mask, testing=True)
        # undo the length sort so predictions line up with the input order
        r_idx=batch_idx.argsort()
        if crf:
            batch_pred_y=[batch_pred_y[idx] for idx in r_idx]
            for ix in range(len(batch_pred_y) ):
                for jx in range(len(batch_pred_y[ix]) ):
                    pred_y[offset+ix,jx]=batch_pred_y[ix][jx]
        else:
            batch_pred_y=batch_pred_y.data.cpu().numpy().argmax(axis=2)[r_idx]
            pred_y[offset:offset+batch_size,:batch_pred_y.shape[1]]=batch_pred_y
    model.train()
    assert len(pred_y)==len(test_X)
    command=command.split()
    if domain=='restaurant':
        # command[6] is the prediction XML path the scorer expects
        label_rest_xml(template, command[6], raw_X, pred_y)
        acc=check_output(command ).split()
        print(acc)
        return float(acc[9][10:])
    elif domain=='laptop':
        # command[4] is the prediction XML path the scorer expects
        label_laptop_xml(template, command[4], raw_X, pred_y)
        acc=check_output(command ).split()
        print(acc)
        return float(acc[15])
def evaluate(runs, data_dir, model_dir, domain, command, template):
    """Score every trained run for `domain` and print the mean F1.

    Loads the preprocessed test matrix and the raw test tokens, then
    torch.load's each saved run (model_dir+domain+r) and evaluates it
    with test().
    """
    ae_data=np.load(data_dir+domain+".npz")
    with open(data_dir+domain+"_raw_test.json") as f:
        raw_X=json.load(f)
    results=[]
    for r in range(runs):
        model=torch.load(model_dir+domain+str(r) )
        result=test(model, ae_data['test_X'], raw_X, domain, command, template, crf=False)
        results.append(result)
    print(sum(results)/len(results) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--runs', type=int, default=5)
    parser.add_argument('--data_dir', type=str, default="data/prep_data/")
    parser.add_argument('--model_dir', type=str, default="model/")
    parser.add_argument('--domain', type=str, default="laptop")
    args = parser.parse_args()
    # Per-domain scorer invocations; the pred.xml path embedded in each
    # command is where test() writes the predictions before scoring.
    if args.domain=='restaurant':
        command="java -cp script/A.jar absa16.Do Eval -prd data/official_data/pred.xml -gld data/official_data/EN_REST_SB1_TEST.xml.gold -evs 2 -phs A -sbt SB1"
        template="data/official_data/EN_REST_SB1_TEST.xml.A"
    elif args.domain=='laptop':
        command="java -cp script/eval.jar Main.Aspects data/official_data/pred.xml data/official_data/Laptops_Test_Gold.xml"
        template="data/official_data/Laptops_Test_Data_PhaseA.xml"
    evaluate(args.runs, args.data_dir, args.model_dir, args.domain, command, template)
| python | MIT | 7a3cb9325b26be8eeee85d872a13ff848f613914 | 2026-01-05T07:14:07.694069Z | false |
howardhsu/DE-CNN | https://github.com/howardhsu/DE-CNN/blob/7a3cb9325b26be8eeee85d872a13ff848f613914/script/prep_embedding.py | script/prep_embedding.py | import argparse
import numpy as np
import json
import subprocess
def gen_np_embedding(fn, word_idx_fn, out_fn, dim=300):
    """Build a dense embedding matrix for a fixed vocabulary.

    Reads the word->row-index map from `word_idx_fn` (JSON) and the text
    word-vector file `fn`, writing two artifacts:
      - out_fn + ".npy": float32 matrix of shape (len(word_idx)+2, dim),
        zero rows for words without a pretrained vector;
      - out_fn + ".oov.txt": one word per line for every vocabulary word
        that had no vector in `fn`.
    """
    with open(word_idx_fn, encoding='utf-8') as f:
        word_idx = json.load(f)
    # +2 leaves room for ids beyond the indexed vocabulary (presumably
    # padding/unknown -- TODO confirm against the preprocessing code).
    embedding = np.zeros((len(word_idx) + 2, dim))
    filled = set()
    # word-vector files are conventionally UTF-8; be explicit
    with open(fn, encoding='utf-8') as f:
        for line in f:
            rec = line.rstrip().split(' ')
            # fastText-style files start with a "<vocab> <dim>" header line
            if len(rec) == 2:
                continue
            word = rec[0]
            if word in word_idx:
                embedding[word_idx[word]] = np.array([float(v) for v in rec[1:]])
                filled.add(word)
    # Track filled rows explicitly: the old `row.sum() == 0` heuristic
    # misreported legitimate vectors that happen to sum to zero as OOV.
    with open(out_fn + ".oov.txt", "w", encoding='utf-8') as fw:
        for w in word_idx:
            if w not in filled:
                fw.write(w + "\n")
    np.save(out_fn + ".npy", embedding.astype('float32'))
# CLI entry: convert the general + two domain text word-vector files into
# .npy matrices aligned with word_idx.json, recording words that have no
# pretrained vector in per-file .oov.txt lists.
parser = argparse.ArgumentParser()
parser.add_argument('--emb_dir', type=str, default="data/embedding/")
parser.add_argument('--out_dir', type=str, default="data/prep_data/")
parser.add_argument('--gen_emb', type=str, default="gen.vec")
parser.add_argument('--laptop_emb', type=str, default="laptop_emb.vec")
parser.add_argument('--restaurant_emb', type=str, default="restaurant_emb.vec")
parser.add_argument('--word_idx', type=str, default="word_idx.json")
parser.add_argument('--gen_dim', type=int, default=300)
parser.add_argument('--domain_dim', type=int, default=100)
args = parser.parse_args()
gen_np_embedding(args.emb_dir+args.gen_emb, args.out_dir+args.word_idx, args.out_dir+args.gen_emb, args.gen_dim)
gen_np_embedding(args.emb_dir+args.laptop_emb, args.out_dir+args.word_idx, args.out_dir+args.laptop_emb, args.domain_dim)
gen_np_embedding(args.emb_dir+args.restaurant_emb, args.out_dir+args.word_idx, args.out_dir+args.restaurant_emb, args.domain_dim)
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/train.py | train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""
from fairseq_cli.train import cli_main
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/setup.py | setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >= 3.6 is required for fairseq.')
with open('README_fairseq.md') as f:
readme = f.read()
if sys.platform == 'darwin':
extra_compile_args = ['-stdlib=libc++', '-O3']
else:
extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
    """Extension whose numpy include dir is resolved lazily.

    `import numpy` happens inside the include_dirs property, i.e. only at
    build time -- after setup_requires has had a chance to install numpy.

    Source: https://stackoverflow.com/a/54128391
    """
    def __init__(self, *args, **kwargs):
        # Name-mangled attribute so it cannot collide with the base
        # Extension's own include_dirs handling.
        self.__include_dirs = []
        super().__init__(*args, **kwargs)
    @property
    def include_dirs(self):
        # Deferred import: see class docstring.
        import numpy
        return self.__include_dirs + [numpy.get_include()]
    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__include_dirs = dirs
extensions = [
Extension(
'fairseq.libbleu',
sources=[
'fairseq/clib/libbleu/libbleu.cpp',
'fairseq/clib/libbleu/module.cpp',
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.data_utils_fast',
sources=['fairseq/data/data_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.token_block_utils_fast',
sources=['fairseq/data/token_block_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat',
sources=[
'fairseq/clib/libnat/edit_dist.cpp',
],
)
])
if 'CUDA_HOME' in os.environ:
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat_cuda',
sources=[
'fairseq/clib/libnat_cuda/edit_dist.cu',
'fairseq/clib/libnat_cuda/binding.cpp'
],
)])
cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
pass
if 'READTHEDOCS' in os.environ:
# don't build extensions when generating docs
extensions = []
if 'build_ext' in cmdclass:
del cmdclass['build_ext']
# use CPU build of PyTorch
dependency_links = [
'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
]
else:
dependency_links = []
if 'clean' in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)
setup(
name='fairseq',
version='0.9.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
setup_requires=[
'cython',
'numpy',
'setuptools>=18.0',
],
install_requires=[
'cffi',
'cython',
'hydra-core',
'dataclasses',
'editdistance',
'numpy',
'regex',
'sacrebleu',
'torch',
'tqdm',
],
dependency_links=dependency_links,
packages=find_packages(exclude=['scripts', 'tests']),
ext_modules=extensions,
test_suite='tests',
entry_points={
'console_scripts': [
'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
'fairseq-generate = fairseq_cli.generate:cli_main',
'fairseq-interactive = fairseq_cli.interactive:cli_main',
'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
'fairseq-score = fairseq_cli.score:cli_main',
'fairseq-train = fairseq_cli.train:cli_main',
'fairseq-validate = fairseq_cli.validate:cli_main',
],
},
cmdclass=cmdclass,
zip_safe=False,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/hubconf.py | hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from fairseq.hub_utils import BPEHubInterface as bpe # noqa
from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
from fairseq.models import MODEL_REGISTRY
# Packages torch.hub users must have installed before loading these models.
dependencies = [
    'numpy',
    'regex',
    'requests',
    'torch',
]
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
    import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
    try:
        import cython
        import os
        from setuptools import sandbox
        # build the extensions in-place inside the hub checkout
        sandbox.run_setup(
            os.path.join(os.path.dirname(__file__), 'setup.py'),
            ['build_ext', '--inplace'],
        )
    except (ImportError, ModuleNotFoundError):
        print(
            'Unable to build Cython components. Please make sure Cython is '
            'installed if the torch.hub model you are loading depends on it.'
        )
# Expose every registered model's pretrained names as module-level
# callables so torch.hub.load can discover them by name.
for _model_type, _cls in MODEL_REGISTRY.items():
    for model_name in _cls.hub_models().keys():
        globals()[model_name] = functools.partial(
            _cls.from_pretrained,
            model_name,
        )
    # to simplify the interface we only expose named models
    # globals()[_model_type] = _cls.from_pretrained
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/shard_docs.py | scripts/shard_docs.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""
import argparse
import contextlib
def main(argv=None):
    """Shard a blank-line-delimited corpus round-robin across N files.

    Reads ``input`` and writes ``input.shard0 .. input.shard{N-1}``,
    keeping each document (a run of non-empty lines) intact and
    separating documents within a shard by a single empty line.

    Parameters
    ----------
    argv : list[str] | None
        Command-line arguments; None means argparse reads sys.argv[1:].
        (New, backward-compatible parameter: existing ``main()`` callers
        are unaffected; it makes the script testable.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--num-shards', type=int)
    args = parser.parse_args(argv)
    assert args.num_shards is not None and args.num_shards > 1
    with open(args.input, 'r', encoding='utf-8') as h:
        with contextlib.ExitStack() as stack:
            outputs = [
                stack.enter_context(open(args.input + ".shard" + str(i), "w", encoding="utf-8"))
                for i in range(args.num_shards)
            ]
            doc = []
            first_doc = [True] * args.num_shards
            def output_doc(i):
                # Bug fix: skip empty flushes (e.g. a trailing blank line
                # at EOF, or consecutive blank lines) so we never emit a
                # spurious separator into a shard.
                if not doc:
                    return
                if not first_doc[i]:
                    outputs[i].write("\n")
                first_doc[i] = False
                for line in doc:
                    outputs[i].write(line)
                doc.clear()
            num_docs = 0
            for line in h:
                if line.strip() == "":  # empty line indicates new document
                    output_doc(num_docs % args.num_shards)
                    num_docs += 1
                else:
                    doc.append(line)
            # flush the final document, which has no trailing blank line
            output_doc(num_docs % args.num_shards)
if __name__ == '__main__':
main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/spm_train.py | scripts/spm_train.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/rm_pt.py | scripts/rm_pt.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r'checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt')
pt_regexp_epoch_based = re.compile(r'checkpoint(\d+)\.pt')
pt_regexp_update_based = re.compile(r'checkpoint_\d+_(\d+)\.pt')
def parse_checkpoints(files):
    """Extract (number, filename) pairs for numbered checkpoint files.

    ``checkpoint<E>.pt`` yields (E, name); ``checkpoint_<E>_<U>.pt``
    yields (U, name).  Anything else (including checkpoint_best.pt and
    checkpoint_last.pt) is ignored.  Input order is preserved.
    """
    epoch_pat = re.compile(r'checkpoint(\d+)\.pt')
    update_pat = re.compile(r'checkpoint_\d+_(\d+)\.pt')
    pairs = []
    for name in files:
        match = epoch_pat.fullmatch(name) or update_pat.fullmatch(name)
        if match is not None:
            pairs.append((int(match.group(1)), match.group(0)))
    return pairs
def last_n_checkpoints(files, n):
    """Return the names of the n highest-numbered checkpoints in `files`."""
    ranked = sorted(parse_checkpoints(files), reverse=True)
    return [name for _num, name in ranked[:n]]
def every_n_checkpoints(files, n):
    """Return names of every n-th numbered checkpoint, counted back from
    the newest, in ascending order."""
    ascending = sorted(parse_checkpoints(files))
    kept = ascending[::-n]  # every n-th entry, walking back from the newest
    return [name for _num, name in sorted(kept)]
def main():
    """Interactively prune fairseq checkpoint files under the given roots.

    Recursively scans each root directory, preserving checkpoint_best.pt
    and checkpoint_last.pt (unless --delete-best/--delete-last), the
    newest --save-last checkpoints and every --save-every-th checkpoint;
    all other numbered checkpoints are deleted.  Directories whose
    basename starts with "test_" have their checkpoints deleted unless
    --preserve-test is given.  The full plan is printed and the user must
    confirm with Y before anything is removed.  Preserved symlinks are
    replaced by real copies of their targets unless --no-dereference.
    """
    parser = argparse.ArgumentParser(
        description=(
            'Recursively delete checkpoint files from `root_dir`, '
            'but preserve checkpoint_best.pt and checkpoint_last.pt'
        )
    )
    parser.add_argument('root_dirs', nargs='*')
    parser.add_argument('--save-last', type=int, default=0, help='number of last checkpoints to save')
    parser.add_argument('--save-every', type=int, default=0, help='interval of checkpoints to save')
    parser.add_argument('--preserve-test', action='store_true',
                        help='preserve checkpoints in dirs that start with test_ prefix (default: delete them)')
    parser.add_argument('--delete-best', action='store_true', help='delete checkpoint_best.pt')
    parser.add_argument('--delete-last', action='store_true', help='delete checkpoint_last.pt')
    parser.add_argument('--no-dereference', action='store_true', help='don\'t dereference symlinks')
    args = parser.parse_args()
    files_to_desymlink = []
    files_to_preserve = []
    files_to_delete = []
    for root_dir in args.root_dirs:
        for root, _subdirs, files in os.walk(root_dir):
            # decide which numbered checkpoints in this directory to keep
            if args.save_last > 0:
                to_save = last_n_checkpoints(files, args.save_last)
            else:
                to_save = []
            if args.save_every > 0:
                to_save += every_n_checkpoints(files, args.save_every)
            for file in files:
                if not pt_regexp.fullmatch(file):
                    continue
                full_path = os.path.join(root, file)
                if (
                    (
                        not os.path.basename(root).startswith('test_')
                        or args.preserve_test
                    )
                    and (
                        (file == 'checkpoint_last.pt' and not args.delete_last)
                        or (file == 'checkpoint_best.pt' and not args.delete_best)
                        or file in to_save
                    )
                ):
                    if os.path.islink(full_path) and not args.no_dereference:
                        files_to_desymlink.append(full_path)
                    else:
                        files_to_preserve.append(full_path)
                else:
                    files_to_delete.append(full_path)
    if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
        print('Nothing to do.')
        sys.exit(0)
    files_to_desymlink = sorted(files_to_desymlink)
    files_to_preserve = sorted(files_to_preserve)
    files_to_delete = sorted(files_to_delete)
    # show the full plan before touching anything
    print('Operations to perform (in order):')
    if len(files_to_desymlink) > 0:
        for file in files_to_desymlink:
            print(' - preserve (and dereference symlink): ' + file)
    if len(files_to_preserve) > 0:
        for file in files_to_preserve:
            print(' - preserve: ' + file)
    if len(files_to_delete) > 0:
        for file in files_to_delete:
            print(' - delete: ' + file)
    # explicit confirmation gate: this tool is destructive
    while True:
        resp = input('Continue? (Y/N): ')
        if resp.strip().lower() == 'y':
            break
        elif resp.strip().lower() == 'n':
            sys.exit(0)
    print('Executing...')
    if len(files_to_desymlink) > 0:
        for file in files_to_desymlink:
            # replace the symlink with a real copy of its target
            realpath = os.path.realpath(file)
            print('rm ' + file)
            os.remove(file)
            print('cp {} {}'.format(realpath, file))
            shutil.copyfile(realpath, file)
    if len(files_to_delete) > 0:
        for file in files_to_delete:
            print('rm ' + file)
            os.remove(file)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/count_docs.py | scripts/count_docs.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main(argv=None):
    """Report document count and average lines/tokens per document.

    Documents are runs of non-empty lines separated by blank lines.
    Reads plain text, or gzip-compressed text with --gzip, printing a
    progress indicator to stderr and the summary to stdout.

    Fixes over the previous version: the final document (not followed by
    a blank line) is now included in the statistics, a trailing blank
    line no longer inflates the document count, and gzip input is opened
    in text mode so both branches yield str lines.

    argv: optional CLI args; None means argparse reads sys.argv[1:]
    (new backward-compatible parameter, added for testability).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--gzip', action='store_true')
    args = parser.parse_args(argv)
    def gopen():
        # 'rt' so the gzip branch yields str lines like the plain branch
        if args.gzip:
            return gzip.open(args.input, 'rt')
        return open(args.input, 'r', encoding='utf-8')
    num_lines = []
    num_toks = []
    with gopen() as h:
        num_lines_in_doc = 0
        num_toks_in_doc = 0
        for i, line in enumerate(h):
            if len(line.strip()) == 0:  # blank line terminates a document
                if num_lines_in_doc > 0:
                    num_lines.append(num_lines_in_doc)
                    num_toks.append(num_toks_in_doc)
                num_lines_in_doc = 0
                num_toks_in_doc = 0
            else:
                num_lines_in_doc += 1
                num_toks_in_doc += len(line.rstrip().split())
            # lightweight progress indicator on stderr
            if i % 1000000 == 0:
                print(i, file=sys.stderr, end="", flush=True)
            elif i % 100000 == 0:
                print(".", file=sys.stderr, end="", flush=True)
        # flush the final document, which has no trailing blank line
        if num_lines_in_doc > 0:
            num_lines.append(num_lines_in_doc)
            num_toks.append(num_toks_in_doc)
    print(file=sys.stderr, flush=True)
    print("found {} docs".format(len(num_lines)))
    print("average num lines per doc: {}".format(np.mean(num_lines)))
    print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == '__main__':
main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/compare_namespaces.py | scripts/compare_namespaces.py | #!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""
from argparse import Namespace # noqa
def main():
    """Interactively diff two argparse.Namespace reprs pasted by the user.

    Prompts for the repr() of each namespace, evaluates it, then prints
    the keys unique to each namespace and the overlapping keys whose
    values differ (tab-separated, one key per line).

    SECURITY: eval() on raw terminal input is acceptable only because
    this is a trusted, interactive debugging aid -- never feed it
    untrusted text or wire it to automated input.
    """
    ns1 = eval(input('Namespace 1: '))
    ns2 = eval(input('Namespace 2: '))
    def keys(ns):
        # public attribute names only (skip dunder/private)
        ks = set()
        for k in dir(ns):
            if not k.startswith('_'):
                ks.add(k)
        return ks
    k1 = keys(ns1)
    k2 = keys(ns2)
    def print_keys(ks, ns1, ns2=None):
        # one column per namespace; missing attributes print as None
        for k in ks:
            if ns2 is None:
                print('{}\t{}'.format(k, getattr(ns1, k, None)))
            else:
                print('{}\t{}\t{}'.format(k, getattr(ns1, k, None), getattr(ns2, k, None)))
    print('Keys unique to namespace 1:')
    print_keys(k1 - k2, ns1)
    print()
    print('Keys unique to namespace 2:')
    print_keys(k2 - k1, ns2)
    print()
    print('Overlapping keys with different values:')
    ks = [k for k in k1 & k2 if getattr(ns1, k, 'None') != getattr(ns2, k, 'None')]
    print_keys(ks, ns1, ns2)
    print()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/build_sym_alignment.py | scripts/build_sym_alignment.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README_fairseq.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    """Build symmetrized word alignments with fast_align and mosesdecoder.

    Runs fast_align over the joined source/target corpus in both directions,
    then symmetrizes the two alignments with the chosen heuristic. Writes
    text.joined, align.forward, align.backward and the symmetrized output
    under --output_dir.

    Raises:
        ValueError: if the source and target files have different line counts.
        RuntimeError: if any external command exits with a non-zero status.
    """
    parser = argparse.ArgumentParser(description='symmetric alignment builder')
    # fmt: off
    parser.add_argument('--fast_align_dir',
                        help='path to fast_align build directory')
    parser.add_argument('--mosesdecoder_dir',
                        help='path to mosesdecoder root directory')
    parser.add_argument('--sym_heuristic',
                        help='heuristic to use for symmetrization',
                        default='grow-diag-final-and')
    parser.add_argument('--source_file',
                        help='path to a file with sentences '
                             'in the source language')
    parser.add_argument('--target_file',
                        help='path to a file with sentences '
                             'in the target language')
    parser.add_argument('--output_dir',
                        help='output directory')
    # fmt: on
    args = parser.parse_args()

    fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
    symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
    sym_fast_align_bin = os.path.join(
        args.mosesdecoder_dir, 'scripts', 'ems',
        'support', 'symmetrize-fast-align.perl')

    def run(cmd):
        # Shell out and fail loudly. The previous `assert os.system(...) == 0`
        # would be silently stripped under `python -O`.
        if os.system(cmd) != 0:
            raise RuntimeError('command failed: {}'.format(cmd))

    # Create the joined file in fast_align's "source ||| target" format.
    joined_file = os.path.join(args.output_dir, 'text.joined')
    with open(args.source_file, 'r', encoding='utf-8') as src, open(args.target_file, 'r', encoding='utf-8') as tgt:
        with open(joined_file, 'w', encoding='utf-8') as joined:
            for s, t in zip_longest(src, tgt):
                if s is None or t is None:
                    # zip_longest pads the shorter file with None; without this
                    # guard a length mismatch surfaced as AttributeError.
                    raise ValueError(
                        'source and target files have different numbers of lines')
                print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)

    # run forward alignment
    fwd_align_file = os.path.join(args.output_dir, 'align.forward')
    run('{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
        FASTALIGN=fast_align_bin,
        JOINED=joined_file,
        FWD=fwd_align_file))

    # run backward alignment (-r reverses the alignment direction);
    # the redundant duplicate assignment of bwd_align_file was removed.
    bwd_align_file = os.path.join(args.output_dir, 'align.backward')
    run('{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
        FASTALIGN=fast_align_bin,
        JOINED=joined_file,
        BWD=bwd_align_file))

    # run symmetrization with the requested heuristic
    sym_out_file = os.path.join(args.output_dir, 'aligned')
    run('{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
        SYMFASTALIGN=sym_fast_align_bin,
        FWD=fwd_align_file,
        BWD=bwd_align_file,
        SRC=args.source_file,
        TGT=args.target_file,
        OUT=sym_out_file,
        HEURISTIC=args.sym_heuristic,
        SYMAL=symal_bin
    ))


if __name__ == '__main__':
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/__init__.py | scripts/__init__.py | python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false | |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/spm_encode.py | scripts/spm_encode.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
    """Encode (and optionally length-filter) parallel text with SentencePiece.

    Reads N parallel input streams, encodes each line with the given
    SentencePiece model, and writes the results to N parallel output streams.
    A whole line-tuple is dropped if any of its lines is empty or fails the
    --min-len/--max-len filter, so the output streams stay aligned.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True,
                        help="sentencepiece model to use for encoding")
    parser.add_argument("--inputs", nargs="+", default=['-'],
                        help="input files to filter/encode")
    parser.add_argument("--outputs", nargs="+", default=['-'],
                        help="path to save encoded outputs")
    parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
    parser.add_argument("--min-len", type=int, metavar="N",
                        help="filter sentence pairs with fewer than N tokens")
    parser.add_argument("--max-len", type=int, metavar="N",
                        help="filter sentence pairs with more than N tokens")
    args = parser.parse_args()

    assert len(args.inputs) == len(args.outputs), \
        "number of input and output paths should match"

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    # Select the encoder once up front so the per-line closure stays cheap.
    if args.output_format == "piece":
        def encode(l):
            return sp.EncodeAsPieces(l)
    elif args.output_format == "id":
        def encode(l):
            return list(map(str, sp.EncodeAsIds(l)))
    else:
        raise NotImplementedError

    # `valid` filters on the *encoded* token count (length of the piece/id list).
    if args.min_len is not None or args.max_len is not None:
        def valid(line):
            return (
                (args.min_len is None or len(line) >= args.min_len)
                and (args.max_len is None or len(line) <= args.max_len)
            )
    else:
        def valid(lines):
            return True

    with contextlib.ExitStack() as stack:
        # '-' means stdin/stdout; real file handles are closed by the ExitStack.
        inputs = [
            stack.enter_context(open(input, "r", encoding="utf-8")) \
            if input != "-" else sys.stdin
            for input in args.inputs
        ]
        outputs = [
            stack.enter_context(open(output, "w", encoding="utf-8")) \
            if output != "-" else sys.stdout
            for output in args.outputs
        ]

        stats = {
            "num_empty": 0,
            "num_filtered": 0,
        }

        def encode_line(line):
            # Return the encoded token list, or None when the line is empty
            # or filtered out; None marks the whole tuple for dropping.
            line = line.strip()
            if len(line) > 0:
                line = encode(line)
                if valid(line):
                    return line
                else:
                    stats["num_filtered"] += 1
            else:
                stats["num_empty"] += 1
            return None

        for i, lines in enumerate(zip(*inputs), start=1):
            enc_lines = list(map(encode_line, lines))
            # Emit only when every parallel line survived, to keep streams aligned.
            if not any(enc_line is None for enc_line in enc_lines):
                for enc_line, output_h in zip(enc_lines, outputs):
                    print(" ".join(enc_line), file=output_h)
            if i % 10000 == 0:
                print("processed {} lines".format(i), file=sys.stderr)

        print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
        print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)


if __name__ == "__main__":
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/average_checkpoints.py | scripts/average_checkpoints.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import torch
import os
import re
from fairseq.file_io import PathManager
def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights.

    Args:
        inputs: An iterable of string paths of checkpoints to load from.

    Returns:
        A dict of string keys mapping to various values. The 'model' key
        from the returned dict should correspond to an OrderedDict mapping
        string parameter names to torch Tensors.

    Raises:
        KeyError: if the checkpoints do not all share the same parameter names.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for fpath in inputs:
        with PathManager.open(fpath, 'rb') as f:
            # Always restore to CPU so averaging works without a GPU.
            state = torch.load(
                f,
                map_location=(
                    lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
                ),
            )
        # Copies over the settings from the first checkpoint
        if new_state is None:
            new_state = state
        model_params = state['model']
        model_params_keys = list(model_params.keys())
        if params_keys is None:
            params_keys = model_params_keys
        elif params_keys != model_params_keys:
            # Report the checkpoint *path*; the previous code formatted the
            # open file object `f` here, which rendered as a useless repr.
            raise KeyError(
                'For checkpoint {}, expected list of params: {}, '
                'but found: {}'.format(fpath, params_keys, model_params_keys)
            )
        for k in params_keys:
            p = model_params[k]
            # Accumulate in fp32 to avoid precision loss when summing halves.
            if isinstance(p, torch.HalfTensor):
                p = p.float()
            if k not in params_dict:
                params_dict[k] = p.clone()
                # NOTE: clone() is needed in case of p is a shared parameter
            else:
                params_dict[k] += p
    averaged_params = collections.OrderedDict()
    for k, v in params_dict.items():
        averaged_params[k] = v
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            # Integer tensors (e.g. counters) get floor division.
            averaged_params[k] //= num_models
    new_state['model'] = averaged_params
    return new_state
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
    """Return the paths of the newest *n* checkpoints in a directory.

    Args:
        paths: single-element list containing the checkpoint directory.
        n: number of checkpoint paths to return.
        update_based: match 'checkpoint_<epoch>_<update>.pt' names when True,
            otherwise epoch-based 'checkpoint<epoch>.pt' names.
        upper_bound: optional inclusive cap on the epoch/update number.

    Raises:
        Exception: if fewer than *n* matching checkpoints are found.
    """
    assert len(paths) == 1
    path = paths[0]
    if update_based:
        pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
    else:
        pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
    files = PathManager.ls(path)

    entries = []
    for f in files:
        m = pt_regexp.fullmatch(f)
        if m is not None:
            sort_key = int(m.group(1))
            if upper_bound is None or sort_key <= upper_bound:
                entries.append((sort_key, m.group(0)))
    if len(entries) < n:
        # The original passed the format arguments to Exception() instead of
        # str.format(), so the message was never actually formatted.
        raise Exception(
            'Found {} checkpoint files but need at least {}'.format(len(entries), n))
    # Sort by epoch/update number descending and keep the newest n.
    return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
    """CLI entry point: average checkpoints and write the result to --output.

    Either averages an explicit list of checkpoint files, or — when
    --num-epoch-checkpoints / --num-update-checkpoints is given — the last N
    checkpoints found in the single directory passed via --inputs.
    """
    parser = argparse.ArgumentParser(
        description='Tool to average the params of input checkpoints to '
                    'produce a new checkpoint',
    )
    # fmt: off
    parser.add_argument('--inputs', required=True, nargs='+',
                        help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE',
                        help='Write the new checkpoint containing the averaged weights to this path.')
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument('--num-epoch-checkpoints', type=int,
                           help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
                                'and average last this many of them.')
    num_group.add_argument('--num-update-checkpoints', type=int,
                           help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
                                'and average last this many of them.')
    parser.add_argument('--checkpoint-upper-bound', type=int,
                        help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
                             'when using --num-update-checkpoints, this will set an upper bound on which update to use'
                             'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.'
                             'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500'
                        )
    # fmt: on
    args = parser.parse_args()
    print(args)

    # Resolve which "last N" mode (if any) was requested; the mutually
    # exclusive group guarantees at most one of the two flags is set.
    num = None
    is_update_based = False
    if args.num_update_checkpoints is not None:
        num = args.num_update_checkpoints
        is_update_based = True
    elif args.num_epoch_checkpoints is not None:
        num = args.num_epoch_checkpoints

    assert args.checkpoint_upper_bound is None or (args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None), \
        '--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints'
    assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \
        'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'

    if num is not None:
        # Replace the single directory in --inputs with the resolved list of
        # the newest `num` checkpoint files found there.
        args.inputs = last_n_checkpoints(
            args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound,
        )
        print('averaging checkpoints: ', args.inputs)

    new_state = average_checkpoints(args.inputs)
    with PathManager.open(args.output, 'wb') as f:
        torch.save(new_state, f)
    print('Finished writing averaged checkpoint to {}'.format(args.output))


if __name__ == '__main__':
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/spm_decode.py | scripts/spm_decode.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
    """Decode SentencePiece pieces or ids back to plain text, line by line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", required=True,
                        help="sentencepiece model to use for decoding")
    parser.add_argument("--input", required=True, help="input file to decode")
    parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
    args = parser.parse_args()

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    # Select the decoder once so the per-line path is a single call.
    if args.input_format == "piece":
        def decode(l):
            return "".join(sp.DecodePieces(l))
    elif args.input_format == "id":
        def decode(l):
            return "".join(sp.DecodeIds(l))
    else:
        raise NotImplementedError

    def tok2int(tok):
        # remap reference-side <unk> (represented as <<unk>>) to 0
        return int(tok) if tok != "<<unk>>" else 0

    with open(args.input, "r", encoding="utf-8") as h:
        for line in h:
            if args.input_format == "id":
                print(decode(list(map(tok2int, line.rstrip().split()))))
            elif args.input_format == "piece":
                print(decode(line.rstrip().split()))


if __name__ == "__main__":
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/read_binarized.py | scripts/read_binarized.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import data_utils, Dictionary, indexed_dataset
def get_parser():
    """Construct the CLI parser for dumping a binarized dataset to stdout."""
    arg_parser = argparse.ArgumentParser(
        description='writes text from binarized file to stdout')
    # fmt: off
    arg_parser.add_argument('--dataset-impl',
                            choices=indexed_dataset.get_available_dataset_impl(),
                            help='dataset implementation')
    arg_parser.add_argument('--dict', metavar='FP', default=None,
                            help='dictionary containing known words')
    arg_parser.add_argument('--input', metavar='FP', required=True,
                            help='binarized file to read')
    # fmt: on
    return arg_parser
def main():
    """Print a binarized fairseq dataset to stdout, one example per line."""
    parser = get_parser()
    args = parser.parse_args()

    # Without a dictionary we can only print the raw integer token ids.
    dictionary = Dictionary.load(args.dict) if args.dict is not None else None
    dataset = data_utils.load_indexed_dataset(
        args.input,
        dictionary,
        dataset_impl=args.dataset_impl,
        default='lazy',
    )

    for tensor_line in dataset:
        if dictionary is None:
            line = ' '.join([str(int(x)) for x in tensor_line])
        else:
            # Let the dictionary render ids back into surface tokens.
            line = dictionary.string(tensor_line)

        print(line)


if __name__ == '__main__':
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/split_train_valid_docs.py | scripts/split_train_valid_docs.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
    """Reservoir-sample documents (or lines) from a corpus into two files.

    Keeps a uniform random sample of k documents in `sample_output` and
    writes every other document to `remainder_output`. Documents are
    separated by a single empty line unless --lines is given, in which case
    each non-empty line is its own unit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('sample_output', help='train output file')
    parser.add_argument('remainder_output', help='valid output file')
    # `required=True` replaces the old post-parse `assert args.k is not None`,
    # which would have been stripped under `python -O`.
    # NOTE(review): despite the help text, k is the size of the *sample*
    # written to sample_output — confirm with callers before renaming.
    parser.add_argument('-k', type=int, required=True, help="remainder size")
    parser.add_argument('--lines', action='store_true',
                        help='split lines instead of docs')
    args = parser.parse_args()

    sample = []
    remainder = []
    num_docs = 0  # documents seen so far; drives the reservoir probability

    def update_sample(doc):
        # Standard reservoir sampling: the i-th document (0-based) replaces a
        # random sample slot with probability k / (i + 1); displaced or
        # rejected documents go to the remainder.
        nonlocal num_docs
        if len(sample) < args.k:
            sample.append(doc.copy())
        else:
            j = random.randrange(num_docs + 1)
            if j < args.k:
                remainder.append(sample[j])
                sample[j] = doc.copy()
            else:
                remainder.append(doc.copy())
        num_docs += 1
        doc.clear()

    with open(args.input, 'r', encoding='utf-8') as h:
        doc = []
        for i, line in enumerate(h):
            if line.strip() == "":  # empty line indicates new document
                update_sample(doc)
            else:
                doc.append(line)
                if args.lines:
                    update_sample(doc)
            # Coarse progress indicator on stderr.
            if i % 1000000 == 0:
                print(i, file=sys.stderr, end="", flush=True)
            elif i % 100000 == 0:
                print(".", file=sys.stderr, end="", flush=True)
        if len(doc) > 0:
            update_sample(doc)
        print(file=sys.stderr, flush=True)

    assert len(sample) == args.k

    def write_docs(path, docs):
        # Re-insert the blank-line separators between documents unless we are
        # operating on raw lines.
        with open(path, 'w', encoding='utf-8') as out:
            first = True
            for d in docs:
                if not first and not args.lines:
                    out.write("\n")
                first = False
                for line in d:
                    out.write(line)

    write_docs(args.sample_output, sample)
    write_docs(args.remainder_output, remainder)


if __name__ == '__main__':
    main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/constraints/validate.py | scripts/constraints/validate.py | #!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""
constraints = []

found = 0
total = 0
for line in sys.stdin:
    if line.startswith("C-"):
        # Accumulate constraints until the next hypothesis line.
        constraints.append(line.rstrip().split("\t")[1])
    elif line.startswith("H-"):
        # Each H- line consumes the constraints gathered since the previous
        # hypothesis; only the first hypothesis of a block sees any.
        text = line.split("\t")[2]

        for constraint in constraints:
            total += 1
            if constraint in text:
                found += 1
            else:
                print(f"No {constraint} in {text}", file=sys.stderr)

        constraints = []

# Guard against empty input: the original divided by `total` unconditionally,
# raising ZeroDivisionError when no constraints were seen on stdin.
if total > 0:
    print(f"Found {found} / {total} = {100 * found / total:.1f}%")
else:
    print("Found 0 / 0 constraints")
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.