repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/announcement.py | flax/types/announcement.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
@dataclass(frozen=True)
class Announcement:
    """An on-chain announcement, identified by its origin and message.

    `name()` hashes `origin_info` together with the message; when
    `morph_bytes` is set, the message is first "morphed" by hashing it
    with the morph bytes.
    """

    origin_info: bytes32
    message: bytes
    morph_bytes: Optional[bytes] = None  # CATs morph their announcements and other puzzles may choose to do so too

    def name(self) -> bytes32:
        """Return the announcement's id: hash(origin_info + morphed message)."""
        if self.morph_bytes is not None:
            message: bytes = std_hash(self.morph_bytes + self.message)
        else:
            message = self.message
        return std_hash(bytes(self.origin_info + message))

    def __str__(self) -> str:
        # BUG FIX: name() is a 32-byte hash; decoding it as UTF-8 raised
        # UnicodeDecodeError for virtually every announcement. Render the
        # id as a hex string instead.
        return self.name().hex()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/peer_info.py | flax/types/peer_info.py | from __future__ import annotations
import ipaddress
from dataclasses import dataclass
from typing import Optional, Union
from flax.util.ints import uint16, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class PeerInfo(Streamable):
    """A network peer, addressed by host (an IP literal) and port."""

    host: str
    port: uint16

    def is_valid(self, allow_private_subnets: bool = False) -> bool:
        """Return True if `host` parses as an IPv4 or IPv6 address.

        Private-subnet addresses are rejected unless `allow_private_subnets`
        is set. Non-IP strings (e.g. hostnames) are considered invalid.
        """
        try:
            # ip_address() accepts both IPv4 and IPv6 literals, replacing the
            # previous duplicated try-IPv6-then-try-IPv4 logic with one path.
            ip = ipaddress.ip_address(self.host)
        except ValueError:
            return False
        if ip.is_private and not allow_private_subnets:
            return False
        return True

    # Functions related to peer bucketing in new/tried tables.
    def get_key(self) -> bytes:
        """Return a bucketing key: the 16-byte IPv6 form of the address plus
        the port in big-endian byte order.

        IPv4 addresses are embedded into the 6to4 prefix (2002::/16), with the
        32 address bits placed directly after the prefix (shift by 80).
        """
        try:
            ip = ipaddress.IPv6Address(self.host)
        except ValueError:
            ip_v4 = ipaddress.IPv4Address(self.host)
            ip = ipaddress.IPv6Address(int(ipaddress.IPv6Address("2002::")) | (int(ip_v4) << 80))
        key = ip.packed
        key += bytes([self.port // 0x100, self.port & 0x0FF])
        return key

    def get_group(self) -> bytes:
        """Return the peer's network group: a tag byte (1 for IPv4, 0 for
        IPv6) plus the first 2 bytes (/16) of an IPv4 address or the first
        4 bytes (/32) of an IPv6 address."""
        # TODO: Port everything from Bitcoin.
        ipv4 = 1
        try:
            ip = ipaddress.IPv4Address(self.host)
        except ValueError:
            ip = ipaddress.IPv6Address(self.host)
            ipv4 = 0
        if ipv4:
            group = bytes([1]) + ip.packed[:2]
        else:
            group = bytes([0]) + ip.packed[:4]
        return group
@streamable
@dataclass(frozen=True)
class TimestampedPeerInfo(Streamable):
    """A peer address (host/port) together with a timestamp.

    NOTE(review): the timestamp's exact semantics (last seen vs. first added)
    are not visible here — confirm against the address-manager callers.
    """

    host: str
    port: uint16
    timestamp: uint64
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/condition_with_args.py | flax/types/condition_with_args.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List
from flax.types.condition_opcodes import ConditionOpcode
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class ConditionWithArgs(Streamable):
    """
    This structure is used to store parsed CLVM conditions
    Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
    """

    # The condition's opcode (see flax/types/condition_opcodes.py).
    opcode: ConditionOpcode
    # The condition's arguments, as raw byte blobs (one or two entries).
    vars: List[bytes]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/mojos.py | flax/types/mojos.py | from __future__ import annotations
from typing import NewType
from flax.util.ints import uint64
# Distinct NewType over uint64 for coin amounts denominated in mojos, so a
# plain integer can't be passed where a mojo amount is expected.
Mojos = NewType("Mojos", uint64)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/unfinished_header_block.py | flax/types/unfinished_header_block.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock
from flax.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class UnfinishedHeaderBlock(Streamable):
    """Header-only view of an unfinished block, used by light clients.

    Same as a FullBlock but without TransactionInfo and Generator.
    """

    finished_sub_slots: List[EndOfSubSlotBundle]  # If first sb
    reward_chain_block: RewardChainBlockUnfinished  # Reward chain trunk data
    challenge_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    reward_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    foliage: Foliage  # Reward chain foliage data
    foliage_transaction_block: Optional[FoliageTransactionBlock]  # Reward chain foliage data (tx block)
    transactions_filter: bytes  # Filter for block transactions

    @property
    def prev_header_hash(self):
        """Previous block's hash, taken from the foliage."""
        return self.foliage.prev_block_hash

    @property
    def header_hash(self):
        """This block's header hash: the hash of its foliage."""
        return self.foliage.get_hash()

    @property
    def total_iters(self):
        """Total VDF iterations, taken from the reward chain block."""
        return self.reward_chain_block.total_iters
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/coin_spend.py | flax/types/coin_spend.py | from dataclasses import dataclass
from typing import List
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import SerializedProgram, INFINITE_COST
from flax.util.chain_utils import additions_for_solution, fee_for_solution
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class CoinSpend(Streamable):
    """
    This is a rather disparate data structure that validates coin transfers. It's generally populated
    with data from different sources, since burned coins are identified by name, so it is built up
    more often that it is streamed.
    """

    coin: Coin  # The coin being spent
    puzzle_reveal: SerializedProgram  # The puzzle program revealed for this spend
    solution: SerializedProgram  # The arguments passed to the puzzle

    # TODO: this function should be moved out of the full node. It cannot be
    # called on untrusted input
    def additions(self) -> List[Coin]:
        """Run the puzzle with its solution and return the coins it creates
        (cost-capped at INFINITE_COST)."""
        return additions_for_solution(self.coin.name(), self.puzzle_reveal, self.solution, INFINITE_COST)

    # TODO: this function should be moved out of the full node. It cannot be
    # called on untrusted input
    def reserved_fee(self) -> int:
        """Run the puzzle with its solution and return the fee it reserves."""
        return fee_for_solution(self.puzzle_reveal, self.solution, INFINITE_COST)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/coin_record.py | flax/types/coin_record.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.protocols.wallet_protocol import CoinState
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class CoinRecord(Streamable):
    """
    These are values that correspond to a CoinName that are used
    in keeping track of the unspent database.
    """

    coin: Coin
    confirmed_block_index: uint32
    spent_block_index: uint32
    coinbase: bool
    timestamp: uint64  # Timestamp of the block at height confirmed_block_index

    @property
    def spent(self) -> bool:
        # A spent coin records a non-zero height at which it was spent.
        return self.spent_block_index > 0

    @property
    def name(self) -> bytes32:
        # Records are keyed by the coin's own name (its hash).
        return self.coin.name()

    @property
    def coin_state(self) -> CoinState:
        """Convert this record into the wallet-protocol CoinState."""
        # Height 0 together with timestamp 0 means "not yet confirmed".
        unconfirmed = self.confirmed_block_index == 0 and self.timestamp == 0
        confirmed_height: Optional[uint32] = None if unconfirmed else self.confirmed_block_index
        spent_height = self.spent_block_index if self.spent else None
        return CoinState(self.coin, spent_height, confirmed_height)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/fee_rate.py | flax/types/fee_rate.py | from __future__ import annotations
import math
from dataclasses import dataclass
import typing_extensions
from flax.types.clvm_cost import CLVMCost
from flax.types.mojos import Mojos
from flax.util.ints import uint64
from flax.util.streamable import Streamable, streamable
@typing_extensions.final
@streamable
@dataclass(frozen=True)
class FeeRate(Streamable):
    """
    Represents Fee Rate in mojos divided by CLVM Cost.
    Performs XFX/mojo conversion.
    Similar to 'Fee per cost'.
    """

    mojos_per_clvm_cost: uint64

    @classmethod
    def create(cls, mojos: Mojos, clvm_cost: CLVMCost) -> FeeRate:
        """Build a FeeRate from a fee and the cost it pays for.

        The ratio is rounded UP to the next whole mojo per cost.
        Raises ZeroDivisionError if clvm_cost is 0.
        """
        return cls(uint64(math.ceil(mojos / clvm_cost)))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/unfinished_block.py | flax/types/unfinished_block.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.reward_chain_block import RewardChainBlockUnfinished
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class UnfinishedBlock(Streamable):
    """A full block, without the final VDF proofs."""

    finished_sub_slots: List[EndOfSubSlotBundle]  # If first sb
    reward_chain_block: RewardChainBlockUnfinished  # Reward chain trunk data
    challenge_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    reward_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    foliage: Foliage  # Reward chain foliage data
    foliage_transaction_block: Optional[FoliageTransactionBlock]  # Reward chain foliage data (tx block)
    transactions_info: Optional[TransactionsInfo]  # Reward chain foliage data (tx block additional)
    transactions_generator: Optional[SerializedProgram]  # Program that generates transactions
    transactions_generator_ref_list: List[
        uint32
    ]  # List of block heights of previous generators referenced in this block

    @property
    def prev_header_hash(self):
        """Previous block's hash, taken from the foliage."""
        return self.foliage.prev_block_hash

    @property
    def partial_hash(self):
        """Hash of the (unfinished) reward chain block."""
        return self.reward_chain_block.get_hash()

    def is_transaction_block(self) -> bool:
        """True if the foliage commits to a transaction block."""
        return self.foliage.foliage_transaction_block_hash is not None

    @property
    def total_iters(self):
        """Total VDF iterations, taken from the reward chain block."""
        return self.reward_chain_block.total_iters
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/end_of_slot_bundle.py | flax/types/end_of_slot_bundle.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class EndOfSubSlotBundle(Streamable):
    """The data that closes out a sub slot: the per-chain sub-slot structures
    plus the VDF proofs that back them."""

    challenge_chain: ChallengeChainSubSlot
    infused_challenge_chain: Optional[InfusedChallengeChainSubSlot]  # May be absent for a given sub slot
    reward_chain: RewardChainSubSlot
    proofs: SubSlotProofs
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/weight_proof.py | flax/types/weight_proof.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.header_block import HeaderBlock
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class SubEpochData(Streamable):
    """Summary of one sub epoch, as carried inside a weight proof."""

    reward_chain_hash: bytes32
    num_blocks_overflow: uint8
    # Presumably populated only when the sub epoch changed these chain
    # parameters — confirm against the weight-proof builder.
    new_sub_slot_iters: Optional[uint64]
    new_difficulty: Optional[uint64]
# number of challenge blocks
# Average iters for challenge blocks
# |--A-R----R-------R--------R------R----R----------R-----R--R---| Honest difficulty 1000
# 0.16
# compute total reward chain blocks
# |----------------------------A---------------------------------| Attackers chain 1000
# 0.48
# total number of challenge blocks == total number of reward chain blocks
@streamable
@dataclass(frozen=True)
class SubSlotData(Streamable):
    """One sub slot's worth of proof/VDF data inside a weight-proof segment.

    All fields are optional; which ones are populated depends on whether the
    sub slot was infused (contains a proof of space) or ran to its end.
    """

    # if infused
    proof_of_space: Optional[ProofOfSpace]
    # VDF to signage point
    cc_signage_point: Optional[VDFProof]
    # VDF from signage to infusion point
    cc_infusion_point: Optional[VDFProof]
    icc_infusion_point: Optional[VDFProof]
    cc_sp_vdf_info: Optional[VDFInfo]
    signage_point_index: Optional[uint8]
    # VDF from beginning to end of slot if not infused
    # from ip to end if infused
    cc_slot_end: Optional[VDFProof]
    icc_slot_end: Optional[VDFProof]
    # info from finished slots
    cc_slot_end_info: Optional[VDFInfo]
    icc_slot_end_info: Optional[VDFInfo]
    cc_ip_vdf_info: Optional[VDFInfo]
    icc_ip_vdf_info: Optional[VDFInfo]
    total_iters: Optional[uint128]

    def is_challenge(self) -> bool:
        """True when this sub slot carries a proof of space (was infused)."""
        # Idiom fix: return the boolean directly instead of if/True/False.
        return self.proof_of_space is not None

    def is_end_of_slot(self) -> bool:
        """True when this sub slot carries end-of-slot VDF info."""
        return self.cc_slot_end_info is not None
@streamable
@dataclass(frozen=True)
class SubEpochChallengeSegment(Streamable):
    """A sampled segment of sub slots belonging to one sub epoch."""

    sub_epoch_n: uint32
    sub_slots: List[SubSlotData]
    rc_slot_end_info: Optional[VDFInfo]  # in first segment of each sub_epoch
@streamable
@dataclass(frozen=True)
# this is used only for serialization to database
class SubEpochSegments(Streamable):
    """Wrapper holding a list of challenge segments (DB serialization only)."""

    challenge_segments: List[SubEpochChallengeSegment]
@streamable
@dataclass(frozen=True)
# this is used only for serialization to database
class RecentChainData(Streamable):
    """Wrapper holding recent header blocks (DB serialization only)."""

    recent_chain_data: List[HeaderBlock]
@streamable
@dataclass(frozen=True)
class ProofBlockHeader(Streamable):
    """Minimal block header data used inside weight proofs."""

    finished_sub_slots: List[EndOfSubSlotBundle]
    reward_chain_block: RewardChainBlock
@streamable
@dataclass(frozen=True)
class WeightProof(Streamable):
    """A proof of chain weight: per-sub-epoch summaries, sampled sub-epoch
    segments, and the recent chain's header blocks."""

    sub_epochs: List[SubEpochData]
    sub_epoch_segments: List[SubEpochChallengeSegment]  # sampled sub epoch
    recent_chain_data: List[HeaderBlock]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/header_block.py | flax/types/header_block.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class HeaderBlock(Streamable):
    """Same as a FullBlock but without TransactionInfo and Generator (but
    with the transactions filter), used by light clients."""

    finished_sub_slots: List[EndOfSubSlotBundle]  # If first sb
    reward_chain_block: RewardChainBlock  # Reward chain trunk data
    challenge_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    challenge_chain_ip_proof: VDFProof
    reward_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    reward_chain_ip_proof: VDFProof
    infused_challenge_chain_ip_proof: Optional[VDFProof]  # Iff deficit < 4
    foliage: Foliage  # Reward chain foliage data
    foliage_transaction_block: Optional[FoliageTransactionBlock]  # Reward chain foliage data (tx block)
    transactions_filter: bytes  # Filter for block transactions
    transactions_info: Optional[TransactionsInfo]  # Reward chain foliage data (tx block additional)

    @property
    def prev_header_hash(self):
        """Previous block's hash, taken from the foliage."""
        return self.foliage.prev_block_hash

    @property
    def prev_hash(self):
        """Alias of prev_header_hash (reads the same foliage field)."""
        return self.foliage.prev_block_hash

    @property
    def height(self):
        return self.reward_chain_block.height

    @property
    def weight(self):
        return self.reward_chain_block.weight

    @property
    def header_hash(self):
        """This block's identifier: the hash of its foliage."""
        return self.foliage.get_hash()

    @property
    def total_iters(self):
        return self.reward_chain_block.total_iters

    @property
    def log_string(self):
        """Short human-readable description for log messages."""
        return "block " + str(self.header_hash) + " sb_height " + str(self.height) + " "

    @property
    def is_transaction_block(self) -> bool:
        return self.reward_chain_block.is_transaction_block

    @property
    def first_in_sub_slot(self) -> bool:
        """True when this block carries finished sub slots, i.e. it is the
        first block in a new sub slot."""
        return self.finished_sub_slots is not None and len(self.finished_sub_slots) > 0
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/full_block.py | flax/types/full_block.py | from dataclasses import dataclass
from typing import List, Optional, Set
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class FullBlock(Streamable):
    """All the information required to validate a block."""

    finished_sub_slots: List[EndOfSubSlotBundle]  # If first sb
    reward_chain_block: RewardChainBlock  # Reward chain trunk data
    challenge_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    challenge_chain_ip_proof: VDFProof
    reward_chain_sp_proof: Optional[VDFProof]  # If not first sp in sub-slot
    reward_chain_ip_proof: VDFProof
    infused_challenge_chain_ip_proof: Optional[VDFProof]  # Iff deficit < 4
    foliage: Foliage  # Reward chain foliage data
    foliage_transaction_block: Optional[FoliageTransactionBlock]  # Reward chain foliage data (tx block)
    transactions_info: Optional[TransactionsInfo]  # Reward chain foliage data (tx block additional)
    transactions_generator: Optional[SerializedProgram]  # Program that generates transactions
    transactions_generator_ref_list: List[
        uint32
    ]  # List of block heights of previous generators referenced in this block

    @property
    def prev_header_hash(self) -> bytes32:
        """Previous block's hash, taken from the foliage."""
        return self.foliage.prev_block_hash

    @property
    def height(self) -> uint32:
        return self.reward_chain_block.height

    @property
    def weight(self):
        return self.reward_chain_block.weight

    @property
    def total_iters(self):
        return self.reward_chain_block.total_iters

    @property
    def header_hash(self) -> bytes32:
        """This block's identifier: the hash of its foliage."""
        return self.foliage.get_hash()

    def is_transaction_block(self) -> bool:
        """True if this block has a transaction block attached."""
        return self.foliage_transaction_block is not None

    def get_included_reward_coins(self) -> Set[Coin]:
        """Reward coins claimed by this block (empty for non-tx blocks)."""
        if not self.is_transaction_block():
            return set()
        # Transaction blocks always carry transactions_info.
        assert self.transactions_info is not None
        return set(self.transactions_info.reward_claims_incorporated)

    def is_fully_compactified(self) -> bool:
        """True when every checked challenge-chain VDF proof (slot proofs in
        each finished sub slot, plus this block's own sp/ip proofs) is a
        compact proof: witness type 0 and normalized to identity."""
        for sub_slot in self.finished_sub_slots:
            if (
                sub_slot.proofs.challenge_chain_slot_proof.witness_type != 0
                or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
            ):
                return False
            if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
                sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type != 0
                or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
            ):
                return False
        if self.challenge_chain_sp_proof is not None and (
            self.challenge_chain_sp_proof.witness_type != 0 or not self.challenge_chain_sp_proof.normalized_to_identity
        ):
            return False
        if self.challenge_chain_ip_proof.witness_type != 0 or not self.challenge_chain_ip_proof.normalized_to_identity:
            return False
        return True
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/transaction_queue_entry.py | flax/types/transaction_queue_entry.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.server.ws_connection import WSFlaxConnection
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.spend_bundle import SpendBundle
@dataclass(frozen=True)
class TransactionQueueEntry:
    """
    A transaction received from peer. This is put into a queue, and not yet in the mempool.
    """

    transaction: SpendBundle
    transaction_bytes: Optional[bytes]
    spend_name: bytes32
    peer: Optional[WSFlaxConnection]
    test: bool

    def _key(self) -> bytes32:
        """Sort key: entries order by the spend bundle's name alone."""
        return self.spend_name

    def __lt__(self, other: "TransactionQueueEntry") -> bool:
        return self._key() < other._key()

    def __le__(self, other: "TransactionQueueEntry") -> bool:
        return self._key() <= other._key()

    def __gt__(self, other: "TransactionQueueEntry") -> bool:
        return self._key() > other._key()

    def __ge__(self, other: "TransactionQueueEntry") -> bool:
        return self._key() >= other._key()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/coin_solution.py | flax/types/coin_solution.py | import warnings
# Deprecated alias module: `CoinSolution` was renamed to `CoinSpend`.
from .coin_spend import CoinSpend as CoinSolution  # noqa lgtm[py/unused-import]
# Emitted once at import time so callers of the old name notice the rename.
warnings.warn("`CoinSolution` is now `CoinSpend`")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/mempool_inclusion_status.py | flax/types/mempool_inclusion_status.py | from __future__ import annotations
from enum import IntEnum
class MempoolInclusionStatus(IntEnum):
SUCCESS = 1 # Transaction added to mempool
PENDING = 2 # Transaction not yet added to mempool
FAILED = 3 # Transaction was invalid and dropped
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/block_protocol.py | flax/types/block_protocol.py | from __future__ import annotations
from typing import List, Optional
from typing_extensions import Protocol
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32
class BlockInfo(Protocol):
    """Structural type for block-like objects that expose a parent hash and
    transactions-generator data (e.g. FullBlock, UnfinishedBlock)."""

    @property
    def prev_header_hash(self) -> bytes32:
        pass

    @property
    def transactions_generator(self) -> Optional[SerializedProgram]:
        pass

    @property
    def transactions_generator_ref_list(self) -> List[uint32]:
        pass
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/generator_types.py | flax/types/generator_types.py | from dataclasses import dataclass
from typing import List
from flax.types.blockchain_format.program import SerializedProgram
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
class GeneratorBlockCacheInterface:
    """Interface for looking up a block's transactions generator by height."""

    def get_generator_for_block_height(self, height: uint32) -> SerializedProgram:
        # Requested block must be a transaction block
        pass
@dataclass(frozen=True)
class CompressorArg:
    """`CompressorArg` is used as input to the Block Compressor"""

    block_height: uint32  # Height of the block the generator came from
    generator: SerializedProgram
    # start/end: positions within `generator` — presumably byte offsets of the
    # region of interest; confirm against the compressor's usage.
    start: int
    end: int
@streamable
@dataclass(frozen=True)
class BlockGenerator(Streamable):
    """A block's transactions generator program together with the previous
    generators it references."""

    program: SerializedProgram
    generator_refs: List[SerializedProgram]
    # the heights are only used when creating new blocks, never when validating
    block_height_list: List[uint32]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/condition_opcodes.py | flax/types/condition_opcodes.py | from __future__ import annotations
import enum
from typing import Any
# See flax/wallet/puzzles/condition_codes.clvm
# See flax/wallet/puzzles/condition_codes.clvm
class ConditionOpcode(bytes, enum.Enum):
    """One-byte opcodes for CLVM spend conditions.

    Each member's value is a single-byte `bytes` object matching the codes in
    condition_codes.clvm.
    """

    # AGG_SIG is ascii "1"

    # the conditions below require bls12-381 signatures

    AGG_SIG_UNSAFE = bytes([49])
    AGG_SIG_ME = bytes([50])

    # the conditions below reserve coin amounts and have to be accounted for in output totals

    CREATE_COIN = bytes([51])
    RESERVE_FEE = bytes([52])

    # the conditions below deal with announcements, for inter-coin communication

    CREATE_COIN_ANNOUNCEMENT = bytes([60])
    ASSERT_COIN_ANNOUNCEMENT = bytes([61])
    CREATE_PUZZLE_ANNOUNCEMENT = bytes([62])
    ASSERT_PUZZLE_ANNOUNCEMENT = bytes([63])

    # the conditions below let coins inquire about themselves

    ASSERT_MY_COIN_ID = bytes([70])
    ASSERT_MY_PARENT_ID = bytes([71])
    ASSERT_MY_PUZZLEHASH = bytes([72])
    ASSERT_MY_AMOUNT = bytes([73])

    # the conditions below ensure that we're "far enough" in the future

    # wall-clock time
    ASSERT_SECONDS_RELATIVE = bytes([80])
    ASSERT_SECONDS_ABSOLUTE = bytes([81])

    # block index
    ASSERT_HEIGHT_RELATIVE = bytes([82])
    ASSERT_HEIGHT_ABSOLUTE = bytes([83])

    # A condition that is always true and always ignore all arguments
    REMARK = bytes([1])

    def __bytes__(self) -> bytes:
        """Return the opcode's single-byte value."""
        return bytes(self.value)

    @classmethod
    def from_bytes(cls: Any, blob: bytes) -> Any:
        """Parse a one-byte blob into an opcode.

        NOTE: the length check uses `assert`, so it is skipped under `-O`;
        unknown opcodes raise ValueError from the enum lookup.
        """
        assert len(blob) == 1
        return cls(blob)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/spend_bundle.py | flax/types/spend_bundle.py | from __future__ import annotations
import warnings
from dataclasses import dataclass
from typing import List, Dict, Any
from blspy import AugSchemeMPL, G2Element
from flax.consensus.default_constants import DEFAULT_CONSTANTS
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.streamable import Streamable, streamable_from_dict, recurse_jsonify, streamable
from flax.wallet.util.debug_spend_bundle import debug_spend_bundle
from .coin_spend import CoinSpend
@streamable
@dataclass(frozen=True)
class SpendBundle(Streamable):
    """
    This is a list of coins being spent along with their solution programs, and a single
    aggregated signature. This is the object that most closely corresponds to a bitcoin
    transaction (although because of non-interactive signature aggregation, the boundaries
    between transactions are more flexible than in bitcoin).
    """

    coin_spends: List[CoinSpend]
    aggregated_signature: G2Element

    @property
    def coin_solutions(self):
        """Deprecated alias for `coin_spends` (the old field name)."""
        return self.coin_spends

    @classmethod
    def aggregate(cls, spend_bundles) -> "SpendBundle":
        """Combine several bundles into one: concatenate their coin spends
        and BLS-aggregate their signatures."""
        coin_spends: List[CoinSpend] = []
        sigs: List[G2Element] = []
        for bundle in spend_bundles:
            coin_spends += bundle.coin_spends
            sigs.append(bundle.aggregated_signature)
        aggregated_signature = AugSchemeMPL.aggregate(sigs)
        return cls(coin_spends, aggregated_signature)

    def additions(self) -> List[Coin]:
        """All coins created by running the spends in this bundle."""
        items: List[Coin] = []
        for coin_spend in self.coin_spends:
            items.extend(coin_spend.additions())
        return items

    def removals(self) -> List[Coin]:
        """This should be used only by wallet"""
        return [_.coin for _ in self.coin_spends]

    def fees(self) -> int:
        """Unsafe to use for fees validation!!!"""
        # Fee is simply input total minus output total; no re-validation here.
        amount_in = sum(_.amount for _ in self.removals())
        amount_out = sum(_.amount for _ in self.additions())
        return amount_in - amount_out

    def name(self) -> bytes32:
        """The bundle's id, via Streamable.get_hash()."""
        return self.get_hash()

    def debug(self, agg_sig_additional_data=DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA):
        """Print a human-readable breakdown of this bundle (debugging aid)."""
        debug_spend_bundle(self, agg_sig_additional_data)

    def not_ephemeral_additions(self) -> List[Coin]:
        """Additions that are not also spent within this same bundle."""
        all_removals = self.removals()
        all_additions = self.additions()
        result: List[Coin] = []
        for add in all_additions:
            if add in all_removals:
                continue
            result.append(add)
        return result

    # Note that `coin_spends` used to have the bad name `coin_solutions`.
    # Some API still expects this name. For now, we accept both names.
    #
    # TODO: continue this deprecation. Eventually, all code below here should be removed.
    #   1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)
    #   2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)
    #   3. remove all references to `include_legacy_keys=True`
    #   4. remove all code below this point

    @classmethod
    def from_json_dict(cls, json_dict: Dict[str, Any]) -> SpendBundle:
        """Parse a JSON dict, accepting either the legacy `coin_solutions`
        key or the modern `coin_spends` key (but not both)."""
        if "coin_solutions" in json_dict:
            if "coin_spends" not in json_dict:
                # Legacy input: rewrite the key and warn the caller.
                json_dict = dict(
                    aggregated_signature=json_dict["aggregated_signature"], coin_spends=json_dict["coin_solutions"]
                )
                warnings.warn("`coin_solutions` is now `coin_spends` in `SpendBundle.from_json_dict`")
            else:
                raise ValueError("JSON contains both `coin_solutions` and `coin_spends`, just use `coin_spends`")
        return streamable_from_dict(cls, json_dict)

    def to_json_dict(self, include_legacy_keys: bool = True, exclude_modern_keys: bool = True) -> Dict[str, Any]:
        """Serialize to JSON, emitting the legacy `coin_solutions` key and/or
        the modern `coin_spends` key; at least one must be included."""
        if include_legacy_keys is False and exclude_modern_keys is True:
            raise ValueError("`coin_spends` not included in legacy or modern outputs")
        d = recurse_jsonify(self)
        if include_legacy_keys:
            d["coin_solutions"] = d["coin_spends"]
        if exclude_modern_keys:
            del d["coin_spends"]
        return recurse_jsonify(d)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/__init__.py | flax/types/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/clvm_cost.py | flax/types/clvm_cost.py | from __future__ import annotations
from typing import NewType
from flax.util.ints import uint64
"""
CLVM Cost is the cost to run a CLVM program on the CLVM.
It is similar to transaction bytes in the Bitcoin, but some operations
are charged a higher rate, depending on their arguments.
"""
# Distinct NewType over uint64 for CLVM execution cost (see the module
# docstring above), so a plain integer can't be passed where a cost is expected.
CLVMCost = NewType("CLVMCost", uint64)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/mempool_item.py | flax/types/mempool_item.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List
from flax.consensus.cost_calculator import NPCResult
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint32, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class MempoolItem(Streamable):
    """A transaction queued in the mempool together with its cached validation results."""

    spend_bundle: SpendBundle
    fee: uint64  # fee offered by the spend bundle
    npc_result: NPCResult  # cached result of running/validating the bundle
    cost: uint64  # cost used for fee-rate ordering
    spend_bundle_name: bytes32  # cached name/hash of spend_bundle
    additions: List[Coin]  # coins created by the bundle
    removals: List[Coin]  # coins spent by the bundle
    height_added_to_mempool: uint32

    def __lt__(self, other: MempoolItem) -> bool:
        # Items compare by fee rate so the mempool can order them by priority.
        return self.fee_per_cost < other.fee_per_cost

    @property
    def fee_per_cost(self) -> float:
        # NOTE(review): raises ZeroDivisionError when cost == 0 — presumably
        # cost is always positive for admitted items; confirm upstream.
        return int(self.fee) / int(self.cost)

    @property
    def name(self) -> bytes32:
        """Alias for the cached spend bundle name."""
        return self.spend_bundle_name
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/spend_bundle_conditions.py | flax/types/spend_bundle_conditions.py | from __future__ import annotations
from chia_rs import Spend, SpendBundleConditions
__all__ = ["Spend", "SpendBundleConditions"]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/mempool_submission_status.py | flax/types/mempool_submission_status.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, Optional, Union
from flax.types.mempool_inclusion_status import MempoolInclusionStatus
from flax.util.ints import uint8
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class MempoolSubmissionStatus(Streamable):
    """
    Outcome of submitting a transaction to one peer's mempool.

    `sent_to` in `TradeRecord` and `TransactionRecord` holds tuples of
    (peer_id: str, status: MempoolInclusionStatus, error: Optional[str]).
    MempoolInclusionStatus is stored as a uint8 in those structs so they can
    remain `Streamable`.
    """

    peer_id: str
    inclusion_status: uint8  # raw value of a MempoolInclusionStatus
    error_msg: Optional[str]

    def to_json_dict_convenience(self) -> Dict[str, Union[str, MempoolInclusionStatus, Optional[str]]]:
        """JSON dict with `inclusion_status` rendered as the enum member's name."""
        rendered = self.to_json_dict()
        status = MempoolInclusionStatus(self.inclusion_status)
        rendered["inclusion_status"] = status.name
        return rendered

    def __str__(self) -> str:
        return str(self.to_json_dict_convenience())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/slots.py | flax/types/blockchain_format/slots.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from blspy import G2Element
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.util.ints import uint8, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class ChallengeBlockInfo(Streamable):
    """The hash of this is used as the challenge_hash for the ICC VDF."""

    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Only present if not the first sp
    challenge_chain_sp_signature: G2Element
    challenge_chain_ip_vdf: VDFInfo
@streamable
@dataclass(frozen=True)
class ChallengeChainSubSlot(Streamable):
    """End-of-sub-slot data for the challenge chain."""

    challenge_chain_end_of_slot_vdf: VDFInfo
    infused_challenge_chain_sub_slot_hash: Optional[bytes32]  # Only at the end of a slot
    subepoch_summary_hash: Optional[bytes32]  # Only once per sub-epoch, and one sub-epoch delayed
    new_sub_slot_iters: Optional[uint64]  # Only at the end of epoch, sub-epoch, and slot
    new_difficulty: Optional[uint64]  # Only at the end of epoch, sub-epoch, and slot
@streamable
@dataclass(frozen=True)
class InfusedChallengeChainSubSlot(Streamable):
    """End-of-sub-slot data for the infused challenge chain."""

    infused_challenge_chain_end_of_slot_vdf: VDFInfo
@streamable
@dataclass(frozen=True)
class RewardChainSubSlot(Streamable):
    """End-of-sub-slot data for the reward chain."""

    end_of_slot_vdf: VDFInfo
    challenge_chain_sub_slot_hash: bytes32
    infused_challenge_chain_sub_slot_hash: Optional[bytes32]
    deficit: uint8  # 16 or less. usually zero
@streamable
@dataclass(frozen=True)
class SubSlotProofs(Streamable):
    """VDF proofs accompanying the sub-slot infos above."""

    challenge_chain_slot_proof: VDFProof
    infused_challenge_chain_slot_proof: Optional[VDFProof]  # absent when there is no ICC
    reward_chain_slot_proof: VDFProof
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/program.py | flax/types/blockchain_format/program.py | from __future__ import annotations
import io
from typing import Callable, Dict, List, Set, Tuple, Optional, Any
from clvm import SExp
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.serialize import sexp_from_stream, sexp_to_stream
from chia_rs import MEMPOOL_MODE, run_chia_program, serialized_length, run_generator, tree_hash
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
from flax.util.byte_types import hexstr_to_bytes
from flax.types.spend_bundle_conditions import SpendBundleConditions
from .tree_hash import sha256_treehash
INFINITE_COST = 0x7FFFFFFFFFFFFFFF
class Program(SExp):
    """
    A thin wrapper around s-expression data intended to be invoked with "eval".
    """

    @classmethod
    def parse(cls, f) -> "Program":
        """Deserialize one clvm program from the binary stream `f`."""
        return sexp_from_stream(f, cls.to)

    def stream(self, f):
        """Write this program's canonical serialization to the binary stream `f`."""
        sexp_to_stream(self, f)

    @classmethod
    def from_bytes(cls, blob: bytes) -> Program:
        """Parse a serialized program from `blob` via the fast rust parser."""
        # this runs the program "1", which just returns the first argument.
        # the first argument is the buffer we want to parse. This effectively
        # leverages the rust parser and LazyNode, making it a lot faster to
        # parse serialized programs into a python compatible structure
        cost, ret = run_chia_program(
            b"\x01",
            blob,  # the buffer to parse, passed as the environment
            50,  # tiny max_cost: only the single "return argument" op runs
            0,
        )
        return Program.to(ret)

    @classmethod
    def fromhex(cls, hexstr: str) -> "Program":
        """Parse a program from its hex-encoded serialization."""
        return cls.from_bytes(hexstr_to_bytes(hexstr))

    def to_serialized_program(self) -> "SerializedProgram":
        """Convert to the opaque, already-serialized representation."""
        return SerializedProgram.from_bytes(bytes(self))

    def __bytes__(self) -> bytes:
        f = io.BytesIO()
        self.stream(f)  # noqa
        return f.getvalue()

    def __str__(self) -> str:
        # hex of the canonical serialization
        return bytes(self).hex()

    def at(self, position: str) -> "Program":
        """
        Take a string of only `f` and `r` characters and follow the corresponding path.

        Example:

        `assert Program.to(17) == Program.to([10, 20, 30, [15, 17], 40, 50]).at("rrrfrf")`
        """
        v = self
        for c in position.lower():
            if c == "f":
                v = v.first()
            elif c == "r":
                v = v.rest()
            else:
                raise ValueError(f"`at` got illegal character `{c}`. Only `f` & `r` allowed")
        return v

    def replace(self, **kwargs) -> "Program":
        """
        Create a new program replacing the given paths (using `at` syntax).

        Example:
        ```
        >>> p1 = Program.to([100, 200, 300])
        >>> print(p1.replace(f=105) == Program.to([105, 200, 300]))
        True
        >>> print(p1.replace(rrf=[301, 302]) == Program.to([100, 200, [301, 302]]))
        True
        >>> print(p1.replace(f=105, rrf=[301, 302]) == Program.to([105, 200, [301, 302]]))
        True
        ```

        This is a convenience method intended for use in the wallet or command-line hacks where
        it would be easier to morph elements of an existing clvm object tree than to rebuild
        one from scratch.

        Note that `Program` objects are immutable. This function returns a new object; the
        original is left as-is.
        """
        return _sexp_replace(self, self.to, **kwargs)

    def get_tree_hash_precalc(self, *args: bytes32) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        return sha256_treehash(self, set(args))

    def get_tree_hash(self) -> bytes32:
        """Return the sha256 tree hash of this program (computed in rust)."""
        return bytes32(tree_hash(bytes(self)))

    def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
        """Evaluate this program on `args`, aborting past `max_cost`; returns (cost, result)."""
        prog_args = Program.to(args)
        cost, r = run_chia_program(self.as_bin(), prog_args.as_bin(), max_cost, 0)
        return cost, Program.to(r)

    def run(self, args) -> "Program":
        """Evaluate this program on `args` with an effectively unlimited cost budget."""
        cost, r = self.run_with_cost(INFINITE_COST, args)
        return r

    # Replicates the curry function from clvm_tools, taking advantage of *args
    # being a list. We iterate through args in reverse building the code to
    # create a clvm list.
    #
    # Given arguments to a function addressable by the '1' reference in clvm
    #
    # fixed_args = 1
    #
    # Each arg is prepended as fixed_args = (c (q . arg) fixed_args)
    #
    # The resulting argument list is interpreted with apply (2)
    #
    # (2 (1 . self) rest)
    #
    # Resulting in a function which places its own arguments after those
    # curried in in the form of a proper list.
    def curry(self, *args) -> "Program":
        fixed_args: Any = 1
        for arg in reversed(args):
            fixed_args = [4, (1, arg), fixed_args]
        return Program.to([2, (1, self), fixed_args])

    def uncurry(self) -> Tuple[Program, Program]:
        """
        Inverse of `curry`: split `(2 (1 . mod) args)` into (mod, list of curried args).
        Returns (self, 0) when `self` is not in curried form.
        """

        def match(o: SExp, expected: bytes) -> None:
            # assert the node is the expected single-byte atom
            if o.atom != expected:
                raise ValueError(f"expected: {expected.hex()}")

        try:
            # (2 (1 . <mod>) <args>)
            ev, quoted_inner, args_list = self.as_iter()
            match(ev, b"\x02")
            match(quoted_inner.pair[0], b"\x01")
            mod = quoted_inner.pair[1]
            args = []
            while args_list.pair is not None:
                # (4 (1 . <arg>) <rest>)
                cons, quoted_arg, rest = args_list.as_iter()
                match(cons, b"\x04")
                match(quoted_arg.pair[0], b"\x01")
                args.append(quoted_arg.pair[1])
                args_list = rest
            # the curried-args chain terminates in the env reference `1`
            match(args_list, b"\x01")
            return Program.to(mod), Program.to(args)
        except ValueError:  # too many values to unpack
            # when unpacking as_iter()
            # or when a match() fails
            return self, self.to(0)
        except TypeError:  # NoneType not subscriptable
            # when an object is not a pair or atom as expected
            return self, self.to(0)
        except EvalError:  # first of non-cons
            # when as_iter() fails
            return self, self.to(0)

    def as_int(self) -> int:
        """Interpret this atom as a signed big-endian integer."""
        return int_from_bytes(self.as_atom())

    def as_atom_list(self) -> List[bytes]:
        """
        Pretend `self` is a list of atoms. Return the corresponding
        python list of atoms.

        At each step, we always assume a node to be an atom or a pair.
        If the assumption is wrong, we exit early. This way we never fail
        and always return SOMETHING.
        """
        items = []
        obj = self
        while True:
            pair = obj.pair
            if pair is None:
                break
            atom = pair[0].atom
            if atom is None:
                break
            items.append(atom)
            obj = pair[1]
        return items

    def __deepcopy__(self, memo):
        # deep copy by round-tripping through the canonical serialization
        return type(self).from_bytes(bytes(self))

    EvalError = EvalError  # class-level alias of clvm's EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
    """
    Recursively compute the clvm sha256 tree hash of `node`.

    Hash values in `precalculated` are presumed to have been hashed already.
    """
    if node.listp():
        left_hash = _tree_hash(node.first(), precalculated)
        right_hash = _tree_hash(node.rest(), precalculated)
        return bytes32(std_hash(b"\2" + left_hash + right_hash))
    atom = node.as_atom()
    if atom in precalculated:
        return bytes32(atom)
    return bytes32(std_hash(b"\1" + atom))
def _serialize(node) -> bytes:
    """
    Return the canonical clvm serialization of `node`.

    A SerializedProgram already holds its serialized form, so its bytes are
    returned directly instead of round-tripping through SExp.
    """
    # isinstance (rather than an exact type() comparison) also covers
    # subclasses of SerializedProgram, which carry their own serialized bytes.
    if isinstance(node, SerializedProgram):
        return bytes(node)
    return SExp.to(node).as_bin()
class SerializedProgram:
    """
    An opaque representation of a clvm program. It has a more limited interface than a full SExp
    """

    _buf: bytes = b""  # canonical clvm serialization of the program

    @classmethod
    def parse(cls, f) -> "SerializedProgram":
        """Read exactly one serialized program from `f`.

        NOTE(review): assumes a BytesIO-style stream (uses getvalue/tell to
        peek at the remaining buffer to determine the program's length).
        """
        length = serialized_length(f.getvalue()[f.tell() :])
        return SerializedProgram.from_bytes(f.read(length))

    def stream(self, f):
        """Write the raw serialized bytes to the binary stream `f`."""
        f.write(self._buf)

    @classmethod
    def from_bytes(cls, blob: bytes) -> "SerializedProgram":
        """Wrap already-serialized bytes without parsing them."""
        ret = SerializedProgram()
        ret._buf = bytes(blob)
        return ret

    @classmethod
    def fromhex(cls, hexstr: str) -> "SerializedProgram":
        """Wrap a hex-encoded serialization."""
        return cls.from_bytes(hexstr_to_bytes(hexstr))

    @classmethod
    def from_program(cls, p: Program) -> "SerializedProgram":
        """Serialize a full Program into the opaque form."""
        ret = SerializedProgram()
        ret._buf = bytes(p)
        return ret

    def to_program(self) -> Program:
        """Deserialize back into a full Program."""
        return Program.from_bytes(self._buf)

    def uncurry(self) -> Tuple["Program", "Program"]:
        # deserialize, then reuse Program.uncurry
        return self.to_program().uncurry()

    def __bytes__(self) -> bytes:
        return self._buf

    def __str__(self) -> str:
        return bytes(self).hex()

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self))

    def __eq__(self, other) -> bool:
        # equality is byte equality of the serializations
        if not isinstance(other, SerializedProgram):
            return False
        return self._buf == other._buf

    def __ne__(self, other) -> bool:
        if not isinstance(other, SerializedProgram):
            return True
        return self._buf != other._buf

    def get_tree_hash(self) -> bytes32:
        """Return the sha256 tree hash (computed in rust, without deserializing)."""
        return bytes32(tree_hash(self._buf))

    def run_mempool_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        """Run with the stricter MEMPOOL_MODE validation flags."""
        return self._run(max_cost, MEMPOOL_MODE, *args)

    def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        """Run with no extra validation flags (consensus mode)."""
        return self._run(max_cost, 0, *args)

    # returns an optional error code and an optional SpendBundleConditions (from chia_rs)
    # exactly one of those will hold a value
    def run_as_generator(
        self, max_cost: int, flags: int, *args
    ) -> Tuple[Optional[int], Optional[SpendBundleConditions]]:
        serialized_args = bytearray()
        if len(args) > 1:
            # when we have more than one argument, serialize them into a list
            for a in args:
                serialized_args += b"\xff"
                serialized_args += _serialize(a)
            serialized_args += b"\x80"
        else:
            serialized_args += _serialize(args[0])

        err, ret = run_generator(
            self._buf,
            bytes(serialized_args),
            max_cost,
            flags,
        )
        if err is not None:
            assert err != 0
            return err, None
        assert ret is not None
        return None, ret

    def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
        # when multiple arguments are passed, concatenate them into a serialized
        # buffer. Some arguments may already be in serialized form (e.g.
        # SerializedProgram) so we don't want to de-serialize those just to
        # serialize them back again. This is handled by _serialize()
        serialized_args = bytearray()
        if len(args) > 1:
            # when we have more than one argument, serialize them into a list
            for a in args:
                serialized_args += b"\xff"
                serialized_args += _serialize(a)
            serialized_args += b"\x80"
        else:
            serialized_args += _serialize(args[0])

        cost, ret = run_chia_program(
            self._buf,
            bytes(serialized_args),
            max_cost,
            flags,
        )
        return cost, Program.to(ret)
NIL = Program.from_bytes(b"\x80")  # the serialized empty atom, clvm's nil value
def _sexp_replace(sexp: SExp, to_sexp: Callable[[Any], SExp], **kwargs) -> SExp:
    """
    Return a copy of `sexp` with the subtrees named by `kwargs` paths replaced.

    Each keyword is a path of `f`/`r` characters (as in `Program.at`); the
    empty path means "replace the whole expression". Paths must not conflict
    (one may not be a prefix of another) and must not descend into an atom.
    """
    # if `kwargs == {}` then `return sexp` unchanged
    if len(kwargs) == 0:
        return sexp

    if "" in kwargs:
        if len(kwargs) > 1:
            raise ValueError("conflicting paths")
        return kwargs[""]

    # we've confirmed that no `kwargs` is the empty string.
    # Now split `kwargs` into two groups: those
    # that start with `f` and those that start with `r`
    args_by_prefix: Dict[str, Dict[str, Any]] = {}
    for k, v in kwargs.items():
        c = k[0]
        if c not in "fr":
            # BUGFIX: the original left a literal "%s" placeholder unformatted;
            # interpolate the offending character so the error is actionable.
            raise ValueError(f"bad path containing {c}: must only contain `f` and `r`")
        args_by_prefix.setdefault(c, dict())[k[1:]] = v

    pair = sexp.pair
    if pair is None:
        raise ValueError("path into atom")

    # recurse down the tree
    new_f = _sexp_replace(pair[0], to_sexp, **args_by_prefix.get("f", {}))
    new_r = _sexp_replace(pair[1], to_sexp, **args_by_prefix.get("r", {}))

    return to_sexp((new_f, new_r))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/reward_chain_block.py | flax/types/blockchain_format/reward_chain_block.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from blspy import G2Element
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFInfo
from flax.util.ints import uint8, uint32, uint128
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class RewardChainBlockUnfinished(Streamable):
    """Reward-chain portion of a block before infusion (no ip VDFs yet)."""

    total_iters: uint128
    signage_point_index: uint8
    pos_ss_cc_challenge_hash: bytes32
    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    challenge_chain_sp_signature: G2Element
    reward_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    reward_chain_sp_signature: G2Element
@streamable
@dataclass(frozen=True)
class RewardChainBlock(Streamable):
    """Reward-chain portion of a finished (infused) block."""

    weight: uint128
    height: uint32
    total_iters: uint128
    signage_point_index: uint8
    pos_ss_cc_challenge_hash: bytes32
    proof_of_space: ProofOfSpace
    challenge_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    challenge_chain_sp_signature: G2Element
    challenge_chain_ip_vdf: VDFInfo
    reward_chain_sp_vdf: Optional[VDFInfo]  # Not present for first sp in slot
    reward_chain_sp_signature: G2Element
    reward_chain_ip_vdf: VDFInfo
    infused_challenge_chain_ip_vdf: Optional[VDFInfo]  # Iff deficit < 16
    is_transaction_block: bool

    def get_unfinished(self) -> RewardChainBlockUnfinished:
        """Project this block onto its unfinished form (drops the ip VDF fields)."""
        return RewardChainBlockUnfinished(
            self.total_iters,
            self.signage_point_index,
            self.pos_ss_cc_challenge_hash,
            self.proof_of_space,
            self.challenge_chain_sp_vdf,
            self.challenge_chain_sp_signature,
            self.reward_chain_sp_vdf,
            self.reward_chain_sp_signature,
        )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/proof_of_space.py | flax/types/blockchain_format/proof_of_space.py | import logging
from dataclasses import dataclass
from typing import Optional
from bitstring import BitArray
from blspy import G1Element, AugSchemeMPL, PrivateKey
from chiapos import Verifier
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
from flax.util.ints import uint8
from flax.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@streamable
@dataclass(frozen=True)
class ProofOfSpace(Streamable):
    """A claim that a plot holds a valid proof of space for a given challenge."""

    challenge: bytes32
    pool_public_key: Optional[G1Element]  # Only one of these two should be present
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    size: uint8  # plot size parameter (checked against MIN/MAX_PLOT_SIZE)
    proof: bytes

    def get_plot_id(self) -> bytes32:
        """Derive the plot id from the plot key plus whichever pool commitment is set."""
        assert self.pool_public_key is None or self.pool_contract_puzzle_hash is None
        if self.pool_public_key is None:
            assert self.pool_contract_puzzle_hash is not None
            return self.calculate_plot_id_ph(self.pool_contract_puzzle_hash, self.plot_public_key)
        return self.calculate_plot_id_pk(self.pool_public_key, self.plot_public_key)

    def verify_and_get_quality_string(
        self,
        constants: ConsensusConstants,
        original_challenge_hash: bytes32,
        signage_point: bytes32,
    ) -> Optional[bytes32]:
        """
        Fully validate this proof for the given challenge and signage point.
        Returns the quality string on success, or None (the failing check is logged).
        """
        # Exactly one of (pool_public_key, pool_contract_puzzle_hash) must not be None
        if (self.pool_public_key is None) and (self.pool_contract_puzzle_hash is None):
            log.error("Fail 1")
            return None
        if (self.pool_public_key is not None) and (self.pool_contract_puzzle_hash is not None):
            log.error("Fail 2")
            return None
        # Plot size must be within consensus bounds
        if self.size < constants.MIN_PLOT_SIZE:
            log.error("Fail 3")
            return None
        if self.size > constants.MAX_PLOT_SIZE:
            log.error("Fail 4")
            return None
        # The stored challenge must match the one recomputed from plot id + sp
        plot_id: bytes32 = self.get_plot_id()
        new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, original_challenge_hash, signage_point)
        if new_challenge != self.challenge:
            log.error("New challenge is not challenge")
            return None
        if not ProofOfSpace.passes_plot_filter(constants, plot_id, original_challenge_hash, signage_point):
            log.error("Fail 5")
            return None
        return self.get_quality_string(plot_id)

    def get_quality_string(self, plot_id: bytes32) -> Optional[bytes32]:
        """Run the chiapos verifier on the proof; None means the proof is invalid."""
        quality_str = Verifier().validate_proof(plot_id, self.size, self.challenge, bytes(self.proof))
        if not quality_str:
            return None
        return bytes32(quality_str)

    @staticmethod
    def passes_plot_filter(
        constants: ConsensusConstants,
        plot_id: bytes32,
        challenge_hash: bytes32,
        signage_point: bytes32,
    ) -> bool:
        """True iff the filter hash begins with NUMBER_ZERO_BITS_PLOT_FILTER zero bits."""
        plot_filter: BitArray = BitArray(
            ProofOfSpace.calculate_plot_filter_input(plot_id, challenge_hash, signage_point)
        )
        return plot_filter[: constants.NUMBER_ZERO_BITS_PLOT_FILTER].uint == 0

    @staticmethod
    def calculate_plot_filter_input(plot_id: bytes32, challenge_hash: bytes32, signage_point: bytes32) -> bytes32:
        return std_hash(plot_id + challenge_hash + signage_point)

    @staticmethod
    def calculate_pos_challenge(plot_id: bytes32, challenge_hash: bytes32, signage_point: bytes32) -> bytes32:
        # The proof-of-space challenge is the hash of the plot filter input.
        return std_hash(ProofOfSpace.calculate_plot_filter_input(plot_id, challenge_hash, signage_point))

    @staticmethod
    def calculate_plot_id_pk(
        pool_public_key: G1Element,
        plot_public_key: G1Element,
    ) -> bytes32:
        """Plot id for plots committed to a pool public key."""
        return std_hash(bytes(pool_public_key) + bytes(plot_public_key))

    @staticmethod
    def calculate_plot_id_ph(
        pool_contract_puzzle_hash: bytes32,
        plot_public_key: G1Element,
    ) -> bytes32:
        """Plot id for plots committed to a pool contract puzzle hash."""
        return std_hash(bytes(pool_contract_puzzle_hash) + bytes(plot_public_key))

    @staticmethod
    def generate_taproot_sk(local_pk: G1Element, farmer_pk: G1Element) -> PrivateKey:
        # Deterministic key derived by hashing the aggregate and individual keys.
        taproot_message: bytes = bytes(local_pk + farmer_pk) + bytes(local_pk) + bytes(farmer_pk)
        taproot_hash: bytes32 = std_hash(taproot_message)
        return AugSchemeMPL.key_gen(taproot_hash)

    @staticmethod
    def generate_plot_public_key(local_pk: G1Element, farmer_pk: G1Element, include_taproot: bool = False) -> G1Element:
        """Aggregate local and farmer keys, optionally adding the derived taproot key."""
        if include_taproot:
            taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_pk, farmer_pk)
            return local_pk + farmer_pk + taproot_sk.get_g1()
        else:
            return local_pk + farmer_pk
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/foliage.py | flax/types/blockchain_format/foliage.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from blspy import G2Element
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.pool_target import PoolTarget
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class TransactionsInfo(Streamable):
    """Information that goes along with each transaction block."""

    generator_root: bytes32  # sha256 of the block generator in this block
    generator_refs_root: bytes32  # sha256 of the concatenation of the generator ref list entries
    aggregated_signature: G2Element
    fees: uint64  # This only includes user fees, not block rewards
    cost: uint64  # This is the total cost of this block, including CLVM cost, cost of program size and conditions
    reward_claims_incorporated: List[Coin]  # These can be in any order
@streamable
@dataclass(frozen=True)
class FoliageTransactionBlock(Streamable):
    """Information that goes along with each transaction block that is relevant for light clients."""

    prev_transaction_block_hash: bytes32
    timestamp: uint64
    filter_hash: bytes32
    additions_root: bytes32
    removals_root: bytes32
    transactions_info_hash: bytes32
@streamable
@dataclass(frozen=True)
class FoliageBlockData(Streamable):
    """Part of the block that is signed by the plot key."""

    unfinished_reward_block_hash: bytes32
    pool_target: PoolTarget
    pool_signature: Optional[G2Element]  # Iff ProofOfSpace has a pool pk
    farmer_reward_puzzle_hash: bytes32
    extension_data: bytes32  # Used for future updates. Can be any 32 byte value initially
@streamable
@dataclass(frozen=True)
class Foliage(Streamable):
    """
    The entire foliage block, containing signature and the unsigned back pointer.

    The hash of this is the "header hash". Note that for unfinished blocks,
    the prev_block_hash is the prev from the signage point, and can be
    replaced with a more recent block.
    """

    prev_block_hash: bytes32
    reward_block_hash: bytes32
    foliage_block_data: FoliageBlockData
    foliage_block_data_signature: G2Element
    foliage_transaction_block_hash: Optional[bytes32]
    foliage_transaction_block_signature: Optional[G2Element]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/sub_epoch_summary.py | flax/types/blockchain_format/sub_epoch_summary.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint8, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class SubEpochSummary(Streamable):
    """Summary committed once per sub-epoch, carrying any difficulty adjustments."""

    prev_subepoch_summary_hash: bytes32
    reward_chain_hash: bytes32  # hash of reward chain at end of last segment
    num_blocks_overflow: uint8  # How many more blocks than 384*(N-1)
    new_difficulty: Optional[uint64]  # Only once per epoch (diff adjustment)
    new_sub_slot_iters: Optional[uint64]  # Only once per epoch (diff adjustment)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/tree_hash.py | flax/types/blockchain_format/tree_hash.py | """
This is an implementation of `sha256_treehash`, used to calculate
puzzle hashes in clvm.
This implementation goes to great pains to be non-recursive so we don't
have to worry about blowing out the python stack.
"""
from __future__ import annotations
from typing import Optional, Set
from clvm import CLVMObject
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
def sha256_treehash(sexp: CLVMObject, precalculated: Optional[Set[bytes32]] = None) -> bytes32:
    """
    Compute the clvm sha256 tree hash of `sexp` without recursion.

    Hash values in `precalculated` are presumed to have been hashed already.
    """
    if precalculated is None:
        precalculated = set()

    # Explicit stack machine: `op_stack` holds pending operations (LIFO) and
    # `sexp_stack` holds nodes still to be hashed, interleaved with computed
    # hash results. This avoids blowing the python call stack on deep trees.

    def handle_sexp(sexp_stack, op_stack, precalculated: Set[bytes32]) -> None:
        # Pop one node; either schedule hashing of its two halves or hash its atom.
        sexp = sexp_stack.pop()
        if sexp.pair:
            p0, p1 = sexp.pair
            sexp_stack.append(p0)
            sexp_stack.append(p1)
            # Popped in reverse: hash p1 (top), swap so p0 is on top,
            # hash p0, then combine the two child hashes.
            op_stack.append(handle_pair)
            op_stack.append(handle_sexp)
            op_stack.append(roll)
            op_stack.append(handle_sexp)
        else:
            if sexp.atom in precalculated:
                r = sexp.atom  # already a hash; use as-is
            else:
                r = std_hash(b"\1" + sexp.atom)  # atom hash: 0x01 prefix
            sexp_stack.append(r)

    def handle_pair(sexp_stack, op_stack, precalculated) -> None:
        # Combine two child hashes into the pair hash (0x02 prefix).
        p0 = sexp_stack.pop()
        p1 = sexp_stack.pop()
        sexp_stack.append(std_hash(b"\2" + p0 + p1))

    def roll(sexp_stack, op_stack, precalculated) -> None:
        # Swap the top two stack entries.
        p0 = sexp_stack.pop()
        p1 = sexp_stack.pop()
        sexp_stack.append(p0)
        sexp_stack.append(p1)

    sexp_stack = [sexp]
    op_stack = [handle_sexp]
    while len(op_stack) > 0:
        op = op_stack.pop()
        op(sexp_stack, op_stack, precalculated)
    return bytes32(sexp_stack[0])
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/classgroup.py | flax/types/blockchain_format/classgroup.py | from __future__ import annotations
from dataclasses import dataclass
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.sized_bytes import bytes100
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class ClassgroupElement(Streamable):
    """
    Represents a classgroup element (a,b,c) where a, b, and c are 512 bit signed
    integers, stored in a compressed representation. VDF outputs are a single
    classgroup element; VDF proofs can also be one classgroup element (or multiple).
    """

    data: bytes100

    @staticmethod
    def from_bytes(data) -> "ClassgroupElement":
        # Right-pad short inputs with zero bytes up to the fixed 100-byte size.
        shortfall = 100 - len(data)
        if shortfall > 0:
            data = data + b"\x00" * shortfall
        return ClassgroupElement(bytes100(data))

    @staticmethod
    def get_default_element() -> "ClassgroupElement":
        # Bit 3 in the first byte of serialized compressed form indicates if
        # it's the default generator element.
        return ClassgroupElement.from_bytes(b"\x08")

    @staticmethod
    def get_size(constants: ConsensusConstants):
        # The compressed size is fixed regardless of consensus constants.
        return 100
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/vdf.py | flax/types/blockchain_format/vdf.py | import logging
import traceback
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional
from functools import lru_cache
from chiavdf import create_discriminant, verify_n_wesolowski
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.sized_bytes import bytes32, bytes100
from flax.util.ints import uint8, uint64
from flax.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@lru_cache(maxsize=200)
def get_discriminant(challenge, size_bites) -> int:
    # Derive the classgroup discriminant for `challenge` via chiavdf; the
    # returned hex string is parsed as an int. Cached since derivation is
    # repeated for the same challenge.
    # NOTE(review): `size_bites` is a long-standing typo for "size_bits";
    # renaming it would break keyword callers, so it is left as-is.
    return int(
        create_discriminant(challenge, size_bites),
        16,
    )
@lru_cache(maxsize=1000)
def verify_vdf(
    disc: int,
    input_el: bytes100,
    output: bytes,
    number_of_iterations: uint64,
    discriminant_size: int,
    witness_type: uint8,
):
    """
    Check an n-wesolowski VDF proof via chiavdf. Cached because verification
    is expensive and the same proof may be checked more than once.
    """
    return verify_n_wesolowski(
        str(disc),
        input_el,
        output,
        number_of_iterations,
        discriminant_size,
        witness_type,
    )
@streamable
@dataclass(frozen=True)
class VDFInfo(Streamable):
    """Statement of a VDF computation: challenge, iteration count, and claimed output."""

    challenge: bytes32  # Used to generate the discriminant (VDF group)
    number_of_iterations: uint64
    output: ClassgroupElement
@streamable
@dataclass(frozen=True)
class VDFProof(Streamable):
    """Witness proving that a VDFInfo's output is correct."""

    witness_type: uint8  # number of nested wesolowski proofs
    witness: bytes
    normalized_to_identity: bool

    def is_valid(
        self,
        constants: ConsensusConstants,
        input_el: ClassgroupElement,
        info: VDFInfo,
        target_vdf_info: Optional[VDFInfo] = None,
    ) -> bool:
        """
        Verify this proof against `info`, starting the VDF from `input_el`.

        If target_vdf_info is passed in, it is compared with info.
        """
        if target_vdf_info is not None and info != target_vdf_info:
            tb = traceback.format_stack()
            log.error(f"{tb} INVALID VDF INFO. Have: {info} Expected: {target_vdf_info}")
            return False
        if self.witness_type + 1 > constants.MAX_VDF_WITNESS_SIZE:
            return False
        try:
            disc: int = get_discriminant(info.challenge, constants.DISCRIMINANT_SIZE_BITS)
            # TODO: parallelize somehow, this might included multiple mini proofs (n weso)
            return verify_vdf(
                disc,
                input_el.data,
                info.output.data + bytes(self.witness),
                info.number_of_iterations,
                constants.DISCRIMINANT_SIZE_BITS,
                self.witness_type,
            )
        except Exception:
            # chiavdf may raise on malformed input; treat any failure as invalid
            return False
# Stores, for a given VDF, the field that uses it.
class CompressibleVDFField(IntEnum):
    CC_EOS_VDF = 1  # challenge chain end-of-slot
    ICC_EOS_VDF = 2  # infused challenge chain end-of-slot
    CC_SP_VDF = 3  # challenge chain signage point
    CC_IP_VDF = 4  # challenge chain infusion point
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/__init__.py | flax/types/blockchain_format/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/pool_target.py | flax/types/blockchain_format/pool_target.py | from __future__ import annotations
from dataclasses import dataclass
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class PoolTarget(Streamable):
    """Pool payout target the farmer commits to, valid up to `max_height`."""

    puzzle_hash: bytes32
    max_height: uint32  # A max height of 0 means it is valid forever
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/sized_bytes.py | flax/types/blockchain_format/sized_bytes.py | from __future__ import annotations
from flax.util.byte_types import SizedBytes
class bytes4(SizedBytes):
    # fixed-length byte string of exactly 4 bytes
    _size = 4
class bytes8(SizedBytes):
    # fixed-length byte string of exactly 8 bytes
    _size = 8
class bytes32(SizedBytes):
    # fixed-length byte string of exactly 32 bytes (hashes, ids, puzzle hashes)
    _size = 32
class bytes48(SizedBytes):
    # fixed-length byte string of exactly 48 bytes (e.g. BLS G1 public keys)
    _size = 48
class bytes96(SizedBytes):
    # fixed-length byte string of exactly 96 bytes (e.g. BLS G2 signatures)
    _size = 96
class bytes100(SizedBytes):
    # fixed-length byte string of exactly 100 bytes
    _size = 100
class bytes480(SizedBytes):
    # fixed-length byte string of exactly 480 bytes
    _size = 480
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/types/blockchain_format/coin.py | flax/types/blockchain_format/coin.py | from __future__ import annotations
from typing import List, Union
from chia_rs import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
from flax.util.ints import uint64
__all__ = ["Coin", "coin_as_list", "hash_coin_ids"]
def coin_as_list(c: Coin) -> List[Union[bytes32, uint64]]:
    """Return the coin's fields as a list: [parent_coin_info, puzzle_hash, amount].

    The amount is re-wrapped as uint64. Presumably this list form is what a
    Coin looks like as a program argument — confirm against callers.
    """
    return [c.parent_coin_info, c.puzzle_hash, uint64(c.amount)]
def hash_coin_ids(coin_ids: List[bytes32]) -> bytes32:
    """Hash a list of coin ids into a single 32-byte digest.

    A single id is hashed directly. Multiple ids are concatenated in
    descending order and hashed once (skip_bytes_conversion avoids an
    extra copy of the accumulated buffer).

    Fix: the previous version sorted the caller's list in place; the sort
    is only needed to build the digest, so we now sort a copy and leave
    the argument unmodified. The returned hash is unchanged.
    """
    if len(coin_ids) == 1:
        return std_hash(coin_ids[0])
    buffer = bytearray()
    for coin_id in sorted(coin_ids, reverse=True):
        buffer.extend(coin_id)
    return std_hash(buffer, skip_bytes_conversion=True)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/protocol_timing.py | flax/protocols/protocol_timing.py | # These settings should not be end-user configurable
from __future__ import annotations
INVALID_PROTOCOL_BAN_SECONDS = 10
API_EXCEPTION_BAN_SECONDS = 10
INTERNAL_PROTOCOL_ERROR_BAN_SECONDS = 10 # Don't flap if our client is at fault
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/protocol_state_machine.py | flax/protocols/protocol_state_machine.py | from flax.protocols.protocol_message_types import ProtocolMessageTypes as pmt, ProtocolMessageTypes
# Fire-and-forget message types: the sender must not wait for a reply to these.
# Kept disjoint from VALID_REPLY_MESSAGE_MAP (checked by
# static_check_sent_message_response at import time).
NO_REPLY_EXPECTED = [
    # full_node -> full_node messages
    pmt.new_peak,
    pmt.new_transaction,
    pmt.new_unfinished_block,
    pmt.new_signage_point_or_end_of_sub_slot,
    pmt.request_mempool_transactions,
    pmt.new_compact_vdf,
    pmt.coin_state_update,
]
"""
VALID_REPLY_MESSAGE_MAP:
key: sent message type.
value: valid reply message types, from the view of the requester.
A state machine can be built from this message map.
"""
VALID_REPLY_MESSAGE_MAP = {
# messages for all services
# pmt.handshake is handled in WSFlaxConnection.perform_handshake
# full_node -> full_node protocol messages
pmt.request_transaction: [pmt.respond_transaction],
pmt.request_proof_of_weight: [pmt.respond_proof_of_weight],
pmt.request_block: [pmt.respond_block, pmt.reject_block],
pmt.request_blocks: [pmt.respond_blocks, pmt.reject_blocks],
pmt.request_unfinished_block: [pmt.respond_unfinished_block],
pmt.request_block_header: [pmt.respond_block_header, pmt.reject_header_request],
pmt.request_removals: [pmt.respond_removals, pmt.reject_removals_request],
pmt.request_additions: [pmt.respond_additions, pmt.reject_additions_request],
pmt.request_signage_point_or_end_of_sub_slot: [pmt.respond_signage_point, pmt.respond_end_of_sub_slot],
pmt.request_compact_vdf: [pmt.respond_compact_vdf],
pmt.request_peers: [pmt.respond_peers],
pmt.request_header_blocks: [pmt.respond_header_blocks, pmt.reject_header_blocks, pmt.reject_block_headers],
pmt.register_interest_in_puzzle_hash: [pmt.respond_to_ph_update],
pmt.register_interest_in_coin: [pmt.respond_to_coin_update],
pmt.request_children: [pmt.respond_children],
pmt.request_ses_hashes: [pmt.respond_ses_hashes],
pmt.request_block_headers: [pmt.respond_block_headers, pmt.reject_block_headers, pmt.reject_header_blocks],
pmt.request_peers_introducer: [pmt.respond_peers_introducer],
pmt.request_puzzle_solution: [pmt.respond_puzzle_solution, pmt.reject_puzzle_solution],
pmt.send_transaction: [pmt.transaction_ack],
}
def static_check_sent_message_response() -> None:
    """Assert that NO_REPLY_EXPECTED and VALID_REPLY_MESSAGE_MAP are disjoint.

    A message type either expects a reply (it has an entry in the map) or it
    does not (it is listed in NO_REPLY_EXPECTED) — never both. This is a
    static invariant, checked once at import time.
    """
    overlap = set(NO_REPLY_EXPECTED) & set(VALID_REPLY_MESSAGE_MAP)
    if overlap:
        raise AssertionError(f"Overlapping NO_REPLY_EXPECTED and VALID_REPLY_MESSAGE_MAP values: {overlap}")
def message_requires_reply(sent: ProtocolMessageTypes) -> bool:
    """Return True when `sent` expects a reply, i.e. it has an entry in VALID_REPLY_MESSAGE_MAP."""
    # If we knew the peer NodeType is FULL_NODE, we could also check `sent not in NO_REPLY_EXPECTED`
    return sent in VALID_REPLY_MESSAGE_MAP
def message_response_ok(sent: ProtocolMessageTypes, received: ProtocolMessageTypes) -> bool:
    """Return True when `received` is an acceptable reply to `sent`.

    Message types with no entry in VALID_REPLY_MESSAGE_MAP accept any reply.
    Call with received == None to indicate that we do not expect a specific
    reply message type.
    """
    # A False result here is a runtime protocol message mismatch from a peer.
    allowed = VALID_REPLY_MESSAGE_MAP.get(sent)
    if allowed is None:
        return True
    return received in allowed
# Run `static_check_sent_message_response` to check this static invariant at import time
static_check_sent_message_response()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/timelord_protocol.py | flax/protocols/timelord_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional, Tuple
from flax.types.blockchain_format.foliage import Foliage
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.streamable import Streamable, streamable
"""
Protocol between timelord and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@streamable
@dataclass(frozen=True)
class NewPeakTimelord(Streamable):
reward_chain_block: RewardChainBlock
difficulty: uint64
deficit: uint8
sub_slot_iters: uint64 # SSi in the slot where NewPeak has been infused
sub_epoch_summary: Optional[
SubEpochSummary
] # If NewPeak is the last slot in epoch, the next slot should include this
previous_reward_challenges: List[Tuple[bytes32, uint128]]
last_challenge_sb_or_eos_total_iters: uint128
passes_ses_height_but_not_yet_included: bool
@streamable
@dataclass(frozen=True)
class NewUnfinishedBlockTimelord(Streamable):
reward_chain_block: RewardChainBlockUnfinished # Reward chain trunk data
difficulty: uint64
sub_slot_iters: uint64 # SSi in the slot where block is infused
foliage: Foliage # Reward chain foliage data
sub_epoch_summary: Optional[SubEpochSummary] # If this is the last slot in epoch, the next slot should include this
# This is the last thing infused in the reward chain before this signage point.
# The challenge that the SP reward chain VDF is based off of, or in the case of sp index 0, the previous infusion
rc_prev: bytes32
@streamable
@dataclass(frozen=True)
class NewInfusionPointVDF(Streamable):
unfinished_reward_hash: bytes32
challenge_chain_ip_vdf: VDFInfo
challenge_chain_ip_proof: VDFProof
reward_chain_ip_vdf: VDFInfo
reward_chain_ip_proof: VDFProof
infused_challenge_chain_ip_vdf: Optional[VDFInfo]
infused_challenge_chain_ip_proof: Optional[VDFProof]
@streamable
@dataclass(frozen=True)
class NewSignagePointVDF(Streamable):
index_from_challenge: uint8
challenge_chain_sp_vdf: VDFInfo
challenge_chain_sp_proof: VDFProof
reward_chain_sp_vdf: VDFInfo
reward_chain_sp_proof: VDFProof
@streamable
@dataclass(frozen=True)
class NewEndOfSubSlotVDF(Streamable):
end_of_sub_slot_bundle: EndOfSubSlotBundle
@streamable
@dataclass(frozen=True)
class RequestCompactProofOfTime(Streamable):
new_proof_of_time: VDFInfo
header_hash: bytes32
height: uint32
field_vdf: uint8
@streamable
@dataclass(frozen=True)
class RespondCompactProofOfTime(Streamable):
vdf_info: VDFInfo
vdf_proof: VDFProof
header_hash: bytes32
height: uint32
field_vdf: uint8
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/farmer_protocol.py | flax/protocols/farmer_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from blspy import G2Element
from flax.types.blockchain_format.pool_target import PoolTarget
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint8, uint32, uint64
from flax.util.streamable import Streamable, streamable
"""
Protocol between farmer and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@streamable
@dataclass(frozen=True)
class NewSignagePoint(Streamable):
challenge_hash: bytes32
challenge_chain_sp: bytes32
reward_chain_sp: bytes32
difficulty: uint64
sub_slot_iters: uint64
signage_point_index: uint8
@streamable
@dataclass(frozen=True)
class DeclareProofOfSpace(Streamable):
challenge_hash: bytes32
challenge_chain_sp: bytes32
signage_point_index: uint8
reward_chain_sp: bytes32
proof_of_space: ProofOfSpace
challenge_chain_sp_signature: G2Element
reward_chain_sp_signature: G2Element
farmer_puzzle_hash: bytes32
pool_target: Optional[PoolTarget]
pool_signature: Optional[G2Element]
@streamable
@dataclass(frozen=True)
class RequestSignedValues(Streamable):
quality_string: bytes32
foliage_block_data_hash: bytes32
foliage_transaction_block_hash: bytes32
@streamable
@dataclass(frozen=True)
class FarmingInfo(Streamable):
challenge_hash: bytes32
sp_hash: bytes32
timestamp: uint64
passed: uint32
proofs: uint32
total_plots: uint32
@streamable
@dataclass(frozen=True)
class SignedValues(Streamable):
quality_string: bytes32
foliage_block_data_signature: G2Element
foliage_transaction_block_signature: G2Element
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/full_node_protocol.py | flax/protocols/full_node_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.full_block import FullBlock
from flax.types.peer_info import TimestampedPeerInfo
from flax.types.spend_bundle import SpendBundle
from flax.types.unfinished_block import UnfinishedBlock
from flax.types.weight_proof import WeightProof
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@streamable
@dataclass(frozen=True)
class NewPeak(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
unfinished_reward_block_hash: bytes32
@streamable
@dataclass(frozen=True)
class NewTransaction(Streamable):
transaction_id: bytes32
cost: uint64
fees: uint64
@streamable
@dataclass(frozen=True)
class RequestTransaction(Streamable):
transaction_id: bytes32
@streamable
@dataclass(frozen=True)
class RespondTransaction(Streamable):
transaction: SpendBundle
@streamable
@dataclass(frozen=True)
class RequestProofOfWeight(Streamable):
total_number_of_blocks: uint32
tip: bytes32
@streamable
@dataclass(frozen=True)
class RespondProofOfWeight(Streamable):
wp: WeightProof
tip: bytes32
@streamable
@dataclass(frozen=True)
class RequestBlock(Streamable):
height: uint32
include_transaction_block: bool
@streamable
@dataclass(frozen=True)
class RejectBlock(Streamable):
height: uint32
@streamable
@dataclass(frozen=True)
class RequestBlocks(Streamable):
start_height: uint32
end_height: uint32
include_transaction_block: bool
@streamable
@dataclass(frozen=True)
class RespondBlocks(Streamable):
start_height: uint32
end_height: uint32
blocks: List[FullBlock]
@streamable
@dataclass(frozen=True)
class RejectBlocks(Streamable):
start_height: uint32
end_height: uint32
@streamable
@dataclass(frozen=True)
class RespondBlock(Streamable):
block: FullBlock
@streamable
@dataclass(frozen=True)
class NewUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@streamable
@dataclass(frozen=True)
class RequestUnfinishedBlock(Streamable):
unfinished_reward_hash: bytes32
@streamable
@dataclass(frozen=True)
class RespondUnfinishedBlock(Streamable):
unfinished_block: UnfinishedBlock
@streamable
@dataclass(frozen=True)
class NewSignagePointOrEndOfSubSlot(Streamable):
prev_challenge_hash: Optional[bytes32]
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@streamable
@dataclass(frozen=True)
class RequestSignagePointOrEndOfSubSlot(Streamable):
challenge_hash: bytes32
index_from_challenge: uint8
last_rc_infusion: bytes32
@streamable
@dataclass(frozen=True)
class RespondSignagePoint(Streamable):
index_from_challenge: uint8
challenge_chain_vdf: VDFInfo
challenge_chain_proof: VDFProof
reward_chain_vdf: VDFInfo
reward_chain_proof: VDFProof
@streamable
@dataclass(frozen=True)
class RespondEndOfSubSlot(Streamable):
end_of_slot_bundle: EndOfSubSlotBundle
@streamable
@dataclass(frozen=True)
class RequestMempoolTransactions(Streamable):
filter: bytes
@streamable
@dataclass(frozen=True)
class NewCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@streamable
@dataclass(frozen=True)
class RequestCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
@streamable
@dataclass(frozen=True)
class RespondCompactVDF(Streamable):
height: uint32
header_hash: bytes32
field_vdf: uint8
vdf_info: VDFInfo
vdf_proof: VDFProof
@streamable
@dataclass(frozen=True)
class RequestPeers(Streamable):
"""
Return full list of peers
"""
@streamable
@dataclass(frozen=True)
class RespondPeers(Streamable):
peer_list: List[TimestampedPeerInfo]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/harvester_protocol.py | flax/protocols/harvester_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional, Tuple
from blspy import G1Element, G2Element
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import int16, uint8, uint32, uint64
from flax.util.streamable import Streamable, streamable
"""
Protocol between harvester and farmer.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@streamable
@dataclass(frozen=True)
class PoolDifficulty(Streamable):
difficulty: uint64
sub_slot_iters: uint64
pool_contract_puzzle_hash: bytes32
@streamable
@dataclass(frozen=True)
class HarvesterHandshake(Streamable):
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
@streamable
@dataclass(frozen=True)
class NewSignagePointHarvester(Streamable):
challenge_hash: bytes32
difficulty: uint64
sub_slot_iters: uint64
signage_point_index: uint8
sp_hash: bytes32
pool_difficulties: List[PoolDifficulty]
@streamable
@dataclass(frozen=True)
class NewProofOfSpace(Streamable):
challenge_hash: bytes32
sp_hash: bytes32
plot_identifier: str
proof: ProofOfSpace
signage_point_index: uint8
@streamable
@dataclass(frozen=True)
class RequestSignatures(Streamable):
plot_identifier: str
challenge_hash: bytes32
sp_hash: bytes32
messages: List[bytes32]
@streamable
@dataclass(frozen=True)
class RespondSignatures(Streamable):
plot_identifier: str
challenge_hash: bytes32
sp_hash: bytes32
local_pk: G1Element
farmer_pk: G1Element
message_signatures: List[Tuple[bytes32, G2Element]]
@streamable
@dataclass(frozen=True)
class Plot(Streamable):
filename: str
size: uint8
plot_id: bytes32
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
file_size: uint64
time_modified: uint64
@streamable
@dataclass(frozen=True)
class RequestPlots(Streamable):
pass
@streamable
@dataclass(frozen=True)
class RespondPlots(Streamable):
plots: List[Plot]
failed_to_open_filenames: List[str]
no_key_filenames: List[str]
@streamable
@dataclass(frozen=True)
class PlotSyncIdentifier(Streamable):
timestamp: uint64
sync_id: uint64
message_id: uint64
@streamable
@dataclass(frozen=True)
class PlotSyncStart(Streamable):
identifier: PlotSyncIdentifier
initial: bool
last_sync_id: uint64
plot_file_count: uint32
def __str__(self) -> str:
return (
f"PlotSyncStart: identifier {self.identifier}, initial {self.initial}, "
f"last_sync_id {self.last_sync_id}, plot_file_count {self.plot_file_count}"
)
@streamable
@dataclass(frozen=True)
class PlotSyncPathList(Streamable):
identifier: PlotSyncIdentifier
data: List[str]
final: bool
def __str__(self) -> str:
return f"PlotSyncPathList: identifier {self.identifier}, count {len(self.data)}, final {self.final}"
@streamable
@dataclass(frozen=True)
class PlotSyncPlotList(Streamable):
identifier: PlotSyncIdentifier
data: List[Plot]
final: bool
def __str__(self) -> str:
return f"PlotSyncPlotList: identifier {self.identifier}, count {len(self.data)}, final {self.final}"
@streamable
@dataclass(frozen=True)
class PlotSyncDone(Streamable):
identifier: PlotSyncIdentifier
duration: uint64
def __str__(self) -> str:
return f"PlotSyncDone: identifier {self.identifier}, duration {self.duration}"
@streamable
@dataclass(frozen=True)
class PlotSyncError(Streamable):
    """Error details carried inside a PlotSyncResponse."""

    code: int16
    message: str
    expected_identifier: Optional[PlotSyncIdentifier]

    def __str__(self) -> str:
        # Fix: this field was previously printed under the label "count"
        # (copy-pasted from the plot-list messages); it is the error message.
        return f"PlotSyncError: code {self.code}, message {self.message}, expected_identifier {self.expected_identifier}"
@streamable
@dataclass(frozen=True)
class PlotSyncResponse(Streamable):
identifier: PlotSyncIdentifier
message_type: int16
error: Optional[PlotSyncError]
def __str__(self) -> str:
return f"PlotSyncResponse: identifier {self.identifier}, message_type {self.message_type}, error {self.error}"
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/protocol_message_types.py | flax/protocols/protocol_message_types.py | from __future__ import annotations
from enum import Enum
class ProtocolMessageTypes(Enum):
# Shared protocol (all services)
handshake = 1
# Harvester protocol (harvester <-> farmer)
harvester_handshake = 3
# new_signage_point_harvester = 4 Changed to 66 in new protocol
new_proof_of_space = 5
request_signatures = 6
respond_signatures = 7
# Farmer protocol (farmer <-> full_node)
new_signage_point = 8
declare_proof_of_space = 9
request_signed_values = 10
signed_values = 11
farming_info = 12
# Timelord protocol (timelord <-> full_node)
new_peak_timelord = 13
new_unfinished_block_timelord = 14
new_infusion_point_vdf = 15
new_signage_point_vdf = 16
new_end_of_sub_slot_vdf = 17
request_compact_proof_of_time = 18
respond_compact_proof_of_time = 19
# Full node protocol (full_node <-> full_node)
new_peak = 20
new_transaction = 21
request_transaction = 22
respond_transaction = 23
request_proof_of_weight = 24
respond_proof_of_weight = 25
request_block = 26
respond_block = 27
reject_block = 28
request_blocks = 29
respond_blocks = 30
reject_blocks = 31
new_unfinished_block = 32
request_unfinished_block = 33
respond_unfinished_block = 34
new_signage_point_or_end_of_sub_slot = 35
request_signage_point_or_end_of_sub_slot = 36
respond_signage_point = 37
respond_end_of_sub_slot = 38
request_mempool_transactions = 39
request_compact_vdf = 40
respond_compact_vdf = 41
new_compact_vdf = 42
request_peers = 43
respond_peers = 44
# Wallet protocol (wallet <-> full_node)
request_puzzle_solution = 45
respond_puzzle_solution = 46
reject_puzzle_solution = 47
send_transaction = 48
transaction_ack = 49
new_peak_wallet = 50
request_block_header = 51
respond_block_header = 52
reject_header_request = 53
request_removals = 54
respond_removals = 55
reject_removals_request = 56
request_additions = 57
respond_additions = 58
reject_additions_request = 59
request_header_blocks = 60
reject_header_blocks = 61
respond_header_blocks = 62
# Introducer protocol (introducer <-> full_node)
request_peers_introducer = 63
respond_peers_introducer = 64
# Simulator protocol
farm_new_block = 65
# New harvester protocol
new_signage_point_harvester = 66
request_plots = 67
respond_plots = 68
plot_sync_start = 78
plot_sync_loaded = 79
plot_sync_removed = 80
plot_sync_invalid = 81
plot_sync_keys_missing = 82
plot_sync_duplicates = 83
plot_sync_done = 84
plot_sync_response = 85
# More wallet protocol
coin_state_update = 69
register_interest_in_puzzle_hash = 70
respond_to_ph_update = 71
register_interest_in_coin = 72
respond_to_coin_update = 73
request_children = 74
respond_children = 75
request_ses_hashes = 76
respond_ses_hashes = 77
request_block_headers = 86
reject_block_headers = 87
respond_block_headers = 88
request_fee_estimates = 89
respond_fee_estimates = 90
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/wallet_protocol.py | flax/protocols/wallet_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional, Tuple
from chia_rs import CoinState, RespondToPhUpdates
from flax.full_node.fee_estimate import FeeEstimateGroup
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.header_block import HeaderBlock
from flax.types.spend_bundle import SpendBundle
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.streamable import Streamable, streamable
"""
Protocol between wallet (SPV node) and full node.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
__all__ = ["CoinState", "RespondToPhUpdates"]
@streamable
@dataclass(frozen=True)
class RequestPuzzleSolution(Streamable):
coin_name: bytes32
height: uint32
@streamable
@dataclass(frozen=True)
class PuzzleSolutionResponse(Streamable):
coin_name: bytes32
height: uint32
puzzle: SerializedProgram
solution: SerializedProgram
@streamable
@dataclass(frozen=True)
class RespondPuzzleSolution(Streamable):
response: PuzzleSolutionResponse
@streamable
@dataclass(frozen=True)
class RejectPuzzleSolution(Streamable):
coin_name: bytes32
height: uint32
@streamable
@dataclass(frozen=True)
class SendTransaction(Streamable):
transaction: SpendBundle
@streamable
@dataclass(frozen=True)
class TransactionAck(Streamable):
txid: bytes32
status: uint8 # MempoolInclusionStatus
error: Optional[str]
@streamable
@dataclass(frozen=True)
class NewPeakWallet(Streamable):
header_hash: bytes32
height: uint32
weight: uint128
fork_point_with_previous_peak: uint32
@streamable
@dataclass(frozen=True)
class RequestBlockHeader(Streamable):
height: uint32
@streamable
@dataclass(frozen=True)
class RespondBlockHeader(Streamable):
header_block: HeaderBlock
@streamable
@dataclass(frozen=True)
class RejectHeaderRequest(Streamable):
height: uint32
@streamable
@dataclass(frozen=True)
class RequestRemovals(Streamable):
height: uint32
header_hash: bytes32
coin_names: Optional[List[bytes32]]
@streamable
@dataclass(frozen=True)
class RespondRemovals(Streamable):
height: uint32
header_hash: bytes32
coins: List[Tuple[bytes32, Optional[Coin]]]
proofs: Optional[List[Tuple[bytes32, bytes]]]
@streamable
@dataclass(frozen=True)
class RejectRemovalsRequest(Streamable):
height: uint32
header_hash: bytes32
@streamable
@dataclass(frozen=True)
class RequestAdditions(Streamable):
height: uint32
header_hash: Optional[bytes32]
puzzle_hashes: Optional[List[bytes32]]
@streamable
@dataclass(frozen=True)
class RespondAdditions(Streamable):
height: uint32
header_hash: bytes32
coins: List[Tuple[bytes32, List[Coin]]]
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]]
@streamable
@dataclass(frozen=True)
class RejectAdditionsRequest(Streamable):
height: uint32
header_hash: bytes32
@streamable
@dataclass(frozen=True)
class RespondBlockHeaders(Streamable):
start_height: uint32
end_height: uint32
header_blocks: List[HeaderBlock]
@streamable
@dataclass(frozen=True)
class RejectBlockHeaders(Streamable):
start_height: uint32
end_height: uint32
@streamable
@dataclass(frozen=True)
class RequestBlockHeaders(Streamable):
start_height: uint32
end_height: uint32
return_filter: bool
@streamable
@dataclass(frozen=True)
class RequestHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
@streamable
@dataclass(frozen=True)
class RejectHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
@streamable
@dataclass(frozen=True)
class RespondHeaderBlocks(Streamable):
start_height: uint32
end_height: uint32
header_blocks: List[HeaderBlock]
# This class is implemented in Rust
# @streamable
# @dataclass(frozen=True)
# class CoinState(Streamable):
# coin: Coin
# spent_height: Optional[uint32]
# created_height: Optional[uint32]
@streamable
@dataclass(frozen=True)
class RegisterForPhUpdates(Streamable):
puzzle_hashes: List[bytes32]
min_height: uint32
# This class is implemented in Rust
# @streamable
# @dataclass(frozen=True)
# class RespondToPhUpdates(Streamable):
# puzzle_hashes: List[bytes32]
# min_height: uint32
# coin_states: List[CoinState]
@streamable
@dataclass(frozen=True)
class RegisterForCoinUpdates(Streamable):
coin_ids: List[bytes32]
min_height: uint32
@streamable
@dataclass(frozen=True)
class RespondToCoinUpdates(Streamable):
coin_ids: List[bytes32]
min_height: uint32
coin_states: List[CoinState]
@streamable
@dataclass(frozen=True)
class CoinStateUpdate(Streamable):
height: uint32
fork_height: uint32
peak_hash: bytes32
items: List[CoinState]
@streamable
@dataclass(frozen=True)
class RequestChildren(Streamable):
coin_name: bytes32
@streamable
@dataclass(frozen=True)
class RespondChildren(Streamable):
coin_states: List[CoinState]
@streamable
@dataclass(frozen=True)
class RequestSESInfo(Streamable):
start_height: uint32
end_height: uint32
@streamable
@dataclass(frozen=True)
class RespondSESInfo(Streamable):
reward_chain_hash: List[bytes32]
heights: List[List[uint32]]
@streamable
@dataclass(frozen=True)
class RequestFeeEstimates(Streamable):
"""
time_targets (List[uint64]): Epoch timestamps in seconds to estimate FeeRates for.
"""
time_targets: List[uint64]
@streamable
@dataclass(frozen=True)
class RespondFeeEstimates(Streamable):
estimates: FeeEstimateGroup
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/shared_protocol.py | flax/protocols/shared_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import List, Tuple
from flax.util.ints import uint8, uint16
from flax.util.streamable import Streamable, streamable
protocol_version = "0.0.34"
"""
Handshake when establishing a connection between two servers.
Note: When changing this file, also change protocol_message_types.py
"""
# Capabilities can be added here when new features are added to the protocol
# These are passed in as uint16 into the Handshake
class Capability(IntEnum):
    """Protocol feature flags, passed as uint16 values in the Handshake."""

    BASE = 1  # Base capability just means it supports the flax protocol at mainnet
    # introduces RequestBlockHeaders, which is a faster API for fetching header blocks
    # !! the old API is *RequestHeaderBlock* !!
    BLOCK_HEADERS = 2
    # Specifies support for v1 and v2 versions of rate limits. Peers will use the lowest shared capability:
    # if peer A supports v3 and peer B supports v2, they should send:
    # (BASE, RATE_LIMITS_V2, RATE_LIMITS_V3), and (BASE, RATE_LIMITS_V2) respectively. They will use the V2 limits.
    RATE_LIMITS_V2 = 3
@streamable
@dataclass(frozen=True)
class Handshake(Streamable):
    """First message exchanged when two services connect (protocol_version above)."""

    network_id: str
    protocol_version: str
    software_version: str
    server_port: uint16
    node_type: uint8
    # (Capability value, setting) pairs; "1" marks the capability as enabled
    capabilities: List[Tuple[uint16, str]]
# "1" means capability is enabled
capabilities = [
(uint16(Capability.BASE.value), "1"),
(uint16(Capability.BLOCK_HEADERS.value), "1"),
(uint16(Capability.RATE_LIMITS_V2.value), "1"),
]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/pool_protocol.py | flax/protocols/pool_protocol.py | from dataclasses import dataclass
from enum import Enum
import time
from typing import Optional
from blspy import G1Element, G2Element
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint8, uint16, uint32, uint64
from flax.util.streamable import Streamable, streamable
POOL_PROTOCOL_VERSION = uint8(1)
class PoolErrorCode(Enum):
    """Error codes sent by a pool in an ErrorResponse (see error_code: uint16)."""

    REVERTED_SIGNAGE_POINT = 1
    TOO_LATE = 2
    NOT_FOUND = 3
    INVALID_PROOF = 4
    PROOF_NOT_GOOD_ENOUGH = 5
    INVALID_DIFFICULTY = 6
    INVALID_SIGNATURE = 7
    SERVER_EXCEPTION = 8
    INVALID_P2_SINGLETON_PUZZLE_HASH = 9
    FARMER_NOT_KNOWN = 10
    FARMER_ALREADY_KNOWN = 11
    INVALID_AUTHENTICATION_TOKEN = 12
    INVALID_PAYOUT_INSTRUCTIONS = 13
    INVALID_SINGLETON = 14
    DELAY_TIME_TOO_SHORT = 15
    REQUEST_FAILED = 16
# Used to verify GET /farmer and GET /login
@streamable
@dataclass(frozen=True)
class AuthenticationPayload(Streamable):
    """Signed payload proving farmer identity for authenticated pool endpoints."""

    method_name: str
    launcher_id: bytes32
    target_puzzle_hash: bytes32
    # current token epoch; see get_current_authentication_token below
    authentication_token: uint64
# GET /pool_info
@streamable
@dataclass(frozen=True)
class GetPoolInfoResponse(Streamable):
name: str
logo_url: str
minimum_difficulty: uint64
relative_lock_height: uint32
protocol_version: uint8
fee: str
description: str
target_puzzle_hash: bytes32
authentication_token_timeout: uint8
# POST /partial
@streamable
@dataclass(frozen=True)
class PostPartialPayload(Streamable):
launcher_id: bytes32
authentication_token: uint64
proof_of_space: ProofOfSpace
sp_hash: bytes32
end_of_sub_slot: bool
harvester_id: bytes32
@streamable
@dataclass(frozen=True)
class PostPartialRequest(Streamable):
payload: PostPartialPayload
aggregate_signature: G2Element
# Response in success case
@streamable
@dataclass(frozen=True)
class PostPartialResponse(Streamable):
new_difficulty: uint64
# GET /farmer
# Response in success case
@streamable
@dataclass(frozen=True)
class GetFarmerResponse(Streamable):
authentication_public_key: G1Element
payout_instructions: str
current_difficulty: uint64
current_points: uint64
# POST /farmer
@streamable
@dataclass(frozen=True)
class PostFarmerPayload(Streamable):
    """Signed portion of a POST /farmer (register) request."""

    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: G1Element
    payout_instructions: str
    suggested_difficulty: Optional[uint64]  # None lets the pool choose
@streamable
@dataclass(frozen=True)
class PostFarmerRequest(Streamable):
    """Full POST /farmer body: payload plus a signature over its hash."""

    payload: PostFarmerPayload
    signature: G2Element
# Response in success case
@streamable
@dataclass(frozen=True)
class PostFarmerResponse(Streamable):
    """Successful POST /farmer response."""

    welcome_message: str
# PUT /farmer
@streamable
@dataclass(frozen=True)
class PutFarmerPayload(Streamable):
    """Signed portion of a PUT /farmer (update) request; None fields are left unchanged."""

    launcher_id: bytes32
    authentication_token: uint64
    authentication_public_key: Optional[G1Element]
    payout_instructions: Optional[str]
    suggested_difficulty: Optional[uint64]
@streamable
@dataclass(frozen=True)
class PutFarmerRequest(Streamable):
    """Full PUT /farmer body: payload plus a signature over its hash."""

    payload: PutFarmerPayload
    signature: G2Element
# Response in success case
@streamable
@dataclass(frozen=True)
class PutFarmerResponse(Streamable):
    """Successful PUT /farmer response; each flag mirrors a PutFarmerPayload field.

    Presumably True means the corresponding field was updated — confirm against the pool spec.
    """

    authentication_public_key: Optional[bool]
    payout_instructions: Optional[bool]
    suggested_difficulty: Optional[bool]
# Misc
# Response in error case for all endpoints of the pool protocol
@streamable
@dataclass(frozen=True)
class ErrorResponse(Streamable):
    """Error body returned by any pool endpoint."""

    error_code: uint16  # one of PoolErrorCode
    error_message: Optional[str]
# Get the current authentication token according to "Farmer authentication" in SPECIFICATION.md
def get_current_authentication_token(timeout: uint8) -> uint64:
    """Return the current authentication token: minutes since the Unix epoch, bucketed by *timeout*."""
    minutes_since_epoch = int(time.time() / 60)
    return uint64(int(minutes_since_epoch / timeout))
# Validate a given authentication token against our local time
def validate_authentication_token(token: uint64, timeout: uint8):
    """Accept *token* if it lies within ±timeout buckets of our locally derived token."""
    expected = get_current_authentication_token(timeout)
    delta = token - expected
    if delta < 0:
        delta = -delta
    return delta <= timeout
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/__init__.py | flax/protocols/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/protocols/introducer_protocol.py | flax/protocols/introducer_protocol.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List
from flax.types.peer_info import TimestampedPeerInfo
from flax.util.streamable import Streamable, streamable
"""
Protocol to introducer
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@streamable
@dataclass(frozen=True)
class RequestPeersIntroducer(Streamable):
    """
    Return full list of peers
    """
@streamable
@dataclass(frozen=True)
class RespondPeersIntroducer(Streamable):
    """Introducer's reply to RequestPeersIntroducer."""

    peer_list: List[TimestampedPeerInfo]
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/harvester/harvester.py | flax/harvester/harvester.py | from __future__ import annotations
import asyncio
import concurrent
import dataclasses
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import flax.server.ws_connection as ws # lgtm [py/import-and-import-from]
from flax.consensus.constants import ConsensusConstants
from flax.plot_sync.sender import Sender
from flax.plotting.manager import PlotManager
from flax.plotting.util import (
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
add_plot_directory,
get_plot_directories,
remove_plot,
remove_plot_directory,
)
from flax.rpc.rpc_server import default_get_connections
from flax.server.outbound_message import NodeType
from flax.server.server import FlaxServer
log = logging.getLogger(__name__)
class Harvester:
    """Harvester service: owns the local plot files, keeps them refreshed, and
    synchronizes plot state to the connected farmer via ``plot_sync_sender``."""

    plot_manager: PlotManager
    plot_sync_sender: Sender
    root_path: Path
    _shut_down: bool
    executor: ThreadPoolExecutor  # thread pool used for blocking disk-prover lookups
    state_changed_callback: Optional[Callable]
    cached_challenges: List
    constants: ConsensusConstants
    _refresh_lock: asyncio.Lock
    event_loop: asyncio.events.AbstractEventLoop
    _server: Optional[FlaxServer]

    @property
    def server(self) -> FlaxServer:
        # This is a stop gap until the class usage is refactored such the values of
        # integral attributes are known at creation of the instance.
        if self._server is None:
            raise RuntimeError("server not assigned")
        return self._server

    def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants):
        self.log = log
        self.root_path = root_path
        # TODO, remove checks below later after some versions / time
        # Legacy config migration: the old scalar option is mapped onto the new
        # PlotsRefreshParameter section; the new section wins if both are present.
        refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter()
        if "plot_loading_frequency_seconds" in config:
            self.log.info(
                "`harvester.plot_loading_frequency_seconds` is deprecated. Consider replacing it with the new section "
                "`harvester.plots_refresh_parameter`. See `initial-config.yaml`."
            )
            refresh_parameter = dataclasses.replace(
                refresh_parameter, interval_seconds=config["plot_loading_frequency_seconds"]
            )
        if "plots_refresh_parameter" in config:
            refresh_parameter = PlotsRefreshParameter.from_json_dict(config["plots_refresh_parameter"])
        self.log.info(f"Using plots_refresh_parameter: {refresh_parameter}")
        self.plot_manager = PlotManager(
            root_path, refresh_parameter=refresh_parameter, refresh_callback=self._plot_refresh_callback
        )
        self.plot_sync_sender = Sender(self.plot_manager)
        self._shut_down = False
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
        self._server = None
        self.constants = constants
        self.cached_challenges = []
        self.state_changed_callback: Optional[Callable] = None
        self.parallel_read: bool = config.get("parallel_read", True)

    async def _start(self):
        # Must run inside the event loop: captures it for later thread-safe scheduling.
        self._refresh_lock = asyncio.Lock()
        self.event_loop = asyncio.get_running_loop()

    def _close(self):
        self._shut_down = True
        self.executor.shutdown(wait=True)
        self.plot_manager.stop_refreshing()
        self.plot_manager.reset()
        self.plot_sync_sender.stop()

    async def _await_closed(self):
        await self.plot_sync_sender.await_closed()

    def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]:
        # RPC helper: delegate to the shared connection-listing implementation.
        return default_get_connections(server=self.server, request_node_type=request_node_type)

    async def on_connect(self, connection: ws.WSFlaxConnection):
        pass

    def _set_state_changed_callback(self, callback: Callable):
        self.state_changed_callback = callback

    def state_changed(self, change: str, change_data: Optional[Dict[str, Any]] = None):
        # Notify the registered observer (e.g. the RPC/websocket layer), if any.
        if self.state_changed_callback is not None:
            self.state_changed_callback(change, change_data)

    def _plot_refresh_callback(self, event: PlotRefreshEvents, update_result: PlotRefreshResult):
        # Batch events are frequent, so they log at debug; start/done log at info.
        log_function = self.log.debug if event == PlotRefreshEvents.batch_processed else self.log.info
        log_function(
            f"_plot_refresh_callback: event {event.name}, loaded {len(update_result.loaded)}, "
            f"removed {len(update_result.removed)}, processed {update_result.processed}, "
            f"remaining {update_result.remaining}, "
            f"duration: {update_result.duration:.2f} seconds, "
            f"total plots: {len(self.plot_manager.plots)}"
        )
        # Mirror each refresh phase to the farmer through the plot sync protocol.
        if event == PlotRefreshEvents.started:
            self.plot_sync_sender.sync_start(update_result.remaining, self.plot_manager.initial_refresh())
        if event == PlotRefreshEvents.batch_processed:
            self.plot_sync_sender.process_batch(update_result.loaded, update_result.remaining)
        if event == PlotRefreshEvents.done:
            self.plot_sync_sender.sync_done(update_result.removed, update_result.duration)

    def on_disconnect(self, connection: ws.WSFlaxConnection):
        self.log.info(f"peer disconnected {connection.get_peer_logging()}")
        self.state_changed("close_connection")
        self.plot_sync_sender.stop()
        asyncio.run_coroutine_threadsafe(self.plot_sync_sender.await_closed(), asyncio.get_running_loop())
        self.plot_manager.stop_refreshing()

    def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
        """Return (plot summaries, filenames that failed to open, filenames missing keys)."""
        self.log.debug(f"get_plots prover items: {self.plot_manager.plot_count()}")
        response_plots: List[Dict] = []
        with self.plot_manager:  # lock while iterating the shared plot map
            for path, plot_info in self.plot_manager.plots.items():
                prover = plot_info.prover
                response_plots.append(
                    {
                        "filename": str(path),
                        "size": prover.get_size(),
                        "plot_id": prover.get_id(),
                        "pool_public_key": plot_info.pool_public_key,
                        "pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash,
                        "plot_public_key": plot_info.plot_public_key,
                        "file_size": plot_info.file_size,
                        "time_modified": int(plot_info.time_modified),
                    }
                )
        self.log.debug(
            f"get_plots response: plots: {len(response_plots)}, "
            f"failed_to_open_filenames: {len(self.plot_manager.failed_to_open_filenames)}, "
            f"no_key_filenames: {len(self.plot_manager.no_key_filenames)}"
        )
        return (
            response_plots,
            [str(s) for s, _ in self.plot_manager.failed_to_open_filenames.items()],
            [str(s) for s in self.plot_manager.no_key_filenames],
        )

    def delete_plot(self, str_path: str):
        remove_plot(Path(str_path))
        self.plot_manager.trigger_refresh()
        self.state_changed("plots")
        return True

    async def add_plot_directory(self, str_path: str) -> bool:
        add_plot_directory(self.root_path, str_path)
        self.plot_manager.trigger_refresh()
        return True

    async def get_plot_directories(self) -> List[str]:
        return get_plot_directories(self.root_path)

    async def remove_plot_directory(self, str_path: str) -> bool:
        remove_plot_directory(self.root_path, str_path)
        self.plot_manager.trigger_refresh()
        return True

    def set_server(self, server: FlaxServer) -> None:
        self._server = server
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/harvester/harvester_api.py | flax/harvester/harvester_api.py | from __future__ import annotations
import asyncio
import time
from pathlib import Path
from typing import List, Tuple
from blspy import AugSchemeMPL, G1Element, G2Element
from flax.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from flax.harvester.harvester import Harvester
from flax.plotting.util import PlotInfo, parse_plot_info
from flax.protocols import harvester_protocol
from flax.protocols.farmer_protocol import FarmingInfo
from flax.protocols.harvester_protocol import Plot, PlotSyncResponse
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.outbound_message import make_msg
from flax.server.ws_connection import WSFlaxConnection
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.api_decorators import api_request, peer_required
from flax.util.ints import uint8, uint32, uint64
from flax.wallet.derive_keys import master_sk_to_local_sk
class HarvesterAPI:
    """Peer-facing API of the harvester: handles messages arriving from the farmer."""

    harvester: Harvester

    def __init__(self, harvester: Harvester):
        self.harvester = harvester

    @peer_required
    @api_request
    async def harvester_handshake(
        self, harvester_handshake: harvester_protocol.HarvesterHandshake, peer: WSFlaxConnection
    ):
        """
        Handshake between the harvester and farmer. The harvester receives the pool public keys,
        as well as the farmer pks, which must be put into the plots, before the plotting process begins.
        We cannot use any plots which have different keys in them.
        """
        self.harvester.plot_manager.set_public_keys(
            harvester_handshake.farmer_public_keys, harvester_handshake.pool_public_keys
        )
        self.harvester.plot_sync_sender.set_connection(peer)
        await self.harvester.plot_sync_sender.start()
        self.harvester.plot_manager.start_refreshing()

    @peer_required
    @api_request
    async def new_signage_point_harvester(
        self, new_challenge: harvester_protocol.NewSignagePointHarvester, peer: WSFlaxConnection
    ):
        """
        The harvester receives a new signage point from the farmer, this happens at the start of each slot.
        The harvester does a few things:
        1. The harvester applies the plot filter for each of the plots, to select the proportion which are eligible
        for this signage point and challenge.
        2. The harvester gets the qualities for each plot. This is approximately 7 reads per plot which qualifies.
        Note that each plot may have 0, 1, 2, etc qualities for that challenge: but on average it will have 1.
        3. Checks the required_iters for each quality and the given signage point, to see which are eligible for
        inclusion (required_iters < sp_interval_iters).
        4. Looks up the full proof of space in the plot for each quality, approximately 64 reads per quality
        5. Returns the proof of space to the farmer
        """
        if not self.harvester.plot_manager.public_keys_available():
            # This means that we have not received the handshake yet
            self.harvester.log.debug("new_signage_point_harvester received with no keys available")
            return None

        self.harvester.log.debug(
            f"new_signage_point_harvester lookup: challenge_hash: {new_challenge.challenge_hash}, "
            f"sp_hash: {new_challenge.sp_hash}, signage_point_index: {new_challenge.signage_point_index}"
        )

        start = time.time()
        assert len(new_challenge.challenge_hash) == 32

        loop = asyncio.get_running_loop()

        def blocking_lookup(filename: Path, plot_info: PlotInfo) -> List[Tuple[bytes32, ProofOfSpace]]:
            # Uses the DiskProver object to lookup qualities. This is a blocking call,
            # so it should be run in a thread pool.
            try:
                plot_id = plot_info.prover.get_id()
                sp_challenge_hash = ProofOfSpace.calculate_pos_challenge(
                    plot_id,
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                )
                try:
                    quality_strings = plot_info.prover.get_qualities_for_challenge(sp_challenge_hash)
                except Exception as e:
                    self.harvester.log.error(f"Error using prover object {e}")
                    self.harvester.log.error(
                        f"File: (unknown) Plot ID: {plot_id.hex()}, "
                        f"challenge: {sp_challenge_hash}, plot_info: {plot_info}"
                    )
                    return []

                responses: List[Tuple[bytes32, ProofOfSpace]] = []
                if quality_strings is not None:
                    difficulty = new_challenge.difficulty
                    sub_slot_iters = new_challenge.sub_slot_iters
                    if plot_info.pool_contract_puzzle_hash is not None:
                        # If we are pooling, override the difficulty and sub slot iters with the pool threshold info.
                        # This will mean more proofs actually get found, but they are only submitted to the pool,
                        # not the blockchain
                        for pool_difficulty in new_challenge.pool_difficulties:
                            if pool_difficulty.pool_contract_puzzle_hash == plot_info.pool_contract_puzzle_hash:
                                difficulty = pool_difficulty.difficulty
                                sub_slot_iters = pool_difficulty.sub_slot_iters

                    # Found proofs of space (on average 1 is expected per plot)
                    for index, quality_str in enumerate(quality_strings):
                        required_iters: uint64 = calculate_iterations_quality(
                            self.harvester.constants.DIFFICULTY_CONSTANT_FACTOR,
                            quality_str,
                            plot_info.prover.get_size(),
                            difficulty,
                            new_challenge.sp_hash,
                        )
                        sp_interval_iters = calculate_sp_interval_iters(self.harvester.constants, sub_slot_iters)
                        if required_iters < sp_interval_iters:
                            # Found a very good proof of space! will fetch the whole proof from disk,
                            # then send to farmer
                            try:
                                proof_xs = plot_info.prover.get_full_proof(
                                    sp_challenge_hash, index, self.harvester.parallel_read
                                )
                            except Exception as e:
                                self.harvester.log.error(f"Exception fetching full proof for (unknown). {e}")
                                self.harvester.log.error(
                                    f"File: (unknown) Plot ID: {plot_id.hex()}, challenge: {sp_challenge_hash}, "
                                    f"plot_info: {plot_info}"
                                )
                                continue

                            responses.append(
                                (
                                    quality_str,
                                    ProofOfSpace(
                                        sp_challenge_hash,
                                        plot_info.pool_public_key,
                                        plot_info.pool_contract_puzzle_hash,
                                        plot_info.plot_public_key,
                                        uint8(plot_info.prover.get_size()),
                                        proof_xs,
                                    ),
                                )
                            )
                return responses
            except Exception as e:
                self.harvester.log.error(f"Unknown error: {e}")
                return []

        async def lookup_challenge(
            filename: Path, plot_info: PlotInfo
        ) -> Tuple[Path, List[harvester_protocol.NewProofOfSpace]]:
            # Executes a DiskProverLookup in a thread pool, and returns responses
            all_responses: List[harvester_protocol.NewProofOfSpace] = []
            if self.harvester._shut_down:
                return filename, []
            proofs_of_space_and_q: List[Tuple[bytes32, ProofOfSpace]] = await loop.run_in_executor(
                self.harvester.executor, blocking_lookup, filename, plot_info
            )
            for quality_str, proof_of_space in proofs_of_space_and_q:
                all_responses.append(
                    harvester_protocol.NewProofOfSpace(
                        new_challenge.challenge_hash,
                        new_challenge.sp_hash,
                        quality_str.hex() + str(filename.resolve()),
                        proof_of_space,
                        new_challenge.signage_point_index,
                    )
                )
            return filename, all_responses

        awaitables = []
        passed = 0
        total = 0
        with self.harvester.plot_manager:
            self.harvester.log.debug("new_signage_point_harvester lock acquired")
            for try_plot_filename, try_plot_info in self.harvester.plot_manager.plots.items():
                # Passes the plot filter (does not check sp filter yet though, since we have not reached sp)
                # This is being executed at the beginning of the slot
                total += 1
                if ProofOfSpace.passes_plot_filter(
                    self.harvester.constants,
                    try_plot_info.prover.get_id(),
                    new_challenge.challenge_hash,
                    new_challenge.sp_hash,
                ):
                    passed += 1
                    awaitables.append(lookup_challenge(try_plot_filename, try_plot_info))
            self.harvester.log.debug(f"new_signage_point_harvester {passed} plots passed the plot filter")

        # Concurrently executes all lookups on disk, to take advantage of multiple disk parallelism
        total_proofs_found = 0
        for filename_sublist_awaitable in asyncio.as_completed(awaitables):
            filename, sublist = await filename_sublist_awaitable
            time_taken = time.time() - start
            if time_taken > 5:
                self.harvester.log.warning(
                    f"Looking up qualities on (unknown) took: {time_taken}. This should be below 5 seconds "
                    f"to minimize risk of losing rewards."
                )
            else:
                pass
                # If you want additional logs, uncomment the following line
                # self.harvester.log.debug(f"Looking up qualities on (unknown) took: {time_taken}")
            for response in sublist:
                total_proofs_found += 1
                msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
                await peer.send_message(msg)

        # Summary statistics are reported back to the farmer after all lookups complete.
        now = uint64(int(time.time()))
        farming_info = FarmingInfo(
            new_challenge.challenge_hash,
            new_challenge.sp_hash,
            now,
            uint32(passed),
            uint32(total_proofs_found),
            uint32(total),
        )
        pass_msg = make_msg(ProtocolMessageTypes.farming_info, farming_info)
        await peer.send_message(pass_msg)

        found_time = time.time() - start
        self.harvester.log.info(
            f"{len(awaitables)} plots were eligible for farming {new_challenge.challenge_hash.hex()[:10]}..."
            f" Found {total_proofs_found} proofs. Time: {found_time:.5f} s. "
            f"Total {self.harvester.plot_manager.plot_count()} plots"
        )
        self.harvester.state_changed(
            "farming_info",
            {
                "challenge_hash": new_challenge.challenge_hash.hex(),
                "total_plots": self.harvester.plot_manager.plot_count(),
                "found_proofs": total_proofs_found,
                "eligible_plots": len(awaitables),
                "time": found_time,
            },
        )

    @api_request
    async def request_signatures(self, request: harvester_protocol.RequestSignatures):
        """
        The farmer requests a signature on the header hash, for one of the proofs that we found.
        A signature is created on the header hash using the harvester private key. This can also
        be used for pooling.
        """
        # plot_identifier is <quality hex (64 chars)><absolute plot path>; strip the quality prefix.
        plot_filename = Path(request.plot_identifier[64:]).resolve()
        with self.harvester.plot_manager:
            try:
                plot_info = self.harvester.plot_manager.plots[plot_filename]
            except KeyError:
                self.harvester.log.warning(f"KeyError plot {plot_filename} does not exist.")
                return None

            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(plot_info.prover.get_memo())

        local_sk = master_sk_to_local_sk(local_master_sk)

        # Pool-contract plots (puzzle-hash memo) require the taproot variant of the plot key.
        if isinstance(pool_public_key_or_puzzle_hash, G1Element):
            include_taproot = False
        else:
            assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
            include_taproot = True

        agg_pk = ProofOfSpace.generate_plot_public_key(local_sk.get_g1(), farmer_public_key, include_taproot)

        # This is only a partial signature. When combined with the farmer's half, it will
        # form a complete PrependSignature.
        message_signatures: List[Tuple[bytes32, G2Element]] = []
        for message in request.messages:
            signature: G2Element = AugSchemeMPL.sign(local_sk, message, agg_pk)
            message_signatures.append((message, signature))

        response: harvester_protocol.RespondSignatures = harvester_protocol.RespondSignatures(
            request.plot_identifier,
            request.challenge_hash,
            request.sp_hash,
            local_sk.get_g1(),
            farmer_public_key,
            message_signatures,
        )

        return make_msg(ProtocolMessageTypes.respond_signatures, response)

    @api_request
    async def request_plots(self, _: harvester_protocol.RequestPlots):
        # Convert the dict summaries from Harvester.get_plots() into protocol Plot objects.
        plots_response = []
        plots, failed_to_open_filenames, no_key_filenames = self.harvester.get_plots()
        for plot in plots:
            plots_response.append(
                Plot(
                    plot["filename"],
                    plot["size"],
                    plot["plot_id"],
                    plot["pool_public_key"],
                    plot["pool_contract_puzzle_hash"],
                    plot["plot_public_key"],
                    plot["file_size"],
                    plot["time_modified"],
                )
            )

        response = harvester_protocol.RespondPlots(plots_response, failed_to_open_filenames, no_key_filenames)
        return make_msg(ProtocolMessageTypes.respond_plots, response)

    @api_request
    async def plot_sync_response(self, response: PlotSyncResponse):
        # Forward the farmer's ack to the plot sync state machine.
        self.harvester.plot_sync_sender.set_response(response)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/harvester/__init__.py | flax/harvester/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/farmer/farmer_api.py | flax/farmer/farmer_api.py | from __future__ import annotations
import json
import time
from typing import Any, Dict, List, Optional, Tuple
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import flax.server.ws_connection as ws
from flax import __version__
from flax.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from flax.farmer.farmer import Farmer
from flax.protocols import farmer_protocol, harvester_protocol
from flax.protocols.harvester_protocol import (
PlotSyncDone,
PlotSyncPathList,
PlotSyncPlotList,
PlotSyncStart,
PoolDifficulty,
)
from flax.protocols.pool_protocol import (
PoolErrorCode,
PostPartialPayload,
PostPartialRequest,
get_current_authentication_token,
)
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.server.outbound_message import NodeType, make_msg
from flax.server.server import ssl_context_for_root
from flax.ssl.create_ssl import get_mozilla_ca_crt
from flax.types.blockchain_format.pool_target import PoolTarget
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.util.api_decorators import api_request, peer_required
from flax.util.ints import uint32, uint64
def strip_old_entries(pairs: List[Tuple[float, Any]], before: float) -> List[Tuple[float, Any]]:
    """Drop leading entries older than *before* from a time-ordered (timestamp, value) list.

    Returns the original list object unchanged when nothing needs stripping,
    a suffix slice otherwise, and an empty list when every entry is too old.
    """
    for idx, (timestamp, _points) in enumerate(pairs):
        if timestamp >= before:
            return pairs if idx == 0 else pairs[idx:]
    return []
class FarmerAPI:
farmer: Farmer
    def __init__(self, farmer) -> None:
        # Keep a reference to the Farmer service whose state these handlers read and mutate.
        self.farmer = farmer
    @api_request
    @peer_required
    async def new_proof_of_space(
        self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSFlaxConnection
    ):
        """
        This is a response from the harvester, for a NewChallenge. Here we check if the proof
        of space is sufficiently good, and if so, we ask for the whole proof.

        Two independent paths are taken for a valid proof: (1) if it beats the chain's
        sp_interval_iters we request signatures to start the block-making flow, and
        (2) if the plot is a pool plot we additionally submit a partial to the pool.
        """
        if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
            self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
            self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))

        max_pos_per_sp = 5

        if self.farmer.config.get("selected_network") != "mainnet":
            # This is meant to make testnets more stable, when difficulty is very low
            if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
                self.farmer.log.info(
                    f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
                    f"{new_proof_of_space.sp_hash}"
                )
                return None

        if new_proof_of_space.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(
                f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
            )
            return None

        sps = self.farmer.sps[new_proof_of_space.sp_hash]
        for sp in sps:
            computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
                self.farmer.constants,
                new_proof_of_space.challenge_hash,
                new_proof_of_space.sp_hash,
            )
            if computed_quality_string is None:
                self.farmer.log.error(f"Invalid proof of space {new_proof_of_space.proof}")
                return None

            self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1

            required_iters: uint64 = calculate_iterations_quality(
                self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                computed_quality_string,
                new_proof_of_space.proof.size,
                sp.difficulty,
                new_proof_of_space.sp_hash,
            )

            # If the iters are good enough to make a block, proceed with the block making flow
            if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
                # Proceed at getting the signatures for this PoSpace
                request = harvester_protocol.RequestSignatures(
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    [sp.challenge_chain_sp, sp.reward_chain_sp],
                )

                if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
                    self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = []
                self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
                    (
                        new_proof_of_space.plot_identifier,
                        new_proof_of_space.proof,
                    )
                )
                self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(int(time.time()))
                self.farmer.quality_str_to_identifiers[computed_quality_string] = (
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    peer.peer_node_id,
                )
                self.farmer.cache_add_time[computed_quality_string] = uint64(int(time.time()))

                await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))

            p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
            if p2_singleton_puzzle_hash is not None:
                # Otherwise, send the proof of space to the pool
                # When we win a block, we also send the partial to the pool
                if p2_singleton_puzzle_hash not in self.farmer.pool_state:
                    self.farmer.log.info(f"Did not find pool info for {p2_singleton_puzzle_hash}")
                    return
                pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
                pool_url = pool_state_dict["pool_config"].pool_url
                if pool_url == "":
                    return

                if pool_state_dict["current_difficulty"] is None:
                    self.farmer.log.warning(
                        f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                        f"check communication with the pool, skipping this partial to {pool_url}."
                    )
                    return

                # Re-check the proof against the pool's (usually much lower) difficulty.
                required_iters = calculate_iterations_quality(
                    self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
                    computed_quality_string,
                    new_proof_of_space.proof.size,
                    pool_state_dict["current_difficulty"],
                    new_proof_of_space.sp_hash,
                )
                if required_iters >= calculate_sp_interval_iters(
                    self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
                ):
                    self.farmer.log.info(
                        f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
                    )
                    return

                authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
                if authentication_token_timeout is None:
                    self.farmer.log.warning(
                        f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                        f", check communication with the pool."
                    )
                    return

                # Submit partial to pool
                is_eos = new_proof_of_space.signage_point_index == 0

                payload = PostPartialPayload(
                    pool_state_dict["pool_config"].launcher_id,
                    get_current_authentication_token(authentication_token_timeout),
                    new_proof_of_space.proof,
                    new_proof_of_space.sp_hash,
                    is_eos,
                    peer.peer_node_id,
                )

                # The plot key is 2/2 so we need the harvester's half of the signature
                m_to_sign = payload.get_hash()
                request = harvester_protocol.RequestSignatures(
                    new_proof_of_space.plot_identifier,
                    new_proof_of_space.challenge_hash,
                    new_proof_of_space.sp_hash,
                    [m_to_sign],
                )
                response: Any = await peer.request_signatures(request)
                if not isinstance(response, harvester_protocol.RespondSignatures):
                    self.farmer.log.error(f"Invalid response from harvester: {response}")
                    return

                assert len(response.message_signatures) == 1

                plot_signature: Optional[G2Element] = None
                for sk in self.farmer.get_private_keys():
                    pk = sk.get_g1()
                    if pk == response.farmer_pk:
                        agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, True)
                        assert agg_pk == new_proof_of_space.proof.plot_public_key
                        sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        taproot_sig: G2Element = AugSchemeMPL.sign(taproot_sk, m_to_sign, agg_pk)

                        plot_signature = AugSchemeMPL.aggregate(
                            [sig_farmer, response.message_signatures[0][1], taproot_sig]
                        )
                        assert AugSchemeMPL.verify(agg_pk, m_to_sign, plot_signature)

                authentication_sk: Optional[PrivateKey] = self.farmer.get_authentication_sk(
                    pool_state_dict["pool_config"]
                )
                if authentication_sk is None:
                    self.farmer.log.error(f"No authentication sk for {p2_singleton_puzzle_hash}")
                    return

                authentication_signature = AugSchemeMPL.sign(authentication_sk, m_to_sign)

                assert plot_signature is not None
                agg_sig: G2Element = AugSchemeMPL.aggregate([plot_signature, authentication_signature])

                post_partial_request: PostPartialRequest = PostPartialRequest(payload, agg_sig)
                self.farmer.log.info(
                    f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
                )
                pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
                pool_state_dict["points_found_24h"].append((time.time(), pool_state_dict["current_difficulty"]))
                self.farmer.log.debug(f"POST /partial request {post_partial_request}")
                try:
                    async with aiohttp.ClientSession() as session:
                        async with session.post(
                            f"{pool_url}/partial",
                            json=post_partial_request.to_json_dict(),
                            ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.farmer.log),
                            headers={"User-Agent": f"Flax Blockchain v.{__version__}"},
                        ) as resp:
                            if resp.ok:
                                pool_response: Dict = json.loads(await resp.text())
                                self.farmer.log.info(f"Pool response: {pool_response}")
                                if "error_code" in pool_response:
                                    self.farmer.log.error(
                                        f"Error in pooling: "
                                        f"{pool_response['error_code'], pool_response['error_message']}"
                                    )
                                    pool_state_dict["pool_errors_24h"].append(pool_response)
                                    if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
                                        self.farmer.log.error(
                                            "Partial not good enough, forcing pool farmer update to "
                                            "get our current difficulty."
                                        )
                                        pool_state_dict["next_farmer_update"] = 0
                                        await self.farmer.update_pool_state()
                                else:
                                    new_difficulty = pool_response["new_difficulty"]
                                    pool_state_dict["points_acknowledged_since_start"] += new_difficulty
                                    pool_state_dict["points_acknowledged_24h"].append((time.time(), new_difficulty))
                                    pool_state_dict["current_difficulty"] = new_difficulty
                            else:
                                self.farmer.log.error(f"Error sending partial to {pool_url}, {resp.status}")
                except Exception as e:
                    self.farmer.log.error(f"Error connecting to pool: {e}")
                    return

                self.farmer.state_changed(
                    "submitted_partial",
                    {
                        "launcher_id": post_partial_request.payload.launcher_id.hex(),
                        "pool_url": pool_url,
                        "current_difficulty": pool_state_dict["current_difficulty"],
                        "points_acknowledged_since_start": pool_state_dict["points_acknowledged_since_start"],
                        "points_acknowledged_24h": pool_state_dict["points_acknowledged_24h"],
                    },
                )

                return
    @api_request
    async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
        """
        There are two cases: receiving signatures for sps, or receiving signatures for the block.

        For signage-point signatures we aggregate harvester + farmer (+ optional taproot)
        shares and declare the proof of space to all connected full nodes. For block
        (foliage) signatures we aggregate the same three shares and send the signed
        values back to the full nodes.
        """
        # NOTE: the lookup key is the sp hash, but the log message prints the challenge
        # hash — slightly misleading when diagnosing cache misses.
        if response.sp_hash not in self.farmer.sps:
            self.farmer.log.warning(f"Do not have challenge hash {response.challenge_hash}")
            return None
        is_sp_signatures: bool = False
        sps = self.farmer.sps[response.sp_hash]
        signage_point_index = sps[0].signage_point_index
        found_sp_hash_debug = False
        # Debug-only consistency check: if the first signed message is the sp hash,
        # some candidate's reward_chain_sp must match the second signed message.
        for sp_candidate in sps:
            if response.sp_hash == response.message_signatures[0][0]:
                found_sp_hash_debug = True
                if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
                    is_sp_signatures = True
        if found_sp_hash_debug:
            assert is_sp_signatures

        # Find the proof of space this signature response belongs to.
        pospace = None
        for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
            if plot_identifier == response.plot_identifier:
                pospace = candidate_pospace
        assert pospace is not None
        # Plots assigned to a pool contract carry an extra taproot key share.
        include_taproot: bool = pospace.pool_contract_puzzle_hash is not None

        computed_quality_string = pospace.verify_and_get_quality_string(
            self.farmer.constants, response.challenge_hash, response.sp_hash
        )
        if computed_quality_string is None:
            self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
            return None

        if is_sp_signatures:
            (
                challenge_chain_sp,
                challenge_chain_sp_harv_sig,
            ) = response.message_signatures[0]
            reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
            # Find the farmer private key matching the pk the harvester signed under.
            for sk in self.farmer.get_private_keys():
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, challenge_chain_sp, agg_pk)
                        taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(taproot_sk, reward_chain_sp, agg_pk)
                    else:
                        # G2Element() is the identity, a no-op inside aggregate().
                        taproot_share_cc_sp = G2Element()
                        taproot_share_rc_sp = G2Element()
                    farmer_share_cc_sp = AugSchemeMPL.sign(sk, challenge_chain_sp, agg_pk)
                    agg_sig_cc_sp = AugSchemeMPL.aggregate(
                        [challenge_chain_sp_harv_sig, farmer_share_cc_sp, taproot_share_cc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, challenge_chain_sp, agg_sig_cc_sp)

                    # This means it passes the sp filter
                    farmer_share_rc_sp = AugSchemeMPL.sign(sk, reward_chain_sp, agg_pk)
                    agg_sig_rc_sp = AugSchemeMPL.aggregate(
                        [reward_chain_sp_harv_sig, farmer_share_rc_sp, taproot_share_rc_sp]
                    )
                    assert AugSchemeMPL.verify(agg_pk, reward_chain_sp, agg_sig_rc_sp)

                    if pospace.pool_public_key is not None:
                        # Original (OG) plot: farmer signs the pool target itself.
                        assert pospace.pool_contract_puzzle_hash is None
                        pool_pk = bytes(pospace.pool_public_key)
                        if pool_pk not in self.farmer.pool_sks_map:
                            self.farmer.log.error(
                                f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
                            )
                            return None
                        pool_target: Optional[PoolTarget] = PoolTarget(self.farmer.pool_target, uint32(0))
                        assert pool_target is not None
                        pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
                            self.farmer.pool_sks_map[pool_pk], bytes(pool_target)
                        )
                    else:
                        # Pool-contract (NFT) plot: no explicit pool target needed.
                        assert pospace.pool_contract_puzzle_hash is not None
                        pool_target = None
                        pool_target_signature = None

                    request = farmer_protocol.DeclareProofOfSpace(
                        response.challenge_hash,
                        challenge_chain_sp,
                        signage_point_index,
                        reward_chain_sp,
                        pospace,
                        agg_sig_cc_sp,
                        agg_sig_rc_sp,
                        self.farmer.farmer_target,
                        pool_target,
                        pool_target_signature,
                    )
                    self.farmer.state_changed("proof", {"proof": request, "passed_filter": True})
                    msg = make_msg(ProtocolMessageTypes.declare_proof_of_space, request)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
                    return None

        else:
            # This is a response with block signatures
            for sk in self.farmer.get_private_keys():
                (
                    foliage_block_data_hash,
                    foliage_sig_harvester,
                ) = response.message_signatures[0]
                (
                    foliage_transaction_block_hash,
                    foliage_transaction_block_sig_harvester,
                ) = response.message_signatures[1]
                pk = sk.get_g1()
                if pk == response.farmer_pk:
                    agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk, include_taproot)
                    assert agg_pk == pospace.plot_public_key
                    if include_taproot:
                        taproot_sk = ProofOfSpace.generate_taproot_sk(response.local_pk, pk)
                        foliage_sig_taproot: G2Element = AugSchemeMPL.sign(taproot_sk, foliage_block_data_hash, agg_pk)
                        foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
                            taproot_sk, foliage_transaction_block_hash, agg_pk
                        )
                    else:
                        foliage_sig_taproot = G2Element()
                        foliage_transaction_block_sig_taproot = G2Element()

                    foliage_sig_farmer = AugSchemeMPL.sign(sk, foliage_block_data_hash, agg_pk)
                    foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(sk, foliage_transaction_block_hash, agg_pk)

                    foliage_agg_sig = AugSchemeMPL.aggregate(
                        [foliage_sig_harvester, foliage_sig_farmer, foliage_sig_taproot]
                    )
                    foliage_block_agg_sig = AugSchemeMPL.aggregate(
                        [
                            foliage_transaction_block_sig_harvester,
                            foliage_transaction_block_sig_farmer,
                            foliage_transaction_block_sig_taproot,
                        ]
                    )
                    assert AugSchemeMPL.verify(agg_pk, foliage_block_data_hash, foliage_agg_sig)
                    assert AugSchemeMPL.verify(agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)

                    request_to_nodes = farmer_protocol.SignedValues(
                        computed_quality_string,
                        foliage_agg_sig,
                        foliage_block_agg_sig,
                    )

                    msg = make_msg(ProtocolMessageTypes.signed_values, request_to_nodes)
                    await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
    """
    FARMER PROTOCOL (FARMER <-> FULL NODE)
    """

    @api_request
    async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
        """
        Broadcast a new signage point from a full node to all harvesters, attaching
        per-pool difficulties, and cache it so later proof responses can be matched.
        """
        try:
            pool_difficulties: List[PoolDifficulty] = []
            for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
                if pool_dict["pool_config"].pool_url == "":
                    # Self pooling
                    continue
                if pool_dict["current_difficulty"] is None:
                    self.farmer.log.warning(
                        f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
                        f"check communication with the pool, skipping this signage point, pool: "
                        f"{pool_dict['pool_config'].pool_url} "
                    )
                    continue
                pool_difficulties.append(
                    PoolDifficulty(
                        pool_dict["current_difficulty"],
                        self.farmer.constants.POOL_SUB_SLOT_ITERS,
                        p2_singleton_puzzle_hash,
                    )
                )
            message = harvester_protocol.NewSignagePointHarvester(
                new_signage_point.challenge_hash,
                new_signage_point.difficulty,
                new_signage_point.sub_slot_iters,
                new_signage_point.signage_point_index,
                new_signage_point.challenge_chain_sp,
                pool_difficulties,
            )

            msg = make_msg(ProtocolMessageTypes.new_signage_point_harvester, message)
            await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
            if new_signage_point.challenge_chain_sp not in self.farmer.sps:
                self.farmer.sps[new_signage_point.challenge_chain_sp] = []
        finally:
            # Age out old 24h information for every signage point regardless
            # of any failures.  Note that this still lets old data remain if
            # the client isn't receiving signage points.
            cutoff_24h = time.time() - (24 * 60 * 60)
            for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
                for key in ["points_found_24h", "points_acknowledged_24h"]:
                    if key not in pool_dict:
                        continue

                    pool_dict[key] = strip_old_entries(pairs=pool_dict[key], before=cutoff_24h)

        # Reached only when the try body completed, i.e. self.farmer.sps has the key.
        if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
            self.farmer.log.debug(f"Duplicate signage point {new_signage_point.signage_point_index}")
            return

        self.farmer.sps[new_signage_point.challenge_chain_sp].append(new_signage_point)
        self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(int(time.time()))
        self.farmer.state_changed("new_signage_point", {"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash, full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
}
},
)
    @api_request
    @peer_required
    async def respond_plots(self, _: harvester_protocol.RespondPlots, peer: ws.WSFlaxConnection):
        # A RespondPlots arriving here means the harvester answered after the farmer
        # stopped waiting for it (presumably superseded by the plot_sync_* handlers
        # below — TODO confirm); log and drop.
        self.farmer.log.warning(f"Respond plots came too late from: {peer.get_peer_logging()}")
@api_request
@peer_required
async def plot_sync_start(self, message: PlotSyncStart, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].sync_started(message)
@api_request
@peer_required
async def plot_sync_loaded(self, message: PlotSyncPlotList, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_loaded(message)
@api_request
@peer_required
async def plot_sync_removed(self, message: PlotSyncPathList, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_removed(message)
@api_request
@peer_required
async def plot_sync_invalid(self, message: PlotSyncPathList, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_invalid(message)
@api_request
@peer_required
async def plot_sync_keys_missing(self, message: PlotSyncPathList, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_keys_missing(message)
@api_request
@peer_required
async def plot_sync_duplicates(self, message: PlotSyncPathList, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].process_duplicates(message)
@api_request
@peer_required
async def plot_sync_done(self, message: PlotSyncDone, peer: ws.WSFlaxConnection):
await self.farmer.plot_sync_receivers[peer.peer_node_id].sync_done(message)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/farmer/farmer.py | flax/farmer/farmer.py | from __future__ import annotations
import asyncio
import json
import logging
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import flax.server.ws_connection as ws # lgtm [py/import-and-import-from]
from flax.consensus.constants import ConsensusConstants
from flax.daemon.keychain_proxy import KeychainProxy, connect_to_keychain_and_validate, wrap_local_keychain
from flax.plot_sync.delta import Delta
from flax.plot_sync.receiver import Receiver
from flax.pools.pool_config import PoolWalletConfig, add_auth_key, load_pool_config
from flax.protocols import farmer_protocol, harvester_protocol
from flax.protocols.pool_protocol import (
AuthenticationPayload,
ErrorResponse,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
get_current_authentication_token,
)
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.rpc.rpc_server import default_get_connections
from flax.server.outbound_message import NodeType, make_msg
from flax.server.server import ssl_context_for_root
from flax.server.ws_connection import WSFlaxConnection
from flax.ssl.create_ssl import get_mozilla_ca_crt
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.bech32m import decode_puzzle_hash
from flax.util.byte_types import hexstr_to_bytes
from flax.util.config import config_path_for_filename, load_config, lock_and_load_config, save_config
from flax.util.errors import KeychainProxyConnectionFailure
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint16, uint32, uint64
from flax.util.keychain import Keychain
from flax.wallet.derive_keys import (
find_authentication_sk,
find_owner_sk,
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
match_address_to_sk,
)
from flax.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
# Tree hash of the singleton top-layer puzzle, shared by pool-related code.
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()

log = logging.getLogger(__name__)

# How often (seconds) to refresh pool info / retry after a failure / refresh farmer info.
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_INFO_FAILURE_RETRY_INTERVAL: int = 120
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300

# Section marker kept from the original layout.
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
    def __init__(
        self,
        root_path: Path,
        farmer_config: Dict,
        pool_config: Dict,
        consensus_constants: ConsensusConstants,
        local_keychain: Optional[Keychain] = None,
    ):
        """Initialize in-memory farmer state; no keys are loaded and no I/O runs until `_start`."""
        self.keychain_proxy: Optional[KeychainProxy] = None
        self.local_keychain = local_keychain
        self._root_path = root_path
        self.config = farmer_config
        self.pool_config = pool_config
        # Keep track of all sps, keyed on challenge chain signage point hash
        self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}

        # Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
        self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}

        # Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
        self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}

        # number of responses to each signage point
        self.number_of_responses: Dict[bytes32, int] = {}

        # A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
        # to periodically clear the memory
        self.cache_add_time: Dict[bytes32, uint64] = {}

        # One plot-sync Receiver per connected harvester, keyed by peer node id.
        self.plot_sync_receivers: Dict[bytes32, Receiver] = {}

        # Background tasks created in `_start`, awaited in `_await_closed`.
        self.cache_clear_task: Optional[asyncio.Task] = None
        self.update_pool_state_task: Optional[asyncio.Task] = None
        self.constants = consensus_constants
        self._shut_down = False
        self.server: Any = None
        self.state_changed_callback: Optional[Callable] = None
        self.log = log

        self.started = False
        self.harvester_handshake_task: Optional[asyncio.Task] = None

        # From p2_singleton_puzzle_hash to pool state dict
        self.pool_state: Dict[bytes32, Dict] = {}

        # From p2_singleton to auth PrivateKey
        self.authentication_keys: Dict[bytes32, PrivateKey] = {}

        # Last time we updated pool_state based on the config file
        self.last_config_access_time: uint64 = uint64(0)
    def get_connections(self, request_node_type: Optional[NodeType]) -> List[Dict[str, Any]]:
        """Return connection info dicts for this server, optionally filtered by node type."""
        return default_get_connections(server=self.server, request_node_type=request_node_type)
async def ensure_keychain_proxy(self) -> KeychainProxy:
if self.keychain_proxy is None:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure()
return self.keychain_proxy
async def get_all_private_keys(self):
keychain_proxy = await self.ensure_keychain_proxy()
return await keychain_proxy.get_all_private_keys()
    async def setup_keys(self) -> bool:
        """Load farmer/pool keys and reward targets from the keychain and config.

        Returns False (caller retries) when the keychain is unreachable, no keys
        exist, or the reward addresses are missing from the config.
        """
        no_keys_error_str = "No keys exist. Please run 'flax keys generate' or open the UI."
        try:
            self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
        except KeychainProxyConnectionFailure:
            return False
        # Farmer and pool child keys are derived from every root key.
        self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
            master_sk_to_pool_sk(sk) for sk in self.all_root_sks
        ]

        if len(self.get_public_keys()) == 0:
            log.warning(no_keys_error_str)
            return False

        # Fall back to the on-disk config when the injected dicts lack the target address.
        config = load_config(self._root_path, "config.yaml")
        if "xfx_target_address" not in self.config:
            self.config = config["farmer"]
        if "xfx_target_address" not in self.pool_config:
            self.pool_config = config["pool"]
        if "xfx_target_address" not in self.config or "xfx_target_address" not in self.pool_config:
            log.debug("xfx_target_address missing in the config")
            return False

        # This is the farmer configuration
        self.farmer_target_encoded = self.config["xfx_target_address"]
        self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)

        self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]

        # This is the self pooling configuration, which is only used for original self-pooled plots
        self.pool_target_encoded = self.pool_config["xfx_target_address"]
        self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
        self.pool_sks_map: Dict = {}
        for key in self.get_private_keys():
            self.pool_sks_map[bytes(key.get_g1())] = key

        assert len(self.farmer_target) == 32
        assert len(self.pool_target) == 32
        if len(self.pool_sks_map) == 0:
            log.warning(no_keys_error_str)
            return False

        return True
    async def _start(self):
        """Kick off a background task that retries key setup, then starts the periodic tasks."""

        async def start_task():
            # `Farmer.setup_keys` returns `False` if there are no keys setup yet. In this case we just try until it
            # succeeds or until we need to shut down.
            while not self._shut_down:
                if await self.setup_keys():
                    self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
                    self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
                    log.debug("start_task: initialized")
                    self.started = True
                    return
                await asyncio.sleep(1)

        # Fire-and-forget: `_start` returns immediately while setup retries in the background.
        asyncio.create_task(start_task())
    def _close(self):
        """Signal all background loops to stop; `_await_closed` waits for them to finish."""
        self._shut_down = True
    async def _await_closed(self, shutting_down: bool = True):
        """Wait for background tasks to finish and, on full shutdown, close the keychain proxy.

        Assumes `_close` was called first so the periodic tasks actually exit.
        """
        if self.cache_clear_task is not None:
            await self.cache_clear_task
        if self.update_pool_state_task is not None:
            await self.update_pool_state_task
        if shutting_down and self.keychain_proxy is not None:
            proxy = self.keychain_proxy
            self.keychain_proxy = None
            await proxy.close()
            await asyncio.sleep(0.5)  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
        self.started = False
    def _set_state_changed_callback(self, callback: Callable):
        """Register the callback invoked by `state_changed` (used by the RPC/UI layer)."""
        self.state_changed_callback = callback
    async def on_connect(self, peer: WSFlaxConnection):
        """Handle a new peer; for harvesters, register a plot-sync Receiver and handshake asynchronously."""
        self.state_changed("add_connection", {})

        async def handshake_task():
            # Wait until the task in `Farmer._start` is done so that we have keys available for the handshake. Bail out
            # early if we need to shut down or if the harvester is not longer connected.
            while not self.started and not self._shut_down and peer in self.server.get_connections():
                await asyncio.sleep(1)

            if self._shut_down:
                log.debug("handshake_task: shutdown")
                self.harvester_handshake_task = None
                return

            if peer not in self.server.get_connections():
                log.debug("handshake_task: disconnected")
                self.harvester_handshake_task = None
                return

            # Sends a handshake to the harvester
            handshake = harvester_protocol.HarvesterHandshake(
                self.get_public_keys(),
                self.pool_public_keys,
            )
            msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
            await peer.send_message(msg)
            self.harvester_handshake_task = None

        if peer.connection_type is NodeType.HARVESTER:
            self.plot_sync_receivers[peer.peer_node_id] = Receiver(peer, self.plot_sync_callback)
            self.harvester_handshake_task = asyncio.create_task(handshake_task())
    def set_server(self, server):
        """Attach the FlaxServer instance used for peer messaging."""
        self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
    def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
        """Log a pool HTTP failure and record it in the pool's 24h error list.

        Assumes `p2_singleton_puzzle_hash` already has an entry in `self.pool_state`
        (created by `update_pool_state`); a missing entry would raise KeyError.
        """
        self.log.error(error_message)
        self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
            ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
        )
    def on_disconnect(self, connection: ws.WSFlaxConnection):
        """Handle a peer disconnect; for harvesters, drop its plot-sync Receiver and notify subscribers."""
        self.log.info(f"peer disconnected {connection.get_peer_logging()}")
        self.state_changed("close_connection", {})
        if connection.connection_type is NodeType.HARVESTER:
            del self.plot_sync_receivers[connection.peer_node_id]
            self.state_changed("harvester_removed", {"node_id": connection.peer_node_id})
async def plot_sync_callback(self, peer_id: bytes32, delta: Optional[Delta]) -> None:
log.debug(f"plot_sync_callback: peer_id {peer_id}, delta {delta}")
receiver: Receiver = self.plot_sync_receivers[peer_id]
harvester_updated: bool = delta is not None and not delta.empty()
if receiver.initial_sync() or harvester_updated:
self.state_changed("harvester_update", receiver.to_dict(True))
    async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
        """GET /pool_info from the configured pool.

        Returns the parsed JSON dict on success; on HTTP error or exception the
        failure is recorded via `handle_failed_pool_response` and None is returned.
        """
        try:
            async with aiohttp.ClientSession(trust_env=True) as session:
                async with session.get(
                    f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        self.log.info(f"GET /pool_info response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_get_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
    ) -> Optional[Dict]:
        """GET /farmer from the pool, authenticated with a signed time-bucketed token.

        Returns the parsed JSON response (which may itself carry an "error_code");
        returns None on transport-level failure.
        """
        # The pool verifies this signature over (method, launcher_id, target, token).
        authentication_token = get_current_authentication_token(authentication_token_timeout)
        message: bytes32 = std_hash(
            AuthenticationPayload(
                "get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
            )
        )
        signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
        get_farmer_params = {
            "launcher_id": pool_config.launcher_id.hex(),
            "authentication_token": authentication_token,
            "signature": bytes(signature).hex(),
        }
        try:
            async with aiohttp.ClientSession(trust_env=True) as session:
                async with session.get(
                    f"{pool_config.pool_url}/farmer",
                    params=get_farmer_params,
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"GET /farmer response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_post_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
    ) -> Optional[Dict]:
        """POST /farmer to register this farmer with the pool, signed by the owner key.

        Returns the parsed JSON response, or None on transport-level failure.
        """
        auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
        assert auth_sk is not None
        post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
            pool_config.launcher_id,
            get_current_authentication_token(authentication_token_timeout),
            auth_sk.get_g1(),
            pool_config.payout_instructions,
            None,
        )
        assert owner_sk.get_g1() == pool_config.owner_public_key
        signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
        post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)

        self.log.debug(f"POST /farmer request {post_farmer_request}")
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{pool_config.pool_url}/farmer",
                    json=post_farmer_request.to_json_dict(),
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"POST /farmer response: {response}")
                        return response
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
            )
        return None
    async def _pool_put_farmer(
        self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
    ) -> None:
        """PUT /farmer to update this farmer's record on the pool, signed by the owner key.

        Fire-and-forget: the response is only logged / recorded, never returned.
        """
        auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
        assert auth_sk is not None
        put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
            pool_config.launcher_id,
            get_current_authentication_token(authentication_token_timeout),
            auth_sk.get_g1(),
            pool_config.payout_instructions,
            None,
        )
        assert owner_sk.get_g1() == pool_config.owner_public_key
        signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
        put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)

        self.log.debug(f"PUT /farmer request {put_farmer_request}")
        try:
            async with aiohttp.ClientSession() as session:
                async with session.put(
                    f"{pool_config.pool_url}/farmer",
                    json=put_farmer_request.to_json_dict(),
                    ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
                ) as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        log_level = logging.INFO
                        if "error_code" in response:
                            log_level = logging.WARNING
                            self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
                        self.log.log(log_level, f"PUT /farmer response: {response}")
                    else:
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
            )
def get_authentication_sk(self, pool_config: PoolWalletConfig) -> Optional[PrivateKey]:
if pool_config.p2_singleton_puzzle_hash in self.authentication_keys:
return self.authentication_keys[pool_config.p2_singleton_puzzle_hash]
auth_sk: Optional[PrivateKey] = find_authentication_sk(self.all_root_sks, pool_config.owner_public_key)
if auth_sk is not None:
self.authentication_keys[pool_config.p2_singleton_puzzle_hash] = auth_sk
return auth_sk
    async def update_pool_state(self):
        """Refresh state for every configured pool: pool info, farmer registration, difficulty.

        Runs GET /pool_info and GET /farmer on their own timers; POSTs a new farmer
        record when the pool does not know us, and PUTs an update when payout
        instructions changed or the pool rejected our signature. Per-pool errors are
        caught and logged so one bad pool cannot block the others.
        """
        config = load_config(self._root_path, "config.yaml")

        pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
        for pool_config in pool_config_list:
            p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash

            try:
                authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
                if authentication_sk is None:
                    self.log.error(f"Could not find authentication sk for {p2_singleton_puzzle_hash}")
                    continue

                add_auth_key(self._root_path, pool_config, authentication_sk.get_g1())

                if p2_singleton_puzzle_hash not in self.pool_state:
                    # First time we see this pool: seed a fresh stats/state record.
                    self.pool_state[p2_singleton_puzzle_hash] = {
                        "points_found_since_start": 0,
                        "points_found_24h": [],
                        "points_acknowledged_since_start": 0,
                        "points_acknowledged_24h": [],
                        "next_farmer_update": 0,
                        "next_pool_info_update": 0,
                        "current_points": 0,
                        "current_difficulty": None,
                        "pool_errors_24h": [],
                        "authentication_token_timeout": None,
                    }
                    self.log.info(f"Added pool: {pool_config}")
                pool_state = self.pool_state[p2_singleton_puzzle_hash]
                pool_state["pool_config"] = pool_config

                # Skip state update when self pooling
                if pool_config.pool_url == "":
                    continue

                enforce_https = config["full_node"]["selected_network"] == "mainnet"
                if enforce_https and not pool_config.pool_url.startswith("https://"):
                    self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
                    continue

                # TODO: Improve error handling below, inform about unexpected failures
                if time.time() >= pool_state["next_pool_info_update"]:
                    pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
                    # Makes a GET request to the pool to get the updated information
                    pool_info = await self._pool_get_pool_info(pool_config)
                    if pool_info is not None and "error_code" not in pool_info:
                        pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
                        # Only update the first time from GET /pool_info, gets updated from GET /farmer later
                        if pool_state["current_difficulty"] is None:
                            pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
                    else:
                        # Retry sooner than the regular interval after a failure.
                        pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_FAILURE_RETRY_INTERVAL

                if time.time() >= pool_state["next_farmer_update"]:
                    pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
                    authentication_token_timeout = pool_state["authentication_token_timeout"]

                    async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[PoolErrorCode]]:
                        # Run a GET /farmer to see if the farmer is already known by the pool
                        response = await self._pool_get_farmer(
                            pool_config, authentication_token_timeout, authentication_sk
                        )
                        farmer_response: Optional[GetFarmerResponse] = None
                        error_code_response: Optional[PoolErrorCode] = None
                        if response is not None:
                            if "error_code" not in response:
                                farmer_response = GetFarmerResponse.from_json_dict(response)
                                if farmer_response is not None:
                                    pool_state["current_difficulty"] = farmer_response.current_difficulty
                                    pool_state["current_points"] = farmer_response.current_points
                            else:
                                try:
                                    error_code_response = PoolErrorCode(response["error_code"])
                                except ValueError:
                                    self.log.error(
                                        f"Invalid error code received from the pool: {response['error_code']}"
                                    )
                        return farmer_response, error_code_response

                    if authentication_token_timeout is not None:
                        farmer_info, error_code = await update_pool_farmer_info()
                        if error_code == PoolErrorCode.FARMER_NOT_KNOWN:
                            # Make the farmer known on the pool with a POST /farmer
                            owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = find_owner_sk(
                                self.all_root_sks, pool_config.owner_public_key
                            )
                            assert owner_sk_and_index is not None
                            post_response = await self._pool_post_farmer(
                                pool_config, authentication_token_timeout, owner_sk_and_index[0]
                            )
                            if post_response is not None and "error_code" not in post_response:
                                self.log.info(
                                    f"Welcome message from {pool_config.pool_url}: "
                                    f"{post_response['welcome_message']}"
                                )
                                # Now we should be able to update the local farmer info
                                # NOTE: the second tuple element is actually the error code,
                                # despite the `farmer_is_known` name.
                                farmer_info, farmer_is_known = await update_pool_farmer_info()
                                if farmer_info is None and not farmer_is_known:
                                    self.log.error("Failed to update farmer info after POST /farmer.")

                        # Update the farmer information on the pool if the payout instructions changed or if the
                        # signature is invalid (latter to make sure the pool has the correct authentication public key).
                        payout_instructions_update_required: bool = (
                            farmer_info is not None
                            and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
                        )
                        if payout_instructions_update_required or error_code == PoolErrorCode.INVALID_SIGNATURE:
                            owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = find_owner_sk(
                                self.all_root_sks, pool_config.owner_public_key
                            )
                            assert owner_sk_and_index is not None
                            await self._pool_put_farmer(
                                pool_config, authentication_token_timeout, owner_sk_and_index[0]
                            )
                    else:
                        self.log.warning(
                            f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
                            f", check communication with the pool."
                        )

            except Exception as e:
                tb = traceback.format_exc()
                self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
    async def get_reward_targets(self, search_for_private_key: bool, max_ph_to_search: int = 500) -> Dict:
        """Return the configured farmer and pool reward addresses.

        When ``search_for_private_key`` is True, additionally scan the wallet
        keys (up to ``max_ph_to_search`` derived puzzle hashes per key) and
        report whether we hold the private keys behind those addresses.
        """
        if search_for_private_key:
            all_sks = await self.get_all_private_keys()
            have_farmer_sk, have_pool_sk = False, False
            search_addresses: List[bytes32] = [self.farmer_target, self.pool_target]
            for sk, _ in all_sks:
                found_addresses: Set[bytes32] = match_address_to_sk(sk, search_addresses, max_ph_to_search)
                # Once a target is matched, stop searching for it with later keys.
                if not have_farmer_sk and self.farmer_target in found_addresses:
                    search_addresses.remove(self.farmer_target)
                    have_farmer_sk = True
                if not have_pool_sk and self.pool_target in found_addresses:
                    search_addresses.remove(self.pool_target)
                    have_pool_sk = True
                if have_farmer_sk and have_pool_sk:
                    break
            return {
                "farmer_target": self.farmer_target_encoded,
                "pool_target": self.pool_target_encoded,
                "have_farmer_sk": have_farmer_sk,
                "have_pool_sk": have_pool_sk,
            }
        return {
            "farmer_target": self.farmer_target_encoded,
            "pool_target": self.pool_target_encoded,
        }
    def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
        """Update the farmer and/or pool reward address and persist to config.yaml.

        Arguments are encoded address strings (decoded via decode_puzzle_hash);
        passing None leaves the corresponding target unchanged.
        """
        with lock_and_load_config(self._root_path, "config.yaml") as config:
            if farmer_target_encoded is not None:
                self.farmer_target_encoded = farmer_target_encoded
                self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
                config["farmer"]["xfx_target_address"] = farmer_target_encoded
            if pool_target_encoded is not None:
                self.pool_target_encoded = pool_target_encoded
                self.pool_target = decode_puzzle_hash(pool_target_encoded)
                config["pool"]["xfx_target_address"] = pool_target_encoded
            save_config(self._root_path, "config.yaml", config)
    async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
        """Set new payout instructions for the pool identified by ``launcher_id``.

        Rewrites the matching pool_list entry in config.yaml and forces the
        pool state to refresh so the pool server learns the new value.
        """
        for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
            if launcher_id == pool_state_dict["pool_config"].launcher_id:
                with lock_and_load_config(self._root_path, "config.yaml") as config:
                    new_list = []
                    pool_list = config["pool"].get("pool_list", [])
                    if pool_list is not None:
                        for list_element in pool_list:
                            # Only the entry matching this launcher gets the new instructions;
                            # every entry is kept in the rewritten list.
                            if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
                                list_element["payout_instructions"] = payout_instructions
                            new_list.append(list_element)
                    config["pool"]["pool_list"] = new_list
                    save_config(self._root_path, "config.yaml", config)
                # Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
                pool_state_dict["next_farmer_update"] = 0
                return
        self.log.warning(f"Launcher id: {launcher_id} not found")
    async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
        """Build a signed pool login URL for the pool behind ``launcher_id``.

        Returns None when the launcher is unknown or no authentication secret
        key can be found for it.
        """
        for pool_state in self.pool_state.values():
            pool_config: PoolWalletConfig = pool_state["pool_config"]
            if pool_config.launcher_id == launcher_id:
                authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
                if authentication_sk is None:
                    self.log.error(f"Could not find authentication sk for {pool_config.p2_singleton_puzzle_hash}")
                    continue
                authentication_token_timeout = pool_state["authentication_token_timeout"]
                authentication_token = get_current_authentication_token(authentication_token_timeout)
                # The pool verifies this signature over ("get_login", launcher, target, token).
                message: bytes32 = std_hash(
                    AuthenticationPayload(
                        "get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
                    )
                )
                signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
                return (
                    pool_config.pool_url
                    + f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
                    f"&signature={bytes(signature).hex()}"
                )
        return None
async def get_harvesters(self, counts_only: bool = False) -> Dict:
harvesters: List = []
for connection in self.server.get_connections(NodeType.HARVESTER):
self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
receiver = self.plot_sync_receivers.get(connection.peer_node_id)
if receiver is not None:
harvesters.append(receiver.to_dict(counts_only))
else:
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/farmer/__init__.py | flax/farmer/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/ssl/create_ssl.py | flax/ssl/create_ssl.py | import datetime
import os
from pathlib import Path
from typing import Any, List, Tuple
import pkg_resources
from flax.util.ssl_check import DEFAULT_PERMISSIONS_CERT_FILE, DEFAULT_PERMISSIONS_KEY_FILE
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.x509.oid import NameOID
def get_flax_ca_crt_key() -> Tuple[Any, Any]:
    """Return the bundled Flax CA certificate and private key as raw bytes."""
    ca_cert = pkg_resources.resource_string(__name__, "flax_ca.crt")
    ca_key = pkg_resources.resource_string(__name__, "flax_ca.key")
    return ca_cert, ca_key
def get_mozilla_ca_crt() -> str:
    """Return the path to the bundled Mozilla CA certificate bundle."""
    repo_root = Path(__file__).parent.parent.parent
    return str(repo_root.absolute() / "mozilla-ca/cacert.pem")
def write_ssl_cert_and_key(cert_path: Path, cert_data: bytes, key_path: Path, key_data: bytes, overwrite: bool = True):
    """Write a certificate/key pair to disk with restrictive file permissions.

    Existing files are replaced only when ``overwrite`` is True.
    """
    open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    targets = (
        (cert_path, cert_data, DEFAULT_PERMISSIONS_CERT_FILE),
        (key_path, key_data, DEFAULT_PERMISSIONS_KEY_FILE),
    )
    for target, payload, permissions in targets:
        if target.exists():
            if not overwrite:
                continue
            # Remove first so the O_EXCL open below cannot fail on the old file.
            target.unlink()
        fd = os.open(str(target), open_flags, permissions)
        with open(fd, "wb") as out:
            out.write(payload)  # lgtm [py/clear-text-storage-sensitive-data]
def ensure_ssl_dirs(dirs: List[Path]):
    """Create SSL dirs with a default 755 mode if necessary"""
    for ssl_dir in dirs:
        if ssl_dir.exists():
            continue
        ssl_dir.mkdir(mode=0o755)
def generate_ca_signed_cert(ca_crt: bytes, ca_key: bytes, cert_out: Path, key_out: Path):
    """Generate a new RSA key pair and a certificate signed by the given CA.

    ``ca_crt``/``ca_key`` are PEM-encoded CA certificate and private key;
    the resulting leaf certificate and key are written to ``cert_out`` and
    ``key_out`` in PEM form.
    """
    one_day = datetime.timedelta(1, 0, 0)
    root_cert = x509.load_pem_x509_certificate(ca_crt, default_backend())
    root_key = load_pem_private_key(ca_key, None, default_backend())
    # Fresh 2048-bit RSA key for the leaf certificate.
    cert_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    new_subject = x509.Name(
        [
            x509.NameAttribute(NameOID.COMMON_NAME, "Flax"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Flax"),
            x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Organic Farming Division"),
        ]
    )
    cert = (
        x509.CertificateBuilder()
        .subject_name(new_subject)
        .issuer_name(root_cert.issuer)
        .public_key(cert_key.public_key())
        .serial_number(x509.random_serial_number())
        # Backdate one day to tolerate clock skew; expiry is fixed far in the future.
        .not_valid_before(datetime.datetime.today() - one_day)
        .not_valid_after(datetime.datetime(2100, 8, 2))
        .add_extension(
            x509.SubjectAlternativeName([x509.DNSName("flaxnetwork.org")]),
            critical=False,
        )
        .sign(root_key, hashes.SHA256(), default_backend())
    )
    cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
    key_pem = cert_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    write_ssl_cert_and_key(cert_out, cert_pem, key_out, key_pem)
def make_ca_cert(cert_path: Path, key_path: Path):
    """Generate a new self-signed CA certificate and key, written as PEM files.

    The CA is valid for ten years and carries the BasicConstraints CA flag.
    """
    root_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    # Self-signed: subject and issuer are the same name.
    subject = issuer = x509.Name(
        [
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Flax"),
            x509.NameAttribute(NameOID.COMMON_NAME, "Flax CA"),
            x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Organic Farming Division"),
        ]
    )
    root_cert = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(issuer)
        .public_key(root_key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.datetime.utcnow())
        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
        .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
        .sign(root_key, hashes.SHA256(), default_backend())
    )
    cert_pem = root_cert.public_bytes(encoding=serialization.Encoding.PEM)
    key_pem = root_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    write_ssl_cert_and_key(cert_path, cert_pem, key_path, key_pem)
def main():
    """Generate a fresh Flax CA certificate/key pair in the current directory."""
    cert_path = Path("./flax_ca.crt")
    key_path = Path("./flax_ca.key")
    return make_ca_cert(cert_path, key_path)
if __name__ == "__main__":
    main()
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/ssl/__init__.py | flax/ssl/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/util.py | flax/plotting/util.py | from __future__ import annotations
import logging
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from blspy import G1Element, PrivateKey
from chiapos import DiskProver
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.config import load_config, lock_and_load_config, save_config
from flax.util.ints import uint32
from flax.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@streamable
@dataclass(frozen=True)
class PlotsRefreshParameter(Streamable):
    # Tuning knobs for the PlotManager background refresh loop.
    interval_seconds: uint32 = uint32(120)  # delay between two full refresh cycles
    retry_invalid_seconds: uint32 = uint32(1200)  # retry interval for files that previously failed to open
    batch_size: uint32 = uint32(300)  # maximum number of plot files processed per batch
    batch_sleep_milliseconds: uint32 = uint32(1)  # pause between two consecutive batches
@dataclass
class PlotInfo:
    """A successfully loaded plot file together with the keys parsed from its memo."""
    prover: DiskProver
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    file_size: int  # presumably size on disk in bytes (set by the plot loader) — TODO confirm
    time_modified: float  # presumably the file's mtime — TODO confirm
class PlotRefreshEvents(Enum):
    """
    These are the events the `PlotManager` will trigger with the callback during a full refresh cycle:
    - started: This event indicates the start of a refresh cycle and contains the total number of files to
      process in `PlotRefreshResult.remaining`.
    - batch_processed: This event gets triggered if one batch has been processed. The values of
      `PlotRefreshResult.{loaded|removed|processed}` are the results of this specific batch.
    - done: This event gets triggered after all batches have been processed. The values of
      `PlotRefreshResult.{loaded|removed|processed}` are the totals of all batches.
    Note: The values of `PlotRefreshResult.{remaining|duration}` have the same meaning for all events.
    """
    started = 0
    batch_processed = 1
    done = 2
@dataclass
class PlotRefreshResult:
    """Result counters for one refresh batch or an aggregated refresh cycle."""
    loaded: List[PlotInfo] = field(default_factory=list)  # plots loaded in this batch/cycle
    removed: List[Path] = field(default_factory=list)  # plot paths dropped from tracking
    processed: int = 0  # number of files examined
    remaining: int = 0  # files still to be processed in the current cycle
    duration: float = 0  # processing time in seconds
def get_plot_directories(root_path: Path, config: Optional[Dict] = None) -> List[str]:
    """Return the configured harvester plot directories.

    Loads config.yaml from ``root_path`` when no pre-loaded ``config`` is
    supplied. A missing/None plot_directories entry yields an empty list.
    """
    # Annotation fix: the default is None, so the parameter is Optional[Dict].
    if config is None:
        config = load_config(root_path, "config.yaml")
    return config["harvester"]["plot_directories"] or []
def get_plot_filenames(root_path: Path) -> Dict[Path, List[Path]]:
    """Map every configured plot directory to the plot files found inside it."""
    config = load_config(root_path, "config.yaml")
    recursive: bool = config["harvester"].get("recursive_plot_scan", False)
    result: Dict[Path, List[Path]] = {}
    for dir_name in get_plot_directories(root_path, config):
        resolved = Path(dir_name).resolve()
        result[resolved] = get_filenames(resolved, recursive)
    return result
def add_plot_directory(root_path: Path, str_path: str) -> Dict:
    """Add a plot directory to config.yaml and return the updated config.

    Raises ValueError when the path does not exist, is not a directory, or is
    already configured.
    """
    path: Path = Path(str_path).resolve()
    if not path.exists():
        raise ValueError(f"Path doesn't exist: {path}")
    if not path.is_dir():
        raise ValueError(f"Path is not a directory: {path}")
    log.debug(f"add_plot_directory {str_path}")
    with lock_and_load_config(root_path, "config.yaml") as config:
        # Reuse the already-resolved path instead of re-resolving str_path twice.
        if str(path) in get_plot_directories(root_path, config):
            raise ValueError(f"Path already added: {path}")
        if not config["harvester"]["plot_directories"]:
            config["harvester"]["plot_directories"] = []
        config["harvester"]["plot_directories"].append(str(path))
        save_config(root_path, "config.yaml", config)
    return config
def remove_plot_directory(root_path: Path, str_path: str) -> None:
    """Remove a directory from the harvester's plot_directories in config.yaml."""
    log.debug(f"remove_plot_directory {str_path}")
    with lock_and_load_config(root_path, "config.yaml") as config:
        configured: List[str] = get_plot_directories(root_path, config)
        # Drop an exact string match first.
        if str_path in configured:
            configured.remove(str_path)
        # Then drop any remaining entry resolving to the same absolute path.
        resolved_entries = [Path(entry).resolve() for entry in configured]
        target = Path(str_path).resolve()
        if target in resolved_entries:
            resolved_entries.remove(target)
        config["harvester"]["plot_directories"] = [str(entry) for entry in resolved_entries]
        save_config(root_path, "config.yaml", config)
def remove_plot(path: Path):
    """Delete a plot file from disk; missing files are silently ignored."""
    log.debug(f"remove_plot {str(path)}")
    if not path.exists():
        return
    path.unlink()
def get_filenames(directory: Path, recursive: bool) -> List[Path]:
    """Return all ``*.plot`` files in ``directory`` (optionally recursive).

    macOS resource-fork artifacts (names starting with "._") are skipped, and
    any filesystem error results in an empty list.
    """
    try:
        if not directory.exists():
            log.warning(f"Directory: {directory} does not exist.")
            return []
    except OSError as e:
        log.warning(f"Error checking if directory {directory} exists: {e}")
        return []
    plot_files: List[Path] = []
    try:
        globber = directory.rglob if recursive else directory.glob
        plot_files = [
            candidate
            for candidate in globber("*.plot")
            if candidate.is_file() and not candidate.name.startswith("._")
        ]
        log.debug(f"get_filenames: {len(plot_files)} files found in {directory}, recursive: {recursive}")
    except Exception as e:
        log.warning(f"Error reading directory {directory} {e}")
    return plot_files
def parse_plot_info(memo: bytes) -> Tuple[Union[G1Element, bytes32], G1Element, PrivateKey]:
    """Decode a plot memo into its embedded keys.

    A 128-byte memo carries (pool public key, farmer public key, local master
    sk); a 112-byte memo carries (pool contract puzzle hash, farmer public
    key, local master sk). Any other length raises ValueError.
    """
    pk_memo_size = 48 + 48 + 32
    ph_memo_size = 32 + 48 + 32
    if len(memo) == pk_memo_size:
        pool_pk = G1Element.from_bytes(memo[:48])
        farmer_pk = G1Element.from_bytes(memo[48:96])
        local_sk = PrivateKey.from_bytes(memo[96:])
        return pool_pk, farmer_pk, local_sk
    if len(memo) == ph_memo_size:
        puzzle_hash = bytes32(memo[:32])
        farmer_pk = G1Element.from_bytes(memo[32:80])
        local_sk = PrivateKey.from_bytes(memo[80:])
        return puzzle_hash, farmer_pk, local_sk
    raise ValueError(f"Invalid number of bytes {len(memo)}")
def stream_plot_info_pk(
    pool_public_key: G1Element,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize (pool pk, farmer pk, local master sk) into a 128-byte plot memo.

    This is the public-key variant; see stream_plot_info_ph for the pool
    contract puzzle hash variant.
    """
    memo = b"".join((bytes(pool_public_key), bytes(farmer_public_key), bytes(local_master_sk)))
    assert len(memo) == (48 + 48 + 32)
    return memo
def stream_plot_info_ph(
    pool_contract_puzzle_hash: bytes32,
    farmer_public_key: G1Element,
    local_master_sk: PrivateKey,
):
    """Serialize (pool contract puzzle hash, farmer pk, local master sk) into a 112-byte plot memo.

    This is the pooling variant; see stream_plot_info_pk for the pool
    public key variant.
    """
    memo = b"".join((pool_contract_puzzle_hash, bytes(farmer_public_key), bytes(local_master_sk)))
    assert len(memo) == (32 + 48 + 32)
    return memo
def find_duplicate_plot_IDs(all_filenames=None) -> None:
    """Log a warning for every plot ID that appears in more than one filename.

    Only the trailing "-<64 char plot ID>.plot" portion of each filename is
    compared; the plot ID itself is intentionally not parsed or validated.
    """
    if all_filenames is None:
        all_filenames = []
    plot_ids_set = set()
    duplicate_plot_ids = set()
    all_filenames_str: List[str] = []
    for filename in all_filenames:
        filename_str: str = str(filename)
        all_filenames_str.append(filename_str)
        filename_parts: List[str] = filename_str.split("-")
        plot_id: str = filename_parts[-1]
        # Skipped parsing and verifying plot ID for faster performance
        # Skipped checking K size for faster performance
        # Only checks end of filenames: 64 char plot ID + .plot = 69 characters
        if len(plot_id) == 69:
            if plot_id in plot_ids_set:
                duplicate_plot_ids.add(plot_id)
            else:
                plot_ids_set.add(plot_id)
        else:
            # Bug fix: the original f-string had no placeholder and never named
            # the offending file.
            log.warning(f"{filename_str} does not end with -[64 char plot ID].plot")
    for plot_id in duplicate_plot_ids:
        log_message: str = plot_id + " found in multiple files:\n"
        duplicate_filenames: List[str] = [filename_str for filename_str in all_filenames_str if plot_id in filename_str]
        for filename_str in duplicate_filenames:
            log_message += "\t" + filename_str + "\n"
        log.warning(log_message)
def validate_plot_size(root_path: Path, k: int, override_k: bool) -> None:
    """Raise ValueError when the requested plot size k is below the allowed minimum.

    Without --override-k the configured mainnet minimum applies; with it,
    an absolute floor of k=25 is still enforced.
    """
    config = load_config(root_path, "config.yaml")
    min_k = config["min_mainnet_k_size"]
    if not override_k:
        if k < min_k:
            raise ValueError(
                f"k={min_k} is the minimum size for farming.\n"
                "If you are testing and you want to use smaller size please add the --override-k flag."
            )
    elif k < 25:
        raise ValueError("Error: The minimum k size allowed from the cli is k=25.")
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/__init__.py | flax/plotting/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/cache.py | flax/plotting/cache.py | from __future__ import annotations
import logging
import time
import traceback
from dataclasses import dataclass, field
from math import ceil
from pathlib import Path
from typing import Dict, ItemsView, KeysView, List, Optional, Tuple, ValuesView
from blspy import G1Element
from chiapos import DiskProver
from flax.plotting.util import parse_plot_info
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint16, uint64
from flax.util.misc import VersionedBlob
from flax.util.streamable import Streamable, streamable
from flax.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: int = 1
@streamable
@dataclass(frozen=True)
class DiskCacheEntry(Streamable):
    # Serialized form of a CacheEntry as stored in the on-disk cache file.
    prover_data: bytes  # bytes(DiskProver) for the plot
    farmer_public_key: G1Element
    # Exactly one of the two fields below is set (see CacheEntry.from_disk_prover).
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    last_use: uint64  # unix timestamp (whole seconds) of the last access
@streamable
@dataclass(frozen=True)
class CacheDataV1(Streamable):
    # Version-1 payload of the cache file: (plot path string, entry) pairs.
    entries: List[Tuple[str, DiskCacheEntry]]
@dataclass
class CacheEntry:
    """In-memory cache entry: a parsed prover plus the keys from the plot memo."""
    prover: DiskProver
    farmer_public_key: G1Element
    # Exactly one of the two fields below is set, depending on the memo type.
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
    last_use: float  # time.time() of the most recent access
    @classmethod
    def from_disk_prover(cls, prover: DiskProver) -> "CacheEntry":
        """Build a cache entry by parsing the keys out of the prover's plot memo."""
        (
            pool_public_key_or_puzzle_hash,
            farmer_public_key,
            local_master_sk,
        ) = parse_plot_info(prover.get_memo())
        # The memo carries either a pool public key or a pool contract puzzle hash.
        pool_public_key: Optional[G1Element] = None
        pool_contract_puzzle_hash: Optional[bytes32] = None
        if isinstance(pool_public_key_or_puzzle_hash, G1Element):
            pool_public_key = pool_public_key_or_puzzle_hash
        else:
            assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
            pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
        local_sk = master_sk_to_local_sk(local_master_sk)
        plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
            local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
        )
        return cls(prover, farmer_public_key, pool_public_key, pool_contract_puzzle_hash, plot_public_key, time.time())
    def bump_last_use(self) -> None:
        """Mark the entry as freshly used, restarting its expiry window."""
        self.last_use = time.time()
    def expired(self, expiry_seconds: int) -> bool:
        """Return True when the entry has gone unused for more than expiry_seconds."""
        return time.time() - self.last_use > expiry_seconds
@dataclass
class Cache:
    """Persistent cache of parsed plot provers, keyed by plot file path.

    Lets the harvester skip re-opening every plot with DiskProver on restart.
    """
    _path: Path  # file the serialized cache is read from / written to
    _changed: bool = False  # True while in-memory state differs from disk
    _data: Dict[Path, CacheEntry] = field(default_factory=dict)
    expiry_seconds: int = 7 * 24 * 60 * 60  # Keep the cache entries alive for 7 days after its last access
    def __post_init__(self) -> None:
        # Make sure the cache directory exists before any save().
        self._path.parent.mkdir(parents=True, exist_ok=True)
    def __len__(self) -> int:
        return len(self._data)
    def update(self, path: Path, entry: CacheEntry) -> None:
        """Insert or replace the entry for a plot path and mark the cache dirty."""
        self._data[path] = entry
        self._changed = True
    def remove(self, cache_keys: List[Path]) -> None:
        """Drop the given plot paths from the cache; unknown paths are ignored."""
        for key in cache_keys:
            if key in self._data:
                del self._data[key]
                self._changed = True
    def save(self) -> None:
        """Serialize all entries to disk as a versioned blob; errors are logged, not raised."""
        try:
            disk_cache_entries: Dict[str, DiskCacheEntry] = {
                str(path): DiskCacheEntry(
                    bytes(cache_entry.prover),
                    cache_entry.farmer_public_key,
                    cache_entry.pool_public_key,
                    cache_entry.pool_contract_puzzle_hash,
                    cache_entry.plot_public_key,
                    uint64(int(cache_entry.last_use)),
                )
                for path, cache_entry in self.items()
            }
            cache_data: CacheDataV1 = CacheDataV1(
                [(plot_id, cache_entry) for plot_id, cache_entry in disk_cache_entries.items()]
            )
            disk_cache: VersionedBlob = VersionedBlob(uint16(CURRENT_VERSION), bytes(cache_data))
            serialized: bytes = bytes(disk_cache)
            self._path.write_bytes(serialized)
            self._changed = False
            log.info(f"Saved {len(serialized)} bytes of cached data")
        except Exception as e:
            log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
    def load(self) -> None:
        """Load the cache file; version mismatches and parse errors are logged and discarded."""
        try:
            serialized = self._path.read_bytes()
            log.info(f"Loaded {len(serialized)} bytes of cached data")
            stored_cache: VersionedBlob = VersionedBlob.from_bytes(serialized)
            if stored_cache.version == CURRENT_VERSION:
                start = time.time()
                cache_data: CacheDataV1 = CacheDataV1.from_bytes(stored_cache.blob)
                self._data = {}
                estimated_c2_sizes: Dict[int, int] = {}
                for path, cache_entry in cache_data.entries:
                    new_entry = CacheEntry(
                        DiskProver.from_bytes(cache_entry.prover_data),
                        cache_entry.farmer_public_key,
                        cache_entry.pool_public_key,
                        cache_entry.pool_contract_puzzle_hash,
                        cache_entry.plot_public_key,
                        float(cache_entry.last_use),
                    )
                    # TODO, drop the below entry dropping after few versions or whenever we force a cache recreation.
                    # it's here to filter invalid cache entries coming from bladebit RAM plotting.
                    # Related: - https://github.com/Flax-Network/flax-blockchain/issues/13084
                    #          - https://github.com/Flax-Network/chiapos/pull/337
                    k = new_entry.prover.get_size()
                    if k not in estimated_c2_sizes:
                        estimated_c2_sizes[k] = ceil(2**k / 100_000_000) * ceil(k / 8)
                    memo_size = len(new_entry.prover.get_memo())
                    prover_size = len(cache_entry.prover_data)
                    # Estimated C2 size + memo size + 2000 (static data + path)
                    # static data: version(2) + table pointers (<=96) + id(32) + k(1) => ~130
                    # path: up to ~1870, all above will lead to false positive.
                    # See https://github.com/Flax-Network/chiapos/blob/3ee062b86315823dd775453ad320b8be892c7df3/src/prover_disk.hpp#L282-L287  # noqa: E501
                    if prover_size > (estimated_c2_sizes[k] + memo_size + 2000):
                        log.warning(
                            "Suspicious cache entry dropped. Recommended: stop the harvester, remove "
                            f"{self._path}, restart. Entry: size {prover_size}, path {path}"
                        )
                    else:
                        self._data[Path(path)] = new_entry
                log.info(f"Parsed {len(self._data)} cache entries in {time.time() - start:.2f}s")
            else:
                raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
        except FileNotFoundError:
            log.debug(f"Cache {self._path} not found")
        except Exception as e:
            log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
    def keys(self) -> KeysView[Path]:
        """View of all cached plot paths."""
        return self._data.keys()
    def values(self) -> ValuesView[CacheEntry]:
        """View of all cached entries."""
        return self._data.values()
    def items(self) -> ItemsView[Path, CacheEntry]:
        """View of (path, entry) pairs."""
        return self._data.items()
    def get(self, path: Path) -> Optional[CacheEntry]:
        """Entry for a path, or None when not cached."""
        return self._data.get(path)
    def changed(self) -> bool:
        """True when in-memory state has diverged from what is on disk."""
        return self._changed
    def path(self) -> Path:
        """Location of the cache file."""
        return self._path
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/manager.py | flax/plotting/manager.py | from __future__ import annotations
import logging
import threading
import time
import traceback
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from blspy import G1Element
from chiapos import DiskProver
from flax.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from flax.plotting.cache import Cache, CacheEntry
from flax.plotting.util import PlotInfo, PlotRefreshEvents, PlotRefreshResult, PlotsRefreshParameter, get_plot_filenames
from flax.util.generator_tools import list_to_batches
log = logging.getLogger(__name__)
class PlotManager:
    """Tracks, loads, and periodically refreshes the harvester's plot files."""
    plots: Dict[Path, PlotInfo]  # successfully loaded plots
    plot_filename_paths: Dict[str, Tuple[str, Set[str]]]  # filename -> (loaded dir, duplicate dirs)
    plot_filename_paths_lock: threading.Lock
    failed_to_open_filenames: Dict[Path, int]  # path -> time of the last failed open attempt
    no_key_filenames: Set[Path]  # plots whose keys are not in our farmer/pool key lists
    farmer_public_keys: List[G1Element]
    pool_public_keys: List[G1Element]
    cache: Cache
    match_str: Optional[str]  # when set, only load plots whose path contains it
    open_no_key_filenames: bool
    last_refresh_time: float
    refresh_parameter: PlotsRefreshParameter
    log: Any
    _lock: threading.Lock
    _refresh_thread: Optional[threading.Thread]
    _refreshing_enabled: bool
    _refresh_callback: Callable
    _initial: bool  # True until the first full refresh cycle finished
    def __init__(
        self,
        root_path: Path,
        refresh_callback: Callable,
        match_str: Optional[str] = None,
        open_no_key_filenames: bool = False,
        refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
    ):
        """Create a plot manager rooted at ``root_path``.

        ``refresh_callback`` is invoked with (PlotRefreshEvents, PlotRefreshResult)
        as the background refresh progresses.
        """
        self.root_path = root_path
        self.plots = {}
        self.plot_filename_paths = {}
        self.plot_filename_paths_lock = threading.Lock()
        self.failed_to_open_filenames = {}
        self.no_key_filenames = set()
        self.farmer_public_keys = []
        self.pool_public_keys = []
        # Persistent prover cache lives under <root>/cache/plot_manager.dat.
        self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
        self.match_str = match_str
        self.open_no_key_filenames = open_no_key_filenames
        self.last_refresh_time = 0
        self.refresh_parameter = refresh_parameter
        self.log = logging.getLogger(__name__)
        self._lock = threading.Lock()
        self._refresh_thread = None
        self._refreshing_enabled = False
        self._refresh_callback = refresh_callback
        self._initial = True
    def __enter__(self):
        # Context-manager protocol: `with self:` guards shared plot state.
        self._lock.acquire()
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Release the lock taken in __enter__; exceptions propagate unchanged.
        self._lock.release()
    def reset(self):
        """Drop all tracked plot state and restart the refresh bookkeeping."""
        with self:
            self.last_refresh_time = time.time()
            self.plots.clear()
            self.plot_filename_paths.clear()
            self.failed_to_open_filenames.clear()
            self.no_key_filenames.clear()
            self._initial = True
    def set_refresh_callback(self, callback: Callable):
        """Replace the callback invoked with refresh progress events."""
        self._refresh_callback = callback
    def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
        """Set the keys used to decide which plot files belong to this farmer."""
        self.farmer_public_keys = farmer_public_keys
        self.pool_public_keys = pool_public_keys
    def initial_refresh(self) -> bool:
        """True while the first full refresh cycle has not completed yet."""
        return self._initial
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
    def plot_count(self) -> int:
        """Number of successfully loaded plots (taken under the lock)."""
        with self:
            return len(self.plots)
def get_duplicates(self) -> List[Path]:
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
    def start_refreshing(self, sleep_interval_ms: int = 1000):
        """Start the background refresh thread, loading the on-disk cache first."""
        self._refreshing_enabled = True
        if self._refresh_thread is None or not self._refresh_thread.is_alive():
            self.cache.load()
            self._refresh_thread = threading.Thread(target=self._refresh_task, args=(sleep_interval_ms,))
            self._refresh_thread.start()
    def stop_refreshing(self) -> None:
        """Signal the refresh thread to stop and wait for it to exit."""
        self._refreshing_enabled = False
        if self._refresh_thread is not None and self._refresh_thread.is_alive():
            self._refresh_thread.join()
            self._refresh_thread = None
    def trigger_refresh(self) -> None:
        """Force the refresh thread to start a new cycle on its next wakeup."""
        log.debug("trigger_refresh")
        self.last_refresh_time = 0
    def _refresh_task(self, sleep_interval_ms: int):
        """Background loop: periodically rescan plot directories and reload plots.

        Each cycle drops vanished plots, processes the remaining files in
        batches (reporting progress via the refresh callback), prunes expired
        cache entries, and persists the cache when it changed. Any exception
        resets all plot state and the loop continues.
        """
        while self._refreshing_enabled:
            try:
                # Idle until the refresh interval elapses or refreshing is disabled.
                while not self.needs_refresh() and self._refreshing_enabled:
                    time.sleep(sleep_interval_ms / 1000.0)
                if not self._refreshing_enabled:
                    return
                plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
                plot_directories: Set[Path] = set(plot_filenames.keys())
                plot_paths: Set[Path] = set()
                for paths in plot_filenames.values():
                    plot_paths.update(paths)
                total_result: PlotRefreshResult = PlotRefreshResult()
                total_size = len(plot_paths)
                self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
                # First drop all plots we have in plot_filename_paths but no longer in the filesystem or set in config
                for path in list(self.failed_to_open_filenames.keys()):
                    if path not in plot_paths:
                        del self.failed_to_open_filenames[path]
                for path in self.no_key_filenames.copy():
                    if path not in plot_paths:
                        self.no_key_filenames.remove(path)
                filenames_to_remove: List[str] = []
                for plot_filename, paths_entry in self.plot_filename_paths.items():
                    loaded_path, duplicated_paths = paths_entry
                    loaded_plot = Path(loaded_path) / Path(plot_filename)
                    if loaded_plot not in plot_paths:
                        filenames_to_remove.append(plot_filename)
                        with self:
                            if loaded_plot in self.plots:
                                del self.plots[loaded_plot]
                                total_result.removed.append(loaded_plot)
                        # No need to check the duplicates here since we drop the whole entry
                        continue
                    # The loaded copy still exists; prune only vanished duplicates.
                    paths_to_remove: List[str] = []
                    for path_str in duplicated_paths:
                        loaded_plot = Path(path_str) / Path(plot_filename)
                        if loaded_plot not in plot_paths:
                            paths_to_remove.append(path_str)
                    for path_str in paths_to_remove:
                        duplicated_paths.remove(path_str)
                for filename in filenames_to_remove:
                    del self.plot_filename_paths[filename]
                for remaining, batch in list_to_batches(sorted(list(plot_paths)), self.refresh_parameter.batch_size):
                    batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
                    if not self._refreshing_enabled:
                        self.log.debug("refresh_plots: Aborted")
                        break
                    # Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
                    batch_result.remaining = remaining
                    total_result.loaded += batch_result.loaded
                    total_result.processed += batch_result.processed
                    total_result.duration += batch_result.duration
                    self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
                    if remaining == 0:
                        break
                    batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
                    self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
                    time.sleep(float(batch_sleep) / 1000.0)
                if self._refreshing_enabled:
                    self._refresh_callback(PlotRefreshEvents.done, total_result)
                # Reset the initial refresh indication
                self._initial = False
                # Cleanup unused cache
                self.log.debug(f"_refresh_task: cached entries before cleanup: {len(self.cache)}")
                remove_paths: List[Path] = []
                for path, cache_entry in self.cache.items():
                    if cache_entry.expired(Cache.expiry_seconds) and path not in self.plots:
                        remove_paths.append(path)
                    elif path in self.plots:
                        cache_entry.bump_last_use()
                self.cache.remove(remove_paths)
                self.log.debug(f"_refresh_task: cached entries removed: {len(remove_paths)}")
                if self.cache.changed():
                    self.cache.save()
                self.last_refresh_time = time.time()
                self.log.debug(
                    f"_refresh_task: total_result.loaded {len(total_result.loaded)}, "
                    f"total_result.removed {len(total_result.removed)}, "
                    f"total_duration {total_result.duration:.2f} seconds"
                )
            except Exception as e:
                log.error(f"_refresh_callback raised: {e} with the traceback: {traceback.format_exc()}")
                self.reset()
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
    """Probe one batch of plot files in parallel and load any valid new plots.

    Files are opened on a thread pool; valid plots are added to ``self.plots``.
    Returns a PlotRefreshResult with the plots loaded in this batch, the number
    processed, and the batch duration. ``plot_directories`` is only used for logging.
    """
    start_time: float = time.time()
    result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
    # Guards `result` mutations from the worker threads below.
    counter_lock = threading.Lock()

    log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")

    if self.match_str is not None:
        log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')

    def process_file(file_path: Path) -> Optional[PlotInfo]:
        # Worker: validate one plot file; returns its PlotInfo or None to skip it.
        if not self._refreshing_enabled:
            return None
        filename_str = str(file_path)
        if self.match_str is not None and self.match_str not in filename_str:
            return None
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path])
            < self.refresh_parameter.retry_invalid_seconds
        ):
            # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
            return None
        # Already loaded in a previous cycle; keep the existing PlotInfo.
        if file_path in self.plots:
            return self.plots[file_path]
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return None
        try:
            if not file_path.exists():
                return None
            stat_info = file_path.stat()
            cache_entry = self.cache.get(file_path)
            cache_hit = cache_entry is not None
            if not cache_hit:
                # Cache miss: open the plot on disk and derive its metadata.
                prover = DiskProver(str(file_path))
                log.debug(f"process_file {str(file_path)}")
                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.
                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024 ** 3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return None
                cache_entry = CacheEntry.from_disk_prover(prover)
                self.cache.update(file_path, cache_entry)
            assert cache_entry is not None
            # Only use plots that correct keys associated with them
            if cache_entry.farmer_public_key not in self.farmer_public_keys:
                log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None
            if cache_entry.pool_public_key is not None and cache_entry.pool_public_key not in self.pool_public_keys:
                log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                self.no_key_filenames.add(file_path)
                if not self.open_no_key_filenames:
                    return None
            # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
            # the current plot from that list if its in there since we passed the key checks above.
            if file_path in self.no_key_filenames:
                self.no_key_filenames.remove(file_path)
            with self.plot_filename_paths_lock:
                # Track every directory each plot filename appears in to detect duplicates.
                paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
                if paths is None:
                    paths = (str(Path(cache_entry.prover.get_filename()).parent), set())
                    self.plot_filename_paths[file_path.name] = paths
                else:
                    # Same filename seen in another directory: record and skip this copy.
                    paths[1].add(str(Path(cache_entry.prover.get_filename()).parent))
                    log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                    return None
            new_plot_info: PlotInfo = PlotInfo(
                cache_entry.prover,
                cache_entry.pool_public_key,
                cache_entry.pool_contract_puzzle_hash,
                cache_entry.plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )
            cache_entry.bump_last_use()
            with counter_lock:
                result.loaded.append(new_plot_info)
            # The plot opened fine; clear any earlier failure record.
            if file_path in self.failed_to_open_filenames:
                del self.failed_to_open_filenames[file_path]
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            # Timestamp the failure so retries are throttled (see check at the top).
            self.failed_to_open_filenames[file_path] = int(time.time())
            return None
        log.debug(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}, cache_hit: {cache_hit}")
        return new_plot_info

    # `with self` presumably takes the manager's own lock around the plot map update
    # while workers run — TODO confirm PlotManager's context-manager semantics.
    with self, ThreadPoolExecutor() as executor:
        plots_refreshed: Dict[Path, PlotInfo] = {}
        for new_plot in executor.map(process_file, plot_paths):
            if new_plot is not None:
                plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
        self.plots.update(plots_refreshed)

    result.duration = time.time() - start_time
    self.log.debug(
        f"refresh_batch: loaded {len(result.loaded)}, "
        f"removed {len(result.removed)}, processed {result.processed}, "
        f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
        f"duration: {result.duration:.2f} seconds"
    )
    return result
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/create_plots.py | flax/plotting/create_plots.py | from __future__ import annotations
import logging
from datetime import datetime
from pathlib import Path
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple
from blspy import AugSchemeMPL, G1Element, PrivateKey
from chiapos import DiskPlotter
from flax.daemon.keychain_proxy import KeychainProxy, connect_to_keychain_and_validate, wrap_local_keychain
from flax.plotting.util import stream_plot_info_ph, stream_plot_info_pk
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.bech32m import decode_puzzle_hash
from flax.util.keychain import Keychain
from flax.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_local_sk, master_sk_to_pool_sk
log = logging.getLogger(__name__)
class PlotKeys:
    """Bundle of the keys/addresses a plot is created with.

    Exactly one of ``pool_public_key`` (OG plots) or ``pool_contract_address``
    (plotNFT plots) is expected to be set by callers.
    """

    def __init__(
        self,
        farmer_public_key: G1Element,
        pool_public_key: Optional[G1Element],
        pool_contract_address: Optional[str],
    ):
        self.farmer_public_key = farmer_public_key
        self.pool_public_key = pool_public_key
        self.pool_contract_address = pool_contract_address

    @property
    def pool_contract_puzzle_hash(self) -> Optional[bytes32]:
        """Puzzle hash decoded from ``pool_contract_address``, or ``None`` if unset."""
        if self.pool_contract_address is None:
            return None
        return decode_puzzle_hash(self.pool_contract_address)
class PlotKeysResolver:
    """Resolves the farmer/pool keys used for plotting.

    Explicitly supplied hex-encoded keys take precedence; otherwise keys are
    derived from the keychain — via the daemon when ``connect_to_daemon`` is
    True, else via a local :class:`Keychain`.
    """

    def __init__(
        self,
        farmer_public_key: Optional[str],
        alt_fingerprint: Optional[int],
        pool_public_key: Optional[str],
        pool_contract_address: Optional[str],
        root_path: Path,
        log: logging.Logger,
        connect_to_daemon=False,
    ):
        self.farmer_public_key = farmer_public_key
        self.alt_fingerprint = alt_fingerprint
        self.pool_public_key = pool_public_key
        self.pool_contract_address = pool_contract_address
        self.root_path = root_path
        self.log = log
        self.connect_to_daemon = connect_to_daemon
        # Cached result of resolve(); resolution happens at most once.
        self.resolved_keys: Optional[PlotKeys] = None

    async def resolve(self) -> PlotKeys:
        """Return the resolved :class:`PlotKeys`, computing and caching on first call.

        Raises:
            RuntimeError: if both pool_public_key and pool_contract_address are
                given, or (via the getters) if no key source is available.
        """
        if self.resolved_keys is not None:
            return self.resolved_keys

        # NOTE(review): connect_to_keychain_and_validate may return None here;
        # get_sk() then falls back to its local-keychain branch — confirm intended.
        if self.connect_to_daemon:
            keychain_proxy: Optional[KeychainProxy] = await connect_to_keychain_and_validate(self.root_path, self.log)
        else:
            keychain_proxy = wrap_local_keychain(Keychain(), log=self.log)

        farmer_public_key: G1Element
        if self.farmer_public_key is not None:
            farmer_public_key = G1Element.from_bytes(bytes.fromhex(self.farmer_public_key))
        else:
            farmer_public_key = await self.get_farmer_public_key(keychain_proxy)

        pool_public_key: Optional[G1Element] = None
        if self.pool_public_key is not None:
            if self.pool_contract_address is not None:
                raise RuntimeError("Choose one of pool_contract_address and pool_public_key")
            pool_public_key = G1Element.from_bytes(bytes.fromhex(self.pool_public_key))
        else:
            if self.pool_contract_address is None:
                # If nothing is set, farms to the provided key (or the first key)
                pool_public_key = await self.get_pool_public_key(keychain_proxy)

        self.resolved_keys = PlotKeys(farmer_public_key, pool_public_key, self.pool_contract_address)
        if keychain_proxy is not None:
            await keychain_proxy.close()
        return self.resolved_keys

    async def get_sk(self, keychain_proxy: Optional[KeychainProxy] = None) -> Optional[PrivateKey]:
        """Fetch the master private key (by ``alt_fingerprint`` if set, else first key).

        Returns ``None`` when no key is found or the keychain-proxy call fails.
        (Return annotation corrected: the code returns the PrivateKey alone, not
        the (PrivateKey, entropy) tuple.)
        """
        sk: Optional[PrivateKey] = None
        if keychain_proxy:
            try:
                if self.alt_fingerprint is not None:
                    sk = await keychain_proxy.get_key_for_fingerprint(self.alt_fingerprint)
                else:
                    sk = await keychain_proxy.get_first_private_key()
            except Exception as e:
                # Best-effort: log and fall through with sk = None.
                log.error(f"Keychain proxy failed with error: {e}")
        else:
            sk_ent: Optional[Tuple[PrivateKey, bytes]] = None
            keychain: Keychain = Keychain()
            if self.alt_fingerprint is not None:
                sk_ent = keychain.get_private_key_by_fingerprint(self.alt_fingerprint)
            else:
                sk_ent = keychain.get_first_private_key()

            if sk_ent:
                sk = sk_ent[0]
        return sk

    async def get_farmer_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
        """Derive the farmer public key from the master key; raise if no key exists."""
        sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
        if sk is None:
            raise RuntimeError(
                "No keys, please run 'flax keys add', 'flax keys generate' or provide a public key with -f"
            )
        return master_sk_to_farmer_sk(sk).get_g1()

    async def get_pool_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
        """Derive the pool public key from the master key; raise if no key exists."""
        sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
        if sk is None:
            raise RuntimeError(
                "No keys, please run 'flax keys add', 'flax keys generate' or provide a public key with -p"
            )
        return master_sk_to_pool_sk(sk).get_g1()
async def resolve_plot_keys(
    farmer_public_key: Optional[str],
    alt_fingerprint: Optional[int],
    pool_public_key: Optional[str],
    pool_contract_address: Optional[str],
    root_path: Path,
    log: logging.Logger,
    connect_to_daemon=False,
) -> PlotKeys:
    """Convenience wrapper: build a :class:`PlotKeysResolver` and resolve it."""
    resolver = PlotKeysResolver(
        farmer_public_key,
        alt_fingerprint,
        pool_public_key,
        pool_contract_address,
        root_path,
        log,
        connect_to_daemon,
    )
    return await resolver.resolve()
async def create_plots(
    args,
    keys: PlotKeys,
    use_datetime: bool = True,
    test_private_keys: Optional[List] = None,
) -> Tuple[Dict[bytes32, Path], Dict[bytes32, Path]]:
    """Create ``args.num`` plots and return ``(created_plots, existing_plots)``.

    Both return values map plot ID -> final plot path. ``args`` is the parsed
    plotting namespace (tmp_dir, tmp2_dir, final_dir, size, num, buffer,
    buckets, stripe_size, num_threads, nobitfield, plotid, memo).

    Raises:
        AssertionError: if not exactly one of ``keys.pool_public_key`` /
            ``keys.pool_contract_puzzle_hash`` is set, or if
            ``test_private_keys`` has the wrong length.
    """
    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir
    # Exactly one pool destination must be set: OG plots vs plotNFT plots.
    assert (keys.pool_public_key is None) != (keys.pool_contract_puzzle_hash is None)
    num = args.num

    if keys.pool_public_key is not None:
        log.info(
            f"Creating {num} plots of size {args.size}, pool public key: "
            f"{bytes(keys.pool_public_key).hex()} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )
    else:
        assert keys.pool_contract_puzzle_hash is not None
        log.info(
            f"Creating {num} plots of size {args.size}, pool contract address: "
            f"{keys.pool_contract_address} farmer public key: {bytes(keys.farmer_public_key).hex()}"
        )

    # Remember which temp dirs we created so we can try to remove them afterwards.
    tmp_dir_created = False
    if not args.tmp_dir.exists():
        args.tmp_dir.mkdir(parents=True, exist_ok=True)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        args.tmp2_dir.mkdir(parents=True, exist_ok=True)
        tmp2_dir_created = True

    args.final_dir.mkdir(parents=True, exist_ok=True)

    created_plots: Dict[bytes32, Path] = {}
    existing_plots: Dict[bytes32, Path] = {}
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        # New plots will also include a taproot of the keys, for extensibility
        include_taproot: bool = keys.pool_contract_puzzle_hash is not None
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), keys.farmer_public_key, include_taproot
        )

        # The plot id is based on the harvester, farmer, and pool keys
        if keys.pool_public_key is not None:
            plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(keys.pool_public_key, plot_public_key)
            plot_memo: bytes32 = stream_plot_info_pk(keys.pool_public_key, keys.farmer_public_key, sk)
        else:
            assert keys.pool_contract_puzzle_hash is not None
            plot_id = ProofOfSpace.calculate_plot_id_ph(keys.pool_contract_puzzle_hash, plot_public_key)
            plot_memo = stream_plot_info_ph(keys.pool_contract_puzzle_hash, keys.farmer_public_key, sk)

        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            # NOTE(review): bytes32.fromhex enforces exactly 32 bytes while real plot
            # memos are longer — confirm this debug path is only used with 32-byte memos.
            plot_memo = bytes32.fromhex(args.memo)

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            created_plots[plot_id] = full_path
        else:
            # Bug fix: previously logged the placeholder-free f-string
            # "Plot (unknown) already exists" — name the skipped plot instead.
            log.info(f"Plot {full_path} already exists")
            existing_plots[plot_id] = full_path

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")

    log.info(f"Created a total of {len(created_plots)} new plots")
    for created_path in created_plots.values():
        log.info(created_path.name)

    return created_plots, existing_plots
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/plotting/check_plots.py | flax/plotting/check_plots.py | from __future__ import annotations
import logging
from collections import Counter
from pathlib import Path
from time import sleep, time
from typing import List
from blspy import G1Element
from chiapos import Verifier
from flax.plotting.manager import PlotManager
from flax.plotting.util import (
PlotRefreshEvents,
PlotRefreshResult,
PlotsRefreshParameter,
find_duplicate_plot_IDs,
get_plot_filenames,
parse_plot_info,
)
from flax.util.bech32m import encode_puzzle_hash
from flax.util.config import load_config
from flax.util.hash import std_hash
from flax.util.keychain import Keychain
from flax.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_local_sk
log = logging.getLogger(__name__)
def plot_refresh_callback(event: PlotRefreshEvents, refresh_result: PlotRefreshResult):
    """Progress callback handed to PlotManager: log each refresh event's stats."""
    log.info(f"event: {event.name}, loaded {len(refresh_result.loaded)} plots, {refresh_result.remaining} remaining")
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates, debug_show_memo):
    """Load all configured plots and test each with ``num`` proof challenges.

    Logs per-plot results and a summary of valid/bad/unopenable plots.
    ``challenge_start`` offsets the challenge indices, ``grep_string`` filters
    plot paths, ``list_duplicates`` reports duplicate plot IDs, and
    ``debug_show_memo`` dumps each plot's memo at the end.
    """
    config = load_config(root_path, "config.yaml")
    address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
    # batch_sleep 0: no throttling between refresh batches for this one-shot scan.
    plot_refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(batch_sleep_milliseconds=0)
    plot_manager: PlotManager = PlotManager(
        root_path,
        match_str=grep_string,
        open_no_key_filenames=True,
        refresh_parameter=plot_refresh_parameter,
        refresh_callback=plot_refresh_callback,
    )
    # Clamp the challenge count: 0 skips opening, minimum 5, default 30.
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(f"{num} challenges is too low, setting it to the minimum of 5")
                num = 5
            if num < 30:
                log.warning("Use 30 challenges (our default) for balance of speed and accurate results")
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    # NOTE(review): two consecutive `if list_duplicates:` blocks — could be merged.
    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")

    if list_duplicates:
        all_filenames: List[Path] = []
        for paths in get_plot_filenames(root_path).values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return None

    parallel_read: bool = config["harvester"].get("parallel_read", True)

    v = Verifier()
    log.info(f"Loading plots in config.yaml using plot_manager loading code (parallel read: {parallel_read})\n")
    # Prompts interactively if the keyring is protected by a master passphrase. To use the daemon
    # for keychain access, KeychainProxy/connect_to_keychain should be used instead of Keychain.
    kc: Keychain = Keychain()
    plot_manager.set_public_keys(
        [master_sk_to_farmer_sk(sk).get_g1() for sk, _ in kc.get_all_private_keys()],
        [G1Element.from_bytes(bytes.fromhex(pk)) for pk in config["farmer"]["pool_public_keys"]],
    )
    plot_manager.start_refreshing()

    # Block until the manager has scanned every configured plot directory.
    while plot_manager.needs_refresh():
        sleep(1)
    plot_manager.stop_refreshing()

    if plot_manager.plot_count() > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_size = 0
    bad_plots_list: List[Path] = []

    with plot_manager:
        for plot_path, plot_info in plot_manager.plots.items():
            pr = plot_info.prover
            log.info(f"Testing plot {plot_path} k={pr.get_size()}")
            if plot_info.pool_public_key is not None:
                log.info(f"\t{'Pool public key:':<23} {plot_info.pool_public_key}")
            if plot_info.pool_contract_puzzle_hash is not None:
                pca: str = encode_puzzle_hash(plot_info.pool_contract_puzzle_hash, address_prefix)
                log.info(f"\t{'Pool contract address:':<23} {pca}")
            # Look up local_sk from plot to save locked memory
            (
                pool_public_key_or_puzzle_hash,
                farmer_public_key,
                local_master_sk,
            ) = parse_plot_info(pr.get_memo())
            local_sk = master_sk_to_local_sk(local_master_sk)
            log.info(f"\t{'Farmer public key:' :<23} {farmer_public_key}")
            log.info(f"\t{'Local sk:' :<23} {local_sk}")
            total_proofs = 0
            caught_exception: bool = False
            for i in range(num_start, num_end):
                # Deterministic challenge derived from the challenge index.
                challenge = std_hash(i.to_bytes(32, "big"))
                # Some plot errors cause get_qualities_for_challenge to throw a RuntimeError
                try:
                    quality_start_time = int(round(time() * 1000))
                    for index, quality_str in enumerate(pr.get_qualities_for_challenge(challenge)):
                        quality_spent_time = int(round(time() * 1000)) - quality_start_time
                        if quality_spent_time > 5000:
                            log.warning(
                                f"\tLooking up qualities took: {quality_spent_time} ms. This should be below 5 seconds "
                                f"to minimize risk of losing rewards."
                            )
                        else:
                            log.info(f"\tLooking up qualities took: {quality_spent_time} ms.")
                        # Other plot errors cause get_full_proof or validate_proof to throw an AssertionError
                        try:
                            proof_start_time = int(round(time() * 1000))
                            proof = pr.get_full_proof(challenge, index, parallel_read)
                            proof_spent_time = int(round(time() * 1000)) - proof_start_time
                            if proof_spent_time > 15000:
                                log.warning(
                                    f"\tFinding proof took: {proof_spent_time} ms. This should be below 15 seconds "
                                    f"to minimize risk of losing rewards."
                                )
                            else:
                                log.info(f"\tFinding proof took: {proof_spent_time} ms")
                            total_proofs += 1
                            ver_quality_str = v.validate_proof(pr.get_id(), pr.get_size(), challenge, proof)
                            assert quality_str == ver_quality_str
                        except AssertionError as e:
                            log.error(f"{type(e)}: {e} error in proving/verifying for plot {plot_path}")
                            caught_exception = True
                        # Reset the timer so the next quality's lookup time is measured alone.
                        quality_start_time = int(round(time() * 1000))
                except KeyboardInterrupt:
                    log.warning("Interrupted, closing")
                    return None
                except SystemExit:
                    log.warning("System is shutting down.")
                    return None
                except Exception as e:
                    log.error(f"{type(e)}: {e} error in getting challenge qualities for plot {plot_path}")
                    caught_exception = True
                if caught_exception is True:
                    break
            if total_proofs > 0 and caught_exception is False:
                log.info(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                total_good_plots[pr.get_size()] += 1
                total_size += plot_path.stat().st_size
            else:
                log.error(f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}")
                bad_plots_list.append(plot_path)
    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB")
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = len(bad_plots_list) + len(plot_manager.failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots found:")
        if len(bad_plots_list) > 0:
            log.warning(f"  {len(bad_plots_list)} bad plots:")
            for bad_plot_path in bad_plots_list:
                log.warning(f"{bad_plot_path}")
        if len(plot_manager.failed_to_open_filenames) > 0:
            log.warning(f"  {len(plot_manager.failed_to_open_filenames)} unopenable plots:")
            for unopenable_plot_path in plot_manager.failed_to_open_filenames.keys():
                log.warning(f"{unopenable_plot_path}")
    if len(plot_manager.no_key_filenames) > 0:
        log.warning(
            f"There are {len(plot_manager.no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'flax keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
    if debug_show_memo:
        plot_memo_str: str = "Plot Memos:\n"
        with plot_manager:
            for path, plot in plot_manager.plots.items():
                plot_memo_str += f"{path}: {plot.prover.get_memo().hex()}\n"
        log.info(plot_memo_str)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/pools/pool_puzzles.py | flax/pools/pool_puzzles.py | import logging
from typing import Tuple, List, Optional
from blspy import G1Element
from clvm.casts import int_from_bytes, int_to_bytes
from flax.clvm.singleton import SINGLETON_LAUNCHER
from flax.consensus.block_rewards import calculate_pool_reward
from flax.consensus.coinbase import pool_parent_id
from flax.pools.pool_wallet_info import PoolState, LEAVING_POOL, SELF_POOLING
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.coin_spend import CoinSpend
from flax.wallet.puzzles.load_clvm import load_clvm_maybe_recompile
from flax.wallet.puzzles.singleton_top_layer import puzzle_for_singleton
from flax.util.ints import uint32, uint64
log = logging.getLogger(__name__)
# "Full" is the outer singleton, with the inner puzzle filled in
SINGLETON_MOD = load_clvm_maybe_recompile("singleton_top_layer.clvm")
POOL_WAITING_ROOM_MOD = load_clvm_maybe_recompile("pool_waitingroom_innerpuz.clvm")
POOL_MEMBER_MOD = load_clvm_maybe_recompile("pool_member_innerpuz.clvm")
P2_SINGLETON_MOD = load_clvm_maybe_recompile("p2_singleton_or_delayed_puzhash.clvm")
POOL_OUTER_MOD = SINGLETON_MOD
POOL_MEMBER_HASH = POOL_MEMBER_MOD.get_tree_hash()
POOL_WAITING_ROOM_HASH = POOL_WAITING_ROOM_MOD.get_tree_hash()
P2_SINGLETON_HASH = P2_SINGLETON_MOD.get_tree_hash()
POOL_OUTER_MOD_HASH = POOL_OUTER_MOD.get_tree_hash()
SINGLETON_LAUNCHER_HASH = SINGLETON_LAUNCHER.get_tree_hash()
SINGLETON_MOD_HASH = POOL_OUTER_MOD_HASH
SINGLETON_MOD_HASH_HASH = Program.to(SINGLETON_MOD_HASH).get_tree_hash()
def create_waiting_room_inner_puzzle(
    target_puzzle_hash: bytes32,
    relative_lock_height: uint32,
    owner_pubkey: G1Element,
    launcher_id: bytes32,
    genesis_challenge: bytes32,
    delay_time: uint64,
    delay_ph: bytes32,
) -> Program:
    """Curry the pool waiting-room inner puzzle for the given singleton."""
    p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delay_time, delay_ph)
    # Reward prefix: first half of the genesis challenge, zero-padded to 32 bytes.
    pool_reward_prefix = bytes32(genesis_challenge[:16] + b"\x00" * 16)
    return POOL_WAITING_ROOM_MOD.curry(
        target_puzzle_hash,
        p2_singleton_puzzle_hash,
        bytes(owner_pubkey),
        pool_reward_prefix,
        relative_lock_height,
    )
def create_pooling_inner_puzzle(
    target_puzzle_hash: bytes,
    pool_waiting_room_inner_hash: bytes32,
    owner_pubkey: G1Element,
    launcher_id: bytes32,
    genesis_challenge: bytes32,
    delay_time: uint64,
    delay_ph: bytes32,
) -> Program:
    """Curry the pool-member inner puzzle for the given singleton."""
    p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delay_time, delay_ph)
    # Reward prefix: first half of the genesis challenge, zero-padded to 32 bytes.
    pool_reward_prefix = bytes32(genesis_challenge[:16] + b"\x00" * 16)
    curried = POOL_MEMBER_MOD.curry(
        target_puzzle_hash,
        p2_singleton_puzzle_hash,
        bytes(owner_pubkey),
        pool_reward_prefix,
        pool_waiting_room_inner_hash,
    )
    return curried
def create_full_puzzle(inner_puzzle: Program, launcher_id: bytes32) -> Program:
    """Wrap an inner puzzle in the singleton outer layer for this launcher."""
    full_puzzle: Program = puzzle_for_singleton(launcher_id, inner_puzzle)
    return full_puzzle
def create_p2_singleton_puzzle(
    singleton_mod_hash: bytes,
    launcher_id: bytes32,
    seconds_delay: uint64,
    delayed_puzzle_hash: bytes32,
) -> Program:
    """Curry the pay-to-singleton (or delayed payout) puzzle."""
    # Curry params are SINGLETON_MOD_HASH LAUNCHER_ID LAUNCHER_PUZZLE_HASH SECONDS_DELAY DELAYED_PUZZLE_HASH
    curry_args = (
        singleton_mod_hash,
        launcher_id,
        SINGLETON_LAUNCHER_HASH,
        seconds_delay,
        delayed_puzzle_hash,
    )
    return P2_SINGLETON_MOD.curry(*curry_args)
def launcher_id_to_p2_puzzle_hash(launcher_id: bytes32, seconds_delay: uint64, delayed_puzzle_hash: bytes32) -> bytes32:
    """Tree hash of the p2_singleton puzzle for this launcher/delay combination."""
    # NOTE(review): seconds_delay is passed through int_to_bytes here, while
    # create_absorb_spend curries the raw int — presumably both encode to the same
    # clvm atom, so the hashes agree; confirm before changing either call site.
    return create_p2_singleton_puzzle(
        SINGLETON_MOD_HASH, launcher_id, int_to_bytes(seconds_delay), delayed_puzzle_hash
    ).get_tree_hash()
def get_delayed_puz_info_from_launcher_spend(coinsol: CoinSpend) -> Tuple[uint64, bytes32]:
    """Extract (seconds_delay, delayed_puzzle_hash) from a launcher spend's extra data.

    Raises AssertionError if either field is missing from the key/value list.
    """
    # Extra data is (pool_state delayed_puz_info)
    # Delayed puz info is (seconds delayed_puzzle_hash)
    extra_data = Program.from_bytes(bytes(coinsol.solution)).rest().rest().first()
    seconds: Optional[uint64] = None
    delayed_puzzle_hash: Optional[bytes32] = None
    for key, value in extra_data.as_python():
        if key == b"t":
            # "t" -> time delay in seconds
            seconds = int_from_bytes(value)
        elif key == b"h":
            # "h" -> delayed puzzle hash
            delayed_puzzle_hash = bytes32(value)
    assert seconds is not None
    assert delayed_puzzle_hash is not None
    return seconds, delayed_puzzle_hash
######################################
def get_template_singleton_inner_puzzle(inner_puzzle: Program):
    """Return the uncurried template (mod) of a singleton inner puzzle.

    Callers compare the result against POOL_WAITING_ROOM_MOD / POOL_MEMBER_MOD
    via membership tests, so a None result (failed uncurry) simply never matches.
    """
    r = inner_puzzle.uncurry()
    if r is None:
        # Fix: previously returned the bool False where a Program (or nothing) is
        # expected; None is the idiomatic "no template" sentinel and behaves the
        # same in the `in [...]` checks used by every caller in this module.
        return None
    uncurried_inner_puzzle, _args = r
    return uncurried_inner_puzzle
def get_seconds_and_delayed_puzhash_from_p2_singleton_puzzle(puzzle: Program) -> Tuple[uint64, bytes32]:
    """Pull (seconds_delay, delayed_puzzle_hash) back out of a curried p2_singleton puzzle."""
    r = puzzle.uncurry()
    if r is None:
        # NOTE(review): returns the bool False despite the Tuple annotation; any
        # caller that unpacks it would raise TypeError. Consider raising instead.
        return False
    inner_f, args = r
    # Curry order matches create_p2_singleton_puzzle.
    singleton_mod_hash, launcher_id, launcher_puzzle_hash, seconds_delay, delayed_puzzle_hash = list(args.as_iter())
    seconds_delay = uint64(seconds_delay.as_int())
    return seconds_delay, delayed_puzzle_hash.as_atom()
# Verify that a puzzle is a Pool Wallet Singleton
def is_pool_singleton_inner_puzzle(inner_puzzle: Program) -> bool:
    """True if the inner puzzle is a pool inner puzzle (member or waiting room)."""
    return get_template_singleton_inner_puzzle(inner_puzzle) in [POOL_WAITING_ROOM_MOD, POOL_MEMBER_MOD]
def is_pool_waitingroom_inner_puzzle(inner_puzzle: Program) -> bool:
    """True if the inner puzzle is the pool waiting-room puzzle."""
    return get_template_singleton_inner_puzzle(inner_puzzle) in [POOL_WAITING_ROOM_MOD]
def is_pool_member_inner_puzzle(inner_puzzle: Program) -> bool:
    """True if the inner puzzle is the pool-member puzzle."""
    return get_template_singleton_inner_puzzle(inner_puzzle) in [POOL_MEMBER_MOD]
# This spend will use the escape-type spend path for whichever state you are currently in
# If you are currently a waiting inner puzzle, then it will look at your target_state to determine the next
# inner puzzle hash to go to. The member inner puzzle is already committed to its next puzzle hash.
def create_travel_spend(
    last_coin_spend: CoinSpend,
    launcher_coin: Coin,
    current: PoolState,
    target: PoolState,
    genesis_challenge: bytes32,
    delay_time: uint64,
    delay_ph: bytes32,
) -> Tuple[CoinSpend, Program]:
    """Build the spend that moves the pool singleton from ``current`` toward ``target``.

    Returns the CoinSpend of the current singleton coin and the inner puzzle that
    was spent (needed by the caller for signing).

    Raises:
        ValueError: if the current inner puzzle is neither member nor waiting room.
    """
    inner_puzzle: Program = pool_state_to_inner_puzzle(
        current,
        launcher_coin.name(),
        genesis_challenge,
        delay_time,
        delay_ph,
    )
    if is_pool_member_inner_puzzle(inner_puzzle):
        # inner sol is key_value_list ()
        # key_value_list is:
        # "p" -> poolstate as bytes
        inner_sol: Program = Program.to([[("p", bytes(target))], 0])
    elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
        # inner sol is (spend_type, key_value_list, pool_reward_height)
        destination_inner: Program = pool_state_to_inner_puzzle(
            target, launcher_coin.name(), genesis_challenge, delay_time, delay_ph
        )
        log.debug(
            f"create_travel_spend: waitingroom: target PoolState bytes:\n{bytes(target).hex()}\n"
            f"{target}"
            f"hash:{Program.to(bytes(target)).get_tree_hash()}"
        )
        # key_value_list is:
        # "p" -> poolstate as bytes
        inner_sol = Program.to([1, [("p", bytes(target))], destination_inner.get_tree_hash()])  # current or target
    else:
        raise ValueError

    current_singleton: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(last_coin_spend)
    assert current_singleton is not None

    if current_singleton.parent_coin_info == launcher_coin.name():
        # First spend after launch: lineage proof is just (parent_id, amount).
        parent_info_list = Program.to([launcher_coin.parent_coin_info, launcher_coin.amount])
    else:
        # Later spends: lineage proof needs (parent_id, parent_inner_puzzle_hash, amount).
        p = Program.from_bytes(bytes(last_coin_spend.puzzle_reveal))
        last_coin_spend_inner_puzzle: Optional[Program] = get_inner_puzzle_from_puzzle(p)
        assert last_coin_spend_inner_puzzle is not None
        parent_info_list = Program.to(
            [
                last_coin_spend.coin.parent_coin_info,
                last_coin_spend_inner_puzzle.get_tree_hash(),
                last_coin_spend.coin.amount,
            ]
        )
    full_solution: Program = Program.to([parent_info_list, current_singleton.amount, inner_sol])
    full_puzzle: Program = create_full_puzzle(inner_puzzle, launcher_coin.name())

    return (
        CoinSpend(
            current_singleton,
            SerializedProgram.from_program(full_puzzle),
            SerializedProgram.from_program(full_solution),
        ),
        inner_puzzle,
    )
def create_absorb_spend(
    last_coin_spend: CoinSpend,
    current_state: PoolState,
    launcher_coin: Coin,
    height: uint32,
    genesis_challenge: bytes32,
    delay_time: uint64,
    delay_ph: bytes32,
) -> List[CoinSpend]:
    """Build the pair of spends that absorbs the pool reward won at ``height``.

    Returns [singleton spend, reward-coin spend]; both must go into one bundle.

    Raises:
        ValueError: if the current inner puzzle is neither member nor waiting room.
    """
    inner_puzzle: Program = pool_state_to_inner_puzzle(
        current_state, launcher_coin.name(), genesis_challenge, delay_time, delay_ph
    )
    reward_amount: uint64 = calculate_pool_reward(height)
    if is_pool_member_inner_puzzle(inner_puzzle):
        # inner sol is (spend_type, pool_reward_amount, pool_reward_height, extra_data)
        inner_sol: Program = Program.to([reward_amount, height])
    elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
        # inner sol is (spend_type, destination_puzhash, pool_reward_amount, pool_reward_height, extra_data)
        inner_sol = Program.to([0, reward_amount, height])
    else:
        raise ValueError
    # full sol = (parent_info, my_amount, inner_solution)
    coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(last_coin_spend)
    assert coin is not None

    if coin.parent_coin_info == launcher_coin.name():
        # First spend after launch: lineage proof is just (parent_id, amount).
        parent_info: Program = Program.to([launcher_coin.parent_coin_info, launcher_coin.amount])
    else:
        # Later spends: lineage proof needs (parent_id, parent_inner_puzzle_hash, amount).
        p = Program.from_bytes(bytes(last_coin_spend.puzzle_reveal))
        last_coin_spend_inner_puzzle: Optional[Program] = get_inner_puzzle_from_puzzle(p)
        assert last_coin_spend_inner_puzzle is not None
        parent_info = Program.to(
            [
                last_coin_spend.coin.parent_coin_info,
                last_coin_spend_inner_puzzle.get_tree_hash(),
                last_coin_spend.coin.amount,
            ]
        )
    full_solution: SerializedProgram = SerializedProgram.from_program(
        Program.to([parent_info, last_coin_spend.coin.amount, inner_sol])
    )
    full_puzzle: SerializedProgram = SerializedProgram.from_program(
        create_full_puzzle(inner_puzzle, launcher_coin.name())
    )
    assert coin.puzzle_hash == full_puzzle.get_tree_hash()

    # Reconstruct the pool reward coin won at `height` so it can be claimed.
    reward_parent: bytes32 = pool_parent_id(height, genesis_challenge)
    p2_singleton_puzzle: SerializedProgram = SerializedProgram.from_program(
        create_p2_singleton_puzzle(SINGLETON_MOD_HASH, launcher_coin.name(), delay_time, delay_ph)
    )
    reward_coin: Coin = Coin(reward_parent, p2_singleton_puzzle.get_tree_hash(), reward_amount)
    p2_singleton_solution: SerializedProgram = SerializedProgram.from_program(
        Program.to([inner_puzzle.get_tree_hash(), reward_coin.name()])
    )
    assert p2_singleton_puzzle.get_tree_hash() == reward_coin.puzzle_hash
    assert full_puzzle.get_tree_hash() == coin.puzzle_hash
    assert get_inner_puzzle_from_puzzle(Program.from_bytes(bytes(full_puzzle))) is not None
    coin_spends = [
        CoinSpend(coin, full_puzzle, full_solution),
        CoinSpend(reward_coin, p2_singleton_puzzle, p2_singleton_solution),
    ]
    return coin_spends
def get_most_recent_singleton_coin_from_coin_spend(coin_sol: CoinSpend) -> Optional[Coin]:
    """Return the odd-amount coin created by this spend, or None if there is none.

    Singleton coins are identified by an odd amount.
    """
    return next((coin for coin in coin_sol.additions() if coin.amount % 2 == 1), None)
def get_pubkey_from_member_inner_puzzle(inner_puzzle: Program) -> G1Element:
    """Extract the owner public key curried into a pool-member inner puzzle.

    Raises:
        ValueError: if the puzzle cannot be uncurried as a member puzzle.
    """
    args = uncurry_pool_member_inner_puzzle(inner_puzzle)
    if args is None:
        raise ValueError("Unable to extract pubkey")
    (
        _inner_f,
        _target_puzzle_hash,
        _p2_singleton_hash,
        pubkey_program,
        _pool_reward_prefix,
        _escape_puzzlehash,
    ) = args
    return G1Element.from_bytes(pubkey_program.as_atom())
def uncurry_pool_member_inner_puzzle(inner_puzzle: Program):
    """
    Uncurry a "pool member" inner puzzle.

    Returns the 6-tuple
    `(inner_f, target_puzzle_hash, p2_singleton_hash, owner_pubkey, pool_reward_prefix, escape_puzzlehash)`.

    Raises:
        ValueError: if `inner_puzzle` is not a pool-member inner puzzle, or cannot be uncurried.
    """
    if not is_pool_member_inner_puzzle(inner_puzzle):
        raise ValueError("Attempting to unpack a non-waitingroom inner puzzle")
    r = inner_puzzle.uncurry()
    if r is None:
        raise ValueError("Failed to unpack inner puzzle")
    inner_f, args = r
    # p2_singleton_hash is the tree hash of the unique, curried P2_SINGLETON_MOD. See `create_p2_singleton_puzzle`
    # escape_puzzlehash is of the unique, curried POOL_WAITING_ROOM_MOD. See `create_waiting_room_inner_puzzle`
    target_puzzle_hash, p2_singleton_hash, owner_pubkey, pool_reward_prefix, escape_puzzlehash = tuple(args.as_iter())
    return inner_f, target_puzzle_hash, p2_singleton_hash, owner_pubkey, pool_reward_prefix, escape_puzzlehash
def uncurry_pool_waitingroom_inner_puzzle(inner_puzzle: Program) -> Tuple[Program, Program, Program, Program]:
    """
    Uncurry a "pool waiting room" inner puzzle.

    Returns the 4-tuple `(target_puzzle_hash, relative_lock_height, owner_pubkey, p2_singleton_hash)`.
    Note the curried `genesis_challenge` is unpacked but not returned, and the
    return order differs from the curried order.

    Raises:
        ValueError: if `inner_puzzle` is not a waiting-room inner puzzle, or cannot be uncurried.
    """
    if not is_pool_waitingroom_inner_puzzle(inner_puzzle):
        raise ValueError("Attempting to unpack a non-waitingroom inner puzzle")
    r = inner_puzzle.uncurry()
    if r is None:
        raise ValueError("Failed to unpack inner puzzle")
    inner_f, args = r
    v = args.as_iter()
    target_puzzle_hash, p2_singleton_hash, owner_pubkey, genesis_challenge, relative_lock_height = tuple(v)
    return target_puzzle_hash, relative_lock_height, owner_pubkey, p2_singleton_hash
def get_inner_puzzle_from_puzzle(full_puzzle: Program) -> Optional[Program]:
    """Return the pool inner puzzle curried inside a full singleton puzzle, or None.

    Returns None when the program is not curried, when it is not curried with
    exactly two arguments (singleton_struct, inner_puzzle), or when the
    candidate inner puzzle is not a pool inner puzzle.
    """
    p = Program.from_bytes(bytes(full_puzzle))
    r = p.uncurry()
    if r is None:
        return None
    _, args = r
    curried = list(args.as_iter())
    # Previously this unpacked two values unconditionally, so a puzzle curried
    # with a different argument count raised ValueError instead of returning None.
    if len(curried) != 2:
        return None
    inner_puzzle = curried[1]
    if not is_pool_singleton_inner_puzzle(inner_puzzle):
        return None
    return inner_puzzle
def pool_state_from_extra_data(extra_data: Program) -> Optional[PoolState]:
    """Decode the serialized `PoolState` stored in a spend's extra-data list.

    The extra data is a list of (key, value) pairs; the state lives under the
    b"p" key. Returns None when the key is absent or the data is malformed.
    """
    state_bytes: Optional[bytes] = None
    try:
        for key, value in extra_data.as_python():
            if key == b"p":
                state_bytes = value
                break
        if state_bytes is None:
            return None
        return PoolState.from_bytes(state_bytes)
    # Malformed extra data can surface as TypeError (non-iterable shapes) or
    # ValueError (bad pair unpack / invalid serialized PoolState); previously
    # only TypeError was caught, letting ValueError escape to the caller.
    except (TypeError, ValueError) as e:
        log.error(f"Unexpected return from PoolWallet Smart Contract code {e}")
        return None
def solution_to_pool_state(full_spend: CoinSpend) -> Optional[PoolState]:
    """Extract the `PoolState` encoded in a singleton (or launcher) spend's solution.

    Returns None when the spend carries no state change (e.g. an absorb spend).
    """
    full_solution_ser: SerializedProgram = full_spend.solution
    full_solution: Program = Program.from_bytes(bytes(full_solution_ser))
    if full_spend.coin.puzzle_hash == SINGLETON_LAUNCHER_HASH:
        # Launcher spend: state is in the third element (extra data) of the launcher solution.
        extra_data: Program = full_solution.rest().rest().first()
        return pool_state_from_extra_data(extra_data)
    # Not launcher spend: the inner solution is the third element of the outer solution.
    inner_solution: Program = full_solution.rest().rest().first()
    # Spend which is not absorb, and is not the launcher
    num_args = len(inner_solution.as_python())
    assert num_args in (2, 3)
    if num_args == 2:
        # pool member
        if inner_solution.rest().first().as_int() != 0:
            return None
        # This is referred to as p1 in the flaxlisp code
        # spend_type is absorbing money if p1 is a cons box, spend_type is escape if p1 is an atom
        # TODO: The comment above, and in the CLVM, seems wrong
        extra_data = inner_solution.first()
        if isinstance(extra_data.as_python(), bytes):
            # Absorbing: p1 deserialized to a bare atom, so there is no state to decode.
            return None
        return pool_state_from_extra_data(extra_data)
    else:
        # pool waitingroom
        if inner_solution.first().as_int() == 0:
            return None
        extra_data = inner_solution.rest().first()
        return pool_state_from_extra_data(extra_data)
def pool_state_to_inner_puzzle(
    pool_state: PoolState, launcher_id: bytes32, genesis_challenge: bytes32, delay_time: uint64, delay_ph: bytes32
) -> Program:
    """Build the singleton inner puzzle that corresponds to `pool_state`.

    LEAVING_POOL and SELF_POOLING use the waiting-room puzzle directly;
    FARMING_TO_POOL wraps the waiting-room puzzle hash inside the pooling puzzle.
    """
    waiting_room: Program = create_waiting_room_inner_puzzle(
        pool_state.target_puzzle_hash,
        pool_state.relative_lock_height,
        pool_state.owner_pubkey,
        launcher_id,
        genesis_challenge,
        delay_time,
        delay_ph,
    )
    if pool_state.state in (LEAVING_POOL, SELF_POOLING):
        return waiting_room
    return create_pooling_inner_puzzle(
        pool_state.target_puzzle_hash,
        waiting_room.get_tree_hash(),
        pool_state.owner_pubkey,
        launcher_id,
        genesis_challenge,
        delay_time,
        delay_ph,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/pools/pool_wallet_info.py | flax/pools/pool_wallet_info.py | from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from flax.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import Program
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.byte_types import hexstr_to_bytes
from flax.util.ints import uint32, uint8
from flax.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
    """
    From the user's point of view, a pool group can be in these states:
    `SELF_POOLING`: The singleton exists on the blockchain, and we are farming
    block rewards to a wallet address controlled by the user

    `LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
    means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.

    `FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.

    (An earlier revision of this docstring also described `CLAIMING_SELF_POOLED_REWARDS`;
    that state does not exist in this enum.)
    """

    SELF_POOLING = 1
    LEAVING_POOL = 2
    FARMING_TO_POOL = 3
# Module-level aliases so callers can reference the states without the enum prefix.
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@streamable
@dataclass(frozen=True)
class PoolState(Streamable):
    """
    `PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
    `target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
    `target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
    The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
    `relative_lock_height` is zero when in SELF_POOLING state

    NOTE: the field order below is the Streamable (on-chain) serialization order — do not reorder fields.
    """

    version: uint8  # set to POOL_PROTOCOL_VERSION by create_pool_state
    state: uint8  # PoolSingletonState
    # `target_puzzle_hash`: A puzzle_hash we pay to
    # When self-farming, this is a main wallet address
    # When farming-to-pool, the pool sends this to the farmer during pool protocol setup
    target_puzzle_hash: bytes32  # TODO: rename target_puzzle_hash -> pay_to_address
    # owner_pubkey is set by the wallet, once
    owner_pubkey: G1Element
    pool_url: Optional[str]  # pool endpoint; None/"" when self-pooling
    relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
    """Build the initial `PoolState` from a user-supplied configuration dict.

    Only SELF_POOLING and FARMING_TO_POOL are valid initial states; any other
    state name raises ValueError (missing dict keys raise KeyError).
    """
    singleton_state: PoolSingletonState = PoolSingletonState[state_dict["state"]]
    if singleton_state not in (SELF_POOLING, FARMING_TO_POOL):
        raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
    if singleton_state == SELF_POOLING:
        # Self pooling pays the owner directly and needs no pool metadata.
        target_puzzle_hash = owner_puzzle_hash
        pool_url: str = ""
        relative_lock_height = uint32(0)
    else:  # FARMING_TO_POOL
        target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
        pool_url = state_dict["pool_url"]
        relative_lock_height = uint32(state_dict["relative_lock_height"])
    # TODO: change create_pool_state to return error messages, as well
    assert relative_lock_height is not None
    return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
def create_pool_state(
    state: PoolSingletonState,
    target_puzzle_hash: bytes32,
    owner_pubkey: G1Element,
    pool_url: Optional[str],
    relative_lock_height: uint32,
) -> PoolState:
    """Construct a `PoolState` at the current protocol version.

    Raises:
        AssertionError: if `state` is not a valid PoolSingletonState value.
    """
    if state not in set(s.value for s in PoolSingletonState):
        # Bug fix: the message was missing its f-prefix, so "{state}" was emitted literally.
        raise AssertionError(f"state {state} is not a valid PoolSingletonState")
    ps = PoolState(
        POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
    )
    # TODO Move verify here
    return ps
@streamable
@dataclass(frozen=True)
class PoolWalletInfo(Streamable):
    """
    Internal Pool Wallet state, not destined for the blockchain. This can be completely derived with
    the Singleton's CoinSpends list, or with the information from the WalletPoolStore.

    NOTE: field order is the Streamable serialization order — do not reorder fields.
    """

    current: PoolState  # state decoded from the most recent state-bearing singleton spend
    target: Optional[PoolState]  # state a pending transition is heading toward, if any
    launcher_coin: Coin  # the launcher coin that created this singleton
    launcher_id: bytes32
    p2_singleton_puzzle_hash: bytes32  # address that block rewards are initially paid to
    current_inner: Program  # Inner puzzle in current singleton, not revealed yet
    tip_singleton_coin_id: bytes32
    singleton_block_height: uint32  # Block height that current PoolState is from
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/pools/pool_wallet.py | flax/pools/pool_wallet.py | from __future__ import annotations
import asyncio
import dataclasses
import logging
import time
from typing import cast, Any, Optional, Set, Tuple, List, Dict, TYPE_CHECKING
from typing_extensions import final
from blspy import PrivateKey, G2Element, G1Element
from flax.pools.pool_config import PoolWalletConfig, load_pool_config, update_pool_config
from flax.pools.pool_wallet_info import (
PoolWalletInfo,
PoolSingletonState,
PoolState,
FARMING_TO_POOL,
SELF_POOLING,
LEAVING_POOL,
create_pool_state,
)
from flax.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from flax.server.ws_connection import WSFlaxConnection
from flax.types.announcement import Announcement
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.program import Program, SerializedProgram
from flax.types.coin_record import CoinRecord
from flax.types.coin_spend import CoinSpend
from flax.types.spend_bundle import SpendBundle
from flax.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_full_puzzle,
SINGLETON_LAUNCHER,
create_pooling_inner_puzzle,
solution_to_pool_state,
pool_state_to_inner_puzzle,
get_most_recent_singleton_coin_from_coin_spend,
launcher_id_to_p2_puzzle_hash,
create_travel_spend,
uncurry_pool_member_inner_puzzle,
create_absorb_spend,
is_pool_member_inner_puzzle,
is_pool_waitingroom_inner_puzzle,
uncurry_pool_waitingroom_inner_puzzle,
get_delayed_puz_info_from_launcher_spend,
)
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.wallet.derive_keys import (
find_owner_sk,
)
from flax.wallet.sign_coin_spends import sign_coin_spends
from flax.wallet.transaction_record import TransactionRecord
from flax.wallet.util.wallet_types import WalletType
from flax.wallet.wallet import Wallet
from flax.wallet.wallet_coin_record import WalletCoinRecord
from flax.wallet.wallet_info import WalletInfo
from flax.wallet.util.transaction_type import TransactionType
@final
@dataclasses.dataclass
class PoolWallet:
MINIMUM_INITIAL_BALANCE = 1
MINIMUM_RELATIVE_LOCK_HEIGHT = 5
MAXIMUM_RELATIVE_LOCK_HEIGHT = 1000
DEFAULT_MAX_CLAIM_SPENDS = 100
wallet_state_manager: Any
log: logging.Logger
wallet_info: WalletInfo
standard_wallet: Wallet
wallet_id: int
next_transaction_fee: uint64 = uint64(0)
target_state: Optional[PoolState] = None
_owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = None
_update_pool_config_after_sync_task: Optional[asyncio.Task] = None
"""
From the user's perspective, this is not a wallet at all, but a way to control
whether their pooling-enabled plots are being self-farmed, or farmed by a pool,
and by which pool. Self-pooling and joint pooling rewards are swept into the
users' regular wallet.
If this wallet is in SELF_POOLING state, the coin ID associated with the current
pool wallet contains the rewards gained while self-farming, so care must be taken
to disallow joining a new pool while we still have money on the pooling singleton UTXO.
Pools can be joined anonymously, without an account or prior signup.
The ability to change the farm-to target prevents abuse from pools
by giving the user the ability to quickly change pools, or self-farm.
The pool is also protected, by not allowing members to cheat by quickly leaving a pool,
and claiming a block that was pledged to the pool.
The pooling protocol and smart coin prevents a user from quickly leaving a pool
by enforcing a wait time when leaving the pool. A minimum number of blocks must pass
after the user declares that they are leaving the pool, and before they can start to
self-claim rewards again.
Control of switching states is granted to the owner public key.
We reveal the inner_puzzle to the pool during setup of the pooling protocol.
The pool can prove to itself that the inner puzzle pays to the pooling address,
and it can follow state changes in the pooling puzzle by tracing destruction and
creation of coins associate with this pooling singleton (the singleton controlling
this pool group).
The user trusts the pool to send mining rewards to the <XXX address XXX>
TODO: We should mark which address is receiving funds for our current state.
If the pool misbehaves, it is the user's responsibility to leave the pool
It is the Pool's responsibility to claim the rewards sent to the pool_puzzlehash.
The timeout for leaving the pool is expressed in number of blocks from the time
the user expresses their intent to leave.
"""
    @classmethod
    def type(cls) -> uint8:
        """Return the WalletType value identifying this as a pooling wallet."""
        return uint8(WalletType.POOLING_WALLET)
    def id(self):
        """Return the wallet id assigned by the wallet state manager."""
        return self.wallet_info.id
@classmethod
def _verify_self_pooled(cls, state) -> Optional[str]:
err = ""
if state.pool_url not in [None, ""]:
err += " Unneeded pool_url for self-pooling"
if state.relative_lock_height != 0:
err += " Incorrect relative_lock_height for self-pooling"
return None if err == "" else err
@classmethod
def _verify_pooling_state(cls, state) -> Optional[str]:
err = ""
if state.relative_lock_height < cls.MINIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is less than recommended minimum ({cls.MINIMUM_RELATIVE_LOCK_HEIGHT})"
)
elif state.relative_lock_height > cls.MAXIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is greater than recommended maximum ({cls.MAXIMUM_RELATIVE_LOCK_HEIGHT})"
)
if state.pool_url in [None, ""]:
err += " Empty pool url in pooling state"
return err
@classmethod
def _verify_pool_state(cls, state: PoolState) -> Optional[str]:
if state.target_puzzle_hash is None:
return "Invalid puzzle_hash"
if state.version > POOL_PROTOCOL_VERSION:
return (
f"Detected pool protocol version {state.version}, which is "
f"newer than this wallet's version ({POOL_PROTOCOL_VERSION}). Please upgrade "
f"to use this pooling wallet"
)
if state.state == PoolSingletonState.SELF_POOLING:
return cls._verify_self_pooled(state)
elif state.state == PoolSingletonState.FARMING_TO_POOL or state.state == PoolSingletonState.LEAVING_POOL:
return cls._verify_pooling_state(state)
else:
return "Internal Error"
    @classmethod
    def _verify_initial_target_state(cls, initial_target_state):
        """Raise ValueError if `initial_target_state` is not a valid PoolState."""
        err = cls._verify_pool_state(initial_target_state)
        if err:
            raise ValueError(f"Invalid internal Pool State: {err}: {initial_target_state}")
    async def get_spend_history(self) -> List[Tuple[uint32, CoinSpend]]:
        """Return this wallet's recorded singleton spends as (block_height, spend) pairs."""
        return await self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)
    async def get_current_state(self) -> PoolWalletInfo:
        """Derive the full `PoolWalletInfo` from the stored singleton spend history."""
        history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
        all_spends: List[CoinSpend] = [cs for _, cs in history]
        # We must have at least the launcher spend
        assert len(all_spends) >= 1
        launcher_coin: Coin = all_spends[0].coin
        delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(all_spends[0])
        tip_singleton_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(all_spends[-1])
        launcher_id: bytes32 = launcher_coin.name()
        p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delayed_seconds, delayed_puzhash)
        assert tip_singleton_coin is not None
        # Walk backwards from the tip to the most recent spend that encodes a
        # pool state (solution_to_pool_state returns None for absorb spends).
        curr_spend_i = len(all_spends) - 1
        pool_state: Optional[PoolState] = None
        last_singleton_spend_height = uint32(0)
        while pool_state is None:
            full_spend: CoinSpend = all_spends[curr_spend_i]
            pool_state = solution_to_pool_state(full_spend)
            last_singleton_spend_height = uint32(history[curr_spend_i][0])
            curr_spend_i -= 1
        assert pool_state is not None
        current_inner = pool_state_to_inner_puzzle(
            pool_state,
            launcher_coin.name(),
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        return PoolWalletInfo(
            pool_state,
            self.target_state,
            launcher_coin,
            launcher_id,
            p2_singleton_puzzle_hash,
            current_inner,
            tip_singleton_coin.name(),
            last_singleton_spend_height,
        )
    async def get_unconfirmed_transactions(self) -> List[TransactionRecord]:
        """Return this wallet's transactions that have not yet been confirmed."""
        return await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
    async def get_tip(self) -> Tuple[uint32, CoinSpend]:
        """Return the most recently recorded (block_height, spend) for this wallet's singleton."""
        return (await self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id))[-1]
    async def update_pool_config(self) -> None:
        """Write this wallet's current state into the pool config file.

        Preserves any existing payout_instructions for this launcher id; when
        none exist, a fresh puzzle hash from the standard wallet is generated.
        """
        current_state: PoolWalletInfo = await self.get_current_state()
        pool_config_list: List[PoolWalletConfig] = load_pool_config(self.wallet_state_manager.root_path)
        pool_config_dict: Dict[bytes32, PoolWalletConfig] = {c.launcher_id: c for c in pool_config_list}
        existing_config: Optional[PoolWalletConfig] = pool_config_dict.get(current_state.launcher_id, None)
        payout_instructions: str = existing_config.payout_instructions if existing_config is not None else ""
        if len(payout_instructions) == 0:
            payout_instructions = (await self.standard_wallet.get_new_puzzlehash()).hex()
            self.log.info(f"New config entry. Generated payout_instructions puzzle hash: {payout_instructions}")
        new_config: PoolWalletConfig = PoolWalletConfig(
            current_state.launcher_id,
            current_state.current.pool_url if current_state.current.pool_url else "",
            payout_instructions,
            current_state.current.target_puzzle_hash,
            current_state.p2_singleton_puzzle_hash,
            current_state.current.owner_pubkey,
        )
        pool_config_dict[new_config.launcher_id] = new_config
        # `update_pool_config` here resolves to the imported config writer, not this method.
        await update_pool_config(self.wallet_state_manager.root_path, list(pool_config_dict.values()))
    @staticmethod
    def get_next_interesting_coin(spend: CoinSpend) -> Optional[Coin]:
        """Return the next coin to watch after `spend`, or None.

        `spend` is the CoinSpend of one of the coins that we cared about. That coin
        was spent in a block, but might be in a reorg. If we return a value, it is
        a coin that we are also interested in (to support two transitions per block).
        """
        return get_most_recent_singleton_coin_from_coin_spend(spend)
    async def apply_state_transition(self, new_state: CoinSpend, block_height: uint32) -> bool:
        """
        Updates the Pool state (including DB) with new singleton spends.
        The DB must be committed after calling this method. All validation should be done here. Returns True iff
        the spend is a valid transition spend for the singleton, False otherwise.
        """
        tip: Tuple[uint32, CoinSpend] = await self.get_tip()
        tip_spend = tip[1]
        tip_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
        assert tip_coin is not None
        spent_coin_name: bytes32 = tip_coin.name()
        if spent_coin_name != new_state.coin.name():
            # Only a spend of the current tip is a valid transition; a spend we
            # already recorded is logged at info level, anything else is a warning.
            history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
            if new_state.coin.name() in [sp.coin.name() for _, sp in history]:
                self.log.info(f"Already have state transition: {new_state.coin.name().hex()}")
            else:
                self.log.warning(
                    f"Failed to apply state transition. tip: {tip_coin} new_state: {new_state} height {block_height}"
                )
            return False
        await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, new_state, block_height)
        tip_spend = (await self.get_tip())[1]
        self.log.info(f"New PoolWallet singleton tip_coin: {tip_spend} farmed at height {block_height}")
        # If we have reached the target state, resets it to None. Loops back to get current state
        for _, added_spend in reversed(
            await self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)
        ):
            latest_state: Optional[PoolState] = solution_to_pool_state(added_spend)
            if latest_state is not None:
                if self.target_state == latest_state:
                    self.target_state = None
                    self.next_transaction_fee = uint64(0)
                break
        await self.update_pool_config_after_sync()  # Update pool config after we finish syncing.
        return True
    async def update_pool_config_after_sync(self) -> None:
        """
        Updates the pool config file with the current state after sync is complete.
        If the wallet crashes, the config file will be auto updated on restart.
        """
        # we only need one task running at a time.
        if self._update_pool_config_after_sync_task is None or self._update_pool_config_after_sync_task.done():

            async def update_pool_config_after_sync_task():
                # Poll until the wallet reports synced, then write the config once.
                synced = await self.wallet_state_manager.synced()
                while not synced:
                    await asyncio.sleep(5)  # we sync pretty quickly, so I think this is ok.
                    synced = await self.wallet_state_manager.synced()
                await self.update_pool_config()
                self.log.info("Updated pool config after syncing finished.")

            self._update_pool_config_after_sync_task = asyncio.create_task(update_pool_config_after_sync_task())
    async def rewind(self, block_height: int) -> bool:
        """
        Rolls back all transactions after block_height, and if creation was after block_height, deletes the wallet.
        Returns True if the wallet should be removed.
        """
        try:
            history: List[Tuple[uint32, CoinSpend]] = await self.wallet_state_manager.pool_store.get_spends_for_wallet(
                self.wallet_id
            )
            prev_state: PoolWalletInfo = await self.get_current_state()
            await self.wallet_state_manager.pool_store.rollback(block_height, self.wallet_id)
            # If the first (launcher) spend is above the rollback height, the whole wallet goes away.
            if len(history) > 0 and history[0][0] > block_height:
                return True
            else:
                # Keep the wallet; refresh the on-disk config if the rollback changed our state.
                if await self.get_current_state() != prev_state:
                    await self.update_pool_config()
                return False
        except Exception as e:
            # Best-effort: a failed rewind is logged and treated as "keep the wallet".
            self.log.error(f"Exception rewinding: {e}")
            return False
    @classmethod
    async def create(
        cls,
        wallet_state_manager: Any,
        wallet: Wallet,
        launcher_coin_id: bytes32,
        block_spends: List[CoinSpend],
        block_height: uint32,
        *,
        name: Optional[str] = None,
    ) -> PoolWallet:
        """
        This creates a new PoolWallet with only one spend: the launcher spend. The DB MUST be committed after calling
        this method.

        Asserts that `launcher_coin_id` is among `block_spends`.
        """
        wallet_info = await wallet_state_manager.user_store.create_wallet(
            "Pool wallet", WalletType.POOLING_WALLET.value, ""
        )
        pool_wallet = cls(
            wallet_state_manager=wallet_state_manager,
            log=logging.getLogger(name if name else __name__),
            wallet_info=wallet_info,
            wallet_id=wallet_info.id,
            standard_wallet=wallet,
        )
        # Locate the launcher spend within this block's spends.
        launcher_spend: Optional[CoinSpend] = None
        for spend in block_spends:
            if spend.coin.name() == launcher_coin_id:
                launcher_spend = spend
        assert launcher_spend is not None
        await wallet_state_manager.pool_store.add_spend(pool_wallet.wallet_id, launcher_spend, block_height)
        await pool_wallet.update_pool_config()
        # Subscribe to the p2_singleton puzzle hash so reward coins are tracked.
        p2_puzzle_hash: bytes32 = (await pool_wallet.get_current_state()).p2_singleton_puzzle_hash
        await wallet_state_manager.add_new_wallet(pool_wallet, pool_wallet.wallet_id, create_puzzle_hashes=False)
        await wallet_state_manager.add_interested_puzzle_hashes([p2_puzzle_hash], [pool_wallet.wallet_id])
        return pool_wallet
    @classmethod
    async def create_from_db(
        cls,
        wallet_state_manager: Any,
        wallet: Wallet,
        wallet_info: WalletInfo,
        name: Optional[str] = None,
    ) -> PoolWallet:
        """
        This creates a PoolWallet from DB. However, all data is already handled by WalletPoolStore, so we don't need
        to do anything here.
        """
        pool_wallet = cls(
            wallet_state_manager=wallet_state_manager,
            log=logging.getLogger(name if name else __name__),
            wallet_info=wallet_info,
            wallet_id=wallet_info.id,
            standard_wallet=wallet,
        )
        await pool_wallet.update_pool_config()
        return pool_wallet
@staticmethod
async def create_new_pool_wallet_transaction(
wallet_state_manager: Any,
main_wallet: Wallet,
initial_target_state: PoolState,
fee: uint64 = uint64(0),
p2_singleton_delay_time: Optional[uint64] = None,
p2_singleton_delayed_ph: Optional[bytes32] = None,
) -> Tuple[TransactionRecord, bytes32, bytes32]:
"""
A "plot NFT", or pool wallet, represents the idea of a set of plots that all pay to
the same pooling puzzle. This puzzle is a `flax singleton` that is
parameterized with a public key controlled by the user's wallet
(a `smart coin`). It contains an inner puzzle that can switch between
paying block rewards to a pool, or to a user's own wallet.
Call under the wallet state manager lock
"""
amount = 1
standard_wallet = main_wallet
if p2_singleton_delayed_ph is None:
p2_singleton_delayed_ph = await main_wallet.get_new_puzzlehash()
if p2_singleton_delay_time is None:
p2_singleton_delay_time = uint64(604800)
unspent_records = await wallet_state_manager.coin_store.get_unspent_coins_for_wallet(standard_wallet.wallet_id)
balance = await standard_wallet.get_confirmed_balance(unspent_records)
if balance < PoolWallet.MINIMUM_INITIAL_BALANCE:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool.")
if balance < PoolWallet.MINIMUM_INITIAL_BALANCE + fee:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool with fee {fee}.")
# Verify Parameters - raise if invalid
PoolWallet._verify_initial_target_state(initial_target_state)
spend_bundle, singleton_puzzle_hash, launcher_coin_id = await PoolWallet.generate_launcher_spend(
standard_wallet,
uint64(1),
fee,
initial_target_state,
wallet_state_manager.constants.GENESIS_CHALLENGE,
p2_singleton_delay_time,
p2_singleton_delayed_ph,
)
if spend_bundle is None:
raise ValueError("failed to generate ID for wallet")
standard_wallet_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=singleton_puzzle_hash,
amount=uint64(amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet_state_manager.main_wallet.id(),
sent_to=[],
memos=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await standard_wallet.push_transaction(standard_wallet_record)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(
launcher_coin_id, p2_singleton_delay_time, p2_singleton_delayed_ph
)
return standard_wallet_record, p2_singleton_puzzle_hash, launcher_coin_id
    async def _get_owner_key_cache(self) -> Tuple[PrivateKey, uint32]:
        """Return (owner secret key, derivation index), deriving and caching on first use."""
        if self._owner_sk_and_index is None:
            self._owner_sk_and_index = find_owner_sk(
                [self.wallet_state_manager.private_key], (await self.get_current_state()).current.owner_pubkey
            )
        # find_owner_sk can return None when no derivation matches; that would be an internal error here.
        assert self._owner_sk_and_index is not None
        return self._owner_sk_and_index
    async def get_pool_wallet_index(self) -> uint32:
        """Return the key-derivation index of this wallet's owner key."""
        return (await self._get_owner_key_cache())[1]
    async def sign(self, coin_spend: CoinSpend) -> SpendBundle:
        """Sign `coin_spend`, preferring a derived pool-owner key and falling back
        to the standard wallet's secret key store."""

        async def pk_to_sk(pk: G1Element) -> PrivateKey:
            # Map a public key to its secret key for signing.
            s = find_owner_sk([self.wallet_state_manager.private_key], pk)
            if s is None:
                return self.standard_wallet.secret_key_store.secret_key_for_public_key(pk)
            else:
                # Note that pool_wallet_index may be from another wallet than self.wallet_id
                owner_sk, pool_wallet_index = s
                if owner_sk is None:
                    return self.standard_wallet.secret_key_store.secret_key_for_public_key(pk)
                return owner_sk

        return await sign_coin_spends(
            [coin_spend],
            pk_to_sk,
            self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
            self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
        )
    async def generate_fee_transaction(self, fee: uint64, coin_announcements=None) -> TransactionRecord:
        """Build a signed zero-amount transaction from the standard wallet that carries `fee`.

        The transaction sends 0 back to one of our own puzzle hashes; its only
        purpose is to attach the fee (and optionally consume coin announcements).
        """
        fee_tx = await self.standard_wallet.generate_signed_transaction(
            uint64(0),
            (await self.standard_wallet.get_new_puzzlehash()),
            fee=fee,
            origin_id=None,
            coins=None,
            primaries=None,
            ignore_max_send_amount=False,
            coin_announcements_to_consume=coin_announcements,
        )
        return fee_tx
    async def publish_transactions(self, travel_tx: TransactionRecord, fee_tx: Optional[TransactionRecord]):
        """Record the travel (and optional fee) transactions; only the travel tx is broadcast."""
        # We create two transaction records, one for the pool wallet to keep track of the travel TX, and another
        # for the standard wallet to keep track of the fee. However, we will only submit the first one to the
        # blockchain, and this one has the fee inside it as well.
        # The fee tx, if present, will be added to the DB with no spend_bundle set, which has the effect that it
        # will not be sent to full nodes.
        await self.wallet_state_manager.add_pending_transaction(travel_tx)
        if fee_tx is not None:
            await self.wallet_state_manager.add_pending_transaction(dataclasses.replace(fee_tx, spend_bundle=None))
    async def generate_travel_transactions(self, fee: uint64) -> Tuple[TransactionRecord, Optional[TransactionRecord]]:
        """Create and publish the spend moving the singleton toward `self.target_state`.

        When currently FARMING_TO_POOL, the next hop is always the LEAVING_POOL
        waiting-room state; otherwise the target state is used directly.
        Returns (travel transaction, optional fee transaction).
        """
        # target_state is contained within pool_wallet_state
        pool_wallet_info: PoolWalletInfo = await self.get_current_state()
        spend_history = await self.get_spend_history()
        last_coin_spend: CoinSpend = spend_history[-1][1]
        delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(spend_history[0][1])
        assert pool_wallet_info.target is not None
        next_state = pool_wallet_info.target
        if pool_wallet_info.current.state in [FARMING_TO_POOL]:
            # Leaving a pool always goes through the waiting-room (LEAVING_POOL) state first.
            next_state = create_pool_state(
                LEAVING_POOL,
                pool_wallet_info.current.target_puzzle_hash,
                pool_wallet_info.current.owner_pubkey,
                pool_wallet_info.current.pool_url,
                pool_wallet_info.current.relative_lock_height,
            )
        new_inner_puzzle = pool_state_to_inner_puzzle(
            next_state,
            pool_wallet_info.launcher_coin.name(),
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        new_full_puzzle: SerializedProgram = SerializedProgram.from_program(
            create_full_puzzle(new_inner_puzzle, pool_wallet_info.launcher_coin.name())
        )
        outgoing_coin_spend, inner_puzzle = create_travel_spend(
            last_coin_spend,
            pool_wallet_info.launcher_coin,
            pool_wallet_info.current,
            next_state,
            self.wallet_state_manager.constants.GENESIS_CHALLENGE,
            delayed_seconds,
            delayed_puzhash,
        )
        tip = (await self.get_tip())[1]
        tip_coin = tip.coin
        singleton = tip.additions()[0]
        singleton_id = singleton.name()
        # Sanity checks: the travel spend must consume the current tip singleton.
        assert outgoing_coin_spend.coin.parent_coin_info == tip_coin.name()
        assert outgoing_coin_spend.coin.name() == singleton_id
        assert new_inner_puzzle != inner_puzzle
        # Verify the current inner puzzle embeds our owner key before signing.
        if is_pool_member_inner_puzzle(inner_puzzle):
            (
                inner_f,
                target_puzzle_hash,
                p2_singleton_hash,
                pubkey_as_program,
                pool_reward_prefix,
                escape_puzzle_hash,
            ) = uncurry_pool_member_inner_puzzle(inner_puzzle)
            pk_bytes: bytes = bytes(pubkey_as_program.as_atom())
            assert len(pk_bytes) == 48
            owner_pubkey = G1Element.from_bytes(pk_bytes)
            assert owner_pubkey == pool_wallet_info.current.owner_pubkey
        elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
            (
                target_puzzle_hash,  # payout_puzzle_hash
                relative_lock_height,
                owner_pubkey,
                p2_singleton_hash,
            ) = uncurry_pool_waitingroom_inner_puzzle(inner_puzzle)
            pk_bytes = bytes(owner_pubkey.as_atom())
            assert len(pk_bytes) == 48
            # NOTE(review): `owner_pubkey` here is a Program atom, not a G1Element;
            # this equality relies on Program's __eq__ coercing the other operand — confirm.
            assert owner_pubkey == pool_wallet_info.current.owner_pubkey
        else:
            raise RuntimeError("Invalid state")
        signed_spend_bundle = await self.sign(outgoing_coin_spend)
        assert signed_spend_bundle.removals()[0].puzzle_hash == singleton.puzzle_hash
        assert signed_spend_bundle.removals()[0].name() == singleton.name()
        assert signed_spend_bundle is not None
        fee_tx: Optional[TransactionRecord] = None
        if fee > 0:
            # Fold the fee spend into the travel bundle so a single bundle is broadcast.
            fee_tx = await self.generate_fee_transaction(fee)
            signed_spend_bundle = SpendBundle.aggregate([signed_spend_bundle, fee_tx.spend_bundle])
        tx_record = TransactionRecord(
            confirmed_at_height=uint32(0),
            created_at_time=uint64(int(time.time())),
            to_puzzle_hash=new_full_puzzle.get_tree_hash(),
            amount=uint64(1),
            fee_amount=fee,
            confirmed=False,
            sent=uint32(0),
            spend_bundle=signed_spend_bundle,
            additions=signed_spend_bundle.additions(),
            removals=signed_spend_bundle.removals(),
            wallet_id=self.id(),
            sent_to=[],
            trade_id=None,
            memos=[],
            type=uint32(TransactionType.OUTGOING_TX.value),
            name=signed_spend_bundle.name(),
        )
        await self.publish_transactions(tx_record, fee_tx)
        return tx_record, fee_tx
    @staticmethod
    async def generate_launcher_spend(
        standard_wallet: Wallet,
        amount: uint64,
        fee: uint64,
        initial_target_state: PoolState,
        genesis_challenge: bytes32,
        delay_time: uint64,
        delay_ph: bytes32,
    ) -> Tuple[SpendBundle, bytes32, bytes32]:
        """
        Creates the initial singleton, which includes spending an origin coin, the launcher, and creating a singleton
        with the "pooling" inner state, which can be either self pooling or using a pool.

        Returns a tuple of (aggregated spend bundle, full puzzle hash of the new singleton,
        launcher coin name/id).

        Raises:
            ValueError: if the wallet cannot supply enough coins, or the target state is
                neither SELF_POOLING nor FARMING_TO_POOL.
        """
        coins: Set[Coin] = await standard_wallet.select_coins(uint64(amount + fee))
        if coins is None:
            raise ValueError("Not enough coins to create pool wallet")
        # Any selected coin can act as the parent of the launcher coin.
        launcher_parent: Coin = coins.copy().pop()
        genesis_launcher_puz: Program = SINGLETON_LAUNCHER
        launcher_coin: Coin = Coin(launcher_parent.name(), genesis_launcher_puz.get_tree_hash(), amount)
        # Waiting-room ("escaping") inner puzzle; its hash is curried into the pooling puzzle below.
        escaping_inner_puzzle: Program = create_waiting_room_inner_puzzle(
            initial_target_state.target_puzzle_hash,
            initial_target_state.relative_lock_height,
            initial_target_state.owner_pubkey,
            launcher_coin.name(),
            genesis_challenge,
            delay_time,
            delay_ph,
        )
        escaping_inner_puzzle_hash = escaping_inner_puzzle.get_tree_hash()
        self_pooling_inner_puzzle: Program = create_pooling_inner_puzzle(
            initial_target_state.target_puzzle_hash,
            escaping_inner_puzzle_hash,
            initial_target_state.owner_pubkey,
            launcher_coin.name(),
            genesis_challenge,
            delay_time,
            delay_ph,
        )
        # NOTE(review): the pairing (SELF_POOLING -> waiting-room puzzle, FARMING_TO_POOL ->
        # pooling puzzle) looks inverted by name but appears intentional — confirm against the
        # pool protocol state machine before changing.
        if initial_target_state.state == SELF_POOLING:
            puzzle = escaping_inner_puzzle
        elif initial_target_state.state == FARMING_TO_POOL:
            puzzle = self_pooling_inner_puzzle
        else:
            raise ValueError("Invalid initial state")
        full_pooling_puzzle: Program = create_full_puzzle(puzzle, launcher_id=launcher_coin.name())
        puzzle_hash: bytes32 = full_pooling_puzzle.get_tree_hash()
        # Extra data committed to by the launcher: pool state, delay time, delay puzzle hash.
        pool_state_bytes = Program.to([("p", bytes(initial_target_state)), ("t", delay_time), ("h", delay_ph)])
        announcement_set: Set[Announcement] = set()
        announcement_message = Program.to([puzzle_hash, amount, pool_state_bytes]).get_tree_hash()
        announcement_set.add(Announcement(launcher_coin.name(), announcement_message))
        # The standard wallet pays for the launcher and asserts the launcher's announcement,
        # binding both spends together.
        create_launcher_tx_record: Optional[TransactionRecord] = await standard_wallet.generate_signed_transaction(
            amount,
            genesis_launcher_puz.get_tree_hash(),
            fee,
            launcher_parent.name(),
            coins,
            None,
            False,
            announcement_set,
        )
        assert create_launcher_tx_record is not None and create_launcher_tx_record.spend_bundle is not None
        genesis_launcher_solution: Program = Program.to([puzzle_hash, amount, pool_state_bytes])
        launcher_cs: CoinSpend = CoinSpend(
            launcher_coin,
            SerializedProgram.from_program(genesis_launcher_puz),
            SerializedProgram.from_program(genesis_launcher_solution),
        )
        # The launcher spend itself needs no signature (empty aggregate).
        launcher_sb: SpendBundle = SpendBundle([launcher_cs], G2Element())
        # Current inner will be updated when state is verified on the blockchain
        full_spend: SpendBundle = SpendBundle.aggregate([create_launcher_tx_record.spend_bundle, launcher_sb])
        return full_spend, puzzle_hash, launcher_coin.name()
async def join_pool(
self, target_state: PoolState, fee: uint64
) -> Tuple[uint64, TransactionRecord, Optional[TransactionRecord]]:
if target_state.state != FARMING_TO_POOL:
raise ValueError(f"join_pool must be called with target_state={FARMING_TO_POOL} (FARMING_TO_POOL)")
if self.target_state is not None:
raise ValueError(f"Cannot join a pool while waiting for target state: {self.target_state}")
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot join pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
current_state: PoolWalletInfo = await self.get_current_state()
total_fee = fee
if current_state.current == target_state:
self.target_state = None
msg = f"Asked to change to current state. Target = {target_state}"
self.log.info(msg)
raise ValueError(msg)
elif current_state.current.state in [SELF_POOLING, LEAVING_POOL]:
total_fee = fee
elif current_state.current.state == FARMING_TO_POOL:
total_fee = uint64(fee * 2)
if self.target_state is not None:
raise ValueError(
f"Cannot change to state {target_state} when already having target state: {self.target_state}"
)
PoolWallet._verify_initial_target_state(target_state)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/pools/__init__.py | flax/pools/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/pools/pool_config.py | flax/pools/pool_config.py | from __future__ import annotations
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List
from blspy import G1Element
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.byte_types import hexstr_to_bytes
from flax.util.config import load_config, lock_and_load_config, save_config
from flax.util.streamable import Streamable, streamable
"""
Config example
This is what goes into the user's config file, to communicate between the wallet and the farmer processes.
pool_list:
launcher_id: ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa
owner_public_key: 84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5
payout_instructions: c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8
pool_url: localhost
p2_singleton_puzzle_hash: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
target_puzzle_hash: 344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58
""" # noqa
log = logging.getLogger(__name__)
@streamable
@dataclass(frozen=True)
class PoolWalletConfig(Streamable):
    """
    One entry of the ``pool.pool_list`` section of config.yaml, shared between the
    wallet and farmer processes. Frozen/streamable so it can be serialized to and
    from the YAML config via to_json_dict/from_json_dict.
    """

    launcher_id: bytes32
    pool_url: str
    payout_instructions: str
    target_puzzle_hash: bytes32
    p2_singleton_puzzle_hash: bytes32
    owner_public_key: G1Element
def load_pool_config(root_path: Path) -> List[PoolWalletConfig]:
    """
    Load the pool configuration entries from ``config.yaml``.

    Each dict in ``pool.pool_list`` is parsed into a PoolWalletConfig. Malformed
    entries are logged and skipped (best-effort) so one bad entry cannot prevent
    the rest from loading.

    Args:
        root_path: Root directory containing ``config.yaml``.

    Returns:
        The successfully parsed entries (possibly an empty list).
    """
    config = load_config(root_path, "config.yaml")
    ret_list: List[PoolWalletConfig] = []
    # The "pool" section may be absent entirely, and "pool_list" may be present but
    # explicitly null in the YAML; treat every such case as "no entries" instead of
    # raising KeyError.
    pool_list = (config.get("pool") or {}).get("pool_list", [])
    if pool_list is not None:
        for pool_config_dict in pool_list:
            try:
                pool_config = PoolWalletConfig(
                    bytes32.from_hexstr(pool_config_dict["launcher_id"]),
                    pool_config_dict["pool_url"],
                    pool_config_dict["payout_instructions"],
                    bytes32.from_hexstr(pool_config_dict["target_puzzle_hash"]),
                    bytes32.from_hexstr(pool_config_dict["p2_singleton_puzzle_hash"]),
                    G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["owner_public_key"])),
                )
                ret_list.append(pool_config)
            except Exception as e:
                # Deliberate broad catch: skip the invalid entry, keep loading the rest.
                log.error(f"Exception loading config: {pool_config_dict} {e}")
    return ret_list
# TODO: remove this a few versions after 1.3, since authentication_public_key is deprecated. This is here to support
# downgrading to versions older than 1.3.
def add_auth_key(root_path: Path, config_entry: PoolWalletConfig, auth_key: G1Element):
    """
    Attach the (deprecated) authentication public key to the config.yaml pool entry
    whose owner key matches ``config_entry``, writing the file back only when an
    entry actually changed.
    """
    with lock_and_load_config(root_path, "config.yaml") as config:
        pool_list = config["pool"].get("pool_list", [])
        updated = False
        for entry in pool_list if pool_list is not None else []:
            try:
                # Only touch the entry belonging to this owner key.
                if hexstr_to_bytes(entry["owner_public_key"]) != bytes(config_entry.owner_public_key):
                    continue
                auth_key_hex = bytes(auth_key).hex()
                if entry.get("authentication_public_key", "") != auth_key_hex:
                    entry["authentication_public_key"] = auth_key_hex
                    updated = True
            except Exception as e:
                # Best-effort: a malformed entry is logged, not fatal.
                log.error(f"Exception updating config: {entry} {e}")
        if updated:
            log.info(f"Updating pool config for auth key: {auth_key}")
            config["pool"]["pool_list"] = pool_list
            save_config(root_path, "config.yaml", config)
async def update_pool_config(root_path: Path, pool_config_list: List[PoolWalletConfig]):
    """Replace the ``pool.pool_list`` section of config.yaml with the given entries."""
    with lock_and_load_config(root_path, "config.yaml") as full_config:
        serialized_entries = [entry.to_json_dict() for entry in pool_config_list]
        full_config["pool"]["pool_list"] = serialized_entries
        save_config(root_path, "config.yaml", full_config)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/blockchain.py | flax/consensus/blockchain.py | from __future__ import annotations
import asyncio
import dataclasses
import logging
import multiprocessing
import traceback
from concurrent.futures import Executor
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from multiprocessing.context import BaseContext
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
from flax.consensus.block_body_validation import validate_block_body
from flax.consensus.block_header_validation import validate_unfinished_header_block
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from flax.consensus.find_fork_point import find_fork_point_in_chain
from flax.consensus.full_block_to_block_record import block_to_block_record
from flax.consensus.multiprocess_validation import (
PreValidationResult,
_run_generator,
pre_validate_blocks_multiprocessing,
)
from flax.full_node.block_height_map import BlockHeightMap
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.types.block_protocol import BlockInfo
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.program import SerializedProgram
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.blockchain_format.vdf import VDFInfo
from flax.types.coin_record import CoinRecord
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.types.header_block import HeaderBlock
from flax.types.unfinished_block import UnfinishedBlock
from flax.types.unfinished_header_block import UnfinishedHeaderBlock
from flax.types.weight_proof import SubEpochChallengeSegment
from flax.util.errors import ConsensusError, Err
from flax.util.generator_tools import get_block_header, tx_removals_and_additions
from flax.util.hash import std_hash
from flax.util.inline_executor import InlineExecutor
from flax.util.ints import uint16, uint32, uint64, uint128
from flax.util.setproctitle import getproctitle, setproctitle
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
    """
    When Blockchain.receive_block(b) is called, one of these results is returned,
    showing whether the block was added to the chain (extending the peak),
    and if not, why it was not added.

    NOTE(review): the numeric values are presumably relied on by callers/serialization;
    confirm before renumbering.
    """

    NEW_PEAK = 1  # Added to the peak of the blockchain
    ADDED_AS_ORPHAN = 2  # Added as an orphan/stale block (not a new peak of the chain)
    INVALID_BLOCK = 3  # Block was not added because it was invalid
    ALREADY_HAVE_BLOCK = 4  # Block is already present in this blockchain
    DISCONNECTED_BLOCK = 5  # Block's parent (previous pointer) is not in this blockchain
@dataclasses.dataclass
class StateChangeSummary:
    """
    Summary of the state changes produced when a new peak is adopted (see
    Blockchain._reconsider_peak): the new peak, where the chain forked, what was
    rolled back, and what was newly applied.
    """

    # The new peak of the chain.
    peak: BlockRecord
    # Height of the last block common to the old and new chain (0 for genesis/full reorg).
    fork_height: uint32
    # Coin records reverted when rolling back to the fork point.
    rolled_back_records: List[CoinRecord]
    # NPC results for each new transaction block applied on the new chain.
    new_npc_results: List[NPCResult]
    # Reward coins included by the newly applied blocks.
    new_rewards: List[Coin]
class Blockchain(BlockchainInterface):
    """
    In-memory view of the chain backed by the block/coin stores. Construct via the
    async ``create`` factory; do not instantiate directly.
    """

    constants: ConsensusConstants
    # peak of the blockchain (None until the first block is added)
    _peak_height: Optional[uint32]
    # All blocks in peak path are guaranteed to be included, can include orphan blocks
    __block_records: Dict[bytes32, BlockRecord]
    # all hashes of blocks in block_record by height, used for garbage collection
    __heights_in_cache: Dict[uint32, Set[bytes32]]
    # maps block height (of the current heaviest chain) to block hash and sub
    # epoch summaries
    __height_map: BlockHeightMap
    # Unspent Store
    coin_store: CoinStore
    # Store
    block_store: BlockStore
    # Used to verify blocks in parallel (process pool, or inline when single-threaded)
    pool: Executor
    # Set holding seen compact proofs, in order to avoid duplicates.
    _seen_compact_proofs: Set[Tuple[VDFInfo, uint32]]
    # Whether blockchain is shut down or not
    _shut_down: bool
    # Lock to prevent simultaneous reads and writes
    lock: asyncio.Lock
    compact_proof_lock: asyncio.Lock
    @staticmethod
    async def create(
        coin_store: CoinStore,
        block_store: BlockStore,
        consensus_constants: ConsensusConstants,
        blockchain_dir: Path,
        reserved_cores: int,
        multiprocessing_context: Optional[BaseContext] = None,
        *,
        single_threaded: bool = False,
    ) -> "Blockchain":
        """
        Initializes a blockchain with the BlockRecords from disk, assuming they have all been
        validated. Uses the genesis block given in override_constants, or as a fallback,
        in the consensus constants config.

        When ``single_threaded`` is set, validation runs in-process via InlineExecutor;
        otherwise a ProcessPoolExecutor is created with (cpu_count - reserved_cores)
        workers (minimum 1).
        """
        self = Blockchain()
        self.lock = asyncio.Lock()  # External lock handled by full node
        self.compact_proof_lock = asyncio.Lock()
        if single_threaded:
            self.pool = InlineExecutor()
        else:
            cpu_count = multiprocessing.cpu_count()
            if cpu_count > 61:
                cpu_count = 61  # Windows Server 2016 has an issue https://bugs.python.org/issue26903
            num_workers = max(cpu_count - reserved_cores, 1)
            # Workers get a derived process title for easier identification in ps/top.
            self.pool = ProcessPoolExecutor(
                max_workers=num_workers,
                mp_context=multiprocessing_context,
                initializer=setproctitle,
                initargs=(f"{getproctitle()}_worker",),
            )
            log.info(f"Started {num_workers} processes for block validation")
        self.constants = consensus_constants
        self.coin_store = coin_store
        self.block_store = block_store
        self._shut_down = False
        # Populate caches (_peak_height, block records, height map) from the DB.
        await self._load_chain_from_store(blockchain_dir)
        self._seen_compact_proofs = set()
        return self
    def shut_down(self) -> None:
        """Mark the blockchain as shut down and wait for the validation pool to drain."""
        self._shut_down = True
        self.pool.shutdown(wait=True)
    async def _load_chain_from_store(self, blockchain_dir: Path) -> None:
        """
        Initializes the state of the Blockchain class from the database: the height
        map, the block-record cache (records near the peak), and ``_peak_height``.
        """
        self.__height_map = await BlockHeightMap.create(blockchain_dir, self.block_store.db_wrapper)
        self.__block_records = {}
        self.__heights_in_cache = {}
        # Only cache the most recent BLOCKS_CACHE_SIZE records; older ones stay on disk.
        block_records, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
        for block in block_records.values():
            self.add_block_record(block)
        if len(block_records) == 0:
            # Empty database: no peak yet.
            assert peak is None
            self._peak_height = None
            return
        assert peak is not None
        self._peak_height = self.block_record(peak).height
        # Sanity: the height map must end exactly at the peak.
        assert self.__height_map.contains_height(self._peak_height)
        assert not self.__height_map.contains_height(uint32(self._peak_height + 1))
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def get_full_peak(self) -> Optional[FullBlock]:
if self._peak_height is None:
return None
""" Return list of FullBlocks that are peaks"""
peak_hash: Optional[bytes32] = self.height_to_hash(self._peak_height)
assert peak_hash is not None # Since we must have the peak block
block = await self.block_store.get_full_block(peak_hash)
assert block is not None
return block
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
return await self.block_store.get_full_block(header_hash)
    async def receive_block(
        self,
        block: FullBlock,
        pre_validation_result: PreValidationResult,
        fork_point_with_peak: Optional[uint32] = None,
    ) -> Tuple[ReceiveBlockResult, Optional[Err], Optional[StateChangeSummary]]:
        """
        This method must be called under the blockchain lock
        Adds a new block into the blockchain, if it's valid and connected to the current
        blockchain, regardless of whether it is the child of a head, or another block.
        Returns a header if block is added to head. Returns an error if the block is
        invalid. Also returns the fork height, in the case of a new peak.

        Args:
            block: The FullBlock to be validated.
            pre_validation_result: A result of successful pre validation
            fork_point_with_peak: The fork point, for efficiency reasons, if None, it will be recomputed

        Returns:
            The result of adding the block to the blockchain (NEW_PEAK, ADDED_AS_ORPHAN, INVALID_BLOCK,
            DISCONNECTED_BLOCK, ALREADY_HAVE_BLOCK)
            An optional error if the result is not NEW_PEAK or ADDED_AS_ORPHAN
            A StateChangeSummary iff NEW_PEAK, with:
              - A fork point if the result is NEW_PEAK
              - A list of coin changes as a result of rollback
              - A list of NPCResult for any new transaction block added to the chain
        """
        genesis: bool = block.height == 0
        # Cheap rejections first: duplicates, missing parent, wrong height.
        if self.contains_block(block.header_hash):
            return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None
        if not self.contains_block(block.prev_header_hash) and not genesis:
            return ReceiveBlockResult.DISCONNECTED_BLOCK, Err.INVALID_PREV_BLOCK_HASH, None
        if not genesis and (self.block_record(block.prev_header_hash).height + 1) != block.height:
            return ReceiveBlockResult.INVALID_BLOCK, Err.INVALID_HEIGHT, None
        npc_result: Optional[NPCResult] = pre_validation_result.npc_result
        required_iters = pre_validation_result.required_iters
        if pre_validation_result.error is not None:
            return ReceiveBlockResult.INVALID_BLOCK, Err(pre_validation_result.error), None
        assert required_iters is not None
        error_code, _ = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            block.height,
            npc_result,
            fork_point_with_peak,
            self.get_block_generator,
            # If we did not already validate the signature, validate it now
            validate_signature=not pre_validation_result.validated_signature,
        )
        if error_code is not None:
            return ReceiveBlockResult.INVALID_BLOCK, error_code, None
        block_record = block_to_block_record(
            self.constants,
            self,
            required_iters,
            block,
            None,
        )
        # Always add the block to the database
        async with self.block_store.db_wrapper.writer():
            try:
                header_hash: bytes32 = block.header_hash
                # Perform the DB operations to update the state, and rollback if something goes wrong
                await self.block_store.add_full_block(header_hash, block, block_record)
                records, state_change_summary = await self._reconsider_peak(
                    block_record, genesis, fork_point_with_peak, npc_result
                )
                # Then update the memory cache. It is important that this is not cancelled and does not throw
                # This is done after all async/DB operations, so there is a decreased chance of failure.
                self.add_block_record(block_record)
                if state_change_summary is not None:
                    self.__height_map.rollback(state_change_summary.fork_height)
                for fetched_block_record in records:
                    self.__height_map.update_height(
                        fetched_block_record.height,
                        fetched_block_record.header_hash,
                        fetched_block_record.sub_epoch_summary_included,
                    )
            except BaseException as e:
                # Undo the cached DB write so the store stays consistent, then re-raise.
                self.block_store.rollback_cache_block(header_hash)
                log.error(
                    f"Error while adding block {block.header_hash} height {block.height},"
                    f" rolling back: {traceback.format_exc()} {e}"
                )
                raise
        # make sure to update _peak_height after the transaction is committed,
        # otherwise other tasks may go look for this block before it's available
        if state_change_summary is not None:
            self._peak_height = block_record.height
        # This is done outside the try-except in case it fails, since we do not want to revert anything if it does
        await self.__height_map.maybe_flush()
        if state_change_summary is not None:
            # new coin records added
            return ReceiveBlockResult.NEW_PEAK, None, state_change_summary
        else:
            return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None
    async def _reconsider_peak(
        self,
        block_record: BlockRecord,
        genesis: bool,
        fork_point_with_peak: Optional[uint32],
        npc_result: Optional[NPCResult],
    ) -> Tuple[List[BlockRecord], Optional[StateChangeSummary]]:
        """
        When a new block is added, this is called, to check if the new block is the new peak of the chain.
        This also handles reorgs by reverting blocks which are not in the heaviest chain.
        It returns the summary of the applied changes, including the height of the fork between the previous chain
        and the new chain, or returns None if there was no update to the heaviest chain.

        Must run inside the caller's DB writer transaction (see receive_block).
        """
        peak = self.get_peak()
        rolled_back_state: Dict[bytes32, CoinRecord] = {}
        if genesis:
            # Genesis only becomes the peak if there is no peak yet.
            if peak is None:
                block: Optional[FullBlock] = await self.block_store.get_full_block(block_record.header_hash)
                assert block is not None
                if npc_result is not None:
                    tx_removals, tx_additions = tx_removals_and_additions(npc_result.conds)
                else:
                    tx_removals, tx_additions = [], []
                if block.is_transaction_block():
                    assert block.foliage_transaction_block is not None
                    await self.coin_store.new_block(
                        block.height,
                        block.foliage_transaction_block.timestamp,
                        block.get_included_reward_coins(),
                        tx_additions,
                        tx_removals,
                    )
                await self.block_store.set_in_chain([(block_record.header_hash,)])
                await self.block_store.set_peak(block_record.header_hash)
                return [block_record], StateChangeSummary(
                    block_record, uint32(0), [], [], list(block.get_included_reward_coins())
                )
            return [], None
        assert peak is not None
        if block_record.weight <= peak.weight:
            # This is not a heavier block than the heaviest we have seen, so we don't change the coin set
            return [], None
        # Finds the fork. if the block is just being appended, it will return the peak
        # If no blocks in common, returns -1, and reverts all blocks
        if block_record.prev_hash == peak.header_hash:
            fork_height: int = peak.height
        elif fork_point_with_peak is not None:
            fork_height = fork_point_with_peak
        else:
            fork_height = find_fork_point_in_chain(self, block_record, peak)
        if block_record.prev_hash != peak.header_hash:
            # Real reorg: revert the coin store to the fork point, remembering what changed.
            for coin_record in await self.coin_store.rollback_to_block(fork_height):
                rolled_back_state[coin_record.name] = coin_record
        # Collects all blocks from fork point to new peak
        blocks_to_add: List[Tuple[FullBlock, BlockRecord]] = []
        curr = block_record.header_hash
        # Backtracks up to the fork point, pulling all the required blocks from DB (that will soon be in the chain)
        while fork_height < 0 or curr != self.height_to_hash(uint32(fork_height)):
            fetched_full_block: Optional[FullBlock] = await self.block_store.get_full_block(curr)
            fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
            assert fetched_full_block is not None
            assert fetched_block_record is not None
            blocks_to_add.append((fetched_full_block, fetched_block_record))
            if fetched_full_block.height == 0:
                # Doing a full reorg, starting at height 0
                break
            curr = fetched_block_record.prev_hash
        records_to_add: List[BlockRecord] = []
        npc_results: List[NPCResult] = []
        reward_coins: List[Coin] = []
        # Apply the new chain oldest-first (blocks_to_add was collected newest-first).
        for fetched_full_block, fetched_block_record in reversed(blocks_to_add):
            records_to_add.append(fetched_block_record)
            if not fetched_full_block.is_transaction_block():
                # Coins are only created in TX blocks so there are no state updates for this block
                continue
            # We need to recompute the additions and removals, since they are not stored on DB (only generator is).
            if fetched_block_record.header_hash == block_record.header_hash:
                tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(
                    fetched_full_block, npc_result
                )
            else:
                tx_removals, tx_additions, npc_res = await self.get_tx_removals_and_additions(fetched_full_block, None)
            # Collect the NPC results for later post-processing
            if npc_res is not None:
                npc_results.append(npc_res)
            # Apply the coin store changes for each block that is now in the blockchain
            assert fetched_full_block.foliage_transaction_block is not None
            await self.coin_store.new_block(
                fetched_full_block.height,
                fetched_full_block.foliage_transaction_block.timestamp,
                fetched_full_block.get_included_reward_coins(),
                tx_additions,
                tx_removals,
            )
            # Collect the new reward coins for later post-processing
            reward_coins.extend(fetched_full_block.get_included_reward_coins())
        # we made it to the end successfully
        # Rollback sub_epoch_summaries
        await self.block_store.rollback(fork_height)
        await self.block_store.set_in_chain([(br.header_hash,) for br in records_to_add])
        # Changes the peak to be the new peak
        await self.block_store.set_peak(block_record.header_hash)
        return records_to_add, StateChangeSummary(
            block_record, uint32(max(fork_height, 0)), list(rolled_back_state.values()), npc_results, reward_coins
        )
async def get_tx_removals_and_additions(
self, block: FullBlock, npc_result: Optional[NPCResult] = None
) -> Tuple[List[bytes32], List[Coin], Optional[NPCResult]]:
if not block.is_transaction_block():
return [], [], None
if block.transactions_generator is None:
return [], [], None
if npc_result is None:
block_generator: Optional[BlockGenerator] = await self.get_block_generator(block)
assert block_generator is not None
npc_result = get_name_puzzle_conditions(
block_generator,
self.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.constants.COST_PER_BYTE,
mempool_mode=False,
)
tx_removals, tx_additions = tx_removals_and_additions(npc_result.conds)
return tx_removals, tx_additions, npc_result
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
    async def get_sp_and_ip_sub_slots(
        self, header_hash: bytes32
    ) -> Optional[Tuple[Optional[EndOfSubSlotBundle], Optional[EndOfSubSlotBundle]]]:
        """
        Return the (signage-point sub-slot, infusion-point sub-slot) end-of-sub-slot
        bundles for the given block, or None if the block is not stored. Either
        element may be None when the corresponding sub-slot does not exist (e.g. the
        chain has not produced a sub-slot yet, or the block is not an overflow block).
        """
        block: Optional[FullBlock] = await self.block_store.get_full_block(header_hash)
        if block is None:
            return None
        curr_br: BlockRecord = self.block_record(block.header_hash)
        is_overflow = curr_br.overflow
        curr: Optional[FullBlock] = block
        assert curr is not None
        # Walk backwards to the first block in this sub-slot (or genesis).
        while True:
            if curr_br.first_in_sub_slot:
                curr = await self.block_store.get_full_block(curr_br.header_hash)
                assert curr is not None
                break
            if curr_br.height == 0:
                break
            curr_br = self.block_record(curr_br.prev_hash)
        if len(curr.finished_sub_slots) == 0:
            # This means we got to genesis and still no sub-slots
            return None, None
        ip_sub_slot = curr.finished_sub_slots[-1]
        if not is_overflow:
            # Pos sub-slot is the same as infusion sub slot
            return None, ip_sub_slot
        if len(curr.finished_sub_slots) > 1:
            # Have both sub-slots
            return curr.finished_sub_slots[-2], ip_sub_slot
        # Overflow block with only one finished sub-slot here: the signage-point
        # sub-slot must be found in an earlier block.
        prev_curr: Optional[FullBlock] = await self.block_store.get_full_block(curr.prev_header_hash)
        if prev_curr is None:
            assert curr.height == 0
            prev_curr = curr
            prev_curr_br = self.block_record(curr.header_hash)
        else:
            prev_curr_br = self.block_record(curr.prev_header_hash)
        assert prev_curr_br is not None
        # Walk back again to the first block of the previous sub-slot.
        while prev_curr_br.height > 0:
            if prev_curr_br.first_in_sub_slot:
                prev_curr = await self.block_store.get_full_block(prev_curr_br.header_hash)
                assert prev_curr is not None
                break
            prev_curr_br = self.block_record(prev_curr_br.prev_hash)
        if len(prev_curr.finished_sub_slots) == 0:
            return None, ip_sub_slot
        return prev_curr.finished_sub_slots[-1], ip_sub_slot
    def get_recent_reward_challenges(self) -> List[Tuple[bytes32, uint128]]:
        """
        Return recent (reward challenge, total iterations) pairs, oldest first,
        collected by walking back from the peak. At most 2 * MAX_SUB_SLOT_BLOCKS
        entries are gathered; the peak itself is excluded.
        """
        peak = self.get_peak()
        if peak is None:
            return []
        recent_rc: List[Tuple[bytes32, uint128]] = []
        curr: Optional[BlockRecord] = peak
        while curr is not None and len(recent_rc) < 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
            if curr != peak:
                recent_rc.append((curr.reward_infusion_new_challenge, curr.total_iters))
            if curr.first_in_sub_slot:
                assert curr.finished_reward_slot_hashes is not None
                sub_slot_total_iters = curr.ip_sub_slot_total_iters(self.constants)
                # Start from the most recent
                for rc in reversed(curr.finished_reward_slot_hashes):
                    if sub_slot_total_iters < curr.sub_slot_iters:
                        break
                    recent_rc.append((rc, sub_slot_total_iters))
                    sub_slot_total_iters = uint128(sub_slot_total_iters - curr.sub_slot_iters)
            curr = self.try_block_record(curr.prev_hash)
        # Collected newest-first while walking back; return in chronological order.
        return list(reversed(recent_rc))
    async def validate_unfinished_block_header(
        self, block: UnfinishedBlock, skip_overflow_ss_validation: bool = True
    ) -> Tuple[Optional[uint64], Optional[Err]]:
        """
        Validate the header portion of an unfinished block: parent linkage,
        generator-hash commitments, transactions-info hash, and the unfinished
        header-block consensus checks.

        Returns:
            (required_iters, None) on success, or (possibly-None required_iters,
            error) on failure.
        """
        # The parent must be known, unless this extends directly off genesis.
        if (
            not self.contains_block(block.prev_header_hash)
            and block.prev_header_hash != self.constants.GENESIS_CHALLENGE
        ):
            return None, Err.INVALID_PREV_BLOCK_HASH
        if block.transactions_info is not None:
            if block.transactions_generator is not None:
                # generator_root must commit to the serialized generator.
                if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
                    return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH
            else:
                # No generator: the root must be the all-zero placeholder.
                if block.transactions_info.generator_root != bytes([0] * 32):
                    return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH
            if (
                block.foliage_transaction_block is None
                or block.foliage_transaction_block.transactions_info_hash != block.transactions_info.get_hash()
            ):
                return None, Err.INVALID_TRANSACTIONS_INFO_HASH
        else:
            # make sure non-tx blocks don't have these fields
            if block.transactions_generator is not None:
                return None, Err.INVALID_TRANSACTIONS_GENERATOR_HASH
            if block.foliage_transaction_block is not None:
                return None, Err.INVALID_TRANSACTIONS_INFO_HASH
        unfinished_header_block = UnfinishedHeaderBlock(
            block.finished_sub_slots,
            block.reward_chain_block,
            block.challenge_chain_sp_proof,
            block.reward_chain_sp_proof,
            block.foliage,
            block.foliage_transaction_block,
            b"",
        )
        prev_b = self.try_block_record(unfinished_header_block.prev_header_hash)
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants, len(unfinished_header_block.finished_sub_slots) > 0, prev_b, self
        )
        required_iters, error = validate_unfinished_header_block(
            self.constants,
            self,
            unfinished_header_block,
            False,
            difficulty,
            sub_slot_iters,
            skip_overflow_ss_validation,
        )
        if error is not None:
            return required_iters, error.code
        return required_iters, None
    async def validate_unfinished_block(
        self, block: UnfinishedBlock, npc_result: Optional[NPCResult], skip_overflow_ss_validation: bool = True
    ) -> PreValidationResult:
        """
        Fully validate an unfinished block: first its header (see
        validate_unfinished_block_header), then its body. The signature is assumed
        to have been validated by the caller already.
        """
        required_iters, error = await self.validate_unfinished_block_header(block, skip_overflow_ss_validation)
        if error is not None:
            return PreValidationResult(uint16(error.value), None, None, False)
        # Height of the parent; -1 when extending off genesis so the block height becomes 0.
        prev_height = (
            -1
            if block.prev_header_hash == self.constants.GENESIS_CHALLENGE
            else self.block_record(block.prev_header_hash).height
        )
        error_code, cost_result = await validate_block_body(
            self.constants,
            self,
            self.block_store,
            self.coin_store,
            self.get_peak(),
            block,
            uint32(prev_height + 1),
            npc_result,
            None,
            self.get_block_generator,
            validate_signature=False,  # Signature was already validated before calling this method, no need to validate
        )
        if error_code is not None:
            return PreValidationResult(uint16(error_code.value), None, None, False)
        return PreValidationResult(None, required_iters, cost_result, False)
    async def pre_validate_blocks_multiprocessing(
        self,
        blocks: List[FullBlock],
        npc_results: Dict[uint32, NPCResult],  # A cache of the result of running CLVM, optional (you can use {})
        batch_size: int = 4,
        wp_summaries: Optional[List[SubEpochSummary]] = None,
        *,
        validate_signatures: bool,
    ) -> List[PreValidationResult]:
        """
        Pre-validate a batch of blocks on this blockchain's worker pool. Thin
        wrapper over the module-level pre_validate_blocks_multiprocessing,
        supplying this chain's constants, executor, and generator lookup.
        """
        return await pre_validate_blocks_multiprocessing(
            self.constants,
            self,
            blocks,
            self.pool,
            True,
            npc_results,
            self.get_block_generator,
            batch_size,
            wp_summaries,
            validate_signatures=validate_signatures,
        )
async def run_generator(self, unfinished_block: bytes, generator: BlockGenerator) -> NPCResult:
task = asyncio.get_running_loop().run_in_executor(
self.pool,
_run_generator,
self.constants,
unfinished_block,
bytes(generator),
)
npc_result_bytes = await task
if npc_result_bytes is None:
raise ConsensusError(Err.UNKNOWN)
ret: NPCResult = NPCResult.from_bytes(npc_result_bytes)
if ret.error is not None:
raise ConsensusError(Err(ret.error))
return ret
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
    def block_record(self, header_hash: bytes32) -> BlockRecord:
        """Return the cached BlockRecord for ``header_hash`` (raises KeyError if not cached)."""
        return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32) -> BlockRecord:
# Precondition: height is in the blockchain
header_hash: Optional[bytes32] = self.height_to_hash(height)
if header_hash is None:
raise ValueError(f"Height is not in blockchain: {height}")
return self.block_record(header_hash)
    def get_ses_heights(self) -> List[uint32]:
        """Return the heights at which sub-epoch summaries are recorded in the height map."""
        return self.__height_map.get_ses_heights()
    def get_ses(self, height: uint32) -> SubEpochSummary:
        """Return the sub-epoch summary stored at the given height."""
        return self.__height_map.get_ses(height)
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
if not self.__height_map.contains_height(height):
return None
return self.__height_map.get_hash(height)
    def contains_height(self, height: uint32) -> bool:
        """True if the main chain (as tracked by the height-to-hash map) includes this height."""
        return self.__height_map.contains_height(height)
    def get_peak_height(self) -> Optional[uint32]:
        """Return the height of the current peak, or None when no peak has been set."""
        return self._peak_height
async def warmup(self, fork_point: uint32) -> None:
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
block_records = await self.block_store.get_block_records_in_range(
max(fork_point - self.constants.BLOCKS_CACHE_SIZE, uint32(0)), fork_point
)
for block_record in block_records.values():
self.add_block_record(block_record)
    def clean_block_record(self, height: int) -> None:
        """
        Evicts cached block records walking downward from `height` (note: the given
        height itself is evicted too, so callers keep heights > `height`). The walk
        stops at the first height that has no cache entry, so lower non-contiguous
        entries are left untouched.
        Args:
            height: the highest height to evict from the cache
        """
        if height < 0:
            return None
        blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
        while blocks_to_remove is not None and height >= 0:
            for header_hash in blocks_to_remove:
                del self.__block_records[header_hash]  # remove from blocks
            del self.__heights_in_cache[uint32(height)]  # remove height from heights in cache
            if height == 0:
                # stop before decrementing past genesis (uint32 cannot go negative)
                break
            height = height - 1
            blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self) -> None:
"""
Cleans the cache so that we only maintain relevant blocks. This removes
block records that have height < peak - BLOCKS_CACHE_SIZE.
These blocks are necessary for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
assert self._peak_height is not None
if self._peak_height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(self._peak_height - self.constants.BLOCKS_CACHE_SIZE)
    async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
        """Fetch block records for heights start..stop from the block store, keyed by header hash
        (range inclusivity follows the underlying store's semantics — see block_store)."""
        return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
hashes = []
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_creation.py | flax/consensus/block_creation.py | from __future__ import annotations
import logging
import random
from dataclasses import replace
from typing import Callable, Dict, List, Optional, Tuple
import blspy
from blspy import G1Element, G2Element
from chia_rs import compute_merkle_set_root
from chiabip158 import PyBIP158
from flax.consensus.block_record import BlockRecord
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.coinbase import create_farmer_coin, create_pool_coin
from flax.consensus.constants import ConsensusConstants
from flax.consensus.cost_calculator import NPCResult
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.full_node.signage_point import SignagePoint
from flax.types.blockchain_format.coin import Coin, hash_coin_ids
from flax.types.blockchain_format.foliage import Foliage, FoliageBlockData, FoliageTransactionBlock, TransactionsInfo
from flax.types.blockchain_format.pool_target import PoolTarget
from flax.types.blockchain_format.proof_of_space import ProofOfSpace
from flax.types.blockchain_format.reward_chain_block import RewardChainBlock, RewardChainBlockUnfinished
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.prev_transaction_block import get_prev_transaction_block
from flax.util.recursive_replace import recursive_replace
log = logging.getLogger(__name__)
def create_foliage(
    constants: ConsensusConstants,
    reward_block_unfinished: RewardChainBlockUnfinished,
    block_generator: Optional[BlockGenerator],
    aggregate_sig: G2Element,
    additions: List[Coin],
    removals: List[Coin],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    timestamp: uint64,
    farmer_reward_puzzlehash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    seed: bytes = b"",
) -> Tuple[Foliage, Optional[FoliageTransactionBlock], Optional[TransactionsInfo]]:
    """
    Creates a foliage for a given reward chain block. This may or may not be a tx block. In the case of a tx block,
    the return values are not None. This is called at the signage point, so some of this information may be
    tweaked at the infusion point.
    Args:
        constants: consensus constants being used for this chain
        reward_block_unfinished: the reward block to look at, potentially at the signage point
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
        additions: coins added by the spend bundle (NOTE: mutated in place — reward claims are appended)
        removals: coins removed by the spend bundle
        prev_block: the previous block at the signage point
        blocks: dict from header hash to blocks, of all ancestor blocks
        total_iters_sp: total iters at the signage point
        timestamp: timestamp to put into the foliage block
        farmer_reward_puzzlehash: where to pay out farming reward
        pool_target: where to pay out pool reward
        get_plot_signature: retrieve the signature corresponding to the plot public key
        get_pool_signature: retrieve the signature corresponding to the pool public key
        seed: seed to randomize block
    """
    if prev_block is not None:
        res = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        is_transaction_block: bool = res[0]
        prev_transaction_block: Optional[BlockRecord] = res[1]
    else:
        # Genesis is a transaction block
        prev_transaction_block = None
        is_transaction_block = True
    # NOTE(review): this seeds the module-global `random` state as a side effect;
    # extension_data is a plain bytes value despite the bytes32 annotation.
    random.seed(seed)
    # Use the extension data to create different blocks based on header hash
    extension_data: bytes32 = random.randint(11, 100000000).to_bytes(32, "big")
    if prev_block is None:
        height: uint32 = uint32(0)
    else:
        height = uint32(prev_block.height + 1)
    # Create filter
    byte_array_tx: List[bytearray] = []
    tx_additions: List[Coin] = []
    tx_removals: List[bytes32] = []
    pool_target_signature: Optional[G2Element] = get_pool_signature(
        pool_target, reward_block_unfinished.proof_of_space.pool_public_key
    )
    foliage_data = FoliageBlockData(
        reward_block_unfinished.get_hash(),
        pool_target,
        pool_target_signature,
        farmer_reward_puzzlehash,
        extension_data,
    )
    foliage_block_data_signature: G2Element = get_plot_signature(
        foliage_data.get_hash(),
        reward_block_unfinished.proof_of_space.plot_public_key,
    )
    prev_block_hash: bytes32 = constants.GENESIS_CHALLENGE
    if height != 0:
        assert prev_block is not None
        prev_block_hash = prev_block.header_hash
    generator_block_heights_list: List[uint32] = []
    foliage_transaction_block_hash: Optional[bytes32]
    if is_transaction_block:
        cost = uint64(0)
        # Calculate the cost of transactions
        if block_generator is not None:
            generator_block_heights_list = block_generator.block_height_list
            result: NPCResult = get_name_puzzle_conditions(
                block_generator,
                constants.MAX_BLOCK_COST_CLVM,
                cost_per_byte=constants.COST_PER_BYTE,
                mempool_mode=True,
            )
            cost = result.cost
            # Fees are whatever value is removed but not re-created by the spends
            removal_amount = 0
            addition_amount = 0
            for coin in removals:
                removal_amount += coin.amount
            for coin in additions:
                addition_amount += coin.amount
            spend_bundle_fees = removal_amount - addition_amount
        else:
            spend_bundle_fees = 0
        # Gather pool/farmer reward coins that become claimable in this transaction block
        reward_claims_incorporated = []
        if height > 0:
            assert prev_transaction_block is not None
            assert prev_block is not None
            curr: BlockRecord = prev_block
            while not curr.is_transaction_block:
                curr = blocks.block_record(curr.prev_hash)
            assert curr.fees is not None
            pool_coin = create_pool_coin(
                curr.height, curr.pool_puzzle_hash, calculate_pool_reward(curr.height), constants.GENESIS_CHALLENGE
            )
            farmer_coin = create_farmer_coin(
                curr.height,
                curr.farmer_puzzle_hash,
                uint64(calculate_base_farmer_reward(curr.height) + curr.fees),
                constants.GENESIS_CHALLENGE,
            )
            assert curr.header_hash == prev_transaction_block.header_hash
            reward_claims_incorporated += [pool_coin, farmer_coin]
            if curr.height > 0:
                curr = blocks.block_record(curr.prev_hash)
                # Prev block is not genesis
                while not curr.is_transaction_block:
                    pool_coin = create_pool_coin(
                        curr.height,
                        curr.pool_puzzle_hash,
                        calculate_pool_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    farmer_coin = create_farmer_coin(
                        curr.height,
                        curr.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr.height),
                        constants.GENESIS_CHALLENGE,
                    )
                    reward_claims_incorporated += [pool_coin, farmer_coin]
                    curr = blocks.block_record(curr.prev_hash)
        # NOTE: mutates the caller-supplied `additions` list
        additions.extend(reward_claims_incorporated.copy())
        for coin in additions:
            tx_additions.append(coin)
            byte_array_tx.append(bytearray(coin.puzzle_hash))
        for coin in removals:
            cname = coin.name()
            tx_removals.append(cname)
            byte_array_tx.append(bytearray(cname))
        # Encode the filter over addition puzzle hashes and removal coin ids
        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())
        additions_merkle_items: List[bytes32] = []
        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[bytes32]] = {}
        for coin in tx_additions:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin.name())
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin.name()]
        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coin_ids in puzzlehash_coin_map.items():
            additions_merkle_items.append(puzzle)
            additions_merkle_items.append(hash_coin_ids(coin_ids))
        additions_root = bytes32(compute_merkle_set_root(additions_merkle_items))
        removals_root = bytes32(compute_merkle_set_root(tx_removals))
        # Sentinel hashes when there is no generator / no generator refs
        generator_hash = bytes32([0] * 32)
        if block_generator is not None:
            generator_hash = std_hash(block_generator.program)
        generator_refs_hash = bytes32([1] * 32)
        if generator_block_heights_list not in (None, []):
            generator_ref_list_bytes = b"".join([bytes(i) for i in generator_block_heights_list])
            generator_refs_hash = std_hash(generator_ref_list_bytes)
        filter_hash: bytes32 = std_hash(encoded)
        transactions_info: Optional[TransactionsInfo] = TransactionsInfo(
            generator_hash,
            generator_refs_hash,
            aggregate_sig,
            uint64(spend_bundle_fees),
            cost,
            reward_claims_incorporated,
        )
        if prev_transaction_block is None:
            prev_transaction_block_hash: bytes32 = constants.GENESIS_CHALLENGE
        else:
            prev_transaction_block_hash = prev_transaction_block.header_hash
        assert transactions_info is not None
        foliage_transaction_block: Optional[FoliageTransactionBlock] = FoliageTransactionBlock(
            prev_transaction_block_hash,
            timestamp,
            filter_hash,
            additions_root,
            removals_root,
            transactions_info.get_hash(),
        )
        assert foliage_transaction_block is not None
        foliage_transaction_block_hash = foliage_transaction_block.get_hash()
        foliage_transaction_block_signature: Optional[G2Element] = get_plot_signature(
            foliage_transaction_block_hash, reward_block_unfinished.proof_of_space.plot_public_key
        )
        assert foliage_transaction_block_signature is not None
    else:
        # Non-transaction block: all tx-related foliage fields stay empty
        foliage_transaction_block_hash = None
        foliage_transaction_block_signature = None
        foliage_transaction_block = None
        transactions_info = None
    assert (foliage_transaction_block_hash is None) == (foliage_transaction_block_signature is None)
    foliage = Foliage(
        prev_block_hash,
        reward_block_unfinished.get_hash(),
        foliage_data,
        foliage_block_data_signature,
        foliage_transaction_block_hash,
        foliage_transaction_block_signature,
    )
    return foliage, foliage_transaction_block, transactions_info
def create_unfinished_block(
    constants: ConsensusConstants,
    sub_slot_start_total_iters: uint128,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    sp_iters: uint64,
    ip_iters: uint64,
    proof_of_space: ProofOfSpace,
    slot_cc_challenge: bytes32,
    farmer_reward_puzzle_hash: bytes32,
    pool_target: PoolTarget,
    get_plot_signature: Callable[[bytes32, G1Element], G2Element],
    get_pool_signature: Callable[[PoolTarget, Optional[G1Element]], Optional[G2Element]],
    signage_point: SignagePoint,
    timestamp: uint64,
    blocks: BlockchainInterface,
    seed: bytes = b"",
    block_generator: Optional[BlockGenerator] = None,
    aggregate_sig: G2Element = G2Element(),
    additions: Optional[List[Coin]] = None,
    removals: Optional[List[Coin]] = None,
    prev_block: Optional[BlockRecord] = None,
    finished_sub_slots_input: Optional[List[EndOfSubSlotBundle]] = None,
) -> UnfinishedBlock:
    """
    Creates a new unfinished block using all the information available at the signage point. This will have to be
    modified using information from the infusion point.
    Args:
        constants: consensus constants being used for this chain
        sub_slot_start_total_iters: the starting sub-slot iters at the signage point sub-slot
        sub_slot_iters: sub-slot-iters at the infusion point epoch
        signage_point_index: signage point index of the block to create
        sp_iters: sp_iters of the block to create
        ip_iters: ip_iters of the block to create
        proof_of_space: proof of space of the block to create
        slot_cc_challenge: challenge hash at the sp sub-slot
        farmer_reward_puzzle_hash: where to pay out farmer rewards
        pool_target: where to pay out pool rewards
        get_plot_signature: function that returns signature corresponding to plot public key
        get_pool_signature: function that returns signature corresponding to pool public key
        signage_point: signage point information (VDFs)
        timestamp: timestamp to add to the foliage block, if created
        seed: seed to randomize chain
        block_generator: transactions to add to the foliage block, if created
        aggregate_sig: aggregate of all transactions (or infinity element)
        additions: Coins added in spend_bundle
        removals: Coins removed in spend_bundle
        prev_block: previous block (already in chain) from the signage point
        blocks: dictionary from header hash to SBR of all included SBR
        finished_sub_slots_input: finished_sub_slots at the signage point
    Returns:
        the assembled UnfinishedBlock (reward chain block, foliage, and optional tx data)
    """
    if finished_sub_slots_input is None:
        finished_sub_slots: List[EndOfSubSlotBundle] = []
    else:
        # Copy so we do not mutate the caller's list
        finished_sub_slots = finished_sub_slots_input.copy()
    overflow: bool = sp_iters > ip_iters
    total_iters_sp: uint128 = uint128(sub_slot_start_total_iters + sp_iters)
    is_genesis: bool = prev_block is None
    new_sub_slot: bool = len(finished_sub_slots) > 0
    cc_sp_hash: bytes32 = slot_cc_challenge
    # Only enters this if statement if we are in testing mode (making VDF proofs here)
    if signage_point.cc_vdf is not None:
        assert signage_point.rc_vdf is not None
        cc_sp_hash = signage_point.cc_vdf.output.get_hash()
        rc_sp_hash = signage_point.rc_vdf.output.get_hash()
    else:
        # No VDFs provided: derive the reward chain sp hash from slot/chain state
        if new_sub_slot:
            rc_sp_hash = finished_sub_slots[-1].reward_chain.get_hash()
        else:
            if is_genesis:
                rc_sp_hash = constants.GENESIS_CHALLENGE
            else:
                assert prev_block is not None
                assert blocks is not None
                curr = prev_block
                while not curr.first_in_sub_slot:
                    curr = blocks.block_record(curr.prev_hash)
                assert curr.finished_reward_slot_hashes is not None
                rc_sp_hash = curr.finished_reward_slot_hashes[-1]
        signage_point = SignagePoint(None, None, None, None)
    cc_sp_signature: Optional[G2Element] = get_plot_signature(cc_sp_hash, proof_of_space.plot_public_key)
    rc_sp_signature: Optional[G2Element] = get_plot_signature(rc_sp_hash, proof_of_space.plot_public_key)
    assert cc_sp_signature is not None
    assert rc_sp_signature is not None
    # Sanity-check that the plot key really signed the cc sp hash
    assert blspy.AugSchemeMPL.verify(proof_of_space.plot_public_key, cc_sp_hash, cc_sp_signature)
    # Overflow blocks are infused one sub-slot after their signage point
    total_iters = uint128(sub_slot_start_total_iters + ip_iters + (sub_slot_iters if overflow else 0))
    rc_block = RewardChainBlockUnfinished(
        total_iters,
        signage_point_index,
        slot_cc_challenge,
        proof_of_space,
        signage_point.cc_vdf,
        cc_sp_signature,
        signage_point.rc_vdf,
        rc_sp_signature,
    )
    if additions is None:
        additions = []
    if removals is None:
        removals = []
    (foliage, foliage_transaction_block, transactions_info,) = create_foliage(
        constants,
        rc_block,
        block_generator,
        aggregate_sig,
        additions,
        removals,
        prev_block,
        blocks,
        total_iters_sp,
        timestamp,
        farmer_reward_puzzle_hash,
        pool_target,
        get_plot_signature,
        get_pool_signature,
        seed,
    )
    return UnfinishedBlock(
        finished_sub_slots,
        rc_block,
        signage_point.cc_proof,
        signage_point.rc_proof,
        foliage,
        foliage_transaction_block,
        transactions_info,
        block_generator.program if block_generator else None,
        block_generator.block_height_list if block_generator else [],
    )
def unfinished_block_to_full_block(
    unfinished_block: UnfinishedBlock,
    cc_ip_vdf: VDFInfo,
    cc_ip_proof: VDFProof,
    rc_ip_vdf: VDFInfo,
    rc_ip_proof: VDFProof,
    icc_ip_vdf: Optional[VDFInfo],
    icc_ip_proof: Optional[VDFProof],
    finished_sub_slots: List[EndOfSubSlotBundle],
    prev_block: Optional[BlockRecord],
    blocks: BlockchainInterface,
    total_iters_sp: uint128,
    difficulty: uint64,
) -> FullBlock:
    """
    Converts an unfinished block to a finished block. Includes all the infusion point VDFs as well as tweaking
    other properties (height, weight, sub-slots, etc)
    Args:
        unfinished_block: the unfinished block to finish
        cc_ip_vdf: the challenge chain vdf info at the infusion point
        cc_ip_proof: the challenge chain proof
        rc_ip_vdf: the reward chain vdf info at the infusion point
        rc_ip_proof: the reward chain proof
        icc_ip_vdf: the infused challenge chain vdf info at the infusion point
        icc_ip_proof: the infused challenge chain proof
        finished_sub_slots: finished sub slots from the prev block to the infusion point
        prev_block: prev block from the infusion point
        blocks: dictionary from header hash to SBR of all included SBR
        total_iters_sp: total iters at the signage point
        difficulty: difficulty at the infusion point
    """
    # Replace things that need to be replaced, since foliage blocks did not necessarily have the latest information
    if prev_block is None:
        # Genesis: everything carries over from the unfinished block unchanged
        is_transaction_block = True
        new_weight = uint128(difficulty)
        new_height = uint32(0)
        new_foliage = unfinished_block.foliage
        new_foliage_transaction_block = unfinished_block.foliage_transaction_block
        new_tx_info = unfinished_block.transactions_info
        new_generator = unfinished_block.transactions_generator
        new_generator_ref_list = unfinished_block.transactions_generator_ref_list
    else:
        # tx-block status may have changed between signage point and infusion point
        is_transaction_block, _ = get_prev_transaction_block(prev_block, blocks, total_iters_sp)
        new_weight = uint128(prev_block.weight + difficulty)
        new_height = uint32(prev_block.height + 1)
        if is_transaction_block:
            new_fbh = unfinished_block.foliage.foliage_transaction_block_hash
            new_fbs = unfinished_block.foliage.foliage_transaction_block_signature
            new_foliage_transaction_block = unfinished_block.foliage_transaction_block
            new_tx_info = unfinished_block.transactions_info
            new_generator = unfinished_block.transactions_generator
            new_generator_ref_list = unfinished_block.transactions_generator_ref_list
        else:
            # Demoted to a non-tx block: strip all transaction-related fields
            new_fbh = None
            new_fbs = None
            new_foliage_transaction_block = None
            new_tx_info = None
            new_generator = None
            new_generator_ref_list = []
        assert (new_fbh is None) == (new_fbs is None)
        new_foliage = replace(
            unfinished_block.foliage,
            prev_block_hash=prev_block.header_hash,
            foliage_transaction_block_hash=new_fbh,
            foliage_transaction_block_signature=new_fbs,
        )
    ret = FullBlock(
        finished_sub_slots,
        RewardChainBlock(
            new_weight,
            new_height,
            unfinished_block.reward_chain_block.total_iters,
            unfinished_block.reward_chain_block.signage_point_index,
            unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash,
            unfinished_block.reward_chain_block.proof_of_space,
            unfinished_block.reward_chain_block.challenge_chain_sp_vdf,
            unfinished_block.reward_chain_block.challenge_chain_sp_signature,
            cc_ip_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_vdf,
            unfinished_block.reward_chain_block.reward_chain_sp_signature,
            rc_ip_vdf,
            icc_ip_vdf,
            is_transaction_block,
        ),
        unfinished_block.challenge_chain_sp_proof,
        cc_ip_proof,
        unfinished_block.reward_chain_sp_proof,
        rc_ip_proof,
        icc_ip_proof,
        new_foliage,
        new_foliage_transaction_block,
        new_tx_info,
        new_generator,
        new_generator_ref_list,
    )
    # The reward chain block changed above, so the foliage must point at its new hash
    ret = recursive_replace(
        ret,
        "foliage.reward_block_hash",
        ret.reward_chain_block.get_hash(),
    )
    return ret
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/default_constants.py | flax/consensus/default_constants.py | from __future__ import annotations
from flax.util.ints import uint64
from .constants import ConsensusConstants
# Mainnet consensus parameter values. DEFAULT_CONSTANTS is built from this mapping
# at the bottom of the module; per-network overrides are applied elsewhere.
default_kwargs = {
    "SLOT_BLOCKS_TARGET": 32,
    "MIN_BLOCKS_PER_CHALLENGE_BLOCK": 16,  # Must be less than half of SLOT_BLOCKS_TARGET
    "MAX_SUB_SLOT_BLOCKS": 128,  # Must be less than half of SUB_EPOCH_BLOCKS
    "NUM_SPS_SUB_SLOT": 64,  # Must be a power of 2
    "SUB_SLOT_ITERS_STARTING": 2**27,
    # DIFFICULTY_STARTING is the starting difficulty for the first epoch, which is then further
    # multiplied by another factor of DIFFICULTY_CONSTANT_FACTOR, to be used in the VDF iter calculation formula.
    "DIFFICULTY_CONSTANT_FACTOR": 2**57,
    "DIFFICULTY_STARTING": 7,
    "DIFFICULTY_CHANGE_MAX_FACTOR": 3,  # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
    # These 3 constants must be changed at the same time
    "SUB_EPOCH_BLOCKS": 384,  # The number of blocks per sub-epoch, mainnet 384
    "EPOCH_BLOCKS": 4608,  # The number of blocks per epoch, mainnet 4608. Must be multiple of SUB_EPOCH_SB
    "SIGNIFICANT_BITS": 8,  # The number of bits to look at in difficulty and min iters. The rest are zeroed
    "DISCRIMINANT_SIZE_BITS": 1024,  # Max is 1024 (based on ClassGroupElement int size)
    "NUMBER_ZERO_BITS_PLOT_FILTER": 9,  # H(plot signature of the challenge) must start with these many zeroes
    "MIN_PLOT_SIZE": 32,  # 32 for mainnet
    "MAX_PLOT_SIZE": 50,
    "SUB_SLOT_TIME_TARGET": 600,  # The target number of seconds per slot, mainnet 600
    "NUM_SP_INTERVALS_EXTRA": 3,  # The number of sp intervals to add to the signage point
    "MAX_FUTURE_TIME": 5 * 60,  # The next block can have a timestamp of at most these many seconds in the future
    "NUMBER_OF_TIMESTAMPS": 11,  # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
    # Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
    # We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
    # Default used for tests is std_hash(b'')
    "GENESIS_CHALLENGE": bytes.fromhex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
    # Forks of flax should change this value to provide replay attack protection. This is set to mainnet genesis chall
    "AGG_SIG_ME_ADDITIONAL_DATA": bytes.fromhex("9b9ffca948750d8b41ac755da213461e9d2253ec7bfce80695d78f7fe7d55112"),
    "GENESIS_PRE_FARM_POOL_PUZZLE_HASH": bytes.fromhex(
        "e8e245e2e6536e17fd7cbc2e3bd90de06f55362ee3c84cea5a68391e5bad7ef6"
    ),
    "GENESIS_PRE_FARM_FARMER_PUZZLE_HASH": bytes.fromhex(
        "e8e245e2e6536e17fd7cbc2e3bd90de06f55362ee3c84cea5a68391e5bad7ef6"
    ),
    "MAX_VDF_WITNESS_SIZE": 64,
    # Size of mempool = 50x the size of block
    "MEMPOOL_BLOCK_BUFFER": 50,
    # Max coin amount, fits into 64 bits
    "MAX_COIN_AMOUNT": uint64((1 << 64) - 1),
    # Max block cost in clvm cost units
    "MAX_BLOCK_COST_CLVM": 11000000000,
    # The cost per byte of generator program
    "COST_PER_BYTE": 12000,
    "WEIGHT_PROOF_THRESHOLD": 2,
    "BLOCKS_CACHE_SIZE": 4608 + (128 * 4),  # EPOCH_BLOCKS + 4 * MAX_SUB_SLOT_BLOCKS
    "WEIGHT_PROOF_RECENT_BLOCKS": 1000,
    "MAX_BLOCK_COUNT_PER_REQUESTS": 32,  # Allow up to 32 blocks per request
    "MAX_GENERATOR_SIZE": 1000000,
    "MAX_GENERATOR_REF_LIST_SIZE": 512,  # Number of references allowed in the block generator ref list
    "POOL_SUB_SLOT_ITERS": 37600000000,  # iters limit * NUM_SPS
}
DEFAULT_CONSTANTS = ConsensusConstants(**default_kwargs)  # type: ignore
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/cost_calculator.py | flax/consensus/cost_calculator.py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from flax.types.spend_bundle_conditions import SpendBundleConditions
from flax.util.ints import uint16, uint64
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class NPCResult(Streamable):
    """Result of running a block generator program (CLVM): an error code or the parsed conditions."""

    # Err code (as uint16) when running/validating the generator failed; None on success
    error: Optional[uint16]
    # Parsed spend conditions produced by the generator; presumably None when error is set — confirm with producers
    conds: Optional[SpendBundleConditions]
    cost: uint64  # The total cost of the block, including CLVM cost, cost of
    # conditions and cost of bytes
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_record.py | flax/consensus/block_record.py | from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from flax.consensus.constants import ConsensusConstants
from flax.consensus.pot_iterations import calculate_ip_iters, calculate_sp_iters
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class BlockRecord(Streamable):
    """
    This class is not included or hashed into the blockchain, but it is kept in memory as a more
    efficient way to maintain data about the blockchain. This allows us to validate future blocks,
    difficulty adjustments, etc, without saving the whole header block in memory.

    Note: this is a streamable dataclass — field order defines the serialization format.
    """

    header_hash: bytes32
    prev_hash: bytes32  # Header hash of the previous block
    height: uint32
    weight: uint128  # Total cumulative difficulty of all ancestor blocks since genesis
    total_iters: uint128  # Total number of VDF iterations since genesis, including this block
    signage_point_index: uint8
    challenge_vdf_output: ClassgroupElement  # This is the intermediary VDF output at ip_iters in challenge chain
    infused_challenge_vdf_output: Optional[
        ClassgroupElement
    ]  # This is the intermediary VDF output at ip_iters in infused cc, iff deficit <= 3
    reward_infusion_new_challenge: bytes32  # The reward chain infusion output, input to next VDF
    challenge_block_info_hash: bytes32  # Hash of challenge chain data, used to validate end of slots in the future
    sub_slot_iters: uint64  # Current network sub_slot_iters parameter
    pool_puzzle_hash: bytes32  # Need to keep track of these because Coins are created in a future block
    farmer_puzzle_hash: bytes32
    required_iters: uint64  # The number of iters required for this proof of space
    deficit: uint8  # A deficit of 16 is an overflow block after an infusion. Deficit of 15 is a challenge block
    overflow: bool
    prev_transaction_block_height: uint32
    # Transaction block (present iff is_transaction_block)
    timestamp: Optional[uint64]
    prev_transaction_block_hash: Optional[bytes32]  # Header hash of the previous transaction block
    fees: Optional[uint64]
    reward_claims_incorporated: Optional[List[Coin]]
    # Slot (present iff this is the first SB in sub slot)
    finished_challenge_slot_hashes: Optional[List[bytes32]]
    finished_infused_challenge_slot_hashes: Optional[List[bytes32]]
    finished_reward_slot_hashes: Optional[List[bytes32]]
    # Sub-epoch (present iff this is the first SB after sub-epoch)
    sub_epoch_summary_included: Optional[SubEpochSummary]

    @property
    def is_transaction_block(self) -> bool:
        """True iff this is a transaction block (only tx blocks carry a timestamp)."""
        return self.timestamp is not None

    @property
    def first_in_sub_slot(self) -> bool:
        """True iff this block is the first one in its sub slot (it then carries the finished slot hashes)."""
        return self.finished_challenge_slot_hashes is not None

    def is_challenge_block(self, constants: ConsensusConstants) -> bool:
        """True iff this block is its slot's challenge block (deficit == MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)."""
        return self.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1

    def sp_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
        """Total iterations at the start of the sub slot containing this block's signage point
        (for overflow blocks the signage point lies one full sub slot earlier)."""
        if self.overflow:
            return uint128(self.total_iters - self.ip_iters(constants) - self.sub_slot_iters)
        else:
            return uint128(self.total_iters - self.ip_iters(constants))

    def ip_sub_slot_total_iters(self, constants: ConsensusConstants) -> uint128:
        """Total iterations at the start of the sub slot containing this block's infusion point."""
        return uint128(self.total_iters - self.ip_iters(constants))

    def sp_iters(self, constants: ConsensusConstants) -> uint64:
        """Iterations from the start of the sub slot to this block's signage point."""
        return calculate_sp_iters(constants, self.sub_slot_iters, self.signage_point_index)

    def ip_iters(self, constants: ConsensusConstants) -> uint64:
        """Iterations from the start of the sub slot to this block's infusion point."""
        return calculate_ip_iters(
            constants,
            self.sub_slot_iters,
            self.signage_point_index,
            self.required_iters,
        )

    def sp_total_iters(self, constants: ConsensusConstants) -> uint128:
        """Total iterations (since genesis) at this block's signage point."""
        return uint128(self.sp_sub_slot_total_iters(constants) + self.sp_iters(constants))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_body_validation.py | flax/consensus/block_body_validation.py | from __future__ import annotations
import collections
import logging
from typing import Awaitable, Callable, Dict, List, Optional, Set, Tuple, Union
from chiabip158 import PyBIP158
from flax.consensus.block_record import BlockRecord
from flax.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from flax.consensus.block_root_validation import validate_block_merkle_roots
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.coinbase import create_farmer_coin, create_pool_coin
from flax.consensus.constants import ConsensusConstants
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.find_fork_point import find_fork_point_in_chain
from flax.full_node.block_store import BlockStore
from flax.full_node.coin_store import CoinStore
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions, mempool_check_time_locks
from flax.types.block_protocol import BlockInfo
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32, bytes48
from flax.types.coin_record import CoinRecord
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.types.unfinished_block import UnfinishedBlock
from flax.util import cached_bls
from flax.util.condition_tools import pkm_pairs
from flax.util.errors import Err
from flax.util.generator_tools import tx_removals_and_additions
from flax.util.hash import std_hash
from flax.util.ints import uint32, uint64
log = logging.getLogger(__name__)
async def validate_block_body(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    block_store: BlockStore,
    coin_store: CoinStore,
    peak: Optional[BlockRecord],
    block: Union[FullBlock, UnfinishedBlock],
    height: uint32,
    npc_result: Optional[NPCResult],
    fork_point_with_peak: Optional[uint32],
    get_block_generator: Callable[[BlockInfo], Awaitable[Optional[BlockGenerator]]],
    *,
    validate_signature: bool = True,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
    """
    This assumes the header block has been completely validated.
    Validates the transactions and body of the block. Returns None for the first value if everything
    validates correctly, or an Err if something does not validate. For the second value, returns a CostResult
    only if validation succeeded, and there are transactions. In other cases it returns None. The NPC result is
    the result of running the generator with the previous generators refs. It is only present for transaction
    blocks which have spent coins.

    Args:
        constants: consensus constants for the chain being validated
        blocks: interface to cached block records (used to walk ancestor headers)
        block_store: persistent store of full blocks, used to walk a fork branch
        coin_store: persistent store of coin records (the unspent-coin set)
        peak: current peak of the chain, or None if there is no peak yet
        block: the (possibly unfinished) block whose body is validated
        height: height at which the block is being validated
        npc_result: result of running this block's generator, if it has one
        fork_point_with_peak: known fork height with the peak, or None to compute it here
        get_block_generator: async callback resolving a block's generator (with its refs)
        validate_signature: when False, the aggregate BLS signature check is skipped
            (the caller is then responsible for verifying it elsewhere)

    Returns:
        (None, npc_result) on success, or (Err, None) for the first failed check.
    """
    if isinstance(block, FullBlock):
        assert height == block.height
    prev_transaction_block_height: uint32 = uint32(0)

    # 1. For non transaction-blocks: foliage block, transaction filter, transactions info, and generator must
    # be empty. If it is a block but not a transaction block, there is no body to validate. Check that all fields are
    # None
    if block.foliage.foliage_transaction_block_hash is None:
        if (
            block.foliage_transaction_block is not None
            or block.transactions_info is not None
            or block.transactions_generator is not None
        ):
            return Err.NOT_BLOCK_BUT_HAS_DATA, None
        prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
        while not prev_tb.is_transaction_block:
            prev_tb = blocks.block_record(prev_tb.prev_hash)
        assert prev_tb.timestamp is not None
        if len(block.transactions_generator_ref_list) > 0:
            return Err.NOT_BLOCK_BUT_HAS_DATA, None
        return None, None  # This means the block is valid

    # All checks below this point correspond to transaction blocks
    # 2. For blocks, foliage block, transactions info must not be empty
    if block.foliage_transaction_block is None or block.transactions_info is None:
        return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
    assert block.foliage_transaction_block is not None

    # keeps track of the reward coins that need to be incorporated
    expected_reward_coins: Set[Coin] = set()

    # 3. The transaction info hash in the Foliage block must match the transaction info
    if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
        return Err.INVALID_TRANSACTIONS_INFO_HASH, None

    # 4. The foliage block hash in the foliage block must match the foliage block
    if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
        return Err.INVALID_FOLIAGE_BLOCK_HASH, None

    # 5. The reward claims must be valid for the previous blocks, and current block fees
    # If height == 0, expected_reward_coins will be left empty
    if height > 0:
        # Add reward claims for all blocks from the prev prev block, until the prev block (including the latter)
        prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
        prev_transaction_block_height = prev_transaction_block.height
        assert prev_transaction_block.fees is not None
        pool_coin = create_pool_coin(
            prev_transaction_block_height,
            prev_transaction_block.pool_puzzle_hash,
            calculate_pool_reward(prev_transaction_block.height),
            constants.GENESIS_CHALLENGE,
        )
        farmer_coin = create_farmer_coin(
            prev_transaction_block_height,
            prev_transaction_block.farmer_puzzle_hash,
            uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
            constants.GENESIS_CHALLENGE,
        )
        # Adds the previous block
        expected_reward_coins.add(pool_coin)
        expected_reward_coins.add(farmer_coin)

        # For the second block in the chain, don't go back further
        if prev_transaction_block.height > 0:
            # Walk back over non-transaction blocks between the prev prev transaction block and the
            # prev transaction block; each of them gets its rewards claimed here (with no fees).
            curr_b = blocks.block_record(prev_transaction_block.prev_hash)
            while not curr_b.is_transaction_block:
                expected_reward_coins.add(
                    create_pool_coin(
                        curr_b.height,
                        curr_b.pool_puzzle_hash,
                        calculate_pool_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                expected_reward_coins.add(
                    create_farmer_coin(
                        curr_b.height,
                        curr_b.farmer_puzzle_hash,
                        calculate_base_farmer_reward(curr_b.height),
                        constants.GENESIS_CHALLENGE,
                    )
                )
                curr_b = blocks.block_record(curr_b.prev_hash)

    if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
        return Err.INVALID_REWARD_COINS, None
    # Set equality above does not catch duplicate claims, so also compare lengths
    if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
        return Err.INVALID_REWARD_COINS, None

    removals: List[bytes32] = []

    # we store coins paired with their names in order to avoid computing the
    # coin name multiple times, we store it next to the coin while validating
    # the block
    coinbase_additions: List[Tuple[Coin, bytes32]] = [(c, c.name()) for c in expected_reward_coins]
    additions: List[Tuple[Coin, bytes32]] = []
    removals_puzzle_dic: Dict[bytes32, bytes32] = {}
    cost: uint64 = uint64(0)

    # In header validation we check that timestamp is not more than 5 minutes into the future
    # 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
    # (this test has been removed)

    # 7a. The generator root must be the hash of the serialized bytes of
    # the generator for this block (or zeroes if no generator)
    if block.transactions_generator is not None:
        if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
    else:
        if block.transactions_info.generator_root != bytes([0] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None

    # 8a. The generator_ref_list must be the hash of the serialized bytes of
    # the generator ref list for this block (or 'one' bytes [0x01] if no generator)
    # 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
    # 8c. The generator ref list must not point to a height >= this block's height
    if block.transactions_generator_ref_list in (None, []):
        if block.transactions_info.generator_refs_root != bytes([1] * 32):
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
    else:
        # If we have a generator reference list, we must have a generator
        if block.transactions_generator is None:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None

        # The generator_refs_root must be the hash of the concatenation of the List[uint32]
        generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
        if block.transactions_info.generator_refs_root != generator_refs_hash:
            return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
        if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
            return Err.TOO_MANY_GENERATOR_REFS, None
        if any([index >= height for index in block.transactions_generator_ref_list]):
            return Err.FUTURE_GENERATOR_REFS, None

    if block.transactions_generator is not None:
        # Get List of names removed, puzzles hashes for removed coins and conditions created
        assert npc_result is not None
        cost = npc_result.cost

        # 7. Check that cost <= MAX_BLOCK_COST_CLVM
        log.debug(
            f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
            f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
        )
        if cost > constants.MAX_BLOCK_COST_CLVM:
            return Err.BLOCK_COST_EXCEEDS_MAX, None

        # 8. The CLVM program must not return any errors
        if npc_result.error is not None:
            return Err(npc_result.error), None

        assert npc_result.conds is not None

        for spend in npc_result.conds.spends:
            removals.append(bytes32(spend.coin_id))
            removals_puzzle_dic[bytes32(spend.coin_id)] = bytes32(spend.puzzle_hash)
            for puzzle_hash, amount, _ in spend.create_coin:
                c = Coin(bytes32(spend.coin_id), bytes32(puzzle_hash), uint64(amount))
                additions.append((c, c.name()))
    else:
        assert npc_result is None

    # 9. Check that the correct cost is in the transactions info
    if block.transactions_info.cost != cost:
        return Err.INVALID_BLOCK_COST, None

    additions_dic: Dict[bytes32, Coin] = {}
    # 10. Check additions for max coin amount
    # Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
    # We will not even reach here because Coins do type checking (uint64)
    for coin, coin_name in additions + coinbase_additions:
        additions_dic[coin_name] = coin
        if coin.amount < 0:
            return Err.COIN_AMOUNT_NEGATIVE, None

        if coin.amount > constants.MAX_COIN_AMOUNT:
            return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None

    # 11. Validate addition and removal roots
    root_error = validate_block_merkle_roots(
        block.foliage_transaction_block.additions_root,
        block.foliage_transaction_block.removals_root,
        additions + coinbase_additions,
        removals,
    )
    if root_error:
        return root_error, None

    # 12. The additions and removals must result in the correct filter
    byte_array_tx: List[bytearray] = []

    for coin, _ in additions + coinbase_additions:
        byte_array_tx.append(bytearray(coin.puzzle_hash))
    for coin_name in removals:
        byte_array_tx.append(bytearray(coin_name))

    bip158: PyBIP158 = PyBIP158(byte_array_tx)
    encoded_filter = bytes(bip158.GetEncoded())
    filter_hash = std_hash(encoded_filter)

    if filter_hash != block.foliage_transaction_block.filter_hash:
        return Err.INVALID_TRANSACTIONS_FILTER_HASH, None

    # 13. Check for duplicate outputs in additions
    addition_counter = collections.Counter(coin_name for _, coin_name in additions + coinbase_additions)
    for k, v in addition_counter.items():
        if v > 1:
            return Err.DUPLICATE_OUTPUT, None

    # 14. Check for duplicate spends inside block
    removal_counter = collections.Counter(removals)
    for k, v in removal_counter.items():
        if v > 1:
            return Err.DOUBLE_SPEND, None

    # 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
    # The fork point is the last block in common between the peak chain and the chain of `block`
    if peak is None or height == 0:
        fork_h: int = -1
    elif fork_point_with_peak is not None:
        fork_h = fork_point_with_peak
    else:
        fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))

    # Get additions and removals since (after) fork_h but not including this block
    # The values include: the coin that was added, the height of the block in which it was confirmed, and the
    # timestamp of the block in which it was confirmed
    additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {}  # This includes coinbase additions
    removals_since_fork: Set[bytes32] = set()

    # For height 0, there are no additions and removals before this block, so we can skip
    if height > 0:
        # First, get all the blocks in the fork > fork_h, < block.height
        prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
        reorg_blocks: Dict[uint32, FullBlock] = {}
        curr: Optional[FullBlock] = prev_block
        assert curr is not None
        while curr.height > fork_h:
            if curr.height == 0:
                break
            curr = await block_store.get_full_block(curr.prev_header_hash)
            assert curr is not None
            reorg_blocks[curr.height] = curr
        if fork_h != -1:
            assert len(reorg_blocks) == height - fork_h - 1

        curr = prev_block
        assert curr is not None
        while curr.height > fork_h:
            # Coin store doesn't contain coins from fork, we have to run generator for each block in fork
            if curr.transactions_generator is not None:
                # These blocks are in the past and therefore assumed to be valid, so get_block_generator won't raise
                curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
                assert curr_block_generator is not None and curr.transactions_info is not None
                curr_npc_result = get_name_puzzle_conditions(
                    curr_block_generator,
                    min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
                    cost_per_byte=constants.COST_PER_BYTE,
                    mempool_mode=False,
                )
                removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.conds)
            else:
                removals_in_curr = []
                additions_in_curr = []

            for c_name in removals_in_curr:
                assert c_name not in removals_since_fork
                removals_since_fork.add(c_name)
            for c in additions_in_curr:
                coin_name = c.name()
                assert coin_name not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[coin_name] = (c, curr.height, curr.foliage_transaction_block.timestamp)

            for coinbase_coin in curr.get_included_reward_coins():
                coin_name = coinbase_coin.name()
                assert coin_name not in additions_since_fork
                assert curr.foliage_transaction_block is not None
                additions_since_fork[coin_name] = (
                    coinbase_coin,
                    curr.height,
                    curr.foliage_transaction_block.timestamp,
                )
            if curr.height == 0:
                break
            curr = reorg_blocks[uint32(curr.height - 1)]
            assert curr is not None

    removal_coin_records: Dict[bytes32, CoinRecord] = {}
    # the removed coins we need to look up from the DB
    # i.e. all non-ephemeral coins
    removals_from_db: List[bytes32] = []
    for rem in removals:
        if rem in additions_dic:
            # Ephemeral coin: created and spent within this same block
            rem_coin: Coin = additions_dic[rem]
            new_unspent: CoinRecord = CoinRecord(
                rem_coin,
                height,
                height,
                False,
                block.foliage_transaction_block.timestamp,
            )
            removal_coin_records[new_unspent.name] = new_unspent
        else:
            # This check applies to both coins created before fork (pulled from coin_store),
            # and coins created after fork (additions_since_fork)
            if rem in removals_since_fork:
                # This coin was spent in the fork
                return Err.DOUBLE_SPEND_IN_FORK, None
            removals_from_db.append(rem)

    unspent_records = await coin_store.get_coin_records(removals_from_db)

    # some coin spends we need to ensure exist in the fork branch. Both coins we
    # can't find in the DB, but also coins that were spent after the fork point
    look_in_fork: List[bytes32] = []
    for unspent in unspent_records:
        if unspent.confirmed_block_index <= fork_h:
            # Spending something in the current chain, confirmed before fork
            # (We ignore all coins confirmed after fork)
            if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
                # Check for coins spent in an ancestor block
                return Err.DOUBLE_SPEND, None
            removal_coin_records[unspent.name] = unspent
        else:
            look_in_fork.append(unspent.name)

    if len(unspent_records) != len(removals_from_db):
        # some coins could not be found in the DB. We need to find out which
        # ones and look for them in additions_since_fork
        found: Set[bytes32] = set([u.name for u in unspent_records])
        for rem in removals_from_db:
            if rem in found:
                continue
            look_in_fork.append(rem)

    for rem in look_in_fork:
        # This coin is not in the current heaviest chain, so it must be in the fork
        if rem not in additions_since_fork:
            # Check for spending a coin that does not exist in this fork
            log.error(f"Err.UNKNOWN_UNSPENT: COIN ID: {rem} NPC RESULT: {npc_result}")
            return Err.UNKNOWN_UNSPENT, None
        new_coin, confirmed_height, confirmed_timestamp = additions_since_fork[rem]
        new_coin_record: CoinRecord = CoinRecord(
            new_coin,
            confirmed_height,
            uint32(0),
            False,
            confirmed_timestamp,
        )
        removal_coin_records[new_coin_record.name] = new_coin_record

    removed = 0
    for unspent in removal_coin_records.values():
        removed += unspent.coin.amount
    added = 0
    for coin, _ in additions:
        added += coin.amount

    # 16. Check that the total coin amount for added is <= removed
    if removed < added:
        return Err.MINTING_COIN, None

    fees = removed - added
    assert fees >= 0

    # reserve fee cannot be greater than UINT64_MAX per consensus rule.
    # run_generator() would fail
    assert_fee_sum: uint64 = uint64(0)
    if npc_result:
        assert npc_result.conds is not None
        assert_fee_sum = uint64(npc_result.conds.reserve_fee)

    # 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
    if fees < assert_fee_sum:
        return Err.RESERVE_FEE_CONDITION_FAILED, None

    # 18. Check that the fee amount + farmer reward < maximum coin amount
    if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
        return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None

    # 19. Check that the computed fees are equal to the fees in the block header
    if block.transactions_info.fees != fees:
        return Err.INVALID_BLOCK_FEE_AMOUNT, None

    # 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
    for unspent in removal_coin_records.values():
        if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
            return Err.WRONG_PUZZLE_HASH, None

    # 21. Verify conditions
    # verify absolute/relative height/time conditions
    if npc_result is not None:
        assert npc_result.conds is not None
        error = mempool_check_time_locks(
            removal_coin_records,
            npc_result.conds,
            prev_transaction_block_height,
            block.foliage_transaction_block.timestamp,
        )
        if error:
            return error, None

    # create hash_key list for aggsig check
    pairs_pks: List[bytes48] = []
    pairs_msgs: List[bytes] = []
    if npc_result:
        assert npc_result.conds is not None
        pairs_pks, pairs_msgs = pkm_pairs(npc_result.conds, constants.AGG_SIG_ME_ADDITIONAL_DATA)

    # 22. Verify aggregated signature
    # TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
    if not block.transactions_info.aggregated_signature:
        return Err.BAD_AGGREGATE_SIGNATURE, None

    # The pairing cache is not useful while syncing as each pairing is seen
    # only once, so the extra effort of populating it is not justified.
    # However, we force caching of pairings just for unfinished blocks
    # as the cache is likely to be useful when validating the corresponding
    # finished blocks later.
    if validate_signature:
        force_cache: bool = isinstance(block, UnfinishedBlock)
        if not cached_bls.aggregate_verify(
            pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature, force_cache
        ):
            return Err.BAD_AGGREGATE_SIGNATURE, None

    return None, npc_result
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_rewards.py | flax/consensus/block_rewards.py | from __future__ import annotations
from flax.util.ints import uint32, uint64
# 1 Flax coin = 1,000,000,000,000 = 1 trillion mojo.
_mojo_per_flax = 1000000000000
# Expected blocks per year: 32 * 6 * 24 * 365 (per the inline factors; presumably
# 32 blocks per sub-slot and 6 sub-slots per hour — TODO confirm against SLOT_BLOCKS_TARGET).
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
def calculate_pool_reward(height: uint32) -> uint64:
    """
    Returns the pool reward at a certain block height. The pool earns 7/8 of the reward in each block. If the farmer
    is solo farming, they act as the pool, and therefore earn the entire block reward.
    These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    pool_share = 7 / 8
    if height == 0:
        # Genesis block: the pre-farm of 300,000 coins is paid out here.
        return uint64(int(pool_share * 300000 * _mojo_per_flax))
    # (halving boundary in years, whole-block reward in coins), checked in order.
    for years, block_reward in ((3, 2), (6, 1), (9, 0.5), (12, 0.25)):
        if height < years * _blocks_per_year:
            return uint64(int(pool_share * block_reward * _mojo_per_flax))
    # Tail emission after the final scheduled halving.
    return uint64(int(pool_share * 0.125 * _mojo_per_flax))
def calculate_base_farmer_reward(height: uint32) -> uint64:
    """
    Returns the base farmer reward at a certain block height.
    The base fee reward is 1/8 of total block reward

    Returns the coinbase reward at a certain block height. These halving events will not be hit at the exact times
    (3 years, etc), due to fluctuations in difficulty. They will likely come early, if the network space and VDF
    rates increase continuously.
    """
    farmer_share = 1 / 8
    if height == 0:
        # Genesis block: the pre-farm of 300,000 coins is paid out here.
        return uint64(int(farmer_share * 300000 * _mojo_per_flax))
    # (halving boundary in years, whole-block reward in coins), checked in order.
    for years, block_reward in ((3, 2), (6, 1), (9, 0.5), (12, 0.25)):
        if height < years * _blocks_per_year:
            return uint64(int(farmer_share * block_reward * _mojo_per_flax))
    # Tail emission after the final scheduled halving.
    return uint64(int(farmer_share * 0.125 * _mojo_per_flax))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/vdf_info_computation.py | flax/consensus/vdf_info_computation.py | from __future__ import annotations
from typing import List, Optional, Tuple
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.util.ints import uint64, uint128
def get_signage_point_vdf_info(
    constants: ConsensusConstants,
    finished_sub_slots: List[EndOfSubSlotBundle],
    overflow: bool,
    prev_b: Optional[BlockRecord],
    blocks: BlockchainInterface,
    sp_total_iters: uint128,
    sp_iters: uint64,
) -> Tuple[bytes32, bytes32, ClassgroupElement, ClassgroupElement, uint64, uint64]:
    """
    Returns the following information, for the VDF of the signage point at sp_total_iters.
    cc and rc challenge hash
    cc and rc input
    cc and rc iterations

    The returned tuple is (cc_challenge, rc_challenge, cc_input, rc_input, cc_iters, rc_iters).
    Note that the rc VDF input is always the default classgroup element, and the cc and rc
    VDFs use the same iteration count (sp_vdf_iters).
    """
    new_sub_slot: bool = len(finished_sub_slots) > 0
    genesis_block: bool = prev_b is None

    if new_sub_slot and not overflow:
        # Case 1: start from start of this slot. Case of no overflow slots. Also includes genesis block after empty
        # slot(s), but not overflowing
        rc_vdf_challenge: bytes32 = finished_sub_slots[-1].reward_chain.get_hash()
        cc_vdf_challenge = finished_sub_slots[-1].challenge_chain.get_hash()
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif new_sub_slot and overflow and len(finished_sub_slots) > 1:
        # Case 2: start from start of prev slot. This is a rare case of empty prev slot. Includes genesis block after
        # 2 empty slots
        rc_vdf_challenge = finished_sub_slots[-2].reward_chain.get_hash()
        cc_vdf_challenge = finished_sub_slots[-2].challenge_chain.get_hash()
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif genesis_block:
        # Case 3: Genesis block case, first challenge
        rc_vdf_challenge = constants.GENESIS_CHALLENGE
        cc_vdf_challenge = constants.GENESIS_CHALLENGE
        sp_vdf_iters = sp_iters
        cc_vdf_input = ClassgroupElement.get_default_element()
    elif new_sub_slot and overflow and len(finished_sub_slots) == 1:
        # Case 4: Starting at prev will put us in the previous, sub-slot, since case 2 handled more empty slots
        assert prev_b is not None
        curr: BlockRecord = prev_b
        # Walk back to the latest ancestor at or before the signage point (or a sub-slot start)
        while not curr.first_in_sub_slot and curr.total_iters > sp_total_iters:
            curr = blocks.block_record(curr.prev_hash)
        if curr.total_iters < sp_total_iters:
            # The VDF continues from curr's infusion point
            sp_vdf_iters = uint64(sp_total_iters - curr.total_iters)
            cc_vdf_input = curr.challenge_vdf_output
            rc_vdf_challenge = curr.reward_infusion_new_challenge
        else:
            # No block before the signage point in this sub-slot: start from the slot boundary
            assert curr.finished_reward_slot_hashes is not None
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = curr.finished_reward_slot_hashes[-1]
        while not curr.first_in_sub_slot:
            curr = blocks.block_record(curr.prev_hash)
        assert curr.finished_challenge_slot_hashes is not None
        cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
    elif not new_sub_slot and overflow:
        # Case 5: prev is in the same sub slot and also overflow. Starting at prev does not skip any sub slots
        assert prev_b is not None
        curr = prev_b

        # Collects the last two finished slots
        if curr.first_in_sub_slot:
            assert curr.finished_challenge_slot_hashes is not None
            assert curr.finished_reward_slot_hashes is not None
            found_sub_slots = list(
                reversed(
                    list(
                        zip(
                            curr.finished_challenge_slot_hashes,
                            curr.finished_reward_slot_hashes,
                        )
                    )
                )
            )
        else:
            found_sub_slots = []
        # sp_pre_sb: latest ancestor block whose infusion is before the signage point, if any
        sp_pre_sb: Optional[BlockRecord] = None
        while len(found_sub_slots) < 2 and curr.height > 0:
            if sp_pre_sb is None and curr.total_iters < sp_total_iters:
                sp_pre_sb = curr
            curr = blocks.block_record(curr.prev_hash)
            if curr.first_in_sub_slot:
                assert curr.finished_challenge_slot_hashes is not None
                assert curr.finished_reward_slot_hashes is not None
                found_sub_slots += list(
                    reversed(
                        list(
                            zip(
                                curr.finished_challenge_slot_hashes,
                                curr.finished_reward_slot_hashes,
                            )
                        )
                    )
                )
        if sp_pre_sb is None and curr.total_iters < sp_total_iters:
            sp_pre_sb = curr
        if sp_pre_sb is not None:
            sp_vdf_iters = uint64(sp_total_iters - sp_pre_sb.total_iters)
            cc_vdf_input = sp_pre_sb.challenge_vdf_output
            rc_vdf_challenge = sp_pre_sb.reward_infusion_new_challenge
        else:
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = found_sub_slots[1][1]
        cc_vdf_challenge = found_sub_slots[1][0]
    elif not new_sub_slot and not overflow:
        # Case 6: prev is in the same sub slot. Starting at prev does not skip any sub slots. We do not need
        # to go back another sub slot, because it's not overflow, so the VDF to signage point is this sub-slot.
        assert prev_b is not None
        curr = prev_b
        while not curr.first_in_sub_slot and curr.total_iters > sp_total_iters:
            curr = blocks.block_record(curr.prev_hash)
        if curr.total_iters < sp_total_iters:
            sp_vdf_iters = uint64(sp_total_iters - curr.total_iters)
            cc_vdf_input = curr.challenge_vdf_output
            rc_vdf_challenge = curr.reward_infusion_new_challenge
        else:
            assert curr.finished_reward_slot_hashes is not None
            sp_vdf_iters = sp_iters
            cc_vdf_input = ClassgroupElement.get_default_element()
            rc_vdf_challenge = curr.finished_reward_slot_hashes[-1]
        while not curr.first_in_sub_slot:
            curr = blocks.block_record(curr.prev_hash)
        assert curr.finished_challenge_slot_hashes is not None
        cc_vdf_challenge = curr.finished_challenge_slot_hashes[-1]
    else:
        # All cases are handled above
        assert False

    return (
        cc_vdf_challenge,
        rc_vdf_challenge,
        cc_vdf_input,
        ClassgroupElement.get_default_element(),
        sp_vdf_iters,
        sp_vdf_iters,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/constants.py | flax/consensus/constants.py | from __future__ import annotations
import dataclasses
import logging
from typing import Any
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.byte_types import hexstr_to_bytes
from flax.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class ConsensusConstants:
    """Immutable set of consensus parameters for a chain (mainnet, testnets, simulators).

    Instances are frozen; derive modified copies with ``replace`` or
    ``replace_str_to_bytes`` rather than mutating fields.
    """

    SLOT_BLOCKS_TARGET: uint32  # How many blocks to target per sub-slot
    MIN_BLOCKS_PER_CHALLENGE_BLOCK: uint8  # How many blocks must be created per slot (to make challenge sb)
    # Max number of blocks that can be infused into a sub-slot.
    # Note: this must be less than SUB_EPOCH_BLOCKS/2, and > SLOT_BLOCKS_TARGET
    MAX_SUB_SLOT_BLOCKS: uint32
    NUM_SPS_SUB_SLOT: uint32  # The number of signage points per sub-slot (including the 0th sp at the sub-slot start)
    SUB_SLOT_ITERS_STARTING: uint64  # The sub_slot_iters for the first epoch
    DIFFICULTY_CONSTANT_FACTOR: uint128  # Multiplied by the difficulty to get iterations
    DIFFICULTY_STARTING: uint64  # The difficulty for the first epoch
    # The maximum factor by which difficulty and sub_slot_iters can change per epoch
    DIFFICULTY_CHANGE_MAX_FACTOR: uint32
    SUB_EPOCH_BLOCKS: uint32  # The number of blocks per sub-epoch
    EPOCH_BLOCKS: uint32  # The number of blocks per sub-epoch, must be a multiple of SUB_EPOCH_BLOCKS
    SIGNIFICANT_BITS: int  # The number of bits to look at in difficulty and min iters. The rest are zeroed
    DISCRIMINANT_SIZE_BITS: int  # Max is 1024 (based on ClassGroupElement int size)
    NUMBER_ZERO_BITS_PLOT_FILTER: int  # H(plot id + challenge hash + signage point) must start with these many zeroes
    MIN_PLOT_SIZE: int  # presumably the minimum allowed plot k-size — TODO confirm against plot validation
    MAX_PLOT_SIZE: int  # presumably the maximum allowed plot k-size — TODO confirm against plot validation
    SUB_SLOT_TIME_TARGET: int  # The target number of seconds per sub-slot
    NUM_SP_INTERVALS_EXTRA: int  # The difference between signage point and infusion point (plus required_iters)
    MAX_FUTURE_TIME: int  # The next block can have a timestamp of at most these many seconds more
    NUMBER_OF_TIMESTAMPS: int  # Than the average of the last NUMBER_OF_TIMESTAMPS blocks
    # Used as the initial cc rc challenges, as well as first block back pointers, and first SES back pointer
    # We override this value based on the chain being run (testnet0, testnet1, mainnet, etc)
    GENESIS_CHALLENGE: bytes32
    # Forks of flax should change this value to provide replay attack protection
    AGG_SIG_ME_ADDITIONAL_DATA: bytes
    GENESIS_PRE_FARM_POOL_PUZZLE_HASH: bytes32  # The block at height must pay out to this pool puzzle hash
    GENESIS_PRE_FARM_FARMER_PUZZLE_HASH: bytes32  # The block at height must pay out to this farmer puzzle hash
    MAX_VDF_WITNESS_SIZE: int  # The maximum number of classgroup elements within an n-wesolowski proof
    # Size of mempool = 10x the size of block
    MEMPOOL_BLOCK_BUFFER: int
    # Max coin amount uint(1 << 64). This allows coin amounts to fit in 64 bits. This is around 18M flax.
    MAX_COIN_AMOUNT: int
    # Max block cost in clvm cost units
    MAX_BLOCK_COST_CLVM: int
    # Cost per byte of generator program
    COST_PER_BYTE: int
    WEIGHT_PROOF_THRESHOLD: uint8
    WEIGHT_PROOF_RECENT_BLOCKS: uint32
    MAX_BLOCK_COUNT_PER_REQUESTS: uint32  # cap on blocks served per request — TODO confirm against protocol handlers
    BLOCKS_CACHE_SIZE: uint32
    MAX_GENERATOR_SIZE: uint32
    MAX_GENERATOR_REF_LIST_SIZE: uint32
    POOL_SUB_SLOT_ITERS: uint64

    def replace(self, **changes: object) -> "ConsensusConstants":
        """Return a copy of these constants with the given fields replaced."""
        return dataclasses.replace(self, **changes)

    def replace_str_to_bytes(self, **changes: Any) -> "ConsensusConstants":
        """
        Overrides str (hex) values with bytes.

        Unknown keys are dropped with a warning (except the legacy NETWORK_TYPE,
        which is silently ignored); str values are hex-decoded, others pass through.
        """
        filtered_changes = {}
        for k, v in changes.items():
            if not hasattr(self, k):
                # NETWORK_TYPE used to be present in default config, but has been removed
                if k not in ["NETWORK_TYPE"]:
                    log.warning(f'invalid key in network configuration (config.yaml) "{k}". Ignoring')
                continue
            if isinstance(v, str):
                filtered_changes[k] = hexstr_to_bytes(v)
            else:
                filtered_changes[k] = v
        return dataclasses.replace(self, **filtered_changes)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/make_sub_epoch_summary.py | flax/consensus/make_sub_epoch_summary.py | from __future__ import annotations
import logging
from typing import Optional, Union
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.consensus.deficit import calculate_deficit
from flax.consensus.difficulty_adjustment import (
_get_next_difficulty,
_get_next_sub_slot_iters,
can_finish_sub_and_full_epoch,
get_next_sub_slot_iters_and_difficulty,
height_can_be_first_in_epoch,
)
from flax.consensus.pot_iterations import calculate_ip_iters, calculate_sp_iters, is_overflow_block
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.full_block import FullBlock
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
def make_sub_epoch_summary(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    blocks_included_height: uint32,
    prev_prev_block: BlockRecord,
    new_difficulty: Optional[uint64],
    new_sub_slot_iters: Optional[uint64],
) -> SubEpochSummary:
    """
    Creates a sub-epoch-summary object, assuming that the first block in the new sub-epoch is at height
    "blocks_included_height". Prev_prev_b is the second to last block in the previous sub-epoch. On a new epoch,
    new_difficulty and new_sub_slot_iters are also added.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        blocks_included_height: block height in which the SES will be included
        prev_prev_block: second to last block in epoch
        new_difficulty: difficulty in new epoch (None if this is not an epoch boundary)
        new_sub_slot_iters: sub slot iters in new epoch (None if this is not an epoch boundary)
    """
    assert prev_prev_block.height == blocks_included_height - 2

    # First sub_epoch
    # This is not technically correct, because more blocks can potentially be included than 2*MAX_SUB_SLOT_BLOCKS,
    # But assuming less than 128 overflow blocks get infused in the first 2 slots, it's not an issue
    if (blocks_included_height + constants.MAX_SUB_SLOT_BLOCKS) // constants.SUB_EPOCH_BLOCKS <= 1:
        # For the very first sub-epoch, both back pointers are the genesis challenge
        return SubEpochSummary(
            constants.GENESIS_CHALLENGE,
            constants.GENESIS_CHALLENGE,
            uint8(0),
            None,
            None,
        )
    # Walk back to the most recent block that included a sub-epoch summary
    curr: BlockRecord = prev_prev_block
    while curr.sub_epoch_summary_included is None:
        curr = blocks.block_record(curr.prev_hash)
    assert curr is not None
    assert curr.finished_reward_slot_hashes is not None
    prev_ses = curr.sub_epoch_summary_included.get_hash()
    return SubEpochSummary(
        prev_ses,
        curr.finished_reward_slot_hashes[-1],
        uint8(curr.height % constants.SUB_EPOCH_BLOCKS),
        new_difficulty,
        new_sub_slot_iters,
    )
def next_sub_epoch_summary(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    required_iters: uint64,
    block: Union[UnfinishedBlock, FullBlock],
    can_finish_soon: bool = False,
) -> Optional[SubEpochSummary]:
    """
    Returns the sub-epoch summary that can be included in the block after ``block``, if one
    should be included at all. ``block`` must be eligible to be the last block in the epoch;
    if it is not, returns None. Assumes that there is a new slot ending after block.

    Args:
        constants: consensus constants being used for this chain
        blocks: interface to cached SBR
        required_iters: required iters of the proof of space in block
        block: the (potentially) last block in the new epoch
        can_finish_soon: useful when sending SES to timelords — we might not be able to
            finish it yet, but we will soon (within MAX_SUB_SLOT_BLOCKS)
    Returns:
        object: the new sub-epoch summary, or None when no summary is due
    """
    signage_point_index = block.reward_chain_block.signage_point_index
    prev_b: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
    # Genesis and its child can never be followed by a summary block.
    if prev_b is None or prev_b.height == 0:
        return None
    if len(block.finished_sub_slots) > 0 and block.finished_sub_slots[0].challenge_chain.new_difficulty is not None:
        # We just included a sub-epoch summary
        return None
    assert prev_b is not None
    # This is the ssi of the current block
    sub_slot_iters = get_next_sub_slot_iters_and_difficulty(
        constants, len(block.finished_sub_slots) > 0, prev_b, blocks
    )[0]
    overflow = is_overflow_block(constants, signage_point_index)
    # A summary hash in the first finished sub-slot also means one was just included.
    if (
        len(block.finished_sub_slots) > 0
        and block.finished_sub_slots[0].challenge_chain.subepoch_summary_hash is not None
    ):
        return None
    if can_finish_soon:
        deficit: uint8 = uint8(0)  # Assume that our deficit will go to zero soon
        can_finish_se = True
        if height_can_be_first_in_epoch(constants, uint32(prev_b.height + 2)):
            can_finish_epoch = True
            # Scan back through the sub-epoch: an already-included summary carrying a new
            # difficulty vetoes finishing the epoch again here.
            if (prev_b.height + 2) % constants.SUB_EPOCH_BLOCKS > 1:
                curr: BlockRecord = prev_b
                while curr.height % constants.SUB_EPOCH_BLOCKS > 0:
                    if (
                        curr.sub_epoch_summary_included is not None
                        and curr.sub_epoch_summary_included.new_difficulty is not None
                    ):
                        can_finish_epoch = False
                    curr = blocks.block_record(curr.prev_hash)
                # Also check the first block of the sub-epoch (loop exits before testing it).
                if (
                    curr.sub_epoch_summary_included is not None
                    and curr.sub_epoch_summary_included.new_difficulty is not None
                ):
                    can_finish_epoch = False
        elif height_can_be_first_in_epoch(constants, uint32(prev_b.height + constants.MAX_SUB_SLOT_BLOCKS + 2)):
            can_finish_epoch = True
        else:
            can_finish_epoch = False
    else:
        deficit = calculate_deficit(
            constants,
            uint32(prev_b.height + 1),
            prev_b,
            overflow,
            len(block.finished_sub_slots),
        )
        can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants,
            blocks,
            uint32(prev_b.height + 1),
            prev_b.header_hash if prev_b is not None else None,
            deficit,
            False,
        )
    # can't finish se, no summary
    if not can_finish_se:
        return None
    next_difficulty = None
    next_sub_slot_iters = None
    # if can finish epoch, new difficulty and ssi
    if can_finish_epoch:
        sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
        ip_iters = calculate_ip_iters(constants, sub_slot_iters, signage_point_index, required_iters)
        next_difficulty = _get_next_difficulty(
            constants,
            blocks,
            block.prev_header_hash,
            uint32(prev_b.height + 1),
            uint64(prev_b.weight - blocks.block_record(prev_b.prev_hash).weight),
            deficit,
            False,  # Already checked above
            True,
            uint128(block.total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)),
            True,
        )
        next_sub_slot_iters = _get_next_sub_slot_iters(
            constants,
            blocks,
            block.prev_header_hash,
            uint32(prev_b.height + 1),
            sub_slot_iters,
            deficit,
            False,  # Already checked above
            True,
            uint128(block.total_iters - ip_iters + sp_iters - (sub_slot_iters if overflow else 0)),
            True,
        )
    return make_sub_epoch_summary(
        constants,
        blocks,
        uint32(prev_b.height + 2),
        prev_b,
        next_difficulty,
        next_sub_slot_iters,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/difficulty_adjustment.py | flax/consensus/difficulty_adjustment.py | from __future__ import annotations
from typing import List, Optional, Tuple
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint8, uint32, uint64, uint128
from flax.util.significant_bits import count_significant_bits, truncate_to_significant_bits
def _get_blocks_at_height(
    blocks: BlockchainInterface,
    prev_b: BlockRecord,
    target_height: uint32,
    max_num_blocks: uint32 = uint32(1),
) -> List[BlockRecord]:
    """
    Fetch up to ``max_num_blocks`` consecutive BlockRecords beginning at ``target_height``.
    Assumes all block records are present. Falls back to a linear walk when the blocks are
    not on the path of the peak. Can only fetch ancestors of prev_b.

    Args:
        blocks: dict from header hash to BlockRecord.
        prev_b: block to start the backwards search from.
        target_height: height of the first block to return
        max_num_blocks: max number of blocks to fetch (fewer may be returned)
    """
    # Fast path: prev_b lies on the heaviest chain, so every ancestor can be looked up
    # directly by height.
    if blocks.contains_height(prev_b.height) and blocks.height_to_hash(prev_b.height) == prev_b.header_hash:
        fetched: List[BlockRecord] = []
        for h in range(target_height, target_height + max_num_blocks):
            assert blocks.contains_height(uint32(h))
            fetched.append(blocks.height_to_block_record(uint32(h)))
        return fetched
    # Slow path: we are on a fork, so walk back one block at a time via prev_hash and
    # collect the records inside the requested window.
    collected: List[BlockRecord] = []
    cursor: BlockRecord = prev_b
    while cursor.height >= target_height:
        if cursor.height < target_height + max_num_blocks:
            collected.append(cursor)
        if cursor.height == 0:
            break
        cursor = blocks.block_record(cursor.prev_hash)
    collected.reverse()
    return collected
def _get_second_to_last_transaction_block_in_previous_epoch(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    last_b: BlockRecord,
) -> BlockRecord:
    """
    Retrieves the second to last transaction block in the previous epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dict from header hash to block of all relevant blocks
        last_b: last-block in the current epoch, or last block we have seen, if potentially finishing epoch soon

             prev epoch surpassed  prev epoch started                  epoch sur.  epoch started
              v                       v                                v         v
        |.B...B....B. B....B...|......B....B.....B...B.|.B.B.B..|..B...B.B.B...|.B.B.B. B.|........
            PREV EPOCH                 CURR EPOCH                               NEW EPOCH

    The blocks selected for the timestamps are the second to last transaction blocks in each epoch.
    Block at height 0 is an exception. Note that H mod EPOCH_BLOCKS where H is the height of the first block in the
    epoch, must be >= 0, and < 128.
    """
    # This height is guaranteed to be in the next epoch (even when last_b is not actually the last block)
    height_in_next_epoch = (
        last_b.height + 2 * constants.MAX_SUB_SLOT_BLOCKS + constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK + 5
    )
    height_epoch_surpass: uint32 = uint32(height_in_next_epoch - (height_in_next_epoch % constants.EPOCH_BLOCKS))
    height_prev_epoch_surpass: uint32 = uint32(height_epoch_surpass - constants.EPOCH_BLOCKS)
    # Both surpass heights must sit on an epoch boundary. (Bug fix: the original assert
    # compared height_prev_epoch_surpass against itself, which is vacuously true and never
    # checked height_epoch_surpass at all.)
    assert height_epoch_surpass % constants.EPOCH_BLOCKS == height_prev_epoch_surpass % constants.EPOCH_BLOCKS == 0
    # Sanity check, don't go too far past epoch barrier
    assert (height_in_next_epoch - height_epoch_surpass) < (5 * constants.MAX_SUB_SLOT_BLOCKS)
    if height_prev_epoch_surpass == 0:
        # The genesis block is an edge case, where we measure from the first block in epoch (height 0), as opposed to
        # a block in the previous epoch, which would be height < 0
        return _get_blocks_at_height(blocks, last_b, uint32(0))[0]
    # If the prev slot is the first slot, the iterations start at 0
    # We will compute the timestamps of the 2nd to last block in epoch, as well as the total iterations at infusion
    prev_slot_start_iters: uint128
    prev_slot_time_start: uint64
    # The target block must be in this range. Either the surpass block must be a transaction block, or something
    # in it's sub slot must be a transaction block. If that is the only transaction block in the sub-slot, the last
    # block in the previous sub-slot from that must also be a transaction block (therefore -1 is used).
    # The max height for the new epoch to start is surpass + 2*MAX_SUB_SLOT_BLOCKS + MIN_BLOCKS_PER_CHALLENGE_BLOCK - 3,
    # since we might have a deficit > 0 when surpass is hit. The +3 is added just in case
    fetched_blocks = _get_blocks_at_height(
        blocks,
        last_b,
        uint32(height_prev_epoch_surpass - constants.MAX_SUB_SLOT_BLOCKS - 1),
        uint32(3 * constants.MAX_SUB_SLOT_BLOCKS + constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK + 3),
    )
    # We want to find the last block in the slot at which we surpass the height.
    # The last block in epoch will be before this.
    fetched_index: int = constants.MAX_SUB_SLOT_BLOCKS
    curr_b: BlockRecord = fetched_blocks[fetched_index]
    fetched_index += 1
    assert curr_b.height == height_prev_epoch_surpass - 1
    next_b: BlockRecord = fetched_blocks[fetched_index]
    assert next_b.height == height_prev_epoch_surpass
    # Wait until the slot finishes with a challenge chain infusion at start of slot
    # Note that there are no overflow blocks at the start of new epochs
    while next_b.sub_epoch_summary_included is None:
        curr_b = next_b
        next_b = fetched_blocks[fetched_index]
        fetched_index += 1
    # Backtrack to find the second to last tx block
    found_tx_block = 1 if curr_b.is_transaction_block else 0
    while found_tx_block < 2:
        curr_b = blocks.block_record(curr_b.prev_hash)
        if curr_b.is_transaction_block:
            found_tx_block += 1
    return curr_b
def height_can_be_first_in_epoch(constants: ConsensusConstants, height: uint32) -> bool:
    """Return True iff a block at ``height`` could be the first block of an epoch.

    A height qualifies when the start of its sub-epoch lies on an epoch boundary.
    """
    sub_epoch_start = height - (height % constants.SUB_EPOCH_BLOCKS)
    return sub_epoch_start % constants.EPOCH_BLOCKS == 0
def can_finish_sub_and_full_epoch(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    height: uint32,
    prev_header_hash: Optional[bytes32],
    deficit: uint8,
    block_at_height_included_ses: bool,
) -> Tuple[bool, bool]:
    """
    Decide whether the block at ``height`` can close a sub-epoch (and a full epoch).

    Returns:
        (can_finish_sub_epoch, can_finish_epoch): the first flag is True when the next
        sub-slot after ``height`` belongs to a new sub-epoch (so height + 1 lies in it);
        the second is True when that sub-slot also starts a new epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        height: block height of the (potentially) last block in the sub-epoch
        prev_header_hash: prev_header hash of the block at height, assuming not genesis
        deficit: deficit of block at height height
        block_at_height_included_ses: whether the block at height already included a SES
    """
    # Too early in the chain for any sub-epoch boundary.
    if height < constants.SUB_EPOCH_BLOCKS - 1:
        return False, False
    assert prev_header_hash is not None
    # The challenge-block requirement is not yet satisfied.
    if deficit > 0:
        return False, False
    if block_at_height_included_ses:
        # If we just included a sub_epoch_summary, we cannot include one again
        return False, False
    # The (height + 1) % SUB_EPOCH_BLOCKS in {0, 1} cases are deliberately skipped:
    # at 0, height + 1 is the first place a sub-epoch summary can appear at all; at 1,
    # whether offset 0 included one was just checked above.
    if (height + 1) % constants.SUB_EPOCH_BLOCKS > 1:
        walker: BlockRecord = blocks.block_record(prev_header_hash)
        # Scan back through this sub-epoch — including its first block — looking for an
        # already included summary, which vetoes a new boundary here.
        while True:
            if walker.sub_epoch_summary_included is not None:
                return False, False
            if walker.height % constants.SUB_EPOCH_BLOCKS == 0:
                break
            walker = blocks.block_record(walker.prev_hash)
    # A sub-epoch can finish; a full epoch additionally needs height + 1 epoch-aligned.
    return True, height_can_be_first_in_epoch(constants, uint32(height + 1))
def _get_next_sub_slot_iters(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    prev_header_hash: bytes32,
    height: uint32,
    curr_sub_slot_iters: uint64,
    deficit: uint8,
    block_at_height_included_ses: bool,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check: bool = False,
) -> uint64:
    """
    Returns the slot iterations required for the next block after the one at ``height``,
    where ``new_slot`` is true iff the next block will be in the next slot.
    WARNING: assumes that the block at height is not the first block in a sub-epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        prev_header_hash: header hash of the previous block
        height: the block height of the block to look at
        curr_sub_slot_iters: sub-slot iters at the infusion point of the block at height
        deficit: deficit of block at height height
        block_at_height_included_ses: whether the block at height already included a SES
        new_slot: whether or not there is a new slot after height
        signage_point_total_iters: signage point iters of the block at height
        skip_epoch_check: don't check correct epoch

    Raises:
        ValueError: if prev_header_hash is not a known block
    """
    next_height: uint32 = uint32(height + 1)
    # Still inside the first epoch: the starting value applies unchanged.
    if next_height < constants.EPOCH_BLOCKS:
        return uint64(constants.SUB_SLOT_ITERS_STARTING)
    if not blocks.contains_block(prev_header_hash):
        raise ValueError(f"Header hash {prev_header_hash} not in blocks")
    prev_b: BlockRecord = blocks.block_record(prev_header_hash)
    # If we are in the same epoch, return same ssi
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, blocks, height, prev_header_hash, deficit, block_at_height_included_ses
        )
        if not new_slot or not can_finish_epoch:
            return curr_sub_slot_iters
    last_block_prev: BlockRecord = _get_second_to_last_transaction_block_in_previous_epoch(constants, blocks, prev_b)
    # This gets the last transaction block before this block's signage point. Assuming the block at height height
    # is the last block infused in the epoch: If this block ends up being a
    # transaction block, then last_block_curr will be the second to last tx block in the epoch. If this block
    # is not a transaction block, that means there was exactly one other tx block included in between our signage
    # point and infusion point, and therefore last_block_curr is the second to last as well.
    last_block_curr = prev_b
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_transaction_block:
        last_block_curr = blocks.block_record(last_block_curr.prev_hash)
    assert last_block_curr.timestamp is not None and last_block_prev.timestamp is not None
    # This is computed as the iterations per second in last epoch, times the target number of seconds per slot
    new_ssi_precise: uint64 = uint64(
        constants.SUB_SLOT_TIME_TARGET
        * (last_block_curr.total_iters - last_block_prev.total_iters)
        // (last_block_curr.timestamp - last_block_prev.timestamp)
    )
    # Only change by a max factor as a sanity check
    max_ssi = uint64(constants.DIFFICULTY_CHANGE_MAX_FACTOR * last_block_curr.sub_slot_iters)
    min_ssi = uint64(last_block_curr.sub_slot_iters // constants.DIFFICULTY_CHANGE_MAX_FACTOR)
    if new_ssi_precise >= last_block_curr.sub_slot_iters:
        new_ssi_precise = uint64(min(new_ssi_precise, max_ssi))
    else:
        # Never drop below one iteration per signage point.
        new_ssi_precise = uint64(max([constants.NUM_SPS_SUB_SLOT, new_ssi_precise, min_ssi]))
    # Truncate to the allowed precision, then round down to a multiple of NUM_SPS_SUB_SLOT.
    new_ssi = truncate_to_significant_bits(new_ssi_precise, constants.SIGNIFICANT_BITS)
    new_ssi = uint64(new_ssi - new_ssi % constants.NUM_SPS_SUB_SLOT)  # Must divide the sub slot
    assert count_significant_bits(new_ssi) <= constants.SIGNIFICANT_BITS
    return new_ssi
def _get_next_difficulty(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    prev_header_hash: bytes32,
    height: uint32,
    current_difficulty: uint64,
    deficit: uint8,
    block_at_height_included_ses: bool,
    new_slot: bool,
    signage_point_total_iters: uint128,
    skip_epoch_check: bool = False,
) -> uint64:
    """
    Returns the difficulty of the next block that extends onto block.
    Used to calculate the number of iterations.
    WARNING: assumes that the block at height is not the first block in a sub-epoch.

    Args:
        constants: consensus constants being used for this chain
        blocks: dictionary from header hash to SBR of all included SBR
        prev_header_hash: header hash of the previous block
        height: the block height of the block to look at
        current_difficulty: difficulty at the infusion point of the block at height
        deficit: deficit of block at height height
        block_at_height_included_ses: whether the block at height already included a SES
        new_slot: whether or not there is a new slot after height
        signage_point_total_iters: signage point iters of the block at height
        skip_epoch_check: don't check correct epoch

    Raises:
        ValueError: if prev_header_hash is not a known block
    """
    next_height: uint32 = uint32(height + 1)
    if next_height < (constants.EPOCH_BLOCKS - 3 * constants.MAX_SUB_SLOT_BLOCKS):
        # We are in the first epoch
        return uint64(constants.DIFFICULTY_STARTING)
    if not blocks.contains_block(prev_header_hash):
        raise ValueError(f"Header hash {prev_header_hash} not in blocks")
    prev_b: BlockRecord = blocks.block_record(prev_header_hash)
    # If we are in the same slot as previous block, return same difficulty
    if not skip_epoch_check:
        _, can_finish_epoch = can_finish_sub_and_full_epoch(
            constants, blocks, height, prev_header_hash, deficit, block_at_height_included_ses
        )
        if not new_slot or not can_finish_epoch:
            return current_difficulty
    last_block_prev: BlockRecord = _get_second_to_last_transaction_block_in_previous_epoch(constants, blocks, prev_b)
    # This gets the last transaction block before this block's signage point. Assuming the block at height height
    # is the last block infused in the epoch: If this block ends up being a
    # transaction block, then last_block_curr will be the second to last tx block in the epoch. If this block
    # is not a transaction block, that means there was exactly one other tx block included in between our signage
    # point and infusion point, and therefore last_block_curr is the second to last as well.
    last_block_curr = prev_b
    while last_block_curr.total_iters > signage_point_total_iters or not last_block_curr.is_transaction_block:
        last_block_curr = blocks.block_record(last_block_curr.prev_hash)
    assert last_block_curr.timestamp is not None
    assert last_block_prev.timestamp is not None
    actual_epoch_time: uint64 = uint64(last_block_curr.timestamp - last_block_prev.timestamp)
    # The difficulty of prev_b is the weight it added over its own predecessor.
    old_difficulty = uint64(prev_b.weight - blocks.block_record(prev_b.prev_hash).weight)
    # Terms are rearranged so there is only one division.
    new_difficulty_precise = uint64(
        (last_block_curr.weight - last_block_prev.weight)
        * constants.SUB_SLOT_TIME_TARGET
        // (constants.SLOT_BLOCKS_TARGET * actual_epoch_time)
    )
    # Only change by a max factor, to prevent attacks, as in greenpaper, and must be at least 1
    max_diff = uint64(constants.DIFFICULTY_CHANGE_MAX_FACTOR * old_difficulty)
    min_diff = uint64(old_difficulty // constants.DIFFICULTY_CHANGE_MAX_FACTOR)
    if new_difficulty_precise >= old_difficulty:
        new_difficulty_precise = uint64(min(new_difficulty_precise, max_diff))
    else:
        new_difficulty_precise = uint64(max([uint64(1), new_difficulty_precise, min_diff]))
    # Truncate to the allowed precision so the value is representable in the protocol.
    new_difficulty = truncate_to_significant_bits(new_difficulty_precise, constants.SIGNIFICANT_BITS)
    assert count_significant_bits(new_difficulty) <= constants.SIGNIFICANT_BITS
    return uint64(new_difficulty)
def get_next_sub_slot_iters_and_difficulty(
    constants: ConsensusConstants,
    is_first_in_sub_slot: bool,
    prev_b: Optional[BlockRecord],
    blocks: BlockchainInterface,
) -> Tuple[uint64, uint64]:
    """
    Return ``(sub_slot_iters, difficulty)`` for the block that will follow ``prev_b``.

    Args:
        constants: consensus constants being used for this chain
        is_first_in_sub_slot: whether the next block is the first in its sub slot
        prev_b: the previous block (last block in the epoch), or None at genesis
        blocks: dictionary from header hash to SBR of all included SBR
    """
    if prev_b is None:
        # Genesis: the chain-start parameters apply.
        return constants.SUB_SLOT_ITERS_STARTING, constants.DIFFICULTY_STARTING
    if prev_b.height == 0:
        # prev block is genesis; its weight equals its own difficulty.
        prev_difficulty: uint64 = uint64(prev_b.weight)
    else:
        prev_difficulty = uint64(prev_b.weight - blocks.block_record(prev_b.prev_hash).weight)
    if prev_b.sub_epoch_summary_included is not None:
        # A sub-epoch summary was just included, so any adjustment already happened there.
        return prev_b.sub_slot_iters, prev_difficulty
    sp_total_iters = prev_b.sp_total_iters(constants)
    difficulty: uint64 = _get_next_difficulty(
        constants,
        blocks,
        prev_b.prev_hash,
        prev_b.height,
        prev_difficulty,
        prev_b.deficit,
        False,  # Already checked above
        is_first_in_sub_slot,
        sp_total_iters,
    )
    sub_slot_iters: uint64 = _get_next_sub_slot_iters(
        constants,
        blocks,
        prev_b.prev_hash,
        prev_b.height,
        prev_b.sub_slot_iters,
        prev_b.deficit,
        False,  # Already checked above
        is_first_in_sub_slot,
        sp_total_iters,
    )
    return sub_slot_iters, difficulty
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/pot_iterations.py | flax/consensus/pot_iterations.py | from __future__ import annotations
from flax.consensus.constants import ConsensusConstants
from flax.consensus.pos_quality import _expected_plot_size
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint64, uint128
def is_overflow_block(constants: ConsensusConstants, signage_point_index: uint8) -> bool:
    """Return True iff a block at this signage point index is an overflow block.

    The last NUM_SP_INTERVALS_EXTRA signage points of a sub slot belong to blocks that
    overflow into the following sub slot.

    Raises:
        ValueError: if the index is outside the sub slot
    """
    total_sps = constants.NUM_SPS_SUB_SLOT
    if signage_point_index >= total_sps:
        raise ValueError("SP index too high")
    return signage_point_index >= total_sps - constants.NUM_SP_INTERVALS_EXTRA
def calculate_sp_interval_iters(constants: ConsensusConstants, sub_slot_iters: uint64) -> uint64:
    """Return the number of iterations between two adjacent signage points.

    ``sub_slot_iters`` must be an exact multiple of NUM_SPS_SUB_SLOT.
    """
    num_sps = constants.NUM_SPS_SUB_SLOT
    assert sub_slot_iters % num_sps == 0
    return uint64(sub_slot_iters // num_sps)
def calculate_sp_iters(constants: ConsensusConstants, sub_slot_iters: uint64, signage_point_index: uint8) -> uint64:
    """Return the iteration offset of the given signage point within its sub slot.

    Raises:
        ValueError: if the index is outside the sub slot
    """
    if signage_point_index >= constants.NUM_SPS_SUB_SLOT:
        raise ValueError("SP index too high")
    interval = calculate_sp_interval_iters(constants, sub_slot_iters)
    return uint64(interval * signage_point_index)
def calculate_ip_iters(
    constants: ConsensusConstants,
    sub_slot_iters: uint64,
    signage_point_index: uint8,
    required_iters: uint64,
) -> uint64:
    """
    Return the infusion-point iteration offset within the sub slot: the signage point
    offset, plus the mandatory NUM_SP_INTERVALS_EXTRA intervals, plus the proof-of-space
    required iterations, wrapped modulo the sub slot length.

    Note that the SSI is for the block passed in, which might be in the previous epoch.

    Raises:
        ValueError: if the signage point offset is inconsistent with sub_slot_iters, or
            required_iters is zero or not below one signage point interval
    """
    sp_iters = calculate_sp_iters(constants, sub_slot_iters, signage_point_index)
    sp_interval_iters: uint64 = calculate_sp_interval_iters(constants, sub_slot_iters)
    misaligned = sp_iters % sp_interval_iters != 0 or sp_iters >= sub_slot_iters
    if misaligned:
        raise ValueError(f"Invalid sp iters {sp_iters} for this ssi {sub_slot_iters}")
    if required_iters == 0 or required_iters >= sp_interval_iters:
        raise ValueError(
            f"Required iters {required_iters} is not below the sp interval iters {sp_interval_iters} "
            f"{sub_slot_iters} or not >0."
        )
    raw_ip_iters = sp_iters + constants.NUM_SP_INTERVALS_EXTRA * sp_interval_iters + required_iters
    return uint64(raw_ip_iters % sub_slot_iters)
def calculate_iterations_quality(
    difficulty_constant_factor: uint128,
    quality_string: bytes32,
    size: int,
    difficulty: uint64,
    cc_sp_output_hash: bytes32,
) -> uint64:
    """
    Derive the number of iterations from a proof-of-space quality: difficulty times the
    constant factor, scaled by a pseudo-random fraction in [0, 1) drawn from the quality
    string, divided by the expected plot size. Always at least 1.
    """
    # Mix the challenge-chain signage point into the quality so the result is unique per SP.
    sp_quality_string: bytes32 = std_hash(quality_string + cc_sp_output_hash)
    # Exact integer arithmetic: multiply everything first, divide once at the end.
    numerator = (
        int(difficulty)
        * int(difficulty_constant_factor)
        * int.from_bytes(sp_quality_string, "big", signed=False)
    )
    denominator = (1 << 256) * int(_expected_plot_size(size))
    iters = uint64(numerator // denominator)
    return max(iters, uint64(1))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/get_block_challenge.py | flax/consensus/get_block_challenge.py | from __future__ import annotations
import logging
from typing import List, Union
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.full_block import FullBlock
from flax.types.header_block import HeaderBlock
from flax.types.unfinished_block import UnfinishedBlock
from flax.types.unfinished_header_block import UnfinishedHeaderBlock
from flax.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
    header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
    blocks: BlockchainInterface,
    sub_slot_iters: uint64,
) -> bool:
    """
    Args:
        header_block: An overflow block, with potentially missing information about the new sub slot
        blocks: all blocks that have been included before header_block
        sub_slot_iters: sub_slot_iters at the header_block

    Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
    slot was not included yet, and therefore it is the responsibility of this block to include it
    """
    if len(header_block.finished_sub_slots) > 0:
        # We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
        return False
    curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
    # We also check if curr is close to header_block, which means it's in the same sub slot
    # (within half a sub slot of iterations).
    seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
    # Walk back to the first block of the current sub slot, noting any nearby overflow block.
    while not curr.first_in_sub_slot and not curr.height == 0:
        if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
            seen_overflow_block = True
        curr = blocks.block_record(curr.prev_hash)
    if curr.first_in_sub_slot and seen_overflow_block:
        # We have seen another overflow block in this slot (same as header_block), therefore there are no
        # missing sub slots
        return True
    # We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
    # the future
    return False
def get_block_challenge(
    constants: ConsensusConstants,
    header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
    blocks: BlockchainInterface,
    genesis_block: bool,
    overflow: bool,
    skip_overflow_last_ss_validation: bool,
) -> bytes32:
    """
    Return the challenge-chain challenge that header_block's proof of space must answer.

    The source of the challenge depends on whether the block carries finished sub slots,
    whether it is the genesis block, whether it is an overflow block (whose signage point
    lies in the previous sub slot), and whether the final (unfinished) sub slot is being
    skipped during validation.

    Args:
        constants: consensus constants being used for this chain
        header_block: the block whose challenge is being computed
        blocks: records of all blocks included before header_block
        genesis_block: whether header_block is at height 0
        overflow: whether header_block is an overflow block
        skip_overflow_last_ss_validation: whether the (not yet finished) final sub slot
            is omitted from header_block
    """
    if len(header_block.finished_sub_slots) > 0:
        if overflow:
            # New sub-slot with overflow block
            if skip_overflow_last_ss_validation:
                # In this case, we are missing the final sub-slot bundle (it's not finished yet), however
                # There is a whole empty slot before this block is infused
                challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
            else:
                challenge = header_block.finished_sub_slots[
                    -1
                ].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
        else:
            # No overflow, new slot with a new challenge
            challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
    else:
        if genesis_block:
            challenge = constants.GENESIS_CHALLENGE
        else:
            if overflow:
                if skip_overflow_last_ss_validation:
                    # Overflow infusion without the new slot, so get the last challenge
                    challenges_to_look_for = 1
                else:
                    # Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
                    # Which means no sub slots are omitted
                    challenges_to_look_for = 2
            else:
                challenges_to_look_for = 1
            # Walk back through ancestors, collecting finished challenge-slot hashes in
            # reverse order until we have enough to pick the one we need.
            reversed_challenge_hashes: List[bytes32] = []
            curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
            while len(reversed_challenge_hashes) < challenges_to_look_for:
                if curr.first_in_sub_slot:
                    assert curr.finished_challenge_slot_hashes is not None
                    reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
                if curr.height == 0:
                    assert curr.finished_challenge_slot_hashes is not None
                    assert len(curr.finished_challenge_slot_hashes) > 0
                    break
                curr = blocks.block_record(curr.prev_hash)
            challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
    return challenge
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/find_fork_point.py | flax/consensus/find_fork_point.py | from __future__ import annotations
from typing import Union
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.types.header_block import HeaderBlock
def find_fork_point_in_chain(
    blocks: BlockchainInterface,
    block_1: Union[BlockRecord, HeaderBlock],
    block_2: Union[BlockRecord, HeaderBlock],
) -> int:
    """
    Find the height at which the chain ending in block_2 diverged from the chain ending in
    block_1 (all ancestor blocks are assumed to be loaded in ``blocks``).

    Returns:
        The height of the deepest common ancestor, or -1 when the chains share none.
    """
    while block_1.height > 0 or block_2.height > 0:
        if block_1.height < block_2.height:
            block_2 = blocks.block_record(block_2.prev_hash)
        elif block_2.height < block_1.height:
            block_1 = blocks.block_record(block_1.prev_hash)
        else:
            # Same height: either this is the common ancestor, or both chains step back.
            if block_1.header_hash == block_2.header_hash:
                return block_2.height
            block_1 = blocks.block_record(block_1.prev_hash)
            block_2 = blocks.block_record(block_2.prev_hash)
    # Both walks reached height 0: the chains meet at genesis or not at all.
    return 0 if block_1 == block_2 else -1
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/blockchain_interface.py | flax/consensus/blockchain_interface.py | from __future__ import annotations
from typing import Dict, List, Optional
from flax.consensus.block_record import BlockRecord
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.blockchain_format.vdf import VDFInfo
from flax.types.header_block import HeaderBlock
from flax.types.weight_proof import SubEpochChallengeSegment
from flax.util.ints import uint32
class BlockchainInterface:
    """
    Interface over a chain of block records.

    Concrete implementations provide lookups by header hash and by height, sub-epoch
    summary access, and persistence hooks for weight-proof challenge segments. Every
    method here is a stub (``pass``) to be overridden; only ``try_block_record`` has a
    default implementation, built on ``contains_block`` and ``block_record``.
    """
    # --- peak / record lookups (synchronous, served from the cache) ---
    def get_peak(self) -> Optional[BlockRecord]:
        pass
    def get_peak_height(self) -> Optional[uint32]:
        pass
    def block_record(self, header_hash: bytes32) -> BlockRecord:
        pass
    def height_to_block_record(self, height: uint32) -> BlockRecord:
        pass
    # --- sub-epoch summary access ---
    def get_ses_heights(self) -> List[uint32]:
        pass
    def get_ses(self, height: uint32) -> SubEpochSummary:
        pass
    def height_to_hash(self, height: uint32) -> Optional[bytes32]:
        pass
    # --- membership / cache maintenance ---
    def contains_block(self, header_hash: bytes32) -> bool:
        pass
    def remove_block_record(self, header_hash: bytes32) -> None:
        pass
    def add_block_record(self, block_record: BlockRecord) -> None:
        pass
    def contains_height(self, height: uint32) -> bool:
        pass
    # --- asynchronous (database-backed) lookups ---
    async def warmup(self, fork_point: uint32) -> None:
        pass
    async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
        pass
    async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
        pass
    async def get_header_blocks_in_range(
        self, start: int, stop: int, tx_filter: bool = True
    ) -> Dict[bytes32, HeaderBlock]:
        pass
    async def get_header_block_by_height(
        self, height: int, header_hash: bytes32, tx_filter: bool = True
    ) -> Optional[HeaderBlock]:
        pass
    async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
        pass
    def try_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
        """Like ``block_record`` but returns None instead of failing on a missing hash."""
        if self.contains_block(header_hash):
            return self.block_record(header_hash)
        return None
    # --- weight-proof challenge segment persistence ---
    async def persist_sub_epoch_challenge_segments(
        self, sub_epoch_summary_hash: bytes32, segments: List[SubEpochChallengeSegment]
    ) -> None:
        pass
    async def get_sub_epoch_challenge_segments(
        self,
        sub_epoch_summary_hash: bytes32,
    ) -> Optional[List[SubEpochChallengeSegment]]:
        pass
    # --- compact proof tracking ---
    def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
        pass
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/deficit.py | flax/consensus/deficit.py | from __future__ import annotations
from typing import Optional
from flax.consensus.block_record import BlockRecord
from flax.consensus.constants import ConsensusConstants
from flax.util.ints import uint8, uint32
def calculate_deficit(
    constants: ConsensusConstants,
    height: uint32,
    prev_b: Optional[BlockRecord],
    overflow: bool,
    num_finished_sub_slots: int,
) -> uint8:
    """
    Compute the deficit for the block being created at ``height``.

    Args:
        constants: consensus constants being used for this chain
        height: block height of the block that we care about
        prev_b: previous block
        overflow: whether or not this is an overflow block
        num_finished_sub_slots: the number of finished slots between infusion points of prev and current
    """
    # Genesis starts one below the reset value.
    if height == 0:
        return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)

    assert prev_b is not None
    previous_deficit: uint8 = prev_b.deficit

    if previous_deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
        # Prev block must have been an overflow block; it may be in a different sub-slot.
        # We may decrement unless we are still overflowed inside the same sub-slot.
        if not overflow or num_finished_sub_slots > 0:
            return uint8(previous_deficit - 1)
        return uint8(previous_deficit)

    if previous_deficit == 0:
        # Deficit only resets once a sub-slot boundary is crossed.
        if num_finished_sub_slots == 0:
            return uint8(0)
        if num_finished_sub_slots == 1 and overflow:
            return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK)
        # Either non-overflow after one finished sub-slot, or more than one
        # finished sub-slot: reset and immediately decrement.
        return uint8(constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1)

    # Normal case: deficit counts down by one per block.
    return uint8(previous_deficit - 1)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/multiprocess_validation.py | flax/consensus/multiprocess_validation.py | from __future__ import annotations
import asyncio
import logging
import traceback
from concurrent.futures import Executor
from dataclasses import dataclass
from typing import Awaitable, Callable, Dict, List, Optional, Sequence, Tuple
from blspy import AugSchemeMPL, G1Element
from flax.consensus.block_header_validation import validate_finished_header_block
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.consensus.cost_calculator import NPCResult
from flax.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from flax.consensus.full_block_to_block_record import block_to_block_record
from flax.consensus.get_block_challenge import get_block_challenge
from flax.consensus.pot_iterations import calculate_iterations_quality, is_overflow_block
from flax.full_node.mempool_check_conditions import get_name_puzzle_conditions
from flax.types.block_protocol import BlockInfo
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.full_block import FullBlock
from flax.types.generator_types import BlockGenerator
from flax.types.header_block import HeaderBlock
from flax.types.unfinished_block import UnfinishedBlock
from flax.util.block_cache import BlockCache
from flax.util.condition_tools import pkm_pairs
from flax.util.errors import Err, ValidationError
from flax.util.generator_tools import get_block_header, tx_removals_and_additions
from flax.util.ints import uint16, uint32, uint64
from flax.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@streamable
@dataclass(frozen=True)
class PreValidationResult(Streamable):
    """Serializable outcome of pre-validating one block (header checks, CLVM, signature)."""

    # Error code (Err value) when validation failed; None on success.
    error: Optional[uint16]
    # Required proof-of-space iterations; set iff error is None.
    required_iters: Optional[uint64]  # Iff error is None
    # CLVM generator result; set iff error is None and block is a transaction block.
    npc_result: Optional[NPCResult]  # Iff error is None and block is a transaction block
    # True only when the aggregate BLS signature was actually checked and passed.
    validated_signature: bool
def batch_pre_validate_blocks(
    constants: ConsensusConstants,
    blocks_pickled: Dict[bytes, bytes],
    full_blocks_pickled: Optional[List[bytes]],
    header_blocks_pickled: Optional[List[bytes]],
    prev_transaction_generators: List[Optional[bytes]],
    npc_results: Dict[uint32, bytes],
    check_filter: bool,
    expected_difficulty: List[uint64],
    expected_sub_slot_iters: List[uint64],
    validate_signatures: bool,
) -> List[bytes]:
    """
    Validate a batch of serialized full blocks OR header blocks (exactly one of
    the two lists must be provided). Designed to run inside an executor worker,
    so all inputs and outputs cross the process boundary as bytes.

    Args:
        constants: consensus constants for this chain
        blocks_pickled: serialized BlockRecords of recent ancestors, keyed by header hash
        full_blocks_pickled: serialized FullBlocks to validate (mutually exclusive with header blocks)
        header_blocks_pickled: serialized HeaderBlocks to validate
        prev_transaction_generators: per-block serialized BlockGenerator (or None), aligned with full_blocks_pickled
        npc_results: pre-computed CLVM results keyed by height, serialized
        check_filter: whether to validate the transactions filter
        expected_difficulty: per-block expected difficulty, aligned with the batch
        expected_sub_slot_iters: per-block expected sub-slot iterations, aligned with the batch
        validate_signatures: whether to verify the aggregate BLS signature here

    Returns:
        One serialized PreValidationResult per input block, in order.

    Raises:
        ValueError: if both full blocks and header blocks are passed.
    """
    blocks: Dict[bytes32, BlockRecord] = {}
    for k, v in blocks_pickled.items():
        blocks[bytes32(k)] = BlockRecord.from_bytes(v)
    results: List[PreValidationResult] = []
    if full_blocks_pickled is not None and header_blocks_pickled is not None:
        # Fix: this used to be `assert ValueError(...)`, which asserts a truthy
        # exception object and therefore never fails. Raise it instead.
        raise ValueError("Only one should be passed here")

    # In this case, we are validating full blocks, not headers
    if full_blocks_pickled is not None:
        for i in range(len(full_blocks_pickled)):
            try:
                block: FullBlock = FullBlock.from_bytes(full_blocks_pickled[i])
                tx_additions: List[Coin] = []
                removals: List[bytes32] = []
                npc_result: Optional[NPCResult] = None
                if block.height in npc_results:
                    # Re-use the CLVM result computed by the caller for this height.
                    npc_result = NPCResult.from_bytes(npc_results[block.height])
                    assert npc_result is not None
                    if npc_result.conds is not None:
                        removals, tx_additions = tx_removals_and_additions(npc_result.conds)
                    else:
                        removals, tx_additions = [], []

                if block.transactions_generator is not None and npc_result is None:
                    # No pre-computed result: run the generator here (the expensive path).
                    prev_generator_bytes = prev_transaction_generators[i]
                    assert prev_generator_bytes is not None
                    assert block.transactions_info is not None
                    block_generator: BlockGenerator = BlockGenerator.from_bytes(prev_generator_bytes)
                    assert block_generator.program == block.transactions_generator
                    npc_result = get_name_puzzle_conditions(
                        block_generator,
                        min(constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                        cost_per_byte=constants.COST_PER_BYTE,
                        mempool_mode=False,
                    )
                    removals, tx_additions = tx_removals_and_additions(npc_result.conds)

                if npc_result is not None and npc_result.error is not None:
                    # CLVM failed; record the error and skip header/signature checks.
                    results.append(PreValidationResult(uint16(npc_result.error), None, npc_result, False))
                    continue

                header_block = get_block_header(block, tx_additions, removals)
                required_iters, error = validate_finished_header_block(
                    constants,
                    BlockCache(blocks),
                    header_block,
                    check_filter,
                    expected_difficulty[i],
                    expected_sub_slot_iters[i],
                )
                error_int: Optional[uint16] = None
                if error is not None:
                    error_int = uint16(error.code.value)

                successfully_validated_signatures = False
                # If we failed CLVM, no need to validate signature, the block is already invalid
                if error_int is None:
                    # If this is False, it means either we don't have a signature (not a tx block) or we have an invalid
                    # signature (which also puts in an error) or we didn't validate the signature because we want to
                    # validate it later. receive_block will attempt to validate the signature later.
                    if validate_signatures:
                        if npc_result is not None and block.transactions_info is not None:
                            assert npc_result.conds
                            pairs_pks, pairs_msgs = pkm_pairs(npc_result.conds, constants.AGG_SIG_ME_ADDITIONAL_DATA)
                            # Using AugSchemeMPL.aggregate_verify, so it's safe to use from_bytes_unchecked
                            pks_objects: List[G1Element] = [G1Element.from_bytes_unchecked(pk) for pk in pairs_pks]
                            if not AugSchemeMPL.aggregate_verify(
                                pks_objects, pairs_msgs, block.transactions_info.aggregated_signature
                            ):
                                error_int = uint16(Err.BAD_AGGREGATE_SIGNATURE.value)
                            else:
                                successfully_validated_signatures = True

                results.append(
                    PreValidationResult(error_int, required_iters, npc_result, successfully_validated_signatures)
                )
            except Exception:
                # Any unexpected failure is reported as UNKNOWN instead of killing the worker.
                error_stack = traceback.format_exc()
                log.error(f"Exception: {error_stack}")
                results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None, False))
    # In this case, we are validating header blocks
    elif header_blocks_pickled is not None:
        for i in range(len(header_blocks_pickled)):
            try:
                header_block = HeaderBlock.from_bytes(header_blocks_pickled[i])
                required_iters, error = validate_finished_header_block(
                    constants,
                    BlockCache(blocks),
                    header_block,
                    check_filter,
                    expected_difficulty[i],
                    expected_sub_slot_iters[i],
                )
                error_int = None
                if error is not None:
                    error_int = uint16(error.code.value)
                results.append(PreValidationResult(error_int, required_iters, None, False))
            except Exception:
                error_stack = traceback.format_exc()
                log.error(f"Exception: {error_stack}")
                results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None, False))
    return [bytes(r) for r in results]
async def pre_validate_blocks_multiprocessing(
    constants: ConsensusConstants,
    block_records: BlockchainInterface,
    blocks: Sequence[FullBlock],
    pool: Executor,
    check_filter: bool,
    npc_results: Dict[uint32, NPCResult],
    get_block_generator: Callable[[BlockInfo, Dict[bytes32, FullBlock]], Awaitable[Optional[BlockGenerator]]],
    batch_size: int,
    wp_summaries: Optional[List[SubEpochSummary]] = None,
    *,
    validate_signatures: bool = True,
) -> List[PreValidationResult]:
    """
    Pre-validate a connected sequence of full blocks, farming out header/CLVM checks
    to worker processes in batches of ``batch_size``.

    This method must be called under the blockchain lock.
    If all the full blocks pass pre-validation, (only validates header), returns the list of required iters.
    If any validation issue occurs, a single-element list with the error result is returned.

    Args:
        constants: consensus constants for this chain
        block_records: blockchain interface; temporarily mutated (records added, then removed again)
        blocks: list of full blocks to validate (must be connected to current chain)
        pool: executor the batched worker function runs in
        check_filter: whether to validate the transactions filter
        npc_results: pre-computed CLVM results keyed by height
        get_block_generator: async callback resolving a block's full generator (with back references)
        batch_size: number of blocks per worker submission
        wp_summaries: weight-proof sub-epoch summaries to cross-check against, if any
        validate_signatures: whether workers verify aggregate signatures
    """
    prev_b: Optional[BlockRecord] = None
    # Collects all the recent blocks (up to the previous sub-epoch)
    recent_blocks: Dict[bytes32, BlockRecord] = {}
    # Smaller ancestor set used when no batch block starts a new sub-slot.
    recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
    num_sub_slots_found = 0
    num_blocks_seen = 0
    if blocks[0].height > 0:
        # Non-genesis: the first block's parent must already be known.
        if not block_records.contains_block(blocks[0].prev_header_hash):
            return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None, False)]
        curr = block_records.block_record(blocks[0].prev_header_hash)
        num_sub_slots_to_look_for = 3 if curr.overflow else 2
        # Walk back until we have a sub-epoch summary, enough timestamps, and enough sub-slots.
        while (
            curr.sub_epoch_summary_included is None
            or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
            or num_sub_slots_found < num_sub_slots_to_look_for
        ) and curr.height > 0:
            if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
                recent_blocks_compressed[curr.header_hash] = curr
            if curr.first_in_sub_slot:
                assert curr.finished_challenge_slot_hashes is not None
                num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
            recent_blocks[curr.header_hash] = curr
            if curr.is_transaction_block:
                num_blocks_seen += 1
            curr = block_records.block_record(curr.prev_hash)
        recent_blocks[curr.header_hash] = curr
        recent_blocks_compressed[curr.header_hash] = curr
    # Remember which records already existed, so only the temporary ones are removed later.
    block_record_was_present = []
    for block in blocks:
        block_record_was_present.append(block_records.contains_block(block.header_hash))

    # Per-block (difficulty, sub_slot_iters) expected by the workers, in batch order.
    diff_ssis: List[Tuple[uint64, uint64]] = []
    for block in blocks:
        if block.height != 0:
            assert block_records.contains_block(block.prev_header_hash)
            if prev_b is None:
                prev_b = block_records.block_record(block.prev_header_hash)
        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            constants, len(block.finished_sub_slots) > 0, prev_b, block_records
        )
        overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
        challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
        if block.reward_chain_block.challenge_chain_sp_vdf is None:
            # No SP VDF: signage point hash is the challenge itself.
            cc_sp_hash: bytes32 = challenge
        else:
            cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
        q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
            constants, challenge, cc_sp_hash
        )
        if q_str is None:
            # Bad proof of space: roll back any records this call temporarily added.
            for i, block_i in enumerate(blocks):
                if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
                    block_records.remove_block_record(block_i.header_hash)
            return [PreValidationResult(uint16(Err.INVALID_POSPACE.value), None, None, False)]

        required_iters: uint64 = calculate_iterations_quality(
            constants.DIFFICULTY_CONSTANT_FACTOR,
            q_str,
            block.reward_chain_block.proof_of_space.size,
            difficulty,
            cc_sp_hash,
        )

        try:
            block_rec = block_to_block_record(
                constants,
                block_records,
                required_iters,
                block,
                None,
            )
        except ValueError:
            return [PreValidationResult(uint16(Err.INVALID_SUB_EPOCH_SUMMARY.value), None, None, False)]

        if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
            # Cross-check the included summary against the weight-proof list.
            idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
            next_ses = wp_summaries[idx]
            if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
                log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
                return [PreValidationResult(uint16(Err.INVALID_SUB_EPOCH_SUMMARY.value), None, None, False)]
        # Makes sure to not override the valid blocks already in block_records
        if not block_records.contains_block(block_rec.header_hash):
            block_records.add_block_record(block_rec)  # Temporarily add block to dict
            recent_blocks[block_rec.header_hash] = block_rec
            recent_blocks_compressed[block_rec.header_hash] = block_rec
        else:
            recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
            recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
        prev_b = block_rec
        diff_ssis.append((difficulty, sub_slot_iters))

    block_dict: Dict[bytes32, FullBlock] = {}
    for i, block in enumerate(blocks):
        block_dict[block.header_hash] = block
        if not block_record_was_present[i]:
            # Undo the temporary additions made above.
            block_records.remove_block_record(block.header_hash)

    # Pre-serialize shared state once; workers receive only bytes.
    recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
    npc_results_pickled = {}
    for k, v in npc_results.items():
        npc_results_pickled[k] = bytes(v)
    futures = []
    # Pool of workers to validate blocks concurrently
    for i in range(0, len(blocks), batch_size):
        end_i = min(i + batch_size, len(blocks))
        blocks_to_validate = blocks[i:end_i]
        # Batches that cross a sub-slot boundary need the full ancestor set.
        if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
            final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
        else:
            final_pickled = recent_sb_compressed_pickled
        b_pickled: Optional[List[bytes]] = None
        hb_pickled: Optional[List[bytes]] = None
        previous_generators: List[Optional[bytes]] = []
        for block in blocks_to_validate:
            # We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
            # prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
            # generator references
            prev_blocks_dict: Dict[bytes32, FullBlock] = {}
            curr_b: FullBlock = block

            while curr_b.prev_header_hash in block_dict:
                curr_b = block_dict[curr_b.prev_header_hash]
                prev_blocks_dict[curr_b.header_hash] = curr_b

            if isinstance(block, FullBlock):
                assert get_block_generator is not None
                if b_pickled is None:
                    b_pickled = []
                b_pickled.append(bytes(block))
                try:
                    block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
                except ValueError:
                    return [
                        PreValidationResult(
                            uint16(Err.FAILED_GETTING_GENERATOR_MULTIPROCESSING.value), None, None, False
                        )
                    ]
                if block_generator is not None:
                    previous_generators.append(bytes(block_generator))
                else:
                    previous_generators.append(None)
            else:
                if hb_pickled is None:
                    hb_pickled = []
                hb_pickled.append(bytes(block))

        futures.append(
            asyncio.get_running_loop().run_in_executor(
                pool,
                batch_pre_validate_blocks,
                constants,
                final_pickled,
                b_pickled,
                hb_pickled,
                previous_generators,
                npc_results_pickled,
                check_filter,
                [diff_ssis[j][0] for j in range(i, end_i)],
                [diff_ssis[j][1] for j in range(i, end_i)],
                validate_signatures,
            )
        )
    # Collect all results into one flat list
    return [
        PreValidationResult.from_bytes(result)
        for batch_result in (await asyncio.gather(*futures))
        for result in batch_result
    ]
def _run_generator(
    constants: ConsensusConstants,
    unfinished_block_bytes: bytes,
    block_generator_bytes: bytes,
) -> Optional[bytes]:
    """
    Execute the CLVM transactions generator for a serialized unfinished block and
    return the serialized NPCResult. Meant to be invoked inside a
    ProcessPoolExecutor so the heavy CLVM evaluation runs in a separate process;
    failures are reported as serialized error NPCResults rather than raised.
    """
    try:
        unfinished: UnfinishedBlock = UnfinishedBlock.from_bytes(unfinished_block_bytes)
        assert unfinished.transactions_info is not None
        generator: BlockGenerator = BlockGenerator.from_bytes(block_generator_bytes)
        # The supplied generator must correspond to this exact block.
        assert generator.program == unfinished.transactions_generator
        # Cap execution cost at the consensus maximum.
        cost_cap = min(constants.MAX_BLOCK_COST_CLVM, unfinished.transactions_info.cost)
        result: NPCResult = get_name_puzzle_conditions(
            generator,
            cost_cap,
            cost_per_byte=constants.COST_PER_BYTE,
            mempool_mode=False,
        )
        return bytes(result)
    except ValidationError as e:
        return bytes(NPCResult(uint16(e.code.value), None, uint64(0)))
    except Exception:
        return bytes(NPCResult(uint16(Err.UNKNOWN.value), None, uint64(0)))
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_header_validation.py | flax/consensus/block_header_validation.py | from __future__ import annotations
import dataclasses
import logging
import time
from typing import Optional, Tuple
from blspy import AugSchemeMPL
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.consensus.deficit import calculate_deficit
from flax.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from flax.consensus.get_block_challenge import final_eos_is_already_included, get_block_challenge
from flax.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from flax.consensus.pot_iterations import (
calculate_ip_iters,
calculate_iterations_quality,
calculate_sp_interval_iters,
calculate_sp_iters,
is_overflow_block,
)
from flax.consensus.vdf_info_computation import get_signage_point_vdf_info
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.slots import ChallengeChainSubSlot, RewardChainSubSlot, SubSlotProofs
from flax.types.blockchain_format.vdf import VDFInfo, VDFProof
from flax.types.end_of_slot_bundle import EndOfSubSlotBundle
from flax.types.header_block import HeaderBlock
from flax.types.unfinished_header_block import UnfinishedHeaderBlock
from flax.util.errors import Err, ValidationError
from flax.util.hash import std_hash
from flax.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
# noinspection PyCallByClass
def validate_unfinished_header_block(
constants: ConsensusConstants,
blocks: BlockchainInterface,
header_block: UnfinishedHeaderBlock,
check_filter: bool,
expected_difficulty: uint64,
expected_sub_slot_iters: uint64,
skip_overflow_last_ss_validation: bool = False,
skip_vdf_is_valid: bool = False,
check_sub_epoch_summary: bool = True,
) -> Tuple[Optional[uint64], Optional[ValidationError]]:
"""
Validates an unfinished header block. This is a block without the infusion VDFs (unfinished)
and without transactions and transaction info (header). Returns (required_iters, error).
This method is meant to validate only the unfinished part of the block. However, the finished_sub_slots
refers to all sub-slots that were finishes from the previous block's infusion point, up to this blocks
infusion point. Therefore, in the case where this is an overflow block, and the last sub-slot is not yet
released, header_block.finished_sub_slots will be missing one sub-slot. In this case,
skip_overflow_last_ss_validation must be set to True. This will skip validation of end of slots, sub-epochs,
and lead to other small tweaks in validation.
"""
# 1. Check that the previous block exists in the blockchain, or that it is correct
prev_b = blocks.try_block_record(header_block.prev_header_hash)
genesis_block = prev_b is None
if genesis_block and header_block.prev_header_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_BLOCK_HASH)
overflow = is_overflow_block(constants, header_block.reward_chain_block.signage_point_index)
if skip_overflow_last_ss_validation and overflow:
if final_eos_is_already_included(header_block, blocks, expected_sub_slot_iters):
skip_overflow_last_ss_validation = False
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots) + 1
else:
finished_sub_slots_since_prev = len(header_block.finished_sub_slots)
new_sub_slot: bool = finished_sub_slots_since_prev > 0
can_finish_se: bool = False
can_finish_epoch: bool = False
if genesis_block:
height: uint32 = uint32(0)
assert expected_difficulty == constants.DIFFICULTY_STARTING
assert expected_sub_slot_iters == constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
height = uint32(prev_b.height + 1)
if new_sub_slot:
can_finish_se, can_finish_epoch = can_finish_sub_and_full_epoch(
constants,
blocks,
prev_b.height,
prev_b.prev_hash,
prev_b.deficit,
prev_b.sub_epoch_summary_included is not None,
)
else:
can_finish_se = False
can_finish_epoch = False
# 2. Check finished slots that have been crossed since prev_b
ses_hash: Optional[bytes32] = None
if new_sub_slot and not skip_overflow_last_ss_validation:
# Finished a slot(s) since previous block. The first sub-slot must have at least one block, and all
# subsequent sub-slots must be empty
for finished_sub_slot_n, sub_slot in enumerate(header_block.finished_sub_slots):
# Start of slot challenge is fetched from SP
challenge_hash: bytes32 = sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
if finished_sub_slot_n == 0:
if genesis_block:
# 2a. check sub-slot challenge hash for genesis block
if challenge_hash != constants.GENESIS_CHALLENGE:
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
assert prev_b is not None
curr: BlockRecord = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_challenge_slot_hashes is not None
# 2b. check sub-slot challenge hash for non-genesis block
if not curr.finished_challenge_slot_hashes[-1] == challenge_hash:
print(curr.finished_challenge_slot_hashes[-1], challenge_hash)
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
else:
# 2c. check sub-slot challenge hash for empty slot
if (
not header_block.finished_sub_slots[finished_sub_slot_n - 1].challenge_chain.get_hash()
== challenge_hash
):
return None, ValidationError(Err.INVALID_PREV_CHALLENGE_SLOT_HASH)
if genesis_block:
# 2d. Validate that genesis block has no ICC
if sub_slot.infused_challenge_chain is not None:
return None, ValidationError(Err.SHOULD_NOT_HAVE_ICC)
else:
assert prev_b is not None
icc_iters_committed: Optional[uint64] = None
icc_iters_proof: Optional[uint64] = None
icc_challenge_hash: Optional[bytes32] = None
icc_vdf_input = None
if prev_b.deficit < constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# There should be no ICC chain if the last block's deficit is 16
# Prev sb's deficit is 0, 1, 2, 3, or 4
if finished_sub_slot_n == 0:
# This is the first sub slot after the last sb, which must have deficit 1-4, and thus an ICC
curr = prev_b
while not curr.is_challenge_block(constants) and not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(constants):
icc_challenge_hash = curr.challenge_block_info_hash
icc_iters_committed = uint64(prev_b.sub_slot_iters - curr.ip_iters(constants))
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge_hash = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
if prev_b.is_challenge_block(constants):
icc_vdf_input = ClassgroupElement.get_default_element()
else:
icc_vdf_input = prev_b.infused_challenge_vdf_output
else:
# This is not the first sub slot after the last block, so we might not have an ICC
if (
header_block.finished_sub_slots[finished_sub_slot_n - 1].reward_chain.deficit
< constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
):
finished_ss = header_block.finished_sub_slots[finished_sub_slot_n - 1]
assert finished_ss.infused_challenge_chain is not None
# Only sets the icc iff the previous sub slots deficit is 4 or less
icc_challenge_hash = finished_ss.infused_challenge_chain.get_hash()
icc_iters_committed = prev_b.sub_slot_iters
icc_iters_proof = icc_iters_committed
icc_vdf_input = ClassgroupElement.get_default_element()
# 2e. Validate that there is not icc iff icc_challenge hash is None
assert (sub_slot.infused_challenge_chain is None) == (icc_challenge_hash is None)
if sub_slot.infused_challenge_chain is not None:
assert icc_vdf_input is not None
assert icc_iters_proof is not None
assert icc_challenge_hash is not None
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
# 2f. Check infused challenge chain sub-slot VDF
# Only validate from prev_b to optimize
target_vdf_info = VDFInfo(
icc_challenge_hash,
icc_iters_proof,
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
if sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=icc_iters_committed,
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if not skip_vdf_is_valid:
if (
not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants, icc_vdf_input, target_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if (
sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.infused_challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_ICC_EOS_VDF)
if sub_slot.reward_chain.deficit == constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
# 2g. Check infused challenge sub-slot hash in challenge chain, deficit 16
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
else:
# 2h. Check infused challenge sub-slot hash not included for other deficits
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2i. Check infused challenge sub-slot hash in reward sub-slot
if (
sub_slot.infused_challenge_chain.get_hash()
!= sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash
):
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
else:
# 2j. If no icc, check that the cc doesn't include it
if sub_slot.challenge_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_CC)
# 2k. If no icc, check that the cc doesn't include it
if sub_slot.reward_chain.infused_challenge_chain_sub_slot_hash is not None:
return None, ValidationError(Err.INVALID_ICC_HASH_RC)
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
assert ses_hash is None # Only one of the slots can have it
ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
# 2l. check sub-epoch summary hash is None for empty slots
if finished_sub_slot_n != 0:
if sub_slot.challenge_chain.subepoch_summary_hash is not None:
return None, ValidationError(Err.INVALID_SUB_EPOCH_SUMMARY_HASH)
if can_finish_epoch and sub_slot.challenge_chain.subepoch_summary_hash is not None:
# 2m. Check new difficulty and ssi
if sub_slot.challenge_chain.new_sub_slot_iters != expected_sub_slot_iters:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty != expected_difficulty:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
else:
# 2n. Check new difficulty and ssi are None if we don't finish epoch
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
return None, ValidationError(Err.INVALID_NEW_SUB_SLOT_ITERS)
if sub_slot.challenge_chain.new_difficulty is not None:
return None, ValidationError(Err.INVALID_NEW_DIFFICULTY)
# 2o. Check challenge sub-slot hash in reward sub-slot
if sub_slot.challenge_chain.get_hash() != sub_slot.reward_chain.challenge_chain_sub_slot_hash:
return (
None,
ValidationError(
Err.INVALID_CHALLENGE_SLOT_HASH_RC,
"sub-slot hash in reward sub-slot mismatch",
),
)
eos_vdf_iters: uint64 = expected_sub_slot_iters
cc_start_element: ClassgroupElement = ClassgroupElement.get_default_element()
cc_eos_vdf_challenge: bytes32 = challenge_hash
if genesis_block:
if finished_sub_slot_n == 0:
# First block, one empty slot. prior_point is the initial challenge
rc_eos_vdf_challenge: bytes32 = constants.GENESIS_CHALLENGE
cc_eos_vdf_challenge = constants.GENESIS_CHALLENGE
else:
# First block, but have at least two empty slots
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
# No empty slots, so the starting point of VDF is the last reward block. Uses
# the same IPS as the previous block, since it's the same slot
rc_eos_vdf_challenge = prev_b.reward_infusion_new_challenge
eos_vdf_iters = uint64(prev_b.sub_slot_iters - prev_b.ip_iters(constants))
cc_start_element = prev_b.challenge_vdf_output
else:
# At least one empty slot, so use previous slot hash. IPS might change because it's a new slot
rc_eos_vdf_challenge = header_block.finished_sub_slots[
finished_sub_slot_n - 1
].reward_chain.get_hash()
# 2p. Check end of reward slot VDF
target_vdf_info = VDFInfo(
rc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.reward_chain.end_of_slot_vdf.output,
)
if not skip_vdf_is_valid and not sub_slot.proofs.reward_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.reward_chain.end_of_slot_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_EOS_VDF)
# 2q. Check challenge chain sub-slot VDF
partial_cc_vdf_info = VDFInfo(
cc_eos_vdf_challenge,
eos_vdf_iters,
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
if genesis_block:
cc_eos_vdf_info_iters = constants.SUB_SLOT_ITERS_STARTING
else:
assert prev_b is not None
if finished_sub_slot_n == 0:
cc_eos_vdf_info_iters = prev_b.sub_slot_iters
else:
cc_eos_vdf_info_iters = expected_sub_slot_iters
# Check that the modified data is correct
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=cc_eos_vdf_info_iters,
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF, "wrong challenge chain end of slot vdf")
if not skip_vdf_is_valid:
# Pass in None for target info since we are only checking the proof from the temporary point,
# but the challenge_chain_end_of_slot_vdf actually starts from the start of slot (for light clients)
if (
not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants, cc_start_element, partial_cc_vdf_info, None
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if (
sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
and not sub_slot.proofs.challenge_chain_slot_proof.is_valid(
constants,
ClassgroupElement.get_default_element(),
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None, ValidationError(Err.INVALID_CC_EOS_VDF)
if genesis_block:
# 2r. Check deficit (MIN_SUB.. deficit edge case for genesis block)
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"genesis, expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}",
),
)
else:
assert prev_b is not None
if prev_b.deficit == 0:
# 2s. If prev sb had deficit 0, resets deficit to MIN_BLOCK_PER_CHALLENGE_BLOCK
if sub_slot.reward_chain.deficit != constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
log.error(
constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK,
)
return (
None,
ValidationError(
Err.INVALID_DEFICIT,
f"expected deficit {constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK}, saw "
f"{sub_slot.reward_chain.deficit}",
),
)
else:
# 2t. Otherwise, deficit stays the same at the slot ends, cannot reset until 0
if sub_slot.reward_chain.deficit != prev_b.deficit:
return None, ValidationError(Err.INVALID_DEFICIT, "deficit is wrong at slot end")
# 3. Check sub-epoch summary
# Note that the subepoch summary is the summary of the previous subepoch (not the one that just finished)
if not skip_overflow_last_ss_validation:
if ses_hash is not None:
# 3a. Check that genesis block does not have sub-epoch summary
if genesis_block:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
"genesis with sub-epoch-summary hash",
),
)
assert prev_b is not None
# 3b. Check that we finished a slot and we finished a sub-epoch
if not new_sub_slot or not can_finish_se:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY_HASH,
f"new sub-slot: {new_sub_slot} finishes sub-epoch {can_finish_se}",
),
)
# 3c. Check the actual sub-epoch is correct
if check_sub_epoch_summary:
expected_sub_epoch_summary = make_sub_epoch_summary(
constants,
blocks,
height,
blocks.block_record(prev_b.prev_hash),
expected_difficulty if can_finish_epoch else None,
expected_sub_slot_iters if can_finish_epoch else None,
)
expected_hash = expected_sub_epoch_summary.get_hash()
if expected_hash != ses_hash:
log.error(f"{expected_sub_epoch_summary}")
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
f"expected ses hash: {expected_hash} got {ses_hash} ",
),
)
elif new_sub_slot and not genesis_block:
# 3d. Check that we don't have to include a sub-epoch summary
if can_finish_se or can_finish_epoch:
return (
None,
ValidationError(
Err.INVALID_SUB_EPOCH_SUMMARY,
"block finishes sub-epoch but ses-hash is None",
),
)
# 4. Check if the number of blocks is less than the max
if not new_sub_slot and not genesis_block:
assert prev_b is not None
num_blocks = 2 # This includes the current block and the prev block
curr = prev_b
while not curr.first_in_sub_slot:
num_blocks += 1
curr = blocks.block_record(curr.prev_hash)
if num_blocks > constants.MAX_SUB_SLOT_BLOCKS:
return None, ValidationError(Err.TOO_MANY_BLOCKS)
# If block state is correct, we should always find a challenge here
# This computes what the challenge should be for this block
challenge = get_block_challenge(
constants,
header_block,
blocks,
genesis_block,
overflow,
skip_overflow_last_ss_validation,
)
# 5a. Check proof of space
if challenge != header_block.reward_chain_block.pos_ss_cc_challenge_hash:
log.error(f"Finished slots: {header_block.finished_sub_slots}")
log.error(
f"Data: {genesis_block} {overflow} {skip_overflow_last_ss_validation} {header_block.total_iters} "
f"{header_block.reward_chain_block.signage_point_index}"
f"Prev: {prev_b}"
)
log.error(f"Challenge {challenge} provided {header_block.reward_chain_block.pos_ss_cc_challenge_hash}")
return None, ValidationError(Err.INVALID_CC_CHALLENGE)
# 5b. Check proof of space
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Edge case of first sp (start of slot), where sp_iters == 0
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = header_block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = header_block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
if q_str is None:
return None, ValidationError(Err.INVALID_POSPACE)
# 6. check signage point index
# no need to check negative values as this is uint 8
if header_block.reward_chain_block.signage_point_index >= constants.NUM_SPS_SUB_SLOT:
return None, ValidationError(Err.INVALID_SP_INDEX)
# Note that required iters might be from the previous slot (if we are in an overflow block)
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
header_block.reward_chain_block.proof_of_space.size,
expected_difficulty,
cc_sp_hash,
)
# 7. check required iters
if required_iters >= calculate_sp_interval_iters(constants, expected_sub_slot_iters):
return None, ValidationError(Err.INVALID_REQUIRED_ITERS)
# 8a. check signage point index 0 has no cc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.challenge_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
# 8b. check signage point index 0 has no rc sp
if (header_block.reward_chain_block.signage_point_index == 0) != (
header_block.reward_chain_block.reward_chain_sp_vdf is None
):
return None, ValidationError(Err.INVALID_SP_INDEX)
sp_iters: uint64 = calculate_sp_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
)
ip_iters: uint64 = calculate_ip_iters(
constants,
expected_sub_slot_iters,
header_block.reward_chain_block.signage_point_index,
required_iters,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
# Blocks with very low required iters are not overflow blocks
assert not overflow
# 9. Check no overflows in the first sub-slot of a new epoch
# (although they are OK in the second sub-slot), this is important
if overflow and can_finish_epoch:
if finished_sub_slots_since_prev < 2:
return None, ValidationError(Err.NO_OVERFLOWS_IN_FIRST_SUB_SLOT_NEW_EPOCH)
# 10. Check total iters
if genesis_block:
total_iters: uint128 = uint128(expected_sub_slot_iters * finished_sub_slots_since_prev)
else:
assert prev_b is not None
if new_sub_slot:
total_iters = prev_b.total_iters
# Add the rest of the slot of prev_b
total_iters = uint128(total_iters + prev_b.sub_slot_iters - prev_b.ip_iters(constants))
# Add other empty slots
total_iters = uint128(total_iters + (expected_sub_slot_iters * (finished_sub_slots_since_prev - 1)))
else:
# Slot iters is guaranteed to be the same for header_block and prev_b
# This takes the beginning of the slot, and adds ip_iters
total_iters = uint128(prev_b.total_iters - prev_b.ip_iters(constants))
total_iters = uint128(total_iters + ip_iters)
if total_iters != header_block.reward_chain_block.total_iters:
return (
None,
ValidationError(
Err.INVALID_TOTAL_ITERS,
f"expected {total_iters} got {header_block.reward_chain_block.total_iters}",
),
)
sp_total_iters: uint128 = uint128(total_iters - ip_iters + sp_iters - (expected_sub_slot_iters if overflow else 0))
if overflow and skip_overflow_last_ss_validation:
dummy_vdf_info = VDFInfo(
bytes32([0] * 32),
uint64(1),
ClassgroupElement.get_default_element(),
)
dummy_sub_slot = EndOfSubSlotBundle(
ChallengeChainSubSlot(dummy_vdf_info, None, None, None, None),
None,
RewardChainSubSlot(dummy_vdf_info, bytes32([0] * 32), None, uint8(0)),
SubSlotProofs(VDFProof(uint8(0), b"", False), None, VDFProof(uint8(0), b"", False)),
)
sub_slots_to_pass_in = header_block.finished_sub_slots + [dummy_sub_slot]
else:
sub_slots_to_pass_in = header_block.finished_sub_slots
(
cc_vdf_challenge,
rc_vdf_challenge,
cc_vdf_input,
rc_vdf_input,
cc_vdf_iters,
rc_vdf_iters,
) = get_signage_point_vdf_info(
constants,
sub_slots_to_pass_in,
overflow,
prev_b,
blocks,
sp_total_iters,
sp_iters,
)
# 11. Check reward chain sp proof
if sp_iters != 0:
assert (
header_block.reward_chain_block.reward_chain_sp_vdf is not None
and header_block.reward_chain_sp_proof is not None
)
target_vdf_info = VDFInfo(
rc_vdf_challenge,
rc_vdf_iters,
header_block.reward_chain_block.reward_chain_sp_vdf.output,
)
if not skip_vdf_is_valid and not header_block.reward_chain_sp_proof.is_valid(
constants,
rc_vdf_input,
header_block.reward_chain_block.reward_chain_sp_vdf,
target_vdf_info,
):
return None, ValidationError(Err.INVALID_RC_SP_VDF)
rc_sp_hash = header_block.reward_chain_block.reward_chain_sp_vdf.output.get_hash()
else:
# Edge case of first sp (start of slot), where sp_iters == 0
assert overflow is not None
if header_block.reward_chain_block.reward_chain_sp_vdf is not None:
return None, ValidationError(Err.INVALID_RC_SP_VDF)
if new_sub_slot:
rc_sp_hash = header_block.finished_sub_slots[-1].reward_chain.get_hash()
else:
if genesis_block:
rc_sp_hash = constants.GENESIS_CHALLENGE
else:
assert prev_b is not None
curr = prev_b
while not curr.first_in_sub_slot:
curr = blocks.block_record(curr.prev_hash)
assert curr.finished_reward_slot_hashes is not None
rc_sp_hash = curr.finished_reward_slot_hashes[-1]
# 12. Check reward chain sp signature
if not AugSchemeMPL.verify(
header_block.reward_chain_block.proof_of_space.plot_public_key,
rc_sp_hash,
header_block.reward_chain_block.reward_chain_sp_signature,
):
return None, ValidationError(Err.INVALID_RC_SIGNATURE)
# 13. Check cc sp vdf
if sp_iters != 0:
assert header_block.reward_chain_block.challenge_chain_sp_vdf is not None
assert header_block.challenge_chain_sp_proof is not None
target_vdf_info = VDFInfo(
cc_vdf_challenge,
cc_vdf_iters,
header_block.reward_chain_block.challenge_chain_sp_vdf.output,
)
if header_block.reward_chain_block.challenge_chain_sp_vdf != dataclasses.replace(
target_vdf_info,
number_of_iterations=sp_iters,
):
return None, ValidationError(Err.INVALID_CC_SP_VDF)
if not skip_vdf_is_valid:
if (
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | true |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/__init__.py | flax/consensus/__init__.py | python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false | |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/condition_costs.py | flax/consensus/condition_costs.py | from __future__ import annotations
from enum import Enum
class ConditionCost(Enum):
    """Fixed CLVM condition costs charged when evaluating block conditions."""

    # One G1 subgroup check plus aggregated-signature validation.
    AGG_SIG = 1_200_000
    # Cost charged per CREATE_COIN condition.
    CREATE_COIN = 1_800_000
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/pos_quality.py | flax/consensus/pos_quality.py | from __future__ import annotations
from flax.util.ints import uint64
# The actual space in bytes of a plot is _expected_plot_size(k) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
# This is not used in consensus, only for display purposes
UI_ACTUAL_SPACE_CONSTANT_FACTOR = 0.762
def _expected_plot_size(k: int) -> uint64:
    """
    Return the expected size (times a constant factor) of a plot with size
    parameter k, where k is between 32 and 59.

    The estimate assumes an efficient plot encoding and aims to be scale
    agnostic, so larger plots don't necessarily earn more rewards per byte.
    Each of the 2**(k - 1) entries is charged 2*k + 1 units; the +1 gives half
    a bit of extra space per entry, which is necessary to store the entries.
    """
    entries = 2 ** (k - 1)
    per_entry = (2 * k) + 1
    return uint64(per_entry * entries)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/coinbase.py | flax/consensus/coinbase.py | from __future__ import annotations
from blspy import G1Element
from flax.types.blockchain_format.coin import Coin
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.ints import uint32, uint64
from flax.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_hash_for_pk
def create_puzzlehash_for_pk(pub_key: G1Element) -> bytes32:
    """Return the standard p2_delegated_puzzle_or_hidden_puzzle hash for *pub_key*."""
    return puzzle_hash_for_pk(pub_key)
def pool_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
    """
    Deterministic parent coin id for the pool reward at *block_height*: the
    first 16 bytes of the genesis challenge followed by the height encoded as
    a 16-byte big-endian integer.
    """
    encoded_height = block_height.to_bytes(16, "big")
    return bytes32(genesis_challenge[:16] + encoded_height)
def farmer_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
    """
    Deterministic parent coin id for the farmer reward at *block_height*: the
    last 16 bytes of the genesis challenge followed by the height encoded as
    a 16-byte big-endian integer (distinct from the pool parent id).
    """
    encoded_height = block_height.to_bytes(16, "big")
    return bytes32(genesis_challenge[16:] + encoded_height)
def create_pool_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32) -> Coin:
    """Build the pool reward coin minted at *block_height*, paying *reward* to *puzzle_hash*."""
    return Coin(pool_parent_id(block_height, genesis_challenge), puzzle_hash, reward)
def create_farmer_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32) -> Coin:
    """Build the farmer reward coin minted at *block_height*, paying *reward* to *puzzle_hash*."""
    return Coin(farmer_parent_id(block_height, genesis_challenge), puzzle_hash, reward)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/block_root_validation.py | flax/consensus/block_root_validation.py | from __future__ import annotations
from typing import Dict, List, Optional, Tuple
from chia_rs import compute_merkle_set_root
from flax.types.blockchain_format.coin import Coin, hash_coin_ids
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.util.errors import Err
def validate_block_merkle_roots(
    block_additions_root: bytes32,
    block_removals_root: bytes32,
    tx_additions: Optional[List[Tuple[Coin, bytes32]]] = None,
    tx_removals: Optional[List[bytes32]] = None,
) -> Optional[Err]:
    """
    Recompute the addition and removal Merkle-set roots from the transaction
    data and compare them to the roots the block committed to.

    Returns None when both roots match, otherwise the corresponding Err.
    """
    additions = tx_additions if tx_additions is not None else []
    removals = tx_removals if tx_removals is not None else []

    # Group added coin ids by puzzle hash (dict preserves insertion order).
    coin_ids_by_puzzle: Dict[bytes32, List[bytes32]] = {}
    for coin, coin_name in additions:
        coin_ids_by_puzzle.setdefault(coin.puzzle_hash, []).append(coin_name)

    # The addition Merkle set holds each puzzle hash followed by the hash of
    # all coin ids sharing that puzzle hash.
    addition_items: List[bytes32] = []
    for puzzle_hash, coin_ids in coin_ids_by_puzzle.items():
        addition_items.append(puzzle_hash)
        addition_items.append(hash_coin_ids(coin_ids))

    if block_additions_root != bytes32(compute_merkle_set_root(addition_items)):
        return Err.BAD_ADDITION_ROOT
    if block_removals_root != bytes32(compute_merkle_set_root(removals)):
        return Err.BAD_REMOVAL_ROOT
    return None
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/consensus/full_block_to_block_record.py | flax/consensus/full_block_to_block_record.py | from __future__ import annotations
from typing import List, Optional, Union
from flax.consensus.block_record import BlockRecord
from flax.consensus.blockchain_interface import BlockchainInterface
from flax.consensus.constants import ConsensusConstants
from flax.consensus.deficit import calculate_deficit
from flax.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from flax.consensus.make_sub_epoch_summary import make_sub_epoch_summary
from flax.consensus.pot_iterations import is_overflow_block
from flax.types.blockchain_format.classgroup import ClassgroupElement
from flax.types.blockchain_format.sized_bytes import bytes32
from flax.types.blockchain_format.slots import ChallengeBlockInfo
from flax.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from flax.types.full_block import FullBlock
from flax.types.header_block import HeaderBlock
from flax.util.errors import Err
from flax.util.ints import uint8, uint32, uint64
def block_to_block_record(
    constants: ConsensusConstants,
    blocks: BlockchainInterface,
    required_iters: uint64,
    full_block: Optional[Union[FullBlock, HeaderBlock]],
    header_block: Optional[HeaderBlock],
    sub_slot_iters: Optional[uint64] = None,
) -> BlockRecord:
    """
    Build the BlockRecord summary for a block.

    *header_block* is only consulted when *full_block* is None. When
    *sub_slot_iters* is not supplied it is derived from the previous block via
    get_next_sub_slot_iters_and_difficulty.

    Raises:
        ValueError(Err.INVALID_SUB_EPOCH_SUMMARY): if a sub-epoch summary hash
            found in the finished sub slots does not match the recomputed
            summary.
    """
    if full_block is None:
        assert header_block is not None
        block: Union[HeaderBlock, FullBlock] = header_block
    else:
        block = full_block
    prev_b = blocks.try_block_record(block.prev_header_hash)
    # Only the genesis block (height 0) may lack a previous block record.
    if block.height > 0:
        assert prev_b is not None
    if sub_slot_iters is None:
        sub_slot_iters, _ = get_next_sub_slot_iters_and_difficulty(
            constants, len(block.finished_sub_slots) > 0, prev_b, blocks
        )
    overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
    deficit = calculate_deficit(
        constants,
        block.height,
        prev_b,
        overflow,
        len(block.finished_sub_slots),
    )
    # If any finished sub slot carries a sub-epoch summary hash, remember the
    # last one seen and validate it below against a recomputed summary.
    found_ses_hash: Optional[bytes32] = None
    ses: Optional[SubEpochSummary] = None
    if len(block.finished_sub_slots) > 0:
        for sub_slot in block.finished_sub_slots:
            if sub_slot.challenge_chain.subepoch_summary_hash is not None:
                found_ses_hash = sub_slot.challenge_chain.subepoch_summary_hash
    if found_ses_hash:
        assert prev_b is not None
        assert len(block.finished_sub_slots) > 0
        # Recompute the summary from chain state; the new difficulty and
        # sub-slot iters are taken from the first finished sub slot.
        ses = make_sub_epoch_summary(
            constants,
            blocks,
            block.height,
            blocks.block_record(prev_b.prev_hash),
            block.finished_sub_slots[0].challenge_chain.new_difficulty,
            block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters,
        )
        if ses.get_hash() != found_ses_hash:
            raise ValueError(Err.INVALID_SUB_EPOCH_SUMMARY)
    # Walk back through the chain to find the height of the most recent
    # transaction block before this one (stays 0 if there is none).
    prev_transaction_block_height = uint32(0)
    curr: Optional[BlockRecord] = blocks.try_block_record(block.prev_header_hash)
    while curr is not None and not curr.is_transaction_block:
        curr = blocks.try_block_record(curr.prev_hash)
    if curr is not None and curr.is_transaction_block:
        prev_transaction_block_height = curr.height
    return header_block_to_sub_block_record(
        constants,
        required_iters,
        block,
        sub_slot_iters,
        overflow,
        deficit,
        prev_transaction_block_height,
        ses,
    )
def header_block_to_sub_block_record(
    constants: ConsensusConstants,
    required_iters: uint64,
    block: Union[FullBlock, HeaderBlock],
    sub_slot_iters: uint64,
    overflow: bool,
    deficit: uint8,
    prev_transaction_block_height: uint32,
    ses: Optional[SubEpochSummary],
) -> BlockRecord:
    """
    Assemble a BlockRecord from a full (or header) block plus the consensus
    values already computed for it.
    """
    rc_block = block.reward_chain_block

    # Fields that only exist on transaction blocks; None otherwise.
    if block.transactions_info is not None:
        reward_claims_incorporated = block.transactions_info.reward_claims_incorporated
        fees = block.transactions_info.fees
    else:
        reward_claims_incorporated = None
        fees = None
    if block.foliage_transaction_block is not None:
        timestamp = block.foliage_transaction_block.timestamp
        prev_transaction_block_hash = block.foliage_transaction_block.prev_transaction_block_hash
    else:
        timestamp = None
        prev_transaction_block_hash = None

    challenge_block_info = ChallengeBlockInfo(
        rc_block.proof_of_space,
        rc_block.challenge_chain_sp_vdf,
        rc_block.challenge_chain_sp_signature,
        rc_block.challenge_chain_ip_vdf,
    )

    icc_vdf = rc_block.infused_challenge_chain_ip_vdf
    icc_output: Optional[ClassgroupElement] = None if icc_vdf is None else icc_vdf.output

    if len(block.finished_sub_slots) > 0:
        # Hashes of every sub slot finished immediately before this block.
        finished_challenge_slot_hashes: Optional[List[bytes32]] = [
            slot.challenge_chain.get_hash() for slot in block.finished_sub_slots
        ]
        finished_reward_slot_hashes: Optional[List[bytes32]] = [
            slot.reward_chain.get_hash() for slot in block.finished_sub_slots
        ]
        finished_infused_challenge_slot_hashes: Optional[List[bytes32]] = [
            slot.infused_challenge_chain.get_hash()
            for slot in block.finished_sub_slots
            if slot.infused_challenge_chain is not None
        ]
    elif block.height == 0:
        # Genesis block: both chains start from the genesis challenge.
        finished_challenge_slot_hashes = [constants.GENESIS_CHALLENGE]
        finished_reward_slot_hashes = [constants.GENESIS_CHALLENGE]
        finished_infused_challenge_slot_hashes = None
    else:
        finished_challenge_slot_hashes = None
        finished_reward_slot_hashes = None
        finished_infused_challenge_slot_hashes = None

    return BlockRecord(
        block.header_hash,
        block.prev_header_hash,
        block.height,
        block.weight,
        block.total_iters,
        rc_block.signage_point_index,
        rc_block.challenge_chain_ip_vdf.output,
        icc_output,
        rc_block.get_hash(),
        challenge_block_info.get_hash(),
        sub_slot_iters,
        block.foliage.foliage_block_data.pool_target.puzzle_hash,
        block.foliage.foliage_block_data.farmer_reward_puzzle_hash,
        required_iters,
        deficit,
        overflow,
        prev_transaction_block_height,
        timestamp,
        prev_transaction_block_hash,
        fees,
        reward_claims_incorporated,
        finished_challenge_slot_hashes,
        finished_infused_challenge_slot_hashes,
        finished_reward_slot_hashes,
        ses,
    )
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/address_manager_store.py | flax/server/address_manager_store.py | import aiofiles
import asyncio
import logging
from flax.server.address_manager import (
BUCKET_SIZE,
NEW_BUCKET_COUNT,
NEW_BUCKETS_PER_ADDRESS,
AddressManager,
ExtendedPeerInfo,
)
from flax.util.files import write_file_async
from flax.util.ints import uint64
from flax.util.streamable import streamable, Streamable
from dataclasses import dataclass
from pathlib import Path
from timeit import default_timer as timer
from typing import Any, Dict, List, Optional, Tuple
log = logging.getLogger(__name__)
@streamable
@dataclass(frozen=True)
class PeerDataSerialization(Streamable):
    """
    Serializable property bag for the peer data that was previously stored in sqlite.
    """

    # Key/value pairs; AddressManagerStore writes "key", "new_count" and "tried_count".
    metadata: List[Tuple[str, str]]
    # (node_id, serialized ExtendedPeerInfo) pairs for the new and tried tables.
    nodes: List[Tuple[uint64, str]]
    # (node_id, bucket) occurrence pairs for entries of the "new" table.
    new_table: List[Tuple[uint64, uint64]]
async def makePeerDataSerialization(
    metadata: List[Tuple[str, Any]], nodes: List[Tuple[int, ExtendedPeerInfo]], new_table: List[Tuple[int, int]]
) -> bytes:
    """
    Serialize the peer data to bytes, converting the node and new-table
    entries into the streamable field types of PeerDataSerialization.
    """
    node_rows: List[Tuple[uint64, str]] = []
    for i, (node_id, peer_info) in enumerate(nodes):
        node_rows.append((uint64(node_id), peer_info.to_string()))
        if i % 1000 == 0:
            # Yield to the event loop periodically so long lists don't hog it.
            await asyncio.sleep(0)

    new_table_rows: List[Tuple[uint64, uint64]] = []
    for i, (node_id, bucket_id) in enumerate(new_table):
        new_table_rows.append((uint64(node_id), uint64(bucket_id)))
        if i % 1000 == 0:
            await asyncio.sleep(0)

    return bytes(PeerDataSerialization(metadata, node_rows, new_table_rows))
class AddressManagerStore:
    """
    File-based (de)serialization of AddressManager peer data.

    Metadata table:
    - private key
    - new table count
    - tried table count
    Nodes table:
    * Maps entries from new/tried table to unique node ids.
    - node_id
    - IP, port, together with the IP, port of the source peer.
    New table:
    * Stores node_id, bucket for each occurrence in the new table of an entry.
    * Once we know the buckets, we can also deduce the bucket positions.
    All other information (tried_matrix, map_addr, map_info, random_pos, ...)
    is not stored explicitly; it is recalculated when the data is loaded.
    """

    @classmethod
    async def create_address_manager(cls, peers_file_path: Path) -> AddressManager:
        """
        Create an address manager using data deserialized from a peers file.
        Falls back to an empty AddressManager if the file is missing or unreadable.
        """
        address_manager: Optional[AddressManager] = None
        if peers_file_path.exists():
            try:
                log.info(f"Loading peers from {peers_file_path}")
                address_manager = await cls._deserialize(peers_file_path)
            except Exception:
                # Best-effort load: a corrupt peers file should not prevent startup.
                log.exception(f"Unable to create address_manager from {peers_file_path}")
        if address_manager is None:
            log.info("Creating new address_manager")
            address_manager = AddressManager()
        return address_manager

    @classmethod
    async def serialize(cls, address_manager: AddressManager, peers_file_path: Path) -> None:
        """
        Serialize the address manager's peer data to a file.

        Node ids are renumbered into a compact range: first all "new" entries
        (ids 0..new_count-1), then all tried entries.
        """
        metadata: List[Tuple[str, str]] = []
        nodes: List[Tuple[int, ExtendedPeerInfo]] = []
        new_table_entries: List[Tuple[int, int]] = []
        # Maps original node ids to the renumbered ids used in the file.
        unique_ids: Dict[int, int] = {}
        count_ids: int = 0
        log.info("Serializing peer data")
        metadata.append(("key", str(address_manager.key)))
        # First pass: entries referenced by the "new" table (ref_count > 0).
        for node_id, info in address_manager.map_info.items():
            unique_ids[node_id] = count_ids
            if info.ref_count > 0:
                assert count_ids != address_manager.new_count
                nodes.append((count_ids, info))
                count_ids += 1
        metadata.append(("new_count", str(count_ids)))
        # Second pass: tried entries are appended after all new entries.
        tried_ids = 0
        for node_id, info in address_manager.map_info.items():
            if info.is_tried:
                assert info is not None
                assert tried_ids != address_manager.tried_count
                nodes.append((count_ids, info))
                count_ids += 1
                tried_ids += 1
        metadata.append(("tried_count", str(tried_ids)))
        # Record (renumbered id, bucket) for every occupied new-table slot.
        for bucket in range(NEW_BUCKET_COUNT):
            for i in range(BUCKET_SIZE):
                if address_manager.new_matrix[bucket][i] != -1:
                    index = unique_ids[address_manager.new_matrix[bucket][i]]
                    new_table_entries.append((index, bucket))
        try:
            # Ensure the parent directory exists
            peers_file_path.parent.mkdir(parents=True, exist_ok=True)
            start_time = timer()
            await cls._write_peers(peers_file_path, metadata, nodes, new_table_entries)
            log.debug(f"Serializing peer data took {timer() - start_time} seconds")
        except Exception:
            # Best-effort persistence: failure to write is logged, not raised.
            log.exception(f"Failed to write peer data to {peers_file_path}")

    @classmethod
    async def _deserialize(cls, peers_file_path: Path) -> AddressManager:
        """
        Create an address manager using data deserialized from a peers file.
        Rebuilds the derived tables (map_addr, map_info, random_pos, matrices)
        from the stored node list and new-table occurrences.
        """
        peer_data: Optional[PeerDataSerialization] = None
        address_manager = AddressManager()
        start_time = timer()
        try:
            peer_data = await cls._read_peers(peers_file_path)
        except Exception:
            log.exception(f"Unable to deserialize peers from {peers_file_path}")
        if peer_data is not None:
            metadata: Dict[str, str] = {key: value for key, value in peer_data.metadata}
            nodes: List[Tuple[int, ExtendedPeerInfo]] = [
                (node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in peer_data.nodes
            ]
            new_table_entries: List[Tuple[int, int]] = [(node_id, bucket) for node_id, bucket in peer_data.new_table]
            log.debug(f"Deserializing peer data took {timer() - start_time} seconds")
            address_manager.key = int(metadata["key"])
            address_manager.new_count = int(metadata["new_count"])
            # Tried count is recomputed below instead of trusting the file.
            # address_manager.tried_count = int(metadata["tried_count"])
            address_manager.tried_count = 0
            # Ids below new_count belong to the "new" table (see serialize()).
            new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count]
            for n, info in new_table_nodes:
                address_manager.map_addr[info.peer_info.host] = n
                address_manager.map_info[n] = info
                info.random_pos = len(address_manager.random_pos)
                address_manager.random_pos.append(n)
            address_manager.id_count = len(new_table_nodes)
            tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count]
            # lost_count = 0
            for node_id, info in tried_table_nodes:
                # Re-derive the tried bucket/position; entries that collide
                # with an already-occupied slot are dropped.
                tried_bucket = info.get_tried_bucket(address_manager.key)
                tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket)
                if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
                    info.random_pos = len(address_manager.random_pos)
                    info.is_tried = True
                    id_count = address_manager.id_count
                    address_manager.random_pos.append(id_count)
                    address_manager.map_info[id_count] = info
                    address_manager.map_addr[info.peer_info.host] = id_count
                    address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count
                    address_manager.id_count += 1
                    address_manager.tried_count += 1
                # else:
                #     lost_count += 1
            # address_manager.tried_count -= lost_count
            # Restore new-table bucket occupancy and reference counts.
            for node_id, bucket in new_table_entries:
                if node_id >= 0 and node_id < address_manager.new_count:
                    info = address_manager.map_info[node_id]
                    bucket_pos = info.get_bucket_position(address_manager.key, True, bucket)
                    if (
                        address_manager.new_matrix[bucket][bucket_pos] == -1
                        and info.ref_count < NEW_BUCKETS_PER_ADDRESS
                    ):
                        info.ref_count += 1
                        address_manager.new_matrix[bucket][bucket_pos] = node_id
            # Drop entries that ended up unreferenced and untried.
            for node_id, info in list(address_manager.map_info.items()):
                if not info.is_tried and info.ref_count == 0:
                    address_manager.delete_new_entry_(node_id)
        address_manager.load_used_table_positions()
        return address_manager

    @classmethod
    async def _read_peers(cls, peers_file_path: Path) -> PeerDataSerialization:
        """
        Read the peers file and return the data as a PeerDataSerialization object.
        """
        async with aiofiles.open(peers_file_path, "rb") as f:
            return PeerDataSerialization.from_bytes(await f.read())

    @classmethod
    async def _write_peers(
        cls,
        peers_file_path: Path,
        metadata: List[Tuple[str, Any]],
        nodes: List[Tuple[int, ExtendedPeerInfo]],
        new_table: List[Tuple[int, int]],
    ) -> None:
        """
        Serializes the given peer data and writes it to the peers file.
        """
        serialized_bytes: bytes = await makePeerDataSerialization(metadata, nodes, new_table)
        await write_file_async(peers_file_path, serialized_bytes, file_mode=0o644)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_introducer.py | flax/server/start_introducer.py | from __future__ import annotations
import pathlib
import sys
from typing import Dict, Optional
from flax.introducer.introducer import Introducer
from flax.introducer.introducer_api import IntroducerAPI
from flax.server.outbound_message import NodeType
from flax.server.start_service import Service, async_run
from flax.util.flax_logging import initialize_service_logging
from flax.util.config import load_config, load_config_cli
from flax.util.default_root import DEFAULT_ROOT_PATH
# See: https://bugs.python.org/issue29288
"".encode("idna")
SERVICE_NAME = "introducer"
def create_introducer_service(
    root_path: pathlib.Path,
    config: Dict,
    advertised_port: Optional[int] = None,
    connect_to_daemon: bool = True,
) -> Service[Introducer]:
    """
    Build the introducer Service from the "introducer" section of *config*.

    When *advertised_port* is not given, the service's configured listen port
    is advertised instead.
    """
    service_config = config[SERVICE_NAME]
    listen_port = service_config["port"]
    if advertised_port is None:
        advertised_port = listen_port
    introducer_node = Introducer(service_config["max_peers_to_send"], service_config["recent_peer_threshold"])
    peer_api = IntroducerAPI(introducer_node)
    return Service(
        root_path=root_path,
        config=config,
        node=introducer_node,
        peer_api=peer_api,
        node_type=NodeType.INTRODUCER,
        service_name=SERVICE_NAME,
        server_listen_ports=[listen_port],
        network_id=service_config["selected_network"],
        advertised_port=advertised_port,
        connect_to_daemon=connect_to_daemon,
    )
async def async_main() -> int:
    """Configure and run the introducer service; returns the process exit code."""
    # TODO: refactor to avoid the double load
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
    # The CLI-aware loader's result replaces the plain-loaded service section.
    config[SERVICE_NAME] = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    introducer_service = create_introducer_service(DEFAULT_ROOT_PATH, config)
    initialize_service_logging(service_name=SERVICE_NAME, config=config)
    await introducer_service.setup_process_global_state()
    await introducer_service.run()
    return 0
def main() -> int:
    """Synchronous entry point: drive async_main and return its exit code."""
    exit_code = async_run(async_main())
    return exit_code
# Allow running the introducer directly as a script.
if __name__ == "__main__":
    sys.exit(main())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/outbound_message.py | flax/server/outbound_message.py | from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, SupportsBytes, Union
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.util.ints import uint8, uint16
from flax.util.streamable import Streamable, streamable
class NodeType(IntEnum):
    """Role of a peer process on the network.

    NOTE(review): these IntEnum values appear protocol-facing (peers are
    identified by node type on connect) — confirm before renumbering.
    """

    FULL_NODE = 1
    HARVESTER = 2
    FARMER = 3
    TIMELORD = 4
    INTRODUCER = 5
    WALLET = 6
    DATA_LAYER = 7
class Delivery(IntEnum):
    """Routing policy for an outbound message."""

    # A message is sent to the same peer that we received a message from
    RESPOND = 1
    # A message is sent to all peers
    BROADCAST = 2
    # A message is sent to all peers except the one from which we received the API call
    BROADCAST_TO_OTHERS = 3
    # A message is sent to a random peer
    RANDOM = 4
    # Pseudo-message to close the current connection
    CLOSE = 5
    # A message is sent to a specific peer
    SPECIFIC = 6
@streamable
@dataclass(frozen=True)
class Message(Streamable):
    """A single framed protocol message exchanged with a peer."""

    type: uint8  # one of ProtocolMessageTypes
    # message id (make_msg below constructs messages with id=None)
    id: Optional[uint16]
    # Message data for that type
    data: bytes
def make_msg(msg_type: ProtocolMessageTypes, data: Union[bytes, SupportsBytes]) -> Message:
    """Wrap *data* in a Message of the given protocol type, with no message id."""
    payload = bytes(data)
    return Message(uint8(msg_type.value), None, payload)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_data_layer.py | flax/server/start_data_layer.py | from __future__ import annotations
import logging
import pathlib
import sys
from typing import Any, Dict, Optional, cast
from flax.cmds.init_funcs import create_all_ssl
from flax.data_layer.data_layer import DataLayer
from flax.data_layer.data_layer_api import DataLayerAPI
from flax.rpc.data_layer_rpc_api import DataLayerRpcApi
from flax.rpc.wallet_rpc_client import WalletRpcClient
from flax.server.outbound_message import NodeType
from flax.server.start_service import RpcInfo, Service, async_run
from flax.server.start_wallet import WalletNode
from flax.util.flax_logging import initialize_logging
from flax.util.config import load_config, load_config_cli
from flax.util.default_root import DEFAULT_ROOT_PATH
from flax.util.ints import uint16
# See: https://bugs.python.org/issue29288
# Eagerly trigger the idna codec import at module load time.
"".encode("idna")

SERVICE_NAME = "data_layer"
log = logging.getLogger(__name__)
# TODO: Review need for config and if retained then hint it properly.
def create_data_layer_service(
    root_path: pathlib.Path,
    config: Dict[str, Any],
    wallet_service: Optional[Service[WalletNode]] = None,
    connect_to_daemon: bool = True,
) -> Service[DataLayer]:
    """Wire up a DataLayer node, its peer API and (optionally) its RPC server,
    returning an un-started Service wrapper."""
    service_config = config[SERVICE_NAME]
    self_hostname = config["self_hostname"]
    wallet_rpc_port = service_config["wallet_peer"]["port"]

    # Talk to the wallet using the provided wallet service's root/config when
    # one is given; otherwise fall back to our own.
    if wallet_service is not None:
        wallet_root_path = wallet_service.root_path
        wallet_config = wallet_service.config
    else:
        wallet_root_path = root_path
        wallet_config = config
    wallet_rpc_init = WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), wallet_root_path, wallet_config)

    data_layer = DataLayer(config=service_config, root_path=root_path, wallet_rpc_init=wallet_rpc_init)
    api = DataLayerAPI(data_layer)

    rpc_info: Optional[RpcInfo] = None
    if service_config.get("rpc_port") is not None:
        rpc_info = (DataLayerRpcApi, cast(int, service_config["rpc_port"]))

    return Service(
        server_listen_ports=[service_config["port"]],
        root_path=root_path,
        config=config,
        node=data_layer,
        # TODO: not for peers...
        peer_api=api,
        node_type=NodeType.DATA_LAYER,
        # TODO: no publicly advertised port, at least not yet
        advertised_port=service_config["port"],
        service_name=SERVICE_NAME,
        network_id=service_config["selected_network"],
        max_request_body_size=service_config.get("rpc_server_max_request_body_size", 26214400),
        rpc_info=rpc_info,
        connect_to_daemon=connect_to_daemon,
    )
async def async_main() -> int:
    """Load configuration, set up logging and SSL, then run the data layer
    service until shutdown. Returns the process exit code (0 on clean exit)."""
    # TODO: refactor to avoid the double load
    config = load_config(DEFAULT_ROOT_PATH, "config.yaml", fill_missing_services=True)
    service_config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME, fill_missing_services=True)
    config[SERVICE_NAME] = service_config
    initialize_logging(
        service_name=SERVICE_NAME,
        logging_config=service_config["logging"],
        root_path=DEFAULT_ROOT_PATH,
    )
    # Ensure the data_layer SSL certificates exist; never overwrites existing ones.
    create_all_ssl(
        root_path=DEFAULT_ROOT_PATH,
        private_node_names=["data_layer"],
        public_node_names=["data_layer"],
        overwrite=False,
    )
    service = create_data_layer_service(DEFAULT_ROOT_PATH, config)
    await service.setup_process_global_state()
    await service.run()
    return 0
def main() -> int:
    """Synchronous entry point: drive async_main() to completion and return its exit code."""
    return async_run(async_main())


if __name__ == "__main__":
    sys.exit(main())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/rate_limits.py | flax/server/rate_limits.py | from __future__ import annotations
import dataclasses
import logging
import time
from collections import Counter
from typing import Dict, List
from flax.protocols.protocol_message_types import ProtocolMessageTypes
from flax.protocols.shared_protocol import Capability
from flax.server.outbound_message import Message
from flax.server.rate_limit_numbers import RLSettings, get_rate_limits_to_use
log = logging.getLogger(__name__)
# TODO: only full node disconnects based on rate limits
class RateLimiter:
    """Tracks per-message-type counts and cumulative sizes within a rolling
    time window and decides whether each message stays under the configured
    rate limits. Separate instances are used for inbound and outbound traffic.
    """

    incoming: bool
    reset_seconds: int
    # Index of the current time window (wall clock bucketed into reset_seconds slots).
    current_minute: int
    message_counts: Counter
    message_cumulative_sizes: Counter
    percentage_of_limit: int
    non_tx_message_counts: int = 0
    non_tx_cumulative_size: int = 0

    def __init__(self, incoming: bool, reset_seconds: int = 60, percentage_of_limit: int = 100):
        """
        The incoming parameter affects whether counters are incremented
        unconditionally or not. For incoming messages, the counters are always
        incremented. For outgoing messages, the counters are only incremented
        if they are allowed to be sent by the rate limiter, since we won't send
        the messages otherwise.
        """
        self.incoming = incoming
        self.reset_seconds = reset_seconds
        self.current_minute = int(time.time() // reset_seconds)
        self.message_counts = Counter()
        self.message_cumulative_sizes = Counter()
        self.percentage_of_limit = percentage_of_limit
        self.non_tx_message_counts = 0
        self.non_tx_cumulative_size = 0

    def process_msg_and_check(
        self, message: Message, our_capabilities: List[Capability], peer_capabilities: List[Capability]
    ) -> bool:
        """
        Returns True if message can be processed successfully, false if a rate limit is passed.
        """
        current_minute = int(time.time() // self.reset_seconds)
        if current_minute != self.current_minute:
            # A new window has started: reset all counters.
            self.current_minute = current_minute
            self.message_counts = Counter()
            self.message_cumulative_sizes = Counter()
            self.non_tx_message_counts = 0
            self.non_tx_cumulative_size = 0
        try:
            message_type = ProtocolMessageTypes(message.type)
        except Exception as e:
            # Unknown message type: let the caller handle/reject it elsewhere.
            log.warning(f"Invalid message: {message.type}, {e}")
            return True

        # Tentative (uncommitted) counter values for this message; the finally
        # block decides whether they are written back.
        new_message_counts: int = self.message_counts[message_type] + 1
        new_cumulative_size: int = self.message_cumulative_sizes[message_type] + len(message.data)
        new_non_tx_count: int = self.non_tx_message_counts
        new_non_tx_size: int = self.non_tx_cumulative_size
        proportion_of_limit: float = self.percentage_of_limit / 100

        # ret records whether the message was allowed; see the finally block.
        ret: bool = False
        rate_limits: Dict = get_rate_limits_to_use(our_capabilities, peer_capabilities)

        try:
            limits: RLSettings = rate_limits["default_settings"]
            if message_type in rate_limits["rate_limits_tx"]:
                limits = rate_limits["rate_limits_tx"][message_type]
            elif message_type in rate_limits["rate_limits_other"]:
                limits = rate_limits["rate_limits_other"][message_type]
                # Non-transaction messages additionally share a global budget
                # (count and cumulative size) across all such message types.
                non_tx_freq = rate_limits["non_tx_freq"]
                non_tx_max_total_size = rate_limits["non_tx_max_total_size"]
                new_non_tx_count = self.non_tx_message_counts + 1
                new_non_tx_size = self.non_tx_cumulative_size + len(message.data)
                if new_non_tx_count > non_tx_freq * proportion_of_limit:
                    return False
                if new_non_tx_size > non_tx_max_total_size * proportion_of_limit:
                    return False
            else:
                # Falls through with the default limits applied.
                log.warning(f"Message type {message_type} not found in rate limits")

            if limits.max_total_size is None:
                limits = dataclasses.replace(limits, max_total_size=limits.frequency * limits.max_size)
            assert limits.max_total_size is not None

            if new_message_counts > limits.frequency * proportion_of_limit:
                return False
            if len(message.data) > limits.max_size:
                return False
            if new_cumulative_size > limits.max_total_size * proportion_of_limit:
                return False

            ret = True
            return True
        finally:
            if self.incoming or ret:
                # now that we determined that it's OK to send the message, commit the
                # updates to the counters. Alternatively, if this was an
                # incoming message, we already received it and it should
                # increment the counters unconditionally
                self.message_counts[message_type] = new_message_counts
                self.message_cumulative_sizes[message_type] = new_cumulative_size
                self.non_tx_message_counts = new_non_tx_count
                self.non_tx_cumulative_size = new_non_tx_size
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/reconnect_task.py | flax/server/reconnect_task.py | import asyncio
from typing import Optional
from flax.server.server import FlaxServer
from flax.types.peer_info import PeerInfo
from flax.util.network import get_host_addr
def start_reconnect_task(server: FlaxServer, peer_info_arg: PeerInfo, log, prefer_ipv6: Optional[bool]):
    """
    Start a background task that checks connection and reconnects periodically to a peer.
    """
    # Resolve the peer to a concrete address up-front unless it already is one.
    if peer_info_arg.is_valid():
        resolved = peer_info_arg
    else:
        resolved = PeerInfo(get_host_addr(peer_info_arg, prefer_ipv6), peer_info_arg.port)

    async def _keep_connected():
        # Every 3 seconds: if no current connection matches the peer (resolved
        # or as originally given), attempt to reconnect.
        while True:
            connected = any(
                conn.get_peer_info() == resolved or conn.get_peer_info() == peer_info_arg
                for conn in server.all_connections.values()
            )
            if not connected:
                log.info(f"Reconnecting to peer {resolved}")
                try:
                    await server.start_client(resolved, None)
                except Exception as e:
                    log.info(f"Failed to connect to {resolved} {e}")
            await asyncio.sleep(3)

    return asyncio.create_task(_keep_connected())
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Flax-Network/flax-blockchain | https://github.com/Flax-Network/flax-blockchain/blob/bb8715f3155bb8011a04cc8c05b3fa8133e4c64b/flax/server/start_service.py | flax/server/start_service.py | import asyncio
import functools
import os
import logging
import logging.config
import signal
import sys
from typing import Any, Callable, Coroutine, Dict, Generic, List, Optional, Tuple, Type, TypeVar
from flax.daemon.server import service_launch_lock_path
from flax.util.lock import Lockfile, LockfileError
from flax.server.ssl_context import flax_ssl_ca_paths, private_ssl_ca_paths
from ..protocols.shared_protocol import capabilities
try:
import uvloop
except ImportError:
uvloop = None
from flax.cmds.init_funcs import flax_full_version_str
from flax.rpc.rpc_server import RpcApiProtocol, RpcServiceProtocol, start_rpc_server, RpcServer
from flax.server.outbound_message import NodeType
from flax.server.server import FlaxServer
from flax.server.upnp import UPnP
from flax.types.peer_info import PeerInfo
from flax.util.setproctitle import setproctitle
from flax.util.ints import uint16
from .reconnect_task import start_reconnect_task
# this is used to detect whether we are running in the main process or not, in
# signal handlers. We need to ignore signals in the sub processes.
main_pid: Optional[int] = None

T = TypeVar("T")
# Bound TypeVar: the concrete node type a Service wraps must satisfy RpcServiceProtocol.
_T_RpcServiceProtocol = TypeVar("_T_RpcServiceProtocol", bound=RpcServiceProtocol)

# (RPC API class, RPC listen port) pair used to start a service's RPC server.
RpcInfo = Tuple[Type[RpcApiProtocol], int]
class ServiceException(Exception):
    """Raised on service-lifecycle misuse, e.g. adding the same peer twice."""

    pass
class Service(Generic[_T_RpcServiceProtocol]):
    """Generic wrapper that runs one flax node (full node, wallet, data layer, ...).

    Owns the node's FlaxServer, optional RPC server, UPnP port mappings,
    per-peer reconnect tasks, and signal-driven shutdown. Construct it, then
    either call run() (which takes the launch lock) or start()/wait_closed().
    """

    def __init__(
        self,
        root_path,
        node: _T_RpcServiceProtocol,
        peer_api: Any,
        node_type: NodeType,
        advertised_port: int,
        service_name: str,
        network_id: str,
        *,
        config: Dict[str, Any],
        upnp_ports: Optional[List[int]] = None,
        server_listen_ports: Optional[List[int]] = None,
        connect_peers: Optional[List[PeerInfo]] = None,
        on_connect_callback: Optional[Callable] = None,
        rpc_info: Optional[RpcInfo] = None,
        connect_to_daemon=True,
        max_request_body_size: Optional[int] = None,
        override_capabilities: Optional[List[Tuple[uint16, str]]] = None,
    ) -> None:
        # Fix for mutable default arguments: use None sentinels so each
        # instance gets its own fresh list (backward compatible — omitted
        # arguments behave exactly as the old `[]` defaults did).
        if upnp_ports is None:
            upnp_ports = []
        if server_listen_ports is None:
            server_listen_ports = []
        if connect_peers is None:
            connect_peers = []

        self.root_path = root_path
        self.config = config
        ping_interval = self.config.get("ping_interval")
        self.self_hostname = self.config.get("self_hostname")
        self.daemon_port = self.config.get("daemon_port")
        assert ping_interval is not None
        self._connect_to_daemon = connect_to_daemon
        self._node_type = node_type
        self._service_name = service_name
        self.rpc_server: Optional[RpcServer] = None
        self._rpc_close_task: Optional[asyncio.Task] = None
        self._network_id: str = network_id
        self.max_request_body_size = max_request_body_size

        self._log = logging.getLogger(service_name)
        self._log.info(f"flax-blockchain version: {flax_full_version_str()}")

        self.service_config = self.config[service_name]

        self._rpc_info = rpc_info
        private_ca_crt, private_ca_key = private_ssl_ca_paths(root_path, self.config)
        flax_ca_crt, flax_ca_key = flax_ssl_ca_paths(root_path, self.config)
        inbound_rlp = self.config.get("inbound_rate_limit_percent")
        outbound_rlp = self.config.get("outbound_rate_limit_percent")
        if node_type == NodeType.WALLET:
            # Wallets may override the inbound limit per-service and always
            # cap outbound traffic at 60% of the configured limits.
            inbound_rlp = self.service_config.get("inbound_rate_limit_percent", inbound_rlp)
            outbound_rlp = 60
        capabilities_to_use: List[Tuple[uint16, str]] = capabilities
        if override_capabilities is not None:
            capabilities_to_use = override_capabilities

        assert inbound_rlp and outbound_rlp
        self._server = FlaxServer(
            advertised_port,
            node,
            peer_api,
            node_type,
            ping_interval,
            network_id,
            inbound_rlp,
            outbound_rlp,
            capabilities_to_use,
            root_path,
            self.service_config,
            (private_ca_crt, private_ca_key),
            (flax_ca_crt, flax_ca_key),
            name=f"{service_name}_server",
        )
        f = getattr(node, "set_server", None)
        if f:
            f(self._server)
        else:
            self._log.warning(f"No set_server method for {service_name}")

        self._upnp_ports = upnp_ports
        self._server_listen_ports = server_listen_ports

        self._api = peer_api
        self._node = node
        self._did_start = False
        self._is_stopping = asyncio.Event()
        self._stopped_by_rpc = False

        self._on_connect_callback = on_connect_callback
        self._advertised_port = advertised_port
        # Peers we keep reconnecting to; value is the reconnect task once started.
        self._reconnect_tasks: Dict[PeerInfo, Optional[asyncio.Task]] = {peer: None for peer in connect_peers}
        self.upnp: UPnP = UPnP()

    async def start(self) -> None:
        """Start the node, UPnP mappings, server, reconnect tasks and RPC server.

        Idempotent: a second call while started is a no-op.
        """
        # TODO: move those parameters to `__init__`
        if self._did_start:
            return None

        assert self.self_hostname is not None
        assert self.daemon_port is not None

        self._did_start = True

        await self._node._start()
        self._node._shut_down = False

        if len(self._upnp_ports) > 0:
            self.upnp.setup()

            for port in self._upnp_ports:
                self.upnp.remap(port)

        await self._server.start_server(self.config.get("prefer_ipv6", False), self._on_connect_callback)
        self._advertised_port = self._server.get_port()

        for peer in self._reconnect_tasks.keys():
            self.add_peer(peer)

        self._log.info(f"Started {self._service_name} service on network_id: {self._network_id}")

        self._rpc_close_task = None
        if self._rpc_info:
            rpc_api, rpc_port = self._rpc_info
            self.rpc_server = await start_rpc_server(
                rpc_api(self._node),
                self.self_hostname,
                self.daemon_port,
                uint16(rpc_port),
                self.stop,
                self.root_path,
                self.config,
                self._connect_to_daemon,
                max_request_body_size=self.max_request_body_size,
            )

    async def run(self) -> None:
        """Run the service to completion, holding a per-service launch lockfile.

        Raises:
            ValueError: if another instance of this service already holds the lock.
        """
        try:
            with Lockfile.create(service_launch_lock_path(self.root_path, self._service_name), timeout=1):
                await self.start()
                await self.wait_closed()
        except LockfileError as e:
            self._log.error(f"{self._service_name}: already running")
            raise ValueError(f"{self._service_name}: already running") from e

    def add_peer(self, peer: PeerInfo) -> None:
        """Register *peer* and start its background reconnect task.

        Raises:
            ServiceException: if a reconnect task for this peer already exists.
        """
        if self._reconnect_tasks.get(peer) is not None:
            raise ServiceException(f"Peer {peer} already added")

        self._reconnect_tasks[peer] = start_reconnect_task(
            self._server, peer, self._log, self.config.get("prefer_ipv6")
        )

    async def setup_process_global_state(self) -> None:
        # Being async forces this to be run from within an active event loop as is
        # needed for the signal handler setup.
        proctitle_name = f"flax_{self._service_name}"
        setproctitle(proctitle_name)

        global main_pid
        main_pid = os.getpid()
        if sys.platform == "win32" or sys.platform == "cygwin":
            # pylint: disable=E1101
            signal.signal(signal.SIGBREAK, self._accept_signal)
            signal.signal(signal.SIGINT, self._accept_signal)
            signal.signal(signal.SIGTERM, self._accept_signal)
        else:
            loop = asyncio.get_running_loop()
            loop.add_signal_handler(
                signal.SIGINT,
                functools.partial(self._accept_signal, signal_number=signal.SIGINT),
            )
            loop.add_signal_handler(
                signal.SIGTERM,
                functools.partial(self._accept_signal, signal_number=signal.SIGTERM),
            )

    def _accept_signal(self, signal_number: int, stack_frame=None):
        """Signal handler: trigger stop(), but only in the main process."""
        self._log.info(f"got signal {signal_number}")

        # we only handle signals in the main process. In the ProcessPoolExecutor
        # processes, we have to ignore them. We'll shut them down gracefully
        # from the main process
        global main_pid
        if os.getpid() != main_pid:
            return
        self.stop()

    def stop(self) -> None:
        """Begin an orderly shutdown; idempotent. wait_closed() completes it."""
        if not self._is_stopping.is_set():
            self._is_stopping.set()

            # start with UPnP, since this can take a while, we want it to happen
            # in the background while shutting down everything else
            for port in self._upnp_ports:
                self.upnp.release(port)

            self._log.info("Cancelling reconnect task")
            for task in self._reconnect_tasks.values():
                if task is not None:
                    task.cancel()
            self._reconnect_tasks.clear()
            self._log.info("Closing connections")
            self._server.close_all()
            self._node._close()
            self._node._shut_down = True

            self._log.info("Calling service stop callback")

            if self.rpc_server is not None:
                self._log.info("Closing RPC server")
                self.rpc_server.close()

    async def wait_closed(self) -> None:
        """Wait for stop() to be requested, then tear everything down and reset
        the service so it could be started again."""
        await self._is_stopping.wait()

        self._log.info("Waiting for socket to be closed (if opened)")

        self._log.info("Waiting for FlaxServer to be closed")
        await self._server.await_closed()

        if self.rpc_server:
            self._log.info("Waiting for RPC server")
            await self.rpc_server.await_closed()
            self._log.info("Closed RPC server")

        self._log.info("Waiting for service _await_closed callback")
        await self._node._await_closed()
        # this is a blocking call, waiting for the UPnP thread to exit
        self.upnp.shutdown()

        self._did_start = False
        self._is_stopping.clear()
        self._log.info(f"Service {self._service_name} at port {self._advertised_port} fully closed")
def async_run(coro: Coroutine[object, object, T]) -> T:
    """Run *coro* to completion, installing uvloop's event loop when available."""
    if uvloop is not None:
        uvloop.install()
    return asyncio.run(coro)
| python | Apache-2.0 | bb8715f3155bb8011a04cc8c05b3fa8133e4c64b | 2026-01-05T07:13:52.951017Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.